| prompt (string, lengths 19 to 879k) | completion (string, lengths 3 to 53.8k) | api (string, lengths 8 to 59) |
---|---|---|
import utm as UTM
import unittest
import numpy as np
class UTMTestCase(unittest.TestCase):
def assert_utm_equal(self, a, b):
self.assertTrue(np.allclose(a[0], b[0]))
self.assertTrue(np.allclose(a[1], b[1]))
self.assertEqual(a[2], b[2])
self.assertEqual(a[3].upper(), b[3].upper())
def assert_latlon_equal(self, a, b):
self.assertTrue(np.allclose(a[0], b[0], rtol=1e-4, atol=1e-4))
self.assertTrue(np.allclose(a[1], b[1], rtol=1e-4, atol=1e-4))
class KnownValues(UTMTestCase):
known_values = [
# Aachen, Germany
(
(50.77535, 6.08389),
(294409, 5628898, 32, 'U'),
{'northern': True},
),
# New York, USA
(
(40.71435, -74.00597),
(583960, 4507523, 18, 'T'),
{'northern': True},
),
# Wellington, New Zealand
(
(-41.28646, 174.77624),
(313784, 5427057, 60, 'G'),
{'northern': False},
),
# Cape Town, South Africa
(
(-33.92487, 18.42406),
(261878, 6243186, 34, 'H'),
{'northern': False},
),
# Mendoza, Argentina
(
(-32.89018, -68.84405),
(514586, 6360877, 19, 'h'),
{'northern': False},
),
# Fairbanks, Alaska, USA
(
(64.83778, -147.71639),
(466013, 7190568, 6, 'W'),
{'northern': True},
),
# <NAME>, Scotland, UK
(
(56.79680, -5.00601),
(377486, 6296562, 30, 'V'),
{'northern': True},
),
# Latitude 84
(
(84, -5.00601),
(476594, 9328501, 30, 'X'),
{'northern': True},
),
]
def test_from_latlon(self):
lats = np.array([0.0, 3.0, 6.0])
lons = np.array([0.0, 1.0, 3.4])
result = UTM.from_latlon(lats, lons)
self.assert_utm_equal((np.array([166021.44317933032,
277707.83075574087,
544268.12794623]),
np.array([0.0,
331796.29167519242,
663220.7198366751]),
31, 'N'), result)
for latlon, utm, _ in self.known_values:
result = UTM.from_latlon(*[np.array([x]) for x in latlon])
self.assert_utm_equal(utm, result)
def test_to_latlon(self):
result = UTM.to_latlon(np.array([166021.44317933032,
277707.83075574087,
544268.12794623]),
np.array([0.0,
331796.29167519242,
663220.7198366751]),
31, 'N')
self.assert_latlon_equal((np.array([0.0, 3.0, 6.0]),
np.array([0.0, 1.0, 3.4])),
result)
for latlon, utm, utm_kw in self.known_values:
utm = [np.array([x]) for x in utm[:2]] + list(utm[2:])
result = UTM.to_latlon(*utm)
self.assert_latlon_equal(latlon, result)
class BadInput(UTMTestCase):
def test_from_latlon_range_checks(self):
'''from_latlon should fail with out-of-bounds input'''
self.assertRaises(UTM.OutOfRangeError, UTM.from_latlon,
np.array(-100), np.array(0))
self.assertRaises(UTM.OutOfRangeError, UTM.from_latlon,
np.array(-80.1), np.array(0))
for i in range(-8000, 8400):
UTM.from_latlon(np.array(i / 100.0), np.array(0))
self.assertRaises(UTM.OutOfRangeError, UTM.from_latlon,
np.array(84.1), np.array(0))
self.assertRaises(UTM.OutOfRangeError, UTM.from_latlon,
np.array(100), np.array(0))
self.assertRaises(UTM.OutOfRangeError, UTM.from_latlon,
np.array(0), np.array(-300))
self.assertRaises(UTM.OutOfRangeError, UTM.from_latlon,
np.array(0), np.array(-180.1))
for i in range(-18000, 18000):
UTM.from_latlon(np.array(0), np.array(i / 100.0))
self.assertRaises(UTM.OutOfRangeError, UTM.from_latlon,
np.array(0), np.array(180.1))
self.assertRaises(UTM.OutOfRangeError, UTM.from_latlon,
np.array(0), np.array(300))
self.assertRaises(UTM.OutOfRangeError, UTM.from_latlon,
np.array(-100), np.array(-300))
self.assertRaises(UTM.OutOfRangeError, UTM.from_latlon,
np.array(100), np.array(-300))
self.assertRaises(UTM.OutOfRangeError, UTM.from_latlon,
np.array(-100), np.array(300))
self.assertRaises(UTM.OutOfRangeError, UTM.from_latlon,
np.array(100), np.array(300))
# test forcing zone ranges
# NYC should be zone 18T
self.assertRaises(UTM.OutOfRangeError, UTM.from_latlon,
np.array(40.71435), np.array(-74.00597), 70, 'T')
self.assertRaises(UTM.OutOfRangeError, UTM.from_latlon,
np.array(40.71435), np.array(-74.00597), 18, 'A')
def test_to_latlon_range_checks(self):
'''to_latlon should fail with out-of-bounds input'''
# test easting range
self.assertRaises(
UTM.OutOfRangeError, UTM.to_latlon, np.array(0), np.array(5000000), 32, 'U')
self.assertRaises(
UTM.OutOfRangeError, UTM.to_latlon, np.array(99999), np.array(5000000), 32, 'U')
for i in range(100000, 999999, 1000):
UTM.to_latlon(np.array(i), np.array(5000000), 32, 'U')
self.assertRaises(
UTM.OutOfRangeError, UTM.to_latlon, np.array(1000000), | np.array(5000000) | numpy.array |
#! /usr/bin/env python
"""
Module with frame px resampling/rescaling functions.
"""
__author__ = '<NAME>, <NAME>, <NAME>'
__all__ = ['frame_px_resampling',
'cube_px_resampling',
'cube_rescaling_wavelengths',
'frame_rescaling',
'check_scal_vector',
'find_scal_vector',
'scale_fft']
import numpy as np
import warnings
try:
import cv2
no_opencv = False
except ImportError:
warnings.warn("Opencv python bindings are missing.", ImportWarning)
no_opencv = True
from scipy.ndimage import geometric_transform, zoom
from scipy.optimize import minimize
from ..var import frame_center, get_square
from .subsampling import cube_collapse
from .recentering import frame_shift
from .cosmetics import frame_crop
def cube_px_resampling(array, scale, imlib='vip-fft', interpolation='lanczos4',
keep_center=False, verbose=True):
"""
Resample the frames of a cube with a single scale factor. Can deal with NaN
values.
Wrapper of ``frame_px_resampling``. Useful when we need to upsample
(upscaling) or downsample (pixel binning) a set of frames, e.g. an ADI cube.
Parameters
----------
array : 3d numpy ndarray
Input cube, 3d array.
scale : int, float or tuple
Scale factor for upsampling or downsampling the frames in the cube. If
a tuple it corresponds to the scale along x and y.
imlib : str, optional
See the documentation of the ``vip_hci.preproc.frame_px_resampling``
function.
interpolation : str, optional
See the documentation of the ``vip_hci.preproc.frame_px_resampling``
function.
keep_center: bool, opt
If input dimensions are even and the star centered (i.e. on
dim//2, dim//2), whether to keep the star centered after scaling, i.e.
on (new_dim//2, new_dim//2). For a non-centered input cube, better to
leave it to False.
verbose : bool, optional
Whether to print out additional info such as the new cube shape.
Returns
-------
array_resc : numpy ndarray
Output cube with resampled frames.
"""
if array.ndim != 3:
raise TypeError('Input array is not a cube or 3d array.')
array_resc = []
for i in range(array.shape[0]):
imresc = frame_px_resampling(array[i], scale=scale, imlib=imlib,
interpolation=interpolation,
keep_center=keep_center)
array_resc.append(imresc)
array_resc = np.array(array_resc)
if verbose:
print("Cube successfully rescaled")
print("New shape: {}".format(array_resc.shape))
return array_resc
def frame_px_resampling(array, scale, imlib='vip-fft', interpolation='lanczos4',
keep_center=False, verbose=False):
"""
Resample the pixels of a frame changing the frame size.
Can deal with NaN values.
If ``scale`` < 1 then the frame is downsampled and if ``scale`` > 1 then its
pixels are upsampled.
Warning: if imlib is not 'vip-fft', the input size is even and keep_center
is set to True, an additional interpolation (a shift by (0.5, 0.5) px) may
occur after rescaling, to ensure the center location stays on (dim//2, dim//2).
Parameters
----------
array : numpy ndarray
Input frame, 2d array.
scale : int, float or tuple
Scale factor for upsampling or downsampling the frame. If a tuple it
corresponds to the scale along x and y.
imlib : {'ndimage', 'opencv', 'vip-fft'}, optional
Library used for image transformations. 'vip-fft' corresponds to a
FFT-based rescaling algorithm implemented in VIP
(``vip_hci.preproc.scale_fft``).
interpolation : str, optional
For 'ndimage' library: 'nearneig', 'bilinear', 'biquadratic', 'bicubic',
'biquartic', 'biquintic'. The 'nearneig' interpolation is the fastest
and the 'biquintic' the slowest. The 'nearneig' is the worst
option for interpolation of noisy astronomical images.
For 'opencv' library: 'nearneig', 'bilinear', 'bicubic', 'lanczos4'.
The 'nearneig' interpolation is the fastest and the 'lanczos4' the
slowest but most accurate.
keep_center: bool, opt
If input dimensions are even and the star centered (i.e. on
dim//2, dim//2), whether to keep the star centered after scaling, i.e.
on (new_dim//2, new_dim//2). For a non-centered input frame, better to
leave it to False.
verbose : bool, optional
Whether to print out additional info such as the new image shape.
Returns
-------
array_resc : numpy ndarray
Output resampled frame.
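Examples
--------
A minimal, illustrative sketch (not part of the original docstring); exact
pixel values depend on the chosen ``imlib`` and ``interpolation``::

    import numpy as np
    img = np.arange(100, dtype=float).reshape(10, 10)
    # upsample by 2 along both axes -> (20, 20) output
    big = frame_px_resampling(img, scale=2, imlib='ndimage',
                              interpolation='bilinear')
    # downsample back by 0.5 -> (10, 10); total flux approximately conserved
    small = frame_px_resampling(big, scale=0.5, imlib='ndimage',
                                interpolation='bilinear')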
"""
if array.ndim != 2:
raise TypeError('Input array is not a frame or 2d array')
if isinstance(scale, tuple):
scale_x, scale_y = scale
elif isinstance(scale, (float, int)):
scale_x = scale
scale_y = scale
else:
raise TypeError('`scale` must be float, int or tuple')
# Replace any NaN with real values before scaling
mask = None
nan_mask = np.isnan(array)
if np.any(nan_mask):
medval = np.nanmedian(array)
array[nan_mask] = medval
mask = np.zeros_like(array)
mask[nan_mask] = 1
if array.shape[0] % 2:
odd = True
else:
odd = False
# expected output size
out_sz = int(round(array.shape[0]*scale_y)
), int(round(array.shape[1]*scale_x))
if not odd and keep_center and imlib != 'vip-fft':
def _make_odd(img):
img_odd = np.zeros([img.shape[0]+1, img.shape[1]+1])
img_odd[:-1, :-1] = img.copy()
img_odd[-1, :-1] = img[-1].copy()
img_odd[:-1, -1] = img[:, -1].copy()
img_odd[-1, -1] = np.mean([img[-1, -2], img[-2, -1], img[-2, -2]])
return img_odd
array = _make_odd(array)
if mask is not None:
mask = _make_odd(mask)
if imlib == 'ndimage':
if interpolation == 'nearneig':
order = 0
elif interpolation == 'bilinear':
order = 1
elif interpolation == 'biquadratic':
order = 2
elif interpolation == 'bicubic':
order = 3
elif interpolation == 'biquartic' or interpolation == 'lanczos4':
order = 4
elif interpolation == 'biquintic':
order = 5
else:
raise TypeError('Scipy.ndimage interpolation method not recognized')
if mask is not None:
mask = zoom(mask, zoom=(scale_y, scale_x), order=order)
array_resc = zoom(array, zoom=(scale_y, scale_x), order=order)
# For flux conservation:
array_resc /= scale_y * scale_x
elif imlib == 'opencv':
if no_opencv:
msg = 'Opencv python bindings cannot be imported. Install opencv or'
msg += ' set imlib to ndimage'
raise RuntimeError(msg)
if interpolation == 'bilinear':
intp = cv2.INTER_LINEAR
elif interpolation == 'bicubic':
intp = cv2.INTER_CUBIC
elif interpolation == 'nearneig':
intp = cv2.INTER_NEAREST
elif interpolation == 'lanczos4':
intp = cv2.INTER_LANCZOS4
else:
raise TypeError('Opencv interpolation method not recognized')
if mask is not None:
mask = cv2.resize(mask.astype(np.float32), (0, 0), fx=scale_x,
fy=scale_y, interpolation=intp)
array_resc = cv2.resize(array.astype(np.float32), (0, 0), fx=scale_x,
fy=scale_y, interpolation=intp)
# For flux conservation:
array_resc /= scale_y * scale_x
elif imlib == 'vip-fft':
if scale_x != scale_y:
msg = 'FFT scaling only supports identical factors along x and y'
raise ValueError(msg)
if array.shape[0] != array.shape[1]:
msg = 'FFT scaling only supports square input arrays'
raise ValueError(msg)
# make array with even dimensions before FFT-scaling
if odd:
array_even = np.zeros([array.shape[0]+1, array.shape[1]+1])
array_even[1:, 1:] = array
array = array_even
if mask is not None:
if odd:
mask_even = np.zeros([mask.shape[0]+1, mask.shape[1]+1])
mask_even[1:, 1:] = mask
mask = mask_even
mask = scale_fft(mask, scale_x)
if odd:
mask_odd = np.zeros([mask.shape[0]-1, mask.shape[1]-1])
mask_odd = mask[1:, 1:]
mask = mask_odd
array_resc = scale_fft(array, scale_x)
if odd:
array = np.zeros([array_resc.shape[0]-1, array_resc.shape[1]-1])
array = array_resc[1:, 1:]
array_resc = array
# Note: FFT preserves flux - no need to scale flux separately
else:
raise ValueError('Image transformation library not recognized')
# Place back NaN values in scaled array
if mask is not None:
array_resc[mask >= 0.5] = np.nan
if keep_center and not array_resc.shape[0] % 2 and imlib != 'vip-fft':
if imlib == 'ndimage':
imlib_s = 'ndimage-interp'
else:
imlib_s = imlib
array_resc = frame_shift(array_resc, 0.5, 0.5, imlib_s, interpolation)
if array_resc.shape != out_sz and imlib != 'vip-fft':
if out_sz[0] == out_sz[1]:
if out_sz[0] < array_resc.shape[0]:
array_resc = frame_crop(array_resc, out_sz[0], force=True,
verbose=False)
else:
# crop manually along each axis
cy, cx = frame_center(array_resc)
wing_y = (out_sz[0]-1)/2
y0 = int(cy-wing_y)
yN = int(cy+wing_y+1)
wing_x = (out_sz[1]-1)/2
x0 = int(cx-wing_x)
xN = int(cx+wing_x+1)
array_resc = array_resc[y0:yN, x0:xN]
if verbose:
print("Image successfully rescaled")
print("New shape: {}".format(array_resc.shape))
return array_resc
def cube_rescaling_wavelengths(cube, scal_list, full_output=True, inverse=False,
y_in=None, x_in=None, imlib='vip-fft',
interpolation='lanczos4', collapse='median',
pad_mode='reflect'):
"""
Scale/Descale a cube by scal_list, with padding. Can deal with NaN values.
Wrapper to scale or descale a cube by factors given in scal_list,
without any loss of information (zero-padding if scaling > 1).
Important: in case of IFS data, the scaling factors in scal_list should be
>= 1 (i.e. provide the scaling factors as if scaling to the longest
wavelength channel).
Parameters
----------
cube: 3D-array
Data cube with frames to be rescaled.
scal_list: 1D-array
Vector of same dimension as the first dimension of datacube, containing
the scaling factor for each frame.
full_output: bool, optional
Whether to output just the rescaled cube (False) or also its median,
the new y and x shapes of the cube, and the new centers cy and cx of the
frames (True).
inverse: bool, optional
Whether to invert the scaling factors in scal_list before applying them;
i.e. True descales the cube (typically after a first scaling has already
been performed).
y_in, x_in: int
Initial y and x sizes, required for ``inverse=True``. In case the cube is
descaled, these values will be used to crop back the cubes/frames to
their original size.
imlib : {'opencv', 'ndimage', 'vip-fft'}, str optional
Library used for image transformations. Opencv is faster than ndimage or
skimage. 'vip-fft' corresponds to a FFT-based rescaling algorithm
implemented in VIP (``vip_hci.preproc.scale_fft``).
interpolation : str, optional
For 'ndimage' library: 'nearneig', 'bilinear', 'biquadratic', 'bicubic',
'biquartic', 'biquintic'. The 'nearneig' interpolation is the fastest
and the 'biquintic' the slowest. The 'nearneig' is the worst
option for interpolation of noisy astronomical images.
For 'opencv' library: 'nearneig', 'bilinear', 'bicubic', 'lanczos4'.
The 'nearneig' interpolation is the fastest and the 'lanczos4' the
slowest but most accurate. 'lanczos4' is the default.
collapse : {'median', 'mean', 'sum', 'trimmean'}, str optional
Sets the way of collapsing the frames for producing a final image.
pad_mode : str, optional
One of the following string values:
``'constant'``
pads with a constant value
``'edge'``
pads with the edge values of array
``'linear_ramp'``
pads with the linear ramp between end_value and the array edge
value.
``'maximum'``
pads with the maximum value of all or part of the vector along
each axis
``'mean'``
pads with the mean value of all or part of the vector along each
axis
``'median'``
pads with the median value of all or part of the vector along
each axis
``'minimum'``
pads with the minimum value of all or part of the vector along
each axis
``'reflect'``
pads with the reflection of the vector mirrored on the first and
last values of the vector along each axis
``'symmetric'``
pads with the reflection of the vector mirrored along the edge
of the array
``'wrap'``
pads with the wrap of the vector along the axis. The first
values are used to pad the end and the end values are used to
pad the beginning
Returns
-------
frame: 2d array
The median of the rescaled cube.
cube : 3d array
[full_output] rescaled cube
frame : 2d array
[full_output] median of the rescaled cube
y,x,cy,cx : float
[full_output] New y and x shapes of the cube, and the new centers cy and
cx of the frames
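Examples
--------
A hypothetical IFS-style usage sketch (variable names are illustrative,
not taken from the original code)::

    import numpy as np
    n_ch, ny, nx = 5, 40, 40
    cube = np.random.randn(n_ch, ny, nx)
    lbdas = np.linspace(1.0, 1.6, n_ch)
    scal_list = lbdas[-1] / lbdas            # factors >= 1
    # scale all channels to the longest wavelength
    cube_sc, frame, y, x, cy, cx = cube_rescaling_wavelengths(
        cube, scal_list, full_output=True)
    # descale afterwards, cropping back to the original frame size
    cube_desc, frame_d, *_ = cube_rescaling_wavelengths(
        cube_sc, scal_list, inverse=True, y_in=ny, x_in=nx)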
"""
n, y, x = cube.shape
max_sc = np.amax(scal_list)
if not inverse and max_sc > 1:
new_y = int(np.ceil(max_sc * y))
new_x = int(np.ceil(max_sc * x))
if (new_y - y) % 2 != 0:
new_y += 1
if (new_x - x) % 2 != 0:
new_x += 1
pad_len_y = (new_y - y) // 2
pad_len_x = (new_x - x) // 2
pad_width = ((0, 0), (pad_len_y, pad_len_y), (pad_len_x, pad_len_x))
big_cube = np.pad(cube, pad_width, pad_mode)
else:
big_cube = cube.copy()
n, y, x = big_cube.shape
cy, cx = frame_center(big_cube[0])
if inverse:
scal_list = 1. / scal_list
cy, cx = frame_center(cube[0])
# (de)scale the cube, so that a planet would now move radially
cube = _cube_resc_wave(big_cube, scal_list, ref_xy=(cx, cy),
imlib=imlib, interpolation=interpolation)
frame = cube_collapse(cube, collapse)
if inverse and max_sc > 1:
if y_in is None or x_in is None:
raise ValueError("You need to provide y_in and x_in when "
"inverse=True!")
siz = max(y_in, x_in)
if frame.shape[0] > siz:
frame = get_square(frame, siz, cy, cx)
if full_output and cube.shape[-1] > siz:
n_z = cube.shape[0]
array_old = cube.copy()
cube = np.zeros([n_z, siz, siz])
for zz in range(n_z):
cube[zz] = get_square(array_old[zz], siz, cy, cx)
if full_output:
return cube, frame, y, x, cy, cx
else:
return frame
def _scale_func(output_coords, ref_xy=0, scaling=1.0, scale_y=None,
scale_x=None):
"""
For each coordinate point in a new scaled image (output_coords),
coordinates in the image before the scaling are returned. This scaling
function is used within geometric_transform which, for each point in the
output image, will compute the (spline) interpolated value at the
corresponding frame coordinates before the scaling.
"""
ref_x, ref_y = ref_xy
if scale_y is None:
scale_y = scaling
if scale_x is None:
scale_x = scaling
return (ref_y + (output_coords[0] - ref_y) / scale_y,
ref_x + (output_coords[1] - ref_x) / scale_x)
def frame_rescaling(array, ref_xy=None, scale=1.0, imlib='vip-fft',
interpolation='lanczos4', scale_y=None, scale_x=None):
"""
Rescale a frame by a factor wrt a reference point.
The reference point is by default the center of the frame (typically the
exact location of the star). Unlike ``frame_px_resampling``, the output
keeps the same dimensions as the input.
Parameters
----------
array : numpy ndarray
Input frame, 2d array.
ref_xy : float, optional
Coordinates X,Y of the point wrt which the rescaling will be
applied. By default the rescaling is done with respect to the center
of the frame.
scale : float
Scaling factor. If > 1, it will upsample the input array equally
along y and x by this factor.
imlib : {'ndimage', 'opencv', 'vip-fft'}, optional
Library used for image transformations. 'vip-fft' corresponds to a
FFT-based rescaling algorithm implemented in VIP
(``vip_hci.preproc.scale_fft``).
interpolation : str, optional
For 'ndimage' library: 'nearneig', 'bilinear', 'biquadratic', 'bicubic',
'biquartic', 'biquintic'. The 'nearneig' interpolation is the fastest
and the 'biquintic' the slowest. The 'nearneig' is the worst
option for interpolation of noisy astronomical images.
For 'opencv' library: 'nearneig', 'bilinear', 'bicubic', 'lanczos4'.
The 'nearneig' interpolation is the fastest and the 'lanczos4' the
slowest but most accurate.
scale_y : float
Scaling factor only for y axis. If provided, it takes priority over
the ``scale`` parameter.
scale_x : float
Scaling factor only for x axis. If provided, it takes priority over
the ``scale`` parameter.
Returns
-------
array_out : numpy ndarray
Resulting frame.
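Examples
--------
A minimal sketch (illustrative values only); note that, unlike
``frame_px_resampling``, the output keeps the input dimensions::

    import numpy as np
    img = np.zeros((41, 41))
    img[20, 20] = 1.
    # stretch by 1.2 about the frame center; shape stays (41, 41)
    img_sc = frame_rescaling(img, scale=1.2, imlib='ndimage',
                             interpolation='bilinear')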
"""
if array.ndim != 2:
raise TypeError('Input array is not a frame or 2d array.')
if scale_y is None:
scale_y = scale
if scale_x is None:
scale_x = scale
outshape = array.shape
if ref_xy is None:
ref_xy = frame_center(array)
else:
if imlib == 'vip-fft' and ref_xy != frame_center(array):
msg = "'vip-fft' imlib does not yet allow a custom center to be "
msg += "provided"
raise ValueError(msg)
# Replace any NaN with real values before scaling
mask = None
nan_mask = np.isnan(array)
if np.any(nan_mask):
medval = np.nanmedian(array)
array[nan_mask] = medval
mask = np.zeros_like(array)
mask[nan_mask] = 1
if imlib == 'ndimage':
if interpolation == 'nearneig':
order = 0
elif interpolation == 'bilinear':
order = 1
elif interpolation == 'biquadratic':
order = 2
elif interpolation == 'bicubic':
order = 3
elif interpolation == 'biquartic' or interpolation == 'lanczos4':
order = 4
elif interpolation == 'biquintic':
order = 5
else:
raise TypeError(
'Scipy.ndimage interpolation method not recognized')
array_out = geometric_transform(array, _scale_func, order=order,
output_shape=outshape,
extra_keywords={'ref_xy': ref_xy,
'scaling': scale,
'scale_y': scale_y,
'scale_x': scale_x})
array_out /= scale_y * scale_x
elif imlib == 'opencv':
if no_opencv:
msg = 'Opencv python bindings cannot be imported. Install opencv'
msg += ' or set imlib to ndimage'
raise RuntimeError(msg)
if interpolation == 'bilinear':
intp = cv2.INTER_LINEAR
elif interpolation == 'bicubic':
intp = cv2.INTER_CUBIC
elif interpolation == 'nearneig':
intp = cv2.INTER_NEAREST
elif interpolation == 'lanczos4':
intp = cv2.INTER_LANCZOS4
else:
raise TypeError('Opencv interpolation method not recognized')
M = np.array([[scale_x, 0, (1. - scale_x) * ref_xy[0]],
[0, scale_y, (1. - scale_y) * ref_xy[1]]])
array_out = cv2.warpAffine(array.astype(np.float32), M, outshape,
flags=intp)
array_out /= scale_y * scale_x
elif imlib == 'vip-fft':
if scale_x != scale_y:
msg = 'FFT scaling only supports identical factors along x and y'
raise ValueError(msg)
if array.shape[0] != array.shape[1]:
msg = 'FFT scaling only supports square input arrays'
raise ValueError(msg)
# make array with even dimensions before FFT-scaling
if array.shape[0] % 2:
odd = True
array_even = np.zeros([array.shape[0]+1, array.shape[1]+1])
array_even[1:, 1:] = array
array = array_even
else:
odd = False
if mask is not None:
if odd:
mask_even = np.zeros([mask.shape[0]+1, mask.shape[1]+1])
mask_even[1:, 1:] = mask
mask = mask_even
mask = scale_fft(mask, scale_x, ori_dim=True)
if odd:
mask_odd = np.zeros([mask.shape[0]-1, mask.shape[1]-1])
mask_odd = mask[1:, 1:]
mask = mask_odd
array_out = scale_fft(array, scale_x, ori_dim=True)
if odd:
array = np.zeros([array_out.shape[0]-1, array_out.shape[1]-1])
array = array_out[1:, 1:]
array_out = array
else:
raise ValueError('Image transformation library not recognized')
# Place back NaN values in scaled array
if mask is not None:
array_out[mask >= 0.5] = np.nan
return array_out
def _cube_resc_wave(array, scaling_list, ref_xy=None, imlib='vip-fft',
interpolation='lanczos4', scaling_y=None, scaling_x=None):
"""
Rescale a cube by factors from ``scaling_list`` wrt a position.
Parameters
----------
array : numpy ndarray
Input 3d array, cube.
scaling_list : 1D-array
Scale corresponding to each frame in the cube.
ref_xy : float, optional
Coordinates X,Y of the point with respect to which the rescaling will be
performed. By default the rescaling is done with respect to the center
of the frames; central pixel if the frames have odd size.
imlib : str optional
See the documentation of ``vip_hci.preproc.cube_rescaling_wavelengths``.
interpolation : str, optional
See the documentation of ``vip_hci.preproc.cube_rescaling_wavelengths``.
scaling_y : 1D-array or list
Scaling factor only for y axis. If provided, it takes priority over
``scaling_list``.
scaling_x : 1D-array or list
Scaling factor only for x axis. If provided, it takes priority over
``scaling_list``.
Returns
-------
array_sc : numpy ndarray
Resulting cube with rescaled frames.
"""
if array.ndim != 3:
raise TypeError('Input array is not a cube or 3d array')
array_sc = []
if scaling_list is None:
scaling_list = [None]*array.shape[0]
for i in range(array.shape[0]):
array_sc.append(frame_rescaling(array[i], ref_xy=ref_xy,
scale=scaling_list[i], imlib=imlib,
interpolation=interpolation,
scale_y=scaling_y, scale_x=scaling_x))
return np.array(array_sc)
def check_scal_vector(scal_vec):
"""
Checks that the scaling factor list has the right format (i.e. all factors
>= 1). If not, it returns the vector after normalization by the minimum
value.
Parameters
----------
scal_vec: 1d array or list
Vector with the scaling factors (or wavelengths).
Returns
-------
scal_vec: numpy ndarray, 1d
Vector containing the scaling factors (after correction to comply with
the condition >= 1).
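Examples
--------
Illustrative only (not part of the original docstring)::

    scal = check_scal_vector([2., 3., 4.])
    # -> array([1. , 1.5, 2. ]), i.e. normalized by the minimum factor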
"""
if not isinstance(scal_vec, (list, np.ndarray)):
raise TypeError('`scal_vec` is neither a list nor an np.ndarray')
scal_vec = np.array(scal_vec)
# checking if min factor is 1:
if scal_vec.min() != 1:
scal_vec = scal_vec/scal_vec.min()
return scal_vec
def find_scal_vector(cube, lbdas, fluxes, mask=None, nfp=2, fm="stddev",
simplex_options=None, debug=False, imlib='vip-fft',
interpolation='lanczos4', **kwargs):
"""
Find the optimal scaling factor for the channels of an IFS cube (or of
dual-band pairs of images).
The algorithm finds the optimal scaling factor that minimizes residuals in
the rescaled frames. It takes the inverse of the wavelength vector as a
first guess, and uses a similar method as the negative fake companion
technique, but minimizing residuals in either a mask or the whole field.
Parameters
----------
cube: 3D-array
Data cube with frames to be rescaled.
lbdas: 1d array or list
Vector with the wavelengths, used for first guess on scaling factor.
fluxes: 1d array or list
Vector with the (unsaturated) fluxes at the different wavelengths,
used for first guess on flux factor.
mask: 2D-array, opt
Binary mask, with ones where the residual intensities should be
evaluated. If None is provided, the whole field is used.
nfp: int, opt, {1,2}
Number of free parameters: spatial scaling alone or spatial scaling +
flux scaling.
fm: str, opt, {"sum","stddev"}
Figure of merit to use: sum of squared residuals or stddev of residual
pixels.
simplex_options: dict, optional
Options passed to the scipy.optimize.minimize (Nelder-Mead) solver.
**kwargs: optional
Optional arguments to the scipy.optimize.minimize function
Returns
-------
scal_vec: numpy ndarray, 1d
Vector containing the scaling factors (after correction to comply with
the condition >= 1).
flux_vec: numpy ndarray, 1d [only returned if nfp==2]
Vector containing the associated flux factors.
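Examples
--------
A hypothetical call for a 3-channel cube (array contents are illustrative;
with real data the cube, wavelengths and fluxes come from the instrument)::

    import numpy as np
    cube = np.random.randn(3, 41, 41)
    lbdas = np.array([1.0, 1.2, 1.6])
    fluxes = np.array([1.0, 0.9, 0.7])
    scal_vec, flux_vec = find_scal_vector(cube, lbdas, fluxes, nfp=2,
                                          fm="stddev")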
"""
scal_vec_ini = lbdas[-1]/lbdas
n_z = len(lbdas)
if n_z != len(fluxes) or n_z != cube.shape[0]:
msg = "first axis of cube, fluxes and lbda must have same length"
raise TypeError(msg)
if simplex_options is None:
simplex_options = {'xatol': 1e-6, 'fatol': 1e-6, 'maxiter': 800,
'maxfev': 2000}
scal_vec = np.ones(n_z)
flux_vec = np.ones(n_z)
for z in range(n_z-1):
flux_scal = fluxes[-1]/fluxes[z]
cube_tmp = np.array([cube[z], cube[-1]])
if nfp == 1:
p_ini = (scal_vec_ini[z],)
solu = minimize(_chisquare_scal, p_ini, args=(cube_tmp, flux_scal,
mask, fm, imlib,
interpolation),
method='Nelder-Mead', bounds=((1e-1, None),),
options=simplex_options,
**kwargs)
scal_fac, = solu.x
flux_fac = flux_scal
else:
p_ini = (scal_vec_ini[z], flux_scal)
solu = minimize(_chisquare_scal_2fp, p_ini, args=(cube_tmp, mask,
fm, imlib,
interpolation),
method='Nelder-Mead', options=simplex_options,
bounds=((1e-1, None), (1e-2, None)), **kwargs)
scal_fac, flux_fac = solu.x
if debug:
print("channel {:.0f}:".format(z), solu.x)
scal_vec[z] = scal_fac
flux_vec[z] = flux_fac
scal_vec = check_scal_vector(scal_vec)
return scal_vec, flux_vec
def _find_indices_sdi(scal, dist, index_ref, fwhm, delta_sep=1, nframes=None,
debug=False):
"""
Find optimal wavelengths which minimize self-subtraction in model PSF
subtraction.
Parameters
----------
scal : numpy ndarray or list
Vector with the scaling factors.
dist : float
Separation or distance (in pixels) from the center of the array.
index_ref : int
The spectral channel index for which we are finding the indices of
suitable spectral channels for the model PSF.
fwhm : float
Mean FWHM of all the wavelengths (in pixels).
delta_sep : float, optional
The threshold separation in terms of the mean FWHM.
nframes : None or int, optional
Must be an even value. If not None, then at most ``nframes`` of the
closest valid frames are kept.
debug : bool, optional
If True it prints out debug information.
Returns
-------
indices : numpy ndarray
List of good indices.
"""
scal = np.asarray(scal)
scal_ref = scal[index_ref]
sep_lft = (scal_ref - scal) / scal_ref * ((dist + fwhm * delta_sep) / fwhm)
sep_rgt = (scal - scal_ref) / scal_ref * ((dist - fwhm * delta_sep) / fwhm)
map_lft = sep_lft >= delta_sep
map_rgt = sep_rgt >= delta_sep
indices = np.nonzero(map_lft | map_rgt)[0]
if debug:
print("dist: {}, index_ref: {}".format(dist, index_ref))
print("sep_lft:", " ".join(["{:+.2f}".format(x) for x in sep_lft]))
print("sep_rgt:", " ".join(["{:+.2f}".format(x) for x in sep_rgt]))
print("indices:", indices)
print("indices size: {}".format(indices.size))
if indices.size == 0:
raise RuntimeError("No frames left after radial motion threshold. Try "
"decreasing the value of `delta_sep`")
if nframes is not None:
i1 = map_lft.sum()
window = nframes // 2
if i1 - window < 0 or i1 + window > indices[-1]:
window = nframes
ind1 = max(0, i1 - window)
ind2 = min(scal.size, i1 + window)
indices = indices[ind1: ind2]
if indices.size < 2:
raise RuntimeError("No frames left after radial motion threshold. "
"Try decreasing the value of `delta_sep` or "
"`nframes`")
if debug:
print("indices (nframes):", indices)
return indices
def _chisquare_scal(modelParameters, cube, flux_fac=1, mask=None, fm='sum',
imlib='vip-fft', interpolation='lanczos4'):
r"""
Calculate the reduced :math:`\chi^2`:
.. math:: \chi^2_r = \frac{1}{N-3}\sum_{j=1}^{N} |I_j|,
where N is the number of pixels in the image (or mask if provided), and
:math:`I_j` the j-th pixel intensity, considering one free parameter: the
physical scaling factor between images of the cube, for a given
input flux scaling factor.
Parameters
----------
modelParameters: tuple
The model parameters, typically (scal_fac, flux_fac).
cube: numpy.array
The cube of fits images expressed as a numpy.array.
flux_fac: float, opt
Flux scaling factor applied to the first image of the cube.
mask: 2D-array, opt
Binary mask, with ones where the residual intensities should be
evaluated. If None is provided, the whole field is used.
fm: str, opt, {"sum","stddev"}
Figure of merit to use: sum of squared residuals or stddev of residual
pixels.
Returns
-------
chi: float
The reduced chi squared.
"""
# rescale in flux and spatially
array = cube.copy()
#scale_fac, flux_fac = modelParameters
scale_fac, = modelParameters
array[0] *= flux_fac
scaling_list = np.array([scale_fac, 1])
array = _cube_resc_wave(array, scaling_list, imlib=imlib,
interpolation=interpolation)
frame = array[1]-array[0]
if mask is None:
mask = np.ones_like(frame)
if fm == 'sum':
chi = np.sum(np.power(frame[np.where(mask)], 2))
elif fm == 'stddev':
values = frame[np.where(mask)]
values = values[values != 0]
chi = np.std(values)
else:
raise RuntimeError('fm choice not recognized.')
return chi
def _chisquare_scal_2fp(modelParameters, cube, mask=None, fm='sum',
imlib='vip-fft', interpolation='lanczos4'):
r"""
Calculate the reduced :math:`\chi^2`:
.. math:: \chi^2_r = \frac{1}{N-3}\sum_{j=1}^{N} |I_j|,
where N is the number of pixels within a circular aperture centered on the
first estimate of the planet position, and :math:`I_j` the j-th pixel
intensity. Two free parameters: physical and flux scaling factors.
Parameters
----------
modelParameters: tuple
The model parameters, typically (scal_fac, flux_fac).
cube: numpy.array
The cube of fits images expressed as a numpy.array.
mask: 2D-array, opt
Binary mask, with ones where the residual intensities should be
evaluated. If None is provided, the whole field is used.
fm: str, opt, {"sum","stddev"}
Figure of merit to use: sum of squared residuals or stddev of residual
pixels.
Returns
-------
chi: float
The reduced chi squared.
"""
# rescale in flux and spatially
array = cube.copy()
scale_fac, flux_fac = modelParameters
array[0] *= flux_fac
scaling_list = | np.array([scale_fac, 1]) | numpy.array |
import numpy as np
import cv2
from collections import deque
import pickle
import os
class ImageProcessor:
"""
Class used to process an image for the LaneDetector. Applies both color and gradient thresholding and produces a set of
images (undistorted, thresholded and warped) that can be used for debugging.
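A hypothetical usage sketch (file and variable names are illustrative; the
pickle file must contain the 'mtx' and 'dist' entries this class expects):

    processor = ImageProcessor('camera_cal/calibration.p')
    undistorted = processor.undistort_image(frame)  # frame: BGR image from cv2
    binary = processor.gradient_thresh(undistorted)
    warped = processor.warp_image(binary)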
"""
def __init__(self, calibration_data_file):
# Camera calibration data
calibration_data = self._load_calibration_data(file_path = calibration_data_file)
self.mtx = calibration_data['mtx']
self.dist = calibration_data['dist']
# Gradient and color thresholding parameters
self.sobel_kernel = 5
self.grad_x_thresh = (15, 255) # Sobel x threshold
self.grad_y_thresh = (25, 255) # Sobel y threshold
self.grad_mag_thresh = (40, 255) # Sobel mag threshold
self.grad_dir_thresh = (0.7, 1.3) # Sobel direction range
self.grad_v_thresh = (180, 255) # HSV, V channel threshold to filter gradient
self.r_thresh = (195, 255) # RGB, Red channel threshold
self.s_thresh = (100, 255) # HSL, S channel threshold
self.l_thresh = (195, 255) # HSL, L channel threshold
self.b_thresh = (150, 255) # LAB, B channel threshold
self.v_thresh = (140, 255) # HSV, V channel threshold
# Perspective transformation parameters
# slope = (y2 - y1) / (x2 - x1)
# intercept = y1 - slope * x1
# top left, top right = (570, 470), (722, 470)
# bottom left, bottom right = (220, 720), (1110, 720)
self.persp_src_left_line = (-0.7142857143, 877.142857146) # Slope and intercept for left line
self.persp_src_right_line = (0.6443298969, 4.793814441) # Slope and intercept for right line
self.persp_src_top_pct = 0.645 # Percentage from the top
self.persp_src_bottom_pct = 0.02 # Percentage from bottom
self.persp_dst_x_pct = 0.22 # Destination offset percent
self.persp_src = None
self.persp_dst = None
def _load_calibration_data(self, file_path = os.path.join('camera_cal', 'calibration.p')):
with open(file_path, 'rb') as f:
return pickle.load(f)
def _warp_coordinates(self, img):
if self.persp_src is None or self.persp_dst is None:
cols = img.shape[1]
rows = img.shape[0]
src_top_offset = rows * self.persp_src_top_pct
src_bottom_offset = rows * self.persp_src_bottom_pct
left_slope, left_intercept = self.persp_src_left_line
right_slope, right_intercept = self.persp_src_right_line
top_left = [(src_top_offset - left_intercept) / left_slope, src_top_offset]
top_right = [(src_top_offset - right_intercept) / right_slope, src_top_offset]
bottom_left = [(rows - src_bottom_offset - left_intercept) / left_slope, rows - src_bottom_offset]
bottom_right = [(rows - src_bottom_offset - right_intercept) / right_slope, rows - src_bottom_offset]
#Top left, Top right, Bottom right, Bottom left
src = np.float32([top_left, top_right, bottom_right, bottom_left])
dst_x_offset = cols * self.persp_dst_x_pct
top_left = [dst_x_offset, 0]
top_right = [cols - dst_x_offset, 0]
bottom_left = [dst_x_offset, rows]
bottom_right = [cols - dst_x_offset, rows]
dst = np.float32([top_left, top_right, bottom_right, bottom_left])
self.persp_src = src
self.persp_dst = dst
return self.persp_src, self.persp_dst
def _sobel(self, img, orient = 'x', sobel_kernel = 3):
# Take the derivative in x or y given orient = 'x' or 'y'
if orient == 'x':
sobel = cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize = sobel_kernel)
else:
sobel = cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize = sobel_kernel)
return sobel
def _apply_thresh(self, img, thresh = [0, 255]):
result = np.zeros_like(img)
result[(img >= thresh[0]) & (img <= thresh[1])] = 1
return result
def unwarp_image(self, img):
img_shape = img.shape[1::-1]
src, dst = self._warp_coordinates(img)
warp_m = cv2.getPerspectiveTransform(dst, src)
unwarped = cv2.warpPerspective(img, warp_m, img_shape)
return unwarped
def warp_image(self, img):
img_shape = img.shape[1::-1]
src, dst = self._warp_coordinates(img)
warp_m = cv2.getPerspectiveTransform(src, dst)
warped = cv2.warpPerspective(img, warp_m, img_shape)
return warped
def undistort_image(self, img):
return cv2.undistort(img, self.mtx, self.dist, None, self.mtx)
def sobel_abs_thresh(self, sobel, thresh=[0,255]):
# Take the absolute value of the derivative or gradient
abs_sobel = np.absolute(sobel)
# Scale to 8-bit (0 - 255) then convert to type = np.uint8
scaled_sobel = np.uint8(255 * abs_sobel / np.max(abs_sobel))
binary_output = self._apply_thresh(scaled_sobel, thresh)
return binary_output
def sobel_mag_thresh(self, sobel_x, sobel_y, thresh=(0, 255)):
# Calculate the gradient magnitude
gradmag = np.sqrt(sobel_x**2 + sobel_y**2)
# Rescale to 8 bit
scale_factor = np.max(gradmag)/255
gradmag = (gradmag/scale_factor).astype(np.uint8)
binary_output = self._apply_thresh(gradmag, thresh)
return binary_output
def sobel_dir_thresh(self, sobel_x, sobel_y, thresh=(0, np.pi/2)):
# Take the absolute value of the x and y gradients
abs_sobel_x = np.absolute(sobel_x)
abs_sobel_y = np.absolute(sobel_y)
# Calculate the direction of the gradient
abs_grad_dir = np.arctan2(abs_sobel_y, abs_sobel_x)
binary_output = self._apply_thresh(abs_grad_dir, thresh)
return binary_output
def gradient_thresh(self, img):
gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
v_ch = hsv_img[:,:,2]
v_binary = self._apply_thresh(v_ch, self.grad_v_thresh)
sobel_x = self._sobel(gray_img, sobel_kernel = self.sobel_kernel, orient = 'x')
sobel_y = self._sobel(gray_img, sobel_kernel = self.sobel_kernel, orient = 'y')
sobel_x_binary = self.sobel_abs_thresh(sobel_x, thresh = self.grad_x_thresh)
sobel_y_binary = self.sobel_abs_thresh(sobel_y, thresh = self.grad_y_thresh)
sobel_mag_binary = self.sobel_mag_thresh(sobel_x, sobel_y, thresh = self.grad_mag_thresh)
sobel_dir_binary = self.sobel_dir_thresh(sobel_x, sobel_y, thresh = self.grad_dir_thresh)
sobel_binary = | np.zeros_like(sobel_x_binary) | numpy.zeros_like |
import numpy as np
import scipy.signal
def delay(vis, inverse=False, taper=None):
"""
Perform delay transform on visibility data.
``vis`` must have shape (Nfreqs, Ntimes).
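Example (a minimal sketch; ``np.blackman`` is just one possible taper)::

    import numpy as np
    vis = np.random.randn(64, 10) + 1j * np.random.randn(64, 10)
    vis_delay = delay(vis, taper=np.blackman)  # FFT along the frequency axis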
"""
# Construct taper function
if taper is not None:
w = np.array(taper(vis.shape[0]))
else:
w = np.ones(vis.shape[0])
# Perform either forward or inverse FFT
if inverse:
# Do the inverse FFT on each time sample
return np.fft.ifft(vis * w[:,np.newaxis], axis=0)
else:
# Do the forward FFT on each time sample
return np.fft.fft(vis * w[:,np.newaxis], axis=0)
def fringe_rate(vis, inverse=False, taper=None):
"""
Perform fringe rate transform on visibility data.
``vis`` must have shape (Nfreqs, Ntimes).
"""
# Construct taper function
if taper is not None:
w = np.array(taper(vis.shape[1]))
else:
w = np.ones(vis.shape[1])
# Perform either forward or inverse FFT
if inverse:
# Do the inverse FFT on each frequency sample
return np.fft.ifft(vis * w[np.newaxis,:], axis=1)
else:
# Do the forward FFT on each frequency sample
return | np.fft.fft(vis * w[np.newaxis,:], axis=1) | numpy.fft.fft |
#################################################################################
# The Institute for the Design of Advanced Energy Systems Integrated Platform
# Framework (IDAES IP) was produced under the DOE Institute for the
# Design of Advanced Energy Systems (IDAES), and is copyright (c) 2018-2021
# by the software owners: The Regents of the University of California, through
# Lawrence Berkeley National Laboratory, National Technology & Engineering
# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia University
# Research Corporation, et al. All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and
# license information.
#################################################################################
from copy import deepcopy
from math import sqrt
import numpy as np
from .unit_cell_lattice import UnitCell, UnitCellLattice
from ..geometry import Cube
from ..tiling import CubicTiling
from ..transform_func import ScaleFunc, RotateFunc
from ...util.util import ListHasPoint
class DiamondLattice(UnitCellLattice):
RefIAD = sqrt(3) / 4
# === STANDARD CONSTRUCTOR
def __init__(self, IAD):
RefUnitCellShape = Cube(1, BotBackLeftCorner=np.array([0, 0, 0], dtype=float))
RefUnitCellTiling = CubicTiling(RefUnitCellShape)
RefFracPositions = [np.array([0.0, 0.0, 0.0]),
np.array([0.5, 0.5, 0.0]),
np.array([0.0, 0.5, 0.5]),
np.array([0.5, 0.0, 0.5]),
np.array([0.25, 0.25, 0.25]),
np.array([0.25, 0.75, 0.75]),
np.array([0.75, 0.25, 0.75]),
np.array([0.75, 0.75, 0.25])]
RefUnitCell = UnitCell(RefUnitCellTiling, RefFracPositions)
UnitCellLattice.__init__(self, RefUnitCell)
self._IAD = DiamondLattice.RefIAD # IAD is set correctly after calling applyTransF
self.applyTransF(ScaleFunc(IAD / DiamondLattice.RefIAD))
self._NthNeighbors = [[[np.array([0.25, 0.25, 0.25]),
| np.array([-0.25, -0.25, 0.25]) | numpy.array |
"""Functions used by least-squares algorithms."""
from math import copysign
import numpy as np
from numpy.linalg import norm
from scipy.linalg import cho_factor, cho_solve, LinAlgError
from scipy.sparse import issparse
from scipy.sparse.linalg import LinearOperator, aslinearoperator
EPS = np.finfo(float).eps
# Functions related to a trust-region problem.
def intersect_trust_region(x, s, Delta):
"""Find the intersection of a line with the boundary of a trust region.
This function solves the quadratic equation with respect to t
||(x + s*t)||**2 = Delta**2.
Returns
-------
t_neg, t_pos : tuple of float
Negative and positive roots.
Raises
------
ValueError
If `s` is zero or `x` is not within the trust region.
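Examples
--------
Illustrative only: for ``x = [0, 0]``, ``s = [1, 0]`` and ``Delta = 2`` the
line crosses the boundary at ``t = -2`` and ``t = 2``::

    t_neg, t_pos = intersect_trust_region(np.array([0., 0.]),
                                          np.array([1., 0.]), 2.0)
    # -> (-2.0, 2.0)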
"""
a = np.dot(s, s)
if a == 0:
raise ValueError("`s` is zero.")
b = np.dot(x, s)
c = np.dot(x, x) - Delta**2
if c > 0:
raise ValueError("`x` is not within the trust region.")
d = np.sqrt(b*b - a*c) # Root from one fourth of the discriminant.
# Computations below avoid loss of significance, see "Numerical Recipes".
q = -(b + copysign(d, b))
t1 = q / a
t2 = c / q
if t1 < t2:
return t1, t2
else:
return t2, t1
def solve_lsq_trust_region(n, m, uf, s, V, Delta, initial_alpha=None,
rtol=0.01, max_iter=10):
"""Solve a trust-region problem arising in least-squares minimization.
This function implements a method described by <NAME> [1]_ and used
in MINPACK, but it relies on a single SVD of Jacobian instead of series
of Cholesky decompositions. Before running this function, compute:
``U, s, VT = svd(J, full_matrices=False)``.
Parameters
----------
n : int
Number of variables.
m : int
Number of residuals.
uf : ndarray
Computed as U.T.dot(f).
s : ndarray
Singular values of J.
V : ndarray
Transpose of VT.
Delta : float
Radius of a trust region.
initial_alpha : float, optional
Initial guess for alpha, which might be available from a previous
iteration. If None, determined automatically.
rtol : float, optional
Stopping tolerance for the root-finding procedure. Namely, the
solution ``p`` will satisfy ``abs(norm(p) - Delta) < rtol * Delta``.
max_iter : int, optional
Maximum allowed number of iterations for the root-finding procedure.
Returns
-------
p : ndarray, shape (n,)
Found solution of a trust-region problem.
alpha : float
Positive value such that (J.T*J + alpha*I)*p = -J.T*f.
Sometimes called Levenberg-Marquardt parameter.
n_iter : int
Number of iterations made by root-finding procedure. Zero means
that Gauss-Newton step was selected as the solution.
References
----------
.. [1] More, <NAME>., "The Levenberg-Marquardt Algorithm: Implementation
and Theory," Numerical Analysis, ed. <NAME>, Lecture Notes
in Mathematics 630, Springer Verlag, pp. 105-116, 1977.
"""
def phi_and_derivative(alpha, suf, s, Delta):
"""Function of which to find zero.
It is defined as "norm of regularized (by alpha) least-squares
solution minus `Delta`". Refer to [1]_.
"""
denom = s**2 + alpha
p_norm = norm(suf / denom)
phi = p_norm - Delta
phi_prime = -np.sum(suf ** 2 / denom**3) / p_norm
return phi, phi_prime
suf = s * uf
# Check if J has full rank and try Gauss-Newton step.
if m >= n:
threshold = EPS * m * s[0]
full_rank = s[-1] > threshold
else:
full_rank = False
if full_rank:
p = -V.dot(uf / s)
if norm(p) <= Delta:
return p, 0.0, 0
alpha_upper = norm(suf) / Delta
if full_rank:
phi, phi_prime = phi_and_derivative(0.0, suf, s, Delta)
alpha_lower = -phi / phi_prime
else:
alpha_lower = 0.0
if initial_alpha is None or not full_rank and initial_alpha == 0:
alpha = max(0.001 * alpha_upper, (alpha_lower * alpha_upper)**0.5)
else:
alpha = initial_alpha
for it in range(max_iter):
if alpha < alpha_lower or alpha > alpha_upper:
alpha = max(0.001 * alpha_upper, (alpha_lower * alpha_upper)**0.5)
phi, phi_prime = phi_and_derivative(alpha, suf, s, Delta)
if phi < 0:
alpha_upper = alpha
ratio = phi / phi_prime
alpha_lower = max(alpha_lower, alpha - ratio)
alpha -= (phi + Delta) * ratio / Delta
if np.abs(phi) < rtol * Delta:
break
p = -V.dot(suf / (s**2 + alpha))
# Make the norm of p equal to Delta, p is changed only slightly during
# this. It is done to prevent p lie outside the trust region (which can
# cause problems later).
p *= Delta / norm(p)
return p, alpha, it + 1
def solve_trust_region_2d(B, g, Delta):
"""Solve a general trust-region problem in 2 dimensions.
The problem is reformulated as a 4-th order algebraic equation,
the solution of which is found by numpy.roots.
Parameters
----------
B : ndarray, shape (2, 2)
Symmetric matrix, defines a quadratic term of the function.
g : ndarray, shape (2,)
Defines a linear term of the function.
Delta : float
Radius of a trust region.
Returns
-------
p : ndarray, shape (2,)
Found solution.
newton_step : bool
Whether the returned solution is the Newton step which lies within
the trust region.
"""
try:
R, lower = cho_factor(B)
p = -cho_solve((R, lower), g)
if np.dot(p, p) <= Delta**2:
return p, True
except LinAlgError:
pass
a = B[0, 0] * Delta**2
b = B[0, 1] * Delta**2
c = B[1, 1] * Delta**2
d = g[0] * Delta
f = g[1] * Delta
coeffs = np.array(
[-b + d, 2 * (a - c + f), 6 * b, 2 * (-a + c + f), -b - d])
t = np.roots(coeffs) # Can handle leading zeros.
t = np.real(t[np.isreal(t)])
p = Delta * np.vstack((2 * t / (1 + t**2), (1 - t**2) / (1 + t**2)))
value = 0.5 * np.sum(p * B.dot(p), axis=0) + np.dot(g, p)
i = np.argmin(value)
p = p[:, i]
return p, False
def update_tr_radius(Delta, actual_reduction, predicted_reduction,
step_norm, bound_hit):
"""Update the radius of a trust region based on the cost reduction.
Returns
-------
Delta : float
New radius.
ratio : float
Ratio between actual and predicted reductions. Zero if predicted
reduction is zero.
"""
if predicted_reduction > 0:
ratio = actual_reduction / predicted_reduction
else:
ratio = 0
if ratio < 0.25:
Delta = 0.25 * step_norm
elif ratio > 0.75 and bound_hit:
Delta *= 2.0
return Delta, ratio
# Construction and minimization of quadratic functions.
def build_quadratic_1d(J, g, s, diag=None, s0=None):
"""Parameterize a multivariate quadratic function along a line.
The resulting univariate quadratic function is given as follows:
::
f(t) = 0.5 * (s0 + s*t).T * (J.T*J + diag) * (s0 + s*t) +
g.T * (s0 + s*t)
Parameters
----------
J : ndarray, sparse matrix or LinearOperator shape (m, n)
Jacobian matrix, affects the quadratic term.
g : ndarray, shape (n,)
Gradient, defines the linear term.
s : ndarray, shape (n,)
Direction vector of a line.
diag : None or ndarray with shape (n,), optional
Additional diagonal part, affects the quadratic term.
If None, assumed to be 0.
s0 : None or ndarray with shape (n,), optional
Initial point. If None, assumed to be 0.
Returns
-------
a : float
Coefficient for t**2.
b : float
Coefficient for t.
c : float
Free term. Returned only if `s0` is provided.
"""
v = J.dot(s)
a = np.dot(v, v)
if diag is not None:
a += np.dot(s * diag, s)
a *= 0.5
b = np.dot(g, s)
if s0 is not None:
u = J.dot(s0)
b += np.dot(u, v)
c = 0.5 * np.dot(u, u) + np.dot(g, s0)
if diag is not None:
b += np.dot(s0 * diag, s)
c += 0.5 * np.dot(s0 * diag, s0)
return a, b, c
else:
return a, b
def minimize_quadratic_1d(a, b, lb, ub, c=0):
"""Minimize a 1-d quadratic function subject to bounds.
The free term `c` is 0 by default. Bounds must be finite.
Returns
-------
t : float
Minimum point.
y : float
Minimum value.
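Examples
--------
Illustrative only: for ``f(t) = t**2 - 4*t`` on ``[0, 10]`` the minimum is
at the unconstrained extremum::

    t_min, y_min = minimize_quadratic_1d(1.0, -4.0, 0.0, 10.0)
    # -> t_min = 2.0, y_min = -4.0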
"""
t = [lb, ub]
if a != 0:
extremum = -0.5 * b / a
if lb < extremum < ub:
t.append(extremum)
t = np.asarray(t)
y = a * t**2 + b * t + c
min_index = np.argmin(y)
return t[min_index], y[min_index]
def evaluate_quadratic(J, g, s, diag=None):
"""Compute values of a quadratic function arising in least squares.
The function is 0.5 * s.T * (J.T * J + diag) * s + g.T * s.
Parameters
----------
J : ndarray, sparse matrix or LinearOperator, shape (m, n)
Jacobian matrix, affects the quadratic term.
g : ndarray, shape (n,)
Gradient, defines the linear term.
s : ndarray, shape (k, n) or (n,)
Array containing steps as rows.
diag : ndarray, shape (n,), optional
Additional diagonal part, affects the quadratic term.
If None, assumed to be 0.
Returns
-------
values : ndarray with shape (k,) or float
Values of the function. If `s` was 2-dimensional then ndarray is
returned, otherwise float is returned.
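Examples
--------
Illustrative only: with ``J`` the 2x2 identity, ``g = [1, 1]`` and
``s = [1, 1]`` the value is ``0.5 * 2 + 2 = 3``::

    val = evaluate_quadratic(np.eye(2), np.array([1., 1.]),
                             np.array([1., 1.]))
    # -> 3.0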
"""
if s.ndim == 1:
Js = J.dot(s)
q = np.dot(Js, Js)
if diag is not None:
q += np.dot(s * diag, s)
else:
Js = J.dot(s.T)
q = np.sum(Js**2, axis=0)
if diag is not None:
q += np.sum(diag * s**2, axis=1)
l = np.dot(s, g)
return 0.5 * q + l
# Utility functions to work with bound constraints.
def in_bounds(x, lb, ub):
"""Check if a point lies within bounds."""
return | np.all((x >= lb) & (x <= ub)) | numpy.all |
import torch
import matplotlib.pyplot as plt
from torch.nn import functional as F
import numpy as np
from seqwise_cont_skillspace.algo.algo_cont_skillspace import \
SeqwiseAlgoRevisedContSkills
import self_supervised.utils.typed_dicts as td
from self_supervised.base.replay_buffer.env_replay_buffer import \
NormalSequenceReplayBuffer
import rlkit.torch.pytorch_util as ptu
from seqwise_cont_skillspace.utils.get_colors import get_colors
class SeqwiseAlgoRevisedContSkillsHighdimusingvae(SeqwiseAlgoRevisedContSkills):
@torch.no_grad()
def _classfier_perf_eval(self):
num_paths = 2
eval_paths = self._get_paths_mode_influence_test(
num_paths=num_paths,
seq_len=self.seq_len,
)
assert type(eval_paths[0]) == td.TransitonModeMappingDiscreteSkills
obs_dim = eval_paths[0].obs.shape[0]
next_obs = []
mode = []
skill_id = []
for path in eval_paths:
next_obs.append(path.next_obs)
mode.append(path.mode)
skill_id.append(path.skill_id)
next_obs = ptu.from_numpy(
np.stack(next_obs, axis=0)
).transpose(-1, -2)
mode = ptu.from_numpy(
np.stack(mode, axis=0)
).transpose(-1, -2)
skill_id = ptu.from_numpy(
| np.stack(skill_id, axis=0) | numpy.stack |
import copy
import sys
import numpy as np
import alfred.gen.constants as constants
from alfred.gen.game_states.game_state_base import GameStateBase
from alfred.gen.game_states.planned_game_state import PlannedGameState
from alfred.gen.game_states.task_game_state import TaskGameState
from alfred.gen.utils import bb_util
from alfred.gen.utils import game_util
class TaskGameStateFullKnowledge(TaskGameState):
def __init__(self, env, seed=None, action_space=None):
super(TaskGameStateFullKnowledge, self).__init__(env, seed, action_space)
# Updated with Physics to calculate nearest point to every object along the way.
def update_receptacle_nearest_points(self):
if self.receptacle_to_point is None:
# Read pre-calculated best points from files generated by precompute_layout_locations.py
# These points should be used where available because they have been vetted for whether openable
# receptacles collide with the agent from the given point.
object_dict = game_util.get_object_dict(self.env.last_event.metadata)
object_to_point_reliable_point = self.openable_object_to_point
points = self.gt_graph.points
self.receptacle_to_point = {}
self.point_to_receptacle = {}
self.object_to_point = {}
self.point_to_object = {}
self.in_receptacle_ids = {}
receptacle_types = constants.RECEPTACLES - constants.MOVABLE_RECEPTACLES_SET
hold_size = sys.maxsize
for _ in range(4):
event = self.env.step({'action': 'RotateRight'})
if constants.FULL_OBSERVABLE_STATE:
objects = []
receptacles = []
# Movable receptacles will be added to both the objects and receptacles lists
for obj in self.env.last_event.metadata['objects']:
cls = obj['objectType']
if cls not in constants.OBJECTS_SET:
continue
if cls in constants.MOVABLE_RECEPTACLES_SET:
objects.append(obj)
receptacles.append(obj)
elif cls in receptacle_types:
receptacles.append(obj)
else:
objects.append(obj)
for obj in receptacles:
cls = obj['objectType']
obj_id = obj['objectId']
obj_name_s = obj['objectId']
# Instantiate a 'box' that looks like the one previously derived from bounds3D, but with the minimum
# and maximum points both set by the object's 'position' var.
box = np.array([[obj['position']['x'], obj['position']['x']],
[obj['position']['z'], obj['position']['z']],
[obj['position']['y'], obj['position']['y']]]) / constants.AGENT_STEP_SIZE
# Get best coordinate from which to open object, possibly reading x,z value from pre-calculated values.
known_point = None
if obj_name_s in object_to_point_reliable_point:
known_point = np.asarray(object_to_point_reliable_point[obj_name_s][:2]) / constants.AGENT_STEP_SIZE
coord = self.get_obj_coords(box, cls, obj_id, points, known_point=known_point,
object_type=cls, current_scene=self.scene_num)
if (obj['openable'] and not obj['pickupable'] and
known_point is None and constants.PRUNE_UNREACHABLE_POINTS):
print("WARNING: no precomputed, good opening point for '%s'; will drop openability from planner"
% obj_name_s)
self.receptacle_to_point[obj_id] = np.array(coord)
if coord not in self.point_to_receptacle:
self.point_to_receptacle[coord] = []
self.point_to_receptacle[coord].append(obj_id)
if obj_id not in self.in_receptacle_ids:
self.in_receptacle_ids[obj_id] = set()
if obj_id not in self.was_in_receptacle_ids:
self.was_in_receptacle_ids[obj_id] = set()
# Do objects second so receptacles are already set up.
for obj in objects:
cls = obj['objectType']
obj_id = obj['objectId']
# Instantiate a 'box' that looks like the one previously derived from bounds3D, but with the minimum
# and maximum points both set by the object's 'position' var.
box = np.array([[obj['position']['x'], obj['position']['x']],
[obj['position']['z'], obj['position']['z']],
[obj['position']['y'], obj['position']['y']]]) / constants.AGENT_STEP_SIZE
coord = self.get_obj_coords(box, cls, obj_id, points,
object_type=cls, current_scene=self.scene_num)
if not isinstance(obj['parentReceptacles'], list):
obj['parentReceptacles'] = [obj['parentReceptacles']]
for parent in obj['parentReceptacles']:
if parent is None:
break
parent_obj = object_dict[parent]
if parent_obj['objectType'] not in constants.RECEPTACLES:
# Weird corner cases of things that aren't listed as receptacles
continue
# TODO: cleanup suffix fix?
fix_basin = False
if parent.startswith('Sink') and not parent.endswith('Basin'):
fix_basin = True
parent = parent + "|SinkBasin"
elif parent.startswith('Bathtub') and not parent.endswith('Basin'):
fix_basin = True
parent = parent + "|BathtubBasin"
if fix_basin:
try:
self.in_receptacle_ids[parent].add(obj_id)
self.was_in_receptacle_ids[parent].add(obj_id)
except KeyError:
raise Exception('No object named %s in scene %s' % (parent, self.scene_name))
else:
self.in_receptacle_ids[parent].add(obj_id)
self.was_in_receptacle_ids[parent].add(obj_id)
self.object_to_point[obj_id] = | np.array(coord) | numpy.array |
#!/usr/bin/env python
#
# Cauchy Wavelet for EXAFS, adopted from
# matlab code from Munoz, Argoul, and Farges:
#
# CONTINUOUS CAUCHY WAVELET TRANSFORM OF EXAFS SIGNAL
# code freely downloaded from http://www.univ-mlv.fr/~farges/waw
# (c) 2000, Univ. Marne la Vallee, France
#
# please cite us of this code with:
# <NAME>., <NAME>. and <NAME>.
# Continuous Cauchy wavelet transform analyses of
# EXAFS spectra: a qualitative approach.
# American Mineralogist 88, pp. 694-700 (2003).
#
# version history:
# 1999 Hans-Argoul : core wavelet algorithm
# 1999-2002 Argoul-Munoz : EXAFS adapation
# 2002 Farges : graphical and user interface
# 2003 Munoz : CPU optimizations and graphical updates
# 2003 Farges-Munoz : various fixes and web version
#
# 2014-Apr <NAME> : translated to Python for Larch
import numpy as np
from larch import Make_CallArgs, parse_group_args
from larch.math import complex_phase
from .xafsutils import set_xafsGroup
@Make_CallArgs(["k" ,"chi"])
def cauchy_wavelet(k, chi=None, group=None, kweight=0, rmax_out=10,
nfft=2048, _larch=None):
"""
Cauchy Wavelet Transform for XAFS, following work of Munoz, Argoul, and Farges
Parameters:
-----------
k: 1-d array of photo-electron wavenumber in Ang^-1 or group
chi: 1-d array of chi
group: output Group
rmax_out: highest R for output data (10 Ang)
kweight: exponent for weighting spectra by k**kweight
nfft: value to use for N_fft (2048).
Returns:
---------
None -- outputs are written to supplied group.
Notes:
-------
Arrays written to output group:
r uniform array of R, out to rmax_out.
wcauchy complex cauchy wavelet(k, R)
wcauchy_mag magnitude of wavelet(k, R)
wcauchy_re real part of wavelet(k, R)
wcauchy_im imaginary part of wavelet(k, R)
Supports First Argument Group convention (with group
member names 'k' and 'chi')
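Examples
--------
A hypothetical call on a toy signal (assumes a larch ``Group`` is available
as the output container; values are illustrative)::

    import numpy as np
    from larch import Group
    k = np.linspace(0.05, 12.0, 240)
    chi = np.sin(2 * 2.0 * k) * np.exp(-0.01 * k**2)  # toy EXAFS oscillation
    out = Group()
    cauchy_wavelet(k, chi, group=out, kweight=2)
    # out.r and out.wcauchy_mag hold the R grid and wavelet magnitude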
"""
k, chi, group = parse_group_args(k, members=('k', 'chi'),
defaults=(chi,), group=group,
fcn_name='cauchy_wavelet')
kstep = np.round(1000.*(k[1]-k[0]))/1000.0
rstep = (np.pi/2048)/kstep
rmin = 1.e-7
rmax = rmax_out
nrpts = int(np.round((rmax-rmin)/rstep))
nkout = len(k)
if kweight != 0:
chi = chi * k**kweight
    # extend the EXAFS chi(k) to NFT = nfft/2 data points...
NFT = int(nfft/2)
if len(k) < NFT:
knew = np.arange(NFT) * kstep
xnew = np.zeros(NFT) * kstep
xnew[:len(k)] = chi
else:
knew = k[:NFT]
xnew = chi[:NFT]
# FT parameters
freq = (1.0/kstep)*np.arange(nfft)/(2*nfft)
omega = 2*np.pi*freq
# simple FT calculation
tff = np.fft.fft(xnew, n= 2*nfft)
# scale parameter
r = np.linspace(0, rmax, nrpts)
r[0] = 1.e-19
a = nrpts/(2*r)
# Characteristic values for Cauchy wavelet:
cauchy_sum = np.log(2*np.pi) - np.log(1.0+np.arange(nrpts)).sum()
# Main calculation:
out = np.zeros(nkout*nrpts,
dtype='complex128').reshape(nrpts, nkout)
for i in range(nrpts):
aom = a[i]*omega
aom[np.where(aom==0)] = 1.e-19
filt = cauchy_sum + nrpts*np.log(aom) - aom
tmp = np.conj(np.exp(filt))*tff[:nfft]
out[i, :] = np.fft.ifft(tmp, 2*nfft)[:nkout]
group = set_xafsGroup(group, _larch=_larch)
group.r = r
group.wcauchy = out
    group.wcauchy_mag = np.sqrt(out.real**2 + out.imag**2)
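# A minimal, numpy-only sketch of the frequency-domain filter applied in the
# loop above for a single R value. The toy chi(k) signal and the parameter
# values below are assumptions chosen purely for illustration; the full
# transform over all R values is performed by cauchy_wavelet() itself.
def _cauchy_filter_demo(nfft=2048, kstep=0.05, r_value=2.0, order=100):
    k = np.arange(nfft) * kstep
    chi = np.sin(2 * r_value * k) * np.exp(-0.01 * k**2)    # toy EXAFS-like signal
    omega = 2 * np.pi * (1.0/kstep) * np.arange(nfft) / (2*nfft)
    a = order / (2 * r_value)                # scale parameter for this R
    aom = np.where(a*omega == 0, 1.e-19, a*omega)
    norm = np.log(2*np.pi) - np.log(1.0 + np.arange(order)).sum()
    filt = norm + order*np.log(aom) - aom    # log of the Cauchy wavelet in omega
    tff = np.fft.fft(chi, n=2*nfft)
    line = np.fft.ifft(np.conj(np.exp(filt))*tff[:nfft], 2*nfft)[:len(k)]
    return np.abs(line)                      # |W(k, R)| at this single R value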
'''
StationSim
author: aw-west
created: 19/06/18
version: 0.8.1
Changelog:
.0 merged models
.0 batch() moved to experiments
.0 separated figures
.1 checked figures and created experiment notebooks
.1 fixed _heightmap
Todo:
new entering
speeds[-1] separation drawn
set_pickle?
get_pickle?
Look at model get_state and set_state - should comparisons be '==' or 'is'?
'''
import warnings
import numpy as np
import os
from scipy.spatial import cKDTree
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
# Dont automatically load seaborn as it isn't needed on the HPC
try:
from seaborn import kdeplot as sns_kdeplot
except ImportError as e:
warnings.warn("The seaborn module is not available. If you try to create kde plots for this model (i.e. a wiggle map or density map) then it will fail.")
class Agent:
'''
A class representing a generic agent for the StationSim ABM.
'''
def __init__(self, model, unique_id):
'''
Initialise a new agent.
        Description:
Creates a new agent and gives it a randomly chosen entrance, exit,
and desired speed.
All agents start with active state 0 ('not started').
Their initial location (** (x,y) tuple-floats **) is set to the location
of the entrance that they are assigned to.
Parameters:
model - a pointer to the StationSim model that is creating this agent
'''
self.unique_id = unique_id
# Required
self.status = 0 # 0 Not Started, 1 Active, 2 Finished
# Location
perturb = model.gates_space * np.random.uniform(-1, +1)
gate_in = np.random.randint(model.gates_in)
self.loc_start = model.gates_locations[gate_in] + [0, perturb]
gate_out = np.random.randint(model.gates_out) + model.gates_in
self.loc_desire = model.gates_locations[gate_out]
self.location = self.loc_start
# Speed
speed_max = 0
while speed_max <= model.speed_min:
speed_max = np.random.normal(model.speed_mean, model.speed_std)
self.speeds = np.arange(speed_max, model.speed_min, -model.speed_step)
self.speed = None
# Others
self.steps_activate = np.random.exponential(model.gates_speed)
self.wiggle = min(model.max_wiggle, speed_max)
# History
if model.do_history:
self.history_locations = []
self.history_speeds = []
self.history_wiggles = 0
self.history_collisions = 0
self.step_start = None
def step(self, model):
'''
Iterate the agent.
Description:
If they are inactive then it checks to see if they should become active.
If they are active then they move or leave the model.
'''
if self.status == 0:
self.activate(model)
elif self.status == 1:
self.move(model)
self.deactivate(model)
self.history(model)
def activate(self, model):
'''
Test whether an agent should become active.
This happens when the model time is greater than the agent's activate time.
'''
if model.step_id > self.steps_activate:
self.status = 1
model.pop_active += 1
self.step_start = model.step_id
@staticmethod
def distance(loc1, loc2):
'''
A helpful function to calculate the distance between two points.
This simply takes the square root of the sum of the square of the elements.
This appears to be faster than using np.linalg.norm.
No doubt the numpy implementation would be faster for large arrays.
Fortunately, all of our norms are of two-element arrays.
        :param loc1: A numpy array (or array-like DS) with length two - the first point.
        :param loc2: A numpy array (or array-like DS) with length two - the second point.
        :return norm: The Euclidean distance between the two points.
'''
x = loc1[0] - loc2[0]
y = loc1[1] - loc2[1]
norm = (x*x + y*y)**.5
return norm
def move(self, model):
'''
Move the agent towards their destination. If the way is clear then the
agent moves the maximum distance they can given their maximum possible
speed (self.speed_desire). If not, then they iteratively test smaller
and smaller distances until they find one that they can travel to
        without causing a collision with another agent.
'''
direction = (self.loc_desire - self.location) / self.distance(self.loc_desire, self.location)
for speed in self.speeds:
# Direct. Try to move forwards by gradually smaller and smaller amounts
new_location = self.location + speed * direction
if self.collision(model, new_location):
if model.do_history:
self.history_collisions += 1
model.history_collision_locs.append(new_location)
model.history_collision_times.append(model.step_id)
else:
break
            # If even the slowest speed results in a collision, then wiggle.
if speed == self.speeds[-1]:
new_location = self.location + [0, self.wiggle*np.random.randint(-1, 1+1)]
if model.do_history:
self.history_wiggles += 1
model.history_wiggle_locs.append(new_location)
# Rebound
if not model.is_within_bounds(new_location):
new_location = model.re_bound(new_location)
# Move
self.location = new_location
self.speed = speed
def collision(self, model, new_location):
'''
Detects whether a move to the new_location will cause a collision
(either with the model boundary or another agent).
'''
if not model.is_within_bounds(new_location):
collide = True
elif self.neighbourhood(model, new_location):
collide = True
else:
collide = False
return collide
def neighbourhood(self, model, new_location):
'''
This method finds whether or not nearby neighbours are a collision.
:param model: the model that this agent is part of
:param new_location: the proposed new location that the agent will move to
(a standard (x,y) floats-tuple)
'''
neighbours = False
neighbouring_agents = model.tree.query_ball_point(new_location, model.separation)
for neighbouring_agent in neighbouring_agents:
agent = model.agents[neighbouring_agent]
if agent.status == 1 and self.unique_id != agent.unique_id and new_location[0] <= agent.location[0]:
neighbours = True
break
return neighbours
def deactivate(self, model):
'''
Determine whether the agent should leave the model and, if so,
remove them. Otherwise do nothing.
'''
if self.distance(self.location, self.loc_desire) < model.gates_space:
self.status = 2
model.pop_active -= 1
model.pop_finished += 1
if model.do_history:
steps_exped = (self.distance(self.loc_start, self.loc_desire) - model.gates_space) / self.speeds[0]
model.steps_exped.append(steps_exped)
steps_taken = model.step_id - self.step_start
model.steps_taken.append(steps_taken)
steps_delay = steps_taken - steps_exped
model.steps_delay.append(steps_delay)
def history(self, model):
'''
Save agent location.
'''
if model.do_history:
if self.status == 1:
self.history_locations.append(self.location)
else:
self.history_locations.append((None, None))
class Model:
'''
StationSim Model
Description:
An Agent-Based Model (ABM) that synchronously `steps`
step()
Params:
unique_id
**kwargs # check `params`, and `params_changed`
do_history # save memory
do_print # mute printing
Returns:
step_id
params
params_changed
get_state()
set_state()
get_analytics()
get_trails()
get_timehist()
get_location_map()
get_wiggle_map()
get_ani()
'''
def __init__(self, unique_id=None, **kwargs):
'''
        Create a new model, reading parameters from a keyword argument dictionary.
'''
self.unique_id = unique_id
self.status = 1
# Default Parameters (usually overridden by the caller)
params = {
'pop_total': 100,
'width': 400,
'height': 200,
'gates_in': 3,
'gates_out': 2,
'gates_space': 1, # Distance around gate that will cause the agent to leave the simulation
'gates_speed': 1,
# Agent maximum speed is chosen when the agent is created and drawn from a normal distribution
'speed_min': .2, # No speeds can be below this
'speed_mean': 1,
'speed_std': 1,
'speed_steps': 3, #
'separation': 5,
'max_wiggle': 1,
'step_limit': 3600,
'do_history': True,
'do_print': True,
'random_seed': int.from_bytes(os.urandom(4), byteorder='little')
}
if len(kwargs) == 0:
warnings.warn(
"No parameters have been passed to the model; using the default parameters: {}".format(params),
RuntimeWarning
)
self.params, self.params_changed = Model._init_kwargs(params, kwargs)
[setattr(self, key, value) for key, value in self.params.items()]
# Set the random seed
np.random.seed(self.random_seed)
# Constants
self.speed_step = (self.speed_mean - self.speed_min) / self.speed_steps
self.boundaries = np.array([[0, 0], [self.width, self.height]])
# Following replaced with a normal function
#gates_init = lambda x, y, n: np.array([np.full(n, x), np.linspace(0, y, n + 2)[1:-1]]).T
self.gates_locations = np.concatenate([Model._gates_init(0, self.height, self.gates_in), Model._gates_init(self.width, self.height, self.gates_out)])
# Variables
self.step_id = 0
self.pop_active = 0
self.pop_finished = 0
# Initialise
self.agents = [Agent(self, unique_id) for unique_id in range(self.pop_total)]
# Following replaced with a normal function
#self.is_within_bounds = lambda loc: all(self.boundaries[0] <= loc) and all(loc <= self.boundaries[1])
#self.re_bound = lambda loc: np.clip(loc, self.boundaries[0], self.boundaries[1])
if self.do_history:
self.history_state = []
self.history_wiggle_locs = []
self.history_collision_locs = []
self.history_collision_times = []
self.steps_taken = []
self.steps_exped = []
self.steps_delay = []
# Figure Shape Stuff
self._wid = 8
self._rel = self._wid / self.width
self._hei = self._rel * self.height
self._figsize = (self._wid, self._hei)
self._dpi = 160
@staticmethod
def _gates_init(x, y, n):
return np.array([np.full(n, x), np.linspace(0, y, n+2)[1:-1]]).T
def is_within_bounds(self, loc):
return all(self.boundaries[0] <= loc) and all(loc <= self.boundaries[1])
def re_bound(self, loc):
return np.clip(loc, self.boundaries[0], self.boundaries[1])
@staticmethod
def _init_kwargs(dict0, dict1):
'''
Internal dictionary update tool
dict0 is updated by dict1 adding no new keys.
dict2 is the changes excluding 'do_' keys.
'''
dict2 = dict()
for key in dict1.keys():
if key in dict0:
if dict0[key] is not dict1[key]:
dict0[key] = dict1[key]
if 'do_' not in key:
dict2[key] = dict1[key]
else:
print(f'BadKeyWarning: {key} is not a model parameter.')
return dict0, dict2
def step(self):
'''
Iterate model forward one step.
'''
if self.pop_finished < self.pop_total and self.step_id < self.step_limit and self.status==1:
if self.do_print and self.step_id%100==0:
print(f'\tIteration: {self.step_id}/{self.step_limit}')
state = self.get_state('location2D')
self.tree = cKDTree(state)
[agent.step(self) for agent in self.agents]
if self.do_history:
self.history_state.append(state)
self.step_id += 1
else:
if self.do_print and self.status==1:
print(f'StationSim {self.unique_id} - Everyone made it!')
self.status = 0
# State
def get_state(self, sensor=None):
'''
Convert list of agents in model to state vector.
'''
if sensor is None:
state = [(agent.status, *agent.location, agent.speed) for agent in self.agents]
state = np.append(self.step_id, np.ravel(state))
        elif sensor == 'location':
state = [agent.location for agent in self.agents]
state = np.ravel(state)
        elif sensor == 'location2D':
state = [agent.location for agent in self.agents]
return state
def set_state(self, state, sensor=None):
'''
Use state vector to set agent locations.
'''
if sensor is None:
self.step_id = int(state[0])
state = np.reshape(state[1:], (self.pop_total, 3))
for i, agent in enumerate(self.agents):
agent.status = int(state[i, 0])
agent.location = state[i, 1:]
        elif sensor == 'location':
state = np.reshape(state, (self.pop_total, 2))
for i, agent in enumerate(self.agents):
agent.location = state[i, :]
        elif sensor == 'location2D':
for i, agent in enumerate(self.agents):
agent.location = state[i, :]
# TODO: Deprecated, update PF
def agents2state(self, do_ravel=True):
warnings.warn("Replace 'state = agents2state()' with 'state = get_state(sensor='location')'", DeprecationWarning)
return self.get_state(sensor='location')
def state2agents(self, state):
warnings.warn("Replace 'state2agents(state)' with 'set_state(state, sensor='location')'", DeprecationWarning)
return self.set_state(state, sensor='location')
# Analytics
def get_analytics(self, sig_fig=None):
'''
A collection of analytics.
'''
analytics = {
'Finish Time': self.step_id,
'Total': self.pop_total,
'Active': self.pop_active,
'Finished': self.pop_finished,
'Mean Time Taken': np.mean(self.steps_taken),
'Mean Time Expected': np.mean(self.steps_exped),
'Mean Time Delay': np.mean(self.steps_delay),
'Mean Collisions': np.mean([agent.history_collisions for agent in self.agents]),
'Mean Wiggles': np.mean([agent.history_wiggles for agent in self.agents]),
# 'GateWiggles': sum(wig[0]<self.gates_space for wig in self.history_wiggle_locs)/self.pop_total
}
return analytics
def get_trails(self, plot_axis=False, plot_legend=True, colours=('b','g','r'), xlim=None, ylim=None):
"""
Make a figure showing the trails of the agents.
:param plot_axis: Whether to show the axis (default False)
:param plot_legend: Whether to show the legend (default False)
        :param colours: Optional tuple with three values giving the colours of agents that are
            active (status 1), finished (status 2), or otherwise not yet started. Default: ('b','g','r')
:param xlim Optional x axis limits (usually a tuple of (xmin,xmax)).
:param ylim Optional y axis limits (usually a tuple of (ymin,ymax)).
:return: The matplotlib Figure object.
"""
fig = plt.figure(figsize=self._figsize, dpi=self._dpi)
plt.axis(np.ravel(self.boundaries, 'f'))
if not plot_axis:
plt.axis('off')
else:
plt.ylabel("Y position")
plt.xlabel("X position")
plt.plot([], 'b')
plt.plot([], 'g')
plt.title('Agent Trails')
if plot_legend:
plt.legend(['Active', 'Finished'])
plt.tight_layout(pad=0)
for agent in self.agents:
if agent.status == 1:
alpha = 1
colour = colours[0]
elif agent.status == 2:
alpha = .5
colour = colours[1]
else:
alpha = 1
colour = colours[2]
locs = np.array(agent.history_locations).T
plt.plot(*locs, color=colour, alpha=alpha, linewidth=.5)
if xlim != None: # Optionally set the x limits
plt.xlim(xlim)
        if ylim != None: # Optionally set the y limits
            plt.ylim(ylim)
return fig
def get_histogram(self):
fig = plt.figure(figsize=self._figsize, dpi=self._dpi)
fmax = max(np.amax(self.steps_exped), np.amax(self.steps_taken), np.amax(self.steps_delay))
sround = lambda x, p: float(f'%.{p-1}e'%x)
bins = np.linspace(0, sround(fmax, 2), 20)
plt.hist(self.steps_exped, bins=bins+4, alpha=.5, label='Expected')
plt.hist(self.steps_taken, bins=bins+2, alpha=.5, label='Taken')
plt.hist(self.steps_delay, bins=bins+0, alpha=.5, label='Delayed')
plt.xlabel('Time')
plt.ylabel('Number of Agents')
plt.grid(False)
plt.legend()
plt.tight_layout(pad=0)
return fig
@staticmethod
def _heightmap(data, ax=None, kdeplot=True, cmap=None, alpha=.7, cbar=False):
if kdeplot:
from seaborn import kdeplot as sns_kdeplot
sns_kdeplot(*data, ax=ax, cmap=cmap, alpha=alpha, shade=True, shade_lowest=False, cbar=cbar)
else:
hdata, binx, biny = np.histogram2d(*data, (20, 10))
ax.contourf(hdata.T, cmap=cmap, alpha=alpha, extend='min', extent=(binx[0],binx[-1],biny[0],biny[-1]))
return ax
def get_wiggle_map(self, do_kdeplot=True, title="Collision Map"):
""" Show where wiggles and collisions took place
:param do_kdeplot:
:param title: (optional) title for the graph
:return: The figure object
"""
fig, ax = plt.subplots(1, figsize=self._figsize, dpi=self._dpi)
fig.tight_layout(pad=0)
self._heightmap(np.array(self.history_collision_locs).T, ax=ax, kdeplot=do_kdeplot)
self._heightmap(np.array(self.history_wiggle_locs).T, ax=ax)
ax.set(frame_on=False, aspect='equal', xlim=self.boundaries[:,0], xticks=[],
ylim=self.boundaries[:,1], yticks=[], title=title)
return fig
def get_collision_map(self, *args, **kwargs):
"""For making a map of collisions and wiggles. Just calls get_wiggle_map()"""
        return self.get_wiggle_map(*args, **kwargs)
def get_location_map(self, do_kdeplot=True, title="Location Map", color_bar = False, plot_axis=False):
"""
Create a density plot of the agents' locations
:param do_kdeplot:
:param title: (optional) title for the plot
:return:
"""
history_locs = []
for agent in self.agents:
for loc in agent.history_locations:
if None not in loc:
history_locs.append(loc)
history_locs = np.array(history_locs).T
fig, ax = plt.subplots(1, figsize=self._figsize, dpi=self._dpi)
fig.tight_layout(pad=0)
self._heightmap(data=history_locs, ax=ax, kdeplot=do_kdeplot, cmap='gray_r', cbar=color_bar)
ax.set(frame_on=plot_axis, aspect='equal', xlim=self.boundaries[:,0], xticks=[],
ylim=self.boundaries[:,1], yticks=[], title=title)
if plot_axis:
ax.set_ylabel("Y position")
ax.set_xlabel("X position")
return fig
def get_ani(self, agents=None, colour='k', alpha=.5, show_separation=False, wiggle_map=False):
# Load Data
locs = np.array([agent.history_locations for agent in self.agents[:agents]]).transpose((1,2,0))
markersize = self.separation * 216*self._rel # 3*72px/in=216
#
fig, ax = plt.subplots(figsize=self._figsize, dpi=self._dpi)
if wiggle_map:
            sns_kdeplot(*np.array(self.history_collision_locs).T, ax=ax, cmap='gray_r', alpha=.3, shade=True, shade_lowest=False)
ln0, = plt.plot([], [], '.', alpha=.05, color=colour, markersize=markersize)
ln1, = plt.plot([], [], '.', alpha=alpha, color=colour)
def init():
fig.tight_layout(pad=0)
ax.set(frame_on=False, aspect='equal', xlim=self.boundaries[:,0], xticks=[], ylim=self.boundaries[:,1], yticks=[])
return ln0, ln1,
def func(frame):
            if show_separation:
                ln0.set_data(*locs[frame])
            ln1.set_data(*locs[frame])
return ln0, ln1,
frames = self.step_id
ani = FuncAnimation(fig, func, frames, init, interval=100, blit=True)
return ani
@classmethod
def set_random_seed(cls, seed=None):
"""Set a new numpy random seed
:param seed: the optional seed value (if None then get one from os.urandom)
"""
new_seed = int.from_bytes(os.urandom(4), byteorder='little') if seed == None else seed
        np.random.seed(new_seed)
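# A minimal usage sketch, assuming the module is executed directly. The
# parameter values below are arbitrary illustrations, not recommended settings.
if __name__ == '__main__':
    example_model = Model(pop_total=20, step_limit=500, do_print=False)
    while example_model.status == 1:
        example_model.step()
    print(example_model.get_analytics())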
import numpy as np
import itertools
import sys
import argparse
import os
import random
import tqdm
import time
###############################################################################
def get_distance_matrix(dist_matrix_file):
tstart = time.time()
if not os.path.exists(dist_matrix_file):
        sys.stderr.write("File '%s' does not exist\n"%(dist_matrix_file))
#end if
dist_matrix = np.loadtxt(fname=dist_matrix_file, delimiter=",", dtype=float)
#sys.stdout.write("get-distance-matrix: [total: %.2fs]\n"%(time.time()-tstart))
#sys.stdout.flush()
return dist_matrix
#end get_distance_matrix()
def get_color_matrix(color_matrix_file):
tstart= time.time()
if not os.path.exists(color_matrix_file):
        sys.stderr.write("File '%s' does not exist\n"%(color_matrix_file))
#end if
color_matrix = np.loadtxt(fname=color_matrix_file, delimiter=",", dtype=int)
#sys.stdout.write("get-color-matrix: [total: %.2fs]\n"%(time.time()-tstart))
#sys.stdout.flush()
return color_matrix
#end get_distance_matrix()
################################################################################
def local_search_v3_iter(A, C, R, F, S_in, cost_in):
cost = cost_in
S = np.sort(S_in)
iters = 0
for i in range(len(S)):
u = S[i]
S_d = S.copy()
for j in range(len(F)):
v = F[j]
if v in S_d:
continue;
iters += 1
S_d[i] = v
R_d = C[:, S_d].sum(axis=1)
temp_R = np.subtract(R, R_d)
if np.any(temp_R < 0):
continue;
temp_cost = np.sum(A[:, S_d].min(axis=1))
if temp_cost < cost:
cost = temp_cost
S[i] = v
#end if
#end for
#end for
return cost, S, iters
#end local_search_v3_iter()
def local_search_v3(A, C, R, seed):
np.random.seed(seed)
r0 = R[0]
r1 = R[1]
F0 = np.array(np.nonzero(C[0])[0])
F1 = np.array(np.nonzero(C[1])[0])
F = np.sort(np.concatenate([F0, F1]))
if (len(F0) < r0) or (len(F1) < r1):
cost = np.inf
solution = []
return cost, solution, 0
#end if
# initialise a random assignment
S0 = np.random.choice(F0, r0)
S1 = np.random.choice(F1, r1)
S = np.sort(np.concatenate([S0, S1]))
cost = np.sum(A[:, S].min(axis=1))
iters = 0
while(1):
cur_cost, cur_S, cur_iters = local_search_v3_iter(A, C, R, F, S, cost)
cur_R = C[:, cur_S].sum(axis=1)
iters += cur_iters
if cur_cost >= cost:
break;
else:
cost = cur_cost
S = cur_S
#end if
#end while
return cost, S, iters
#end local_search_v3()
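# A small synthetic usage sketch for local_search_v3(). The random instance
# below (10 points, 2 colour classes, 3 centres) is an assumption made purely
# for illustration; the real experiments load A and C from the csv files
# handled by the helper functions above.
def _local_search_demo(n=10, rng_seed=0):
    rng = np.random.RandomState(rng_seed)
    pts = rng.rand(n, 2)
    A = np.abs(pts[:, None, :] - pts[None, :, :]).sum(axis=2)  # pairwise L1 distances
    colors = rng.randint(0, 2, size=n)
    C = np.vstack([(colors == 0).astype(int), (colors == 1).astype(int)])
    R = np.array([2, 1])    # require 2 centres of colour 0 and 1 of colour 1
    cost, S, iters = local_search_v3(A, C, R, seed=123)
    return cost, S, iters
#end _local_search_demo()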
###############################################################################
def run_exp1(output = sys.stdout):
dataset_list = ["heart-switzerland",\
"heart-va",\
"heart-hungarian",\
"heart-cleveland",\
"student-mat",\
"house-votes-84",\
"student-por",\
"student-per2",\
"autism",\
"hcv-egy-data",\
#"cmc"
]
k = 10
min_frac_list = np.array([0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0])
#min_frac_list = np.array([0.1, 0.2])
#seed_list = [ 94236883, 2611535, 34985942, 6378810, 15208894, 25557092,\
# 43871896, 15786068, 86513484, 118111772]
seed_list = [123456789]
for dataset in dataset_list:
#dataset = "heart-switzerland"
dist_file = "../dataset_c2/%s-distances-l1.csv"%(dataset)
color_file = "../dataset_c2/%s-colors.csv"%(dataset)
A = get_distance_matrix(dist_file)
C = get_color_matrix(color_file)
for seed in seed_list:
for min_frac in min_frac_list:
total_time = 0.0
cost = np.inf
r0_min = int(k*min_frac)
for r0 in range(r0_min, k+1):
r1 = int(k - r0)
R = np.array([r0, r1])
tstart = time.time()
cur_cost, cur_S, cur_iters = local_search_v3(A, C, R, seed)
total_time += time.time() - tstart
if cur_cost < cost:
cost = cur_cost
S = cur_S
#end if
#end for
S = np.sort(S)
R_d = C[:, S].sum(axis=1)
output.write("%s, %d, %.2f, %d, %d, %.2f, %.2f, %d\n"%\
(dataset, k, min_frac, R_d[0], R_d[1], cost,\
total_time, seed))
output.flush()
sys.stdout.write("%s, %d, %.2f, %d, %d, %.2f, %.2f, %d\n"%\
(dataset, k, min_frac, R_d[0], R_d[1], cost,\
total_time, seed))
sys.stdout.flush()
#print(cost, S, total_time)
#end for
#end for
#end run_exp1()
def run_exp2(output = sys.stdout):
dataset_list = ["cmc",\
"abalone",\
"mushroom",\
"nursery",\
"census-income"\
]
k = 10
min_frac_list = np.array([0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0])
#seed_list = [ 94236883, 2611535, 34985942, 6378810, 15208894, 25557092,\
# 43871896, 15786068, 86513484, 118111772]
seed_list = [123456789]
for dataset in dataset_list:
#dataset = "heart-switzerland"
dist_file = "../dataset_c2/%s-distances-l1.csv"%(dataset)
color_file = "../dataset_c2/%s-colors.csv"%(dataset)
A = get_distance_matrix(dist_file)
C = get_color_matrix(color_file)
for seed in seed_list:
for min_frac in min_frac_list:
total_time = 0.0
cost = np.inf
r0_min = int(k*min_frac)
for r0 in range(r0_min, k+1):
r1 = int(k - r0)
R = np.array([r0, r1])
tstart = time.time()
                    cur_cost, cur_S, cur_iters = local_search_v3(A, C, R, seed)
total_time += time.time() - tstart
if cur_cost < cost:
cost = cur_cost
                        S = cur_S
#end if
#end for
                S = np.sort(S)
                R_d = C[:, S].sum(axis=1)
output.write("%s, %d, %.2f, %d, %d, %.2f, %.2f, %d\n"%\
                        (dataset, k, min_frac, R_d[0], R_d[1], cost,\
total_time, seed))
output.flush()
sys.stdout.write("%s, %d, %.2f, %d, %d, %.2f, %.2f, %d\n"%\
                        (dataset, k, min_frac, R_d[0], R_d[1], cost,\
total_time, seed))
sys.stdout.flush()
#print(cost, S0, S1, S, total_time)
#end for
#end for
#end run_exp1()
################################################################################
def local_search_v3_gen(A, C, R, seed):
np.random.seed(seed)
F = np.array([], dtype=int)
S = np.array([], dtype=int)
for i in range(len(R)):
F_i = np.array(np.nonzero(C[i])[0])
if len(F_i) < R[i]:
return np.inf, [], 0
S_i = np.random.choice(F_i, R[i])
F = np.concatenate([F, F_i])
S = np.concatenate([S, S_i])
#end for
F = np.sort(F)
S = np.sort(S)
cost = np.sum(A[:, S].min(axis=1))
iters = 0
while(1):
cur_cost, cur_S, cur_iters = local_search_v3_iter(A, C, R, F, S, cost)
cur_R = C[:, cur_S].sum(axis=1)
iters += cur_iters
if cur_cost >= cost:
break;
else:
cost = cur_cost
S = cur_S
#end if
#end while
return cost, S, iters
#end local_search_v3()
def run_exp3(output = sys.stdout):
dataset_list = ["heart-switzerland",\
"heart-va",\
"heart-hungarian",\
"heart-cleveland",\
"student-mat",\
"house-votes-84",\
"student-por",\
"student-per2",\
"autism",\
"hcv-egy-data",\
#"cmc"
]
k = 10
min_frac_list = np.array([0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0])
seed_list = [ 94236883, 2611535, 34985942, 6378810, 15208894, 25557092,\
43871896, 15786068, 86513484, 118111772]
for dataset in dataset_list:
#dataset = "heart-switzerland"
dist_file = "../dataset_c2/%s-distances-l1.csv"%(dataset)
color_file = "../dataset_c2/%s-colors.csv"%(dataset)
A = get_distance_matrix(dist_file)
C = get_color_matrix(color_file)
for seed in seed_list:
for min_frac in min_frac_list:
total_time = 0.0
cost = np.inf
r0_min = int(k*min_frac)
for r0 in range(r0_min, k+1):
r1 = int(k - r0)
                    R = np.array([r0, r1])
# imports
import numpy as np
import matplotlib.pyplot as plt
from nnnn import NNNN
from sklearn.datasets import load_digits
# globals
TRAIN_TEST_RATIO = 0.5
ITERATIONS = 100
# functions
def nnnnormalize(x):
"""Normalize x to [-1.0, 1.0]"""
return -1.0+2.0*(x-x.min())/(x.max()-x.min())
# main
digits = load_digits()
X = digits.data
T = digits.target
X = nnnnormalize(X)
n_train = int(len(X)*TRAIN_TEST_RATIO)
X_train = X[:n_train]
X_test = X[n_train:]
T_train = T[:n_train]
T_test = T[n_train:]
T_train_encoded = np.zeros((n_train, 10))
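# A typical next step after allocating T_train_encoded is to fill it as a
# one-hot target matrix. The helper below is a self-contained numpy sketch of
# that encoding; it is an illustration only, and the nnnn training calls that
# would follow are not sketched because the library's exact API is not assumed
# here.
def _one_hot(labels, n_classes=10):
    encoded = np.zeros((len(labels), n_classes))
    encoded[np.arange(len(labels)), labels] = 1.0
    return encoded
# e.g. T_train_encoded = _one_hot(T_train); T_test_encoded = _one_hot(T_test)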
# -*- coding: utf-8 -*-
#
# Copyright (c) 2018 Leland Stanford Junior University
# Copyright (c) 2018 The Regents of the University of California
#
# This file is part of pelicun.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# You should have received a copy of the BSD 3-Clause License along with
# pelicun. If not, see <http://www.opensource.org/licenses/>.
#
# Contributors:
# <NAME>
"""
This subpackage performs system tests on the control module of pelicun.
"""
import pytest
import numpy as np
from numpy.testing import assert_allclose
from scipy.stats import truncnorm as tnorm
from copy import deepcopy
import os, sys, inspect
current_dir = os.path.dirname(
os.path.abspath(inspect.getfile(inspect.currentframe())))
parent_dir = os.path.dirname(current_dir)
sys.path.insert(0,os.path.dirname(parent_dir))
from pelicun.control import *
from pelicun.uq import mvn_orthotope_density as mvn_od
from pelicun.tests.test_pelicun import prob_allclose, prob_approx
# -----------------------------------------------------------------------------
# FEMA_P58_Assessment
# -----------------------------------------------------------------------------
def test_FEMA_P58_Assessment_central_tendencies():
"""
Perform a loss assessment with customized inputs that reduce the
dispersion of calculation parameters to negligible levels. This allows us
to test the results against pre-defined reference values in spite of the
randomness involved in the calculations.
"""
base_input_path = 'resources/'
DL_input = base_input_path + 'input data/' + "DL_input_test.json"
EDP_input = base_input_path + 'EDP data/' + "EDP_table_test.out"
A = FEMA_P58_Assessment()
A.read_inputs(DL_input, EDP_input, verbose=False)
A.define_random_variables()
# -------------------------------------------------- check random variables
# EDP
RV_EDP = list(A._EDP_dict.values())[0]
assert RV_EDP.theta[0] == pytest.approx(0.5 * g)
assert RV_EDP.theta[1] == pytest.approx(0.5 * g * 1e-6, abs=1e-7)
assert RV_EDP._distribution == 'lognormal'
# QNT
assert A._QNT_dict is None
#RV_QNT = A._RV_dict['QNT']
#assert RV_QNT is None
# FRG
RV_FRG = list(A._FF_dict.values())
thetas, betas = np.array([rv.theta for rv in RV_FRG]).T
assert_allclose(thetas, np.array([0.444, 0.6, 0.984]) * g, rtol=0.01)
assert_allclose(betas, np.array([0.3, 0.4, 0.5]), rtol=0.01)
rho = RV_FRG[0].RV_set.Rho()
assert_allclose(rho, np.ones((3, 3)), rtol=0.01)
assert np.all([rv.distribution == 'lognormal' for rv in RV_FRG])
# RED
RV_RED = list(A._DV_RED_dict.values())
mus, sigmas = np.array([rv.theta for rv in RV_RED]).T
assert_allclose(mus, np.ones(2), rtol=0.01)
assert_allclose(sigmas, np.array([1e-4, 1e-4]), rtol=0.01)
rho = RV_RED[0].RV_set.Rho()
assert_allclose(rho, np.array([[1, 0], [0, 1]]), rtol=0.01)
assert np.all([rv.distribution == 'normal' for rv in RV_RED])
assert_allclose (RV_RED[0].truncation_limits, [0., 2.], rtol=0.01)
assert_allclose (RV_RED[1].truncation_limits, [0., 4.], rtol=0.01)
# INJ
RV_INJ = list(A._DV_INJ_dict.values())
mus, sigmas = np.array([rv.theta for rv in RV_INJ]).T
assert_allclose(mus, np.ones(4), rtol=0.01)
assert_allclose(sigmas, np.ones(4) * 1e-4, rtol=0.01)
rho = RV_INJ[0].RV_set.Rho()
rho_target = np.zeros((4, 4))
np.fill_diagonal(rho_target, 1.)
assert_allclose(rho, rho_target, rtol=0.01)
assert np.all([rv.distribution == 'normal' for rv in RV_INJ])
assert_allclose(RV_INJ[0].truncation_limits, [0., 10./3.], rtol=0.01)
assert_allclose(RV_INJ[1].truncation_limits, [0., 10./3.], rtol=0.01)
assert_allclose(RV_INJ[2].truncation_limits, [0., 10.], rtol=0.01)
assert_allclose(RV_INJ[3].truncation_limits, [0., 10.], rtol=0.01)
# REP
RV_REP = list(A._DV_REP_dict.values())
thetas, betas = np.array([rv.theta for rv in RV_REP]).T
assert_allclose(thetas, np.ones(6), rtol=0.01)
assert_allclose(betas, np.ones(6) * 1e-4, rtol=0.01)
rho = RV_REP[0].RV_set.Rho()
rho_target = np.zeros((6, 6))
np.fill_diagonal(rho_target, 1.)
assert_allclose(rho, rho_target, rtol=0.01)
assert np.all([rv.distribution == 'lognormal' for rv in RV_REP])
# ------------------------------------------------------------------------
A.define_loss_model()
# QNT (deterministic)
QNT = A._FG_dict['T0001.001']._performance_groups[0]._quantity
assert QNT == pytest.approx(50., rel=0.01)
A.calculate_damage()
# ------------------------------------------------ check damage calculation
# TIME
T_check = A._TIME.describe().T.loc[['hour','month','weekday?'],:]
assert_allclose(T_check['mean'], np.array([11.5, 5.5, 5. / 7.]), rtol=0.05)
assert_allclose(T_check['min'], np.array([0., 0., 0.]), rtol=0.01)
assert_allclose(T_check['max'], np.array([23., 11., 1.]), rtol=0.01)
assert_allclose(T_check['50%'], np.array([12., 5., 1.]), atol=1.0)
assert_allclose(T_check['count'], np.array([10000., 10000., 10000.]),
rtol=0.01)
# POP
P_CDF = A._POP.describe(np.arange(1, 27) / 27.).iloc[:, 0].values[4:]
vals, counts = np.unique(P_CDF, return_counts=True)
assert_allclose(vals, np.array([0., 2.5, 5., 10.]), rtol=0.01)
assert_allclose(counts, np.array([14, 2, 7, 5]), atol=1)
# COL
COL_check = A._COL.describe().T
assert COL_check['mean'].values[0] == pytest.approx(0.5, rel=0.05)
assert len(A._ID_dict['non-collapse']) == pytest.approx(5000, rel=0.05)
assert len(A._ID_dict['collapse']) == pytest.approx(5000, rel=0.05)
# DMG
DMG_check = A._DMG.describe().T
assert_allclose(DMG_check['mean'], np.array([17.074, 17.074, 7.9361]),
rtol=0.1, atol=1.0)
assert_allclose(DMG_check['min'], np.zeros(3), rtol=0.01)
assert_allclose(DMG_check['max'], np.ones(3) * 50.0157, rtol=0.05)
# ------------------------------------------------------------------------
A.calculate_losses()
# -------------------------------------------------- check loss calculation
# RED
DV_RED = A._DV_dict['red_tag'].describe().T
assert_allclose(DV_RED['mean'], np.array([0.341344, 0.1586555]), rtol=0.1)
# INJ - collapse
DV_INJ_C = deepcopy(A._COL[['INJ-0', 'INJ-1']])
DV_INJ_C.dropna(inplace=True)
NC_count = DV_INJ_C.describe().T['count'][0]
assert_allclose(NC_count, np.ones(2) * 5000, rtol=0.05)
# lvl 1
vals, counts = np.unique(DV_INJ_C.iloc[:, 0].values, return_counts=True)
assert_allclose(vals, np.array([0., 2.5, 5., 10.]) * 0.1, rtol=0.01)
assert_allclose(counts / NC_count, np.array([14, 2, 7, 5]) / 28., atol=0.01, rtol=0.1)
# lvl 2
vals, counts = np.unique(DV_INJ_C.iloc[:, 1].values, return_counts=True)
assert_allclose(vals, np.array([0., 2.5, 5., 10.]) * 0.9, rtol=0.01)
assert_allclose(counts / NC_count, np.array([14, 2, 7, 5]) / 28., atol=0.01, rtol=0.1)
# INJ - non-collapse
DV_INJ_NC = deepcopy(A._DV_dict['injuries'])
DV_INJ_NC[0].dropna(inplace=True)
assert_allclose(DV_INJ_NC[0].describe().T['count'], np.ones(2) * 5000,
rtol=0.05)
# lvl 1 DS2
I_CDF = DV_INJ_NC[0].iloc[:, 0]
I_CDF = np.around(I_CDF, decimals=3)
vals, counts = np.unique(I_CDF, return_counts=True)
assert_allclose(vals, np.array([0., 0.075, 0.15, 0.3]), rtol=0.01)
target_prob = np.array(
[0.6586555, 0., 0., 0.] + 0.3413445 * np.array([14, 2, 7, 5]) / 28.)
assert_allclose(counts / NC_count, target_prob, atol=0.01, rtol=0.1)
# lvl 1 DS3
I_CDF = DV_INJ_NC[0].iloc[:, 1]
I_CDF = np.around(I_CDF, decimals=3)
vals, counts = np.unique(I_CDF, return_counts=True)
assert_allclose(vals, np.array([0., 0.075, 0.15, 0.3]), rtol=0.01)
target_prob = np.array(
[0.8413445, 0., 0., 0.] + 0.1586555 * np.array([14, 2, 7, 5]) / 28.)
assert_allclose(counts / NC_count, target_prob, atol=0.01, rtol=0.1)
# lvl 2 DS2
I_CDF = DV_INJ_NC[1].iloc[:, 0]
I_CDF = np.around(I_CDF, decimals=3)
vals, counts = np.unique(I_CDF, return_counts=True)
assert_allclose(vals, np.array([0., 0.025, 0.05, 0.1]), rtol=0.01)
target_prob = np.array(
[0.6586555, 0., 0., 0.] + 0.3413445 * np.array([14, 2, 7, 5]) / 28.)
assert_allclose(counts / NC_count, target_prob, atol=0.01, rtol=0.1)
# lvl2 DS3
I_CDF = DV_INJ_NC[1].iloc[:, 1]
I_CDF = np.around(I_CDF, decimals=3)
vals, counts = np.unique(I_CDF, return_counts=True)
assert_allclose(vals, np.array([0., 0.025, 0.05, 0.1]), rtol=0.01)
target_prob = np.array(
[0.8413445, 0., 0., 0.] + 0.1586555 * np.array([14, 2, 7, 5]) / 28.)
assert_allclose(counts / NC_count, target_prob, atol=0.01, rtol=0.1)
# REP
assert len(A._ID_dict['non-collapse']) == len(A._ID_dict['repairable'])
assert len(A._ID_dict['irreparable']) == 0
# cost
DV_COST = A._DV_dict['rec_cost']
# DS1
C_CDF = DV_COST.iloc[:, 0]
C_CDF = np.around(C_CDF / 10., decimals=0) * 10.
vals, counts = np.unique(C_CDF, return_counts=True)
assert_allclose(vals, [0, 2500], rtol=0.01)
t_prob = 0.3413445
assert_allclose(counts / NC_count, [1. - t_prob, t_prob], rtol=0.1)
# DS2
C_CDF = DV_COST.iloc[:, 1]
C_CDF = np.around(C_CDF / 100., decimals=0) * 100.
vals, counts = np.unique(C_CDF, return_counts=True)
assert_allclose(vals, [0, 25000], rtol=0.01)
t_prob = 0.3413445
assert_allclose(counts / NC_count, [1. - t_prob, t_prob], rtol=0.1)
# DS3
C_CDF = DV_COST.iloc[:, 2]
C_CDF = np.around(C_CDF / 1000., decimals=0) * 1000.
vals, counts = np.unique(C_CDF, return_counts=True)
assert_allclose(vals, [0, 250000], rtol=0.01)
t_prob = 0.1586555
assert_allclose(counts / NC_count, [1. - t_prob, t_prob], rtol=0.1)
# time
DV_TIME = A._DV_dict['rec_time']
# DS1
T_CDF = DV_TIME.iloc[:, 0]
T_CDF = np.around(T_CDF, decimals=1)
vals, counts = np.unique(T_CDF, return_counts=True)
assert_allclose(vals, [0, 2.5], rtol=0.01)
t_prob = 0.3413445
assert_allclose(counts / NC_count, [1. - t_prob, t_prob], rtol=0.1)
# DS2
T_CDF = DV_TIME.iloc[:, 1]
T_CDF = np.around(T_CDF, decimals=0)
vals, counts = np.unique(T_CDF, return_counts=True)
assert_allclose(vals, [0, 25], rtol=0.01)
t_prob = 0.3413445
assert_allclose(counts / NC_count, [1. - t_prob, t_prob], rtol=0.1)
# DS3
T_CDF = DV_TIME.iloc[:, 2]
T_CDF = np.around(T_CDF / 10., decimals=0) * 10.
vals, counts = np.unique(T_CDF, return_counts=True)
assert_allclose(vals, [0, 250], rtol=0.01)
t_prob = 0.1586555
assert_allclose(counts / NC_count, [1. - t_prob, t_prob], rtol=0.1)
# ------------------------------------------------------------------------
A.aggregate_results()
# ------------------------------------------------ check result aggregation
S = A._SUMMARY
SD = S.describe().T
assert_allclose(S[('event time', 'month')], A._TIME['month'] + 1)
assert_allclose(S[('event time', 'weekday?')], A._TIME['weekday?'])
assert_allclose(S[('event time', 'hour')], A._TIME['hour'])
assert_allclose(S[('inhabitants', '')], A._POP.iloc[:, 0])
assert SD.loc[('collapses', 'collapsed'), 'mean'] == pytest.approx(0.5,
rel=0.05)
assert SD.loc[('collapses', 'mode'), 'mean'] == 0.
assert SD.loc[('collapses', 'mode'), 'count'] == pytest.approx(5000,
rel=0.05)
assert SD.loc[('red tagged', ''), 'mean'] == pytest.approx(0.5, rel=0.05)
assert SD.loc[('red tagged', ''), 'count'] == pytest.approx(5000, rel=0.05)
for col in ['irreparable', 'cost impractical', 'time impractical']:
assert SD.loc[('reconstruction', col), 'mean'] == 0.
assert SD.loc[('reconstruction', col), 'count'] == pytest.approx(5000,
rel=0.05)
RC = deepcopy(S.loc[:, ('reconstruction', 'cost')])
RC_CDF = np.around(RC / 1000., decimals=0) * 1000.
vals, counts = np.unique(RC_CDF, return_counts=True)
assert_allclose(vals, np.array([0, 2., 3., 25., 250., 300.]) * 1000.)
t_prob1 = 0.3413445 / 2.
t_prob2 = 0.1586555 / 2.
assert_allclose(counts / 10000.,
[t_prob2, t_prob1 / 2., t_prob1 / 2., t_prob1, t_prob2,
0.5], atol=0.01, rtol=0.1)
RT = deepcopy(S.loc[:, ('reconstruction', 'time-parallel')])
RT_CDF = np.around(RT, decimals=0)
vals, counts = np.unique(RT_CDF, return_counts=True)
assert_allclose(vals, np.array([0, 2., 3., 25., 250., 300.]))
t_prob1 = 0.3413445 / 2.
t_prob2 = 0.1586555 / 2.
assert_allclose(counts / 10000.,
[t_prob2, t_prob1 / 2., t_prob1 / 2., t_prob1, t_prob2,
0.5], atol=0.01, rtol=0.1)
assert_allclose(S.loc[:, ('reconstruction', 'time-parallel')],
S.loc[:, ('reconstruction', 'time-sequential')])
CAS = deepcopy(S.loc[:, ('injuries', 'sev1')])
CAS_CDF = np.around(CAS, decimals=3)
vals, counts = np.unique(CAS_CDF, return_counts=True)
assert_allclose(vals, [0, 0.075, 0.15, 0.25, 0.3, 0.5, 1.])
assert_allclose(counts / 10000.,
np.array([35, 1, 3.5, 2, 2.5, 7, 5]) / 56., atol=0.01,
rtol=0.1)
CAS = deepcopy(S.loc[:, ('injuries', 'sev2')])
CAS_CDF = np.around(CAS, decimals=3)
vals, counts = np.unique(CAS_CDF, return_counts=True)
assert_allclose(vals, [0, 0.025, 0.05, 0.1, 2.25, 4.5, 9.])
assert_allclose(counts / 10000.,
np.array([35, 1, 3.5, 2.5, 2, 7, 5]) / 56., atol=0.01,
rtol=0.1)
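# The reference values in these tests are orthotope probabilities of a
# multivariate lognormal distribution, computed with mvn_od(). The helper
# below is a small Monte Carlo cross-check of the same quantity; it is an
# illustrative sketch only and is not used by the tests themselves.
def _mc_lognormal_orthotope(mu_log, cov_log, lower, upper, n_samples=200000, seed=42):
    rng = np.random.RandomState(seed)
    x = np.exp(rng.multivariate_normal(mu_log, cov_log, size=n_samples))
    inside = np.all((x > np.asarray(lower)) & (x < np.asarray(upper)), axis=1)
    return inside.mean()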
def test_FEMA_P58_Assessment_EDP_uncertainty_basic():
"""
Perform a loss assessment with customized inputs that focus on testing the
methods used to estimate the multivariate lognormal distribution of EDP
values. Besides the fitting, this test also evaluates the propagation of
EDP uncertainty through the analysis. Dispersions in other calculation
parameters are reduced to negligible levels. This allows us to test the
results against pre-defined reference values in spite of the randomness
involved in the calculations.
"""
base_input_path = 'resources/'
DL_input = base_input_path + 'input data/' + "DL_input_test_2.json"
EDP_input = base_input_path + 'EDP data/' + "EDP_table_test_2.out"
A = FEMA_P58_Assessment()
A.read_inputs(DL_input, EDP_input, verbose=False)
A.define_random_variables()
# -------------------------------------------------- check random variables
# EDP
RV_EDP = list(A._EDP_dict.values())
thetas, betas = np.array([rv.theta for rv in RV_EDP]).T
assert_allclose(thetas, [9.80665, 12.59198, 0.074081, 0.044932], rtol=0.02)
assert_allclose(betas, [0.25, 0.25, 0.3, 0.4], rtol=0.02)
rho = RV_EDP[0].RV_set.Rho()
rho_target = [
[1.0, 0.6, 0.3, 0.3],
[0.6, 1.0, 0.3, 0.3],
[0.3, 0.3, 1.0, 0.7],
[0.3, 0.3, 0.7, 1.0]]
assert_allclose(rho, rho_target, atol=0.05)
assert np.all([rv.distribution == 'lognormal' for rv in RV_EDP])
# ------------------------------------------------------------------------
A.define_loss_model()
A.calculate_damage()
# ------------------------------------------------ check damage calculation
# COL
COL_check = A._COL.describe().T
col_target = 1.0 - mvn_od(np.log([0.074081, 0.044932]),
np.array([[1, 0.7], [0.7, 1]]) * np.outer(
[0.3, 0.4], [0.3, 0.4]),
upper=np.log([0.1, 0.1]))[0]
assert COL_check['mean'].values[0] == pytest.approx(col_target, rel=0.1)
# DMG
DMG_check = [len(np.where(A._DMG.iloc[:, i] > 0.0)[0]) / 10000. for i in
range(8)]
DMG_1_PID = mvn_od(np.log([0.074081, 0.044932]),
np.array([[1, 0.7], [0.7, 1]]) * np.outer([0.3, 0.4],
[0.3, 0.4]),
lower=np.log([0.05488, 1e-6]), upper=np.log([0.1, 0.1]))[
0]
DMG_2_PID = mvn_od(np.log([0.074081, 0.044932]),
np.array([[1, 0.7], [0.7, 1]]) * np.outer([0.3, 0.4],
[0.3, 0.4]),
lower=np.log([1e-6, 0.05488]), upper=np.log([0.1, 0.1]))[
0]
DMG_1_PFA = mvn_od(np.log([0.074081, 9.80665]),
np.array([[1, 0.3], [0.3, 1]]) * np.outer([0.3, 0.25],
[0.3, 0.25]),
lower=np.log([1e-6, 9.80665]),
upper=np.log([0.1, np.inf]))[0]
DMG_2_PFA = mvn_od(np.log([0.074081, 12.59198]),
np.array([[1, 0.3], [0.3, 1]]) * np.outer([0.3, 0.25],
[0.3, 0.25]),
lower=np.log([1e-6, 9.80665]),
upper=np.log([0.1, np.inf]))[0]
assert DMG_check[0] == pytest.approx(DMG_check[1], rel=0.01)
assert DMG_check[2] == pytest.approx(DMG_check[3], rel=0.01)
assert DMG_check[4] == pytest.approx(DMG_check[5], rel=0.01)
assert DMG_check[6] == pytest.approx(DMG_check[7], rel=0.01)
assert DMG_check[0] == pytest.approx(DMG_1_PID, rel=0.10)
assert DMG_check[2] == pytest.approx(DMG_2_PID, rel=0.10)
assert DMG_check[4] == pytest.approx(DMG_1_PFA, rel=0.10)
assert DMG_check[6] == pytest.approx(DMG_2_PFA, rel=0.10)
# ------------------------------------------------------------------------
A.calculate_losses()
# -------------------------------------------------- check loss calculation
# COST
DV_COST = A._DV_dict['rec_cost']
DV_TIME = A._DV_dict['rec_time']
C_target = [0., 250., 1250.]
T_target = [0., 0.25, 1.25]
# PG 1011 and 1012
P_target = [
mvn_od(np.log([0.074081, 0.044932]),
np.array([[1, 0.7], [0.7, 1]]) * np.outer([0.3, 0.4],
[0.3, 0.4]),
lower=np.log([1e-6, 1e-6]), upper=np.log([0.05488, 0.1]))[0],
mvn_od(np.log([0.074081, 0.044932]),
np.array([[1, 0.7], [0.7, 1]]) * np.outer([0.3, 0.4],
[0.3, 0.4]),
lower=np.log([0.05488, 0.05488]), upper=np.log([0.1, 0.1]))[0],
mvn_od(np.log([0.074081, 0.044932]),
np.array([[1, 0.7], [0.7, 1]]) * np.outer([0.3, 0.4],
[0.3, 0.4]),
lower=np.log([0.05488, 1e-6]), upper=np.log([0.1, 0.05488]))[0],
]
for i in [0, 1]:
C_test, P_test = np.unique(
np.around(DV_COST.iloc[:, i].values / 10., decimals=0) * 10.,
return_counts=True)
C_test = C_test[np.where(P_test > 10)]
T_test, P_test = np.unique(
np.around(DV_TIME.iloc[:, i].values * 100., decimals=0) / 100.,
return_counts=True)
T_test = T_test[np.where(P_test > 10)]
P_test = P_test[np.where(P_test > 10)]
P_test = P_test / 10000.
assert_allclose(P_target, P_test, atol=0.02)
assert_allclose(C_target, C_test, rtol=0.001)
assert_allclose(T_target, T_test, rtol=0.001)
# PG 1021 and 1022
P_target = [
mvn_od(np.log([0.074081, 0.044932]),
np.array([[1, 0.7], [0.7, 1]]) * np.outer([0.3, 0.4],
[0.3, 0.4]),
lower=np.log([1e-6, 1e-6]), upper=np.log([0.1, 0.05488]))[0],
mvn_od(np.log([0.074081, 0.044932]),
np.array([[1, 0.7], [0.7, 1]]) * np.outer([0.3, 0.4],
[0.3, 0.4]),
lower=np.log([0.05488, 0.05488]), upper=np.log([0.1, 0.1]))[0],
mvn_od(np.log([0.074081, 0.044932]),
np.array([[1, 0.7], [0.7, 1]]) * np.outer([0.3, 0.4],
[0.3, 0.4]),
lower=np.log([1e-6, 0.05488]), upper=np.log([0.05488, 0.1]))[0],
]
for i in [2, 3]:
C_test, P_test = np.unique(
np.around(DV_COST.iloc[:, i].values / 10., decimals=0) * 10.,
return_counts=True)
C_test = C_test[np.where(P_test > 10)]
T_test, P_test = np.unique(
np.around(DV_TIME.iloc[:, i].values * 100., decimals=0) / 100.,
return_counts=True)
T_test = T_test[np.where(P_test > 10)]
P_test = P_test[np.where(P_test > 10)]
P_test = P_test / 10000.
assert_allclose(P_target, P_test, atol=0.02)
assert_allclose(C_target, C_test, rtol=0.001)
assert_allclose(T_target, T_test, rtol=0.001)
# PG 2011 and 2012
P_target = [
mvn_od(np.log([0.074081, 9.80665, 12.59198]),
np.array([[1.0, 0.3, 0.3], [0.3, 1.0, 0.6],
[0.3, 0.6, 1.0]]) * np.outer([0.3, 0.25, 0.25],
[0.3, 0.25, 0.25]),
lower=np.log([1e-6, 1e-6, 1e-6]),
upper=np.log([0.1, 9.80665, np.inf]))[0],
mvn_od(np.log([0.074081, 9.80665, 12.59198]),
np.array([[1.0, 0.3, 0.3], [0.3, 1.0, 0.6],
[0.3, 0.6, 1.0]]) * np.outer([0.3, 0.25, 0.25],
[0.3, 0.25, 0.25]),
lower=np.log([1e-6, 9.80665, 9.80665]),
upper=np.log([0.1, np.inf, np.inf]))[0],
mvn_od(np.log([0.074081, 9.80665, 12.59198]),
np.array([[1.0, 0.3, 0.3], [0.3, 1.0, 0.6],
[0.3, 0.6, 1.0]]) * np.outer([0.3, 0.25, 0.25],
[0.3, 0.25, 0.25]),
lower=np.log([1e-6, 9.80665, 1e-6]),
upper=np.log([0.1, np.inf, 9.80665]))[0],
]
for i in [4, 5]:
C_test, P_test = np.unique(
np.around(DV_COST.iloc[:, i].values / 10., decimals=0) * 10.,
return_counts=True)
C_test = C_test[np.where(P_test > 10)]
T_test, P_test = np.unique(
np.around(DV_TIME.iloc[:, i].values * 100., decimals=0) / 100.,
return_counts=True)
T_test = T_test[np.where(P_test > 10)]
P_test = P_test[np.where(P_test > 10)]
P_test = P_test / 10000.
assert_allclose(P_target, P_test, atol=0.02)
assert_allclose(C_target, C_test, rtol=0.001)
assert_allclose(T_target, T_test, rtol=0.001)
# PG 2021 and 2022
P_target = [
mvn_od(np.log([0.074081, 9.80665, 12.59198]),
np.array([[1.0, 0.3, 0.3], [0.3, 1.0, 0.6],
[0.3, 0.6, 1.0]]) * np.outer([0.3, 0.25, 0.25],
[0.3, 0.25, 0.25]),
lower=np.log([1e-6, 1e-6, 1e-6]),
upper=np.log([0.1, np.inf, 9.80665]))[0],
mvn_od(np.log([0.074081, 9.80665, 12.59198]),
np.array([[1.0, 0.3, 0.3], [0.3, 1.0, 0.6],
[0.3, 0.6, 1.0]]) * np.outer([0.3, 0.25, 0.25],
[0.3, 0.25, 0.25]),
lower=np.log([1e-6, 9.80665, 9.80665]),
upper=np.log([0.1, np.inf, np.inf]))[0],
mvn_od(np.log([0.074081, 9.80665, 12.59198]),
np.array([[1.0, 0.3, 0.3], [0.3, 1.0, 0.6],
[0.3, 0.6, 1.0]]) * np.outer([0.3, 0.25, 0.25],
[0.3, 0.25, 0.25]),
lower=np.log([1e-6, 1e-6, 9.80665]),
upper=np.log([0.1, 9.80665, np.inf]))[0],
]
for i in [6, 7]:
C_test, P_test = np.unique(
np.around(DV_COST.iloc[:, i].values / 10., decimals=0) * 10.,
return_counts=True)
C_test = C_test[np.where(P_test > 10)]
T_test, P_test = np.unique(
np.around(DV_TIME.iloc[:, i].values * 100., decimals=0) / 100.,
return_counts=True)
T_test = T_test[np.where(P_test > 10)]
P_test = P_test[np.where(P_test > 10)]
P_test = P_test / 10000.
assert_allclose(P_target, P_test, atol=0.02)
assert_allclose(C_target, C_test, rtol=0.001)
assert_allclose(T_target, T_test, rtol=0.001)
# RED TAG
RED_check = A._DV_dict['red_tag'].describe().T
RED_check = (RED_check['mean'] * RED_check['count'] / 10000.).values
assert RED_check[0] == pytest.approx(RED_check[1], rel=0.01)
assert RED_check[2] == pytest.approx(RED_check[3], rel=0.01)
assert RED_check[4] == pytest.approx(RED_check[5], rel=0.01)
assert RED_check[6] == pytest.approx(RED_check[7], rel=0.01)
assert RED_check[0] == pytest.approx(DMG_1_PID, rel=0.10)
assert RED_check[2] == pytest.approx(DMG_2_PID, rel=0.10)
assert RED_check[4] == pytest.approx(DMG_1_PFA, rel=0.10)
assert RED_check[6] == pytest.approx(DMG_2_PFA, rel=0.10)
DMG_on = np.where(A._DMG > 0.0)[0]
RED_on = np.where(A._DV_dict['red_tag'] > 0.0)[0]
assert_allclose(DMG_on, RED_on)
# ------------------------------------------------------------------------
A.aggregate_results()
# ------------------------------------------------ check result aggregation
P_no_RED_target = mvn_od(np.log([0.074081, 0.044932, 9.80665, 12.59198]),
np.array(
[[1.0, 0.7, 0.3, 0.3], [0.7, 1.0, 0.3, 0.3],
[0.3, 0.3, 1.0, 0.6],
[0.3, 0.3, 0.6, 1.0]]) * np.outer(
[0.3, 0.4, 0.25, 0.25],
[0.3, 0.4, 0.25, 0.25]),
lower=np.log([1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[0.05488, 0.05488, 9.80665, 9.80665]))[0]
S = A._SUMMARY
SD = S.describe().T
P_no_RED_test = (1.0 - SD.loc[('red tagged', ''), 'mean']) * SD.loc[
('red tagged', ''), 'count'] / 10000.
def test_FEMA_P58_Assessment_EDP_uncertainty_detection_limit():
"""
Perform a loss assessment with customized inputs that focus on testing the
methods used to estimate the multivariate lognormal distribution of EDP
values. Besides the fitting, this test also evaluates the propagation of
EDP uncertainty through the analysis. Dispersions in other calculation
parameters are reduced to negligible levels. This allows us to test the
results against pre-defined reference values in spite of the randomness
involved in the calculations.
This test differs from the basic case in having unreliable EDP values above
a certain limit - a typical feature of interstory drifts in dynamic
simulations. Such cases should not be a problem if the limits can be
    estimated and they are specified as detection limits in the input file.
"""
base_input_path = 'resources/'
DL_input = base_input_path + 'input data/' + "DL_input_test_3.json"
EDP_input = base_input_path + 'EDP data/' + "EDP_table_test_3.out"
A = FEMA_P58_Assessment()
A.read_inputs(DL_input, EDP_input, verbose=False)
A.define_random_variables()
# -------------------------------------------------- check random variables
# EDP
RV_EDP = list(A._EDP_dict.values())
thetas, betas = np.array([rv.theta for rv in RV_EDP]).T
EDP_theta_test = thetas
EDP_beta_test = betas
EDP_theta_target = [9.80665, 12.59198, 0.074081, 0.044932]
EDP_beta_target = [0.25, 0.25, 0.3, 0.4]
assert_allclose(EDP_theta_test, EDP_theta_target, rtol=0.025)
assert_allclose(EDP_beta_test, EDP_beta_target, rtol=0.1)
rho = RV_EDP[0].RV_set.Rho()
EDP_rho_test = rho
EDP_rho_target = [
[1.0, 0.6, 0.3, 0.3],
[0.6, 1.0, 0.3, 0.3],
[0.3, 0.3, 1.0, 0.7],
[0.3, 0.3, 0.7, 1.0]]
EDP_COV_test = EDP_rho_test * np.outer(EDP_beta_test, EDP_beta_test)
assert_allclose(EDP_rho_test, EDP_rho_target, atol=0.15)
assert np.all([rv.distribution == 'lognormal' for rv in RV_EDP])
# ------------------------------------------------------------------------
A.define_loss_model()
A.calculate_damage()
# ------------------------------------------------ check damage calculation
# COL
COL_check = A._COL.describe().T
col_target = 1.0 - mvn_od(np.log(EDP_theta_test[2:]),
EDP_COV_test[2:, 2:],
upper=np.log([0.1, 0.1]))[0]
assert COL_check['mean'].values[0] == prob_approx(col_target, 0.03)
# DMG
DMG_check = [len(np.where(A._DMG.iloc[:, i] > 0.0)[0]) / 10000.
for i in range(8)]
DMG_1_PID = mvn_od(np.log(EDP_theta_test[2:]), EDP_COV_test[2:, 2:],
lower=np.log([0.05488, 1e-6]),
upper=np.log([0.1, 0.1]))[0]
DMG_2_PID = mvn_od(np.log(EDP_theta_test[2:]), EDP_COV_test[2:, 2:],
lower=np.log([1e-6, 0.05488]),
upper=np.log([0.1, 0.1]))[0]
DMG_1_PFA = mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([9.80665, 1e-6, 1e-6, 1e-6]),
upper=np.log([np.inf, np.inf, 0.1, 0.1]))[0]
DMG_2_PFA = mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([1e-6, 9.80665, 1e-6, 1e-6]),
upper=np.log([np.inf, np.inf, 0.1, 0.1]))[0]
assert DMG_check[0] == pytest.approx(DMG_check[1], rel=0.01)
assert DMG_check[2] == pytest.approx(DMG_check[3], rel=0.01)
assert DMG_check[4] == pytest.approx(DMG_check[5], rel=0.01)
assert DMG_check[6] == pytest.approx(DMG_check[7], rel=0.01)
assert DMG_check[0] == prob_approx(DMG_1_PID, 0.03)
assert DMG_check[2] == prob_approx(DMG_2_PID, 0.03)
assert DMG_check[4] == prob_approx(DMG_1_PFA, 0.03)
assert DMG_check[6] == prob_approx(DMG_2_PFA, 0.03)
# ------------------------------------------------------------------------
A.calculate_losses()
# -------------------------------------------------- check loss calculation
# COST
DV_COST = A._DV_dict['rec_cost']
DV_TIME = A._DV_dict['rec_time']
C_target = [0., 250., 1250.]
T_target = [0., 0.25, 1.25]
# PG 1011 and 1012
P_target = [
mvn_od(np.log(EDP_theta_test[2:]), EDP_COV_test[2:, 2:],
lower=np.log([1e-6, 1e-6]), upper=np.log([0.05488, 0.1]))[0],
mvn_od(np.log(EDP_theta_test[2:]), EDP_COV_test[2:, 2:],
lower=np.log([0.05488, 0.05488]), upper=np.log([0.1, 0.1]))[0],
mvn_od(np.log(EDP_theta_test[2:]), EDP_COV_test[2:, 2:],
lower=np.log([0.05488, 1e-6]), upper=np.log([0.1, 0.05488]))[0],
]
for i in [0, 1]:
C_test, P_test = np.unique(
np.around(DV_COST.iloc[:, i].values / 10., decimals=0) * 10.,
return_counts=True)
C_test = C_test[np.where(P_test > 10)]
T_test, P_test = np.unique(
np.around(DV_TIME.iloc[:, i].values * 100., decimals=0) / 100.,
return_counts=True)
T_test = T_test[np.where(P_test > 10)]
P_test = P_test[np.where(P_test > 10)]
P_test = P_test / 10000.
assert_allclose(C_target, C_test, rtol=0.001)
assert_allclose(T_target, T_test, rtol=0.001)
prob_allclose(P_target, P_test, 0.04)
# PG 1021 and 1022
P_target = [
mvn_od(np.log(EDP_theta_test[2:]), EDP_COV_test[2:, 2:],
lower=np.log([1e-6, 1e-6]), upper=np.log([0.1, 0.05488]))[0],
mvn_od(np.log(EDP_theta_test[2:]), EDP_COV_test[2:, 2:],
lower=np.log([0.05488, 0.05488]), upper=np.log([0.1, 0.1]))[0],
mvn_od(np.log(EDP_theta_test[2:]), EDP_COV_test[2:, 2:],
lower=np.log([1e-6, 0.05488]), upper=np.log([0.05488, 0.1]))[0],
]
for i in [2, 3]:
C_test, P_test = np.unique(
np.around(DV_COST.iloc[:, i].values / 10., decimals=0) * 10.,
return_counts=True)
C_test = C_test[np.where(P_test > 10)]
T_test, P_test = np.unique(
np.around(DV_TIME.iloc[:, i].values * 100., decimals=0) / 100.,
return_counts=True)
T_test = T_test[np.where(P_test > 10)]
P_test = P_test[np.where(P_test > 10)]
P_test = P_test / 10000.
assert_allclose(C_target, C_test, rtol=0.001)
assert_allclose(T_target, T_test, rtol=0.001)
prob_allclose(P_target, P_test, 0.04)
# PG 2011 and 2012
P_target = [
mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log([9.80665, np.inf, 0.1, 0.1]))[0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([9.80665, 9.80665, 1e-6, 1e-6]),
upper=np.log([np.inf, np.inf, 0.1, 0.1]))[0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([9.80665, 1e-6, 1e-6, 1e-6]),
upper=np.log([np.inf, 9.80665, 0.1, 0.1]))[0],
]
for i in [4, 5]:
C_test, P_test = np.unique(
np.around(DV_COST.iloc[:, i].values / 10., decimals=0) * 10.,
return_counts=True)
C_test = C_test[np.where(P_test > 10)]
T_test, P_test = np.unique(
np.around(DV_TIME.iloc[:, i].values * 100., decimals=0) / 100.,
return_counts=True)
T_test = T_test[np.where(P_test > 10)]
P_test = P_test[np.where(P_test > 10)]
P_test = P_test / 10000.
assert_allclose(C_target, C_test, rtol=0.001)
assert_allclose(T_target, T_test, rtol=0.001)
prob_allclose(P_target, P_test, 0.04)
# PG 2021 and 2022
P_target = [
mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log([np.inf, 9.80665, 0.1, 0.1]))[0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([9.80665, 9.80665, 1e-6, 1e-6]),
upper=np.log([np.inf, np.inf, 0.1, 0.1]))[0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([1e-6, 9.80665, 1e-6, 1e-6]),
upper=np.log([9.80665, np.inf, 0.1, 0.1]))[0],
]
for i in [6, 7]:
C_test, P_test = np.unique(
np.around(DV_COST.iloc[:, i].values / 10., decimals=0) * 10.,
return_counts=True)
C_test = C_test[np.where(P_test > 10)]
T_test, P_test = np.unique(
np.around(DV_TIME.iloc[:, i].values * 100., decimals=0) / 100.,
return_counts=True)
T_test = T_test[np.where(P_test > 10)]
P_test = P_test[np.where(P_test > 10)]
P_test = P_test / 10000.
assert_allclose(C_target, C_test, rtol=0.001)
assert_allclose(T_target, T_test, rtol=0.001)
prob_allclose(P_target, P_test, 0.04)
# RED TAG
RED_check = A._DV_dict['red_tag'].describe().T
RED_check = (RED_check['mean'] * RED_check['count'] / 10000.).values
assert RED_check[0] == pytest.approx(RED_check[1], rel=0.01)
assert RED_check[2] == pytest.approx(RED_check[3], rel=0.01)
assert RED_check[4] == pytest.approx(RED_check[5], rel=0.01)
assert RED_check[6] == pytest.approx(RED_check[7], rel=0.01)
assert RED_check[0] == prob_approx(DMG_1_PID, 0.03)
assert RED_check[2] == prob_approx(DMG_2_PID, 0.03)
assert RED_check[4] == prob_approx(DMG_1_PFA, 0.03)
assert RED_check[6] == prob_approx(DMG_2_PFA, 0.03)
DMG_on = np.where(A._DMG > 0.0)[0]
RED_on = np.where(A._DV_dict['red_tag'] > 0.0)[0]
assert_allclose(DMG_on, RED_on)
# ------------------------------------------------------------------------
A.aggregate_results()
# ------------------------------------------------ check result aggregation
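    # The probability of not getting red-tagged shall equal the probability
    # that every EDP stays below its damage threshold (both PFAs below
    # 9.80665 m/s2, i.e. 1 g, and both PIDs below 0.05488), evaluated as an
    # orthotope probability of the fitted multivariate lognormal EDPs.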
P_no_RED_target = mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log([9.80665, 9.80665, 0.05488, 0.05488]))[0]
S = A._SUMMARY
SD = S.describe().T
P_no_RED_test = ((1.0 - SD.loc[('red tagged', ''), 'mean'])
* SD.loc[('red tagged', ''), 'count'] / 10000.)
assert P_no_RED_target == prob_approx(P_no_RED_test, 0.04)


def test_FEMA_P58_Assessment_EDP_uncertainty_failed_analyses():
"""
Perform a loss assessment with customized inputs that focus on testing the
methods used to estimate the multivariate lognormal distribution of EDP
values. Besides the fitting, this test also evaluates the propagation of
EDP uncertainty through the analysis. Dispersions in other calculation
parameters are reduced to negligible levels. This allows us to test the
results against pre-defined reference values in spite of the randomness
involved in the calculations.
Here we use EDP results with unique values assigned to failed analyses.
In particular, PID=1.0 and PFA=100.0 are used when an analysis fails.
These values shall be handled by detection limits of 10 and 100 for PID
and PFA, respectively.
"""
base_input_path = 'resources/'
DL_input = base_input_path + 'input data/' + "DL_input_test_4.json"
EDP_input = base_input_path + 'EDP data/' + "EDP_table_test_4.out"
A = FEMA_P58_Assessment()
A.read_inputs(DL_input, EDP_input, verbose=False)
A.define_random_variables()
# -------------------------------------------------- check random variables
# EDP
RV_EDP = list(A._EDP_dict.values())
thetas, betas = np.array([rv.theta for rv in RV_EDP]).T
EDP_theta_test = thetas
EDP_beta_test = betas
EDP_theta_target = [9.80665, 12.59198, 0.074081, 0.044932]
EDP_beta_target = [0.25, 0.25, 0.3, 0.4]
assert_allclose(EDP_theta_test, EDP_theta_target, rtol=0.025)
assert_allclose(EDP_beta_test, EDP_beta_target, rtol=0.1)
rho = RV_EDP[0].RV_set.Rho()
EDP_rho_test = rho
EDP_rho_target = [
[1.0, 0.6, 0.3, 0.3],
[0.6, 1.0, 0.3, 0.3],
[0.3, 0.3, 1.0, 0.7],
[0.3, 0.3, 0.7, 1.0]]
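    # The betas are the log-space standard deviations of the EDPs, so the
    # covariance matrix of ln(EDP) is assembled as COV_ij = rho_ij * beta_i * beta_j.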
EDP_COV_test = EDP_rho_test * np.outer(EDP_beta_test, EDP_beta_test)
assert_allclose(EDP_rho_test, EDP_rho_target, atol=0.15)
assert np.all([rv.distribution == 'lognormal' for rv in RV_EDP])
# ------------------------------------------------------------------------
A.define_loss_model()
A.calculate_damage()
# ------------------------------------------------ check damage calculation
# COL
COL_check = A._COL.describe().T
col_target = 1.0 - mvn_od(np.log(EDP_theta_test[2:]),
EDP_COV_test[2:,2:],
upper=np.log([0.1, 0.1]))[0]
assert COL_check['mean'].values[0] == prob_approx(col_target, 0.03)
# DMG
DMG_check = [len(np.where(A._DMG.iloc[:, i] > 0.0)[0]) / 10000.
for i in range(8)]
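    # DMG_check holds the empirical probability of each PG being damaged.
    # The reference values below are orthotope probabilities of the fitted
    # lognormal EDPs: 0.05488 appears to be the median drift capacity of the
    # PID-sensitive components, 9.80665 m/s2 (1 g) the acceleration capacity
    # of the PFA-sensitive ones, and the upper bound of 0.1 on the PIDs
    # excludes the collapsed realizations.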
DMG_1_PID = mvn_od(np.log(EDP_theta_test[2:]), EDP_COV_test[2:,2:],
lower=np.log([0.05488, 1e-6]),
upper=np.log([0.1, 0.1]))[0]
DMG_2_PID = mvn_od(np.log(EDP_theta_test[2:]), EDP_COV_test[2:, 2:],
lower=np.log([1e-6, 0.05488]),
upper=np.log([0.1, 0.1]))[0]
DMG_1_PFA = mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([9.80665, 1e-6, 1e-6, 1e-6]),
upper=np.log([np.inf, np.inf, 0.1, 0.1]))[0]
DMG_2_PFA = mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([1e-6, 9.80665, 1e-6, 1e-6]),
upper=np.log([np.inf, np.inf, 0.1, 0.1]))[0]
assert DMG_check[0] == pytest.approx(DMG_check[1], rel=0.01)
assert DMG_check[2] == pytest.approx(DMG_check[3], rel=0.01)
assert DMG_check[4] == pytest.approx(DMG_check[5], rel=0.01)
assert DMG_check[6] == pytest.approx(DMG_check[7], rel=0.01)
assert DMG_check[0] == prob_approx(DMG_1_PID, 0.03)
assert DMG_check[2] == prob_approx(DMG_2_PID, 0.03)
assert DMG_check[4] == prob_approx(DMG_1_PFA, 0.03)
assert DMG_check[6] == prob_approx(DMG_2_PFA, 0.03)
# ------------------------------------------------------------------------
A.calculate_losses()
# -------------------------------------------------- check loss calculation
# COST
DV_COST = A._DV_dict['rec_cost']
DV_TIME = A._DV_dict['rec_time']
C_target = [0., 250., 1250.]
T_target = [0., 0.25, 1.25]
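    # In the checks below, the cost and time realizations are rounded to
    # coarse bins (nearest 10 cost units and 0.01 time units), bins supported
    # by 10 or fewer of the 10,000 realizations are discarded as sampling
    # noise, and the relative frequencies of the remaining bins are compared
    # with the corresponding orthotope probabilities in P_target.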
# PG 1011 and 1012
P_target = [
mvn_od(np.log(EDP_theta_test[2:]), EDP_COV_test[2:, 2:],
lower=np.log([1e-6, 1e-6]), upper=np.log([0.05488, 0.1]))[0],
mvn_od(np.log(EDP_theta_test[2:]), EDP_COV_test[2:, 2:],
lower=np.log([0.05488, 0.05488]), upper=np.log([0.1, 0.1]))[0],
mvn_od(np.log(EDP_theta_test[2:]), EDP_COV_test[2:, 2:],
lower=np.log([0.05488, 1e-6]), upper=np.log([0.1, 0.05488]))[0],
]
for i in [0, 1]:
C_test, P_test = np.unique(
np.around(DV_COST.iloc[:, i].values / 10., decimals=0) * 10.,
return_counts=True)
C_test = C_test[np.where(P_test > 10)]
T_test, P_test = np.unique(
np.around(DV_TIME.iloc[:, i].values * 100., decimals=0) / 100.,
return_counts=True)
T_test = T_test[np.where(P_test > 10)]
P_test = P_test[np.where(P_test > 10)]
P_test = P_test / 10000.
assert_allclose(C_target, C_test, rtol=0.001)
assert_allclose(T_target, T_test, rtol=0.001)
prob_allclose(P_target, P_test, 0.04)
# PG 1021 and 1022
P_target = [
mvn_od(np.log(EDP_theta_test[2:]), EDP_COV_test[2:, 2:],
lower=np.log([1e-6, 1e-6]), upper=np.log([0.1, 0.05488]))[0],
mvn_od(np.log(EDP_theta_test[2:]), EDP_COV_test[2:, 2:],
lower=np.log([0.05488, 0.05488]), upper=np.log([0.1, 0.1]))[0],
mvn_od(np.log(EDP_theta_test[2:]), EDP_COV_test[2:, 2:],
lower=np.log([1e-6, 0.05488]), upper=np.log([0.05488, 0.1]))[0],
]
for i in [2, 3]:
C_test, P_test = np.unique(
np.around(DV_COST.iloc[:, i].values / 10., decimals=0) * 10.,
return_counts=True)
C_test = C_test[np.where(P_test > 10)]
T_test, P_test = np.unique(
np.around(DV_TIME.iloc[:, i].values * 100., decimals=0) / 100.,
return_counts=True)
T_test = T_test[np.where(P_test > 10)]
P_test = P_test[np.where(P_test > 10)]
P_test = P_test / 10000.
assert_allclose(C_target, C_test, rtol=0.001)
assert_allclose(T_target, T_test, rtol=0.001)
prob_allclose(P_target, P_test, 0.04)
# PG 2011 and 2012
P_target = [
mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log([9.80665, np.inf, 0.1, 0.1]))[0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([9.80665, 9.80665, 1e-6, 1e-6]),
upper=np.log([np.inf, np.inf, 0.1, 0.1]))[0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([9.80665, 1e-6, 1e-6, 1e-6]),
upper=np.log([np.inf, 9.80665, 0.1, 0.1]))[0],
]
for i in [4, 5]:
C_test, P_test = np.unique(
np.around(DV_COST.iloc[:, i].values / 10., decimals=0) * 10.,
return_counts=True)
C_test = C_test[np.where(P_test > 10)]
T_test, P_test = np.unique(
np.around(DV_TIME.iloc[:, i].values * 100., decimals=0) / 100.,
return_counts=True)
T_test = T_test[np.where(P_test > 10)]
P_test = P_test[np.where(P_test > 10)]
P_test = P_test / 10000.
assert_allclose(C_target, C_test, rtol=0.001)
assert_allclose(T_target, T_test, rtol=0.001)
prob_allclose(P_target, P_test, 0.04)
# PG 2021 and 2022
P_target = [
mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log([np.inf, 9.80665, 0.1, 0.1]))[0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([9.80665, 9.80665, 1e-6, 1e-6]),
upper=np.log([np.inf, np.inf, 0.1, 0.1]))[0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([1e-6, 9.80665, 1e-6, 1e-6]),
upper=np.log([9.80665, np.inf, 0.1, 0.1]))[0],
]
for i in [6, 7]:
C_test, P_test = np.unique(
np.around(DV_COST.iloc[:, i].values / 10., decimals=0) * 10.,
return_counts=True)
C_test = C_test[np.where(P_test > 10)]
T_test, P_test = np.unique(
np.around(DV_TIME.iloc[:, i].values * 100., decimals=0) / 100.,
return_counts=True)
T_test = T_test[np.where(P_test > 10)]
P_test = P_test[np.where(P_test > 10)]
P_test = P_test / 10000.
assert_allclose(C_target, C_test, rtol=0.001)
assert_allclose(T_target, T_test, rtol=0.001)
prob_allclose(P_target, P_test, 0.04)
# RED TAG
RED_check = A._DV_dict['red_tag'].describe().T
RED_check = (RED_check['mean'] * RED_check['count'] / 10000.).values
assert RED_check[0] == pytest.approx(RED_check[1], rel=0.01)
assert RED_check[2] == pytest.approx(RED_check[3], rel=0.01)
assert RED_check[4] == pytest.approx(RED_check[5], rel=0.01)
assert RED_check[6] == pytest.approx(RED_check[7], rel=0.01)
assert RED_check[0] == prob_approx(DMG_1_PID, 0.03)
assert RED_check[2] == prob_approx(DMG_2_PID, 0.03)
assert RED_check[4] == prob_approx(DMG_1_PFA, 0.03)
assert RED_check[6] == prob_approx(DMG_2_PFA, 0.03)
DMG_on = np.where(A._DMG > 0.0)[0]
RED_on = np.where(A._DV_dict['red_tag'] > 0.0)[0]
assert_allclose(DMG_on, RED_on)
# ------------------------------------------------------------------------
A.aggregate_results()
# ------------------------------------------------ check result aggregation
P_no_RED_target = mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log([9.80665, 9.80665, 0.05488, 0.05488]))[0]
S = A._SUMMARY
SD = S.describe().T
P_no_RED_test = ((1.0 - SD.loc[('red tagged', ''), 'mean'])
* SD.loc[('red tagged', ''), 'count'] / 10000.)
assert P_no_RED_target == prob_approx(P_no_RED_test, 0.04)


def test_FEMA_P58_Assessment_EDP_uncertainty_3D():
"""
Perform a loss assessment with customized inputs that focus on testing the
methods used to estimate the multivariate lognormal distribution of EDP
values. Besides the fitting, this test also evaluates the propagation of
EDP uncertainty through the analysis. Dispersions in other calculation
parameters are reduced to negligible levels. This allows us to test the
results against pre-defined reference values in spite of the randomness
involved in the calculations.
In this test we look at the propagation of EDP values provided for two
different directions. (3D refers to the numerical model used for response
estimation.)
"""
base_input_path = 'resources/'
DL_input = base_input_path + 'input data/' + "DL_input_test_5.json"
EDP_input = base_input_path + 'EDP data/' + "EDP_table_test_5.out"
A = FEMA_P58_Assessment()
A.read_inputs(DL_input, EDP_input, verbose=False)
A.define_random_variables()
# -------------------------------------------------- check random variables
# EDP
RV_EDP = list(A._EDP_dict.values())
assert np.all([rv.distribution == 'lognormal' for rv in RV_EDP])
thetas, betas = np.array([rv.theta for rv in RV_EDP]).T
EDP_theta_test = thetas
EDP_beta_test = betas
EDP_theta_target = [9.80665, 8.65433, 12.59198, 11.11239,
0.074081, 0.063763, 0.044932, 0.036788]
EDP_beta_target = [0.25, 0.25, 0.25, 0.25, 0.3, 0.3, 0.4, 0.4]
assert_allclose(EDP_theta_test, EDP_theta_target, rtol=0.05)
assert_allclose(EDP_beta_test, EDP_beta_target, rtol=0.1)
rho = RV_EDP[0].RV_set.Rho()
EDP_rho_test = rho
EDP_rho_target = np.array([
[1.0, 0.8, 0.6, 0.5, 0.3, 0.3, 0.3, 0.3],
[0.8, 1.0, 0.5, 0.6, 0.3, 0.3, 0.3, 0.3],
[0.6, 0.5, 1.0, 0.8, 0.3, 0.3, 0.3, 0.3],
[0.5, 0.6, 0.8, 1.0, 0.3, 0.3, 0.3, 0.3],
[0.3, 0.3, 0.3, 0.3, 1.0, 0.8, 0.7, 0.6],
[0.3, 0.3, 0.3, 0.3, 0.8, 1.0, 0.6, 0.7],
[0.3, 0.3, 0.3, 0.3, 0.7, 0.6, 1.0, 0.8],
[0.3, 0.3, 0.3, 0.3, 0.6, 0.7, 0.8, 1.0]])
large_rho_ids = np.where(EDP_rho_target >= 0.5)
small_rho_ids = np.where(EDP_rho_target < 0.5)
assert_allclose(EDP_rho_test[large_rho_ids], EDP_rho_target[large_rho_ids],
atol=0.1)
assert_allclose(EDP_rho_test[small_rho_ids], EDP_rho_target[small_rho_ids],
atol=0.2)
EDP_COV_test = EDP_rho_test * np.outer(EDP_beta_test, EDP_beta_test)
# ------------------------------------------------------------------------
A.define_loss_model()
A.calculate_damage()
# ------------------------------------------------ check damage calculation
theta_PID = np.log(EDP_theta_target[4:])
COV_PID = EDP_COV_test[4:, 4:]
# COL
COL_check = A._COL.describe().T
col_target = 1.0 - mvn_od(theta_PID, COV_PID,
upper=np.log([0.1, 0.1, 0.1, 0.1]))[0]
assert COL_check['mean'].values[0] == pytest.approx(col_target, rel=0.1, abs=0.05)
# DMG
realization_count = float(A._AIM_in['general']['realizations'])
DMG_check = [len(np.where(A._DMG.iloc[:, i] > 0.0)[0]) / realization_count for i in
range(8)]
DMG_1_1_PID = mvn_od(theta_PID, COV_PID,
lower=np.log([0.05488, 1e-6, 1e-6, 1e-6]),
upper=np.log([0.1, 0.1, 0.1, 0.1]))[0]
DMG_1_2_PID = mvn_od(theta_PID, COV_PID,
lower=np.log([1e-6, 0.05488, 1e-6, 1e-6]),
upper=np.log([0.1, 0.1, 0.1, 0.1]))[0]
DMG_2_1_PID = mvn_od(theta_PID, COV_PID,
lower=np.log([1e-6, 1e-6, 0.05488, 1e-6]),
upper=np.log([0.1, 0.1, 0.1, 0.1]))[0]
DMG_2_2_PID = mvn_od(theta_PID, COV_PID,
lower=np.log([1e-6, 1e-6, 1e-6, 0.05488]),
upper=np.log([0.1, 0.1, 0.1, 0.1]))[0]
DMG_1_1_PFA = mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([9.80665, 1e-6, 1e-6, 1e-6,
1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log([np.inf, np.inf, np.inf, np.inf,
0.1, 0.1, 0.1, 0.1]))[0]
DMG_1_2_PFA = mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([1e-6, 9.80665, 1e-6, 1e-6,
1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log([np.inf, np.inf, np.inf, np.inf,
0.1, 0.1, 0.1, 0.1]))[0]
DMG_2_1_PFA = mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([1e-6, 1e-6, 9.80665, 1e-6,
1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log([np.inf, np.inf, np.inf, np.inf,
0.1, 0.1, 0.1, 0.1]))[0]
DMG_2_2_PFA = mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([1e-6, 1e-6, 1e-6, 9.80665,
1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log([np.inf, np.inf, np.inf, np.inf,
0.1, 0.1, 0.1, 0.1]))[0]
DMG_ref = [DMG_1_1_PID, DMG_1_2_PID, DMG_2_1_PID, DMG_2_2_PID,
DMG_1_1_PFA, DMG_1_2_PFA, DMG_2_1_PFA, DMG_2_2_PFA]
assert_allclose(DMG_check, DMG_ref, rtol=0.10, atol=0.01)
# ------------------------------------------------------------------------
A.calculate_losses()
# -------------------------------------------------- check loss calculation
# COST
DV_COST = A._DV_dict['rec_cost']
DV_TIME = A._DV_dict['rec_time']
C_target = [0., 249., 624., 1251., 1875.]
T_target = [0., 0.249, 0.624, 1.251, 1.875]
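    # Five distinct consequence levels are expected per PG in this test, so
    # the realizations are rounded to finer bins here (nearest 3 cost units
    # and roughly 0.003 time units) before the rare bins are discarded and
    # the relative frequencies are compared with the targets.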
# PG 1011
P_target = [
mvn_od(theta_PID, COV_PID, lower=np.log([1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log([0.05488, 0.1, 0.1, 0.1]))[0],
mvn_od(theta_PID, COV_PID,
lower=np.log([0.05488, 0.05488, 0.05488, 0.05488]),
upper=np.log([0.1, 0.1, 0.1, 0.1]))[0],
np.sum([
mvn_od(theta_PID, COV_PID,
lower=np.log([0.05488, 1e-6, 0.05488, 0.05488]),
upper=np.log([0.1, 0.05488, 0.1, 0.1]))[0],
mvn_od(theta_PID, COV_PID,
lower=np.log([0.05488, 0.05488, 1e-6, 0.05488]),
upper=np.log([0.1, 0.1, 0.05488, 0.1]))[0],
mvn_od(theta_PID, COV_PID,
lower=np.log([0.05488, 0.05488, 0.05488, 1e-6]),
upper=np.log([0.1, 0.1, 0.1, 0.05488]))[0], ]),
np.sum([
mvn_od(theta_PID, COV_PID,
lower=np.log([0.05488, 1e-6, 1e-6, 0.05488]),
upper=np.log([0.1, 0.05488, 0.05488, 0.1]))[0],
mvn_od(theta_PID, COV_PID,
lower=np.log([0.05488, 0.05488, 1e-6, 1e-6]),
upper=np.log([0.1, 0.1, 0.05488, 0.05488]))[0],
mvn_od(theta_PID, COV_PID,
lower=np.log([0.05488, 1e-6, 0.05488, 1e-6]),
upper=np.log([0.1, 0.05488, 0.1, 0.05488]))[0], ]),
mvn_od(theta_PID, COV_PID, lower=np.log([0.05488, 1e-6, 1e-6, 1e-6]),
upper=np.log([0.1, 0.05488, 0.05488, 0.05488]))[0],
]
C_test, P_test = np.unique(
np.around(DV_COST.iloc[:, 0].values / 3., decimals=0) * 3.,
return_counts=True)
C_test = C_test[np.where(P_test > 10)]
T_test, P_test = np.unique(np.around(DV_TIME.iloc[:, 0].values * 333.33333,
decimals=0) / 333.33333,
return_counts=True)
T_test = T_test[np.where(P_test > 10)]
P_test = P_test[np.where(P_test > 10)]
P_test = P_test / realization_count
assert_allclose(P_target, P_test, atol=0.05)
assert_allclose(C_target, C_test, rtol=0.001)
assert_allclose(T_target, T_test, rtol=0.001)
# PG 1012
P_target = [
mvn_od(theta_PID, COV_PID, lower=np.log([1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log([0.1, 0.05488, 0.1, 0.1]))[0],
mvn_od(theta_PID, COV_PID,
lower=np.log([0.05488, 0.05488, 0.05488, 0.05488]),
upper=np.log([0.1, 0.1, 0.1, 0.1]))[0],
np.sum([
mvn_od(theta_PID, COV_PID,
lower=np.log([1e-6, 0.05488, 0.05488, 0.05488]),
upper=np.log([0.05488, 0.1, 0.1, 0.1]))[0],
mvn_od(theta_PID, COV_PID,
lower=np.log([0.05488, 0.05488, 1e-6, 0.05488]),
upper=np.log([0.1, 0.1, 0.05488, 0.1]))[0],
mvn_od(theta_PID, COV_PID,
lower=np.log([0.05488, 0.05488, 0.05488, 1e-6]),
upper=np.log([0.1, 0.1, 0.1, 0.05488]))[0], ]),
np.sum([
mvn_od(theta_PID, COV_PID,
lower=np.log([1e-6, 0.05488, 1e-6, 0.05488]),
upper=np.log([0.05488, 0.1, 0.05488, 0.1]))[0],
mvn_od(theta_PID, COV_PID,
lower=np.log([0.05488, 0.05488, 1e-6, 1e-6]),
upper=np.log([0.1, 0.1, 0.05488, 0.05488]))[0],
mvn_od(theta_PID, COV_PID,
lower=np.log([1e-6, 0.05488, 0.05488, 1e-6]),
upper=np.log([0.05488, 0.1, 0.1, 0.05488]))[0], ]),
mvn_od(theta_PID, COV_PID, lower=np.log([1e-6, 0.05488, 1e-6, 1e-6]),
upper=np.log([0.05488, 0.1, 0.05488, 0.05488]))[0],
]
C_test, P_test = np.unique(
np.around(DV_COST.iloc[:, 1].values / 3., decimals=0) * 3.,
return_counts=True)
C_test = C_test[np.where(P_test > 10)]
T_test, P_test = np.unique(np.around(DV_TIME.iloc[:, 1].values * 333.33333,
decimals=0) / 333.33333,
return_counts=True)
T_test = T_test[np.where(P_test > 10)]
P_test = P_test[np.where(P_test > 10)]
P_test = P_test / realization_count
assert_allclose(P_target, P_test, atol=0.05)
assert_allclose(C_target, C_test, rtol=0.001)
assert_allclose(T_target, T_test, rtol=0.001)
# PG 1021
P_target = [
mvn_od(theta_PID, COV_PID, lower=np.log([1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log([0.1, 0.1, 0.05488, 0.1]))[0],
mvn_od(theta_PID, COV_PID,
lower=np.log([0.05488, 0.05488, 0.05488, 0.05488]),
upper=np.log([0.1, 0.1, 0.1, 0.1]))[0],
np.sum([
mvn_od(theta_PID, COV_PID,
lower=np.log([1e-6, 0.05488, 0.05488, 0.05488]),
upper=np.log([0.05488, 0.1, 0.1, 0.1]))[0],
mvn_od(theta_PID, COV_PID,
lower=np.log([0.05488, 1e-6, 0.05488, 0.05488]),
upper=np.log([0.1, 0.05488, 0.1, 0.1]))[0],
mvn_od(theta_PID, COV_PID,
lower=np.log([0.05488, 0.05488, 0.05488, 1e-6]),
upper=np.log([0.1, 0.1, 0.1, 0.05488]))[0], ]),
np.sum([
mvn_od(theta_PID, COV_PID,
lower=np.log([1e-6, 1e-6, 0.05488, 0.05488]),
upper=np.log([0.05488, 0.05488, 0.1, 0.1]))[0],
mvn_od(theta_PID, COV_PID,
lower=np.log([0.05488, 1e-6, 0.05488, 1e-6]),
upper=np.log([0.1, 0.05488, 0.1, 0.05488]))[0],
mvn_od(theta_PID, COV_PID,
lower=np.log([1e-6, 0.05488, 0.05488, 1e-6]),
upper=np.log([0.05488, 0.1, 0.1, 0.05488]))[0], ]),
mvn_od(theta_PID, COV_PID, lower=np.log([1e-6, 1e-6, 0.05488, 1e-6]),
upper=np.log([0.05488, 0.05488, 0.1, 0.05488]))[0],
]
C_test, P_test = np.unique(
np.around(DV_COST.iloc[:, 2].values / 3., decimals=0) * 3.,
return_counts=True)
C_test = C_test[np.where(P_test > 10)]
T_test, P_test = np.unique(np.around(DV_TIME.iloc[:, 2].values * 333.33333,
decimals=0) / 333.33333,
return_counts=True)
T_test = T_test[np.where(P_test > 10)]
P_test = P_test[np.where(P_test > 10)]
P_test = P_test / realization_count
assert_allclose(P_target, P_test, atol=0.05)
assert_allclose(C_target, C_test, rtol=0.001)
assert_allclose(T_target, T_test, rtol=0.001)
# PG 1022
P_target = [
mvn_od(theta_PID, COV_PID, lower=np.log([1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log([0.1, 0.1, 0.1, 0.05488]))[0],
mvn_od(theta_PID, COV_PID,
lower=np.log([0.05488, 0.05488, 0.05488, 0.05488]),
upper=np.log([0.1, 0.1, 0.1, 0.1]))[0],
np.sum([
mvn_od(theta_PID, COV_PID,
lower=np.log([1e-6, 0.05488, 0.05488, 0.05488]),
upper=np.log([0.05488, 0.1, 0.1, 0.1]))[0],
mvn_od(theta_PID, COV_PID,
lower=np.log([0.05488, 1e-6, 0.05488, 0.05488]),
upper=np.log([0.1, 0.05488, 0.1, 0.1]))[0],
mvn_od(theta_PID, COV_PID,
lower=np.log([0.05488, 0.05488, 1e-6, 0.05488]),
upper=np.log([0.1, 0.1, 0.05488, 0.1]))[0], ]),
np.sum([
mvn_od(theta_PID, COV_PID,
lower=np.log([1e-6, 1e-6, 0.05488, 0.05488]),
upper=np.log([0.05488, 0.05488, 0.1, 0.1]))[0],
mvn_od(theta_PID, COV_PID,
lower=np.log([0.05488, 1e-6, 1e-6, 0.05488]),
upper=np.log([0.1, 0.05488, 0.05488, 0.1]))[0],
mvn_od(theta_PID, COV_PID,
lower=np.log([1e-6, 0.05488, 1e-6, 0.05488]),
upper=np.log([0.05488, 0.1, 0.05488, 0.1]))[0], ]),
mvn_od(theta_PID, COV_PID, lower=np.log([1e-6, 1e-6, 1e-6, 0.05488]),
upper=np.log([0.05488, 0.05488, 0.05488, 0.1]))[0],
]
C_test, P_test = np.unique(
np.around(DV_COST.iloc[:, 3].values / 3., decimals=0) * 3.,
return_counts=True)
C_test = C_test[np.where(P_test > 5)]
T_test, P_test = np.unique(np.around(DV_TIME.iloc[:, 3].values * 333.33333,
decimals=0) / 333.33333,
return_counts=True)
T_test = T_test[np.where(P_test > 5)]
P_test = P_test[np.where(P_test > 5)]
P_test = P_test / realization_count
assert_allclose(P_target[:-1], P_test[:4], atol=0.05)
assert_allclose(C_target[:-1], C_test[:4], rtol=0.001)
assert_allclose(T_target[:-1], T_test[:4], rtol=0.001)
# PG 2011
P_target = [
mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([1e-6, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[9.80665, np.inf, np.inf, np.inf, 0.1, 0.1, 0.1, 0.1]))[0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[9.80665, 9.80665, 9.80665, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[np.inf, np.inf, np.inf, np.inf, 0.1, 0.1, 0.1, 0.1]))[0],
np.sum([
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[9.80665, 1e-6, 9.80665, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[np.inf, 9.80665, np.inf, np.inf, 0.1, 0.1, 0.1, 0.1]))[
0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[9.80665, 9.80665, 1e-6, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[np.inf, np.inf, 9.80665, np.inf, 0.1, 0.1, 0.1, 0.1]))[
0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[9.80665, 9.80665, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[np.inf, np.inf, np.inf, 9.80665, 0.1, 0.1, 0.1, 0.1]))[
0], ]),
np.sum([
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[9.80665, 1e-6, 1e-6, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[np.inf, 9.80665, 9.80665, np.inf, 0.1, 0.1, 0.1, 0.1]))[
0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[9.80665, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[np.inf, np.inf, 9.80665, 9.80665, 0.1, 0.1, 0.1, 0.1]))[
0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[9.80665, 1e-6, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[np.inf, 9.80665, np.inf, 9.80665, 0.1, 0.1, 0.1, 0.1]))[
0], ]),
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[9.80665, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[np.inf, 9.80665, 9.80665, 9.80665, 0.1, 0.1, 0.1, 0.1]))[0],
]
C_test, P_test = np.unique(
np.around(DV_COST.iloc[:, 4].values / 3., decimals=0) * 3.,
return_counts=True)
C_test = C_test[np.where(P_test > 10)]
T_test, P_test = np.unique(np.around(DV_TIME.iloc[:, 4].values * 333.33333,
decimals=0) / 333.33333,
return_counts=True)
T_test = T_test[np.where(P_test > 10)]
P_test = P_test[np.where(P_test > 10)]
P_test = P_test / realization_count
assert_allclose(P_target, P_test, atol=0.05)
assert_allclose(C_target, C_test, rtol=0.001)
assert_allclose(T_target, T_test, rtol=0.001)
# PG 2012
P_target = [
mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([1e-6, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[np.inf, 9.80665, np.inf, np.inf, 0.1, 0.1, 0.1, 0.1]))[0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[9.80665, 9.80665, 9.80665, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[np.inf, np.inf, np.inf, np.inf, 0.1, 0.1, 0.1, 0.1]))[0],
np.sum([
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[1e-6, 9.80665, 9.80665, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[9.80665, np.inf, np.inf, np.inf, 0.1, 0.1, 0.1, 0.1]))[
0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[9.80665, 9.80665, 1e-6, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[np.inf, np.inf, 9.80665, np.inf, 0.1, 0.1, 0.1, 0.1]))[
0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[9.80665, 9.80665, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[np.inf, np.inf, np.inf, 9.80665, 0.1, 0.1, 0.1, 0.1]))[
0], ]),
np.sum([
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[1e-6, 9.80665, 1e-6, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[9.80665, np.inf, 9.80665, np.inf, 0.1, 0.1, 0.1, 0.1]))[
0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[9.80665, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[np.inf, np.inf, 9.80665, 9.80665, 0.1, 0.1, 0.1, 0.1]))[
0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[1e-6, 9.80665, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[9.80665, np.inf, np.inf, 9.80665, 0.1, 0.1, 0.1, 0.1]))[
0], ]),
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[1e-6, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[9.80665, np.inf, 9.80665, 9.80665, 0.1, 0.1, 0.1, 0.1]))[0],
]
C_test, P_test = np.unique(
np.around(DV_COST.iloc[:, 5].values / 3., decimals=0) * 3.,
return_counts=True)
C_test = C_test[np.where(P_test > 10)]
T_test, P_test = np.unique(np.around(DV_TIME.iloc[:, 5].values * 333.33333,
decimals=0) / 333.33333,
return_counts=True)
T_test = T_test[np.where(P_test > 10)]
P_test = P_test[np.where(P_test > 10)]
P_test = P_test / realization_count
assert_allclose(P_target[:4], P_test[:4], atol=0.05)
assert_allclose(C_target[:4], C_test[:4], rtol=0.001)
assert_allclose(T_target[:4], T_test[:4], rtol=0.001)
# PG 2021
P_target = [
mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([1e-6, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[np.inf, np.inf, 9.80665, np.inf, 0.1, 0.1, 0.1, 0.1]))[0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[9.80665, 9.80665, 9.80665, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[np.inf, np.inf, np.inf, np.inf, 0.1, 0.1, 0.1, 0.1]))[0],
np.sum([
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[1e-6, 9.80665, 9.80665, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[9.80665, np.inf, np.inf, np.inf, 0.1, 0.1, 0.1, 0.1]))[
0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[9.80665, 1e-6, 9.80665, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[np.inf, 9.80665, np.inf, np.inf, 0.1, 0.1, 0.1, 0.1]))[
0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[9.80665, 9.80665, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[np.inf, np.inf, np.inf, 9.80665, 0.1, 0.1, 0.1, 0.1]))[
0], ]),
np.sum([
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[1e-6, 1e-6, 9.80665, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[9.80665, 9.80665, np.inf, np.inf, 0.1, 0.1, 0.1, 0.1]))[
0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[9.80665, 1e-6, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[np.inf, 9.80665, np.inf, 9.80665, 0.1, 0.1, 0.1, 0.1]))[
0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[1e-6, 9.80665, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[9.80665, np.inf, np.inf, 9.80665, 0.1, 0.1, 0.1, 0.1]))[
0], ]),
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[1e-6, 1e-6, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[9.80665, 9.80665, np.inf, 9.80665, 0.1, 0.1, 0.1, 0.1]))[0],
]
C_test, P_test = np.unique(
np.around(DV_COST.iloc[:, 6].values / 3., decimals=0) * 3.,
return_counts=True)
C_test = C_test[np.where(P_test > 10)]
T_test, P_test = np.unique(np.around(DV_TIME.iloc[:, 6].values * 333.33333,
decimals=0) / 333.33333,
return_counts=True)
T_test = T_test[np.where(P_test > 10)]
P_test = P_test[np.where(P_test > 10)]
P_test = P_test / realization_count
assert_allclose(P_target, P_test, atol=0.05)
assert_allclose(C_target, C_test, rtol=0.001)
assert_allclose(T_target, T_test, rtol=0.001)
# PG 2022
P_target = [
mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([1e-6, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[np.inf, np.inf, np.inf, 9.80665, 0.1, 0.1, 0.1, 0.1]))[0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[9.80665, 9.80665, 9.80665, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[np.inf, np.inf, np.inf, np.inf, 0.1, 0.1, 0.1, 0.1]))[0],
np.sum([
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[1e-6, 9.80665, 9.80665, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[9.80665, np.inf, np.inf, np.inf, 0.1, 0.1, 0.1, 0.1]))[
0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[9.80665, 1e-6, 9.80665, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[np.inf, 9.80665, np.inf, np.inf, 0.1, 0.1, 0.1, 0.1]))[
0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[9.80665, 9.80665, 1e-6, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[np.inf, np.inf, 9.80665, np.inf, 0.1, 0.1, 0.1, 0.1]))[
0], ]),
np.sum([
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[1e-6, 1e-6, 9.80665, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[9.80665, 9.80665, np.inf, np.inf, 0.1, 0.1, 0.1, 0.1]))[
0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[9.80665, 1e-6, 1e-6, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[np.inf, 9.80665, 9.80665, np.inf, 0.1, 0.1, 0.1, 0.1]))[
0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[1e-6, 9.80665, 1e-6, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[9.80665, np.inf, 9.80665, np.inf, 0.1, 0.1, 0.1, 0.1]))[
0], ]),
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[1e-6, 1e-6, 1e-6, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[9.80665, 9.80665, 9.80665, np.inf, 0.1, 0.1, 0.1, 0.1]))[0],
]
C_test, P_test = np.unique(
np.around(DV_COST.iloc[:, 7].values / 3., decimals=0) * 3.,
return_counts=True)
C_test = C_test[np.where(P_test > 10)]
T_test, P_test = np.unique(np.around(DV_TIME.iloc[:, 7].values * 333.33333,
decimals=0) / 333.33333,
return_counts=True)
T_test = T_test[np.where(P_test > 10)]
P_test = P_test[np.where(P_test > 10)]
P_test = P_test / realization_count
assert_allclose(P_target, P_test, atol=0.05)
assert_allclose(C_target, C_test, rtol=0.001)
assert_allclose(T_target, T_test, rtol=0.001)
# RED TAG
RED_check = A._DV_dict['red_tag'].describe().T
RED_check = (RED_check['mean'] * RED_check['count'] / realization_count).values
assert_allclose(RED_check, DMG_ref, atol=0.02, rtol=0.10)
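    # A red tag shall be assigned if and only if the corresponding component
    # is damaged, so the indices of nonzero damage and nonzero red tag
    # results must coincide.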
DMG_on = np.where(A._DMG > 0.0)[0]
RED_on = np.where(A._DV_dict['red_tag'] > 0.0)[0]
assert_allclose(DMG_on, RED_on)
# ------------------------------------------------------------------------
A.aggregate_results()
# ------------------------------------------------ check result aggregation
P_no_RED_target = mvn_od(np.log(EDP_theta_test), EDP_COV_test,
upper=np.log(
[9.80665, 9.80665, 9.80665, 9.80665, 0.05488,
0.05488, 0.05488, 0.05488]))[0]
S = A._SUMMARY
SD = S.describe().T
P_no_RED_test = (1.0 - SD.loc[('red tagged', ''), 'mean']) * SD.loc[
('red tagged', ''), 'count'] / realization_count
assert P_no_RED_target == pytest.approx(P_no_RED_test, abs=0.03)


def test_FEMA_P58_Assessment_EDP_uncertainty_single_sample():
"""
Perform a loss assessment with customized inputs that focus on testing the
methods used to estimate the multivariate lognormal distribution of EDP
values. Besides the fitting, this test also evaluates the propagation of
EDP uncertainty through the analysis. Dispersions in other calculation
parameters are reduced to negligible levels. This allows us to test the
results against pre-defined reference values in spite of the randomness
involved in the calculations.
In this test we provide only one structural response result and see if it
is properly handled as a deterministic value or a random EDP using the
additional sources of uncertainty.
"""
base_input_path = 'resources/'
DL_input = base_input_path + 'input data/' + "DL_input_test_6.json"
EDP_input = base_input_path + 'EDP data/' + "EDP_table_test_6.out"
A = FEMA_P58_Assessment()
A.read_inputs(DL_input, EDP_input, verbose=False)
A.define_random_variables()
# -------------------------------------------------- check random variables
# EDP
RV_EDP = list(A._EDP_dict.values())
assert np.all([rv.distribution == 'lognormal' for rv in RV_EDP])
thetas, betas = np.array([rv.theta for rv in RV_EDP]).T
EDP_theta_test = thetas
EDP_beta_test = betas
EDP_theta_target = np.array(
[7.634901, 6.85613, 11.685934, 10.565554,
0.061364, 0.048515, 0.033256, 0.020352])
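    # With a single response sample per EDP, the fitted dispersion collapses
    # to practically zero; the target betas below are only a tiny numerical
    # floor proportional to the medians.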
EDP_beta_target = EDP_theta_target * 1e-6
assert_allclose(EDP_theta_test, EDP_theta_target, rtol=0.05)
assert_allclose(EDP_beta_test, EDP_beta_target, rtol=0.1)
    assert RV_EDP[0].RV_set is None
# ------------------------------------------------- perform the calculation
A.define_loss_model()
A.calculate_damage()
A.calculate_losses()
A.aggregate_results()
# ------------------------------------------------ check result aggregation
S = A._SUMMARY
SD = S.describe().T
P_no_RED_test = (1.0 - SD.loc[('red tagged', ''), 'mean']) * SD.loc[
('red tagged', ''), 'count'] / 10000.
assert P_no_RED_test == 0.0
# -------------------------------------------------------------------------
# now do the same analysis, but consider additional uncertainty
# -------------------------------------------------------------------------
A = FEMA_P58_Assessment()
A.read_inputs(DL_input, EDP_input, verbose=False)
AU = A._AIM_in['general']['added_uncertainty']
AU['beta_m'] = 0.3
AU['beta_gm'] = 0.4
A.define_random_variables()
# -------------------------------------------------- check random variables
# EDP
RV_EDP = list(A._EDP_dict.values())
assert np.all([rv.distribution == 'lognormal' for rv in RV_EDP])
thetas, betas = np.array([rv.theta for rv in RV_EDP]).T
EDP_theta_test = thetas
EDP_beta_test = betas
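    # The added modeling (beta_m = 0.3) and ground motion (beta_gm = 0.4)
    # uncertainties are combined with the near-zero fitted EDP dispersion by
    # SRSS: beta_total = sqrt(beta_EDP^2 + beta_m^2 + beta_gm^2) ~ 0.5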
EDP_beta_target = np.sqrt((EDP_theta_target * 1e-6)**2. +
np.ones(8)*(0.3**2. + 0.4**2.))
assert_allclose(EDP_theta_test, EDP_theta_target, rtol=0.05)
assert_allclose(EDP_beta_test, EDP_beta_target, rtol=0.1)
    assert RV_EDP[0].RV_set is None
EDP_rho_target = np.zeros((8, 8))
np.fill_diagonal(EDP_rho_target, 1.0)
EDP_COV_test = EDP_rho_target * np.outer(EDP_beta_test, EDP_beta_test)
# ------------------------------------------------- perform the calculation
A.define_loss_model()
A.calculate_damage()
A.calculate_losses()
A.aggregate_results()
# ------------------------------------------------ check result aggregation
P_no_RED_target = mvn_od(np.log(EDP_theta_test), EDP_COV_test,
upper=np.log(
[9.80665, 9.80665, 9.80665, 9.80665, 0.05488,
0.05488, 0.05488, 0.05488]))[0]
S = A._SUMMARY
SD = S.describe().T
P_no_RED_test = (1.0 - SD.loc[('red tagged', ''), 'mean']) * SD.loc[
('red tagged', ''), 'count'] / 10000.
assert P_no_RED_target == pytest.approx(P_no_RED_test, abs=0.01)


def test_FEMA_P58_Assessment_EDP_uncertainty_zero_variance():
"""
Perform a loss assessment with customized inputs that focus on testing the
methods used to estimate the multivariate lognormal distribution of EDP
values. Besides the fitting, this test also evaluates the propagation of
EDP uncertainty through the analysis. Dispersions in other calculation
parameters are reduced to negligible levels. This allows us to test the
results against pre-defined reference values in spite of the randomness
involved in the calculations.
This test simulates a scenario when one of the EDPs is identical in all
of the available samples. This results in zero variance in that dimension
and the purpose of the test is to ensure that such cases are handled
appropriately.
"""
base_input_path = 'resources/'
DL_input = base_input_path + 'input data/' + "DL_input_test_7.json"
EDP_input = base_input_path + 'EDP data/' + "EDP_table_test_7.out"
A = FEMA_P58_Assessment()
A.read_inputs(DL_input, EDP_input, verbose=False)
A.define_random_variables()
# -------------------------------------------------- check random variables
# EDP
RV_EDP = list(A._EDP_dict.values())
assert np.all([rv.distribution == 'lognormal' for rv in RV_EDP])
thetas, betas = np.array([rv.theta for rv in RV_EDP]).T
EDP_theta_test = thetas
EDP_beta_test = betas
assert EDP_theta_test[4] == pytest.approx(0.061364, rel=0.05)
assert EDP_beta_test[4] < 0.061364 * 1e-3
rho = RV_EDP[0].RV_set.Rho()
EDP_rho_test = rho
EDP_rho_target = np.zeros((8, 8))
np.fill_diagonal(EDP_rho_target, 1.0)
assert_allclose(EDP_rho_test[4], EDP_rho_target[4], atol=1e-6)
# ------------------------------------------------- perform the calculation
A.define_loss_model()
A.calculate_damage()
A.calculate_losses()
A.aggregate_results()
# ------------------------------------------------ check result aggregation
S = A._SUMMARY
SD = S.describe().T
P_no_RED_test = (1.0 - SD.loc[('red tagged', ''), 'mean']) * SD.loc[
('red tagged', ''), 'count'] / 10000.
assert P_no_RED_test == 0.0


def test_FEMA_P58_Assessment_QNT_uncertainty_independent():
"""
Perform loss assessment with customized inputs that focus on testing the
propagation of uncertainty in component quantities. Dispersions in other
calculation parameters are reduced to negligible levels. This allows us to
test the results against pre-defined reference values in spite of the
randomness involved in the calculations.
This test assumes that component quantities are independent.
"""
base_input_path = 'resources/'
DL_input = base_input_path + 'input data/' + "DL_input_test_8.json"
EDP_input = base_input_path + 'EDP data/' + "EDP_table_test_8.out"
A = FEMA_P58_Assessment()
A.read_inputs(DL_input, EDP_input, verbose=False)
A.define_random_variables()
# -------------------------------------------------- check random variables
# QNT
RV_QNT = list(A._QNT_dict.values())
QNT_theta_test, QNT_beta_test = np.array([rv.theta for rv in RV_QNT]).T
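    # For the first four (normal) quantity RVs theta and beta appear to store
    # the mean and standard deviation (25 +/- 25 units, truncated at zero
    # when sampled); for the last four (lognormal) RVs they store the median
    # (25 units) and the log-standard deviation (0.4).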
QNT_theta_target = np.ones(8) * 25.
QNT_beta_target = [25.0] * 4 + [0.4] * 4
assert_allclose(QNT_theta_test, QNT_theta_target, rtol=0.001)
assert_allclose(QNT_beta_test, QNT_beta_target, rtol=0.001)
for i in range(4):
assert RV_QNT[i].distribution == 'normal'
for i in range(4, 8):
assert RV_QNT[i].distribution == 'lognormal'
QNT_rho_target = [
[1, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 1],
]
QNT_rho_test = RV_QNT[0].RV_set.Rho()
assert_allclose(QNT_rho_test, QNT_rho_target, atol=0.001)
# ------------------------------------------------------------------------
A.define_loss_model()
A.calculate_damage()
# ------------------------------------------------ check damage calculation
# COL
# there shall be no collapses
assert A._COL.describe().T['mean'].values == 0
# DMG
DMG_check = A._DMG.describe().T
mu_test = DMG_check['mean']
sig_test = DMG_check['std']
rho_test = A._DMG.corr()
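    # Reference moments for the damaged quantities:
    # - the normal quantities (mean 25, std 25) are truncated at zero, i.e.
    #   at alpha = (0 - 25) / 25 = -1; with lam = phi(alpha) / (1 - Phi(alpha))
    #   the truncated mean is mu + sigma * lam and the truncated variance is
    #   sigma^2 * (1 + alpha * lam - lam^2) = sigma^2 * (1 - lam - lam^2)
    # - the lognormal quantities (median 25, log-std 0.4) have
    #   mean = exp(ln(25) + 0.4^2 / 2) and
    #   variance = (exp(0.4^2) - 1) * exp(2 * ln(25) + 0.4^2)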
mu_target_1 = 25.0 + 25.0 * norm.pdf(-1.0) / (1.0 - norm.cdf(-1.0))
sig_target_1 = np.sqrt(25.0 ** 2.0 * (
1 - norm.pdf(-1.0) / (1.0 - norm.cdf(-1.0)) - (
norm.pdf(-1.0) / (1.0 - norm.cdf(-1.0))) ** 2.0))
mu_target_2 = np.exp(np.log(25.0) + 0.4 ** 2. / 2.)
sig_target_2 = np.sqrt(
(np.exp(0.4 ** 2.0) - 1.0) * np.exp(2 * np.log(25.0) + 0.4 ** 2.0))
assert_allclose(mu_test[:4], mu_target_1, rtol=0.05)
assert_allclose(mu_test[4:], mu_target_2, rtol=0.05)
assert_allclose(sig_test[:4], sig_target_1, rtol=0.05)
assert_allclose(sig_test[4:], sig_target_2, rtol=0.05)
assert_allclose(rho_test, QNT_rho_target, atol=0.05)
# ------------------------------------------------------------------------
A.calculate_losses()
# -------------------------------------------------- check loss calculation
DV_COST = A._DV_dict['rec_cost'] / A._DMG
rho_DV_target = [
[1, 1, 1, 1, 0, 0, 0, 0],
[1, 1, 1, 1, 0, 0, 0, 0],
[1, 1, 1, 1, 0, 0, 0, 0],
[1, 1, 1, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 1, 1, 1],
[0, 0, 0, 0, 1, 1, 1, 1],
[0, 0, 0, 0, 1, 1, 1, 1],
[0, 0, 0, 0, 1, 1, 1, 1],
]
assert_allclose(DV_COST.corr(), rho_DV_target, atol=0.05)
# Uncertainty in decision variables is controlled by the correlation
# between damages
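    # The reference probabilities below are obtained by Monte Carlo sampling:
    # the total damaged quantity of the PID-sensitive components is the sum
    # of four independent zero-truncated normals, that of the PFA-sensitive
    # components the sum of four independent lognormals. The unit
    # consequences presumably drop to their lower bound (10 cost units /
    # 0.01 time units) once the total quantity exceeds 90 units, so
    # P(sum > 90) shall match the fraction of realizations with unit cost
    # below 10.01 (and, further below, unit time below 0.0101).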
RND = [tnorm.rvs(-1., np.inf, loc=25, scale=25, size=10000) for i in
range(4)]
RND = np.sum(RND, axis=0)
P_target_PID = np.sum(RND > 90.) / 10000.
P_test_PID = np.sum(DV_COST.iloc[:, 0] < 10.01) / 10000.
assert P_target_PID == pytest.approx(P_test_PID, rel=0.02)
RND = [np.exp(norm.rvs(loc=np.log(25.), scale=0.4, size=10000)) for i in
range(4)]
RND = np.sum(RND, axis=0)
P_target_PFA = np.sum(RND > 90.) / 10000.
P_test_PFA = np.sum(DV_COST.iloc[:, 4] < 10.01) / 10000.
assert P_target_PFA == pytest.approx(P_test_PFA, rel=0.02)
# the same checks can be performed for reconstruction time
DV_TIME = A._DV_dict['rec_time'] / A._DMG
assert_allclose(DV_TIME.corr(), rho_DV_target, atol=0.05)
P_test_PID = np.sum(DV_TIME.iloc[:, 0] < 0.0101) / 10000.
assert P_target_PID == pytest.approx(P_test_PID, rel=0.02)
P_test_PFA = np.sum(DV_TIME.iloc[:, 4] < 0.0101) / 10000.
assert P_target_PFA == pytest.approx(P_test_PFA, rel=0.02)
# injuries...
DV_INJ_dict = deepcopy(A._DV_dict['injuries'])
DV_INJ0 = (DV_INJ_dict[0] / A._DMG).describe()
DV_INJ1 = (DV_INJ_dict[1] / A._DMG).describe()
assert_allclose(DV_INJ0.loc['mean', :][:4], np.ones(4) * 0.025, rtol=0.001)
assert_allclose(DV_INJ0.loc['mean', :][4:], np.ones(4) * 0.1, rtol=0.001)
assert_allclose(DV_INJ1.loc['mean', :][:4], np.ones(4) * 0.005, rtol=0.001)
assert_allclose(DV_INJ1.loc['mean', :][4:], np.ones(4) * 0.02, rtol=0.001)
assert_allclose(DV_INJ0.loc['std', :], np.zeros(8), atol=1e-4)
assert_allclose(DV_INJ1.loc['std', :], np.zeros(8), atol=1e-4)
# and for red tag...
# Since every component is damaged in every realization, the red tag
# results should all be 1.0
assert_allclose(A._DV_dict['red_tag'], np.ones((10000, 8)))
# ------------------------------------------------------------------------
A.aggregate_results()
# ------------------------------------------------ check result aggregation
S = A._SUMMARY
SD = S.describe().T
assert SD.loc[('inhabitants', ''), 'mean'] == 20.0
assert SD.loc[('inhabitants', ''), 'std'] == 0.0
assert SD.loc[('collapses', 'collapsed'), 'mean'] == 0.0
assert SD.loc[('collapses', 'collapsed'), 'std'] == 0.0
assert SD.loc[('red tagged', ''), 'mean'] == 1.0
assert SD.loc[('red tagged', ''), 'std'] == 0.0
assert np.corrcoef(S.loc[:, ('reconstruction', 'cost')],
S.loc[:, ('reconstruction', 'time-sequential')])[
0, 1] == pytest.approx(1.0)
assert_allclose(A._DV_dict['rec_cost'].sum(axis=1),
S.loc[:, ('reconstruction', 'cost')])
assert_allclose(A._DV_dict['rec_time'].sum(axis=1),
S.loc[:, ('reconstruction', 'time-sequential')])
assert_allclose(A._DV_dict['rec_time'].max(axis=1),
S.loc[:, ('reconstruction', 'time-parallel')])
assert_allclose(A._DV_dict['injuries'][0].sum(axis=1),
S.loc[:, ('injuries', 'sev1')])
assert_allclose(A._DV_dict['injuries'][1].sum(axis=1),
S.loc[:, ('injuries', 'sev2')])


def test_FEMA_P58_Assessment_QNT_uncertainty_dependencies():
"""
Perform loss assessment with customized inputs that focus on testing the
propagation of uncertainty in component quantities. Dispersions in other
calculation parameters are reduced to negligible levels. This allows us to
test the results against pre-defined reference values in spite of the
randomness involved in the calculations.
This test checks if dependencies between component quantities are handled
appropriately.
"""
base_input_path = 'resources/'
DL_input = base_input_path + 'input data/' + "DL_input_test_8.json"
EDP_input = base_input_path + 'EDP data/' + "EDP_table_test_8.out"
for dep in ['FG', 'PG', 'DIR', 'LOC']:
A = FEMA_P58_Assessment()
A.read_inputs(DL_input, EDP_input, verbose=False)
A._AIM_in['dependencies']['quantities'] = dep
A.define_random_variables()
# ---------------------------------------------- check random variables
# QNT
RV_QNT = list(A._QNT_dict.values())
QNT_theta_test, QNT_beta_test = np.array([rv.theta for rv in RV_QNT]).T
QNT_theta_target = np.ones(8) * 25.
QNT_beta_target = [25.0] * 4 + [0.4] * 4
assert_allclose(QNT_theta_test, QNT_theta_target, rtol=0.001)
assert_allclose(QNT_beta_test, QNT_beta_target, rtol=0.001)
for i in range(4):
assert RV_QNT[i].distribution == 'normal'
for i in range(4, 8):
assert RV_QNT[i].distribution == 'lognormal'
if dep == 'FG':
QNT_rho_target = np.array([
[1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1],
])
elif dep == 'PG':
QNT_rho_target = np.array([
[1, 1, 1, 1, 0, 0, 0, 0],
[1, 1, 1, 1, 0, 0, 0, 0],
[1, 1, 1, 1, 0, 0, 0, 0],
[1, 1, 1, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 1, 1, 1],
[0, 0, 0, 0, 1, 1, 1, 1],
[0, 0, 0, 0, 1, 1, 1, 1],
[0, 0, 0, 0, 1, 1, 1, 1],
])
elif dep == 'DIR':
QNT_rho_target = np.array([
[1, 1, 0, 0, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 0, 0, 0, 0],
[0, 0, 1, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 1, 0, 0],
[0, 0, 0, 0, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 1],
[0, 0, 0, 0, 0, 0, 1, 1],
])
elif dep == 'LOC':
QNT_rho_target = np.array([
[1, 0, 1, 0, 0, 0, 0, 0],
[0, 1, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0, 0],
[0, 1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 1, 0],
[0, 0, 0, 0, 0, 1, 0, 1],
[0, 0, 0, 0, 1, 0, 1, 0],
[0, 0, 0, 0, 0, 1, 0, 1],
])
QNT_rho_test = RV_QNT[0].RV_set.Rho()
assert_allclose(QNT_rho_test, QNT_rho_target, atol=0.001)
# ---------------------------------------------------------------------
A.define_loss_model()
A.calculate_damage()
# -------------------------------------------- check damage calculation
# COL
# there shall be no collapses
assert A._COL.describe().T['mean'].values == 0
# DMG
# Because the correlations are enforced after truncation, the marginals
# shall be unaffected by the correlation structure. Hence, the
# distribution of damaged quantities within a PG shall be identical in
# all dep cases.
# The specified dependencies are apparent in the correlation between
# damaged quantities in various PGs.
DMG_check = A._DMG.describe().T
mu_test = DMG_check['mean']
sig_test = DMG_check['std']
rho_test = A._DMG.corr()
mu_target_1 = 25.0 + 25.0 * norm.pdf(-1.0) / (1.0 - norm.cdf(-1.0))
sig_target_1 = np.sqrt(25.0 ** 2.0 * (
1 - norm.pdf(-1.0) / (1.0 - norm.cdf(-1.0)) - (
norm.pdf(-1.0) / (1.0 - norm.cdf(-1.0))) ** 2.0))
mu_target_2 = np.exp(np.log(25.0) + 0.4 ** 2. / 2.)
sig_target_2 = np.sqrt(
(np.exp(0.4 ** 2.0) - 1.0) * np.exp(2 * np.log(25.0) + 0.4 ** 2.0))
assert_allclose(mu_test[:4], mu_target_1, rtol=0.05)
assert_allclose(mu_test[4:], mu_target_2, rtol=0.05)
assert_allclose(sig_test[:4], sig_target_1, rtol=0.05)
assert_allclose(sig_test[4:], sig_target_2, rtol=0.05)
assert_allclose(rho_test, QNT_rho_target, atol=0.05)
# ---------------------------------------------------------------------
A.calculate_losses()
# ---------------------------------------------- check loss calculation
DV_COST = A._DV_dict['rec_cost'] / A._DMG
        # After the DVs are normalized by the damaged quantities, the
        # resulting samples show the correlation between the DV measures
        # (such as reconstruction cost) per unit of damaged component.
        # Because these consequences are perfectly correlated among the
        # components of a fragility group by definition, the quadrants on
        # the main diagonal follow the matrix presented below. If there
        # are additional correlations defined between component quantities
        # in different fragility groups (i.e., the off-diagonal quadrants
        # of the rho matrix), those are preserved in the consequences.
        # Therefore, the off-diagonal quadrants need to be updated with
        # those from QNT_rho_target to get an appropriate rho_DV_target.
rho_DV_target = np.array([
[1, 1, 1, 1, 0, 0, 0, 0],
[1, 1, 1, 1, 0, 0, 0, 0],
[1, 1, 1, 1, 0, 0, 0, 0],
[1, 1, 1, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 1, 1, 1],
[0, 0, 0, 0, 1, 1, 1, 1],
[0, 0, 0, 0, 1, 1, 1, 1],
[0, 0, 0, 0, 1, 1, 1, 1],
])
rho_DV_target[:4, 4:] = QNT_rho_target[:4, 4:]
rho_DV_target[4:, :4] = QNT_rho_target[:4, 4:]
assert_allclose(DV_COST.corr(), rho_DV_target, atol=0.05)
# uncertainty in decision variables is controlled by the correlation
# between damages
P_test_PID = np.sum(DV_COST.iloc[:, 0] < 10.01) / 10000.
P_test_PFA = np.sum(DV_COST.iloc[:, 4] < 10.01) / 10000.
# the first component quantities follow a truncated multivariate normal
# distribution
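        # The sum of the four correlated, zero-truncated normal quantities is
        # approximated here by a single zero-truncated normal: the loop below
        # iteratively back-calculates the parameters (mu_b, sig_b) of the
        # underlying untruncated normal whose truncated mean and standard
        # deviation match the target moments of the sum, and the exceedance
        # probability of 90 units is then evaluated from that truncated
        # distribution.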
mu_target_PID = mu_target_1 * 4.
sig_target_PID = np.sqrt(
sig_target_1 ** 2. * np.sum(QNT_rho_target[:4, :4]))
mu_target_PID_b = mu_target_PID
sig_target_PID_b = sig_target_PID
alpha = 100.
i = 0
while (np.log(
np.abs(alpha / (mu_target_PID_b / sig_target_PID_b))) > 0.001) and (
i < 10):
alpha = -mu_target_PID_b / sig_target_PID_b
mu_target_PID_b = mu_target_PID - sig_target_PID_b * norm.pdf(
alpha) / (1.0 - norm.cdf(alpha))
sig_target_PID_b = sig_target_PID / np.sqrt(
(1.0 + alpha * norm.pdf(alpha) / (1.0 - norm.cdf(alpha))))
i += 1
xi = (90 - mu_target_PID_b) / sig_target_PID_b
P_target_PID = 1.0 - (norm.cdf(xi) - norm.cdf(alpha)) / (
1.0 - norm.cdf(alpha))
assert P_target_PID == pytest.approx(P_test_PID, rel=0.05)
# the second component quantities follow a multivariate lognormal
# distribution
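        # The sum of the four correlated lognormal quantities is approximated
        # by a single lognormal with the same mean and variance (a Fenton-
        # Wilkinson-type moment matching): sig_b^2 = ln(1 + (sig / mu)^2) and
        # mu_b = ln(mu) - sig_b^2 / 2.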
mu_target_PFA = mu_target_2 * 4.
sig_target_PFA = np.sqrt(
sig_target_2 ** 2. * np.sum(QNT_rho_target[4:, 4:]))
sig_target_PFA_b = np.sqrt(
np.log(sig_target_PFA ** 2.0 / mu_target_PFA ** 2.0 + 1.0))
mu_target_PFA_b = np.log(mu_target_PFA) - sig_target_PFA_b ** 2.0 / 2.
xi = np.log(90)
P_target_PFA = 1.0 - norm.cdf(xi, loc=mu_target_PFA_b,
scale=sig_target_PFA_b)
assert P_target_PFA == pytest.approx(P_test_PFA, rel=0.05)
# the same checks can be performed for reconstruction time
DV_TIME = A._DV_dict['rec_time'] / A._DMG
assert_allclose(DV_TIME.corr(), rho_DV_target, atol=0.05)
P_test_PID = np.sum(DV_TIME.iloc[:, 0] < 0.0101) / 10000.
assert P_target_PID == pytest.approx(P_test_PID, rel=0.05)
P_test_PFA = np.sum(DV_TIME.iloc[:, 4] < 0.0101) / 10000.
assert P_target_PFA == pytest.approx(P_test_PFA, rel=0.05)
# injuries...
# Every component is damaged in every realization in this test. Once
# normalized by the quantity of components, the number of injuries
# shall be identical and unaffected by the correlation between
# component quantities.
DV_INJ_dict = deepcopy(A._DV_dict['injuries'])
DV_INJ0 = (DV_INJ_dict[0] / A._DMG).describe()
DV_INJ1 = (DV_INJ_dict[1] / A._DMG).describe()
assert_allclose(DV_INJ0.loc['mean', :][:4], np.ones(4) * 0.025,
rtol=0.001)
assert_allclose(DV_INJ0.loc['mean', :][4:], np.ones(4) * 0.1,
rtol=0.001)
assert_allclose(DV_INJ1.loc['mean', :][:4], np.ones(4) * 0.005,
rtol=0.001)
assert_allclose(DV_INJ1.loc['mean', :][4:], np.ones(4) * 0.02,
rtol=0.001)
assert_allclose(DV_INJ0.loc['std', :], np.zeros(8), atol=1e-4)
assert_allclose(DV_INJ1.loc['std', :], np.zeros(8), atol=1e-4)
# and for red tag...
# since every component is damaged in every realization, the red tag
# results should all be 1.0
assert_allclose(A._DV_dict['red_tag'], np.ones((10000, 8)))
# ---------------------------------------------------------------------
A.aggregate_results()
# -------------------------------------------- check result aggregation
S = A._SUMMARY
SD = S.describe().T
assert SD.loc[('inhabitants', ''), 'mean'] == 20.0
assert SD.loc[('inhabitants', ''), 'std'] == 0.0
assert SD.loc[('collapses', 'collapsed'), 'mean'] == 0.0
assert SD.loc[('collapses', 'collapsed'), 'std'] == 0.0
assert SD.loc[('red tagged', ''), 'mean'] == 1.0
assert SD.loc[('red tagged', ''), 'std'] == 0.0
assert np.corrcoef(S.loc[:, ('reconstruction', 'cost')],
S.loc[:, ('reconstruction', 'time-sequential')])[
0, 1] == pytest.approx(1.0)
assert_allclose(A._DV_dict['rec_cost'].sum(axis=1),
S.loc[:, ('reconstruction', 'cost')])
assert_allclose(A._DV_dict['rec_time'].sum(axis=1),
S.loc[:, ('reconstruction', 'time-sequential')])
assert_allclose(A._DV_dict['rec_time'].max(axis=1),
S.loc[:, ('reconstruction', 'time-parallel')])
assert_allclose(A._DV_dict['injuries'][0].sum(axis=1),
S.loc[:, ('injuries', 'sev1')])
assert_allclose(A._DV_dict['injuries'][1].sum(axis=1),
S.loc[:, ('injuries', 'sev2')])


def test_FEMA_P58_Assessment_FRAG_uncertainty_dependencies(dep='IND'):
"""
Perform loss assessment with customized inputs that focus on testing the
propagation of uncertainty in component fragilities. Dispersions in other
calculation parameters are reduced to negligible levels. This allows us to
test the results against pre-defined reference values in spite of the
randomness involved in the calculations.
"""
idx = pd.IndexSlice
base_input_path = 'resources/'
DL_input = base_input_path + 'input data/' + "DL_input_test_9.json"
EDP_input = base_input_path + 'EDP data/' + "EDP_table_test_9.out"
A = FEMA_P58_Assessment()
A.read_inputs(DL_input, EDP_input, verbose=False)
A._AIM_in['dependencies']['fragilities'] = dep
A.define_random_variables()
# ---------------------------------------------- check random variables
RV_FF = list(A._FF_dict.values())
fr_names = np.unique([rv.name[3:12] for rv in RV_FF])
fr_keys = {}
for fr_name in fr_names:
fr_list = [rv.name for rv in RV_FF if fr_name in rv.name]
fr_keys.update({fr_name: fr_list})
dimtag_target = [4 * 2 * 3, 20 * 2 * 3 * 3, 20 * 2 * 3 * 3,
20 * 2 * 3 * 3]
theta_target = [[0.048, 0.096], [0.048, 0.072, 0.096],
[2.9419, 5.8840, 11.7680], [2.9419, 5.8840, 11.7680]]
sig_target = [[0.5, 0.25], [1.0, 0.5, 0.25], [1.0, 0.5, 0.25],
[1.0, 0.5, 0.25]]
if dep == 'IND':
rho_target = np.zeros((24, 24))
np.fill_diagonal(rho_target, 1.0)
rho_sum = 360
elif dep == 'PG':
rho_target = np.ones((24, 24))
rho_sum = 360 ** 2.
elif dep == 'DIR':
rho_target = [
[1., 1., 1., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[1., 1., 1., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[1., 1., 1., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[1., 1., 1., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[1., 1., 1., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[1., 1., 1., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[1., 1., 1., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[1., 1., 1., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 1., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 1., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 1., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 1., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 1., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 1., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 1., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 1., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 1., 1., 1., 1., 1., 1.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 1., 1., 1., 1., 1., 1.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 1., 1., 1., 1., 1., 1.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 1., 1., 1., 1., 1., 1.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 1., 1., 1., 1., 1., 1.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 1., 1., 1., 1., 1., 1.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 1., 1., 1., 1., 1., 1.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 1., 1., 1., 1., 1., 1.]]
rho_sum = (20 * 2 * 3) ** 2. * 3
elif dep == 'LOC':
rho_target = [
[1., 1., 1., 1., 1., 1., 0., 0., 1., 1., 1., 1., 1., 1., 0., 0., 1., 1., 1., 1., 1., 1., 0., 0.],
[1., 1., 1., 1., 1., 1., 0., 0., 1., 1., 1., 1., 1., 1., 0., 0., 1., 1., 1., 1., 1., 1., 0., 0.],
[1., 1., 1., 1., 1., 1., 0., 0., 1., 1., 1., 1., 1., 1., 0., 0., 1., 1., 1., 1., 1., 1., 0., 0.],
[1., 1., 1., 1., 1., 1., 0., 0., 1., 1., 1., 1., 1., 1., 0., 0., 1., 1., 1., 1., 1., 1., 0., 0.],
[1., 1., 1., 1., 1., 1., 0., 0., 1., 1., 1., 1., 1., 1., 0., 0., 1., 1., 1., 1., 1., 1., 0., 0.],
[1., 1., 1., 1., 1., 1., 0., 0., 1., 1., 1., 1., 1., 1., 0., 0., 1., 1., 1., 1., 1., 1., 0., 0.],
[0., 0., 0., 0., 0., 0., 1., 1., 0., 0., 0., 0., 0., 0., 1., 1., 0., 0., 0., 0., 0., 0., 1., 1.],
[0., 0., 0., 0., 0., 0., 1., 1., 0., 0., 0., 0., 0., 0., 1., 1., 0., 0., 0., 0., 0., 0., 1., 1.],
[1., 1., 1., 1., 1., 1., 0., 0., 1., 1., 1., 1., 1., 1., 0., 0., 1., 1., 1., 1., 1., 1., 0., 0.],
[1., 1., 1., 1., 1., 1., 0., 0., 1., 1., 1., 1., 1., 1., 0., 0., 1., 1., 1., 1., 1., 1., 0., 0.],
[1., 1., 1., 1., 1., 1., 0., 0., 1., 1., 1., 1., 1., 1., 0., 0., 1., 1., 1., 1., 1., 1., 0., 0.],
[1., 1., 1., 1., 1., 1., 0., 0., 1., 1., 1., 1., 1., 1., 0., 0., 1., 1., 1., 1., 1., 1., 0., 0.],
[1., 1., 1., 1., 1., 1., 0., 0., 1., 1., 1., 1., 1., 1., 0., 0., 1., 1., 1., 1., 1., 1., 0., 0.],
[1., 1., 1., 1., 1., 1., 0., 0., 1., 1., 1., 1., 1., 1., 0., 0., 1., 1., 1., 1., 1., 1., 0., 0.],
[0., 0., 0., 0., 0., 0., 1., 1., 0., 0., 0., 0., 0., 0., 1., 1., 0., 0., 0., 0., 0., 0., 1., 1.],
[0., 0., 0., 0., 0., 0., 1., 1., 0., 0., 0., 0., 0., 0., 1., 1., 0., 0., 0., 0., 0., 0., 1., 1.],
[1., 1., 1., 1., 1., 1., 0., 0., 1., 1., 1., 1., 1., 1., 0., 0., 1., 1., 1., 1., 1., 1., 0., 0.],
[1., 1., 1., 1., 1., 1., 0., 0., 1., 1., 1., 1., 1., 1., 0., 0., 1., 1., 1., 1., 1., 1., 0., 0.],
[1., 1., 1., 1., 1., 1., 0., 0., 1., 1., 1., 1., 1., 1., 0., 0., 1., 1., 1., 1., 1., 1., 0., 0.],
[1., 1., 1., 1., 1., 1., 0., 0., 1., 1., 1., 1., 1., 1., 0., 0., 1., 1., 1., 1., 1., 1., 0., 0.],
[1., 1., 1., 1., 1., 1., 0., 0., 1., 1., 1., 1., 1., 1., 0., 0., 1., 1., 1., 1., 1., 1., 0., 0.],
[1., 1., 1., 1., 1., 1., 0., 0., 1., 1., 1., 1., 1., 1., 0., 0., 1., 1., 1., 1., 1., 1., 0., 0.],
[0., 0., 0., 0., 0., 0., 1., 1., 0., 0., 0., 0., 0., 0., 1., 1., 0., 0., 0., 0., 0., 0., 1., 1.],
[0., 0., 0., 0., 0., 0., 1., 1., 0., 0., 0., 0., 0., 0., 1., 1., 0., 0., 0., 0., 0., 0., 1., 1.]]
rho_sum = (20 * 3) ** 2. * (2 * 9)
elif dep in ['ATC', 'CSG']:
rho_target = [
[1., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[1., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[1., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[1., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[1., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[1., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 1., 1., 1., 1., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 1., 1., 1., 1., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 1., 1., 1., 1., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 1., 1., 1., 1., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 1., 1., 1., 1., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 1., 1., 1., 1., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1.]]
rho_sum = (20 * 3) ** 2. * (2 * 3)
elif dep == 'DS':
rho_target = [
[1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1.]]
rho_sum = 3 ** 2 * (20 * 2 * 3)
for k, key in enumerate(sorted(fr_keys.keys())):
RV_FF_i = [A._FF_dict[rv_i] for rv_i in fr_keys[key]]
assert len(RV_FF_i) == dimtag_target[k]
FF_theta_test, FF_beta_test = np.array([rv.theta for rv in RV_FF_i]).T
if k == 0:
FF_theta_test = pd.DataFrame(
np.reshape(FF_theta_test, (12, 2))).describe()
FF_beta_test = pd.DataFrame(
np.reshape(FF_beta_test, (12, 2))).describe()
else:
FF_theta_test = pd.DataFrame(
np.reshape(FF_theta_test, (120, 3))).describe()
FF_beta_test = pd.DataFrame(
np.reshape(FF_beta_test, (120, 3))).describe()
assert_allclose(FF_theta_test.loc['mean', :].values, theta_target[k],
rtol=1e-4)
assert_allclose(FF_theta_test.loc['std', :].values,
np.zeros(np.array(theta_target[k]).shape),
atol=1e-10)
assert_allclose(FF_beta_test.loc['mean', :].values, sig_target[k],
rtol=1e-4)
assert_allclose(FF_beta_test.loc['std', :].values,
np.zeros(np.array(sig_target[k]).shape), atol=1e-10)
rho_test = RV_FF_i[0].RV_set.Rho(fr_keys[fr_names[k]])
if k == 0:
# we perform the detailed verification of rho for the first case
# only (because the others are 360x360 matrices)
assert_allclose(rho_test, rho_target)
else:
# for the other cases we check the number of ones in the matrix
assert np.sum(rho_test) == rho_sum
# RV_FR = deepcopy(A._RV_dict[key])
# assert len(RV_FR._dimension_tags) == dimtag_target[k]
#
# COV_test = RV_FR.COV
# sig_test = np.sqrt(np.diagonal(COV_test))
# rho_test = COV_test / np.outer(sig_test, sig_test)
#
# if k == 0:
# theta_test = pd.DataFrame(
# np.reshape(RV_FR.theta, (12, 2))).describe()
# sig_test = pd.DataFrame(
# np.reshape(sig_test, (12, 2))).describe()
# else:
# theta_test = pd.DataFrame(
# np.reshape(RV_FR.theta, (120, 3))).describe()
# sig_test = pd.DataFrame(
# np.reshape(sig_test, (120, 3))).describe()
#
# assert_allclose(theta_test.loc['mean', :].values, theta_target[k],
# rtol=1e-4)
# assert_allclose(theta_test.loc['std', :].values,
# np.zeros(np.array(theta_target[k]).shape),
# atol=1e-10)
#
# assert_allclose(sig_test.loc['mean', :].values, sig_target[k],
# rtol=1e-4)
# assert_allclose(sig_test.loc['std', :].values,
# np.zeros(np.array(sig_target[k]).shape), atol=1e-10)
#
# if k == 0:
# # we perform the detailed verification of rho for the first case
# # only (because the others are 360x360 matrices)
# assert_allclose(rho_test, rho_target)
#
# else:
# # for the other cases we check the number of ones in the matrix
# assert np.sum(rho_test) == rho_sum
# ---------------------------------------------------------------------
A.define_loss_model()
A.calculate_damage()
# -------------------------------------------- check damage calculation
# COL
# there shall be no collapses
assert A._COL.describe().T['mean'].values == 0
# DMG
DMG_check = A._DMG
# start with checking the damage correlations
for k in range(4):
DMG_corr = DMG_check.loc[:, idx[k + 1, :, :]].corr()
if k == 0:
DMG_corr = DMG_corr.iloc[:8, :8]
if dep in ['IND', 'ATC', 'CSG', 'DS']:
DMG_corr_ref = np.array([
[ 1.0,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 1.0,-0.1, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0,-0.1, 1.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 1.0,-0.1, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0,-0.1, 1.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 1.0],
])
elif dep == 'PG':
DMG_corr_ref = np.array([
[ 1.0,-0.1, 1.0,-0.1, 1.0,-0.1, 1.0,-0.1],
[-0.1, 1.0,-0.1, 1.0,-0.1, 1.0,-0.1, 1.0],
[ 1.0,-0.1, 1.0,-0.1, 1.0,-0.1, 1.0,-0.1],
[-0.1, 1.0,-0.1, 1.0,-0.1, 1.0,-0.1, 1.0],
[ 1.0,-0.1, 1.0,-0.1, 1.0,-0.1, 1.0,-0.1],
[-0.1, 1.0,-0.1, 1.0,-0.1, 1.0,-0.1, 1.0],
[ 1.0,-0.1, 1.0,-0.1, 1.0,-0.1, 1.0,-0.1],
[-0.1, 1.0,-0.1, 1.0,-0.1, 1.0,-0.1, 1.0],
])
elif dep == 'DIR':
DMG_corr_ref = np.array([
[ 1.0,-0.1, 1.0,-0.1, 0.0, 0.0, 0.0, 0.0],
[-0.1, 1.0,-0.1, 1.0, 0.0, 0.0, 0.0, 0.0],
[ 1.0,-0.1, 1.0,-0.1, 0.0, 0.0, 0.0, 0.0],
[-0.1, 1.0,-0.1, 1.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 1.0,-0.1, 1.0,-0.1],
[ 0.0, 0.0, 0.0, 0.0,-0.1, 1.0,-0.1, 1.0],
[ 0.0, 0.0, 0.0, 0.0, 1.0,-0.1, 1.0,-0.1],
[ 0.0, 0.0, 0.0, 0.0,-0.1, 1.0,-0.1, 1.0],
])
elif dep == 'LOC':
DMG_corr_ref = np.array([
[ 1.0,-0.1, 0.0, 0.0, 1.0,-0.1, 0.0, 0.0],
[-0.1, 1.0, 0.0, 0.0,-0.1, 1.0, 0.0, 0.0],
[ 0.0, 0.0, 1.0,-0.1, 0.0, 0.0, 1.0,-0.1],
[ 0.0, 0.0,-0.1, 1.0, 0.0, 0.0,-0.1, 1.0],
[ 1.0,-0.1, 0.0, 0.0, 1.0,-0.1, 0.0, 0.0],
[-0.1, 1.0, 0.0, 0.0,-0.1, 1.0, 0.0, 0.0],
[ 0.0, 0.0, 1.0,-0.1, 0.0, 0.0, 1.0,-0.1],
[ 0.0, 0.0,-0.1, 1.0, 0.0, 0.0,-0.1, 1.0],
])
if k == 1:
DMG_corr = DMG_corr.iloc[:12, :12]
if dep in ['IND', 'ATC', 'CSG', 'DS']:
DMG_corr_ref = np.array([
[ 1.0,-0.1,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1, 1.0,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1,-0.1, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 1.0,-0.1,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0,-0.1, 1.0,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0,-0.1,-0.1, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0,-0.1,-0.1, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 1.0,-0.1, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1,-0.1, 1.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0,-0.1,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 1.0,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1,-0.1, 1.0],
])
elif dep == 'PG':
DMG_corr_ref = np.array([
[ 1.0,-0.1,-0.1, 1.0,-0.1,-0.1, 1.0,-0.1,-0.1, 1.0,-0.1,-0.1],
[-0.1, 1.0,-0.1,-0.1, 1.0,-0.1,-0.1, 1.0,-0.1,-0.1, 1.0,-0.1],
[-0.1,-0.1, 1.0,-0.1,-0.1, 1.0,-0.1,-0.1, 1.0,-0.1,-0.1, 1.0],
[ 1.0,-0.1,-0.1, 1.0,-0.1,-0.1, 1.0,-0.1,-0.1, 1.0,-0.1,-0.1],
[-0.1, 1.0,-0.1,-0.1, 1.0,-0.1,-0.1, 1.0,-0.1,-0.1, 1.0,-0.1],
[-0.1,-0.1, 1.0,-0.1,-0.1, 1.0,-0.1,-0.1, 1.0,-0.1,-0.1, 1.0],
[ 1.0,-0.1,-0.1, 1.0,-0.1,-0.1, 1.0,-0.1,-0.1, 1.0,-0.1,-0.1],
[-0.1, 1.0,-0.1,-0.1, 1.0,-0.1,-0.1, 1.0,-0.1,-0.1, 1.0,-0.1],
[-0.1,-0.1, 1.0,-0.1,-0.1, 1.0,-0.1,-0.1, 1.0,-0.1,-0.1, 1.0],
[ 1.0,-0.1,-0.1, 1.0,-0.1,-0.1, 1.0,-0.1,-0.1, 1.0,-0.1,-0.1],
[-0.1, 1.0,-0.1,-0.1, 1.0,-0.1,-0.1, 1.0,-0.1,-0.1, 1.0,-0.1],
[-0.1,-0.1, 1.0,-0.1,-0.1, 1.0,-0.1,-0.1, 1.0,-0.1,-0.1, 1.0],
])
elif dep == 'DIR':
DMG_corr_ref = np.array([
[ 1.0,-0.1,-0.1, 1.0,-0.1,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1, 1.0,-0.1,-0.1, 1.0,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1,-0.1, 1.0,-0.1,-0.1, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 1.0,-0.1,-0.1, 1.0,-0.1,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1, 1.0,-0.1,-0.1, 1.0,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1,-0.1, 1.0,-0.1,-0.1, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0,-0.1,-0.1, 1.0,-0.1,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 1.0,-0.1,-0.1, 1.0,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1,-0.1, 1.0,-0.1,-0.1, 1.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0,-0.1,-0.1, 1.0,-0.1,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 1.0,-0.1,-0.1, 1.0,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1,-0.1, 1.0,-0.1,-0.1, 1.0],
])
elif dep == 'LOC':
DMG_corr_ref = np.array([
[ 1.0,-0.1,-0.1, 0.0, 0.0, 0.0, 1.0,-0.1,-0.1, 0.0, 0.0, 0.0],
[-0.1, 1.0,-0.1, 0.0, 0.0, 0.0,-0.1, 1.0,-0.1, 0.0, 0.0, 0.0],
[-0.1,-0.1, 1.0, 0.0, 0.0, 0.0,-0.1,-0.1, 1.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 1.0,-0.1,-0.1, 0.0, 0.0, 0.0, 1.0,-0.1,-0.1],
[ 0.0, 0.0, 0.0,-0.1, 1.0,-0.1, 0.0, 0.0, 0.0,-0.1, 1.0,-0.1],
[ 0.0, 0.0, 0.0,-0.1,-0.1, 1.0, 0.0, 0.0, 0.0,-0.1,-0.1, 1.0],
[ 1.0,-0.1,-0.1, 0.0, 0.0, 0.0, 1.0,-0.1,-0.1, 0.0, 0.0, 0.0],
[-0.1, 1.0,-0.1, 0.0, 0.0, 0.0,-0.1, 1.0,-0.1, 0.0, 0.0, 0.0],
[-0.1,-0.1, 1.0, 0.0, 0.0, 0.0,-0.1,-0.1, 1.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 1.0,-0.1,-0.1, 0.0, 0.0, 0.0, 1.0,-0.1,-0.1],
[ 0.0, 0.0, 0.0,-0.1, 1.0,-0.1, 0.0, 0.0, 0.0,-0.1, 1.0,-0.1],
[ 0.0, 0.0, 0.0,-0.1,-0.1, 1.0, 0.0, 0.0, 0.0,-0.1,-0.1, 1.0],
])
if k == 2:
DMG_corr = DMG_corr.iloc[:20, :20]
if dep in ['IND', 'DS']:
DMG_corr_ref = np.array([
[ 1.0,-0.1,-0.1,-0.1,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1, 1.0,-0.1,-0.1,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1,-0.1, 1.0,-0.1,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1,-0.1,-0.1, 1.0,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1,-0.1,-0.1,-0.1, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 1.0,-0.1,-0.1,-0.1,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 1.0,-0.1,-0.1,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0,-0.1,-0.1, 1.0,-0.1,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0,-0.1,-0.1,-0.1, 1.0,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0,-0.1,-0.1,-0.1,-0.1, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0,-0.1,-0.1,-0.1,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 1.0,-0.1,-0.1,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1,-0.1, 1.0,-0.1,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1,-0.1,-0.1, 1.0,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1,-0.1,-0.1,-0.1, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0,-0.1,-0.1,-0.1,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 1.0,-0.1,-0.1,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1,-0.1, 1.0,-0.1,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1,-0.1,-0.1, 1.0,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1,-0.1,-0.1,-0.1, 1.0],
])
elif dep == 'PG':
DMG_corr_ref = np.array([
[ 1.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1],
[-0.1, 1.0, 0.5, 0.5,-0.1,-0.1, 0.8, 0.5, 0.5,-0.1,-0.1, 0.8, 0.5, 0.5,-0.1,-0.1, 0.8, 0.5, 0.5,-0.1],
[-0.1, 0.5, 1.0, 0.5,-0.1,-0.1, 0.5, 0.6, 0.5,-0.1,-0.1, 0.5, 0.6, 0.5,-0.1,-0.1, 0.5, 0.6, 0.5,-0.1],
[-0.1, 0.5, 0.5, 1.0,-0.1,-0.1, 0.5, 0.5, 0.5,-0.1,-0.1, 0.5, 0.5, 0.5,-0.1,-0.1, 0.5, 0.5, 0.5,-0.1],
[-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0],
[ 1.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1],
[-0.1, 0.8, 0.5, 0.5,-0.1,-0.1, 1.0, 0.5, 0.5,-0.1,-0.1, 0.8, 0.5, 0.5,-0.1,-0.1, 0.8, 0.5, 0.5,-0.1],
[-0.1, 0.5, 0.6, 0.5,-0.1,-0.1, 0.5, 1.0, 0.5,-0.1,-0.1, 0.5, 0.6, 0.5,-0.1,-0.1, 0.5, 0.6, 0.5,-0.1],
[-0.1, 0.5, 0.5, 0.5,-0.1,-0.1, 0.5, 0.5, 1.0,-0.1,-0.1, 0.5, 0.5, 0.5,-0.1,-0.1, 0.5, 0.5, 0.5,-0.1],
[-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0],
[ 1.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1],
[-0.1, 0.8, 0.5, 0.5,-0.1,-0.1, 0.8, 0.5, 0.5,-0.1,-0.1, 1.0, 0.5, 0.5,-0.1,-0.1, 0.8, 0.5, 0.5,-0.1],
[-0.1, 0.5, 0.6, 0.5,-0.1,-0.1, 0.5, 0.6, 0.5,-0.1,-0.1, 0.5, 1.0, 0.5,-0.1,-0.1, 0.5, 0.6, 0.5,-0.1],
[-0.1, 0.5, 0.5, 0.5,-0.1,-0.1, 0.5, 0.5, 0.5,-0.1,-0.1, 0.5, 0.5, 1.0,-0.1,-0.1, 0.5, 0.5, 0.5,-0.1],
[-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0],
[ 1.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1],
[-0.1, 0.8, 0.5, 0.5,-0.1,-0.1, 0.8, 0.5, 0.5,-0.1,-0.1, 0.8, 0.5, 0.5,-0.1,-0.1, 1.0, 0.5, 0.5,-0.1],
[-0.1, 0.5, 0.6, 0.5,-0.1,-0.1, 0.5, 0.6, 0.5,-0.1,-0.1, 0.5, 0.6, 0.5,-0.1,-0.1, 0.5, 1.0, 0.5,-0.1],
[-0.1, 0.5, 0.5, 0.5,-0.1,-0.1, 0.5, 0.5, 0.5,-0.1,-0.1, 0.5, 0.5, 0.5,-0.1,-0.1, 0.5, 0.5, 1.0,-0.1],
[-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0],
])
elif dep == 'DIR':
DMG_corr_ref = np.array([
[ 1.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1, 1.0, 0.5, 0.5,-0.1,-0.1, 0.8, 0.5, 0.5,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1, 0.5, 1.0, 0.5,-0.1,-0.1, 0.5, 0.6, 0.5,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1, 0.5, 0.5, 1.0,-0.1,-0.1, 0.5, 0.5, 0.5,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 1.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1, 0.8, 0.5, 0.5,-0.1,-0.1, 1.0, 0.5, 0.5,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1, 0.5, 0.6, 0.5,-0.1,-0.1, 0.5, 1.0, 0.5,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1, 0.5, 0.5, 0.5,-0.1,-0.1, 0.5, 0.5, 1.0,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 1.0, 0.5, 0.5,-0.1,-0.1, 0.8, 0.5, 0.5,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.5, 1.0, 0.5,-0.1,-0.1, 0.5, 0.6, 0.5,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.5, 0.5, 1.0,-0.1,-0.1, 0.5, 0.5, 0.5,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.8, 0.5, 0.5,-0.1,-0.1, 1.0, 0.5, 0.5,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.5, 0.6, 0.5,-0.1,-0.1, 0.5, 1.0, 0.5,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.5, 0.5, 0.5,-0.1,-0.1, 0.5, 0.5, 1.0,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0],
])
elif dep == 'LOC':
DMG_corr_ref = np.array([
[ 1.0,-0.1,-0.1,-0.1,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0,-0.1,-0.1,-0.1,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1, 1.0, 0.5, 0.5,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.8, 0.5, 0.5,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1, 0.5, 1.0, 0.5,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.5, 0.6, 0.5,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1, 0.5, 0.5, 1.0,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.5, 0.5, 0.5,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1,-0.1,-0.1,-0.1, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1,-0.1,-0.1,-0.1, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 1.0,-0.1,-0.1,-0.1,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0,-0.1,-0.1,-0.1,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 1.0, 0.5, 0.5,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.8, 0.5, 0.5,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.5, 1.0, 0.5,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.5, 0.6, 0.5,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.5, 0.5, 1.0,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.5, 0.5, 0.5,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0,-0.1,-0.1,-0.1,-0.1, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1,-0.1,-0.1,-0.1, 1.0],
[ 1.0,-0.1,-0.1,-0.1,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0,-0.1,-0.1,-0.1,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1, 0.8, 0.5, 0.5,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 1.0, 0.5, 0.5,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1, 0.5, 0.6, 0.5,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.5, 1.0, 0.5,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1, 0.5, 0.5, 0.5,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.5, 0.5, 1.0,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1,-0.1,-0.1,-0.1, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1,-0.1,-0.1,-0.1, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 1.0,-0.1,-0.1,-0.1,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0,-0.1,-0.1,-0.1,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.8, 0.5, 0.5,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 1.0, 0.5, 0.5,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.5, 0.6, 0.5,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.5, 1.0, 0.5,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.5, 0.5, 0.5,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.5, 0.5, 1.0,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0,-0.1,-0.1,-0.1,-0.1, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1,-0.1,-0.1,-0.1, 1.0],
])
elif dep in ['ATC', 'CSG']:
DMG_corr_ref = np.array([
[ 1.0,-0.1,-0.1,-0.1,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1, 1.0, 0.5, 0.5,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1, 0.5, 1.0, 0.5,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1, 0.5, 0.5, 1.0,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1,-0.1,-0.1,-0.1, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 1.0,-0.1,-0.1,-0.1,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 1.0, 0.5, 0.5,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.5, 1.0, 0.5,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.5, 0.5, 1.0,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0,-0.1,-0.1,-0.1,-0.1, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0,-0.1,-0.1,-0.1,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 1.0, 0.5, 0.5,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.5, 1.0, 0.5,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.5, 0.5, 1.0,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1,-0.1,-0.1,-0.1, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0,-0.1,-0.1,-0.1,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 1.0, 0.5, 0.5,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.5, 1.0, 0.5,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.5, 0.5, 1.0,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1,-0.1,-0.1,-0.1, 1.0],
])
if k == 3:
DMG_corr = DMG_corr.iloc[:20, :20]
if dep in ['IND', 'DS']:
DMG_corr_ref = np.array([
[ 1.0,-0.1,-0.1,-0.1,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1, 1.0, 0.0, 0.0,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1, 0.0, 1.0, 0.0,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1, 0.0, 0.0, 1.0,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1,-0.1,-0.1,-0.1, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 1.0,-0.1,-0.1,-0.1,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 1.0, 0.0, 0.0,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.0, 1.0, 0.0,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.0, 0.0, 1.0,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0,-0.1,-0.1,-0.1,-0.1, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0,-0.1,-0.1,-0.1,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 1.0, 0.0, 0.0,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.0, 1.0, 0.0,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.0, 0.0, 1.0,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1,-0.1,-0.1,-0.1, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0,-0.1,-0.1,-0.1,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 1.0, 0.0, 0.0,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.0, 1.0, 0.0,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.0, 0.0, 1.0,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1,-0.1,-0.1,-0.1, 1.0],
])
elif dep == 'PG':
DMG_corr_ref = np.array([
[ 1.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1],
[-0.1, 1.0, 0.8, 0.7,-0.1,-0.1, 0.8, 0.8, 0.7,-0.1,-0.1, 0.8, 0.8, 0.7,-0.1,-0.1, 0.8, 0.8, 0.7,-0.1],
[-0.1, 0.8, 1.0, 0.6,-0.1,-0.1, 0.8, 0.7, 0.6,-0.1,-0.1, 0.8, 0.7, 0.6,-0.1,-0.1, 0.8, 0.7, 0.6,-0.1],
[-0.1, 0.7, 0.6, 1.0,-0.1,-0.1, 0.7, 0.6, 0.6,-0.1,-0.1, 0.7, 0.6, 0.6,-0.1,-0.1, 0.7, 0.6, 0.6,-0.1],
[-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0],
[ 1.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1],
[-0.1, 0.8, 0.8, 0.7,-0.1,-0.1, 1.0, 0.8, 0.7,-0.1,-0.1, 0.8, 0.8, 0.7,-0.1,-0.1, 0.8, 0.8, 0.7,-0.1],
[-0.1, 0.8, 0.6, 0.6,-0.1,-0.1, 0.8, 1.0, 0.6,-0.1,-0.1, 0.8, 0.7, 0.6,-0.1,-0.1, 0.8, 0.7, 0.6,-0.1],
[-0.1, 0.7, 0.6, 0.5,-0.1,-0.1, 0.7, 0.6, 1.0,-0.1,-0.1, 0.7, 0.6, 0.6,-0.1,-0.1, 0.7, 0.6, 0.6,-0.1],
[-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0],
[ 1.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1],
[-0.1, 0.8, 0.8, 0.7,-0.1,-0.1, 0.8, 0.8, 0.7,-0.1,-0.1, 1.0, 0.8, 0.7,-0.1,-0.1, 0.8, 0.8, 0.7,-0.1],
[-0.1, 0.8, 0.7, 0.6,-0.1,-0.1, 0.8, 0.7, 0.6,-0.1,-0.1, 0.8, 1.0, 0.6,-0.1,-0.1, 0.8, 0.7, 0.6,-0.1],
[-0.1, 0.7, 0.6, 0.6,-0.1,-0.1, 0.7, 0.6, 0.6,-0.1,-0.1, 0.7, 0.6, 1.0,-0.1,-0.1, 0.7, 0.6, 0.6,-0.1],
[-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0],
[ 1.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1],
[-0.1, 0.8, 0.8, 0.7,-0.1,-0.1, 0.8, 0.8, 0.7,-0.1,-0.1, 0.8, 0.8, 0.7,-0.1,-0.1, 1.0, 0.8, 0.7,-0.1],
[-0.1, 0.8, 0.7, 0.6,-0.1,-0.1, 0.8, 0.7, 0.6,-0.1,-0.1, 0.8, 0.6, 0.6,-0.1,-0.1, 0.8, 1.0, 0.6,-0.1],
[-0.1, 0.7, 0.6, 0.6,-0.1,-0.1, 0.7, 0.6, 0.6,-0.1,-0.1, 0.7, 0.6, 0.5,-0.1,-0.1, 0.7, 0.6, 1.0,-0.1],
[-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0],
])
elif dep == 'DIR':
DMG_corr_ref = np.array([
[ 1.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1, 1.0, 0.8, 0.7,-0.1,-0.1, 0.8, 0.8, 0.7,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1, 0.8, 1.0, 0.6,-0.1,-0.1, 0.8, 0.7, 0.6,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1, 0.7, 0.6, 1.0,-0.1,-0.1, 0.7, 0.6, 0.6,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 1.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1, 0.8, 0.8, 0.7,-0.1,-0.1, 1.0, 0.8, 0.7,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1, 0.8, 0.6, 0.6,-0.1,-0.1, 0.8, 1.0, 0.6,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1, 0.7, 0.6, 0.5,-0.1,-0.1, 0.7, 0.6, 1.0,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 1.0, 0.8, 0.7,-0.1,-0.1, 0.8, 0.8, 0.7,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.8, 1.0, 0.6,-0.1,-0.1, 0.8, 0.7, 0.6,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.7, 0.6, 1.0,-0.1,-0.1, 0.7, 0.6, 0.6,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.8, 0.8, 0.7,-0.1,-0.1, 1.0, 0.8, 0.7,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.8, 0.6, 0.6,-0.1,-0.1, 0.8, 1.0, 0.6,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.7, 0.6, 0.5,-0.1,-0.1, 0.7, 0.6, 1.0,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0],
])
elif dep == 'LOC':
DMG_corr_ref = np.array([
[ 1.0,-0.1,-0.1,-0.1,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0,-0.1,-0.1,-0.1,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1, 1.0, 0.8, 0.7,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.8, 0.8, 0.7,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1, 0.8, 1.0, 0.6,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.8, 0.7, 0.6,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1, 0.7, 0.6, 1.0,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.7, 0.6, 0.6,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1,-0.1,-0.1,-0.1, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1,-0.1,-0.1,-0.1, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 1.0,-0.1,-0.1,-0.1,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0,-0.1,-0.1,-0.1,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 1.0, 0.8, 0.7,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.8, 0.8, 0.7,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.8, 1.0, 0.6,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.8, 0.7, 0.6,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.7, 0.6, 1.0,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.7, 0.6, 0.6,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0,-0.1,-0.1,-0.1,-0.1, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1,-0.1,-0.1,-0.1, 1.0],
[ 1.0,-0.1,-0.1,-0.1,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0,-0.1,-0.1,-0.1,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1, 0.8, 0.8, 0.7,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 1.0, 0.8, 0.7,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1, 0.8, 0.7, 0.6,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.8, 1.0, 0.6,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1, 0.7, 0.6, 0.6,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.7, 0.6, 1.0,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1,-0.1,-0.1,-0.1, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1,-0.1,-0.1,-0.1, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 1.0,-0.1,-0.1,-0.1,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0,-0.1,-0.1,-0.1,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.8, 0.8, 0.7,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 1.0, 0.8, 0.7,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.8, 0.7, 0.6,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.8, 1.0, 0.6,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.7, 0.6, 0.6,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.7, 0.6, 1.0,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0,-0.1,-0.1,-0.1,-0.1, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1,-0.1,-0.1,-0.1, 1.0],
])
elif dep in ['ATC', 'CSG']:
DMG_corr_ref = np.array([
[ 1.0,-0.1,-0.1,-0.1,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1, 1.0, 0.8, 0.7,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1, 0.8, 1.0, 0.6,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1, 0.7, 0.6, 1.0,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1,-0.1,-0.1,-0.1, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 1.0,-0.1,-0.1,-0.1,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 1.0, 0.8, 0.7,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.8, 1.0, 0.6,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.7, 0.6, 1.0,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0,-0.1,-0.1,-0.1,-0.1, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0,-0.1,-0.1,-0.1,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 1.0, 0.8, 0.7,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.8, 1.0, 0.6,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.7, 0.6, 1.0,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1,-0.1,-0.1,-0.1, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0,-0.1,-0.1,-0.1,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 1.0, 0.8, 0.7,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.8, 1.0, 0.6,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.7, 0.6, 1.0,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1,-0.1,-0.1,-0.1, 1.0],
])
for i in range(len(DMG_corr.index)):
for j in range(len(DMG_corr.columns)):
ref_i = DMG_corr_ref[i, j]
if ref_i != 0.0:
if ref_i > 0.0:
assert DMG_corr.iloc[i, j] > 0.97 * ref_i
else:
assert DMG_corr.iloc[i, j] < 0.0
else:
assert DMG_corr.iloc[i, j] == pytest.approx(ref_i,
abs=0.15)
# then check the distribution of damage within each performance group
EDP_list = np.array(
[[[0.080000, 0.080000], [0.080000, 0.080000], [0.040000, 0.040000]],
[[7.845320, 7.845320], [7.845320, 7.845320],
[2.942000, 2.942000]]])
fr_keys = []
for key in A._RV_dict.keys():
if 'FR' in key:
fr_keys.append(key)
for k, key in enumerate(sorted(fr_keys)):
# print(key)
RV_FR = A._RV_dict[key]
        # only a third of the data is unique because of the 3 stories
rel_len = int(len(RV_FR._dimension_tags) / 3)
COV_test = RV_FR.COV[:rel_len, :rel_len]
theta_test = RV_FR.theta[:rel_len]
lims = np.unique(theta_test)
ndims = len(lims)
if k in [2, 3]:
ndims += 2
if (dep in ['DS', 'IND']) or k > 1:
DMG_vals = [[[0., 5., 7.5, 12.5, 17.5, 20., 25.], [0., 25.]],
[[0., 1.5, 3., 4.5, 6., 7.5, 9., 10.5, 12., 13.5,
15.,
16.5, 18., 19.5, 21., 22.5, 24., 25.5, 27., 28.5,
30.0],
[0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10.,
11., 12., 13., 14., 15., 16., 17., 18., 19.,
20.]]]
else:
DMG_vals = [[[0., 25.], [0., 25.]],
[[0., 30.], [0., 20.]]]
DMG_vals = np.array(DMG_vals)
for story in [0, 1, 2]:
for dir_ in [0, 1]:
# print(story, dir_)
idx = pd.IndexSlice
DMG_check_FG = DMG_check.loc[:, idx[k + 1, :, :]]
DMG_check_PG = DMG_check_FG.iloc[:,
story * 2 * ndims + dir_ * ndims:story * 2 * ndims + (
dir_ + 1) * ndims]
DMG_val_test = np.unique(
np.around(DMG_check_PG.values * 10., decimals=0) / 10.,
return_counts=True)
DMG_val_test = DMG_val_test[0][DMG_val_test[1] > 10]
# only check at most the first 10 elements, because the
# higher values have extremely low likelihood
ddim = min(len(DMG_val_test), 10)
DMG_val_ref = DMG_vals[np.sign(k), dir_]
for v in DMG_val_test:
assert v in DMG_val_ref
# additional tests for mutually exclusive DS2 in FG3
if (k == 2) and (dep not in ['DS', 'IND']):
DMG_tot = [[0., 30.], [0., 20.]][dir_]
DMG_DS2_test = DMG_check_PG.iloc[:, [1, 2, 3]].sum(
axis=1)
# the proportion of each DS in DS2 shall follow the
# pre-assigned weights
ME_test = \
DMG_check_PG.iloc[DMG_DS2_test.values > 0].iloc[:,
[1, 2, 3]].describe().T['mean'].values / DMG_tot[-1]
assert_allclose(ME_test, [0.5, 0.3, 0.2], atol=0.01)
# the sum of DMG with correlated CSGs shall be either 0.
# or the total quantity
DMG_DS2_test = np.unique(
np.around(DMG_DS2_test * 10., decimals=0) / 10.,
return_counts=True)
DMG_DS2_test = DMG_DS2_test[0][DMG_DS2_test[1] > 10]
assert_allclose(DMG_DS2_test, DMG_tot, atol=0.01)
# additional tests for simultaneous DS2 in FG4
if (k == 3) and (dep not in ['DS', 'IND']):
DMG_tot = [30.0, 20.0][dir_]
DMG_DS2_test = DMG_check_PG.iloc[:, [1, 2, 3]].sum(
axis=1)
# the proportion of each DS in DS2 shall follow the
# pre-assigned weights considering replacement
SIM_test = \
DMG_check_PG.iloc[DMG_DS2_test.values > 0].iloc[:,
[1, 2, 3]].describe().T['mean'].values / DMG_tot
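                    # 0.5 * 0.7 * 0.8 = (1-0.5)(1-0.3)(1-0.2) = 0.28 is presumably the chance
                    # that none of the three simultaneous DSs is triggered; such samples are
                    # re-drawn (replaced), which scales the expected proportions by
                    # 1 / (1 - 0.28) ~ 1.39, i.e. [0.5, 0.3, 0.2] -> ~[0.69, 0.42, 0.28]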
P_rep = 0.5 * 0.7 * 0.8
SIM_ref = np.array([0.5, 0.3, 0.2]) * (
1.0 + P_rep / (1.0 - P_rep))
assert_allclose(SIM_test, SIM_ref, atol=0.02)
# the sum of DMG with correlated CSGs shall be either
# 0. or more than the total quantity
DMG_DS2_test = DMG_DS2_test.iloc[
DMG_DS2_test.values > 0]
                    # Even with perfect correlation, the generated random
                    # samples will not be identical. Hence, one of the 20
                    # CSGs in FG4 will, very rarely, belong to a different
                    # DS than the rest. To avoid false negatives, we test
                    # the third smallest value.
assert DMG_DS2_test.sort_values().iloc[
2] >= DMG_tot * 0.99
assert np.max(DMG_DS2_test.values) > DMG_tot
# the first component has 3-1 CSGs in dir 1 and 2,
# respectively
if k == 0:
dir_len = int(rel_len * 3 / 4)
# the other components have 20-20 CSGs in dir 1 and 2,
# respectively
else:
dir_len = int(rel_len / 2)
if dir_ == 0:
theta_t = theta_test[:dir_len]
COV_t = COV_test[:dir_len, :dir_len]
else:
theta_t = theta_test[dir_len:]
COV_t = COV_test[dir_len:, dir_len:]
lim_ds1 = np.where(theta_t == lims[0])[0]
lim_ds2 = np.where(theta_t == lims[1])[0]
if k > 0:
lim_ds3 = np.where(theta_t == lims[2])[0]
ndim = len(theta_t)
EDP = EDP_list[int(k > 1), story, dir_]*1.2
DS_ref_all = []
DS_ref_any = []
DS_test_all = []
DS_test_any = []
# DS0
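                # i.e. the probability that every fragility capacity exceeds the EDP,
                # so none of the components in this performance group are damaged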
DS_ref_all.append(mvn_od(np.log(theta_t), COV_t,
lower=np.log(np.ones(ndim) * EDP),
upper=np.ones(ndim) * np.inf)[0])
if k == 0:
DS_test_all.append(
np.sum(np.all([DMG_check_PG.iloc[:, 0] == 0.,
DMG_check_PG.iloc[:, 1] == 0.],
axis=0)) / 10000.)
elif k == 1:
DS_test_all.append(
np.sum(np.all([DMG_check_PG.iloc[:, 0] == 0.,
DMG_check_PG.iloc[:, 1] == 0.,
DMG_check_PG.iloc[:, 2] == 0.],
axis=0)) / 10000.)
else:
DS_test_all.append(
np.sum(np.all([DMG_check_PG.iloc[:, 0] == 0.,
DMG_check_PG.iloc[:, 1] == 0.,
DMG_check_PG.iloc[:, 2] == 0.,
DMG_check_PG.iloc[:, 3] == 0.,
DMG_check_PG.iloc[:, 4] == 0.],
axis=0)) / 10000.)
# DS1
lower_lim = -np.ones(ndim) * np.inf
upper_lim = np.ones(ndim) * np.inf
lower_lim[lim_ds2] = np.log(EDP)
upper_lim[lim_ds1] = np.log(EDP)
if k > 0:
lower_lim[lim_ds3] = np.log(EDP)
DS_ref_all.append(mvn_od(np.log(theta_t), COV_t,
lower=lower_lim, upper=upper_lim)[
0])
lower_lim = -np.ones(ndim) * np.inf
upper_lim = np.ones(ndim) * np.inf
lower_lim[lim_ds2[0]] = np.log(EDP)
upper_lim[lim_ds1[0]] = np.log(EDP)
if k > 0:
lower_lim[lim_ds3[0]] = np.log(EDP)
P_any = mvn_od(np.log(theta_t), COV_t, lower=lower_lim,
upper=upper_lim)[0]
if (dep in ['DS', 'IND']):
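                    # with independent components, P(any one reaches this DS) =
                    # 1 - P(none does) = 1 - (1 - P_single)^n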
P_any = 1.0 - (1.0 - P_any) ** len(lim_ds1)
DS_ref_any.append(P_any)
if k == 0:
DS_test_all.append(np.sum(np.all(
[DMG_check_PG.iloc[:, 0] > DMG_val_ref[-1] - 0.1,
DMG_check_PG.iloc[:, 1] == 0.], axis=0)) / 10000.)
elif k == 1:
DS_test_all.append(np.sum(np.all(
[DMG_check_PG.iloc[:, 0] > DMG_val_ref[-1] - 0.1,
DMG_check_PG.iloc[:, 1] == 0.,
DMG_check_PG.iloc[:, 2] == 0.], axis=0)) / 10000.)
else:
DS_test_all.append(np.sum(np.all(
[DMG_check_PG.iloc[:, 0] > DMG_val_ref[-1] - 0.1,
DMG_check_PG.iloc[:, 1] == 0.,
DMG_check_PG.iloc[:, 2] == 0.,
DMG_check_PG.iloc[:, 3] == 0.,
DMG_check_PG.iloc[:, 4] == 0.], axis=0)) / 10000.)
DS_test_any.append(np.sum(
np.all([DMG_check_PG.iloc[:, 0] > 0.],
axis=0)) / 10000.)
# DS2
lower_lim = -np.ones(ndim) * np.inf
upper_lim = np.ones(ndim) * np.inf
upper_lim[lim_ds2] = np.log(EDP)
if k > 0:
lower_lim[lim_ds3] = np.log(EDP)
if k < 3:
DS_ref_all.append(mvn_od(np.log(theta_t), COV_t,
lower=lower_lim,
upper=upper_lim)[0])
else:
DS_ref_all.append(0.0)
lower_lim = -np.ones(ndim) * np.inf
upper_lim = np.ones(ndim) * np.inf
upper_lim[lim_ds2[0]] = np.log(EDP)
if k > 0:
lower_lim[lim_ds3[0]] = np.log(EDP)
P_any = mvn_od(np.log(theta_t), COV_t, lower=lower_lim,
upper=upper_lim)[0]
if (dep in ['DS', 'IND']):
P_any = 1.0 - (1.0 - P_any) ** len(lim_ds1)
DS_ref_any.append(P_any)
if k == 0:
DS_test_all.append(
np.sum(np.all([DMG_check_PG.iloc[:, 0] == 0.,
DMG_check_PG.iloc[:, 1] >
DMG_val_ref[-1] - 0.1],
axis=0)) / 10000.)
elif k == 1:
DS_test_all.append(
np.sum(np.all([DMG_check_PG.iloc[:, 0] == 0.,
DMG_check_PG.iloc[:, 1] >
DMG_val_ref[-1] - 0.1,
DMG_check_PG.iloc[:, 2] == 0.],
axis=0)) / 10000.)
elif k == 2:
DS_test_all.append(
np.sum(np.all([DMG_check_PG.iloc[:, 0] == 0.,
DMG_check_PG.iloc[:, [1, 2, 3]].sum(
axis=1) > DMG_val_ref[-1] - 0.1,
DMG_check_PG.iloc[:, 4] == 0.],
axis=0)) / 10000.)
elif k == 3:
# skip this case
DS_test_all.append(0.0)
if k < 2:
DS_test_any.append(np.sum(
np.all([DMG_check_PG.iloc[:, 1] > 0.],
axis=0)) / 10000.)
else:
DS_test_any.append(np.sum(np.all(
[DMG_check_PG.iloc[:, [1, 2, 3]].sum(axis=1) > 0.],
axis=0)) / 10000.)
# DS3
if k > 0:
lower_lim = -np.ones(ndim) * np.inf
                    upper_lim = np.ones(ndim) * np.inf
#!/usr/bin/env python2
"""
Subroutine for applying FRI-type compression to the
Near-Uniform distribution.
"""
import numpy
import compress_utils
import near_uniform
import fci_c_utils
def cmp_hier_strat(sol_vec, n_sample, p_doub, occ_orb,
orb_symm, symm_lookup, hf_num, rngen_ptrs):
"""Perform FRI-type compression on the Near-Uniform distribution,
    column-by-column, preserving columns exactly as determined by
number of samples vs. number of nonzero elements.
Parameters
----------
sol_vec : (SparseVector object)
the current solution vector
n_sample : (unsigned int)
the desired number of nonzero matrix elements in each column after
compression
p_doub : (double)
the probability of choosing a double excitation vs a single excitation
occ_orb : (numpy.ndarray, uint8)
The numbers in each row correspond to the indices of occupied
orbitals in each determinant, calculated from fci_c_utils.gen_orb_lists
orb_symm : (numpy.ndarray, uint8)
irreducible representation of each spatial orbital
symm_lookup : (numpy.ndarray, uint8)
Table of orbitals with each type of symmetry, as generated
by fci_utils.gen_byte_table()
Returns
-------
(numpy.ndarray, uint8) :
chosen occupied (0th and 1st columns) and unoccupied (2nd and 3rd
columns) orbitals for double excitations
(numpy.ndarray, float64) :
probability of selecting each chosen double excitation (weight
divided by compressed weight)
(numpy.ndarray, uint32) :
index of the origin determinant of each chosen double excitation
in the dets array
(numpy.ndarray, uint8) :
chosen occupied (0th column) and unoccupied (1st column)
orbitals for single excitations
(numpy.ndarray, float64) :
probability of selecting each chosen single excitation
(numpy.ndarray, uint32) :
index of the origin determinant of each chosen single excitation
in the dets array
"""
vec_weights = numpy.abs(sol_vec.values)
one_norm = vec_weights.sum()
kept_sing_orb = numpy.empty([0, 2], dtype=numpy.uint8)
    kept_doub_orb = numpy.empty([0, 4], dtype=numpy.uint8)
import pytest
from avalanche_rl.training.strategies.buffers import ReplayMemory, Step, Rollout
import numpy as np
import torch
from itertools import product
def unsqueeze(a, n=1):
a = [a for _ in range(n)]
if type(a[0]) is np.ndarray:
# return a[np.newaxis, ...]
        return np.stack(a)
    # fall back to torch.stack for non-numpy inputs (e.g. torch tensors) -- assumed here
    return torch.stack(a)
import json
import numpy as np
from scipy.spatial.distance import cdist
import os
try:
import cPickle as pickle
except ImportError:
import pickle
import torch
import torch.nn.functional as F
import cv2
import argparse
name2id = {}
results = []
def morpho(mask, iter, bigger=True):
# return mask
mask = mask * 255
mask = mask.astype(np.uint8)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
# print(kernel)
if bigger:
mask = cv2.dilate(mask, kernel, iterations=iter)
else:
mask = cv2.erode(mask, kernel, iterations=iter)
return mask / 255
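# Illustrative sketch (hypothetical mask): grow or shrink a binary (0/1) mask by a
# few pixels using the dilation/erosion helper above.
# mask_grown = morpho(mask, iter=2, bigger=True)
# mask_shrunk = morpho(mask, iter=2, bigger=False)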
def TPS(P1, P2, _lambda=1e-3, width=768, height=1024, calc_new_pos=False):
def radius_basis(r):
epsilon = 1e-14
return r ** 2 * np.log(r ** 2 + epsilon)
def homogenization(P):
point_num = P.shape[0]
P_homo = np.ones((point_num, 3))
P_homo[:, 1:3] = P
return P_homo
point_num = P1.shape[0]
K = radius_basis(cdist(P1, P1)) + _lambda * np.eye(point_num)
L = np.zeros((point_num + 3, point_num + 3))
L[:point_num, :point_num] = K
L[:point_num, point_num:point_num + 3] = homogenization(P1)
L[point_num:point_num + 3, :point_num] = homogenization(P1).T
# target value, calculate in turn
v_x = np.zeros(point_num + 3)
v_y = np.zeros(point_num + 3)
v_x[:point_num] = P2[:, 0]
v_y[:point_num] = P2[:, 1]
w_x = np.linalg.solve(L, v_x)
a_x = w_x[point_num:]
w_x = w_x[:point_num]
w_y = np.linalg.solve(L, v_y)
a_y = w_y[point_num:]
w_y = w_y[:point_num]
if calc_new_pos:
points = np.zeros((width * height, 2))
for i in range(width):
points[i * height:(i + 1) * height, 0] = np.ones(height) * i / width
points[i * height:(i + 1) * height, 1] = np.arange(height) / height
h_points = homogenization(points)
new_x = np.matmul(h_points, a_x) + np.matmul(w_x.T, radius_basis(cdist(P1, points)))
new_y = np.matmul(h_points, a_y) + np.matmul(w_y.T, radius_basis(cdist(P1, points)))
new_x = new_x.reshape(width, height).T
new_y = new_y.reshape(width, height).T
new_x = np.stack((new_x, new_y), axis=2)
    return None, (new_x if calc_new_pos else None)
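# Illustrative sketch (hypothetical control points): with calc_new_pos=True, TPS
# returns (None, grid), where grid has shape (height, width, 2) and holds the warped
# sampling position of every pixel; it can be fed to e.g. torch's grid_sample after
# rescaling to [-1, 1].
# P1 = np.array([[0.1, 0.1], [0.9, 0.1], [0.5, 0.9], [0.2, 0.8]])
# P2 = np.array([[0.15, 0.12], [0.85, 0.1], [0.5, 0.95], [0.25, 0.8]])
# _, grid = TPS(P1, P2, width=768, height=1024, calc_new_pos=True)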
def normalize(p, w, h):
return p / np.array([w, h]).astype(np.float32)
def load_name_to_memory(keypoint_path):
global results, name2id, x
with open(keypoint_path, 'r') as f:
results += json.load(f)
for i in range(len(results)):
result = results[i]
name2id[result['image_id'].split('/')[-1]] = i
print(name2id)
def load_keypoints(source_keypoint_path='', target_keypoint_path='',
w=100, h=100, source_name='', target_name=''):
# print(source_keypoint_path, target_keypoint_path)
if len(name2id) == 0:
load_name_to_memory(keypoint_path=source_keypoint_path)
load_name_to_memory(keypoint_path=target_keypoint_path)
source_id = name2id[source_name]
target_id = name2id[target_name]
raw_source_keypoint = np.array(results[source_id]['keypoints'], dtype=np.float32).reshape((-1, 3))[:25, :2]
source_keypoint = normalize(raw_source_keypoint, w, h)
raw_target_keypoint = np.array(results[target_id]['keypoints'], dtype=np.float32).reshape((-1, 3))[:25, :2]
target_keypoint = normalize(raw_target_keypoint, w, h)
return source_keypoint, target_keypoint, raw_source_keypoint, raw_target_keypoint
def get_midpoint(point1, point2, x_val):
slope = (point2[1] - point1[1]) / (point2[0] - point1[0])
bias = point1[1] - slope * point1[0]
y_val = x_val * slope + bias
return np.array([x_val, y_val]).reshape(1, 2)
def get_slanted_x(point1, point2, shoulder, const=0.7):
delta = point2 - point1
if delta[1] == 0 or delta[0] == 0:
return point2[0]
tan_theta = delta[0] / delta[1]
return point2[0] + tan_theta * shoulder * const
def get_align_keypoint(keypoint, is_source=True):
if is_source:
for i in range(11, 15):
keypoint[i, 1] = (keypoint[i, 1] + keypoint[30 - i, 1]) / 2
keypoint[30 - i, 1] = keypoint[i, 1]
else:
point1 = get_midpoint(keypoint[14, :], keypoint[16, :], keypoint[11, 0])
point3 = get_midpoint(keypoint[14, :], keypoint[16, :], keypoint[19, 0])
keypoint[14, :] = point1
keypoint[16, :] = point3
point1 = get_midpoint(keypoint[13, :], keypoint[17, :], keypoint[11, 0])
point3 = get_midpoint(keypoint[13, :], keypoint[17, :], keypoint[19, 0])
keypoint[13, :] = point1
keypoint[17, :] = point3
x = get_slanted_x(keypoint[0, :], keypoint[3, :], keypoint[5, 0] - keypoint[1, 0])
point1 = get_midpoint(keypoint[13, :], keypoint[17, :], x)
point2 = get_midpoint(keypoint[14, :], keypoint[16, :], x)
point3 = get_midpoint(keypoint[13, :], keypoint[17, :], keypoint[3, 0])
point4 = get_midpoint(keypoint[14, :], keypoint[16, :], keypoint[3, 0])
# x = get_slanted_x(keypoint[0, :], keypoint[3, :], keypoint[5, 0] - keypoint[1, 0], const=0.9)
# point5 = get_midpoint(keypoint[12, :], keypoint[18, :], x)
# point6 = get_midpoint(keypoint[12, :], keypoint[18, :], keypoint[3, 0])
align_keypoint = point2
for i in [2, 4, 6, 11, 12, 13, 14, 16, 17, 18, 19, 24, 3, 0]:
align_keypoint = np.concatenate((align_keypoint, keypoint[i:i + 1, :]), axis=0)
align_keypoint = np.concatenate((align_keypoint, point4), axis=0)
return keypoint, align_keypoint
cnt = 0
def visualize(keypoint, img_path='', output_root='./visualize_landmark', prefix='black'):
if not os.path.exists(output_root):
os.mkdir(output_root)
global cnt
cnt += 1
img = cv2.imread(img_path)
for i in range(keypoint.shape[0]):
cv2.circle(img, (int(keypoint[i, 0]), int(keypoint[i, 1])), 4, [255, 0, 170], thickness=-1)
cv2.imwrite(os.path.join(output_root, f'{prefix}_{cnt}.jpg'), img)
def H_cosine(cloth, logo, base, name=''):
cv2.imwrite(f'./cloth{name}.jpg', cloth)
cv2.imwrite(f'./logo_{name}.jpg', logo)
cloth_hsv = cv2.cvtColor(cloth, cv2.COLOR_BGR2HSV)
logo_hsv = cv2.cvtColor(logo, cv2.COLOR_BGR2HSV)
base_hsv = cv2.cvtColor(base, cv2.COLOR_BGR2HSV)
cloth_h_rad = cloth_hsv[:, :, 0] / 255 * np.pi * 2
logo_h_rad = logo_hsv[:, :, 0] / 255 * np.pi * 2
base_h_rad = base_hsv[:, :, 0] / 255 * np.pi * 2
return np.arccos(np.cos(cloth_h_rad - base_h_rad)), np.arccos(np.cos(logo_h_rad - base_h_rad))
def HS_cosine(cloth_hsv, logo_hsv, base_hsv, dim=0, name=''):
if dim == 0:
cloth_h_rad = cloth_hsv[:, :, dim] / 255 * np.pi * 2
logo_h_rad = logo_hsv[:, :, dim] / 255 * np.pi * 2
base_h_rad = base_hsv[:, :, dim] / 255 * np.pi * 2
return np.arccos(np.cos(cloth_h_rad - base_h_rad)), np.arccos(np.cos(logo_h_rad - base_h_rad))
print('base_hsv', base_hsv)
return np.abs(cloth_hsv[:, :, dim].astype(int) - base_hsv[:, :, dim].astype(int)) / 255, np.abs(logo_hsv[:, :, dim].astype(int) - base_hsv[:, :, dim].astype(int)) / 255
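# Note: wrapping the hue difference through arccos(cos(.)) maps it onto [0, pi], so
# hues on opposite sides of the 0/360-degree boundary (e.g. 350 and 10 degrees) come
# out ~20 degrees apart rather than ~340 degrees apart.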
def standardization(base, arr, mask):
    x_arr, y_arr, _ = np.nonzero(mask)
import numpy as np
import torch
import json
from improved_normal_inference.help_funs.file_io import writePLY, load_8bitImage, load_24bitNormal, load_scaled16bitImage, \
get_file_name, get_output_file_name
from improved_normal_inference.help_funs.mu import lightVisualization, cameraVisualization, depth2vertex
from improved_normal_inference import config
def generate_ply(file_idx, normal, data_path, param=0):
image_file, ply_file, json_file, depth_file, normal_file = get_file_name(file_idx, data_path)
f = open(json_file)
data = json.load(f)
f.close()
data['R'] = np.identity(3)
data['t'] = np.zeros(3)
image = load_8bitImage(image_file)
depth = load_scaled16bitImage(depth_file, data['minDepth'], data['maxDepth'])
vertex = depth2vertex(torch.tensor(depth).permute(2, 0, 1),
torch.tensor(data['K']),
torch.tensor(data['R']).float(),
torch.tensor(data['t']).float())
mask = np.sum(np.abs(normal), axis=2) != 0
cameraPoints = cameraVisualization()
for i in range(len(cameraPoints)):
cameraPoints[i] = np.array(data['R']).transpose() @ cameraPoints[i] / 4 - np.array(
            data['R']).transpose() @ np.array(data['t'])
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 31 12:15:28 2019
@author: RickFu
https://github.com/rickfu415/heatConduction
https://github.com/NelisW/heatConduction
see also Amar2006:
https://repository.lib.ncsu.edu/bitstream/handle/1840.16/2847/etd.pdf
http://web.engr.uky.edu/~acfd/egr537-lctrs.pdf
"""
# try:
# import cupy as np
# except ImportError:
import numpy as np
import pandas as pd
import parameter
import utility
import time
##########################################################################
def initialize(para):
""" Initialize key data
done only once at the start of solve
T: current step temperature
T0: last step temperature
TProfile: temperature results in time and space
F: B as right hand side of Ax = B
Jacobian: A as left had side of Ax = B
Return: a dictionary
"""
numberOfNode = int(para['numberOfNode'])
numOfTimeStep = int(para['numberOfTimeStep'])
Tic = para['Initial value']
T = np.full((numberOfNode, 1), Tic)
T0 = np.full((numberOfNode, 1), Tic)
TProfile = np.zeros((numberOfNode, numOfTimeStep + 1))
F = np.zeros((numberOfNode, 1))
    Jacobian = np.zeros((numberOfNode, numberOfNode))
    # gather the initialized arrays into the cache dictionary returned to the solver
    # (key names below are assumed; extend with logging/time bookkeeping as needed)
    cache = {'T': T, 'T0': T0, 'TProfile': TProfile, 'F': F, 'Jacobian': Jacobian}
    return cache
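# Illustrative sketch: `para` can be any mapping exposing the keys used above
# (values and the pd.Series wrapper are assumptions).
# para = pd.Series({'numberOfNode': 101, 'numberOfTimeStep': 500, 'Initial value': 300.0})
# cache = initialize(para)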
"""Prepare data for training, validation and testing."""
import os
import fnmatch
import extract_data
from numpy import array, concatenate, mean, split
from keras.utils import to_categorical
def create_samples(time_series, n_steps):
"""
Split a time series into samples of size n_steps.
Example :
time_series = [1, 2, 3, 4]
n_steps = 2
create_samples(time_series, n_steps) = [ [1, 2], [2, 3], [3, 4] ]
"""
# Split a univariable sequence into samples
X = list()
n = len(time_series)
for i in range(n):
# Find the end of this pattern
end_ix = i + n_steps
# Check if we are beyond the sequence
if end_ix > n - 1:
break
# Gather input and output parts of the pattern
X.append(time_series[i:end_ix])
    return array(X, dtype="uint16")
#!/usr/bin/env python
import numpy as np
from scipy.spatial import Delaunay
from . import pg_utilities
from . import imports_and_exports
"""
.. module:: generate_shapes
:synopsis: Contains code to generate placental shapes for generic placental models.
:synopsis:Contains code to generate placental shapes for generic placental models \n
(i.e. from literature measures without specific data from an individual
"""
def equispaced_data_in_ellipsoid(n, volume, thickness, ellipticity):
"""
:Function name: **equispaced_data_in_ellipsoid**
Generates equally spaced data points in an ellipsoid.
:inputs:
- n: Number of data points which we aim to generate
- volume: Volume of ellipsoid
- thickness: Placental thickness (z-dimension)
- ellipticity: Ratio of y to x axis dimensions
return:
- Edata: A nx3 array of datapoints, with each point being defined by its x-,y-, and z- coordinates
A way you might want to use me is:
>>> n = 100
>>> volume = 10
>>> thickness = 3
>>> ellipticity = 1.1
>>> equispaced_data_in_ellipsoid(n, volume, thickness, ellipticity)
    This will return 100 data points in an ellipsoid with z-axis thickness 3, volume 10, and with the y-axis dimension 1.1 times the x-axis dimension.
"""
data_spacing = (volume / n) ** (1.0 / 3.0)
print('Generating data ' + str(data_spacing) + ' apart')
radii = pg_utilities.calculate_ellipse_radii(volume, thickness, ellipticity)
z_radius = radii['z_radius']
x_radius = radii['x_radius']
y_radius = radii['y_radius']
    # Aiming to generate seed points that fill a cuboid encompassing the placental volume, then remove seed points that
# are external to the ellipsoid
num_data = 0 # zero the total number of data points
# Calculate the number of points that should lie in each dimension in a cube
nd_x = np.floor(2.0 * (x_radius + data_spacing) / data_spacing)
nd_y = np.floor(2.0 * (y_radius + data_spacing) / data_spacing)
nd_z = np.floor(2.0 * (z_radius + data_spacing) / data_spacing)
nd_x = int(nd_x)
nd_y = int(nd_y)
nd_z = int(nd_z)
# Set up edge node coordinates
x_coord = np.linspace(-x_radius - data_spacing / 2.0, x_radius + data_spacing / 2.0, nd_x)
y_coord = np.linspace(-y_radius - data_spacing / 2.0, y_radius + data_spacing / 2.0, nd_y)
z_coord = np.linspace(-z_radius - data_spacing / 2.0, z_radius + data_spacing / 2.0, nd_z)
# Use these vectors to form a unifromly spaced grid
data_coords = np.vstack(np.meshgrid(x_coord, y_coord, z_coord)).reshape(3, -1).T
# Store nodes that lie within ellipsoid
datapoints = np.zeros((nd_x * nd_y * nd_z, 3))
for i in range(len(data_coords)): # Loop through grid
coord_check = pg_utilities.check_in_ellipsoid(data_coords[i][0], data_coords[i][1], data_coords[i][2], x_radius,
y_radius, z_radius)
if coord_check is True: # Has to be strictly in the ellipsoid
datapoints[num_data, :] = data_coords[i, :] # add to data array
num_data = num_data + 1
datapoints.resize(num_data, 3,refcheck=False) # resize data array to correct size
print('Data points within ellipsoid allocated. Total = ' + str(len(datapoints)))
return datapoints
def uniform_data_on_ellipsoid(n, volume, thickness, ellipticity, random_seed):
"""
:Function name: **uniform_data_on_ellipsoid**
Generates equally spaced data points on the positive z-surface of an ellipsoid
:inputs:
- n: number of data points which we aim to generate
- volume: volume of ellipsoid
- thickness: placental thickness (z-dimension)
    - ellipticity: ratio of y to x axis dimensions
    - random_seed: seed for the random number generator, so that data placement is reproducible
:return:
- chorion_data: A nx3 array of datapoints, with each point being defined by its x-,y-, and z- coordinates
A way you might want to use me is:
>>> n = 100
>>> volume = 10
>>> thickness = 3
    >>> ellipticity = 1.1
    >>> random_seed = 0
    >>> uniform_data_on_ellipsoid(n, volume, thickness, ellipticity, random_seed)
This will return 100 data points on the positive z-surface ellipse with z-axis thickness 3, volume 10,
and with the y-axis dimension 1.1 times the x-axis dimension.
"""
radii = pg_utilities.calculate_ellipse_radii(volume, thickness, ellipticity)
z_radius = radii['z_radius']
x_radius = radii['x_radius']
y_radius = radii['y_radius']
area_estimate = np.pi * x_radius * y_radius
data_spacing = 0.85 * np.sqrt(area_estimate / n)
chorion_data = np.zeros((n, 3))
np.random.seed(random_seed)
generated_seed = 0
acceptable_attempts = n * 1000 # try not to have too many failures
attempts = 0
while generated_seed < n and attempts < acceptable_attempts:
# generate random x-y coordinates between negative and positive radii
new_x = np.random.uniform(-x_radius, x_radius)
new_y = np.random.uniform(-y_radius, y_radius)
# check if new coordinate is on the ellipse
        if ((new_x / x_radius) ** 2 + (new_y / y_radius) ** 2) < 1:  # (x, y) lies inside the ellipse, so a surface point exists above it
if generated_seed == 0:
generated_seed = generated_seed + 1
new_z = pg_utilities.z_from_xy(new_x, new_y, x_radius, y_radius, z_radius)
chorion_data[generated_seed - 1][:] = [new_x, new_y, new_z]
else:
reject = False
                for j in range(0, generated_seed):
                    distance = (chorion_data[j][0] - new_x) ** 2 + (chorion_data[j][1] - new_y) ** 2
distance = np.sqrt(distance)
if distance <= data_spacing:
reject = True
break
if reject is False:
generated_seed = generated_seed + 1
new_z = pg_utilities.z_from_xy(new_x, new_y, x_radius, y_radius, z_radius)
chorion_data[generated_seed - 1][:] = [new_x, new_y, new_z]
attempts = attempts + 1
chorion_data.resize(generated_seed, 3) # resize data array to correct size
print('Data points on ellipsoid allocated. Total = ' + str(len(chorion_data)) )
return chorion_data
def gen_rect_cover_ellipsoid(volume, thickness, ellipticity, x_spacing, y_spacing, z_spacing):
# Generates equally spaced data nodes and elements and constructs a rectangular 'mesh' that covers the space that is
# made up of an ellipsoidal placenta
# volume=volume of ellipsoid
# thickness = placental thickness (z-dimension)
# ellipticity = ratio of y to x axis dimensions
# X,Y,Z spacing is the number of elements required in each of the x, y z directions
# Calculate the dimensions of the ellipsoid
radii = pg_utilities.calculate_ellipse_radii(volume, thickness, ellipticity)
z_radius = radii['z_radius']
x_radius = radii['x_radius']
y_radius = radii['y_radius']
# z height of ellipsoid is 2* zradius
    # We want the number of nodes to cover the height and have the prescribed spacing
nnod_x = int(np.ceil(x_radius * 2.0 / x_spacing)) + 1
x_width = x_spacing * (nnod_x - 1)
nnod_y = int(np.ceil(y_radius * 2.0 / y_spacing)) + 1
y_width = y_spacing * (nnod_y - 1)
nnod_z = int(np.ceil(z_radius * 2.0 / z_spacing)) + 1
z_width = z_spacing * (nnod_z - 1)
node_loc = gen_rectangular_node(x_width, y_width, z_width, nnod_x, nnod_y, nnod_z)
# Generating the element connectivity of each cube element, 8 nodes for each 3D cube element
elems = cube_mesh_connectivity(nnod_x, nnod_y, nnod_z)
return {'nodes': node_loc, 'elems': elems, 'total_nodes': nnod_x * nnod_y * nnod_z,
'total_elems': (nnod_x - 1) * (nnod_y - 1) * (nnod_z - 1)}
def gen_ellip_mesh_tet(volume, thickness, ellipticity, n):
""" Generates ellipsoid tetrahedral mesh for 3D problems
Inputs:
- volume: volume of placental ellipsoid
- thickness: placental thickness (z-dimension)
- ellipticity: ratio of y to x axis dimensions
- n: number of datapoints generated to create the mesh
Returns:
- nodes: nodes location of mesh
- elems: element connectivity of mesh (tetrahedral element)
- node_array: array of nodes
- element_array: array of elements
"""
radii = pg_utilities.calculate_ellipse_radii(volume, thickness, ellipticity)
z_radius = radii['z_radius']
x_radius = radii['x_radius']
y_radius = radii['y_radius']
nodeSpacing = (n / (2 * x_radius * 2 * y_radius * 2 * z_radius)) ** (1. / 3)
nnod_x = 2 * x_radius * nodeSpacing
nnod_y = 2 * y_radius * nodeSpacing
nnod_z = 2 * z_radius * nodeSpacing
nodes = gen_rectangular_node(x_radius * 2, y_radius * 2, z_radius * 2, nnod_x, nnod_y, nnod_z)
# nodes inside the ellipsoid
ellipsoid_node = np.zeros((len(nodes), 3))
count = 0
for nnode in range(0, len(nodes)):
coord_point = nodes[nnode][0:3]
inside = pg_utilities.check_in_on_ellipsoid(coord_point[0], coord_point[1], coord_point[2], x_radius, y_radius,
z_radius)
if inside:
ellipsoid_node[count, :] = coord_point[:]
count = count + 1
ellipsoid_node.resize(count, 3,refcheck=False)
xyList = ellipsoid_node[:, [0, 1]]
xyListUnique = np.vstack({tuple(row) for row in xyList})
# looking for z_coordinate of surface nodes
for xyColumn in xyListUnique:
xyNodes = np.where(np.all(xyList == xyColumn, axis=1))[0]
if len(xyNodes) > 1:
x_coord = ellipsoid_node[xyNodes[0], 0]
y_coord = ellipsoid_node[xyNodes[0], 1]
ellipsoid_node[xyNodes[len(xyNodes) - 1], 2] = pg_utilities.z_from_xy(x_coord, y_coord, x_radius, y_radius,
z_radius)
ellipsoid_node[xyNodes[0], 2] = -1 * (
pg_utilities.z_from_xy(x_coord, y_coord, x_radius, y_radius, z_radius))
# generate tetrahedral mesh
pyMesh = Delaunay(ellipsoid_node)
# Build arrays to pass into openCMISS conversion:
node_loc = pyMesh.points
temp_elems = pyMesh.simplices
# CHECK ELEMENTS FOR 0 VOLUME:
min_vol = 0.00001
index = 0
indexArr = []
for element in temp_elems:
x_coor = []
y_coor = []
z_coor = []
for node in element:
x_coor.append(node_loc[node][0])
y_coor.append(node_loc[node][1])
z_coor.append(node_loc[node][2])
vmat = np.vstack((x_coor, y_coor, z_coor, [1.0, 1.0, 1.0, 1.0])) # matrix of coor of element
elem_volume = (1 / 6.0) * abs(np.linalg.det(vmat)) # volume of each tetrahedral element
# if volume is not zero
if elem_volume > min_vol:
indexArr.append(index)
index = index + 1
# update arrays without 0 volume elements, to pass into openCMISS
elems = temp_elems[indexArr, :]
for i in range(len(elems)):
elems[i] = [x + 1 for x in elems[i]]
element_array = range(1, len(elems) + 1)
node_array = range(1, len(node_loc) + 1)
return {'nodes': node_loc, 'elems': elems, 'element_array': element_array, 'node_array': node_array,
'nodeSpacing': nodeSpacing}
def gen_rectangular_node(x_width, y_width, z_width, nnod_x, nnod_y, nnod_z):
# Create linspaces for x y and z coordinates
x = np.linspace(-x_width / 2.0, x_width / 2.0, int(nnod_x)) # linspace for x axis
y = np.linspace(-y_width / 2.0, y_width / 2.0, int(nnod_y)) # linspace for y axis
z = np.linspace(-z_width / 2.0, z_width / 2.0, int(nnod_z)) # linspace for z axis
node_loc_temp = np.vstack(np.meshgrid(y, z, x)).reshape(3, -1).T # generate nodes for rectangular mesh
node_loc = np.zeros((len(node_loc_temp), 3))
for i in range(0, len(node_loc)):
node_loc[i][0] = node_loc_temp[i][2]
node_loc[i][1] = node_loc_temp[i][0]
node_loc[i][2] = node_loc_temp[i][1]
return node_loc
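# Note: gen_rectangular_node returns nodes ordered with x varying fastest, then y, then z.
# This is the ordering that cube_mesh_connectivity and cube_mesh_connectivity_quadratic below
# rely on when they compute node numbers as i + nnod_x*j + nnod_x*nnod_y*k.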
def gen_rectangular_mesh2(nel_x, nel_y, nel_z, xdim, ydim, zdim, element_type):
    # generates a rectangular mesh of defined dimensions using either linear or quadratic elements
if element_type == 1: # linear element
nnod_x = int(nel_x + 1)
nnod_y = int(nel_y + 1)
nnod_z = int(nel_z + 1)
elif element_type == 2: # quadratic element
nnod_x = int((nel_x * 2) + 1)
nnod_y = int((nel_y * 2) + 1)
nnod_z = int((nel_z * 2) + 1)
node = gen_rectangular_node(xdim, ydim, zdim, nnod_x, nnod_y, nnod_z) # getting nodes
if element_type == 1: # linear element
elems = cube_mesh_connectivity(nnod_x, nnod_y, nnod_z) # getting elem connectivity
elif element_type == 2: # quadratic element
elems = cube_mesh_connectivity_quadratic(nel_x, nel_y, nel_z, nnod_x, nnod_y,
nnod_z) # getting element connectivity
element_array = range(1, len(elems) + 1)
node_array = range(1, len(node) + 1)
if element_type == 2:
surfacenodes = identify_surface_node_quad(nel_x, nel_y, nel_z)
else:
print("This element type has no implemented surface node definition")
surfacenodes = 0
return {'nodes': node, 'elems': elems, 'element_array': element_array,
'node_array': node_array, 'surface_nodes': surfacenodes}
def gen_3d_ellipsoid(nel_x, nel_y, nel_z, volume, thickness, ellipticity, element_type):
""" Generates ellipsoid placental mesh to solve 3D problems (note this is not a quality structured mesh)
Inputs:
- nel: number of element in x,y,z axis , the more nel, the rounder the mesh
- volume: volume of placental ellipsoid
- thickness: placental thickness (z-dimension)
- ellipticity: ratio of y to x axis dimensions
Returns:
- placental_node_coor: nodes location of mesh
    - placental_el_con: element connectivity of mesh (hexahedral elements)
- node_array: array of nodes
- element_array: array of elements
"""
# creating cube between -1 and 1 with n number of element
# cubelength=2
if element_type == 1: # linear element
nnod_x = int(nel_x + 1)
nnod_y = int(nel_y + 1)
nnod_z = int(nel_z + 1)
elif element_type == 2: # quadratic element
nnod_x = int((nel_x * 2) + 1)
nnod_y = int((nel_y * 2) + 1)
nnod_z = int((nel_z * 2) + 1)
radii = pg_utilities.calculate_ellipse_radii(volume, thickness, ellipticity)
z_radius = radii['z_radius']
x_radius = radii['x_radius']
y_radius = radii['y_radius']
cube_node = gen_rectangular_node(2 * x_radius, 2 * y_radius, 2 * z_radius, nnod_x, nnod_y, nnod_z)
if element_type == 1: # linear element
cube_elems = cube_mesh_connectivity(nnod_x, nnod_y, nnod_z) # getting elem connectivity
elif element_type == 2: # quadratic element
cube_elems = cube_mesh_connectivity_quadratic(nel_x, nel_y, nel_z, nnod_x, nnod_y,
nnod_z) # getting element connectivity
ellipsoid_coor = np.zeros((len(cube_node), 3))
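    # Map each cube node onto the ellipsoid using a cube-to-sphere style transform,
    # x' = x*sqrt(1 - y^2/2 - z^2/2 + y^2*z^2/3), with each term normalised by the
    # corresponding ellipsoid radii so the mapped surface matches the requested dimensions.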
for ii in range(0, len(cube_node)):
ellipsoid_coor[ii, 0] = cube_node[ii, 0] * np.sqrt(1.0 - cube_node[ii, 1] ** 2 / (2.0 * y_radius ** 2) -
cube_node[ii, 2] ** 2 / (2.0 * z_radius ** 2) + cube_node[
ii, 1] ** 2 *
cube_node[ii, 2] ** 2 / (
3.0 * y_radius ** 2 * z_radius ** 2)) # for x_coor
ellipsoid_coor[ii, 1] = cube_node[ii, 1] * np.sqrt(1.0 - cube_node[ii, 0] ** 2 / (2.0 * x_radius ** 2) -
cube_node[ii, 2] ** 2 / (2.0 * z_radius ** 2) + cube_node[
ii, 0] ** 2 * cube_node[ii, 2] ** 2
/ (3.0 * x_radius ** 2 * z_radius ** 2)) # for y_coor
ellipsoid_coor[ii, 2] = cube_node[ii, 2] * np.sqrt(1.0 - cube_node[ii, 1] ** 2 / (2.0 * y_radius ** 2) -
cube_node[ii, 0] ** 2 / (2.0 * x_radius ** 2) + cube_node[
ii, 1] ** 2 * cube_node[ii, 0] ** 2
/ (3.0 * y_radius ** 2 * x_radius ** 2)) # for z_coor
element_array = range(1, len(cube_elems) + 1)
node_array = range(1, len(ellipsoid_coor) + 1)
if element_type == 2:
surfacenodes = identify_surface_node_quad(nel_x, nel_y, nel_z)
else:
print("This element type has no implemented surface node definition")
surfacenodes = 0
return {'placental_node_coor': ellipsoid_coor, 'placental_el_con': cube_elems, 'element_array': element_array,
'node_array': node_array, 'surface_nodes': surfacenodes}
def cube_mesh_connectivity(nnod_x, nnod_y, nnod_z):
"""Generates element connectivity in cube mesh
Inputs:
- nnod_x:number of node in x axis
- nnod_y:number of node in y axis
- nnod_z:number of node in z axis
Outputs:
- elems: array of element connectivity
"""
num_elems = (nnod_x - 1) * (nnod_y - 1) * (nnod_z - 1)
elems = np.zeros((num_elems, 9),
dtype=int) # this stores first element number and then the nodes of each mesh element
element_number = 0
ne = 0
# loop through elements
for k in range(1, nnod_z):
for j in range(1, nnod_y):
for i in range(1, nnod_x):
elems[ne][0] = ne # store element number
elems[ne][1] = (i - 1) + (nnod_x) * (j - 1) + nnod_x * nnod_y * (k - 1) # lowest coordinates
elems[ne][2] = elems[ne][1] + 1 # add one in x
elems[ne][3] = elems[ne][1] + nnod_x # go through x and find first in y
elems[ne][4] = elems[ne][3] + 1 # add one in y
elems[ne][5] = elems[ne][1] + nnod_x * nnod_y # same as 1 -4 but at higher z -coord
elems[ne][6] = elems[ne][2] + nnod_x * nnod_y
elems[ne][7] = elems[ne][3] + nnod_x * nnod_y
elems[ne][8] = elems[ne][4] + nnod_x * nnod_y
ne = ne + 1
return elems
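# Worked example (sketch): for a single-element grid, cube_mesh_connectivity(2, 2, 2) returns one
# row, [0, 0, 1, 2, 3, 4, 5, 6, 7]: the element number followed by its eight corner nodes,
# starting from the lowest x/y/z corner and ending at the highest.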
def cube_mesh_connectivity_quadratic(nel_x, nel_y, nel_z, nnod_x, nnod_y, nnod_z):
"""Generates element connectivity in quadratic cube mesh
Inputs:
- nnod_x:number of node in x axis
- nnod_y:number of node in y axis
- nnod_z:number of node in z axis
Outputs:
- elems: array of element connectivity in quadratic
"""
num_elems = nel_x * nel_y * nel_z
elems = np.zeros((num_elems, 28), dtype=int)
element_number = 0
ne = 0
    # Loop through elements (stepping by two nodes in each direction for quadratic elements)
for k in range(1, nnod_z, 2):
for j in range(1, nnod_y, 2):
for i in range(1, nnod_x, 2):
# 1st layer
elems[ne][0] = ne
elems[ne][1] = (i - 1) + (nnod_x) * (j - 1) + nnod_x * nnod_y * (k - 1) # 1st node
elems[ne][2] = (i - 1) + (nnod_x) * (j - 1) + nnod_x * nnod_y * (k - 1) + 1 # right subsequent node
elems[ne][3] = (i - 1) + (nnod_x) * (j - 1) + nnod_x * nnod_y * (k - 1) + 2 # right subsequent node
elems[ne][4] = elems[ne][1] + nnod_x # 1st node in another y layer
elems[ne][5] = elems[ne][1] + nnod_x + 1 # right subsequent node
elems[ne][6] = elems[ne][1] + nnod_x + 2 # right subsequent node
elems[ne][7] = elems[ne][1] + 2 * (nnod_x) # 1st node in another y layer
elems[ne][8] = elems[ne][1] + 2 * (nnod_x) + 1 # right subsequent node
elems[ne][9] = elems[ne][1] + 2 * (nnod_x) + 2 # right subsequent node
# 2nd layer
elems[ne][10] = elems[ne][1] + nnod_x * nnod_y # same in one z layer
elems[ne][11] = elems[ne][2] + nnod_x * nnod_y
elems[ne][12] = elems[ne][3] + nnod_x * nnod_y
elems[ne][13] = elems[ne][4] + nnod_x * nnod_y
elems[ne][14] = elems[ne][5] + nnod_x * nnod_y
elems[ne][15] = elems[ne][6] + nnod_x * nnod_y
elems[ne][16] = elems[ne][7] + nnod_x * nnod_y
elems[ne][17] = elems[ne][8] + nnod_x * nnod_y
elems[ne][18] = elems[ne][9] + nnod_x * nnod_y
                # third layer
elems[ne][19] = elems[ne][1] + nnod_x * nnod_y * 2 # same in another z layer
elems[ne][20] = elems[ne][2] + nnod_x * nnod_y * 2
elems[ne][21] = elems[ne][3] + nnod_x * nnod_y * 2
elems[ne][22] = elems[ne][4] + nnod_x * nnod_y * 2
elems[ne][23] = elems[ne][5] + nnod_x * nnod_y * 2
elems[ne][24] = elems[ne][6] + nnod_x * nnod_y * 2
elems[ne][25] = elems[ne][7] + nnod_x * nnod_y * 2
elems[ne][26] = elems[ne][8] + nnod_x * nnod_y * 2
elems[ne][27] = elems[ne][9] + nnod_x * nnod_y * 2
ne = ne + 1
return elems
def identify_surface_node_quad(nel_x, nel_y, nel_z):
    """Generates the collection of nodes that are on the surface of a quadratic placental mesh
Inputs:
- nel_x:number of elem in x axis
- nel_y:number of elem in y axis
- nel_z:number of elem in z axis
Outputs:
- surfacenode: collection of nodes on the surface of placental mesh
"""
nnod_x = int((nel_x * 2) + 1) # number of nodes in x axis
nnod_y = int((nel_y * 2) + 1) # number of nodes in y axis
nnod_z = int((nel_z * 2) + 1) # number of nodes in z axis
# For left and right surface
    sIEN = np.zeros((9, nel_y * nel_z), dtype=int)  # to store surface individual element nodes (sIEN)
e = 0
for k in range(1, nnod_x * nnod_y * (nnod_z - 1), (nnod_x * nnod_y) * 2): # go up
for j in range(1, nnod_x * (nnod_y - 1), 2 * nnod_x): # go left
sIEN[0, e] = j + (k - 1) # 1st node
sIEN[1, e] = sIEN[0, e] + (nnod_x) * (nnod_y) # 2nd node
sIEN[2, e] = sIEN[1, e] + (nnod_x) * (nnod_y) # 3rd node
sIEN[3, e] = sIEN[0, e] + nnod_x # 4th node
sIEN[4, e] = sIEN[1, e] + nnod_x # 5th node
sIEN[5, e] = sIEN[2, e] + nnod_x # 6th node
sIEN[6, e] = sIEN[3, e] + nnod_x # 7th node
sIEN[7, e] = sIEN[4, e] + nnod_x # 8th node
sIEN[8, e] = sIEN[5, e] + nnod_x # 9th node
e = e + 1
left = np.unique(sIEN) # collection of nodes of left surface
right = np.unique(sIEN.T + (nnod_x - 1)) # collection of nodes on right surface
# For front and back surface
sIEN = np.zeros((9, nel_x * nel_z), dtype=int)
e = 0
for k in range(1, nnod_x * nnod_y * (nnod_z - 2), (nnod_x * nnod_y) * 2): # go up
for i in range(1, nnod_x - 1, 2): # go right
sIEN[0, e] = i + (k - 1)
sIEN[1, e] = sIEN[0, e] + 1
sIEN[2, e] = sIEN[0, e] + 2
sIEN[3, e] = sIEN[0, e] + (nnod_x * nnod_y)
sIEN[4, e] = sIEN[3, e] + 1
sIEN[5, e] = sIEN[3, e] + 2
sIEN[6, e] = sIEN[3, e] + (nnod_x * nnod_y)
sIEN[7, e] = sIEN[6, e] + 1
sIEN[8, e] = sIEN[6, e] + 2
e = e + 1
front = np.unique(sIEN) # collection of nodes on front surface
back = np.unique(sIEN.T + (nnod_x * (nnod_y - 1))) # collection of nodes on back surface
# For top and bottom surface
sIEN = np.zeros((9, nel_x * nel_y), dtype=int)
e = 0
for j in range(1, nnod_x * (nnod_y - 1), nnod_x * 2): # go up
for i in range(1, nnod_x - 1, 2): # go back
sIEN[0, e] = i + (j - 1)
sIEN[1, e] = sIEN[0, e] + 1
sIEN[2, e] = sIEN[0, e] + 2
sIEN[3, e] = sIEN[0, e] + nnod_x
sIEN[4, e] = sIEN[3, e] + 1
sIEN[5, e] = sIEN[3, e] + 2
sIEN[6, e] = sIEN[3, e] + nnod_x
sIEN[7, e] = sIEN[6, e] + 1
sIEN[8, e] = sIEN[6, e] + 2
e = e + 1
bottom = np.unique(sIEN) # collection of nodes on bottom surface
top = np.unique(sIEN.T + (nnod_x * nnod_y) * (nnod_z - 1)) # collection of nodes on top surface
surfacenode = np.hstack((front, back, left, right, bottom, top))
surfacenode = np.unique(surfacenode) # collection of surface nodes from all surface
return surfacenode
def identify_node_from_coord(nodes, filename):
# reading in the node location
xyz = open(filename, 'r')
xyz_coor = xyz.readlines() # readlines
startLines = range(0, len(xyz_coor))
for i in range(len(xyz_coor)):
xyz_coor[i] = xyz_coor[i].split()
xyzList = []
for i in startLines:
targetpoint = []
targetpoint.append(float(xyz_coor[i][0])) # x coor
targetpoint.append((float(xyz_coor[i][1]))) # y coor
        targetpoint.append(float(xyz_coor[i][2]))  # z coor
xyzList.append(targetpoint)
xyz.close()
    node_list = np.zeros(len(xyzList), dtype=int)
    for i in range(0, len(xyzList)):
        mindist = 100000
        for j in range(0, len(nodes)):
            dist = np.sqrt((xyzList[i][0] - nodes[j][0]) ** 2 + (xyzList[i][1] - nodes[j][1]) ** 2 +
                           (xyzList[i][2] - nodes[j][2]) ** 2)
            if dist < mindist:  # keep the closest mesh node to each target point
                mindist = dist
                node_list[i] = j
    return node_list
def identify_vessel_node(ellipsoid_coor, surfacenode, stem_file, sa_radius, dv_radius, volume,thickness, ellipticity):
"""Generates array of spiral artery nodes and decidual vein nodes. Spiral artery nodes are mapped with stem villi.
Inputs:
- ellipsoid_coor:coordinate of nodes of placental mesh
- surfacenode:array of surface nodes
    - stem_file: txt file that describes stem villi locations
    - sa_radius: radius within which surface nodes are mapped to a spiral artery
    - dv_radius: radius within which surface nodes are mapped to a decidual vein
    - volume: volume of placental ellipsoid
    - thickness: placental thickness (z-dimension)
    - ellipticity: ratio of y to x axis dimensions
Outputs:
- spiral_array: array of spiral artery nodes
    - decidual_array: array of decidual vein nodes
- vesselnode: array of both spiral and decidual nodes
- surfnode_ex_vessel: array of surface node excluding vessel nodes
"""
radii = pg_utilities.calculate_ellipse_radii(volume, thickness, ellipticity)
z_radius = radii['z_radius']
x_radius = radii['x_radius']
y_radius = radii['y_radius']
xyList = np.zeros((len(surfacenode), 4))
count = 0
for i in range(0, len(surfacenode)): # taking only x and y coordinates
        if ellipsoid_coor[surfacenode[i] - 1, 3] < 0:  # keep upper surface nodes only, as this is where the vessels reside
# location from upper surface nodes only
xyList[count, 0] = ellipsoid_coor[surfacenode[i] - 1, 0] #node number
xyList[count, 1] = ellipsoid_coor[surfacenode[i] - 1, 1] #x-coordinate
xyList[count, 2] = ellipsoid_coor[surfacenode[i] - 1, 2] #y-coordinate
xyList[count, 3] = ellipsoid_coor[surfacenode[i] - 1, 3] #z-coordinate
count = count + 1
xyList = xyList[0:count, :]
surfnode_ex_vessel = np.copy(surfacenode)
vesselnode_temp = np.vstack({tuple(row) for row in xyList}) #nodes that might be vessels
# reading in the stem vessel to map the spiral artery location
stem_xyList = imports_and_exports.import_stemxy(stem_file)['stem_xy']
print('Total stem read = '+ str(len(stem_xyList)))
vessel_mapped_stem = stem_xyList # this is the x,y location where we want to put spiral artery
    spiral_array = np.zeros((len(xyList)), dtype=int)  # store the node number of spiral artery
decidual_array = np.zeros((len(xyList)), dtype=int) # store the node number of decidual vein
check = ellipsoid_coor[:, 0:2]
np.random.seed(0)
sa_nodes = 0
dv_nodes = 0
for i in range(0, len(vessel_mapped_stem)): # for each blood vessel,Cycle through to find closest nodes
closest_node = 0
for nodeX in vesselnode_temp:
distance=np.sqrt((vessel_mapped_stem[i][0] - nodeX[1]) ** 2 + (
vessel_mapped_stem[i][1] - nodeX[2]) ** 2 ) # distance from the nodes
if(distance < sa_radius):
#print('SA Node', int(nodeX[0]),nodeX[1],nodeX[2],vessel_mapped_stem[i][0],vessel_mapped_stem[i][1])
arterynode = nodeX[0]
A = np.where(vesselnode_temp == arterynode)
vesselnode_temp = np.delete(vesselnode_temp, A[0], axis=0)
A2 = np.where(surfnode_ex_vessel == int(arterynode))
surfnode_ex_vessel = np.delete(surfnode_ex_vessel, A2)
spiral_array[sa_nodes] = arterynode
sa_nodes = sa_nodes +1
#print(closest_node[0])
#arterynode = closest_node[0]
#A = np.where(vesselnode_temp == arterynode)
#vesselnode_temp = np.delete(vesselnode_temp, A[0], axis=0)
#A2 = np.where(surfnode_ex_vessel == int(arterynode))
#surfnode_ex_vessel = np.delete(surfnode_ex_vessel, A2)
#spiral_array[i] = arterynode
#sa_nodes = sa_nodes +1
    # Decidual veins are placed after arteries to make sure we don't take up any spots that arteries would otherwise have been assigned
for i in range(0, len(vessel_mapped_stem)): #need same number of arteries as veins
        V = np.random.choice(len(vesselnode_temp))  # choose a random candidate; artery nodes have already been removed from this list
vessel_location = vesselnode_temp[V]
for nodeX in vesselnode_temp:
distance=np.sqrt((vessel_location[1] - nodeX[1]) ** 2 + (
vessel_location[2] - nodeX[2]) ** 2 ) # distance from the nodes
dv_from_centre = np.sqrt(nodeX[1] ** 2 + nodeX[2] ** 2 )
if(distance < dv_radius and dv_from_centre < 0.9*x_radius):
#print('DV Node', int(nodeX[0]))
veinnode = nodeX[0]
V = np.where(vesselnode_temp == veinnode)
vesselnode_temp = np.delete(vesselnode_temp, V[0], axis=0)
V2 = np.where(surfnode_ex_vessel == int(veinnode))
surfnode_ex_vessel = np.delete(surfnode_ex_vessel, V2)
decidual_array[dv_nodes] = veinnode
dv_nodes = dv_nodes +1
#veinnode = vesselnode_temp[V][0]
#vesselnode_temp = np.delete(vesselnode_temp, V, axis=0)
#V2 = np.where(surfnode_ex_vessel == int(veinnode))
#surfnode_ex_vessel = np.delete(surfnode_ex_vessel, V2)
#decidual_array[i] = veinnode
#dv_nodes = dv_nodes+1
spiral_array = np.resize(spiral_array,sa_nodes)
print('SAs found = ' + str(sa_nodes))
decidual_array = np.resize(decidual_array, dv_nodes)
#print('dec',decidual_array)
return {'spiral_array': spiral_array, 'decidual_array': decidual_array, 'surfnode_ex_vessel': surfnode_ex_vessel,
'num_sa': len(stem_xyList)}
def identify_vessel_node_test_mesh(ellipsoid_coor, surfacenode,volume, thickness, ellipticity):
"""Generates array of spiral artery nodes and decidual vein nodes. Spiral artery nodes are mapped with stem villi.
Inputs:
- ellipsoid_coor:coordinate of nodes of placental mesh
- surfacenode:array of surface nodes
    - volume: volume of placental ellipsoid
    - thickness: placental thickness (z-dimension)
    - ellipticity: ratio of y to x axis dimensions
Outputs:
- spiral_array: array of spiral artery nodes
    - decidual_array: array of decidual vein nodes
- vesselnode: array of both spiral and decidual nodes
- surfnode_ex_vessel: array of surface node excluding vessel nodes
"""
sa_radius = 3.7 / 2.0
dv_radius = sa_radius
radii = pg_utilities.calculate_ellipse_radii(volume, thickness, ellipticity)
z_radius = radii['z_radius']
x_radius = radii['x_radius']
y_radius = radii['y_radius']
xyList = np.zeros((len(surfacenode), 4))
count = 0
for i in range(0, len(surfacenode)): # taking only x and y coordinates
        if ellipsoid_coor[surfacenode[i] - 1, 3] > 0:  # keep upper surface nodes only, as this is where the vessels reside
# location from upper surface nodes only
xyList[count, 0] = ellipsoid_coor[surfacenode[i] - 1, 0] # node number
xyList[count, 1] = ellipsoid_coor[surfacenode[i] - 1, 1] # x-coordinate
xyList[count, 2] = ellipsoid_coor[surfacenode[i] - 1, 2] # y-coordinate
xyList[count, 3] = ellipsoid_coor[surfacenode[i] - 1, 3] # z-coordinate
count = count + 1
xyList = xyList[0:count, :]
surfnode_ex_vessel = np.copy(surfacenode)
vesselnode_temp = np.vstack({tuple(row) for row in xyList}) # nodes that might be vessels
# reading in the stem vessel to map the spiral artery location
vessel_mapped_stem = [-9.822741e+00, 1.550285e+01]
vessel_mapped_stem_v = [1.155144e+01, 1.435972e+01]
    spiral_array = np.zeros((len(xyList)), dtype=int)  # store the node number of spiral artery
decidual_array = np.zeros((len(xyList)), dtype=int) # store the node number of decidual vein
check = ellipsoid_coor[:, 0:2]
np.random.seed(0)
sa_nodes = 0
dv_nodes = 0
for i in range(0, len(vessel_mapped_stem)): # for each blood vessel,Cycle through to find closest nodes
for nodeX in vesselnode_temp:
distance = np.sqrt((vessel_mapped_stem[0] - nodeX[1]) ** 2 + (
vessel_mapped_stem[1] - nodeX[2]) ** 2) # distance from the nodes
if (distance < sa_radius):
#print('SA Node', int(nodeX[0]))
arterynode = nodeX[0]
A = np.where(vesselnode_temp == arterynode)
vesselnode_temp = np.delete(vesselnode_temp, A[0], axis=0)
A2 = np.where(surfnode_ex_vessel == int(arterynode))
surfnode_ex_vessel = np.delete(surfnode_ex_vessel, A2)
spiral_array[sa_nodes] = arterynode
sa_nodes = sa_nodes + 1
    # Decidual veins are placed after arteries to make sure we don't take up any spots that arteries would otherwise have been assigned
for i in range(0, len(vessel_mapped_stem_v)): # need same number of arteries as veins
for nodeX in vesselnode_temp:
distance = np.sqrt((vessel_mapped_stem_v[0] - nodeX[1]) ** 2 + (
vessel_mapped_stem_v[1] - nodeX[2]) ** 2) # distance from the nodes
if (distance < dv_radius):
#print('DV Node', int(nodeX[0]))
veinnode = nodeX[0]
V = np.where(vesselnode_temp == veinnode)
vesselnode_temp = np.delete(vesselnode_temp, V[0], axis=0)
V2 = np.where(surfnode_ex_vessel == int(veinnode))
surfnode_ex_vessel = np.delete(surfnode_ex_vessel, V2)
decidual_array[dv_nodes] = veinnode
dv_nodes = dv_nodes + 1
spiral_array = np.resize(spiral_array, sa_nodes)
decidual_array = np.resize(decidual_array, dv_nodes)
#print('dec', decidual_array)
return {'spiral_array': spiral_array, 'decidual_array': decidual_array, 'surfnode_ex_vessel': surfnode_ex_vessel}
def gen_3d_ellipsoid_structured(size_el, volume, thickness, ellipticity, squareSizeRatio, circle_prop, el_type, debug):
""" Generates a structured ellipsoid mesh to solve 3D problems. The aim is for a quality computational mesh that
has as regular elements as possible, within the constraints of typical dimensions of ellipsoids representing the
volume of the placenta. This code is derived from an openCMISS example written by <NAME>, which is used to
simulate fluid structure interactions in a cylinder. Note that this hasn't been tested on linear elements
Inputs:
- size_el: approximate dimension of an element in each axis that we are aiming for
- volume: volume of placental ellipsoid
- thickness: placental thickness (z-dimension)
- ellipticity: ratio of y to x axis dimension
- squareSizeRatio: ratio of square in mesh cross-section to radius
- circle_prop: proportion of ellipse in x-y that is made up by 'plate' of nodes and elements
    - el_type: element type (1 for linear, 2 for quadratic elements)
    - debug (True or False) allows you to print certain statements to screen
Returns:
- placental_node_coor: nodes location of mesh
    - placental_el_con: element connectivity of mesh (hexahedral elements)
- node_array: array of nodes
- element_array: array of elements
"""
radii = pg_utilities.calculate_ellipse_radii(volume, thickness, ellipticity)
ellipsoidRadius_z = radii['z_radius']
ellipsoidRadius_x = radii['x_radius']
ellipsoidRadius_y = radii['y_radius']
if (debug):
        print('Solving a model with x-radius: ' + str(ellipsoidRadius_x) + ' y-radius: ' + str(
            ellipsoidRadius_y) + ' z-radius: ' + str(ellipsoidRadius_z))
    nel_x = int(np.floor((ellipsoidRadius_x * 2) / size_el))  # number of elems needed in x axis in mesh
    nel_y = int(np.floor((ellipsoidRadius_y * 2) / size_el))  # number of elems needed in y axis in mesh (need to
    # implement having different x, y element numbers)
    nel_z = int(np.floor((ellipsoidRadius_z * 2) / size_el))  # number of elems needed in z axis in mesh
# total number of elements in x,y are number in square plus 2* number in arm
# If square takes up half the radius need even numbers in arm and square at one third of total each
# If square takes up squareSizeRatio of the total, then need the square part to be multiplied by that proportion
total_square_arm = 2.0 * nel_x / 3.0
numberOfSquareElements = int(np.floor(squareSizeRatio * total_square_arm))
numberOfArmElements = int(np.floor((1 - squareSizeRatio) * total_square_arm))
# In future for cross-sections that deviate a lot from circular will need different number of elements in square
# and arm in x- and y-
numberOfZElements = nel_z
if (el_type == 1): # linear
numberOfNodesXi = 2
elif (el_type == 2): # quadratic
numberOfNodesXi = 3
numberOfLocalNodes = numberOfNodesXi * numberOfNodesXi * numberOfNodesXi
numberOfLocalInterfaceNodes = numberOfNodesXi * numberOfNodesXi
localNodeIdx000 = 0
localNodeIdx100 = numberOfNodesXi - 1
localNodeIdx010 = numberOfNodesXi * (numberOfNodesXi - 1)
localNodeIdx110 = numberOfNodesXi * numberOfNodesXi - 1
localNodeIdx001 = numberOfNodesXi * numberOfNodesXi * (numberOfNodesXi - 1)
localNodeIdx101 = numberOfNodesXi - 1 + numberOfNodesXi * numberOfNodesXi * (numberOfNodesXi - 1)
localNodeIdx011 = numberOfNodesXi * (numberOfNodesXi - 1) + numberOfNodesXi * numberOfNodesXi * (
numberOfNodesXi - 1)
localNodeIdx111 = numberOfLocalNodes - 1
numberOfNodesPerBlock = numberOfSquareElements * (numberOfNodesXi - 1) * (
numberOfArmElements * (numberOfNodesXi - 1) + 1)
numberOfElementsPerBlock = numberOfSquareElements * numberOfArmElements
numberOfNodesPerLength = 4 * numberOfNodesPerBlock + \
(numberOfSquareElements * (numberOfNodesXi - 1) - 1) * (
numberOfSquareElements * (numberOfNodesXi - 1) - 1)
numberOfElementsPerLength = 4 * numberOfElementsPerBlock + numberOfSquareElements * numberOfSquareElements
numberOfNodes = numberOfNodesPerLength * (numberOfZElements * (numberOfNodesXi - 1) + 1)
numberOfElements = numberOfElementsPerLength * numberOfZElements
if debug:
print(' Mesh Parameters:')
print(' numberOfSquareElements: %d' % (numberOfSquareElements))
print(' numberOfArmElements: %d' % (numberOfArmElements))
print(' numberOfZElements: %d' % (numberOfZElements))
print(' numberOfNodesXi: %d' % (numberOfNodesXi))
print(' numberOfNodesPerBlock: %d' % (numberOfNodesPerBlock))
print(' numberOfElementPerBlock: %d' % (numberOfElementsPerBlock))
print(' numberOfNodesPerLength: %d' % (numberOfNodesPerLength))
print(' numberOfElementsPerLength: %d' % (numberOfElementsPerLength))
print(' numberOfNodes: %d' % (numberOfNodes))
print(' numberOfElements: %d' % (numberOfElements))
print(' numberOfLocalNodes: %d' % (numberOfLocalNodes))
elems = np.zeros((numberOfElements, numberOfLocalNodes+1), dtype='int32')
node_array = np.zeros((numberOfNodes,4))
nodelist = [0]*numberOfNodes
surface_nodes = [0] * numberOfNodes
num_surface_nodes = 0
for zElementIdx in range(1, max(numberOfZElements + 1, 2)):
# Handle the arm blocks first
previousBlock = 4
for blockIdx in range(1, 5): # generating arm blocks
# DEFINING NODES AND ELEMENTS WITH CONNECTIVITY
for yElementIdx in range(1, numberOfArmElements + 1):
for xElementIdx in range(1, numberOfSquareElements + 1):
localNodes = [0] * numberOfLocalNodes # Nodes local to this arm block
elementNumber = xElementIdx + (yElementIdx - 1) * numberOfSquareElements + (
blockIdx - 1) * numberOfSquareElements * numberOfArmElements + \
(zElementIdx - 1) * numberOfElementsPerLength
if (xElementIdx == 1):
localNodes[localNodeIdx000] = (
previousBlock - 1) * numberOfNodesPerBlock + numberOfSquareElements * (
numberOfNodesXi - 1) + \
(yElementIdx - 1) * (
numberOfNodesXi - 1) * numberOfSquareElements * (
numberOfNodesXi - 1) + \
(zElementIdx - 1) * numberOfNodesPerLength * (numberOfNodesXi - 1)
localNodes[localNodeIdx100] = (blockIdx - 1) * numberOfNodesPerBlock + numberOfNodesXi - 1 + \
(yElementIdx - 1) * (
numberOfNodesXi - 1) * numberOfSquareElements * (
numberOfNodesXi - 1) + \
(zElementIdx - 1) * numberOfNodesPerLength * (numberOfNodesXi - 1)
else:
localNodes[localNodeIdx000] = (blockIdx - 1) * numberOfNodesPerBlock + (xElementIdx - 2) * (
numberOfNodesXi - 1) + (numberOfNodesXi - 2) + 1 + \
(yElementIdx - 1) * (numberOfNodesXi - 1) * (
numberOfSquareElements * (numberOfNodesXi - 1)) + \
(zElementIdx - 1) * numberOfNodesPerLength * (numberOfNodesXi - 1)
localNodes[localNodeIdx100] = localNodes[localNodeIdx000] + numberOfNodesXi - 1
localNodes[localNodeIdx010] = localNodes[localNodeIdx000] + numberOfSquareElements * (
numberOfNodesXi - 1) * (numberOfNodesXi - 1)
localNodes[localNodeIdx110] = localNodes[localNodeIdx100] + numberOfSquareElements * (
numberOfNodesXi - 1) * (numberOfNodesXi - 1)
localNodes[localNodeIdx001] = localNodes[localNodeIdx000] + numberOfNodesPerLength * (
numberOfNodesXi - 1)
localNodes[localNodeIdx101] = localNodes[localNodeIdx100] + numberOfNodesPerLength * (
numberOfNodesXi - 1)
localNodes[localNodeIdx011] = localNodes[localNodeIdx010] + numberOfNodesPerLength * (
numberOfNodesXi - 1)
localNodes[localNodeIdx111] = localNodes[localNodeIdx110] + numberOfNodesPerLength * (
numberOfNodesXi - 1)
if (el_type == 2):
localNodes[1] = localNodes[localNodeIdx100] - 1
localNodes[3] = localNodes[localNodeIdx000] + numberOfSquareElements * (numberOfNodesXi - 1)
localNodes[4] = localNodes[1] + numberOfSquareElements * (numberOfNodesXi - 1)
localNodes[5] = localNodes[4] + 1
localNodes[7] = localNodes[localNodeIdx110] - 1
localNodes[9] = localNodes[0] + numberOfNodesPerLength
localNodes[10] = localNodes[1] + numberOfNodesPerLength
localNodes[11] = localNodes[2] + numberOfNodesPerLength
localNodes[12] = localNodes[3] + numberOfNodesPerLength
localNodes[13] = localNodes[4] + numberOfNodesPerLength
localNodes[14] = localNodes[5] + numberOfNodesPerLength
localNodes[15] = localNodes[6] + numberOfNodesPerLength
localNodes[16] = localNodes[7] + numberOfNodesPerLength
localNodes[17] = localNodes[8] + numberOfNodesPerLength
localNodes[19] = localNodes[10] + numberOfNodesPerLength
localNodes[21] = localNodes[12] + numberOfNodesPerLength
localNodes[22] = localNodes[13] + numberOfNodesPerLength
localNodes[23] = localNodes[14] + numberOfNodesPerLength
localNodes[25] = localNodes[16] + numberOfNodesPerLength
linearNodes = [localNodes[localNodeIdx000], localNodes[localNodeIdx100],
localNodes[localNodeIdx010], localNodes[localNodeIdx110], \
localNodes[localNodeIdx001], localNodes[localNodeIdx101],
localNodes[localNodeIdx011], localNodes[localNodeIdx111]]
if (debug):
print(' Element %8d; Nodes: %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d' % \
(elementNumber, linearNodes[0], linearNodes[1], linearNodes[2], linearNodes[3],
linearNodes[4], linearNodes[5], linearNodes[6], linearNodes[7]))
if (el_type == 2):
print(' Nodes: %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d' % \
(localNodes[0], localNodes[1], localNodes[2], localNodes[3], localNodes[4],
localNodes[5], localNodes[6], localNodes[7], localNodes[8]))
print(' %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d' % \
(localNodes[9], localNodes[10], localNodes[11], localNodes[12], localNodes[13],
localNodes[14], localNodes[15], localNodes[16], localNodes[17]))
print(' %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d' % \
(localNodes[18], localNodes[19], localNodes[20], localNodes[21], localNodes[22],
localNodes[23], localNodes[24], localNodes[25], localNodes[26]))
if (el_type == 1):
elems[elementNumber-1][0] = elementNumber
elems[elementNumber-1][1:numberOfLocalNodes+1] = linearNodes
if (el_type == 2):
elems[elementNumber - 1][0] = elementNumber
elems[elementNumber - 1][1:numberOfLocalNodes + 1] = localNodes
previousBlock = blockIdx
# Handle the square block
if (numberOfSquareElements == 1):
elementNumber = elementNumber + 1
localNodes[localNodeIdx000] = 3 * numberOfNodesPerBlock + \
(zElementIdx - 1) * numberOfNodesPerLength * (numberOfNodesXi - 1)
localNodes[localNodeIdx100] = 4 * numberOfNodesPerBlock + \
(zElementIdx - 1) * numberOfNodesPerLength * (numberOfNodesXi - 1)
localNodes[localNodeIdx010] = 2 * numberOfNodesPerBlock + \
(zElementIdx - 1) * numberOfNodesPerLength * (numberOfNodesXi - 1)
localNodes[localNodeIdx110] = numberOfNodesPerBlock + \
(zElementIdx - 1) * numberOfNodesPerLength * (numberOfNodesXi - 1)
if (el_type == 2):
localNodes[1] = localNodes[localNodeIdx100] - 1
localNodes[3] = localNodes[localNodeIdx000] - 1
localNodes[4] = localNodes[localNodeIdx100] + 1
localNodes[5] = localNodes[localNodeIdx110] - 1
localNodes[7] = localNodes[localNodeIdx010] - 1
localNodes[localNodeIdx001] = localNodes[localNodeIdx000] + numberOfNodesPerLength * (numberOfNodesXi - 1)
localNodes[localNodeIdx101] = localNodes[localNodeIdx100] + numberOfNodesPerLength * (numberOfNodesXi - 1)
localNodes[localNodeIdx011] = localNodes[localNodeIdx010] + numberOfNodesPerLength * (numberOfNodesXi - 1)
localNodes[localNodeIdx111] = localNodes[localNodeIdx110] + numberOfNodesPerLength * (numberOfNodesXi - 1)
linearNodes = [localNodes[localNodeIdx000], localNodes[localNodeIdx100], localNodes[localNodeIdx010],
localNodes[localNodeIdx110], \
localNodes[localNodeIdx001], localNodes[localNodeIdx101], localNodes[localNodeIdx011],
localNodes[localNodeIdx111]]
if (el_type == 2):
localNodes[9] = localNodes[0] + numberOfNodesPerLength
localNodes[10] = localNodes[1] + numberOfNodesPerLength
localNodes[11] = localNodes[2] + numberOfNodesPerLength
localNodes[12] = localNodes[3] + numberOfNodesPerLength
localNodes[13] = localNodes[4] + numberOfNodesPerLength
localNodes[14] = localNodes[5] + numberOfNodesPerLength
localNodes[15] = localNodes[6] + numberOfNodesPerLength
localNodes[16] = localNodes[7] + numberOfNodesPerLength
localNodes[17] = localNodes[8] + numberOfNodesPerLength
localNodes[19] = localNodes[10] + numberOfNodesPerLength
localNodes[21] = localNodes[12] + numberOfNodesPerLength
localNodes[22] = localNodes[13] + numberOfNodesPerLength
localNodes[23] = localNodes[14] + numberOfNodesPerLength
localNodes[25] = localNodes[16] + numberOfNodesPerLength
if (debug):
print(' Element %8d; Nodes: %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d' % \
(elementNumber, linearNodes[0], linearNodes[1], linearNodes[2], linearNodes[3], linearNodes[4],
linearNodes[5], linearNodes[6], linearNodes[7]))
if (el_type == 2):
print(' Nodes: %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d' % \
(localNodes[0], localNodes[1], localNodes[2], localNodes[3], localNodes[4], localNodes[5],
localNodes[6], localNodes[7], localNodes[8]))
print(' %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d' % \
(
localNodes[9], localNodes[10], localNodes[11], localNodes[12], localNodes[13], localNodes[14],
localNodes[15], localNodes[16], localNodes[17]))
print(' %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d' % \
(localNodes[18], localNodes[19], localNodes[20], localNodes[21], localNodes[22],
localNodes[23], localNodes[24], localNodes[25], localNodes[26]))
if (el_type == 1):
elems[elementNumber - 1][0] = elementNumber
elems[elementNumber - 1][1:numberOfLocalNodes + 1] = linearNodes
if (el_type == 2):
elems[elementNumber - 1][0] = elementNumber
elems[elementNumber - 1][1:numberOfLocalNodes + 1] = localNodes
else:
for yElementIdx in range(1, numberOfSquareElements + 1):
for xElementIdx in range(1, numberOfSquareElements + 1):
localNodes = [0] * numberOfLocalNodes
elementNumber = 4 * numberOfElementsPerBlock + xElementIdx + (
yElementIdx - 1) * numberOfSquareElements + \
(zElementIdx - 1) * numberOfElementsPerLength
if (yElementIdx == 1):
if (xElementIdx == 1):
# Bottom-left
localNodes[localNodeIdx000] = 3 * numberOfNodesPerBlock + \
(zElementIdx - 1) * numberOfNodesPerLength * (
numberOfNodesXi - 1)
localNodes[localNodeIdx100] = 3 * numberOfNodesPerBlock + numberOfArmElements * (
numberOfNodesXi - 1) * \
numberOfSquareElements * (numberOfNodesXi - 1) + (
numberOfNodesXi - 1) + \
(zElementIdx - 1) * numberOfNodesPerLength * (
numberOfNodesXi - 1)
localNodes[localNodeIdx010] = 3 * numberOfNodesPerBlock - (numberOfNodesXi - 1) + \
(zElementIdx - 1) * numberOfNodesPerLength * (
numberOfNodesXi - 1)
localNodes[localNodeIdx110] = 4 * numberOfNodesPerBlock + (
numberOfSquareElements * (numberOfNodesXi - 1) - 1) * \
(numberOfNodesXi - 2) + (numberOfNodesXi - 1) + \
(zElementIdx - 1) * numberOfNodesPerLength * (
numberOfNodesXi - 1)
if (el_type == 2):
localNodes[1] = localNodes[localNodeIdx100] - 1
localNodes[3] = localNodes[localNodeIdx000] - 1
localNodes[4] = localNodes[localNodeIdx110] - numberOfSquareElements * (
numberOfNodesXi - 1)
localNodes[5] = localNodes[4] + 1
localNodes[7] = localNodes[localNodeIdx110] - 1
elif (xElementIdx == numberOfSquareElements):
# Bottom-right
localNodes[localNodeIdx000] = 4 * numberOfNodesPerBlock - (numberOfNodesXi - 1) + \
(zElementIdx - 1) * numberOfNodesPerLength * (
numberOfNodesXi - 1)
localNodes[localNodeIdx100] = 4 * numberOfNodesPerBlock + \
(zElementIdx - 1) * numberOfNodesPerLength * (
numberOfNodesXi - 1)
localNodes[localNodeIdx010] = 4 * numberOfNodesPerBlock + (
numberOfSquareElements * (numberOfNodesXi - 1) - 1) * \
(numberOfNodesXi - 1) - (numberOfNodesXi - 2) + \
(zElementIdx - 1) * numberOfNodesPerLength * (
numberOfNodesXi - 1)
localNodes[localNodeIdx110] = numberOfSquareElements * (numberOfNodesXi - 1) * \
numberOfArmElements * (numberOfNodesXi - 1) + (
numberOfNodesXi - 1) + \
(zElementIdx - 1) * numberOfNodesPerLength * (
numberOfNodesXi - 1)
if (el_type == 2):
localNodes[1] = localNodes[localNodeIdx000] + 1
localNodes[3] = localNodes[localNodeIdx010] - numberOfSquareElements * (
numberOfNodesXi - 1) + 1
localNodes[4] = localNodes[3] + 1
localNodes[5] = localNodes[localNodeIdx110] - 1
localNodes[7] = localNodes[localNodeIdx010] + 1
else:
# Bottom
localNodes[localNodeIdx000] = 3 * numberOfNodesPerBlock + numberOfSquareElements * (
numberOfNodesXi - 1) * \
numberOfArmElements * (numberOfNodesXi - 1) + (
xElementIdx - 1) * (numberOfNodesXi - 1) + \
(zElementIdx - 1) * numberOfNodesPerLength * (
numberOfNodesXi - 1)
localNodes[localNodeIdx100] = localNodes[localNodeIdx000] + (numberOfNodesXi - 1)
localNodes[localNodeIdx010] = 4 * numberOfNodesPerBlock + (
numberOfSquareElements * (numberOfNodesXi - 1) - 1) * \
(numberOfNodesXi - 2) + (xElementIdx - 1) * (
numberOfNodesXi - 1) + \
(zElementIdx - 1) * numberOfNodesPerLength * (
numberOfNodesXi - 1)
localNodes[localNodeIdx110] = localNodes[localNodeIdx010] + (numberOfNodesXi - 1)
if (el_type == 2):
localNodes[1] = localNodes[localNodeIdx000] + 1
localNodes[3] = localNodes[localNodeIdx010] - numberOfSquareElements * (
numberOfNodesXi - 1) + 1
localNodes[4] = localNodes[3] + 1
localNodes[5] = localNodes[4] + 1
localNodes[7] = localNodes[localNodeIdx110] - 1
elif (yElementIdx == numberOfSquareElements):
if (xElementIdx == 1):
# Top-left
localNodes[localNodeIdx000] = 2 * numberOfNodesPerBlock + numberOfSquareElements * (
numberOfNodesXi - 1) * \
numberOfArmElements * (numberOfNodesXi - 1) + (
numberOfNodesXi - 1) + \
(zElementIdx - 1) * numberOfNodesPerLength * (
numberOfNodesXi - 1)
localNodes[localNodeIdx100] = 4 * numberOfNodesPerBlock + (
numberOfSquareElements * (numberOfNodesXi - 1) - 1) * \
((numberOfSquareElements - 1) * (numberOfNodesXi - 1) - 1) + (
numberOfNodesXi - 1) + \
(zElementIdx - 1) * numberOfNodesPerLength * (
numberOfNodesXi - 1)
localNodes[localNodeIdx010] = 2 * numberOfNodesPerBlock + \
(zElementIdx - 1) * numberOfNodesPerLength * (
numberOfNodesXi - 1)
localNodes[localNodeIdx110] = 2 * numberOfNodesPerBlock - (numberOfNodesXi - 1) + \
(zElementIdx - 1) * numberOfNodesPerLength * (
numberOfNodesXi - 1)
if (el_type == 2):
localNodes[1] = localNodes[localNodeIdx100] - 1
localNodes[3] = localNodes[localNodeIdx000] - 1
localNodes[4] = localNodes[1] + numberOfSquareElements * (numberOfNodesXi - 1) - 1
localNodes[5] = localNodes[4] + 1
localNodes[7] = localNodes[localNodeIdx110] + 1
elif (xElementIdx == numberOfSquareElements):
# Top-right
localNodes[localNodeIdx000] = 4 * numberOfNodesPerBlock + (
numberOfSquareElements * (numberOfNodesXi - 1) - 1) * \
((numberOfSquareElements - 1) * (numberOfNodesXi - 1) - 1) + \
(numberOfSquareElements - 1) * (numberOfNodesXi - 1) + \
(zElementIdx - 1) * numberOfNodesPerLength * (
numberOfNodesXi - 1)
localNodes[localNodeIdx100] = numberOfSquareElements * (numberOfNodesXi - 1) * \
numberOfArmElements * (numberOfNodesXi - 1) + \
(numberOfSquareElements - 1) * (numberOfNodesXi - 1) + \
(zElementIdx - 1) * numberOfNodesPerLength * (
numberOfNodesXi - 1)
localNodes[localNodeIdx010] = numberOfNodesPerBlock + numberOfSquareElements * (
numberOfNodesXi - 1) * \
numberOfArmElements * (numberOfNodesXi - 1) + (
numberOfNodesXi - 1) + \
(zElementIdx - 1) * numberOfNodesPerLength * (
numberOfNodesXi - 1)
localNodes[localNodeIdx110] = numberOfNodesPerBlock + \
(zElementIdx - 1) * numberOfNodesPerLength * (
numberOfNodesXi - 1)
if (el_type == 2):
localNodes[1] = localNodes[localNodeIdx000] + 1
localNodes[3] = localNodes[localNodeIdx000] + numberOfSquareElements * (
numberOfNodesXi - 1) - 1
localNodes[4] = localNodes[3] + 1
localNodes[5] = localNodes[localNodeIdx110] - 1
localNodes[7] = localNodes[localNodeIdx010] - 1
else:
# Top
localNodes[localNodeIdx000] = 4 * numberOfNodesPerBlock + (
numberOfSquareElements * (numberOfNodesXi - 1) - 1) * \
((numberOfSquareElements - 1) * (numberOfNodesXi - 1) - 1) + \
(xElementIdx - 1) * (numberOfNodesXi - 1) + \
(zElementIdx - 1) * numberOfNodesPerLength * (
numberOfNodesXi - 1)
localNodes[localNodeIdx100] = localNodes[localNodeIdx000] + (numberOfNodesXi - 1)
localNodes[localNodeIdx010] = 2 * numberOfNodesPerBlock - (xElementIdx - 1) * (
numberOfNodesXi - 1) + \
(zElementIdx - 1) * numberOfNodesPerLength * (
numberOfNodesXi - 1)
localNodes[localNodeIdx110] = localNodes[localNodeIdx010] - (numberOfNodesXi - 1)
if (el_type == 2):
localNodes[1] = localNodes[localNodeIdx000] + 1
localNodes[3] = localNodes[localNodeIdx000] + numberOfSquareElements * (
numberOfNodesXi - 1) - 1
localNodes[4] = localNodes[3] + 1
localNodes[5] = localNodes[4] + 1
localNodes[7] = localNodes[localNodeIdx010] - 1
else:
if (xElementIdx == 1):
# Left
localNodes[localNodeIdx000] = 3 * numberOfNodesPerBlock - (yElementIdx - 1) * (
numberOfNodesXi - 1) + \
(zElementIdx - 1) * numberOfNodesPerLength * (
numberOfNodesXi - 1)
localNodes[localNodeIdx100] = 4 * numberOfNodesPerBlock + (
numberOfSquareElements * (numberOfNodesXi - 1) - 1) * \
((yElementIdx - 1) * (numberOfNodesXi - 1) - 1) + (
numberOfNodesXi - 1) + \
(zElementIdx - 1) * numberOfNodesPerLength * (
numberOfNodesXi - 1)
localNodes[localNodeIdx010] = localNodes[localNodeIdx000] - (numberOfNodesXi - 1)
localNodes[localNodeIdx110] = localNodes[localNodeIdx100] + (
numberOfSquareElements * (numberOfNodesXi - 1) - 1) * \
(numberOfNodesXi - 1)
if (el_type == 2):
localNodes[1] = localNodes[localNodeIdx100] - 1
localNodes[3] = localNodes[localNodeIdx000] - 1
localNodes[4] = localNodes[localNodeIdx110] - numberOfSquareElements * (
numberOfNodesXi - 1)
localNodes[5] = localNodes[4] + 1
localNodes[7] = localNodes[localNodeIdx110] - 1
elif (xElementIdx == numberOfSquareElements):
# Right
localNodes[localNodeIdx000] = 4 * numberOfNodesPerBlock + (
numberOfSquareElements * (numberOfNodesXi - 1) - 1) * \
((yElementIdx - 1) * (numberOfNodesXi - 1) - 1) + (
numberOfSquareElements - 1) * (
numberOfNodesXi - 1) + \
(zElementIdx - 1) * numberOfNodesPerLength * (
numberOfNodesXi - 1)
localNodes[localNodeIdx100] = numberOfSquareElements * (
numberOfNodesXi - 1) * numberOfArmElements * (numberOfNodesXi - 1) + \
(yElementIdx - 1) * (numberOfNodesXi - 1) + \
(zElementIdx - 1) * numberOfNodesPerLength * (
numberOfNodesXi - 1)
localNodes[localNodeIdx010] = localNodes[localNodeIdx000] + (
numberOfSquareElements * (numberOfNodesXi - 1) - 1) * \
(numberOfNodesXi - 1)
localNodes[localNodeIdx110] = localNodes[localNodeIdx100] + (numberOfNodesXi - 1)
if (el_type == 2):
localNodes[1] = localNodes[localNodeIdx000] + 1
localNodes[3] = localNodes[localNodeIdx010] - numberOfSquareElements * (
numberOfNodesXi - 1) + 1
localNodes[4] = localNodes[3] + 1
localNodes[5] = localNodes[localNodeIdx100] + 1
localNodes[7] = localNodes[localNodeIdx010] + 1
else:
# Middle
localNodes[localNodeIdx000] = 4 * numberOfNodesPerBlock + (
numberOfSquareElements * (numberOfNodesXi - 1) - 1) * \
((yElementIdx - 1) * (numberOfNodesXi - 1) - 1) + (
xElementIdx - 1) * (numberOfNodesXi - 1) + \
(zElementIdx - 1) * numberOfNodesPerLength * (
numberOfNodesXi - 1)
localNodes[localNodeIdx100] = localNodes[localNodeIdx000] + (numberOfNodesXi - 1)
localNodes[localNodeIdx010] = localNodes[localNodeIdx000] + (
numberOfSquareElements * (numberOfNodesXi - 1) - 1) * \
(numberOfNodesXi - 1)
localNodes[localNodeIdx110] = localNodes[localNodeIdx010] + (numberOfNodesXi - 1)
if (el_type == 2):
localNodes[1] = localNodes[localNodeIdx000] + 1
localNodes[3] = localNodes[localNodeIdx000] + numberOfSquareElements * (
numberOfNodesXi - 1) - 1
localNodes[4] = localNodes[3] + 1
localNodes[5] = localNodes[4] + 1
localNodes[7] = localNodes[localNodeIdx010] + 1
localNodes[localNodeIdx001] = localNodes[localNodeIdx000] + numberOfNodesPerLength * (
numberOfNodesXi - 1)
localNodes[localNodeIdx101] = localNodes[localNodeIdx100] + numberOfNodesPerLength * (
numberOfNodesXi - 1)
localNodes[localNodeIdx011] = localNodes[localNodeIdx010] + numberOfNodesPerLength * (
numberOfNodesXi - 1)
localNodes[localNodeIdx111] = localNodes[localNodeIdx110] + numberOfNodesPerLength * (
numberOfNodesXi - 1)
linearNodes = [localNodes[localNodeIdx000], localNodes[localNodeIdx100],
localNodes[localNodeIdx010], localNodes[localNodeIdx110], \
localNodes[localNodeIdx001], localNodes[localNodeIdx101],
localNodes[localNodeIdx011], localNodes[localNodeIdx111]]
if (el_type == 2):
localNodes[9] = localNodes[0] + numberOfNodesPerLength
localNodes[10] = localNodes[1] + numberOfNodesPerLength
localNodes[11] = localNodes[2] + numberOfNodesPerLength
localNodes[12] = localNodes[3] + numberOfNodesPerLength
localNodes[13] = localNodes[4] + numberOfNodesPerLength
localNodes[14] = localNodes[5] + numberOfNodesPerLength
localNodes[15] = localNodes[6] + numberOfNodesPerLength
localNodes[16] = localNodes[7] + numberOfNodesPerLength
localNodes[17] = localNodes[8] + numberOfNodesPerLength
localNodes[19] = localNodes[10] + numberOfNodesPerLength
localNodes[21] = localNodes[12] + numberOfNodesPerLength
localNodes[22] = localNodes[13] + numberOfNodesPerLength
localNodes[23] = localNodes[14] + numberOfNodesPerLength
localNodes[25] = localNodes[16] + numberOfNodesPerLength
if (debug):
print(' Element %8d; Nodes: %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d' % \
(elementNumber, linearNodes[0], linearNodes[1], linearNodes[2], linearNodes[3],
linearNodes[4], linearNodes[5], linearNodes[6], linearNodes[7]))
if (el_type == 2):
print(' Nodes: %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d' % \
(localNodes[0], localNodes[1], localNodes[2], localNodes[3], localNodes[4],
localNodes[5], localNodes[6], localNodes[7], localNodes[8]))
print(' %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d' % \
(localNodes[9], localNodes[10], localNodes[11], localNodes[12], localNodes[13],
localNodes[14], localNodes[15], localNodes[16], localNodes[17]))
print(' %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d' % \
(localNodes[18], localNodes[19], localNodes[20], localNodes[21], localNodes[22],
localNodes[23], localNodes[24], localNodes[25], localNodes[26]))
if (el_type == 1):
elems[elementNumber-1][0] = elementNumber
elems[elementNumber-1][1:numberOfLocalNodes+1] = linearNodes
if (el_type == 2):
elems[elementNumber - 1][0] = elementNumber
elems[elementNumber - 1][1:numberOfLocalNodes + 1] = localNodes
if (debug):
print(' Nodes:')
for zNodeIdx in range(1, numberOfZElements * (numberOfNodesXi - 1) + 2):
prop = 1 - (1 - circle_prop) * abs(2.0 * (zNodeIdx - 1) / float(numberOfZElements * (numberOfNodesXi - 1)) -
1.0)
sign = np.sign(2.0 * (zNodeIdx - 1) / float(numberOfZElements * (numberOfNodesXi - 1)) - 1.0)
#This is the z height associated with this prop
        zPosition = sign * ellipsoidRadius_z * np.sqrt(1 - prop ** 2)
import numpy as np
import json
from sklearn.linear_model import LogisticRegression
from src.models.Classifier import Classifier
from sklearn.model_selection import RandomizedSearchCV
from scipy.stats import loguniform
# This section avoids the console being flooded with ConvergenceWarning messages from the randomized search
import sys
import warnings
import os
if not sys.warnoptions:
warnings.simplefilter("ignore")
os.environ["PYTHONWARNINGS"] = "ignore"
###
class LogisticRegressionClassifier(Classifier):
def __init__(self, fold):
super().__init__()
self.fold = fold
self.clf = None
self.name = "Logistic Regression"
self.print('Creating')
def initialize_classifier(self, pre_trained=False):
self.print('Initialization')
if not pre_trained:
self.clf = LogisticRegression()
else:
with open(self.get_config_file_path(), 'r') as fp:
hyp = json.load(fp)
hyp_string = ''
for key in hyp:
hyp_string += key + ':' + str(hyp[key]) + ' '
self.print(hyp_string)
self.clf = LogisticRegression(C=hyp['C'], penalty=hyp['penalty'], solver=hyp['solver'])
def optimize(self, data, labels):
self.initialize_classifier()
self.print('Start optimization')
hyp_grid = [
{'solver': ['newton-cg'], 'penalty': ['l2'], 'C': loguniform(1e-5, 1000)},
{'solver': ['lbfgs'], 'penalty': ['l2'], 'C': loguniform(1e-5, 1000)},
{'solver': ['liblinear'], 'penalty': ['l1', 'l2'], 'C': loguniform(1e-5, 1000)},
{'solver': ['sag'], 'penalty': ['l2'], 'C': loguniform(1e-5, 1000)},
{'solver': ['saga'], 'penalty': ['elasticnet', 'l1', 'l2'], 'C': loguniform(1e-5, 1000)}
]
search = RandomizedSearchCV(self.clf,
hyp_grid,
n_iter=100,
scoring='neg_log_loss',
cv=self.fold,
random_state=42,
n_jobs=-1)
        result = search.fit(data.values, np.ravel(labels.values))
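        # A possible continuation (sketch, not from the original source): keep the best
        # estimator found by the search and persist its hyper-parameters so that
        # initialize_classifier(pre_trained=True) can reload them later.
        # self.clf = result.best_estimator_
        # with open(self.get_config_file_path(), 'w') as fp:
        #     json.dump(result.best_params_, fp)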
# import the opencv library
import cv2
from cv2 import aruco
import numpy as np
import sys
from util import extract_laser, undistort_camera
def customAruco():
    # define an empty custom dictionary for 5x5 bit markers
aruco_dict = cv2.aruco.custom_dictionary(0, 5, 1)
# add empty bytesList array to fill with 3 markers later
aruco_dict.bytesList = np.empty(shape = (5, 4, 4), dtype = np.uint8)
# add new marker(s)
mybits = np.array([[1,0,0,0,0],[1,0,0,0,0],[1,0,0,0,0],[1,0,0,0,0],[1,0,1,1,1]], dtype = np.uint8)
aruco_dict.bytesList[0] = cv2.aruco.Dictionary_getByteListFromBits(mybits)
mybits = np.array([[1,0,0,0,0],[1,0,0,0,0],[1,0,0,0,0],[1,0,0,0,0],[0,1,0,0,1]], dtype = np.uint8)
aruco_dict.bytesList[1] = cv2.aruco.Dictionary_getByteListFromBits(mybits)
mybits = np.array([[1,0,0,0,0],[1,0,0,0,0],[1,0,0,0,0],[1,0,0,0,0],[0,1,1,1,0]], dtype = np.uint8)
aruco_dict.bytesList[2] = cv2.aruco.Dictionary_getByteListFromBits(mybits)
mybits = np.array([[1,0,0,0,0],[1,0,0,0,0],[1,0,0,0,0],[1,0,1,1,1],[1,0,0,0,0]], dtype = np.uint8)
aruco_dict.bytesList[3] = cv2.aruco.Dictionary_getByteListFromBits(mybits)
    mybits = np.array([[1,0,0,0,0],[1,0,0,0,0],[1,0,0,0,0],[1,0,1,1,1],[1,0,1,1,1]], dtype = np.uint8)
    aruco_dict.bytesList[4] = cv2.aruco.Dictionary_getByteListFromBits(mybits)
    return aruco_dict
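
# A usage sketch (illustrative; assumes a grayscale frame `gray` from the camera):
# aruco_dict = customAruco()
# parameters = cv2.aruco.DetectorParameters_create()
# corners, ids, rejected = cv2.aruco.detectMarkers(gray, aruco_dict, parameters=parameters)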
from flask import Flask
from flask import request, json
from PIL import Image
import PIL
import os , io , sys
import numpy as np
import base64
import cv2
from skimage import transform
from texturizing import *
import glob
from pathlib import Path
import random
import shutil
from tensorflow.keras.models import load_model
import tensorflow as tf
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.dirname(SCRIPT_DIR))
from flowers_quilting.main import augment
def image_to_bytes(img):
rawBytes = io.BytesIO()
img.save(rawBytes, "JPEG")
rawBytes.seek(0)
img_base64 = base64.b64encode(rawBytes.read())
return img_base64
def load_image(file,size=[256,256]):
pixels = tf.convert_to_tensor(file)
pixels = tf.cast(pixels, tf.float32)
pixels = tf.image.resize(pixels, size, method= tf.image.ResizeMethod.BILINEAR)
pixels = (pixels / 127.5) - 1
pixels = tf.expand_dims(pixels, 0)
# pixels = np.array(file).astype('float32')/255
# pixels= transform.resize(pixels, (size[0], size[1], 3))
# pixels = np.expand_dims(pixels, 0)
return pixels
model = load_model('./Updated_ImagetoImage.h5')
print("[+] Model loaded successfully.")
# create directory tmp if not exist
if not os.path.exists('./tmp'):
os.makedirs('./tmp')
app = Flask(__name__)
app.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024 # 16 MB
print("[+] Server started succesfully.")
@app.route('/')
def main():
return "Hello World"
@app.route('/api/predict', methods=['POST'])
def predict():
global model
# print(request.files, file=sys.stderr)
file = request.files['image'].read() ## byte file
    npimg = np.frombuffer(file, np.uint8)  # np.fromstring is deprecated for binary data
img = cv2.imdecode(npimg,cv2.IMREAD_COLOR)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# ######### Do preprocessing here ################
pixels = load_image(img)
prediction = model(pixels, training=True)
print("[+] Model prediction successful!")
prediction = ((prediction + 1) / 2.0) * 255
#################################################
prediction = | np.array(prediction[0], dtype=np.uint8) | numpy.array |
import _init_paths
import argparse
import os
import copy
import random
import numpy as np
from PIL import Image
import scipy.io as scio
import scipy.misc
import numpy.ma as ma
import math
import trimesh
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torchvision.utils as vutils
import torch.nn.functional as F
from torch.autograd import Variable
from datasets.ycb.dataset import PoseDataset
from lib.network import PoseNet, PoseRefineNet
from lib.transformations import euler_matrix, quaternion_matrix, quaternion_from_matrix
import cv2
from scipy.spatial import cKDTree as KDTree
import json
import utils_3d
CLASSES_FILE = 'datasets/ycb/dataset_config/classes.txt'
OBJECTS_DIR = 'models'
def load_object(object_idx):
"""
Load an object from that object's label index
"""
class_file = open(CLASSES_FILE)
model_list = []
while 1:
class_input = class_file.readline()
if not class_input:
break
model_list.append(
class_input[:-1]
)
model_path = os.path.join(
OBJECTS_DIR, model_list[object_idx-1], "textured.obj"
)
print("Loading model from: {}".format(model_path))
return trimesh.load(model_path)
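# Note (illustrative): object_idx is 1-based with respect to the ordering of
# CLASSES_FILE, hence model_list[object_idx - 1] above. For example, assuming the
# first line of classes.txt is '002_master_chef_can':
#   mesh = load_object(1)   # -> trimesh mesh from models/002_master_chef_can/textured.obj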
def get_bbx_from_seg(label):
"""
Get a bounding box from a binary mask
"""
border_list = [-1, 40, 80, 120, 160, 200, 240, 280, 320, 360, 400, 440, 480, 520, 560, 600, 640, 680]
img_width = 480
img_length = 640
rows = np.any(label, axis=1)
cols = np.any(label, axis=0)
rmin, rmax = np.where(rows)[0][[0, -1]]
cmin, cmax = | np.where(cols) | numpy.where |
import sys
import os
import subprocess
from smt.sampling_methods import LHS
import numpy as np
from scipy import stats
from surmise.emulation import emulator
from dt import cross_section, s_factor
# Reduced mass in the deuteron channel.
MU_D = 1124.6473494927284 # MeV
indices = np.array([0, 1, 2])
n = 1 # output dimension
# Default values.
AD = 6.0
AN = 4.0
UE = 0.0
A = 0.0
GD2 = 3.0
GN2 = 0.5
ER = 0.070
DEFAULT_VALUES = np.array([ER, GD2, GN2, AD, AN, UE, A])
E_MIN = 0.010
E_MAX = 0.200
K_MIN = np.sqrt(2*MU_D*E_MIN)
K_MAX = np.sqrt(2*MU_D*E_MAX)
BOUNDS = np.array([[K_MIN, K_MAX],
[0.010, 0.120],
[1, 5],
[0.001, 0.5]])
NTRAIN = 500 # number of training points
NTEST = 50 # number of testing points
class RMatrixModel:
def __init__(self, parameter_indices):
self.parameter_indices = np.copy(parameter_indices)
def s_factor(self, energy, theta):
return s_factor(energy, theta[0], theta[0], *theta[1:])
def evaluate(self, energy, theta):
thetap = np.copy(DEFAULT_VALUES)
thetap[self.parameter_indices] = theta
return self.s_factor(energy, thetap)
model = RMatrixModel(indices)
# Set up Latin hypercube sampling to generate the training/testing space.
generator = LHS(xlimits=BOUNDS)
# Convenience function for generating a matrix of data.
def generate_data(m):
'''
Generates a matrix of data. Organized according to:
momentum | theta_0 | theta_1 | theta_2 | y
'''
theta_space = generator(m)
return np.array([
[*theta, model.evaluate(0.5*theta[0]**2/MU_D, theta[1:])] for theta in theta_space
])
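# Note (illustrative): the first Latin-hypercube coordinate is the channel
# momentum k in MeV, converted to a non-relativistic energy E = k**2 / (2 * MU_D)
# before the S factor is evaluated, e.g.
#   k = 15.0                       # hypothetical momentum, MeV
#   energy = 0.5 * k**2 / MU_D     # ~0.100 MeV for MU_D ~ 1124.6 MeV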
# Generating data.
train = generate_data(NTRAIN)
test = generate_data(NTEST)
| np.savetxt('datfiles/training_data_sampled_energies.txt', train, header='''
Momentum (MeV) | E_r (MeV) | gamma_d^2 (MeV) | gamma_n^2 (MeV) | S factor (MeV b)
''') | numpy.savetxt |
"""
vocmaxlib
This python package calculates the maximum sting size for a photovoltaic
installation. The method is consistent with the NEC 2017 690.7 standard.
toddkarin
"""
import numpy as np
import pvlib
import pvlib.bifacial
# import nsrdbtools
# import socket
# import matplotlib
# matplotlib.use('TkAgg')
# import matplotlib.pyplot as plt
import pandas as pd
import datetime
import glob
import pytz
from vocmax import nsrdb
import tqdm
import os
import urllib
import pytz
import sys
import os
import warnings
from pvlib.iotools import get_psm3
if sys.version_info[0] < 3:
from StringIO import StringIO
else:
from io import StringIO
from vocmax.bifacial import pvfactors_timeseries
import glob
import vocmax
# from pvlib.bifacial import PVFactorsReportBuilder as PVFactorsReportBuilder
# Parameters entering into Voc calculation:
cec_modules = pvlib.pvsystem.retrieve_sam('CeCMod')
# Descriptions of the various parameters used in the calculation.
explain = {
'Voco': 'Open circuit voltage at reference conditions, in V',
'Bvoco': 'Temperature dependence of open circuit voltage, in V/C',
'Mbvoc': """Coefficient providing the irradiance dependence of the
temperature coefficient of open circuit voltage, typically assumed to be
zero, in V/C
""",
'n_diode': 'Diode ideality factor, unitless',
'cells_in_series': 'Number of cells in series in each module, dimensionless',
'FD': """Fraction of diffuse irradiance arriving at the cell, typically
assumed to be 1, dimensionless
""",
'alpha_sc': """The short-circuit current temperature coefficient of the
module, in A/C
""",
'a_ref': """The product of the usual diode ideality factor (n_diode,
unitless), number of cells in series (cells_in_series), and cell thermal
voltage at reference conditions, in units of V.
""",
'I_L_ref': """The light-generated current (or photocurrent) at reference
conditions, in amperes.
""",
'I_o_ref': """The dark or diode reverse saturation current at reference
conditions, in amperes.
""",
'R_sh_ref': """The shunt resistance at reference conditions, in ohms.""",
'R_s': """The series resistance at reference conditions, in ohms.""",
'Isco': """Short circuit current at reference conditions, in amperes.""",
'Impo': """Maximum-power current at reference conditions, in amperes.""",
'Vmpo': """Maximum-power voltage at reference conditions, in volts.""",
'Pmpo': """Maximum-power power at reference conditions, in watts.""",
'Bisco': """Temperature coefficient of short circuit current, in A/C"""
}
def get_weather_data(lat, lon,
api_key,
cache_directory='cached_weather_data',
attributes='ghi,dhi,dni,wind_speed,air_temperature',
force_download=False,
full_name='<NAME>',
email='<EMAIL>',
affiliation='vocmax',
years=np.arange(1998, 2018.5),
interval=30,
):
"""
Retrieve weather data from the national solar radiation database (NSRDB).
Description
-----------
df, info = get_weather_data(lat,lon,api_key) gets weather data from the
NSRDB using the NSRDB api. Data download for a single location takes
around 3 minutes. Once weather data is downloaded, it is stored in a
local cache so it can be retrieved quickly. One sample point (lat=37.876,
lon=-122.247) is provided with the function so sample data can be easily
loaded without an api key.
Api keys are available free of charge at https://developer.nrel.gov/signup/
    Note: data can only be downloaded from the NSRDB sequentially (it is not
    possible to download data using multiple scripts in parallel).
Examples
--------
lat, lon = 37.876, -122.247
# Note: Replace with your api key
api_key = '<KEY>'
df, info = vocmax.get_weather_data(lat,lon,api_key)
Parameters
----------
lat : float or int
latitude in decimal degrees, between -90 and 90, north is positive
lon : float or int
longitude in decimal degrees, between -180 and 180, east is positive
api_key : str
NREL Developer Network API key
email : str
NREL API uses this to automatically communicate messages back
to the user only if necessary
names : str, default 'tmy'
        PSM3 API parameter specifying the year or TMY variant to download, see notes
below for options
interval : int, default 60
interval size in minutes, can only be either 30 or 60. Only used for
single-year requests (i.e., it is ignored for tmy/tgy/tdy requests).
leap_day : boolean, default False
include leap day in the results. Only used for single-year requests
(i.e., it is ignored for tmy/tgy/tdy requests).
full_name : str, default 'pvlib python'
optional
affiliation : str, default 'pvlib python'
optional
timeout : int, default 30
time in seconds to wait for server response before timeout
force_download : bool
        If True, force downloading of weather data regardless of whether
        that particular location has already been downloaded. Default is False.
    tz_localize : bool
        Whether to localize the time zone.
Returns
-------
df : pandas dataframe
Dataframe containing weather data with fields
'year' - year of row.
'month', 'day', 'hour', 'minute', 'dni', 'ghi', 'dhi',
'temp_air', 'wind_speed'.
info : dictionary
        Dictionary containing information on the weather dataset.
"""
    # First check if data exists in the cache directory.
if not force_download:
search_str = os.path.join(cache_directory,
'*_{:3.3f}_{:3.3f}.npz'.format(lat, lon))
print(search_str)
# One sample data point is provided with the package so that users don't
# have to get an api key to try it out.
if '{:3.3f}_{:3.3f}'.format(lat, lon) == '37.876_-122.247':
print('getting sample data point')
dir_path = os.path.dirname(os.path.realpath(__file__))
df, info = nsrdb.get_local_weather_data(
os.path.join(dir_path,
'123796_37.89_-122.26_search-point_37.876_-122.247.npz')
)
return df, info
# Otherwise search the cache for a weather data file that has already
# been downloaded.
filename = glob.glob(search_str)
if len(filename) > 0:
# Cached weather data found, load it
df, info = nsrdb.get_local_weather_data(filename[0])
# TODO: Add checks that the loaded file has the same options as in the function call.
return df, info
else:
# No cached weather data found.
pass
# Pull data from NSRDB because either force_download=True or no cached datafile found.
print('Downloading weather data and saving to "cached_weather_data" ...')
for j in tqdm.tqdm(range(len(years))):
year = '{:.0f}'.format(years[j])
info_iter, df_iter = get_psm3(
latitude=lat,
longitude=lon,
api_key=api_key,
email=email,
names=year,
interval=30,
leap_day=False,
full_name=full_name,
affiliation=affiliation,
timeout=30)
#
# # Declare url string
# url = 'http://developer.nrel.gov/api/solar/nsrdb_psm3_download.csv?wkt=POINT({lon}%20{lat})&names={year}&leap_day={leap}&interval={interval}&utc={utc}&full_name={name}&email={email}&affiliation={affiliation}&mailing_list={mailing_list}&reason={reason}&api_key={api}&attributes={attr}'.format(
# year = year, lat = lat, lon = lon, leap = leap_year, interval = interval,
# utc = utc, name = your_name, email = your_email,
# mailing_list = mailing_list, affiliation = your_affiliation,
# reason = reason_for_use, api = api_key, attr = attributes)
#
# # file_name, urllib.request.urlretrieve(url, "testfile.txt")
# with urllib.request.urlopen(url) as f:
# # Get the data as a string.
# response = f.read().decode('utf-8')
#
# # Read the first few lines to get info on datafile
# info_df = pd.read_csv(StringIO(response), nrows=1)
#
# # Create a dictionary for the info file.
# info_iter = {}
# for p in info_df:
# info_iter[p] = info_df[p].iloc[0]
#
# df_iter = pd.read_csv(StringIO(response), skiprows=2)
#
# if np.diff(df_iter[0:2].Minute) == 30:
# interval = '30'
# info_iter['interval_in_hours'] = 0.5
# elif np.diff(df_iter[0:2].Minute) == 0:
# interval = '60'
# info_iter['interval_in_hours'] = 1
# else:
# print('Interval not understood!')
info_iter['interval_in_hours'] = interval / 60
# Set the time index in the pandas dataframe:
year_iter = str(df_iter['Year'][0])
df_iter = df_iter.set_index(
pd.date_range('1/1/{yr}'.format(yr=year_iter),
freq='{}Min'.format(interval),
periods=len(df_iter)))
df_iter.index = df_iter.index.tz_localize(
pytz.FixedOffset(float(info_iter['Time Zone'] * 60)))
if j == 0:
info = info_iter
df = df_iter
else:
df = df.append(df_iter)
# Process/compress the downloaded dfs.
info['timedelta_in_years'] = (df.index[-1] - df.index[0]).days / 365
# Convert to int for lowering file size.
dni = np.array(df['DNI'].astype(np.int16))
dhi = np.array(df['DHI'].astype(np.int16))
ghi = np.array(df['GHI'].astype(np.int16))
temp_air = np.array(df['Temperature'].astype(np.float32))
wind_speed = np.array(df['Wind Speed'].astype(np.float16))
year = np.array(df['Year'].astype(np.int16))
month = np.array(df['Month'].astype(np.int8))
day = np.array(df['Day'].astype(np.int8))
hour = np.array(df['Hour'].astype(np.int8))
minute = np.array(df['Minute'].astype(np.int8))
cache_directory = 'cached_weather_data'
if not os.path.exists(cache_directory):
print('Creating cache directory')
os.mkdir(cache_directory)
save_filename = os.path.join(cache_directory,
'{}_{:3.2f}_{:3.2f}_search-point_{:3.3f}_{:3.3f}.npz'.format(
info['Location ID'], info['Latitude'],
info['Longitude'], lat, lon)
)
# Write to file.
np.savez_compressed(save_filename,
Source=info['Source'],
Location_ID=info['Location ID'],
Latitude=info['Latitude'],
Longitude=info['Longitude'],
Elevation=info['Elevation'],
local_time_zone=info['Local Time Zone'],
interval_in_hours=info['interval_in_hours'],
timedelta_in_years=info['timedelta_in_years'],
Version=info['Version'],
dni=dni,
dhi=dhi,
ghi=ghi,
temp_air=temp_air,
wind_speed=wind_speed,
year=year,
month=month,
day=day,
hour=hour,
minute=minute)
# Reload from file.
df, info = nsrdb.get_local_weather_data(save_filename)
return df, info
# def ashrae_get_data():
# dir_path = os.path.dirname(os.path.realpath(__file__))
#
# # Load temperature difference data.
# ashrae = pd.read_csv(
# os.path.join(dir_path, 'ASHRAE2017_temperature_data.csv')
# )
# return ashrae
def ashrae_get_design_conditions_at_loc(lat, lon, ashrae):
"""
Get the ASHRAE design conditions data closest to the lat/lon of interest.
Parameters
----------
lat
lon
ashrae : dataframe
Returns
-------
dataframe
fields are
'Latitude'
'Longitude'
'Extreme Annual Mean Minimum Dry Bulb Temperature' - ASHRAE
extreme minimum dry bulb temperature, in C
"""
# df = ashrae_get_design_conditions()
# Calculate distance to search point.
distance = nsrdb.haversine_distance(lat, lon, ashrae['Lat'], ashrae['Lon'])
closest_idx = distance.idxmin()
return ashrae.iloc[closest_idx]
def nec_correction_factor(temperature):
"""
NEC 690.7(A)(2) correction factor from NEC2017.
Parameters
----------
temperature : numeric
Temperature in C.
Returns
-------
correction_factor : flat
"""
is_array = isinstance(temperature, np.ndarray)
temperature = np.array([temperature])
f = np.zeros_like(temperature, dtype='float') + 1
f[temperature < 25] = 1.02
f[temperature < 20] = 1.04
f[temperature < 15] = 1.06
f[temperature < 10] = 1.08
f[temperature < 5] = 1.10
f[temperature < 0] = 1.12
f[temperature < -5] = 1.14
f[temperature < -10] = 1.16
f[temperature < -15] = 1.18
f[temperature < -20] = 1.20
f[temperature < -25] = 1.21
f[temperature < -30] = 1.23
f[temperature < -35] = 1.25
f[np.isnan(temperature)] = np.nan
if not is_array:
f = f[0]
return f
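# Worked examples (these follow directly from the lookup table above):
#   nec_correction_factor(24.0)                     # -> 1.02
#   nec_correction_factor(-3.0)                     # -> 1.12
#   nec_correction_factor(np.array([30.0, -12.0]))  # -> array([1.  , 1.16])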
def get_nsrdb_temperature_error(lat, lon,
number_of_closest_points=5):
"""
Find the temperature error for a particular location.
    The NSRDB database provides temperature data for many locations. However,
these data are taken from the MERRA-2 dataset, and have some error
compared to ground measurements. The temperature error depends on location.
As a comparison, we calculated the mean minimum extreme minimum dry bulb
temperature using NSRDB data and compared to ASHRAE data. The temperature
difference determines the safety factor necessary for string length
calculations.
This function finds the closest points to a particular lat,lon coordinate
in the ASHRAE dataset and returns the maximum temperature difference (
NSRDB - ASHRAE) for these locations. A higher temperature difference
means that the NSRDB is overestimating the true temperature that is
measured at a ground station. Higher positive temperature differences
mean that a larger safety factor should be used when calculating string
length. The Safety factor can be calculated
Examples
--------
temperature_difference = vocmax.get_nsrdb_temperature_error(lat,lon)
Parameters
----------
lat : float
latitude of search point in fractional degrees
lon : float
longitude of search point in fractional degrees
number_of_closest_points : int
The number of closest datapoints to find. Default is 5.
Returns
-------
temperature_difference : float
max temperature difference between NSRDB point and closest ASHRAE
points. A positive number means that the NSRDB design temperature is
higher than the ASHRAE design temperature. If a positive temperature
difference is found, then an additional safety factor is suggested to
account for this error in the NSRDB dataset.
"""
dir_path = os.path.dirname(os.path.realpath(__file__))
# Load temperature difference data.
df = pd.read_pickle(
os.path.join(dir_path, 'nsrdb_ashrae_comparison.pkl')
)
# Calculate distance to search point.
distance = vocmax.nsrdb.haversine_distance(lat, lon, df['lat'], df['lon'])
# Find the closest locations.
distance_sort = distance.sort_values()
closest_idx = distance_sort.index[:number_of_closest_points]
# Calculate temperature difference
temperature_difference = df['nsrdb-ashrae Extreme_Annual_Mean_Min_DB'].loc[
closest_idx]
return temperature_difference.max()
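# Illustrative sketch (an assumption about usage, not necessarily the library's
# exact recipe): a positive NSRDB-vs-ASHRAE temperature error can be translated
# into an additional fractional voltage safety factor through the module's Voc
# temperature coefficient, e.g.
#   temperature_error = get_nsrdb_temperature_error(37.876, -122.247)
#   extra_safety = np.abs(Bvoco) * max(temperature_error, 0) / Voco  # Bvoco in V/C, Voco in V
#   safety_factor = base_safety_factor + extra_safety                # base_safety_factor is hypothetical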
def ashrae_import_design_conditions(filename='2017DesignConditions_s.xlsx'):
"""
Load the ASHRAE 2017 design conditions excel file. This file is NOT
provided in vocmax, it must be purchased directly from ASHRAE and added
to the current directory. The filename is '2017DesignConditions_s.xlsx'.
The '_s' at the end of the filename stands for 'SI'. There is also
another file '2017DesignConditions_p.xlsx' that contains measurements in
imperial units, do not use this file.
In order to use this function, purchase the weather data viewer DVD,
version 6.0, available at:
https://www.techstreet.com/ashrae/standards/weather-data-viewer-dvd-version-6-0?ashrae_auth_token=<PASSWORD>89-8065208f2e36&product_id=1949790
    Importing the excel file takes around 1 minute; the data is then saved as
    a csv file with name filename + '.csv' in the current directory. This
    makes subsequent loads quick.
Parameters
----------
filename : string
Filename to import.
Returns
-------
df : dataframe
Pandas dataframe containing certain fields of the weather data file.
"""
# filename = '2017DesignConditions_s.xlsx'
df = pd.read_excel(filename,
skiprows=0,
sheet_name=0,
header=[1, 2, 3],
verbose=False)
filename_out = filename + '.csv'
df_out = pd.DataFrame(
{'Lat': np.array(df['Lat']).flatten(),
'Lon': np.array(df['Lon']).flatten(),
'Country': np.array(df['Country']).flatten(),
'Station Name': np.array(df['Station Name']).flatten(),
'Extreme_Annual_Mean_Min_DB': np.array(
df['Extreme Annual DB']['Mean']['Min']).flatten(),
'Extreme_Annual_Standard Deviation_Min_DB': np.array(
df['Extreme Annual DB']['Standard Deviation']['Min']).flatten(),
'20-Year Return Period Extreme Min DB': np.array(
df['n-Year Return Period Values of Extreme DB']['n=20 years'][
'Min']).flatten(),
}
)
df_out.to_csv(filename_out, index=False)
return df_out
def ashrae_is_design_conditions_available(
filename='2017DesignConditions_s.xlsx'):
return os.path.exists(filename)
def ashrae_get_design_conditions(filename='2017DesignConditions_s.xlsx'):
"""
Get the ASHRAE design conditions data.
Parameters
----------
filename
Returns
-------
df : dataframe
Pandas dataframe containing certain fields of the ASHARE design
conditions file
"""
if os.path.exists(filename + '.csv'):
df = pd.read_csv(filename + '.csv')
elif os.path.exists(filename):
print(
"""Importing and compressing ASHRAE design conditions excel file. Future calls will quickly call csv version. """)
print('Found file: {}'.format(filename))
print('Expected loading time: 1.0 minute')
df = ashrae_import_design_conditions(filename)
else:
raise Exception(
"Design conditions file '{}' not found. File must be purchased from ASHRAE and placed in current directory.".format(
filename))
return df
def simulate_system(weather, info, module_parameters,
racking_parameters, thermal_model,
irrad_model='perez',
nighttime_irradiance_addition=0
):
"""
Use the PVLIB SAPM model to calculate maximum Voc.
Parameters
----------
weather : Dataframe
Weather data dataframe containing the columns:
'dni': Direct Normal Irradiance (W/m^2)
'dhi': Diffuse Horizontal Irradiance (W/m^2)
'ghi' Global Horizontal Irradiance (W/m^2)
'temp_air': air temperature (C)
'wind_speed': 10 m wind speed in (m/s)
info : dict
Dictionary containing location information with fields:
'Latitude': float
latitude in degrees
'Longitude': float
longitude in degrees.
Other fields may be included in info as well and will not interfere
with operation.
module_parameters : dict
Dict or Series containing the below fields describing the module
'Voco' : float
Open circuit voltage at reference conditions.
'Bvoco' : float
Temperature coefficient of open circuit voltage, in Volts/C.
'cells_in_series' : int
Number of cells in series in the module.
'n_diode' : float
Diode ideality factor
'Mbvoc' : float
Irradiance dependence of the temperature coefficient of
open-circuit voltage, typically assumed to be zero.
'FD' : float
Fraction of diffuse irradiance used by the module.
'efficiency' : float
Module fractional efficiency.
'iv_model' : string
Model for calculating Voc. Can be 'sapm', 'cec' or 'desoto'.
TODO: Describe better.
'aoi_model' : string
Model for calculating the angle-of-incidence loss function. Can
be 'no_loss' or 'ashrae'. The 'no_loss' method assumes that no
extra reflection losses are accrued at non-normal angles of
incidence. The 'ashrae' option uses the model in
pvlib.pvsystem.ashraeiam
'is_bifacial' : bool
True if module is bifacial. Using False will force the use of
monofacial models even if 'bifacial_model' in the
racking_parameters input dict is set to a value.
bifaciality_factor : float
Number describing the efficiency of the backside of the module
relative to the frontside. A typical values is 0.7.
racking_parameters : dict
dictionary describing the racking setup. Contains fields:
'racking_type' : str
Can be 'fixed_tilt' for a stationary PV system or 'single_axis'
for a single axis tracker.
'surface_tilt' : float
If racking_type is 'fixed_tilt', specify the surface tilt in
degrees from horizontal.
'surface_azimuth' : float
If racking type is 'surface_azimuth', specify the racking azimuth
in degrees. A value of 180 degrees has the module face oriented
due South.
'axis_tilt' : float
If racking_type is 'single_axis', specify the the tilt of the
axis of rotation (i.e, the y-axis defined by axis_azimuth) with
respect to horizontal, in decimal degrees. Standard value is 0.
'axis_azimuth' : float
If racking_type is 'single_axis', specify a value denoting the
compass direction along which the axis of rotation lies. Measured
in decimal degrees East of North. Standard value is 0.
'backtrack' : bool
Controls whether the tracker has the capability to ''backtrack''
to avoid row-to-row shading. False denotes no backtrack
capability. True denotes backtrack capability.
'gcr' : float
A value denoting the ground coverage ratio of a tracker system
which utilizes backtracking; i.e. the ratio between the PV array
surface area to total ground area. A tracker system with modules
2 meters wide, centered on the tracking axis, with 6 meters
between the tracking axes has a gcr of 2/6=0.333. If gcr is not
provided, a gcr of 2/7 is default. gcr must be <=1
bifacial_model : string
Can be 'proportional' or 'pvfactors'. The 'proportional' bifacial
modeling method calculates the effective irradiance on the
frontside of the module and then assumes that the backside
irradiance is equal to the frontside irradiance times the
backside_irradiance_fraction times the bifaciality_factor. The
'pvfactors' method uses bifacial modeling found in the pvfactors
package.
backside_irradiance_fraction : float
For simple bifacial modeling, the backside irradiance is assumed
to be equal to the frontside irradiance times the
backside_irradiance_fraction. Required if using
bifacial_model 'proportional'. Typical value is 0.3.
pvrow_height : float.
Height of the pv rows, measured at their center (m). Required if
using bifacial_model 'pvfactors'.
pvrow_width : float
Width of the pv rows in the considered 2D plane (m). Required if
using bifacial_model 'pvfactors'.
albedo: float
Ground albedo. Required if using bifacial_model 'pvfactors'.
n_pvrows: int, default 3
Number of PV rows to consider in the PV array. Required if
using bifacial_model 'pvfactors'.
index_observed_pvrow: int, default 1
Index of the PV row whose incident irradiance will be returned.
Indices of PV rows go from 0 to n_pvrows-1. Required if using
bifacial_model 'pvfactors'.
rho_front_pvrow: float, default 0.03
Front surface reflectivity of PV rows. Required if using
bifacial_model 'pvfactors'.
rho_back_pvrow: float, default 0.05
Back surface reflectivity of PV rows. Required if using
bifacial_model 'pvfactors'.
horizon_band_angle: float, default 15
Elevation angle of the sky dome's diffuse horizon band (deg).
Required if using bifacial_model 'pvfactors'.
thermal_model : dict
named_model : string
If named_model is 'explicit', then use SAPM parameters defined by
a, b, and deltaT. Otherwise named_model can be one of the
following strings:
‘open_rack_cell_glassback’ (default)
‘roof_mount_cell_glassback’
‘open_rack_cell_polymerback’
‘insulated_back_polymerback’
‘open_rack_polymer_thinfilm_steel’
‘22x_concentrator_tracker’
a: float
SAPM module parameter for establishing the upper limit for
module temperature at low wind speeds and high solar
irradiance.
b :float
SAPM module parameter for establishing the rate at which the
module temperature drops as wind speed increases (see SAPM
eqn. 11).
deltaT :float
SAPM module parameter giving the temperature difference
between the cell and module back surface at the reference
irradiance, E0.
open_circuit_rise : bool
The SAPM parameters are measured for modules at maximum power
point. At open-circuit voltage the module is warmer because less
energy is exported as electricity. If open_circuit_rise is True
then this temperature rise is taken into account, if False then
it is not.
thermal_mass : bool
Weather to take into account the thermal mass of the modules when
calculating temperature. Thermal mass is performed using an
exponentially weighted moving average [Bosco2016]
thermal_time_constant : float
Thermal time constant of the modules, in minutes.
irrad_model : str
Irradiance model for determining in-plane sky diffuse irradiance
component using the specified sky diffuse irradiance model. Default
is 'perez'
Sky diffuse models include:
* isotropic (default)
* klucher
* haydavies
* reindl
* king
* perez
Returns
-------
dataframe containing simulation results. Includes the fields present in
input 'weather' in addtion to:
'v_oc': open circuit voltage in Volts
'aoi': angle of incidence in degrees.
'temp_cell': cell temeprature in C.
References
----------
[Bosco2016] <NAME>, et al., Climate specific thermomechanical fatigue
of flat plate photovoltaic module solder joints, Microelectronics
Reliability (2016), http://dx.doi.org/10.1016/j.microrel.2016.03.024
"""
# Rename the weather data for input to PVLIB.
if np.all([c in weather.columns for c in ['dni', 'dhi', 'ghi', 'temp_air',
'wind_speed', 'year', 'month',
'day', 'hour', 'minute']]):
        # All columns are properly labeled, skip any relabeling.
pass
else:
# Try renaming from NSRDB default values.
weather = weather.rename(
columns={'DNI': 'dni',
'DHI': 'dhi',
'GHI': 'ghi',
'Temperature': 'temp_air',
'Wind Speed': 'wind_speed',
'Year': 'year',
'Month': 'month',
'Day': 'day',
'Hour': 'hour',
'Minute': 'minute'})
df = weather.copy()
# Set location
location = pvlib.location.Location(latitude=info['Latitude'],
longitude=info['Longitude'])
# Add module parameters if some aren't specified.
module_parameters = add_default_module_params(module_parameters)
# #
# start_time = time.time()
# # This is the most time consuming step
# solar_position = location.get_solarposition(weather.index, method='nrel_numpy')
# print( time.time()-start_time)
#
# Ephemeris method is faster and gives very similar results.
solar_position = location.get_solarposition(weather.index,
method='ephemeris')
# Get surface tilt and azimuth
if racking_parameters['racking_type'] == 'fixed_tilt':
surface_tilt = racking_parameters['surface_tilt']
surface_azimuth = racking_parameters['surface_azimuth']
# idealized assumption
elif racking_parameters['racking_type'] == 'single_axis':
# Avoid nan warnings by presetting unphysical zenith angles.
solar_position['apparent_zenith'][
solar_position['apparent_zenith'] > 90] = 90
# Todo: Check appraent_zenith vs. zenith.
single_axis_vals = pvlib.tracking.singleaxis(
solar_position['apparent_zenith'],
solar_position['azimuth'],
axis_tilt=racking_parameters['axis_tilt'],
axis_azimuth=racking_parameters['axis_azimuth'],
max_angle=racking_parameters['max_angle'],
backtrack=racking_parameters['backtrack'],
gcr=racking_parameters['gcr']
)
surface_tilt = single_axis_vals['surface_tilt']
surface_azimuth = single_axis_vals['surface_azimuth']
else:
raise Exception('Racking system not recognized')
# Extraterrestrial radiation
dni_extra = pvlib.irradiance.get_extra_radiation(solar_position.index)
airmass = location.get_airmass(solar_position=solar_position)
# Perez is a good diffuse sky model
total_irrad = pvlib.irradiance.get_total_irradiance(
surface_tilt,
surface_azimuth,
solar_position['zenith'],
solar_position['azimuth'],
weather['dni'].astype('float'),
weather['ghi'].astype('float'),
weather['dhi'].astype('float'),
model='perez',
dni_extra=dni_extra,
airmass=airmass['airmass_relative'],
albedo=racking_parameters['albedo'])
# Add a small irradiance during night time
for k in total_irrad.keys():
total_irrad[k][np.isnan(total_irrad[k])] = 0
total_irrad[k] = total_irrad[k] + nighttime_irradiance_addition
if racking_parameters['racking_type'] == 'fixed_tilt':
aoi = pvlib.irradiance.aoi(surface_tilt, surface_azimuth,
solar_position['zenith'],
solar_position['azimuth'])
elif racking_parameters['racking_type'] == 'single_axis':
aoi = single_axis_vals['aoi']
else:
raise Exception('Racking type not understood')
# aoi = single_axis_vals['aoi']
if (not 'named_model' in thermal_model) or thermal_model[
'named_model'] == 'explicit':
thermal_model_params = {k: thermal_model[k] for k in
['a', 'b', 'deltaT']}
else:
temperature_model_parameters = \
pvlib.temperature.TEMPERATURE_MODEL_PARAMETERS['sapm']
thermal_model_params = temperature_model_parameters[
thermal_model['named_model']]
temperature_cell = pvlib.temperature.sapm_cell(
poa_global=total_irrad['poa_global'],
temp_air=weather['temp_air'],
wind_speed=weather['wind_speed'],
a=thermal_model_params['a'],
b=thermal_model_params['b'],
deltaT=thermal_model_params['deltaT'])
# temps = pvlib.temperature.sapm_cell(total_irrad['poa_global'],
# weather['wind_speed'],
# weather['temp_air'],
# thermal_model_params)
# if thermal_model['thermal_mass']:
# thermal_alpha = np.exp(-(info['interval_in_hours'] * 60) / 270)
#
if thermal_model['open_circuit_rise']:
temperature_cell = weather['temp_air'] + \
(temperature_cell - weather['temp_air']) / (
1 - module_parameters['efficiency'])
    # Spectral loss is typically very small, on the order of a few percent; assume no
# spectral loss for simplicity
spectral_loss = 1
if not 'aoi_model' in module_parameters:
module_parameters['aoi_model'] = 'no_loss'
if not 'FD' in module_parameters:
module_parameters['FD'] = 1
# AOI loss:
if module_parameters['aoi_model'] == 'no_loss':
aoi_loss = 1
elif module_parameters['aoi_model'] == 'ashrae':
aoi_loss = pvlib.iam.ashrae(aoi,
b=module_parameters['ashrae_iam_param'])
else:
raise Exception('aoi_model must be ashrae or no_loss')
# Calculate effective irradiance.
if ('is_bifacial' in module_parameters) and \
(module_parameters['is_bifacial'] == True):
if not 'bifacial_model' in racking_parameters:
warnings.warn("""'bifacial_model' in racking_parameters is not
specified, can be 'simple' or 'pvfactors'. Defaulting to
'simple'.""")
racking_parameters['bifacial_model'] = 'proportional'
if racking_parameters['bifacial_model'] == 'proportional':
effective_irradiance_front = calculate_effective_irradiance(
total_irrad['poa_direct'],
total_irrad['poa_diffuse'],
aoi_loss=aoi_loss,
FD=module_parameters['FD']
)
if not 'backside_irradiance_fraction' in racking_parameters:
raise Exception("""Must specify 'backside_irradiance_fraction' in
racking_parameters for bifacial modeling. """
)
effective_irradiance_back = effective_irradiance_front * \
racking_parameters[
'backside_irradiance_fraction'] * \
module_parameters['bifaciality_factor']
effective_irradiance = effective_irradiance_front + effective_irradiance_back
df['effective_irradiance_front'] = effective_irradiance_front
df['effective_irradiance_back'] = effective_irradiance_back
elif racking_parameters['bifacial_model'] == 'pvfactors':
total_inc_front, total_inc_back, poa_front_absorbed, poa_back_absorbed = pvfactors_timeseries(
solar_position['azimuth'], solar_position['zenith'],
surface_azimuth,
surface_tilt,
racking_parameters['axis_azimuth'],
weather.index, weather['dni'], weather['dhi'],
racking_parameters['gcr'],
racking_parameters['pvrow_height'],
racking_parameters['pvrow_width'],
racking_parameters['albedo'],
n_pvrows=racking_parameters['n_pvrows'],
# fast_mode_pvrow_index=racking_parameters['fast_mode_pvrow_index'],
index_observed_pvrow=racking_parameters['index_observed_pvrow'],
rho_front_pvrow=racking_parameters['rho_front_pvrow'],
rho_back_pvrow=racking_parameters['rho_back_pvrow'],
horizon_band_angle=racking_parameters['horizon_band_angle'],
# run_parallel_calculations=racking_parameters['run_parallel_calculations'],
# n_workers_for_parallel_calcs=racking_parameters['n_workers_for_parallel_calcs']
)
effective_irradiance_front = np.nan_to_num(poa_front_absorbed)
effective_irradiance_back = np.nan_to_num(poa_back_absorbed)
effective_irradiance = effective_irradiance_front + effective_irradiance_back
df['effective_irradiance_front'] = effective_irradiance_front
df['effective_irradiance_back'] = effective_irradiance_back
else:
raise Exception(
"racking_parameters['bifacial_model'] must be either 'proportional' or 'pvfactors'. ")
else:
# Not bifacial, i.e. monofacial module.
effective_irradiance = calculate_effective_irradiance(
total_irrad['poa_direct'],
total_irrad['poa_diffuse'],
aoi_loss=aoi_loss,
FD=module_parameters['FD']
)
v_oc = sapm_voc(effective_irradiance, temperature_cell,
module_parameters)
df['aoi'] = aoi
# df['aoi_loss'] = aoi_loss
df['temp_cell'] = temperature_cell
df['temp_air'] = weather['temp_air']
df['effective_irradiance'] = effective_irradiance
df['v_oc'] = v_oc
df['surface_tilt'] = surface_tilt
df['surface_azimuth'] = surface_azimuth
df['solar_zenith'] = solar_position['apparent_zenith']
df['solar_azimuth'] = solar_position['azimuth']
df['poa_direct'] = total_irrad['poa_direct']
df['poa_diffuse'] = total_irrad['poa_diffuse']
return df
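# Minimal usage sketch (illustrative; the parameter values below are placeholders,
# not recommendations). It assumes `weather` and `info` come from get_weather_data
# above, and that 'named_model' is a valid key of
# pvlib.temperature.TEMPERATURE_MODEL_PARAMETERS['sapm'] for the installed pvlib.
#
#   module_parameters = {'Voco': 48.0, 'Bvoco': -0.14, 'cells_in_series': 72,
#                        'n_diode': 1.2, 'efficiency': 0.19,
#                        'iv_model': 'sapm', 'aoi_model': 'no_loss'}
#   racking_parameters = {'racking_type': 'fixed_tilt', 'surface_tilt': 30,
#                         'surface_azimuth': 180, 'albedo': 0.25}
#   thermal_model = {'named_model': 'open_rack_glass_glass',
#                    'open_circuit_rise': True}
#   df_sim = simulate_system(weather, info, module_parameters,
#                            racking_parameters, thermal_model)
#   max_voc = df_sim['v_oc'].max()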
#
# def pvfactors_timeseries(
# solar_azimuth, solar_zenith, surface_azimuth, surface_tilt,
# axis_azimuth,
# timestamps, dni, dhi, gcr, pvrow_height, pvrow_width, albedo,
# n_pvrows=3,fast_mode_pvrow_index=2,index_observed_pvrow=1,
# rho_front_pvrow=0.03, rho_back_pvrow=0.05,
# horizon_band_angle=15.,
# run_parallel_calculations=True, n_workers_for_parallel_calcs=2):
# """
# Calculate front and back surface plane-of-array irradiance on
# a fixed tilt or single-axis tracker PV array configuration, and using
# the open-source "pvfactors" package.
# Please refer to pvfactors online documentation for more details:
# https://sunpower.github.io/pvfactors/
#
# Parameters
# ----------
# solar_azimuth: numeric
# Sun's azimuth angles using pvlib's azimuth convention (deg)
# solar_zenith: numeric
# Sun's zenith angles (deg)
# surface_azimuth: numeric
# Azimuth angle of the front surface of the PV modules, using pvlib's
# convention (deg)
# surface_tilt: numeric
# Tilt angle of the PV modules, going from 0 to 180 (deg)
# axis_azimuth: float
# Azimuth angle of the rotation axis of the PV modules, using pvlib's
# convention (deg). This is supposed to be fixed for all timestamps.
# timestamps: datetime or DatetimeIndex
# List of simulation timestamps
# dni: numeric
# Direct normal irradiance (W/m2)
# dhi: numeric
# Diffuse horizontal irradiance (W/m2)
# gcr: float
# Ground coverage ratio of the pv array
# pvrow_height: float
# Height of the pv rows, measured at their center (m)
# pvrow_width: float
# Width of the pv rows in the considered 2D plane (m)
# albedo: float
# Ground albedo
# n_pvrows: int, default 3
# Number of PV rows to consider in the PV array
# fast_mode_pvrow_index: int
# In fast mode, the user will be able to calculate rapidly (but with
# additional approximations) the incident irradiance on the back side
# of one PV row in the PV array, and the index of that PV row needs to
# be passed as a keyword argument to fast_mode_pvrow_index
# index_observed_pvrow: int, default 1
# Index of the PV row whose incident irradiance will be returned. Indices
# of PV rows go from 0 to n_pvrows-1.
# rho_front_pvrow: float, default 0.03
# Front surface reflectivity of PV rows
# rho_back_pvrow: float, default 0.05
# Back surface reflectivity of PV rows
# horizon_band_angle: float, default 15
# Elevation angle of the sky dome's diffuse horizon band (deg)
# run_parallel_calculations: bool, default True
# pvfactors is capable of using multiprocessing. Use this flag to decide
# to run calculations in parallel (recommended) or not.
# n_workers_for_parallel_calcs: int, default 2
# Number of workers to use in the case of parallel calculations. The
# '-1' value will lead to using a value equal to the number
# of CPU's on the machine running the model.
#
# Returns
# -------
# front_poa_irradiance: numeric
# Calculated incident irradiance on the front surface of the PV modules
# (W/m2)
# back_poa_irradiance: numeric
# Calculated incident irradiance on the back surface of the PV modules
# (W/m2)
# df_registries: pandas DataFrame
# DataFrame containing detailed outputs of the simulation; for
# instance the shapely geometries, the irradiance components incident on
# all surfaces of the PV array (for all timestamps), etc.
# In the pvfactors documentation, this is refered to as the "surface
# registry".
#
# References
# ----------
# .. [1] Anoma, <NAME>, et al. "View Factor Model and Validation for
# Bifacial PV and Diffuse Shade on Single-Axis Trackers." 44th IEEE
# Photovoltaic Specialist Conference. 2017.
# """
#
# # Convert pandas Series inputs (and some lists) to numpy arrays
# if isinstance(solar_azimuth, pd.Series):
# solar_azimuth = solar_azimuth.values
# elif isinstance(solar_azimuth, list):
# solar_azimuth = np.array(solar_azimuth)
# if isinstance(solar_zenith, pd.Series):
# solar_zenith = solar_zenith.values
# if isinstance(surface_azimuth, pd.Series):
# surface_azimuth = surface_azimuth.values
# elif isinstance(surface_azimuth, list):
# surface_azimuth = np.array(surface_azimuth)
# if isinstance(surface_tilt, pd.Series):
# surface_tilt = surface_tilt.values
# if isinstance(dni, pd.Series):
# dni = dni.values
# if isinstance(dhi, pd.Series):
# dhi = dhi.values
# if isinstance(solar_azimuth, list):
# solar_azimuth = np.array(solar_azimuth)
#
# # Import pvfactors functions for timeseries calculations.
# from pvfactors.run import (run_timeseries_engine,
# run_parallel_engine)
#
# # Build up pv array configuration parameters
# pvarray_parameters = {
# 'n_pvrows': n_pvrows,
# 'axis_azimuth': axis_azimuth,
# 'pvrow_height': pvrow_height,
# 'pvrow_width': pvrow_width,
# 'gcr': gcr,
# 'rho_front_pvrow': rho_front_pvrow,
# 'rho_back_pvrow': rho_back_pvrow,
# 'horizon_band_angle': horizon_band_angle
# }
#
# # Run pvfactors calculations: either in parallel or serially
# if run_parallel_calculations:
#
# # report_builder = ReportBuilder(fast_mode_pvrow_index)
#
# report = run_parallel_engine(
# ReportBuilder(fast_mode_pvrow_index), pvarray_parameters,
# timestamps, dni, dhi,
# solar_zenith, solar_azimuth,
# surface_tilt, surface_azimuth,
# albedo, n_processes=n_workers_for_parallel_calcs,
# fast_mode_pvrow_index=fast_mode_pvrow_index
# )
# else:
# report = run_timeseries_engine(
# PVFactorsReportBuilder.build, pvarray_parameters,
# timestamps, dni, dhi,
# solar_zenith, solar_azimuth,
# surface_tilt, surface_azimuth,
# albedo)
#
# print(report)
# # Turn report into dataframe
# df_report = pd.DataFrame(report, index=timestamps)
#
# return df_report.total_inc_front, df_report.total_inc_back
#
#
# class ReportBuilder(object):
# """A class is required to build reports when running calculations with
# multiprocessing because of python constraints"""
#
# def __init__(self, fast_mode_pvrow_index):
# """Create report builder object for fast mode simulation.
#
# Parameters
# ----------
# fast_mode_pvrow_index : int
# Index of PV row whose back side irradiance we want to report
# """
# self.fast_mode_pvrow_index = fast_mode_pvrow_index
#
# def build(self, report, pvarray):
# # Initialize the report as a dictionary
# if report is None:
# report = {'total_inc_back': []}
# # Add elements to the report
# if pvarray is not None:
# pvrow = pvarray.pvrows[self.fast_mode_pvrow_index]
# report['total_inc_back'].append(
# pvrow.back.get_param_weighted('qinc'))
# else:
# # No calculation was performed, because sun was down
# report['total_inc_back'].append(np.nan)
#
# return report
#
# @staticmethod
# def merge(reports):
# """Works for dictionary reports"""
# report = reports[0]
# # Merge other reports
# keys_report = list(reports[0].keys())
# for other_report in reports[1:]:
# for key in keys_report:
# report[key] += other_report[key]
# return report
#
# class PVFactorsReportBuilder(object):
# """In pvfactors, a class is required to build reports when running
# calculations with multiprocessing because of python constraints"""
#
# def __init__(self, fast_mode_pvrow_index):
# """Create report builder object for fast mode simulation.
#
# Parameters
# ----------
# fast_mode_pvrow_index : int
# Index of PV row whose back side irradiance we want to report
# """
# self.fast_mode_pvrow_index = fast_mode_pvrow_index
#
# # @staticmethod
# def build(self,report, pvarray):
# """Reports will have total incident irradiance on front and
# back surface of center pvrow (index=1)"""
# # Initialize the report as a dictionary
# if report is None:
# list_keys = ['total_inc_back', 'total_inc_front']
# report = {key: [] for key in list_keys}
# # Add elements to the report
# if pvarray is not None:
# # pvrow = pvarray.pvrows[1] # use center pvrow
# pvrow = pvarray.pvrows[self.fast_mode_pvrow_index]
# print(pvrow.front)
# report['total_inc_back'].append(
# pvrow.back.get_param_weighted('qinc'))
# report['total_inc_front'].append(
# pvrow.front.get_param_weighted('qinc'))
# else:
# # No calculation is performed when the sun is down
# report['total_inc_back'].append(np.nan)
# report['total_inc_front'].append(np.nan)
#
# return report
#
# @staticmethod
# def merge(reports):
# """Works for dictionary reports"""
# report = reports[0]
# # Merge only if more than 1 report
# if len(reports) > 1:
# keys_report = list(reports[0].keys())
# for other_report in reports[1:]:
# if other_report is not None:
# for key in keys_report:
# report[key] += other_report[key]
# return report
#
def add_default_module_params(module_parameters):
"""
Adds default fields to the module_parameters dictionary.
Parameters
----------
module_parameters : dict
Examples
--------
>> module = add_default_module_params(module)
Returns
-------
module_parameters : dict
Same as input, except default values are added for the following fields:
'Mbvoc' : 0
'FD' : 1
'iv_model' : 'sapm'
'aoi_model' : 'no_loss'
"""
if not 'Mbvoc' in module_parameters:
module_parameters['Mbvoc'] = 0
if not 'FD' in module_parameters:
module_parameters['FD'] = 1
if not 'iv_model' in module_parameters:
module_parameters['iv_model'] = 'sapm'
if not 'aoi_model' in module_parameters:
module_parameters['aoi_model'] = 'no_loss'
return module_parameters
def make_voc_summary(df, info, module_parameters,
string_design_voltage=1500,
safety_factor=0.023,
ashrae='local_load'):
"""
Calculate maximum Voc expected using four relevant standards. See
documentation for a description of the standards.
Parameters
----------
df : dataframe
Dataframe containing fields: 'v_oc', 'ghi', 'temp_air'
info : dict
Dictionary containing fields 'lat' and 'lon'. These are used to
calculate the ASHRAE standards.
module_parameters : dict
Dictionary containing module parameters. The module paramaters are
used in a direct call to the function calculate_voc.
string_design_voltage : float
Maximum allowable string voltage for the design, in V. Typically 600
V, 1200 V or 1500 V
safety_factor : float
safety factor for calculating string length as a fraction of max Voc.
        An example value would be 0.023, corresponding to a safety factor of
2.3%. Safety factors are only used for 690.7(A)(1) standards.
Returns
-------
voc_summary : dataframe
Dataframe containing fields:
'max_module_voltage' - the maximum module voltage (not including
safety factor).
'string_design_voltage' - Maximum allowable string voltage for the
design, in V. Typically 600 V, 1200 V or 1500 V
'safety_factor' - safety factor for calculating string length as a
        fraction of max Voc. An example value would be 0.023, corresponding
to a safety factor of 2.3%. Safety factors are only used for 690.7(A)(1)
standards.
'string_length' - Longest acceptable string length.
        'Cell Temperature' - cell temperature assumed for each standard's calculation, in C.
"""
voc_summary = pd.DataFrame(
columns=['Conditions', 'max_module_voltage', 'string_design_voltage',
'safety_factor',
'string_length',
'Cell Temperature', 'POA Irradiance', 'long_note'],
index=['690.7(A)(3)-P99.5',
'690.7(A)(3)-P100',
'690.7(A)(1)-DAY',
'690.7(A)(1)-NSRDB',
'690.7(A)(1)-ASHRAE',
'690.7(A)(2)-ASHRAE'])
mean_yearly_min_temp = calculate_mean_yearly_min_temp(df.index,
df['temp_air'])
if type(ashrae) == type(pd.DataFrame()):
ashrae_loc = vocmax.ashrae_get_design_conditions_at_loc(
info['Latitude'], info['Longitude'], ashrae)
lowest_expected_temperature_ashrae = ashrae_loc[
'Extreme_Annual_Mean_Min_DB']
else:
ashrae_available = ashrae_is_design_conditions_available()
if ashrae_available:
ashrae = ashrae_get_design_conditions()
ashrae_loc = vocmax.ashrae_get_design_conditions_at_loc(
info['Latitude'], info['Longitude'], ashrae)
lowest_expected_temperature_ashrae = ashrae_loc[
'Extreme_Annual_Mean_Min_DB']
else:
lowest_expected_temperature_ashrae = np.nan
# mean_yearly_min_temp_ashrae =
mean_yearly_min_day_temp = calculate_mean_yearly_min_temp(
df.index[df['ghi'] > 150],
df['temp_air'][df['ghi'] > 150])
voc_summary['safety_factor'] = 0
for f in ['690.7(A)(3)-P99.5', '690.7(A)(3)-P100']:
voc_summary.loc[f, 'safety_factor'] = safety_factor
# Calculate some standard voc values.
voc_values = {
'690.7(A)(3)-P99.5': np.percentile( | np.array(df['v_oc']) | numpy.array |
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/core/data_conversion.ipynb (unless otherwise specified).
__all__ = ['DataConverter', 'NoConverter', 'GenericConverter', 'StandardConverter', 'PandasConverter',
'Window2Dto3Dconverter', 'data_converter_factory']
# Cell
import abc
import pandas as pd
import numpy as np
import warnings
#block-types
from ..config import bt_defaults as dflt
from ..utils.utils import set_logger
# Cell
class DataConverter ():
"""
Convert input and output data format.
    This class makes it possible to convert the format of the data before
    fitting and before transforming, and to revert the changes after performing
    these operations. This decouples the implementation of a particular
    component from the remaining components in the pipeline, making it more
    reusable across different pipelines.
"""
def __init__ (self, logger=None, verbose: int=dflt.verbose, inplace: bool=True,
convert_before=None, convert_before_transforming=None, convert_before_fitting=None,
convert_after=None, convert_after_transforming=None, convert_after_fitting=None,
convert_before_transforming_after_fit=None,
convert_after_transforming_after_fit=None,
unpack_single_tuple_for_fitting=True, unpack_single_tuple_for_transforming=True,
unpack_single_tuple=None, unpack_single_tuple_for_result_func=False,
ensure_tuple=True, **kwargs):
"""
Initialize common attributes and fields, in particular the logger.
Parameters
----------
logger : logging.Logger or None, optional
Logger used to write messages
verbose : int, optional
Verbosity, 0: warning or critical, 1: info, 2: debug.
"""
# logger used to display messages
if logger is None:
self.logger = set_logger ('dsblocks', verbose=verbose)
else:
self.logger = logger
self.inplace = inplace
self._set_convert_from_functions (
convert_before=convert_before,
convert_before_transforming=convert_before_transforming,
convert_before_fitting=convert_before_fitting,
convert_after=convert_after,
convert_after_transforming=convert_after_transforming,
convert_after_fitting=convert_after_fitting,
convert_before_transforming_after_fit=convert_before_transforming_after_fit,
convert_after_transforming_after_fit=convert_after_transforming_after_fit)
unpack_single_tuple_for_fitting = (
unpack_single_tuple if unpack_single_tuple is not None
else unpack_single_tuple_for_fitting)
unpack_single_tuple_for_transforming = (
unpack_single_tuple if unpack_single_tuple is not None
else unpack_single_tuple_for_transforming)
self.convert_single_tuple_for_fitting = (
self.convert_single_tuple if unpack_single_tuple_for_fitting
else self.do_not_convert_single_tuple)
self.convert_single_tuple_for_transforming = (
self.convert_single_tuple if unpack_single_tuple_for_transforming
else self.do_not_convert_single_tuple)
self.convert_single_tuple_for_result_func = (
self.convert_single_tuple if unpack_single_tuple_for_result_func
else self.do_not_convert_single_tuple)
self.convert_no_tuple = (self.convert_no_tuple if ensure_tuple
else self.do_not_convert_no_tuple)
def convert_single_tuple (self, X):
return X[0] if (len(X)==1 and type(X[0]) is tuple) else X
def do_not_convert_single_tuple (self, X):
return X
def convert_no_tuple (self, X):
X = X if type (X) is tuple else (X,)
return X
def do_not_convert_no_tuple (self, X):
return X
def convert_varargs_to_x_y (self, X):
assert len(X)==1 or len(X)==2
X, y = X if len(X)==2 else (X[0], None)
return X, y
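    # Behaviour sketch (illustrative) for the helpers above:
    #   convert_single_tuple(((X, y),))  -> (X, y)   # unpacks a lone nested tuple
    #   convert_single_tuple((X, y))     -> (X, y)   # otherwise left untouched
    #   convert_no_tuple(X)              -> (X,)     # ensures callers always see a tuple
    #   convert_varargs_to_x_y((X,))     -> (X, None)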
def convert_before_fitting (self, *X):
"""
Convert incoming data before running fit method.
Parameters
----------
X : data (N observations x D dimensions)
data used for fitting model parameters
Returns
-------
X : data (N observations x D dimensions)
data with transformed format but same content
"""
return X
def convert_after_fitting (self, *X):
"""
Convert data after running fit method.
Calling this method is only required when convert_before_fitting
changes X "in place", instead of changing a copy of X. This might
be more efficient sometimes, and we have convert_after_fitting to
revert the previous change.
Parameters
----------
X : data (N observations x D dimensions)
data used for fitting model parameters
Returns
-------
X : data (N observations x D dimensions)
data with transformed format but same content
"""
return X
def convert_before_transforming (self, *X, **kwargs):
"""
Convert data before running transform method.
Parameters
----------
X : data (N observations x D dimensions)
data used to be transformed
Returns
-------
X : data (N observations x D dimensions)
data with transformed format but same content
"""
return X
def convert_after_transforming (self, result, **kwargs):
"""
Convert result obtained after by transform method.
Parameters
----------
result : data (N' observations x D' dimensions)
result obtained by transformed method
Returns
-------
result : data (N' observations x D' dimensions)
result with transformed format but same content
"""
return result
def convert_before_fit_apply (self, *X, sequential_fit_apply=False, **kwargs):
#return self.convert_before_fitting (*X)
X_original = copy.deepcopy (X) if self.inplace else X
_ = self.convert_before_transforming (
*X_original, fit_apply=True, sequential_fit_apply=sequential_fit_apply, **kwargs)
X = self.convert_before_fitting (*X)
if self.inplace:
self.X = X
return X
def convert_after_fit_apply (self, result, sequential_fit_apply=False, **kwargs):
#return self.convert_after_transforming (result, **kwargs)
if self.inplace:
_ = self.convert_after_fitting (*self.X)
self.X = None
return self.convert_after_transforming (
result, fit_apply=True, sequential_fit_apply=sequential_fit_apply, **kwargs)
## methods based on passed-in functions
def _set_convert_from_functions (self, convert_before=None, convert_before_transforming=None,
convert_before_fitting=None, convert_after=None,
convert_after_transforming=None, convert_after_fitting=None,
convert_before_transforming_after_fit=None,
convert_after_transforming_after_fit=None):
# functions
if convert_before is not None:
if convert_before_transforming is None: convert_before_transforming = convert_before
if convert_before_fitting is None:
self._convert_before_fitting = convert_before
self.convert_before_fitting = self.convert_before_fitting_from_function
#if convert_before_fit_apply is None: convert_before_fit_apply=convert_before
if convert_before_transforming is not None:
self._convert_before_transforming = convert_before_transforming
self.convert_before_transforming = self.convert_before_transforming_from_function
self._convert_before_transforming_after_fit = (
self._convert_before_transforming if convert_before_transforming_after_fit is None
else convert_before_transforming_after_fit)
if convert_before_fitting is not None:
self._convert_before_fitting = convert_before_fitting
self.convert_before_fitting = self.convert_before_fitting_from_function
if convert_after is not None:
if convert_after_transforming is None: convert_after_transforming = convert_after
if convert_after_fitting is None: convert_after_fitting = convert_after
if convert_after_transforming is not None:
self._convert_after_transforming = convert_after_transforming
self.convert_after_transforming = self.convert_after_transforming_from_function
self._convert_after_transforming_after_fit = (
self._convert_after_transforming if convert_after_transforming_after_fit is None
else convert_after_transforming_after_fit)
if convert_after_fitting is not None:
self._convert_after_fitting = convert_after_fitting
self.convert_after_fitting = self.convert_after_fitting_from_function
def convert_before_fitting_from_function (self, *X):
return self._convert_before_fitting (*X)
def convert_after_fitting_from_function (self, *X):
return self._convert_after_fitting (*X)
def convert_before_transforming_from_function (self, *X, fit_apply=False,
sequential_fit_apply=False, **kwargs):
if fit_apply or sequential_fit_apply:
return self._convert_before_transforming_after_fit (*X, **kwargs)
else:
return self._convert_before_transforming (*X, **kwargs)
def convert_after_transforming_from_function (self, result, fit_apply=False,
sequential_fit_apply=False, **kwargs):
if fit_apply or sequential_fit_apply:
return self._convert_after_transforming_after_fit (result, **kwargs)
else:
return self._convert_after_transforming (result, **kwargs)
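# Illustrative subclass sketch (an assumption about typical usage, not taken from
# the library's docs): a converter only needs to override the hooks it cares
# about, e.g. turning DataFrame-like inputs into numpy arrays before transforming:
#
# class ToNumpyConverter (DataConverter):
#     def convert_before_transforming (self, *X, **kwargs):
#         # keep the tuple structure, convert each DataFrame-like element
#         return tuple (x.values if hasattr (x, 'values') else x for x in X)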
# Cell
class NoConverter (DataConverter):
"""Performs no conversion."""
def __init__ (self, **kwargs):
super().__init__(inplace=False, **kwargs)
# Cell
class GenericConverter (DataConverter):
"""
Supply X, y to `fit`, and provide only X to `transform` / `predict` / `apply`
Cases:
- Usual case:
(X, y) are provided to fit.
Only X is provided to transform.
Only X is returned by transform.
- Transform uses labels:
(X, y) are provided to fit.
(X, y) are provided to transform.
(X, y) are returned by transform.
- separate_labels = False, or no_labels=True
X is provided to fit
X is provided to transform
X is returned by transform
"""
error_warning_message = 'Did not find y as separate argument, but no_labels is False'
def __init__ (self, transform_uses_labels=False, separate_labels=True, no_labels=False,
labels_returned_by_transform=None, labels_to_be_returned_by_transform=None,
labels_included_without_fitting=False,
raise_error_if_no_label_inconsistency=False,
raise_warning_if_no_label_inconsistency=False,
inplace=False, **kwargs):
"""
Initialize attributes and fields.
Parameters
----------
transform_uses_labels : bool, optional
If True, the `transform` method receives both `X` and `y`.
If False, the `transform` method only receives `X`.
"""
super().__init__(inplace=False, **kwargs)
# whether the _transform method receives a DataFrame that includes the labels, or it doesn't
self.separate_labels = separate_labels
self.no_labels = (not separate_labels) or no_labels
self.transform_uses_labels = transform_uses_labels and not self.no_labels
self.stored_y = False
self.labels_returned_by_transform = (labels_returned_by_transform
if labels_returned_by_transform is not None
else self.transform_uses_labels)
self.labels_included_without_fitting = labels_included_without_fitting
self.labels_to_be_returned_by_transform = (
labels_to_be_returned_by_transform if labels_to_be_returned_by_transform is not None
else (self.transform_uses_labels or self.labels_included_without_fitting))
self.raise_error_if_no_label_inconsistency = raise_error_if_no_label_inconsistency
self.raise_warning_if_no_label_inconsistency = raise_warning_if_no_label_inconsistency
def convert_before_transforming (self, *X, fit_apply=False, sequential_fit_apply=False, **kwargs):
"""
By default, remove labels from incoming input.
"""
self.stored_y = False
fit_apply = fit_apply or sequential_fit_apply
if not fit_apply and not self.labels_included_without_fitting:
return X
if not(self.no_labels or self.transform_uses_labels or len(X)<=1):
self.stored_y = True
*X, self.y = X
#if len (X) > 1: X = tuple(X)
X = tuple(X)
return X
if (fit_apply or self.transform_uses_labels) and len(X)>1:
self.stored_y = True
*_, self.y = X
if not self.no_labels and not self.stored_y:
if self.raise_error_if_no_label_inconsistency:
raise TypeError (self.error_warning_message)
elif self.raise_warning_if_no_label_inconsistency:
warnings.warn (self.error_warning_message)
print (self.error_warning_message)
return X
def convert_after_transforming (self, result, fit_apply=False, sequential_fit_apply=False, **kwargs):
"""
Convert the result produced by `transform`to DataFrame format.
If the input to `transform` was in DataFrame format, the `result`
given by `transform` is converted to DataFrame if it is not
produced in this format. Furthermore, if the `label` column was
in the input to `transform` and it is not in the output given
by `transform`, it is appended to the result.
"""
fit_apply = fit_apply or sequential_fit_apply
if ((not fit_apply or not self.stored_y) and
(not self.stored_y or not self.labels_to_be_returned_by_transform or self.no_labels
or (self.labels_returned_by_transform and (type(result) is tuple) and len(result)>1))):
return result
elif type(result) is tuple:
result = result + (self.y, )
else:
result = (result, self.y)
self.stored_y = False
self.y = None
return result
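# Illustrative sketch (an assumption about typical usage): during fit_apply,
# GenericConverter strips the labels before transforming and appends them back
# afterwards, e.g.
#
#     conv = GenericConverter ()
#     (X_only,) = conv.convert_before_transforming (X, y, fit_apply=True)   # y stored internally
#     out = conv.convert_after_transforming (X_only, fit_apply=True)        # out == (X_only, y)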
# Cell
class StandardConverter (DataConverter):
"""Convert input and output data format.
Assumes that, when fitting, the data is introduced either as a single element or
as a tuple with more than one element."""
def __init__ (self, inplace=False, unpack_single_tuple=False,
unpack_single_tuple_for_result_func=True, **kwargs):
"""
Initialize common attributes and fields, in particular the logger.
"""
# logger used to display messages
        super().__init__(inplace=inplace, unpack_single_tuple=unpack_single_tuple,
                         unpack_single_tuple_for_result_func=unpack_single_tuple_for_result_func,
                         **kwargs)
def convert_before_transforming (self, *X, fit_apply=False, sequential_fit_apply=False, **kwargs):
"""
Convert data before running transform method.
"""
if ((fit_apply or sequential_fit_apply) and len(X)==2):
X, self.y = X
else:
self.y = None
return X
def convert_after_transforming (self, result, sequential_fit_apply=False, **kwargs):
"""
        Convert the result obtained by the transform method.
"""
if sequential_fit_apply and self.y is not None:
result = (result, self.y)
self.y = None
return result
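# Illustrative sketch (assumption): in a sequential fit_apply, StandardConverter
# detaches the labels before transforming and re-attaches them afterwards, e.g.
#
#     conv = StandardConverter ()
#     X_only = conv.convert_before_transforming (X, y, sequential_fit_apply=True)
#     out = conv.convert_after_transforming (X_transformed, sequential_fit_apply=True)
#     # out == (X_transformed, y)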
# Cell
class PandasConverter (DataConverter):
"""
Convert DataFrame to numpy array and back, if needed.
By default, this class assumes the following:
- When calling the fit method, the data is received
as a DataFrame. This DataFrame contains not only the
data to be used for fitting our model, but also the
ground-truth labels. The `PandasConverter` takes only
the data needed for fitting the model, and puts it
into a matrix `X`, and then takes the labels and puts
them into a separate vector `y`. While all this is done
by default, the `PandasConverter` also allows other
possibilities: receiving the data and the labels separately
    in `X` and `y`, in which case no action is needed, or not
    separating the data and the labels (if the flag `separate_labels` is False),
    in which case the matrix `X` will contain both data and labels.
    It also allows receiving numpy arrays instead of DataFrames,
in which case the data format is preserved.
- When calling the `transform` method, the `PandasConverter`
removes by default the labels from the incoming DataFrame,
and then puts them back after performing the transformation.
    This behaviour can change if we set `transform_uses_labels=True`.
In this case, the labels are kept in the matrix `X` so that
they can be used during the transformation. This is done in
particular by one type of component called `SamplingComponent`,
defined in `core.component_types`. This is useful for
components that do some sort of under-sampling or over-sampling,
changing the number of observations. When this occurs, the
labels need to be adjusted accordingly, so that the `transform`
    method modifies both the data and the labels, both of which are
contained in the output matrix `X`.
The default `DataConverter` used in the current implementation is the
`PandasConverter`.
#### Note on generic use of metadata (to be implemented)
In general, our DataFrames behave like a single-table in-memory DataBases
from which we can take the necessary data and metadata to perform any
operation needed in our pipeline. Although currently we only consider
groundtruth labels as metadata, in the future we plan to allow any other
metadata indicated by configuration. This includes the `chiller_id`, which
might be needed by some of the components, to differentiate between the data of
different chillers, for data-sets with more than one chiller. Currently
our dataset contains a single chiller, and this type of metadata is not needed.
Regardless of the metadata being used, the `PandasConverter` takes only the data
needed for fitting the model, puts it into a matrix `X`, and then takes the
labels and puts them into a separate vector `y`. The rest of the metadata is
discarded unless the component needs it for some purpose, in which case this
will be indicated by a parameter called something like `metadata`, which contains
the list of columns in the DataFrame which contain the rest of metadata.
"""
def __init__ (self, transform_uses_labels=False, transformed_index=None, transformed_columns=None,
separate_labels=True, inplace=False, metadata=None, **kwargs):
"""
Initialize attributes and fields.
Parameters
----------
transform_uses_labels : bool, optional
If True, the `transform` method receives as input data `X` a DataFrame where
one of the columns is `label`, containing the ground-truth labels. This allows
the transform method to modify the number of observations,
changing the number of rows in the data and in the labels.
See `SamplingComponent` class in `dsblocks.core.component_types`.
            If False, the input data `X` only contains the data consumed by the
            transform method, without ground-truth labels.
transformed_index : array-like or None, optional
Used after transforming the data. If the result of the transformation is
a numpy array, two things can happen: 1) if the number of rows of this array
is the same as the number of rows of the input DataFrame, then we convert
the array to a DataFrame with the same index as the original; 2) if the number
of rows is not the same, the index used for the new DataFrame is
`transformed_index` if provided, or 0..N-1 (where N=number of rows) if not
provided.
transformed_columns : array-like or None, optional
Used after transforming the data. If the result of the transformation is
a numpy array, two things can happen: 1) if the number of columns of this array
is the same as the number of columns of the input DataFrame, then we convert
the array to a DataFrame with the same columns as the original; 2) if the number
of columns is not the same, the columns used for the new DataFrame is
`transformed_columns` if provided, or 0..D-1 (where D=number of columns) if not
provided.
separate_labels : bool, optional
Used before calling the fit method. If separate_labels=True (default value),
the `fit` method receives the data and labels separately in `X` and `y`
respectively. If separate_labels=False, the `fit` method receives both the
data and the labels in the same input `X`, where the labels are in a
column of `X` called `label` (TODO: make this configurable). This last
option is used by the `Pipeline` class, and its rationale is provided in
the description of that class.
"""
super().__init__(inplace=inplace, **kwargs)
# whether the _transform method receives a DataFrame that includes the labels, or it doesn't
self.transform_uses_labels = transform_uses_labels
# configuration for converting the transformed data into a DataFrame
self.transformed_index = transformed_index
self.transformed_columns = transformed_columns
# whether the _fit method receives a DataFrame that includes the labels, or the labels are placed separately in y
self.separate_labels = separate_labels
self.metadata=metadata
def convert_before_fitting (self, *X):
"""
By default, convert DataFrame X to numpy arrays X and y
The most common use of this method is:
- When calling the fit method, the data is received
as a DataFrame.
- This DataFrame contains not only the data to be
used for fitting our model, but also the
ground-truth labels. This method takes only
the data needed for fitting the model, and puts it
into a matrix `X`, and then takes the labels and puts
them into a separate vector `y`.
Other possibilities are:
- If the data and the labels are separated in `X` and `y`
(i.e., X does not include labels), no action is performed.
- If `self.separate_labels` is False, the data and the labels
are not separated, in which case the data `X`
passed to the fit method will contain both data and labels.
        - It also allows receiving numpy arrays instead of DataFrames,
in which case the data format is preserved.
"""
X, y = self.convert_varargs_to_x_y (X)
if self.separate_labels and (type(X) is pd.DataFrame) and ('label' in X.columns):
if y is None:
y = X['label']
else:
assert (y==X['label']).all(), "discrepancy between y and X['label']"
X = X.drop(columns='label')
self.restore_label_fitting = True
self.y_fitting = y
else:
self.restore_label_fitting = False
if self.metadata is not None:
self.df = X[self.metadata]
X = X.drop(columns=self.metadata)
return X, y
def convert_after_fitting (self, *X):
"""Do nothing. Return same data received."""
return X
def convert_before_transforming (self, X, new_columns=None, **kwargs):
"""
By default, remove labels from incoming DataFrame.
        This method removes the labels from the incoming DataFrame,
        and then puts them back after performing the transformation.
        This behaviour can change if we set `self.transform_uses_labels=True`.
In this case, the labels are kept in the matrix `X` so that they can be
used during the transformation. This is done in particular by one type of
component called `SamplingComponent`, defined in `core.component_types`.
This is useful for components that do some sort of under-sampling or
over-sampling, changing the number of observations. When this occurs,
the labels need to be adjusted accordingly, so that the `transform` method
        modifies both the data and the labels, both of which are contained in the output
matrix `X`.
"""
if new_columns is None:
new_columns = self.transformed_columns
self.new_columns = new_columns
if (type(X) is pd.DataFrame) and ('label' in X.columns) and (not self.transform_uses_labels):
y = X['label']
X = X.drop(columns='label')
self.restore_label_transform = True
self.y_transform = y
else:
self.restore_label_transform = False
if self.metadata is not None:
self.df = X[self.metadata]
X = X.drop(columns=self.metadata)
self.type_X = type(X)
if self.type_X is pd.DataFrame:
self.X_shape = X.shape
self.X_index = X.index
self.X_columns = X.columns
if 'label' in self.X_columns:
self.X_label = X['label']
return X
def convert_after_transforming (self, result, **kwargs):
"""
        Convert the result produced by `transform` to DataFrame format.
If the input to `transform` was in DataFrame format, the `result`
given by `transform` is converted to DataFrame if it is not
produced in this format. Furthermore, if the `label` column was
in the input to `transform` and it is not in the output given
by `transform`, it is appended to the result.
"""
result = self.convert_to_dataframe (result)
if self.restore_label_transform:
if type(result) is pd.DataFrame:
if 'label' in result.columns:
self.logger.warning ('label already part of result')
result['label'] = self.y_transform
else:
self.logger.warning ('result is not DataFrame')
if self.metadata is not None:
result[self.metadata] = self.df
del self.df
return result
def convert_to_dataframe (self, result):
"""Convert the `result` produced by `transform`to DataFrame format."""
if self.type_X is pd.DataFrame:
if type(result) is np.ndarray or type(result) is pd.Series:
if result.shape[0] == self.X_shape[0]:
index = self.X_index
else:
                    index = self.transformed_index if (self.transformed_index is not None) else range(result.shape[0])
if (result.ndim > 1) and (result.shape[1] == self.X_shape[1]):
columns = self.X_columns
else:
columns = self.new_columns if (self.new_columns is not None) else range(result.shape[1]) if (result.ndim > 1) else [0]
result = pd.DataFrame (result, index=index, columns=columns)
elif (type(result) is pd.DataFrame and self.new_columns is not None and
result.shape[1]==len(self.new_columns) and not (result.columns==self.new_columns).all()):
result.columns = self.new_columns
if type(result) is pd.DataFrame:
if ('label' in self.X_columns) and ('label' not in result.columns):
self.logger.info ('label column not found in result, but found in input DataFrame')
result['label'] = self.X_label
return result
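# Illustrative usage sketch (assumption: the ground-truth column is literally
# named 'label', as the code above requires):
#
#     df = pd.DataFrame ({'a': [1., 2.], 'b': [3., 4.], 'label': [0, 1]})
#     conv = PandasConverter ()
#     X, y = conv.convert_before_fitting (df)         # X drops 'label', y holds it
#     X2 = conv.convert_before_transforming (df)      # 'label' removed and stored
#     out = conv.convert_after_transforming (X2.values * 2.)
#     # out is a DataFrame with the original index/columns and 'label' restored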
# Cell
class Window2Dto3Dconverter (DataConverter):
"""Convert sequence of windows from WindowGenerator's 2D format to 3D.
Given a 2D Dataframe of size N x (W*D), where N=number of windows,
D=number of variables (dimensions), and W=size of windows, converts this
to a numpy array of N x D x W. Note that the order of the elements is
transposed: for each window, the has first the elements of a window in
one dimension, then the elements in the second dimension, etc. This
is transposed in the output to have the second and third axis be D and W
respectively.
"""
def __init__ (self, sequence_length: int, data_converter: DataConverter = None, **kwargs):
"""
Initialize common attributes and fields.
Parameters
----------
sequence_length : int
Size of each window.
data_converter : DataConverter, optional
DataConverter that will transform the input data to a 2D DataFrame of
size N x (D*W), if it is not already in this format. PandasConverter
is used by default.
"""
self.sequence_length = sequence_length
if data_converter is None:
self.data_converter = PandasConverter (**kwargs)
else:
self.data_converter = data_converter
super ().__init__ (inplace=self.data_converter.inplace, **kwargs)
def convert_before_fitting (self, X, y=None):
"""
Convert incoming data before running fit method.
Parameters
----------
X : data (N observations x D dimensions)
data used for fitting model parameters
y : labels (N observations), optional
One dimensional array with N groundtruth labels.
Returns
-------
X : data (N observations x D dimensions)
data with transformed format but same content
y : labels (N observations)
labels with transformed format but same content
"""
X, y = self.data_converter.convert_before_fitting (X, y)
X = self.transform (X)
return X, y
def convert_after_fitting (self, X):
"""
Convert data after running fit method.
Calling this method is only required when convert_before_fitting
changes X "in place", instead of changing a copy of X. This might
be more efficient sometimes, and we have convert_after_fitting to
revert the previous change.
Parameters
----------
X : data (N observations x D dimensions)
data used for fitting model parameters
Returns
-------
X : data (N observations x D dimensions)
data with transformed format but same content
"""
return self.data_converter.convert_after_fitting (X)
def convert_before_transforming (self, X, **kwargs):
"""
Convert data before running transform method.
Parameters
----------
X : data (N observations x D dimensions)
data used to be transformed
Returns
-------
X : data (N observations x D dimensions)
data with transformed format but same content
"""
X = self.data_converter.convert_before_transforming (X, **kwargs)
X = self.transform (X)
return X
def convert_after_transforming (self, result, **kwargs):
"""
        Convert the result obtained by the transform method.
Parameters
----------
result : data (N' observations x D' dimensions)
result obtained by transformed method
Returns
-------
result : data (N' observations x D' dimensions)
result with transformed format but same content
"""
result = self.inverse_transform (result)
result = self.data_converter.convert_after_transforming (result, **kwargs)
return result
def transform (self, df):
"""
Convert input DataFrame `df` to numpy array in 3D format.
        Given a 2D DataFrame of size N x (D*W), where N=number of windows,
        D=number of variables (dimensions), and W=size of each window, converts
        it to a numpy array of shape N x W x D. In each row of `df`, the W
        elements of the window appear first for the first dimension, then for
        the second dimension, and so on; the reshaped array is transposed so
        that the second axis indexes the position within the window and the
        third axis indexes the dimension.
"""
data = df.values.reshape(df.shape[0], -1, self.sequence_length)
        data = np.transpose(data, (0, 2, 1))
        return data
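    # Note (assumption): `convert_after_transforming` above relies on a
    # `self.inverse_transform` method that is not included in this excerpt.
    # A minimal sketch of what it might look like, reversing `transform`:
    #
    #     def inverse_transform (self, data):
    #         data = np.transpose (data, (0, 2, 1))          # back to N x D x W
    #         return pd.DataFrame (data.reshape (data.shape[0], -1))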
import os
import sys
sys.path.insert(1, os.path.dirname(os.path.realpath(__file__)) + '/../')
from common import utils
import models
from common.log import log, LogLevel
from common.state import State
from common import cuda
from common import paths
import common.torch
import common.numpy
from training import train_classifier_adversarially
import torch
import numpy
import argparse
import math
if utils.display():
from common import plot
class TrainLearnedDecoderClassifierAdversarially(train_classifier_adversarially.TrainClassifierAdversarially):
"""
Train a classifier.
:param args: arguments
:type args: list
"""
def __init__(self, args=None):
"""
Initialize.
:param args: optional arguments if not to use sys.argv
:type args: [str]
"""
super(TrainLearnedDecoderClassifierAdversarially, self).__init__(args)
self.train_statistics = numpy.zeros((0, 11))
self.test_statistics = numpy.zeros((0, 10))
self.train_theta = None
""" (numpy.ndarray) Training transformation parameters. """
self.test_theta = None
""" (numpy.ndarray) Testing transformation parameters. """
self.decoder = None
""" (Decoder) Decoder. """
self.decoder_classifier = None
""" (DecoderClassifier) Model to attack. """
def get_parser(self):
"""
Get parser.
:return: parser
:rtype: argparse.ArgumentParser
"""
parser = argparse.ArgumentParser(description='Train classifier.')
parser.add_argument('-train_images_file', default=paths.train_images_file(), help='HDF5 file containing dataset.', type=str)
parser.add_argument('-train_codes_file', default=paths.train_codes_file(), help='HDF5 file containing codes.', type=str)
parser.add_argument('-train_theta_file', default=paths.results_file('train_theta'), help='HDF5 file containing transformations.', type=str)
parser.add_argument('-test_theta_file', default=paths.results_file('test_theta'), help='HDF5 file containing transformations.', type=str)
parser.add_argument('-test_images_file', default=paths.test_images_file(), help='HDF5 file containing dataset.', type=str)
parser.add_argument('-test_codes_file', default=paths.test_codes_file(), help='HDF5 file containing codes.', type=str)
parser.add_argument('-decoder_files', default=paths.state_file('decoder'), help='Decoder files.', type=str)
parser.add_argument('-latent_space_size', default=10, help='Size of latent space.', type=int)
parser.add_argument('-label_index', default=2, help='Column index in label file.', type=int)
parser.add_argument('-state_file', default=paths.state_file('robust_manifold_classifier'), help='Snapshot state file.', type=str)
parser.add_argument('-log_file', default=paths.log_file('robust_manifold_classifier'), help='Log file.', type=str)
parser.add_argument('-training_file', default=paths.results_file('robust_manifold_training'), help='Training statistics file.', type=str)
parser.add_argument('-testing_file', default=paths.results_file('robust_manifold_testing'), help='Testing statistics file.', type=str)
parser.add_argument('-loss_file', default=paths.image_file('loss'), help='Loss plot file.', type=str)
parser.add_argument('-error_file', default=paths.image_file('error'), help='Error plot file.', type=str)
parser.add_argument('-success_file', default=paths.image_file('robust_success'), help='Success rate plot file.', type=str)
parser.add_argument('-gradient_file', default='', help='Gradient plot file.', type=str)
parser.add_argument('-random_samples', default=False, action='store_true', help='Randomize the subsampling of the training set.')
parser.add_argument('-training_samples', default=-1, help='Number of samples used for training.', type=int)
        parser.add_argument('-test_samples', default=-1, help='Number of samples used for testing.', type=int)
parser.add_argument('-validation_samples', default=0, help='Number of samples for validation.', type=int)
parser.add_argument('-early_stopping', default=False, action='store_true', help='Use early stopping.')
parser.add_argument('-attack_samples', default=1000, help='Samples to attack.', type=int)
parser.add_argument('-batch_size', default=64, help='Batch size.', type=int)
parser.add_argument('-epochs', default=10, help='Number of epochs.', type=int)
parser.add_argument('-weight_decay', default=0.0001, help='Weight decay importance.', type=float)
parser.add_argument('-logit_decay', default=0, help='Logit decay importance.', type=float)
parser.add_argument('-no_gpu', dest='use_gpu', action='store_false')
parser.add_argument('-skip', default=5, help='Verbosity in iterations.', type=int)
parser.add_argument('-lr', default=0.005, type=float, help='Base learning rate.')
parser.add_argument('-lr_decay', default=0.9, type=float, help='Learning rate decay.')
parser.add_argument('-results_file', default='', help='Results file for evaluation.', type=str)
parser.add_argument('-bound', default=2, help='Bound used to define "safe" latent codes to compute adversarial examples on.', type=float)
parser.add_argument('-debug_directory', default='', help='Debug directory.', type=str)
# Some network parameters.
parser.add_argument('-network_architecture', default='standard', help='Classifier architecture to use.', type=str)
parser.add_argument('-network_activation', default='relu', help='Activation function to use.', type=str)
parser.add_argument('-network_no_batch_normalization', default=False, help='Do not use batch normalization.', action='store_true')
parser.add_argument('-network_channels', default=16, help='Channels of first convolutional layer, afterwards channels are doubled.', type=int)
parser.add_argument('-network_dropout', default=False, action='store_true', help='Whether to use dropout.')
parser.add_argument('-network_units', default='1024,1024,1024,1024', help='Units for MLP.')
# Decoder parameters.
parser.add_argument('-decoder_architecture', default='standard', help='Architecture to use.', type=str)
parser.add_argument('-decoder_activation', default='relu', help='Activation function to use.', type=str)
parser.add_argument('-decoder_no_batch_normalization', default=False, help='Do not use batch normalization.', action='store_true')
parser.add_argument('-decoder_channels', default=16, help='Channels of first convolutional layer, afterwards channels are doubled.', type=int)
parser.add_argument('-decoder_dropout', default=False, action='store_true', help='Whether to use dropout.')
parser.add_argument('-decoder_units', default='1024,1024,1024,1024', help='Units for MLP.')
# Attack parameters.
parser.add_argument('-attack', default='UntargetedBatchL2ClippedGradientDescent', help='Attack to try.', type=str)
parser.add_argument('-objective', default='UntargetedF6', help='Objective to use.', type=str)
parser.add_argument('-epsilon', default=1, help='Epsilon allowed for attacks.', type=float)
parser.add_argument('-c_0', default=0., help='Weight of norm.', type=float)
parser.add_argument('-c_1', default=0.1, help='Weight of bound, if not enforced through clipping or reparameterization.', type=float)
parser.add_argument('-c_2', default=0.5, help='Weight of objective.', type=float)
parser.add_argument('-max_iterations', default=10, help='Number of iterations for attack.', type=int)
parser.add_argument('-max_projections', default=5, help='Number of projections for alternating projection.', type=int)
parser.add_argument('-base_lr', default=0.005, help='Learning rate for attack.', type=float)
parser.add_argument('-verbose', action='store_true', default=False, help='Verbose attacks.')
parser.add_argument('-anneal_epochs', default=0, help='Anneal iterations in the first epochs.', type=int)
# Variants.
parser.add_argument('-full_variant', default=False, action='store_true', help='100% variant.')
        parser.add_argument('-safe', default=False, action='store_true', help='Safe variant.')
        parser.add_argument('-safe_bn', default=False, action='store_true', help='Safe batch normalization variant.')
parser.add_argument('-training_mode', default=False, action='store_true', help='Training mode variant for attack.')
return parser
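    # Example invocation (illustrative; the script name is an assumption, the
    # flags are those defined in get_parser above):
    #
    #     python train_learned_decoder_classifier_adversarially.py \
    #         -attack UntargetedBatchL2ClippedGradientDescent \
    #         -objective UntargetedF6 -epsilon 1 -epochs 10 -batch_size 64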
def train(self):
"""
Train adversarially.
"""
split = self.args.batch_size // 2
num_batches = int(math.ceil(self.train_images.shape[0] / self.args.batch_size))
        permutation = numpy.random.permutation(self.train_images.shape[0])
"""Toolkit for exploratory work regarding the polarization transfer coefficients
analyzed in Heyvaerts et al 2013.
Heyvaert's "f" variable is usually called r_V or rho_V by other authors. The
variable "h" is usually called r_Q or rho_Q.
"""
import numpy as np
from pwkit import cgs
from pwkit.numutil import broadcastize, parallel_quad
from scipy.integrate import quad
M3_C3 = cgs.me**3 * cgs.c**3
FOUR_PI_M3_C3 = 4 * cgs.pi * M3_C3
DEFAULT_S = 10.
DEFAULT_THETA = 0.5
# Set this True to override safety checks for incompletely implemented physics.
# Stands for "I know what I'm doing."
IKWID = False
# Bessel function fun. Scipy names second-kind Bessels as Y_v(x); we follow
# Heyvaerts and use N_v(x).
from scipy.special import jv as jv_scipy, jvp as jvp_scipy, yv as nv_scipy, yvp as nvp_scipy, \
kv as kv_scipy, iv as iv_scipy
def lv(nu, x):
"""Similar to a modified Bessel function of the second kind, but not the
same.
"""
return 0.5 * np.pi * (iv_scipy(-nu, x) + iv_scipy(nu, x)) / np.sin(nu * np.pi)
def jv_nicholson(sigma, x):
"""Nicholson's approximation J_sigma(x), for x somewhat smaller than sigma.
Equations 94, 95.
"""
g = (2 * (sigma - x))**1.5 / (3 * np.sqrt(x))
return kv_scipy(1./3, g) * np.sqrt(2 * (sigma - x) / (3 * x)) / np.pi
def nv_nicholson(sigma, x):
"""Nicholson's approximation N_sigma(x), for x somewhat smaller than sigma.
Equations 94, 95.
"""
g = (2 * (sigma - x))**1.5 / (3 * np.sqrt(x))
return -lv(1./3, g) * np.sqrt(2 * (sigma - x) / x) / np.pi
def jvp_nicholson(sigma, x):
"""Nicholson's approximation J'_sigma(x), for x somewhat smaller than sigma.
Equations 94, 96.
The derivative approximations do not converge nearly as well as the
non-derivatives.
"""
g = (2 * (sigma - x))**1.5 / (3 * np.sqrt(x))
return kv_scipy(2./3, g) * 2 * (sigma - x) / (3**0.5 * np.pi * x)
def nvp_nicholson(sigma, x):
"""Nicholson's approximation N'_sigma(x), for x somewhat smaller than sigma.
Equations 94, 96.
The derivative approximations do not converge nearly as well as the
non-derivatives.
"""
g = (2 * (sigma - x))**1.5 / (3 * np.sqrt(x))
return lv(2./3, g) * 2 * (sigma - x) / (np.pi * x)
# coefficients from http://dlmf.nist.gov/10.41#ii, 10.41.10 etc. u0 = v0 = 1.
# Inspired by Heyvaerts but functions from http://dlmf.nist.gov/10.19
_debye_u1_coeffs = np.array([-5., 0, 3, 0]) / 24
_debye_u2_coeffs = np.array([385., 0, -462, 0, 81, 0, 0]) / 1152
_debye_u3_coeffs = np.array([-425425., 0, 765765, 0, -369603, 0, 30375, 0, 0, 0]) / 414720
_debye_v1_coeffs = np.array([7., 0, -9, 0]) / 24
_debye_v2_coeffs = np.array([-455., 0, 594, 0, -135, 0, 0]) / 1152
_debye_v3_coeffs = np.array([475475., 0, -883575, 0, 451737, 0, -42525, 0, 0, 0]) / 414720
def jv_debye(sigma, x):
"""The Debye expansion of J_sigma(x), used with large x and sigma."""
alpha = np.arccosh(sigma / x)
tanha = np.tanh(alpha)
cotha = 1. / tanha
s = (1. + # m=0 term
np.polyval(_debye_u1_coeffs, cotha) / sigma + # m=1
np.polyval(_debye_u2_coeffs, cotha) / sigma**2 + # m=2
np.polyval(_debye_u3_coeffs, cotha) / sigma**3) # m=3
return np.exp(sigma * (tanha - alpha)) * s / np.sqrt(2 * np.pi * sigma * tanha)
def nv_debye(sigma, x):
"""The Debye expansion of N_sigma(x), used with large x and sigma."""
alpha = np.arccosh(sigma / x)
tanha = np.tanh(alpha)
cotha = 1. / tanha
s = (1. - # m=0 term; note alternating signs
np.polyval(_debye_u1_coeffs, cotha) / sigma + # m=1
np.polyval(_debye_u2_coeffs, cotha) / sigma**2 - # m=2
np.polyval(_debye_u3_coeffs, cotha) / sigma**3) # m=3
return -np.exp(sigma * (alpha - tanha)) * s / np.sqrt(0.5 * np.pi * sigma * tanha)
def jvp_debye(sigma, x):
"""The Debye expansion of J'_sigma(x), used with large x and sigma."""
alpha = np.arccosh(sigma / x)
tanha = np.tanh(alpha)
cotha = 1. / tanha
s = (1. + # m=0 term
np.polyval(_debye_v1_coeffs, cotha) / sigma + # m=1
np.polyval(_debye_v2_coeffs, cotha) / sigma**2 + # m=2
np.polyval(_debye_v3_coeffs, cotha) / sigma**3) # m=3
return np.exp(sigma * (tanha - alpha)) * s * np.sqrt(np.sinh(2 * alpha) / (4 * np.pi * sigma))
def nvp_debye(sigma, x):
"""The Debye expansion of N'_sigma(x), used with large x and sigma."""
alpha = np.arccosh(sigma / x)
tanha = np.tanh(alpha)
cotha = 1. / tanha
s = (1. - # m=0 term; note alternating signs
np.polyval(_debye_v1_coeffs, cotha) / sigma + # m=1
np.polyval(_debye_v2_coeffs, cotha) / sigma**2 - # m=2
np.polyval(_debye_v3_coeffs, cotha) / sigma**3) # m=3
return np.exp(sigma * (alpha - tanha)) * s * np.sqrt(np.sinh(2 * alpha) / (np.pi * sigma))
NICHOLSON_SIGMA_CUT = 30. # made up
NICHOLSON_REL_TOL = 0.01 # made up
DEBYE_SIGMA_CUT = 30. # made up
DEBYE_REL_TOL = 0.1 # made up
@broadcastize(2)
def jv(sigma, x):
"Bessel function of first kind."
r = jv_scipy(sigma, x)
w = (sigma > NICHOLSON_SIGMA_CUT) & ((sigma - x) / sigma < NICHOLSON_REL_TOL)
r[w] = jv_nicholson(sigma[w], x[w])
w = (sigma > DEBYE_SIGMA_CUT) & (np.abs(np.cbrt(sigma) / (sigma - x)) < DEBYE_REL_TOL)
r[w] = jv_debye(sigma[w], x[w])
nf = ~np.isfinite(r)
#if nf.sum(): print('jv nf', sigma, x)
r[nf] = 0.
return r
@broadcastize(2)
def nv(sigma, x):
"Bessel function of second kind. AKA N_v"
r = nv_scipy(sigma, x)
w = (sigma > NICHOLSON_SIGMA_CUT) & ((sigma - x) / sigma < NICHOLSON_REL_TOL)
r[w] = nv_nicholson(sigma[w], x[w])
w = (sigma > DEBYE_SIGMA_CUT) & (np.abs(np.cbrt(sigma) / (sigma - x)) < DEBYE_REL_TOL)
r[w] = nv_debye(sigma[w], x[w])
nf = ~np.isfinite(r)
#if nf.sum(): print('nv nf', sigma, x)
r[nf] = 0.
return r
@broadcastize(2)
def jvp(sigma, x):
"First derivative of Bessel function of first kind."
r = jvp_scipy(sigma, x)
w = (sigma > NICHOLSON_SIGMA_CUT) & ((sigma - x) / sigma < NICHOLSON_REL_TOL)
r[w] = jvp_nicholson(sigma[w], x[w])
w = (sigma > DEBYE_SIGMA_CUT) & (np.abs(np.cbrt(sigma) / (sigma - x)) < DEBYE_REL_TOL)
r[w] = jvp_debye(sigma[w], x[w])
nf = ~np.isfinite(r)
#if nf.sum(): print('jvp nf', sigma, x)
r[nf] = 0.
return r
@broadcastize(2)
def nvp(sigma, x):
"First derivative of Bessel function of second kind. AKA N_v"
r = nvp_scipy(sigma, x)
w = (sigma > NICHOLSON_SIGMA_CUT) & ((sigma - x) / sigma < NICHOLSON_REL_TOL)
r[w] = nvp_nicholson(sigma[w], x[w])
w = (sigma > DEBYE_SIGMA_CUT) & (np.abs(np.cbrt(sigma) / (sigma - x)) < DEBYE_REL_TOL)
r[w] = nvp_debye(sigma[w], x[w])
nf = ~np.isfinite(r)
#if nf.sum(): print('nvp nf', sigma, x)
r[nf] = 0.
return r
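# Note (illustrative, not part of the module): outside the Nicholson and Debye
# regimes the patched wrappers above simply fall back to scipy; e.g. for
# sigma=50, x=30 neither cut-off condition is met, so
#
#     sigma, x = np.array([50.]), np.array([30.])
#     assert np.allclose(jv(sigma, x), jv_scipy(sigma, x))
#     assert np.allclose(nv(sigma, x), nv_scipy(sigma, x))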
@broadcastize(2)
def jvpnv_heyvaerts_debye(sigma, x):
"""Product of the first derivative of the Bessel function of the first kind
and the (not a derivative of) the Bessel function of the second kind, with
Heyvaerts' Debye approximation, used with large x and sigma .
Heyvaerts presents an expansion that makes these computations more
tractable at extreme values, where J_v is very small and N_v is very big.
"""
s2 = sigma**2
x2 = x**2
A1 = 0.125 - 5 * s2 / (s2 - x2)
A2 = 3./128 - 77 * s2 / (576 * (s2 - x2)) + 385 * s2**2 / (3456 * (s2 - x2)**2)
xA1p = -5 * s2 * x2 / (12 * (s2 - x2)**2)
return -1 / (np.pi * x) * (
1 +
x2 / (2 * (s2 - x2)**1.5) +
(6 * A2 + xA1p - A1**2) / (s2 - x2) +
3 * A1 * x2 / (2 * (s2 - x2)**2)
)
def jvpnv_scipy(sigma, x):
return jvp_scipy(sigma, x) * nv_scipy(sigma, x)
@broadcastize(2)
def jvpnv(sigma, x):
"""Product of the first derivative of the Bessel function of the first kind
and the (not a derivative of) the Bessel function of the second kind.
Heyvaerts presents an expansion that makes these computations more
tractable at extreme values, where J_v is very small and N_v is very big.
"""
r = np.empty_like(sigma)
# Places where we can't use the approximation.
w = (sigma < DEBYE_SIGMA_CUT) | (np.abs(np.cbrt(sigma) / (sigma - x)) > DEBYE_REL_TOL)
r[w] = jvp(sigma[w], x[w]) * nv(sigma[w], x[w])
# Places where we can.
w = ~w
r[w] = jvpnv_heyvaerts_debye(sigma[w], x[w])
return r
def K23L13(x):
"""K_{2/3}(x) * L_{1/3}(x)
Evaluating the sin denominators, K_{2/3}(x) = pi/sqrt(3)*[I_{-2/3}(x) - I_{2/3}(x)],
and analogously for L.
This appproximation is only supposed to kick in when x <~ 1., but I have
cases where I have x ~ 15. I think what's happening is that the NR/QR
structure of the problem is weird (sigma_QR_min < sigma_0) and Heyvaert's
assumptions about the problem geometry aren't so valid.
"""
tt = 2. / 3
ot = 1. / 3
if x < 10:
K = np.pi / np.sqrt(3) * (iv_scipy(-tt, x) - iv_scipy(tt, x))
else:
K = kv_scipy(tt, x)
L = np.pi / np.sqrt(3) * (iv_scipy(-ot, x) + iv_scipy(ot, x))
return K * L
def K13L13(x):
ot = 1. / 3
K = np.pi / np.sqrt(3) * (iv_scipy(-ot, x) - iv_scipy(ot, x))
L = np.pi / np.sqrt(3) * (iv_scipy(-ot, x) + iv_scipy(ot, x))
return K * L
def K23L23(x):
tt = 2. / 3
K = np.pi / np.sqrt(3) * (iv_scipy(-tt, x) - iv_scipy(tt, x))
L = np.pi / np.sqrt(3) * (iv_scipy(-tt, x) + iv_scipy(tt, x))
return K * L
def evaluate_generic(sigma_max, s, theta, func, nsigma=64, npomega=64, **kwargs):
sin_theta = np.sin(theta)
cos_theta = np.cos(theta)
sigma0 = s * sin_theta
if sigma_max < 0:
        sigma_max = np.abs(sigma_max)
"""
Routines for the analysis of proton radiographs. These routines can be broadly
classified as either creating synthetic radiographs from prescribed fields or
methods of 'inverting' experimentally created radiographs to reconstruct the
original fields (under some set of assumptions).
"""
__all__ = [
"SyntheticProtonRadiograph",
]
import astropy.constants as const
import astropy.units as u
import numpy as np
import sys
import warnings
from tqdm import tqdm
from plasmapy import particles
from plasmapy.formulary.mathematics import rot_a_to_b
from plasmapy.particles import Particle
from plasmapy.plasma.grids import AbstractGrid
from plasmapy.simulation.particle_integrators import boris_push
def _coerce_to_cartesian_si(pos):
"""
Takes a tuple of `astropy.unit.Quantity` values representing a position
in space in either Cartesian, cylindrical, or spherical coordinates, and
returns a numpy array representing the same point in Cartesian
coordinates and units of meters.
"""
# Auto-detect geometry based on units
geo_units = [x.unit for x in pos]
if geo_units[2].is_equivalent(u.rad):
geometry = "spherical"
elif geo_units[1].is_equivalent(u.rad):
geometry = "cylindrical"
else:
geometry = "cartesian"
# Convert geometrical inputs between coordinates systems
pos_out = np.zeros(3)
if geometry == "cartesian":
x, y, z = pos
pos_out[0] = x.to(u.m).value
pos_out[1] = y.to(u.m).value
pos_out[2] = z.to(u.m).value
elif geometry == "cylindrical":
r, t, z = pos
r = r.to(u.m)
t = t.to(u.rad).value
z = z.to(u.m)
pos_out[0] = (r * np.cos(t)).to(u.m).value
pos_out[1] = (r * np.sin(t)).to(u.m).value
pos_out[2] = z.to(u.m).value
elif geometry == "spherical":
r, t, p = pos
r = r.to(u.m)
t = t.to(u.rad).value
p = p.to(u.rad).value
pos_out[0] = (r * np.sin(t) * np.cos(p)).to(u.m).value
pos_out[1] = (r * np.sin(t) * np.sin(p)).to(u.m).value
pos_out[2] = (r * np.cos(t)).to(u.m).value
return pos_out
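# Example (illustrative): all three supported geometries reduce to the same
# Cartesian point in meters, e.g.
#
#     _coerce_to_cartesian_si((0 * u.m, 0 * u.m, 1 * u.m))        # cartesian -> [0, 0, 1]
#     _coerce_to_cartesian_si((1 * u.m, 0 * u.rad, 0 * u.rad))    # spherical -> [0, 0, 1]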
class SyntheticProtonRadiograph:
r"""
Represents a charged particle radiography experiment with simulated or
calculated E and B fields given at positions defined by a grid of spatial
coordinates. The particle source and detector plane are defined by vectors
from the origin of the grid.
Parameters
----------
grid : `~plasmapy.plasma.grids.AbstractGrid` or subclass thereof
A Grid object containing the required quantities [E_x, E_y, E_z, B_x, B_y, B_z].
If any of these quantities are missing, a warning will be given and that
quantity will be assumed to be zero everywhere.
source : `~astropy.units.Quantity`, shape (3)
A vector pointing from the origin of the grid to the location
of the particle source. This vector will be interpreted as
being in either cartesian, cylindrical, or spherical coordinates
based on its units. Valid geometries are:
* Cartesian (x,y,z) : (meters, meters, meters)
* cylindrical (r, theta, z) : (meters, radians, meters)
* spherical (r, theta, phi) : (meters, radians, radians)
In spherical coordinates theta is the polar angle.
detector : `~astropy.units.Quantity`, shape (3)
A vector pointing from the origin of the grid to the center
of the detector plane. The vector from the source point to this
point defines the normal vector of the detector plane. This vector
can also be specified in cartesian, cylindrical, or spherical
coordinates (see the `source` keyword).
detector_hdir : `numpy.ndarray`, shape (3), optional
A unit vector (in Cartesian coordinates) defining the horizontal
direction on the detector plane. By default, the horizontal axis in the
detector plane is defined to be perpendicular to both the
source-to-detector vector and the z-axis (unless the source-to-detector axis
is parallel to the z axis, in which case the horizontal axis is the x-axis).
The detector vertical axis is then defined
to be orthogonal to both the source-to-detector vector and the
detector horizontal axis.
verbose : bool, optional
If true, updates on the status of the program will be printed
into the standard output while running.
"""
def __init__(
self,
grid: AbstractGrid,
source: u.m,
detector: u.m,
detector_hdir=None,
verbose=True,
):
# self.grid is the grid object
self.grid = grid
# self.grid_arr is the grid positions in si units. This is created here
# so that it isn't continously called later
self.grid_arr = grid.grid.to(u.m).value
self.verbose = verbose
# A list of wire meshes added to the grid with add_wire_mesh
# Particles that would hit these meshes will be removed at runtime
# by _apply_wire_mesh
self.mesh_list = []
# ************************************************************************
# Setup the source and detector geometries
# ************************************************************************
self.source = _coerce_to_cartesian_si(source)
self.detector = _coerce_to_cartesian_si(detector)
self._log(f"Source: {self.source} m")
self._log(f"Detector: {self.detector} m")
# Calculate normal vectors (facing towards the grid origin) for both
# the source and detector planes
self.src_n = -self.source / np.linalg.norm(self.source)
self.det_n = -self.detector / np.linalg.norm(self.detector)
# Vector directly from source to detector
self.src_det = self.detector - self.source
# Magnification
self.mag = 1 + np.linalg.norm(self.detector) / np.linalg.norm(self.source)
self._log(f"Magnification: {self.mag}")
# Check that source-detector vector actually passes through the grid
if not self.grid.vector_intersects(self.source * u.m, self.detector * u.m):
raise ValueError(
"The vector between the source and the detector "
"does not intersect the grid provided!"
)
# Determine the angle above which particles will not hit the grid
# these particles can be ignored until the end of the simulation,
# then immediately advanced to the detector grid with their original
# velocities
self.max_theta_hit_grid = self._max_theta_hit_grid()
# ************************************************************************
# Define the detector plane
# ************************************************************************
# Load or calculate the detector hdir
if detector_hdir is not None:
self.det_hdir = detector_hdir / np.linalg.norm(detector_hdir)
else:
self.det_hdir = self._default_detector_hdir()
# Calculate the detector vdir
ny = np.cross(self.det_hdir, self.det_n)
self.det_vdir = -ny / np.linalg.norm(ny)
# ************************************************************************
# Validate the E and B fields
# ************************************************************************
req_quantities = ["E_x", "E_y", "E_z", "B_x", "B_y", "B_z"]
self.grid.require_quantities(req_quantities, replace_with_zeros=True)
for rq in req_quantities:
# Check that there are no infinite values
if not np.isfinite(self.grid[rq].value).all():
raise ValueError(
f"Input arrays must be finite: {rq} contains "
"either NaN or infinite values."
)
# Check that the max values on the edges of the arrays are
# small relative to the maximum values on that grid
#
# Array must be dimensionless to re-assemble it into an array
# of max values like this
arr = np.abs(self.grid[rq]).value
edge_max = np.max(
np.array(
[
np.max(arr[0, :, :]),
np.max(arr[-1, :, :]),
np.max(arr[:, 0, :]),
np.max(arr[:, -1, :]),
np.max(arr[:, :, 0]),
np.max(arr[:, :, -1]),
]
)
)
if edge_max > 1e-3 * np.max(arr):
unit = grid.recognized_quantities[rq].unit
warnings.warn(
"Fields should go to zero at edges of grid to avoid "
f"non-physical effects, but a value of {edge_max:.2E} {unit} was "
f"found on the edge of the {rq} array. Consider applying a "
"envelope function to force the fields at the edge to go to "
"zero.",
RuntimeWarning,
)
def _default_detector_hdir(self):
"""
Calculates the default horizontal unit vector for the detector plane
(see __init__ description for details)
"""
# Create unit vectors that define the detector plane
# Define plane horizontal axis
if np.allclose(np.abs(self.det_n), np.array([0, 0, 1])):
nx = np.array([1, 0, 0])
else:
nx = np.cross(np.array([0, 0, 1]), self.det_n)
nx = nx / np.linalg.norm(nx)
return nx
def _max_theta_hit_grid(self):
r"""
Using the grid and the source position, compute the maximum particle
theta that will impact the grid. This value can be used to determine
which particles are worth tracking.
"""
ind = 0
theta = np.zeros([8])
for x in [0, -1]:
for y in [0, -1]:
for z in [0, -1]:
# Source to grid corner vector
vec = self.grid_arr[x, y, z, :] - self.source
# Calculate angle between vec and the source-to-detector
# axis, which is the central axis of the particle beam
theta[ind] = np.arccos(
np.dot(vec, self.src_det)
/ np.linalg.norm(vec)
/ np.linalg.norm(self.src_det)
)
ind += 1
return np.max(theta)
def _log(self, msg):
if self.verbose:
print(msg)
# Define some constants so they don't get constantly re-evaluated
_c = const.c.si.value
# *************************************************************************
# Create mesh
# *************************************************************************
def add_wire_mesh(
self, location, extent, nwires, wire_diameter, mesh_hdir=None, mesh_vdir=None
):
"""
Add a wire mesh grid between the particle source and the object grid
that blocks particles whose paths intersect the wires.
Parameters
----------
location : `~astropy.units.Quantity`, shape (3)
A vector pointing from the origin of the grid to the center of the
mesh grid. This location must be between the source and the
object grid.
This vector will be interpreted as
being in either cartesian, cylindrical, or spherical coordinates
based on its units. Valid geometries are:
* Cartesian (x,y,z) : (meters, meters, meters)
* cylindrical (r, theta, z) : (meters, radians, meters)
* spherical (r, theta, phi) : (meters, radians, radians)
In spherical coordinates theta is the polar angle.
extent : Tuple of 1 or 2 `~astropy.units.Quantity`
The size of the mesh grid (in the mesh plane). If one value
is provided, the mesh is circular and the value provided is
interpreted as the diameter. If two values are provided, the
mesh is rectangular and they the values are interpreted as the
width and height respectively.
nwires : Tuple of 1 or 2 ints, or a single int
The number of wires in the horizontal and vertical directions. If
only one value is provided, the number in the two directions is
assumed to be equal. Note that a wire will cross the center of the
mesh only when nwires is odd.
wire_diameter : `~astropy.units.Quantity`
The diameter of the wires.
mesh_hdir : `numpy.ndarray`, shape (3), optional
A unit vector (in Cartesian coordinates) defining the horizontal
direction on the mesh plane. Modifying this vector can rotate the
mesh in the plane or tilt the mesh plane relative to the
source-detector axis. By default, `mesh_hdir` is set equal to
`detector_hdir` (see `detector_hdir` keyword in `__init__`).
mesh_vdir : `numpy.ndarray`, shape (3), optional
A unit vector (in Cartesian coordinates) defining the vertical
direction on the mesh plane. Modifying this vector can tilt the
mesh relative to the source-detector axis. By default, `mesh_vdir`
is defined to be perpendicular to `mesh_hdir` and the detector
plane normal (such that the mesh is parallel to the detector plane).
Raises
------
ValueError
Raises a ValueError if the provided mesh location is not
between the source and the object grid.
"""
location = _coerce_to_cartesian_si(location)
wire_radius = wire_diameter.si.value / 2
if not isinstance(extent, tuple):
extent = (extent,)
if len(extent) == 1:
radius = 0.5 * extent[0].si.value
width = extent[0].si.value
height = extent[0].si.value
elif len(extent) == 2:
radius = None
width = extent[0].si.value
height = extent[1].si.value
else:
raise ValueError(
"extent must be a tuple of 1 or 2 elements, but "
f"{len(extent)} elements were provided."
)
if not isinstance(nwires, tuple):
nwires = (nwires,)
if len(nwires) != 2:
nwires = (nwires[0], nwires[0])
# If no hdir/vdir is specified, calculate a default value
# If one is specified, make sure it is normalized
if mesh_hdir is None:
# Re-calculate the default here, in case the user
# specified a different det_hdir
mesh_hdir = self._default_detector_hdir()
else:
mesh_hdir = mesh_hdir / np.linalg.norm(mesh_hdir)
if mesh_vdir is None:
mesh_vdir = np.cross(mesh_hdir, self.det_n)
mesh_vdir = -mesh_vdir / np.linalg.norm(mesh_vdir)
else:
mesh_vdir = mesh_vdir / np.linalg.norm(mesh_vdir)
# Raise exception if mesh is AFTER the field grid
if np.linalg.norm(location - self.source) > np.linalg.norm(self.source):
raise ValueError(
f"The specified mesh location, {location},"
"is not between the source and the origin."
)
mesh_entry = {
"location": location,
"wire_radius": wire_radius,
"radius": radius,
"width": width,
"height": height,
"nwires": nwires,
"mesh_hdir": mesh_hdir,
"mesh_vdir": mesh_vdir,
}
self.mesh_list.append(mesh_entry)
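    # Example (illustrative; `sim` and the numbers are assumptions): a
    # 1 cm x 1 cm mesh of 11 x 11 wires, 20 um in diameter, placed between the
    # source and the grid:
    #
    #     sim.add_wire_mesh((-1 * u.mm, 0 * u.mm, 0 * u.mm),
    #                       (1 * u.cm, 1 * u.cm), 11, 20 * u.um)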
def _apply_wire_mesh(
self,
location=None,
wire_radius=None,
radius=None,
width=None,
height=None,
nwires=None,
mesh_hdir=None,
mesh_vdir=None,
):
"""
Apply wire meshes that were added to self.mesh_list
"""
x = self._coast_to_plane(location, mesh_hdir, mesh_vdir)
# Particle positions in 2D on the mesh plane
xloc = np.dot(x - location, mesh_hdir)
yloc = np.dot(x - location, mesh_vdir)
# Create an array in which True indicates that a particle has hit a wire
# and False indicates that it has not
hit = np.zeros(self.nparticles, dtype=bool)
# Mark particles that overlap vertical or horizontal position with a wire
h_centers = np.linspace(-width / 2, width / 2, num=nwires[0])
for c in h_centers:
hit |= np.isclose(xloc, c, atol=wire_radius)
v_centers = np.linspace(-height / 2, height / 2, num=nwires[1])
for c in v_centers:
hit |= np.isclose(yloc, c, atol=wire_radius)
# Put back any particles that are outside the mesh boundaries
# First handle the case where the mesh is rectangular
if radius is None:
# Replace particles outside the x-boundary
hit[
np.logical_or(
xloc > np.max(h_centers) + wire_radius,
xloc < np.min(h_centers) - wire_radius,
)
] = False
# Replace particles outside the y-boundary
hit[
np.logical_or(
yloc > np.max(v_centers) + wire_radius,
yloc < np.min(v_centers) - wire_radius,
)
] = False
# Handle the case where the mesh is circular
else:
loc_rad = np.sqrt(xloc ** 2 + yloc ** 2)
hit[loc_rad > radius] = False
# In the case of a circular mesh, also create a round wire along the
# outside edge
hit[np.isclose(loc_rad, radius, atol=wire_radius)] = True
# Identify the particles that have hit something, then remove them from
# all of the arrays
keep_these_particles = ~hit
number_kept_particles = keep_these_particles.sum()
nremoved = self.nparticles - number_kept_particles
if self.nparticles - nremoved <= 0:
raise ValueError(
"The specified mesh is blocking all of the particles. "
f"The wire diameter ({2*wire_radius}) may be too large."
)
self.x = self.x[keep_these_particles, :]
self.v = self.v[keep_these_particles, :]
self.theta = self.theta[
keep_these_particles
] # Important to apply here to get correct grid_ind
self.nparticles = number_kept_particles
# *************************************************************************
# Particle creation methods
# *************************************************************************
def _angles_monte_carlo(self):
"""
Generates angles for each particle randomly such that the flux
per solid angle is uniform.
"""
# Create a probability vector for the theta distribution
# Theta must follow a sine distribution in order for the particle
# flux per solid angle to be uniform.
arg = np.linspace(0, self.max_theta, num=int(1e5))
prob = np.sin(arg)
prob *= 1 / np.sum(prob)
# Randomly choose theta's weighted with the sine probabilities
theta = np.random.choice(arg, size=self.nparticles, replace=True, p=prob)
# Also generate a uniform phi distribution
phi = np.random.uniform(high=2 * np.pi, size=self.nparticles)
return theta, phi
def _angles_uniform(self):
"""
Generates angles for each particle such that their velocities are
uniformly distributed on a grid in theta and phi. This method
requires that `nparticles` be a perfect square. If it is not,
`nparticles` will be set as the largest perfect square smaller
than the provided `nparticles`.
"""
# Calculate the approximate square root
n_per = np.floor(np.sqrt(self.nparticles)).astype(np.int32)
# Set new nparticles to be a perfect square
self.nparticles = n_per ** 2
# Create an imaginary grid positioned 1 unit from the source
# and spanning max_theta at the corners
extent = np.sin(self.max_theta) / np.sqrt(2)
arr = np.linspace(-extent, extent, num=n_per)
harr, varr = np.meshgrid(arr, arr, indexing="ij")
# calculate the angles from the source for each point in
# the grid.
theta = np.arctan(np.sqrt(harr ** 2 + varr ** 2))
phi = np.arctan2(varr, harr)
return theta.flatten(), phi.flatten()
@particles.particle_input
def create_particles(
self,
nparticles,
particle_energy,
max_theta=None,
particle: Particle = Particle("p+"),
distribution="monte-carlo",
):
r"""
Generates the angular distributions about the Z-axis, then
rotates those distributions to align with the source-to-detector axis.
By default, particles are generated over almost the entire pi/2. However,
if the detector is far from the source, many of these particles will
never be observed. The max_theta keyword allows these extraneous
particles to be neglected to focus computational resources on the
particles who will actually hit the detector.
nparticles : integer
The number of particles to include in the simulation. The default
is 1e5.
particle_energy : `~astropy.units.Quantity`
The energy of the particle, in units convertible to eV.
All particles are given the same energy.
max_theta : `~astropy.units.Quantity`, optional
The largest velocity vector angle (measured from the
source-to-detector axis) for which particles should be generated.
Decreasing this angle can eliminate particles that would never
reach the detector region of interest. If no value is given, a
guess will be made based on the size of the grid.
Units must be convertible to radians.
particle : ~plasmapy.particles.Particle or string representation of same, optional
Representation of the particle species as either a `Particle` object
or a string representation. The default particle is protons.
distribution: str
A keyword which determines how particles will be distributed
in velocity space. Options are:
- 'monte-carlo': velocities will be chosen randomly,
such that the flux per solid angle is uniform.
            - 'uniform': velocities will be distributed such that,
              left unperturbed, they will form a uniform pattern
on the detection plane. This method
requires that `nparticles` be a perfect square. If it is not,
`nparticles` will be set as the largest perfect square smaller
than the provided `nparticles`.
Simulations run in the `uniform` mode will imprint a grid pattern
on the image, but will well-sample the field grid with a
smaller number of particles. The default is `monte-carlo`
"""
self._log("Creating Particles")
# Load inputs
self.nparticles = int(nparticles)
self.particle_energy = particle_energy.to(u.eV).value
self.q = particle.charge.to(u.C).value
self.m = particle.mass.to(u.kg).value
# If max_theta is not specified, make a guess based on the grid size
if max_theta is None:
self.max_theta = np.clip(
1.5 * self.max_theta_hit_grid, 0.01, 0.99 * np.pi / 2
)
else:
self.max_theta = max_theta.to(u.rad).value
# Calculate the velocity corresponding to the particle energy
ER = self.particle_energy * 1.6e-19 / (self.m * self._c ** 2)
v0 = self._c * np.sqrt(1 - 1 / (ER + 1) ** 2)
if distribution == "monte-carlo":
theta, phi = self._angles_monte_carlo()
elif distribution == "uniform":
theta, phi = self._angles_uniform()
# Temporarily save theta to later determine which particles
# should be tracked
self.theta = theta
# Construct the velocity distribution around the z-axis
self.v = np.zeros([self.nparticles, 3])
self.v[:, 0] = v0 * np.sin(theta) * np.cos(phi)
self.v[:, 1] = v0 * np.sin(theta) * np.sin(phi)
self.v[:, 2] = v0 * np.cos(theta)
# Calculate the rotation matrix that rotates the z-axis
# onto the source-detector axis
a = np.array([0, 0, 1])
b = self.detector - self.source
rot = rot_a_to_b(a, b)
# Apply rotation matrix to calculated velocity distribution
self.v = np.matmul(self.v, rot)
# Place particles at the source
self.x = np.tile(self.source, (self.nparticles, 1))
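    # Typical workflow sketch (illustrative; the grid, fields and geometry are
    # assumptions, not defined in this excerpt):
    #
    #     sim = SyntheticProtonRadiograph(grid,
    #                                     source=(-10 * u.mm, 0 * u.mm, 0 * u.mm),
    #                                     detector=(100 * u.mm, 0 * u.mm, 0 * u.mm))
    #     sim.create_particles(1e5, 14 * u.MeV, distribution="monte-carlo")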
@particles.particle_input
def load_particles(
self, x, v, particle: Particle = Particle("p+"),
):
r"""
Load arrays of particle positions and velocities
x : `~astropy.units.Quantity`, shape (N,3)
Positions for N particles
v: `~astropy.units.Quantity`, shape (N,3)
Velocities for N particles
particle : ~plasmapy.particles.Particle or string representation of same, optional
Representation of the particle species as either a `Particle` object
or a string representation. The default particle is protons.
"""
self.q = particle.charge.to(u.C).value
self.m = particle.mass.to(u.kg).value
if x.shape[0] != v.shape[0]:
raise ValueError(
"Provided x and v arrays have inconsistent numbers "
" of particles "
f"({x.shape[0]} and {v.shape[0]} respectively)."
)
else:
self.nparticles = x.shape[0]
self.x = x.to(u.m).value
self.v = v.to(u.m / u.s).value
self.theta = np.arccos(
np.inner(self.v, self.src_n) / np.linalg.norm(self.v, axis=-1)
)
n_wrong_way = np.sum(np.where(self.theta > np.pi / 2, 1, 0))
if n_wrong_way > 1:
warnings.warn(
f"{100*n_wrong_way/self.nparticles:.2f}% of particles "
"initialized are heading away from the grid. Check the orientation "
" of the provided velocity vectors.",
RuntimeWarning,
)
# *************************************************************************
# Run/push loop methods
# *************************************************************************
def _adaptive_dt(self, Ex, Ey, Ez, Bx, By, Bz):
r"""
Calculate the appropriate dt based on a number of considerations
including the local grid resolution (ds) and the gyroperiod of the
particles in the current fields.
"""
# If dt was explicitly set, skip the rest of this function
if self.dt.size == 1:
return self.dt
# Compute the timestep indicated by the grid resolution
ds = self.grid.grid_resolution.to(u.m).value
gridstep = 0.5 * (np.min(ds) / self.vmax)
# If not, compute a number of candidate timesteps
# Compute the maximum magnetic field magnitude
Bmag = np.max(np.sqrt(Bx ** 2 + By ** 2 + Bz ** 2)).to(u.T).value
# Compute the gyroperiod
if Bmag == 0:
gyroperiod = np.inf
else:
gyroperiod = 2 * np.pi * self.m / (self.q * np.max(Bmag))
# TODO: introduce a minimum timestep based on electric fields too!
# Create an array of all the possible time steps we computed
candidates = np.array([gyroperiod / 12, gridstep])
# Enforce limits on dt
candidates = np.clip(candidates, self.dt[0], self.dt[1])
# dt is the min of the remaining candidates
return np.min(candidates)
def _coast_to_grid(self):
r"""
Coasts all particles to the timestep when the first particle should
be entering the grid. Doing in this in one step (rather than pushing
the particles through zero fields) saves computation time.
"""
# Distance from the source to the nearest gridpoint
dist = np.min(np.linalg.norm(self.grid_arr - self.source, axis=3))
# Find the particle with the highest speed towards the grid
vmax = np.max(np.dot(self.v, self.src_n))
# Time for fastest possible particle to reach the grid.
t = dist / vmax
# Coast the particles to the advanced position
self.x = self.x + self.v * t
def _coast_to_plane(self, center, hdir, vdir, x=None):
"""
Calculates the positions where the current trajectories of each
particle impact a plane, described by the plane's center and
horizontal and vertical unit vectors.
Returns an [nparticles, 3] array of the particle positions in the plane
By default this function does not alter self.x. The optional keyword
x can be used to pass in an output array that will used to hold
the positions in the plane. This can be used to directly update self.x
as follows:
self._coast_to_plane(self.detector, self.det_hdir, self.det_vdir, x = self.x)
"""
normal = np.cross(hdir, vdir)
# Calculate the time required to evolve each particle into the
# plane
t = np.inner(center[np.newaxis, :] - self.x, normal) / np.inner(self.v, normal)
# Calculate particle positions in the plane
if x is None:
# If no output array is provided, preallocate
x = np.empty_like(self.x)
x[...] = self.x + self.v * t[:, np.newaxis]
# Check that all points are now in the plane
# (Eq. of a plane is nhat*x + d = 0)
plane_eq = np.dot(x - center, normal)
assert np.allclose(plane_eq, 0, atol=1e-6)
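# --- Illustrative addition (not part of the original class above) -------------
# A minimal, self-contained sketch of the ray/plane intersection performed in
# _coast_to_plane for a single particle. The name _demo_coast_to_plane and the
# numbers used are hypothetical and serve only as an example.
def _demo_coast_to_plane():
    import numpy as np
    center = np.array([0.0, 0.0, 1.0])   # a point on the plane
    hdir = np.array([1.0, 0.0, 0.0])     # horizontal unit vector in the plane
    vdir = np.array([0.0, 1.0, 0.0])     # vertical unit vector in the plane
    normal = np.cross(hdir, vdir)        # plane normal
    x = np.array([0.0, 0.0, 0.0])        # particle position
    v = np.array([0.0, 0.0, 2.0])        # particle velocity
    # time for the particle to drift into the plane, then the impact position
    t = np.dot(center - x, normal) / np.dot(v, normal)
    x_plane = x + v * t
    assert np.isclose(np.dot(x_plane - center, normal), 0.0)
    return x_plane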
import numpy as np
import random
random.seed(2301)
np.random.seed(795118)
from sklearn.cluster import KMeans
from sklearn.cluster import AgglomerativeClustering
from sklearn.mixture import GaussianMixture
from sklearn.preprocessing import Normalizer
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import StandardScaler
class Agent():
"""
Class that models a reinforcement learning agent.
"""
def __init__(self, number_of_algorithms, epsilon=0.2, alpha=0.01, gamma=0.7):
self.n_actions = number_of_algorithms
# 20 algorithms
self.time_portions = [0.01, 0.0127, 0.0162, 0.0206, 0.0263, 0.0335, 0.0428, 0.0545, 0.0695, 0.0885, 0.1128, 0.1438, 0.1832, 0.2335, 0.2976, 0.3792, 0.4, 0.4832, 0.5, 0.6158, 0.7, 0.7847, 0.85, 0.9, 0.97, 1.02]
self.time_portions = [self.time_portions[i]-0.01 for i in range(len(self.time_portions))]
self.epsilon = epsilon
self.alpha = alpha
self.gamma = gamma
#self.Q = np.random.rand(len(self.time_portions)-1, self.n_actions)
def reset(self, dataset_meta_features, algorithms_meta_features):
"""
Reset the agents' memory for a new dataset
Parameters
----------
dataset_meta_features : dict of {str : str}
The meta-features of the dataset at hand, including:
'usage' : name of the competition
'name' : name of the dataset
'task' : type of the task
'target_type' : target type
'feat_type' : feature type
'metric' : evaluation metric used
'time_budget' : time budget for training and testing
'feat_num' : number of features
'target_num' : number of targets
'label_num' : number of labels
'train_num' : number of training examples
'valid_num' : number of validation examples
'test_num' : number of test examples
'has_categorical' : presence or absence of categorical variables
'has_missing' : presence or absence of missing values
'is_sparse' : full matrices or sparse matrices
algorithms_meta_features : dict of dict of {str : str}
The meta_features of all algorithms
Examples
----------
>>> dataset_meta_features
{'usage': 'Meta-learningchallenge2022', 'name': 'Erik', 'task': 'regression',
'target_type': 'Binary', 'feat_type': 'Mixed', 'metric': 'f1_metric',
'time_budget': '600', 'feat_num': '9', 'target_num': '6', 'label_num': '10',
'train_num': '17', 'valid_num': '87', 'test_num': '72', 'has_categorical': '1',
'has_missing': '0', 'is_sparse': '1'}
>>> algorithms_meta_features
{'0': {'meta_feature_0': '0', 'meta_feature_1': '0.1'},
'1': {'meta_feature_0': '1', 'meta_feature_1': '0.2'},
'2': {'meta_feature_0': '0', 'meta_feature_1': '0.3'},
'3': {'meta_feature_0': '1', 'meta_feature_1': '0.4'},
...
'18': {'meta_feature_0': '1', 'meta_feature_1': '0.9'},
'19': {'meta_feature_0': '0', 'meta_feature_1': '1.0'},
}
"""
self.dataset_metadata = dataset_meta_features
self.algorithms_metadata = algorithms_meta_features
self.validation_last_scores = [0.0 for i in range(self.n_actions)]
self.validation_time_seen = [0.0 for i in range(self.n_actions)]
#self.Q = np.random.rand(len(self.time_portions) - 1, self.n_actions)
self.time_used = 1
self.time_budget = float(dataset_meta_features['time_budget'])
if self.time_budget not in self.time_budgets_state:
distances = [abs(self.time_budget-self.time_budgets_state[i]) for i in range(len(self.time_budgets_state))]
self.time_budget_position = np.argmin(distances)
else:
self.time_budget_position = self.time_budgets_state.index(float(self.time_budget))
ds_features = np.array(self._ds_to_vec(dataset_meta_features, self.ordered_features)).reshape(1, -1)
ds_features = self.scaler.transform(ds_features)
self.cluster_label = self.cluster.predict(ds_features)
def reset_for_train(self, dataset_meta_features, algorithms_meta_features):
"""
Reset the agents' memory for a new dataset
Parameters
----------
dataset_meta_features : dict of {str : str}
The meta-features of the dataset at hand, including:
'usage' : name of the competition
'name' : name of the dataset
'task' : type of the task
'target_type' : target type
'feat_type' : feature type
'metric' : evaluation metric used
'time_budget' : time budget for training and testing
'feat_num' : number of features
'target_num' : number of targets
'label_num' : number of labels
'train_num' : number of training examples
'valid_num' : number of validation examples
'test_num' : number of test examples
'has_categorical' : presence or absence of categorical variables
'has_missing' : presence or absence of missing values
'is_sparse' : full matrices or sparse matrices
algorithms_meta_features : dict of dict of {str : str}
The meta_features of all algorithms
Examples
----------
>>> dataset_meta_features
{'usage': 'Meta-learningchallenge2022', 'name': 'Erik', 'task': 'regression',
'target_type': 'Binary', 'feat_type': 'Mixed', 'metric': 'f1_metric',
'time_budget': '600', 'feat_num': '9', 'target_num': '6', 'label_num': '10',
'train_num': '17', 'valid_num': '87', 'test_num': '72', 'has_categorical': '1',
'has_missing': '0', 'is_sparse': '1'}
>>> algorithms_meta_features
{'0': {'meta_feature_0': '0', 'meta_feature_1': '0.1'},
'1': {'meta_feature_0': '1', 'meta_feature_1': '0.2'},
'2': {'meta_feature_0': '0', 'meta_feature_1': '0.3'},
'3': {'meta_feature_0': '1', 'meta_feature_1': '0.4'},
...
'18': {'meta_feature_0': '1', 'meta_feature_1': '0.9'},
'19': {'meta_feature_0': '0', 'meta_feature_1': '1.0'},
}
"""
self.dataset_metadata = dataset_meta_features
self.algorithms_metadata = algorithms_meta_features
self.validation_last_scores = [0.0 for i in range(self.n_actions)]
self.validation_time_seen = [0.0 for i in range(self.n_actions)]
#self.Q = np.random.rand(len(self.time_portions) - 1, self.n_actions)
self.time_used = 1
self.time_budget = float(dataset_meta_features['time_budget'])
if self.time_budget not in self.time_budgets_state:
distances = [abs(self.time_budget-self.time_budgets_state[i]) for i in range(len(self.time_budgets_state))]
self.time_budget_position = np.argmin(distances)
else:
self.time_budget_position = self.time_budgets_state.index(float(self.time_budget))
def meta_train(self, datasets_meta_features, algorithms_meta_features, validation_learning_curves,
test_learning_curves):
self.train_datasets_ids = [k for k in test_learning_curves][:25]
self.ordered_features = self._ds_ordered(datasets_meta_features[random.choice(self.train_datasets_ids)])
self.cluster = self.kmeans_clustering(datasets_meta_features)
self.cluster_labels = self.cluster.labels_
self.time_budgets_state = []
for ds in datasets_meta_features:
if float(datasets_meta_features[ds]['time_budget']) not in self.time_budgets_state:
self.time_budgets_state.append(float(datasets_meta_features[ds]['time_budget']))
self.Q = np.random.rand(12, len(self.time_portions) - 1, self.n_actions)
maxit = 5000
for iteration in range(maxit):
for idx, episode in enumerate(self.train_datasets_ids):
self.dataset_num = episode
self.counters = {i: 0.0 for i in range(self.n_actions)} # Counters keeping track of the time spent on each algorithm
dataset_meta_features = datasets_meta_features[episode]
self.cluster_label = self.cluster_labels[idx]
self.total_time_budget = float(dataset_meta_features['time_budget'])
self.remaining_time_budget = self.total_time_budget
self.list_algorithms = [k for k in test_learning_curves[episode].keys()]
self.reset_for_train(dataset_meta_features, algorithms_meta_features)
#print(
# "\n#===================== Start META-TRAINING on dataset: " + episode + " =====================#")
#print( "\n#---Dataset meta-features = " + str(datasets_meta_features[episode]))
#print( "\n#---Algorithms meta-features = " + str(algorithms_meta_features))
observation = None
for it in range(len(self.time_portions)-1):
# === Get the agent's suggestion
(best_algo, next_algo, delta_t) = self.suggest_for_train(observation)
action = (best_algo, next_algo, delta_t)
self.timestamps = test_learning_curves[episode][self.list_algorithms[next_algo]].timestamps
self.scores = test_learning_curves[episode][self.list_algorithms[next_algo]].scores
R_test_C_A, C_A = self.get_last_point_within_delta_t(delta_t, self.validation_time_seen[next_algo])
self.validation_time_seen[next_algo] = C_A
observation = (next_algo, C_A, R_test_C_A)
# print( "------------------")
# print( "A_star = " + str(action[0]))
# print( "A = " + str(action[1]))
# print( "delta_t = " + str(action[2]))
# print( "remaining_time_budget = " + str((1.01-self.time_portions[self.time_used-1])*self.time_budget))
# print( "observation = " + str(observation))
#print( "[+]Finished META-TRAINING phase")
# #self.reset(datasets_meta_features[episode], algorithms_meta_features)
def suggest_for_train(self, observation):
next_algo_to_reveal = self.get_action_eps_greedy(self.cluster_label, self.time_used-1)
delta_t = (self.time_portions[self.time_used]-self.time_portions[self.time_used-1])*self.time_budget
if observation == None:
best_algo_for_test = None
self.time_used += 1
self.old_score = 0
else:
A, C_A, R_validation_C_A = observation
self.validation_last_scores[A] = R_validation_C_A
self.validation_time_seen[A] = C_A
weight = ((1.01-self.time_portions[self.time_used])*self.time_budget)
reward = (np.max([R_validation_C_A, self.old_score])-self.old_score)
#self.time_used += 1
self.update_Q(old_state=[self.cluster_label, self.time_used - 2], action=A, reward = reward, new_state=[self.cluster_label, self.time_used - 1])
self.time_used += 1
best_algo_for_test = np.argmax(self.validation_last_scores)
self.old_score = np.max([R_validation_C_A, self.old_score])
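# --- Illustrative addition -----------------------------------------------------
# The Agent above relies on helpers such as get_action_eps_greedy() and update_Q()
# that are not shown in this excerpt. The standalone sketch below illustrates the
# usual tabular epsilon-greedy selection and one-step Q-learning update such
# helpers would typically perform; it is an assumption, not the authors' exact
# implementation.
def _demo_eps_greedy_q_update(epsilon=0.2, alpha=0.01, gamma=0.7):
    rng = np.random.default_rng(0)
    n_states, n_actions = 4, 3
    Q = rng.random((n_states, n_actions))
    state, new_state, reward = 0, 1, 1.0
    # epsilon-greedy action selection
    if rng.random() < epsilon:
        action = int(rng.integers(n_actions))
    else:
        action = int(np.argmax(Q[state]))
    # one-step Q-learning update
    Q[state, action] += alpha * (reward + gamma * np.max(Q[new_state]) - Q[state, action])
    return Q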
# ############################################################################
# linear_operator.py
# =======
# Authors : <NAME> [<EMAIL>] and <NAME> [<EMAIL>]
# ############################################################################
"""
Classes and routines for linear operators used in generalised FRI problems.
"""
import numpy as np
import time as t
from abc import abstractmethod
import scipy.sparse.linalg as spsparse
import numpy.linalg as nplin
import scipy.linalg as splin
from scipy.signal import convolve, choose_conv_method
from numbers import Number
from typing import Union
class AbstractLinearOperator(spsparse.LinearOperator):
"""
Base class for linear operators, inherited from scipy.sparse.linalg.LinearOperator.
"""
def __init__(self, dtype: type, shape: tuple):
super(AbstractLinearOperator, self).__init__(shape=shape, dtype=dtype)
@abstractmethod
def pinv(self, x: np.ndarray):
pass
def proj(self, x: np.ndarray):
"""
Orthogonal projection onto the range of the linear operator.
:param x: np.ndarray
Vector to be projected.
:return: np.ndarray
Projected vector.
"""
return self.matvec(self.pinv(x))
def proj_conjugate(self, x: np.ndarray, sigma: float):
if not isinstance(sigma, Number):
raise ValueError("Parameter sigma must be numeric.")
return x - sigma * self.proj(x / sigma)
class LinearOperatorFromMatrix(AbstractLinearOperator):
"""
Class for linear operators defined from matrices.
:attribute mat: np.ndarray
Matrix representation of the linear operator.
:attribute adjoint: np.ndarray
Conjugate transpose of `mat`.
:attribute gram: np.ndarray
Gram matrix adjoint @ mat
:attribute norm, lipschitz_cst: float
Spectral norm of operator.
"""
def __init__(self, mat: np.ndarray):
"""
Initialize object of class.
:param mat: np.ndarray[L,N]
Matrix representation of the linear operator.
"""
# Check mat
try:
mat = np.asarray(mat)
except ValueError:
print("Input matrix must be a numpy array.")
# Init from super class
super(LinearOperatorFromMatrix, self).__init__(shape=mat.shape, dtype=mat.dtype)
# Matrix corresponding to the linear operator
self.mat = mat
# Adjoint
self.adjoint = mat.conj().transpose()
# Corresponding Gram matrix
self.gram = self.adjoint @ mat
# Spectral norm, Lipschitz constant
self.norm = self.lipschitz_cst = np.sqrt(
spsparse.eigs(self.gram, k=1, which='LM', return_eigenvectors=False, maxiter=int(5e4)))
def _matvec(self, x: np.ndarray):
"""
Matrix/vector product.
:param x: np.ndarray[N,]
Vector.
:return: np.ndarray[L,]
Vector resulting from matrix/vector product.
"""
M, N = self.shape
if x.shape != (N,) and x.shape != (N, 1):
raise ValueError('dimension mismatch')
return self.mat @ x
def _rmatvec(self, x: np.ndarray):
"""
Adjoint matrix/vector product.
:param x: np.ndarray[L,]
Vector.
:return: np.ndarray[N,]
Vector resulting from the adjoint matrix/vector product.
"""
M, N = self.shape
if x.shape != (M,) and x.shape != (M, 1):
raise ValueError('dimension mismatch')
return self.adjoint @ x
def pinv(self, x: np.ndarray, rcond: float = 1e-9):
"""
Evaluate the pseudo-inverse of the linear operator for a vector x.
:param x: np.ndarray[L,]
Vector.
:param rcond:
Cutoff for eigenvalues in `np.linalg.pinv`.
:return: np.ndarray[N,]
"""
M, N = self.shape
if x.shape != (M,) and x.shape != (M, 1):
raise ValueError('dimension mismatch')
inv_mat = np.linalg.pinv(self.mat, rcond=rcond)
return inv_mat @ x
class Id(LinearOperatorFromMatrix):
"""
Class for identity operator inherited from `LinearOperatorFromMatrix`.
"""
def __init__(self, n: int):
super(Id, self).__init__(mat=np.eye(n))
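# --- Illustrative addition -----------------------------------------------------
# Minimal usage sketch for LinearOperatorFromMatrix and Id (not part of the
# original module): wrap a small random matrix and check that proj() behaves as
# an orthogonal projector onto its range.
def _demo_linear_operator_from_matrix():
    A = np.random.rand(5, 3)
    op = LinearOperatorFromMatrix(A)
    y = np.random.rand(5)
    p = op.proj(y)                      # orthogonal projection onto range(A)
    assert np.allclose(op.proj(p), p)   # projecting a second time changes nothing
    ident = Id(4)
    assert np.allclose(ident.matvec(np.ones(4)), np.ones(4))
    return p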
class ToeplitzificationOperator(AbstractLinearOperator):
"""
Class for Toeplitzification operator, inherited from `AbstractLinearOperator`.
:attribute P: int
Parameter P in [Section II.A,1].
:attribute M: int
Parameter M in [Section II.A,1].
:attribute N: int
Parameter N=2*M+1 in [Section II.A,1].
:attribute norm: float
Spectral norm of linear operator.
:attribute gram: np.ndarray
Diagonal Gram matrix stored as 1D array.
Reference: Section II.A of
[1] <NAME>., <NAME>., <NAME>. & <NAME>. (2020). Cadzow Plug-and-Play Gradient Descent for Generalised FRI.
Under review.
"""
def __init__(self, P: int, M: int, dtype: type = np.complex128):
"""
Initialize Toeplitzification operator with parameter P acting on vectors of size N=2*M+1.
:param P: int,
:param M: int.
:param dtype: type
Type of the entries of the linear operator.
"""
# Check P
try:
P = int(P)
except ValueError:
print("P must be a number.")
# Check M
try:
M = int(M)
except ValueError:
print("M must be a number.")
self.P = P
self.M = M
self.N = 2 * M + 1
self.__offsets = -(np.arange(1, self.N + 1) - 1 - self.P)
# Init from super class
shape = ((self.N - self.P) * (self.P + 1), self.N)
super(ToeplitzificationOperator, self).__init__(shape=shape, dtype=dtype)
self.norm = np.sqrt(self.P + 1)
self.gram = toep_gram(self.P, self.N)
def _matvec(self, x: np.ndarray):
"""
Compute Tp(x).
:param x: np.ndarray[N,]
:return: np.ndarray[N-P,P+1]
Toeplitz matrix generated by the entries of x.
"""
if x.size != self.N:
return print(f'The vector x must have (odd) size {self.N}.')
else:
index_i = -self.M + self.P + np.arange(1, self.N - self.P + 1) - 1
index_j = np.arange(1, self.P + 2) - 1
index_ij = index_i[:, None] - index_j[None, :]
Tp_x = x[index_ij + self.M]
return Tp_x
def matvec(self, x: np.ndarray):
"""
Alias of _matvec.
"""
return self._matvec(x)
def _rmatvec(self, x: np.ndarray):
"""
Compute Tp'(x): maps a matrix onto a vector by summing the anti-diagonals.
:param x: np.ndarray[N-P,P+1]
Matrix.
:return: np.ndarray[N,]
Vector resulting from anti-diagonal summations.
"""
if x.size != self.shape[0]:
return print(f'M must have shape {self.shape[0]}x{self.shape[1]}.')
else:
out = np.zeros(shape=(self.N,), dtype=x.dtype)
for (i, m) in enumerate(self.__offsets):
out[i] = np.sum(np.diagonal(x, offset=m))
return out
def rmatvec(self, x: np.ndarray):
"""
Alias of _rmatvec.
"""
return self._rmatvec(x)
def pinv(self, x: np.ndarray):
"""
Apply pseudo-inverse of Tp to the input matrix x. We have Tp.pinv(Tp(x))=x.
:param x: np.ndarray[N-P,P+1]
:return: np.ndarray[N,]
"""
return self.rmatvec(x) / self.gram
def rightdual(self, h: np.ndarray):
"""
Right dual R of Toeplitzification operator: T(x)h=R(h)x.
:param h: np.ndarray[P+1,]
Generator vector.
:return: np.ndarray[N-P,N]
Toeplitz matrix.
Reference: See Definition 1 of
[2] <NAME>., <NAME>., & <NAME>. (2016). Towards generalized FRI sampling with an application to source resolution
in radioastronomy. IEEE Transactions on Signal Processing, 65(4), 821-835.
"""
col = np.concatenate(([h[-1]], np.zeros(self.N - self.P - 1)))
row = np.concatenate((h[::-1], np.zeros(self.N - self.P - 1)))
return splin.toeplitz(col, row)
class ToeplitzMatrixFree(AbstractLinearOperator):
"""
Class for matrix-free Toeplitz operators, inherited from `AbstractLinearOperator`.
:attribute P: int
Parameter P in [Section II.A,1].
:attribute M: int
Parameter M in [Section II.A,1].
:attribute N: int
Parameter N=2*M+1 in [Section II.A,1].
:attribute x: np.ndarray[N,]
Generator vector.
:attribute method: str {'auto','direct','fft'}
Method used by scipy.signal.convolve for performing discrete convolutions.
:attribute measure: bool
Whether or not `method` is chosen from precomputed setups or via direct time measures.
Reference: Supplementary material Appendix A of
[1] <NAME>., <NAME>., <NAME>. & <NAME>. (2020). Cadzow Plug-and-Play Gradient Descent for Generalised FRI.
Under review.
"""
def __init__(self, P: int, M: int, x, measure: bool = False, method: str = 'auto', choose_method: bool = False):
"""
Initialize object of the class.
:param P: int
Parameter P in [Section II.A,1].
:param M: int
Parameter M in [Section II.A,1].
:param x: np.ndarray[2*M+1,]
Generator vector.
:param measure: bool
Whether or not `method` is chosen from precomputed setups or via direct time measures.
:param method: str {'auto','direct','fft'}
Method used by scipy.signal.convolve for performing discrete convolutions.
:param choose_method: bool
If True choose the optimal convolution method using scipy.signal.choose_conv_method.
"""
self.P = P
self.M = M
self.N = 2 * M + 1
self.x = x
self.measure = measure
super(ToeplitzMatrixFree, self).__init__(shape=(self.N - self.P, self.P + 1), dtype=x.dtype)
if choose_method is True:
self.method = self.choose_method()
else:
self.method = method
@property
def mat(self) -> np.ndarray:
"""
Return the Toeplitz matrix associated to the Toeplitz operator.
:return: np.ndarray[N-P,P+1]
Toeplitz matrix.
"""
Tp = ToeplitzificationOperator(P=self.P, M=self.M, dtype=self.x.dtype)
return Tp.matvec(self.x)
def choose_method(self):
"""
Choose the optimal convolution method using scipy.signal.choose_conv_method.
:return: str {'direct','fft'}
Optimal convolution method.
"""
h = np.random.rand(self.P + 1) + 1j * np.random.rand(self.P + 1)
if self.measure is True:
method, _ = choose_conv_method(self.x, h, mode='valid', measure=self.measure)
else:
method = choose_conv_method(self.x, h, mode='valid', measure=self.measure)
return method
def matvec(self, h: np.ndarray) -> np.ndarray:
"""
Alias to _matvec.
"""
return self._matvec(h)
def rmatvec(self, u: np.ndarray) -> np.ndarray:
"""
Alias to _rmatvec.
"""
return self._rmatvec(u)
def _matvec(self, h: np.ndarray) -> np.ndarray:
"""
Compute Tp(x)h as the valid part of the convolution between x and h (see [Appendix A, 1]).
:param h: np.ndarray[P+1,]
:return: np.ndarray[N-P,]
"""
return convolve(self.x, h, mode='valid', method=self.method)
def _rmatvec(self, u: np.ndarray) -> np.ndarray:
"""
Compute Tp(x)'u as the valid part of the cross-correlation between x and h (see [Appendix A, 1]).
:param u: np.ndarray[N-P,]
:return: np.ndarray[P+1,]
"""
return convolve(self.x.conj()[::-1], u, mode='valid', method=self.method)
class FRISampling(LinearOperatorFromMatrix):
"""
Class for the non-uniform low-pass sampling operator, used in [Section V, 1].
Inherited from `AbstractLinearOperator`.
:attribute frequencies: np.ndarray[N,]
Fourier frequencies.
:attribute time_samples: np.ndarray[L,]
Sampling times.
:attribute period: float
Period of Dirac stream.
Reference: Section V of
[1] <NAME>., <NAME>., <NAME>. & <NAME>. (2020). Cadzow Plug-and-Play Gradient Descent for Generalised FRI.
Under review.
"""
def __init__(self, frequencies: np.ndarray, time_samples: np.ndarray, period: float):
"""
Initialize an object from the class.
:param frequencies: np.ndarray[N,]
Fourier frequencies.
:param time_samples: np.ndarray[L,]
Sampling times.
:param period: float
Period of Dirac stream.
"""
# Check dtypes of the different inputs
try:
frequencies = np.asarray(frequencies)
except ValueError:
print("Invalid value of frequencies. Must be a np.array.")
self.frequencies = frequencies
try:
time_samples = np.asarray(time_samples)
except ValueError:
print("Invalid value of time samples. Must be a np.array.")
self.time_samples = time_samples
if not isinstance(period, Number):
raise ValueError("Invalid value of period. Must be a float.")
self.period = float(period)
# Build FRI sampling matrix and corresponding operator
super(FRISampling, self).__init__(mat=self.build_sampling_matrix())
def build_sampling_matrix(self):
"""
Forward operator for traditional FRI setups (ideal low-pass filtering followed by regular or irregular time
sampling).
:param frequencies: np.ndarray[2*M+1,]
:param time_samples: np.ndarray[L,]
:param period: float,
:return: np.ndarray[L,2*M+1]
"""
G = self.period * np.exp(1j * 2 * np.pi * self.frequencies[None, :] * self.time_samples[:, None] / self.period)
return G
class GaussianRandomSampling(LinearOperatorFromMatrix):
"""
Class for Gaussian random matrices.
Inherited from `LinearOperatorFromMatrix`.
:attribute nrows: int
Number of rows.
:attribute ncols: int
Number of columns.
:attribute rank: int
Rank of matrix
"""
def __init__(self, nrows: int, ncols: int, rank: int):
"""
Initialize an object from the class.
:param nrows: int
Number of rows.
:param ncols: int
Number of columns.
:param rank: int
Rank of matrix
"""
try:
nrows = int(nrows)
except ValueError:
print("Invalid value of nrows. Must be an int.")
self.nrows = nrows
try:
ncols = int(ncols)
except ValueError:
print("Invalid value of ncols. Must be an int.")
self.ncols = ncols
if rank is None:
rank = np.minimum(nrows, ncols)
try:
rank = int(rank)
except ValueError:
print("Invalid value of rank. Must be an int.")
self.rank = rank
super(GaussianRandomSampling, self).__init__(mat=self.build_sampling_matrix())
def build_sampling_matrix(self) -> np.ndarray:
"""
Forms the matrix.
:return: np.ndarray[nrows, ncols]
Gaussian random matrix of rank specified by the attribute `rank`.
"""
mean = 0.0
stdev = 1 / np.sqrt(self.nrows)
prng = np.random.RandomState(seed=3)
G = prng.normal(loc=mean, scale=stdev, size=(self.nrows, self.ncols))
# Low-rank approximation to reduce the rank
U, s, Vh = np.linalg.svd(G, full_matrices=False)
if self.rank < np.minimum(self.nrows, self.ncols):
s[self.rank:] = 0
S = np.diag(s)
return np.matmul(U, np.matmul(S, Vh))
def toep_gram(P: int, N: int) -> np.ndarray:
"""
Return diagonal entries of Tp'Tp.
:param P: float,
:param N: float,
:return: np.ndarray (N,)
Diagonal entries of the Gram matrix.
"""
weights = np.ones(shape=(N,)) * (P + 1)
weights[:P] = np.arange(1, P + 1)
weights[N - P:] = np.flip(np.arange(1, P + 1), axis=0)
return weights
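# --- Illustrative addition (not part of the original module) -------------------
# Quick sanity check of toep_gram: for P=2 and N=7 the diagonal of Tp'Tp ramps up
# over the first P entries, plateaus at P+1 and ramps back down over the last P.
def _demo_toep_gram():
    w = toep_gram(P=2, N=7)
    assert np.allclose(w, [1, 2, 3, 3, 3, 2, 1])
    return w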
def build_toeplitz_operator(P: int, M: int, x: np.ndarray, toeplitz_class: str = 'standard', method=None) -> Union[
LinearOperatorFromMatrix, ToeplitzMatrixFree]:
"""
Build a Toeplitz operator in standard or matrix-free form.
:param P: int
:param M: int
:param x: np.ndarray[2*M+1,]
Generator.
:param toeplitz_class: str {'standard','matrix_free'}
If 'standard' returns object of class `LinearOperatorFromMatrix` otherwise an object of class `ToeplitzMatrixFree`.
:param method: {None,'auto','direct','fft'}
Method used by scipy.signal.convolve for performing discrete convolutions. Only used if `toeplitz_class` is
'matrix-free'.
:return: {LinearOperatorFromMatrix,ToeplitzMatrixFree}
Toeplitz operator object of specified class.
"""
if toeplitz_class in ['standard', 'matrix_free']:
chosen_toeplitz_class = toeplitz_class
else:
chosen_toeplitz_class = None
print("Method must be one of: ['standard','matrix_free']")
if chosen_toeplitz_class == 'standard':
Tp = ToeplitzificationOperator(P=P, M=M, dtype=x.dtype)
return LinearOperatorFromMatrix(mat=Tp.matvec(x))
else:
return ToeplitzMatrixFree(P, M, x, measure=False, method=method)
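# --- Illustrative addition (not part of the original module) -------------------
# Sketch comparing the two forms returned by build_toeplitz_operator: the dense
# 'standard' operator and the 'matrix_free' operator should give the same
# matrix/vector products.
def _demo_build_toeplitz_operator():
    M, P = 5, 2
    x = np.random.rand(2 * M + 1) + 1j * np.random.rand(2 * M + 1)
    h = np.random.rand(P + 1) + 1j * np.random.rand(P + 1)
    T_dense = build_toeplitz_operator(P, M, x, toeplitz_class='standard')
    T_free = build_toeplitz_operator(P, M, x, toeplitz_class='matrix_free', method='direct')
    assert np.allclose(T_dense.matvec(h), T_free.matvec(h))
    return T_dense, T_free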
def choose_toeplitz_class(P: int, M: int, measure: bool = False, avg_size: int = 10):
"""
Choose optimal Toeplitz class and convolution method by comparing execution times.
:param P: int
:param M: int
:param measure: bool
If True computations are timed otherwise the setup is matched to the closest existing pre-computed scenario.
:param avg_size: int
Number of repeated runs for timing the computation.
:return: tuple {('standard',None),('matrix_free',str)}
Optimal Toeplitz class and convolution method (if applicable).
"""
x = np.random.rand(2 * M + 1)
# %load ../../src/models/model_utils.py
# %%writefile ../../src/models/model_utils.py
"""
Author: <NAME>
Created in the scope of my PhD
"""
import pandas as pd
import numpy as np
import sklearn as sk
import math
import itertools
from scipy import stats
from sklearn.model_selection import KFold
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import LinearRegression, Ridge, Lasso, HuberRegressor
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.kernel_ridge import KernelRidge
from sklearn.svm import SVC, SVR
from sklearn.preprocessing import PolynomialFeatures
def CreateRankedLabels(a):
pw = list(itertools.combinations(a,2))
labels = [1 if item[0]>item[1] else -1 for item in pw]
return labels
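# --- Illustrative addition (not part of the original module) -------------------
# CreateRankedLabels maps every ordered pair (i, j) with i < j to +1 if a[i] > a[j]
# and to -1 otherwise, e.g. [3, 1, 2] -> pairs (3,1), (3,2), (1,2) -> [1, 1, -1].
def _demo_create_ranked_labels():
    labels = CreateRankedLabels([3, 1, 2])
    assert labels == [1, 1, -1]
    return labels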
def GetParameterSet(parLabel, parRange):
"""Retrieve a set of parameter values used for training of a model in sklearn.
Parameters
-----------
parLabel : 1-dimensional numpy array (str)
numpy array holding a set of parameter labels. Valid labels include:
[alpha, gamma, C, coef0, epsilon, max_depth, min_samples, max_features]
parRange : 1-dimensional numpy array (int)
numpy array with the amount of parameters returned for every parameter label.
parLabel and parRange must be of the same dimension.
Returns
--------
parSet : Dictionary
Dictionary containing a set of parameters for every label
"""
if parLabel[0] in ['max_depth','min_samples_split', 'max_features']:
parameters = [np.zeros(parRange[u],dtype=np.int) for u in range(len(parRange))]
else:
parameters = [np.zeros(parRange[u]) for u in range(len(parRange))]
for i in range(len(parLabel)):
if parLabel[i] == "alpha":
parameters[i][:] = [math.pow(10,(u - np.around(parRange[i]/2))) for u in range(parRange[i])]
elif parLabel[i] == "gamma":
parameters[i][:] = [math.pow(10,(u - np.around(parRange[i]/2))) for u in range(parRange[i])]
elif parLabel[i] == "C":
parameters[i][:] = [math.pow(10,(u - np.around(parRange[i]/2))) for u in range(parRange[i])]
elif parLabel[i] == "coef0":
parameters[i][:] = [math.pow(10,(u - np.around(parRange[i]/2))) for u in range(parRange[i])]
elif parLabel[i] == "epsilon":
parameters[i][:] = [0+2/parRange[i]*u for u in range(parRange[i])]
elif parLabel[i] == "max_depth":
parameters[i][:] = [int(u+1) for u in range(parRange[i])]
elif parLabel[i] == 'min_samples_split':
parameters[i][:] = [int(u+2) for u in range(parRange[i])]
elif parLabel[i] == 'max_features':
parameters[i][:] = [int(u+2) for u in range(parRange[i])]
else:
return print("Not a valid parameter")
parSet = {parLabel[u]:parameters[u] for u in range(len(parLabel))}
return parSet
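# --- Illustrative addition (not part of the original module) -------------------
# Usage sketch for GetParameterSet: request 3 values of the regularisation
# strength 'alpha' (log-spaced powers of ten) and 4 values of 'max_depth' (1..4).
def _demo_get_parameter_set():
    parSet = GetParameterSet(np.array(['alpha', 'max_depth']), np.array([3, 4]))
    assert len(parSet['alpha']) == 3
    assert list(parSet['max_depth']) == [1, 2, 3, 4]
    return parSet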
def EvaluateParameterSet(X_train, X_test, y_train, y_test, parModel, parSet):
"""Evaluate the scores of a set of parameters for a given model.
Parameters
-----------
X_train:
Training dataset features
X_test:
Test dataset features
y_train
Training dataset labels
y_test
Test dataset labels
parModel: Dictionary
parSet : Dictionary
Dictionary holding the parameter label and values over which the model has to be
evaluated. This can be created through the function GetParameterSet.
Accepted keys are:
[alpha, gamma, C, coef0, epsilon, max_depth, min_samples, max_features]
Returns
--------
scores: 1-dimensional numpy array: int
Fitted scores of the model with each of the parametersSets
optimalPar: int
Optimal parameter value for a given parameter label
"""
scores = []
parLabel = list(parSet.keys())[0] # evaluate the (single) parameter label provided in parSet
for i in range(len(parSet[parLabel])):
parSetIt = {parLabel: parSet[parLabel][i]}
model = SelectModel(**parModel, **parSetIt)
model.fit(X_train, y_train)
scores = np.append(scores, model.score(X_test, y_test))
optimalPar = parSet[parLabel][np.argmax(scores)]
return scores, optimalPar
def EvaluateScore(X_train, X_test, y_train, y_test, parModel, scoring='default', pw=False):
"""Evaluates the score of a model given for a given test and training data
Parameters
-----------
X_train, X_test: DataFrame
Test and training data of the features
y_train, y_test: 1-dimensional numpy array
Test and training data of the labels
parModel: dictionary
Parameters indicating the model and some of its features
Returns
--------
score: int
Score of the test data on the model
y_pred: 1-dimensional array
An array giving the predicted labels for a given test set
"""
model = SelectModel(**parModel)
model.fit(X_train,y_train)
y_pred = model.predict(X_test)
if scoring == 'default':
score = model.score(X_test,y_test)
elif scoring == 'kt':
if pw is True:
score = KendallTau(y_pred, y_test)
if pw is False:
y_pred_pw = CreateRankedLabels(y_pred)
y_test_pw = CreateRankedLabels(y_test)
score = KendallTau(y_pred_pw, y_test_pw)
elif scoring == 'spearman':
score = stats.spearmanr(y_test, y_pred)[0]
else:
raise("Scoring type not defined. Possible options are: 'default', 'kt', and 'spearman'")
return score, y_pred
def KendallTau(y_pred, y_true):
a = np.array(y_pred)
b = np.array(y_true)
n = len(y_pred)
score = (np.sum(a==b)-np.sum(a!=b))/n
return score
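# --- Illustrative addition (not part of the original module) -------------------
# KendallTau scores pairwise label agreement as (#agreements - #disagreements)/n.
def _demo_kendall_tau():
    score = KendallTau([1, -1, 1, 1], [1, -1, -1, 1])
    assert np.isclose(score, 0.5)   # 3 agreements, 1 disagreement out of 4 pairs
    return score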
def LearningCurveInSample(dfDataset, featureBox, y ,parModel, scoring='default', k=5, pw=False, step=1):
"""Calculates the learning curve of a dataset for a given model
Parameters
-----------
dfDataset: Dataframe
Dataframe holding sequences,
featureBox: Dataframe
Test dataset features
y: 1-dimensional numpy array
parModel: Dictionary
k: int
pw: Boolean
step: int
Returns
--------
scores: 1-dimensional numpy array: int
Fitted scores of the model with each of the parametersSets
optimalPar: int
Optimal parameter value for a given parameter label
"""
X = featureBox.values
if pw is True:
temp = np.unique(dfDataset[['ID_1', 'ID_2']].values)
dfId = pd.Series(temp[:-(len(temp)%k)])
else:
dfId = dfDataset['ID'][:-(len(dfDataset)%k)]
lenId = len(dfId)
Id = dfId.values
indexId = np.array(range(lenId))
scores = np.array([])
it=0
for i in range(k):
boolTest = np.logical_and(indexId>=i*lenId/k,indexId<(i+1)*lenId/k)
test = Id[boolTest]
train = Id[np.invert(boolTest)]
if pw is True:
indexTest = (dfDataset['ID_1'].isin(test) | dfDataset['ID_2'].isin(test)).values
else:
indexTest = dfDataset['ID'].isin(test).values
dfDatasetTrain = dfDataset[np.invert(indexTest)]
X_train, y_train = featureBox[np.invert(indexTest)], y[np.invert(indexTest)]
X_test, y_test = featureBox[indexTest], y[indexTest]
for j in range((len(train)-5)//step):
print("\rProgress {:2.1%}".format(it/k+(j/len(train)/k*step)), end='')
trainInner = train[:(j*step)+5]
if pw is True:
indexTrainInner = (dfDatasetTrain['ID_1'].isin(trainInner) & dfDatasetTrain['ID_2'].isin(trainInner)).values
else:
indexTrainInner = (dfDatasetTrain['ID'].isin(trainInner)).values
X_trainInner, y_trainInner = X_train[indexTrainInner], y_train[indexTrainInner]
score, y_pred = EvaluateScore(X_trainInner, X_test, y_trainInner, y_test, {**parModel}, scoring, pw)
scores = np.append(scores,score)
it+=1
scores = scores.reshape((k,-1))
return scores
def LearningCurveInSampleEnriched(dfDataset, featureBox, enrichBox, y, y_enrich ,parModel,
scoring='default', k=5, pw=True, step=1):
"""Calculates the learning curve of an enriched dataset for a given model
Parameters
-----------
dfDataset: Dataframe
Dataframe holding sequences,
featureBox: Dataframe
Test dataset features
y: 1-dimensional numpy array
parModel: Dictionary
k: int
pw: Boolean
step: int
Returns
--------
scores: 1-dimensional numpy array: int
Fitted scores of the model with each of the parametersSets
optimalPar: int
Optimal parameter value for a given parameter label
"""
if pw is True:
temp = np.unique(dfDataset[['ID_1', 'ID_2']].values)
dfId = pd.Series(temp[:-(len(temp)%k)])
else:
dfId = dfDataset['ID'][:-(len(dfDataset)%k)]
lenId = len(dfId)
Id = dfId.values
indexId = np.array(range(lenId))
scores = np.array([])
it=0
for i in range(k):
boolTest = np.logical_and(indexId>=i*lenId/k,indexId<(i+1)*lenId/k)
test = Id[boolTest]
train = Id[np.invert(boolTest)]
if pw is True:
indexTest = (dfDataset['ID_1'].isin(test) | dfDataset['ID_2'].isin(test)).values
else:
indexTest = dfDataset['ID'].isin(test).values
dfDatasetTrain = dfDataset[np.invert(indexTest)]
X_train = featureBox[np.invert(indexTest)]
y_train = y[np.invert(indexTest)]
X_test, y_test = featureBox[indexTest], y[indexTest]
for j in range((len(train))//step):
print("\rProgress {:2.1%}".format(it/k+(j/len(train)/k*step)), end='')
trainInner = train[:(j*step)]
if pw is True:
indexTrainInner = (dfDatasetTrain['ID_1'].isin(trainInner) & dfDatasetTrain['ID_2'].isin(trainInner)).values
else:
indexTrainInner = (dfDatasetTrain['ID'].isin(trainInner)).values
X_trainInner = np.vstack((enrichBox,X_train[indexTrainInner]))
y_trainInner = np.append(y_enrich, y_train[indexTrainInner])
score, y_pred = EvaluateScore(X_trainInner, X_test, y_trainInner, y_test, {**parModel}, scoring, pw)
scores = np.append(scores, score)
'''
Amplitude-Phase Recombination: Rethinking Robustness of Convolutional Neural Networks in Frequency Domain (ICCV2021)
Paper link: https://arxiv.org/abs/2108.08487
Modified by <NAME>
2021.8.30
'''
import random
from PIL import Image
import numpy as np
from PIL import Image, ImageOps, ImageEnhance
class APRecombination(object):
def __init__(self, img_size=32):
self.img_size = img_size
def int_parameter(level, maxval):
return int(level * maxval / 10)
def float_parameter(level, maxval):
return float(level) * maxval / 10.
def sample_level(n):
return np.random.uniform(low=0.1, high=n)
def autocontrast(pil_img, _):
return ImageOps.autocontrast(pil_img)
def equalize(pil_img, _):
return ImageOps.equalize(pil_img)
def posterize(pil_img, level):
level = int_parameter(sample_level(level), 4)
return ImageOps.posterize(pil_img, 4 - level)
def rotate(pil_img, level):
degrees = int_parameter(sample_level(level), 30)
if np.random.uniform() > 0.5:
degrees = -degrees
return pil_img.rotate(degrees, resample=Image.BILINEAR)
def solarize(pil_img, level):
level = int_parameter(sample_level(level), 256)
return ImageOps.solarize(pil_img, 256 - level)
def shear_x(pil_img, level):
level = float_parameter(sample_level(level), 0.3)
if np.random.uniform() > 0.5:
level = -level
return pil_img.transform((self.img_size, self.img_size),
Image.AFFINE, (1, level, 0, 0, 1, 0),
resample=Image.BILINEAR)
def shear_y(pil_img, level):
level = float_parameter(sample_level(level), 0.3)
if np.random.uniform() > 0.5:
level = -level
return pil_img.transform((self.img_size, self.img_size),
Image.AFFINE, (1, 0, 0, level, 1, 0),
resample=Image.BILINEAR)
def translate_x(pil_img, level):
level = int_parameter(sample_level(level), self.img_size / 3)
if np.random.random() > 0.5:
level = -level
return pil_img.transform((self.img_size, self.img_size),
Image.AFFINE, (1, 0, level, 0, 1, 0),
resample=Image.BILINEAR)
def translate_y(pil_img, level):
level = int_parameter(sample_level(level), self.img_size / 3)
if np.random.random() > 0.5:
level = -level
return pil_img.transform((self.img_size, self.img_size),
Image.AFFINE, (1, 0, 0, 0, 1, level),
resample=Image.BILINEAR)
# operation that overlaps with ImageNet-C's test set
def color(pil_img, level):
level = float_parameter(sample_level(level), 1.8) + 0.1
return ImageEnhance.Color(pil_img).enhance(level)
# operation that overlaps with ImageNet-C's test set
def contrast(pil_img, level):
level = float_parameter(sample_level(level), 1.8) + 0.1
return ImageEnhance.Contrast(pil_img).enhance(level)
# operation that overlaps with ImageNet-C's test set
def brightness(pil_img, level):
level = float_parameter(sample_level(level), 1.8) + 0.1
return ImageEnhance.Brightness(pil_img).enhance(level)
# operation that overlaps with ImageNet-C's test set
def sharpness(pil_img, level):
level = float_parameter(sample_level(level), 1.8) + 0.1
return ImageEnhance.Sharpness(pil_img).enhance(level)
augmentations = [
autocontrast, equalize, posterize, rotate, solarize, shear_x, shear_y,
translate_x, translate_y
]
augmentations_all = [
autocontrast, equalize, posterize, rotate, solarize, shear_x, shear_y,
translate_x, translate_y, color, contrast, brightness, sharpness
]
self.aug_list = augmentations_all
def __call__(self, x):
'''
:param img: (PIL Image): Image
:return: code img (PIL Image): Image
'''
op = np.random.choice(self.aug_list)
x = op(x, 3)
p = random.uniform(0, 1)
if p > 0.5:
return x
x_aug = x.copy()
op = np.random.choice(self.aug_list)
x_aug = op(x_aug, 3)
x = np.array(x).astype(np.uint8)
x_aug = np.array(x_aug).astype(np.uint8)
fft_1 = np.fft.fftshift(np.fft.fftn(x))
fft_2 = np.fft.fftshift(np.fft.fftn(x_aug))
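# --- Illustrative addition -----------------------------------------------------
# The excerpt above stops after the two FFTs are computed. In amplitude-phase
# recombination (as described in the APR paper cited in the header) the amplitude
# spectrum of one view is typically combined with the phase spectrum of the other
# before inverting the FFT. The standalone sketch below illustrates that idea; it
# is an assumption about the remaining step, not necessarily the repository's
# exact code.
def _demo_amplitude_phase_recombination(x, x_aug):
    import numpy as np
    fft_1 = np.fft.fftshift(np.fft.fftn(x))
    fft_2 = np.fft.fftshift(np.fft.fftn(x_aug))
    abs_2 = np.abs(fft_2)       # amplitude of the augmented view
    angle_1 = np.angle(fft_1)   # phase of the original view
    recombined = abs_2 * np.exp(1j * angle_1)
    mixed = np.fft.ifftn(np.fft.ifftshift(recombined))
    return np.real(mixed)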
# From https://github.com/jellis18/PAL/blob/master/bayesutils.py
# - modified to minimize non-lalsuite installations
# - requires
# -healpy
# -statsmodels http://statsmodels.sourceforge.net/, which requires pandas
import numpy as np
import matplotlib.pyplot as plt
import scipy.interpolate as interp
import scipy.ndimage.filters as filter
import healpy as hp
from lalinference.bayestar import plot as bplot
import matplotlib.mlab as ml
#import statsmodels.api as sm
from matplotlib.ticker import FormatStrFormatter, LinearLocator, NullFormatter, NullLocator
import matplotlib.ticker
import matplotlib.colors
from optparse import OptionParser
"""
Given a 2D matrix of (marginalised) likelihood levels, this function returns
the 1, 2, 3- sigma levels. The 2D matrix is usually either a 2D histogram or a
likelihood scan
"""
def getsigmalevels(hist2d):
# We will draw contours with these levels
sigma1 = 0.68268949
level1 = 0
sigma2 = 0.95449974
level2 = 0
sigma3 = 0.99730024
level3 = 0
#
lik = hist2d.reshape(hist2d.size)
sortlik = np.sort(lik)
# Figure out the 1sigma level
dTotal = np.sum(sortlik)
nIndex = sortlik.size
dSum = 0
while (dSum < dTotal * sigma1):
nIndex -= 1
dSum += sortlik[nIndex]
level1 = sortlik[nIndex]
# 2 sigma level
nIndex = sortlik.size
dSum = 0
while (dSum < dTotal * sigma2):
nIndex -= 1
dSum += sortlik[nIndex]
level2 = sortlik[nIndex]
# 3 sigma level
nIndex = sortlik.size
dSum = 0
while (dSum < dTotal * sigma3):
nIndex -= 1
dSum += sortlik[nIndex]
level3 = sortlik[nIndex]
return level1, level2, level3
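# --- Illustrative addition (not part of the original script) -------------------
# getsigmalevels on a toy Gaussian 2D histogram returns decreasing thresholds
# level1 >= level2 >= level3, since more probability mass lies above the 1-sigma
# contour value than above the 2- and 3-sigma ones.
def _demo_getsigmalevels():
    samples = np.random.randn(20000, 2)
    hist2d, _, _ = np.histogram2d(samples[:, 0], samples[:, 1], bins=40)
    level1, level2, level3 = getsigmalevels(hist2d)
    assert level1 >= level2 >= level3
    return level1, level2, level3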
# def confinterval(samples, sigma=0.68, onesided=False):
# """
# Given a list of samples, return the desired cofidence intervals.
# Returns the minimum and maximum confidence levels
# @param samples: Samples that we wish to get confidence intervals
# @param sigmalevel: Sigma level 1, 2, or 3 sigma, will return
# corresponding confidence limits
# @param onesided: Boolean to use onesided or twosided confidence
# limits.
# """
# # Create the ecdf function
# ecdf = sm.distributions.ECDF(samples)
# # Create the binning
# x = np.linspace(min(samples), max(samples), 1000)
# y = ecdf(x)
# # Find the intervals
# x2min = y[0]
# if onesided:
# bound = 1 - sigma
# else:
# bound = 0.5*(1-sigma)
# for i in range(len(y)):
# if y[i] >= bound:
# x2min = x[i]
# break
# x2max = y[-1]
# if onesided:
# bound = sigma
# else:
# bound = 1 - 0.5 * (1 - sigma)
# for i in reversed(range(len(y))):
# if y[i] <= bound:
# x2max = x[i]
# break
# return x2min, x2max
def makesubplot2d(ax, samples1, samples2, color=True, weights=None, smooth=True, \
bins=[40, 40], contours=True, x_range=None, y_range=None, \
logx=False, logy=False, logz=False):
if x_range is None:
xmin = np.min(samples1)
xmax = np.max(samples1)
else:
xmin = x_range[0]
xmax = x_range[1]
if y_range is None:
ymin = np.min(samples2)
ymax = np.max(samples2)
else:
ymin = y_range[0]
ymax = y_range[1]
if logx:
bins[0] = np.logspace(np.log10(xmin), np.log10(xmax), bins[0])
if logy:
bins[1] = np.logspace(np.log10(ymin), np.log10(ymax), bins[1])
hist2d,xedges,yedges = np.histogram2d(samples1, samples2, weights=weights, \
bins=bins,range=[[xmin,xmax],[ymin,ymax]])
extent = [xedges[0], xedges[-1], yedges[0], yedges[-1] ]
if logz:
for ii in range(hist2d.shape[0]):
for jj in range(hist2d.shape[1]):
if hist2d[ii,jj] <= 0:
hist2d[ii,jj] = 1
xedges = np.delete(xedges, -1) + 0.5*(xedges[1] - xedges[0])
yedges = np.delete(yedges, -1) + 0.5*(yedges[1] - yedges[0])
# gaussian smoothing
if smooth:
hist2d = filter.gaussian_filter(hist2d, sigma=0.75)
if contours:
level1, level2, level3 = getsigmalevels(hist2d)
contourlevels = (level1, level2, level3)
#contourcolors = ('darkblue', 'darkblue', 'darkblue')
contourcolors = ('black', 'black', 'black')
contourlinestyles = ('-', '--', ':')
contourlinewidths = (1.5, 1.5, 1.5)
contourlabels = [r'1 $\sigma$', r'2 $\sigma$',r'3 $\sigma$']
contlabels = (contourlabels[0], contourlabels[1], contourlabels[2])
c1 = ax.contour(xedges,yedges,hist2d.T,contourlevels, \
colors=contourcolors, linestyles=contourlinestyles, \
linewidths=contourlinewidths, zorder=2)
if color:
if logz:
c2 = ax.imshow(np.flipud(hist2d.T), extent=extent, aspect=ax.get_aspect(), \
interpolation='gaussian', norm=matplotlib.colors.LogNorm())
else:
c2 = ax.imshow(np.flipud(hist2d.T), extent=extent, aspect=ax.get_aspect(), \
interpolation='gaussian')
if logx:
ax.set_xscale('log')
if logy:
ax.set_yscale('log')
def makesubplot1d(ax, samples, weights=None, interpolate=False, smooth=True,\
label=None, bins=30, range=None, color='k'):
"""
Make histogram of samples
"""
if range is None:
hist, xedges = np.histogram(samples, bins, normed=True, weights=weights)
else:
hist, xedges = np.histogram(samples, bins, normed=True, range=range, weights=weights)
xedges = np.delete(xedges, -1) + 0.5*(xedges[1] - xedges[0])
# gaussian smoothing
if smooth:
hist = filter.gaussian_filter(hist, sigma=0.75)
if interpolate:
f = interp.interp1d(xedges, hist, kind='cubic')
xedges = np.linspace(xedges.min(), xedges.max(), 10000)
hist = f(xedges)
# make plot
if label is not None:
ax.plot(xedges, hist, color=color, lw=1.5, label=label)
else:
ax.plot(xedges, hist, color=color, lw=1.5)
def getMax(samples, weights=None, range=None, bins=50):
"""
Make histogram of samples
"""
if range is None:
hist, xedges = np.histogram(samples, bins, normed=True)
else:
hist, xedges = np.histogram(samples, bins, normed=True, range=range)
xedges = np.delete(xedges, -1) + 0.5*(xedges[1] - xedges[0])
# gaussian smoothing
hist = filter.gaussian_filter(hist, sigma=0.75)
# interpolation
f = interp.interp1d(xedges, hist, kind='cubic')
xedges = np.linspace(xedges.min(), xedges.max(), 10000)
hist = f(xedges)
return xedges[np.argmax(hist)]
# make triangle plot of marginalized posterior distribution
def triplot(chain, color=True, weights=None, interpolate=False, smooth=True, \
labels=None, figsize=(11,8.5), title=None, inj=None):
"""
Make Triangle plot
"""
# rcParams settings
plt.rcParams['ytick.labelsize'] = 10.0
plt.rcParams['xtick.labelsize'] = 10.0
plt.rcParams['text.usetex'] = True
plt.rcParams['figure.figsize'] = figsize
# get number of parameters
ndim = chain.shape[1]
parameters = np.linspace(0,ndim-1,ndim)
f, axarr = plt.subplots(nrows=len(parameters), ncols=len(parameters),figsize=figsize)
for i in range(len(parameters)):
# for j in len(parameters[np.where(i <= parameters)]:
for j in range(len(parameters)):
ii = i
jj = len(parameters) - j - 1
xmajorLocator = matplotlib.ticker.MaxNLocator(nbins=4,prune='both')
ymajorLocator = matplotlib.ticker.MaxNLocator(nbins=4,prune='both')
if j <= len(parameters)-i-1:
axarr[jj][ii].xaxis.set_minor_locator(NullLocator())
axarr[jj][ii].yaxis.set_minor_locator(NullLocator())
axarr[jj][ii].xaxis.set_major_locator(NullLocator())
axarr[jj][ii].yaxis.set_major_locator(NullLocator())
axarr[jj][ii].xaxis.set_minor_formatter(NullFormatter())
axarr[jj][ii].yaxis.set_minor_formatter(NullFormatter())
axarr[jj][ii].xaxis.set_major_formatter(NullFormatter())
axarr[jj][ii].yaxis.set_major_formatter(NullFormatter())
xmajorFormatter = FormatStrFormatter('%g')
ymajorFormatter = FormatStrFormatter('%g')
if ii == jj:
# Make a 1D plot
makesubplot1d(axarr[ii][ii], chain[:,parameters[ii]], \
weights=weights, interpolate=interpolate, \
smooth=smooth)
axarr[ii][jj].set_ylim(ymin=0)
if inj is not None:
axarr[ii][ii].axvline(inj[ii], lw=2, color='k')
else:
# Make a 2D plot
makesubplot2d(axarr[jj][ii], chain[:,parameters[ii]], \
chain[:,parameters[jj]],color=color, weights=weights, \
smooth=smooth)
if inj is not None:
axarr[jj][ii].plot(inj[ii], inj[jj], 'x', color='k', markersize=12, \
mew=2, mec='k')
axarr[jj][ii].xaxis.set_major_locator(xmajorLocator)
axarr[jj][ii].yaxis.set_major_locator(ymajorLocator)
else:
axarr[jj][ii].set_visible(False)
#axarr[jj][ii].axis('off')
if jj == len(parameters)-1:
axarr[jj][ii].xaxis.set_major_formatter(xmajorFormatter)
if labels:
axarr[jj][ii].set_xlabel(labels[ii])
if ii == 0:
if jj == 0:
axarr[jj][ii].yaxis.set_major_locator(NullLocator())
#axarr[jj][ii].set_ylabel('Post.')
else:
axarr[jj][ii].yaxis.set_major_formatter(ymajorFormatter)
if labels:
axarr[jj][ii].set_ylabel(labels[jj])
# overall plot title
if title:
f.suptitle(title, fontsize=14, y=0.90)
# make plots closer together
f.subplots_adjust(hspace=0.1)
f.subplots_adjust(wspace=0.1)
def pol2cart(lon, lat):
"""
Utility function to convert longitude,latitude on a unit sphere to
cartesian co-ordinates.
"""
x = np.cos(lat) * np.cos(lon)
"""
A class for link functions. These are link functions mapping responses to latent
responses on the real line.
TODO: put all probit models into one class with inheritance.
Author:
<NAME>
Date:
"""
from __future__ import division
from MixtureOfExperts.utils import tmvtnorm
import numpy as np
from scipy.stats import norm
import logging
logger = logging.getLogger(__name__)
__all__ = ['orderedProbit']
class orderedProbit(object):
"""
A class for the ordered probit model for ordinal outputs.
"""
# public (accessible through @property decorators below)
# private
_expert = None # expert
_L = None # Number of (linearly spaced) blocks (outputs = 0, 1, ..., L-1)
_epsilon0 = None # Start of linear spacing
_epsilonLm1 = None # end of linear spacing
_epsilon = None # the edges
@property
def categories(self):
"""Return the discrete categories for the probit model"""
#print(np.hstack((self._epsilon[1:-1], self._L)))
return np.hstack((self._epsilon[1:-1], self._L))
def __init__(self, expert, L, minmax, name='ordinal probit'):
"""
Initialize the class.
:param expert: # expert
"""
self.__name__ = name
self._expert = expert
self._L = L
self._epsilon0 = minmax[0]
self._epsilonLm1 = minmax[1]
epsilon = np.linspace(self._epsilon0, self._epsilonLm1, self._L)
epsilon = np.insert(epsilon, np.shape(epsilon), np.inf)
self._epsilon = np.insert(epsilon, 0, -np.inf)
def __str__(self):
"""
Return a string representation of the object.
"""
s = "\nLINK FUNCTION: " + self.__name__
s += "\n\t epsilon: {0} \n".format(str(self._epsilon))
s += "\n\t L: {0}".format(str(self._L))
s += "\n\t categories: {0}".format(str(self.categories))
return s
def sample_latent_response(self, X, Z, Y, theta, samples=1, position=None):
"""
:param X Covariates
:param Z Feature cluster allocations
:param Y Raw features [0,1,2,...,Nunique-1]
:param theta: Matrix containing hyper-parameters for all current clusters.
:param samples: Number of latent response samples to draw for each datum, default 1.
:return: Latent samples (binary)
"""
Ylatent = np.zeros_like(Y)
for j in np.unique(Z):
thetaJ = theta[int(j), :] # Hyper-params of cluster j
xj = X[Z == j] # Get which pairs are in j
yj = Y[Z == j]
Nj = np.shape(xj)[0] # Size of cluster j
for feature in range(Y.shape[1]):
itershift = feature * self._expert._n_hyper_indGP # for adding hypers back
# Expert mean
if self._expert._process_mean is not False:
mean_function = self._expert.process_mean
if self._expert._process_mean == 'Linear':
mean_function.A = thetaJ[self._expert.index_mean + itershift, None]
logging.debug('Linear mean function. Setting A when sampling probit model.')
elif self._expert._process_mean == 'Constant':
mean_function.C = thetaJ[self._expert.index_mean + itershift, None]
logging.debug('Constant mean function. Setting C when sampling probit model.')
else:
raise ValueError('Bad mean function')
mean = mean_function.f(xj)[:, 0]
else:
mean = np.zeros((xj.shape[0], 1))[:, 0]
# Expert covariance
kernel = self._expert.kern
kernel.variance = thetaJ[self._expert.index_signal + itershift] # Add hyper-parameters
kernel.lengthscale = thetaJ[self._expert.index_lengthscale + itershift] # Add hyper-parameters
covariance = kernel.K(xj, xj) + (thetaJ[self._expert.index_nugget] * np.eye(xj.shape[0]))
# Sample latent response from truncated multivariate normal, if yji == 0 (-inf, 0)
lower = [self._epsilon[int(yj[i])] for i in range(Nj)]
upper = [self._epsilon[int(yj[i])+1] for i in range(Nj)]
if position is not None:
pos0 = position[Z==j]
else:
pos0 = None
try:
Ylatent[Z == j, feature] = tmvtnorm.tmvtnorm_sample(samples, mean, covariance, lower, upper,
position=pos0)
assert (np.all(~np.isinf(Ylatent))), 'Bad latent Y for features allocated to cluster {0}'.format(j)
assert (np.all(~np.isnan(Ylatent))), 'Bad latent Y for features allocated to cluster {0}'.format(j)
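# --- Illustrative addition -----------------------------------------------------
# The sketch below mirrors how the constructor above builds the bin edges
# self._epsilon: L linearly spaced cut points padded with -inf and +inf, so that
# every latent value falls into exactly one ordinal bin. It is an illustration
# only and not part of the original module.
def _demo_ordinal_bin_edges(L=4, minmax=(-2.0, 2.0)):
    edges = np.linspace(minmax[0], minmax[1], L)
    edges = np.insert(edges, np.shape(edges), np.inf)
    edges = np.insert(edges, 0, -np.inf)
    assert len(edges) == L + 2 and np.isinf(edges[0]) and np.isinf(edges[-1])
    return edges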
import numpy as np
import random
import time
import matplotlib.pyplot as plt
plt.rcParams['font.sans-serif'] = ['SimHei'] # so that CJK labels display correctly
plt.rcParams['axes.unicode_minus'] = False # so that minus signs display correctly
# 2-D objective test functions
def Rastrigrin(x: np.ndarray):
return 20 + x[0] ** 2 - 10 * np.cos(2 * np.pi * x[0]) + x[1] ** 2 - 10 * np.cos(2 * np.pi * x[1])
def obj_fun(x: np.ndarray):
return abs(0.2 * x[0]) + 10 * np.sin(5 * x[0]) + 7 * np.cos(4 * x[0])
def obj_fun2(x: np.ndarray):
return (1 - x[0]) ** 2 + 100 * (x[0] - x[1] ** 2) ** 2
def Easom(x: np.ndarray):
return -1 * np.cos(x[0]) * np.cos(x[1]) * np.exp(-1 * (x[0] - np.pi) ** 2 - (x[1] - np.pi) ** 2)
def obj_fun_test1(x: np.ndarray):
return -1 * (10 + np.sin(1 / x) / ((x - 0.16) ** 2 + 0.1))
def Bohachevsky(x: np.ndarray):
return x[0] ** 2 + x[1] ** 2 - 0.3 * np.cos(3 * np.pi * x[0]) + 0.3 * np.cos(4 * np.pi * x[1]) + 0.3
def Schaffersf6(x: np.ndarray):
return 0.5 + (np.sin(x[0] ** 2 + x[1] ** 2) ** 2 - 0.5) / (1 + 0.001 * (x[0] ** 2 + x[1] ** 2) ** 2) ** 2
def Shubert(x: np.ndarray):
s1 = 0
s2 = 0
for i in range(1, 6):
s1 += i * np.cos((i + 1) * x[0] + i)
s2 += i * np.cos((i + 1) * x[1] + i)
return s1 + s2
# 1-D objective test function
def fun1(x):
return -(x + 10 * np.sin(5 * x) + 7 * np.cos(4 * x))
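# --- Illustrative addition (not part of the original script) -------------------
# Quick sanity checks for the test functions above: the Rastrigin function is 0
# at the origin and the Easom function is -1 at (pi, pi).
def _demo_objectives():
    assert np.isclose(Rastrigrin(np.array([0.0, 0.0])), 0.0)
    assert np.isclose(Easom(np.array([np.pi, np.pi])), -1.0)
    return True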
class GSA():
def __init__(self,T_max=400,T_min=10,pop=10,new_pop=10,cur_g=1,p=0.9,tour_n=10,func=fun1,shape=1, **kwargs):
"""
@param T_max: maximum temperature
@param T_min: minimum temperature
@param pop: number of individuals in the population
@param new_pop: number of new solutions generated for each individual
@param cur_g: current iteration (generation) number
@param p: selection probability p used in the tournament
@param tour_n: number of competitors in one tournament round
@param func: objective function
@param x_best: best point found so far
@param y_best: objective value at the best point
@param shape: dimension of x
"""
self.T_max = T_max
self.T = T_max # 当前温度
self.T_min = T_min
self.pop = pop
self.new_pop = new_pop
self.cur_g = cur_g
self.p = p
self.tour_n = tour_n
self.func = func
self.shape = shape
self.x_best = [0]*shape
self.y_best = 0
self.x_history = []
self.T_history = [self.T]
self.m, self.n, self.quench = kwargs.get('m', 1), kwargs.get('n', 1), kwargs.get('quench', 1)
self.lower, self.upper = kwargs.get('lower', -10), kwargs.get('upper', 10)
self.c = self.m * np.exp(-self.n * self.quench)
def xrange(self, xmin: np.ndarray, xmax: np.ndarray): # set the search range of x (multi-dimensional case not yet handled)
self.xmin = xmin
print('Lower bound of x: ' + str(xmin))
self.xmax = xmax
print('Upper bound of x: ' + str(xmax))
def init_pop(self):
pop1 = []
x_init = [10,10]
for i in range(self.pop):
pop1.append(self.get_new(x_init))
return pop1
def judge(self, df):
if df < 0: # new solution < old solution ---> accept directly
return True
else:
pp = np.exp(-1 * (df / self.T)) # new solution > old solution ---> compute acceptance probability p
rand_p = random.random() # draw rand_p ~ Uniform(0, 1)
if pp > rand_p:
return True
else:
return False
def get_new(self,x):
u = np.random.uniform(-1, 1, size=self.shape) # draw a uniformly distributed perturbation u ~ Uniform(-1, 1, size=d)
x_new = x + 20 * np.sign(u) * self.T * ((1 + 1.0 / self.T) ** np.abs(u) - 1.0)
return x_new
def generate(self,old_pop):
x_new = []
for i in range(len(old_pop)):
while len(x_new) < (i+1)*self.new_pop:
xnew = self.get_new(old_pop[i])
# Metropolis criterion
df = self.func(xnew) - self.func(old_pop[i]) # difference in objective values
if self.judge(df):
x_new.append(xnew)
return x_new
def tournament(self,x):
"""
Compute the fitness of each point and select N points as the survival set via a (GA-style) tournament.
If the point with the best fitness were discarded, a random point would be dropped instead and the best point added back.
This is handled here by inserting the best point into the survival set first.
"""
survive = []
x_cur = x
y_cur = [self.func(xx) for xx in x]
best_index = y_cur.index(min(y_cur))
self.x_best = x_cur[best_index]
self.y_best = y_cur[best_index]
survive.append(self.x_best) # put the best point into the survival set first
del(x_cur[best_index]) # remove the best point; the remaining points go through the tournament, which selects self.pop-1 of them
# Selection (tournament method)
# choose k (the tournament size) individuals from the population at random
# choose the best individual from the tournament with probability p
# choose the second best individual with probability p*(1-p)
# choose the third best individual with probability p*((1-p)^2)
# and so on
fitness = [self.y_best - self.func(xx) for xx in x_cur]
num_individuals = len(fitness)
indices = list(range(len(fitness)))
selected_indices = []
selection_size = self.pop - 1
while (len(selected_indices) < selection_size):
np.random.shuffle(indices)
idx_tournament = indices[0:self.tour_n]
fit = []
for i in range(len(idx_tournament)):
fit.append(fitness[idx_tournament[i]])
maxindex = fit.index(max(fit))
winner = idx_tournament[maxindex]
r = random.random()
if r < self.p:
selected_indices.append(winner)
selected_indices = list(set(selected_indices))
for i in range(len(selected_indices)):
survive.append(x_cur[selected_indices[i]])
return survive
def cool(self):
self.T = self.T * 0.7
self.T_history.append(self.T)
def disp(self):
if self.shape == 1:
pass
elif self.shape == 2:
fig = plt.figure()
ax = plt.axes(projection='3d')
x0_list = [[0 for col in range(len(self.x_history[0]))] for row in range(len(self.x_history))]
x1_list = [[0 for col in range(len(self.x_history[0]))] for row in range(len(self.x_history))]
for i in range(len(self.x_history)):
for j in range(len(self.x_history[i])):
x0_list[i][j] = self.x_history[i][j][0]
x1_list[i][j] = self.x_history[i][j][1]
x_min, x_max = min(min(row) for row in x0_list), max(max(row) for row in x0_list)
y_min, y_max = min(min(row) for row in x1_list), max(max(row) for row in x1_list)
x = np.arange(x_min, x_max, 0.01)
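# ----------------------------------------------------------------------
# Illustrative stand-alone sketch (not part of the class above): the same
# fast-annealing neighbour move, Metropolis acceptance and geometric cooling
# applied to a simple 2-D sphere function. Function and parameter names here
# are example-only choices, not taken from the original code.
# ----------------------------------------------------------------------
def _demo_fast_annealing(T_max=400.0, T_min=10.0, cooling=0.7, steps_per_T=50):
    import numpy as np
    rng = np.random.default_rng(0)
    f = lambda x: float(np.sum(np.asarray(x) ** 2))   # sphere test function
    x = np.array([10.0, 10.0])
    T = T_max
    while T > T_min:
        for _ in range(steps_per_T):
            u = rng.uniform(-1.0, 1.0, size=2)
            # fast-annealing visiting distribution, same form as get_new()
            x_new = x + 20.0 * np.sign(u) * T * ((1.0 + 1.0 / T) ** np.abs(u) - 1.0)
            df = f(x_new) - f(x)
            # Metropolis criterion, same form as judge()
            if df < 0 or np.exp(-df / T) > rng.random():
                x = x_new
        T *= cooling   # geometric cooling, same factor as cool()
    return x, f(x)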
# Authors: <NAME> <<EMAIL>>
"""
----------------------------------------------------------------------
--- jumeg.decompose.fourier_ica --------------------------------------
----------------------------------------------------------------------
authors:
<NAME>
<NAME>
email: <EMAIL>
Change history:
30.10.2019: bug fix: gap, mibs, and bic now return rank not index
17.10.2019: added rank estimation using PCA, FA in a cross-validation scenario
25.09.2019: separated functions that were combined
----------------------------------------------------------------------
The implementation of the following methods for automated
selection of the optimal data dimensionality is based on
following publications
----------------------------------------------------------------------
<NAME>, & <NAME>, 2002. "Adaptive Blind Signal and Image
Processing - Learning Algorithms and Applications,"
<NAME> & Sons
<NAME>, 'Automatic choice of dimensionality
for PCA', MIT Press (2001)
<NAME>, <NAME>, <NAME> and <NAME>, "Detecting the number
of clusters in n-way probabilistic clustering," IEEE Trans.
Pattern Anal. Mach. Intell., vol. 32, pp. 2006-2021, Nov, 2010.
<NAME>, and <NAME>, "Detection of signals by
information-theoretic criteria," IEEE Trans. on Acoustics,
vol. 33, pp. 387-392, 1985.
----------------------------------------------------------------------
Overview
----------------------------------------------------------------------
All methods are based on the eigenvalues you get from the
eigenvalue decomposition of the data covariance matrix.
All methods try to estimate the optimal data dimension:
- aic(): Akaike's information criterion
- bic(): Bayesian Information Criteria
- mibs(): MInka Bayesian model Selection
- mdl(): Minimum description length
- gap(): probabilistic clustering
- explVar(): explained variance
----------------------------------------------------------------------
"""
# ------------------------------------------
# import necessary modules
# ------------------------------------------
import numpy as np
from sklearn.decomposition import PCA, FactorAnalysis
from sklearn.model_selection import cross_val_score
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# AIC - Akaike's information criterion
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def aic(eigenvalues):
"""
Routine to estimate the model order using
the Akaike's information criterion (AIC)
For detailed information see:
<NAME>, and <NAME>,
"Detection of signals by information-theoretic
criteria," IEEE Trans. on Acoustics,
vol. 33, pp. 387-392, 1985.
Parameters
----------
eigenvalues: eigenvalues received when applying
PCA. Note eigenvalues must be sorted decreasing
Returns
-------
aic_dim: optimal data dimension based on the AIC
method
"""
# ------------------------------------------
# check input parameter
# ------------------------------------------
neig = len(eigenvalues)
aic = np.ones((neig))
# ------------------------------------------
# loop over all eigenvalues to estimate AIC
# ------------------------------------------
for idx in range(1, neig):
log_rho = np.mean(np.log(eigenvalues[idx:])) - np.log(np.mean(eigenvalues[idx:]))
aic[idx] = -2.0 * neig * (neig - idx + 1) * log_rho + 2.0 * (idx + 1) * (2.0 * neig - idx + 1)
# ------------------------------------------
# get rank of minimum AIC value
# ------------------------------------------
aic_dim = aic[1:].argmin() + 1
return aic_dim
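# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# Example usage (illustrative only, not part of the original module):
# eigenvalues of the sample covariance of synthetic 30-channel data with
# 5 strong components plus weak noise, fed to aic(). All sizes are
# arbitrary demo choices.
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def _demo_aic(n_chan=30, n_samples=2000, n_sources=5):
    rng = np.random.default_rng(0)
    mixing = rng.standard_normal((n_chan, n_sources))
    data = mixing @ rng.standard_normal((n_sources, n_samples))
    data += 0.05 * rng.standard_normal((n_chan, n_samples))
    cov = np.cov(data)
    eigenvalues = np.sort(np.linalg.eigvalsh(cov))[::-1]   # sorted decreasing
    return aic(eigenvalues)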
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# BIC - Bayesian Information Criteria
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def bic(eigenvalues, n_samples):
"""
Routine to estimate the Bayesian Information Criterion (BIC)
Parameters
----------
eigenvalues: eigenvalues received when applying
PCA. Note eigenvalues must be sorted decreasing
n_samples: number of samples/ time slices used to
estimate the covariance matrix for PCA
Returns
-------
bic: optimal data dimension
"""
# ------------------------------------------
# import necessary modules
# ------------------------------------------
from math import gamma
# ------------------------------------------
# set variables to conform with the notation
# in Cichocki and Amari, 'Adaptive Blind
# Signal And Image Processing', (2006), p.93
# ------------------------------------------
N = n_samples
m = len(eigenvalues)
bic_val = np.zeros(m)
log_pi = np.log(np.pi)
log_2pi = np.log(2.0 * np.pi)
log_N = np.log(N)
# ------------------------------------------
# loop over all possible ranks
# ------------------------------------------
for n in range(1, m):
# ------------------------------------------
# define some variables for BIC
#------------------------------------------
sigma = np.mean(eigenvalues[n:])
d_n = m*n - 0.5*n*(n+1)
p_n = -n * np.log(2.0)
A_n = 0.0
prod_lambda = np.sum(np.log(eigenvalues[:n]))
eigenvalues_tmp = eigenvalues.copy()
eigenvalues_tmp[n:] = sigma
# ------------------------------------------
# estimate p_n and A_n
# ------------------------------------------
# loop over n
for idx in range(n):
p_n += np.log(gamma(0.5*(m-idx))) - (0.5*(m-idx) * log_pi)
for j in range(idx+1, m):
A_n += np.log(eigenvalues_tmp[idx] - eigenvalues_tmp[j]) +\
np.log(eigenvalues_tmp[j]) + np.log(eigenvalues_tmp[idx]) + \
log_N + np.log(eigenvalues[idx]-eigenvalues[j])
# ------------------------------------------
# estimate the BIC value
# ------------------------------------------
bic_val[n] = - 0.5 * N * prod_lambda - N * (m-n) * np.log(sigma) - 0.5*(d_n+n) * log_N
# ------------------------------------------
# get rank of maximum BIC value
# ------------------------------------------
max_bic = bic_val[1:].argmax() + 1
return max_bic
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# MIBS - MInka Bayesian model Selection
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def mibs(eigenvalues, n_samples):
"""
Routine to estimate the MInka Bayesian model
Selection (MIBS) value as introduced in:
<NAME>, 'Automatic choice of dimensionality
for PCA', MIT Press (2001)
Note: For numerical stability here ln(MIBS) is
estimated instead of MIBS
Parameters
----------
eigenvalues: eigenvalues received when applying
PCA. Note eigenvalues must be sorted decreasing
n_samples: number of samples/ time slices used to
estimate the covariance matrix for PCA
Returns
-------
mibs: optimal data dimension
"""
# ------------------------------------------
# import necessary modules
# ------------------------------------------
from math import gamma
# ------------------------------------------
# set variables to conform with the notation
# in Cichocki and Amari, 'Adaptive Blind
# Signal And Image Processing', (2006), p.93
# ------------------------------------------
N = n_samples
m = len(eigenvalues)
mibs_val = np.zeros(m)
log_pi = np.log(np.pi)
log_2pi = np.log(2.0 * np.pi)
log_N = np.log(N)
# ------------------------------------------
# loop over all possible ranks
# ------------------------------------------
for n in range(1, m):
# ------------------------------------------
# define some variables for MIBS
#------------------------------------------
sigma = np.mean(eigenvalues[n:])
d_n = m*n - 0.5*n*(n+1)
p_n = -n * np.log(2.0)
A_n = 0.0
prod_lambda = np.sum(np.log(eigenvalues[:n]))
eigenvalues_tmp = eigenvalues.copy()
eigenvalues_tmp[n:] = sigma
# ------------------------------------------
# estimate p_n and A_n
# ------------------------------------------
# loop over n
for idx in range(n):
p_n += np.log(gamma(0.5*(m-idx))) - (0.5*(m-idx) * log_pi)
for j in range(idx+1, m):
A_n += np.log(eigenvalues_tmp[idx] - eigenvalues_tmp[j]) +\
np.log(eigenvalues_tmp[j]) + np.log(eigenvalues_tmp[idx]) + \
log_N + np.log(eigenvalues[idx]-eigenvalues[j])
# ------------------------------------------
# estimation of MIBS
# ------------------------------------------
mibs_val[n] = p_n - 0.5 * N * prod_lambda - N * (m-n) * np.log(sigma) - \
0.5 * A_n + 0.5*(d_n+n) * log_2pi - 0.5 * n * log_N
# ------------------------------------------
# get rank of maximum MIBS value
# ------------------------------------------
max_mibs = mibs_val[1:].argmax() + 1
return max_mibs
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# MDL - Minimum description length
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def mdl(eigenvalues):
"""
Routine to estimate the model order using the
minimum description length (MDL) criterion.
For detailed information see:
<NAME>, and <NAME>,
"Detection of signals by information-theoretic
criteria," IEEE Trans. on Acoustics,
vol. 33, pp. 387-392, 1985.
Parameters
----------
eigenvalues: eigenvalues received when applying
PCA. Note eigenvalues must be sorted decreasing
Returns
-------
mdl_dim: optimal data dimension based on the MDL
method
"""
# ------------------------------------------
# check input parameter
# ------------------------------------------
neig = len(eigenvalues)
mdl = np.ones((neig))
# ------------------------------------------
# loop over all eigenvalues to estimate MDL
# ------------------------------------------
for idx in range(1, neig):
log_rho = np.mean(np.log(eigenvalues[idx:])) - np.log(np.mean(eigenvalues[idx:]))
# Copyright 2022 <NAME>, MIT license
"""
Module with all the definitions (routines) of general use
of the multitaper routines.
Contains:
* set_xint - setup Ierly's quadrature
* xint - Quadrature by Ierley's method of Chebychev sampling.
* dpss_ev - Recalculate the DPSS eigenvalues using Quadrature
* dpss - calculate the DPSS for given NW, NPTS
* eigenspec - calculate eigenspectra using DPSS sequences.
* adaptspec - calculate adaptively weighted power spectrum
* jackspec - calculate adaptively weighted jackknifed 95% confidence limits
* qiinv - calculate the Stationary Inverse Theory Spectrum.
* ftest - performs the F-test for a line component
* yk_reshape - reshape eigenft's around significant spectral lines
* wt2dof - calculate the d.o.f. of the multitaper
* df_spec - Dual frequency spectrum, using two MTSPEC classes to compute.
* sft - the slow Fourier transform
* squick - for sine multitaper, constructs average multitaper
* squick2 - for sine multitaper, constructs average multitaper, 2 signals
* sadapt - for sine multitaper, adaptive estimation of # of tapers
* sadapt2 - for sine multitaper, same but for 2 signals
* north - for sine multitaper, derivatives of spectrum
* curb - for sine multitaper, clips # of tapers
* get_data - download data and load into numpy array
|
"""
#-----------------------------------------------------
# Import main libraries and modules
#-----------------------------------------------------
import numpy as np
import scipy
from scipy import signal
import scipy.linalg as linalg
import scipy.interpolate as interp
import scipy.optimize as optim
import os
#-------------------------------------------------------------------------
# SET_XINT - Set up weights and sample points for Ierly quadrature
#-------------------------------------------------------------------------
def set_xint(ising):
"""
Sets up weights and sample points for Ierley quadrature,
Slightly changed from original code, to avoid using common
blocks. Also avoided using some go to statements, not needed.
*Parameters*
ising : integer
ising=1
integrand is analytic in closed interval
ising=2
integrand may have bounded singularities
at end points
*Returns*
w : ndarray (nomx,lomx+1)
weights
x : sample points (lomx+1)
sample points
lomx=number of samples = 2**nomx
*Modified*
November 2004 (<NAME>)
|
"""
nomx = 8
lomx = 256
w = np.zeros((nomx,lomx+1),dtype=float)
x = np.zeros(lomx+1,dtype=float)
pi = np.pi
n = 2
for index in range(1,nomx+1):
n = 2*n
nx = n-2
if (index == 1):
nx=4
pin = pi/float(n)
nhalf = int(n/2)
for i in range(nhalf+1):
t = float(i)*pin
si = 0.0
for k in range(0,nx+1,2):
ck=4.0
if (k == 0):
ck=2.0
rk=float(k)
si=si+ck*np.cos(rk*t)/(1.0-rk*rk)
if (i==0 or i==nhalf):
si=0.5*si
t = np.cos(t)
if (ising == 2):
t=0.5*pi*(1.0 +t)
si=si*0.5 * np.sin(t)*pi
t=np.cos(t)
x[i] = 0.5 *(1.0 +t)
w[index-1, i] = 0.5 *si/float(n)
elif (ising == 1):
x[i] = 0.5 *(1.0 +t)
w[index-1,i] = 0.5 *si/float(n)
# end i loop
# end index loop
return w, x
#-------------------------------------------------------------------------
# XINT - Numerical integration in the Fourier Domain using Ierly's method
#-------------------------------------------------------------------------
def xint(a,b,tol,vn,npts):
"""
Quadrature by Ierley's method of Chebychev sampling.
*Parameters*
a : float
lower limit of integration
b : float
upper limit of integration
tol : float
tolerance for integration
vn : ndarray
taper or Slepian sequence to convert-integrate
npts : int
number of points of tapers
*Notes*
This is a slight variation of Gleen Ierly's code. What was
mainly done, was to avoid use of common blocks, defining all
variables and performing the numerical integration inside
(previously done by function pssevf).
Exponential convergence rate for analytic functions! Much faster
than Romberg; competitive with Gauss integration, without awkward
weights.
Integrates the function dpsw on (a, b) to absolute
accuracy tol > 0.
the function in time is given by rpar with ipar points
I removed the optional printing routine part of the code,
to make it easier to read. I also moved both nval, etol
as normal variables inside the routine.
nval = number of function calls made by routine
etol = approximate magnitude of the error of the result
NB: function set_xint is called once before xint to
provide quadrature samples and weights.
I also altered the subroutine call, to get the weights
and not save them in a common block, but get them
directly back.
lomx=number of samples = 2**nomx
*Modified*
November 2004 (<NAME>)
*Calls*
utils.set_xint
|
"""
pi = np.pi
tpi = 2.0 * pi
nomx = 8
lomx = 256
ising = 1
w, x = set_xint(ising)
#---------------------------
# Check tol
#---------------------------
if (tol <= 0.0):
raise ValueError("In xint tol must be > 0 ", tol)
est = np.zeros(nomx,dtype=float)
fv = np.zeros(lomx+1,dtype=float)
n = 1
im = 2**(nomx+1)
for index in range(1,nomx+1):
n = 2*n
im = int(im/2)
im2 = int(im/2)
if (index <= 1):
for i in range(n+1):
# Bottom
y = a+(b-a)*x[im2*i]
om = tpi*y
ct, st = sft(vn,om)
f1 = ct*ct+st*st
# Top
y = b-(b-a)*x[im2*i]
om = tpi*y
ct, st = sft(vn,om)
f2 = ct*ct+st*st
fv[im2*i] = f1 + f2
# end i loop, index 1,
else:
for i in range(1,n,2):
# Bottom
y = a+(b-a)*x[im2*i]
om = tpi*y
ct,st = sft(vn,om)
f1 = ct*ct+st*st
# Top
y = b-(b-a)*x[im2*i]
om = tpi*y
ct, st = sft(vn,om)
f2 = ct*ct+st*st
fv[im2*i]= f1 + f2
# end i loop, index > 1
# end index 1, or more
x_int = 0.00
for i in range(n+1):
x_int = x_int + w[index-1, i]*fv[im2*i]
x_int = x_int*(b-a)
est[index-1] = x_int
etol = 0.0
#
# Check for convergence.
#
nval = 2*n
if (index == 2):
if ( est[index-1] == est[index-2] ):
return x_int
elif (index > 2):
sq = (est[index-1]-est[index-2])**2
bot = (0.01*sq + np.abs(est[index-1]-est[index-2]) )
if (sq == 0.0):
etol = 0.0
else:
etol = sq/bot
if (etol <= tol):
return x_int
# end check convergence
# end index loop
print('******** WARNING *********')
print(' xint unable to provide requested accuracy')
return x_int
#-------------------------------------------------------------------------
# end XINT
#-------------------------------------------------------------------------
#-------------------------------------------------------------------------
# DPSS_EV - Eigenvalues of the DPSS sequences
#-------------------------------------------------------------------------
def dpss_ev(vn,w,atol=1e-14):
"""
Recalculate the DPSS eigenvalues, performing the
integration in the -W:W range, using Quadrature.
computes eigenvalues for the discrete prolate spheroidal sequences
in efn by integration of the corresponding squared discrete prolate
spheroidal wavefunctions over the inner domain. Due to symmetry, we
perform integration from zero to w.
We use Chebychev quadrature for the numerical integration.
*Parameters*
vn : ndarray [npts,kspec]
DPSS to calculate eigenvalues
w : float
the bandwidth (= time-bandwidth product/ndata)
atol : float, optional
absolute error tolerance for the integration. this should
be set to 10**-n, where n is the number of significant figures
that can be be represented on the machine.
default = 1e-14
*Returns*
lamb : ndarray [kspec]
vector of length vn.shape[1], contains the eigenvalues
*Modified*
November 2004 (<NAME>)
*Calls*
xint
|
"""
npts = np.shape(vn)[0]
kspec = np.shape(vn)[1]
lamb = np.zeros(kspec)
for k in range(kspec):
result = xint(0.0,w,atol,vn[:,k],npts)
lamb[k] = 2.0*result
return lamb
#-------------------------------------------------------------------------
# end DPSS_EV
#-------------------------------------------------------------------------
def dpss(npts,nw,kspec=None):
"""
Calculation of the Discrete Prolate Spheroidal Sequences, and
the correspondent eigenvalues.
- <NAME>. 1978 Bell Sys Tech J v57 n5 1371-1430
- <NAME>. 1982 Proc IEEE v70 n9 1055-1096
**Parameters**
npts : int
the number of points in the series
nw : float
the time-bandwidth product (number of Rayleigh bins)
kspec : int
Optional, the desired number of tapers default = 2*nw-1
**Returns**
v : ndarray (npts,kspec)
the eigenvectors (tapers) are returned in v[npts,nev]
lamb : ndarray (kspec)
the eigenvalues of the v's
**Notes**
In SCIPY the codes are already available to calculate the DPSS.
Eigenvalues are calculated using Chebyshev Quadrature.
Code also performs interpolation if NPTS>1e5
Also, define DPSS to be positive-standard, meaning vn's always
start positive, whether symmetric or not.
**Modified**
December 2020
February 2022 - Changed a for loop for a direct np.sum().
**Calls**
scipy.signal.windows.dpss
dpss_ev
|
"""
#-----------------------------------------------------
# Check number of tapers
#-----------------------------------------------------
W = nw/float(npts)
if (kspec is None):
kspec = int(np.round(2*nw-1))
#-----------------------------------------------------
# Get the DPSS, using SCIPY
# Interpolate if necessary
#-----------------------------------------------------
if (npts < 1e5):
v,lamb2 = signal.windows.dpss(npts, nw, Kmax=kspec,
sym=True,norm=2,
return_ratios=True)
v = v.transpose()
else:
lsize = np.floor(np.log10(npts))
nint = int((10**lsize))
print('DPSS using interpolation', npts, nint)
v2int = signal.windows.dpss(nint, nw, Kmax=kspec,
sym=True,norm=2)
v2int = v2int.transpose()
v = np.zeros((npts,kspec),dtype=float)
x = np.arange(nint)
y = np.linspace(0,nint-1,npts,endpoint=True)
for k in range(kspec):
I = interp.interp1d(x, v2int[:,k], kind='quadratic')
v[:,k] = I(y)
v[:,k] = v[:,k]*np.sqrt(float(nint)/float(npts))
#-----------------------------------------------------
# Normalize functions
#-----------------------------------------------------
vnorm = np.sqrt(np.sum(v**2,axis=0))
v = v/vnorm[None,:]
# Replaced for loop
#for i in range(kspec):
# vnorm = np.sqrt(np.sum(v[:,i]**2))
# v[:,i] = v[:,i]/vnorm
#-----------------------------------------------------
# Get positive standard
#-----------------------------------------------------
nx = npts%2
if (nx==1):
lh = int((npts+1)/2)
else:
lh = int(npts/2)
for i in range(kspec):
if (v[lh,i] < 0.0):
v[:,i] = -v[:,i]
lamb = dpss_ev(v,W)
return v, lamb
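#-------------------------------------------------------------------------
# Example (illustrative only, not part of the original module): compute the
# 2*nw-1 DPSS tapers for a 512-sample window and check that they come back
# orthonormal; the eigenvalues lamb should be close to 1.
#-------------------------------------------------------------------------
def _demo_dpss(npts=512, nw=4.0):
    v, lamb = dpss(npts, nw)
    gram = v.T @ v                              # should be close to the identity
    ortho_err = np.max(np.abs(gram - np.eye(v.shape[1])))
    return lamb, ortho_err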
#-------------------------------------------------------------------------
# end DPSS
#-------------------------------------------------------------------------
def dpss2(npts,nw,nev=None):
"""
This is a try to compute the DPSS using the original Thomson
approach. It reduces the problem to half the size and inverts
independently for the even and odd functions.
This is work in progress and not used.
Modified from F90 library:
<NAME>
December 2020
The tapers are the eigenvectors of the tridiagonal matrix sigma(i,j)
[see Slepian(1978) eq 14 and 25.] They are also the eigenvectors of
the Toeplitz matrix eq. 18. We solve the tridiagonal system in
scipy.linalg.eigh_tridiagonal
(real symmetric tridiagonal solver) for the tapers and use
them in the integral equation in the frequency domain
(dpss_ev subroutine) to get the eigenvalues more accurately,
by performing Chebychev Gaussian Quadrature following Thomson's codes.
First, we create the main and off-diagonal vectors of the
tridiagonal matrix. We compute separately the even and odd tapers,
by calling eigh_tridiagonal from SCIPY.
We refine the eigenvalues by computing the inner bandwidth
energy in the frequency domain (eq. 2.6 Thomson). Also the "leakage"
(1 - eigenvalue) is estimated, independently if necessary.
In SCIPY the codes are already available to calculate the DPSS.
Eigenvalues are calculated using Chebyshev Quadrature.
Code also performs interpolation if NPTS>1e5
Also, define DPSS to be positive-standard, meaning vn's always
start positive, whether symmetric or not.
**Calls**
To do
|
"""
#-----------------------------------------------------
# Check number of tapers
#-----------------------------------------------------
bw = nw/float(npts)
if (nev is None):
nev = int(np.round(2*nw-1))
#-----------------------------------------------------
# Check size of vectors and half lengths
#-----------------------------------------------------
nx = npts%2
if (nx==1):
lh = int((npts+1)/2)
else:
lh = int(npts/2)
nodd = int ((nev-(nev%2))/2)
neven = nev - nodd
com = np.cos(2.0*np.pi*bw)
hn = float(npts-1.0)/2.0
r2 = np.sqrt(2.0)
# Initiate eigenvalues and eigenvectors
v = np.zeros((npts,nev),dtype=float)
theta = np.zeros(nev,dtype=float)
#---------------------------------------------
# Do even tapers
#---------------------------------------------
fv1 = np.zeros(lh,dtype=float)
fv2 = np.zeros(lh,dtype=float)
for i in range(lh):
n = i
fv1[i] = com*(hn - float(n))**2.0
fv2[i] = float(n*(npts-n))/2.0
if (nx == 0):
fv1[lh-1] = com*(hn-float(lh-1))**2.0 + float(lh*(npts-lh))/2.0
else:
fv2[lh-1] = r2*fv2[lh-1]
fv3 = fv2[1:lh]
eigval,v2 = linalg.eigh_tridiagonal(fv1, fv2[1:lh],
select='i',select_range=(lh-neven,lh-1))
if (nx==1):
for k in range(neven):
v[lh,k] = v[lh,k]*r2
for k in range(neven):
kr = k
k2 = 2*k
theta[k2] = eigval[kr]
nr = npts-1
for i in range(lh):
v[i,k2] = v2[i,kr]
v[nr,k2] = v2[i,kr]
nr=nr-1
#---------------------------------------------
# Do odd tapers
#---------------------------------------------
fv1 = np.zeros(lh,dtype=float)
fv2 = np.zeros(lh,dtype=float)
if (nodd > 0):
for i in range(lh):
n = i
fv1[i] = com*(hn - float(n))**2
fv2[i] = float(n*(npts-n))/2.0
if (nx == 0):
fv1[lh-1] = com*(hn-float(lh-1))**2 - float(lh*(npts-lh))/2.0
eigval,v2 = linalg.eigh_tridiagonal(fv1, fv2[1:lh],
select='i',select_range=(lh-nodd,lh-1))
for k in range(nodd):
kr = k
k2 = 2*k+1
theta[k2] = eigval[kr]
nr = npts-1
for i in range(lh):
v[i,k2] = v2[i,kr]
v[nr,k2] = -v2[i,kr]
nr=nr-1
#---------------------------------------
# Normalize the eigenfunction
# and positive standard
#---------------------------------------
for i in range(nev):
vnorm = np.sqrt(np.sum(v[:,i]**2))
v[:,i] = v[:,i]/vnorm
if (v[lh,i]<0.0):
v[:,i] = -v[:,i]
v = np.flip(v,axis=1)
lamb = dpss_ev(v,bw)
return v, lamb
#-------------------------------------------------------------------------
# end DPSS - my version
#-------------------------------------------------------------------------
#-------------------------------------------------------------------------
# Eigenspec
#-------------------------------------------------------------------------
def eigenspec(x,vn,lamb,nfft):
"""
Calculate eigenspectra using DPSS sequences.
Gets yk's from Thomson (1982).
**Parameters**
x : ndarray [npts,0]
real vector with the time series
vn : ndarray [npts,kspec]
the different tapers computed in dpss
lambda : ndarray [kspec]
the eigenvalues of the tapers vn
nfft : int
number of frequency points (inc. positive
and negative frequencies)
**Returns**
yk : complex ndarray [kspec,nfft]
complex array with kspec fft's of tapered
data. Regardless of real/complex input data
all frequencies are stored. Good for coherence,
deconvolution, etc.
sk : ndarray [kspec,nfft]
real array with kspec eigenspectra
**Modified**
February 2022. Changed a for loop for xtap
<NAME>, November 2004
**Notes**
Computes eigen-ft's by windowing real data with dpss and taking ffts
Note that fft is unnormalized and window is such that its sum of
squares is one, so that psd=yk**2.
The fft's are computed using SCIPY FFT codes, and parallel FFT can
potentially speed up the calculation. Up to KSPEC workers are used.
The yk's are saved to get phase information. Note that tapers are
applied to the original data (npts long) and the FFT is zero padded
up to NFFT points.
**Calls**
scipy.fft.fft
|
"""
kspec = np.shape(vn)[1]
npts = np.shape(x)[0]
if (nfft < npts):
raise ValueError("NFFT must be larger than NPTS ", npts, nfft)
k2 = vn.shape[1]
if (kspec > k2):
raise ValueError("DPSS dimensions don't agree ", kspec, k2, ' tapers')
#-----------------------------------------------------------------
# Define matrices to be used
#-----------------------------------------------------------------
x2 = np.tile(x,(1,kspec))
xtap = vn*x2
# xtap = np.zeros((npts,kspec), dtype=float)
# for i in range(kspec):
# xtap[:,i] = vn[:,i]*x[:,0]
# Get eigenspec Yk's and Sk's
yk = scipy.fft.fft(xtap,axis=0,n=nfft,workers=kspec)
sk = np.abs(yk)**2
return yk, sk
#-------------------------------------------------------------------------
# end Eigenspec
#-------------------------------------------------------------------------
#-------------------------------------------------------------------------
# Adaptspec
#-------------------------------------------------------------------------
def adaptspec(yk,sk,lamb,iadapt=0):
"""
Calculate adaptively weighted power spectrum
Options for non-adaptive estimates are posible, with optional parameter
iadapt, using average of sk's or weighted by eigenvalue.
**Parameters**
yk : complex ndarray [nfft,kspec]
complex array of kspec eigencoefficients
sk : ndarray [nfft,kspec]
array containing kspec power spectra
lamb : ndarray [kspec]
eigenvalues of tapers
iadapt : int
defines the method to use, default = 0
0 - adaptive multitaper
1 - unweighted, wt =1 for all tapers
2 - wt by the eigenvalue of DPSS
**Returns**
spec : ndarray [nfft]
real vector containing adaptively weighted spectrum
se : ndarray [nfft]
real vector containing the number of degrees of freedom
for the spectral estimate at each frequency.
wt : ndarray [nfft,kspec]
real array containing the weights for the kspec
eigenspectra normalized so that if there is no bias, the
weights are unity.
**Modified**
<NAME>, Aug 2006
Corrected the estimation of the dofs se (sum of squares of wt is 1.0)
maximum wt = 1
<NAME>, October 2007
Added an additional subroutine noadaptspec to calculate a simple non-adaptive multitaper spectrum.
This can be used in transfer functions and deconvolution,
where adaptive methods might not be necessary.
February 2022. Now calculating adapt weights without for loop.
**Calls**
nothing
|
"""
mloop = 1000
nfft = np.shape(yk)[0]
kspec = np.shape(yk)[1]
lamb1 = 1.0-lamb
#----------------------------------------------------
# Simple average, not adaptive. Weight=1
# iadapt=1
#----------------------------------------------------
if (iadapt==1):
wt = np.ones((nfft,kspec), dtype=float)
se = np.zeros((nfft,1), dtype=float)
sbar = np.zeros((nfft,1), dtype=float)
sbar[:,0] = np.sum(sk,axis=1)/ float(kspec)
se = se + 2.0 * float(kspec)
spec = sbar
return spec, se, wt
#----------------------------------------------------
# Weight by eigenvalue of Slepian functions
# iadapt=2
#----------------------------------------------------
if (iadapt==2):
wt = np.zeros((nfft,kspec), dtype=float)
skw = np.zeros((nfft,kspec), dtype=float) # eigenvalue-weighted eigenspectra
for k in range(kspec):
wt[:,k] = lamb[k]
skw[:,k] = wt[:,k]**2 * sk[:,k]
wtsum = np.sum(wt**2,axis=1)
skwsum = np.sum(skw,axis=1)
sbar = skwsum / wtsum
spec = sbar[:,None]
#------------------------------------------------------------
# Number of Degrees of freedom
#------------------------------------------------------------
se = wt2dof(wt)
return spec, se, wt
# skw = np.zeros((nfft,kspec), dtype=float)
# wt = np.zeros((nfft,kspec), dtype=float)
#----------------------------------------
# Freq sampling (assume unit sampling)
#----------------------------------------
df = 1.0/float(nfft-1)
#----------------------------------------
# Variance of Sk's and avg variance
#----------------------------------------
varsk = np.sum(sk,axis=0)*df
dvar = np.mean(varsk)
bk = dvar * lamb1 # Eq 5.1b Thomson
sqlamb = np.sqrt(lamb)
#-------------------------------------------------
# Iterate to find optimal spectrum
#-------------------------------------------------
rerr = 9.5e-7 # Value used in F90 codes check
sbar = (sk[:,0] + sk[:,1])/2.0
spec = sbar[:,None]
for i in range(mloop):
slast = np.copy(sbar)
# for k in range(kspec):
# wt[:,k] = sqlamb[k]*sbar /(lamb[k]*sbar + bk[k])
# wt[:,k] = np.minimum(wt[:,k],1.0)
# skw[:,k] = wt[:,k]**2 * sk[:,k]
#
# wtsum = np.sum(wt**2,axis=1)
# skwsum = np.sum(skw,axis=1)
# sbar = skwsum / wtsum
wt1 = sqlamb[None,:]*sbar[:,None]
wt2 = (lamb[None,:]*sbar[:,None]+bk[None,:])
wt = np.minimum(wt1/wt2,1.0)
skw = wt**2 * sk
wtsum = np.sum(wt**2,axis=1)
skwsum = np.sum(skw,axis=1)
sbar = skwsum / wtsum
oerr = np.max(np.abs((sbar-slast)/(sbar+slast)))
if (i==mloop-1):
spec = sbar[:,None]
print('adaptspec did not converge, rerr = ',oerr, rerr)
break
if (oerr > rerr):
continue
spec = sbar[:,None]
break
spec = sbar[:,None]
#---------
#------------------------------------------------------------
# Number of Degrees of freedom
#------------------------------------------------------------
se = wt2dof(wt)
return spec, se, wt
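#-------------------------------------------------------------------------
# Example (illustrative only, not part of the original module): a minimal
# dpss -> eigenspec -> adaptspec pipeline on white noise. All sizes are
# arbitrary demo choices; eigenspec expects the series as a column vector.
#-------------------------------------------------------------------------
def _demo_adaptspec(npts=1000, nw=4.0, seed=0):
    rng = np.random.default_rng(seed)
    x = rng.standard_normal((npts, 1))          # column vector, as eigenspec expects
    vn, lamb = dpss(npts, nw)
    yk, sk = eigenspec(x, vn, lamb, nfft=npts)
    spec, se, wt = adaptspec(yk, sk, lamb, iadapt=0)
    return spec, se, wt, sk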
#-------------------------------------------------------------------------
# end adaptspec
#-------------------------------------------------------------------------
#-------------------------------------------------------------------------
# jackspec
#-------------------------------------------------------------------------
def jackspec(spec,sk,wt,se):
"""
code to calculate adaptively weighted jackknifed 95% confidence limits
**Parameters**
spec : ndarray [nfft]
real vector containing adaptively weighted spectrum
sk : ndarray [nfft,kspec]
array with kth power spectra
wt : ndarray [nfft,kspec]
real array containing the weights for the kspec
eigenspectra normalized so that if there is no bias, the
weights are unity.
se : ndarray [nfft]
real vector containing the number of degrees of freedom
for the spectral estimate at each frequency.
**Returns**
spec_ci : ndarray [nfft,2]
real array of jackknife error estimates, with 5 and 95%
confidence intervals of the spectrum.
**Calls**
scipy.stats.t.ppf
**Modified**
<NAME>, Aug 2006
<NAME>, March 2007
Changed the Jackknife to be more efficient.
|
"""
#------------------------------------------------------
# Get sizes and define matrices
#------------------------------------------------------
nfft = np.shape(sk)[0]
kspec = np.shape(sk)[1]
wjk = np.zeros((nfft,kspec-1))
sj = np.zeros((nfft,kspec-1))
sjk = np.zeros((nfft,kspec))
varjk = np.zeros((nfft,kspec))
var = np.zeros((nfft,1))
#------------------------------------------------------
# Do simple jackknife
#------------------------------------------------------
for i in range(kspec):
ks = -1
for k in range(kspec):
if (k == i):
continue
ks = ks + 1
wjk[:,ks] = wt[:,k]
sj[:,ks] = wjk[:,ks]**2 * sk[:,k]
sjk[:,i] = np.sum(sj,axis=1)/ np.sum(wjk**2,axis=1)
#------------------------------------------------------
# Jackknife mean (Log S)
#------------------------------------------------------
lspec = np.log(spec)
lsjk = np.log(sjk)
lsjk_mean = np.sum(lsjk, axis=1)/float(kspec)
#------------------------------------------------------
# Jackknife Bias estimate (Log S)
#------------------------------------------------------
bjk = float(kspec-1) * (lspec - lsjk_mean)
#------------------------------------------------------
# Jackknife Variance estimate (Log S)
#------------------------------------------------------
for i in range(kspec):
varjk[:,i] = (lsjk[:,i] - lsjk_mean)**2
var[:,0] = np.sum(varjk, axis=1) * float(kspec-1)/float(kspec)
#------------------------------------------------------
# Use the degrees of freedom
#------------------------------------------------------
for i in range(nfft):
if (se[i]<1.0):
print('DOF < 1 ', i,'th frequency ', se[i])
raise ValueError("Jackknife - DOF are wrong")
qt = scipy.stats.t(df=se[i]).ppf((0.95))
var[i,0] = np.exp(qt*np.sqrt(var[i,0]))
#-----------------------------------------------------------------
# Clear variables
#-----------------------------------------------------------------
del wjk, sj, sjk, varjk
#-----------------------------------------------------------------
# Return confidence intervals
#-----------------------------------------------------------------
spec_ci = np.zeros((nfft,2))
ci_dw = spec/var
ci_up = spec*var
spec_ci[:,0] = ci_dw[:,0]
spec_ci[:,1] = ci_up[:,0]
return spec_ci
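#-------------------------------------------------------------------------
# Example (illustrative only): jackknife 95% confidence limits for the
# adaptive spectrum produced by the _demo_adaptspec sketch above (this demo
# assumes that sketch is present in the module).
#-------------------------------------------------------------------------
def _demo_jackspec():
    spec, se, wt, sk = _demo_adaptspec()
    spec_ci = jackspec(spec, sk, wt, se)
    return spec, spec_ci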
#-------------------------------------------------------------------------
# end jackspec
#-------------------------------------------------------------------------
#-------------------------------------------------------------------------
# qiinv
#-------------------------------------------------------------------------
def qiinv(spec,yk,wt,vn,lamb,nw):
"""
Function to calculate the Quadratic Spectrum using the method
developed by Prieto et al. (2007).
The first 2 derivatives of the spectrum are estimated and the
bias associated with curvature (2nd derivative) is reduced.
Calculate the Stationary Inverse Theory Spectrum.
Basically, compute the spectrum inside the innerband.
This approach is very similar to D.J. Thomson (1990).
**Parameters**
spec : ndarray [nfft,0]
the adaptive multitaper spectrum (so far)
yk : ndarrau, complex [npts,kspec]
multitaper eigencoefficients, complex
wt : ndarray [nf,kspec]
the weights of the different coefficients.
input is the original multitaper weights,
from the Thomson adaptive weighting.
vn : ndarray [npts,kspec]
the Slepian sequences
lambda : ndarray [kspec]
the eigenvalues of the Slepian sequences
nw : float
The time-bandwisth product
**Returns**
qispec : ndarray [nfft,0]
the QI spectrum estimate
ds : ndarray [nfft,0]
the estimate of the first derivative
dds : ndarray [nfft,0]
the estimate of the second derivative
**References**
<NAME>, <NAME>, <NAME>, <NAME>,
and <NAME> (2007), Reducing the bias of multitaper
spectrum estimates, Geophys. J. Int., 171, 1269-1281.
doi: 10.1111/j.1365-246X.2007.03592.x.
**Notes**
In here I have made the Chebyshev polynomials unitless,
meaning that the associated parameters ALL have units
of the PSD and need to be normalized by 1/W for \alpha_1,
1/W**2 for \alpha_2, etc.
**Modified**
Nov 2021 (<NAME>)
Major adjustment in the inverse problem steps.
Now, the constant term is first inverted for,
and then the 1st and 2nd derivative so that we
obtain an independent 2nd derivative.
June 5, 2009 (<NAME>)
Major change, saving some important
values so that if the subroutine is called
more than once, with similar values, many of
the variables are not calculated again, making
the code run much faster.
**Calls**
scipy.optimize.nnls, scipy.linalg.qr,
scipy.linalg.lstsq
|
"""
npts = np.shape(vn)[0]
kspec = np.shape(vn)[1]
nfft = np.shape(yk)[0]
nfft2 = 11*nfft
nxi = 79;
L = kspec*kspec;
if (np.min(lamb) < 0.9):
print('Careful, Poor leakage of eigenvalue ', np.min(lamb));
print('Value of kspec is too large, revise? *****')
#---------------------------------------------
# Assign matrices to memory
#---------------------------------------------
xk = np.zeros((nfft,kspec), dtype=complex)
Vj = np.zeros((nxi,kspec), dtype=complex)
#---------------------------------------
# New inner bandwidth frequency
#---------------------------------------
bp = nw/npts # W bandwidth
xi = np.linspace(-bp,bp,num=nxi)
dxi = xi[2]-xi[1]
f_qi = scipy.fft.fftfreq(nfft2)
for k in range(kspec):
xk[:,k] = wt[:,k]*yk[:,k];
for i in range(nxi):
om = 2.0*np.pi*xi[i]
ct,st = sft(vn[:,k],om)
Vj[i,k] = 1.0/np.sqrt(lamb[k])*complex(ct,st)
#----------------------------------------------------------------
# Create the vectorized Cjk matrix and Pjk matrix { Vj Vk* }
#----------------------------------------------------------------
C = np.zeros((L,nfft),dtype=complex)
Pk = np.zeros((L,nxi), dtype=complex)
m = -1;
for i in range(kspec):
for k in range(kspec):
m = m + 1;
C[m,:] = ( np.conjugate(xk[:,i]) * (xk[:,k]) );
Pk[m,:] = np.conjugate(Vj[:,i]) * (Vj[:,k]);
Pk[:,0] = 0.5 * Pk[:,0];
Pk[:,nxi-1] = 0.5 * Pk[:,nxi-1];
#-----------------------------------------------------------
# I use the Chebyshev Polynomial as the expansion basis.
#-----------------------------------------------------------
hk = np.zeros((L,3), dtype=complex)
hcte = np.ones((nxi,1), dtype=float)
hslope = np.zeros((nxi,1), dtype=float)
hquad = np.zeros((nxi,1), dtype=float)
Cjk = np.zeros((L,1), dtype=complex)
cte = np.zeros(nfft)
cte2 = np.zeros(nfft)
slope = np.zeros(nfft)
quad = np.zeros(nfft)
sigma2 = np.zeros(nfft)
cte_var = np.zeros(nfft)
slope_var = np.zeros(nfft)
quad_var = np.zeros(nfft)
h1 = np.matmul(Pk,hcte) * dxi
hk[:,0] = h1[:,0]
hslope[:,0] = xi/bp
h2 = np.matmul(Pk,hslope) * dxi
hk[:,1] = h2[:,0]
hquad[:,0] = (2.0*((xi/bp)**2) - 1.0)
h3 = np.matmul(Pk,hquad) * dxi
hk[:,2] = h3[:,0]
nh = np.shape(hk)[1]
#----------------------------------------------------
# Begin Least squares solution (QR factorization)
#----------------------------------------------------
Q,R = scipy.linalg.qr(hk);
Qt = np.transpose(Q)
Leye = np.eye(L)
Ri,res,rnk,s = scipy.linalg.lstsq(R,Leye)
covb = np.real(np.matmul(Ri,np.transpose(Ri)))
for i in range(nfft):
Cjk[:,0] = C[:,i]
# hmodel,res,rnk,s = scipy.linalg.lstsq(hk,Cjk)
btilde = np.matmul(Qt,Cjk)
hmodel,res,rnk,s = scipy.linalg.lstsq(R,btilde)
#---------------------------------------------
# Estimate positive spectrumm
#---------------------------------------------
cte_out = optim.nnls(np.real(h1),
np.real(Cjk[:,0]))[0]
cte2[i] = np.real(cte_out)
pred = h1*cte2[i]
Cjk2 = Cjk-pred
#---------------------------------------------
# Now, solve the derivatives
#---------------------------------------------
btilde = np.matmul(Qt,Cjk2)
hmodel,res,rnk,s = scipy.linalg.lstsq(R,btilde)
cte[i] = np.real(hmodel[0])
slope[i] = -np.real(hmodel[1])
quad[i] = np.real(hmodel[2])
pred = np.matmul(hk,np.real(hmodel))
sigma2[i] = np.sum(np.abs(Cjk-pred)**2)/(L-nh)
cte_var[i] = sigma2[i]*covb[0,0]
slope_var[i] = sigma2[i]*covb[1,1]
quad_var[i] = sigma2[i]*covb[2,2]
slope = slope / (bp)
quad = quad / (bp**2)
slope_var = slope_var / (bp**2)
quad_var = quad_var / (bp**4)
qispec = np.zeros((nfft,1), dtype=float)
for i in range(nfft):
qicorr = (quad[i]**2)/((quad[i]**2) + quad_var[i] )
qicorr = qicorr * (1/6)*(bp**2)*quad[i]
qispec[i] = cte2[i] - qicorr
#qispec[i] = spec[i] - qicorr
ds = slope;
dds = quad;
ds = ds[:,np.newaxis]
dds = dds[:,np.newaxis]
return qispec, ds, dds
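#-------------------------------------------------------------------------
# Example (illustrative only): quadratic-inverse (QI) spectrum and its first
# two derivatives for a white-noise test series, built from the same
# dpss/eigenspec/adaptspec pieces defined above. Sizes are demo choices.
#-------------------------------------------------------------------------
def _demo_qiinv(npts=1000, nw=4.0, seed=0):
    rng = np.random.default_rng(seed)
    x = rng.standard_normal((npts, 1))
    vn, lamb = dpss(npts, nw)
    yk, sk = eigenspec(x, vn, lamb, nfft=npts)
    spec, se, wt = adaptspec(yk, sk, lamb)
    qispec, ds, dds = qiinv(spec, yk, wt, vn, lamb, nw)
    return qispec, ds, dds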
#-------------------------------------------------------------------------
# end qiinv
#-------------------------------------------------------------------------
#-------------------------------------------------------------------------
# ftest
#-------------------------------------------------------------------------
def ftest(vn,yk):
"""
Performs the F test for a line component
Compute F-test for single spectral line components
at the frequency bins given by the mtspec routines.
**Parameters**
vn : ndarray [npts,kspec]
Slepian sequences real
yk : ndarray, complex [nfft,kspec]
multitaper eigencoefficients, complex
kspec fft's of tapered data series
**Returns**
F : ndarray [nfft]
vector of f-test values, real
p : ndarray [nfft]
vector with probability of line component
**Calls**
scipy.stats.f.ppf, scipy.stats.f.cdf
|
"""
npts = np.shape(vn)[0]
kspec = np.shape(vn)[1]
nfft = np.shape(yk)[0]
mu = np.zeros(nfft,dtype=complex)
F = np.zeros(nfft)
p = np.zeros(nfft)
dof1 = 2
dof2 = 2*(kspec-1)
#------------------------------------------------------
# The Vk(0), summing the time domain tapers
# Also normalize by sum(vn0)**2
#------------------------------------------------------
vn0 = np.sum(vn,axis=0)
vn0_sqsum = np.sum(np.abs(vn0)**2)
#------------------------------------------------------
# Calculate the mean amplitude of line components at
# each frequency
#------------------------------------------------------
for i in range(nfft):
vn_yk = vn0[:]*yk[i,:]
vn_yk_sum = np.sum(vn_yk)
mu[i] = vn_yk_sum/vn0_sqsum
#------------------------------------------------------
# Calculate F Test
# Top (kspec-1) mu**2 sum(vn0**2) Model variance
# Bottom sum(yk - mu*vn0)**2 Misfit
# Fcrit - IS the threshhold for 95% test.
#------------------------------------------------------
Fcrit = scipy.stats.f.ppf(0.95,dof1,dof2)
for i in range(nfft):
Fup = float(kspec-1) * np.abs(mu[i])**2 * np.sum(vn0**2)
Fdw = np.sum( np.abs(yk[i,:] - mu[i]*vn0[:])**2 )
F[i] = Fup/Fdw
p[i] = scipy.stats.f.cdf(F[i],dof1,dof2)
F = F[:,np.newaxis]
p = p[:,np.newaxis]
return F, p
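#-------------------------------------------------------------------------
# Example (illustrative only): F-test detection of a single sinusoid buried
# in white noise. The line should appear as a sharp peak in F near the
# chosen frequency f0 (in cycles/sample). All values are demo choices.
#-------------------------------------------------------------------------
def _demo_ftest(npts=1000, nw=4.0, f0=0.1, seed=0):
    rng = np.random.default_rng(seed)
    t = np.arange(npts)
    x = (np.cos(2.0*np.pi*f0*t) + 0.5*rng.standard_normal(npts))[:, None]
    vn, lamb = dpss(npts, nw)
    yk, sk = eigenspec(x, vn, lamb, nfft=npts)
    F, p = ftest(vn, yk)
    freq = scipy.fft.fftfreq(npts)
    imax = np.argmax(F[: npts // 2, 0])         # restrict to positive frequencies
    return freq[imax], F[imax, 0], p[imax, 0]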
#-------------------------------------------------------------------------
# end ftest
#-------------------------------------------------------------------------
#-------------------------------------------------------------------------
# reshape spectrum
#-------------------------------------------------------------------------
def yk_reshape(yk_in,vn,p=None,fcrit=0.95):
"""
reshape the yk's based on the F-test of line components
Reshape eigenft's around significant spectral lines
The "significant" means above fcritical probability (def=0.95)
If probability is large at neighbouring frequencies, code will
only remove the largest probability energy.
**Parameters**
yk : ndarray complex [nfft,kspec]
eigenft's
vn : ndarray [npts,kspec]
DPSS sequences
p : ndarray optional [nfft]
F-test probabilities to find fcritical
If None, it will be calculated
fcrit : float optional
Probability value over which to reshape, default = 0.95
**Returns**
yk : ndarray, complex [nfft,kspec]
Reshaped eigenft's
sline : ndarray [nfft]
Power spectrum of line components only
**Modified**
April 2006 (<NAME>)
**Calls**
ftest - if P is not present
scipy.fft.fft
|
"""
if (p is None):
print('Doing F test')
p = ftest(vn,yk)[1]
yk = np.copy(yk_in)
npts = np.shape(vn)[0]
kspec = np.shape(vn)[1]
nfft = np.shape(yk)[0]
sline = np.zeros((nfft,1),dtype=float)
Vk = np.zeros((nfft,kspec),dtype=complex)
#------------------------------------------------------
# Count and isolate, peaks that pass
# the fcrit criteria.
# Also, remove values which are not local peaks
#------------------------------------------------------
nl = 0
for i in range(nfft):
if (p[i] < fcrit):
p[i] = 0
continue
if (i==0):
if (p[i]>p[i+1]):
nl = nl + 1
else:
p[i] = 0.0
elif (i==nfft-1):
if (p[i]>p[i-1]):
nl = nl + 1
else:
p[i] = 0
else:
if (p[i]>p[i-1] and p[i]>p[i+1]):
nl = nl + 1
else:
p[i] = 0
#------------------------------------------------------
# If no lines are found, return back arrays
#------------------------------------------------------
if (nl == 0):
return yk,sline
#------------------------------------------------------
# Prepare vn's Vk's for line removal
# Compute the Vk's to reshape
# The Vk's are normalized to have int_{-1/2}^{1/2} |Vk|**2 df = 1
# which the fft already gives, since sum(vn**2) = 1
#------------------------------------------------------
vn0 = np.sum(vn,axis=0)
for k in range(kspec):
Vk[:,k] = scipy.fft.fft(vn[:,k],nfft)
#------------------------------------------------------
# Remove mean value for each spectral line
#------------------------------------------------------
for i in range(nfft):
if (p[i]<fcrit):
continue
mu = np.sum(vn0*yk[i,:]) / np.sum(vn0**2)
for j in range(nfft):
jj = j - i
if (jj < 0):
jj = jj + nfft
yk_pred = mu*Vk[jj,:]
yk[j,:] = yk[j,:] - yk_pred
#yk[j,:] = yk[j,:] - mu*Vk[jj,:]
for k in range(kspec):
kfloat = 1.0/float(kspec)
sline[i] = sline[i] + kfloat*np.abs(mu*Vk[jj,k])**2
return yk, sline
#-------------------------------------------------------------------------
# end reshape
#-------------------------------------------------------------------------
#-------------------------------------------------------------------------
# Calculate degrees of freedom
#-------------------------------------------------------------------------
def wt2dof(wt):
"""
Calculate the degrees of freedom of the multitaper based on the
weights of the different tapers.
**Parameters**
wt : ndarray [nfft,kspec]
weights of the tapers at each frequency
**Returns**
se : ndarray [nfft]
degrees of freedom at each frequency
**Modified**
February 2022, changed a for loop for direct numpy sum.
|
"""
nfft = np.shape(wt)[0]
kspec = np.shape(wt)[1]
#------------------------------------------------------------
# Number of Degrees of freedom
#------------------------------------------------------------
wt1 = np.sqrt(np.sum(wt**2,axis=1)/float(kspec))
wt_dofs = np.minimum(wt/wt1[:,None],1.0)
#wt_dofs = np.zeros((nfft,kspec), dtype=float)
#for i in range(nfft):
# wt_dofs[i,:] = wt[i,:]/np.sqrt(np.sum(wt[i,:]**2)/float(kspec))
#wt_dofs = np.minimum(wt_dofs,1.0)
se = 2.0 * np.sum(wt_dofs**2, axis=1)
return se
#-------------------------------------------------------------------------
# End DOFs
#-------------------------------------------------------------------------
#-------------------------------------------------------------------------
# Dual-frequency spectrum
# Note: New version added, with np.tensordot, speeds up 10-100 fold
#-------------------------------------------------------------------------
def df_spec(x,y=None,fmin=None,fmax=None):
"""
Dual frequency spectrum using one/two MTSPEC classes.
For now, only positive frequencies are studied
Construct the dual-frequency spectrum from the yk's and the
weights of the usual multitaper spectrum estimation.
**Parameters**
x : MTSpec class
variable with the multitaper information (yk's)
y : MTSpec class, optional
similar to x for a second time series
if y is None, auto-dual frequency is calculated.
fmin : float, optional
minimum frequency to calculate the DF spectrum
fmax : float, optional
maximum frequency to calculate the DF spectrum
**Returns**
df_spec : ndarray complex, 2D (nf,nf)
the complex dual-frequency cross-spectrum. Not normalized
df_cohe : ndarray, 2D (nf,nf)
MSC, dual-freq coherence matrix. Normalized (0.0,1.0)
df_phase : ndarray, 2D (nf,nf)
the dual-frequency phase
**Notes**
both x and y need the same parameters (npts, kspec, etc.)
**Modified**
<NAME>, September 2005
<NAME>, September 2007
Slight rewrite to adjust to newer mtspec codes.
<NAME>, February 2022
Speed up by simplifying for loops and using np.tensordot
**Calls**
np.tensordot
|
"""
if (y is None):
y = x
kspec = x.kspec
nfft = x.nfft
nf = x.nf
freq = x.freq[:,0]
if (fmin is None):
fmin = min(abs(freq))
if (fmax is None):
fmax = max(abs(freq))
# Select frequencies of interest
floc = np.where((freq>=fmin) & (freq<=fmax))[0]
freq = freq[floc]
nf = len(freq)
#------------------------------------------------------------
# Create the cross and/or auto spectra
#------------------------------------------------------------
# Unique weights (and degrees of freedom)
wt = np.minimum(x.wt,y.wt)
# Scale weights to keep power
wt_scale = np.sqrt(np.sum(np.abs(wt)**2, axis=1))
wt = wt/wt_scale[:,None]
# Weighted Yk's
dyk_x = wt[floc,:] * x.yk[floc,:]
dyk_y = wt[floc,:] * y.yk[floc,:]
# Auto and Cross spectrum
Sxx = np.sum(np.abs(dyk_x)**2, axis=1)
Syy = np.sum(np.abs(dyk_y)**2, axis=1)
Pxy = np.outer(Sxx,Syy)
df_spec = np.tensordot(dyk_x,np.conjugate(dyk_y),axes=(1,1))
df_cohe = np.abs(df_spec**2)/Pxy
df_phase = np.arctan2(np.imag(df_spec),np.real(df_spec)) * 180.0/np.pi
return df_spec, df_cohe, df_phase, freq
def df_spec_old(x,y=None,fmin=None,fmax=None):
"""
Dual frequency spectrum using one/two MTSPEC classes.
For now, only positive frequencies are studied
Construct the dual-frequency spectrum from the yk's and the
weights of the usual multitaper spectrum estimation.
**Parameters**
x : MTSpec class
variable with the multitaper information (yk's)
y : MTSpec class, optional
similar to x for a second time series
if y is None, auto-dual frequency is calculated.
fmin : float, optional
minimum frequency to calculate the DF spectrum
fmax : float, optional
maximum frequency to calculate the DF spectrum
**Returns**
df_spec : ndarray complex, 2D (nf,nf)
the complex dual-frequency cross-spectrum. Not normalized
df_cohe : ndarray, 2D (nf,nf)
MSC, dual-freq coherence matrix. Normalized (0.0,1.0)
df_phase : ndarray, 2D (nf,nf)
the dual-frequency phase
**Notes**
both x and y need the same parameters (npts, kspec, etc.)
**Modified**
<NAME>, September 2005
<NAME>, September 2007
Slight rewrite to adjust to newer mtspec codes.
**Calls**
Nothing
|
"""
if (y is None):
y = x
kspec = x.kspec
nfft = x.nfft
nf = x.nf
freq = x.freq[:,0]
if (fmin is None):
fmin = min(abs(freq))
if (fmax is None):
fmax = max(abs(freq))
floc = np.zeros(nf,dtype=int)
icnt = -1
for i in range(nf):
if (freq[i]>=fmin and freq[i]<=fmax):
icnt = icnt + 1
floc[icnt] = i
floc = floc[0:icnt]
nf = icnt
freq = freq[floc]
#------------------------------------------------------------
# Create the cross and/or auto spectra
#------------------------------------------------------------
# Unique weights (and degrees of freedom)
wt = np.minimum(x.wt,y.wt)
wt_scale = np.sum(np.abs(wt)**2, axis=1) # Scale weights to keep power
for k in range(kspec):
wt[:,k] = wt[:,k]/np.sqrt(wt_scale)
# Weighted Yk's
dyk_x = np.zeros((nf,kspec),dtype=complex)
dyk_y = np.zeros((nf,kspec),dtype=complex)
for k in range(kspec):
dyk_x[:,k] = wt[floc,k] * x.yk[floc,k]
dyk_y[:,k] = wt[floc,k] * y.yk[floc,k]
# Auto and Cross spectrum
Sxx = np.zeros((nf,1),dtype=float)
Syy = np.zeros((nf,1),dtype=float)
Sxx[:,0] = np.sum(np.abs(dyk_x)**2, axis=1)
Syy[:,0] = np.sum(np.abs(dyk_y)**2, axis=1)
# Get coherence and phase
df_spec = np.zeros((nf,nf),dtype=complex)
df_cohe = np.zeros((nf,nf),dtype=float)
df_phase = np.zeros((nf,nf),dtype=float)
for i in range(nf):
if ((i+1)%1000==0):
print('DF_SPEC ith loop ',i+1,' of ',nf)
for j in range(nf):
df_spec[i,j] = np.sum(dyk_x[i,:] * np.conjugate(dyk_y[j,:]))
df_cohe[i,j] = np.abs(df_spec[i,j])**2 / (Sxx[i]*Syy[j])
df_phase[i,j] = np.arctan2( np.imag(df_spec[i,j]),
np.real(df_spec[i,j]) )
df_phase = df_phase * (180.0/np.pi)
return df_spec, df_cohe, df_phase, freq
#-------------------------------------------------------------------------
# End DF_SPEC
#-------------------------------------------------------------------------
#-------------------------------------------------------------------------
# SFT - slow fourier transform
#-------------------------------------------------------------------------
def sft(x,om):
"""
calculates the (slow) fourier transform of real
sequence x(i),i=1,...n at angular frequency om normalized
so that nyquist=pi. the sine transform is returned in st and
the cosine transform in ct.
algorithm is that of goertzal with modifications by
gentleman, comp.j. 1969
transform is not normalized
to normalize one-sided ft, divide by sqrt(data length)
for positive om, the ft is defined as ct-(0.,1.)st or like slatec
cfftf
**Parameters**
x : ndarray (n,)
time sequence x[0],x[1],...
om : float
angular frequency of interest,
normalized such that Nyq = pi
**Modified**
<NAME>
November 2004
|
"""
n = np.shape(x)[0]
pi = np.pi
tp = 2.0*pi
np1 = n+1
l = int(np.floor(6.0*om/tp))
s = np.sin(om)
a = 0.0
c = 0.0
d = 0.0
e = 0.0
if (l == 0):
# recursion for low frequencies (.lt. nyq/3)
b = -4.0*np.sin(om/2.0)**2
for k0 in range(n):
k = k0+1
c = a
d = e
a = x[np1-k-1]+b*d+c
e = a+d
elif (l == 1):
#regular goertzal algorithm for intermediate frequencies
b = 2.0*np.cos(om)
for k0 in range(n):
k = k0 + 1
a = x[np1-k-1]+b*e-d
d = e
e = a
else:
# recursion for high frequencies (> 2*fnyq/3)
b=4.0*np.cos(om/2.0)**2
for k0 in range(n):
k = k0 + 1
c = a
d = e
a = x[np1-k-1]+b*d-c
e = a-d
st = -s*d
ct = a-b*d/2.0
return ct, st
#-------------------------------------------------------------------------
# End SFT
#-------------------------------------------------------------------------
#-------------------------------------------------------------------------
# squick
#-------------------------------------------------------------------------
def squick(nptwo,fx,nf,ntap=None,kopt=None):
"""
Sine multitaper routine. With a double length FFT constructs
FT[sin(q*n)*x(n)] from F[x(n)], that is constructs the
FFT of the sine tapered signal.
The FFT should be performed previous to the call.
**Parameters**
nptwo : float
The twice signal length (2*npts)
fx : ndarray, complex
The FFT of the signal (twice length)
nf : int
Number of frequency points for spec
ntap : int, optional
Constant number of tapers to average from
if None, kopt is used.
if > 0 Constant value to be used
if <= 0 Use the kopt array instead
kopt : ndarray, int [nf]
array of integers, with the number of tapers
at each frequency.
**Returns**
spec : ndarray (nf,)
the spectral estimate
**References**
Based on the sine multitaper code of <NAME>.
|
"""
spec = np.zeros(nf,dtype=float)
if (kopt is None and ntap is None):
raise ValueError("Either kopt or ntap must exist")
elif (kopt is None):
if (ntap<1):
ntap = int(3.0 + np.sqrt(float(nptwo/2))/5.0)
kopt = np.ones(nf,dtype=int)*ntap
#-------------------------------------------
# Loop over frequency
#-------------------------------------------
for m in range(nf):
m2 = 2* (m)
spec[m] = 0.
klim = kopt[m]
ck = 1./float(klim)**2
#------------------------------------------------
# Average over tapers, parabolic weighting wk
#------------------------------------------------
for k0 in range(klim):
k = k0+1
j1 = (m2+nptwo-k)%nptwo
j2 = (m2+k)%nptwo
zz = fx[j1] - fx[j2]
wk = 1. - ck*float(k0)**2
spec[m] = spec[m] + (np.real(zz)**2 + np.imag(zz)**2) * wk
# end average tapers
#-------------------------------------------------
# Exact normalization for parabolic factor
#-------------------------------------------------
spec[m] = spec[m] * (6.0*float(klim))/float(4*klim**2+3*klim-1)
# end loop frequencies
return spec, kopt
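#-------------------------------------------------------------------------
# Example (illustrative only): sine-multitaper spectrum of white noise with a
# fixed number of tapers. It is assumed here that fx is the FFT of the series
# zero-padded to twice its length, as the double-length indexing above
# suggests; treat this driver as a sketch rather than the official interface.
#-------------------------------------------------------------------------
def _demo_squick(npts=1024, ntap=7, seed=0):
    rng = np.random.default_rng(seed)
    x = rng.standard_normal(npts)
    x = x - x.mean()
    nptwo = 2 * npts
    fx = scipy.fft.fft(x, n=nptwo)              # double-length (zero-padded) FFT
    nf = npts // 2 + 1
    spec, kopt = squick(nptwo, fx, nf, ntap=ntap)
    return spec, kopt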
#-------------------------------------------------------------------------
# end squick
#-------------------------------------------------------------------------
#-------------------------------------------------------------------------
# squick2 - for cros spectra
#-------------------------------------------------------------------------
def squick2(nptwo,fx,nf,ntap=None,kopt=None):
"""
Sine multitaper routine. With a double length FFT constructs
FT[sin(q*n)*x(n)] from F[x(n)], that is constructs the
FFT of the sine tapered signal.
The FFT should be performed previous to the call.
**Parameters**
nptwo : float
The twice signal length (2*npts)
fx : ndarray, complex [nptwo,2]
The FFT of the two signals (twice length)
nf : int
Number of frequency points for spec
ntap : int, optional
Constant number of tapers to average from
if > 0 Constant value to be used
if None kopt used
if <= 0 Use the kopt array instead
kopt : ndarray, int [nf]
array of integers, with the number of tapers
at each frequency.
**Returns**
spec : ndarray (nf,4)
the spectral estimates (first 2 columns)
and the cross spectral estimates (last 2 columns)
**References**
Based on the sine multitaper code of <NAME>.
|
"""
sxy = np.zeros((nf,4),dtype=float)
if (kopt is None and ntap is None):
raise ValueError("Either kopt or ntap must exist")
elif (kopt is None):
if (ntap<1):
ntap = int(3.0 + np.sqrt(float(nptwo/2))/5.0)
kopt = np.ones(nf,dtype=int)*ntap
#-------------------------------------------
# Loop over frequency
#-------------------------------------------
for m in range(nf):
m2 = 2* (m)
sxy[m,:] = 0.
klim = kopt[m]
ck = 1./float(klim)**2
#------------------------------------------------
# Average over tapers, parabolic weighting wk
#------------------------------------------------
for k0 in range(klim):
k = k0+1
j1 = (m2+nptwo-k)%nptwo
j2 = (m2+k)%nptwo
z1 = fx[j1,0] - fx[j2,0]
z2 = fx[j1,1] - fx[j2,1]
wk = 1. - ck*float(k0)**2
sxy[m,0] = sxy[m,0] + (np.real(z1)**2 + np.imag(z1)**2) * wk
sxy[m,1] = sxy[m,1] + (np.real(z2)**2 + np.imag(z2)**2) * wk
# This module has been generated automatically from space group information
# obtained from the Computational Crystallography Toolbox
#
"""
Space groups
This module contains a list of all the 230 space groups that can occur in
a crystal. The variable space_groups contains a dictionary that maps
space group numbers and space group names to the corresponding space
group objects.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The Mosaic Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file LICENSE.txt, distributed as part of this software.
#-----------------------------------------------------------------------------
import numpy as N
class SpaceGroup(object):
"""
Space group
All possible space group objects are created in this module. Other
modules should access these objects through the dictionary
space_groups rather than create their own space group objects.
"""
def __init__(self, number, symbol, transformations):
"""
:param number: the number assigned to the space group by
international convention
:type number: int
:param symbol: the Hermann-Mauguin space-group symbol as used
in PDB and mmCIF files
:type symbol: str
:param transformations: a list of space group transformations,
each consisting of a tuple of three
integer arrays (rot, tn, td), where
rot is the rotation matrix and tn/td
are the numerator and denominator of the
translation vector. The transformations
are defined in fractional coordinates.
:type transformations: list
"""
self.number = number
self.symbol = symbol
self.transformations = transformations
self.transposed_rotations = N.array([N.transpose(t[0])
for t in transformations])
self.phase_factors = N.exp(N.array([(-2j*N.pi*t[1])/t[2]
for t in transformations]))
def __repr__(self):
return "SpaceGroup(%d, %s)" % (self.number, repr(self.symbol))
def __len__(self):
"""
:return: the number of space group transformations
:rtype: int
"""
return len(self.transformations)
def symmetryEquivalentMillerIndices(self, hkl):
"""
:param hkl: a set of Miller indices
:type hkl: Scientific.N.array_type
:return: a tuple (miller_indices, phase_factor) of two arrays
of length equal to the number of space group
transformations. miller_indices contains the Miller
indices of each reflection equivalent by symmetry to the
reflection hkl (including hkl itself as the first element).
phase_factor contains the phase factors that must be applied
to the structure factor of reflection hkl to obtain the
structure factor of the symmetry equivalent reflection.
:rtype: tuple
"""
hkls = N.dot(self.transposed_rotations, hkl)
p = N.multiply.reduce(self.phase_factors**hkl, -1)
return hkls, p
space_groups = {}
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
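# Illustrative sketch only (not produced by the generator): it shows how a
# SpaceGroup built from (rot, trans_num, trans_den) tuples is typically queried
# for symmetry-equivalent reflections. The identity-only group is hypothetical.
def _demo_space_group():
    identity = (rot, trans_num, trans_den)   # the arrays defined just above
    sg = SpaceGroup(1, 'P 1', [identity])
    hkl = N.array([1, 2, 3])
    equivalents, phases = sg.symmetryEquivalentMillerIndices(hkl)
    return equivalents, phases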
import gibson2
from gibson2.core.physics.interactive_objects import VisualMarker, InteractiveObj, BoxShape, YCBObject, VisualShape
from gibson2.core.physics.robot_locomotors import Turtlebot
from gibson2.utils.utils import parse_config, rotate_vector_3d, l2_distance, quatToXYZW, cartesian_to_polar
from gibson2.envs.base_env import BaseEnv
from transforms3d.euler import euler2quat
from collections import OrderedDict
import argparse
from transforms3d.quaternions import quat2mat, qmult
import gym
import numpy as np
import os
import pybullet as p
from IPython import embed
import cv2
import time
import collections
import logging
class NavigateEnv(BaseEnv):
"""
We define navigation environments following <NAME>, et al. 'On evaluation of embodied navigation agents.'
arXiv preprint arXiv:1807.06757 (2018). (https://arxiv.org/pdf/1807.06757.pdf)
"""
def __init__(
self,
config_file,
model_id=None,
mode='headless',
action_timestep=1 / 10.0,
physics_timestep=1 / 240.0,
automatic_reset=False,
device_idx=0,
render_to_tensor=False
):
"""
:param config_file: config_file path
:param model_id: override model_id in config file
:param mode: headless or gui mode
:param action_timestep: environment executes action per action_timestep second
:param physics_timestep: physics timestep for pybullet
:param automatic_reset: whether to automatic reset after an episode finishes
:param device_idx: device_idx: which GPU to run the simulation and rendering on
"""
super(NavigateEnv, self).__init__(config_file=config_file,
model_id=model_id,
mode=mode,
action_timestep=action_timestep,
physics_timestep=physics_timestep,
device_idx=device_idx,
render_to_tensor=render_to_tensor)
self.automatic_reset = automatic_reset
def load_task_setup(self):
"""
Load task setup, including initialization, termination condition, reward, collision checking, and discount factor
"""
# initial and target pose
self.initial_pos = np.array(self.config.get('initial_pos', [0, 0, 0]))
self.initial_orn = np.array(self.config.get('initial_orn', [0, 0, 0]))
self.target_pos = np.array(self.config.get('target_pos', [5, 5, 0]))
self.target_orn = np.array(self.config.get('target_orn', [0, 0, 0]))
self.initial_pos_z_offset = self.config.get('initial_pos_z_offset', 0.1)
check_collision_distance = self.initial_pos_z_offset * 0.5
# s = 0.5 * G * (t ** 2)
check_collision_distance_time = np.sqrt(check_collision_distance / (0.5 * 9.8))
self.check_collision_loop = int(check_collision_distance_time / self.physics_timestep)
self.additional_states_dim = self.config.get('additional_states_dim', 0)
self.goal_dim = self.config.get('goal_dim', 0)
self.base_proprioceptive_states_dim = self.config.get('base_proprioceptive_states_dim', 0)
self.arm_proprioceptive_states_dim = self.config.get('arm_proprioceptive_states_dim', 0)
self.goal_format = self.config.get('goal_format', 'polar')
# termination condition
self.dist_tol = self.config.get('dist_tol', 0.5)
self.max_step = self.config.get('max_step', 500)
self.max_collisions_allowed = self.config.get('max_collisions_allowed', 500)
# reward
self.reward_type = self.config.get('reward_type', 'l2')
assert self.reward_type in ['geodesic', 'l2', 'sparse']
self.success_reward = self.config.get('success_reward', 10.0)
self.slack_reward = self.config.get('slack_reward', -0.01)
# reward weight
self.potential_reward_weight = self.config.get('potential_reward_weight', 1.0)
self.collision_reward_weight = self.config.get('collision_reward_weight', -0.1)
# ignore the agent's collision with these body ids
self.collision_ignore_body_b_ids = set(self.config.get('collision_ignore_body_b_ids', []))
# ignore the agent's collision with these link ids of itself
self.collision_ignore_link_a_ids = set(self.config.get('collision_ignore_link_a_ids', []))
# discount factor
self.discount_factor = self.config.get('discount_factor', 0.99)
self.num_obstacles = self.config.get('num_obstacles', 0)
self.obstacle_type = self.config.get('obstacle_type', 'block')
print("NUM OBSTACLES: {}".format(self.num_obstacles))
print("TYPE OBSTACLE: {}".format(self.obstacle_type))
print("TASK TYPE: {}".format(self.config["task"]))
print("TOLERANCE: {}".format(self.dist_tol))
self.num_walls = 5
self.obstacles = []
self.obs_dir = []
self.obs_positions = []
self.reset_step = 25
self.walls = []
self._num_envs = 1
def load_observation_space(self):
"""
Load observation space
"""
self.output = self.config['output']
self.image_width = self.config.get('image_width', 128)
self.image_height = self.config.get('image_height', 128)
observation_space = OrderedDict()
if 'close_to_goal' in self.output:
self.close_to_goal_dim = 1
self.close_to_goal_space = gym.spaces.Box(low=-np.inf,
high=np.inf,
shape=(self.close_to_goal_dim,),
dtype=np.float32)
observation_space['close_to_goal'] = self.close_to_goal_space
if 'sensor' in self.output:
self.sensor_dim = self.additional_states_dim
self.sensor_space = gym.spaces.Box(low=-np.inf,
high=np.inf,
shape=(self.sensor_dim,),
dtype=np.float32)
observation_space['sensor'] = self.sensor_space
if 'base_proprioceptive' in self.output:
self.base_proprioceptive_space = gym.spaces.Box(low=-np.inf,
high=np.inf,
shape=(self.base_proprioceptive_states_dim,),
dtype=np.float32)
observation_space['base_proprioceptive'] = self.base_proprioceptive_space
if 'arm_proprioceptive' in self.output:
self.arm_proprioceptive_space = gym.spaces.Box(low=-np.inf,
high=np.inf,
shape=(self.arm_proprioceptive_states_dim,),
dtype=np.float32)
observation_space['arm_proprioceptive'] = self.arm_proprioceptive_space
if 'goal' in self.output:
self.goal_space = gym.spaces.Box(low=-np.inf,
high=np.inf,
shape=(self.goal_dim,),
dtype=np.float32)
observation_space['goal'] = self.goal_space
if 'last_camera_mask_indices' in self.output:
self.last_camera_mask_indices_space = gym.spaces.Box(
low=-np.inf,
high=np.inf,
shape=(1,),
dtype=np.int64)
observation_space['last_camera_mask_indices'] = self.last_camera_mask_indices_space
if 'rgb' in self.output:
self.rgb_space = gym.spaces.Box(low=0.0,
high=1.0,
shape=(self.image_height, self.image_width, 3),
dtype=np.float32)
observation_space['rgb'] = self.rgb_space
if 'wrist_rgb' in self.output:
self.wrist_rgb_space = gym.spaces.Box(low=0.0,
high=1.0,
shape=(self.image_height, self.image_width, 3),
dtype=np.float32)
observation_space['wrist_rgb'] = self.wrist_rgb_space
if 'depth' in self.output:
self.depth_noise_rate = self.config.get('depth_noise_rate', 0.0)
self.depth_low = self.config.get('depth_low', 0.0)
self.depth_high = self.config.get('depth_high', 10.0)
self.depth_space = gym.spaces.Box(low=0.0,
high=1.0,
shape=(self.image_height, self.image_width, 1),
dtype=np.float32)
observation_space['depth'] = self.depth_space
if 'wrist_depth' in self.output:
self.depth_noise_rate = self.config.get('depth_noise_rate', 0.0)
self.depth_low = self.config.get('depth_low', 0.0)
self.depth_high = self.config.get('depth_high', 10.0)
self.wrist_depth_space = gym.spaces.Box(low=0.0,
high=1.0,
shape=(self.image_height, self.image_width, 1),
dtype=np.float32)
observation_space['wrist_depth'] = self.wrist_depth_space
if 'rgbd' in self.output:
self.rgbd_space = gym.spaces.Box(low=0.0,
high=1.0,
shape=(self.image_height, self.image_width, 4),
dtype=np.float32)
observation_space['rgbd'] = self.rgbd_space
if 'seg' in self.output:
self.seg_space = gym.spaces.Box(low=0.0,
high=1.0,
shape=(self.image_height, self.image_width, 1),
dtype=np.float32)
observation_space['seg'] = self.seg_space
if 'wrist_seg' in self.output:
self.wrist_seg_space = gym.spaces.Box(low=0.0,
high=1.0,
shape=(self.image_height, self.image_width, 1),
dtype=np.float32)
observation_space['wrist_seg'] = self.wrist_seg_space
if 'scan' in self.output:
self.scan_noise_rate = self.config.get('scan_noise_rate', 0.0)
self.n_horizontal_rays = self.config.get('n_horizontal_rays', 128)
self.n_vertical_beams = self.config.get('n_vertical_beams', 1)
assert self.n_vertical_beams == 1, 'scan can only handle one vertical beam for now'
self.laser_linear_range = self.config.get('laser_linear_range', 10.0)
self.laser_angular_range = self.config.get('laser_angular_range', 180.0)
self.min_laser_dist = self.config.get('min_laser_dist', 0.05)
self.laser_link_name = self.config.get('laser_link_name', 'scan_link')
self.scan_space = gym.spaces.Box(low=0.0,
high=1.0,
shape=(self.n_horizontal_rays * self.n_vertical_beams, 1),
dtype=np.float32)
observation_space['scan'] = self.scan_space
if 'rgb_filled' in self.output: # use filler
try:
import torch.nn as nn
import torch
from torchvision import datasets, transforms
from gibson2.learn.completion import CompletionNet
except:
raise Exception('Trying to use rgb_filled ("the goggle"), but torch is not installed. Try "pip install torch torchvision".')
self.comp = CompletionNet(norm=nn.BatchNorm2d, nf=64)
self.comp = torch.nn.DataParallel(self.comp).cuda()
self.comp.load_state_dict(
torch.load(os.path.join(gibson2.assets_path, 'networks', 'model.pth')))
self.comp.eval()
self.observation_space = gym.spaces.Dict(observation_space)
def load_action_space(self):
"""
Load action space
"""
self.action_space = self.robots[0].action_space
def load_visualization(self):
"""
Load visualization, such as initial and target position, shortest path, etc
"""
if (self.mode != 'gui' and self.mode != 'iggui' and self.mode != 'pbgui' and self.mode !='headless'):
return
'''
cyl_length = 0.2
self.initial_pos_vis_obj = VisualMarker(visual_shape=p.GEOM_CYLINDER,
rgba_color=[1, 0, 0, 1],
radius=0.5,
length=cyl_length,
initial_offset=[0, 0, cyl_length / 2.0])
self.target_pos_vis_obj = VisualMarker(visual_shape=p.GEOM_CYLINDER,
rgba_color=[0, 0, 1, 1],
radius=0.5,
length=cyl_length,
initial_offset=[0, 0, cyl_length / 2.0])
self.initial_pos_vis_obj.load()
self.target_pos_vis_obj.load()
if self.scene.build_graph:
self.num_waypoints_vis = 250
self.waypoints_vis = [VisualMarker(visual_shape=p.GEOM_CYLINDER,
rgba_color=[0, 1, 0, 0.3],
radius=0.1,
length=cyl_length,
initial_offset=[0, 0, cyl_length / 2.0])
for _ in range(self.num_waypoints_vis)]
for waypoint in self.waypoints_vis:
waypoint.load()
'''
# add visual objects
self.visual_object_at_initial_target_pos = self.config.get(
'visual_object_at_initial_target_pos', False)
if self.visual_object_at_initial_target_pos:
#self.initial_pos_vis_obj = VisualMarker(visual_shape=p.GEOM_CYLINDER,
# rgba_color=[1, 0, 0, 0.95],
# radius=0.02,
# length=5)
#self.target_pos_vis_obj = VisualMarker(visual_shape=p.GEOM_CYLINDER,
# rgba_color=[1, 1, 0, 0.7],
# radius=0.02,
# length=5)
if self.obstacle_type == 'realistic':
original_size = np.array([0.07733, 0.169027, 0.218797])
scale = self.dist_tol * 2 / original_size
if self.config['task'] == 'pointgoal':
scale = 0.2 / original_size
self.target_pos_vis_obj = YCBObject('003_cracker_box', scale=scale, collision=False)
elif self.obstacle_type == 'block':
self.target_pos_vis_obj = VisualMarker(visual_shape=p.GEOM_SPHERE,
rgba_color=[1, 0, 0, 1],
radius=0.09)
#self.initial_pos_vis_obj.load()
if self.config.get('target_visual_object_visible_to_agent', False):
self.simulator.import_object(self.target_pos_vis_obj, class_id=2)
#self.simulator.import_object(self.target_pos_vis_obj_exact)
else:
self.target_pos_vis_obj.load()
#self.target_pos_vis_obj_exact.load()
# set mass to 0.0 to avoid gravity
if self.obstacle_type == 'realistic':
p.changeDynamics(self.target_pos_vis_obj.body_id, -1, mass=0.0)
def load_obstacles(self):
for i in range(self.num_obstacles):
if self.obstacle_type == 'realistic':
obstacle = VisualShape(
os.path.join(gibson2.assets_path, 'models/quadrotor/quadrotor_base.obj'),
scale=[0.025, 0.2, 0.2])
self.simulator.import_object(obstacle)
# set mass to 0.0 to avoid gravity
for joint_id in range(-1, p.getNumJoints(obstacle.body_id)):
p.changeDynamics(obstacle.body_id, joint_id, mass=0.0)
elif self.obstacle_type == 'block':
obstacle = BoxShape(dim=[0.075, 0.6, 0.075],
visual_only=False,
mass=0,
color=[1, 1, 0, 0.95])
self.simulator.import_object(obstacle, class_id=1)
obstacle.load()
self.obstacles.append(obstacle)
def load_walls(self):
back_wall = BoxShape(pos=[-2.0, 0, 1.0],
dim=[0.1, 1.5, 1.0],
visual_only=False,
mass=1000,
color=[1, 1, 1, 1])
front_wall = BoxShape(pos=[8.0, 0, 1.0],
dim=[0.1, 1.5, 1.0],
visual_only=False,
mass=1000,
color=[1, 1, 1, 1])
left_wall = BoxShape(pos=[3.0, 1.6, 1.0],
dim=[5.1, 0.1, 1.0],
visual_only=False,
mass=1000,
color=[1, 1, 1, 1])
right_wall = BoxShape(pos=[3.0, -1.6, 1.0],
dim=[5.1, 0.1, 1.0],
visual_only=False,
mass=1000,
color=[1, 1, 1, 1])
ceiling = BoxShape(pos=[3, 0, 2.05],
dim=[5.2, 1.8, 0.05],
visual_only=True,
mass=0,
color=[1, 1, 1, 1])
self.simulator.import_object(back_wall, class_id=0)
self.simulator.import_object(front_wall, class_id=0)
self.simulator.import_object(left_wall, class_id=0)
self.simulator.import_object(right_wall, class_id=0)
self.simulator.import_object(ceiling, class_id=0)
# back_wall.load()
# front_wall.load()
# left_wall.load()
# right_wall.load()
self.walls.append(back_wall)
self.walls.append(front_wall)
self.walls.append(left_wall)
self.walls.append(right_wall)
self.walls.append(ceiling)
self.wall_constraints = []
for wall in self.walls:
constraint = p.createConstraint(
0, -1, wall.body_id, -1, p.JOINT_FIXED,
[0, 0, 1],
wall.get_position(),
[0, 0, 0],
wall.get_orientation(),
[0, 0, 0, 1])
self.wall_constraints.append(constraint)
def load_miscellaneous_variables(self):
"""
Load miscellaneous variables for book keeping
"""
self.current_step = 0
self.collision_step = 0
self.current_episode = 0
self.floor_num = 0
def load(self):
"""
Load navigation environment
"""
super(NavigateEnv, self).load()
self.load_task_setup()
self.load_observation_space()
self.load_action_space()
self.load_walls()
self.load_obstacles()
self.load_visualization()
self.load_miscellaneous_variables()
def global_to_local(self, pos):
"""
Convert a 3D point in global frame to agent's local frame
:param pos: a 3D point in global frame
:return: the same 3D point in agent's local frame
"""
return rotate_vector_3d(pos - self.robots[0].get_position(), *self.robots[0].get_rpy())
def get_additional_states(self):
"""
:return: non-perception observation, such as goal location
"""
additional_states = []
#additional_states = self.global_to_local(self.target_pos)[:2]
#if self.goal_format == 'polar':
# additional_states = np.array(cartesian_to_polar(additional_states[0], additional_states[1]))
#if self.config['task'] == 'reaching':
# additional_states = np.append(additional_states, self.target_pos[2:])
#additional_states = []
# linear velocity along the x-axis
linear_velocity = rotate_vector_3d(self.robots[0].get_linear_velocity(),
*self.robots[0].get_rpy())[0]
# angular velocity along the z-axis
angular_velocity = rotate_vector_3d(self.robots[0].get_angular_velocity(),
*self.robots[0].get_rpy())[2]
additional_states = np.append(additional_states, [linear_velocity, angular_velocity])
import os
import cv2
import matplotlib.pyplot as plt
import numpy as np
import pykitti
import torch
import torchvision.transforms.functional as F
from torch import hub
from torchvision.datasets import Cityscapes
from autolabeling import autolabel
from autolabeling.classes import get_lidar_colormap
from lilanet.utils import colorize_seg
def get_cityscapes_colormap():
cmap = torch.zeros([256, 3], dtype=torch.uint8)
for cls in Cityscapes.classes:
cmap[cls.id, :] = torch.tensor(cls.color)
return cmap
def convert_train_id_to_id(target):
target_copy = target.clone()
for cls in Cityscapes.classes:
target_copy[target == cls.train_id] = cls.id
return target_copy
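# Small illustrative sketch (not part of the original script): the 2x2 label
# tensor is hypothetical and only shows the train-id -> id conversion that is
# applied before colorizing with the Cityscapes colormap.
def _demo_convert_ids():
    target = torch.tensor([[0, 1], [2, 255]])   # Cityscapes train ids (255 = ignore)
    full_ids = convert_train_id_to_id(target)
    cmap = get_cityscapes_colormap()            # [256, 3] uint8 color table
    return full_ids, cmap[full_ids]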
def show_lidar_on_image(points, image, segmentation, T_cam0, K_cam0):
points_2d = autolabel.pinhole_projection(points, T_cam0, K_cam0)
cmap = get_cityscapes_colormap()
segmentation = convert_train_id_to_id(segmentation)
vis = colorize_seg(segmentation.cpu(), cmap)
height, width = segmentation.shape
for i in range(points.shape[0]):
img_x = points_2d[i, 0]
img_y = points_2d[i, 1]
img_x = np.clip(img_x, 0, width - 1)
img_y = np.clip(img_y, 0, height - 1)
color = vis[:, img_y, img_x].tolist()
cv2.circle(image, (img_x, img_y), 2, color=tuple(color), thickness=-1)
return image
def show_lidar_depth_on_image(pc_velo, img, T_cam0, K_cam0):
points_2d = autolabel.pinhole_projection(pc_velo, T_cam0, K_cam0)
cmap = plt.cm.get_cmap('hsv', 256)
cmap = np.array([cmap(i) for i in range(256)])[:, :3] * 255
for i in range(pc_velo.shape[0]):
depth = np.sqrt(pc_velo[i, 0] ** 2 + pc_velo[i, 1] ** 2 + pc_velo[i, 2] ** 2)
idx = np.clip(int(640.0 / depth), 0, 255)
color = cmap[idx, :]
img_x = points_2d[i, 0]
img_y = points_2d[i, 1]
cv2.circle(img, (img_x, img_y), 2, color=tuple(color), thickness=-1)
return img
def plot_images(file_name, distance, reflectivity, label, segmentation, img, proj_img):
cmap = get_lidar_colormap()
cs_cmap = get_cityscapes_colormap()
def _normalize(x):
return (x - x.min()) / (x.max() - x.min())
distance_map = F.to_pil_image(_normalize(distance.squeeze()))
reflectivity_map = F.to_pil_image(_normalize(reflectivity.squeeze()))
label_map = F.to_pil_image(colorize_seg(label.squeeze(), cmap).cpu())
segmentation = convert_train_id_to_id(segmentation)
segmentation_map = F.to_pil_image(colorize_seg(segmentation.squeeze(), cs_cmap).cpu())
fig = plt.figure(figsize=(10, 5))
plt.subplot(231)
plt.title("Camera Image")
plt.imshow(img)
plt.subplot(232)
plt.title("Semantic Image")
plt.imshow(segmentation_map)
plt.subplot(233)
plt.title("Semantic Transfer")
plt.imshow(proj_img)
plt.subplot(234)
plt.title("Distance")
plt.imshow(distance_map)
plt.subplot(235)
plt.title("Reflectivity")
plt.imshow(reflectivity_map)
plt.subplot(236)
plt.title("Label")
plt.imshow(label_map)
plt.tight_layout()
plt.show()
fig.savefig(file_name, dpi=200)
if __name__ == '__main__':
torch.cuda.empty_cache()
basedir = '../data/kitti_raw'
date = '2011_09_26'
drive = '0005'
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
dataset = pykitti.raw(basedir, date, drive)
idx = 16
file_name = "{}_{}_{}.png".format(date, drive, os.path.basename(dataset.cam2_files[idx])[:-4])
model = hub.load('TheCodez/pytorch-GoogLeNet-FCN', 'googlenet_fcn', pretrained='cityscapes')
model = model.to(device)
model.eval()
img = dataset.get_cam2(idx)
pc_velo = dataset.get_velo(idx)
print("Inference")
pred = autolabel.semantic_segmentation(model, img, device)
pc_velo = autolabel.get_points_in_fov_90(pc_velo)
print("Transferring labels")
pc_labels = autolabel.transfer_labels(pc_velo, pred, dataset.calib.T_cam0_velo, dataset.calib.K_cam0)
print("Spherical projection")
lidar = autolabel.spherical_projection(pc_labels)
proj_img = show_lidar_on_image(pc_velo, np.array(img), pred, dataset.calib.T_cam0_velo, dataset.calib.K_cam0)
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import re
from PIL import Image
import sys
import tifffile
import cv2
def read_tiff(filename):
img = np.array(tifffile.imread(filename)).astype(np.float)
return img
def read_img(filename):
# Convert to RGB for scene flow finalpass data
img = np.array(Image.open(filename).convert('RGB')).astype(np.float32)
return img
def read_disp(filename, subset=False):
# Scene Flow dataset
if filename.endswith('pfm'):
# For finalpass and cleanpass, gt disparity is positive, subset is negative
disp = np.ascontiguousarray(_read_pfm(filename)[0])
if subset:
disp = -disp
# KITTI
elif filename.endswith('png'):
disp = _read_kitti_disp(filename)
elif filename.endswith('npy'):
disp = np.load(filename)
elif filename.endswith('tif'):
disp = _read_dfc_disp(filename)
disp = np.abs(disp)
else:
raise Exception('Invalid disparity file format!')
return disp # [H, W]
def _read_pfm(file):
file = open(file, 'rb')
color = None
width = None
height = None
scale = None
endian = None
header = file.readline().rstrip()
if header.decode("ascii") == 'PF':
color = True
elif header.decode("ascii") == 'Pf':
color = False
else:
raise Exception('Not a PFM file.')
dim_match = re.match(r'^(\d+)\s(\d+)\s$', file.readline().decode("ascii"))
if dim_match:
width, height = list(map(int, dim_match.groups()))
else:
raise Exception('Malformed PFM header.')
scale = float(file.readline().decode("ascii").rstrip())
if scale < 0: # little-endian
endian = '<'
scale = -scale
else:
endian = '>' # big-endian
data = np.fromfile(file, endian + 'f')
shape = (height, width, 3) if color else (height, width)
data = np.reshape(data, shape)
data = np.flipud(data)
import numpy as np
import cv2
from datetime import datetime
from skimage.exposure import rescale_intensity
import scipy.stats as st
from scipy import ndimage as nimg
from scipy import sparse as sp
import math
class spatial_filtering:
# sets the zero padding size, window size, input_image, height, width and a zero padded image
def initialize(self, filename, filterType, filterSize):
self.window_size = int(filterSize)
self.pad = int(self.window_size/2)
self.input_image = cv2.imread(filename, 0)
self.height, self.width = self.input_image.shape
self.padded_image = np.pad(self.input_image, (self.pad, self.pad), 'constant', constant_values=(0))
# Parameters for portrait mode
self.BLUR = 15
self.CANNY_THRESH_1 = 50
self.CANNY_THRESH_2 = 200
self.MASK_DILATE_ITER = 5
self.MASK_ERODE_ITER = 3
self.MASK_COLOR = (0, 0, 0) # In BGR format
# smoothing function using average or gaussian filter
def smoothing(self, filename, filterType, filterSize, variance):
print("Executing SMOOTHING function on File:", filename)
print("Selected filter type is:", filterType)
print("Filter size is:", filterSize)
self.initialize(filename, filterType, filterSize)
# sets mask.
if filterType=='Average Filter':
# Average Filter
value = float(1.0 / (self.window_size ** 2))
self.mask = np.full((self.window_size, self.window_size), value, dtype=float)
print('average mask ', self.mask)
else:
# Gaussian Filter
self.mask = self.gaussianMatrix(self.window_size, variance)
print("Variance Value ", variance)
print('Gaussian Mask ', self.mask)
# convolution using the mask selected and the zero paded image
self.convolution()
print("input img", self.input_image)
print("output img", self.output_array)
# Saves output image to file
output_image_name = 'output/Smoothing_' + filterType + str(filterSize) + datetime.now().strftime("%m%d-%H%M%S") + ".png"
cv2.imwrite(output_image_name, self.output_array)
return output_image_name
# sharpening function using Laplacian, Unsharp Mask or High Boost
def sharpening(self, filename, filterType, filterSize, unsharpMaskConstant):
print("Executing SHARPENING function on File:", filename)
print("Selected filter type is:", filterType)
print("Filter size is:", filterSize)
self.initialize(filename, filterType, filterSize)
if filterType=='Laplacian Filter':
# Laplacian Filter
# Setting Mask
self.mask = np.ones((self.window_size,self.window_size))
self.mask[int(self.window_size/2),int(self.window_size/2)] = -(self.window_size**2 - 1)
self.mask = self.mask * (-1)
print("Laplacian mask :", self.mask)
self.convolution() #Does convolution of the laplacian mask and zero padded image
self.output_array = np.add(self.output_array, self.input_image)
import numpy as np
import random
from utils import *
class S():
s_newline_id : int = ord('\n')
s_vocab_size : int = 0
s_data : list = []
def clip(gradients: dict, maxValue: int) -> dict:
'''
Clips the gradients' values between minimum and maximum.
Arguments:
gradients -- a dictionary containing the gradients "dWaa", "dWax", "dWya", "db", "dby"
maxValue -- everything above this number is set to this number, and everything less than -maxValue is set to -maxValue
Returns:
gradients -- a dictionary with the clipped gradients.
'''
dWaa, dWax, dWya, db, dby = gradients['dWaa'], gradients['dWax'], gradients['dWya'], gradients['db'], gradients['dby']
### START CODE HERE ###
# clip to mitigate exploding gradients, loop over [dWax, dWaa, dWya, db, dby]. (≈2 lines)
for gradient in [dWaa, dWax, dWya, db, dby]:
np.clip(gradient, -maxValue, maxValue, out=gradient)
### END CODE HERE ###
gradients = {"dWaa": dWaa, "dWax": dWax, "dWya": dWya, "db": db, "dby": dby}
return gradients
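# Minimal illustrative sketch (not part of the assignment code): the toy gradient
# arrays are hypothetical; clip() caps every entry in place at +/- maxValue.
def _demo_clip():
    toy = {name: np.array([[-7.0, 0.5, 9.0]])
           for name in ('dWaa', 'dWax', 'dWya', 'db', 'dby')}
    return clip(toy, 5)    # every entry now lies within [-5, 5]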
def sample(parameters : dict) -> list:
"""
Sample a sequence of characters according to the sequence of probability distributions produced by the RNN
Arguments:
parameters -- python dictionary containing the parameters Waa, Wax, Wya, by, and b.
Returns:
indices -- a list of length n containing the indices of the sampled characters.
"""
# Retrieve parameters and relevant shapes from "parameters" dictionary
Waa, Wax, Wya, by, b = parameters['Waa'], parameters['Wax'], parameters['Wya'], parameters['by'], parameters['b']
#vocab_size = by.shape[0]
n_a = Waa.shape[1]
### START CODE HERE ###
# Step 1: Create the a zero vector x that can be used as the one-hot vector
# representing the first character (initializing the sequence generation). (≈1 line)
x = np.zeros(shape=(S.s_vocab_size, 1))
# Step 1': Initialize a_prev as zeros (≈1 line)
a_prev = np.zeros(shape=(n_a, 1))
# Create an empty list of indices, this is the list which will contain the list of indices of the characters to generate (≈1 line)
indices = []
# idx is the index of the one-hot vector x that is set to 1
# All other positions in x are zero.
# We will initialize idx to -1
idx = -1
# Loop over time-steps t. At each time-step:
# sample a character from a probability distribution
# and append its index (`idx`) to the list "indices".
# We'll stop if we reach 50 characters
# (which should be very unlikely with a well trained model).
# Setting the maximum number of characters helps with debugging and prevents infinite loops.
counter = 0
newline_character = S.s_newline_id
while (idx != newline_character and counter != 50):
# Step 2: Forward propagate x using the equations (1), (2) and (3)
a = np.tanh(np.dot(Waa, a_prev) + np.dot(Wax, x) + b)
z = np.dot(Wya, a) + by
y = softmax(z)
# Step 3: Sample the index of a character within the vocabulary from the probability distribution y
# (see additional hints above)
idx = np.random.choice(range(S.s_vocab_size), p = y.ravel())
# Append the index to "indices"
indices.append(idx)
# Step 4: Overwrite the input x with one that corresponds to the sampled index `idx`.
# (see additional hints above)
x = np.zeros(shape=(S.s_vocab_size, 1))
x[idx] = 1
# Update "a_prev" to be "a"
a_prev = a
counter +=1
### END CODE HERE ###
if (counter == 50):
indices.append(S.s_newline_id)
return indices
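# Minimal illustrative sketch (not part of the assignment code): the vocabulary
# size, newline id and randomly initialized parameters below are hypothetical and
# only show the shapes that sample() expects.
def _demo_sample(n_a=4):
    S.s_vocab_size = 27
    S.s_newline_id = 0
    rng = np.random.RandomState(0)
    parameters = {'Wax': rng.randn(n_a, S.s_vocab_size) * 0.01,
                  'Waa': rng.randn(n_a, n_a) * 0.01,
                  'Wya': rng.randn(S.s_vocab_size, n_a) * 0.01,
                  'b': np.zeros((n_a, 1)),
                  'by': np.zeros((S.s_vocab_size, 1))}
    return sample(parameters)   # list of sampled character indices, ending in the newline id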
def optimize(X, Y, a_prev, parameters, learning_rate : float = 0.01):
"""
Execute one step of the optimization to train the model.
Arguments:
X -- list of integers, where each integer is a number that maps to a character in the vocabulary.
Y -- list of integers, exactly the same as X but shifted one index to the left.
a_prev -- previous hidden state.
parameters -- python dictionary containing:
Wax -- Weight matrix multiplying the input, numpy array of shape (n_a, n_x)
Waa -- Weight matrix multiplying the hidden state, numpy array of shape (n_a, n_a)
Wya -- Weight matrix relating the hidden-state to the output, numpy array of shape (n_y, n_a)
b -- Bias, numpy array of shape (n_a, 1)
by -- Bias relating the hidden-state to the output, numpy array of shape (n_y, 1)
learning_rate -- learning rate for the model.
Returns:
loss -- value of the loss function (cross-entropy)
gradients -- python dictionary containing:
dWax -- Gradients of input-to-hidden weights, of shape (n_a, n_x)
dWaa -- Gradients of hidden-to-hidden weights, of shape (n_a, n_a)
dWya -- Gradients of hidden-to-output weights, of shape (n_y, n_a)
db -- Gradients of bias vector, of shape (n_a, 1)
dby -- Gradients of output bias vector, of shape (n_y, 1)
a[len(X)-1] -- the last hidden state, of shape (n_a, 1)
"""
### START CODE HERE ###
# Forward propagate through time (≈1 line)
loss, cache = rnn_forward(X, Y, a_prev, parameters) # todo I added the param 11
# Backpropagate through time (≈1 line)
gradients, a = rnn_backward(X, Y, parameters, cache)
# Clip your gradients between -5 (min) and 5 (max) (≈1 line)
gradients = clip(gradients, 5)
# Update parameters (≈1 line)
parameters = update_parameters(parameters, gradients, learning_rate)
### END CODE HERE ###
return loss, gradients, a[len(X)-1]
def smooth(loss, cur_loss):
return loss * 0.999 + cur_loss * 0.001
def get_initial_loss(seq_length : int):
return -np.log(1.0/S.s_vocab_size) * seq_length
# -*- coding: utf-8 -*-
# pylint: disable-msg=E1101,W0612
import nose
import numpy as np
from numpy import nan
import pandas as pd
from distutils.version import LooseVersion
from pandas import (Index, Series, DataFrame, Panel, isnull,
date_range, period_range)
from pandas.core.index import MultiIndex
import pandas.core.common as com
from pandas.compat import range, zip
from pandas import compat
from pandas.util.testing import (assert_series_equal,
assert_frame_equal,
assert_panel_equal,
assert_equal)
import pandas.util.testing as tm
def _skip_if_no_pchip():
try:
from scipy.interpolate import pchip_interpolate # noqa
except ImportError:
raise nose.SkipTest('scipy.interpolate.pchip missing')
# ----------------------------------------------------------------------
# Generic types test cases
class Generic(object):
_multiprocess_can_split_ = True
def setUp(self):
pass
@property
def _ndim(self):
return self._typ._AXIS_LEN
def _axes(self):
""" return the axes for my object typ """
return self._typ._AXIS_ORDERS
def _construct(self, shape, value=None, dtype=None, **kwargs):
""" construct an object for the given shape
if value is specified use that if its a scalar
if value is an array, repeat it as needed """
if isinstance(shape, int):
shape = tuple([shape] * self._ndim)
if value is not None:
if np.isscalar(value):
if value == 'empty':
arr = None
# remove the info axis
kwargs.pop(self._typ._info_axis_name, None)
else:
arr = np.empty(shape, dtype=dtype)
arr.fill(value)
else:
fshape = np.prod(shape)
arr = value.ravel()
new_shape = fshape / arr.shape[0]
if fshape % arr.shape[0] != 0:
raise Exception("invalid value passed in _construct")
arr = np.repeat(arr, new_shape).reshape(shape)
else:
arr = np.random.randn(*shape)
return self._typ(arr, dtype=dtype, **kwargs)
def _compare(self, result, expected):
self._comparator(result, expected)
def test_rename(self):
# single axis
for axis in self._axes():
kwargs = {axis: list('ABCD')}
obj = self._construct(4, **kwargs)
# no values passed
# self.assertRaises(Exception, o.rename(str.lower))
# rename a single axis
result = obj.rename(**{axis: str.lower})
expected = obj.copy()
setattr(expected, axis, list('abcd'))
self._compare(result, expected)
# multiple axes at once
def test_get_numeric_data(self):
n = 4
kwargs = {}
for i in range(self._ndim):
kwargs[self._typ._AXIS_NAMES[i]] = list(range(n))
# get the numeric data
o = self._construct(n, **kwargs)
result = o._get_numeric_data()
self._compare(result, o)
# non-inclusion
result = o._get_bool_data()
expected = self._construct(n, value='empty', **kwargs)
self._compare(result, expected)
# get the bool data
arr = np.array([True, True, False, True])
o = self._construct(n, value=arr, **kwargs)
result = o._get_numeric_data()
self._compare(result, o)
# _get_numeric_data is includes _get_bool_data, so can't test for
# non-inclusion
def test_get_default(self):
# GH 7725
d0 = "a", "b", "c", "d"
d1 = np.arange(4, dtype='int64')
others = "e", 10
for data, index in ((d0, d1), (d1, d0)):
s = Series(data, index=index)
for i, d in zip(index, data):
self.assertEqual(s.get(i), d)
self.assertEqual(s.get(i, d), d)
self.assertEqual(s.get(i, "z"), d)
for other in others:
self.assertEqual(s.get(other, "z"), "z")
self.assertEqual(s.get(other, other), other)
def test_nonzero(self):
# GH 4633
# look at the boolean/nonzero behavior for objects
obj = self._construct(shape=4)
self.assertRaises(ValueError, lambda: bool(obj == 0))
self.assertRaises(ValueError, lambda: bool(obj == 1))
self.assertRaises(ValueError, lambda: bool(obj))
obj = self._construct(shape=4, value=1)
self.assertRaises(ValueError, lambda: bool(obj == 0))
self.assertRaises(ValueError, lambda: bool(obj == 1))
self.assertRaises(ValueError, lambda: bool(obj))
obj = self._construct(shape=4, value=np.nan)
self.assertRaises(ValueError, lambda: bool(obj == 0))
self.assertRaises(ValueError, lambda: bool(obj == 1))
self.assertRaises(ValueError, lambda: bool(obj))
# empty
obj = self._construct(shape=0)
self.assertRaises(ValueError, lambda: bool(obj))
# invalid behaviors
obj1 = self._construct(shape=4, value=1)
obj2 = self._construct(shape=4, value=1)
def f():
if obj1:
com.pprint_thing("this works and shouldn't")
self.assertRaises(ValueError, f)
self.assertRaises(ValueError, lambda: obj1 and obj2)
self.assertRaises(ValueError, lambda: obj1 or obj2)
self.assertRaises(ValueError, lambda: not obj1)
def test_numpy_1_7_compat_numeric_methods(self):
# GH 4435
# numpy in 1.7 tries to pass additional arguments to pandas functions
o = self._construct(shape=4)
for op in ['min', 'max', 'max', 'var', 'std', 'prod', 'sum', 'cumsum',
'cumprod', 'median', 'skew', 'kurt', 'compound', 'cummax',
'cummin', 'all', 'any']:
f = getattr(np, op, None)
if f is not None:
f(o)
def test_downcast(self):
# test close downcasting
o = self._construct(shape=4, value=9, dtype=np.int64)
result = o.copy()
result._data = o._data.downcast(dtypes='infer')
self._compare(result, o)
o = self._construct(shape=4, value=9.)
expected = o.astype(np.int64)
result = o.copy()
result._data = o._data.downcast(dtypes='infer')
self._compare(result, expected)
o = self._construct(shape=4, value=9.5)
result = o.copy()
result._data = o._data.downcast(dtypes='infer')
self._compare(result, o)
# are close
o = self._construct(shape=4, value=9.000000000005)
result = o.copy()
result._data = o._data.downcast(dtypes='infer')
expected = o.astype(np.int64)
self._compare(result, expected)
def test_constructor_compound_dtypes(self):
# GH 5191
# compound dtypes should raise NotImplementedError
def f(dtype):
return self._construct(shape=3, dtype=dtype)
self.assertRaises(NotImplementedError, f, [("A", "datetime64[h]"),
("B", "str"),
("C", "int32")])
# these work (though results may be unexpected)
f('int64')
f('float64')
f('M8[ns]')
def check_metadata(self, x, y=None):
for m in x._metadata:
v = getattr(x, m, None)
if y is None:
self.assertIsNone(v)
else:
self.assertEqual(v, getattr(y, m, None))
def test_metadata_propagation(self):
# check that the metadata matches up on the resulting ops
o = self._construct(shape=3)
o.name = 'foo'
o2 = self._construct(shape=3)
o2.name = 'bar'
# TODO
# Once panel can do non-trivial combine operations
# (currently there is an a raise in the Panel arith_ops to prevent
# this, though it actually does work)
# can remove all of these try: except: blocks on the actual operations
# ----------
# preserving
# ----------
# simple ops with scalars
for op in ['__add__', '__sub__', '__truediv__', '__mul__']:
result = getattr(o, op)(1)
self.check_metadata(o, result)
# ops with like
for op in ['__add__', '__sub__', '__truediv__', '__mul__']:
try:
result = getattr(o, op)(o)
self.check_metadata(o, result)
except (ValueError, AttributeError):
pass
# simple boolean
for op in ['__eq__', '__le__', '__ge__']:
v1 = getattr(o, op)(o)
self.check_metadata(o, v1)
try:
self.check_metadata(o, v1 & v1)
except (ValueError):
pass
try:
self.check_metadata(o, v1 | v1)
except (ValueError):
pass
# combine_first
try:
result = o.combine_first(o2)
self.check_metadata(o, result)
except (AttributeError):
pass
# ---------------------------
# non-preserving (by default)
# ---------------------------
# add non-like
try:
result = o + o2
self.check_metadata(result)
except (ValueError, AttributeError):
pass
# simple boolean
for op in ['__eq__', '__le__', '__ge__']:
# this is a name matching op
v1 = getattr(o, op)(o)
v2 = getattr(o, op)(o2)
self.check_metadata(v2)
try:
self.check_metadata(v1 & v2)
except (ValueError):
pass
try:
self.check_metadata(v1 | v2)
except (ValueError):
pass
def test_head_tail(self):
# GH5370
o = self._construct(shape=10)
# check all index types
for index in [tm.makeFloatIndex, tm.makeIntIndex, tm.makeStringIndex,
tm.makeUnicodeIndex, tm.makeDateIndex,
tm.makePeriodIndex]:
axis = o._get_axis_name(0)
setattr(o, axis, index(len(getattr(o, axis))))
# Panel + dims
try:
o.head()
except (NotImplementedError):
raise nose.SkipTest('not implemented on {0}'.format(
o.__class__.__name__))
self._compare(o.head(), o.iloc[:5])
self._compare(o.tail(), o.iloc[-5:])
# 0-len
self._compare(o.head(0), o.iloc[0:0])
self._compare(o.tail(0), o.iloc[0:0])
# bounded
self._compare(o.head(len(o) + 1), o)
self._compare(o.tail(len(o) + 1), o)
# neg index
self._compare(o.head(-3), o.head(7))
self._compare(o.tail(-3), o.tail(7))
def test_sample(self):
# Fixes issue: 2419
o = self._construct(shape=10)
###
# Check behavior of random_state argument
###
# Check for stability when receives seed or random state -- run 10
# times.
for test in range(10):
seed = np.random.randint(0, 100)
self._compare(
o.sample(n=4, random_state=seed), o.sample(n=4,
random_state=seed))
self._compare(
o.sample(frac=0.7, random_state=seed), o.sample(
frac=0.7, random_state=seed))
self._compare(
o.sample(n=4, random_state=np.random.RandomState(test)),
o.sample(n=4, random_state=np.random.RandomState(test)))
self._compare(
o.sample(frac=0.7, random_state=np.random.RandomState(test)),
o.sample(frac=0.7, random_state=np.random.RandomState(test)))
# Check for error when random_state argument invalid.
with tm.assertRaises(ValueError):
o.sample(random_state='astring!')
###
# Check behavior of `frac` and `N`
###
# Giving both frac and N throws error
with tm.assertRaises(ValueError):
o.sample(n=3, frac=0.3)
# Check that raises right error for negative lengths
with tm.assertRaises(ValueError):
o.sample(n=-3)
with tm.assertRaises(ValueError):
o.sample(frac=-0.3)
# Make sure float values of `n` give error
with tm.assertRaises(ValueError):
o.sample(n=3.2)
# Check lengths are right
self.assertTrue(len(o.sample(n=4) == 4))
self.assertTrue(len(o.sample(frac=0.34) == 3))
self.assertTrue(len(o.sample(frac=0.36) == 4))
###
# Check weights
###
# Weight length must be right
with tm.assertRaises(ValueError):
o.sample(n=3, weights=[0, 1])
with tm.assertRaises(ValueError):
bad_weights = [0.5] * 11
o.sample(n=3, weights=bad_weights)
with tm.assertRaises(ValueError):
bad_weight_series = Series([0, 0, 0.2])
o.sample(n=4, weights=bad_weight_series)
# Check won't accept negative weights
with tm.assertRaises(ValueError):
bad_weights = [-0.1] * 10
o.sample(n=3, weights=bad_weights)
# Check inf and -inf throw errors:
with tm.assertRaises(ValueError):
weights_with_inf = [0.1] * 10
weights_with_inf[0] = np.inf
o.sample(n=3, weights=weights_with_inf)
with tm.assertRaises(ValueError):
weights_with_ninf = [0.1] * 10
weights_with_ninf[0] = -np.inf
o.sample(n=3, weights=weights_with_ninf)
# All zeros raises errors
zero_weights = [0] * 10
with tm.assertRaises(ValueError):
o.sample(n=3, weights=zero_weights)
# All missing weights
nan_weights = [np.nan] * 10
with tm.assertRaises(ValueError):
o.sample(n=3, weights=nan_weights)
# A few dataframe test with degenerate weights.
easy_weight_list = [0] * 10
easy_weight_list[5] = 1
df = pd.DataFrame({'col1': range(10, 20),
'col2': range(20, 30),
'colString': ['a'] * 10,
'easyweights': easy_weight_list})
sample1 = df.sample(n=1, weights='easyweights')
assert_frame_equal(sample1, df.iloc[5:6])
# Ensure proper error if string given as weight for Series, panel, or
# DataFrame with axis = 1.
s = Series(range(10))
with tm.assertRaises(ValueError):
s.sample(n=3, weights='weight_column')
panel = pd.Panel(items=[0, 1, 2], major_axis=[2, 3, 4],
minor_axis=[3, 4, 5])
with tm.assertRaises(ValueError):
panel.sample(n=1, weights='weight_column')
with tm.assertRaises(ValueError):
df.sample(n=1, weights='weight_column', axis=1)
# Check weighting key error
with tm.assertRaises(KeyError):
df.sample(n=3, weights='not_a_real_column_name')
# Check np.nan are replaced by zeros.
weights_with_nan = [np.nan] * 10
weights_with_nan[5] = 0.5
self._compare(
o.sample(n=1, axis=0, weights=weights_with_nan), o.iloc[5:6])
# Check None are also replaced by zeros.
weights_with_None = [None] * 10
weights_with_None[5] = 0.5
self._compare(
o.sample(n=1, axis=0, weights=weights_with_None), o.iloc[5:6])
# Check that re-normalizes weights that don't sum to one.
weights_less_than_1 = [0] * 10
weights_less_than_1[0] = 0.5
tm.assert_frame_equal(
df.sample(n=1, weights=weights_less_than_1), df.iloc[:1])
###
# Test axis argument
###
# Test axis argument
df = pd.DataFrame({'col1': range(10), 'col2': ['a'] * 10})
second_column_weight = [0, 1]
assert_frame_equal(
df.sample(n=1, axis=1, weights=second_column_weight), df[['col2']])
# Different axis arg types
assert_frame_equal(df.sample(n=1, axis='columns',
weights=second_column_weight),
df[['col2']])
weight = [0] * 10
weight[5] = 0.5
assert_frame_equal(df.sample(n=1, axis='rows', weights=weight),
df.iloc[5:6])
assert_frame_equal(df.sample(n=1, axis='index', weights=weight),
df.iloc[5:6])
# Check out of range axis values
with tm.assertRaises(ValueError):
df.sample(n=1, axis=2)
with tm.assertRaises(ValueError):
df.sample(n=1, axis='not_a_name')
with tm.assertRaises(ValueError):
s = pd.Series(range(10))
s.sample(n=1, axis=1)
# Test weight length compared to correct axis
with tm.assertRaises(ValueError):
df.sample(n=1, axis=1, weights=[0.5] * 10)
# Check weights with axis = 1
easy_weight_list = [0] * 3
easy_weight_list[2] = 1
df = pd.DataFrame({'col1': range(10, 20),
'col2': range(20, 30),
'colString': ['a'] * 10})
sample1 = df.sample(n=1, axis=1, weights=easy_weight_list)
assert_frame_equal(sample1, df[['colString']])
# Test default axes
p = pd.Panel(items=['a', 'b', 'c'], major_axis=[2, 4, 6],
minor_axis=[1, 3, 5])
assert_panel_equal(
p.sample(n=3, random_state=42), p.sample(n=3, axis=1,
random_state=42))
assert_frame_equal(
df.sample(n=3, random_state=42), df.sample(n=3, axis=0,
random_state=42))
# Test that function aligns weights with frame
df = DataFrame(
{'col1': [5, 6, 7],
'col2': ['a', 'b', 'c'], }, index=[9, 5, 3])
s = Series([1, 0, 0], index=[3, 5, 9])
assert_frame_equal(df.loc[[3]], df.sample(1, weights=s))
# Weights have index values to be dropped because not in
# sampled DataFrame
s2 = Series([0.001, 0, 10000], index=[3, 5, 10])
assert_frame_equal(df.loc[[3]], df.sample(1, weights=s2))
# Weights have empty values to be filed with zeros
s3 = Series([0.01, 0], index=[3, 5])
assert_frame_equal(df.loc[[3]], df.sample(1, weights=s3))
# No overlap in weight and sampled DataFrame indices
s4 = Series([1, 0], index=[1, 2])
with tm.assertRaises(ValueError):
df.sample(1, weights=s4)
def test_size_compat(self):
# GH8846
# size property should be defined
o = self._construct(shape=10)
self.assertTrue(o.size == np.prod(o.shape))
self.assertTrue(o.size == 10 ** len(o.axes))
def test_split_compat(self):
# xref GH8846
o = self._construct(shape=10)
self.assertTrue(len(np.array_split(o, 5)) == 5)
self.assertTrue(len(np.array_split(o, 2)) == 2)
def test_unexpected_keyword(self): # GH8597
from pandas.util.testing import assertRaisesRegexp
df = DataFrame(np.random.randn(5, 2), columns=['jim', 'joe'])
ca = pd.Categorical([0, 0, 2, 2, 3, np.nan])
ts = df['joe'].copy()
ts[2] = np.nan
with assertRaisesRegexp(TypeError, 'unexpected keyword'):
df.drop('joe', axis=1, in_place=True)
with assertRaisesRegexp(TypeError, 'unexpected keyword'):
df.reindex([1, 0], inplace=True)
with assertRaisesRegexp(TypeError, 'unexpected keyword'):
ca.fillna(0, inplace=True)
with assertRaisesRegexp(TypeError, 'unexpected keyword'):
ts.fillna(0, in_place=True)
class TestSeries(tm.TestCase, Generic):
_typ = Series
_comparator = lambda self, x, y: assert_series_equal(x, y)
def setUp(self):
self.ts = tm.makeTimeSeries() # Was at top level in test_series
self.ts.name = 'ts'
self.series = tm.makeStringSeries()
self.series.name = 'series'
def test_rename_mi(self):
s = Series([11, 21, 31],
index=MultiIndex.from_tuples(
[("A", x) for x in ["a", "B", "c"]]))
s.rename(str.lower)
def test_get_numeric_data_preserve_dtype(self):
# get the numeric data
o = Series([1, 2, 3])
result = o._get_numeric_data()
self._compare(result, o)
o = Series([1, '2', 3.])
result = o._get_numeric_data()
expected = Series([], dtype=object, index=pd.Index([], dtype=object))
self._compare(result, expected)
o = Series([True, False, True])
result = o._get_numeric_data()
self._compare(result, o)
o = Series([True, False, True])
result = o._get_bool_data()
self._compare(result, o)
o = Series(date_range('20130101', periods=3))
result = o._get_numeric_data()
expected = Series([], dtype='M8[ns]', index=pd.Index([], dtype=object))
self._compare(result, expected)
def test_nonzero_single_element(self):
# allow single item via bool method
s = Series([True])
self.assertTrue(s.bool())
s = Series([False])
self.assertFalse(s.bool())
# single item nan to raise
for s in [Series([np.nan]), Series([pd.NaT]), Series([True]),
Series([False])]:
self.assertRaises(ValueError, lambda: bool(s))
for s in [Series([np.nan]), Series([pd.NaT])]:
self.assertRaises(ValueError, lambda: s.bool())
# multiple bool are still an error
for s in [Series([True, True]), Series([False, False])]:
self.assertRaises(ValueError, lambda: bool(s))
self.assertRaises(ValueError, lambda: s.bool())
# single non-bool are an error
for s in [Series([1]), Series([0]), Series(['a']), Series([0.0])]:
self.assertRaises(ValueError, lambda: bool(s))
self.assertRaises(ValueError, lambda: s.bool())
def test_metadata_propagation_indiv(self):
# check that the metadata matches up on the resulting ops
o = Series(range(3), range(3))
o.name = 'foo'
o2 = Series(range(3), range(3))
o2.name = 'bar'
result = o.T
self.check_metadata(o, result)
# resample
ts = Series(np.random.rand(1000),
index=date_range('20130101', periods=1000, freq='s'),
name='foo')
result = ts.resample('1T').mean()
self.check_metadata(ts, result)
result = ts.resample('1T').min()
self.check_metadata(ts, result)
result = ts.resample('1T').apply(lambda x: x.sum())
self.check_metadata(ts, result)
_metadata = Series._metadata
_finalize = Series.__finalize__
Series._metadata = ['name', 'filename']
o.filename = 'foo'
o2.filename = 'bar'
def finalize(self, other, method=None, **kwargs):
for name in self._metadata:
if method == 'concat' and name == 'filename':
value = '+'.join([getattr(
o, name) for o in other.objs if getattr(o, name, None)
])
object.__setattr__(self, name, value)
else:
object.__setattr__(self, name, getattr(other, name, None))
return self
Series.__finalize__ = finalize
result = pd.concat([o, o2])
self.assertEqual(result.filename, 'foo+bar')
self.assertIsNone(result.name)
# reset
Series._metadata = _metadata
Series.__finalize__ = _finalize
def test_interpolate(self):
ts = Series(np.arange(len(self.ts), dtype=float), self.ts.index)
ts_copy = ts.copy()
ts_copy[5:10] = np.NaN
linear_interp = ts_copy.interpolate(method='linear')
self.assert_numpy_array_equal(linear_interp, ts)
ord_ts = Series([d.toordinal() for d in self.ts.index],
index=self.ts.index).astype(float)
ord_ts_copy = ord_ts.copy()
ord_ts_copy[5:10] = np.NaN
time_interp = ord_ts_copy.interpolate(method='time')
self.assert_numpy_array_equal(time_interp, ord_ts)
# try time interpolation on a non-TimeSeries
# Only raises ValueError if there are NaNs.
non_ts = self.series.copy()
non_ts[0] = np.NaN
self.assertRaises(ValueError, non_ts.interpolate, method='time')
def test_interp_regression(self):
tm._skip_if_no_scipy()
_skip_if_no_pchip()
ser = Series(np.sort(np.random.uniform(size=100)))
# interpolate at new_index
new_index = ser.index.union(Index([49.25, 49.5, 49.75, 50.25, 50.5,
50.75]))
interp_s = ser.reindex(new_index).interpolate(method='pchip')
# does not blow up, GH5977
interp_s[49:51]
def test_interpolate_corners(self):
s = Series([np.nan, np.nan])
assert_series_equal(s.interpolate(), s)
s = Series([]).interpolate()
assert_series_equal(s.interpolate(), s)
tm._skip_if_no_scipy()
s = Series([np.nan, np.nan])
assert_series_equal(s.interpolate(method='polynomial', order=1), s)
s = Series([]).interpolate()
assert_series_equal(s.interpolate(method='polynomial', order=1), s)
def test_interpolate_index_values(self):
s = Series(np.nan, index=np.sort(np.random.rand(30)))
s[::3] = np.random.randn(10)
vals = s.index.values.astype(float)
result = s.interpolate(method='index')
expected = s.copy()
bad = isnull(expected.values)
good = ~bad
expected = Series(np.interp(vals[bad], vals[good],
s.values[good]),
index=s.index[bad])
assert_series_equal(result[bad], expected)
# 'values' is synonymous with 'index' for the method kwarg
other_result = s.interpolate(method='values')
assert_series_equal(other_result, result)
assert_series_equal(other_result[bad], expected)
def test_interpolate_non_ts(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
with tm.assertRaises(ValueError):
s.interpolate(method='time')
# New interpolation tests
def test_nan_interpolate(self):
s = Series([0, 1, np.nan, 3])
result = s.interpolate()
expected = Series([0., 1., 2., 3.])
assert_series_equal(result, expected)
tm._skip_if_no_scipy()
result = s.interpolate(method='polynomial', order=1)
assert_series_equal(result, expected)
def test_nan_irregular_index(self):
s = Series([1, 2, np.nan, 4], index=[1, 3, 5, 9])
result = s.interpolate()
expected = Series([1., 2., 3., 4.], index=[1, 3, 5, 9])
assert_series_equal(result, expected)
def test_nan_str_index(self):
s = Series([0, 1, 2, np.nan], index=list('abcd'))
result = s.interpolate()
expected = Series([0., 1., 2., 2.], index=list('abcd'))
assert_series_equal(result, expected)
def test_interp_quad(self):
tm._skip_if_no_scipy()
sq = Series([1, 4, np.nan, 16], index=[1, 2, 3, 4])
result = sq.interpolate(method='quadratic')
expected = Series([1., 4., 9., 16.], index=[1, 2, 3, 4])
assert_series_equal(result, expected)
def test_interp_scipy_basic(self):
tm._skip_if_no_scipy()
s = Series([1, 3, np.nan, 12, np.nan, 25])
# slinear
expected = Series([1., 3., 7.5, 12., 18.5, 25.])
result = s.interpolate(method='slinear')
assert_series_equal(result, expected)
result = s.interpolate(method='slinear', downcast='infer')
assert_series_equal(result, expected)
# nearest
expected = Series([1, 3, 3, 12, 12, 25])
result = s.interpolate(method='nearest')
assert_series_equal(result, expected.astype('float'))
result = s.interpolate(method='nearest', downcast='infer')
assert_series_equal(result, expected)
# zero
expected = Series([1, 3, 3, 12, 12, 25])
result = s.interpolate(method='zero')
assert_series_equal(result, expected.astype('float'))
result = s.interpolate(method='zero', downcast='infer')
assert_series_equal(result, expected)
# quadratic
expected = Series([1, 3., 6.769231, 12., 18.230769, 25.])
result = s.interpolate(method='quadratic')
assert_series_equal(result, expected)
result = s.interpolate(method='quadratic', downcast='infer')
assert_series_equal(result, expected)
# cubic
expected = Series([1., 3., 6.8, 12., 18.2, 25.])
result = s.interpolate(method='cubic')
assert_series_equal(result, expected)
def test_interp_limit(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
expected = Series([1., 3., 5., 7., np.nan, 11.])
result = s.interpolate(method='linear', limit=2)
assert_series_equal(result, expected)
def test_interp_limit_forward(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
# Provide 'forward' (the default) explicitly here.
expected = Series([1., 3., 5., 7., np.nan, 11.])
result = s.interpolate(method='linear', limit=2,
limit_direction='forward')
assert_series_equal(result, expected)
result = s.interpolate(method='linear', limit=2,
limit_direction='FORWARD')
assert_series_equal(result, expected)
def test_interp_limit_bad_direction(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
self.assertRaises(ValueError, s.interpolate, method='linear', limit=2,
limit_direction='abc')
# raises an error even if no limit is specified.
self.assertRaises(ValueError, s.interpolate, method='linear',
limit_direction='abc')
def test_interp_limit_direction(self):
# These tests are for issue #9218 -- fill NaNs in both directions.
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
expected = Series([1., 3., np.nan, 7., 9., 11.])
result = s.interpolate(method='linear', limit=2,
limit_direction='backward')
assert_series_equal(result, expected)
expected = Series([1., 3., 5., np.nan, 9., 11.])
result = s.interpolate(method='linear', limit=1,
limit_direction='both')
assert_series_equal(result, expected)
# Check that this works on a longer series of nans.
s = Series([1, 3, np.nan, np.nan, np.nan, 7, 9, np.nan, np.nan, 12,
np.nan])
expected = Series([1., 3., 4., 5., 6., 7., 9., 10., 11., 12., 12.])
result = s.interpolate(method='linear', limit=2,
limit_direction='both')
assert_series_equal(result, expected)
expected = Series([1., 3., 4., np.nan, 6., 7., 9., 10., 11., 12., 12.])
result = s.interpolate(method='linear', limit=1,
limit_direction='both')
assert_series_equal(result, expected)
def test_interp_limit_to_ends(self):
# These tests are for issue #10420 -- flow back to beginning.
s = Series([np.nan, np.nan, 5, 7, 9, np.nan])
expected = Series([5., 5., 5., 7., 9., np.nan])
result = s.interpolate(method='linear', limit=2,
limit_direction='backward')
assert_series_equal(result, expected)
expected = Series([5., 5., 5., 7., 9., 9.])
result = s.interpolate(method='linear', limit=2,
limit_direction='both')
assert_series_equal(result, expected)
def test_interp_limit_before_ends(self):
# These tests are for issue #11115 -- limit ends properly.
s = Series([np.nan, np.nan, 5, 7, np.nan, np.nan])
expected = Series([np.nan, np.nan, 5., 7., 7., np.nan])
result = s.interpolate(method='linear', limit=1,
limit_direction='forward')
assert_series_equal(result, expected)
expected = Series([np.nan, 5., 5., 7., np.nan, np.nan])
result = s.interpolate(method='linear', limit=1,
limit_direction='backward')
assert_series_equal(result, expected)
expected = Series([np.nan, 5., 5., 7., 7., np.nan])
result = s.interpolate(method='linear', limit=1,
limit_direction='both')
assert_series_equal(result, expected)
def test_interp_all_good(self):
# scipy
tm._skip_if_no_scipy()
s = Series([1, 2, 3])
result = s.interpolate(method='polynomial', order=1)
assert_series_equal(result, s)
# non-scipy
result = s.interpolate()
assert_series_equal(result, s)
def test_interp_multiIndex(self):
idx = MultiIndex.from_tuples([(0, 'a'), (1, 'b'), (2, 'c')])
s = Series([1, 2, np.nan], index=idx)
expected = s.copy()
expected.loc[2] = 2
result = s.interpolate()
assert_series_equal(result, expected)
tm._skip_if_no_scipy()
with tm.assertRaises(ValueError):
s.interpolate(method='polynomial', order=1)
def test_interp_nonmono_raise(self):
tm._skip_if_no_scipy()
s = Series([1, np.nan, 3], index=[0, 2, 1])
with tm.assertRaises(ValueError):
s.interpolate(method='krogh')
def test_interp_datetime64(self):
tm._skip_if_no_scipy()
df = Series([1, np.nan, 3], index=date_range('1/1/2000', periods=3))
result = df.interpolate(method='nearest')
expected = Series([1., 1., 3.],
index=date_range('1/1/2000', periods=3))
assert_series_equal(result, expected)
def test_interp_limit_no_nans(self):
# GH 7173
s = pd.Series([1., 2., 3.])
result = s.interpolate(limit=1)
expected = s
assert_series_equal(result, expected)
def test_describe(self):
self.series.describe()
self.ts.describe()
def test_describe_objects(self):
s = Series(['a', 'b', 'b', np.nan, np.nan, np.nan, 'c', 'd', 'a', 'a'])
result = s.describe()
expected = Series({'count': 7, 'unique': 4,
'top': 'a', 'freq': 3}, index=result.index)
assert_series_equal(result, expected)
dt = list(self.ts.index)
dt.append(dt[0])
ser = Series(dt)
rs = ser.describe()
min_date = min(dt)
max_date = max(dt)
xp = Series({'count': len(dt),
'unique': len(self.ts.index),
'first': min_date, 'last': max_date, 'freq': 2,
'top': min_date}, index=rs.index)
assert_series_equal(rs, xp)
def test_describe_empty(self):
result = pd.Series().describe()
self.assertEqual(result['count'], 0)
self.assertTrue(result.drop('count').isnull().all())
nanSeries = Series([np.nan])
nanSeries.name = 'NaN'
result = nanSeries.describe()
self.assertEqual(result['count'], 0)
self.assertTrue(result.drop('count').isnull().all())
def test_describe_none(self):
noneSeries = Series([None])
noneSeries.name = 'None'
expected = Series([0, 0], index=['count', 'unique'], name='None')
assert_series_equal(noneSeries.describe(), expected)
class TestDataFrame(tm.TestCase, Generic):
_typ = DataFrame
_comparator = lambda self, x, y: assert_frame_equal(x, y)
def test_rename_mi(self):
df = DataFrame([
11, 21, 31
], index=MultiIndex.from_tuples([("A", x) for x in ["a", "B", "c"]]))
df.rename(str.lower)
def test_nonzero_single_element(self):
# allow single item via bool method
df = DataFrame([[True]])
self.assertTrue(df.bool())
df = DataFrame([[False]])
self.assertFalse(df.bool())
df = DataFrame([[False, False]])
self.assertRaises(ValueError, lambda: df.bool())
self.assertRaises(ValueError, lambda: bool(df))
def test_get_numeric_data_preserve_dtype(self):
# get the numeric data
o = DataFrame({'A': [1, '2', 3.]})
result = o._get_numeric_data()
expected = DataFrame(index=[0, 1, 2], dtype=object)
self._compare(result, expected)
def test_interp_basic(self):
df = DataFrame({'A': [1, 2, np.nan, 4],
'B': [1, 4, 9, np.nan],
'C': [1, 2, 3, 5],
'D': list('abcd')})
expected = DataFrame({'A': [1., 2., 3., 4.],
'B': [1., 4., 9., 9.],
'C': [1, 2, 3, 5],
'D': list('abcd')})
result = df.interpolate()
assert_frame_equal(result, expected)
result = df.set_index('C').interpolate()
expected = df.set_index('C')
expected.loc[3, 'A'] = 3
expected.loc[5, 'B'] = 9
assert_frame_equal(result, expected)
def test_interp_bad_method(self):
df = DataFrame({'A': [1, 2, np.nan, 4],
'B': [1, 4, 9, np.nan],
'C': [1, 2, 3, 5],
'D': list('abcd')})
with tm.assertRaises(ValueError):
df.interpolate(method='not_a_method')
def test_interp_combo(self):
df = DataFrame({'A': [1., 2., np.nan, 4.],
'B': [1, 4, 9, np.nan],
'C': [1, 2, 3, 5],
'D': list('abcd')})
result = df['A'].interpolate()
expected = Series([1., 2., 3., 4.], name='A')
assert_series_equal(result, expected)
result = df['A'].interpolate(downcast='infer')
expected = Series([1, 2, 3, 4], name='A')
assert_series_equal(result, expected)
def test_interp_nan_idx(self):
df = DataFrame({'A': [1, 2, np.nan, 4], 'B': [np.nan, 2, 3, 4]})
df = df.set_index('A')
with tm.assertRaises(NotImplementedError):
df.interpolate(method='values')
def test_interp_various(self):
tm._skip_if_no_scipy()
df = DataFrame({'A': [1, 2, np.nan, 4, 5, np.nan, 7],
'C': [1, 2, 3, 5, 8, 13, 21]})
df = df.set_index('C')
expected = df.copy()
result = df.interpolate(method='polynomial', order=1)
expected.A.loc[3] = 2.66666667
expected.A.loc[13] = 5.76923076
assert_frame_equal(result, expected)
result = df.interpolate(method='cubic')
expected.A.loc[3] = 2.81621174
expected.A.loc[13] = 5.64146581
assert_frame_equal(result, expected)
result = df.interpolate(method='nearest')
expected.A.loc[3] = 2
expected.A.loc[13] = 5
assert_frame_equal(result, expected, check_dtype=False)
result = df.interpolate(method='quadratic')
expected.A.loc[3] = 2.82533638
expected.A.loc[13] = 6.02817974
assert_frame_equal(result, expected)
result = df.interpolate(method='slinear')
expected.A.loc[3] = 2.66666667
expected.A.loc[13] = 5.76923077
assert_frame_equal(result, expected)
result = df.interpolate(method='zero')
expected.A.loc[3] = 2.
expected.A.loc[13] = 5
assert_frame_equal(result, expected, check_dtype=False)
result = df.interpolate(method='quadratic')
expected.A.loc[3] = 2.82533638
expected.A.loc[13] = 6.02817974
assert_frame_equal(result, expected)
def test_interp_alt_scipy(self):
tm._skip_if_no_scipy()
df = DataFrame({'A': [1, 2, np.nan, 4, 5, np.nan, 7],
'C': [1, 2, 3, 5, 8, 13, 21]})
result = df.interpolate(method='barycentric')
expected = df.copy()
expected.ix[2, 'A'] = 3
expected.ix[5, 'A'] = 6
assert_frame_equal(result, expected)
result = df.interpolate(method='barycentric', downcast='infer')
assert_frame_equal(result, expected.astype(np.int64))
result = df.interpolate(method='krogh')
expectedk = df.copy()
expectedk['A'] = expected['A']
assert_frame_equal(result, expectedk)
_skip_if_no_pchip()
import scipy
result = df.interpolate(method='pchip')
expected.ix[2, 'A'] = 3
if LooseVersion(scipy.__version__) >= '0.17.0':
expected.ix[5, 'A'] = 6.0
else:
expected.ix[5, 'A'] = 6.125
assert_frame_equal(result, expected)
def test_interp_rowwise(self):
df = DataFrame({0: [1, 2, np.nan, 4],
1: [2, 3, 4, np.nan],
2: [np.nan, 4, 5, 6],
3: [4, np.nan, 6, 7],
4: [1, 2, 3, 4]})
result = df.interpolate(axis=1)
expected = df.copy()
expected.loc[3, 1] = 5
expected.loc[0, 2] = 3
expected.loc[1, 3] = 3
expected[4] = expected[4].astype(np.float64)
assert_frame_equal(result, expected)
# scipy route
tm._skip_if_no_scipy()
result = df.interpolate(axis=1, method='values')
assert_frame_equal(result, expected)
result = df.interpolate(axis=0)
expected = df.interpolate()
assert_frame_equal(result, expected)
def test_rowwise_alt(self):
df = DataFrame({0: [0, .5, 1., np.nan, 4, 8, np.nan, np.nan, 64],
1: [1, 2, 3, 4, 3, 2, 1, 0, -1]})
df.interpolate(axis=0)
def test_interp_leading_nans(self):
df = DataFrame({"A": [np.nan, np.nan, .5, .25, 0],
"B": [np.nan, -3, -3.5, np.nan, -4]})
result = df.interpolate()
expected = df.copy()
expected['B'].loc[3] = -3.75
assert_frame_equal(result, expected)
tm._skip_if_no_scipy()
result = df.interpolate(method='polynomial', order=1)
assert_frame_equal(result, expected)
def test_interp_raise_on_only_mixed(self):
df = DataFrame({'A': [1, 2, np.nan, 4],
'B': ['a', 'b', 'c', 'd'],
'C': [np.nan, 2, 5, 7],
'D': [np.nan, np.nan, 9, 9],
'E': [1, 2, 3, 4]})
with tm.assertRaises(TypeError):
df.interpolate(axis=1)
def test_interp_inplace(self):
df = DataFrame({'a': [1., 2., np.nan, 4.]})
expected = DataFrame({'a': [1., 2., 3., 4.]})
result = df.copy()
result['a'].interpolate(inplace=True)
assert_frame_equal(result, expected)
result = df.copy()
result['a'].interpolate(inplace=True, downcast='infer')
assert_frame_equal(result, expected.astype('int64'))
def test_interp_inplace_row(self):
# GH 10395
result = DataFrame({'a': [1., 2., 3., 4.],
'b': [np.nan, 2., 3., 4.],
'c': [3, 2, 2, 2]})
expected = result.interpolate(method='linear', axis=1, inplace=False)
result.interpolate(method='linear', axis=1, inplace=True)
assert_frame_equal(result, expected)
def test_interp_ignore_all_good(self):
# GH
df = DataFrame({'A': [1, 2, np.nan, 4],
'B': [1, 2, 3, 4],
'C': [1., 2., np.nan, 4.],
'D': [1., 2., 3., 4.]})
expected = DataFrame({'A': np.array(
[1, 2, 3, 4], dtype='float64'),
'B': np.array(
[1, 2, 3, 4], dtype='int64'),
'C': np.array(
[1., 2., 3, 4.], dtype='float64'),
'D': np.array(
[1., 2., 3., 4.], dtype='float64')})
result = df.interpolate(downcast=None)
assert_frame_equal(result, expected)
# all good
result = df[['B', 'D']].interpolate(downcast=None)
assert_frame_equal(result, df[['B', 'D']])
def test_describe(self):
tm.makeDataFrame().describe()
tm.makeMixedDataFrame().describe()
tm.makeTimeDataFrame().describe()
def test_describe_percentiles_percent_or_raw(self):
msg = 'percentiles should all be in the interval \\[0, 1\\]'
df = tm.makeDataFrame()
with tm.assertRaisesRegexp(ValueError, msg):
df.describe(percentiles=[10, 50, 100])
with tm.assertRaisesRegexp(ValueError, msg):
df.describe(percentiles=[2])
with tm.assertRaisesRegexp(ValueError, msg):
df.describe(percentiles=[-2])
def test_describe_percentiles_equivalence(self):
df = tm.makeDataFrame()
d1 = df.describe()
d2 = df.describe(percentiles=[.25, .75])
assert_frame_equal(d1, d2)
def test_describe_percentiles_insert_median(self):
df = tm.makeDataFrame()
d1 = df.describe(percentiles=[.25, .75])
d2 = df.describe(percentiles=[.25, .5, .75])
assert_frame_equal(d1, d2)
self.assertTrue('25%' in d1.index)
self.assertTrue('75%' in d2.index)
# none above
d1 = df.describe(percentiles=[.25, .45])
d2 = df.describe(percentiles=[.25, .45, .5])
assert_frame_equal(d1, d2)
self.assertTrue('25%' in d1.index)
self.assertTrue('45%' in d2.index)
# none below
d1 = df.describe(percentiles=[.75, 1])
d2 = df.describe(percentiles=[.5, .75, 1])
assert_frame_equal(d1, d2)
self.assertTrue('75%' in d1.index)
self.assertTrue('100%' in d2.index)
# edge
d1 = df.describe(percentiles=[0, 1])
d2 = df.describe(percentiles=[0, .5, 1])
assert_frame_equal(d1, d2)
self.assertTrue('0%' in d1.index)
self.assertTrue('100%' in d2.index)
def test_describe_no_numeric(self):
df = DataFrame({'A': ['foo', 'foo', 'bar'] * 8,
'B': ['a', 'b', 'c', 'd'] * 6})
desc = df.describe()
expected = DataFrame(dict((k, v.describe())
for k, v in compat.iteritems(df)),
columns=df.columns)
assert_frame_equal(desc, expected)
ts = tm.makeTimeSeries()
df = DataFrame({'time': ts.index})
desc = df.describe()
self.assertEqual(desc.time['first'], min(ts.index))
def test_describe_empty_int_columns(self):
df = DataFrame([[0, 1], [1, 2]])
desc = df[df[0] < 0].describe() # works
assert_series_equal(desc.xs('count'),
Series([0, 0], dtype=float, name='count'))
self.assertTrue(isnull(desc.ix[1:]).all().all())
def test_describe_objects(self):
df = DataFrame({"C1": ['a', 'a', 'c'], "C2": ['d', 'd', 'f']})
result = df.describe()
expected = DataFrame({"C1": [3, 2, 'a', 2], "C2": [3, 2, 'd', 2]},
index=['count', 'unique', 'top', 'freq'])
assert_frame_equal(result, expected)
df = DataFrame({"C1": pd.date_range('2010-01-01', periods=4, freq='D')
})
df.loc[4] = pd.Timestamp('2010-01-04')
result = df.describe()
expected = DataFrame({"C1": [5, 4, pd.Timestamp('2010-01-04'), 2,
pd.Timestamp('2010-01-01'),
pd.Timestamp('2010-01-04')]},
index=['count', 'unique', 'top', 'freq',
'first', 'last'])
assert_frame_equal(result, expected)
# mix time and str
df['C2'] = ['a', 'a', 'b', 'c', 'a']
result = df.describe()
expected['C2'] = [5, 3, 'a', 3, np.nan, np.nan]
assert_frame_equal(result, expected)
# just str
expected = DataFrame({'C2': [5, 3, 'a', 4]},
index=['count', 'unique', 'top', 'freq'])
result = df[['C2']].describe()
# mix of time, str, numeric
df['C3'] = [2, 4, 6, 8, 2]
result = df.describe()
expected = DataFrame({"C3": [5., 4.4, 2.607681, 2., 2., 4., 6., 8.]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
assert_frame_equal(result, expected)
assert_frame_equal(df.describe(), df[['C3']].describe())
assert_frame_equal(df[['C1', 'C3']].describe(), df[['C3']].describe())
assert_frame_equal(df[['C2', 'C3']].describe(), df[['C3']].describe())
def test_describe_typefiltering(self):
df = DataFrame({'catA': ['foo', 'foo', 'bar'] * 8,
'catB': ['a', 'b', 'c', 'd'] * 6,
'numC': np.arange(24, dtype='int64'),
'numD': np.arange(24.) + .5,
'ts': tm.makeTimeSeries()[:24].index})
descN = df.describe()
expected_cols = ['numC', 'numD', ]
expected = DataFrame(dict((k, df[k].describe())
for k in expected_cols),
columns=expected_cols)
assert_frame_equal(descN, expected)
desc = df.describe(include=['number'])
assert_frame_equal(desc, descN)
desc = df.describe(exclude=['object', 'datetime'])
assert_frame_equal(desc, descN)
desc = df.describe(include=['float'])
assert_frame_equal(desc, descN.drop('numC', 1))
descC = df.describe(include=['O'])
expected_cols = ['catA', 'catB']
expected = DataFrame(dict((k, df[k].describe())
for k in expected_cols),
columns=expected_cols)
assert_frame_equal(descC, expected)
descD = df.describe(include=['datetime'])
assert_series_equal(descD.ts, df.ts.describe())
desc = df.describe(include=['object', 'number', 'datetime'])
assert_frame_equal(desc.loc[:, ["numC", "numD"]].dropna(), descN)
assert_frame_equal(desc.loc[:, ["catA", "catB"]].dropna(), descC)
        descDs = descD.sort_index()  # the index order changes for mixed-types
assert_frame_equal(desc.loc[:, "ts":].dropna().sort_index(), descDs)
desc = df.loc[:, 'catA':'catB'].describe(include='all')
assert_frame_equal(desc, descC)
desc = df.loc[:, 'numC':'numD'].describe(include='all')
assert_frame_equal(desc, descN)
desc = df.describe(percentiles=[], include='all')
cnt = Series(data=[4, 4, 6, 6, 6],
index=['catA', 'catB', 'numC', 'numD', 'ts'])
assert_series_equal(desc.count(), cnt)
self.assertTrue('count' in desc.index)
self.assertTrue('unique' in desc.index)
self.assertTrue('50%' in desc.index)
self.assertTrue('first' in desc.index)
desc = df.drop("ts", 1).describe(percentiles=[], include='all')
assert_series_equal(desc.count(), cnt.drop("ts"))
self.assertTrue('first' not in desc.index)
desc = df.drop(["numC", "numD"], 1).describe(percentiles=[],
include='all')
assert_series_equal(desc.count(), cnt.drop(["numC", "numD"]))
self.assertTrue('50%' not in desc.index)
def test_describe_typefiltering_category_bool(self):
df = DataFrame({'A_cat': pd.Categorical(['foo', 'foo', 'bar'] * 8),
'B_str': ['a', 'b', 'c', 'd'] * 6,
'C_bool': [True] * 12 + [False] * 12,
'D_num': np.arange(24.) + .5,
'E_ts': tm.makeTimeSeries()[:24].index})
# bool is considered numeric in describe, although not an np.number
desc = df.describe()
expected_cols = ['C_bool', 'D_num']
expected = DataFrame(dict((k, df[k].describe())
for k in expected_cols),
columns=expected_cols)
assert_frame_equal(desc, expected)
desc = df.describe(include=["category"])
self.assertTrue(desc.columns.tolist() == ["A_cat"])
# 'all' includes numpy-dtypes + category
desc1 = df.describe(include="all")
desc2 = df.describe(include=[np.generic, "category"])
assert_frame_equal(desc1, desc2)
def test_describe_timedelta(self):
df = DataFrame({"td": pd.to_timedelta(np.arange(24) % 20, "D")})
self.assertTrue(df.describe().loc["mean"][0] == pd.to_timedelta(
"8d4h"))
def test_describe_typefiltering_dupcol(self):
df = DataFrame({'catA': ['foo', 'foo', 'bar'] * 8,
'catB': ['a', 'b', 'c', 'd'] * 6,
'numC': np.arange(24),
'numD': np.arange(24.) + .5,
'ts': tm.makeTimeSeries()[:24].index})
s = df.describe(include='all').shape[1]
df = pd.concat([df, df], axis=1)
s2 = df.describe(include='all').shape[1]
self.assertTrue(s2 == 2 * s)
def test_describe_typefiltering_groupby(self):
df = DataFrame({'catA': ['foo', 'foo', 'bar'] * 8,
'catB': ['a', 'b', 'c', 'd'] * 6,
'numC': np.arange(24),
'numD': np.arange(24.) + .5,
'ts': tm.makeTimeSeries()[:24].index})
G = df.groupby('catA')
self.assertTrue(G.describe(include=['number']).shape == (16, 2))
self.assertTrue(G.describe(include=['number', 'object']).shape == (22,
3))
self.assertTrue(G.describe(include='all').shape == (26, 4))
def test_describe_multi_index_df_column_names(self):
""" Test that column names persist after the describe operation."""
df = pd.DataFrame(
{'A': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'three', 'two', 'two', 'one', 'three'],
'C': np.random.randn(8),
'D': np.random.randn(8)})
# GH 11517
# test for hierarchical index
hierarchical_index_df = df.groupby(['A', 'B']).mean().T
self.assertTrue(hierarchical_index_df.columns.names == ['A', 'B'])
self.assertTrue(hierarchical_index_df.describe().columns.names ==
['A', 'B'])
# test for non-hierarchical index
non_hierarchical_index_df = df.groupby(['A']).mean().T
self.assertTrue(non_hierarchical_index_df.columns.names == ['A'])
self.assertTrue(non_hierarchical_index_df.describe().columns.names ==
['A'])
def test_no_order(self):
tm._skip_if_no_scipy()
s = Series([0, 1, np.nan, 3])
with tm.assertRaises(ValueError):
s.interpolate(method='polynomial')
with tm.assertRaises(ValueError):
s.interpolate(method='spline')
def test_spline(self):
tm._skip_if_no_scipy()
s = Series([1, 2, np.nan, 4, 5, np.nan, 7])
result = s.interpolate(method='spline', order=1)
expected = Series([1., 2., 3., 4., 5., 6., 7.])
assert_series_equal(result, expected)
def test_spline_extrapolate(self):
tm.skip_if_no_package(
'scipy', '0.15',
'setting ext on scipy.interpolate.UnivariateSpline')
s = Series([1, 2, 3, 4, np.nan, 6, np.nan])
result3 = s.interpolate(method='spline', order=1, ext=3)
expected3 = Series([1., 2., 3., 4., 5., 6., 6.])
assert_series_equal(result3, expected3)
result1 = s.interpolate(method='spline', order=1, ext=0)
expected1 = Series([1., 2., 3., 4., 5., 6., 7.])
assert_series_equal(result1, expected1)
def test_spline_smooth(self):
tm._skip_if_no_scipy()
s = Series([1, 2, np.nan, 4, 5.1, np.nan, 7])
self.assertNotEqual(s.interpolate(method='spline', order=3, s=0)[5],
s.interpolate(method='spline', order=3)[5])
def test_spline_interpolation(self):
tm._skip_if_no_scipy()
s = Series(np.arange(10) ** 2)
s[np.random.randint(0, 9, 3)] = np.nan
result1 = s.interpolate(method='spline', order=1)
expected1 = s.interpolate(method='spline', order=1)
assert_series_equal(result1, expected1)
# GH #10633
def test_spline_error(self):
tm._skip_if_no_scipy()
s = pd.Series(np.arange(10) ** 2)
        s[np.random.randint(0, 9, 3)] = np.nan
# This file is part of the Astrometry.net suite.
# Licensed under a 3-clause BSD style license - see LICENSE
from __future__ import print_function
import matplotlib.cm
import matplotlib.colors
import matplotlib.patches
import numpy as np
import pylab as plt
from matplotlib.ticker import FixedFormatter
import functools
class NanColormap(matplotlib.colors.Colormap):
'''
A Colormap that wraps another colormap, but replaces non-finite values
with a fixed color.
'''
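    # Illustrative usage sketch (the names below are assumptions, not taken from this file):
    #   cmap = NanColormap(matplotlib.cm.viridis, 'gray')
    #   rgba = cmap(np.ma.masked_invalid(values))  # masked / non-finite entries come out gray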
def __init__(self, cmap, nancolor):
self.cmap = cmap
self.nanrgba = matplotlib.colors.colorConverter.to_rgba(nancolor)
def __call__(self, data, **kwargs):
rgba = self.cmap(data, **kwargs)
# 'data' is a MaskedArray, apparently...
if np.all(data.mask == False):
return rgba
        iy, ix = np.nonzero(data.mask)
#!/usr/bin/env python3
import numpy as np
from scipy.stats import norm
import time
import multiprocessing as mp
from sklearn import mixture
def get_gmm_from_pf(pf, n_components):
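    # Resample Np particle indices (with replacement) according to the particle
    # weights W, then fit a diagonal-covariance Gaussian mixture to the resampled states.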
s = np.random.choice(pf.Np, pf.Np, p = pf.W)
X = pf.X[s]
gmm = mixture.GaussianMixture(n_components=n_components, covariance_type='diag', max_iter=10, tol = 3e-3).fit(X)
return gmm
def gmm_worker(arg):
pfs, ii ,n_components = arg
gmm = get_gmm_from_pf(pfs[ii],n_components)
return gmm
def get_fuzed_prob(x, gmms, A):
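    # Weighted geometric-mean fusion: multiply each GMM's density raised to its
    # weight A[ii]; gmm.score() returns a log-density, hence the exp().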
f = 1
for ii in range(len(gmms)):
f = f * (np.exp(gmms[ii].score(x.reshape(1, -1)))**A[ii])
return f
def matropolis_hasting(pf, gmms, A):
    new_particles = np.zeros_like(pf.X)
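    # NOTE: the rest of this function is not shown here; a plausible continuation
    # (an assumption, not the original code) would be a standard Metropolis-Hastings
    # sweep over the particles, e.g.:
    #   for ii in range(pf.Np):
    #       x_prop = pf.X[ii] + np.random.normal(0, 1, size=pf.X[ii].shape)
    #       a = get_fuzed_prob(x_prop, gmms, A) / max(get_fuzed_prob(pf.X[ii], gmms, A), 1e-300)
    #       new_particles[ii] = x_prop if np.random.rand() < a else pf.X[ii]
    #   return new_particles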
"""test_stimulustools.py
Function definitions for testing the `pyret.stimulustools` module.
(C) 2016 The Baccus Lab
"""
import pytest
import numpy as np
from pyret import stimulustools
def test_resampling_1d():
"""Test up- and down-sampling a 1D stimulus."""
np.random.seed(0)
stim_size = 1000
resample_factor = 3
dt = 0.1
stim = np.random.randn(stim_size,)
time = np.arange(stim_size) * dt
stim_us, time_us = stimulustools.upsample(
stim, resample_factor, time=time)
stim_ds, time_ds = stimulustools.downsample(
stim_us, resample_factor, time=time_us)
assert np.all(stim == stim_us[::resample_factor]), 'Upsampling failed'
assert np.all(stim == stim_ds), 'Downsampling failed'
_, time_us = stimulustools.upsample(stim, resample_factor)
assert time_us is None
def test_resampling_2d():
"""Test up- and down-sampling a 2D stimulus."""
np.random.seed(0)
stim_size = (1000, 5)
resample_factor = 3
dt = 0.1
stim = np.random.randn(*stim_size)
time = np.arange(stim_size[0]) * dt
stim_us, time_us = stimulustools.upsample(
stim, resample_factor, time=time)
stim_ds, time_ds = stimulustools.downsample(
stim_us, resample_factor, time=time_us)
assert np.all(stim == stim_us[::resample_factor, ...]), 'Upsampling failed'
assert np.all(stim == stim_ds), 'Downsampling failed'
def test_slicestim_raises():
"""Verify slicestim() raises correct exceptions"""
with pytest.raises(ValueError):
stimulustools.slicestim(np.zeros(10,), 0)
with pytest.raises(ValueError):
stimulustools.slicestim(np.zeros(10,), 11)
with pytest.raises(ValueError):
stimulustools.slicestim(np.zeros(10,), 1.5)
def test_slicestim_shape():
shape = (10, 3, 3)
history = 2
stim = np.zeros(shape)
assert (stimulustools.slicestim(stim, history).shape ==
(shape[0] - history + 1, history, shape[1], shape[2]))
def test_slicestim_1d():
"""Test slicing a 1D stimulus into overlapping segments."""
    np.random.seed(0)
import matplotlib.pyplot as plt
import numpy as np
def plot_linpol(T, P, field, fixvminmax=True, ax=None):
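    # Plots the degree of linear polarization Pi = sqrt(S1^2 + S2^2) / S0 of a
    # far-field given on a (theta, phi) grid, using the spherical unit vectors
    # e_theta and e_phi as the polarization basis.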
# radial unit vec
# e_r = np.zeros(P.shape + (3,))
# e_r[:, :, 0] = np.sin(T)*np.cos(P)
# e_r[:, :, 1] = np.sin(T)*np.sin(P)
# e_r[:, :, 2] = np.cos(T)
# theta unit vec
e_t = np.zeros(P.shape + (3,))
e_t[:, :, 0] = np.cos(T)*np.cos(P)
e_t[:, :, 1] = np.cos(T)*np.sin(P)
e_t[:, :, 2] = -np.sin(T)
# phi unit vec
e_p = np.zeros(P.shape + (3,))
e_p[:, :, 0] = -np.sin(P)
e_p[:, :, 1] = np.cos(P)
# Er = sum([field[:, :, i]*e_r[:, :, i] for i in range(3)])
Et = sum([field[:, :, i]*e_t[:, :, i] for i in range(3)])
Ep = sum([field[:, :, i]*e_p[:, :, i] for i in range(3)])
def intcalc(f):
# returns < (f.real(t))^2 >
return np.abs(f)**2
allint = np.sum(intcalc(field), axis=2)
norm = allint.max()
S0 = (intcalc(Et) + intcalc(Ep))/norm
# checks that we are in the far-field
import numpy.testing as nt
nt.assert_allclose(allint/norm, S0, atol=1e-10)
S1 = (intcalc(Et) - intcalc(Ep))/norm
# fig, ax = plt.subplots()
# cax = ax.pcolormesh(np.degrees(T*np.cos(P)), np.degrees(T*np.sin(P)),
# intcalc(Er)/norm)
# plt.colorbar(cax)
# ax.set_title("abs Er")
# fig, ax = plt.subplots()
# cax = ax.pcolormesh(np.degrees(T*np.cos(P)), np.degrees(T*np.sin(P)),
# (intcalc(Et) + intcalc(Ep))/norm)
# plt.colorbar(cax)
# ax.set_title("abs Et + abs Ep")
# self.show()
Ep45 = np.cos(np.radians(45))*Et + np.sin(np.radians(45))*Ep
Epm45 = np.cos(np.radians(-45))*Et + np.sin(np.radians(-45))*Ep
S2 = (intcalc(Ep45) - intcalc(Epm45))/norm
Pi = np.sqrt(S1**2 + S2**2)/S0
if ax is None:
fig, ax = plt.subplots()
if fixvminmax:
kwargs = dict(vmin=0, vmax=1)
else:
kwargs = {}
cax = ax.pcolormesh(np.degrees(T*np.cos(P)), np.degrees(T*np.sin(P)),
Pi, **kwargs)
ax.set_aspect('equal')
ax.set_xlabel('theta_x [deg]')
ax.set_ylabel('theta_y [deg]')
plt.colorbar(cax, ax=ax, orientation='horizontal')
def plot_linpol_plane(X, Y, field, fixvminmax=True, ax=None):
def intcalc(f):
# returns < (f.real(t))^2 >
return np.abs(f)**2
Ex, Ey = field[:, :, 0], field[:, :, 1]
allint = np.sum(intcalc(field), axis=2)
norm = allint.max()
# or allint?
S0 = (intcalc(Ex) + intcalc(Ey))/norm
S1 = (intcalc(Ex) - intcalc(Ey))/norm
Ep45 = np.cos(np.radians(45))*Ex + np.sin(np.radians(45))*Ey
    Epm45 = np.cos(np.radians(-45))*Ex + np.sin(np.radians(-45))*Ey
"""
<NAME>, Dept. of Physics, University of Toronto, 2020
Final project for PHY 407: Computational Physics. Instr. <NAME>.
This script uses the variational method to solve the 3D Hydrogen molecule
"""
# imports
import numpy as np
import random
import matplotlib.pyplot as plt
# get the problem class from vmc.py
from vmc import problem
# set number of monte carlo steps
N = 500
# create an instance of the "problem" class from vmc.py. This means we need
# to give it a step width for the derivatives, a step width for the markov
# chain.
helium = problem(0.001, 4, N, 1.5)
# helper function to compute norm of a vector
def norm(r):
return np.sqrt(np.sum(r**2))
# The potential. It depends on the alpha parameters this time
def V(alpha, R):
d = alpha[0]
R1 = np.array([0, 0, d / 2])
    R2 = np.array([0, 0, -d / 2])
import torch
import torch.nn as nn
import numpy as np
from spinup.models.pytorch.resnet import ResNet
class MultiModalNet(nn.Module):
"""
A model that handles multiple different input modalities.
The different inputs are specified by the first entry in input_sizes that should be of type dict.
Typical image inputs (shaped (H,W,C) or (H,W)) will be fed through a convolution, other data through a
fully connected layer before everything gets concatenated and fed through a ResNet.
"""
def __init__(self, input_sizes, conv_sizes, hidden_sizes, num_inner_res_blocks, output_sizes, activation, output_activation, noisy_linear_layers, dropout_rate):
super().__init__()
if isinstance(input_sizes[0], dict):
sizes_dict = input_sizes[0]
self.dict_space = True
else:
sizes_dict = {"obs":np.array(input_sizes[0])}
self.dict_space = False
self.permute_required = set()
self.expand_dim_required = set()
input_dict = {}
self.modalities = []
for modality, size in sizes_dict.items():
self.modalities.append(modality)
if type(size) == list:
size = np.array(size)
if np.ndim(size) == 0:
input_dict[modality] = nn.Linear(size, input_sizes[1])
elif size.shape[0] in [2, 3]:
# let's treat 2d and 3d input tensors as images.
if size.shape[0] == 3:
if size[2] in [1,3]:
in_channels = size[2]
self.permute_required.add(modality)
else:
# Otherwise, the color channel hopefully already is in the first position, as pytorch requires it.
in_channels = size[0]
else:
in_channels = 1
self.expand_dim_required.add(modality)
specialized_sub_net = []
conv_out_size = np.array(size[:-1]) if modality in self.permute_required else np.array(size[1:]) # See section "Shape" at https://pytorch.org/docs/stable/generated/torch.nn.Conv2d.html
for nbr_filters, kernel_size, stride, padding, dilation in conv_sizes:
specialized_sub_net.append(nn.Conv2d(in_channels=in_channels, out_channels=nbr_filters, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation))
specialized_sub_net.append(activation())
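                    # Track the spatial output size using the standard Conv2d formula:
                    # out = floor((in + 2*padding - dilation*(kernel_size - 1) - 1) / stride + 1)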
conv_out_size = np.floor((conv_out_size + 2 * padding - dilation * (kernel_size - 1) - 1)/stride + 1)
in_channels = nbr_filters
specialized_sub_net.append(nn.Flatten())
                # NOTE: the tail of this call is reconstructed; the output width
                # input_sizes[1] is an assumption chosen to mirror the non-image branch above.
                specialized_sub_net.append(nn.Linear(int(np.prod(conv_out_size)) * in_channels,
                                                     input_sizes[1]))
import networkx
import numpy
import scipy
from .base_plotable_model import BasePlotableModel
class SEIRSNetworkModel(BasePlotableModel):
"""
A class to simulate the SEIRS Stochastic Network Model
======================================================
Params:
G Network adjacency matrix (numpy array) or Networkx graph object.
beta Rate of transmission (global interactions)
beta_local Rate(s) of transmission between adjacent individuals (optional)
sigma Rate of progression to infectious state (inverse of latent period)
gamma Rate of recovery (inverse of symptomatic infectious period)
mu_I Rate of infection-related death
xi Rate of re-susceptibility (upon recovery)
mu_0 Rate of baseline death
nu Rate of baseline birth
p Probability of individuals interacting with global population
G_Q Quarantine adjacency matrix (numpy array) or Networkx graph object.
beta_Q Rate of transmission for isolated individuals (global interactions)
beta_Q_local Rate(s) of transmission (exposure) for adjacent isolated individuals (optional)
sigma_Q Rate of progression to infectious state for isolated individuals
gamma_Q Rate of recovery for isolated individuals
mu_Q Rate of infection-related death for isolated individuals
q Probability of isolated individuals interacting with global population
isolation_time Time to remain in isolation upon positive test, self-isolation, etc.
theta_E Rate of random testing for exposed individuals
theta_I Rate of random testing for infectious individuals
phi_E Rate of testing when a close contact has tested positive for exposed individuals
phi_I Rate of testing when a close contact has tested positive for infectious individuals
psi_E Probability of positive test for exposed individuals
psi_I Probability of positive test for infectious individuals
initE Initial number of exposed individuals
initI Initial number of infectious individuals
initR Initial number of recovered individuals
initF Initial number of infection-related fatalities
initQ_S Initial number of isolated susceptible individuals
initQ_E Initial number of isolated exposed individuals
initQ_I Initial number of isolated infectious individuals
initQ_R Initial number of isolated recovered individuals
(all remaining nodes initialized susceptible)
"""
plotting_number_property = "numNodes"
"""Property to access the number to base plotting on."""
def __init__(
self,
G,
beta,
sigma,
gamma,
mu_I=0,
alpha=1.0,
xi=0,
mu_0=0,
nu=0,
f=0,
p=0,
beta_local=None,
beta_pairwise_mode="infected",
delta=None,
delta_pairwise_mode=None,
G_Q=None,
beta_Q=None,
beta_Q_local=None,
sigma_Q=None,
gamma_Q=None,
mu_Q=None,
alpha_Q=None,
delta_Q=None,
theta_E=0,
theta_I=0,
phi_E=0,
phi_I=0,
psi_E=1,
psi_I=1,
q=0,
isolation_time=14,
initE=0,
initI=0,
initR=0,
initF=0,
initQ_E=0,
initQ_I=0,
transition_mode="exponential_rates",
node_groups=None,
store_Xseries=False,
seed=None,
):
if seed is not None:
numpy.random.seed(seed)
self.seed = seed
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Model Parameters:
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.parameters = {
"G": G,
"G_Q": G_Q,
"beta": beta,
"sigma": sigma,
"gamma": gamma,
"mu_I": mu_I,
"xi": xi,
"mu_0": mu_0,
"nu": nu,
"f": f,
"p": p,
"beta_local": beta_local,
"beta_pairwise_mode": beta_pairwise_mode,
"alpha": alpha,
"delta": delta,
"delta_pairwise_mode": delta_pairwise_mode,
"beta_Q": beta_Q,
"beta_Q_local": beta_Q_local,
"sigma_Q": sigma_Q,
"gamma_Q": gamma_Q,
"mu_Q": mu_Q,
"alpha_Q": alpha_Q,
"delta_Q": delta_Q,
"theta_E": theta_E,
"theta_I": theta_I,
"phi_E": phi_E,
"phi_I": phi_I,
"psi_E": psi_E,
"psi_I": psi_I,
"q": q,
"isolation_time": isolation_time,
"initE": initE,
"initI": initI,
"initR": initR,
"initF": initF,
"initQ_E": initQ_E,
"initQ_I": initQ_I,
}
self.update_parameters()
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Each node can undergo 4-6 transitions (sans vitality/re-susceptibility returns to S state),
# so there are ~numNodes*6 events/timesteps expected; initialize numNodes*6 timestep slots to start
# (will be expanded during run if needed for some reason)
self.tseries = numpy.zeros(6 * self.numNodes)
self.numS = numpy.zeros(6 * self.numNodes)
self.numE = numpy.zeros(6 * self.numNodes)
self.numI = numpy.zeros(6 * self.numNodes)
self.numR = numpy.zeros(6 * self.numNodes)
self.numF = numpy.zeros(6 * self.numNodes)
self.numQ_E = numpy.zeros(6 * self.numNodes)
self.numQ_I = numpy.zeros(6 * self.numNodes)
self.N = numpy.zeros(6 * self.numNodes)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Initialize Timekeeping:
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.t = 0
self.tmax = 0 # will be set when run() is called
self.tidx = 0
self.tseries[0] = 0
# Vectors holding the time that each node has been in a given state or in isolation:
self.timer_state = numpy.zeros((self.numNodes, 1))
self.timer_isolation = numpy.zeros(self.numNodes)
self.isolationTime = isolation_time
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Initialize Counts of individuals with each state:
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.numE[0] = int(initE)
self.numI[0] = int(initI)
self.numR[0] = int(initR)
self.numF[0] = int(initF)
self.numQ_E[0] = int(initQ_E)
self.numQ_I[0] = int(initQ_I)
self.numS[0] = (
self.numNodes
- self.numE[0]
- self.numI[0]
- self.numR[0]
- self.numQ_E[0]
- self.numQ_I[0]
- self.numF[0]
)
self.N[0] = self.numNodes - self.numF[0]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Node states:
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.S = 1
self.E = 2
self.I = 3
self.R = 4
self.F = 5
self.Q_E = 6
self.Q_I = 7
self.X = numpy.array(
[self.S] * int(self.numS[0])
+ [self.E] * int(self.numE[0])
+ [self.I] * int(self.numI[0])
+ [self.R] * int(self.numR[0])
+ [self.F] * int(self.numF[0])
+ [self.Q_E] * int(self.numQ_E[0])
+ [self.Q_I] * int(self.numQ_I[0])
).reshape((self.numNodes, 1))
numpy.random.shuffle(self.X)
self.store_Xseries = store_Xseries
if store_Xseries:
self.Xseries = numpy.zeros(
shape=(6 * self.numNodes, self.numNodes), dtype="uint8"
)
self.Xseries[0, :] = self.X.T
self.transitions = {
"StoE": {"currentState": self.S, "newState": self.E},
"EtoI": {"currentState": self.E, "newState": self.I},
"ItoR": {"currentState": self.I, "newState": self.R},
"ItoF": {"currentState": self.I, "newState": self.F},
"RtoS": {"currentState": self.R, "newState": self.S},
"EtoQE": {"currentState": self.E, "newState": self.Q_E},
"ItoQI": {"currentState": self.I, "newState": self.Q_I},
"QEtoQI": {"currentState": self.Q_E, "newState": self.Q_I},
"QItoR": {"currentState": self.Q_I, "newState": self.R},
"QItoF": {"currentState": self.Q_I, "newState": self.F},
"_toS": {"currentState": True, "newState": self.S},
}
self.transition_mode = transition_mode
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Initialize other node metadata:
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.tested = numpy.array([False] * self.numNodes).reshape((self.numNodes, 1))
self.positive = numpy.array([False] * self.numNodes).reshape((self.numNodes, 1))
self.numTested = numpy.zeros(6 * self.numNodes)
self.numPositive = numpy.zeros(6 * self.numNodes)
self.testedInCurrentState = numpy.array([False] * self.numNodes).reshape(
(self.numNodes, 1)
)
self.infectionsLog = []
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Initialize node subgroup data series:
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.nodeGroupData = None
if node_groups:
self.nodeGroupData = {}
for groupName, nodeList in node_groups.items():
self.nodeGroupData[groupName] = {
"nodes": numpy.array(nodeList),
"mask": numpy.isin(range(self.numNodes), nodeList).reshape(
(self.numNodes, 1)
),
}
self.nodeGroupData[groupName]["numS"] = numpy.zeros(6 * self.numNodes)
self.nodeGroupData[groupName]["numE"] = numpy.zeros(6 * self.numNodes)
self.nodeGroupData[groupName]["numI"] = numpy.zeros(6 * self.numNodes)
self.nodeGroupData[groupName]["numR"] = numpy.zeros(6 * self.numNodes)
self.nodeGroupData[groupName]["numF"] = numpy.zeros(6 * self.numNodes)
self.nodeGroupData[groupName]["numQ_E"] = numpy.zeros(6 * self.numNodes)
self.nodeGroupData[groupName]["numQ_I"] = numpy.zeros(6 * self.numNodes)
self.nodeGroupData[groupName]["N"] = numpy.zeros(6 * self.numNodes)
self.nodeGroupData[groupName]["numPositive"] = numpy.zeros(
6 * self.numNodes
)
self.nodeGroupData[groupName]["numTested"] = numpy.zeros(
6 * self.numNodes
)
self.nodeGroupData[groupName]["numS"][0] = numpy.count_nonzero(
self.nodeGroupData[groupName]["mask"] * self.X == self.S
)
self.nodeGroupData[groupName]["numE"][0] = numpy.count_nonzero(
self.nodeGroupData[groupName]["mask"] * self.X == self.E
)
self.nodeGroupData[groupName]["numI"][0] = numpy.count_nonzero(
self.nodeGroupData[groupName]["mask"] * self.X == self.I
)
self.nodeGroupData[groupName]["numR"][0] = numpy.count_nonzero(
self.nodeGroupData[groupName]["mask"] * self.X == self.R
)
self.nodeGroupData[groupName]["numF"][0] = numpy.count_nonzero(
self.nodeGroupData[groupName]["mask"] * self.X == self.F
)
self.nodeGroupData[groupName]["numQ_E"][0] = numpy.count_nonzero(
self.nodeGroupData[groupName]["mask"] * self.X == self.Q_E
)
self.nodeGroupData[groupName]["numQ_I"][0] = numpy.count_nonzero(
self.nodeGroupData[groupName]["mask"] * self.X == self.Q_I
)
self.nodeGroupData[groupName]["N"][0] = self.numNodes - self.numF[0]
def update_parameters(self):
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Model graphs:
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.G = self.parameters["G"]
# Adjacency matrix:
if type(self.G) == numpy.ndarray:
self.A = scipy.sparse.csr_matrix(self.G)
elif type(self.G) == networkx.classes.graph.Graph:
self.A = networkx.adj_matrix(
self.G
) # adj_matrix gives scipy.sparse csr_matrix
else:
raise BaseException("Input an adjacency matrix or networkx object only.")
self.numNodes = int(self.A.shape[1])
self.degree = numpy.asarray(self.node_degrees(self.A)).astype(float)
# ----------------------------------------
if self.parameters["G_Q"] is None:
self.G_Q = self.G # If no Q graph is provided, use G in its place
else:
self.G_Q = self.parameters["G_Q"]
# Quarantine Adjacency matrix:
if type(self.G_Q) == numpy.ndarray:
self.A_Q = scipy.sparse.csr_matrix(self.G_Q)
elif type(self.G_Q) == networkx.classes.graph.Graph:
self.A_Q = networkx.adj_matrix(
self.G_Q
) # adj_matrix gives scipy.sparse csr_matrix
else:
raise BaseException("Input an adjacency matrix or networkx object only.")
self.numNodes_Q = int(self.A_Q.shape[1])
self.degree_Q = numpy.asarray(self.node_degrees(self.A_Q)).astype(float)
# ----------------------------------------
assert (
self.numNodes == self.numNodes_Q
), "The normal and quarantine adjacency graphs must be of the same size."
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Model parameters:
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.beta = (
numpy.array(self.parameters["beta"]).reshape((self.numNodes, 1))
if isinstance(self.parameters["beta"], (list, numpy.ndarray))
else numpy.full(
fill_value=self.parameters["beta"], shape=(self.numNodes, 1)
)
)
self.sigma = (
numpy.array(self.parameters["sigma"]).reshape((self.numNodes, 1))
if isinstance(self.parameters["sigma"], (list, numpy.ndarray))
else numpy.full(
fill_value=self.parameters["sigma"], shape=(self.numNodes, 1)
)
)
self.gamma = (
numpy.array(self.parameters["gamma"]).reshape((self.numNodes, 1))
if isinstance(self.parameters["gamma"], (list, numpy.ndarray))
else numpy.full(
fill_value=self.parameters["gamma"], shape=(self.numNodes, 1)
)
)
self.mu_I = (
numpy.array(self.parameters["mu_I"]).reshape((self.numNodes, 1))
if isinstance(self.parameters["mu_I"], (list, numpy.ndarray))
else numpy.full(
fill_value=self.parameters["mu_I"], shape=(self.numNodes, 1)
)
)
self.alpha = (
numpy.array(self.parameters["alpha"]).reshape((self.numNodes, 1))
if isinstance(self.parameters["alpha"], (list, numpy.ndarray))
else numpy.full(
fill_value=self.parameters["alpha"], shape=(self.numNodes, 1)
)
)
self.xi = (
numpy.array(self.parameters["xi"]).reshape((self.numNodes, 1))
if isinstance(self.parameters["xi"], (list, numpy.ndarray))
else numpy.full(fill_value=self.parameters["xi"], shape=(self.numNodes, 1))
)
self.mu_0 = (
numpy.array(self.parameters["mu_0"]).reshape((self.numNodes, 1))
if isinstance(self.parameters["mu_0"], (list, numpy.ndarray))
else numpy.full(
fill_value=self.parameters["mu_0"], shape=(self.numNodes, 1)
)
)
self.nu = (
numpy.array(self.parameters["nu"]).reshape((self.numNodes, 1))
if isinstance(self.parameters["nu"], (list, numpy.ndarray))
else numpy.full(fill_value=self.parameters["nu"], shape=(self.numNodes, 1))
)
self.f = (
numpy.array(self.parameters["f"]).reshape((self.numNodes, 1))
if isinstance(self.parameters["f"], (list, numpy.ndarray))
else numpy.full(fill_value=self.parameters["f"], shape=(self.numNodes, 1))
)
self.p = (
numpy.array(self.parameters["p"]).reshape((self.numNodes, 1))
if isinstance(self.parameters["p"], (list, numpy.ndarray))
else numpy.full(fill_value=self.parameters["p"], shape=(self.numNodes, 1))
)
self.rand_f = numpy.random.rand(self.f.shape[0], self.f.shape[1])
# ----------------------------------------
# Testing-related parameters:
# ----------------------------------------
self.beta_Q = (
(
numpy.array(self.parameters["beta_Q"]).reshape((self.numNodes, 1))
if isinstance(self.parameters["beta_Q"], (list, numpy.ndarray))
else numpy.full(
fill_value=self.parameters["beta_Q"], shape=(self.numNodes, 1)
)
)
if self.parameters["beta_Q"] is not None
else self.beta
)
self.sigma_Q = (
(
numpy.array(self.parameters["sigma_Q"]).reshape((self.numNodes, 1))
if isinstance(self.parameters["sigma_Q"], (list, numpy.ndarray))
else numpy.full(
fill_value=self.parameters["sigma_Q"], shape=(self.numNodes, 1)
)
)
if self.parameters["sigma_Q"] is not None
else self.sigma
)
self.gamma_Q = (
(
numpy.array(self.parameters["gamma_Q"]).reshape((self.numNodes, 1))
if isinstance(self.parameters["gamma_Q"], (list, numpy.ndarray))
else numpy.full(
fill_value=self.parameters["gamma_Q"], shape=(self.numNodes, 1)
)
)
if self.parameters["gamma_Q"] is not None
else self.gamma
)
self.mu_Q = (
(
numpy.array(self.parameters["mu_Q"]).reshape((self.numNodes, 1))
if isinstance(self.parameters["mu_Q"], (list, numpy.ndarray))
else numpy.full(
fill_value=self.parameters["mu_Q"], shape=(self.numNodes, 1)
)
)
if self.parameters["mu_Q"] is not None
else self.mu_I
)
self.alpha_Q = (
(
numpy.array(self.parameters["alpha_Q"]).reshape((self.numNodes, 1))
if isinstance(self.parameters["alpha_Q"], (list, numpy.ndarray))
else numpy.full(
fill_value=self.parameters["alpha_Q"], shape=(self.numNodes, 1)
)
)
if self.parameters["alpha_Q"] is not None
else self.alpha
)
self.theta_E = (
numpy.array(self.parameters["theta_E"]).reshape((self.numNodes, 1))
if isinstance(self.parameters["theta_E"], (list, numpy.ndarray))
else numpy.full(
fill_value=self.parameters["theta_E"], shape=(self.numNodes, 1)
)
)
self.theta_I = (
numpy.array(self.parameters["theta_I"]).reshape((self.numNodes, 1))
if isinstance(self.parameters["theta_I"], (list, numpy.ndarray))
else numpy.full(
fill_value=self.parameters["theta_I"], shape=(self.numNodes, 1)
)
)
self.phi_E = (
numpy.array(self.parameters["phi_E"]).reshape((self.numNodes, 1))
if isinstance(self.parameters["phi_E"], (list, numpy.ndarray))
else numpy.full(
fill_value=self.parameters["phi_E"], shape=(self.numNodes, 1)
)
)
self.phi_I = (
numpy.array(self.parameters["phi_I"]).reshape((self.numNodes, 1))
if isinstance(self.parameters["phi_I"], (list, numpy.ndarray))
else numpy.full(
fill_value=self.parameters["phi_I"], shape=(self.numNodes, 1)
)
)
self.psi_E = (
numpy.array(self.parameters["psi_E"]).reshape((self.numNodes, 1))
if isinstance(self.parameters["psi_E"], (list, numpy.ndarray))
else numpy.full(
fill_value=self.parameters["psi_E"], shape=(self.numNodes, 1)
)
)
self.psi_I = (
numpy.array(self.parameters["psi_I"]).reshape((self.numNodes, 1))
if isinstance(self.parameters["psi_I"], (list, numpy.ndarray))
else numpy.full(
fill_value=self.parameters["psi_I"], shape=(self.numNodes, 1)
)
)
self.q = (
numpy.array(self.parameters["q"]).reshape((self.numNodes, 1))
if isinstance(self.parameters["q"], (list, numpy.ndarray))
else numpy.full(fill_value=self.parameters["q"], shape=(self.numNodes, 1))
)
# ----------------------------------------
self.beta_pairwise_mode = self.parameters["beta_pairwise_mode"]
# ----------------------------------------
# Global transmission parameters:
# ----------------------------------------
if self.beta_pairwise_mode == "infected" or self.beta_pairwise_mode is None:
self.beta_global = numpy.full_like(
self.beta, fill_value=numpy.mean(self.beta)
)
self.beta_Q_global = numpy.full_like(
self.beta_Q, fill_value=numpy.mean(self.beta_Q)
)
elif self.beta_pairwise_mode == "infectee":
self.beta_global = self.beta
self.beta_Q_global = self.beta_Q
elif self.beta_pairwise_mode == "min":
self.beta_global = numpy.minimum(self.beta, numpy.mean(self.beta))
self.beta_Q_global = numpy.minimum(self.beta_Q, numpy.mean(self.beta_Q))
elif self.beta_pairwise_mode == "max":
self.beta_global = numpy.maximum(self.beta, numpy.mean(self.beta))
self.beta_Q_global = numpy.maximum(self.beta_Q, numpy.mean(self.beta_Q))
elif self.beta_pairwise_mode == "mean":
self.beta_global = (
self.beta + numpy.full_like(self.beta, fill_value=numpy.mean(self.beta))
) / 2
self.beta_Q_global = (
self.beta_Q
+ numpy.full_like(self.beta_Q, fill_value=numpy.mean(self.beta_Q))
) / 2
# ----------------------------------------
# Local transmission parameters:
# ----------------------------------------
self.beta_local = (
self.beta
if self.parameters["beta_local"] is None
else numpy.array(self.parameters["beta_local"])
if isinstance(self.parameters["beta_local"], (list, numpy.ndarray))
else numpy.full(
fill_value=self.parameters["beta_local"], shape=(self.numNodes, 1)
)
)
self.beta_Q_local = (
self.beta_Q
if self.parameters["beta_Q_local"] is None
else numpy.array(self.parameters["beta_Q_local"])
if isinstance(self.parameters["beta_Q_local"], (list, numpy.ndarray))
else numpy.full(
fill_value=self.parameters["beta_Q_local"], shape=(self.numNodes, 1)
)
)
# ----------------------------------------
if (
self.beta_local.ndim == 2
and self.beta_local.shape[0] == self.numNodes
and self.beta_local.shape[1] == self.numNodes
):
self.A_beta_pairwise = self.beta_local
elif (
self.beta_local.ndim == 1 and self.beta_local.shape[0] == self.numNodes
) or (
self.beta_local.ndim == 2
and (
self.beta_local.shape[0] == self.numNodes
or self.beta_local.shape[1] == self.numNodes
)
):
self.beta_local = self.beta_local.reshape((self.numNodes, 1))
# Pre-multiply beta values by the adjacency matrix ("transmission weight connections")
A_beta_pairwise_byInfected = scipy.sparse.csr_matrix.multiply(
self.A, self.beta_local.T
).tocsr()
A_beta_pairwise_byInfectee = scipy.sparse.csr_matrix.multiply(
self.A, self.beta_local
).tocsr()
# ------------------------------
# Compute the effective pairwise beta values as a function of the infected/infectee pair:
if self.beta_pairwise_mode == "infected":
self.A_beta_pairwise = A_beta_pairwise_byInfected
elif self.beta_pairwise_mode == "infectee":
self.A_beta_pairwise = A_beta_pairwise_byInfectee
elif self.beta_pairwise_mode == "min":
self.A_beta_pairwise = scipy.sparse.csr_matrix.minimum(
A_beta_pairwise_byInfected, A_beta_pairwise_byInfectee
)
elif self.beta_pairwise_mode == "max":
self.A_beta_pairwise = scipy.sparse.csr_matrix.maximum(
A_beta_pairwise_byInfected, A_beta_pairwise_byInfectee
)
elif self.beta_pairwise_mode == "mean" or self.beta_pairwise_mode is None:
self.A_beta_pairwise = (
A_beta_pairwise_byInfected + A_beta_pairwise_byInfectee
) / 2
else:
print(
"Unrecognized beta_pairwise_mode value (support for 'infected', 'infectee', 'min', 'max', and 'mean')."
)
else:
print(
"Invalid values given for beta_local (expected 1xN list/array or NxN 2d array)"
)
# ----------------------------------------
if (
self.beta_Q_local.ndim == 2
and self.beta_Q_local.shape[0] == self.numNodes
and self.beta_Q_local.shape[1] == self.numNodes
):
self.A_Q_beta_Q_pairwise = self.beta_Q_local
elif (
self.beta_Q_local.ndim == 1 and self.beta_Q_local.shape[0] == self.numNodes
) or (
self.beta_Q_local.ndim == 2
and (
self.beta_Q_local.shape[0] == self.numNodes
or self.beta_Q_local.shape[1] == self.numNodes
)
):
self.beta_Q_local = self.beta_Q_local.reshape((self.numNodes, 1))
# Pre-multiply beta_Q values by the isolation adjacency matrix ("transmission weight connections")
A_Q_beta_Q_pairwise_byInfected = scipy.sparse.csr_matrix.multiply(
self.A_Q, self.beta_Q_local.T
).tocsr()
A_Q_beta_Q_pairwise_byInfectee = scipy.sparse.csr_matrix.multiply(
self.A_Q, self.beta_Q_local
).tocsr()
# ------------------------------
# Compute the effective pairwise beta values as a function of the infected/infectee pair:
if self.beta_pairwise_mode == "infected":
self.A_Q_beta_Q_pairwise = A_Q_beta_Q_pairwise_byInfected
elif self.beta_pairwise_mode == "infectee":
self.A_Q_beta_Q_pairwise = A_Q_beta_Q_pairwise_byInfectee
elif self.beta_pairwise_mode == "min":
self.A_Q_beta_Q_pairwise = scipy.sparse.csr_matrix.minimum(
A_Q_beta_Q_pairwise_byInfected, A_Q_beta_Q_pairwise_byInfectee
)
elif self.beta_pairwise_mode == "max":
self.A_Q_beta_Q_pairwise = scipy.sparse.csr_matrix.maximum(
A_Q_beta_Q_pairwise_byInfected, A_Q_beta_Q_pairwise_byInfectee
)
elif self.beta_pairwise_mode == "mean" or self.beta_pairwise_mode is None:
self.A_Q_beta_Q_pairwise = (
A_Q_beta_Q_pairwise_byInfected + A_Q_beta_Q_pairwise_byInfectee
) / 2
else:
print(
"Unrecognized beta_pairwise_mode value (support for 'infected', 'infectee', 'min', 'max', and 'mean')."
)
else:
print(
"Invalid values given for beta_Q_local (expected 1xN list/array or NxN 2d array)"
)
# ----------------------------------------
# ----------------------------------------
# Degree-based transmission scaling parameters:
# ----------------------------------------
self.delta_pairwise_mode = self.parameters["delta_pairwise_mode"]
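        # When no explicit delta is given, each node's degree-based scaling defaults to
        # log(degree) / log(mean degree), so well-connected nodes contribute proportionally
        # more transmission than the network average.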
with numpy.errstate(
divide="ignore"
): # ignore log(0) warning, then convert log(0) = -inf -> 0.0
self.delta = (
numpy.log(self.degree) / numpy.log(numpy.mean(self.degree))
if self.parameters["delta"] is None
else numpy.array(self.parameters["delta"])
if isinstance(self.parameters["delta"], (list, numpy.ndarray))
else numpy.full(
fill_value=self.parameters["delta"], shape=(self.numNodes, 1)
)
)
self.delta_Q = (
numpy.log(self.degree_Q) / numpy.log(numpy.mean(self.degree_Q))
if self.parameters["delta_Q"] is None
else numpy.array(self.parameters["delta_Q"])
if isinstance(self.parameters["delta_Q"], (list, numpy.ndarray))
else numpy.full(
fill_value=self.parameters["delta_Q"], shape=(self.numNodes, 1)
)
)
self.delta[numpy.isneginf(self.delta)] = 0.0
self.delta_Q[numpy.isneginf(self.delta_Q)] = 0.0
# ----------------------------------------
if (
self.delta.ndim == 2
and self.delta.shape[0] == self.numNodes
and self.delta.shape[1] == self.numNodes
):
self.A_delta_pairwise = self.delta
elif (self.delta.ndim == 1 and self.delta.shape[0] == self.numNodes) or (
self.delta.ndim == 2
and (
self.delta.shape[0] == self.numNodes
or self.delta.shape[1] == self.numNodes
)
):
self.delta = self.delta.reshape((self.numNodes, 1))
# Pre-multiply delta values by the adjacency matrix ("transmission weight connections")
A_delta_pairwise_byInfected = scipy.sparse.csr_matrix.multiply(
self.A, self.delta.T
).tocsr()
A_delta_pairwise_byInfectee = scipy.sparse.csr_matrix.multiply(
self.A, self.delta
).tocsr()
# ------------------------------
# Compute the effective pairwise delta values as a function of the infected/infectee pair:
if self.delta_pairwise_mode == "infected":
self.A_delta_pairwise = A_delta_pairwise_byInfected
elif self.delta_pairwise_mode == "infectee":
self.A_delta_pairwise = A_delta_pairwise_byInfectee
elif self.delta_pairwise_mode == "min":
self.A_delta_pairwise = scipy.sparse.csr_matrix.minimum(
A_delta_pairwise_byInfected, A_delta_pairwise_byInfectee
)
elif self.delta_pairwise_mode == "max":
self.A_delta_pairwise = scipy.sparse.csr_matrix.maximum(
A_delta_pairwise_byInfected, A_delta_pairwise_byInfectee
)
elif self.delta_pairwise_mode == "mean":
self.A_delta_pairwise = (
A_delta_pairwise_byInfected + A_delta_pairwise_byInfectee
) / 2
elif self.delta_pairwise_mode is None:
self.A_delta_pairwise = self.A
else:
print(
"Unrecognized delta_pairwise_mode value (support for 'infected', 'infectee', 'min', 'max', and 'mean')."
)
else:
print(
"Invalid values given for delta (expected 1xN list/array or NxN 2d array)"
)
# ----------------------------------------
if (
self.delta_Q.ndim == 2
and self.delta_Q.shape[0] == self.numNodes
and self.delta_Q.shape[1] == self.numNodes
):
self.A_Q_delta_Q_pairwise = self.delta_Q
elif (self.delta_Q.ndim == 1 and self.delta_Q.shape[0] == self.numNodes) or (
self.delta_Q.ndim == 2
and (
self.delta_Q.shape[0] == self.numNodes
or self.delta_Q.shape[1] == self.numNodes
)
):
self.delta_Q = self.delta_Q.reshape((self.numNodes, 1))
# Pre-multiply delta_Q values by the isolation adjacency matrix ("transmission weight connections")
            A_Q_delta_Q_pairwise_byInfected = scipy.sparse.csr_matrix.multiply(
                self.A_Q, self.delta_Q.T
            ).tocsr()
            A_Q_delta_Q_pairwise_byInfectee = scipy.sparse.csr_matrix.multiply(
                self.A_Q, self.delta_Q
            ).tocsr()
# ------------------------------
# Compute the effective pairwise delta values as a function of the infected/infectee pair:
if self.delta_pairwise_mode == "infected":
self.A_Q_delta_Q_pairwise = A_Q_delta_Q_pairwise_byInfected
elif self.delta_pairwise_mode == "infectee":
self.A_Q_delta_Q_pairwise = A_Q_delta_Q_pairwise_byInfectee
elif self.delta_pairwise_mode == "min":
self.A_Q_delta_Q_pairwise = scipy.sparse.csr_matrix.minimum(
A_Q_delta_Q_pairwise_byInfected, A_Q_delta_Q_pairwise_byInfectee
)
elif self.delta_pairwise_mode == "max":
self.A_Q_delta_Q_pairwise = scipy.sparse.csr_matrix.maximum(
A_Q_delta_Q_pairwise_byInfected, A_Q_delta_Q_pairwise_byInfectee
)
elif self.delta_pairwise_mode == "mean":
self.A_Q_delta_Q_pairwise = (
A_Q_delta_Q_pairwise_byInfected + A_Q_delta_Q_pairwise_byInfectee
) / 2
elif self.delta_pairwise_mode is None:
                self.A_Q_delta_Q_pairwise = self.A_Q
else:
print(
"Unrecognized delta_pairwise_mode value (support for 'infected', 'infectee', 'min', 'max', and 'mean')."
)
else:
print(
"Invalid values given for delta_Q (expected 1xN list/array or NxN 2d array)"
)
# ----------------------------------------
# Pre-calculate the pairwise delta*beta values:
# ----------------------------------------
self.A_deltabeta = scipy.sparse.csr_matrix.multiply(
self.A_delta_pairwise, self.A_beta_pairwise
)
self.A_Q_deltabeta_Q = scipy.sparse.csr_matrix.multiply(
self.A_Q_delta_Q_pairwise, self.A_Q_beta_Q_pairwise
)
def node_degrees(self, Amat):
return Amat.sum(axis=0).reshape(self.numNodes, 1) # sums of adj matrix cols
def total_num_susceptible(self, t_idx=None):
if t_idx is None:
return self.numS[:]
else:
return self.numS[t_idx]
def total_num_infected(self, t_idx=None):
if t_idx is None:
return self.numE[:] + self.numI[:] + self.numQ_E[:] + self.numQ_I[:]
else:
return (
self.numE[t_idx]
+ self.numI[t_idx]
+ self.numQ_E[t_idx]
+ self.numQ_I[t_idx]
)
def total_num_isolated(self, t_idx=None):
if t_idx is None:
return self.numQ_E[:] + self.numQ_I[:]
else:
return self.numQ_E[t_idx] + self.numQ_I[t_idx]
def total_num_tested(self, t_idx=None):
if t_idx is None:
return self.numTested[:]
else:
return self.numTested[t_idx]
def total_num_positive(self, t_idx=None):
if t_idx is None:
return self.numPositive[:]
else:
return self.numPositive[t_idx]
def total_num_recovered(self, t_idx=None):
if t_idx is None:
return self.numR[:]
else:
return self.numR[t_idx]
def calc_propensities(self):
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Pre-calculate matrix multiplication terms that may be used in multiple propensity calculations,
# and check to see if their computation is necessary before doing the multiplication
# ------------------------------------
self.transmissionTerms_I = numpy.zeros(shape=(self.numNodes, 1))
if numpy.any(self.numI[self.tidx]):
self.transmissionTerms_I = numpy.asarray(
scipy.sparse.csr_matrix.dot(self.A_deltabeta, self.X == self.I)
)
# ------------------------------------
self.transmissionTerms_Q = numpy.zeros(shape=(self.numNodes, 1))
if numpy.any(self.numQ_I[self.tidx]):
self.transmissionTerms_Q = numpy.asarray(
scipy.sparse.csr_matrix.dot(self.A_Q_deltabeta_Q, self.X == self.Q_I)
)
# ------------------------------------
numContacts_Q = numpy.zeros(shape=(self.numNodes, 1))
if numpy.any(self.positive) and (
numpy.any(self.phi_E) or numpy.any(self.phi_I)
):
numContacts_Q = numpy.asarray(
scipy.sparse.csr_matrix.dot(
self.A, ((self.positive) & (self.X != self.R) & (self.X != self.F))
)
)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
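        # S->E propensity: susceptibility (alpha) times a mixture of global (well-mixed)
        # transmission pressure, weighted by p, and local network pressure, weighted by
        # (1 - p); isolated infectious contacts enter via beta_Q and are scaled by q globally.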
propensities_StoE = (
self.alpha
* (
self.p
* (
(
self.beta_global * self.numI[self.tidx]
+ self.q * self.beta_Q_global * self.numQ_I[self.tidx]
)
/ self.N[self.tidx]
)
+ (1 - self.p)
* (
numpy.divide(
self.transmissionTerms_I,
self.degree,
out=numpy.zeros_like(self.degree),
where=self.degree != 0,
)
+ numpy.divide(
self.transmissionTerms_Q,
self.degree_Q,
out=numpy.zeros_like(self.degree_Q),
where=self.degree_Q != 0,
)
)
)
) * (self.X == self.S)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if self.transition_mode == "time_in_state":
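            # In this mode transitions are effectively deterministic: the large 1e5 rate
            # makes an event fire almost immediately once a node's time in its current
            # state exceeds the corresponding mean duration (e.g. 1/sigma for E->I).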
propensities_EtoI = 1e5 * (
(self.X == self.E) & numpy.greater(self.timer_state, 1 / self.sigma)
)
propensities_ItoR = 1e5 * (
(self.X == self.I)
& numpy.greater(self.timer_state, 1 / self.gamma)
& numpy.greater_equal(self.rand_f, self.f)
)
propensities_ItoF = 1e5 * (
(self.X == self.I)
& numpy.greater(self.timer_state, 1 / self.mu_I)
& numpy.less(self.rand_f, self.f)
)
propensities_EtoQE = numpy.zeros_like(propensities_StoE)
propensities_ItoQI = numpy.zeros_like(propensities_StoE)
propensities_QEtoQI = 1e5 * (
(self.X == self.Q_E) & numpy.greater(self.timer_state, 1 / self.sigma_Q)
)
propensities_QItoR = 1e5 * (
(self.X == self.Q_I)
& numpy.greater(self.timer_state, 1 / self.gamma_Q)
& numpy.greater_equal(self.rand_f, self.f)
)
propensities_QItoF = 1e5 * (
(self.X == self.Q_I)
& numpy.greater(self.timer_state, 1 / self.mu_Q)
& numpy.less(self.rand_f, self.f)
)
propensities_RtoS = 1e5 * (
(self.X == self.R) & numpy.greater(self.timer_state, 1 / self.xi)
)
propensities__toS = 1e5 * (
(self.X != self.F) & numpy.greater(self.timer_state, 1 / self.nu)
)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
else: # exponential_rates
propensities_EtoI = self.sigma * (self.X == self.E)
propensities_ItoR = self.gamma * (
(self.X == self.I) & (numpy.greater_equal(self.rand_f, self.f))
)
propensities_ItoF = self.mu_I * (
(self.X == self.I) & (numpy.less(self.rand_f, self.f))
)
propensities_EtoQE = (
(self.theta_E + self.phi_E * numContacts_Q)
* self.psi_E
* (self.X == self.E)
)
propensities_ItoQI = (
(self.theta_I + self.phi_I * numContacts_Q)
* self.psi_I
* (self.X == self.I)
)
propensities_QEtoQI = self.sigma_Q * (self.X == self.Q_E)
propensities_QItoR = self.gamma_Q * (
(self.X == self.Q_I) & (numpy.greater_equal(self.rand_f, self.f))
)
propensities_QItoF = self.mu_Q * (
(self.X == self.Q_I) & (numpy.less(self.rand_f, self.f))
)
propensities_RtoS = self.xi * (self.X == self.R)
propensities__toS = self.nu * (self.X != self.F)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
propensities = numpy.hstack(
[
propensities_StoE,
propensities_EtoI,
propensities_ItoR,
propensities_ItoF,
propensities_EtoQE,
propensities_ItoQI,
propensities_QEtoQI,
propensities_QItoR,
propensities_QItoF,
propensities_RtoS,
propensities__toS,
]
)
columns = [
"StoE",
"EtoI",
"ItoR",
"ItoF",
"EtoQE",
"ItoQI",
"QEtoQI",
"QItoR",
"QItoF",
"RtoS",
"_toS",
]
return propensities, columns
def set_isolation(self, node, isolate):
# Move this node in/out of the appropriate isolation state:
if isolate == True:
if self.X[node] == self.E:
self.X[node] = self.Q_E
                self.timer_state[node] = 0
elif self.X[node] == self.I:
self.X[node] = self.Q_I
                self.timer_state[node] = 0
elif isolate == False:
if self.X[node] == self.Q_E:
self.X[node] = self.E
                self.timer_state[node] = 0
elif self.X[node] == self.Q_I:
self.X[node] = self.I
                self.timer_state[node] = 0
# Reset the isolation timer:
self.timer_isolation[node] = 0
def set_tested(self, node, tested):
self.tested[node] = tested
self.testedInCurrentState[node] = tested
def set_positive(self, node, positive):
self.positive[node] = positive
def introduce_exposures(self, num_new_exposures):
exposedNodes = numpy.random.choice(
range(self.numNodes), size=num_new_exposures, replace=False
)
for exposedNode in exposedNodes:
if self.X[exposedNode] == self.S:
self.X[exposedNode] = self.E
def increase_data_series_length(self):
self.tseries = numpy.pad(
self.tseries, [(0, 6 * self.numNodes)], mode="constant", constant_values=0
)
self.numS = numpy.pad(
self.numS, [(0, 6 * self.numNodes)], mode="constant", constant_values=0
)
self.numE = numpy.pad(
self.numE, [(0, 6 * self.numNodes)], mode="constant", constant_values=0
)
self.numI = numpy.pad(
self.numI, [(0, 6 * self.numNodes)], mode="constant", constant_values=0
)
self.numR = numpy.pad(
self.numR, [(0, 6 * self.numNodes)], mode="constant", constant_values=0
)
self.numF = numpy.pad(
self.numF, [(0, 6 * self.numNodes)], mode="constant", constant_values=0
)
self.numQ_E = numpy.pad(
self.numQ_E, [(0, 6 * self.numNodes)], mode="constant", constant_values=0
)
self.numQ_I = numpy.pad(
self.numQ_I, [(0, 6 * self.numNodes)], mode="constant", constant_values=0
)
self.N = numpy.pad(
self.N, [(0, 6 * self.numNodes)], mode="constant", constant_values=0
)
self.numTested = numpy.pad(
self.numTested, [(0, 6 * self.numNodes)], mode="constant", constant_values=0
)
self.numPositive = numpy.pad(
self.numPositive,
[(0, 6 * self.numNodes)],
mode="constant",
constant_values=0,
)
if self.store_Xseries:
self.Xseries = numpy.pad(
self.Xseries,
[(0, 6 * self.numNodes), (0, 0)],
mode="constant",
constant_values=0,
)
if self.nodeGroupData:
for groupName in self.nodeGroupData:
self.nodeGroupData[groupName]["numS"] = numpy.pad(
self.nodeGroupData[groupName]["numS"],
[(0, 6 * self.numNodes)],
mode="constant",
constant_values=0,
)
self.nodeGroupData[groupName]["numE"] = numpy.pad(
self.nodeGroupData[groupName]["numE"],
[(0, 6 * self.numNodes)],
mode="constant",
constant_values=0,
)
self.nodeGroupData[groupName]["numI"] = numpy.pad(
self.nodeGroupData[groupName]["numI"],
[(0, 6 * self.numNodes)],
mode="constant",
constant_values=0,
)
self.nodeGroupData[groupName]["numR"] = numpy.pad(
self.nodeGroupData[groupName]["numR"],
[(0, 6 * self.numNodes)],
mode="constant",
constant_values=0,
)
self.nodeGroupData[groupName]["numF"] = numpy.pad(
self.nodeGroupData[groupName]["numF"],
[(0, 6 * self.numNodes)],
mode="constant",
constant_values=0,
)
self.nodeGroupData[groupName]["numQ_E"] = numpy.pad(
self.nodeGroupData[groupName]["numQ_E"],
[(0, 6 * self.numNodes)],
mode="constant",
constant_values=0,
)
self.nodeGroupData[groupName]["numQ_I"] = numpy.pad(
self.nodeGroupData[groupName]["numQ_I"],
[(0, 6 * self.numNodes)],
mode="constant",
constant_values=0,
)
self.nodeGroupData[groupName]["N"] = numpy.pad(
self.nodeGroupData[groupName]["N"],
[(0, 6 * self.numNodes)],
mode="constant",
constant_values=0,
)
self.nodeGroupData[groupName]["numTested"] = numpy.pad(
self.nodeGroupData[groupName]["numTested"],
[(0, 6 * self.numNodes)],
mode="constant",
constant_values=0,
)
self.nodeGroupData[groupName]["numPositive"] = numpy.pad(
self.nodeGroupData[groupName]["numPositive"],
[(0, 6 * self.numNodes)],
mode="constant",
constant_values=0,
)
return None
def finalize_data_series(self):
self.tseries = numpy.array(self.tseries, dtype=float)[: self.tidx + 1]
self.numS = numpy.array(self.numS, dtype=float)[: self.tidx + 1]
self.numE = numpy.array(self.numE, dtype=float)[: self.tidx + 1]
self.numI = numpy.array(self.numI, dtype=float)[: self.tidx + 1]
self.numR = numpy.array(self.numR, dtype=float)[: self.tidx + 1]
        self.numF = numpy.array(self.numF, dtype=float)[: self.tidx + 1]
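# --- Illustrative sketch (added for exposition; not part of the model class above) ---
# calc_propensities() returns a (numNodes x numTransitions) array of per-node event rates
# plus the transition labels. A generic Gillespie-style draw over such an array looks
# roughly like this sketch; the model's own iteration loop may differ in details, and
# every name below is hypothetical.
import numpy
def gillespie_draw(propensities, rng=None):
    """Sample (waiting_time, node_index, transition_index) from a propensity matrix."""
    rng = numpy.random.default_rng() if rng is None else rng
    rates = propensities.ravel()
    total = rates.sum()
    if total <= 0:
        return None  # no event can occur
    tau = rng.exponential(1.0 / total)             # waiting time to the next event
    idx = rng.choice(rates.size, p=rates / total)  # which (node, transition) fires
    node, transition = numpy.unravel_index(idx, propensities.shape)
    return tau, node, transition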
"""The product Matern52 kernel embeddings."""
from typing import List, Optional, Tuple
import numpy as np
from ...quadrature.interfaces.standard_kernels import IProductMatern52
from ..measures import IntegrationMeasure
from ..typing import BoundsType
from .quadrature_kernels import QuadratureKernel
class QuadratureProductMatern52(QuadratureKernel):
r"""A product Matern52 kernel augmented with integrability.
The kernel is of the form :math:`k(x, x') = \sigma^2 \prod_{i=1}^d k_i(x, x')` where
.. math::
k_i(x, x') = (1 + \sqrt{5} r_i + \frac{5}{3} r_i^2) \exp(- \sqrt{5} r_i).
Above, :math:`d` is the input dimensionality, :math:`r_i =\frac{|x_i - x'_i|}{\lambda_i}`,
is the scaled distance, :math:`\sigma^2` is the ``variance`` property and :math:`\lambda_i`
is the :math:`i` th element of the ``lengthscales`` property.
.. note::
This class is compatible with the standard kernel :class:`IProductMatern52`.
Each subclass of this class implements an embedding w.r.t. a specific integration measure.
.. seealso::
* :class:`emukit.quadrature.interfaces.IProductMatern52`
* :class:`emukit.quadrature.kernels.QuadratureKernel`
:param matern_kernel: The standard EmuKit product Matern52 kernel.
:param integral_bounds: The integral bounds.
List of D tuples, where D is the dimensionality
of the integral and the tuples contain the lower and upper bounds of the integral
i.e., [(lb_1, ub_1), (lb_2, ub_2), ..., (lb_D, ub_D)].
``None`` if bounds are infinite.
:param measure: The integration measure. ``None`` implies the standard Lebesgue measure.
:param variable_names: The (variable) name(s) of the integral.
"""
def __init__(
self,
matern_kernel: IProductMatern52,
integral_bounds: Optional[BoundsType],
measure: Optional[IntegrationMeasure],
variable_names: str = "",
) -> None:
super().__init__(
kern=matern_kernel, integral_bounds=integral_bounds, measure=measure, variable_names=variable_names
)
@property
def nu(self) -> float:
"""The smoothness parameter of the kernel."""
return self.kern.nu
@property
def lengthscales(self) -> np.ndarray:
r"""The lengthscales :math:`\lambda` of the kernel."""
return self.kern.lengthscales
@property
def variance(self) -> float:
r"""The scale :math:`\sigma^2` of the kernel."""
return self.kern.variance
def qK(self, x2: np.ndarray) -> np.ndarray:
raise NotImplementedError
def Kq(self, x1: np.ndarray) -> np.ndarray:
return self.qK(x1).T
def qKq(self) -> float:
raise NotImplementedError
def dqK_dx(self, x2: np.ndarray) -> np.ndarray:
raise NotImplementedError
def dKq_dx(self, x1: np.ndarray) -> np.ndarray:
return self.dqK_dx(x1).T
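# --- Illustrative sketch (added for exposition; not part of the original module) ---
# Direct NumPy evaluation of the product Matern52 formula quoted in the class docstring
# above: k(x, x') = sigma^2 * prod_i (1 + sqrt(5) r_i + 5/3 r_i^2) * exp(-sqrt(5) r_i),
# with r_i = |x_i - x'_i| / lambda_i. The function and argument names are hypothetical.
def _product_matern52_sketch(x: np.ndarray, x_prime: np.ndarray, lengthscales: np.ndarray, variance: float = 1.0) -> float:
    r = np.abs(np.asarray(x, dtype=float) - np.asarray(x_prime, dtype=float)) / np.asarray(lengthscales, dtype=float)
    per_dim = (1.0 + np.sqrt(5.0) * r + (5.0 / 3.0) * r**2) * np.exp(-np.sqrt(5.0) * r)
    return float(variance * np.prod(per_dim))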
class QuadratureProductMatern52LebesgueMeasure(QuadratureProductMatern52):
"""An product Matern52 kernel augmented with integrability w.r.t. the standard Lebesgue measure.
.. seealso::
* :class:`emukit.quadrature.interfaces.IProductMatern52`
* :class:`emukit.quadrature.kernels.QuadratureProductMatern52`
:param matern_kernel: The standard EmuKit product Matern52 kernel.
:param integral_bounds: The integral bounds.
List of D tuples, where D is the dimensionality
of the integral and the tuples contain the lower and upper bounds of the integral
i.e., [(lb_1, ub_1), (lb_2, ub_2), ..., (lb_D, ub_D)].
``None`` if bounds are infinite.
:param variable_names: The (variable) name(s) of the integral.
"""
def __init__(self, matern_kernel: IProductMatern52, integral_bounds: BoundsType, variable_names: str = "") -> None:
super().__init__(
matern_kernel=matern_kernel, integral_bounds=integral_bounds, measure=None, variable_names=variable_names
)
def qK(self, x2: np.ndarray, skip: List[int] = None) -> np.ndarray:
if skip is None:
skip = []
qK = np.ones(x2.shape[0])
for dim in range(x2.shape[1]):
if dim in skip:
continue
qK *= self._qK_1d(x=x2[:, dim], domain=self.integral_bounds.bounds[dim], ell=self.lengthscales[dim])
return qK[None, :] * self.variance
def qKq(self) -> float:
qKq = 1.0
for dim in range(self.input_dim):
qKq *= self._qKq_1d(domain=self.integral_bounds.bounds[dim], ell=self.lengthscales[dim])
return self.variance * qKq
def dqK_dx(self, x2: np.ndarray) -> np.ndarray:
input_dim = x2.shape[1]
dqK_dx = np.zeros([input_dim, x2.shape[0]])
for dim in range(input_dim):
grad_term = self._dqK_dx_1d(
x=x2[:, dim], domain=self.integral_bounds.bounds[dim], ell=self.lengthscales[dim]
)
dqK_dx[dim, :] = grad_term * self.qK(x2, skip=[dim])[0, :]
return dqK_dx
# one dimensional integrals start here
def _qK_1d(self, x: np.ndarray, domain: Tuple[float, float], ell: float) -> np.ndarray:
"""Unscaled kernel mean for 1D Matern52 kernel."""
(a, b) = domain
s5 = np.sqrt(5)
first_term = 16 * ell / (3 * s5)
second_term = (
            - np.exp(s5 * (x - b) / ell)
#
# Copyright (c) 2021 Blickfeld GmbH.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE.md file in the root directory of this source tree.
from __future__ import print_function
import argparse
from blickfeld_scanner import scanner
import numpy as np
from time import sleep
from blickfeld_scanner.protocol.config.advanced_pb2 import Advanced
def calibrate_accelerometer(args):
"""Calibrate the rotational offset of the Blickfeld Cube 1 Inertial Measurement Unit (IMU).
The upright pose is identified by the static acceleration reading [0, 0, -1]. This means, that
the gravitational acceleration is measured along the negative direction of the devices Z-Axis.
Place the Blickfeld Cube 1 on a level surface for calibrating the IMU.
Avoid any kind of movement of the Blickfeld Cube 1 while running the script.
    If the Blickfeld Cube 1 already has a rotational offset configured, remove it first by
    running this script with the '--remove' flag.
"""
ORIENTATION_UPRIGHT = [0, 0, -1]
ERROR_ALLOWED_NORM = 1e-2
# ensure a given vector is normalized to length 1
def _unit_vector(v: list) -> np.array:
return np.array(v) / np.linalg.norm(v)
# calculate the rotation matrix
def _calculate_rotation_matrix(acc_imu: list, acc_calib: list = ORIENTATION_UPRIGHT) -> np.array:
acc_imu = _unit_vector(acc_imu)
acc_calib = _unit_vector(acc_calib)
# see https://math.stackexchange.com/questions/180418/calculate-rotation-matrix-to-align-vector-a-to-vector-b-in-3d
imu_static_rotation_offset = np.eye(3)
if np.linalg.norm(np.cross(acc_calib, acc_imu)) < 1e-6:
imu_static_rotation_offset = -imu_static_rotation_offset
else:
axis = np.cross(acc_calib, acc_imu)
            s = np.linalg.norm(axis)
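# --- Illustrative sketch (added for exposition; not part of the original example) ---
# The helper above (truncated here) follows the linked construction: build the rotation R
# that maps unit vector a onto unit vector b via Rodrigues' formula,
#   R = I + [v]_x + [v]_x^2 * (1 - c) / s^2,  with v = a x b, c = a . b, s = |v|.
# This standalone version is a sketch under that assumption; all names are hypothetical.
def _rotation_aligning_vectors(a: np.ndarray, b: np.ndarray) -> np.ndarray:
    a = np.asarray(a, dtype=float) / np.linalg.norm(a)
    b = np.asarray(b, dtype=float) / np.linalg.norm(b)
    v = np.cross(a, b)
    c = float(np.dot(a, b))
    s = np.linalg.norm(v)
    if s < 1e-12:
        # Parallel vectors need no rotation; the anti-parallel case needs a 180 degree
        # rotation about any axis perpendicular to a (not handled in this sketch).
        return np.eye(3)
    vx = np.array([[0.0, -v[2], v[1]],
                   [v[2], 0.0, -v[0]],
                   [-v[1], v[0], 0.0]])
    return np.eye(3) + vx + vx @ vx * ((1.0 - c) / s**2)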
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pyrotein as pr
import numpy as np
import colorsimple as cs
import GnuplotPy3
import os
import tempfile
def plot_dmat(
dmat, # Input data, which is a distance matrix
fl_dmat, # Filename of the exported file
lbl = {}, # Labels used to mark on the diagonal
lbl_fontsize = 8, # Fontsize for label
lbl_linewidth = 1.0, # pt
diaglbl = {}, # diagonal label (usually for showing index)
diaglblfontsize = 5,
width = 6, # inch
height = 7, # inch
fontsize = 14, # pt
linewidth = 1.0, # pt
curve_linewidth = 1.0, # pt
palette = "", # Palette definition
intst_min = "0", # Min intensity value
intst_max = "*", # Max intensity value
vrange = [],
showzero = True,
showcolorbox = True,
NaN = "NaN",
temp = True,
mode = "image", # "image", "sparse", "pm3d"
showsparselabel = False,
box_range = [], # If box range is empty, then the box covers the whole area of u
default_intst_rng = False,
cmds_top = [], # Customized command for upper panel
cmds_bottom = [], # Customized command for bottom panel
):
assert len(vrange) == 0 or len(vrange) == 2, "vrange has to be an empty or 2-member tuple. "
    # Partial (zoomed-in) value range requested?
range_default = ("*", "*")
if len(vrange) == 2: fl_dmat = f"{fl_dmat}.zoom"
title = f"Column mean"
cmd_box = [""]
if len(box_range):
# Right-inclusion is considered [debating if this should be done]
b, e = box_range
# Horizontal lines (beginning of a region)
cmd = f"set arrow front from graph 0,first {b} to graph 1,first {b} nohead dashtype 2 linewidth {lbl_linewidth} linecolor rgb 'black'"
cmd_box.append(cmd)
# Horizontal lines (end of a region)
cmd = f"set arrow front from graph 0,first {e} to graph 1,first {e} nohead dashtype 2 linewidth {lbl_linewidth} linecolor rgb 'black'"
cmd_box.append(cmd)
title += " (within reference range)"
else:
b, e = 0, len(dmat)
# Get the mean...
column_mean_dmat = np.nanmean(dmat[b:e, :], axis = 0, keepdims = False)
# Draw lbl (optional)...
cmds_lbl_top = [""]
cmds_lbl_bottom = [""]
color_lbl = '#BBBBBB'
_lbl = {}
if len(lbl) > 0:
for k, (b,e) in lbl.items():
# Vertical lines (beginning of a region)
cmd = f"set arrow front from {b},graph 0 to {b},graph 1 nohead dashtype 2 linewidth {lbl_linewidth} linecolor rgb '{color_lbl}'"
cmds_lbl_bottom.append(cmd)
cmds_lbl_top.append(cmd)
# Vertical lines (end of a region)
cmd = f"set arrow front from {e},graph 0 to {e},graph 1 nohead dashtype 2 linewidth {lbl_linewidth} linecolor rgb '{color_lbl}'"
cmds_lbl_bottom.append(cmd)
cmds_lbl_top.append(cmd)
# Horizontal lines (beginning of a region)
cmd = f"set arrow front from graph 0,first {b} to graph 1,first {b} nohead dashtype 2 linewidth {lbl_linewidth} linecolor rgb '{color_lbl}'"
cmds_lbl_bottom.append(cmd)
# Horizontal lines (end of a region)
cmd = f"set arrow front from graph 0,first {e} to graph 1,first {e} nohead dashtype 2 linewidth {lbl_linewidth} linecolor rgb '{color_lbl}'"
cmds_lbl_bottom.append(cmd)
# Put labels on the diagonal...
_lbl[k] = [ (b + e) // 2, (b + e) // 2 ]
# [[[ Visualize ]]]
num_items = len(dmat)
if intst_max == "*":
intst_min = np.nanmin(dmat)
intst_max = np.nanmax(dmat)
intst_column_mean_min = np.min( [np.nanmin(column_mean_dmat), 0] )
intst_column_mean_max = np.max( [np.nanmax(column_mean_dmat), 0] )
# Create tempfile to visualize half matrix...
fl_temp = f"{fl_dmat}.dat"
if temp: fl_temp = tempfile.mktemp(".temp.dat")
with open(fl_temp,'w') as fh:
for j in range(num_items):
for k in range(j if mode != 'image' else num_items):
if mode == "sparse":
if np.isnan(dmat[j, k]): continue
## if not intst_min < dmat[j, k] < intst_max: continue
val = NaN if np.isnan(dmat[j, k]) else dmat[j, k]
fh.write(f"{k} {j} {val}\n")
fh.write("\n")
# Begin Gnuplot
gp = GnuplotPy3.GnuplotPy3()
gp(f"set terminal postscript eps size {width}, {height} \\")
gp(f" enhanced color \\")
gp(f" font 'Helvetica,{fontsize}' \\")
gp(f" linewidth {linewidth}")
# Declare the filename to export...
gp(f"set output '{fl_dmat}.eps'")
gp("unset key")
# Declare a multiplot...
gp("set origin 0,0")
gp("set size 1,1")
gp("unset bmargin")
gp("unset tmargin")
gp("unset lmargin")
gp("unset rmargin")
gp("set multiplot title ''")
# PLOT 1: mean dmat...
gp(f"unset xrange")
gp(f"unset yrange")
gp("unset xtics")
gp("unset ytics")
gp(f"unset logscale")
gp("set origin 0,0.70")
gp("set size 1,0.15")
gp("set tmargin 0")
gp("set bmargin at screen 0.70")
gp("set lmargin at screen 0.05")
gp("set rmargin at screen 0.80")
gp(f"set xrange [-1:{num_items}]")
gp(f"set yrange [{intst_column_mean_min}:{1.1 * intst_column_mean_max}]")
if not default_intst_rng: gp(f"set yrange [{intst_min}:{2.0 * intst_max}]")
gp("set key top right")
gp(f"set border linewidth {linewidth}")
gp("set view map")
if showzero: gp(f"set arrow front from graph 0, first 0 to graph 1, first 0 nohead dashtype 2 linewidth 1.0 linecolor rgb 'black'")
for cmd in cmds_lbl_top: gp(cmd)
for cmd in cmds_top: gp(cmd)
if mode == "pm3d":
gp(f"splot '-' using 1:2:3 with lines linewidth {curve_linewidth} linecolor rgb 'black' title '{title}'")
for i,v in enumerate(column_mean_dmat):
gp(f"{i} {v} 0")
gp("e")
else:
gp(f"plot '-' using 1:2 with lines linewidth {curve_linewidth} linecolor rgb 'black' title '{title}'")
for i,v in enumerate(column_mean_dmat):
gp(f"{i} {v}")
gp("e")
# PLOT 2: distance matrix...
gp(f"unset arrow")
gp(f"unset key")
gp(f"unset xrange")
gp(f"unset yrange")
gp(f"unset xtics")
gp(f"unset ytics")
gp(f"unset logscale")
gp("unset bmargin")
gp("unset tmargin")
gp("unset lmargin")
gp("unset rmargin")
gp("set origin 0,0.0")
gp("set size 1.0,0.70")
gp("set tmargin at screen 0.7")
gp("set bmargin at screen 0.05")
gp("set lmargin at screen 0.05")
gp("set rmargin at screen 0.80")
gp(f"set xrange [-1 :{num_items} ]")
gp(f"set yrange [{num_items} :-1 ]")
gp(f"set border linewidth {linewidth}")
for k, (x, y) in _lbl.items():
gp(f"set label '{k}' at {x},{y} left rotate by 45 font ', {lbl_fontsize}' front")
for k, (x, y) in diaglbl.items():
gp(f"set label '{k}' at {x},{y} left rotate by 45 font ', {diaglblfontsize}' front")
if palette == "":
gp("set palette defined ( -0.001 'white', 0 'blue', 0.5 'light-grey', 1 'red' )")
else:
gp(palette)
gp(f"set cbrange [{intst_min}:{intst_max}]")
gp(f"set cbtics font ',{lbl_fontsize}'")
if not showcolorbox: gp(f"unset colorbox")
for cmd in cmds_lbl_bottom: gp(cmd)
for cmd in cmds_bottom: gp(cmd)
for cmd in cmd_box: gp(cmd)
if mode == 'sparse':
gp("plot \\")
gp(f"'{fl_temp}' using 1:2:3 with points pointtype 6 pointsize 0.5 linewidth 0.0 linecolor palette, \\")
if showsparselabel:
gp(f"'{fl_temp}' using 1:2:(sprintf('%d,%d', int($1), int($2))) with labels offset 0.5,.3 rotate by 45 font ',3', \\")
gp("")
if mode == 'image':
gp(f"plot '{fl_temp}' using 1:2:3 with image")
if mode == 'pm3d':
gp("set view map")
gp(f"splot '{fl_temp}' using 1:2:3 with pm3d")
gp("unset multiplot")
gp("exit")
return None
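# --- Hypothetical usage sketch (added for exposition; not part of the original module) ---
# Minimal call of plot_dmat on a random symmetric matrix. Assumes gnuplot and GnuplotPy3
# are available and that writing 'demo_dmat.eps' and its data file is acceptable; the
# file name and labels below are made up.
def _demo_plot_dmat():
    demo = np.random.rand(60, 60)
    demo = 0.5 * (demo + demo.T)     # symmetrize so it looks like a distance matrix
    plot_dmat(demo, "demo_dmat", lbl = { "helix A" : (5, 20), "helix B" : (35, 55) })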
def calc_rigid_framework(rmsd_dmat, seqi_fwk_list, num_seq, len_res, min_size = 5, min_mean = 0.1, epsilon = 0.00001):
''' The idea of rigid framework is taken from DOI: 10.1371/journal.pone.0077363 .
Basically, every residue should be assigned to either "rigid" or "not
rigid". The change of mean value of the submatrix upon the selection or
deselection of a residue would determine the assignment. If the change of
mean is larger than a threshold, the choice (either select or not) should
not be made. Check residues in both frameworks.
idx in rigid fwk, should it be deselected?
idx not in rigid fwk, should it be selected?
'''
atom_list_orig = [ i for b, e in seqi_fwk_list for i in range(b * len_res, e * len_res) ]
# Calculate the mean of submatrix upon the choice of rigid fwk from input...
def calc_submean(rmsd_dmat, atom_list_orig):
rmsd_dmat_sub_aux_orig = np.take(rmsd_dmat, atom_list_orig, axis = 1)
rmsd_dmat_sub_orig = np.take(rmsd_dmat_sub_aux_orig, atom_list_orig, axis = 0)
mean_rmsd_dmat_sub_orig = np.nanmean(rmsd_dmat_sub_orig, keepdims = False)
return mean_rmsd_dmat_sub_orig
mean_rmsd_dmat_sub_orig = calc_submean(rmsd_dmat, atom_list_orig)
print(f"RMSD (Init): {mean_rmsd_dmat_sub_orig}")
    # Find atoms (columns) whose mean RMSD is NaN or falls below min_mean...
mean_rmsd_dmat_orig = np.nanmean(rmsd_dmat, axis = 0, keepdims = False)
mean_rmsd_dmat_orig[np.isnan(mean_rmsd_dmat_orig)] = 0.0
nan_list = np.argwhere(mean_rmsd_dmat_orig < min_mean).reshape(-1)
# Inspect every residue (col) represented by seqi...
    # Easier to operate on the atom list; this is ad hoc code anyway
# - idx in rigid fwk, should it be deselected?
# - idx not in rigid fwk, should it be selected?
rm_list = []
add_list = []
for seqi in range(num_seq):
# Infer atomi from seqi by a scale of len_res...
atomi = seqi * len_res
# If this residue is rigid???
if atomi in atom_list_orig:
# Remove nan column...
if atomi in nan_list:
rm_list.append(atomi)
continue
atom_list_aux = atom_list_orig.copy()
# Remove the associated atoms...
for i in range(len_res): atom_list_aux.remove(atomi + i)
            # Calculate the new mean RMSD...
mean_rmsd_dmat_sub_aux = calc_submean(rmsd_dmat, atom_list_aux)
            # Keep this residue unless removing it lowers the mean RMSD by more than the relative epsilon
if mean_rmsd_dmat_sub_aux > (1 - epsilon) * mean_rmsd_dmat_sub_orig: continue
rm_list.append(atomi)
# Or this residue is not rigid???
else:
# Don't consider nan...
if atomi in nan_list: continue
atom_list_aux = atom_list_orig.copy()
# Remove the associated atoms...
for i in range(len_res): atom_list_aux.append(atomi + i)
# Calcualte the new mean rmsd...
mean_rmsd_dmat_sub_aux = calc_submean(rmsd_dmat, atom_list_aux)
            # Only add this residue if doing so raises the mean RMSD by no more than the relative epsilon
if mean_rmsd_dmat_sub_aux > (1 + epsilon) * mean_rmsd_dmat_sub_orig: continue
add_list.append(atomi)
    # Remove atoms that are no longer considered rigid...
for atomi in rm_list:
# Remove the associated atoms...
for i in range(len_res): atom_list_orig.remove(atomi + i)
    # Add atoms that were previously not considered rigid...
for atomi in add_list:
        # Add the associated atoms...
for i in range(len_res): atom_list_orig.append(atomi + i)
atom_list_orig.sort()
fwk_list = pr.utils.group_consecutive_integer(atom_list_orig)
fwk_list = [ i for i in fwk_list if i[-1] - i[0] + 1 >= min_size * len_res ]
atom_list_orig = [ i for fwk in fwk_list for i in fwk ]
mean_rmsd_dmat_sub_orig = calc_submean(rmsd_dmat, atom_list_orig)
print(f"RMSD (Final): {mean_rmsd_dmat_sub_orig}")
## return [ i for i in fwk_list if i[-1] - i[0] + 1 >= min_size * len_res ]
return fwk_list
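# --- Illustrative sketch (added for exposition; not part of the original module) ---
# A toy version of the acceptance test used in calc_rigid_framework above: a residue is
# removed from (or added to) the framework only if the mean of the RMSD submatrix over
# the selected atoms changes by more than a relative epsilon. Names are hypothetical.
def _submatrix_mean_sketch(rmsd_dmat, atom_list):
    sub = np.take(np.take(rmsd_dmat, atom_list, axis = 1), atom_list, axis = 0)
    return np.nanmean(sub)
def _removal_accepted_sketch(rmsd_dmat, atom_list, atoms_of_residue, epsilon = 0.00001):
    """ True if dropping `atoms_of_residue` lowers the submatrix mean by more than a relative epsilon. """
    before = _submatrix_mean_sketch(rmsd_dmat, atom_list)
    after = _submatrix_mean_sketch(rmsd_dmat, [ i for i in atom_list if i not in atoms_of_residue ])
    return after <= (1 - epsilon) * before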
def plot_rmsd_dmat(
dmat, # Input data, which is a distance matrix
fl_dmat, # Filename of the exported file
lbl = {}, # Labels used to mark on the diagonal
lbl_fontsize = 8, # Fontsize for label
diaglbl = {}, # diagonal label (usually for showing index)
diaglblfontsize = 5,
fwk_list = [],
fwk_linewidth = 2,
fwk_curve_color = "black",
fwk_box_color = "black",
width = 6, # inch
height = 7, # inch
fontsize = 14, # pt
linewidth = 1.0, # pt
lbl_linewidth = 2.0, # pt
curve_linewidth = 2.0, # pt
curve_color = "gray",
palette = "", # Palette definition
intst_min = "0", # Min intensity value
intst_max = "*", # Max intensity value
vrange = [],
showzero = True,
showcolorbox = True,
NaN = "NaN",
temp = True,
mode = "image", # "image", "sparse", "pm3d"
showsparselabel = False,
cmds_top = [], # Customized command for upper panel
cmds_bottom = [], # Customized command for bottom panel
):
assert len(vrange) == 0 or len(vrange) == 2, "vrange has to be an empty or 2-member tuple. "
    # Partial (zoomed-in) value range requested?
range_default = ("*", "*")
if len(vrange) == 2: fl_dmat = f"{fl_dmat}.zoom"
# Get the mean...
column_mean_dmat = np.nanmean(dmat, axis = 0, keepdims = False)
# Draw lbl (optional)...
cmds_lbl_top = [""]
cmds_lbl_bottom = [""]
color_lbl = '#BBBBBB'
_lbl = {}
if len(lbl) > 0:
for k, (b,e) in lbl.items():
# Vertical lines (beginning of a region)
cmd = f"set arrow front from {b},graph 0 to {b},graph 1 nohead dashtype 2 linewidth {lbl_linewidth} linecolor rgb '{color_lbl}'"
cmds_lbl_bottom.append(cmd)
cmds_lbl_top.append(cmd)
# Vertical lines (end of a region)
cmd = f"set arrow front from {e},graph 0 to {e},graph 1 nohead dashtype 2 linewidth {lbl_linewidth} linecolor rgb '{color_lbl}'"
cmds_lbl_bottom.append(cmd)
cmds_lbl_top.append(cmd)
# Horizontal lines (beginning of a region)
cmd = f"set arrow front from graph 0,first {b} to graph 1,first {b} nohead dashtype 2 linewidth {lbl_linewidth} linecolor rgb '{color_lbl}'"
cmds_lbl_bottom.append(cmd)
# Horizontal lines (end of a region)
cmd = f"set arrow front from graph 0,first {e} to graph 1,first {e} nohead dashtype 2 linewidth {lbl_linewidth} linecolor rgb '{color_lbl}'"
cmds_lbl_bottom.append(cmd)
# Put labels on the diagonal...
_lbl[k] = [ (b + e) // 2, (b + e) // 2 ]
# [[[ Visualize ]]]
num_items = len(dmat)
if intst_max == "*":
intst_min = np.nanmin(dmat)
intst_max = np.nanmax(dmat)
intst_column_mean_min = np.min( [np.nanmin(column_mean_dmat), 0] )
intst_column_mean_max = np.max( [np.nanmax(column_mean_dmat), 0] )
# Create tempfile to visualize half matrix...
fl_temp = f"{fl_dmat}.dat"
if temp: fl_temp = tempfile.mktemp(".temp.dat")
with open(fl_temp,'w') as fh:
for j in range(num_items):
for k in range(j if mode != 'image' else num_items):
if mode == "sparse":
if np.isnan(dmat[j, k]): continue
## if not intst_min < dmat[j, k] < intst_max: continue
                val = NaN if np.isnan(dmat[j, k]) else dmat[j, k]
# -*- coding: utf-8 -*-
"""
Copyright ©2017. The Regents of the University of California (Regents). All Rights Reserved.
Permission to use, copy, modify, and distribute this software and its documentation for educational,
research, and not-for-profit purposes, without fee and without a signed licensing agreement, is
hereby granted, provided that the above copyright notice, this paragraph and the following two
paragraphs appear in all copies, modifications, and distributions. Contact The Office of Technology
Licensing, UC Berkeley, 2150 Shattuck Avenue, Suite 510, Berkeley, CA 94720-1620, (510) 643-
7201, <EMAIL>, http://ipira.berkeley.edu/industry-info for commercial licensing opportunities.
IN NO EVENT SHALL REGENTS BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL,
INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF
THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF REGENTS HAS BEEN
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
REGENTS SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY, PROVIDED
HEREUNDER IS PROVIDED "AS IS". REGENTS HAS NO OBLIGATION TO PROVIDE
MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
"""
"""
Tests tensor dataset basic functionality
Author: <NAME>
"""
import copy
import logging
import numpy as np
import os
import random
import shutil
import sys
import time
from unittest import TestCase, TestSuite, TextTestRunner
import autolab_core.utils as utils
from autolab_core.constants import *
from autolab_core import TensorDataset, YamlConfig
SEED = 4134298
HEIGHT = 3
WIDTH = 3
CHANNELS = 3
DATAPOINTS_PER_FILE = 10
TEST_TENSOR_DATASET_NAME = 'test_dataset'
TENSOR_CONFIG = {
'datapoints_per_file': DATAPOINTS_PER_FILE,
'fields': {
'float_value': {
'dtype': 'float32'
},
'int_value': {
'dtype': 'int16'
},
'str_value': {
'dtype': 'str'
},
'vector_value': {
'dtype': 'float32',
'height': HEIGHT
},
'matrix_value': {
'dtype': 'float32',
'height': HEIGHT,
'width': WIDTH
},
'image_value': {
'dtype': 'float32',
'height': HEIGHT,
'width': WIDTH,
'channels': CHANNELS
},
}
}
class TensorDatasetTest(TestCase):
@classmethod
def setUpClass(cls):
if os.path.exists(TEST_TENSOR_DATASET_NAME):
shutil.rmtree(TEST_TENSOR_DATASET_NAME)
def test_single_read_write(self):
# seed
np.random.seed(SEED)
random.seed(SEED)
# open dataset
create_successful = True
try:
dataset = TensorDataset(TEST_TENSOR_DATASET_NAME,
TENSOR_CONFIG)
except:
create_successful = False
self.assertTrue(create_successful)
# check field names
write_datapoint = dataset.datapoint_template
for field_name in write_datapoint.keys():
self.assertTrue(field_name in dataset.field_names)
# add the datapoint
write_datapoint['float_value'] = np.random.rand()
write_datapoint['int_value'] = int(100 * np.random.rand())
write_datapoint['str_value'] = utils.gen_experiment_id()
write_datapoint['vector_value'] = np.random.rand(HEIGHT)
write_datapoint['matrix_value'] = np.random.rand(HEIGHT, WIDTH)
write_datapoint['image_value'] = np.random.rand(HEIGHT, WIDTH, CHANNELS)
dataset.add(write_datapoint)
# check num datapoints
self.assertTrue(dataset.num_datapoints == 1)
# add metadata
metadata_num = np.random.rand()
dataset.add_metadata('test', metadata_num)
# check written arrays
dataset.flush()
for field_name in dataset.field_names:
filename = os.path.join(TEST_TENSOR_DATASET_NAME, 'tensors', '%s_00000.npz' %(field_name))
value = np.load(filename)['arr_0']
if isinstance(value[0], str):
self.assertTrue(value[0] == write_datapoint[field_name])
else:
self.assertTrue(np.allclose(value[0], write_datapoint[field_name]))
# re-open the dataset
del dataset
dataset = TensorDataset.open(TEST_TENSOR_DATASET_NAME)
# read metadata
self.assertTrue(np.allclose(dataset.metadata['test'], metadata_num))
# read datapoint
read_datapoint = dataset.datapoint(0)
for field_name in dataset.field_names:
if isinstance(read_datapoint[field_name], str):
self.assertTrue(read_datapoint[field_name] == write_datapoint[field_name])
else:
self.assertTrue(np.allclose(read_datapoint[field_name], write_datapoint[field_name]))
# check iterator
for read_datapoint in dataset:
for field_name in dataset.field_names:
if isinstance(read_datapoint[field_name], str):
self.assertTrue(read_datapoint[field_name] == write_datapoint[field_name])
else:
self.assertTrue(np.allclose(read_datapoint[field_name], write_datapoint[field_name]))
# read individual fields
for field_name in dataset.field_names:
read_datapoint = dataset.datapoint(0, field_names=[field_name])
if isinstance(read_datapoint[field_name], str):
self.assertTrue(read_datapoint[field_name] == write_datapoint[field_name])
else:
self.assertTrue(np.allclose(read_datapoint[field_name], write_datapoint[field_name]))
# re-open the dataset in write-only
del dataset
dataset = TensorDataset.open(TEST_TENSOR_DATASET_NAME,
access_mode=READ_WRITE_ACCESS)
# delete datapoint
dataset.delete_last()
# check that the dataset is correct
self.assertTrue(dataset.num_datapoints == 0)
self.assertTrue(dataset.num_tensors == 0)
for field_name in dataset.field_names:
filename = os.path.join(TEST_TENSOR_DATASET_NAME, 'tensors', '%s_00000.npz' %(field_name))
self.assertFalse(os.path.exists(filename))
# remove dataset
if os.path.exists(TEST_TENSOR_DATASET_NAME):
shutil.rmtree(TEST_TENSOR_DATASET_NAME)
def test_multi_tensor_read_write(self):
# seed
np.random.seed(SEED)
random.seed(SEED)
# open dataset
dataset = TensorDataset(TEST_TENSOR_DATASET_NAME,
TENSOR_CONFIG)
write_datapoints = []
for i in range(DATAPOINTS_PER_FILE+1):
write_datapoint = {}
write_datapoint['float_value'] = np.random.rand()
write_datapoint['int_value'] = int(100 * np.random.rand())
write_datapoint['str_value'] = utils.gen_experiment_id()
write_datapoint['vector_value'] = np.random.rand(HEIGHT)
write_datapoint['matrix_value'] = np.random.rand(HEIGHT, WIDTH)
write_datapoint['image_value'] = np.random.rand(HEIGHT, WIDTH, CHANNELS)
dataset.add(write_datapoint)
write_datapoints.append(write_datapoint)
# check num datapoints
self.assertTrue(dataset.num_datapoints == DATAPOINTS_PER_FILE+1)
self.assertTrue(dataset.num_tensors == 2)
# check read
dataset.flush()
del dataset
dataset = TensorDataset.open(TEST_TENSOR_DATASET_NAME,
access_mode=READ_WRITE_ACCESS)
for i, read_datapoint in enumerate(dataset):
write_datapoint = write_datapoints[i]
for field_name in dataset.field_names:
if isinstance(read_datapoint[field_name], str):
self.assertTrue(read_datapoint[field_name] == write_datapoint[field_name])
else:
self.assertTrue(np.allclose(read_datapoint[field_name], write_datapoint[field_name]))
for i, read_datapoint in enumerate(dataset):
# check iterator item
write_datapoint = write_datapoints[i]
for field_name in dataset.field_names:
if isinstance(read_datapoint[field_name], str):
self.assertTrue(read_datapoint[field_name] == write_datapoint[field_name])
else:
self.assertTrue(np.allclose(read_datapoint[field_name], write_datapoint[field_name]))
# check random item
ind = np.random.choice(dataset.num_datapoints)
write_datapoint = write_datapoints[ind]
read_datapoint = dataset.datapoint(ind)
for field_name in dataset.field_names:
if isinstance(read_datapoint[field_name], str):
self.assertTrue(read_datapoint[field_name] == write_datapoint[field_name])
else:
                    self.assertTrue(np.allclose(read_datapoint[field_name], write_datapoint[field_name]))
"""
"""
import datetime
import os
# import sys
import logging
import numpy as np
import scipy as sp
import scipy.optimize # noqa
import tqdm
import h5py
import zcode.inout as zio
import zcode.math as zmath
from . import spectra, radiation # , utils
from . import PATH_DATA, MASS_EXTR, FEDD_EXTR, RADS_EXTR
from . constants import MSOL, MELC, MPRT, SPLC, K_BLTZ, H_PLNK
NUM = 10
np.seterr(divide='ignore', invalid='ignore', over='raise')
# MASS_EXTR = [1e6, 5e10]
# FEDD_EXTR = [1e-5, 1e-1]
# RADS_EXTR = [3.0, 1e5]
GRID_NAMES = ['mass', 'fedd', 'rmin', 'rmax']
ALPHA_VISC = 0.1
BETA_GP = 0.5
FRAC_ADV = 0.5
GAMMA_SH = (32 - 24*BETA_GP - 3*BETA_GP**2) / (24 - 21*BETA_GP)
EPS = (5/3 - GAMMA_SH) / (GAMMA_SH - 1.0)
EPS_PRIME = EPS / FRAC_ADV
DELTA = MELC/MPRT
GAE = np.sqrt(1.0 + 18.0 * np.square(ALPHA_VISC/(5.0 + 2*EPS_PRIME))) - 1.0
C1 = GAE * (5 + 2*EPS_PRIME) / (3 * np.square(ALPHA_VISC))
# C2 = np.sqrt(2 * EPS_PRIME * C1 / 3)
C3 = 2 * C1 / 3
MEC2 = MELC * SPLC**2
S1 = 1.42e9 * np.sqrt(1 - BETA_GP) * np.sqrt(C3 / C1 / ALPHA_VISC)
S3 = 1.05e-24
KB_OVER_MEC2 = K_BLTZ / MEC2
META = dict(ALPHA_VISC=ALPHA_VISC, BETA_GP=BETA_GP, FRAC_ADV=FRAC_ADV)
def main(num=None, recreate=True):
if num is None:
num = NUM
fname = grid_fname(num)
exists = os.path.exists(fname)
logging.warning("Grid for num={} exists: {} ({})".format(num, exists, fname))
logging.info("recreate: {}".format(recreate))
if not exists or recreate:
grid, grid_names, grid_temps, grid_valid = get_temp_grid(num)
save_grid(fname, grid, grid_names, grid_temps, grid_valid)
return
def get_interp(num=None):
if num is None:
num = NUM
fname = grid_fname(num)
grid, grid_names, grid_temps, grid_valid = load_grid(fname=fname)
grid_temps[~grid_valid] = np.mean(grid_temps[grid_valid])
# mesh = np.meshgrid(*grid)
# mesh = np.log10(mesh)
mesh = [np.log10(gg) for gg in grid]
grid_temps = np.log10(grid_temps)
interp_ll = sp.interpolate.RegularGridInterpolator(mesh, grid_temps)
def interp(xx):
try:
res = 10**interp_ll(np.log10(xx))
except ValueError:
logging.error("ValueError for argument: '{}'".format(xx))
logging.error("ValueError for argument: log: '{}'".format(np.log10(xx)))
for gg in interp_ll.grid:
logging.error("\t{}".format(zmath.minmax(gg)))
raise
return res
return interp
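# --- Hypothetical usage sketch (added for exposition; not part of the original module) ---
# The interpolator returned by get_interp() expects points as [mass (grams), eddington
# factor, r_min, r_max], each inside the grid extrema (MASS_EXTR in solar masses,
# FEDD_EXTR, RADS_EXTR). It assumes the temperature grid file for `num` was already
# generated via main(); the numbers below are illustrative only.
def _demo_interp():
    interp = get_interp()
    temp_e = interp([1.0e8 * MSOL, 1.0e-3, 3.0, 1.0e3])   # electron temperature [K]
    return temp_e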
def grid_fname(num):
fname = "temp_grid_n{}.hdf5".format(num)
fname = os.path.join(PATH_DATA, fname)
return fname
def save_grid(fname, grid, grid_names, grid_temps, grid_valid):
fname = os.path.abspath(fname)
with h5py.File(fname, 'w') as out:
group = out.create_group('grid')
for nn, vv in zip(grid_names, grid):
group.create_dataset(nn, data=vv)
group = out.create_group('parameters')
for nn, vv in META.items():
group.create_dataset(nn, data=vv)
out.create_dataset('temps', data=grid_temps)
out.create_dataset('valid', data=grid_valid)
logging.info("Saved to '{}' size '{}'".format(fname, zio.get_file_size(fname)))
return
def load_grid(*args, num=None, fname=None):
if len(args):
raise ValueError("Only passed kwargs to `load_grid()`!")
if fname is None:
if num is None:
num = NUM
fname = grid_fname(num)
fname = os.path.abspath(fname)
if not os.path.exists(fname):
raise ValueError("fname '{}' does not exist!".format(fname))
with h5py.File(fname, 'r') as h5:
grid_group = h5['grid']
# grid_names = list(grid_group.keys())
grid_names = []
grid = []
for nn in GRID_NAMES:
grid.append(grid_group[nn][:])
grid_names.append(nn)
grid_temps = h5['temps'][:]
grid_valid = h5['valid'][:]
return grid, grid_names, grid_temps, grid_valid
def get_temp_grid(num, fix=True):
grid_extr = [np.array(MASS_EXTR)*MSOL, FEDD_EXTR, RADS_EXTR, RADS_EXTR]
grid_names = ['mass', 'fedd', 'rmin', 'rmax']
grid = [np.logspace(*np.log10(extr), num) for extr in grid_extr]
shape = [num for ii in range(len(grid))]
tot = np.product(shape)
grid_temps = np.zeros(shape)
grid_valid = np.ones(shape, dtype=bool)
cnt = 0
beg = datetime.datetime.now()
for idx in tqdm.tqdm(np.ndindex(*shape), total=tot):
# print(idx)
vals = [gg[ii] for gg, ii in zip(grid, idx)]
if vals[2] >= vals[3]:
grid_valid[idx] = False
continue
tt = solve_adaf_temp(*vals)
if tt is not None:
grid_temps[idx] = tt
cnt += 1
end = datetime.datetime.now()
dur = (end - beg)
dur_per = dur.total_seconds()/cnt
bads_nan = np.isnan(grid_temps)
grid_temps = np.nan_to_num(grid_temps)
bads = grid_valid & np.isclose(grid_temps, 0.0)
logging.warning("Success on : {}".format(zmath.frac_str(grid_temps[grid_valid] > 0.0)))
logging.warning("nan values: {}".format(zmath.frac_str(bads_nan)))
logging.warning("Bad values: {}".format(zmath.frac_str(bads)))
logging.warning("Done after {}, per iteration: {}".format(str(dur), dur_per))
if fix:
grid_temps = interp_bad_grid_vals(grid, grid_temps, grid_valid)
return grid, grid_names, grid_temps, grid_valid
def solve_adaf_temp(mass, fedd, rmin, rmax, debug=False):
msol = mass / MSOL
lvl = logging.WARNING
def heat_cool(temp):
"""Calculate heating and cooling rates for disk as a whole.
"""
nonlocal mass, fedd, rmin, rmax, msol
alpha = ALPHA_VISC
beta = BETA_GP
eps_prime = EPS_PRIME
delta = DELTA
rmin = rmin
rmax = rmax
theta_e = KB_OVER_MEC2 * temp
xm = spectra.xm_from_te(temp, msol, fedd)
tau_es = 23.87 * fedd * (0.3 / alpha) * (0.5 / C1) * np.sqrt(3/rmin)
mean_amp_a = 1.0 + 4.0 * theta_e + 16*np.square(theta_e)
alpha_crit = - np.log(tau_es) / np.log(mean_amp_a)
s2 = 1.19e-13 * xm
# Viscous Heating
# ---------------
_ge = radiation._heat_func_g(theta_e)
q1 = 1.2e38 * _ge * C3 * beta * msol * np.square(fedd) / np.square(alpha*C1) / rmin
q2 = delta * 9.39e38 * eps_prime * C3 * msol * fedd / rmin
heat_elc = q1 + q2
# Synchrotron
# -----------
# Eq. 24 [Hz]
f_p = S1 * s2 * np.sqrt(fedd/msol) * np.square(temp) * np.power(rmin, -1.25)
lum_synch_peak = np.power(S1 * s2, 3) * S3 * np.power(rmin, -1.75) * np.sqrt(msol)
lum_synch_peak *= np.power(fedd, 1.5) * np.power(temp, 7) / f_p
# Eq. 26
power_synch = 5.3e35 * np.power(xm/1000, 3) * np.power(alpha/0.3, -1.5)
power_synch *= np.power((1 - beta)/0.5, 1.5) * np.power(C1/0.5, -1.5)
# Bremsstrahlung
# --------------
# Eq. 29
power_brems = 4.78e34 * np.log(rmax/rmin) / np.square(alpha * C1)
power_brems *= radiation._brems_fit_func_f(theta_e) * fedd * msol
# Compton
# -------
power_compt = lum_synch_peak * f_p / (1 - alpha_crit)
power_compt *= (np.power(6.2e7 * (temp/1e9) / (f_p/1e12), 1 - alpha_crit) - 1.0)
return heat_elc, power_synch, power_brems, power_compt
def _func(logt):
tt = np.power(10.0, logt)
qv, qs, qb, qc = heat_cool(tt)
rv = qv - (qs + qb + qc)
return rv
start_temps = [1e11, 1e10, 1e12, 1e9, 1e8]
success = False
for ii, t0 in enumerate(start_temps):
try:
logt = sp.optimize.newton(_func, np.log10(t0), tol=1e-4, maxiter=100)
temp_e = np.power(10.0, logt)
except (RuntimeError, FloatingPointError) as err:
if debug:
                logging.warning("Trial '{}' (t={:.1e}) optimization failed: {}".format(
ii, t0, str(err)))
else:
success = True
break
if success:
# logging.log(lvl, "Success with `t0`={:.2e} ==> t={:.2e}".format(t0, temp_e))
pass
else:
err = ("Unable to find electron temperature!"
"\nIf the eddington factor is larger than 1e-2, "
"this may be expected!")
if debug:
logging.log(lvl, "FAILED to find electron temperature!")
logging.log(lvl, "m = {:.2e}, f = {:.2e}".format(msol, fedd))
logging.log(lvl, err)
# raise RuntimeError(err)
return None
qv, qs, qb, qc = heat_cool(temp_e)
heat = qv
cool = qs + qb + qc
diff = np.fabs(heat - cool) / heat
if diff < 1e-2:
if debug:
logging.log(lvl, "Heating vs. cooling frac-diff: {:.2e}".format(diff))
else:
if debug:
err = "Electron temperature seems inconsistent (Te = {:.2e})!".format(temp_e)
err += "\n\tm: {:.2e}, f: {:.2e}".format(msol, fedd)
err += "\n\tHeating: {:.2e}, Cooling: {:.2e}, diff: {:.4e}".format(heat, cool, diff)
err += "\n\tThis may mean there is an input error (e.g. mdot may be too large... or small?)."
logging.log(lvl, err)
return None
return temp_e
def interp_bad_grid_vals(grid, grid_temps, grid_valid):
grid_temps = np.copy(grid_temps)
bads = grid_valid & np.isclose(grid_temps, 0.0)
shape = [len(gg) for gg in grid]
logging.warning("Fixing bad values: {}".format(zmath.frac_str(bads)))
neighbors = []
good_neighbors = []
bads_inds = np.array(np.where(bads)).T
for bad in tqdm.tqdm(bads_inds):
nbs = []
# print(bad)
cnt = 0
for dim in range(4):
for side in [-1, +1]:
test = [bb for bb in bad]
test[dim] += side
if test[dim] < 0 or test[dim] >= shape[dim]:
continue
test = tuple(test)
# print("\t", test)
# print("\t", temps[test])
nbs.append(test)
if grid_temps[test] > 0.0:
cnt += 1
neighbors.append(nbs)
good_neighbors.append(cnt)
num_nbs = [len(nbs) for nbs in neighbors]
logging.warning("All neighbors: {}".format(zmath.stats_str(num_nbs)))
logging.warning("Good neighbors: {}".format(zmath.stats_str(good_neighbors)))
goods = np.zeros(len(neighbors))
MAX_TRIES = 10
still_bad = list(np.argsort(good_neighbors)[::-1])
tries = 0
while len(still_bad) > 0 and tries < MAX_TRIES:
keep_bad = []
for kk, ii in enumerate(still_bad):
values = np.zeros(num_nbs[ii])
for jj, nbr in enumerate(neighbors[ii]):
values[jj] = grid_temps[nbr]
cnt = np.count_nonzero(values)
if cnt == 0:
keep_bad.append(kk)
continue
new = np.sum(np.log10(values[values > 0])) / cnt
loc = tuple(bads_inds[ii])
# print("\t", loc, new, cnt)
grid_temps[loc] = 10**new
goods[ii] = cnt
still_bad = [still_bad[kk] for kk in keep_bad]
num_still = len(still_bad)
logging.warning("Try: {}, still_bad: {}".format(tries, num_still))
if (tries+1 >= MAX_TRIES) and (num_still > 0):
logging.error("After {} tries, still {} bad!!".format(tries, num_still))
tries += 1
logging.warning("Filled neighbors: {}".format(zmath.stats_str(goods)))
logging.warning("Full temps array: {}".format(zmath.stats_str(grid_temps[grid_valid])))
return grid_temps
def plot_grid(grid, grid_names, temps, valid, interp=None):
import matplotlib.pyplot as plt
import zcode.plot as zplot
extr = zmath.minmax(temps, filter='>')
smap = zplot.colormap(extr, 'viridis')
# bads = valid & np.isclose(temps, 0.0)
num = len(grid)
fig, axes = plt.subplots(figsize=[14, 14], nrows=num, ncols=num)
plt.subplots_adjust(hspace=0.4, wspace=0.4)
def_idx = [-4, -4, 4, -4]
for (ii, jj), ax in np.ndenumerate(axes):
if ii < jj:
ax.set_visible(False)
continue
ax.set(xscale='log', yscale='log')
xx = grid[jj]
if ii == jj:
# print(grid_names[ii], zmath.minmax(grid[ii], filter='>'))
# idx = list(range(num))
# idx.pop(ii)
# idx = tuple(idx)
# vals = np.mean(temps, axis=idx)
idx = [slice(None) if aa == ii else def_idx[aa] for aa in range(num)]
vals = temps[tuple(idx)]
ax.plot(xx, vals, 'k-')
if interp is not None:
num_test = 10
test = [np.ones(num_test)*grid[aa][def_idx[aa]] for aa in range(num)]
test[ii] = zmath.spacing(grid[ii], 'log', num_test)
test_vals = [interp(tt) for tt in np.array(test).T]
ax.plot(test[ii], test_vals, 'r--')
# bad_vals = np.count_nonzero(bads, axis=idx)
# tw = ax.twinx()
# tw.plot(xx, bad_vals, 'r--')
else:
# print(ii, jj)
# print("\t", ii, grid_names[ii], zmath.minmax(grid[ii], filter='>'))
# print("\t", jj, grid_names[jj], zmath.minmax(grid[jj], filter='>'))
# idx = [0, 1, 2, 3]
# idx.pop(np.max([ii, jj]))
# idx.pop(np.min([ii, jj]))
# vals = np.mean(temps, axis=tuple(idx))
# idx = [slice(None) if aa in [ii, jj] else num//2 for aa in range(num)]
idx = [slice(None) if aa in [ii, jj] else def_idx[aa] for aa in range(num)]
vals = temps[tuple(idx)]
if len(vals) == 0:
continue
yy = grid[ii]
xx, yy = np.meshgrid(xx, yy, indexing='ij')
ax.pcolor(xx, yy, vals, cmap=smap.cmap, norm=smap.norm)
if np.count_nonzero(vals > 0.0) == 0:
continue
tit = "{:.1e}, {:.1e}".format(*zmath.minmax(vals, filter='>'))
ax.set_title(tit, size=10)
# bad_vals = np.count_nonzero(bads, axis=tuple(idx))
# idx = (bad_vals > 0.0)
# aa = xx[idx]
# bb = yy[idx]
# cc = bad_vals[idx]
# ax.scatter(aa, bb, s=2*cc**2, color='0.5', alpha=0.5)
# ax.scatter(aa, bb, s=cc**2, color='r')
if interp is not None:
for kk in range(10):
idx = (vals > 0.0)
x0 = 10**np.random.uniform(*zmath.minmax(np.log10(xx[idx])))
y0 = 10**np.random.uniform(*zmath.minmax(np.log10(yy[idx])))
# y0 = np.random.choice(yy[idx])
temp = [grid[ll][def_idx[ll]] for ll in range(num)]
temp[ii] = y0
temp[jj] = x0
if temp[2] >= temp[3]:
temp[2] = 3.1
iv = interp(temp)
if not np.isfinite(iv) or np.isclose(iv, 0.0):
print("\nBAD")
print(temp)
print(iv)
for kk in range(num):
if def_idx[kk] == 0:
temp[kk] = temp[kk] * 1.11
elif def_idx[kk] == -1:
temp[kk] = 0.99 * temp[kk]
iv = interp(temp)
print("\t", temp)
print("\t", iv)
cc = smap.to_rgba(iv)
ss = 20
ax.scatter(temp[jj], temp[ii], color='0.5', s=2*ss)
ax.scatter(temp[jj], temp[ii], color=cc, s=ss)
if ii == num-1:
ax.set_xlabel(grid_names[jj])
if jj == 0 and ii != 0:
ax.set_ylabel(grid_names[ii])
return fig
class Fast_Mahadevan96:
def __init__(self, mass, fedd, rmin, rmax, temp_e=None, interp=None):
"""
"""
self.mass = mass
# Mass in units of solar=masses
self.msol = mass/MSOL
self.fedd = fedd
self.rmin = rmin
self.rmax = rmax
if temp_e is None:
if interp is None:
interp = get_interp()
temp_e = interp([mass, fedd, rmin, rmax])
self.temp_e = temp_e
xm_e = spectra.xm_from_te(temp_e, self.msol, fedd)
self.s2 = 1.19e-13 * xm_e
theta_e = radiation.dimensionless_temperature_theta(temp_e, MELC)
# Eq. 31
tau_es = 23.87 * fedd * (0.3 / ALPHA_VISC) * (0.5 / C1) * np.sqrt(3/rmin)
# Eq. 32
mean_amp_a = 1.0 + 4.0 * theta_e + 16*np.square(theta_e)
# Eq. 34
self.alpha_crit = - np.log(tau_es) / np.log(mean_amp_a)
return
def spectrum(self, freqs):
synch = self._calc_spectrum_synch(freqs)
brems = self._calc_spectrum_brems(freqs)
compt = self._calc_spectrum_compt(freqs)
spectrum = synch + brems + compt
return spectrum
def _calc_spectrum_synch(self, freqs):
"""Mahadevan 1996 - Eq. 25
Cutoff above peak frequency (i.e. ignore exponential portion).
Ignore low-frequency transition to steeper (22/13 slope) from rmax.
"""
msol = self.msol
fedd = self.fedd
scalar = np.isscalar(freqs)
freqs = np.atleast_1d(freqs)
lnu = S3 * np.power(S1*self.s2, 1.6)
lnu *= np.power(msol, 1.2) * np.power(fedd, 0.8)
lnu *= np.power(self.temp_e, 4.2) * np.power(freqs, 0.4)
nu_p = self._freq_synch_peak(self.temp_e, msol, fedd)
lnu[freqs > nu_p] = 0.0
if scalar:
lnu = np.squeeze(lnu)
return lnu
def _calc_spectrum_brems(self, freqs):
"""Mahadevan 1996 - Eq. 30
"""
msol = self.msol
fedd = self.fedd
temp = self.temp_e
const = 2.29e24 # erg/s/Hz
scalar = np.isscalar(freqs)
freqs = np.atleast_1d(freqs)
t1 = np.log(self.rmax/self.rmin) / np.square(ALPHA_VISC * C1)
t2 = np.exp(-H_PLNK*freqs / (K_BLTZ * temp)) * msol * np.square(fedd) / temp
fe = radiation._brems_fit_func_f(temp)
lbrems = const * t1 * fe * t2
if scalar:
lbrems = np.squeeze(lbrems)
return lbrems
def _calc_spectrum_compt(self, freqs):
"""Compton Scattering spectrum from upscattering of Synchrotron photons.
Mahadevan 1996 - Eq. 38
"""
fedd = self.fedd
temp = self.temp_e
scalar = np.isscalar(freqs)
freqs = np.atleast_1d(freqs)
f_p, l_p = self._synch_peak(fedd, self.msol, temp)
lsp = np.power(freqs/f_p, -self.alpha_crit) * l_p
lsp[freqs < f_p] = 0.0
# See Eq. 35
max_freq = 3*K_BLTZ*temp/H_PLNK
lsp[freqs > max_freq] = 0.0
if scalar:
lsp = np.squeeze(lsp)
return lsp
def _freq_synch_peak(self, temp, msol, fedd):
"""Mahadevan 1996 Eq. 24
"""
nu_p = S1 * self.s2 * np.sqrt(fedd/msol) * np.square(temp) * np.power(self.rmin, -1.25)
return nu_p
def _synch_peak(self, fedd, msol, temp):
f_p = self._freq_synch_peak(temp, msol, fedd)
l_p = np.power(S1 * self.s2, 3) * S3 * np.power(self.rmin, -1.75) * np.sqrt(msol)
l_p *= np.power(fedd, 1.5) * np.power(temp, 7) / f_p
return f_p, l_p
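# --- Hypothetical usage sketch (added for exposition; not part of the original module) ---
# Builds an ADAF spectrum for an assumed 1e8 Msol black hole accreting at fedd = 1e-3
# between r = 3 and 1e3 gravitational radii. Requires the precomputed electron-temperature
# grid (see main() / get_interp()); the numbers below are illustrative only.
def _demo_spectrum():
    adaf = Fast_Mahadevan96(1.0e8 * MSOL, 1.0e-3, 3.0, 1.0e3)
    freqs = np.logspace(8, 22, 200)    # [Hz]
    lnu = adaf.spectrum(freqs)         # [erg/s/Hz]
    return freqs, lnu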
class Fast_Mahadevan96_Array:
def __init__(self, mass, fedd, rmin, rmax, temp_e=None, interp=None):
"""
"""
self.mass = mass
# Mass in units of solar=masses
self.msol = mass/MSOL
self.fedd = fedd
self.rmin = rmin
self.rmax = rmax
if temp_e is None:
if interp is None:
interp = get_interp()
args = [mass, fedd, rmin, rmax]
shp = np.shape(args[0])
if not np.all([shp == np.shape(aa) for aa in args]):
all_shps = [np.shape(aa) for aa in args]
print("all shapes = ", all_shps)
raise ValueError("Shape mismatch!")
args = [aa.flatten() for aa in args]
args = np.array(args).T
temp_e = interp(args)
temp_e = temp_e.reshape(shp)
assert np.shape(temp_e) == np.shape(mass), "Output shape mismatch!"
self.temp_e = temp_e
xm_e = spectra.xm_from_te(temp_e, self.msol, fedd)
self.s2 = 1.19e-13 * xm_e
theta_e = radiation.dimensionless_temperature_theta(temp_e, MELC)
# Eq. 31
tau_es = 23.87 * fedd * (0.3 / ALPHA_VISC) * (0.5 / C1) * np.sqrt(3/rmin)
# Eq. 32
mean_amp_a = 1.0 + 4.0 * theta_e + 16*np.square(theta_e)
# Eq. 34
self.alpha_crit = - np.log(tau_es) / np.log(mean_amp_a)
return
def spectrum(self, freqs):
synch = self._calc_spectrum_synch(freqs)
brems = self._calc_spectrum_brems(freqs)
compt = self._calc_spectrum_compt(freqs)
spectrum = synch + brems + compt
return spectrum
def _calc_spectrum_synch(self, freqs):
"""Mahadevan 1996 - Eq. 25
Cutoff above peak frequency (i.e. ignore exponential portion).
Ignore low-frequency transition to steeper (22/13 slope) from rmax.
"""
msol = self.msol
fedd = self.fedd
        scalar = np.isscalar(freqs)
import warnings
import numpy as np
import pandas as pd
from astropy.time import Time
__all__ = [
"preprocessObservations",
"findAverageOrbits",
]
def preprocessObservations(
observations,
column_mapping,
astrometric_errors=None,
mjd_scale="utc",
attribution=False
):
"""
    Create two separate data frames: one with all the observation data needed to run THOR, stripped
    of object IDs, and the other with the known object IDs and any attempts to attribute unknown
    observations to the latest catalog of known objects from the MPC.
Parameters
----------
observations : `~pandas.DataFrame`
DataFrame containing at minimum a column of observation IDs, exposure times in MJD (with scale
set by mjd_scale), RA in degrees, Dec in degrees, 1-sigma error in RA in degrees, 1-sigma error in
Dec in degrees and the observatory code.
column_mapping : dict
Dictionary containing internal column names as keys mapped to column names in the data frame as values.
Should include the following:
{# Internal : # External
"obs_id" : column name or None,
"mjd" : column name,
"RA_deg" : column name,
"Dec_deg" : column name,
"RA_sigma_deg" : column name or None,
"Dec_sigma_deg" : column name or None,
"observatory_code" : column name,
"obj_id" : column name or None,
}
Description of columns and their assumed values:
'obs_id' : column name or None
Observation IDs as type string. If None, THOR will assign
an observation ID to each observation.
'mjd' : column name
Observation time in MJD, the input time scale can be set with the
'time_scale' parameter. Time scale will be converted if not in UTC.
'RA_deg' : column name
Topocentric J2000 Right Ascension in degrees.
'Dec_deg' : column name
Topocentric J2000 Declination in degrees.
'RA_sigma_deg' : column name or None
1-sigma astrometric uncertainty in RA in degrees.
If certain or all observations are missing astrometric errors, use
the 'astrometric_errors' parameter to configure defaults for all observatories
or for each observatory individually. If None, THOR will use the 'astrometric_error'
parameter to assign errors.
'Dec_sigma_deg' : column name or None
1-sigma astrometric uncertainty in Dec in degrees.
If certain or all observations are missing astrometric errors, use
the 'astrometric_errors' parameter to configure defaults for all observatories
or for each observatory individually. If None, THOR will use the 'astrometric_error'
parameter to assign errors.
'observatory_code' : column_name
The MPC observatory code from which each observation was made. THOR currently
only supports ground-based observatories.
'obj_id' : column_name or None
If known, the designation in unpacked or packed form. If unknown, object ID should be
set to 'NaN'. If None, THOR will assume no observations have been associated.
mjd_scale : str, optional
Time scale of the input MJD exposure times ("utc", "tdb", etc...)
attribution : bool, optional
        Placeholder boolean to trigger attribution.
Returns
-------
preprocessed_observations : `~pandas.DataFrame`
DataFrame with observations in the format required by THOR.
    preprocessed_associations : `~pandas.DataFrame`
        DataFrame mapping each observation ID to its known object ID ("None" if unknown).
Raises
------
ValueError
If the astrometric_errors parameter is not of type list or dictionary,
or if the errors are not correctly defined.
Warns
-----
UserWarning:
If the observation ID, object_ID, or astrometric error columns are not
present in the column_mapping dictionary.
"""
# Required columns THOR needs
cols = [
"obs_id",
"mjd",
"RA_deg",
"Dec_deg",
"RA_sigma_deg",
"Dec_sigma_deg",
"observatory_code",
"obj_id"
]
# Check if observation IDs need to be assigned
assign_obs_ids = False
if column_mapping["obs_id"] == None:
warning = (
"No observation ID column defined in the column_mapping dictionary.\n"
"Assigning observation IDs...\n"
)
warnings.warn(
warning,
UserWarning
)
assign_obs_ids = True
cols.remove("obs_id")
# Check if object IDs need to be assigned
assign_obj_ids = False
if column_mapping["obj_id"] == None:
warning = (
"No object ID column defined in the column_mapping dictionary.\n"
"Assuming no observations have been associated with a known object...\n"
)
warnings.warn(
warning,
UserWarning
)
assign_obj_ids = True
cols.remove("obj_id")
# Check if astrometric errors need to be added
use_astrometric_errors = False
if (column_mapping["RA_sigma_deg"] == None) and (column_mapping["Dec_sigma_deg"] == None):
warning = (
"No astrometric error columns defined in the column_mapping dictionary.\n"
"Using 'astrometric_errors' parameter to assign errors...\n"
)
warnings.warn(
warning,
UserWarning
)
use_astrometric_errors = True
cols.remove("RA_sigma_deg")
cols.remove("Dec_sigma_deg")
# Create a copy of the relevant columns in observations
obs_cols = [column_mapping[c] for c in cols]
preprocessed_observations = observations[obs_cols].copy()
# Rename preprocessed observation columns to those expected by THOR
# (involves inverting the column_mapping dictionary and removing any potential
# None values passed by the user)
column_mapping_inv = {v : k for k, v in column_mapping.items()}
if None in column_mapping_inv.keys():
column_mapping_inv.pop(None)
preprocessed_observations.rename(
columns=column_mapping_inv,
inplace=True)
if use_astrometric_errors:
if type(astrometric_errors) == list:
if len(astrometric_errors) != 2:
                err = (
                    "astrometric_errors list is not of length 2."
                )
                raise ValueError(err)
else:
preprocessed_observations.loc[:, "RA_sigma_deg"] = astrometric_errors[0]
preprocessed_observations.loc[:, "Dec_sigma_deg"] = astrometric_errors[1]
elif type(astrometric_errors) == dict:
for code, errors in astrometric_errors.items():
if len(errors) != 2:
err = (
"Astrometric errors for observatory {} should be a list of length 2 with\n"
"the 1-sigma astrometric uncertainty in RA as the first element and the\n"
"1-sigma astrometric uncertainty in Dec as the second element."
)
raise ValueError(err.format(code))
else:
observatory_mask = preprocessed_observations["observatory_code"].isin([code])
preprocessed_observations.loc[observatory_mask, "RA_sigma_deg"] = errors[0]
preprocessed_observations.loc[observatory_mask, "Dec_sigma_deg"] = errors[1]
else:
err = (
"'astrometric_errors' should be one of {None, list, dict}.\n"
"If None, then the given observations must have the ra_sigma_deg\n"
" and dec_sigma_deg columns.\n"
"If a dictionary, then each observatory code present observations in\n"
" the observations must have a corresponding key with a list of length 2\n"
" as their values. The first element in the list is assumed to be the 1-sigma\n"
" astrometric error in RA, while the second is assumed to be the same but in Dec.\n"
"If a list, then the first element in the list is assumed to be the 1-sigma\n"
" astrometric error in RA, while the second is assumed to be the same but in Dec.\n"
" Each observation will be given these errors regardless of if one is present or not.\n"
)
raise ValueError(err)
# Make sure all observations have astrometric errors
missing_codes = preprocessed_observations[(
(preprocessed_observations["RA_sigma_deg"].isna())
| (preprocessed_observations["Dec_sigma_deg"].isna())
)]["observatory_code"].unique()
if len(missing_codes) > 0:
err = (
"Missing astrometric errors for observations from:\n"
" {}\n"
)
raise ValueError(err.format(", ".join(missing_codes)))
# Make sure all observations are given in UTC, if not convert to UTC
if mjd_scale != "utc":
mjds = Time(
preprocessed_observations["mjd"].values,
format="mjd",
scale=mjd_scale
)
preprocessed_observations["mjd"] = mjds.utc.mjd
# Add _utc to mjd column name
preprocessed_observations.rename(
columns={
"mjd" : "mjd_utc"
},
inplace=True
)
# Make sure that the observations are sorted by observation time
preprocessed_observations.sort_values(
by=["mjd_utc"],
inplace=True
)
# Reset index after sort
preprocessed_observations.reset_index(
inplace=True,
drop=True
)
    # Assign observation IDs if needed
if assign_obs_ids:
preprocessed_observations.loc[:, "obs_id"] = ["obs{:09d}".format(i) for i in range(len(preprocessed_observations))]
else:
if type(preprocessed_observations["obs_id"]) != object:
warn = ("Observation IDs should be of type string, converting...")
warnings.warn(warn)
preprocessed_observations["obs_id"] = preprocessed_observations["obs_id"].astype(str)
# Assign object IDs if needed
if assign_obj_ids:
preprocessed_observations.loc[:, "obj_id"] = "None"
else:
if type(preprocessed_observations["obj_id"]) != object:
warn = ("Object IDs should be of type string, converting...")
warnings.warn(warn)
preprocessed_observations.loc[preprocessed_observations["obj_id"].isna(), "obj_id"] = "None"
preprocessed_observations["obj_id"] = preprocessed_observations["obj_id"].astype(str)
# Split observations into two dataframes (make THOR run only on completely blind observations)
preprocessed_associations = preprocessed_observations[[
"obs_id",
"obj_id"
]].copy()
preprocessed_observations = preprocessed_observations[[
"obs_id",
"mjd_utc",
"RA_deg",
"Dec_deg",
"RA_sigma_deg",
"Dec_sigma_deg",
"observatory_code",
]]
return preprocessed_observations, preprocessed_associations
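# Illustrative sketch (added for clarity, not part of the original module): the two
# shapes accepted by the 'astrometric_errors' argument handled above. The observatory
# codes and uncertainty values below are arbitrary examples.
EXAMPLE_ASTROMETRIC_ERRORS_LIST = [1.0 / 3600.0, 1.0 / 3600.0]  # [RA 1-sigma (deg), Dec 1-sigma (deg)] applied to every observation
EXAMPLE_ASTROMETRIC_ERRORS_DICT = {
    "I11": [0.1 / 3600.0, 0.1 / 3600.0],  # per-observatory 1-sigma uncertainties in degrees
    "F51": [0.2 / 3600.0, 0.2 / 3600.0],
}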
COLUMN_MAPPING = {
### Observation Parameters
# Observation ID
"obs_id" : "obsId",
# Exposure time
"exp_mjd" : "exp_mjd",
# Visit ID
"visit_id" : "visitId",
# Field ID
"field_id" : "fieldId",
# Field RA in degrees
"field_RA_deg" : "fieldRA_deg",
# Field Dec in degrees
"field_Dec_deg" : "fieldDec_deg",
# Night number
"night": "night",
# RA in degrees
"RA_deg" : "RA_deg",
# Dec in degrees
"Dec_deg" : "Dec_deg",
# Observatory code
"observatory_code" : "code",
# Observer's x coordinate in AU
"obs_x_au" : "HEclObsy_X_au",
# Observer's y coordinate in AU
"obs_y_au" : "HEclObsy_Y_au",
# Observer's z coordinate in AU
"obs_z_au" : "HEclObsy_Z_au",
# Magnitude (UNUSED)
"mag" : "VMag",
### Truth Parameters
# Object name
"name" : "designation",
# Observer-object distance in AU
"Delta_au" : "Delta_au",
# Sun-object distance in AU (heliocentric distance)
"r_au" : "r_au",
# Object's x coordinate in AU
"obj_x_au" : "HEclObj_X_au",
# Object's y coordinate in AU
"obj_y_au" : "HEclObj_Y_au",
# Object's z coordinate in AU
"obj_z_au" : "HEclObj_Z_au",
# Object's x velocity in AU per day
"obj_dx/dt_au_p_day" : "HEclObj_dX/dt_au_p_day",
# Object's y velocity in AU per day
"obj_dy/dt_au_p_day" : "HEclObj_dY/dt_au_p_day",
# Object's z velocity in AU per day
"obj_dz/dt_au_p_day" : "HEclObj_dZ/dt_au_p_day",
# Semi-major axis
"a_au" : "a_au",
# Inclination
"i_deg" : "i_deg",
# Eccentricity
"e" : "e",
}
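# Usage note (added for clarity): callers whose input tables use different column
# names can override individual entries of the default mapping, for example:
#   findAverageOrbits(observations, orbits,
#                     column_mapping={**COLUMN_MAPPING, "RA_deg": "ra", "Dec_deg": "dec"})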
def findAverageOrbits(
observations,
orbits,
d_values=None,
element_type="keplerian",
column_mapping=COLUMN_MAPPING
):
"""
Find the object with observations that represents
the most average in terms of cartesian velocity and the
heliocentric distance. Assumes that a subset of the designations in the orbits
dataframe are identical to at least some of the designations in the observations
dataframe. No propagation is done, so the orbits need to be defined at an epoch near
the time of observations, for example like the midpoint or start of a two-week window.
Parameters
----------
observations : `~pandas.DataFrame`
DataFrame containing observations.
orbits : `~pandas.DataFrame`
DataFrame containing orbits for each unique object in observations.
d_values : {list (N>=2), None}, optional
If None, will find average orbit in all of observations. If a list, will find an
average orbit between each value in the list. For example, passing dValues = [1.0, 2.0, 4.0] will
mean an average orbit will be found in the following bins: (1.0 <= d < 2.0), (2.0 <= d < 4.0).
element_type : {'keplerian', 'cartesian'}, optional
Find average orbits using which elements. If 'keplerian' will use a-e-i for average,
if 'cartesian' will use r, v.
[Default = 'keplerian']
verbose : bool, optional
Print progress statements?
[Default = True]
column_mapping : dict, optional
Column name mapping of observations to internally used column names.
[Default = `~thor.Config.COLUMN_MAPPING`]
Returns
-------
orbits : `~pandas.DataFrame`
DataFrame with name, r, v, exposure time, and sky-plane location of the average orbit in each bin of r.
"""
if element_type == "keplerian":
d_col = column_mapping["a_au"]
elif element_type == "cartesian":
d_col = column_mapping["r_au"]
else:
err = (
"element_type should be one of {'keplerian', 'cartesian'}"
)
raise ValueError(err)
dataframe = pd.merge(orbits, observations, on=column_mapping["name"]).copy()
dataframe.reset_index(inplace=True, drop=True)
d_bins = []
    if d_values is not None:
for d_i, d_f in zip(d_values[:-1], d_values[1:]):
d_bins.append(dataframe[(dataframe[d_col] >= d_i) & (dataframe[d_col] < d_f)])
else:
d_bins.append(dataframe)
average_orbits = []
for i, obs in enumerate(d_bins):
if len(obs) == 0:
# No real objects
orbit = pd.DataFrame({"orbit_id" : i + 1,
column_mapping["exp_mjd"] : np.NaN,
column_mapping["obj_x_au"] : np.NaN,
column_mapping["obj_y_au"] : np.NaN,
column_mapping["obj_z_au"] : np.NaN,
column_mapping["obj_dx/dt_au_p_day"] : np.NaN,
column_mapping["obj_dy/dt_au_p_day"] : np.NaN,
column_mapping["obj_dz/dt_au_p_day"] : np.NaN,
column_mapping["RA_deg"] : np.NaN,
column_mapping["Dec_deg"] : np.NaN,
column_mapping["r_au"] : np.NaN,
column_mapping["a_au"] : np.NaN,
column_mapping["i_deg"] : np.NaN,
column_mapping["e"] : np.NaN,
column_mapping["name"]: np.NaN}, index=[0])
average_orbits.append(orbit)
continue
if element_type == "cartesian":
rv = obs[[
column_mapping["obj_dx/dt_au_p_day"],
column_mapping["obj_dy/dt_au_p_day"],
column_mapping["obj_dz/dt_au_p_day"],
column_mapping["r_au"]
]].values
# Calculate the percent difference between the median of each velocity element
# and the heliocentric distance
            percent_diff = np.abs((rv - np.median(rv, axis=0)) / np.median(rv, axis=0))
# coding=utf-8
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT finetuning runner."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import csv
import os
import modeling
import optimization
import tokenization
import tensorflow as tf
import horovod.tensorflow as hvd
import time
from utils.utils import LogEvalRunHook, LogTrainRunHook, setup_xla_flags
from utils.gpu_affinity import set_affinity
import utils.dllogger_class
from dllogger import Verbosity
from utils.create_glue_data import *
import numpy as np
import tf_metrics
flags = tf.flags
FLAGS = flags.FLAGS
## Required parameters
flags.DEFINE_string(
"data_dir", None,
"The input data dir. Should contain the .tsv files (or other data files) "
"for the task.")
flags.DEFINE_string(
"bert_config_file", None,
"The config json file corresponding to the pre-trained BERT model. "
"This specifies the model architecture.")
flags.DEFINE_string("task_name", None, "The name of the task to train.")
flags.DEFINE_string("vocab_file", None,
"The vocabulary file that the BERT model was trained on.")
flags.DEFINE_string(
"output_dir", None,
"The output directory where the model checkpoints will be written.")
## Other parameters
flags.DEFINE_string(
"dllog_path", "/results/bert_dllog.json",
"filename where dllogger writes to")
flags.DEFINE_string(
"optimizer_type", "lamb",
"Optimizer type : adam or lamb")
flags.DEFINE_string(
"init_checkpoint", None,
"Initial checkpoint (usually from a pre-trained BERT model).")
flags.DEFINE_bool(
"do_lower_case", True,
"Whether to lower case the input text. Should be True for uncased "
"models and False for cased models.")
flags.DEFINE_integer(
"max_seq_length", 128,
"The maximum total input sequence length after WordPiece tokenization. "
"Sequences longer than this will be truncated, and sequences shorter "
"than this will be padded.")
flags.DEFINE_bool("do_train", False, "Whether to run training.")
flags.DEFINE_bool("do_eval", False, "Whether to run eval on the dev set.")
flags.DEFINE_bool(
"do_predict", False,
"Whether to run the model in inference mode on the test set.")
flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.")
flags.DEFINE_integer("eval_batch_size", 8, "Total batch size for eval.")
flags.DEFINE_integer("predict_batch_size", 8, "Total batch size for predict.")
flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.")
flags.DEFINE_bool("use_trt", False, "Whether to use TF-TRT")
flags.DEFINE_float("num_train_epochs", 3.0,
"Total number of training epochs to perform.")
flags.DEFINE_float(
"warmup_proportion", 0.1,
"Proportion of training to perform linear learning rate warmup for. "
"E.g., 0.1 = 10% of training.")
flags.DEFINE_integer("save_checkpoints_steps", 1000,
"How often to save the model checkpoint.")
flags.DEFINE_integer("display_loss_steps", 10,
"How often to print loss from estimator")
flags.DEFINE_integer("iterations_per_loop", 1000,
"How many steps to make in each estimator call.")
flags.DEFINE_integer("num_accumulation_steps", 1,
"Number of accumulation steps before gradient update"
"Global batch size = num_accumulation_steps * train_batch_size")
flags.DEFINE_bool("amp", True, "Whether to enable AMP ops. When false, uses TF32 on A100 and FP32 on V100 GPUS.")
flags.DEFINE_bool("use_xla", True, "Whether to enable XLA JIT compilation.")
flags.DEFINE_bool("horovod", False, "Whether to use Horovod for multi-gpu runs")
flags.DEFINE_bool(
"verbose_logging", False,
"If true, all of the warnings related to data processing will be printed. "
"A number of warnings are expected for a normal SQuAD evaluation.")
def file_based_input_fn_builder(input_file, batch_size, seq_length, is_training,
drop_remainder, hvd=None):
"""Creates an `input_fn` closure to be passed to Estimator."""
name_to_features = {
"input_ids": tf.io.FixedLenFeature([seq_length], tf.int64),
"input_mask": tf.io.FixedLenFeature([seq_length], tf.int64),
"segment_ids": tf.io.FixedLenFeature([seq_length], tf.int64),
"label_ids": tf.io.FixedLenFeature([], tf.int64),
}
def _decode_record(record, name_to_features):
"""Decodes a record to a TensorFlow example."""
example = tf.parse_single_example(record, name_to_features)
# tf.Example only supports tf.int64, but the TPU only supports tf.int32.
# So cast all int64 to int32.
for name in list(example.keys()):
t = example[name]
if t.dtype == tf.int64:
t = tf.to_int32(t)
example[name] = t
return example
def input_fn():
"""The actual input function."""
# For training, we want a lot of parallel reading and shuffling.
# For eval, we want no shuffling and parallel reading doesn't matter.
d = tf.data.TFRecordDataset(input_file)
if is_training:
if hvd is not None: d = d.shard(hvd.size(), hvd.rank())
d = d.repeat()
d = d.shuffle(buffer_size=100)
d = d.apply(
tf.contrib.data.map_and_batch(
lambda record: _decode_record(record, name_to_features),
batch_size=batch_size,
drop_remainder=drop_remainder))
return d
return input_fn
def create_model(bert_config, is_training, input_ids, input_mask, segment_ids,
labels, num_labels, use_one_hot_embeddings):
"""Creates a classification model."""
model = modeling.BertModel(
config=bert_config,
is_training=is_training,
input_ids=input_ids,
input_mask=input_mask,
token_type_ids=segment_ids,
use_one_hot_embeddings=use_one_hot_embeddings,
compute_type=tf.float32)
# In the demo, we are doing a simple classification task on the entire
# segment.
#
# If you want to use the token-level output, use model.get_sequence_output()
# instead.
output_layer = model.get_pooled_output()
hidden_size = output_layer.shape[-1].value
output_weights = tf.get_variable(
"output_weights", [num_labels, hidden_size],
initializer=tf.truncated_normal_initializer(stddev=0.02))
output_bias = tf.get_variable(
"output_bias", [num_labels], initializer=tf.zeros_initializer())
with tf.variable_scope("loss"):
if is_training:
# I.e., 0.1 dropout
output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)
logits = tf.matmul(output_layer, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias, name='cls_logits')
probabilities = tf.nn.softmax(logits, axis=-1, name='cls_probabilities')
log_probs = tf.nn.log_softmax(logits, axis=-1)
one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)
per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1, name='cls_per_example_loss')
loss = tf.reduce_mean(per_example_loss, name='cls_loss')
return (loss, per_example_loss, logits, probabilities)
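# Note (added for clarity): create_model above implements a standard sentence-level
# classification head: pooled [CLS] output -> dropout (training only) -> linear
# projection to num_labels logits -> softmax, with the loss being the mean
# per-example cross entropy.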
def get_frozen_tftrt_model(bert_config, shape, num_labels, use_one_hot_embeddings, init_checkpoint):
tf_config = tf.compat.v1.ConfigProto()
tf_config.gpu_options.allow_growth = True
output_node_names = ['loss/cls_loss', 'loss/cls_per_example_loss', 'loss/cls_logits', 'loss/cls_probabilities']
with tf.Session(config=tf_config) as tf_sess:
input_ids = tf.placeholder(tf.int32, shape, 'input_ids')
input_mask = tf.placeholder(tf.int32, shape, 'input_mask')
segment_ids = tf.placeholder(tf.int32, shape, 'segment_ids')
label_ids = tf.placeholder(tf.int32, (None), 'label_ids')
create_model(bert_config, False, input_ids, input_mask, segment_ids, label_ids,
num_labels, use_one_hot_embeddings)
tvars = tf.trainable_variables()
(assignment_map, initialized_variable_names) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
tf_sess.run(tf.global_variables_initializer())
print("LOADED!")
tf.compat.v1.logging.info("**** Trainable Variables ****")
for var in tvars:
init_string = ""
if var.name in initialized_variable_names:
init_string = ", *INIT_FROM_CKPT*"
else:
init_string = ", *NOTTTTTTTTTTTTTTTTTTTTT"
tf.compat.v1.logging.info(" name = %s, shape = %s%s", var.name, var.shape, init_string)
frozen_graph = tf.graph_util.convert_variables_to_constants(tf_sess,
tf_sess.graph.as_graph_def(), output_node_names)
num_nodes = len(frozen_graph.node)
print('Converting graph using TensorFlow-TensorRT...')
from tensorflow.python.compiler.tensorrt import trt_convert as trt
converter = trt.TrtGraphConverter(
input_graph_def=frozen_graph,
nodes_blacklist=output_node_names,
max_workspace_size_bytes=(4096 << 20) - 1000,
precision_mode = "FP16" if FLAGS.amp else "FP32",
minimum_segment_size=4,
is_dynamic_op=True,
maximum_cached_engines=1000
)
frozen_graph = converter.convert()
print('Total node count before and after TF-TRT conversion:',
num_nodes, '->', len(frozen_graph.node))
print('TRT node count:',
len([1 for n in frozen_graph.node if str(n.op) == 'TRTEngineOp']))
with tf.io.gfile.GFile("frozen_modelTRT.pb", "wb") as f:
f.write(frozen_graph.SerializeToString())
return frozen_graph
def model_fn_builder(task_name, bert_config, num_labels, init_checkpoint, learning_rate,
num_train_steps, num_warmup_steps,
use_one_hot_embeddings, hvd=None):
"""Returns `model_fn` closure for Estimator."""
def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
"""The `model_fn` for Estimator."""
def metric_fn(per_example_loss, label_ids, logits):
predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
if task_name == "cola":
FN, FN_op = tf.metrics.false_negatives(labels=label_ids, predictions=predictions)
FP, FP_op = tf.metrics.false_positives(labels=label_ids, predictions=predictions)
TP, TP_op = tf.metrics.true_positives(labels=label_ids, predictions=predictions)
TN, TN_op = tf.metrics.true_negatives(labels=label_ids, predictions=predictions)
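          # Matthews correlation coefficient computed from the streaming counts above:
          # MCC = (TP*TN - FP*FN) / sqrt((TP+FP)(TP+FN)(TN+FP)(TN+FN))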
MCC = (TP * TN - FP * FN) / ((TP + FP) * (TP + FN) * (TN + FP) * (TN + FN)) ** 0.5
MCC_op = tf.group(FN_op, TN_op, TP_op, FP_op, tf.identity(MCC, name="MCC"))
return {"MCC": (MCC, MCC_op)}
elif task_name == "mrpc":
accuracy = tf.metrics.accuracy(
labels=label_ids, predictions=predictions)
loss = tf.metrics.mean(values=per_example_loss)
f1 = tf_metrics.f1(labels=label_ids, predictions=predictions, num_classes=2, pos_indices=[1])
return {
"eval_accuracy": accuracy,
"eval_f1": f1,
"eval_loss": loss,
}
else:
accuracy = tf.metrics.accuracy(
labels=label_ids, predictions=predictions)
loss = tf.metrics.mean(values=per_example_loss)
return {
"eval_accuracy": accuracy,
"eval_loss": loss,
}
tf.compat.v1.logging.info("*** Features ***")
tf.compat.v1.logging.info("*** Features ***")
for name in sorted(features.keys()):
tf.compat.v1.logging.info(" name = %s, shape = %s" % (name, features[name].shape))
input_ids = features["input_ids"]
input_mask = features["input_mask"]
segment_ids = features["segment_ids"]
label_ids = features["label_ids"]
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
if not is_training and FLAGS.use_trt:
trt_graph = get_frozen_tftrt_model(bert_config, input_ids.shape, num_labels, use_one_hot_embeddings, init_checkpoint)
(total_loss, per_example_loss, logits, probabilities) = tf.import_graph_def(trt_graph,
input_map={'input_ids':input_ids, 'input_mask':input_mask, 'segment_ids':segment_ids, 'label_ids':label_ids},
return_elements=['loss/cls_loss:0', 'loss/cls_per_example_loss:0', 'loss/cls_logits:0', 'loss/cls_probabilities:0'],
name='')
if mode == tf.estimator.ModeKeys.PREDICT:
predictions = {"probabilities": probabilities}
output_spec = tf.estimator.EstimatorSpec(
mode=mode, predictions=predictions)
elif mode == tf.estimator.ModeKeys.EVAL:
eval_metric_ops = metric_fn(per_example_loss, label_ids, logits)
output_spec = tf.estimator.EstimatorSpec(
mode=mode,
loss=total_loss,
eval_metric_ops=eval_metric_ops)
return output_spec
(total_loss, per_example_loss, logits, probabilities) = create_model(
bert_config, is_training, input_ids, input_mask, segment_ids, label_ids,
num_labels, use_one_hot_embeddings)
tvars = tf.trainable_variables()
initialized_variable_names = {}
if init_checkpoint and (hvd is None or hvd.rank() == 0):
(assignment_map, initialized_variable_names
) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
if FLAGS.verbose_logging:
tf.compat.v1.logging.info("**** Trainable Variables ****")
for var in tvars:
init_string = ""
if var.name in initialized_variable_names:
init_string = ", *INIT_FROM_CKPT*"
tf.compat.v1.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
init_string)
output_spec = None
if mode == tf.estimator.ModeKeys.TRAIN:
train_op = optimization.create_optimizer(
total_loss, learning_rate, num_train_steps, num_warmup_steps,
hvd, False, FLAGS.amp, FLAGS.num_accumulation_steps, FLAGS.optimizer_type)
output_spec = tf.estimator.EstimatorSpec(
mode=mode,
loss=total_loss,
train_op=train_op)
elif mode == tf.estimator.ModeKeys.EVAL:
dummy_op = tf.no_op()
# Need to call mixed precision graph rewrite if fp16 to enable graph rewrite
if FLAGS.amp:
loss_scaler = tf.train.experimental.FixedLossScale(1)
dummy_op = tf.train.experimental.enable_mixed_precision_graph_rewrite(
optimization.LAMBOptimizer(learning_rate=0.0), loss_scaler)
eval_metric_ops = metric_fn(per_example_loss, label_ids, logits)
output_spec = tf.estimator.EstimatorSpec(
mode=mode,
loss=total_loss,
eval_metric_ops=eval_metric_ops)
else:
dummy_op = tf.no_op()
# Need to call mixed precision graph rewrite if fp16 to enable graph rewrite
if FLAGS.amp:
dummy_op = tf.train.experimental.enable_mixed_precision_graph_rewrite(
optimization.LAMBOptimizer(learning_rate=0.0))
output_spec = tf.estimator.EstimatorSpec(
mode=mode, predictions=probabilities)
return output_spec
return model_fn
# This function is not used by this file but is still used by the Colab and
# people who depend on it.
def input_fn_builder(features, batch_size, seq_length, is_training, drop_remainder, hvd=None):
"""Creates an `input_fn` closure to be passed to Estimator."""
all_input_ids = []
all_input_mask = []
all_segment_ids = []
all_label_ids = []
for feature in features:
all_input_ids.append(feature.input_ids)
all_input_mask.append(feature.input_mask)
all_segment_ids.append(feature.segment_ids)
all_label_ids.append(feature.label_id)
def input_fn():
"""The actual input function."""
num_examples = len(features)
# This is for demo purposes and does NOT scale to large data sets. We do
# not use Dataset.from_generator() because that uses tf.py_func which is
# not TPU compatible. The right way to load data is with TFRecordReader.
d = tf.data.Dataset.from_tensor_slices({
"input_ids":
tf.constant(
all_input_ids, shape=[num_examples, seq_length],
dtype=tf.int32),
"input_mask":
tf.constant(
all_input_mask,
shape=[num_examples, seq_length],
dtype=tf.int32),
"segment_ids":
tf.constant(
all_segment_ids,
shape=[num_examples, seq_length],
dtype=tf.int32),
"label_ids":
tf.constant(all_label_ids, shape=[num_examples], dtype=tf.int32),
})
if is_training:
if hvd is not None: d = d.shard(hvd.size(), hvd.rank())
d = d.repeat()
d = d.shuffle(buffer_size=100)
d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder)
return d
return input_fn
def main(_):
setup_xla_flags()
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
dllogging = utils.dllogger_class.dllogger_class(FLAGS.dllog_path)
if FLAGS.horovod:
hvd.init()
processors = {
"cola": ColaProcessor,
"mnli": MnliProcessor,
"mrpc": MrpcProcessor,
"xnli": XnliProcessor,
}
if not FLAGS.do_train and not FLAGS.do_eval and not FLAGS.do_predict:
raise ValueError(
"At least one of `do_train`, `do_eval` or `do_predict' must be True.")
bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)
if FLAGS.max_seq_length > bert_config.max_position_embeddings:
raise ValueError(
"Cannot use sequence length %d because the BERT model "
"was only trained up to sequence length %d" %
(FLAGS.max_seq_length, bert_config.max_position_embeddings))
tf.io.gfile.makedirs(FLAGS.output_dir)
task_name = FLAGS.task_name.lower()
if task_name not in processors:
raise ValueError("Task not found: %s" % (task_name))
processor = processors[task_name]()
label_list = processor.get_labels()
tokenizer = tokenization.FullTokenizer(
vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)
master_process = True
training_hooks = []
global_batch_size = FLAGS.train_batch_size * FLAGS.num_accumulation_steps
hvd_rank = 0
config = tf.compat.v1.ConfigProto()
if FLAGS.horovod:
tf.compat.v1.logging.info("Multi-GPU training with TF Horovod")
tf.compat.v1.logging.info("hvd.size() = %d hvd.rank() = %d", hvd.size(), hvd.rank())
global_batch_size = FLAGS.train_batch_size * FLAGS.num_accumulation_steps * hvd.size()
master_process = (hvd.rank() == 0)
hvd_rank = hvd.rank()
config.gpu_options.visible_device_list = str(hvd.local_rank())
set_affinity(hvd.local_rank())
if hvd.size() > 1:
training_hooks.append(hvd.BroadcastGlobalVariablesHook(0))
if FLAGS.use_xla:
config.graph_options.optimizer_options.global_jit_level = tf.compat.v1.OptimizerOptions.ON_1
if FLAGS.amp:
tf.enable_resource_variables()
run_config = tf.estimator.RunConfig(
model_dir=FLAGS.output_dir if master_process else None,
session_config=config,
save_checkpoints_steps=FLAGS.save_checkpoints_steps if master_process else None,
save_summary_steps=FLAGS.save_checkpoints_steps if master_process else None,
log_step_count_steps=FLAGS.display_loss_steps,
keep_checkpoint_max=1)
if master_process:
tf.compat.v1.logging.info("***** Configuaration *****")
for key in FLAGS.__flags.keys():
tf.compat.v1.logging.info(' {}: {}'.format(key, getattr(FLAGS, key)))
tf.compat.v1.logging.info("**************************")
train_examples = None
num_train_steps = None
num_warmup_steps = None
training_hooks.append(LogTrainRunHook(global_batch_size, hvd_rank, FLAGS.save_checkpoints_steps, num_steps_ignore_xla=25))
if FLAGS.do_train:
train_examples = processor.get_train_examples(FLAGS.data_dir)
num_train_steps = int(
len(train_examples) / global_batch_size * FLAGS.num_train_epochs)
num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion)
start_index = 0
end_index = len(train_examples)
tmp_filenames = [os.path.join(FLAGS.output_dir, "train.tf_record")]
if FLAGS.horovod:
tmp_filenames = [os.path.join(FLAGS.output_dir, "train.tf_record{}".format(i)) for i in range(hvd.size())]
num_examples_per_rank = len(train_examples) // hvd.size()
remainder = len(train_examples) % hvd.size()
if hvd.rank() < remainder:
start_index = hvd.rank() * (num_examples_per_rank+1)
end_index = start_index + num_examples_per_rank + 1
else:
start_index = hvd.rank() * num_examples_per_rank + remainder
end_index = start_index + (num_examples_per_rank)
model_fn = model_fn_builder(
task_name=task_name,
bert_config=bert_config,
num_labels=len(label_list),
init_checkpoint=FLAGS.init_checkpoint,
learning_rate=FLAGS.learning_rate if not FLAGS.horovod else FLAGS.learning_rate * hvd.size(),
num_train_steps=num_train_steps,
num_warmup_steps=num_warmup_steps,
use_one_hot_embeddings=False,
hvd=None if not FLAGS.horovod else hvd)
estimator = tf.estimator.Estimator(
model_fn=model_fn,
config=run_config)
if FLAGS.do_train:
file_based_convert_examples_to_features(
train_examples[start_index:end_index], label_list, FLAGS.max_seq_length, tokenizer, tmp_filenames[hvd_rank])
tf.compat.v1.logging.info("***** Running training *****")
tf.compat.v1.logging.info(" Num examples = %d", len(train_examples))
tf.compat.v1.logging.info(" Batch size = %d", FLAGS.train_batch_size)
tf.compat.v1.logging.info(" Num steps = %d", num_train_steps)
train_input_fn = file_based_input_fn_builder(
input_file=tmp_filenames,
batch_size=FLAGS.train_batch_size,
seq_length=FLAGS.max_seq_length,
is_training=True,
drop_remainder=True,
hvd=None if not FLAGS.horovod else hvd)
train_start_time = time.time()
estimator.train(input_fn=train_input_fn, max_steps=num_train_steps, hooks=training_hooks)
train_time_elapsed = time.time() - train_start_time
train_time_wo_overhead = training_hooks[-1].total_time
avg_sentences_per_second = num_train_steps * global_batch_size * 1.0 / train_time_elapsed
ss_sentences_per_second = (training_hooks[-1].count - training_hooks[-1].skipped) * global_batch_size * 1.0 / train_time_wo_overhead
if master_process:
tf.compat.v1.logging.info("-----------------------------")
tf.compat.v1.logging.info("Total Training Time = %0.2f for Sentences = %d", train_time_elapsed,
num_train_steps * global_batch_size)
tf.compat.v1.logging.info("Total Training Time W/O Overhead = %0.2f for Sentences = %d", train_time_wo_overhead,
(training_hooks[-1].count - training_hooks[-1].skipped) * global_batch_size)
tf.compat.v1.logging.info("Throughput Average (sentences/sec) with overhead = %0.2f", avg_sentences_per_second)
tf.compat.v1.logging.info("Throughput Average (sentences/sec) = %0.2f", ss_sentences_per_second)
tf.compat.v1.logging.info("-----------------------------")
if FLAGS.do_eval and master_process:
eval_examples = processor.get_dev_examples(FLAGS.data_dir)
eval_file = os.path.join(FLAGS.output_dir, "eval.tf_record")
file_based_convert_examples_to_features(
eval_examples, label_list, FLAGS.max_seq_length, tokenizer, eval_file)
tf.compat.v1.logging.info("***** Running evaluation *****")
tf.compat.v1.logging.info(" Num examples = %d", len(eval_examples))
tf.compat.v1.logging.info(" Batch size = %d", FLAGS.eval_batch_size)
eval_drop_remainder = False
eval_input_fn = file_based_input_fn_builder(
input_file=eval_file,
batch_size=FLAGS.eval_batch_size,
seq_length=FLAGS.max_seq_length,
is_training=False,
drop_remainder=eval_drop_remainder)
eval_hooks = [LogEvalRunHook(FLAGS.eval_batch_size)]
eval_start_time = time.time()
result = estimator.evaluate(input_fn=eval_input_fn, hooks=eval_hooks)
eval_time_elapsed = time.time() - eval_start_time
time_list = eval_hooks[-1].time_list
time_list.sort()
# Removing outliers (init/warmup) in throughput computation.
eval_time_wo_overhead = sum(time_list[:int(len(time_list) * 0.8)])
num_sentences = (int(len(time_list) * 0.8)) * FLAGS.eval_batch_size
    avg = np.mean(time_list)
#!/usr/bin/env python
#
# Copyright 2006,2007,2010,2015 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest
import numpy as np
class test_random(gr_unittest.TestCase):
# NOTE: For tests on the output distribution of the random numbers, see gnuradio-runtime/apps/evaluation_random_numbers.py.
# Check for range [0,1) of uniform distributed random numbers
def test_1(self):
num_tests = 10000
values = np.zeros(num_tests)
rndm = gr.random()
for k in range(num_tests):
values[k] = rndm.ran1()
for value in values:
self.assertLess(value, 1)
self.assertGreaterEqual(value, 0)
# Same seed should yield same random values.
def test_2_same_seed(self):
num = 5
# Init with fixed seed.
rndm0 = gr.random(42)
rndm1 = gr.random(42)
for k in range(num):
x = rndm0.ran1()
y = rndm1.ran1()
self.assertEqual(x, y)
# reseed should yield same numbers.
def test_003_reseed(self):
num = 5
x = np.zeros(num)
        y = np.zeros(num)
#!/usr/bin/env python
import numpy as np
import pickle
import matplotlib.pyplot as plt
import os
# adopted from 6.883 MIT
def generate_spirals (n, k, curvature, jitter, x_center, y_center):
# generates coordinates for k spirals, n points in each spiral
# X = n*k by 2 matrix of coordinates
# Y = label 1..k for each data point
# additional arguments:
# curvature = curvature of the spirals (0 = no curvature)
# jitter = amount of noise (0 = no noise)
# [x_center, y_center] = center of the spirals
X = np.zeros((n*k,2))
Y = np.zeros((n*k,1))
    for j in range(0, k):
ind = np.arange(n*j,n*(j+1))
r = np.linspace(0.1, 1, n)
t = np.linspace(j*(2*np.pi/k), (j+curvature)*(2*np.pi/k),n) + np.random.randn(1,n)*jitter
t = np.squeeze(t.transpose())
X[ind,0] = x_center + r*np.sin(t)
X[ind,1] = y_center + r*np.cos(t)
Y[ind] = j
return X, Y
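# Illustrative usage sketch (added for clarity; the argument values are arbitrary examples):
#   X, Y = generate_spirals(n=100, k=3, curvature=2.0, jitter=0.2, x_center=0.0, y_center=0.0)
#   plot_spiral_dataset(X, Y, "3-class spiral dataset")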
def plot_spiral_datasetWrapper(X, scores, title_string, if_new_figure=1):
Y = np.argmax(scores, axis = 1)
plot_spiral_dataset(X, Y, title_string, if_new_figure)
def plot_spiral_dataset(X, Y, title_string, if_new_figure=1):
k = np.amax(Y) + 1
# plot data
colors = np.floor(64/k)*Y
if np.amax(colors) != 0:
colors = colors / np.amax(colors)
if if_new_figure == 1:
fig = plt.figure(figsize=(10, 8))
else:
plt.cla()
plt.scatter(X[:,0], X[:,1], 50, colors, cmap="rainbow")
plt.title(title_string)
plt.draw()
plt.pause(0.0001)
# plt.show(block='false')
if __name__ == "__main__":
print('hello_world from generate_spirals.py')
file_dir = os.path.dirname(os.path.realpath(__file__))
plt.rcParams.update({'font.size': 18})
# load or create training data
try:
dataset = pickle.load(open(file_dir+"/spiral_dataset_train.p","rb"))
X = dataset.X
Y = dataset.Y
        k = np.amax(Y)
# Predict the opponent's piece configuration.
# This is meant to work in an imperfect-information game.
# Opponent pieces whose identity is unknown are tentatively marked as -1.
# board -> 14R24R34R44R15B25B35B45B41u31u21u11u40u30u20u10u
# move
import numpy as np
import itertools
import random
import time
from game import State
# from pv_mcts import predict
from pathlib import Path
from tensorflow.keras.models import load_model
from test import convert_func_use_in_guess
# model_path = "models/10000.pth"
default_gamma = 0.9
DN_INPUT_SHAPE = (6, 6, 4)
# It is probably better to define an imperfect-information Geister state (state only?) and keep updating/managing it.
# Board information for imperfect-information Geister together with its estimated values.
class II_State:
# クラス変数で駒順を定義
piece_name = [
"h",
"g",
"f",
"e",
"d",
"c",
"b",
"a",
"A",
"B",
"C",
"D",
"E",
"F",
"G",
"H",
]
    # Initialization
def __init__(
self,
real_my_piece_blue_set,
real_enemy_piece_blue_set=None,
see_through_piece_id=None,
wrong_see_through_piece_id=None,
all_piece=None,
enemy_estimated_num=None,
my_estimated_num=None,
enemy_piece_list=None,
my_piece_list=None,
living_piece_color=None,
):
        # All pieces (ordered as hgfedcbaABCDEFGH)
        # Enemy pieces are indices 0-7, own pieces are indices 8-15
        if all_piece == None:
            # numpy is generally faster without an explicit dtype (specifying one seems to add extra work internally)
            self.all_piece = np.zeros(16, dtype=np.int16)
            # Assign the initial placement (each value is a square index; 88 = escaped, 99 = captured)
            # Indices 0-7 are enemy pieces, 8-15 are own pieces
self.all_piece[0] = 1
self.all_piece[1] = 2
self.all_piece[2] = 3
self.all_piece[3] = 4
self.all_piece[4] = 7
self.all_piece[5] = 8
self.all_piece[6] = 9
self.all_piece[7] = 10
self.all_piece[8] = 25
self.all_piece[9] = 26
self.all_piece[10] = 27
self.all_piece[11] = 28
self.all_piece[12] = 31
self.all_piece[13] = 32
self.all_piece[14] = 33
self.all_piece[15] = 34
else:
self.all_piece = all_piece
if enemy_piece_list == None:
self.enemy_piece_list = [0, 1, 2, 3, 4, 5, 6, 7]
else:
self.enemy_piece_list = enemy_piece_list
if my_piece_list == None:
self.my_piece_list = [8, 9, 10, 11, 12, 13, 14, 15]
else:
self.my_piece_list = my_piece_list
# real_my_piece_blue_setは自分の青駒のIDのセット(引数必須)
self.real_my_piece_blue_set = set(real_my_piece_blue_set)
self.real_my_piece_red_set = (
set(self.my_piece_list) - self.real_my_piece_blue_set
)
# 敵の青駒のセット(デバッグ用)
self.real_enemy_piece_blue_set = set(real_enemy_piece_blue_set)
self.real_enemy_piece_red_set = (
set(self.enemy_piece_list) - self.real_enemy_piece_blue_set
)
# {敵青, 敵赤, 自青, 自赤}
if living_piece_color == None:
self.living_piece_color = [4, 4, 4, 4]
else:
self.living_piece_color = living_piece_color
# [[推測値A,(パターンAの青駒のtuple表現)],[推測値B,(パターンBの青駒のtuple表現),...]
if enemy_estimated_num == None:
# 盤面の推測値を作成(大きい程青らしく、小さい程赤らしい)
self.enemy_estimated_num = []
for enemy_blue in itertools.combinations(
set(self.enemy_piece_list), self.living_piece_color[0]
):
self.enemy_estimated_num.append([0, enemy_blue])
else:
self.enemy_estimated_num = enemy_estimated_num
if my_estimated_num == None:
# 盤面の推測値を作成(大きい程青らしく、小さい程赤らしい)
self.my_estimated_num = []
for my_blue in itertools.combinations(
set(self.my_piece_list), self.living_piece_color[0]
):
self.my_estimated_num.append([0, my_blue])
else:
self.my_estimated_num = my_estimated_num
if see_through_piece_id == None and wrong_see_through_piece_id == None:
self.see_through_piece_id = []
self.wrong_see_through_piece_id = []
elif wrong_see_through_piece_id == None: # 間違った推測のみnullだった場合
self.see_through_piece_id = see_through_piece_id
self.wrong_see_through_piece_id = []
shave_impossible_board_from_see_through(self) # ありえない世界を初期化段階で消す
elif see_through_piece_id == None:
self.see_through_piece_id = []
self.wrong_see_through_piece_id = wrong_see_through_piece_id
rebuilding_estimated_num(
self,
set(self.see_through_piece_id),
set(self.wrong_see_through_piece_id),
)
else: # どっちもnullでない
self.see_through_piece_id = see_through_piece_id
self.wrong_see_through_piece_id = wrong_see_through_piece_id
rebuilding_estimated_num(
self,
set(self.see_through_piece_id),
set(self.wrong_see_through_piece_id),
)
    # The initial board layout looks like this (lowercase letters are enemy pieces, uppercase letters are own pieces)
# 0 1 2 3 4 5
# 0 h g f e
# 1 d c b a
# 2
# 3
# 4 A B C D
# 5 E F G H
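    # Example (added for clarity): a square index is column + row * 6, so index 25
    # is column 1, row 4 (where piece "A" starts), and 35 - 25 = 10 is the same
    # square seen from the opponent's side of the board.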
    # Get the list of legal actions.
    # Given an action, the NN returns the policy it learned in advance.
    # Even if given a red piece's goal move (illegal, hence an unseen move), it should return 0 there (and should not raise an error???)
def legal_actions(self):
actions = []
# リストに自分の駒を全て追加
piece_coordinate_array = np.array([0] * 8)
index = 0
for i in range(8, 16):
piece_coordinate_array[index] = self.all_piece[i]
index += 1
np.sort(piece_coordinate_array)
# print("my:self.all_piece", piece_coordinate_array)
for piece_coordinate in piece_coordinate_array:
# 88以上は行動できないので省く(0~35)
if piece_coordinate < 36:
actions.extend(
self.piece_coordinate_to_actions(
piece_coordinate, piece_coordinate_array
)
)
# 0と5はゴールの選択肢を追加(赤駒でも問答無用)
if piece_coordinate == 0:
actions.extend([2]) # 0*4 + 2
if piece_coordinate == 5:
actions.extend([22]) # 5*4 + 2
return actions
    # Return the list of legal actions from the opponent's point of view
def enemy_legal_actions(self):
actions = []
piece_coordinate_array = np.array([0] * 8)
index = 0
for i in range(0, 8):
if self.all_piece[i] < 36:
piece_coordinate_array[index] = 35 - self.all_piece[i]
else:
piece_coordinate_array[index] = 99
index += 1
np.sort(piece_coordinate_array)
# print("enemy:self.all_piece", piece_coordinate_array)
for piece_coordinate in piece_coordinate_array:
# 88以上は行動できないので省く(0~35)
if piece_coordinate < 36:
actions.extend(
self.piece_coordinate_to_actions(
piece_coordinate, piece_coordinate_array
)
)
# 0と5はゴールの選択肢を追加(赤駒でも問答無用)
if piece_coordinate == 0:
actions.extend([2]) # 0*4 + 2
if piece_coordinate == 5:
actions.extend([22]) # 5*4 + 2
return actions
    # Convert a piece's source square and move direction into an action number
def position_to_action(self, position, direction):
return position * 4 + direction
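    # Action encoding used throughout (added for clarity): action = square_index * 4 + direction,
    # where direction 0 = down, 1 = left, 2 = up, 3 = right on the 6x6 board.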
def piece_coordinate_to_actions(self, piece_coordinate, piece_coordinate_array):
actions = []
x = piece_coordinate % 6
y = int(piece_coordinate / 6)
if y != 5 and not np.any(piece_coordinate_array == (piece_coordinate + 6)): # 下
actions.append(self.position_to_action(piece_coordinate, 0))
if x != 0 and not np.any(piece_coordinate_array == (piece_coordinate - 1)): # 左
actions.append(self.position_to_action(piece_coordinate, 1))
if y != 0 and not np.any(piece_coordinate_array == (piece_coordinate - 6)): # 上
actions.append(self.position_to_action(piece_coordinate, 2))
if x != 5 and not np.any(piece_coordinate_array == (piece_coordinate + 1)): # 右
actions.append(self.position_to_action(piece_coordinate, 3))
return actions
    # Get the list of legal actions for a single piece
def legal_actions_pos(self, position, piece_index_list):
piece_list = []
for piece_index in piece_index_list:
piece_list.append(self.all_piece[piece_index])
actions = []
x = position % 6
y = int(position / 6)
# 下左上右の順に行動できるか検証し、できるならactionに追加
# ちなみにand演算子は左の値を評価して右の値を返すか決める(左の値がTrue系でなければ右の値は無視する)ので、はみ出し参照してIndexErrorにはならない(&だとなる)
if y != 5 and (position + 6) not in piece_list: # 下端でない and 下に自分の駒がいない
actions.append(self.position_to_action(position, 0))
if x != 0 and (position - 1) not in piece_list: # 左端でない and 左に自分の駒がいない
actions.append(self.position_to_action(position, 1))
if y != 0 and (position - 6) not in piece_list: # 上端でない and 上に自分の駒がいない
actions.append(self.position_to_action(position, 2))
if x != 5 and (position + 1) not in piece_list: # 右端でない and 右に自分の駒がいない
actions.append(self.position_to_action(position, 3))
# 青駒のゴール行動の可否は1ターンに1度だけ判定すれば良いので、例外的にlegal_actionsで処理する(ここでは処理しない)
return actions
    # Apply an action and transition to the next state
def next(self, action_num):
coordinate_before, coordinate_after = action_to_coordinate(action_num)
move_piece_index = np.where(self.all_piece == coordinate_before)[0][0]
# 移動先に駒が存在する場合は殺す(味方の駒も殺してしまうが、そこは行動側で制御)
if np.any(self.all_piece == coordinate_after):
dead_piece_ID = np.where(self.all_piece == coordinate_after)[0][0]
if dead_piece_ID < 8: # 死んだのが敵駒
# color_is_blue:死んだのが青駒かどうか
color_is_blue = any(
i == dead_piece_ID for i in self.real_enemy_piece_blue_set
)
reduce_pattern(dead_piece_ID, color_is_blue, self)
if self.wrong_see_through_piece_id != []:
rebuilding_estimated_num(
self,
set(self.see_through_piece_id),
set(self.wrong_see_through_piece_id),
)
else: # 死んだのが味方の駒
color_is_blue = any(
i == dead_piece_ID for i in self.real_my_piece_blue_set
)
reduce_pattern(dead_piece_ID, color_is_blue, self)
self.all_piece[move_piece_index] = coordinate_after # 駒の移動
    # Return the estimated values (mainly for debugging)
def return_estimate_value(self):
estimate_value = np.array([0] * 8, dtype="f4")
for elem in self.enemy_estimated_num:
id_matrix = [0] * 8
# 青駒IDのとこだけ1にする
for blue_id in elem[1]:
id_matrix[blue_id] = 1
estimate_value = estimate_value + (
np.array(id_matrix, dtype="f4") * elem[0]
)
if False:
print(self.enemy_estimated_num)
print(
"敵駒の住所",
self.all_piece[0],
self.all_piece[1],
self.all_piece[2],
self.all_piece[3],
)
print(
"味方駒の住所",
self.all_piece[4],
self.all_piece[5],
self.all_piece[6],
self.all_piece[7],
)
return estimate_value
    # String representation of the board
def __str__(self):
row = "|{}|{}|{}|{}|{}|{}|"
hr = "\n-------------------------------\n"
# 1つのボードに味方の駒と敵の駒を集める
board = [0] * 36
# 0~7が敵、8~15が自分
# 敵の駒
for enemy_piece_coo in self.all_piece[0:8]:
if enemy_piece_coo < 36 and enemy_piece_coo >= 0:
board[enemy_piece_coo] = -1
# 自分の駒
for blue_index in self.real_my_piece_blue_set:
if self.all_piece[blue_index] < 36 and self.all_piece[blue_index] >= 0:
board[self.all_piece[blue_index]] = 1
for red_index in self.real_my_piece_red_set:
if self.all_piece[red_index] < 36 and self.all_piece[red_index] >= 0:
board[self.all_piece[red_index]] = 2
board_essence = []
for i in board:
if i == 1:
board_essence.append("自青")
elif i == 2:
board_essence.append("自赤")
elif i == -1:
board_essence.append("敵駒")
else:
board_essence.append(" ")
ii_str = (
hr + row + hr + row + hr + row + hr + row + hr + row + hr + row + hr
).format(*board_essence)
ii_str += "\n" + str(self.living_piece_color)
return ii_str
# Select pieces to reveal such that the board configuration is not fully determined
def create_see_through_piece(enemy_blue_piece_set, through_num):
# 7個以上駒の色がわかるなら、全部わかるのと同意義
if through_num >= 7:
return set({0, 1, 2, 3, 4, 5, 6, 7})
blue_piece_set = enemy_blue_piece_set.copy()
red_piece_set = set({0, 1, 2, 3, 4, 5, 6, 7}) - blue_piece_set
# 赤と青から1つ除外(これでパターンが確定しない)
blue_piece_set.remove(random.choice(list(blue_piece_set)))
red_piece_set.remove(random.choice(list(red_piece_set)))
# セットの合成
see_thorugh_id_set = blue_piece_set | red_piece_set
# through_numが少ない場合は見える駒を多く除外する
for _ in range(6 - through_num): # 6は len(see_thorugh_id_set)
see_thorugh_id_set.remove(random.choice(list(see_thorugh_id_set)))
return see_thorugh_id_set
# Create a guess that includes wrong guesses but does not become inconsistent
def create_wrong_and_see_through_piece(
enemy_blue_piece_set: set, correct_through_num: int, wrong_through_num: int
):
blue_piece_set = enemy_blue_piece_set.copy()
red_piece_set = set({0, 1, 2, 3, 4, 5, 6, 7}) - blue_piece_set
est_num = correct_through_num + wrong_through_num
if est_num >= 9:
print("普通にバグ")
return
if est_num >= 7: # 7個以上駒の色がわかるなら、全部わかるのと同意義
estimated_piece_set = set({0, 1, 2, 3, 4, 5, 6, 7})
else:
# 赤と青から1つ除外(これでパターンが確定しない)
blue_piece_set.remove(random.choice(list(blue_piece_set)))
red_piece_set.remove(random.choice(list(red_piece_set)))
# 赤と青から均等に推測駒を出す
while len(blue_piece_set) + len(red_piece_set) > est_num:
if len(blue_piece_set) > len(red_piece_set):
blue_piece_set.remove(random.choice(list(blue_piece_set)))
elif len(blue_piece_set) < len(red_piece_set):
red_piece_set.remove(random.choice(list(red_piece_set)))
else: # redとblueが同じ量の場合はランダムピック
if random.randint(0, 1) == 0:
blue_piece_set.remove(random.choice(list(blue_piece_set)))
else:
red_piece_set.remove(random.choice(list(red_piece_set)))
wrong_piece_set = set()
cp_wrong_through_num = wrong_through_num
# wrong_through_numが奇数の場合
if cp_wrong_through_num % 2 == 1:
cp_wrong_through_num -= 1
if len(blue_piece_set) > len(red_piece_set):
piece = random.choice(list(blue_piece_set))
blue_piece_set.remove(piece)
wrong_piece_set.add(piece)
elif len(blue_piece_set) < len(red_piece_set):
piece = random.choice(list(red_piece_set))
red_piece_set.remove(piece)
wrong_piece_set.add(piece)
else: # redとblueが同じ量の場合はランダムピック
if random.randint(0, 1) == 0:
piece = random.choice(list(blue_piece_set))
blue_piece_set.remove(piece)
wrong_piece_set.add(piece)
else:
piece = random.choice(list(red_piece_set))
red_piece_set.remove(piece)
wrong_piece_set.add(piece)
# wrong_through_numの数だけ間違った推測駒を増やす
for _ in range(cp_wrong_through_num // 2):
piece = random.choice(list(blue_piece_set))
blue_piece_set.remove(piece)
wrong_piece_set.add(piece)
piece = random.choice(list(red_piece_set))
red_piece_set.remove(piece)
wrong_piece_set.add(piece)
correct_piece_set = blue_piece_set | red_piece_set
return [correct_piece_set, wrong_piece_set]
# Create an ii_state matching the piece colors in state (only usable for the initial state)
def create_ii_state_from_state(
state, enemy_view=False, through_num=0, wrong_through_num=0
):
if enemy_view:
# 敵視点でii_stateを作成
pieces = state.enemy_pieces
enemy_pieces = state.pieces
else:
pieces = state.pieces
enemy_pieces = state.enemy_pieces
# 駒のIDと座標が紐づいたリストを手動作成(初期配置では座標番号25~28と31~34に駒が存在)
piece_id_list = [0] * 36
for i in range(4):
piece_id_list[25 + i] = 8 + i
for i in range(4):
piece_id_list[31 + i] = 12 + i
blue_piece_set = set({})
for index, piece_color in enumerate(pieces):
if piece_color == 1:
blue_piece_set.add(piece_id_list[index])
# 敵駒の処理も同様にする
enemy_piece_id_list = [0] * 36
for i in range(4):
enemy_piece_id_list[25 + i] = 8 + i
for i in range(4):
enemy_piece_id_list[31 + i] = 12 + i
enemy_blue_piece_set = set({})
for index, piece_color in enumerate(enemy_pieces):
if piece_color == 1:
enemy_blue_piece_set.add(enemy_piece_id_list[index])
# enemy_blue_piece_setの値を反転させ、推測の際に扱いやすいように変換する
# (このままでは8~15の値をとるが、0~7の値に修正し扱う必要がある)
rev_enemy_blue_piece_set = set({})
for piece_coo in enemy_blue_piece_set:
rev_enemy_blue_piece_set.add(15 - piece_coo)
if through_num == 0:
ii_state = II_State(blue_piece_set, rev_enemy_blue_piece_set)
elif wrong_through_num == 0:
see_thorugh_id_set = create_see_through_piece(
rev_enemy_blue_piece_set, through_num
)
ii_state = II_State(
blue_piece_set, rev_enemy_blue_piece_set, see_thorugh_id_set
)
else:
correct_wrong_piece_set = create_wrong_and_see_through_piece(
blue_piece_set, through_num, wrong_through_num
)
ii_state = II_State(
blue_piece_set,
rev_enemy_blue_piece_set,
correct_wrong_piece_set[0],
correct_wrong_piece_set[1],
)
return ii_state
def create_state_from_ii_state(ii_state, blue_set):
pieces = [0] * 36
enemy_pieces = [0] * 36
# 0~7は敵の駒
for index, piece_coo in enumerate(ii_state.all_piece[:8]):
if piece_coo < 36:
if index in blue_set:
enemy_pieces[35 - piece_coo] = 1
else:
enemy_pieces[35 - piece_coo] = 2
for index, piece_coo in enumerate(ii_state.all_piece[8:]):
if piece_coo < 36:
if index + 8 in ii_state.real_my_piece_blue_set:
pieces[piece_coo] = 1
else:
pieces[piece_coo] = 2
state = State(pieces, enemy_pieces)
return state
### Protocol handling for the Geister AI competition
# The protocol does not send the opponent's action, only the updated board, so we recover the coordinates of the piece that moved.
# This can only detect the opponent's actions.
def enemy_coordinate_checker(before_board, now_board):
for i in range(len(before_board) // 2, len(before_board)):
if before_board[i] != now_board[i]:
break
# iではなく(i//3)*3とすることで、座標と駒色(例:14R)の先頭インデックスが取れる(これしないと2文字目からとってくる恐れがある)
beginningOfTheChanged = (i // 3) * 3
# 列番号+行番号*6でgame.pyで使ってる表現に直せる
before_coordinate = (
int(before_board[beginningOfTheChanged])
+ int(before_board[beginningOfTheChanged + 1]) * 6
)
now_coordinate = (
int(now_board[beginningOfTheChanged])
+ int(now_board[beginningOfTheChanged + 1]) * 6
)
# 行動前と行動後の座標を返す
return before_coordinate, now_coordinate
# Convert an action number into the piece's source square and move direction
def action_to_position(action_num):
return (int(action_num / 4), action_num % 4) # position,direction
# Convert an action number into the coordinates before and after the move
def action_to_coordinate(action_num):
coordinate_before, direction = action_to_position(action_num)
if direction == 0: # 下
coordinate_after = coordinate_before + 6
elif direction == 1: # 左
coordinate_after = coordinate_before - 1
elif direction == 3: # 右
coordinate_after = coordinate_before + 1
elif direction == 2: # 上
if coordinate_before == 0 or coordinate_before == 5: # 0と5の上行動はゴール処理なので弾く
coordinate_after = coordinate_before # coordinate_beforeを入れて駒の場所を動かさない(勝敗は決しているので下手に動かさない方が良い(多分))
else:
coordinate_after = coordinate_before - 6
else:
print("ERROR:action_to_coordinate(illegal action_num)")
return coordinate_before, coordinate_after
# Compute the action number from the source coordinate and the direction index
def position_to_action(position, direction):
return position * 4 + direction
# Compute the opponent's action number from the coordinates before and after the move
def calculate_enemy_action_number_from_coordinate(before_coordinate, now_coordinate):
enemy_looking_now_coordinate = 35 - now_coordinate
enemy_looking_before_coordinate = 35 - before_coordinate
difference = enemy_looking_now_coordinate - enemy_looking_before_coordinate
if difference == 6: # 下
return position_to_action(enemy_looking_before_coordinate, 0)
elif difference == 1: # 左
return position_to_action(enemy_looking_before_coordinate, 1)
elif difference == -6: # 上
return position_to_action(enemy_looking_before_coordinate, 2)
elif difference == -1: # 右
return position_to_action(enemy_looking_before_coordinate, 3)
else:
print("ERROR:find_enemy_action_number_from_coordinate(illegal move)")
return -1
###
# Update the Geister board in response to the opponent's action (captured pieces are also handled here)
def update_II_state(ii_state, before_coordinate, now_coordinate):
kill = np.any(ii_state.all_piece == now_coordinate)
# 敵駒がkillしていたら死んだ駒の処理を行う(99は死んだ駒)
if kill:
dead_piece_ID = np.where(ii_state.all_piece == now_coordinate)[0][0]
        color_is_blue = dead_piece_ID in ii_state.real_my_piece_blue_set
# print(dead_piece_ID, color_is_blue)
reduce_pattern(dead_piece_ID, color_is_blue, ii_state)
# 行動前の座標を行動後の座標に変更する
ii_state.all_piece[
np.where(ii_state.all_piece == before_coordinate)[0][0]
] = now_coordinate
# Create the state from my point of view
def my_looking_create_state(ii_state, my_blue, my_red, enemy_blue, enemy_red):
# プレイヤー毎のデュアルネットワークの入力の2次元配列の取得
def pieces_array_of(blue_piece_list, red_piece_list):
table_list = []
blue_table = [0] * 36
table_list.append(blue_table) # ちなみにappendは参照渡し
# blue_piece_listは駒のIDの値なので、ii_state.all_pieceでそのIDを参照してあげると座標が取れる
for blue_piece in blue_piece_list:
if ii_state.all_piece[blue_piece] < 36: # 死駒を除外
blue_table[ii_state.all_piece[blue_piece]] = 1
red_table = [0] * 36
table_list.append(red_table)
for red_piece in red_piece_list:
if ii_state.all_piece[red_piece] < 36:
red_table[ii_state.all_piece[red_piece]] = 1
return table_list
# デュアルネットワークの入力の2次元配列の取得(自分と敵両方)
return [pieces_array_of(my_blue, my_red), pieces_array_of(enemy_blue, enemy_red)]
# # 入力の順序はcreate
# # enemyの視点から状態を作成
# def enemy_looking_create_state(ii_state, my_blue, my_red, enemy_blue, enemy_red):
# # プレイヤー毎のデュアルネットワークの入力の2次元配列の取得
# def pieces_array_of(blue_piece_list, red_piece_list):
# table_list = []
# blue_table = [0] * 36
# # blue_piece_listは駒のIDの値なので、ii_state.all_pieceでそのIDを参照してあげると座標が取れる
# for blue_piece in blue_piece_list:
# if ii_state.all_piece[blue_piece] < 36: # 死駒を除外
# blue_table[ii_state.all_piece[blue_piece]] = 1
# blue_table.reverse() # 逆視点にするために要素を反転
# table_list.append(blue_table)
# red_table = [0] * 36
# for red_piece in red_piece_list:
# if ii_state.all_piece[red_piece] < 36:
# red_table[ii_state.all_piece[red_piece]] = 1
# red_table.reverse() # 逆視点にするために要素を反転
# table_list.append(red_table)
# return table_list
# # デュアルネットワークの入力の2次元配列の取得(自分と敵両方)
# return [pieces_array_of(enemy_blue, enemy_red), pieces_array_of(my_blue, my_red)]
# Build a state from the various pieces of information
def create_state_from_enemy_looking(ii_state, my_blue, my_red, enemy_blue, enemy_red):
# 自分の駒を格納
my_table = [0] * 36
for my_b in my_blue:
if ii_state.all_piece[my_b] < 36:
my_table[ii_state.all_piece[my_b]] = 1
for my_r in my_red:
if ii_state.all_piece[my_r] < 36:
my_table[ii_state.all_piece[my_r]] = 2
# 敵の駒を格納
enemy_table = [0] * 36
for en_b in enemy_blue:
if ii_state.all_piece[en_b] < 36:
enemy_table[ii_state.all_piece[en_b]] = 1
for en_r in enemy_red:
if ii_state.all_piece[en_r] < 36:
enemy_table[ii_state.all_piece[en_r]] = 2
enemy_table.reverse() # このままでは敵の駒の座標が逆なので反転させて戻す
# 敵視点でのstateを生成
state = State(enemy_table, my_table)
return state
# enemy -> store the estimated value for each piece. Up to 70 patterns are considered for the guess, but they are only summed (individual boards are not stored).
# my -> the piece configuration we want to estimate.
# Return a list of action values corresponding to the actions and the estimated boards.
def my_ii_predict(model_path, ii_state):
# 推論のための入力データのシェイプの変換
a, b, c = DN_INPUT_SHAPE # (6, 6, 4)
# ii_stateから生きてる駒のリストを取得
my_piece_set = set(ii_state.my_piece_list)
enemy_piece_set = set(ii_state.enemy_piece_list)
# policies_list[パターン(0~最大69)][行動(盤面依存)]
policies_list = []
legal_actions = list(ii_state.legal_actions())
# HandyRLで学習させた方策を取れる関数を定義
convert_func = convert_func_use_in_guess(model_path)
for num_and_my_blue in ii_state.my_estimated_num:
sum_np_policies = np.array([0] * len(legal_actions), dtype="f4")
# 赤駒のインデックスをセット形式で獲得(青駒以外の駒は赤駒)
my_red_set = my_piece_set - set(num_and_my_blue[1])
for num_and_enemy_blue in ii_state.enemy_estimated_num:
# 同様に赤駒のインデックスを獲得
enemy_red_set = enemy_piece_set - set(num_and_enemy_blue[1])
ii_pieces_array = my_looking_create_state(
ii_state,
num_and_my_blue[1],
my_red_set,
num_and_enemy_blue[1],
enemy_red_set,
)
# HandyRLに適応
policies = convert_func(ii_pieces_array, legal_actions)
# 行列演算するためにndarrayに変換
            np_policies = np.array(policies, dtype="f4")
from typing import Optional, Dict, Hashable, Any
import numpy as np
from napari import Viewer
from napari.layers import Points, Layer
from napari.utils.events import Event
from pyqtgraph import (
PlotDataItem,
PlotWidget,
PlotItem,
AxisItem,
InfiniteLine,
TextItem,
)
from qtpy.QtCore import Qt, QSize
from qtpy.QtGui import QFont, QResizeEvent
from qtpy.QtWidgets import (
QWidget,
QHBoxLayout,
QVBoxLayout,
QSlider,
QLabel,
QStyle,
QGridLayout,
QSplitter,
QScrollArea,
)
from vispy.color import get_colormap
from xarray import Dataset
class JumpSlider(QSlider):
def mousePressEvent(self, ev):
""" Jump to click position """
self.setValue(
QStyle.sliderValueFromPosition(
self.minimum(), self.maximum(), ev.x(), self.width()
)
)
def mouseMoveEvent(self, ev):
""" Jump to pointer position while moving """
self.setValue(
QStyle.sliderValueFromPosition(
self.minimum(), self.maximum(), ev.x(), self.width()
)
)
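# Note (added for clarity): QStyle.sliderValueFromPosition maps the click's x offset
# to a value in [minimum, maximum], which is what makes JumpSlider jump straight to
# the clicked position instead of stepping page by page.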
class WavenumberSlider(QWidget):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.slider = JumpSlider(Qt.Horizontal)
self.label = QLabel()
self.label.setStyleSheet("font-size: 14pt")
layout = QHBoxLayout()
layout.addWidget(self.slider, stretch=1)
layout.addWidget(self.label, stretch=0)
self.setLayout(layout)
class InspectorLine(InfiniteLine):
def __init__(self):
super().__init__(angle=90, movable=False)
self._labels = []
self._plot_item = None
self.sigPositionChanged.connect(self._onMoved)
def _onMoved(self):
pixelSize, _ = self.getViewBox().viewPixelSize()
mouseX = self.value()
self._removeLabels()
points = []
# iterate over the existing curves
for c in self._plot_item.curves:
# find the index of the closest point of this curve
if c.xData is None:
continue
adiff = np.abs(c.xData - mouseX)
idx = np.argmin(adiff)
# set label side to avoid clipping at edges of viewport
side = (
"left"
if (mouseX >= c.xData.min())
and (mouseX <= (c.xData.max() + c.xData.min()) / 2)
else "right"
)
# only add a label if the line touches the symbol
tolerance = 0.5 * max(1, c.opts["symbolSize"]) * pixelSize
if adiff[idx] < tolerance:
points.append((c.xData[idx], c.yData[idx], side))
self._createLabels(points)
def _createLabels(self, points):
for x, y, side in points:
text = f"nu={x:.1f}, I={y:.1f}"
            text_item = TextItem(text=text, anchor=(0, 0) if side == "left" else (1, 0))
text_item.setPos(x, y)
self._labels.append(text_item)
self._plot_item.addItem(text_item)
def _removeLabels(self):
# remove existing texts
for item in self._labels:
self._plot_item.removeItem(item)
self._labels = []
def attachToPlotItem(self, plot_item):
self._plot_item = plot_item
plot_item.addItem(self, ignoreBounds=True)
def detach(self, plot_item):
self._removeLabels()
self._plot_item.removeItem(self)
self._plot_item = None
class SpectrumViewerWidget(QWidget):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# attribute viewer
_label = QLabel()
_label.setText("<b>Acquisition Parameters</b>")
_label.setStyleSheet("font-size: 16pt")
_label.setAlignment(Qt.AlignCenter)
self.parametersLayout = QGridLayout()
_parametersLayout = QVBoxLayout()
_parametersLayout.addWidget(_label, stretch=0)
_parametersLayout.addSpacing(5)
_parametersLayout.addLayout(self.parametersLayout, stretch=0)
_parametersLayout.addWidget(QWidget(), stretch=1)
_parametersWidget = QWidget()
_parametersWidget.setLayout(_parametersLayout)
_parametersScrollArea = QScrollArea()
_parametersScrollArea.setWidget(_parametersWidget)
_parametersScrollArea.setWidgetResizable(True)
# label for user experience purposes
self.noPointSelectedLabel = QLabel(self)
self.noPointSelectedLabel.setMinimumWidth(300)
self.noPointSelectedLabel.setMinimumHeight(60)
self.noPointSelectedLabel.setAlignment(Qt.AlignCenter)
self.noPointSelectedLabel.setVisible(True)
self.noPointSelectedLabel.setText("No point selected")
self.noPointSelectedLabel.setStyleSheet("font-size: 24pt; color: gray")
self.plot = PlotDataItem()
self.vLine = InspectorLine()
plotWidget = PlotWidget()
plotWidget.addItem(self.plot)
self.vLine.attachToPlotItem(plot_item=plotWidget.getPlotItem())
labelStyle = {"font-size": "14pt", "color": "#FFF"}
plotItem: PlotItem = plotWidget.getPlotItem()
plotItem.setLabel("bottom", "Wavenumber (cm<sup>-1</sup>)", **labelStyle)
plotItem.setLabel("left", "Counts", **labelStyle)
font = QFont()
font.setPixelSize(16)
bottomAxis: AxisItem = plotItem.getAxis("bottom")
bottomAxis.setStyle(tickFont=font)
leftAxis: AxisItem = plotItem.getAxis("left")
leftAxis.setStyle(tickFont=font)
self.slider = JumpSlider(Qt.Horizontal)
self.label = QLabel()
self.label.setStyleSheet("font-size: 14pt")
wavenumberLayout = QHBoxLayout()
wavenumberLayout.addWidget(self.slider, stretch=1)
wavenumberLayout.addWidget(self.label, stretch=0)
layout = QVBoxLayout()
layout.addWidget(plotWidget, stretch=1)
layout.addLayout(wavenumberLayout, stretch=0)
layoutWidget = QWidget()
layoutWidget.setLayout(layout)
splitter = QSplitter()
splitter.setOrientation(Qt.Vertical)
splitter.addWidget(_parametersScrollArea)
splitter.addWidget(layoutWidget)
splitterLayout = QVBoxLayout()
splitterLayout.addWidget(splitter)
self.setLayout(splitterLayout)
def resizeEvent(self, event: QResizeEvent):
width = self.noPointSelectedLabel.rect().width()
height = self.noPointSelectedLabel.rect().height()
size: QSize = self.size()
x = size.width() / 2 - width / 2
y = size.height() / 2 - height / 2
        self.noPointSelectedLabel.move(int(x), int(y))
super().resizeEvent(event)
def setAcquisitionParameters(self, dct: Dict[Hashable, Any]):
layout = self.parametersLayout
keys = sorted([key for key in dct.keys()])
for row, key in enumerate(keys):
keyLabel = QLabel()
keyLabel.setText("<b>" + str(key) + "</b>")
valueLabel = QLabel()
valueLabel.setText(str(dct[key]))
layout.addWidget(keyLabel, row, 0, Qt.AlignRight)
layout.addWidget(valueLabel, row, 1, Qt.AlignLeft)
layout.setRowStretch(row, 0)
class SpectrumViewer:
def __init__(self, viewer: Viewer, dataset: Dataset, cmap: str = "viridis"):
self.viewer = viewer
self._view = SpectrumViewerWidget()
self._view.setAcquisitionParameters(dataset.attrs)
self._current_layer: Optional[Layer] = None
self._current_index: Optional[int] = None
# init points
self.dataset = dataset
coords = np.asarray(dataset["coords"].values.tolist())
coords = np.flip(coords, axis=1) # XY to YX
self.layer: Points = viewer.add_points(
coords, size=20, edge_color="gray", name="Raman raster"
)
self.layer.events.highlight.connect(self.on_select)
self.cmap = get_colormap(cmap)
# init spectrum
self.wavenumbers = dataset["wavenumber"].values.tolist()
self._view.slider.setRange(0, len(self.wavenumbers) - 1)
self._view.slider.valueChanged.connect(lambda _: self.update_wavenumber())
self.update_wavenumber()
self.on_select()
def update_wavenumber(self):
# update plot
index = self._view.slider.value()
wavenumber = self.wavenumbers[index]
self._view.label.setText(f"{wavenumber:.1f} cm<sup>-1</sup>")
self._view.vLine.setPos(wavenumber)
# update points
at_wavenumber = self.dataset.sel(wavenumber=wavenumber)
intensity = np.copy(np.asarray(at_wavenumber["intensity"]))
intensity = np.ma.array(intensity, mask=intensity <= 0)
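        # non-positive intensities are masked above so the log transform below stays finite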
        intensity = np.ma.log(intensity)
import numpy as np
import os, sys
import math, time
from scipy.interpolate import InterpolatedUnivariateSpline as iuspline
from matplotlib import pyplot as plt
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import tensorflow_probability as tfp
import tensorflow_addons as tfa
import mesh_tensorflow as mtf
import flowpm
import flowpm.mesh_ops as mpm
import flowpm.mtfpm as mtfpm
import flowpm.mesh_utils as mesh_utils
from astropy.cosmology import Planck15
from flowpm.tfpm import PerturbationGrowth
from flowpm import linear_field, lpt_init, nbody, cic_paint
from flowpm.utils import r2c3d, c2r3d
sys.path.append('../utils/')
import tools
import diagnostics as dg
import contextlib
import functools
##
cosmology=Planck15
np.random.seed(100)
tf.random.set_random_seed(200)
cscratch = "../figs_recon/"
#
tf.flags.DEFINE_integer("nc", 64, "Size of the cube")
tf.flags.DEFINE_integer("batch_size", 1, "Batch Size")
tf.flags.DEFINE_float("box_size", 200, "Batch Size")
tf.flags.DEFINE_float("a0", 0.1, "initial scale factor")
tf.flags.DEFINE_float("af", 1.0, "final scale factor")
tf.flags.DEFINE_integer("nsteps", 5, "Number of time steps")
tf.flags.DEFINE_bool("nbody", True, "Do nbody evolution")
tf.flags.DEFINE_string("suffix", "", "suffix for the folder name")
tf.flags.DEFINE_bool("anneal", False, "Anneal")
tf.flags.DEFINE_float("lr", 0.01, "Learning rate")
tf.flags.DEFINE_integer("niter", 200, "Number of iterations")
tf.flags.DEFINE_float("plambda", 0.10, "Lambda of poisson probability")
FLAGS = tf.flags.FLAGS
nc, bs = FLAGS.nc, FLAGS.box_size
a0, a, nsteps =FLAGS.a0, FLAGS.af, FLAGS.nsteps
plambda = FLAGS.plambda
klin = np.loadtxt('..//data/Planck15_a1p00.txt').T[0].astype(np.float32)
plin = np.loadtxt('..//data/Planck15_a1p00.txt').T[1].astype(np.float32)
ipklin = iuspline(klin, plin)
# Compute necessary Fourier kernels
#kvec = flowpm.kernels.fftk((nc, nc, nc), symmetric=False)
kvec = tools.fftk((nc, nc, nc), boxsize=nc, symmetric=False)
kmesh = (sum(k**2 for k in kvec)**0.5).astype(np.float32)
priorwt = ipklin(kmesh)
stages = np.linspace(a0, a, nsteps, endpoint=True)
print(stages)
fpath = "./tmp/poisson_L%04d_N%03d_p%0.02f"%(bs, nc, plambda)
if FLAGS.anneal: fpath = fpath + '-anneal'
fpath = fpath + '%s/'%FLAGS.suffix
print(fpath)
for ff in [fpath]:
#for ff in [fpath, fpath + '/figs']:
try: os.makedirs(ff)
except Exception as e: print (e)
def make_val_and_grad_fn(value_fn):
@functools.wraps(value_fn)
def val_and_grad(x):
return tfp.math.value_and_gradient(value_fn, x)
return val_and_grad
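# Minimal usage sketch (the quadratic below is illustrative only, not part of this
# script): the decorator turns a scalar loss into the (value, gradients) pair that
# tfp's optimizers expect, e.g.
#
#   @make_val_and_grad_fn
#   def quadratic(x):
#       return tf.reduce_sum((x - 1.0) ** 2)
#
#   results = tfp.optimizer.lbfgs_minimize(
#       quadratic, initial_position=tf.zeros([10]), tolerance=1e-8)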
@contextlib.contextmanager
def timed_execution():
t0 = time.time()
yield
dt = time.time() - t0
print('Evaluation took: %f seconds' % dt)
def np_value(tensor):
"""Get numpy value out of possibly nested tuple of tensors."""
if isinstance(tensor, tuple):
return type(tensor)(*(np_value(t) for t in tensor))
else:
#return tensor.numpy()
return tensor.value()
def run(optimizer):
"""Run an optimizer and measure it's evaluation time."""
optimizer() # Warmup.
with timed_execution():
result = optimizer()
return np_value(result)
##############################################
def main(_):
dtype=tf.float32
startw = time.time()
tf.random.set_random_seed(100)
    np.random.seed(100)
#!/Library/Frameworks/Python.framework/Versions/3.8/bin/python3
"""
"""
from numpy import array, abs, argsort, savetxt
from sklearn.cross_decomposition import PLSRegression
from sklearn.preprocessing import StandardScaler
from matplotlib import pyplot as plt, cm, rcParams
from DmimData.data import DMD
# define a global pRNG seed
SEED = 420
# set the global fontsize on plots
rcParams['font.size'] = 8
def plot_plsra_c12(c1, c2, c=None, c_min=None, c_max=None, alpha=1., use_ax=None, use_fig=None, s=6, colorbar=False):
""" sets up PLS-RA projection plots """
if use_ax is None:
fig = plt.figure(figsize=(3.5, 3.5))
ax = fig.add_subplot(111)
ax.axvline(lw=0.5, c='k', zorder=0)
ax.axhline(lw=0.5, c='k', zorder=0)
else:
ax = use_ax
fig = use_fig
if c_min is None:
ax.scatter(c1, c2, marker='.', s=s, c=c, edgecolors='none', alpha=alpha)
else:
scat = ax.scatter(c1, c2, marker='.', s=s, c=c, cmap=cm.plasma, vmin=c_min, vmax=c_max, edgecolors='none', alpha=alpha)
if colorbar:
cax = fig.add_axes([0.2, 0.8, 0.6, 0.05])
cb = fig.colorbar(scat, cax=cax, orientation='horizontal', ticks=[c_min, c_max])
if use_ax is None:
for d in ['top', 'right', 'bottom', 'left']:
ax.spines[d].set_visible(False)
ax.set_xlabel('scores[0]', fontsize=8)
ax.set_ylabel('scores[1]', fontsize=8)
ax.ticklabel_format(style='sci', scilimits=(0, 0))
return fig, ax
def compute_mqn_plsra(mqn, ccs):
""" compute a PLS-RA using the MQN data, return the fitted PLS-RA instance
make a plot of the projections and save it """
plsra = PLSRegression(scale=False)
projections = plsra.fit_transform(mqn, ccs)
print('PLS-RA (MQN)')
# make a plot
p1, p2 = projections[0].T[0], projections[0].T[1]
fig, ax = plot_plsra_c12(p1, p2, c=ccs, c_min=120, c_max=240, colorbar=True, s=16)
fig.savefig('DMIM_MQN_plsra12_cCCS.png', dpi=400, bbox_inches='tight')
plt.close()
return plsra
def compute_md3d_plsra(md3d, ccs):
""" compute a PLS-RA using the MD3D data, return the fitted PCA instance
make a plot of the projections and save it """
plsra = PLSRegression(scale=False)
projections = plsra.fit_transform(md3d, ccs)
print('PLS-RA (MD3D)')
# make a plot
p1, p2 = projections[0].T[0], projections[0].T[1]
fig, ax = plot_plsra_c12(p1, p2, c=ccs, c_min=120, c_max=240, colorbar=True, s=8)
fig.savefig('DMIM_MD3D_plsra12_cCCS.png', dpi=400, bbox_inches='tight')
plt.close()
return plsra
def compute_comb_plsra(comb, ccs):
""" compute a PLS-RA using the MQN + MD3D data, return the fitted PLS-RA instance
make a plot of the projections and save it """
plsra = PLSRegression(scale=False)
projections = plsra.fit_transform(comb, ccs)
print('PLS-RA (combined)')
# make a plot
p1, p2 = projections[0].T[0], projections[0].T[1]
fig, ax = plot_plsra_c12(p1, p2, c=ccs, c_min=120, c_max=240, colorbar=True, s=16)
fig.savefig('DMIM_COMB_plsra12_cCCS.png', dpi=400, bbox_inches='tight')
plt.close()
return plsra
def report_mqn_x_loadings(mqn_plsra):
""" print PCN loadings in order of descending absolute magnitude, make a plot """
idx = argsort(abs(mqn_plsra.x_loadings_.T[0]))[::-1]
labels = array([
'c', 'f', 'cl', 'br',
'i', 's', 'p', 'an',
'cn', 'ao', 'co', 'hac',
'hbam', 'hba', 'hbdm',
'hbd', 'neg', 'pos',
'asb', 'adb', 'atb', 'csb',
'cdb', 'ctb', 'rbc',
'asv', 'adv', 'atv', 'aqv',
'cdv', 'ctv', 'cqv', 'r3',
'r4', 'r5', 'r6', 'r7',
'r8', 'r9', 'rg10',
'afr', 'bfr'
])
print('feature X loadings')
print(labels[idx])
#print(mqn_pca.components_[pc_n - 1][idx])
# plot the loadings
fig = plt.figure(figsize=(4, 1.5))
ax = fig.add_subplot(111)
y = mqn_plsra.x_loadings_.T[0][idx]
pc = ['b' for _ in y]
for i in range(len(y)):
if y[i] < 0:
pc[i] = 'r'
ax.bar([_ + 1 for _ in range(len(y))], y, color=pc)
ax.axhline(0, ls='--', c='k', lw=0.75)
ax.set_xticks([_ + 1 for _ in range(len(y))])
for d in ['top', 'right']:
ax.spines[d].set_visible(False)
ax.set_xticklabels(labels[idx], rotation='vertical', fontsize=6)
for xtick, c_ in zip(ax.get_xticklabels(), pc):
xtick.set_color(c_)
ax.set_ylabel('x loading')
fig.savefig('DMIM_MQN_plsra_x-loadings.png', dpi=400, bbox_inches='tight')
plt.close()
def report_md3d_x_loadings(md3d_plsra):
""" print PCN loadings in order of descending absolute magnitude, make a plot """
idx = argsort(abs(md3d_plsra.x_loadings_.T[0]))[::-1]
    labels = array(['pmi1', 'pmi2', 'pmi3', 'rmd02', 'rmd24', 'rmd46', 'rmd68', 'rmd8p'])
# -*- coding: utf-8 -*-
import os
import itertools
import numpy as np
from kanapy.collision_detect_react import collision_routine
def cub_oct_split(cub):
"""
Splits cuboid object of the class :class:`~Cuboid` into eight smaller cuboid objects
:param cub: Branch cuboid object containing ellipsoids
:type cub: object of the class :class:`~Cuboid`
:returns: Eight new sub-branch cuboid objects in a list
:rtype: List
"""
w = cub.width/2.0
h = cub.height/2.0
d = cub.depth/2.0
cl = []
cl.append(Cuboid(cub.left, cub.top, cub.left + w,
cub.top + h, cub.front, cub.front + d))
cl.append(Cuboid(cub.left+w, cub.top, cub.left+2.*w,
cub.top + h, cub.front, cub.front + d))
cl.append(Cuboid(cub.left, cub.top+h, cub.left + w,
cub.top + 2.*h, cub.front, cub.front + d))
cl.append(Cuboid(cub.left+w, cub.top+h, cub.left+2.*w,
cub.top+2.*h, cub.front, cub.front + d))
cl.append(Cuboid(cub.left, cub.top, cub.left + w,
cub.top + h, cub.front+d, cub.front + 2.*d))
cl.append(Cuboid(cub.left+w, cub.top, cub.left+2.*w,
cub.top + h, cub.front+d, cub.front + 2.*d))
cl.append(Cuboid(cub.left, cub.top+h, cub.left + w,
cub.top + 2.*h, cub.front+d, cub.front + 2.*d))
cl.append(Cuboid(cub.left+w, cub.top+h, cub.left+2.*w,
cub.top+2.*h, cub.front+d, cub.front + 2.*d))
return cl
class Simulation_Box(object):
"""
Creates :class:`~Simulation_Box` objects for the defined simulation domain.
:param w: width
:param h: height
:param d: depth of the simulation domain
:type w: float
:type h: float
:type d: float
"""
def __init__(self, w, h, d):
self.w = w # Width
self.h = h # Height
self.d = d # Depth
self.sim_ts = 0 # Initialize simulation time-step
self.left = 0
self.top = 0
self.front = 0
self.right = w
self.bottom = h
self.back = d
class Ellipsoid(object):
"""
Creates :class:`~Ellipsoid` objects for each ellipsoid generated from input statistics.
:param iden: ID of the ellipsoid
:type iden: integer
:param center: Position :math:`(x, y, z)` of the ellipsoid center in the simulation domain
:type center: floats
:param coefficient: Semi-major and semin-minor axes lengths :math:`(a, b, c)` of the ellipsoid
:type coefficient: floats
:param quat: Quaternion representing ellipsoid's axis and tilt angle with respect
to the positive x-axis
:type quat: numpy array
.. note:: 1. The orientations of ellipsoid :math:`i` in the global coordinate space is defined by its
tilt angle and axis vector and expressed in quaternion notation as,
.. image:: /figs/quaternion_ell.png
:width: 210px
:height: 40px
:align: center
2. Ellipsoids are initilaized without a value for its velocity,
and is later assigned a random value by :mod:`kanapy.packing.particle_generator`.
3. An empty list for storing voxels belonging to the ellipsoid is initialized.
"""
def __init__(self, iden, x, y, z, a, b, c, quat):
self.id = iden
self.x = x
self.y = y
self.z = z
self.a = a
self.b = b
self.c = c
self.quat = quat
self.oria, self.orib, self.oric = a, b, c # Store the original size of the particle
self.speedx = 0.
self.speedy = 0.
self.speedz = 0.
self.rotationMatrixGen() # Initialize roatation matrix for the ellipsoid
self.surfacePointsGen() # Initialize surface points for the ellipsoid
self.inside_voxels = [] # List that stores voxels belonging to the ellipsoid
self.set_cub() # sets particle cuboid for collision testing with octree boxes
self.duplicate = None # Duplicate status used for voxelization
def get_pos(self):
"""
Returns the position array of the ellipsoid
:rtype: numpy array
"""
return np.array([self.x, self.y, self.z])
def get_coeffs(self):
"""
Returns the coefficients array of the ellipsoid
:rtype: numpy array
"""
return np.array([self.a, self.b, self.c])
def get_volume(self):
"""
Returns the volume of the ellipsoid
:rtype: float
"""
return (4/3)*np.pi*self.a*self.b*self.c
def rotationMatrixGen(self):
"""
Evaluates the rotation matrix for the ellipsoid using the quaternion
:rtype: numpy array
"""
        FLOAT_EPS = np.finfo(float).eps  # np.float was removed in NumPy 1.24; the builtin float is equivalent here
w, x, y, z = self.quat
Nq = w*w + x*x + y*y + z*z
s = 2.0/Nq
X = x*s
Y = y*s
Z = z*s
wX = w*X
wY = w*Y
wZ = w*Z
xX = x*X
xY = x*Y
xZ = x*Z
yY = y*Y
yZ = y*Z
zZ = z*Z
if Nq < FLOAT_EPS:
self.rotation_matrix = np.eye(3)
else:
self.rotation_matrix = np.array([[1.0-(yY+zZ), xY-wZ, xZ+wY],
[xY+wZ, 1.0-(xX+zZ), yZ-wX],
[xZ-wY, yZ+wX, 1.0-(xX+yY)]])
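            # standard unit-quaternion (w, x, y, z) -> rotation matrix map; Nq rescales non-unit
            # quaternions and the FLOAT_EPS guard above returns the identity for a near-zero quaternion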
# Rotation matrix has to be transposed as OVITO uses the transposed matrix for visualization.
#self.rotation_matrix = self.rotation_matrix.T # Not required, it's consistent!!!
def surfacePointsGen(self):
"""
Generates points on the outer surface of the ellipsoid using the rotation matrix from :meth:`rotationMatrixGen`
:rtype: numpy array
"""
# Points on the outer surface of Ellipsoid
u = np.linspace(0, 2 * np.pi, 20)
        v = np.linspace(0, np.pi, 20)
import os
import numpy as np
from scipy import stats
from . import base
class Continuous(base.DoseResponseModel):
INDIVIDUAL = 1
SUMMARY = 0
@classmethod
def get_precompiled_path(cls, data_type):
fn = '{}.individual.pkl'.format(cls.__name__.lower())\
if data_type == cls.INDIVIDUAL \
else '{}.summary.pkl'.format(cls.__name__.lower())
return os.path.join(os.path.dirname(__file__), 'compiled', fn)
def get_input_count(self):
return self.data['len']
@property
def response_direction(self):
if not hasattr(self, '_response_direction'):
if self.is_individual_dataset:
doses = self.data['dnorm']
resps = self.data['y']
avg_min_dose = np.mean(resps[np.where(doses == doses.min())])
avg_max_dose = np.mean(resps[np.where(doses == doses.max())])
self._response_direction = \
1 if (avg_min_dose < avg_max_dose) else -1
else:
dnorm = self.data['dnorm']
resp = self.data['resp']
self._response_direction = 1 if \
resp[dnorm.argmin()] < resp[dnorm.argmax()] \
else -1
return self._response_direction
@property
def is_individual_dataset(self):
return self.data['individual'] == self.INDIVIDUAL
def get_stan_model(self):
return self.STAN_INDIVIDUAL \
if self.is_individual_dataset \
else self.STAN_SUMMARY
def get_prior_upper(self):
if self.is_individual_dataset:
return self.data['y'].max() * 2.
else:
return (
self.data['resp'].max() +
2. * self.data['stdev'][np.argmax(self.data['resp'])]
) * 2.
def get_prior_slope(self):
if self.is_individual_dataset:
y = self.data['y']
dnorm = self.data['dnorm']
slope = (y.max() - y.min()) /\
(dnorm[y.argmax()] - dnorm[y.argmin()])
else:
dose = self.data['d']
resp = self.data['resp']
stdev = self.data['stdev']
dnorm = self.data['dnorm']
mean_dmax = resp[dose == dose.max()]
std_dmax = stdev[dose == dose.max()]
mean_dmin = resp[dose == dose.min()]
std_dmin = stdev[dose == dose.min()]
slope = (mean_dmax + std_dmax * 2 - mean_dmin - std_dmin * 2) /\
(dnorm.max() - dnorm.min())
b = np.array([0., slope * 5.])
if self.response_direction == -1:
b = b[::-1]
return b
def likelihoodI(self, resplog, meanlog, sdlog):
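        # log-likelihood of individual responses under a normal model for the log-responses (i.e. lognormally distributed data)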
return np.sum(np.log(stats.norm.pdf(resplog, meanlog, sdlog)))
def likelihoodC(self, resplog, sdlog, iresplog, isdlog, ins):
return (
-0.5 * np.sum(ins) * np.log(np.pi * 2.) -
np.sum(0.5 * ins * np.log(sdlog ** 2.) +
0.5 * (ins * (iresplog - resplog) ** 2. +
(ins - 1.) * isdlog ** 2.) / sdlog ** 2.))
def get_plot_bounds(self, xs, vectors):
sigma = np.percentile(self.parameters['sigma'], 50.)
for i in xrange(xs.size):
resps = self.get_response_values(xs[i], **self.parameters)
resp = np.percentile(resps, 50.)
vectors[i, :] = (
xs[i],
np.exp(stats.norm.ppf(0.05, np.log(resp), sigma)),
resp,
np.exp(stats.norm.ppf(0.95, np.log(resp), sigma)),
)
return vectors
class Exponential2(Continuous):
PARAMETERS = ('a', 'b', 'sigma')
STAN_INDIVIDUAL = """
data{
int <lower=0> len; // number of dose points
real <lower=0> dnorm[len]; // dose levels
real <lower=0> y[len]; // observed responses
real p_a[2]; // prior for a
real p_b[2]; // prior for b
real p_sig[2]; // prior for sig
}
parameters{
real <lower=0> a;
real b;
real <lower=0> sigma;
}
model{
a ~ uniform(p_a[1], p_a[2]);
b ~ uniform(p_b[1], p_b[2]);
sigma ~ cauchy(p_sig[1], p_sig[2]);
for (i in 1:len)
log(y[i]) ~ normal(log(a*exp(b*dnorm[i])), sigma);
}
"""
STAN_SUMMARY = """
data{
int <lower=0> len; // number of dose groups
int <lower=0> n[len]; // number of subjects in each dose group
real <lower=0> dnorm[len]; // dose levels
real ym[len]; // observed mean of responses
real <lower=0> ysd[len]; // observed stdev of responses
real p_a[2]; // prior for a
real p_b[2]; // prior for b
real p_sig[2]; // prior for sig
}
parameters{
real <lower=0> a;
real b;
real <lower=0> sigma;
}
model{
a ~ uniform(p_a[1], p_a[2]);
b ~ uniform(p_b[1], p_b[2]);
sigma ~ cauchy(p_sig[1], p_sig[2]);
for (i in 1:len){
target += (-n[i]*log(sigma^2)*0.5-(n[i]*(ym[i]-log(a*exp(b*dnorm[i])))^2+(n[i]-1)*ysd[i]^2)/(2*sigma^2));
}
}
"""
LATEX_EQUATION = r'$f(dose) = a \times e^{b \times dose}$' # noqa
def get_priors(self):
if self.response_direction == 1:
b_prior = np.array([0., 50.])
else:
b_prior = np.array([-50., 0.])
return {
'p_a': np.array([0., self.get_prior_upper()]),
'p_b': b_prior,
'p_sig': np.array([0., 2.5]),
}
def get_response_vector(self, a, b, doses):
return a * np.exp(b * doses)
def get_predicted_response_vector(self):
a = self.parameters['a']
b = self.parameters['b']
sigma = self.parameters['sigma']
predicted = np.zeros(a.size, dtype=np.float64)
observed = np.zeros(a.size, dtype=np.float64)
if self.is_individual_dataset:
doses = self.data['dnorm']
resps = self.data['y']
for i in xrange(a.size):
mean_posterior = np.log(self.get_response_vector(a[i], b[i], doses))
y_post_pred = np.random.normal(mean_posterior, sigma[i])
predicted[i] = -2. * self.likelihoodI(mean_posterior, y_post_pred, sigma[i])
observed[i] = -2. * self.likelihoodI(mean_posterior, np.log(resps), sigma[i])
else:
dnorm = self.data['dnorm']
ns = self.data['n']
resp_mean_ln = self.data['ym']
resp_std_ln = self.data['ysd']
for i in xrange(a.size):
mean_posterior = np.log(self.get_response_vector(a[i], b[i], dnorm))
mean_pred = np.empty(dnorm.size)
std_pred = np.empty(dnorm.size)
for j in xrange(dnorm.size):
resp_ind_pred = np.random.normal(mean_posterior[j], sigma[i], ns[j])
mean_pred[j] = np.average(resp_ind_pred)
std_pred[j] = np.std(resp_ind_pred)
predicted[i] = -2. * self.likelihoodC(
mean_posterior, sigma[i],
mean_pred, std_pred, ns)
observed[i] = -2. * self.likelihoodC(
mean_posterior, sigma[i],
resp_mean_ln, resp_std_ln, ns)
return predicted, observed
def get_loglikelihood(self, samples):
a = samples[0, :]
b = samples[1, :]
sigma = samples[2, :]
predicted = np.zeros(a.size, dtype=np.float64)
if self.is_individual_dataset:
doses = self.data['dnorm']
resps = self.data['y']
for i in xrange(a.size):
resp = np.log(self.get_response_vector(a[i], b[i], doses))
predicted[i] = self.likelihoodI(resp, np.log(resps), sigma[i])
else:
dnorm = self.data['dnorm']
ns = self.data['n']
resp_mean_ln = self.data['ym']
resp_std_ln = self.data['ysd']
for i in xrange(a.size):
resp = np.log(self.get_response_vector(a[i], b[i], dnorm))
predicted[i] = self.likelihoodC(
resp, sigma[i],
resp_mean_ln, resp_std_ln, ns)
return predicted
def get_response_values(self, x, **kw):
return self.get_response_vector(kw['a'], kw['b'], x)
def get_control_vector(self):
a = self.parameters['a']
b = self.parameters['b']
return self.get_response_vector(a, b, 0.)
def calc_central_tendency(self, cutoff):
a = self.parameters['a']
b = self.parameters['b']
return np.log(cutoff / a) / b
def added_risk(self, bmr):
return 1.
class Exponential3(Continuous):
PARAMETERS = ('a', 'b', 'g', 'sigma')
STAN_INDIVIDUAL = """
data{
int <lower=0> len; // number of dose points
real pwr_lbound; // restraint value
real <lower=0> dnorm[len]; // dose levels
real <lower=0> y[len]; // observed responses
real p_a[2]; // prior for a
real p_b[2]; // prior for b
real p_g[2]; // prior for g
real p_sig[2]; // prior for sig
}
parameters{
real <lower=0> a;
real b;
real <lower=pwr_lbound> g;
real <lower=0> sigma;
}
model{
a ~ uniform(p_a[1], p_a[2]);
b ~ uniform(p_b[1], p_b[2]);
g ~ uniform(p_g[1], p_g[2]);
sigma ~ cauchy(p_sig[1], p_sig[2]);
for (i in 1:len)
log(y[i]) ~ normal(log(a*exp(b*dnorm[i]^g)), sigma);
}
"""
STAN_SUMMARY = """
data{
int <lower=0> len; // number of dose groups
real pwr_lbound; // restraint value
int <lower=0> n[len]; // number of subjects in each dose group
real <lower=0> dnorm[len]; // dose levels
real ym[len]; // observed mean of responses
real <lower=0> ysd[len]; // observed stdev of responses
real p_a[2]; // prior for a
real p_b[2]; // prior for b
real p_g[2]; // prior for g
real p_sig[2]; // prior for sig
}
parameters{
real <lower=0> a;
real b;
real <lower=pwr_lbound> g;
real <lower=0> sigma;
}
model{
a ~ uniform(p_a[1], p_a[2]);
b ~ uniform(p_b[1], p_b[2]);
g ~ uniform(p_g[1], p_g[2]);
sigma ~ cauchy(p_sig[1], p_sig[2]);
for (i in 1:len){
target += (-n[i]*log(sigma^2)*0.5-(n[i]*(ym[i]-log(a*exp(b*dnorm[i]^g)))^2+(n[i]-1)*ysd[i]^2)/(2*sigma^2));
}
}
"""
LATEX_EQUATION = r'$f(dose) = a \times e^{b \times dose^g}$' # noqa
def get_priors(self):
if self.response_direction == 1:
b_prior = np.array([0., 50.])
else:
b_prior = np.array([-50., 0.])
return {
'p_a': np.array([0., self.get_prior_upper()]),
'p_b': b_prior,
'p_g': np.array([0., 15.]),
'p_sig': np.array([0., 2.5]),
}
def get_settings(self):
pwr_lbound = self.kwargs.get('pwr_lbound', 1.)
if pwr_lbound < 0. or pwr_lbound > 1.:
raise ValueError('Invalid pwr_lbound: {}'.format(pwr_lbound))
return {
'pwr_lbound': pwr_lbound,
}
def get_response_vector(self, a, b, g, doses):
return a * np.exp(b * doses ** g)
def get_predicted_response_vector(self):
a = self.parameters['a']
b = self.parameters['b']
g = self.parameters['g']
sigma = self.parameters['sigma']
predicted = np.zeros(a.size, dtype=np.float64)
observed = np.zeros(a.size, dtype=np.float64)
if self.is_individual_dataset:
doses = self.data['dnorm']
resps = self.data['y']
for i in xrange(a.size):
mean_posterior = np.log(self.get_response_vector(a[i], b[i], g[i], doses))
y_post_pred = np.random.normal(mean_posterior, sigma[i])
predicted[i] = -2. * self.likelihoodI(mean_posterior, y_post_pred, sigma[i])
observed[i] = -2. * self.likelihoodI(mean_posterior, np.log(resps), sigma[i])
else:
dnorm = self.data['dnorm']
ns = self.data['n']
resp_mean_ln = self.data['ym']
resp_std_ln = self.data['ysd']
for i in xrange(a.size):
mean_posterior = np.log(self.get_response_vector(a[i], b[i], g[i], dnorm))
mean_pred = np.empty(dnorm.size)
                std_pred = np.empty(dnorm.size)
r"""Polynomials on an m-dimensional simplex T with values in :math:`\mathbb{R}^n`, expressed using the
Lagrange basis.
.. math::
l(x) = \sum_{\substack{\nu \in \mathbb{N}_0^m \\ |\nu| \leq r}} a_{\nu} l_{\nu, r}(x)
= \sum_{\nu} a_{\nu} (\bar{l}_{\nu, r} \circ \Phi^{-1})(x),
where :math:`a_{\nu} \in \mathbb{R}^n, \bar{l}_{\nu, r}` is the Lagrange basis on the unit simplex and :math:`\Phi`
is the unique affine map which maps the unit simplex onto the simplex T (the i:th vertex of the unit simplex is mapped
to the i:th vertex of the simplex T).
The basis polynomials :math:`l_{\nu, r} = \bar{l}_{\nu, r} \circ \Phi^{-1}` satisfies
.. math:: l_{\nu, r}(\Phi(x_{\mu})) = \delta_{\mu, \nu},
where :math:`x_{\mu}` are the Lagrange points on the unit simplex.
The set :math:`\{ l_{\nu, r} \}_{\substack{\nu \in \mathbb{N}_0^m \\ |\nu| \leq r}}` is a basis for the space
of all polynomials of degree less than or equal to r on the simplex T, :math:`\mathcal{P}_r (T)`.
"""
import numbers
import numpy as np
import polynomials_on_simplices.algebra.multiindex as multiindex
from polynomials_on_simplices.generic_tools.code_generation_utils import CodeWriter
from polynomials_on_simplices.generic_tools.str_utils import str_dot_product, str_number, str_number_array
from polynomials_on_simplices.geometry.primitives.simplex import (
affine_map_from_unit, affine_map_to_unit, affine_transformation_to_unit, dimension)
from polynomials_on_simplices.polynomial.polynomials_base import get_dimension
from polynomials_on_simplices.polynomial.polynomials_monomial_basis import Polynomial, dual_monomial_basis
from polynomials_on_simplices.polynomial.polynomials_simplex_base import PolynomialSimplexBase
from polynomials_on_simplices.polynomial.polynomials_unit_simplex_lagrange_basis import (
PolynomialLagrange, generate_lagrange_point, generate_lagrange_points, lagrange_basis_latex_compact)
def unique_identifier_lagrange_basis_simplex(vertices):
"""
Get unique identifier for the Lagrange polynomial basis on a simplex T.
:param vertices: Vertices of the simplex T ((m + 1) x m matrix where row i contains the i:th vertex of the
simplex).
:return: Unique identifier.
:rtype: str
"""
from polynomials_on_simplices.generic_tools.code_generation_utils import CodeWriter
identifier = CodeWriter()
identifier.wl("Lagrange(")
identifier.inc_indent()
identifier.wc(str(vertices))
identifier.dec_indent()
identifier.wl(")")
return identifier.code
def generate_lagrange_point_simplex(vertices, r, nu):
r"""
Generate a Lagrange point indexed by a multi-index on an n-dimensional simplex T from the set
    :math:`\{ \bar{x}_{\nu} \}` of evenly spaced Lagrange points on the n-dimensional unit simplex (:math:`\Delta_c^n`)
(Lagrange basis points are constructed so that each basis function has the value 1 at one of the points,
and 0 at all the other points).
.. math:: \bar{x}_{\nu} = \frac{\nu}{r},
    .. math:: x_{\nu} = \Phi(\bar{x}_{\nu}),
where :math:`\Phi` is the unique affine map which maps the unit simplex to the simplex T.
:param vertices: Vertices of the simplex T ((n + 1) x n matrix where row i contains the i:th vertex of the
simplex).
:param int r: Degree of the polynomial.
:param nu: Multi-index :math:`\nu` indexing the Lagrange point, where :math:`\frac{\nu_i}{r}` gives
the i:th coordinate of the corresponding Lagrange point in the unit simplex.
:return: Point in the n-dimensional simplex T.
:rtype: :class:`Numpy array <numpy.ndarray>`
"""
n = dimension(vertices)
x = generate_lagrange_point(n, r, nu)
phi = affine_map_from_unit(vertices)
return phi(x)
def generate_lagrange_points_simplex(vertices, r):
r"""
Generate evenly spaced Lagrange points on an n-dimensional simplex T
(Lagrange basis points are constructed so that each basis function has the value 1 at one of the points,
and 0 at all the other points).
    .. math:: \{ x_{\nu} \}_{\substack{\nu \in \mathbb{N}_0^n \\ |\nu| \leq r}}, \quad x_{\nu} = \Phi(\bar{x}_{\nu}),
where :math:`\{ \bar{x}_{\nu} \}` is the set of evenly spaced Lagrange points on the unit simplex, and :math:`\Phi`
is the unique affine map which maps the unit simplex to the simplex T.
:param vertices: Vertices of the simplex T ((n + 1) x n matrix where row i contains the i:th vertex of the
simplex).
:param int r: Degree of the polynomial.
:return: List of points in the n-dimensional simplex T.
:rtype: :class:`Numpy array <numpy.ndarray>`
"""
phi = affine_map_from_unit(vertices)
n = len(vertices[0])
if n == 1:
x = np.empty(r + 1)
else:
x = np.empty((get_dimension(r, n), n))
xbar = generate_lagrange_points(n, r)
for i in range(len(x)):
x[i] = phi(xbar[i])
return x
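# Example (sketch): for the unit triangle the affine map is the identity, so the
# degree-2 Lagrange points are the three vertices plus the three edge midpoints:
#
#   pts = generate_lagrange_points_simplex(np.array([[0., 0.], [1., 0.], [0., 1.]]), 2)
#   # pts.shape == (6, 2), matching get_dimension(2, 2) == 6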
class PolynomialLagrangeSimplex(PolynomialSimplexBase):
r"""
Implementation of the abstract polynomial base class for a polynomial on an m-dimensional simplex T,
expressed in the Lagrange basis.
.. math:: l(x) = \sum_{i = 0}^{\dim(\mathcal{P}_r(\mathbb{R}^m)) - 1} a_{\nu_i} l_{\nu_i, r}(x).
"""
def __init__(self, coeff, vertices, r=None):
r"""
:param coeff: Coefficients for the polynomial in the Lagrange basis for :math:`\mathcal{P}_r (T,
\mathbb{R}^n). \text{coeff}[i] = a_{\nu_i}`, where :math:`\nu_i` is the i:th multi-index in the sequence
of all multi-indices of dimension m with norm :math:`\leq r`
(see :func:`polynomials_on_simplices.algebra.multiindex.generate` function).
Array of scalars for a scalar valued polynomial (n = 1) and array of n-dimensional vectors for a vector
valued polynomial (:math:`n \geq 2`).
:param vertices: Vertices of the simplex T ((m + 1) x m matrix where row i contains the i:th vertex of the
simplex).
:param int r: Degree of the polynomial space. Optional, will be inferred from the number of polynomial
coefficients if not specified.
"""
m = len(vertices[0])
PolynomialSimplexBase.__init__(self, coeff, vertices, r)
self.vertices = vertices
self._unit_simplex_polynomial = PolynomialLagrange(coeff, r, m)
self._a, self._b = affine_transformation_to_unit(vertices)
self._phi_inv = affine_map_to_unit(vertices)
def __repr__(self):
return "polynomials_on_simplices.algebra.polynomial.polynomials_simplex_lagrange_basis.PolynomialLagrangeSimplex("\
+ str(self.coeff) + ", " + str(self.vertices) + ", " + str(self.degree()) + ")"
def basis(self):
r"""
Get basis for the space :math:`\mathcal{P}_r (\mathbb{R}^m)` used to express this polynomial.
:return: Unique identifier for the basis used.
:rtype: str
"""
return unique_identifier_lagrange_basis_simplex(self.vertices)
def __call__(self, x):
r"""
Evaluate the polynomial at a point :math:`x \in \mathbb{R}^m`.
:param x: Point where the polynomial should be evaluated.
:type x: float or length m :class:`Numpy array <numpy.ndarray>`
:return: Value of the polynomial.
:rtype: float or length n :class:`Numpy array <numpy.ndarray>`.
"""
return self._unit_simplex_polynomial(self._phi_inv(x))
def __mul__(self, other):
"""
Multiplication of this polynomial with another polynomial, a scalar, or a vector (for a scalar valued
polynomial), self * other.
:param other: Polynomial, scalar or vector we should multiply this polynomial with.
:type: PolynomialLagrangeSimplex, scalar or vector
:return: Product of this polynomial with other.
:rtype: :class:`PolynomialLagrangeSimplex`.
"""
if isinstance(other, numbers.Number) or isinstance(other, np.ndarray):
return self.multiply_with_constant(other)
# Multiplication of two polynomials
# Multiplied polynomials need to have the same domain dimension
assert self.domain_dimension() == other.domain_dimension()
# Cannot multiply two vector valued polynomials
assert self.target_dimension() == 1
assert other.target_dimension() == 1
m = self.domain_dimension()
r = self.degree() + other.degree()
dim = get_dimension(r, m)
coeff = np.empty(dim)
x = generate_lagrange_points_simplex(self.vertices, r)
for i in range(len(x)):
coeff[i] = self(x[i]) * other(x[i])
return PolynomialLagrangeSimplex(coeff, self.vertices, r)
def __pow__(self, exp):
r"""
Raise the polynomial to a power.
.. math::
(l^{\mu})(x) = l(x)^{\mu} = l_1(x)^{\mu_1} l_2(x)^{\mu_2} \ldots l_n(x)^{\mu_n}.
:param exp: Power we want the raise the polynomial to (natural number or multi-index depending on the dimension
of the target of the polynomial).
:type exp: int or :class:`~polynomials_on_simplices.algebra.multiindex.MultiIndex` or Tuple[int, ...]
:return: This polynomial raised to the given power.
:rtype: :class:`PolynomialLagrangeSimplex`.
"""
if isinstance(exp, numbers.Integral):
assert exp >= 0
assert self.target_dimension() == 1
if exp == 0:
return unit_polynomial_simplex(self.vertices, 1)
if exp == 1:
return PolynomialLagrangeSimplex(self.coeff, self.vertices, self.r)
return self * self**(exp - 1)
else:
assert len(exp) == self.target_dimension()
assert [entry >= 0 for entry in exp]
m = self.domain_dimension()
r = self.degree() * multiindex.norm(exp)
dim = get_dimension(r, m)
coeff = np.empty(dim)
# Get the coefficients by applying the dual basis (evaluate at
# Lagrange points) to the exponentiated polynomial
x = generate_lagrange_points_simplex(self.vertices, r)
for i in range(len(x)):
coeff[i] = multiindex.power(self(x[i]), exp)
return PolynomialLagrangeSimplex(coeff, self.vertices, r)
def partial_derivative(self, i=0):
"""
Compute the i:th partial derivative of the polynomial.
:param int i: Index of partial derivative.
:return: i:th partial derivative of this polynomial.
:rtype: :class:`PolynomialLagrangeSimplex`.
"""
assert isinstance(i, numbers.Integral)
assert i >= 0
m = self.domain_dimension()
n = self.target_dimension()
assert i < m
r = self.degree()
if r == 0:
return zero_polynomial_simplex(self.vertices, 0, n)
# Compute derivative using the chain rule
# We have D(l)(x) = D((lb o pi)(x) = D(lb)(pi(x)) * D(pi)(x)
from polynomials_on_simplices.calculus.polynomial.polynomials_calculus import gradient, jacobian
if m == 1:
if n == 1:
db = self._unit_simplex_polynomial.partial_derivative()
return PolynomialLagrangeSimplex(db.coeff, self.vertices, self.r - 1) * self._a
else:
jb = jacobian(self._unit_simplex_polynomial)
coeff = np.empty((len(jb[0][0].coeff), n))
for j in range(n):
coeff[:, j] = jb[j][0].coeff * self._a
return PolynomialLagrangeSimplex(coeff, self.vertices, self.r - 1)
else:
if n == 1:
gb = gradient(self._unit_simplex_polynomial)
d = PolynomialLagrangeSimplex(gb[0].coeff, self.vertices, self.r - 1) * self._a[0, i]
for k in range(1, m):
d += PolynomialLagrangeSimplex(gb[k].coeff, self.vertices, self.r - 1) * self._a[k, i]
return d
else:
jb = jacobian(self._unit_simplex_polynomial)
coeff = np.empty((len(jb[0][0].coeff), n))
for j in range(n):
coeff[:, j] = jb[j][0].coeff * self._a[0, i]
for k in range(1, m):
coeff[:, j] += jb[j][k].coeff * self._a[k, i]
return PolynomialLagrangeSimplex(coeff, self.vertices, self.r - 1)
def degree_elevate(self, s):
r"""
Express the polynomial using a higher degree basis.
Let :math:`p(x) = \sum_{\substack{\nu \in \mathbb{N}_0^m \\ |\nu| \leq r}} a_{\nu} l_{\nu, r}(x)` be this
polynomial, where :math:`\{ l_{\nu, r} \}_{\substack{\nu \in \mathbb{N}_0^m \\ |\nu| \leq r}}` is the Lagrange
basis for :math:`\mathcal{P}_r (T)`. Let :math:`\{ l_{\nu, s} \}_{\substack{\nu \in \mathbb{N}_0^m
\\ |\nu| \leq s}}, s \geq r` be the Lagrange basis for :math:`\mathcal{P}_s (T)`. Then this function
returns a polynomial :math:`q(x)`
.. math:: q(x) = \sum_{\substack{\nu \in \mathbb{N}_0^m \\ |\nu| \leq s}} \tilde{a}_{\nu} l_{\nu, s}(x),
such that :math:`p(x) = q(x) \, \forall x \in T`.
:param int s: New degree for the polynomial basis the polynomial should be expressed in.
:return: Elevation of this polynomial to the higher degree basis.
:rtype: :class:`PolynomialLagrangeSimplex`.
"""
assert s >= self.degree()
if s == self.degree():
return PolynomialLagrangeSimplex(self.coeff, self.vertices, self.r)
p = self._unit_simplex_polynomial.degree_elevate(s)
return PolynomialLagrangeSimplex(p.coeff, self.vertices, s)
def to_monomial_basis(self):
"""
Compute the monomial representation of this polynomial.
:return: This polynomial expressed in the monomial basis.
:rtype: :class:`~polynomials_on_simplices.polynomial.polynomials_monomial_basis.Polynomial`.
"""
if self.n == 1:
a = np.empty(get_dimension(self.r, self.m))
else:
a = np.empty((get_dimension(self.r, self.m), self.n))
q = dual_monomial_basis(self.r, self.m)
for i in range(len(q)):
a[i] = q[i](self)
return Polynomial(a, self.r, self.m)
def latex_str(self):
r"""
Generate a Latex string for this polynomial.
:return: Latex string for this polynomial.
:rtype: str
"""
try:
len(self.coeff[0])
coeff_strs = [str_number_array(c, latex=True) for c in self.coeff]
basis_strs = lagrange_basis_latex_compact(self.r, self.m)
return str_dot_product(coeff_strs, basis_strs)
except TypeError:
coeff_strs = [str_number(c, latex_fraction=True) for c in self.coeff]
basis_strs = lagrange_basis_latex_compact(self.r, self.m)
return str_dot_product(coeff_strs, basis_strs)
def latex_str_expanded(self):
r"""
Generate a Latex string for this polynomial, where each basis function has been expanded in the monomial
basis.
:return: Latex string for this polynomial.
:rtype: str
"""
try:
len(self.coeff[0])
coeff_strs = [str_number_array(c, latex=True) for c in self.coeff]
basis_strs = lagrange_basis_simplex_latex(self.r, self.vertices)
for i in range(len(basis_strs)):
if len(basis_strs[i]) > 3:
basis_strs[i] = "(" + basis_strs[i] + ")"
return str_dot_product(coeff_strs, basis_strs)
except TypeError:
coeff_strs = [str_number(c, latex_fraction=True) for c in self.coeff]
basis_strs = lagrange_basis_simplex_latex(self.r, self.vertices)
for i in range(len(basis_strs)):
if len(basis_strs[i]) > 3:
basis_strs[i] = "(" + basis_strs[i] + ")"
return str_dot_product(coeff_strs, basis_strs)
@staticmethod
def _generate_function_specific_name(a, vertices):
"""
Generate name for a general function evaluating a polynomial.
:param a: Coefficients for the polynomial used to generate a unique name.
:return: Name for the function.
:rtype: str
"""
coeff_hash = hash(str(a))
if coeff_hash < 0:
# Cannot have minus sign in name
coeff_hash *= -1
vertices_hash = hash(str(vertices))
if vertices_hash < 0:
# Cannot have minus sign in name
vertices_hash *= -1
return str(coeff_hash) + "_" + str(vertices_hash)
def code_str(self, fn_name):
r"""
Generate a function code string for evaluating this polynomial.
:param str fn_name: Name for the function in the generated code.
:return: Code string for evaluating this polynomial.
:rtype: str
"""
code = CodeWriter()
code.wl("def " + fn_name + "(y):")
code.inc_indent()
if self.m == 1:
code.wl("x = " + str(self._a) + " * y + " + str(self._b))
else:
code.wl("a = np." + self._a.__repr__())
code.wl("b = np." + self._b.__repr__())
code.wl("x = np.dot(a, y) + b")
poly_eval_code = self._unit_simplex_polynomial.code_str("temp")
poly_eval_code = poly_eval_code.split('\n')[1:]
poly_eval_code = "\n".join(poly_eval_code)
code.verbatim(poly_eval_code)
code.dec_indent()
return code.code
def lagrange_basis_fn_simplex(nu, r, vertices):
r"""
Generate a Lagrange basis polynomial on an n-dimensional simplex T,
where n is equal to the length of nu.
.. math:: l_{\nu, r}(x) = (\bar{l}_{\nu, r} \circ \Phi^{-1})(x),
where :math:`\bar{l}_{\nu, r}` is the corresponding Lagrange basis polynomial on the (n-dimensional) unit simplex,
and :math:`\Phi` is the unique affine map which maps the unit simplex to the simplex T.
:param nu: Multi-index indicating which Lagrange basis polynomial should be generated.
The polynomial will have the value 1 at the point associated with the multi-index,
and value 0 at all other points.
:type nu: int or :class:`~polynomials_on_simplices.algebra.multiindex.MultiIndex` or Tuple[int, ...]
:param int r: Degree of polynomial.
:param vertices: Vertices of the simplex T ((n + 1) x n matrix where row i contains the i:th vertex of the
simplex).
:return: The Lagrange base polynomial on the simplex T, as specified by nu and r.
:rtype: :class:`PolynomialLagrangeSimplex`.
"""
try:
m = len(nu)
except TypeError:
m = 1
nu = (nu,)
dim = get_dimension(r, m)
    coeff = np.zeros(dim, dtype=int)
from tkinter import *
from tkinter import ttk
import tkinter.filedialog as filedialog
from tkinter import messagebox
from PIL import Image,ImageDraw,ImageFont
from PIL import ImageTk,ImageGrab
import cv2
from skimage import filters
#import rasterio
import matplotlib.pyplot as pyplt
#from matplotlib.figure import Figure
import numpy as np
import os
#import time
import csv
import scipy.linalg as la
from functools import partial
#import threading
#import sys
#import kplus
from sklearn.cluster import KMeans
import tkintercorestat
#import tkintercorestat_plot
import tkintercore
import cal_kernelsize
#import histograms
#import createBins
import axistest
#from multiprocessing import Pool
import lm_method
#import batchprocess
import sel_area
class img():
def __init__(self,size,bands):
self.size=size
self.bands=bands
import batchprocess
displayimg={'Origin':None,
'PCs':None,
'Color Deviation':None,
'ColorIndices':None,
'Output':None}
previewimg={'Color Deviation':None,
'ColorIndices':None}
#cluster=['LabOstu','NDI'] #,'Greenness','VEG','CIVE','MExG','NDVI','NGRDI','HEIGHT']
#cluster=['LabOstu','NDI','Greenness','VEG','CIVE','MExG','NDVI','NGRDI','HEIGHT','Band1','Band2','Band3']
cluster=['PAT_R','PAT_G','PAT_B',
'DIF_R','DIF_G','DIF_B',
'ROO_R','ROO_G','ROO_B',
'GLD_R','GLD_G','GLD_B',
'Band1','Band2','Band3']
colorbandtable=np.array([[255,0,0],[255,127,0],[255,255,0],[127,255,0],[0,255,255],[0,127,255],[0,0,255],[127,0,255],[75,0,130],[255,0,255]],'uint8')
#print('colortableshape',colortable.shape)
filenames=[]
Multiimage={}
Multigray={}
Multitype={}
Multiimagebands={}
Multigraybands={}
workbandarray={}
displaybandarray={}
originbandarray={}
colorindicearray={}
clusterdisplay={}
kernersizes={}
multi_results={}
outputimgdict={}
outputimgbands={}
outputsegbands={}
originsegbands={}
oldpcachoice=[]
multiselectitems=[]
coinbox_list=[]
pre_checkbox=[]
originpcabands={}
batch={'PCweight':[],
'PCsel':[],
'Kmeans':[],
'Kmeans_sel':[],
'Area_max':[],
'Area_min':[],
'shape_max':[],
'shape_min':[],
'nonzero':[]}
root=Tk()
root.title('GridFree v.1.1.0 ')
root.geometry("")
root.option_add('*tearoff',False)
emptymenu=Menu(root)
root.config(menu=emptymenu)
screenheight=root.winfo_screenheight()
screenwidth=root.winfo_screenwidth()
print('screenheight',screenheight,'screenwidth',screenwidth)
screenstd=min(screenheight-100,screenwidth-100,850)
coinsize=StringVar()
selarea=StringVar()
refvar=StringVar()
imgtypevar=StringVar()
edge=StringVar()
kmeans=IntVar()
pc_combine_up=DoubleVar()
pc_combine_down=IntVar()
filedropvar=StringVar()
displaybut_var=StringVar()
buttonvar=IntVar()
bandchoice={}
checkboxdict={}
#minipixelareaclass=0
coinbox=None
currentfilename=''
currentlabels=None
displaylabels=None
workingimg=None
displaypclabels=None
boundaryarea=None
outputbutton=None
font=None
reseglabels=None
coindict=None
## Funcitons
refarea=None
originlabels=None
originlabeldict=None
changekmeans=False
convband=None
reflabel=0
minflash=[]
dotflash=[]
labelplotmap={}
mappath=''
elesize=[]
labellist=[]
figdotlist={}
havecolorstrip=True
kmeanschanged=False
pcweightchanged=False
originbinaryimg=None
clusterchanged=False
originselarea=False
zoomoff=False
maxx=0
minx=0
bins=None
loccanvas=None
linelocs=[0,0,0,0]
maxy=0
miny=0
segmentratio=0
zoombox=[]
displayfea_l=0
displayfea_w=0
resizeshape=[]
previewshape=[]
pcbuttons=[]
pcbuttonsgroup=[]
def distance(p1,p2):
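    # squared Euclidean distance between two points (no square root taken; monotone in the true distance, so fine for comparisons)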
return np.sum((p1-p2)**2)
def findratio(originsize,objectsize):
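    # rounded scale factor between an image size and a target display size, used to shrink or enlarge images so they fit the display/preview canvases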
oria=originsize[0]
orib=originsize[1]
obja=objectsize[0]
objb=objectsize[1]
if oria>obja or orib>objb:
ratio=round(max((oria/obja),(orib/objb)))
else:
ratio=round(min((obja/oria),(objb/orib)))
# if oria*orib>850 * 850:
if oria*orib>screenstd * screenstd:
if ratio<2:
ratio=2
return ratio
def getkeys(dict):
return [*dict]
def deletezoom(event,widget):
print('leave widget')
if len(zoombox)>0:
for i in range(len(zoombox)):
#print('delete')
widget.delete(zoombox.pop(0))
widget.update()
def zoom(event,widget,img):
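    # floating zoom box: crop a 30x30 px region around the cursor, magnify it 3x and draw it next to the pointer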
global zoombox
x=event.x
y=event.y
#print(x,y)
if len(zoombox)>1:
widget.delete(zoombox.pop(0))
#print('delete')
crop=img.crop((x-15,y-15,x+15,y+15))
w,h=crop.size
#print(w,h)
crop=crop.resize([w*3,h*3],resample=Image.BILINEAR)
w,h=crop.size
crop=ImageTk.PhotoImage(crop)
zoombox.append(widget.create_image(x+5,y-5,image=crop))
root.update_idletasks()
raise NameError
#time.sleep(0.1)
def changedisplay_pc(frame):
for widget in frame.winfo_children():
widget.pack_forget()
#widget.configure(image=displayimg[text])
#widget.image=displayimg[text]
#widget.pack()
w=displayimg['PCs']['Size'][1]
l=displayimg['PCs']['Size'][0]
widget.config(width=w,height=l)
widget.create_image(0,0,image=displayimg['PCs']['Image'],anchor=NW)
widget.pack()
widget.update()
def pcweightupdate(displayframe):
getPCs()
changedisplay_pc(displayframe)
def buttonpress(val,displayframe,buttonframe):
global buttonvar,pc_combine_up,kmeans
buttonvar.set(val)
kmeans.set(1)
pc_combine_up.set(0.5)
buttonchildren=buttonframe.winfo_children()
for child in buttonchildren:
child.config(highlightbackground='white')
print(buttonchildren[val])
buttonchild=buttonchildren[val]
buttonchild.config(highlightbackground='red')
print('press button ',buttonvar.get())
getPCs()
changedisplay_pc(displayframe)
# if kmeans.get()>1:
changekmeansbar('')
beforecluster('')
# changecluster('')
def PCbuttons(frame,displayframe):
#display pc buttons
# buttonvar=IntVar()
#buttonvar.set(0)
for widget in frame.winfo_children():
widget.pack_forget()
buttonframe=LabelFrame(frame)
buttonframe.pack()
for i in range(len(pcbuttons)):
butimg=pcbuttons[i]
but=Button(buttonframe,text='',image=butimg,compound=TOP,command=partial(buttonpress,i,displayframe,buttonframe))
if i==buttonvar.get():
but.config(highlightbackground='red')
row=int(i/3)
col=i%3
# print(row,col)
but.grid(row=int(i/3),column=col)
print('default button',buttonvar.get())
# change cluster,display
def displaypreview(text):
global figcanvas,resviewframe
for widget in resviewframe.winfo_children():
widget.pack_forget()
# previewframe=Canvas(frame,width=450,height=400,bg='white')
figcanvas.pack()
figcanvas.delete(ALL)
if text=='Color Deviation':
previewtext='ColorIndices'
if text=='ColorIndices':
previewtext='Color Deviation'
previewimage=previewimg[previewtext]['Image']
figcanvas.create_image(0,0,image=previewimage,anchor=NW)
figcanvas.update()
def switchevent(event,widget,img):
global zoomoff,zoomfnid_m,zoomfnid_l,zoombox
zoomoff= not zoomoff
if zoomoff==True:
widget.unbind('<Motion>',zoomfnid_m)
widget.unbind('<Leave>',zoomfnid_l)
if len(zoombox)>0:
for i in range(len(zoombox)):
widget.delete(zoombox.pop(0))
widget.update()
else:
zoomfnid_m=widget.bind('<Motion>',lambda event,arg=widget:zoom(event,arg,img))
zoomfnid_l=widget.bind('<Leave>',lambda event,arg=widget:deletezoom(event,arg))
def changedisplayimg(frame,text):
global displaybut_var,figcanvas,resviewframe,reflabel
displaybut_var.set(disbuttonoption[text])
for widget in frame.winfo_children():
widget.pack_forget()
#widget.configure(image=displayimg[text])
#widget.image=displayimg[text]
#widget.pack()
w=displayimg[text]['Size'][1]
l=displayimg[text]['Size'][0]
widget.config(width=w,height=l)
widget.create_image(0,0,image=displayimg[text]['Image'],anchor=NW)
widget.pack()
widget.update()
global rects,selareapos,app,delapp,delrects,delselarea,originselarea
global zoomfnid_m,zoomfnid_l
app=sel_area.Application(widget)
# delapp=sel_area.Application(widget)
if text=='Output':
try:
image=outputsegbands[currentfilename]['iter0']
displayfig()
except:
return
zoomfnid_m=widget.bind('<Motion>',lambda event,arg=widget:zoom(event,arg,image))
zoomfnid_l=widget.bind('<Leave>',lambda event,arg=widget:deletezoom(event,arg))
delrects=app.start(zoomfnid_m,zoomfnid_l)
widget.bind('<Double-Button-1>',lambda event,arg=widget:switchevent(event,arg,image))
print('delrects',delrects)
else:
reflabel=0
print('reflabel=',reflabel)
try:
delelareadim=app.getinfo(delrects[1])
if delelareadim!=[]:
delselarea=delelareadim
app.end()
except:
pass
if text=='Origin':
try:
image=originsegbands['Origin']
zoomfnid_m=widget.bind('<Motion>',lambda event,arg=widget:zoom(event,arg,image))
zoomfnid_l=widget.bind('<Leave>',lambda event,arg=widget:deletezoom(event,arg))
except:
return
widget.bind('<Double-Button-1>',lambda event,arg=widget:switchevent(event,arg,image))
for widget in resviewframe.winfo_children():
widget.pack_forget()
rects=app.start()
print(rects)
originselarea=True
else:
widget.unbind('<Motion>')
selareadim=app.getinfo(rects[1])
if selareadim!=[]:
selareapos=selareadim
app.end(rects)
if text=='PCs':
selareadim=app.getinfo(rects[1])
if selareadim!=[0,0,1,1] and selareadim!=[] and selareadim!=selareapos:
selareapos=selareadim
if selareapos!=[0,0,1,1] and originselarea==True:
#need to redo PCA
npfilter=np.zeros((displayimg['Origin']['Size'][0],displayimg['Origin']['Size'][1]))
filter=Image.fromarray(npfilter)
draw=ImageDraw.Draw(filter)
draw.ellipse(selareapos,fill='red')
filter=np.array(filter)
filter=np.divide(filter,np.max(filter))
filter=cv2.resize(filter,(displayfea_w,displayfea_l),interpolation=cv2.INTER_LINEAR)
partialsingleband(filter)
originselarea=False
pass
PCbuttons(resviewframe,frame)
pass
if text=='Color Deviation':
#displaypreview
displaypreview(text)
pass
if text=='ColorIndices':
#displaypreview
displaypreview(text)
pass
#print('change to '+text)
#time.sleep(1)
def updateresizeshape(shape,content):
shape.append(int(content))
return shape
def generatedisplayimg(filename): # init display images
global resizeshape,previewshape
try:
# firstimg=Multiimagebands[filename]
#height,width=firstimg.size
# height,width,c=displaybandarray[filename]['LabOstu'].shape
bandsize=Multiimagebands[filename].size
if bandsize[0]*bandsize[1]>2000*2000:
ratio=findratio([bandsize[0],bandsize[1]],[2000,2000])
else:
ratio=1
height,width=bandsize[0]/ratio,bandsize[1]/ratio
# ratio=findratio([height,width],[850,850])
ratio=findratio([height,width],[screenstd,screenstd])
print('displayimg ratio',ratio)
resizeshape=[]
# if height*width<850*850:
if height*width<screenstd*screenstd:
#resize=cv2.resize(Multiimage[filename],(int(width*ratio),int(height*ratio)),interpolation=cv2.INTER_LINEAR)
updateresizeshape(resizeshape,width*ratio)
updateresizeshape(resizeshape,height*ratio)
# resizeshape.append(width*ratio)
# resizeshape.append(height*ratio)
if height>screenstd:
resizeshape=[]
ratio=round(height/screenstd)
updateresizeshape(resizeshape,width*ratio)
updateresizeshape(resizeshape,height*ratio)
if width>screenstd:
resizeshape=[]
ratio=round(width/screenstd)
updateresizeshape(resizeshape,width*ratio)
updateresizeshape(resizeshape,height*ratio)
else:
#resize=cv2.resize(Multiimage[filename],(int(width/ratio),int(height/ratio)),interpolation=cv2.INTER_LINEAR)
updateresizeshape(resizeshape,width/ratio)
updateresizeshape(resizeshape,height/ratio)
ratio=findratio([height,width],[400,450])
previewshape=[]
if height*width<450*400:
#resize=cv2.resize(Multiimage[filename],(int(width*ratio),int(height*ratio)),interpolation=cv2.INTER_LINEAR)
updateresizeshape(previewshape,width*ratio)
updateresizeshape(previewshape,height*ratio)
if height>400:
previewshape=[]
ratio=round(height/screenstd)
updateresizeshape(previewshape,width/ratio)
updateresizeshape(previewshape,height/ratio)
if width>450:
previewshape=[]
ratio=round(width/screenstd)
updateresizeshape(previewshape,width/ratio)
updateresizeshape(previewshape,height/ratio)
else:
#resize=cv2.resize(Multiimage[filename],(int(width/ratio),int(height/ratio)),interpolation=cv2.INTER_LINEAR)
updateresizeshape(previewshape,width/ratio)
updateresizeshape(previewshape,height/ratio)
resize=cv2.resize(Multiimage[filename],(int(resizeshape[0]),int(resizeshape[1])),interpolation=cv2.INTER_LINEAR)
originimg=Image.fromarray(resize.astype('uint8'))
originsegbands.update({'Origin':originimg})
rgbimg=Image.fromarray(resize.astype('uint8'))
draw=ImageDraw.Draw(rgbimg)
suggsize=14
font=ImageFont.truetype('cmb10.ttf',size=suggsize)
content='\n File: '+filename
draw.text((10-1, 10+1), text=content, font=font, fill='white')
draw.text((10+1, 10+1), text=content, font=font, fill='white')
draw.text((10-1, 10-1), text=content, font=font, fill='white')
draw.text((10+1, 10-1), text=content, font=font, fill='white')
#draw.text((10,10),text=content,font=font,fill=(141,2,31,0))
draw.text((10,10),text=content,font=font,fill='black')
rgbimg=ImageTk.PhotoImage(rgbimg)
tempdict={}
tempdict.update({'Size':resize.shape})
tempdict.update({'Image':rgbimg})
except:
tempdict={}
tempimg=np.zeros((screenstd,screenstd))
tempdict.update({'Size':tempimg.shape})
tempdict.update({'Image':ImageTk.PhotoImage(Image.fromarray(tempimg.astype('uint8')))})
displayimg['Origin']=tempdict
#if height*width<850*850:
# resize=cv2.resize(Multigray[filename],(int(width*ratio),int(height*ratio)),interpolation=cv2.INTER_LINEAR)
#else:
#resize=cv2.resize(Multigray[filename],(int(width/ratio),int(height/ratio)),interpolation=cv2.INTER_LINEAR)
tempimg=np.zeros((screenstd,screenstd))
tempdict={}
try:
tempdict.update({'Size':resize.shape})
tempdict.update({'Image':ImageTk.PhotoImage(Image.fromarray(np.zeros((int(resizeshape[1]),int(resizeshape[0]))).astype('uint8')))})
except:
tempdict.update({'Size':tempimg.shape})
#if height*width<850*850:
# tempdict.update({'Image':ImageTk.PhotoImage(Image.fromarray(np.zeros((int(height*ratio),int(width*ratio))).astype('uint8')))})
#else:
# tempdict.update({'Image':ImageTk.PhotoImage(Image.fromarray(np.zeros((int(height/ratio),int(width/ratio))).astype('uint8')))})
# tempdict.update({'Image':ImageTk.PhotoImage(Image.fromarray(np.zeros((int(resizeshape[1]),int(resizeshape[0]))).astype('uint8')))})
tempdict.update({'Image':ImageTk.PhotoImage(Image.fromarray(tempimg.astype('uint8')))})
displayimg['Output']=tempdict
tempdict={}
try:
tempdict.update({'Size':resize.shape})
tempdict.update({'Image':ImageTk.PhotoImage(Image.fromarray(np.zeros((int(resizeshape[1]),int(resizeshape[0]))).astype('uint8')))})
except:
tempdict.update({'Size':tempimg.shape})
tempdict.update({'Image':ImageTk.PhotoImage(Image.fromarray(tempimg.astype('uint8')))})
displayimg['PCs']=tempdict
tempdict={}
temppreviewdict={}
temppreviewimg=np.zeros((450,400))
try:
tempband=np.zeros((displaybandarray[filename]['LabOstu'][:,:,0].shape))
# tempband=tempband+displaybandarray[filename]['LabOstu']
# ratio=findratio([tempband.shape[0],tempband.shape[1]],[850,850])
#if tempband.shape[0]*tempband.shape[1]<850*850:
# tempband=cv2.resize(ratio,(int(tempband.shape[1]*ratio),int(tempband.shape[0]*ratio)),interpolation=cv2.INTER_LINEAR)
#else:
# tempband=cv2.resize(ratio,(int(tempband.shape[1]/ratio),int(tempband.shape[0]/ratio)),interpolation=cv2.INTER_LINEAR)
tempband=cv2.resize(tempband,(int(resizeshape[0]),int(resizeshape[1])),interpolation=cv2.INTER_LINEAR)
tempdict.update({'Size':tempband.shape})
tempdict.update({'Image':ImageTk.PhotoImage(Image.fromarray(tempband[:,:,2].astype('uint8')))})
temppreview=cv2.resize(tempband,(int(previewshape[0]),int(previewshape[1])),interpolation=cv2.INTER_LINEAR)
temppreview=Image.fromarray(temppreview.astype('uint8'))
temppreviewdict.update({'Size':previewshape})
temppreviewdict.update({'Image':ImageTk.PhotoImage(temppreview)})
# print('resizeshape',resizeshape)
#pyplt.imsave('displayimg.png',tempband[:,:,0])
#indimg=cv2.imread('displayimg.png')
except:
tempdict.update({'Size':tempimg.shape})
tempdict.update({'Image':ImageTk.PhotoImage(Image.fromarray(tempimg.astype('uint8')))})
temppreviewdict.update({'Size':temppreviewimg.shape})
temppreviewdict.update({'Image':ImageTk.PhotoImage(Image.fromarray(temppreviewimg.astype('uint8')))})
displayimg['ColorIndices']=tempdict
previewimg['ColorIndices']=temppreviewdict
#resize=cv2.resize(Multigray[filename],(int(resizeshape[0]),int(resizeshape[1])),interpolation=cv2.INTER_LINEAR)
#grayimg=ImageTk.PhotoImage(Image.fromarray(resize.astype('uint8')))
#tempdict={}
#tempdict.update({'Size':resize.shape})
#tempdict.update({'Image':grayimg})
tempdict={}
temppreviewdict={}
try:
colordeviate=np.zeros((tempband[:,:,0].shape[0],tempband[:,:,0].shape[1],3),'uint8')
kvar=int(kmeans.get())
for i in range(kvar):
locs=np.where(tempband[:,:,0]==i)
colordeviate[locs]=colorbandtable[i,:]
# pyplt.imsave('colordeviation.png',colordeviate)
# # colordevimg=Image.fromarray(colordeviate.astype('uint8'))
# # colordevimg.save('colordeviation.png',"PNG")
# testcolor=Image.open('colordeviation.png')
print('colordeviation.png')
# colortempdict={}
colordeviate=cv2.resize(colordeviate,(int(resizeshape[0]),int(resizeshape[1])),interpolation=cv2.INTER_LINEAR)
tempdict.update({'Size':colordeviate.shape})
tempdict.update({'Image':ImageTk.PhotoImage(Image.fromarray(colordeviate.astype('uint8')))})
# colortempdict.update({'Size':colordeviate.shape})
# colortempdict.update({'Image':ImageTk.PhotoImage(Image.fromarray(colordeviate.astype('uint8')))})
# colortempdict.update({'Image':ImageTk.PhotoImage(testcolor)})
# tempdict={}
temppreview=cv2.resize(colordeviate,(int(previewshape[0]),int(previewshape[1])),interpolation=cv2.INTER_LINEAR)
temppreviewdict.update({'Size':temppreview.shape})
temppreviewdict.update({'Image':ImageTk.PhotoImage(Image.fromarray(temppreview[:,:,0].astype('uint8')))})
except:
tempdict.update({'Size':tempimg.shape})
tempdict.update({'Image':ImageTk.PhotoImage(Image.fromarray(tempimg.astype('uint8')))})
temppreviewdict.update({'Size':temppreviewimg.shape})
temppreviewdict.update({'Image':ImageTk.PhotoImage(Image.fromarray(temppreviewimg.astype('uint8')))})
# displayimg['Color Deviation']=colortempdict
displayimg['Color Deviation']=tempdict
previewimg['Color Deviation']=temppreviewdict
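# Open_File: read one image from disk with OpenCV, build its RGB and grayscale
# representations plus per-channel band objects (zeros and NaNs replaced by 1e-6
# so the color-index ratios computed later never divide by zero), and register
# everything in the global Multi* dictionaries. Returns False (with an error
# dialog) if the file cannot be read as an image, True otherwise.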
def Open_File(filename): #add to multi-image,multi-gray #call band calculation
global Multiimage,Multigray,Multitype,Multiimagebands,Multigraybands,filenames
try:
Filersc=cv2.imread(filename,flags=cv2.IMREAD_ANYCOLOR)
ndim=np.ndim(Filersc)
if ndim==2:
height,width=np.shape(Filersc)
channel=1
            Filersc=Filersc.reshape((height,width,channel))
else:
height,width,channel=np.shape(Filersc)
Filesize=(height,width)
print('filesize:',height,width)
RGBfile=cv2.cvtColor(Filersc,cv2.COLOR_BGR2RGB)
Multiimage.update({filename:RGBfile})
if ndim==2:
Grayfile=np.copy(Filersc)
else:
Grayfile=cv2.cvtColor(Filersc,cv2.COLOR_BGR2Lab)
Grayfile=cv2.cvtColor(Grayfile,cv2.COLOR_BGR2GRAY)
#Grayfile=cv2.GaussianBlur(Grayfile,(3,3),cv2.BORDER_DEFAULT)
#ostu=filters.threshold_otsu(Grayfile)
#Grayfile=Grayfile.astype('float32')
#Grayfile=Grayfile/ostu
Grayimg=img(Filesize,Grayfile)
RGBbands=np.zeros((channel,height,width))
for j in range(channel):
band=RGBfile[:,:,j]
band=np.where(band==0,1e-6,band)
nans=np.isnan(band)
band[nans]=1e-6
#ostu=filters.threshold_otsu(band)
#band=band/ostu
RGBbands[j,:,:]=band
RGBimg=img(Filesize,RGBbands)
tempdict={filename:RGBimg}
Multiimagebands.update(tempdict)
tempdict={filename:Grayfile}
Multigray.update(tempdict)
tempdict={filename:0}
Multitype.update(tempdict)
tempdict={filename:Grayimg}
Multigraybands.update(tempdict)
except:
messagebox.showerror('Invalid Image Format','Cannot open '+filename)
return False
filenames.append(filename)
return True
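# Open_Map: in batch mode, delegate to batchprocess.Open_batchfile(). Otherwise
# ask for a CSV map file, record the number of non-empty cells per row (elesize)
# and every non-empty label (labellist), then rebuild the counting overlay for
# the current file from the existing segmentation labels (reseglabels) and show it.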
def Open_Map():
if proc_mode[proc_name].get()=='1':
batchprocess.Open_batchfile()
return
global mappath,elesize,labellist
filepath=filedialog.askopenfilename()
if len(filepath)>0:
if 'csv' in filepath:
mappath=filepath
elesize=[]
labellist=[]
rows=[]
print('open map at: '+mappath)
with open(mappath,mode='r',encoding='utf-8-sig') as f:
csvreader=csv.reader(f)
for row in csvreader:
rows.append(row)
temprow=[]
for ele in row:
                        if ele!='':
temprow.append(ele)
elesize.append(len(temprow))
for i in range(len(rows)):
for j in range(len(rows[i])):
if rows[i][j]!='':
labellist.append(rows[i][j])
else:
            messagebox.showerror('Invalid File',message='Please open a CSV format file as the map file.')
corlortable=tkintercorestat.get_colortable(reseglabels)
tup=(reseglabels,[],corlortable,{},currentfilename)
print(elesize)
mapdict,mapimage,smallset=showcounting(tup,True,True,True)
tempimgbands={}
tempimgdict={}
tempsmall={}
tempimgbands.update({'iter0':mapimage})
tempimgdict.update({'iter0':mapdict})
tempsmall.update({'iter0':smallset})
outputimgdict.update({currentfilename:tempimgdict})
outputimgbands.update({currentfilename:tempimgbands})
outputsegbands.update({currentfilename:tempsmall})
changeoutputimg(currentfilename,'1')
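# Open_Multifile: entry point for loading a set of images. In batch mode it hands
# off to batchprocess.Open_batchfolder(); otherwise it resets the per-session
# globals and widgets, runs Open_File() and singleband() on every selected file,
# regenerates the display images, performs an initial k-means classification and
# re-enables the processing controls.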
def Open_Multifile():
global extractbutton,outputbutton
if proc_mode[proc_name].get()=='1':
batchprocess.Open_batchfolder()
extractbutton.config(state=NORMAL)
outputbutton.config(state=NORMAL)
return
# else:
# extractbutton.config(state=DISABLED)
global Multiimage,Multigray,Multitype,Multiimagebands,changefileframe,imageframe,Multigraybands,filenames
global changefiledrop,filedropvar,originbandarray,displaybandarray,clusterdisplay,currentfilename,resviewframe
global refsubframe,reseglabels,refbutton,figcanvas,loccanvas,originlabels,changekmeans,refarea
global originlabeldict,convband,panelA
global havecolorstrip
global colordicesband,oldpcachoice
global pccombinebar_up
global displaylabels,displaypclabels
global buttonvar
global colorindicearray
global selarea
MULTIFILES=filedialog.askopenfilenames()
root.update()
if len(MULTIFILES)>0:
Multiimage={}
Multigray={}
Multitype={}
Multiimagebands={}
Multigraybands={}
filenames=[]
originbandarray={}
colorindicearray={}
displaybandarray={}
clusterdisplay={}
oldpcachoice=[]
reseglabels=None
originlabels=None
originlabeldict=None
#changekmeans=True
convband=None
refvar.set('0')
kmeans.set('2')
panelA.delete(ALL)
panelA.unbind('<Button-1>')
panelA.unbind('<Shift-Button-1>')
refarea=None
havecolorstrip=False
displaypclabels=None
buttonvar.set(0)
# if 'NDI' in bandchoice:
# bandchoice['NDI'].set('1')
# if 'NDVI' in bandchoice:
# bandchoice['NDVI'].set('1')
refbutton.config(state=DISABLED)
# selareabutton.configure(state=DISABLED)
selarea.set('0')
figcanvas.delete(ALL)
#loccanvas=None
for widget in refsubframe.winfo_children():
widget.config(state=DISABLED)
#for widget in resviewframe.winfo_children():
# widget.config(state=DISABLED)
if outputbutton is not None:
outputbutton.config(state=DISABLED)
for i in range(len(MULTIFILES)):
if Open_File(MULTIFILES[i])==False:
return
generatedisplayimg(filenames[0])
changedisplayimg(imageframe,'Origin')
# imageframe.update()
# raise NameError
# yield
# thread=threading.Thread(target=singleband,args=(MULTIFILES[i],))
singleband(MULTIFILES[i])
# thread.start()
# thread.join()
for widget in changefileframe.winfo_children():
widget.pack_forget()
currentfilename=filenames[0]
# filedropvar.set(filenames[0])
# changefiledrop=OptionMenu(changefileframe,filedropvar,*filenames,command=partial(changeimage,imageframe))
# changefiledrop.pack()
#singleband(filenames[0])
generatedisplayimg(filenames[0])
# changedisplayimg(imageframe,'Origin')
getPCs()
if len(bandchoice)>0:
for i in range(len(cluster)):
bandchoice[cluster[i]].set('')
#changedisplayimg(imageframe,'Origin')
kmeans.set(1)
#reshapemodified_tif=np.zeros((displaybandarray[currentfilename]['LabOstu'].shape[0]*displaybandarray[currentfilename]['LabOstu'].shape[1],3))
#colordicesband=kmeansclassify(['LabOstu'],reshapemodified_tif)
displaylabels=kmeansclassify()
generateimgplant('')
changedisplayimg(imageframe,'Origin')
# if len(bandchoice)>0:
# bandchoice['LabOstu'].set('1')
global buttondisplay,pcaframe,kmeansbar
for widget in buttondisplay.winfo_children():
widget.config(state=NORMAL)
# for widget in pcaframe.winfo_children():
# for widget in pcselframe.winfo_children():
# widget.config(state=NORMAL)
extractbutton.config(state=NORMAL)
kmeansbar.state(["!disabled"])
pccombinebar_up.state(["!disabled"])
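# fillpartialbands: scatter a band that was sampled only at the non-zero positions
# of filter_vector into column `vectorindex` of the (pixels x features) matrix.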
def fillpartialbands(vector,vectorindex,band,filter_vector):
nonzero=np.where(filter_vector!=0)
vector[nonzero,vectorindex]=vector[nonzero,vectorindex]+band
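# fillbands: cache the raw band under `name` in originbands, store a resized
# (and, if a filter array is given, masked) copy in displaybands, and add its
# flattened pixels to column `vectorindex` of the feature matrix `vector`.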
def fillbands(originbands,displaybands,vector,vectorindex,name,band,filter=0):
tempdict={name:band}
if isinstance(filter,int):
if name not in originbands:
originbands.update(tempdict)
image=cv2.resize(band,(displayfea_w,displayfea_l),interpolation=cv2.INTER_LINEAR)
displaydict={name:image}
displaybands.update(displaydict)
fea_bands=image.reshape((displayfea_l*displayfea_w),1)[:,0]
vector[:,vectorindex]=vector[:,vectorindex]+fea_bands
else:
if name not in originbands:
originbands.update(tempdict)
image=cv2.resize(band,(displayfea_w,displayfea_l),interpolation=cv2.INTER_LINEAR)
image=np.multiply(image,filter)
displaydict={name:image}
displaybands.update(displaydict)
fea_bands=image.reshape((displayfea_l*displayfea_w),1)[:,0]
vector[:,vectorindex]=vector[:,vectorindex]+fea_bands
return
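# plot3d: visualization helper that scatters the first three principal components
# projected onto the three coordinate planes and saves the figure as '3dplot_PC.png'.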
def plot3d(pcas):
from mpl_toolkits.mplot3d import Axes3D # noqa: F401 unused import
import matplotlib.pyplot as plt
fig=plt.figure()
ax=fig.add_subplot(111,projection='3d')
x=pcas[:,0]
y=pcas[:,1]
z=pcas[:,2]*0+np.min(pcas[:,2])
ax.scatter(x,y,z,color='tab:purple')
x=pcas[:,0]*0+np.min(pcas[:,0])
y=pcas[:,1]
z=pcas[:,2]
ax.scatter(x,y,z,color='tab:pink')
x=pcas[:,0]
y=pcas[:,1]*0+np.max(pcas[:,1])
z=pcas[:,2]
ax.scatter(x,y,z,color='tab:olive')
ax.set_xlabel('Color Indices PC1')
ax.set_ylabel('Color Indices PC2')
ax.set_zlabel('Color Indices PC3')
# plt.show()
plt.savefig('3dplot_PC.png')
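# partialoneband: single-band variant of partialsingleband(). With only one channel
# available, the RGB slots and all twelve color-index columns are filled with the
# same filtered band, no PCA is run (the 14 raw feature columns are reshaped
# directly into the display stack), and a single thumbnail button is generated.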
def partialoneband(filter):
global displaybandarray,originpcabands
global pcbuttons
global nonzero_vector,partialpca
partialpca=True
bands=Multiimagebands[currentfilename].bands
channel,fea_l,fea_w=bands.shape
nonzero=np.where(filter!=0)
RGB_vector=np.zeros((displayfea_l*displayfea_w,3))
colorindex_vector=np.zeros((displayfea_l*displayfea_w,12))
filter_vector=filter.reshape((displayfea_l*displayfea_w),1)[:,0]
originbands={}
displays={}
Red=cv2.resize(bands[0,:,:],(displayfea_w,displayfea_l),interpolation=cv2.INTER_LINEAR)[nonzero]
Green=cv2.resize(bands[0,:,:],(displayfea_w,displayfea_l),interpolation=cv2.INTER_LINEAR)[nonzero]
# Red=cv2.adaptiveThreshold(Red,255,cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY,11,2)
# Green=cv2.adaptiveThreshold(Green,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY,11,2)
Blue=cv2.resize(bands[0,:,:],(displayfea_w,displayfea_l),interpolation=cv2.INTER_LINEAR)[nonzero]
# Blue=cv2.threshold(Blue,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
fillpartialbands(RGB_vector,0,Red,filter_vector)
fillpartialbands(RGB_vector,1,Green,filter_vector)
fillpartialbands(RGB_vector,2,Blue,filter_vector)
PAT_R=Red
PAT_G=Red
PAT_B=Red
ROO_R=Red
ROO_G=Red
ROO_B=Red
DIF_R=Red
DIF_G=Red
DIF_B=Red
GLD_R=Red
GLD_G=Red
GLD_B=Red
fillpartialbands(colorindex_vector,0,PAT_R,filter_vector)
fillpartialbands(colorindex_vector,1,PAT_G,filter_vector)
fillpartialbands(colorindex_vector,2,PAT_B,filter_vector)
fillpartialbands(colorindex_vector,3,ROO_R,filter_vector)
fillpartialbands(colorindex_vector,4,ROO_G,filter_vector)
fillpartialbands(colorindex_vector,5,ROO_B,filter_vector)
fillpartialbands(colorindex_vector,6,DIF_R,filter_vector)
fillpartialbands(colorindex_vector,7,DIF_G,filter_vector)
fillpartialbands(colorindex_vector,8,DIF_B,filter_vector)
fillpartialbands(colorindex_vector,9,GLD_R,filter_vector)
fillpartialbands(colorindex_vector,10,GLD_G,filter_vector)
fillpartialbands(colorindex_vector,11,GLD_B,filter_vector)
nonzero_vector=np.where(filter_vector!=0)
displayfea_vector=np.concatenate((RGB_vector,colorindex_vector),axis=1)
featurechannel=14
# np.savetxt('color-index.csv',displayfea_vector,delimiter=',',fmt='%10.5f')
# displayfea_vector=np.concatenate((RGB_vector,colorindex_vector),axis=1)
originpcabands.update({currentfilename:displayfea_vector})
pcabandsdisplay=displayfea_vector[:,:14]
pcabandsdisplay=pcabandsdisplay.reshape(displayfea_l,displayfea_w,featurechannel)
tempdictdisplay={'LabOstu':pcabandsdisplay}
displaybandarray.update({currentfilename:tempdictdisplay})
# originbandarray.update({currentfilename:originbands})
# Red=displays['Band1']
# Green=displays['Band2']
# Blue=displays['Band3']
# convimg=np.zeros((Red.shape[0],Red.shape[1],3))
# convimg[:,:,0]=Red
# convimg[:,:,1]=Green
# convimg[:,:,2]=Blue
# convimg=Image.fromarray(convimg.astype('uint8'))
# convimg.save('convimg.png','PNG')
pcbuttons=[]
need_w=int(450/3)
need_h=int(400/4)
for i in range(2,3):
band=np.copy(pcabandsdisplay[:,:,i])
# imgband=(band-band.min())*255/(band.max()-band.min())
imgband=np.copy(band)
pcimg=Image.fromarray(imgband.astype('uint8'),'L')
# pcimg.save('pc'+'_'+str(i)+'.png',"PNG")
pcimg.thumbnail((need_w,need_h),Image.ANTIALIAS)
# pcimg.save('pc'+'_'+str(i)+'.png',"PNG")
# ratio=max(displayfea_l/need_h,displayfea_w/need_w)
# print('origin band range',band.max(),band.min())
# # band,cache=tkintercorestat.pool_forward(band,{"f":int(ratio),"stride":int(ratio)})
# band=cv2.resize(band,(need_w,need_h),interpolation=cv2.INTER_LINEAR)
# bandrange=band.max()-band.min()
# print('band range',band.max(),band.min())
# band=(band-band.min())/bandrange*255
# print('button img range',band.max(),band.min())
# buttonimg=Image.fromarray(band.astype('uint8'),'L')
pcbuttons.append(ImageTk.PhotoImage(pcimg))
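# partialsingleband: recompute the feature stack for the current file using only
# the pixels where `filter` is non-zero. Builds the RGB and twelve color-index
# columns, clips every column to its 1st/99th percentiles, runs PCA separately on
# the color indices (PCs 1-9) and on RGB (PCs 10-12) over the masked pixels, and
# stores the resulting 12-channel stack plus the thumbnail buttons for the PC picker.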
def partialsingleband(filter):
global displaybandarray,originpcabands
global pcbuttons
global nonzero_vector,partialpca
partialpca=True
bands=Multiimagebands[currentfilename].bands
channel,fea_l,fea_w=bands.shape
nonzero=np.where(filter!=0)
RGB_vector=np.zeros((displayfea_l*displayfea_w,3))
colorindex_vector=np.zeros((displayfea_l*displayfea_w,12))
filter_vector=filter.reshape((displayfea_l*displayfea_w),1)[:,0]
originbands={}
displays={}
if channel==1:
# Red=cv2.resize(bands[0,:,:],(displayfea_w,displayfea_l),interpolation=cv2.INTER_LINEAR)[nonzero]
# Green=cv2.resize(bands[0,:,:],(displayfea_w,displayfea_l),interpolation=cv2.INTER_LINEAR)[nonzero]
# Blue=cv2.resize(bands[0,:,:],(displayfea_w,displayfea_l),interpolation=cv2.INTER_LINEAR)[nonzero]
# fillpartialbands(RGB_vector,0,Red,filter_vector)
# fillpartialbands(RGB_vector,1,Green,filter_vector)
# fillpartialbands(RGB_vector,2,Blue,filter_vector)
partialoneband(filter)
return
else:
Red=cv2.resize(bands[0,:,:],(displayfea_w,displayfea_l),interpolation=cv2.INTER_LINEAR)[nonzero]
Green=cv2.resize(bands[1,:,:],(displayfea_w,displayfea_l),interpolation=cv2.INTER_LINEAR)[nonzero]
Blue=cv2.resize(bands[2,:,:],(displayfea_w,displayfea_l),interpolation=cv2.INTER_LINEAR)[nonzero]
fillpartialbands(RGB_vector,0,Red,filter_vector)
fillpartialbands(RGB_vector,1,Green,filter_vector)
fillpartialbands(RGB_vector,2,Blue,filter_vector)
PAT_R=Red/(Red+Green)
PAT_G=Green/(Green+Blue)
PAT_B=Blue/(Blue+Red)
        ROO_R=Red/(Green+1e-6)
        ROO_G=Green/(Blue+1e-6)
        ROO_B=Blue/(Red+1e-6)
DIF_R=2*Red-Green-Blue
DIF_G=2*Green-Blue-Red
DIF_B=2*Blue-Red-Green
        GLD_R=Red/(np.multiply(np.power(Blue,0.618),np.power(Green,0.382))+1e-6)
        GLD_G=Green/(np.multiply(np.power(Blue,0.618),np.power(Red,0.382))+1e-6)
        GLD_B=Blue/(np.multiply(np.power(Green,0.618),np.power(Red,0.382))+1e-6)
fillpartialbands(colorindex_vector,0,PAT_R,filter_vector)
fillpartialbands(colorindex_vector,1,PAT_G,filter_vector)
fillpartialbands(colorindex_vector,2,PAT_B,filter_vector)
fillpartialbands(colorindex_vector,3,ROO_R,filter_vector)
fillpartialbands(colorindex_vector,4,ROO_G,filter_vector)
fillpartialbands(colorindex_vector,5,ROO_B,filter_vector)
fillpartialbands(colorindex_vector,6,DIF_R,filter_vector)
fillpartialbands(colorindex_vector,7,DIF_G,filter_vector)
fillpartialbands(colorindex_vector,8,DIF_B,filter_vector)
fillpartialbands(colorindex_vector,9,GLD_R,filter_vector)
fillpartialbands(colorindex_vector,10,GLD_G,filter_vector)
fillpartialbands(colorindex_vector,11,GLD_B,filter_vector)
for i in range(12):
perc=np.percentile(colorindex_vector[:,i],1)
print('perc',perc)
colorindex_vector[:,i]=np.where(colorindex_vector[:,i]<perc,perc,colorindex_vector[:,i])
perc=np.percentile(colorindex_vector[:,i],99)
print('perc',perc)
colorindex_vector[:,i]=np.where(colorindex_vector[:,i]>perc,perc,colorindex_vector[:,i])
for i in range(3):
perc=np.percentile(RGB_vector[:,i],1)
print('perc',perc)
RGB_vector[:,i]=np.where(RGB_vector[:,i]<perc,perc,RGB_vector[:,i])
perc=np.percentile(RGB_vector[:,i],99)
print('perc',perc)
RGB_vector[:,i]=np.where(RGB_vector[:,i]>perc,perc,RGB_vector[:,i])
nonzero_vector=np.where(filter_vector!=0)
rgb_M=np.mean(RGB_vector[nonzero_vector,:].T,axis=1)
colorindex_M=np.mean(colorindex_vector[nonzero_vector,:].T,axis=1)
print('rgb_M',rgb_M,'colorindex_M',colorindex_M)
rgb_C=RGB_vector[nonzero_vector,:][0]-rgb_M.T
colorindex_C=colorindex_vector[nonzero_vector,:][0]-colorindex_M.T
rgb_V=np.corrcoef(rgb_C.T)
color_V=np.corrcoef(colorindex_C.T)
nans=np.isnan(color_V)
color_V[nans]=1e-6
rgb_std=rgb_C/(np.std(RGB_vector[nonzero_vector,:].T,axis=1)).T
color_std=colorindex_C/(np.std(colorindex_vector[nonzero_vector,:].T,axis=1)).T
nans=np.isnan(color_std)
color_std[nans]=1e-6
rgb_eigval,rgb_eigvec=np.linalg.eig(rgb_V)
color_eigval,color_eigvec=np.linalg.eig(color_V)
print('rgb_eigvec',rgb_eigvec)
print('color_eigvec',color_eigvec)
featurechannel=12
pcabands=np.zeros((colorindex_vector.shape[0],featurechannel))
rgbbands=np.zeros((colorindex_vector.shape[0],3))
for i in range(0,9):
pcn=color_eigvec[:,i]
pcnbands=np.dot(color_std,pcn)
pcvar=np.var(pcnbands)
print('color index pc',i+1,'var=',pcvar)
pcabands[nonzero_vector,i]=pcabands[nonzero_vector,i]+pcnbands
for i in range(9,12):
pcn=rgb_eigvec[:,i-9]
pcnbands=np.dot(rgb_std,pcn)
pcvar=np.var(pcnbands)
print('rgb pc',i-9+1,'var=',pcvar)
pcabands[nonzero_vector,i]=pcabands[nonzero_vector,i]+pcnbands
rgbbands[nonzero_vector,i-9]=rgbbands[nonzero_vector,i-9]+pcnbands
# plot3d(pcabands)
# np.savetxt('rgb.csv',rgbbands,delimiter=',',fmt='%10.5f')
# pcabands[:,1]=np.copy(pcabands[:,1])
# pcabands[:,2]=pcabands[:,2]*0
# indexbands=np.zeros((colorindex_vector.shape[0],3))
# if i<5:
# indexbands[:,i-2]=indexbands[:,i-2]+pcnbands
for i in range(12):
perc=np.percentile(pcabands[:,i],1)
print('perc',perc)
pcabands[:,i]=np.where(pcabands[:,i]<perc,perc,pcabands[:,i])
perc=np.percentile(pcabands[:,i],99)
print('perc',perc)
pcabands[:,i]=np.where(pcabands[:,i]>perc,perc,pcabands[:,i])
'''save to csv'''
# indexbands[:,0]=indexbands[:,0]+pcabands[:,2]
# indexbands[:,1]=indexbands[:,1]+pcabands[:,3]
# indexbands[:,2]=indexbands[:,2]+pcabands[:,4]
# plot3d(indexbands)
# np.savetxt('pcs.csv',pcabands,delimiter=',',fmt='%10.5f')
displayfea_vector=np.concatenate((RGB_vector,colorindex_vector),axis=1)
# np.savetxt('color-index.csv',displayfea_vector,delimiter=',',fmt='%10.5f')
# displayfea_vector=np.concatenate((RGB_vector,colorindex_vector),axis=1)
originpcabands.update({currentfilename:displayfea_vector})
pcabandsdisplay=pcabands.reshape(displayfea_l,displayfea_w,featurechannel)
tempdictdisplay={'LabOstu':pcabandsdisplay}
displaybandarray.update({currentfilename:tempdictdisplay})
# originbandarray.update({currentfilename:originbands})
# Red=displays['Band1']
# Green=displays['Band2']
# Blue=displays['Band3']
# convimg=np.zeros((Red.shape[0],Red.shape[1],3))
# convimg[:,:,0]=Red
# convimg[:,:,1]=Green
# convimg[:,:,2]=Blue
# convimg=Image.fromarray(convimg.astype('uint8'))
# convimg.save('convimg.png','PNG')
pcbuttons=[]
need_w=int(450/3)
need_h=int(400/4)
for i in range(12):
band=np.copy(pcabandsdisplay[:,:,i])
imgband=(band-band.min())*255/(band.max()-band.min())
pcimg=Image.fromarray(imgband.astype('uint8'),'L')
# pcimg.save('pc'+'_'+str(i)+'.png',"PNG")
pcimg.thumbnail((need_w,need_h),Image.ANTIALIAS)
# pcimg.save('pc'+'_'+str(i)+'.png',"PNG")
# ratio=max(displayfea_l/need_h,displayfea_w/need_w)
# print('origin band range',band.max(),band.min())
# # band,cache=tkintercorestat.pool_forward(band,{"f":int(ratio),"stride":int(ratio)})
# band=cv2.resize(band,(need_w,need_h),interpolation=cv2.INTER_LINEAR)
# bandrange=band.max()-band.min()
# print('band range',band.max(),band.min())
# band=(band-band.min())/bandrange*255
# print('button img range',band.max(),band.min())
# buttonimg=Image.fromarray(band.astype('uint8'),'L')
pcbuttons.append(ImageTk.PhotoImage(pcimg))
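# oneband: feature extraction for single-channel images. The lone band is copied
# into the RGB and color-index slots, no PCA is applied (the raw 14-column feature
# stack is used directly), and one thumbnail button is produced.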
def oneband(file):
global displaybandarray,originbandarray,originpcabands,displayfea_l,displayfea_w
global pcbuttons
global partialpca
partialpca=False
try:
bands=Multiimagebands[file].bands
except:
return
pcbuttons=[]
channel,fea_l,fea_w=bands.shape
print('bandsize',fea_l,fea_w)
if fea_l*fea_w>2000*2000:
ratio=findratio([fea_l,fea_w],[2000,2000])
else:
ratio=1
print('ratio',ratio)
originbands={}
displays={}
displaybands=cv2.resize(bands[0,:,:],(int(fea_w/ratio),int(fea_l/ratio)),interpolation=cv2.INTER_LINEAR)
displayfea_l,displayfea_w=displaybands.shape
RGB_vector=np.zeros((displayfea_l*displayfea_w,3))
colorindex_vector=np.zeros((displayfea_l*displayfea_w,12))
Red=bands[0,:,:].astype('uint8')
# _,Red=cv2.threshold(Red,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
Green=bands[0,:,:].astype('uint8')
# _,Green=cv2.threshold(Green,0,255,cv2.THRESH_OTSU)
Blue=bands[0,:,:].astype('uint8')
# _,Blue=cv2.threshold(Blue,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
fillbands(originbands,displays,RGB_vector,0,'Band1',Red)
fillbands(originbands,displays,RGB_vector,1,'Band2',Green)
fillbands(originbands,displays,RGB_vector,2,'Band3',Blue)
PAT_R=bands[0,:,:].astype('uint8')
# PAT_R=cv2.adaptiveThreshold(PAT_R,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY,11,2)
PAT_G=bands[0,:,:]
# PAT_G=cv2.adaptiveThreshold(PAT_G,255,cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY,11,2)
PAT_B=bands[0,:,:]
ROO_R=bands[0,:,:]
ROO_G=bands[0,:,:]
ROO_B=bands[0,:,:]
DIF_R=bands[0,:,:]
DIF_G=bands[0,:,:]
DIF_B=bands[0,:,:]
GLD_R=bands[0,:,:]
GLD_G=bands[0,:,:]
GLD_B=bands[0,:,:]
fillbands(originbands,displays,colorindex_vector,0,'PAT_R',PAT_R)
fillbands(originbands,displays,colorindex_vector,1,'PAT_G',PAT_G)
fillbands(originbands,displays,colorindex_vector,2,'PAT_B',PAT_B)
fillbands(originbands,displays,colorindex_vector,3,'ROO_R',ROO_R)
fillbands(originbands,displays,colorindex_vector,4,'ROO_G',ROO_G)
fillbands(originbands,displays,colorindex_vector,5,'ROO_B',ROO_B)
fillbands(originbands,displays,colorindex_vector,6,'DIF_R',DIF_R)
fillbands(originbands,displays,colorindex_vector,7,'DIF_G',DIF_G)
fillbands(originbands,displays,colorindex_vector,8,'DIF_B',DIF_B)
fillbands(originbands,displays,colorindex_vector,9,'GLD_R',GLD_R)
fillbands(originbands,displays,colorindex_vector,10,'GLD_G',GLD_G)
fillbands(originbands,displays,colorindex_vector,11,'GLD_B',GLD_B)
displayfea_vector=np.concatenate((RGB_vector,colorindex_vector),axis=1)
# np.savetxt('color-index.csv',displayfea_vector,delimiter=',',fmt='%10.5f')
featurechannel=14
originpcabands.update({file:displayfea_vector})
# pcabandsdisplay=pcabands.reshape(displayfea_l,displayfea_w,featurechannel)
# pcabandsdisplay=np.concatenate((RGB_vector,colorindex_vector),axis=2)
pcabandsdisplay=displayfea_vector[:,:14]
pcabandsdisplay=pcabandsdisplay.reshape(displayfea_l,displayfea_w,featurechannel)
tempdictdisplay={'LabOstu':pcabandsdisplay}
displaybandarray.update({file:tempdictdisplay})
originbandarray.update({file:originbands})
# Red=displays['Band1']
# Green=displays['Band2']
# Blue=displays['Band3']
# convimg=np.zeros((Red.shape[0],Red.shape[1],3))
# convimg[:,:,0]=Red
# convimg[:,:,1]=Green
# convimg[:,:,2]=Blue
# convimg=Image.fromarray(convimg.astype('uint8'))
# convimg.save('convimg.png','PNG')
need_w=int(450/3)
need_h=int(400/4)
for i in range(2,3):
band=np.copy(pcabandsdisplay[:,:,i])
# band=np.copy(Red)
# imgband=(band-band.min())*255/(band.max()-band.min())
imgband=np.copy(band)
pcimg=Image.fromarray(imgband.astype('uint8'),'L')
# pcimg.save('pc'+'_'+str(i)+'.png',"PNG")
pcimg.thumbnail((need_w,need_h),Image.ANTIALIAS)
# pcimg.save('pc'+'_'+str(i)+'.png',"PNG")
# ratio=max(displayfea_l/need_h,displayfea_w/need_w)
# print('origin band range',band.max(),band.min())
# # band,cache=tkintercorestat.pool_forward(band,{"f":int(ratio),"stride":int(ratio)})
# band=cv2.resize(band,(need_w,need_h),interpolation=cv2.INTER_LINEAR)
# bandrange=band.max()-band.min()
# print('band range',band.max(),band.min())
# band=(band-band.min())/bandrange*255
# print('button img range',band.max(),band.min())
# buttonimg=Image.fromarray(band.astype('uint8'),'L')
pcbuttons.append(ImageTk.PhotoImage(pcimg))
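# singleband: main per-image feature extraction. Resizes the image to fit within
# 2000x2000, computes the RGB bands and twelve color indices (PAT/ROO/DIF/GLD per
# channel), clips each feature to its 1st/99th percentiles, then runs PCA on the
# standardized color indices (9 PCs) and on RGB (3 PCs). The 12-channel PC stack
# is stored in displaybandarray and thumbnail buttons are built for the PC panel.
# Grayscale input is handed off to oneband().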
def singleband(file):
global displaybandarray,originbandarray,originpcabands,displayfea_l,displayfea_w
global pcbuttons
global partialpca
partialpca=False
try:
bands=Multiimagebands[file].bands
except:
return
pcbuttons=[]
channel,fea_l,fea_w=bands.shape
print('bandsize',fea_l,fea_w)
if fea_l*fea_w>2000*2000:
ratio=findratio([fea_l,fea_w],[2000,2000])
else:
ratio=1
print('ratio',ratio)
originbands={}
displays={}
displaybands=cv2.resize(bands[0,:,:],(int(fea_w/ratio),int(fea_l/ratio)),interpolation=cv2.INTER_LINEAR)
# displaybands=np.copy(bands[0,:,:])
displayfea_l,displayfea_w=displaybands.shape
# displayfea_l,displayfea_w=fea_l,fea_w
print(displayfea_l,displayfea_w)
RGB_vector=np.zeros((displayfea_l*displayfea_w,3))
colorindex_vector=np.zeros((displayfea_l*displayfea_w,12))
if channel==1:
# Red=bands[0,:,:]
# Green=bands[0,:,:]
# Blue=bands[0,:,:]
oneband(file)
return
else:
Red=bands[0,:,:]
Green=bands[1,:,:]
Blue=bands[2,:,:]
fillbands(originbands,displays,RGB_vector,0,'Band1',Red)
fillbands(originbands,displays,RGB_vector,1,'Band2',Green)
fillbands(originbands,displays,RGB_vector,2,'Band3',Blue)
# import matplotlib.pyplot as plt
# fig,axs=plt.subplots(1,3)
# for i in range(3):
# minpc2=np.min(RGB_vector[:,i])
# maxpc2=np.max(RGB_vector[:,i])
# print(minpc2,maxpc2)
# bins=range(int(minpc2),int(maxpc2),10)
# axs[i].hist(RGB_vector[:,i],bins,range=(minpc2,maxpc2))
# axs[i].set_title('RGBband_'+str(i+1))
# # plt.hist(pcabands[:,13],bins,range=(minpc2,maxpc2))
# plt.show()
# secondsmallest_R=np.partition(Red,1)[1][0]
# secondsmallest_G=np.partition(Green,1)[1][0]
# secondsmallest_B=np.partition(Blue,1)[1][0]
#
# Red=Red+secondsmallest_R
# Green=Green+secondsmallest_G
# Blue=Blue+secondsmallest_B
# Red=Red/255+1
# Green=Green/255+1
# Blue=Blue/255+1
PAT_R=Red/(Red+Green)
PAT_G=Green/(Green+Blue)
PAT_B=Blue/(Blue+Red)
ROO_R=Red/(Green+1e-6)
ROO_G=Green/(Blue+1e-6)
ROO_B=Blue/(Red+1e-6)
DIF_R=2*Red-Green-Blue
DIF_G=2*Green-Blue-Red
DIF_B=2*Blue-Red-Green
GLD_R=Red/(np.multiply(np.power(Blue,0.618),np.power(Green,0.382))+1e-6)
GLD_G=Green/(np.multiply(np.power(Blue,0.618),np.power(Red,0.382))+1e-6)
GLD_B=Blue/(np.multiply(np.power(Green,0.618),np.power(Red,0.382))+1e-6)
fillbands(originbands,displays,colorindex_vector,0,'PAT_R',PAT_R)
fillbands(originbands,displays,colorindex_vector,1,'PAT_G',PAT_G)
fillbands(originbands,displays,colorindex_vector,2,'PAT_B',PAT_B)
fillbands(originbands,displays,colorindex_vector,3,'ROO_R',ROO_R)
fillbands(originbands,displays,colorindex_vector,4,'ROO_G',ROO_G)
fillbands(originbands,displays,colorindex_vector,5,'ROO_B',ROO_B)
fillbands(originbands,displays,colorindex_vector,6,'DIF_R',DIF_R)
fillbands(originbands,displays,colorindex_vector,7,'DIF_G',DIF_G)
fillbands(originbands,displays,colorindex_vector,8,'DIF_B',DIF_B)
fillbands(originbands,displays,colorindex_vector,9,'GLD_R',GLD_R)
fillbands(originbands,displays,colorindex_vector,10,'GLD_G',GLD_G)
fillbands(originbands,displays,colorindex_vector,11,'GLD_B',GLD_B)
# for i in [5,11]:
# colorindex_vector[:,i]=np.log10(colorindex_vector[:,i])
# perc=np.percentile(colorindex_vector[:,i],99)
# print('perc',perc)
# colorindex_vector[:,i]=np.where(colorindex_vector[:,i]>perc,perc,colorindex_vector[:,i])
#
# for i in [0,1,3,4,9,10]:
# colorindex_vector[:,i]=np.log10(colorindex_vector[:,i])
# perc=np.percentile(colorindex_vector[:,i],90)
# print('perc',perc)
# colorindex_vector[:,i]=np.where(colorindex_vector[:,i]>perc,perc,colorindex_vector[:,i])
#
# for i in [3,4,9,10]:
# colorindex_vector[:,i]=np.log10(colorindex_vector[:,i])
# perc=np.percentile(colorindex_vector[:,i],1)
# print('perc',perc)
# colorindex_vector[:,i]=np.where(colorindex_vector[:,i]<perc,perc,colorindex_vector[:,i])
# perc=np.percentile(colorindex_vector[:,i],99)
# print('perc',perc)
# colorindex_vector[:,i]=np.where(colorindex_vector[:,i]>perc,perc,colorindex_vector[:,i])
#
# for i in [0,1]:
# colorindex_vector[:,i]=np.log10(colorindex_vector[:,i])
# perc=np.percentile(colorindex_vector[:,i],2)
# print('perc',perc)
# colorindex_vector[:,i]=np.where(colorindex_vector[:,i]<perc,perc,colorindex_vector[:,i])
# for i in [0,1,3,4,9,10]:
# colorindex_vector[:,i]=np.log10(colorindex_vector[:,i])
for i in range(12):
perc=np.percentile(colorindex_vector[:,i],1)
print('perc',perc)
colorindex_vector[:,i]=np.where(colorindex_vector[:,i]<perc,perc,colorindex_vector[:,i])
perc=np.percentile(colorindex_vector[:,i],99)
print('perc',perc)
colorindex_vector[:,i]=np.where(colorindex_vector[:,i]>perc,perc,colorindex_vector[:,i])
for i in range(3):
perc=np.percentile(RGB_vector[:,i],1)
print('perc',perc)
RGB_vector[:,i]=np.where(RGB_vector[:,i]<perc,perc,RGB_vector[:,i])
perc=np.percentile(RGB_vector[:,i],99)
print('perc',perc)
RGB_vector[:,i]=np.where(RGB_vector[:,i]>perc,perc,RGB_vector[:,i])
# import matplotlib.pyplot as plt
# fig,axs=plt.subplots(4,3)
# for i in range(12):
# minpc2=np.min(colorindex_vector[:,i])
# maxpc2=np.max(colorindex_vector[:,i])
# print(minpc2,maxpc2)
# # bins=range(int(minpc2),int(maxpc2)+1,10)
# axs[int(i/3),i%3].hist(colorindex_vector[:,i],10,range=(minpc2,maxpc2))
# axs[int(i/3),i%3].set_title('Colorindex_'+str(i+1))
# # axs[i].hist(colorindex_vector[:,i],10,range=(minpc2,maxpc2))
# # axs[i].set_title('Colorindex_'+str(i+1))
# # plt.hist(pcabands[:,13],bins,range=(minpc2,maxpc2))
# plt.show()
rgb_M=np.mean(RGB_vector.T,axis=1)
colorindex_M=np.mean(colorindex_vector.T,axis=1)
print('rgb_M',rgb_M,'colorindex_M',colorindex_M)
rgb_C=RGB_vector-rgb_M
colorindex_C=colorindex_vector-colorindex_M
rgb_V=np.corrcoef(rgb_C.T)
color_V=np.corrcoef(colorindex_C.T)
nans=np.isnan(color_V)
color_V[nans]=1e-6
rgb_std=rgb_C/np.std(RGB_vector.T,axis=1)
color_std=colorindex_C/np.std(colorindex_vector.T,axis=1)
nans=np.isnan(color_std)
color_std[nans]=1e-6
rgb_eigval,rgb_eigvec=np.linalg.eig(rgb_V)
color_eigval,color_eigvec=np.linalg.eig(color_V)
print('rgb_eigvec',rgb_eigvec)
print('color_eigvec',color_eigvec)
featurechannel=12
pcabands=np.zeros((colorindex_vector.shape[0],featurechannel))
rgbbands=np.zeros((colorindex_vector.shape[0],3))
# plot3d(pcabands)
# np.savetxt('rgb.csv',rgbbands,delimiter=',',fmt='%10.5f')
# pcabands[:,1]=np.copy(pcabands[:,1])
# pcabands[:,2]=pcabands[:,2]*0
indexbands=np.zeros((colorindex_vector.shape[0],3))
# for i in range(3,featurechannel):
# csvpcabands=np.zeros((colorindex_vector.shape[0],15))
for i in range(0,9):
pcn=color_eigvec[:,i]
pcnbands=np.dot(color_std,pcn)
pcvar=np.var(pcnbands)
print('color index pc',i+1,'var=',pcvar)
pcabands[:,i]=pcabands[:,i]+pcnbands
# if i<5:
# indexbands[:,i-2]=indexbands[:,i-2]+pcnbands
for i in range(9,12):
pcn=rgb_eigvec[:,i-9]
pcnbands=np.dot(rgb_std,pcn)
pcvar=np.var(pcnbands)
        print('rgb pc',i-9+1,'var=',pcvar)
pcabands[:,i]=pcabands[:,i]+pcnbands
rgbbands[:,i-9]=rgbbands[:,i-9]+pcnbands
# for i in range(0,12):
# pcn=color_eigvec[:,i]
# pcnbands=np.dot(color_std,pcn)
# pcvar=np.var(pcnbands)
# print('csv color index pc',i+1,'var=',pcvar)
# csvpcabands[:,i]=csvpcabands[:,i]+pcnbands
# for i in range(12,15):
# pcn=rgb_eigvec[:,i-12]
# pcnbands=np.dot(rgb_std,pcn)
# csvpcabands[:,i]=csvpcabands[:,i]+pcnbands
#
'''save to csv'''
# indexbands[:,0]=indexbands[:,0]+pcabands[:,2]
# indexbands[:,1]=indexbands[:,1]+pcabands[:,3]
# indexbands[:,2]=indexbands[:,2]+pcabands[:,4]
# plot3d(indexbands)
# np.savetxt('pcs.csv',pcabands,delimiter=',',fmt='%10.5f')
# minpc=np.min(pcabands)
#
# meanpc=np.mean(pcabands)
# stdpc=np.std(pcabands)
# print('meanpc',meanpc,'stdpc',stdpc)
# pcabands=pcabands-meanpc/stdpc
# import matplotlib.pyplot as plt
# minpc2=np.min(pcabands[:,13])
# maxpc2=np.max(pcabands[:,13])
# print(minpc2,maxpc2)
# bins=range(int(minpc2),int(maxpc2),10)
# plt.hist(pcabands[:,13],bins,range=(minpc2,maxpc2))
# plt.show()
# np.savetxt('pcs.csv',pcabands[:,3],delimiter=',',fmt='%10.5f')
for i in range(12):
perc=np.percentile(pcabands[:,i],1)
print('perc',perc)
pcabands[:,i]=np.where(pcabands[:,i]<perc,perc,pcabands[:,i])
perc=np.percentile(pcabands[:,i],99)
print('perc',perc)
pcabands[:,i]=np.where(pcabands[:,i]>perc,perc,pcabands[:,i])
# import matplotlib.pyplot as plt
# fig,axs=plt.subplots(4,3)
# for i in range(2,14):
# minpc2=np.min(pcabands[:,i])
# maxpc2=np.max(pcabands[:,i])
# print(minpc2,maxpc2)
# # bins=range(int(minpc2),int(maxpc2)+1,10)
# axs[int((i-2)/3),(i-2)%3].hist(pcabands[:,i],10,range=(minpc2,maxpc2))
# axs[int((i-2)/3),(i-2)%3].set_title('PC_'+str(i-2+1))
# # axs[i].hist(colorindex_vector[:,i],10,range=(minpc2,maxpc2))
# # axs[i].set_title('Colorindex_'+str(i+1))
# # plt.hist(pcabands[:,13],bins,range=(minpc2,maxpc2))
# plt.show()
# header=['R','G','B',
# 'PAT_R','PAT_G','PAT_B',
# 'DIF_R','DIF_G','DIF_B',
# 'ROO_R','ROO_G','ROO_B',
# 'GLD_R','GLD_G','GLD_B',]
# displayfea_vector=np.concatenate((RGB_vector,colorindex_vector),axis=1)
# with open('color-index.csv','w') as f:
# writer=csv.writer(f)
# writer.writerow(header)
# for i in range(displayfea_vector.shape[0]):
# writer.writerow(list(displayfea_vector[i,:]))
# np.savetxt('color-index.csv',displayfea_vector,delimiter=',',fmt='%10.5f')
displayfea_vector=np.concatenate((RGB_vector,colorindex_vector),axis=1)
originpcabands.update({file:displayfea_vector})
pcabandsdisplay=pcabands.reshape(displayfea_l,displayfea_w,featurechannel)
tempdictdisplay={'LabOstu':pcabandsdisplay}
displaybandarray.update({file:tempdictdisplay})
originbandarray.update({file:originbands})
# Red=displays['Band1']
# Green=displays['Band2']
# Blue=displays['Band3']
# convimg=np.zeros((Red.shape[0],Red.shape[1],3))
# convimg[:,:,0]=Red
# convimg[:,:,1]=Green
# convimg[:,:,2]=Blue
# convimg=Image.fromarray(convimg.astype('uint8'))
# convimg.save('convimg.png','PNG')
need_w=int(450/3)
need_h=int(400/4)
# pcdisplay=[3,4,5,6,7,8,9,10,11,0,1,2]
# for i in range(2,featurechannel):
for i in range(featurechannel):
band=np.copy(pcabandsdisplay[:,:,i])
imgband=(band-band.min())*255/(band.max()-band.min())
pcimg=Image.fromarray(imgband.astype('uint8'),'L')
# pcimg.save('pc'+'_'+str(i)+'.png',"PNG")
pcimg.thumbnail((need_w,need_h),Image.ANTIALIAS)
# pcimg.save('pc'+'_'+str(i)+'.png',"PNG")
# ratio=max(displayfea_l/need_h,displayfea_w/need_w)
# print('origin band range',band.max(),band.min())
# # band,cache=tkintercorestat.pool_forward(band,{"f":int(ratio),"stride":int(ratio)})
# band=cv2.resize(band,(need_w,need_h),interpolation=cv2.INTER_LINEAR)
# bandrange=band.max()-band.min()
# print('band range',band.max(),band.min())
# band=(band-band.min())/bandrange*255
# print('button img range',band.max(),band.min())
# buttonimg=Image.fromarray(band.astype('uint8'),'L')
pcbuttons.append(ImageTk.PhotoImage(pcimg))
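# colorindices_cal: compute seven classic vegetation/color indices (NDI, VEG,
# Greenness, CIVE, MExG, NDRB, NGRDI) at display resolution and cache them in
# colorindicearray for the current file.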
def colorindices_cal(file):
global colorindicearray
try:
bands=Multiimagebands[file].bands
except:
return
channel,fea_l,fea_w=bands.shape
print('bandsize',fea_l,fea_w)
if fea_l*fea_w>2000*2000:
ratio=findratio([fea_l,fea_w],[2000,2000])
else:
ratio=1
print('ratio',ratio)
originbands={}
displays={}
# displaybands=cv2.resize(bands[0,:,:],(int(fea_w/ratio),int(fea_l/ratio)),interpolation=cv2.INTER_LINEAR)
# displaybands=np.copy(bands[0,:,:])
# displayfea_l,displayfea_w=displaybands.shape
# displayfea_l,displayfea_w=fea_l,fea_w
print(displayfea_l,displayfea_w)
colorindex_vector=np.zeros((displayfea_l*displayfea_w,7))
if channel==1:
Red=bands[0,:,:]
Green=bands[0,:,:]
Blue=bands[0,:,:]
else:
Red=bands[0,:,:]
Green=bands[1,:,:]
Blue=bands[2,:,:]
secondsmallest_R=np.partition(Red,1)[1][0]
secondsmallest_G=np.partition(Green,1)[1][0]
secondsmallest_B=np.partition(Blue,1)[1][0]
Red=Red+secondsmallest_R
Green=Green+secondsmallest_G
Blue=Blue+secondsmallest_B
NDI=128*((Green-Red)/(Green+Red)+1)
VEG=Green/(np.power(Red,0.667)*np.power(Blue,(1-0.667)))
Greenness=Green/(Green+Red+Blue)
    CIVE=0.441*Red-0.811*Green+0.385*Blue+18.78745
    MExG=1.262*Green-0.884*Red-0.311*Blue
NDRB=(Red-Blue)/(Red+Blue)
NGRDI=(Green-Red)/(Green+Red)
fillbands(originbands,displays,colorindex_vector,0,'NDI',NDI)
fillbands(originbands,displays,colorindex_vector,1,'VEG',VEG)
fillbands(originbands,displays,colorindex_vector,2,'Greenness',Greenness)
fillbands(originbands,displays,colorindex_vector,3,'CIVE',CIVE)
fillbands(originbands,displays,colorindex_vector,4,'MExG',MExG)
fillbands(originbands,displays,colorindex_vector,5,'NDRB',NDRB)
fillbands(originbands,displays,colorindex_vector,6,'NGRDI',NGRDI)
colorindicearray.update({file:originbands})
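# singleband_oldversion: previous feature-extraction pipeline kept for reference.
# It builds an Otsu-normalized gray band plus NDI/RGB/Greenness/VEG/CIVE/MExG/
# NDVI/NGRDI features, runs a combined PCA over all of them, and fills
# displaybandarray/originbandarray the same way singleband() does.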
def singleband_oldversion(file):
global displaybandarray,originbandarray,originpcabands,displayfea_l,displayfea_w
global pcbuttons
try:
bands=Multigraybands[file].bands
except:
return
pcbuttons=[]
bandsize=Multigraybands[file].size
print('bandsize',bandsize)
try:
channel,height,width=bands.shape
except:
channel=0
if channel>1:
bands=bands[0,:,:]
#bands=cv2.GaussianBlur(bands,(3,3),cv2.BORDER_DEFAULT)
ostu=filters.threshold_otsu(bands)
bands=bands.astype('float32')
bands=bands/ostu
#display purpose
if bandsize[0]*bandsize[1]>2000*2000:
ratio=findratio([bandsize[0],bandsize[1]],[2000,2000])
else:
ratio=1
print('ratio',ratio)
#if bandsize[0]*bandsize[1]>850*850:
# ratio=findratio([bandsize[0],bandsize[1]],[850,850])
#else:
# ratio=1
#ttestbands=np.copy(bands)
#testdisplaybands=cv2.resize(ttestbands,(int(bandsize[1]/ratio),int(bandsize[0]/ratio)),interpolation=cv2.INTER_LINEAR)
#testdisplaybands=cv2.resize(testdisplaybands,(int(resizeshape[0]),int(resizeshape[1])),interpolation=cv2.INTER_LINEAR)
#print('testdisplaybands size',testdisplaybands.size)
#if bandsize[0]*bandsize[1]>850*850:
# ratio=findratio([bandsize[0],bandsize[1]],[850,850])
#else:
# ratio=1
originbands={}
displays={}
fea_l,fea_w=bands.shape
# fea_vector=np.zeros((fea_l*fea_w,3))
pyplt.imsave('bands.png',bands)
displaybands=cv2.resize(bands,(int(bandsize[1]/ratio),int(bandsize[0]/ratio)),interpolation=cv2.INTER_LINEAR)
pyplt.imsave('displaybands.png',displaybands)
displayfea_l,displayfea_w=displaybands.shape
fea_vector=np.zeros((displayfea_l*displayfea_w,3))
displayfea_vector=np.zeros((displayfea_l*displayfea_w,7))
colorfea_vector=np.zeros((displayfea_l*displayfea_w,7))
# originfea_vector=np.zeros((bandsize[0],bandsize[1],10))
# saveimg=np.copy(bands).astype('uint8')
# pyplt.imsave('ostuimg.png',saveimg)
if 'LabOstu' not in originbands:
originbands.update({'LabOstu':bands})
fea_bands=bands.reshape(fea_l*fea_w,1)[:,0]
# originfea_vector[:,9]=originfea_vector[:,0]+fea_bands
displayfea_bands=displaybands.reshape((displayfea_l*displayfea_w),1)[:,0]
# fea_vector[:,9]=fea_vector[:,0]+fea_bands
displayfea_vector[:,6]=displayfea_vector[:,6]+displayfea_bands
minv=displayfea_bands.min()
maxv=displayfea_bands.max()
fearange=maxv-minv
colorfeabands=displayfea_bands-minv
colorfeabands=colorfeabands/fearange*255
colorfea_vector[:,6]=colorfea_vector[:,6]+colorfeabands
#displaybands=displaybands.reshape((int(bandsize[1]/ratio),int(bandsize[0]/ratio),3))
#kernel=np.ones((2,2),np.float32)/4
#displaybands=np.copy(bands)
displays.update({'LabOstu':displaybands})
#displaybandarray.update({'LabOstu':cv2.filter2D(displaybands,-1,kernel)})
bands=Multiimagebands[file].bands
#for i in range(3):
# bands[i,:,:]=cv2.GaussianBlur(bands[i,:,:],(3,3),cv2.BORDER_DEFAULT)
NDI=128*((bands[1,:,:]-bands[0,:,:])/(bands[1,:,:]+bands[0,:,:])+1)
tempdict={'NDI':NDI}
# saveimg=np.copy(NDI).astype('uint8')
# pyplt.imsave('NDIimg.png',saveimg)
if 'NDI' not in originbands:
originbands.update(tempdict)
displaybands=cv2.resize(NDI,(int(bandsize[1]/ratio),int(bandsize[0]/ratio)),interpolation=cv2.INTER_LINEAR)
fea_bands=NDI.reshape(fea_l*fea_w,1)[:,0]
# originfea_vector[:,1]=originfea_vector[:,1]+fea_bands
displayfea_bands=displaybands.reshape((displayfea_l*displayfea_w),1)[:,0]
# fea_vector[:,1]=fea_vector[:,1]+fea_bands
displayfea_vector[:,1]=displayfea_vector[:,1]+displayfea_bands
minv=displayfea_bands.min()
maxv=displayfea_bands.max()
fearange=maxv-minv
colorfeabands=displayfea_bands-minv
colorfeabands=colorfeabands/fearange*255
colorfea_vector[:,1]=colorfea_vector[:,1]+colorfeabands
#displaybands=np.copy(NDI)
#kernel=np.ones((2,2),np.float32)/4
#displaydict={'NDI':cv2.filter2D(displaybands,-1,kernel)}
displaydict={'NDI':displaybands}
#displaydict=displaydict.reshape((int(bandsize[1]/ratio),int(bandsize[0]/ratio),3))
displays.update(displaydict)
Red=bands[0,:,:]
Green=bands[1,:,:]
Blue=bands[2,:,:]
tempdict={'Band1':Red}
# saveimg=np.zeros((bandsize[0],bandsize[1],3),'uint8')
# saveimg[:,:,0]=np.copy(Red).astype('uint8')
# pyplt.imsave('Redimg.png',saveimg)
# saveimg=np.zeros((bandsize[0],bandsize[1],3),'uint8')
# saveimg[:,:,1]=np.copy(Green).astype('uint8')
# pyplt.imsave('Greenimg.png',saveimg)
# saveimg=np.zeros((bandsize[0],bandsize[1],3),'uint8')
# saveimg[:,:,2]=np.copy(Blue).astype('uint8')
# pyplt.imsave('Blueimg.png',saveimg)
if 'Band1' not in originbands:
originbands.update(tempdict)
image=cv2.resize(Red,(int(bandsize[1]/ratio),int(bandsize[0]/ratio)),interpolation=cv2.INTER_LINEAR)
displaydict={'Band1':image}
displays.update(displaydict)
# fea_bands=Red.reshape(fea_l*fea_w,1)[:,0]
fea_bands=image.reshape((displayfea_l*displayfea_w),1)[:,0]
# originfea_vector[:,2]=originfea_vector[:,2]+fea_bands
displayfea_bands=image.reshape((displayfea_l*displayfea_w),1)[:,0]
fea_vector[:,0]=fea_vector[:,0]+fea_bands
# displayfea_vector[:,2]=displayfea_vector[:,2]+displayfea_bands
tempdict={'Band2':Green}
if 'Band2' not in originbands:
originbands.update(tempdict)
image=cv2.resize(Green,(int(bandsize[1]/ratio),int(bandsize[0]/ratio)),interpolation=cv2.INTER_LINEAR)
displaydict={'Band2':image}
displays.update(displaydict)
# fea_bands=Green.reshape(fea_l*fea_w,1)[:,0]
fea_bands=image.reshape((displayfea_l*displayfea_w),1)[:,0]
# originfea_vector[:,3]=originfea_vector[:,3]+fea_bands
displayfea_bands=image.reshape((displayfea_l*displayfea_w),1)[:,0]
fea_vector[:,1]=fea_vector[:,1]+fea_bands
# displayfea_vector[:,3]=displayfea_vector[:,3]+displayfea_bands
tempdict={'Band3':Blue}
if 'Band3' not in originbands:
originbands.update(tempdict)
# originfea_vector[:,4]=originfea_vector[:,4]+Blue
image=cv2.resize(Blue,(int(bandsize[1]/ratio),int(bandsize[0]/ratio)),interpolation=cv2.INTER_LINEAR)
displaydict={'Band3':image}
displays.update(displaydict)
# fea_bands=Blue.reshape(fea_l*fea_w,1)[:,0]
fea_bands=image.reshape((displayfea_l*displayfea_w),1)[:,0]
displayfea_bands=image.reshape((displayfea_l*displayfea_w),1)[:,0]
fea_vector[:,2]=fea_vector[:,2]+fea_bands
# displayfea_vector[:,4]=displayfea_vector[:,4]+displayfea_bands
Greenness = bands[1, :, :] / (bands[0, :, :] + bands[1, :, :] + bands[2, :, :])
tempdict = {'Greenness': Greenness}
if 'Greenness' not in originbands:
originbands.update(tempdict)
# originfea_vector[:,5]=originfea_vector[:,5]+Greenness
image=cv2.resize(Greenness,(int(bandsize[1]/ratio),int(bandsize[0]/ratio)),interpolation=cv2.INTER_LINEAR)
#image=image.reshape((int(bandsize[1]/ratio),int(bandsize[0]/ratio),3))
displaydict={'Greenness':image}
#displaybandarray.update(worktempdict)
displays.update(displaydict)
fea_bands=Greenness.reshape(fea_l*fea_w,1)[:,0]
displayfea_bands=image.reshape((displayfea_l*displayfea_w),1)[:,0]
# fea_vector[:,5]=fea_vector[:,5]+fea_bands
displayfea_vector[:,2]=displayfea_vector[:,2]+displayfea_bands
minv=displayfea_bands.min()
maxv=displayfea_bands.max()
fearange=maxv-minv
colorfeabands=displayfea_bands-minv
colorfeabands=colorfeabands/fearange*255
colorfea_vector[:,2]=colorfea_vector[:,2]+colorfeabands
VEG=bands[1,:,:]/(np.power(bands[0,:,:],0.667)*np.power(bands[2,:,:],(1-0.667)))
tempdict={'VEG':VEG}
if 'VEG' not in originbands:
originbands.update(tempdict)
# originfea_vector[:,6]=originfea_vector[:,6]+VEG
image=cv2.resize(VEG,(int(bandsize[1]/ratio),int(bandsize[0]/ratio)),interpolation=cv2.INTER_LINEAR)
kernel=np.ones((4,4),np.float32)/16
#displaybandarray.update({'LabOstu':})
#image=image.reshape((int(bandsize[1]/ratio),int(bandsize[0]/ratio),3))
worktempdict={'VEG':cv2.filter2D(image,-1,kernel)}
displays.update(worktempdict)
fea_bands=VEG.reshape(fea_l*fea_w,1)[:,0]
displayfea_bands=image.reshape((displayfea_l*displayfea_w),1)[:,0]
# fea_vector[:,6]=fea_vector[:,6]+fea_bands
displayfea_vector[:,3]=displayfea_vector[:,3]+displayfea_bands
minv=displayfea_bands.min()
maxv=displayfea_bands.max()
fearange=maxv-minv
colorfeabands=displayfea_bands-minv
colorfeabands=colorfeabands/fearange*255
colorfea_vector[:,3]=colorfea_vector[:,3]+colorfeabands
CIVE=0.441*bands[0,:,:]-0.811*bands[1,:,:]+0.385*bands[2,:,:]+18.78745
tempdict={'CIVE':CIVE}
if 'CIVE' not in originbands:
originbands.update(tempdict)
# originfea_vector[:,7]=originfea_vector[:,7]+CIVE
image=cv2.resize(CIVE,(int(bandsize[1]/ratio),int(bandsize[0]/ratio)),interpolation=cv2.INTER_LINEAR)
#image=image.reshape((int(bandsize[1]/ratio),int(bandsize[0]/ratio),3))
worktempdict={'CIVE':image}
displays.update(worktempdict)
fea_bands=CIVE.reshape(fea_l*fea_w,1)[:,0]
displayfea_bands=image.reshape((displayfea_l*displayfea_w),1)[:,0]
# fea_vector[:,7]=fea_vector[:,7]+fea_bands
displayfea_vector[:,4]=displayfea_vector[:,4]+displayfea_bands
minv=displayfea_bands.min()
maxv=displayfea_bands.max()
fearange=maxv-minv
colorfeabands=displayfea_bands-minv
colorfeabands=colorfeabands/fearange*255
colorfea_vector[:,4]=colorfea_vector[:,4]+colorfeabands
MExG=1.262*bands[1,:,:]-0.884*bands[0,:,:]-0.311*bands[2,:,:]
tempdict={'MExG':MExG}
if 'MExG' not in originbands:
originbands.update(tempdict)
# originfea_vector[:,8]=originfea_vector[:,8]+MExG
image=cv2.resize(MExG,(int(bandsize[1]/ratio),int(bandsize[0]/ratio)),interpolation=cv2.INTER_LINEAR)
#image=image.reshape((int(bandsize[1]/ratio),int(bandsize[0]/ratio),3))
worktempdict={'MExG':image}
displays.update(worktempdict)
fea_bands=MExG.reshape(fea_l*fea_w,1)[:,0]
displayfea_bands=image.reshape((displayfea_l*displayfea_w),1)[:,0]
# fea_vector[:,8]=fea_vector[:,8]+fea_bands
displayfea_vector[:,5]=displayfea_vector[:,5]+displayfea_bands
minv=displayfea_bands.min()
maxv=displayfea_bands.max()
fearange=maxv-minv
colorfeabands=displayfea_bands-minv
colorfeabands=colorfeabands/fearange*255
colorfea_vector[:,5]=colorfea_vector[:,5]+colorfeabands
NDVI=(bands[0,:,:]-bands[2,:,:])/(bands[0,:,:]+bands[2,:,:])
tempdict={'NDVI':NDVI}
if 'NDVI' not in originbands:
originbands.update(tempdict)
# originfea_vector[:,0]=originfea_vector[:,9]+NDVI
image=cv2.resize(NDVI,(int(bandsize[1]/ratio),int(bandsize[0]/ratio)),interpolation=cv2.INTER_LINEAR)
#image=image.reshape((int(bandsize[1]/ratio),int(bandsize[0]/ratio),3))
worktempdict={'NDVI':image}
displays.update(worktempdict)
fea_bands=NDVI.reshape(fea_l*fea_w,1)[:,0]
displayfea_bands=image.reshape((displayfea_l*displayfea_w),1)[:,0]
# fea_vector[:,0]=fea_vector[:,9]+fea_bands
displayfea_vector[:,0]=displayfea_vector[:,0]+displayfea_bands
minv=displayfea_bands.min()
maxv=displayfea_bands.max()
fearange=maxv-minv
colorfeabands=displayfea_bands-minv
colorfeabands=colorfeabands/fearange*255
colorfea_vector[:,0]=colorfea_vector[:,0]+colorfeabands
NGRDI=(bands[1,:,:]-bands[0,:,:])/(bands[1,:,:]+bands[0,:,:])
tempdict={'NGRDI':NGRDI}
if 'NGRDI' not in originbands:
originbands.update(tempdict)
image=cv2.resize(NGRDI,(int(bandsize[1]/ratio),int(bandsize[0]/ratio)),interpolation=cv2.INTER_LINEAR)
#image=image.reshape((int(bandsize[1]/ratio),int(bandsize[0]/ratio),3))
worktempdict={'NGRDI':image}
displays.update(worktempdict)
if channel>=1:
nirbands=Multigraybands[file].bands
NDVI=(nirbands[0,:,:]-bands[1,:,:])/(nirbands[0,:,:]+bands[1,:,:])
tempdict={'NDVI':NDVI}
#if 'NDVI' not in originbandarray:
originbands.update(tempdict)
image=cv2.resize(NDVI,(int(bandsize[1]/ratio),int(bandsize[0]/ratio)),interpolation=cv2.INTER_LINEAR)
#image=image.reshape((int(bandsize[1]/ratio),int(bandsize[0]/ratio),3))
worktempdict={'NDVI':image}
displays.update(worktempdict)
'''PCA part'''
displayfea_vector=np.concatenate((fea_vector,displayfea_vector),axis=1)
M=np.mean(displayfea_vector.T,axis=1)
OM=np.mean(fea_vector.T,axis=1)
print('M',M,'M shape',M.shape, 'OM',OM,'OM Shape',OM.shape)
C=displayfea_vector-M
OC=fea_vector-OM
#max=np.max(C.T,axis=1)
#print('MAX',max)
#C=C/max
print('C',C,'OC',OC)
#V=np.cov(C.T)
V=np.corrcoef(C.T)
OV=np.corrcoef(OC.T)
std=np.std(displayfea_vector.T,axis=1)
O_std=np.std(fea_vector.T,axis=1)
print(std,O_std)
std_displayfea=C/std
O_stddisplayfea=OC/O_std
print(std_displayfea,O_stddisplayfea)
#eigvalues,eigvectors=np.linalg.eig(V)
#n,m=displayfea_vector.shape
#C=np.dot(displayfea_vector.T,displayfea_vector)/(n-1)
V_var=np.cov(std_displayfea.T)
print('COV',V_var)
print('COR',V)
eigvalues=la.eigvals(V_var)
#eigvalues=np.linalg.eigvals(C)
print('eigvalue',eigvalues)
idx=np.argsort(eigvalues)
print('idx',idx)
eigvalues,eigvectors=np.linalg.eig(V)
print('eigvalue',eigvalues)
print('eigvectors',eigvectors)
eigvalueperc={}
featurechannel=10
# for i in range(len(eigvalues)):
# print('percentage',i,eigvalues[i]/sum(eigvalues))
# eigvalueperc.update({i:eigvalues[i]/sum(eigvalues)})
# #if eigvalues[i]>0:
# featurechannel+=1
# o_eigenvalue,o_eigenvector=np.linalg.eig(OV)
pcabands=np.zeros((displayfea_vector.shape[0],featurechannel))
# o_pcabands=np.zeros((fea_vector.shape[0],featurechannel))
pcavar={}
# #
# # # separate PCs
# # for i in range(3):
# # pcn=o_eigenvector[:,i]
# # pcnbands=np.dot(O_stddisplayfea,pcn)
# # pcvar=np.var(pcnbands)
# # print('pc',i+1,' var=',pcvar)
# # pcabands[:,i]=pcabands[:,i]+pcnbands
# # for i in range(7):
# # pcn=eigvectors[:,i]
# # pcnbands=np.dot(std_displayfea,pcn)
# # pcvar=np.var(pcnbands)
# # print('pc',i+1,' var=',pcvar)
# # temppcavar={i:pcvar}
# # pcavar.update(temppcavar)
# # pcabands[:,i+3]=pcabands[:,i+3]+pcnbands
# #
# #
# combined PCs
for i in range(featurechannel):
pcn=eigvectors[:,i]
# pcnbands=np.dot(std_displayfea,pcn)
pcnbands=np.dot(C,pcn)
pcvar=np.var(pcnbands)
print('pc',i+1,' var=',pcvar)
temppcavar={i:pcvar}
pcavar.update(temppcavar)
pcabands[:,i]=pcabands[:,i]+pcnbands
# ''' NO PCA'''
# colorfea_vector=np.concatenate((fea_vector,colorfea_vector),axis=1)
# displayfea_vector=np.concatenate((fea_vector,displayfea_vector),axis=1)
# M=np.mean(colorfea_vector.T,axis=1)
# print('colorfea_vector M',M)
# pcabands=np.copy(colorfea_vector)
# featurechannel=10
'''Export to CSV'''
# np.savetxt('pcs.csv',pcabands,delimiter=',',fmt='%s')
# np.savetxt('color-index.csv',displayfea_vector,delimiter=',',fmt='%s')
#threedplot(pcabands)
# originpcabands.update({file:o_pcabands})
originpcabands.update({file:displayfea_vector})
pcabandsdisplay=pcabands.reshape(displayfea_l,displayfea_w,featurechannel)
#originbands={'LabOstu':pcabandsdisplay}
tempdictdisplay={'LabOstu':pcabandsdisplay}
#displaybandarray.update({file:displays})
displaybandarray.update({file:tempdictdisplay})
originbandarray.update({file:originbands})
need_w=int(450/4)
need_h=int(400/3)
for i in range(featurechannel):
band=np.copy(pcabandsdisplay[:,:,i])
ratio=max(displayfea_l/need_h,displayfea_w/need_w)
band,cache=tkintercorestat.pool_forward(band,{"f":int(ratio),"stride":int(ratio)})
bandrange=band.max()-band.min()
band=(band-band.min())/bandrange*255
buttonimg=Image.fromarray(band.astype('uint8'),'L')
pcbuttons.append(ImageTk.PhotoImage(buttonimg))
# buttonimg.save('pcbutton_'+str(i)+'.png',"PNG")
# print('saved')
from mpl_toolkits.mplot3d import Axes3D
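# threedplot: debugging helper that scatter-plots the first 100 samples of three
# PCA columns projected onto the bounding planes of the axes.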
def threedplot(area):
fig=pyplt.figure()
ax=fig.add_subplot(111,projection='3d')
n=100
xs=np.copy(area[0:n,0])
ys=np.copy(area[0:n,1])
zs=np.copy(area[0:n,3])
colors=("red","green","blue")
groups=("PC1","PC2","PC3")
#for c,l in [('r','o'),('g','^')]:
ax.scatter(xs,ys,np.max(zs),c='r',marker='o')
ax.scatter(xs,np.min(ys),zs,c='b',marker='^')
ax.scatter(np.max(xs),ys,zs,c='g')
ax.set_xlabel('PC1')
ax.set_ylabel('PC2')
ax.set_zlabel('PC3')
pyplt.show()
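# changeimage: switch the GUI to another loaded file: reset the cached cluster
# display, regenerate the display images, clear the band checkboxes and, if
# results already exist for that file, rebuild the iteration slider and the
# 'Export Results' button in the results panel.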
def changeimage(frame,filename):
global clusterdisplay,currentfilename,resviewframe
clusterdisplay={}
currentfilename=filename
print(filename)
generatedisplayimg(filename)
changedisplayimg(frame,'Origin')
for key in cluster:
tuplist=[]
for i in range(len(cluster)):
tuplist.append('')
tup=tuple(tuplist)
bandchoice[key].set(tup)
#for key in cluster:
# ch=ttk.Checkbutton(contentframe,text=key,variable=bandchoice[key],command=changecluster)#,command=partial(autosetclassnumber,clusternumberentry,bandchoice))
# ch.pack()
if filename in multi_results.keys():
for widget in resviewframe.winfo_children():
widget.pack_forget()
iternum=len(list(multi_results[filename][0].keys()))
itervar=IntVar()
itervar.set(iternum)
resscaler=Scale(resviewframe,from_=1,to=iternum,tickinterval=1,length=220,orient=HORIZONTAL,variable=itervar,command=partial(changeoutputimg,filename))
resscaler.pack()
outputbutton=Button(resviewframe,text='Export Results',command=partial(export_result,itervar))
outputbutton.pack()
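# generatecheckbox: rebuild the row of cluster checkboxes (1..10), enabling only
# the first k of them (k = current k-means setting), and reset the color strip.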
def generatecheckbox(frame,classnum):
global checkboxdict,havecolorstrip
changekmeansbar('')
for widget in frame.winfo_children():
widget.pack_forget()
checkboxdict={}
havecolorstrip=False
addcolorstrip()
for i in range(10):
dictkey=str(i+1)
tempdict={dictkey:Variable()}
tempdict[dictkey].set('0')
checkboxdict.update(tempdict)
ch=Checkbutton(checkboxframe,text=dictkey,variable=checkboxdict[dictkey],command=partial(changeclusterbox,''))#,command=partial(changecluster,''))
if i+1>int(kmeans.get()):
ch.config(state=DISABLED)
ch.pack(side=LEFT)
#if i==0:
# ch.invoke()
#for i in range(int(classnum)):
# dictkey='class '+str(i+1)
# tempdict={dictkey:Variable()}
# checkboxdict.update(tempdict)
#ch=ttk.Checkbutton(frame,text=dictkey,command=partial(generateplant,checkboxdict,bandchoice,classnum),variable=checkboxdict[dictkey])
# ch=ttk.Checkbutton(frame,text=dictkey,command=changecluster,variable=checkboxdict[dictkey])
# ch.grid(row=int(i/3),column=int(i%3))
# if i==minipixelareaclass:
# ch.invoke()
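# generateimgplant: turn the current cluster selection into a binary mask
# (currentlabels/originbinaryimg) and refresh both the 'ColorIndices' view
# (selected clusters highlighted) and the 'Color Deviation' view (all clusters
# colored, or a grayscale image when k=1), then switch the main canvas to the
# appropriate view.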
def generateimgplant(event):
global currentlabels,changekmeans,colordicesband,originbinaryimg,pre_checkbox
colordicesband=np.copy(displaylabels)
keys=checkboxdict.keys()
plantchoice=[]
pre_checkbox=[]
for key in keys:
plantchoice.append(checkboxdict[key].get())
pre_checkbox.append(checkboxdict[key].get())
origindisplaylabels=np.copy(displaybandarray[currentfilename]['LabOstu'])
h,w,c=origindisplaylabels.shape
# tempdisplayimg=np.zeros((displaybandarray[currentfilename]['LabOstu'].shape[0],
# displaybandarray[currentfilename]['LabOstu'].shape[1]))
# colordivimg=np.zeros((displaybandarray[currentfilename]['LabOstu'].shape[0],
# displaybandarray[currentfilename]['LabOstu'].shape[1]))
tempdisplayimg=np.zeros((h,w))
colordivimg=np.zeros((h,w))
sel_count=plantchoice.count('1')
if sel_count == int(kmeans.get()):
tempdisplayimg=tempdisplayimg+1
else:
for i in range(int(kmeans.get())):
tup=plantchoice[i]
if '1' in tup:
tempdisplayimg=np.where(displaylabels==i,1,tempdisplayimg)
# uniquecolor=np.unique(tempdisplayimg)
# if len(uniquecolor)==1 and uniquecolor[0]==1:
# tempdisplayimg=np.copy(displaylabels).astype('float32')
currentlabels=np.copy(tempdisplayimg)
originbinaryimg=np.copy(tempdisplayimg)
tempcolorimg=np.copy(displaylabels).astype('float32')
# ratio=findratio([h,w],[850,850])
# if h*w<850*850:
# tempdisplayimg=cv2.resize(tempdisplayimg,(int(w*ratio),int(h*ratio)))
# colordivimg=cv2.resize(tempcolorimg,(int(w*ratio),int(h*ratio)))
# if h>850:
# ratio=round(h/850)
# tempdisplayimg=cv2.resize(tempdisplayimg,(int(w/ratio),int(h/ratio)))
# colordivimg=cv2.resize(tempcolorimg,(int(w/ratio),int(h/ratio)))
# if w>850:
# ratio=round(w/850)
# tempdisplayimg=cv2.resize(tempdisplayimg,(int(w/ratio),int(h/ratio)))
# colordivimg=cv2.resize(tempcolorimg,(int(w/ratio),int(h/ratio)))
# else:
# tempdisplayimg=cv2.resize(tempdisplayimg,(int(w/ratio),int(h/ratio)))
# colordivimg=cv2.resize(tempcolorimg,(int(w/ratio),int(h/ratio)))
# tempdisplayimg=cv2.resize(tempdisplayimg,(int(resizeshape[0]),int(resizeshape[1])))
# colordivimg=cv2.resize(tempcolorimg,(int(resizeshape[0]),int(resizeshape[1])))
colordivimg=np.copy(tempcolorimg)
binaryimg=np.zeros((h,w,3))
kvar=int(kmeans.get())
locs=np.where(tempdisplayimg==1)
binaryimg[locs]=[240,228,66]
colordeimg=np.zeros((h,w,3))
# binarypreview=cv2.resize(binaryimg,(int(previewshape[0]),int(previewshape[1])))
binarypreview=np.copy(binaryimg)
if kvar==1:
if colordivimg.min()<0:
# if abs(colordivimg.min())<colordivimg.max():
colordivimg=colordivimg-colordivimg.min()
colorrange=colordivimg.max()-colordivimg.min()
colordivimg=colordivimg*255/colorrange
grayimg=Image.fromarray(colordivimg.astype('uint8'),'L')
grayimg=grayimg.resize((int(resizeshape[0]),int(resizeshape[1])))
#grayimg.show()
colordivdict={}
colordivdict.update({'Size':[resizeshape[1],resizeshape[0]]})
colordivdict.update({'Image':ImageTk.PhotoImage(grayimg)})
displayimg['Color Deviation']=colordivdict
colordivpreview={}
# colordivpreimg=cv2.resize(colordivimg,(int(previewshape[0]),int(previewshape[1])))
graypreviewimg=Image.fromarray(colordivimg.astype('uint8'),'L')
graypreviewimg=graypreviewimg.resize((int(previewshape[0]),int(previewshape[1])))
colordivpreview.update({'Size':[previewshape[1],previewshape[0]]})
colordivpreview.update({'Image':ImageTk.PhotoImage(graypreviewimg)})
previewimg['Color Deviation']=colordivpreview
binaryimg=np.zeros((resizeshape[1],resizeshape[0],3))
tempdict={}
tempdict.update({'Size':[resizeshape[1],resizeshape[0]]})
tempdict.update({'Image':ImageTk.PhotoImage(Image.fromarray(binaryimg.astype('uint8')))})
displayimg['ColorIndices']=tempdict
binarypreview=np.zeros((int(previewshape[1]),int(previewshape[0])))
tempdict={}
tempdict.update({'Size':binarypreview.shape})
tempdict.update({'Image':ImageTk.PhotoImage(Image.fromarray(binarypreview.astype('uint8')))})
previewimg['ColorIndices']=tempdict
# changedisplayimg(imageframe,'Color Deviation')
else:
for i in range(kvar):
locs=np.where(colordivimg==i)
colordeimg[locs]=colorbandtable[i]
#pyplt.imsave('displayimg.png',tempdisplayimg)
#pyplt.imsave('allcolorindex.png',colordivimg)
#bands=Image.fromarray(tempdisplayimg)
#bands=bands.convert('L')
#bands.save('displayimg.png')
#indimg=cv2.imread('displayimg.png')
colordeimg=Image.fromarray(colordeimg.astype('uint8'))
colordeimg.save('allcolorindex.png',"PNG")
binaryimg=Image.fromarray(binaryimg.astype('uint8'))
binaryimg.save('binaryimg.png',"PNG")
binaryimg=binaryimg.resize((int(resizeshape[0]),int(resizeshape[1])))
tempdict={}
tempdict.update({'Size':[resizeshape[1],resizeshape[0]]})
tempdict.update({'Image':ImageTk.PhotoImage(binaryimg)})
displayimg['ColorIndices']=tempdict
tempdict={}
binaryimg=binaryimg.resize((int(previewshape[0]),int(previewshape[1])))
tempdict.update({'Size':[previewshape[1],previewshape[0]]})
tempdict.update({'Image':ImageTk.PhotoImage(binaryimg)})
previewimg['ColorIndices']=tempdict
#indimg=cv2.imread('allcolorindex.png')
#tempdict.update({'Image':ImageTk.PhotoImage(Image.fromarray(indimg))})
#
# colorimg=cv2.imread('allcolorindex.png')
# Image.fromarray((binaryimg.astype('uint8'))).save('binaryimg.png',"PNG")
colordeimg=colordeimg.resize((resizeshape[0],resizeshape[1]))
colordivdict={}
colordivdict.update({'Size':[resizeshape[1],resizeshape[0]]})
colordivdict.update({'Image':ImageTk.PhotoImage(colordeimg)})
displayimg['Color Deviation']=colordivdict
colordivdict={}
# colordeimgpre=cv2.resize(colordeimg,(int(previewshape[0]),int(previewshape[1])))
colordeimg=colordeimg.resize((previewshape[0],previewshape[1]))
colordivdict.update({'Size':[previewshape[1],previewshape[0]]})
colordivdict.update({'Image':ImageTk.PhotoImage(colordeimg)})
previewimg['Color Deviation']=colordivdict
# changedisplayimg(imageframe,'ColorIndices')
# print('sel count',sel_count)
if kvar>1:
if sel_count==0:
changedisplayimg(imageframe,'Color Deviation')
else:
changedisplayimg(imageframe,'ColorIndices')
# changekmeans=True
#def kmeansclassify(choicelist,reshapedtif):
def kmeansclassify_oldversion():
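    # Legacy version: stacks the selected PCA channels, averages them for k=1 or runs
    # KMeans for k>1, renumbers clusters by ascending maximum band value and caches
    # the result in clusterdisplay.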
global clusterdisplay
#,minipixelareaclass
if int(kmeans.get())==0:
return
#for i in range(len(choicelist)):
# tempband=displaybandarray[currentfilename][choicelist[i]]
#tempband=cv2.resize(tempband,(450,450),interpolation=cv2.INTER_LINEAR)
# reshapedtif[:,i]=tempband.reshape(tempband.shape[0]*tempband.shape[1],2)[:,0]
#if len(choicelist)==0:
originpcabands=displaybandarray[currentfilename]['LabOstu']
pcah,pcaw,pcac=originpcabands.shape
pcacount={}
keys=list(pcaboxdict.keys())
for item in keys:
if pcaboxdict[item].get()=='1':
pcacount.update({item:pcaboxdict[item]})
pcakeys=list(pcacount.keys())
tempband=np.zeros((pcah,pcaw,len(pcakeys)))
for i in range(len(pcakeys)):
channel=int(pcakeys[i])-1
tempband[:,:,i]=tempband[:,:,i]+originpcabands[:,:,channel]
if int(kmeans.get())==1:
print('kmeans=1')
displaylabels=np.mean(tempband,axis=2)
pyplt.imsave('k=1.png',displaylabels)
else:
#tempband=displaybandarray[currentfilename]['LabOstu']
if int(kmeans.get())>1:
h,w,c=tempband.shape
print('shape',tempband.shape)
reshapedtif=tempband.reshape(tempband.shape[0]*tempband.shape[1],c)
print('reshape',reshapedtif.shape)
clf=KMeans(n_clusters=int(kmeans.get()),init='k-means++',n_init=10,random_state=0)
tempdisplayimg=clf.fit(reshapedtif)
# print('label=0',np.any(tempdisplayimg==0))
displaylabels=tempdisplayimg.labels_.reshape((displaybandarray[currentfilename]['LabOstu'].shape[0],
displaybandarray[currentfilename]['LabOstu'].shape[1]))
clusterdict={}
displaylabels=displaylabels+10
for i in range(int(kmeans.get())):
locs=np.where(tempdisplayimg.labels_==i)
maxval=reshapedtif[locs].max()
print(maxval)
clusterdict.update({maxval:i+10})
print(clusterdict)
sortcluster=list(sorted(clusterdict))
print(sortcluster)
for i in range(len(sortcluster)):
cluster_num=clusterdict[sortcluster[i]]
displaylabels=np.where(displaylabels==cluster_num,i,displaylabels)
# pixelarea=1.0
# for i in range(int(kmeans.get())):
# pixelloc=np.where(displaylabels==i)
# pixelnum=len(pixelloc[0])
# temparea=float(pixelnum/(displaylabels.shape[0]*displaylabels.shape[1]))
# if temparea<pixelarea:
# #minipixelareaclass=i
# pixelarea=temparea
if kmeans.get() not in clusterdisplay:
tempdict={kmeans.get():displaylabels}
#clusterdisplay.update({''.join(choicelist):tempdict})
clusterdisplay.update(tempdict)
return displaylabels
def kmeansclassify():
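    # Blend the selected PC with an RGB PC according to the slider weight, then for k>1
    # run KMeans (restricted to nonzero pixels when partialpca is set) and renumber the
    # clusters by ascending maximum intensity; returns the label image.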
global clusterdisplay,displaylabels
if int(kmeans.get())==0:
return
originpcabands=displaybandarray[currentfilename]['LabOstu']
pcah,pcaw,pcac=originpcabands.shape
pcpara=pc_combine_up.get()
print(pcpara,type(pcpara))
tempband=np.zeros((pcah,pcaw,1))
# pcsel=buttonvar.get()+2
pcsel=buttonvar.get()
pcweights=pc_combine_up.get()-0.5
if pcweights==0.0:
tempband[:,:,0]=tempband[:,:,0]+originpcabands[:,:,pcsel]
else:
if pcweights<0.0: #RGBPC1
rgbpc=originpcabands[:,:,9]
else:
rgbpc=originpcabands[:,:,10]
rgbpc=(rgbpc-rgbpc.min())*255/(rgbpc.max()-rgbpc.min())
firstterm=abs(pcweights)*2*rgbpc
colorpc=originpcabands[:,:,pcsel]
colorpc=(colorpc-colorpc.min())*255/(colorpc.max()-colorpc.min())
secondterm=(1-abs(pcweights)*2)*colorpc
tempband[:,:,0]=tempband[:,:,0]+firstterm+secondterm
if int(kmeans.get())==1:
print('kmeans=1')
displaylabels=np.mean(tempband,axis=2)
pyplt.imsave('k=1.png',displaylabels)
else:
if int(kmeans.get())>1:
h,w,c=tempband.shape
print('shape',tempband.shape)
reshapedtif=tempband.reshape(tempband.shape[0]*tempband.shape[1],c)
if partialpca==True:
partialshape=reshapedtif[nonzero_vector]
print('partial reshape',partialshape.shape)
clf=KMeans(n_clusters=int(kmeans.get()),init='k-means++',n_init=10,random_state=0)
tempdisplayimg=clf.fit(partialshape)
reshapedtif[nonzero_vector,0]=np.add(tempdisplayimg.labels_,1)
print(reshapedtif[nonzero_vector])
displaylabels=reshapedtif.reshape((displaybandarray[currentfilename]['LabOstu'].shape[0],
displaybandarray[currentfilename]['LabOstu'].shape[1]))
# reshapedtif=cv2.resize(reshapedtif,(c,resizeshape[0]*resizeshape[1]),cv2.INTER_LINEAR)
clusterdict={}
displaylabels=displaylabels+10
for i in range(int(kmeans.get())):
locs=np.where(tempdisplayimg.labels_==i)
try:
maxval=partialshape[locs].max()
except:
print('kmeans',i)
                        messagebox.showerror('Error','Cluster '+str(i)+' has no pixels; cannot compute its maximum value.')
return displaylabels
print(maxval)
clusterdict.update({maxval:i+11})
print(clusterdict)
sortcluster=list(sorted(clusterdict))
print(sortcluster)
for i in range(len(sortcluster)):
cluster_num=clusterdict[sortcluster[i]]
displaylabels=np.where(displaylabels==cluster_num,i,displaylabels)
return displaylabels
else:
print('reshape',reshapedtif.shape)
clf=KMeans(n_clusters=int(kmeans.get()),init='k-means++',n_init=10,random_state=0)
tempdisplayimg=clf.fit(reshapedtif)
# print('label=0',np.any(tempdisplayimg==0))
displaylabels=tempdisplayimg.labels_.reshape((displaybandarray[currentfilename]['LabOstu'].shape[0],
displaybandarray[currentfilename]['LabOstu'].shape[1]))
# displaylabels=tempdisplayimg.labels_.reshape((resizeshape[1],resizeshape[0]))
clusterdict={}
displaylabels=displaylabels+10
for i in range(int(kmeans.get())):
locs=np.where(tempdisplayimg.labels_==i)
maxval=reshapedtif[locs].max()
print(maxval)
clusterdict.update({maxval:i+10})
print(clusterdict)
sortcluster=list(sorted(clusterdict))
print(sortcluster)
for i in range(len(sortcluster)):
cluster_num=clusterdict[sortcluster[i]]
displaylabels=np.where(displaylabels==cluster_num,i,displaylabels)
# if kmeans.get() not in clusterdisplay:
# tempdict={kmeans.get():displaylabels}
# #clusterdisplay.update({''.join(choicelist):tempdict})
# clusterdisplay.update(tempdict)
return displaylabels
def addcolorstrip():
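    # Redraw the colour-strip canvas for the current cluster count if it is not already drawn.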
global kmeanscanvasframe,havecolorstrip
if havecolorstrip is False:
colornum=int(kmeans.get())
for widget in kmeanscanvasframe.winfo_children():
widget.pack_forget()
widget.delete(ALL)
widget.config(width=350,height=10)
widget.create_image(3,0,image=colorstripdict['colorstrip'+str(colornum)],anchor=NW)
widget.pack()
havecolorstrip=True
def getPCs():
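    # Recompute the weighted PC band, rescale it to 0-255 and show it as the 'PCs' display image.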
global displayimg,displaypclabels
originpcabands=displaybandarray[currentfilename]['LabOstu']
pcah,pcaw,pcac=originpcabands.shape
pcweights=pc_combine_up.get()-0.5
tempband=np.zeros((pcah,pcaw))
# pcsel=buttonvar.get()+2
pcsel=buttonvar.get()
if pcweights==0.0:
tempband=tempband+originpcabands[:,:,pcsel]
else:
if pcweights<0.0: #RGBPC1
rgbpc=originpcabands[:,:,9]
else:
rgbpc=originpcabands[:,:,10]
rgbpc=(rgbpc-rgbpc.min())*255/(rgbpc.max()-rgbpc.min())
firstterm=abs(pcweights)*2*rgbpc
colorpc=originpcabands[:,:,pcsel]
colorpc=(colorpc-colorpc.min())*255/(colorpc.max()-colorpc.min())
secondterm=(1-abs(pcweights)*2)*colorpc
tempband=tempband+firstterm+secondterm
displaypclabels=np.copy(tempband)
displaylabels=np.copy(tempband)
pyplt.imsave('k=1.png',displaylabels)
colordivimg=np.copy(displaylabels)
print('origin pc range',colordivimg.max(),colordivimg.min())
# colordivimg=cv2.resize(tempcolorimg,(int(resizeshape[0]),int(resizeshape[1])))
print('pc range',colordivimg.max(),colordivimg.min())
if colordivimg.min()<0:
colordivimg=colordivimg-colordivimg.min()
colorrange=colordivimg.max()-colordivimg.min()
colordivimg=(colordivimg)*255/colorrange
colordivimg=Image.fromarray(colordivimg.astype('uint8'),'L')
colordivimg=colordivimg.resize((int(resizeshape[0]),int(resizeshape[1])),Image.ANTIALIAS)
displayimg['PCs']['Image']=ImageTk.PhotoImage(colordivimg)
# displayimg['Color Deviation']['Image']=ImageTk.PhotoImage(colordivimg)
def getPCs_olcversion():
global displayimg
originpcabands=displaybandarray[currentfilename]['LabOstu']
pcah,pcaw,pcac=originpcabands.shape
pcacount={}
keys=list(pcaboxdict.keys())
for item in keys:
if pcaboxdict[item].get()=='1':
pcacount.update({item:pcaboxdict[item]})
pcakeys=list(pcacount.keys())
tempband=np.zeros((pcah,pcaw,len(pcakeys)))
for i in range(len(pcakeys)):
channel=int(pcakeys[i])-1
tempband[:,:,i]=tempband[:,:,i]+originpcabands[:,:,channel]
# if int(kmeans.get())==1:
print('kmeans=1')
displaylabels=np.mean(tempband,axis=2)
pyplt.imsave('k=1.png',displaylabels)
ratio=findratio([originpcabands.shape[0],originpcabands.shape[1]],[screenstd,screenstd])
tempcolorimg=np.copy(displaylabels)
colordivimg=np.zeros((displaylabels.shape[0],
displaylabels.shape[1]))
# if originpcabands.shape[0]*originpcabands.shape[1]<850*850:
# # tempdisplayimg=cv2.resize(originpcabands,(int(originpcabands.shape[1]*ratio),int(originpcabands.shape[0]*ratio)))
# colordivimg=cv2.resize(tempcolorimg,(int(colordivimg.shape[1]*ratio),int(colordivimg.shape[0]*ratio)))
# else:
# # tempdisplayimg=cv2.resize(originpcabands,(int(originpcabands.shape[1]/ratio),int(originpcabands.shape[0]/ratio)))
# colordivimg=cv2.resize(tempcolorimg,(int(colordivimg.shape[1]/ratio),int(colordivimg.shape[0]/ratio)))
# if colordivimg.min()<0:
# if abs(colordivimg.min())<colordivimg.max():
# colordivimg=colordivimg-colordivimg.min()
colordivimg=cv2.resize(tempcolorimg,(int(resizeshape[0]),int(resizeshape[1])))
if colordivimg.min()<0:
colordivimg=colordivimg-colordivimg.min()
colorrange=colordivimg.max()-colordivimg.min()
colordivimg=colordivimg*255/colorrange
colordivimg=colordivimg.astype('uint8')
grayimg=Image.fromarray(colordivimg,'L')
displayimg['PCs']['Image']=ImageTk.PhotoImage(grayimg)
def changepca(event):
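    # If the PCA checkbox selection changed, refresh the PC preview, clear the cluster
    # cache, rerun KMeans and regenerate the plant mask.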
global clusterdisplay,colordicesband,oldpcachoice
global displaylabels
if len(oldpcachoice)>0:
keys=pcaboxdict.keys()
newlist=[]
for key in keys:
newlist.append(pcaboxdict[key].get())
samecount=0
print('oldlist',oldpcachoice)
print('newlist',newlist)
for i in range(len(oldpcachoice)):
if oldpcachoice[i]==newlist[i]:
samecount+=1
if samecount==len(oldpcachoice):
return
getPCs()
clusterdisplay={}
keys=pcaboxdict.keys()
oldpcachoice=[]
for key in keys:
oldpcachoice.append(pcaboxdict[key].get())
displaylabels=kmeansclassify()
colordicesband=np.copy(displaylabels)
    generateimgplant('')
return
def savePCAimg(path,originfile,file):
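    # Rebuild the weighted PC band, rescale it to 0-255 and save it at the original
    # image resolution as '<originfile>-PCAimg.png'.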
originpcabands=displaybandarray[currentfilename]['LabOstu']
pcah,pcaw,pcac=originpcabands.shape
# pcacount={}
# keys=list(pcaboxdict.keys())
# for item in keys:
# if pcaboxdict[item].get()=='1':
# pcacount.update({item:pcaboxdict[item]})
# pcakeys=list(pcacount.keys())
# tempband=np.zeros((pcah,pcaw,len(pcakeys)))
# for i in range(len(pcakeys)):
# channel=int(pcakeys[i])-1
# tempband[:,:,i]=tempband[:,:,i]+originpcabands[:,:,channel]
# displaylabels=np.mean(tempband,axis=2)
# generateimgplant(displaylabels)
# grayimg=(((displaylabels-displaylabels.min())/(displaylabels.max()-displaylabels.min()))*255.9).astype(np.uint8)
# pyplt.imsave('k=1.png',displaylabels.astype('uint8'))
# pyplt.imsave('k=1.png',grayimg)
pcweights=pc_combine_up.get()-0.5
tempband=np.zeros((pcah,pcaw))
# pcsel=buttonvar.get()+2
pcsel=buttonvar.get()
if pcweights==0.0:
tempband=tempband+originpcabands[:,:,pcsel]
else:
if pcweights<0.0: #RGBPC1
rgbpc=originpcabands[:,:,9]
else:
rgbpc=originpcabands[:,:,10]
rgbpc=(rgbpc-rgbpc.min())*255/(rgbpc.max()-rgbpc.min())
firstterm=abs(pcweights)*2*rgbpc
colorpc=originpcabands[:,:,pcsel]
colorpc=(colorpc-colorpc.min())*255/(colorpc.max()-colorpc.min())
secondterm=(1-abs(pcweights)*2)*colorpc
tempband=tempband+firstterm+secondterm
displaylabels=np.copy(tempband)
if displaylabels.min()<0:
# if abs(displaylabels.min())<displaylabels.max():
displaylabels=displaylabels-displaylabels.min()
colorrange=displaylabels.max()-displaylabels.min()
displaylabels=displaylabels*255/colorrange
grayimg=Image.fromarray(displaylabels.astype('uint8'),'L')
originheight,originwidth=Multigraybands[file].size
origingray=grayimg.resize([originwidth,originheight],resample=Image.BILINEAR)
origingray.save(path+'/'+originfile+'-PCAimg.png',"PNG")
# addcolorstrip()
return
def changecluster(event):
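    # Recompute the weighted PC band; for k=1 save and display it as a greyscale image,
    # otherwise rerun KMeans, clear the cluster checkboxes and redraw the colour strip.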
global havecolorstrip,pre_checkbox,displaylabels,needreclass
originpcabands=displaybandarray[currentfilename]['LabOstu']
pcah,pcaw,pcac=originpcabands.shape
pcweights=pc_combine_up.get()-0.5
tempband=np.zeros((pcah,pcaw,1))
# pcsel=buttonvar.get()+2
pcsel=buttonvar.get()
if pcweights==0.0:
tempband[:,:,0]=tempband[:,:,0]+originpcabands[:,:,pcsel]
else:
if pcweights<0.0: #RGBPC1
rgbpc=originpcabands[:,:,9]
else:
rgbpc=originpcabands[:,:,10]
rgbpc=(rgbpc-rgbpc.min())*255/(rgbpc.max()-rgbpc.min())
firstterm=abs(pcweights)*2*rgbpc
colorpc=originpcabands[:,:,pcsel]
colorpc=(colorpc-colorpc.min())*255/(colorpc.max()-colorpc.min())
secondterm=(1-abs(pcweights)*2)*colorpc
tempband[:,:,0]=tempband[:,:,0]+firstterm+secondterm
if int(kmeans.get())==1:
displaylabels=np.mean(tempband,axis=2)
generateimgplant(displaylabels)
print('max',displaylabels.max())
print('min',displaylabels.min())
if displaylabels.min()<0:
# if abs(displaylabels.min())<displaylabels.max():
displaylabels=displaylabels-displaylabels.min()
colorrange=displaylabels.max()-displaylabels.min()
displaylabels=displaylabels*255/colorrange
grayimg=Image.fromarray(displaylabels.astype('uint8'),'L')
print('max',displaylabels.max())
print('min',displaylabels.min())
# grayimg.thumbnail((int(resizeshape[0]),int(resizeshape[1])),Image.ANTIALIAS)
grayimg.save('k=1.png',"PNG")
addcolorstrip()
return
else:
# if kmeans.get() in clusterdisplay:
# displaylabels=clusterdisplay[kmeans.get()]
#
# else:
# havecolorstrip=False
# # choicelist=[]
# #reshapemodified_tif=np.zeros((displaybandarray[currentfilename]['LabOstu'].shape[0]*displaybandarray[currentfilename]['LabOstu'].shape[1],len(choicelist)))
# #displaylabels=kmeansclassify(choicelist,reshapemodified_tif)
# displaylabels=kmeansclassify()
displaylabels=kmeansclassify()
# changedisplayimg(imageframe,'Color Deviation')
global checkboxdict
keys=checkboxdict.keys()
for key in keys:
checkboxdict[key].set('0')
generateimgplant('')
# pyplt.imsave('allcolorindex.png',displaylabels)
#kmeanscanvas.update()
addcolorstrip()
return
def changecluster_oldversion(event):
global havecolorstrip,pre_checkbox
imageband=np.copy(displaybandarray[currentfilename]['LabOstu'])
if int(kmeans.get())==1:
originpcabands=displaybandarray[currentfilename]['LabOstu']
pcah,pcaw,pcac=originpcabands.shape
pcacount={}
keys=list(pcaboxdict.keys())
for item in keys:
if pcaboxdict[item].get()=='1':
pcacount.update({item:pcaboxdict[item]})
pcakeys=list(pcacount.keys())
tempband=np.zeros((pcah,pcaw,len(pcakeys)))
for i in range(len(pcakeys)):
channel=int(pcakeys[i])-1
tempband[:,:,i]=tempband[:,:,i]+originpcabands[:,:,channel]
displaylabels=np.mean(tempband,axis=2)
generateimgplant(displaylabels)
# grayimg=(((displaylabels-displaylabels.min())/(displaylabels.max()-displaylabels.min()))*255.9).astype(np.uint8)
# pyplt.imsave('k=1.png',displaylabels.astype('uint8'))
# pyplt.imsave('k=1.png',grayimg)
print('max',displaylabels.max())
print('min',displaylabels.min())
if displaylabels.min()<0:
# if abs(displaylabels.min())<displaylabels.max():
displaylabels=displaylabels-displaylabels.min()
colorrange=displaylabels.max()-displaylabels.min()
displaylabels=displaylabels*255/colorrange
grayimg=Image.fromarray(displaylabels.astype('uint8'),'L')
print('max',displaylabels.max())
print('min',displaylabels.min())
grayimg.save('k=1.png',"PNG")
# originheight,originwidth=Multigraybands[filenames[0]].size
# origingray=grayimg.resize([originwidth,originheight],resample=Image.BILINEAR)
# origingray.save('PCAimg.png',"PNG")
addcolorstrip()
return
else:
if kmeans.get() in clusterdisplay:
displaylabels=clusterdisplay[kmeans.get()]
if len(pre_checkbox)>0:
keys=checkboxdict.keys()
plantchoice=[]
for key in keys:
plantchoice.append(checkboxdict[key].get())
allsame=True
for i in range(len(pre_checkbox)):
if pre_checkbox[i]!=plantchoice[i]:
allsame=False
if allsame==True:
print('allsame=true')
return
else:
havecolorstrip=False
choicelist=[]
#reshapemodified_tif=np.zeros((displaybandarray[currentfilename]['LabOstu'].shape[0]*displaybandarray[currentfilename]['LabOstu'].shape[1],len(choicelist)))
#displaylabels=kmeansclassify(choicelist,reshapemodified_tif)
displaylabels=kmeansclassify()
generateimgplant(displaylabels)
# pyplt.imsave('allcolorindex.png',displaylabels)
#kmeanscanvas.update()
addcolorstrip()
return
def showcounting(tup,number=True,frame=True,header=True,whext=False,blkext=False):
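    # Draw bounding boxes, per-item labels and an item-count header on the segmented image;
    # returns (PhotoImage for display, full-size PIL image, display-size PIL image).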
global multi_results,kernersizes#,pixelmmratio,kernersizes
global font
labels=tup[0]
counts=tup[1]
if len(mappath)>0:
colortable=tkintercorestat.get_mapcolortable(labels,elesize.copy(),labellist.copy())
else:
colortable=tup[2]
#colortable=labeldict[itervalue]['colortable']
    if refarea is not None:
colortable.update({65535:'Ref'})
labels[refarea]=65535
#labeldict=tup[0]
coinparts=tup[3]
filename=tup[4]
#currlabeldict=labeldict['iter'+str(int(itervar)-1)]
#print(currlabeldict)
#labels=currlabeldict['labels']
#counts=currlabeldict['counts']
#colortable=currlabeldict['colortable']
uniquelabels=list(colortable.keys())
originfile,extension=os.path.splitext(filename)
imgrsc=cv2.imread(filename,flags=cv2.IMREAD_ANYCOLOR)
imgrsc=cv2.cvtColor(imgrsc,cv2.COLOR_BGR2RGB)
imgrsc=cv2.resize(imgrsc,(labels.shape[1],labels.shape[0]),interpolation=cv2.INTER_LINEAR)
image=Image.fromarray(imgrsc)
if whext==True:
# blkbkg=np.zeros((labels.shape[0],labels.shape[1],3),dtype='float')
whbkg=np.zeros((labels.shape[0],labels.shape[1],3),dtype='float')
whbkg[:,:,:]=[255,255,255]
itemlocs=np.where(labels!=0)
# blkbkg[itemlocs]=imgrsc[itemlocs]
whbkg[itemlocs]=imgrsc[itemlocs]
image=Image.fromarray(whbkg.astype('uint8'))
if blkext==True:
blkbkg=np.zeros((labels.shape[0],labels.shape[1],3),dtype='float')
itemlocs=np.where(labels!=0)
blkbkg[itemlocs]=imgrsc[itemlocs]
image=Image.fromarray(blkbkg.astype('uint8'))
#print('showcounting img',image.size)
#image.save('beforeresize.gif',append_images=[image])
#image=image.resize([labels.shape[1],labels.shape[0]],resample=Image.BILINEAR)
print('showcounting_resize',image.size)
image.save('beforlabel.gif',append_images=[image])
draw=ImageDraw.Draw(image)
#font=ImageFont.load_default()
sizeuniq,sizecounts=np.unique(labels,return_counts=True)
minsize=min(image.size[0],image.size[1])
suggsize=int(minsize**0.5)
# if suggsize>22:
# suggsize=22
# if suggsize<14:
# suggsize=14
#suggsize=8
#print('fontsize',suggsize)
# suggsize=22
font=ImageFont.truetype('cmb10.ttf',size=suggsize)
#if labels.shape[1]<850:
# font=ImageFont.truetype('cmb10.ttf',size=16)
#else:
# font=ImageFont.truetype('cmb10.ttf',size=22)
if len(coinparts)>0:
tempband=np.zeros(labels.shape)
coinkeys=coinparts.keys()
for coin in coinkeys:
coinlocs=coinparts[coin]
tempband[coinlocs]=1
global recborder
for uni in uniquelabels:
if uni!=0:
uni=colortable[uni]
if uni=='Ref':
pixelloc = np.where(labels == 65535)
else:
pixelloc = np.where(labels == uni)
try:
ulx = min(pixelloc[1])
except:
print('no pixellloc[1] on uni=',uni)
print('pixelloc =',pixelloc)
continue
uly = min(pixelloc[0])
rlx = max(pixelloc[1])
rly = max(pixelloc[0])
midx = ulx + int((rlx - ulx) / 2)
midy = uly + int((rly - uly) / 2)
print(ulx, uly, rlx, rly)
if frame==True:
draw.polygon([(ulx,uly),(rlx,uly),(rlx,rly),(ulx,rly)],outline='red')
if number==True:
if uni in colortable:
canvastext = str(colortable[uni])
else:
# canvastext = 'No label'
canvastext=uni
canvastext=str(canvastext)
if imgtypevar.get()=='0':
draw.text((midx-1, midy+1), text=canvastext, font=font, fill='white')
draw.text((midx+1, midy+1), text=canvastext, font=font, fill='white')
draw.text((midx-1, midy-1), text=canvastext, font=font, fill='white')
draw.text((midx+1, midy-1), text=canvastext, font=font, fill='white')
#draw.text((midx,midy),text=canvastext,font=font,fill=(141,2,31,0))
draw.text((midx,midy),text=canvastext,font=font,fill='black')
if header==True:
if refarea is not None:
content='item count:'+str(len(uniquelabels)-1)+'\n File: '+filename
else:
content='item count:'+str(len(uniquelabels))+'\n File: '+filename
contentlength=len(content)+50
#rectext=canvas.create_text(10,10,fill='black',font='Times 16',text=content,anchor=NW)
draw.text((10-1, 10+1), text=content, font=font, fill='white')
draw.text((10+1, 10+1), text=content, font=font, fill='white')
draw.text((10-1, 10-1), text=content, font=font, fill='white')
draw.text((10+1, 10-1), text=content, font=font, fill='white')
#draw.text((10,10),text=content,font=font,fill=(141,2,31,0))
draw.text((10,10),text=content,font=font,fill='black')
#image.save(originfile+'-countresult'+extension,"JPEG")
#firstimg=Multigraybands[currentfilename]
#height,width=firstimg.size
height,width,channel=displaybandarray[filename]['LabOstu'].shape
ratio=findratio([height,width],[screenstd,screenstd])
#if labels.shape[0]*labels.shape[1]<850*850:
# disimage=image.resize([int(labels.shape[1]*ratio),int(labels.shape[0]*ratio)],resample=Image.BILINEAR)
#else:
# disimage=image.resize([int(labels.shape[1]/ratio),int(labels.shape[0]/ratio)],resample=Image.BILINEAR)
print('show counting ratio',ratio)
if height*width<screenstd*screenstd:
print('showcounting small')
disimage=image.resize([int(width*ratio),int(height*ratio)],resample=Image.BILINEAR)
else:
print('showcounting big')
disimage=image.resize([int(width/ratio),int(height/ratio)],resample=Image.BILINEAR)
print('showcounting shape',disimage.size)
displayoutput=ImageTk.PhotoImage(disimage)
disimage.save('output.gif',append_images=[disimage])
#image.save('originoutput.gif',append_images=[image])
return displayoutput,image,disimage
#displayimg['Output']=displayoutput
#changedisplayimg(imageframe,'Output')
#time.sleep(5)
#image.show()
def changeoutputimg(file,intnum):
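    # Switch the 'Output' display to the stored result image of the requested iteration.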
outputimg=outputimgdict[file]['iter'+str(int(intnum)-1)]
tempdict={}
tempdict.update({'Size':displayimg['ColorIndices']['Size']})
tempdict.update({'Image':outputimg})
displayimg['Output']=tempdict
changedisplayimg(imageframe,'Output')
def export_ext(iterver,path,whext=False,blkext=False):
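    # Export size/segment/label result images (with white or black background extraction)
    # for every processed file, measuring each kernel's length along its longest boundary
    # chord and its width along the near-perpendicular chord.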
suggsize=8
print('fontsize',suggsize)
smallfont=ImageFont.truetype('cmb10.ttf',size=suggsize)
files=multi_results.keys()
# path=filedialog.askdirectory()
for file in files:
labeldict=multi_results[file][0]
totalitervalue=len(list(labeldict.keys()))
#itervalue='iter'+str(int(iterver.get())-1)
#itervalue='iter'+str(totalitervalue-1)
#itervalue=int(iterver.get())
itervalue='iter'+iterver
print(itervalue)
print(labeldict)
labels=labeldict[itervalue]['labels']
counts=labeldict[itervalue]['counts']
if len(mappath)>0:
colortable=tkintercorestat.get_mapcolortable(labels,elesize.copy(),labellist.copy())
else:
colortable=labeldict[itervalue]['colortable']
#originheight,originwidth=Multigraybands[file].size
#copylabels=np.copy(labels)
#copylabels[refarea]=65535
#labels=cv2.resize(copylabels.astype('float32'),dsize=(originwidth,originheight),interpolation=cv2.INTER_LINEAR)
head_tail=os.path.split(file)
originfile,extension=os.path.splitext(head_tail[1])
if len(path)>0:
tup=(labels,counts,colortable,[],currentfilename)
_band,segimg,small_segimg=showcounting(tup,False,True,True,whext,blkext)
#imageband=outputimgbands[file][itervalue]
imageband=segimg
draw=ImageDraw.Draw(imageband)
uniquelabels=list(colortable.keys())
tempdict={}
if refarea is not None:
specarea=float(sizeentry.get())
pixelmmratio=(specarea/len(refarea[0]))**0.5
else:
pixelmmratio=1.0
#print('coinsize',coinsize.get(),'pixelmmratio',pixelmmratio)
print('pixelmmratio',pixelmmratio)
for uni in uniquelabels:
if uni !=0:
tempuni=colortable[uni]
if tempuni=='Ref':
pixelloc=np.where(labels==65535)
else:
pixelloc = np.where(labels == float(uni))
try:
ulx = min(pixelloc[1])
except:
continue
uly = min(pixelloc[0])
rlx = max(pixelloc[1])
rly = max(pixelloc[0])
print(ulx, uly, rlx, rly)
midx = ulx + int((rlx - ulx) / 2)
midy = uly + int((rly - uly) / 2)
length={}
currborder=tkintercore.get_boundaryloc(labels,uni)
for i in range(len(currborder[0])):
for j in range(i+1,len(currborder[0])):
templength=float(((currborder[0][i]-currborder[0][j])**2+(currborder[1][i]-currborder[1][j])**2)**0.5)
length.update({(i,j):templength})
sortedlength=sorted(length,key=length.get,reverse=True)
try:
topcouple=sortedlength[0]
except:
continue
kernellength=length[topcouple]
i=topcouple[0]
j=topcouple[1]
x0=currborder[1][i]
y0=currborder[0][i]
x1=currborder[1][j]
y1=currborder[0][j]
#slope=float((y0-y1)/(x0-x1))
#linepoints=[(currborder[1][i],currborder[0][i]),(currborder[1][j],currborder[0][j])]
#draw.line(linepoints,fill='yellow')
#points=linepixels(currborder[1][i],currborder[0][i],currborder[1][j],currborder[0][j])
lengthpoints=cal_kernelsize.bresenhamline(x0,y0,x1,y1) #x0,y0,x1,y1
for point in lengthpoints:
if imgtypevar.get()=='0':
draw.point([int(point[0]),int(point[1])],fill='yellow')
# abovecenter=[]
# lowercenter=[]
# for i in range(len(currborder[0])):
# for j in range(len(lengthpoints)):
# if currborder[0][i]<lengthpoints[j][1]:
# lowercenter.append((currborder[1][i],currborder[0][i])) #append(x,y)
# break
# loc=(currborder[1][i],currborder[0][i])
# if loc not in abovecenter and loc not in lowercenter:
# abovecenter.append(loc)
othodict={}
# widthdict={}
for i in range(len(currborder[0])):
for j in range(i+1,len(currborder[0])):
wx0=currborder[1][i]
wy0=currborder[0][i]
wx1=currborder[1][j]
wy1=currborder[0][j]
u1=x1-x0
u2=y1-y0
v1=wx1-wx0
v2=wy1-wy0
otho=abs(u1*v1+u2*v2)/(((u1**2+u2**2)**0.5)*(v1**2+v2**2)**0.5)
wlength=float((wx0-wx1)**2+(wy0-wy1)**2)**0.5
if otho<=0.13:
othodict.update({(wx0,wy0,wx1,wy1):wlength})
sortedwidth=sorted(othodict,key=othodict.get,reverse=True)
try:
topwidth=sortedwidth[0]
except:
continue
widepoints=cal_kernelsize.bresenhamline(topwidth[0],topwidth[1],topwidth[2],topwidth[3])
for point in widepoints:
if imgtypevar.get()=='0':
draw.point([int(point[0]),int(point[1])],fill='black')
width=othodict[topwidth]
print('width',width,'length',kernellength)
print('kernelwidth='+str(width*pixelmmratio))
print('kernellength='+str(kernellength*pixelmmratio))
#print('kernelwidth='+str(kernelwidth*pixelmmratio))
tempdict.update({colortable[uni]:[kernellength,width,pixelmmratio**2*len(pixelloc[0]),kernellength*pixelmmratio,width*pixelmmratio]})
#if uni in colortable:
canvastext = str(colortable[uni])
#else:
# canvastext = uni
if imgtypevar.get()=='0':
draw.text((midx-1, midy+1), text=canvastext, font=smallfont, fill='white')
draw.text((midx+1, midy+1), text=canvastext, font=smallfont, fill='white')
draw.text((midx-1, midy-1), text=canvastext, font=smallfont, fill='white')
draw.text((midx+1, midy-1), text=canvastext, font=smallfont, fill='white')
#draw.text((midx,midy),text=canvastext,font=font,fill=(141,2,31,0))
draw.text((midx,midy),text=canvastext,font=smallfont,fill='black')
#print(event.x, event.y, labels[event.x, event.y], ulx, uly, rlx, rly)
#recborder = canvas.create_rectangle(ulx, uly, rlx, rly, outline='red')
#drawcontents.append(recborder)
kernersizes.update({file:tempdict})
originheight,originwidth=Multigraybands[file].size
image=imageband.resize([originwidth,originheight],resample=Image.BILINEAR)
extcolor=""
if whext==True:
extcolor= "-extwht"
if blkext==True:
extcolor="-extblk"
image.save(path+'/'+originfile+extcolor+'-sizeresult'+'.png',"PNG")
tup=(labels,counts,colortable,[],currentfilename)
_band,segimg,small_segimg=showcounting(tup,False,True,True,whext,blkext)
segimage=segimg.resize([originwidth,originheight],resample=Image.BILINEAR)
segimage.save(path+'/'+originfile+extcolor+'-segmentresult'+'.png',"PNG")
_band,segimg,small_segimg=showcounting(tup,True,True,True,whext,blkext)
segimage=segimg.resize([originwidth,originheight],resample=Image.BILINEAR)
segimage.save(path+'/'+originfile+extcolor+'-labelresult'+'.png',"PNG")
def export_result(iterver):
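    # Ask for an output folder, write per-kernel pixel locations to CSV, then save
    # size/segment/label result images at the original resolution for every processed file.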
global batch
if proc_mode[proc_name].get()=='1':
batchprocess.batch_exportpath()
return
suggsize=8
print('fontsize',suggsize)
smallfont=ImageFont.truetype('cmb10.ttf',size=suggsize)
files=multi_results.keys()
path=filedialog.askdirectory()
root.update()
# export_ext(iterver,path,True,False)
# export_ext(iterver,path,False,True)
for file in files:
labeldict=multi_results[file][0]
totalitervalue=len(list(labeldict.keys()))
#itervalue='iter'+str(int(iterver.get())-1)
#itervalue='iter'+str(totalitervalue-1)
#itervalue=int(iterver.get())
itervalue='iter'+iterver
print(itervalue)
print(labeldict)
labels=labeldict[itervalue]['labels']
counts=labeldict[itervalue]['counts']
if len(mappath)>0:
colortable=tkintercorestat.get_mapcolortable(labels,elesize.copy(),labellist.copy())
else:
colortable=labeldict[itervalue]['colortable']
#originheight,originwidth=Multigraybands[file].size
#copylabels=np.copy(labels)
#copylabels[refarea]=65535
#labels=cv2.resize(copylabels.astype('float32'),dsize=(originwidth,originheight),interpolation=cv2.INTER_LINEAR)
head_tail=os.path.split(file)
originfile,extension=os.path.splitext(head_tail[1])
originimg_crop=cv2.imread(file)
uniquelabels=list(colortable.keys())
originheight,originwidth=Multigraybands[file].size
ratio=int(findratio([512,512],[labels.shape[0],labels.shape[1]]))
        if labels.shape[0]<512:
            cache=(np.zeros((labels.shape[0]*ratio,labels.shape[1]*ratio)),{"f":int(ratio),"stride":int(ratio)})
            convband=tkintercorestat.pool_backward(labels,cache)
        elif labels.shape[0]>512:
            convband=cv2.resize(labels,(512,512),interpolation=cv2.INTER_LINEAR)
        else:
            convband=np.copy(labels)
locfilename=path+'/'+originfile+'-pixellocs.csv'
#from spectral import imshow, view_cube
'''hyperspectral img process'''
# import spectral.io.envi as envi
lesszeroonefive=[]
with open(locfilename,mode='w') as f:
csvwriter=csv.writer(f)
rowcontent=['id','locs']
csvwriter.writerow(rowcontent)
# result_ref=envi.open(head_tail[0]+'/'+originfile+'/results/REFLECTANCE_'+originfile+'.hdr', head_tail[0]+'/'+originfile+'/results/REFLECTANCE_'+originfile+'.dat')
# result_nparr=np.array(result_ref.load())
# corrected_nparr=np.copy(result_nparr)
for uni in uniquelabels:
if uni!=0:
tempuni=colortable[uni]
if tempuni=='Ref':
pixelloc = np.where(convband == 65535)
else:
pixelloc = np.where(convband == float(uni))
# kernelval=corrected_nparr[pixelloc]
# nirs=np.mean(kernelval,axis=0)
# print('nirs 170',nirs[170])
# if nirs[170]<0.15:
# lesszeroonefive.append(uni)
rowcontent=[colortable[uni]]
rowcontent=rowcontent+list(pixelloc[0])
csvwriter.writerow(rowcontent)
rowcontent=[colortable[uni]]
rowcontent=rowcontent+list(pixelloc[1])
csvwriter.writerow(rowcontent)
f.close()
# print(lesszeroonefive)
'''end'''
if len(path)>0:
tup=(labels,counts,colortable,[],currentfilename)
_band,segimg,small_segimg=showcounting(tup,False)
#imageband=outputimgbands[file][itervalue]
imageband=segimg
draw=ImageDraw.Draw(imageband)
uniquelabels=list(colortable.keys())
tempdict={}
if refarea is not None:
specarea=float(sizeentry.get())
pixelmmratio=(specarea/len(refarea[0]))**0.5
else:
pixelmmratio=1.0
#print('coinsize',coinsize.get(),'pixelmmratio',pixelmmratio)
print('pixelmmratio',pixelmmratio)
for uni in uniquelabels:
if uni !=0:
#uni=colortable[uni]
tempuni=colortable[uni]
if tempuni=='Ref':
pixelloc = np.where(labels == 65535)
else:
pixelloc = np.where(labels == float(uni))
try:
ulx = min(pixelloc[1])
except:
continue
uly = min(pixelloc[0])
rlx = max(pixelloc[1])
rly = max(pixelloc[0])
print(ulx, uly, rlx, rly)
midx = ulx + int((rlx - ulx) / 2)
midy = uly + int((rly - uly) / 2)
length={}
currborder=tkintercore.get_boundaryloc(labels,uni)
for i in range(len(currborder[0])):
for j in range(i+1,len(currborder[0])):
templength=float(((currborder[0][i]-currborder[0][j])**2+(currborder[1][i]-currborder[1][j])**2)**0.5)
length.update({(i,j):templength})
sortedlength=sorted(length,key=length.get,reverse=True)
try:
topcouple=sortedlength[0]
except:
continue
kernellength=length[topcouple]
i=topcouple[0]
j=topcouple[1]
x0=currborder[1][i]
y0=currborder[0][i]
x1=currborder[1][j]
y1=currborder[0][j]
#slope=float((y0-y1)/(x0-x1))
linepoints=[(currborder[1][i],currborder[0][i]),(currborder[1][j],currborder[0][j])]
#draw.line(linepoints,fill='yellow')
#points=linepixels(currborder[1][i],currborder[0][i],currborder[1][j],currborder[0][j])
lengthpoints=cal_kernelsize.bresenhamline(x0,y0,x1,y1) #x0,y0,x1,y1
for point in lengthpoints:
if imgtypevar.get()=='0':
draw.point([int(point[0]),int(point[1])],fill='yellow')
othodict={}
# widthdict={}
for i in range(len(currborder[0])):
for j in range(i+1,len(currborder[0])):
wx0=currborder[1][i]
wy0=currborder[0][i]
wx1=currborder[1][j]
wy1=currborder[0][j]
u1=x1-x0
u2=y1-y0
v1=wx1-wx0
v2=wy1-wy0
otho=abs(u1*v1+u2*v2)/(((u1**2+u2**2)**0.5)*(v1**2+v2**2)**0.5)
wlength=float((wx0-wx1)**2+(wy0-wy1)**2)**0.5
if otho<=0.13:
othodict.update({(wx0,wy0,wx1,wy1):wlength})
sortedwidth=sorted(othodict,key=othodict.get,reverse=True)
try:
topwidth=sortedwidth[0]
except:
continue
widepoints=cal_kernelsize.bresenhamline(topwidth[0],topwidth[1],topwidth[2],topwidth[3])
for point in widepoints:
if imgtypevar.get()=='0':
draw.point([int(point[0]),int(point[1])],fill='black')
width=othodict[topwidth]
print('width',width,'length',kernellength)
print('kernelwidth='+str(width*pixelmmratio))
print('kernellength='+str(kernellength*pixelmmratio))
#print('kernelwidth='+str(kernelwidth*pixelmmratio))
tempdict.update({colortable[uni]:[kernellength,width,pixelmmratio**2*len(pixelloc[0]),kernellength*pixelmmratio,width*pixelmmratio]})
#if uni in colortable:
canvastext = str(colortable[uni])
# else:
# canvastext = 'No label'
# canvastext = uni
if imgtypevar.get()=='0':
if uni in lesszeroonefive:
draw.text((midx-1, midy+1), text=canvastext, font=smallfont, fill='white')
draw.text((midx+1, midy+1), text=canvastext, font=smallfont, fill='white')
draw.text((midx-1, midy-1), text=canvastext, font=smallfont, fill='white')
draw.text((midx+1, midy-1), text=canvastext, font=smallfont, fill='white')
#draw.text((midx,midy),text=canvastext,font=font,fill=(141,2,31,0))
draw.text((midx,midy),text=canvastext,font=smallfont,fill='red')
else:
draw.text((midx-1, midy+1), text=canvastext, font=smallfont, fill='white')
draw.text((midx+1, midy+1), text=canvastext, font=smallfont, fill='white')
draw.text((midx-1, midy-1), text=canvastext, font=smallfont, fill='white')
draw.text((midx+1, midy-1), text=canvastext, font=smallfont, fill='white')
#draw.text((midx,midy),text=canvastext,font=font,fill=(141,2,31,0))
draw.text((midx,midy),text=canvastext,font=smallfont,fill='black')
#print(event.x, event.y, labels[event.x, event.y], ulx, uly, rlx, rly)
#recborder = canvas.create_rectangle(ulx, uly, rlx, rly, outline='red')
#drawcontents.append(recborder)
kernersizes.update({file:tempdict})
image=imageband.resize([originwidth,originheight],resample=Image.BILINEAR)
image.save(path+'/'+originfile+'-sizeresult'+'.png',"PNG")
tup=(labels,counts,colortable,[],currentfilename)
_band,segimg,small_segimg=showcounting(tup,False)
segimage=segimg.resize([originwidth,originheight],resample=Image.BILINEAR)
segimage.save(path+'/'+originfile+'-segmentresult'+'.png',"PNG")
_band,segimg,small_segimg=showcounting(tup,True)
segimage=segimg.resize([originwidth,originheight],resample=Image.BILINEAR)
segimage.save(path+'/'+originfile+'-labelresult'+'.png',"PNG")
            originrestoredband=np.copy(labels)
import glob
import os
import subprocess,shlex,shutil
import sys
from astropy.io import fits
from spectral_cube import SpectralCube
import numpy as np
################
#Parameters
#define the number of subcubes per axis
splitfactor=7
#specify source cube location
sourcefile='/avatar/nickill/smc/diagnostic_cubes/smc_masked_0.07.fits'
#Naomis original smc cube: '/avatar/naomi/ASKAP/SMC/SB_8906/SMC_8906.lsr.K.fits'
cubenameprefix='/avatar/nickill/smc/grid_cubes/smc_grid7x7_masked'
wholecube=fits.open(sourcefile)
print(wholecube[0].shape)
###################
###################
##Find dimensions
xlen=len(wholecube[0].data[0,:,0])
ylen=len(wholecube[0].data[0,0,:])
xax=[]
for i in np.arange(splitfactor+1):
xax=np.append(xax,i*xlen/splitfactor)
yax=[]
for i in np.arange(splitfactor+1):
yax=np.append(yax,i*ylen/splitfactor)
#yax=[0,ylen/3,(ylen/3)*2,ylen]
wholecube.close()
##################
##################
#Make mom0 to overlay regions on and split off subregions
wholecube=SpectralCube.read(sourcefile)
#make the mom0 to overwrite
moment0=wholecube.moment(order=0)
for j in np.arange(0,splitfactor):
    for i in np.arange(0,splitfactor):
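        # NOTE: the body of this loop is missing in the source; the lines below are an
        # assumed sketch of the intended subcube extraction and write-out.
        subcube=wholecube[:,int(xax[i]):int(xax[i+1]),int(yax[j]):int(yax[j+1])]
        subcube.write(cubenameprefix+'_'+str(i)+'_'+str(j)+'.fits',overwrite=True)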
from __future__ import print_function
import copy
import os
import sys
import time
import unittest
from nose.plugins.skip import SkipTest
from nose.tools import assert_raises
import numpy
from six.moves import xrange
import theano
from theano import tensor, config
from theano.sandbox import rng_mrg
from theano.sandbox.rng_mrg import MRG_RandomStreams
from theano.sandbox.cuda import cuda_available
from theano.tests import unittest_tools as utt
from theano.tests.unittest_tools import attr
if cuda_available:
from theano.sandbox.cuda import float32_shared_constructor
# TODO: test gpu
# Done in test_consistency_GPU_{serial,parallel}
# TODO: test MRG_RandomStreams
# Partly done in test_consistency_randomstreams
# TODO: test optimizer mrg_random_make_inplace
# TODO: make tests work when no flags given. Now need:
# THEANO_FLAGS=device=gpu0,floatX=float32
# Partly done, in test_consistency_GPU_{serial,parallel}
mode = config.mode
mode_with_gpu = theano.compile.mode.get_default_mode().including('gpu')
utt.seed_rng()
# Results generated by Java code using L'Ecuyer et al.'s code, with:
# main seed: [12345]*6 (default)
# 12 streams
# 7 substreams for each stream
# 5 samples drawn from each substream
java_samples = numpy.loadtxt(os.path.join(os.path.split(theano.__file__)[0],
'sandbox',
'samples_MRG31k3p_12_7_5.txt'))
def test_deterministic():
seed = utt.fetch_seed()
sample_size = (10, 20)
test_use_cuda = [False]
if cuda_available:
test_use_cuda.append(True)
for use_cuda in test_use_cuda:
# print 'use_cuda =', use_cuda
R = MRG_RandomStreams(seed=seed, use_cuda=use_cuda)
u = R.uniform(size=sample_size)
f = theano.function([], u)
fsample1 = f()
fsample2 = f()
assert not numpy.allclose(fsample1, fsample2)
R2 = MRG_RandomStreams(seed=seed, use_cuda=use_cuda)
u2 = R2.uniform(size=sample_size)
g = theano.function([], u2)
gsample1 = g()
gsample2 = g()
assert numpy.allclose(fsample1, gsample1)
assert numpy.allclose(fsample2, gsample2)
def test_consistency_randomstreams():
"""
Verify that the random numbers generated by MRG_RandomStreams
are the same as the reference (Java) implementation by L'Ecuyer et al.
"""
seed = 12345
n_samples = 5
n_streams = 12
n_substreams = 7
test_use_cuda = [False]
if cuda_available:
test_use_cuda.append(True)
for use_cuda in test_use_cuda:
# print 'use_cuda =', use_cuda
samples = []
rng = MRG_RandomStreams(seed=seed, use_cuda=use_cuda)
for i in range(n_streams):
stream_samples = []
u = rng.uniform(size=(n_substreams,), nstreams=n_substreams)
f = theano.function([], u)
for j in range(n_samples):
s = f()
stream_samples.append(s)
stream_samples = numpy.array(stream_samples)
stream_samples = stream_samples.T.flatten()
samples.append(stream_samples)
samples = numpy.array(samples).flatten()
assert(numpy.allclose(samples, java_samples))
def test_consistency_cpu_serial():
"""
Verify that the random numbers generated by mrg_uniform, serially,
are the same as the reference (Java) implementation by L'Ecuyer et al.
"""
seed = 12345
n_samples = 5
n_streams = 12
n_substreams = 7
samples = []
curr_rstate = numpy.array([seed] * 6, dtype='int32')
for i in range(n_streams):
stream_rstate = curr_rstate.copy()
for j in range(n_substreams):
rstate = theano.shared(numpy.array([stream_rstate.copy()],
dtype='int32'))
new_rstate, sample = rng_mrg.mrg_uniform.new(rstate, ndim=None,
dtype=config.floatX,
size=(1,))
# Not really necessary, just mimicking
# rng_mrg.MRG_RandomStreams' behavior
sample.rstate = rstate
sample.update = (rstate, new_rstate)
rstate.default_update = new_rstate
f = theano.function([], sample)
for k in range(n_samples):
s = f()
samples.append(s)
# next substream
stream_rstate = rng_mrg.ff_2p72(stream_rstate)
# next stream
curr_rstate = rng_mrg.ff_2p134(curr_rstate)
samples = numpy.array(samples).flatten()
assert(numpy.allclose(samples, java_samples))
def test_consistency_cpu_parallel():
"""
Verify that the random numbers generated by mrg_uniform, in parallel,
are the same as the reference (Java) implementation by L'Ecuyer et al.
"""
seed = 12345
n_samples = 5
n_streams = 12
n_substreams = 7 # 7 samples will be drawn in parallel
samples = []
curr_rstate = numpy.array([seed] * 6, dtype='int32')
for i in range(n_streams):
stream_samples = []
rstate = [curr_rstate.copy()]
for j in range(1, n_substreams):
rstate.append(rng_mrg.ff_2p72(rstate[-1]))
rstate = numpy.asarray(rstate)
rstate = theano.shared(rstate)
new_rstate, sample = rng_mrg.mrg_uniform.new(rstate, ndim=None,
dtype=config.floatX,
size=(n_substreams,))
# Not really necessary, just mimicking
# rng_mrg.MRG_RandomStreams' behavior
sample.rstate = rstate
sample.update = (rstate, new_rstate)
rstate.default_update = new_rstate
f = theano.function([], sample)
for k in range(n_samples):
s = f()
stream_samples.append(s)
samples.append(numpy.array(stream_samples).T.flatten())
# next stream
curr_rstate = rng_mrg.ff_2p134(curr_rstate)
samples = numpy.array(samples).flatten()
assert(numpy.allclose(samples, java_samples))
def test_consistency_GPU_serial():
"""
Verify that the random numbers generated by GPU_mrg_uniform, serially,
are the same as the reference (Java) implementation by L'Ecuyer et al.
"""
if not cuda_available:
raise SkipTest('Optional package cuda not available')
if config.mode == 'FAST_COMPILE':
mode = 'FAST_RUN'
else:
mode = config.mode
seed = 12345
n_samples = 5
n_streams = 12
n_substreams = 7
samples = []
curr_rstate = numpy.array([seed] * 6, dtype='int32')
for i in range(n_streams):
stream_rstate = curr_rstate.copy()
for j in range(n_substreams):
substream_rstate = numpy.array(stream_rstate.copy(), dtype='int32')
# HACK - we transfer these int32 to the GPU memory as float32
# (reinterpret_cast)
tmp_float_buf = numpy.frombuffer(substream_rstate.data,
dtype='float32')
# Transfer to device
rstate = float32_shared_constructor(tmp_float_buf)
new_rstate, sample = rng_mrg.GPU_mrg_uniform.new(rstate, ndim=None,
dtype='float32',
size=(1,))
rstate.default_update = new_rstate
# Not really necessary, just mimicking
# rng_mrg.MRG_RandomStreams' behavior
sample.rstate = rstate
sample.update = (rstate, new_rstate)
# We need the sample back in the main memory
cpu_sample = tensor.as_tensor_variable(sample)
f = theano.function([], cpu_sample, mode=mode)
for k in range(n_samples):
s = f()
samples.append(s)
# next substream
stream_rstate = rng_mrg.ff_2p72(stream_rstate)
# next stream
curr_rstate = rng_mrg.ff_2p134(curr_rstate)
samples = numpy.array(samples).flatten()
assert(numpy.allclose(samples, java_samples))
def test_consistency_GPU_parallel():
"""
Verify that the random numbers generated by GPU_mrg_uniform, in
parallel, are the same as the reference (Java) implementation by
    L'Ecuyer et al.
"""
if not cuda_available:
raise SkipTest('Optional package cuda not available')
if config.mode == 'FAST_COMPILE':
mode = 'FAST_RUN'
else:
mode = config.mode
seed = 12345
n_samples = 5
n_streams = 12
n_substreams = 7 # 7 samples will be drawn in parallel
samples = []
curr_rstate = numpy.array([seed] * 6, dtype='int32')
for i in range(n_streams):
stream_samples = []
rstate = [curr_rstate.copy()]
for j in range(1, n_substreams):
rstate.append(rng_mrg.ff_2p72(rstate[-1]))
rstate = numpy.asarray(rstate).flatten()
# HACK - transfer these int32 to the GPU memory as float32
# (reinterpret_cast)
tmp_float_buf = numpy.frombuffer(rstate.data, dtype='float32')
# Transfer to device
rstate = float32_shared_constructor(tmp_float_buf)
new_rstate, sample = rng_mrg.GPU_mrg_uniform.new(rstate, ndim=None,
dtype='float32',
size=(n_substreams,))
rstate.default_update = new_rstate
# Not really necessary, just mimicking
# rng_mrg.MRG_RandomStreams' behavior
sample.rstate = rstate
sample.update = (rstate, new_rstate)
# We need the sample back in the main memory
cpu_sample = tensor.as_tensor_variable(sample)
f = theano.function([], cpu_sample, mode=mode)
for k in range(n_samples):
s = f()
stream_samples.append(s)
samples.append(numpy.array(stream_samples).T.flatten())
# next stream
curr_rstate = rng_mrg.ff_2p134(curr_rstate)
samples = numpy.array(samples).flatten()
assert(numpy.allclose(samples, java_samples))
def test_GPU_nstreams_limit():
"""
Verify that a ValueError is raised when n_streams
is greater than 2**20 on GPU. This is the value of
(NUM_VECTOR_OP_THREADS_PER_BLOCK * NUM_VECTOR_OP_BLOCKS).
"""
if not cuda_available:
raise SkipTest('Optional package cuda not available')
seed = 12345
R = MRG_RandomStreams(seed=seed, use_cuda=True)
def eval_uniform(size, nstreams):
if theano.config.mode == "FAST_COMPILE":
mode = "FAST_RUN"
else:
mode = copy.copy(theano.compile.get_default_mode())
mode.check_py_code = False
out = R.uniform(size=size, nstreams=nstreams, dtype='float32')
f = theano.function([], out, mode=mode)
return f()
eval_uniform((10,), 2**20)
assert_raises(ValueError, eval_uniform, (10,), 2**20 + 1)
def test_consistency_GPUA_serial():
"""
Verify that the random numbers generated by GPUA_mrg_uniform, serially,
    are the same as the reference (Java) implementation by L'Ecuyer et al.
"""
from theano.sandbox.gpuarray.tests.test_basic_ops import \
mode_with_gpu as mode
from theano.sandbox.gpuarray.type import gpuarray_shared_constructor
seed = 12345
n_samples = 5
n_streams = 12
n_substreams = 7
samples = []
curr_rstate = numpy.array([seed] * 6, dtype='int32')
for i in range(n_streams):
stream_rstate = curr_rstate.copy()
for j in range(n_substreams):
substream_rstate = numpy.array([stream_rstate.copy()],
dtype='int32')
# Transfer to device
rstate = gpuarray_shared_constructor(substream_rstate)
new_rstate, sample = rng_mrg.GPUA_mrg_uniform.new(rstate,
ndim=None,
dtype='float32',
size=(1,))
rstate.default_update = new_rstate
# Not really necessary, just mimicking
# rng_mrg.MRG_RandomStreams' behavior
sample.rstate = rstate
sample.update = (rstate, new_rstate)
# We need the sample back in the main memory
cpu_sample = tensor.as_tensor_variable(sample)
f = theano.function([], cpu_sample, mode=mode)
for k in range(n_samples):
s = f()
samples.append(s)
# next substream
stream_rstate = rng_mrg.ff_2p72(stream_rstate)
# next stream
curr_rstate = rng_mrg.ff_2p134(curr_rstate)
samples = numpy.array(samples).flatten()
assert(numpy.allclose(samples, java_samples))
def test_consistency_GPUA_parallel():
"""
Verify that the random numbers generated by GPUA_mrg_uniform, in
parallel, are the same as the reference (Java) implementation by
    L'Ecuyer et al.
"""
from theano.sandbox.gpuarray.tests.test_basic_ops import \
mode_with_gpu as mode
from theano.sandbox.gpuarray.type import gpuarray_shared_constructor
seed = 12345
n_samples = 5
n_streams = 12
n_substreams = 7 # 7 samples will be drawn in parallel
samples = []
curr_rstate = numpy.array([seed] * 6, dtype='int32')
for i in range(n_streams):
stream_samples = []
rstate = [curr_rstate.copy()]
for j in range(1, n_substreams):
rstate.append(rng_mrg.ff_2p72(rstate[-1]))
rstate = numpy.asarray(rstate)
rstate = gpuarray_shared_constructor(rstate)
new_rstate, sample = rng_mrg.GPUA_mrg_uniform.new(rstate, ndim=None,
dtype='float32',
size=(n_substreams,))
rstate.default_update = new_rstate
# Not really necessary, just mimicking
# rng_mrg.MRG_RandomStreams' behavior
sample.rstate = rstate
sample.update = (rstate, new_rstate)
# We need the sample back in the main memory
cpu_sample = tensor.as_tensor_variable(sample)
f = theano.function([], cpu_sample, mode=mode)
for k in range(n_samples):
s = f()
stream_samples.append(s)
samples.append(numpy.array(stream_samples).T.flatten())
# next stream
curr_rstate = rng_mrg.ff_2p134(curr_rstate)
samples = numpy.array(samples).flatten()
assert(numpy.allclose(samples, java_samples))
def basictest(f, steps, sample_size, prefix="", allow_01=False, inputs=None,
target_avg=0.5, target_std=None, mean_rtol=0.01, std_tol=0.01):
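    # Draw `steps` batches of samples from f and check the empirical mean, std and
    # value range against the requested targets.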
if inputs is None:
inputs = []
dt = 0.0
avg_var = 0.0
for i in xrange(steps):
t0 = time.time()
ival = f(*inputs)
assert ival.shape == sample_size
dt += time.time() - t0
ival = numpy.asarray(ival)
if i == 0:
mean = numpy.array(ival, copy=True)
avg_var = numpy.mean((ival - target_avg) ** 2)
min_ = ival.min()
max_ = ival.max()
else:
alpha = 1.0 / (1 + i)
mean = alpha * ival + (1 - alpha) * mean
avg_var = (alpha * numpy.mean((ival - target_avg) ** 2) +
(1 - alpha) * avg_var)
min_ = min(min_, ival.min())
max_ = max(max_, ival.max())
if not allow_01:
assert min_ > 0
assert max_ < 1
    if hasattr(target_avg, 'shape'):  # check whether target_avg is an array
diff = numpy.mean(abs(mean - target_avg))
# print prefix, 'mean diff with mean', diff
assert numpy.all(diff < mean_rtol * (1 + abs(target_avg))), (
'bad mean? %s %s' % (mean, target_avg))
else:
# if target_avg is a scalar, then we can do the mean of
# `mean` to get something more precise
mean = numpy.mean(mean)
# print prefix, 'mean', mean
assert abs(mean - target_avg) < mean_rtol * (1 + abs(target_avg)), (
'bad mean? %f %f' % (mean, target_avg))
std = numpy.sqrt(avg_var)
# print prefix, 'var', avg_var
# print prefix, 'std', std
if target_std is not None:
assert abs(std - target_std) < std_tol * (1 + abs(target_std)), (
'bad std? %f %f %f' % (std, target_std, std_tol))
# print prefix, 'time', dt
# print prefix, 'elements', steps * sample_size[0] * sample_size[1]
# print prefix, 'samples/sec', steps * sample_size[0] * sample_size[1] / dt
# print prefix, 'min', min_, 'max', max_
def test_uniform():
# TODO: test param low, high
# TODO: test size=None
# TODO: test ndim!=size.ndim
# TODO: test bad seed
# TODO: test size=Var, with shape that change from call to call
if (mode in ['DEBUG_MODE', 'DebugMode', 'FAST_COMPILE'] or
mode == 'Mode' and config.linker in ['py']):
sample_size = (10, 100)
steps = 50
else:
sample_size = (500, 50)
steps = int(1e3)
x = tensor.matrix()
for size, const_size, var_input, input in [
(sample_size, sample_size, [], []),
(x.shape, sample_size, [x],
[numpy.zeros(sample_size, dtype=config.floatX)]),
((x.shape[0], sample_size[1]), sample_size, [x],
[numpy.zeros(sample_size, dtype=config.floatX)]),
# test empty size (scalar)
((), (), [], []),
]:
# TEST CPU IMPLEMENTATION
# The python and C implementation are tested with DebugMode
# print ''
# print 'ON CPU with size=(%s):' % str(size)
x = tensor.matrix()
R = MRG_RandomStreams(234, use_cuda=False)
# Note: we specify `nstreams` to avoid a warning.
# TODO Look for all occurrences of `guess_n_streams` and `30 * 256`
# for such situations: it would be better to instead filter the
# warning using the warning module.
u = R.uniform(size=size,
nstreams=rng_mrg.guess_n_streams(size, warn=False))
f = theano.function(var_input, u, mode=mode)
assert any([isinstance(node.op, theano.sandbox.rng_mrg.mrg_uniform)
for node in f.maker.fgraph.toposort()])
# theano.printing.debugprint(f)
cpu_out = f(*input)
# print 'CPU: random?[:10], random?[-10:]'
# print cpu_out[0, 0:10]
# print cpu_out[-1, -10:]
# Increase the number of steps if sizes implies only a few samples
if numpy.prod(const_size) < 10:
steps_ = steps * 100
else:
steps_ = steps
basictest(f, steps_, const_size, prefix='mrg cpu', inputs=input)
if mode != 'FAST_COMPILE' and cuda_available:
# print ''
# print 'ON GPU with size=(%s):' % str(size)
R = MRG_RandomStreams(234, use_cuda=True)
u = R.uniform(size=size, dtype='float32',
nstreams=rng_mrg.guess_n_streams(size, warn=False))
# well, it's really that this test w GPU doesn't make sense otw
assert u.dtype == 'float32'
f = theano.function(var_input, theano.Out(
theano.sandbox.cuda.basic_ops.gpu_from_host(u),
borrow=True), mode=mode_with_gpu)
assert any([isinstance(node.op,
theano.sandbox.rng_mrg.GPU_mrg_uniform)
for node in f.maker.fgraph.toposort()])
# theano.printing.debugprint(f)
gpu_out = numpy.asarray(f(*input))
# print 'GPU: random?[:10], random?[-10:]'
# print gpu_out[0, 0:10]
# print gpu_out[-1, -10:]
basictest(f, steps_, const_size, prefix='mrg gpu', inputs=input)
numpy.testing.assert_array_almost_equal(cpu_out, gpu_out,
decimal=6)
# print ''
# print 'ON CPU w Numpy with size=(%s):' % str(size)
RR = theano.tensor.shared_randomstreams.RandomStreams(234)
uu = RR.uniform(size=size)
ff = theano.function(var_input, uu, mode=mode)
# It's not our problem if numpy generates 0 or 1
basictest(ff, steps_, const_size, prefix='numpy',
allow_01=True, inputs=input)
@attr('slow')
def test_binomial():
# TODO: test size=None, ndim=X
# TODO: test size=X, ndim!=X.ndim
# TODO: test random seed in legal value(!=0 and other)
# TODO: test sample_size not a multiple of guessed #streams
# TODO: test size=Var, with shape that change from call to call
# we test size in a tuple of int and a tensor.shape.
# we test the param p with int.
if (mode in ['DEBUG_MODE', 'DebugMode', 'FAST_COMPILE'] or
mode == 'Mode' and config.linker in ['py']):
sample_size = (10, 50)
steps = 50
rtol = 0.02
else:
sample_size = (500, 50)
steps = int(1e3)
rtol = 0.01
x = tensor.matrix()
for mean in [0.1, 0.5]:
for size, const_size, var_input, input in [
(sample_size, sample_size, [], []),
(x.shape, sample_size, [x],
[numpy.zeros(sample_size, dtype=config.floatX)]),
((x.shape[0], sample_size[1]), sample_size, [x],
[numpy.zeros(sample_size, dtype=config.floatX)]),
# test empty size (scalar)
((), (), [], []),
]:
yield (t_binomial, mean, size, const_size, var_input, input,
steps, rtol)
def t_binomial(mean, size, const_size, var_input, input, steps, rtol):
R = MRG_RandomStreams(234, use_cuda=False)
u = R.binomial(size=size, p=mean)
f = theano.function(var_input, u, mode=mode)
out = f(*input)
# Increase the number of steps if sizes implies only a few samples
if numpy.prod(const_size) < 10:
steps_ = steps * 100
else:
steps_ = steps
basictest(f, steps_, const_size, prefix='mrg cpu',
inputs=input, allow_01=True,
target_avg=mean, mean_rtol=rtol)
if mode != 'FAST_COMPILE' and cuda_available:
R = MRG_RandomStreams(234, use_cuda=True)
u = R.binomial(size=size, p=mean, dtype='float32')
# well, it's really that this test w GPU doesn't make sense otw
assert u.dtype == 'float32'
f = theano.function(var_input, theano.Out(
theano.sandbox.cuda.basic_ops.gpu_from_host(u),
borrow=True), mode=mode_with_gpu)
gpu_out = numpy.asarray(f(*input))
basictest(f, steps_, const_size, prefix='mrg gpu',
inputs=input, allow_01=True,
target_avg=mean, mean_rtol=rtol)
numpy.testing.assert_array_almost_equal(out, gpu_out,
decimal=6)
RR = theano.tensor.shared_randomstreams.RandomStreams(234)
uu = RR.binomial(size=size, p=mean)
ff = theano.function(var_input, uu, mode=mode)
# It's not our problem if numpy generates 0 or 1
basictest(ff, steps_, const_size, prefix='numpy', allow_01=True,
inputs=input, target_avg=mean, mean_rtol=rtol)
@attr('slow')
def test_normal0():
steps = 50
std = 2.
if (mode in ['DEBUG_MODE', 'DebugMode', 'FAST_COMPILE'] or
mode == 'Mode' and config.linker in ['py']):
sample_size = (25, 30)
default_rtol = .02
else:
sample_size = (999, 50)
default_rtol = .01
sample_size_odd = (sample_size[0], sample_size[1] - 1)
x = tensor.matrix()
for size, const_size, var_input, input, avg, rtol, std_tol in [
(sample_size, sample_size, [], [], -5., default_rtol, default_rtol),
(x.shape, sample_size, [x],
[numpy.zeros(sample_size, dtype=config.floatX)],
-5., default_rtol, default_rtol),
((x.shape[0], sample_size[1]), sample_size, [x],
[numpy.zeros(sample_size, dtype=config.floatX)],
-5., default_rtol, default_rtol),
# test odd value
(sample_size_odd, sample_size_odd, [], [], -5.,
default_rtol, default_rtol),
# test odd value
(x.shape, sample_size_odd, [x],
[numpy.zeros(sample_size_odd, dtype=config.floatX)],
-5., default_rtol, default_rtol),
(sample_size, sample_size, [], [],
numpy.arange(numpy.prod(sample_size),
dtype='float32').reshape(sample_size),
10. * std / numpy.sqrt(steps), default_rtol),
# test empty size (scalar)
((), (), [], [], -5., default_rtol, 0.02),
# test with few samples at the same time
((1,), (1,), [], [], -5., default_rtol, 0.02),
((2,), (2,), [], [], -5., default_rtol, 0.02),
((3,), (3,), [], [], -5., default_rtol, 0.02),
]:
# print ''
# print 'ON CPU:'
R = MRG_RandomStreams(234, use_cuda=False)
# Note: we specify `nstreams` to avoid a warning.
n = R.normal(size=size, avg=avg, std=std,
nstreams=rng_mrg.guess_n_streams(size, warn=False))
f = theano.function(var_input, n, mode=mode)
# theano.printing.debugprint(f)
out = f(*input)
# print 'random?[:10]\n', out[0, 0:10]
# Increase the number of steps if size implies only a few samples
if numpy.prod(const_size) < 10:
steps_ = steps * 50
else:
steps_ = steps
basictest(f, steps_, const_size, target_avg=avg, target_std=std,
prefix='mrg ', allow_01=True, inputs=input,
mean_rtol=rtol, std_tol=std_tol)
sys.stdout.flush()
if mode != 'FAST_COMPILE' and cuda_available:
# print ''
# print 'ON GPU:'
R = MRG_RandomStreams(234, use_cuda=True)
n = R.normal(size=size, avg=avg, std=std, dtype='float32',
nstreams=rng_mrg.guess_n_streams(size, warn=False))
# well, it's really that this test w GPU doesn't make sense otw
assert n.dtype == 'float32'
f = theano.function(var_input, theano.Out(
theano.sandbox.cuda.basic_ops.gpu_from_host(n),
borrow=True), mode=mode_with_gpu)
# theano.printing.debugprint(f)
sys.stdout.flush()
gpu_out = numpy.asarray(f(*input))
# print 'random?[:10]\n', gpu_out[0, 0:10]
# print '----'
sys.stdout.flush()
basictest(f, steps_, const_size, target_avg=avg, target_std=std,
prefix='gpu mrg ', allow_01=True, inputs=input,
mean_rtol=rtol, std_tol=std_tol)
            # Need to allow some rounding error as there are float
            # computations that differ between the gpu and the cpu
assert | numpy.allclose(out, gpu_out, rtol=5e-6, atol=5e-6) | numpy.allclose |
# stdlib
from random import randint
# third party
import numpy as np
import pytest
# syft absolute
from syft.core.adp.entity import Entity
from syft.core.adp.vm_private_scalar_manager import VirtualMachinePrivateScalarManager
from syft.core.tensor.autodp.initial_gamma import IntermediateGammaTensor as IGT
from syft.core.tensor.autodp.single_entity_phi import SingleEntityPhiTensor as SEPT
from syft.core.tensor.tensor import Tensor
@pytest.fixture
def ishan() -> Entity:
return Entity(name="Ishan")
@pytest.fixture
def traskmaster() -> Entity:
return Entity(name="Andrew")
@pytest.fixture
def highest() -> int:
return 50
@pytest.fixture
def lowest(highest) -> int:
return -1 * int(highest)
@pytest.fixture
def dims() -> int:
"""This generates a random integer for the number of dimensions in our testing tensors"""
dims = int(max(3, np.random.randint(10) + 3)) # Avoid size 0 and 1
# Failsafe
if dims < 2:
dims += 3
assert dims > 1, "Tensor not large enough for several tests."
return dims
@pytest.fixture
def reference_data(highest, dims) -> np.ndarray:
"""This generates random data to test the equality operators"""
reference_data = np.random.randint(
low=-highest, high=highest, size=(dims, dims), dtype=np.int32
)
assert dims > 1, "Tensor not large enough"
return reference_data
@pytest.fixture
def upper_bound(reference_data: np.ndarray, highest: int) -> np.ndarray:
    """Elementwise upper bound (max_vals) for the reference SEPT: an array filled with `highest`."""
    max_values = np.ones_like(reference_data) * highest
    return max_values
@pytest.fixture
def lower_bound(reference_data: np.ndarray, highest: int) -> np.ndarray:
    """Elementwise lower bound (min_vals) for the reference SEPT: an array filled with `-highest`."""
    min_values = np.ones_like(reference_data) * -highest
    return min_values
@pytest.fixture
def reference_binary_data(dims: int) -> np.ndarray:
"""Generate binary data to test the equality operators with bools"""
binary_data = np.random.randint(2, size=(dims, dims))
return binary_data
@pytest.fixture
def reference_scalar_manager() -> VirtualMachinePrivateScalarManager:
"""Generate a ScalarFactory that will allow GammaTensors to be created."""
reference_scalar_manager = VirtualMachinePrivateScalarManager()
return reference_scalar_manager
@pytest.mark.skip(
reason="Equality works but the current method of checking it throws DeprecationWarnings"
)
def test_eq(
reference_data: np.ndarray,
upper_bound: np.ndarray,
lower_bound: np.ndarray,
ishan: Entity,
) -> None:
"""Test equality between two identical SingleEntityPhiTensors"""
reference_tensor = SEPT(
child=reference_data, entity=ishan, max_vals=upper_bound, min_vals=lower_bound
)
# Duplicate the tensor and check if equality holds
same_tensor = SEPT(
child=reference_data, entity=ishan, max_vals=upper_bound, min_vals=lower_bound
)
also_same_tensor = reference_tensor
assert (
        reference_tensor == same_tensor
).child.all(), "Equality between identical SEPTs fails"
assert (
reference_tensor == also_same_tensor
).child.all(), "Equality between identical SEPTs fails"
return None
def test_eq_public_shape(
reference_data: np.ndarray,
upper_bound: np.ndarray,
lower_bound: np.ndarray,
ishan: Entity,
) -> None:
"""Test equality of SEPT tensor with Public Tensor, and with Public Tensor with a public_shape"""
sept_tensor = SEPT(
child=reference_data, entity=ishan, max_vals=upper_bound, min_vals=lower_bound
)
# Without public shape
normal_tensor: Tensor = Tensor(child=reference_data)
# With public shape
tensor_with_shape = Tensor(child=reference_data, public_shape=reference_data.shape)
assert (
sept_tensor == normal_tensor
).child.all(), "SEPT & Public Tensor equality failed"
assert (
sept_tensor == tensor_with_shape
).child.all(), "SEPT & Public Tensor w/ public shape equality failed"
def test_eq_diff_entities(
reference_data: np.ndarray,
upper_bound: np.ndarray,
lower_bound: np.ndarray,
ishan: Entity,
traskmaster: Entity,
) -> SEPT:
"""Test equality between Private Tensors with different owners. This is currently not implemented."""
tensor1 = SEPT(
child=reference_data, entity=ishan, max_vals=upper_bound, min_vals=lower_bound
)
tensor2 = SEPT(
child=reference_data,
entity=traskmaster,
max_vals=upper_bound,
min_vals=lower_bound,
)
with pytest.raises(NotImplementedError):
return tensor1 == tensor2
def test_eq_ndarray(
reference_data: np.ndarray,
upper_bound: np.ndarray,
lower_bound: np.ndarray,
ishan: Entity,
) -> bool:
"""Test equality between a SEPT and a simple type (int, float, bool, np.ndarray)"""
reference_tensor = SEPT(
child=reference_data, entity=ishan, max_vals=upper_bound, min_vals=lower_bound
)
assert (
reference_tensor == reference_data
).child.all(), "SEPT is apparently not equal to its underlying data."
return True
def test_eq_bool(
reference_binary_data: np.ndarray,
upper_bound: np.ndarray,
lower_bound: np.ndarray,
ishan: Entity,
) -> bool:
"""Test equality between a SEPT and a simple type (int, float, bool, np.ndarray)"""
reference_tensor = SEPT(
child=reference_binary_data,
entity=ishan,
max_vals=upper_bound,
min_vals=lower_bound,
)
assert (reference_tensor == reference_binary_data).child.all(), (
"SEPT is apparently not equal to its underlying " "data."
)
return True
def test_eq_int(
reference_data: np.ndarray,
upper_bound: np.ndarray,
lower_bound: np.ndarray,
ishan: Entity,
) -> bool:
"""Test equality between a SEPT and a simple type (int, float, bool, np.ndarray)"""
reference_tensor = SEPT(
child=reference_data, entity=ishan, max_vals=upper_bound, min_vals=lower_bound
)
assert (
reference_tensor == reference_data
).child.all(), "SEPT is apparently not equal to its underlying data."
return True
def test_ne_values(
reference_data: np.ndarray,
upper_bound: np.ndarray,
lower_bound: np.ndarray,
ishan: Entity,
) -> None:
"""Test non-equality between SEPTs with diff values but the same shape"""
reference_tensor = SEPT(
child=reference_data, entity=ishan, max_vals=upper_bound, min_vals=lower_bound
)
comparison_tensor = SEPT(
child=reference_data + 1,
entity=ishan,
max_vals=upper_bound,
min_vals=lower_bound,
)
assert (
reference_tensor != comparison_tensor
).child.any(), "SEPTs with different values are somehow equal"
return None
@pytest.mark.skipif(dims == 1, reason="Tensor generated did not have two dimensions")
def test_ne_shapes(
reference_data: np.ndarray,
upper_bound: np.ndarray,
lower_bound: np.ndarray,
ishan: Entity,
dims: int,
highest,
) -> None:
"""Test non-equality between SEPTs with different shapes"""
reference_tensor = SEPT(
child=reference_data, entity=ishan, max_vals=upper_bound, min_vals=lower_bound
)
comparison_tensor = SEPT(
child=np.random.randint(
low=-highest, high=highest, size=(dims + 10, dims + 10), dtype=np.int32
),
entity=ishan,
max_vals=np.ones(dims + 10),
min_vals=np.ones(dims + 10),
)
with pytest.raises(Exception):
reference_tensor != comparison_tensor
return None
def test_ne_broadcastability(
reference_data: np.ndarray,
upper_bound: np.ndarray,
lower_bound: np.ndarray,
ishan: Entity,
dims: int,
) -> None:
"""Test to ensure broadcastability of array sizes works"""
reference_tensor = SEPT(
child=reference_data, entity=ishan, max_vals=upper_bound, min_vals=lower_bound
)
comparison_tensor = SEPT(
child=np.random.random((dims, 1)),
entity=ishan,
max_vals=upper_bound,
min_vals=lower_bound,
)
assert reference_tensor != comparison_tensor, "Randomly generated tensors are equal"
def test_ne_diff_entities(
reference_data: np.ndarray,
upper_bound: np.ndarray,
lower_bound: np.ndarray,
ishan: Entity,
traskmaster: Entity,
) -> None:
"""Test non-equality between SEPTs of different entities"""
reference_tensor = SEPT(
child=reference_data, entity=ishan, max_vals=upper_bound, min_vals=lower_bound
)
comparison_tensor = SEPT(
child=reference_data,
entity=traskmaster,
max_vals=upper_bound,
min_vals=lower_bound,
)
with pytest.raises(NotImplementedError):
reference_tensor != comparison_tensor
return None
def test_add_wrong_types(
reference_data: np.ndarray,
upper_bound: np.ndarray,
lower_bound: np.ndarray,
ishan: Entity,
) -> None:
"""Ensure that addition with incorrect types aren't supported"""
reference_tensor = SEPT(
child=reference_data, entity=ishan, max_vals=upper_bound, min_vals=lower_bound
)
with pytest.raises(NotImplementedError):
reference_tensor + "some string"
reference_tensor + dict()
# TODO: Double check how tuples behave during addition/subtraction with np.ndarrays
return None
def test_add_simple_types(
reference_data: np.ndarray,
upper_bound: np.ndarray,
lower_bound: np.ndarray,
ishan: Entity,
dims: int,
) -> None:
"""Test addition of a SEPT with simple types (float, ints, bools, etc)"""
tensor = SEPT(
child=reference_data, entity=ishan, max_vals=upper_bound, min_vals=lower_bound
)
random_int = np.random.randint(low=15, high=1000)
result = tensor + random_int
assert isinstance(result, SEPT), "SEPT + int != SEPT"
assert (
result.max_vals == tensor.max_vals + random_int
).all(), "SEPT + int: incorrect max_val"
assert (
result.min_vals == tensor.min_vals + random_int
).all(), "SEPT + int: incorrect min_val"
random_float = random_int * np.random.rand()
result = tensor + random_float
assert isinstance(result, SEPT), "SEPT + float != SEPT"
assert (
result.max_vals == tensor.max_vals + random_float
).all(), "SEPT + float: incorrect max_val"
assert (
result.min_vals == tensor.min_vals + random_float
).all(), "SEPT + float: incorrect min_val"
random_ndarray = np.random.random((dims, dims))
result = tensor + random_ndarray
assert isinstance(result, SEPT), "SEPT + np.ndarray != SEPT"
# assert (result.max_vals == tensor.max_vals + random_ndarray.max()).all(), "SEPT + np.ndarray: incorrect max_val"
# assert (result.min_vals == tensor.min_vals + random_ndarray.min()).all(), "SEPT + np.ndarray: incorrect min_val"
return None
def test_add_tensor_types(
reference_data: np.ndarray,
upper_bound: np.ndarray,
lower_bound: np.ndarray,
ishan: Entity,
highest,
dims: int,
) -> None:
"""Test addition of a SEPT with various other kinds of Tensors"""
# TODO: Add tests for REPT, GammaTensor, etc when those are built out.
reference_tensor = SEPT(
child=reference_data, entity=ishan, max_vals=upper_bound, min_vals=lower_bound
)
simple_tensor = Tensor(
child=np.random.randint(
low=-highest, high=highest, size=(dims + 10, dims + 10), dtype=np.int32
)
)
with pytest.raises(NotImplementedError):
result = reference_tensor + simple_tensor
assert isinstance(result, SEPT), "SEPT + Tensor != SEPT"
assert (
result.max_vals == reference_tensor.max_vals + simple_tensor.child.max()
), "SEPT + Tensor: incorrect max_val"
assert (
result.min_vals == reference_tensor.min_vals + simple_tensor.child.min()
), "SEPT + Tensor: incorrect min_val"
return None
def test_add_single_entities(
reference_data: np.ndarray,
upper_bound: np.ndarray,
lower_bound: np.ndarray,
ishan: Entity,
) -> None:
"""Test the addition of SEPTs"""
tensor1 = SEPT(
child=reference_data, entity=ishan, max_vals=upper_bound, min_vals=lower_bound
)
tensor2 = SEPT(
child=reference_data, entity=ishan, max_vals=upper_bound, min_vals=lower_bound
)
result = tensor2 + tensor1
assert isinstance(result, SEPT), "Addition of two SEPTs is wrong type"
assert (
result.max_vals == 2 * upper_bound
).all(), "Addition of two SEPTs results in incorrect max_val"
assert (
result.min_vals == 2 * lower_bound
).all(), "Addition of two SEPTs results in incorrect min_val"
# Try with negative values
tensor3 = SEPT(
child=reference_data * -1.5,
entity=ishan,
max_vals=upper_bound,
min_vals=lower_bound,
)
result = tensor3 + tensor1
assert isinstance(result, SEPT), "Addition of two SEPTs is wrong type"
assert (
result.max_vals == tensor3.max_vals + tensor1.max_vals
).all(), "SEPT + SEPT results in incorrect max_val"
assert (
result.min_vals == tensor3.min_vals + tensor1.min_vals
).all(), "SEPT + SEPT results in incorrect min_val"
return None
@pytest.mark.skip(reason="GammaTensors have now been implemented")
def test_add_diff_entities(
reference_data: np.ndarray,
upper_bound: np.ndarray,
lower_bound: np.ndarray,
ishan: Entity,
traskmaster: Entity,
) -> None:
"""Test the addition of SEPTs"""
tensor1 = SEPT(
child=reference_data, entity=ishan, max_vals=upper_bound, min_vals=lower_bound
)
tensor2 = SEPT(
child=reference_data,
entity=traskmaster,
max_vals=upper_bound,
min_vals=lower_bound,
)
assert tensor2.entity != tensor1.entity, "Entities aren't actually different"
with pytest.raises(NotImplementedError):
tensor2 + tensor1
return None
def test_add_sub_equivalence(
reference_data: np.ndarray,
upper_bound: np.ndarray,
lower_bound: np.ndarray,
ishan: Entity,
) -> None:
"""Test that the addition of negative values is the same as subtraction."""
tensor1 = SEPT(
child=reference_data, entity=ishan, max_vals=upper_bound, min_vals=lower_bound
)
tensor2 = SEPT(
child=reference_data * -1,
entity=ishan,
max_vals=upper_bound,
min_vals=lower_bound,
)
add_result = tensor1 + tensor2
sub_result = tensor1 - tensor1
assert (
add_result == sub_result
), "Addition of negative values does not give the same result as subtraction"
return None
def test_add_to_gamma_tensor(
reference_data: np.ndarray,
upper_bound: np.ndarray,
lower_bound: np.ndarray,
reference_scalar_manager: VirtualMachinePrivateScalarManager,
ishan: Entity,
traskmaster: Entity,
) -> None:
"""Test that SEPTs with different entities create a GammaTensor when added"""
# We have to use a reference scalar manager for now because we can't combine scalar factories yet.
tensor1 = SEPT(
child=reference_data,
entity=ishan,
max_vals=np.ones_like(reference_data),
min_vals=np.zeros_like(reference_data),
scalar_manager=reference_scalar_manager,
)
tensor2 = SEPT(
child=reference_data,
entity=traskmaster,
max_vals=np.ones_like(reference_data),
min_vals=np.zeros_like(reference_data),
scalar_manager=reference_scalar_manager,
)
assert tensor2.entity != tensor1.entity, "Entities aren't actually different"
result = tensor2 + tensor1
assert isinstance(
result, IGT
), "Addition of SEPTs with diff entities did not give GammaTensor"
assert result.shape == tensor2.shape, "SEPT + SEPT changed shape"
assert result.shape == tensor1.shape, "SEPT + SEPT changed shape"
# Check that all values are as expected, and addition was conducted correctly.
for i in range(len(result.flat_scalars)):
assert (
result.flat_scalars[i].value
== tensor2.child.flatten()[i] + tensor1.child.flatten()[i]
), "Wrong value."
return None
def test_sub_to_gamma_tensor(
reference_data: np.ndarray,
upper_bound: np.ndarray,
lower_bound: np.ndarray,
reference_scalar_manager: VirtualMachinePrivateScalarManager,
ishan: Entity,
traskmaster: Entity,
) -> None:
"""Test that SEPTs with different entities create a GammaTensor when subtracted"""
# We have to use a reference scalar manager for now because we can't combine scalar factories yet.
tensor1 = SEPT(
child=reference_data,
entity=ishan,
max_vals=np.ones_like(reference_data),
min_vals=np.zeros_like(reference_data),
scalar_manager=reference_scalar_manager,
)
tensor2 = SEPT(
child=reference_data,
entity=traskmaster,
max_vals=np.ones_like(reference_data),
min_vals=np.zeros_like(reference_data),
scalar_manager=reference_scalar_manager,
)
assert tensor2.entity != tensor1.entity, "Entities aren't actually different"
result = tensor2 - tensor1
assert isinstance(
result, IGT
), "Addition of SEPTs with diff entities did not give GammaTensor"
assert result.shape == tensor2.shape, "SEPT + SEPT changed shape"
assert result.shape == tensor1.shape, "SEPT + SEPT changed shape"
# Check that all values are as expected, and addition was conducted correctly.
for i in range(len(result.flat_scalars)):
assert (
result.flat_scalars[i].value
== tensor2.child.flatten()[i] - tensor1.child.flatten()[i]
), "Wrong value."
return None
def test_pos(
reference_data: np.ndarray,
upper_bound: np.ndarray,
lower_bound: np.ndarray,
reference_scalar_manager: VirtualMachinePrivateScalarManager,
ishan: Entity,
) -> None:
"""Ensure the __pos__ operator works as intended"""
tensor = SEPT(
child=reference_data,
entity=ishan,
max_vals=np.ones_like(reference_data),
min_vals=np.zeros_like(reference_data),
scalar_manager=reference_scalar_manager,
)
assert (
+tensor == tensor
), "__pos__ failed at literally the one thing it was supposed to do."
# Change to integer tensor
tensor.child = tensor.child.astype("int32")
assert +tensor == tensor, "__pos__ failed after converting floats to ints."
def test_repeat(
reference_data: np.ndarray,
upper_bound: np.ndarray,
lower_bound: np.ndarray,
reference_scalar_manager: VirtualMachinePrivateScalarManager,
ishan: Entity,
) -> None:
"""Test that the repeat method extends a SEPT.child normally"""
repeat_count = np.random.randint(5, 10)
tensor = SEPT(
child=reference_data,
max_vals=upper_bound,
min_vals=lower_bound,
entity=ishan,
scalar_manager=reference_scalar_manager,
)
repeated_tensor = tensor.repeat(repeat_count) # shape = (dims*dims*repeat_count, )
for i in range(len(tensor.child.flatten())):
for j in range(i * repeat_count, (i + 1) * repeat_count - 1):
assert (
tensor.child.flatten()[i] == repeated_tensor.child[j]
), "Repeats did not function as intended!"
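# The nested loop above mirrors numpy's repeat semantics: every element
# of the flattened child appears repeat_count times back to back, e.g.
# (a sketch)
#
#     np.repeat(np.array([1, 2]), 3)   # -> array([1, 1, 1, 2, 2, 2])
#
# so index i of the original maps to indices i*repeat_count through
# (i+1)*repeat_count - 1 of the repeated tensor.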
def test_repeat_axes(
reference_data: np.ndarray,
reference_scalar_manager: VirtualMachinePrivateScalarManager,
ishan: Entity,
) -> None:
"""Test that the axes argument of the repeat method works as intended"""
repeat_count = np.random.randint(5, 10)
tensor = SEPT(
child=reference_data,
max_vals=np.ones_like(reference_data),
min_vals=np.zeros_like(reference_data),
entity=ishan,
scalar_manager=reference_scalar_manager,
)
repeated_tensor = tensor.repeat(
repeat_count, axis=1
) # shape = (dims*dims*repeat_count, )
for i in range(len(tensor.child.flatten())):
for j in range(i * repeat_count, (i + 1) * repeat_count - 1):
assert (
tensor.child.flatten()[i] == repeated_tensor.child.flatten()[j]
), "Repeats did not function as intended!"
def test_transpose_simple_types(ishan: Entity) -> None:
"""Test that if self.child can't be transposed (b/c it's an int/float/bool/etc), it isn't changed"""
random_int = np.random.randint(low=50, high=100)
int_tensor = SEPT(child=random_int, entity=ishan, min_vals=50, max_vals=100)
int_tensor_transposed = int_tensor.transpose()
assert (
int_tensor_transposed.shape == int_tensor.shape
), "Transpose shape is incorrect"
assert int_tensor_transposed.child == int_tensor.child, "Transpose: child incorrect"
assert (
int_tensor_transposed.min_vals == int_tensor.min_vals
), "Transpose: min values incorrect"
assert (
int_tensor_transposed.max_vals == int_tensor.max_vals
), "Transpose: max_values incorrect"
# assert int_tensor_transposed.transpose() == int_tensor, "Transpose: equality error"
random_float = random_int * np.random.random()
float_tensor = SEPT(child=random_float, entity=ishan, min_vals=0, max_vals=100)
float_tensor_transposed = float_tensor.transpose()
assert (
float_tensor_transposed.shape == float_tensor.shape
), "Transpose shape is incorrect"
assert (
float_tensor_transposed.child == float_tensor.child
), "Transpose: child incorrect"
assert (
float_tensor_transposed.min_vals == float_tensor.min_vals
), "Transpose: min values incorrect"
assert (
float_tensor_transposed.max_vals == float_tensor.max_vals
), "Transpose: max_values incorrect"
# assert float_tensor_transposed == float_tensor, "Transpose: equality error"
random_bool = np.random.choice([True, False], p=[0.5, 0.5])
bool_tensor = SEPT(child=random_bool, entity=ishan, min_vals=0, max_vals=1)
bool_tensor_transposed = bool_tensor.transpose()
assert (
bool_tensor_transposed.shape == bool_tensor.shape
), "Transpose shape is incorrect"
assert (
bool_tensor_transposed.child == bool_tensor.child
), "Transpose: child incorrect"
assert (
bool_tensor_transposed.min_vals == bool_tensor.min_vals
), "Transpose: min values incorrect"
assert (
bool_tensor_transposed.max_vals == bool_tensor.max_vals
), "Transpose: max_values incorrect"
# assert bool_tensor_transposed == bool_tensor, "Transpose: equality error"
return None
def test_transpose_square_matrix(
reference_data: np.ndarray,
upper_bound: np.ndarray,
lower_bound: np.ndarray,
ishan: Entity,
dims: int,
) -> None:
"""Test transpose works on the most important use case, which is when self.child is a np.array or Tensor"""
tensor = SEPT(
child=reference_data, entity=ishan, max_vals=upper_bound, min_vals=lower_bound
)
transposed_tensor = tensor.transpose()
assert (
tensor.shape == transposed_tensor.shape
), "Transposing square matrix changed shape"
assert (
upper_bound.transpose() == transposed_tensor.max_vals
).all(), "Transpose: Incorrect max_vals"
assert (
lower_bound.transpose() == transposed_tensor.min_vals
).all(), "Transpose: Incorrect min_vals"
assert (
transposed_tensor.transpose() == tensor
), "Transposing tensor twice should return the original tensor"
# Can't index directly into SEPT due to IndexErrors arising due to __getitem__'s effect on min_val/max_val
for i in range(dims):
for j in range(dims):
assert (
tensor.child[i, j] == transposed_tensor.child[j, i]
), "Transpose failed"
def test_transpose_non_square_matrix(ishan: Entity, dims: int) -> None:
"""Test transpose on SEPTs where self.child is not a square matrix"""
rows = dims
cols = dims + np.random.randint(low=1, high=5)
tensor = SEPT(
child=np.random.random((rows, cols)),
entity=ishan,
max_vals=np.ones(rows),
min_vals=np.zeros(rows),
)
transposed_tensor = tensor.transpose()
assert (
tensor.shape != transposed_tensor.shape
), "Transposing non-square matrix did not change shape"
assert (
tensor.shape[::-1] == transposed_tensor.shape
), "Transposing non-square matrix resulted in incorrect shape"
assert (
np.ones((1, rows)) == transposed_tensor.max_vals
).all(), "Transpose: Incorrect max_vals"
assert (
np.zeros((1, rows)) == transposed_tensor.min_vals
).all(), "Transpose: Incorrect min_vals"
assert (
transposed_tensor.transpose() == tensor
), "Transposing tensor twice should return the original tensor"
# Can't index directly into SEPT due to IndexErrors arising due to __getitem__'s effect on min_val/max_val
for i in range(dims):
for j in range(dims):
assert (
tensor.child[i, j] == transposed_tensor.child[j, i]
), "Transpose failed"
@pytest.mark.skip(
reason="Test works, but checking that it works using elementwise comparison raises Deprecation Warnings"
)
def test_transpose_args(
reference_data: np.ndarray,
upper_bound: np.ndarray,
lower_bound: np.ndarray,
ishan: Entity,
    highest,
    dims: int,
) -> None:
"""Ensure the optional arguments passed to .transpose() work as intended."""
# Try with square matrix
square_tensor = SEPT(
child=reference_data, entity=ishan, min_vals=lower_bound, max_vals=upper_bound
)
order = list(range(len(square_tensor.shape)))
np.random.shuffle(order)
transposed_square_tensor = square_tensor.transpose(order)
assert (
square_tensor.shape == transposed_square_tensor.shape
), "Transposing square matrix changed shape"
for original_index, final_index in enumerate(order):
assert (
square_tensor.child[:, original_index]
== transposed_square_tensor[final_index]
), "Transposition failed"
# TODO: check by reverse/undo the transpose
# TODO: check arguments don't interfere with simple type transpose
# Try with non-square matrix
rows = dims
cols = dims + np.random.randint(low=1, high=5)
non_square_data = np.random.randint(
low=-highest, high=highest, size=(rows, cols), dtype=np.int32
)
tensor = SEPT(
child=non_square_data,
entity=ishan,
max_vals=np.ones_like(non_square_data) * highest,
min_vals=np.ones_like(non_square_data) * -highest,
)
order = list(range(len(tensor.shape)))
np.random.shuffle(order)
transposed_tensor = tensor.transpose(order)
assert (
tensor.shape[::-1] == transposed_tensor.shape
), "Transposing non-square matrix resulted in incorrect shape"
for original_index, final_index in enumerate(order):
assert (
tensor.child[:, original_index] == transposed_tensor[final_index]
), "Transposition failed"
def test_reshape(
reference_data: np.ndarray,
upper_bound: np.ndarray,
lower_bound: np.ndarray,
ishan: Entity,
) -> None:
"""Ensure reshape happens when it is able"""
reference_tensor = SEPT(
child=reference_data, max_vals=upper_bound, min_vals=lower_bound, entity=ishan
)
new_shape = reference_data.flatten().shape[0]
reference_tensor.reshape(new_shape)
def test_reshape_fail(
reference_data: np.ndarray,
upper_bound: np.ndarray,
lower_bound: np.ndarray,
ishan: Entity,
) -> None:
"""Make sure errors are raised correctly when reshape is not possible due to shape mismatch."""
reference_tensor = SEPT(
child=reference_data, max_vals=upper_bound, min_vals=lower_bound, entity=ishan
)
new_shape = reference_data.flatten().shape[0]
with pytest.raises(ValueError):
reference_tensor.reshape(new_shape - 1)
@pytest.mark.skip(reason="Unnecessary for now, testing in reshape_fail()")
def test_reshape_simple_type() -> None:
"""Ensure reshape has no effect on simple types without shapes"""
pass
def test_resize(
reference_data: np.ndarray,
upper_bound: np.ndarray,
lower_bound: np.ndarray,
ishan: Entity,
) -> None:
"""Ensure resize happens when it is able"""
reference_tensor = SEPT(
child=reference_data, max_vals=upper_bound, min_vals=lower_bound, entity=ishan
)
new_shape = reference_data.flatten().shape[0]
    reference_tensor.resize(new_shape)
def test_resize_fail(
reference_data: np.ndarray,
upper_bound: np.ndarray,
lower_bound: np.ndarray,
ishan: Entity,
) -> None:
"""Make sure errors are raised correctly when resize is not possible due to shape mismatch."""
reference_tensor = SEPT(
child=reference_data, max_vals=upper_bound, min_vals=lower_bound, entity=ishan
)
new_shape = int(reference_data.flatten().shape[0])
with pytest.raises(ValueError):
reference_tensor.resize(int(new_shape - 1))
def test_resize_inplace(
reference_data: np.ndarray,
upper_bound: np.ndarray,
lower_bound: np.ndarray,
ishan: Entity,
) -> None:
"""Ensure resize changes shape in place"""
reference_tensor = SEPT(
child=reference_data, max_vals=upper_bound, min_vals=lower_bound, entity=ishan
)
initial_shape = reference_tensor.shape
new_shape = int(reference_data.flatten().shape[0])
assert isinstance(
new_shape, int
), "new shape is not an integer, resize not possible"
reference_tensor.resize(new_shape)
assert (
reference_tensor.shape != initial_shape
), "Resize operation failed to change shape in-place."
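# Unlike reshape (exercised in test_reshape above), ndarray-style resize
# is expected to change the tensor's shape in place, which is why
# test_resize_inplace only compares the shape before and after the call
# rather than inspecting a return value.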
def test_flatten(
reference_data: np.ndarray,
upper_bound: np.ndarray,
lower_bound: np.ndarray,
ishan: Entity,
) -> None:
"""Test that self.child can be flattened for appropriate data types"""
reference_tensor = SEPT(
child=reference_data, max_vals=upper_bound, min_vals=lower_bound, entity=ishan
)
target_shape = reference_data.flatten().shape
flattened_tensor = reference_tensor.flatten()
assert (
flattened_tensor.shape != reference_tensor.shape
), "Flattening the array really didn't do much eh"
assert (
flattened_tensor.shape == target_shape
), "Flattening did not result in the correct shape"
assert (
flattened_tensor == reference_data.flatten()
).child.all(), "Flattening changed the order of entries"
def test_ravel(
reference_data: np.ndarray,
upper_bound: np.ndarray,
lower_bound: np.ndarray,
ishan: Entity,
highest,
) -> None:
"""Test that self.child can be ravelled for appropriate data types"""
reference_tensor = SEPT(
child=reference_data, max_vals=upper_bound, min_vals=lower_bound, entity=ishan
)
target_shape = reference_data.ravel().shape
ravelled_tensor = reference_tensor.ravel()
assert (
ravelled_tensor.shape != reference_tensor.shape
), "Ravelling the array really didn't do much eh"
assert (
ravelled_tensor.shape == target_shape
), "Ravelling did not result in the correct shape"
assert (
ravelled_tensor == reference_data.flatten()
).child.all(), "Ravelling changed the order of entries"
def test_squeeze(highest, ishan: Entity) -> None:
"""Test that squeeze works on an ideal case"""
_data = np.random.randint(
low=-highest, high=highest, size=(10, 1, 10, 1, 10), dtype=np.int32
)
initial_shape = _data.shape
reference_tensor = SEPT(
child=_data,
max_vals=np.ones_like(_data) * highest,
min_vals=np.ones_like(_data) * -1 * highest,
entity=ishan,
)
target_data = _data.squeeze()
target_shape = target_data.shape
squeezed_tensor = reference_tensor.squeeze()
assert squeezed_tensor.shape != initial_shape, "Squeezing the tensor did nothing"
assert (
squeezed_tensor.shape == target_shape
), "Squeezing the tensor gave the wrong shape"
assert (
squeezed_tensor == target_data
).child.all(), "Squeezing the tensor eliminated the wrong values"
def test_squeeze_correct_axes(highest, ishan: Entity) -> None:
"""Test that squeeze works on an ideal case with correct axes specified"""
_data = np.random.randint(
low=-1 * highest, high=highest, size=(10, 1, 10, 1, 10), dtype=np.int32
)
initial_shape = _data.shape
reference_tensor = SEPT(
child=_data,
max_vals=np.ones_like(_data) * highest,
min_vals=np.ones_like(_data) * -highest,
entity=ishan,
)
target_data = _data.squeeze(1)
target_shape = target_data.shape
squeezed_tensor = reference_tensor.squeeze(1)
assert squeezed_tensor.shape != initial_shape, "Squeezing the tensor did nothing"
assert (
squeezed_tensor.shape == target_shape
), "Squeezing the tensor gave the wrong shape"
assert (
squeezed_tensor == target_data
).child.all(), "Squeezing the tensor eliminated the wrong values"
def test_swap_axes(highest, ishan: Entity) -> None:
"""Test that swap_axes works on an ideal case"""
data = np.random.randint(
low=-highest, high=highest, size=(10, 1, 10, 1, 10), dtype=np.int32
)
initial_shape = data.shape
reference_tensor = SEPT(
child=data,
max_vals=np.ones_like(data) * highest,
min_vals=np.ones_like(data) * -highest,
entity=ishan,
)
target_data = data.swapaxes(1, 2)
target_shape = target_data.shape
swapped_tensor = reference_tensor.swapaxes(1, 2)
assert (
swapped_tensor.shape != initial_shape
), "Swapping axes of the tensor did nothing"
assert (
swapped_tensor.shape == target_shape
), "Swapping axes of the tensor gave the wrong shape"
assert (
swapped_tensor == target_data
).child.all(), "Swapping axes of the tensor eliminated the wrong values"
@pytest.mark.skipif(dims == 1, reason="Tensor generated did not have two dimensions")
def test_compress(
reference_data: np.ndarray,
upper_bound: np.ndarray,
lower_bound: np.ndarray,
ishan: Entity,
) -> None:
reference_tensor = SEPT(
child=reference_data, max_vals=upper_bound, min_vals=lower_bound, entity=ishan
)
result = reference_tensor.compress([0, 1])
assert result == reference_data.compress(
[0, 1]
), "Compress did not work as expected"
result2 = reference_tensor.compress([0, 1], axis=1)
assert result2 == reference_data.compress(
[0, 1], axis=1
), "Compress did not work as expected"
@pytest.mark.skipif(dims == 1, reason="Tensor generated did not have two dimensions")
def test_partition(
reference_data: np.ndarray,
upper_bound: np.ndarray,
lower_bound: np.ndarray,
ishan: Entity,
) -> None:
reference_tensor = SEPT(
child=reference_data, max_vals=upper_bound, min_vals=lower_bound, entity=ishan
)
k = 1
reference_tensor.partition(k)
assert reference_tensor != reference_data, "Partition did not work as expected"
reference_data.partition(k)
assert reference_tensor == reference_data, "Partition did not work as expected"
@pytest.mark.skipif(dims == 1, reason="Tensor generated did not have two dimensions")
def test_partition_axis(
reference_data: np.ndarray,
upper_bound: np.ndarray,
lower_bound: np.ndarray,
ishan: Entity,
) -> None:
reference_tensor = SEPT(
child=reference_data, max_vals=upper_bound, min_vals=lower_bound, entity=ishan
)
k = 1
reference_tensor.partition(k, axis=1)
assert reference_tensor != reference_data, "Partition did not work as expected"
reference_data.partition(k, axis=1)
assert reference_tensor == reference_data, "Partition did not work as expected"
def test_mul(
reference_data: np.ndarray,
upper_bound: np.ndarray,
lower_bound: np.ndarray,
reference_scalar_manager: VirtualMachinePrivateScalarManager,
ishan: Entity,
traskmaster: Entity,
) -> None:
    """Test SEPT multiplication (public-public and public-private)."""
sept1 = SEPT(
child=reference_data,
min_vals=lower_bound,
max_vals=upper_bound,
entity=ishan,
scalar_manager=reference_scalar_manager,
)
sept2 = SEPT(
child=reference_data,
min_vals=lower_bound,
max_vals=upper_bound,
entity=traskmaster,
scalar_manager=reference_scalar_manager,
)
# Public-Public
output = sept2 * sept2
assert output.shape == sept2.shape
# assert (output.min_vals == sept2.min_vals * sept2.min_vals).all()
# assert (output.max_vals == sept2.max_vals * sept2.max_vals).all()
assert (output.child == sept2.child * sept2.child).all()
# Public - Private
output: IGT = sept2 * sept1
assert output.shape == sept2.shape
# assert (output.min_vals == sept1.min_vals * sept2.min_vals).all()
# assert (output.max_vals == sept1.max_vals * sept2.max_vals).all()
values = np.array([i.value for i in output.flat_scalars], dtype=np.int32).reshape(
output.shape
)
    target = sept1.child * sept2.child
    assert target.shape == values.shape
    assert (sept1.child * sept2.child == values).all()
# assert output.child == sept1.child * sept2.child
return None
def test_neg(
reference_data: np.ndarray,
upper_bound: np.ndarray,
lower_bound: np.ndarray,
ishan: Entity,
) -> None:
"""Test __neg__"""
reference_tensor = SEPT(
child=reference_data, max_vals=upper_bound, min_vals=lower_bound, entity=ishan
)
negative_tensor = reference_tensor.__neg__()
assert (negative_tensor.child == reference_tensor.child * -1).all()
assert (negative_tensor.min_vals == reference_tensor.max_vals * -1).all()
assert (negative_tensor.max_vals == reference_tensor.min_vals * -1).all()
assert negative_tensor.shape == reference_tensor.shape
def test_and(reference_binary_data: np.ndarray, ishan: Entity) -> None:
"""Test bitwise and"""
reference_tensor = SEPT(
child=reference_binary_data,
max_vals=np.ones_like(reference_binary_data),
min_vals=np.zeros_like(reference_binary_data),
entity=ishan,
)
output = reference_tensor & False
target = reference_binary_data & False
assert (output.child == target).all()
def test_or(reference_binary_data: np.ndarray, ishan: Entity) -> None:
"""Test bitwise or"""
reference_tensor = SEPT(
child=reference_binary_data,
max_vals=np.ones_like(reference_binary_data),
min_vals=np.zeros_like(reference_binary_data),
entity=ishan,
)
output = reference_tensor | False
target = reference_binary_data | False
assert (output.child == target).all()
# End of Ishan's tests
@pytest.fixture
def child1(dims: int) -> np.ndarray:
return np.random.randint(low=-2, high=4, size=dims)
@pytest.fixture
def child2(dims: int) -> np.ndarray:
return | np.random.randint(low=4, high=7, size=dims) | numpy.random.randint |
from os import path
import configparser
import numpy as np
import random
import gym
import gym_flock
import torch
import sys
import datetime
import time
from learner.gnn_dagger import train_dagger
from learner.agg_gnn_dagger import train_agg_dagger
from learner.gnn_baseline import train_baseline
from learner.cta_gnn_dagger import train_CTADAGGER
from learner.ca_gnn_dagger import train_CADAGGER
from learner.half_cta_gnn_dagger import train_HalfCTADAGGER
from learner.half_ca_gnn_dagger import train_HalfCADAGGER
def tprint(s):
"""
An enhanced print function with time concatenated to the output.
Source: Convexified Convolutional Neural Networks, by Zhang et al.
"""
tm_str = time.strftime("%H:%M:%S", time.gmtime(time.time()))
print(tm_str + ": " + str(s))
sys.stdout.flush()
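# Example usage of tprint (a sketch; the timestamp shown is illustrative):
#
#     tprint("starting experiment")
#     # 14:07:32: starting experiment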
def run_experiment(args):
# initialize gym env
env_name = args.get('env')
env = gym.make(env_name)
if isinstance(env.env, gym_flock.envs.flocking.FlockingRelativeEnv):
env.env.params_from_cfg(args)
# use seed
seed = args.getint('seed')
env.seed(seed)
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
# initialize params tuple
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
alg = args.get('alg').lower()
if alg == 'dagger':
stats = train_dagger(env, args, device)
elif alg == 'aggdagger':
stats = train_agg_dagger(env, args, device)
elif alg == 'baseline':
stats = train_baseline(env, args)
elif alg == 'ctadagger':
stats = train_CTADAGGER(env, args, device)
elif alg == 'cadagger':
stats = train_CADAGGER(env, args, device)
elif alg == 'halfctadagger':
stats = train_HalfCTADAGGER(env, args, device)
elif alg == 'halfcadagger':
stats = train_HalfCADAGGER(env, args, device)
else:
raise Exception('Invalid algorithm/mode name')
return stats
def main():
fname = sys.argv[1]
config_file = path.join(path.dirname(__file__), fname)
config = configparser.ConfigParser()
config.read(config_file)
printed_header = False
if config.sections():
for section_name in config.sections():
if not printed_header:
print(config[section_name].get('header'))
printed_header = True
stats = run_experiment(config[section_name])
            tprint(section_name + ", " + str(stats['mean']) + ", " + str(stats['std']) + ", vel_diffs(mean): " + str(np.mean(stats['vel_diffs'])) + ", vel_diffs(std): " + str(np.std(stats['vel_diffs'])) + ", min_dists: " + str(np.mean(stats['min_dists'])))
import matplotlib.pyplot as plt
import utils
import numpy as np
import skimage.morphology
import pandas as pd
import score
import os
def overview_trn_Img_n_Masks(train_df):
# Overview of train images/masks. There is a lot of variation concerning
# the form/size/number of nuclei and the darkness/lightness/colorfulness of
# the images.
fig, axs = plt.subplots(4, 3, figsize=(20, 20))
for i in range(4):
n = np.random.randint(0, len(train_df))
axs[i, 0].imshow(utils.read_image(train_df['image_path'].loc[n]))
axs[i, 0].set_title('{}. image'.format(n))
axs[i, 1].imshow(utils.read_mask(
train_df['mask_dir'].loc[n]), cmap='gray')
axs[i, 1].set_title('{}. mask'.format(n))
axs[i, 2].imshow(utils.calculate_weights_from_dir(
train_df['mask_dir'].loc[n]), cmap='jet')
axs[i, 2].set_title('{}. weights'.format(n))
def get_nuclei_sizes(y_train):
nuclei_sizes = []
mask_idx = []
for i in range(len(y_train)):
mask = y_train[i].reshape(y_train.shape[1], y_train.shape[2])
lab_mask = skimage.morphology.label(mask > .5)
(mask_labels, mask_sizes) = np.unique(lab_mask, return_counts=True)
nuclei_sizes.extend(mask_sizes[1:])
mask_idx.extend([i] * len(mask_sizes[1:]))
return mask_idx, nuclei_sizes
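# get_nuclei_sizes labels the connected components of each thresholded
# mask (skimage.morphology.label) and reads the component sizes from
# np.unique(..., return_counts=True); the [1:] slices drop the
# background component (label 0).  The same idea on a single binary
# mask, as a sketch:
#
#     lab_mask = skimage.morphology.label(mask > .5)
#     labels, sizes = np.unique(lab_mask, return_counts=True)
#     nucleus_sizes = sizes[1:]   # background excluded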
def analyse_nuclei_sizes(y_train):
    # Analyze nuclei sizes.
    mask_idx, nuclei_sizes = get_nuclei_sizes(y_train)
nuclei_sizes_df = pd.DataFrame()
nuclei_sizes_df['mask_index'] = mask_idx
nuclei_sizes_df['nucleous_size'] = nuclei_sizes
print(nuclei_sizes_df.describe())
    print(nuclei_sizes_df.sort_values(by='nucleous_size', ascending=True).head(10))
def img_comparison_plot(train_df, x_train, y_train, y_weights,
target_size, n):
"""Plot the original and transformed images/masks."""
fig, axs = plt.subplots(1, 6, figsize=(20, 20))
axs[0].imshow(utils.read_image(train_df['image_path'].loc[n]))
axs[0].set_title('{}.) original image'.format(n))
img, img_type = utils.imshow_args(x_train[n])
axs[1].imshow(img, img_type)
axs[1].set_title('{}.) transformed image'.format(n))
axs[2].imshow(utils.read_mask(train_df['mask_dir'].loc[n]), cmap='jet')
axs[2].set_title('{}.) original mask'.format(n))
axs[3].imshow(y_train[n, :, :, 0], cmap='jet')
axs[3].set_title('{}.) transformed mask'.format(n))
axs[4].imshow(utils.calculate_weights(train_df['mask_dir'].loc[n],
target_size=target_size), cmap='jet')
axs[4].set_title('{}.) original weights'.format(n))
axs[5].imshow(y_weights[n, :, :, 0], cmap='jet')
axs[5].set_title('{}.) transformed weights'.format(n))
def plot_generated_image_mask(x_train, y_train, y_weights, n):
# Generate new images/masks via transformations applied on the original
# images/maks. Data augmentations can be used for regularization.
fig, axs = plt.subplots(1, 6, figsize=(20, 20))
img_new, mask_new, weights_new = utils.generate_images_and_masks(
x_train[n:n + 1], y_train[n:n + 1], y_weights[n:n + 1])
img, img_type = utils.imshow_args(x_train[n])
axs[0].imshow(img, img_type)
axs[0].set_title('{}. original image'.format(n))
img, img_type = utils.imshow_args(img_new[0])
axs[1].imshow(img, img_type)
axs[1].set_title('{}. generated image'.format(n))
axs[2].imshow(y_train[n, :, :, 0], cmap='gray')
axs[2].set_title('{}. original mask'.format(n))
axs[3].imshow(mask_new[0, :, :, 0], cmap='gray')
axs[3].set_title('{}. generated mask'.format(n))
axs[4].imshow(y_weights[n, :, :, 0], cmap='jet')
axs[4].set_title('{}. weights'.format(n))
axs[5].imshow(weights_new[0, :, :, 0], cmap='jet')
axs[5].set_title('{}. generated weights'.format(n))
def check_score_metric(n, train_df, y_train):
# Check the score metric for one sample. The predicted mask is simulated
# and can be modified in order to check the correct implementation of
# the score metric.
true_mask = y_train[n, :, :, 0].copy()
lab_true_mask = score.get_labeled_mask(true_mask)
pred_mask = true_mask.copy() # Create predicted mask from true mask.
    true_mask[lab_true_mask == 7] = 0  # Remove one object => false positive
pred_mask[lab_true_mask == 10] = 0 # Remove one object => false negative
offset = 5 # Offset.
pred_mask = pred_mask[offset:, offset:]
pred_mask = np.pad(pred_mask, ((0, offset), (0, offset)), mode="constant")
score.plot_score_summary(n, train_df, true_mask, pred_mask)
def check_num_identifiable_obj(y_train):
# Study how many objects in the masks can be identified.
# This is a limiting factor for the overall performance.
min_pixels_per_object = 20
summary = []
for n in range(len(y_train)):
img = y_train[n, :, :, 0]
lab_img = score.get_labeled_mask(img)
img_labels, img_area = | np.unique(lab_img, return_counts=True) | numpy.unique |
from evalutils.exceptions import ValidationError
from evalutils.io import CSVLoader, FileLoader, ImageLoader
import json
import nibabel as nib
import numpy as np
import os.path
from pathlib import Path
from pandas import DataFrame, MultiIndex
import scipy.ndimage
from scipy.ndimage.interpolation import map_coordinates, zoom
from surface_distance import *
##### paths #####
DEFAULT_INPUT_PATH = Path("/input/")
DEFAULT_GROUND_TRUTH_PATH = Path("/opt/evaluation/ground-truth/")
DEFAULT_EVALUATION_OUTPUT_FILE_PATH = Path("/output/metrics.json")
##### metrics #####
def jacobian_determinant(disp):
_, _, H, W, D = disp.shape
gradx = np.array([-0.5, 0, 0.5]).reshape(1, 3, 1, 1)
grady = np.array([-0.5, 0, 0.5]).reshape(1, 1, 3, 1)
gradz = np.array([-0.5, 0, 0.5]).reshape(1, 1, 1, 3)
gradx_disp = np.stack([scipy.ndimage.correlate(disp[:, 0, :, :, :], gradx, mode='constant', cval=0.0),
scipy.ndimage.correlate(disp[:, 1, :, :, :], gradx, mode='constant', cval=0.0),
scipy.ndimage.correlate(disp[:, 2, :, :, :], gradx, mode='constant', cval=0.0)], axis=1)
grady_disp = np.stack([scipy.ndimage.correlate(disp[:, 0, :, :, :], grady, mode='constant', cval=0.0),
scipy.ndimage.correlate(disp[:, 1, :, :, :], grady, mode='constant', cval=0.0),
scipy.ndimage.correlate(disp[:, 2, :, :, :], grady, mode='constant', cval=0.0)], axis=1)
gradz_disp = np.stack([scipy.ndimage.correlate(disp[:, 0, :, :, :], gradz, mode='constant', cval=0.0),
scipy.ndimage.correlate(disp[:, 1, :, :, :], gradz, mode='constant', cval=0.0),
scipy.ndimage.correlate(disp[:, 2, :, :, :], gradz, mode='constant', cval=0.0)], axis=1)
grad_disp = np.concatenate([gradx_disp, grady_disp, gradz_disp], 0)
jacobian = grad_disp + np.eye(3, 3).reshape(3, 3, 1, 1, 1)
jacobian = jacobian[:, :, 2:-2, 2:-2, 2:-2]
jacdet = jacobian[0, 0, :, :, :] * (jacobian[1, 1, :, :, :] * jacobian[2, 2, :, :, :] - jacobian[1, 2, :, :, :] * jacobian[2, 1, :, :, :]) -\
jacobian[1, 0, :, :, :] * (jacobian[0, 1, :, :, :] * jacobian[2, 2, :, :, :] - jacobian[0, 2, :, :, :] * jacobian[2, 1, :, :, :]) +\
jacobian[2, 0, :, :, :] * (jacobian[0, 1, :, :, :] * jacobian[1, 2, :, :, :] - jacobian[0, 2, :, :, :] * jacobian[1, 1, :, :, :])
return jacdet
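# jacobian_determinant computes the determinant of the spatial Jacobian of
# the deformation: central differences (kernel [-0.5, 0, 0.5]) of the dense
# displacement field are added to a 3x3 identity and the 3x3 determinant is
# expanded by cofactors; a 2-voxel border is cropped to avoid boundary
# artefacts.  A minimal sanity check (a sketch, not part of the evaluation
# pipeline):
#
#     zero_disp = np.zeros((1, 3, 16, 16, 16), dtype='float32')
#     assert np.allclose(jacobian_determinant(zero_disp), 1.0)
#
# i.e. a zero displacement (identity transform) should give a unit Jacobian
# determinant everywhere.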
def compute_tre(x, y, spacing):
return np.linalg.norm((x - y) * spacing, axis=1)
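# compute_tre returns the per-landmark target registration error in world
# units (e.g. mm): the voxel-space landmark difference is scaled by the
# voxel spacing before taking the Euclidean norm.  Sketch:
#
#     x = np.array([[0., 0., 0.]])
#     y = np.array([[1., 1., 1.]])
#     compute_tre(x, y, spacing=np.array([2., 2., 2.]))
#     # -> array([3.4641...])  (= 2 * sqrt(3))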
##### file loader #####
class NiftiLoader(ImageLoader):
@staticmethod
def load_image(fname):
return nib.load(str(fname))
@staticmethod
def hash_image(image):
return hash(image.get_fdata().tostring())
class NumpyLoader(ImageLoader):
@staticmethod
def load_image(fname):
return np.load(str(fname))['arr_0']
@staticmethod
def hash_image(image):
return hash(image.tostring())
class CURIOUSLmsLoader(FileLoader):
def load(self, fname):
lms_fixed = []
lms_moving = []
f = open(fname, 'r')
for line in f.readlines()[5:]:
lms = [float(lm) for lm in line.split(' ')[1:-1]]
lms_fixed.append(lms[:3])
lms_moving.append(lms[3:])
return {'lms_fixed': lms_fixed, 'lms_moving': lms_moving}
class L2RLmsLoader(FileLoader):
def load(self, fname):
lms_fixed = []
lms_moving = []
f = open(fname, 'r')
for line in f.readlines():
lms = [float(lm) for lm in line.split(',')]
lms_fixed.append(lms[:3])
lms_moving.append(lms[3:])
return {'lms_fixed': lms_fixed, 'lms_moving': lms_moving}
##### validation errors #####
def raise_missing_file_error(fname):
message = (
f"The displacement field {fname} is missing. "
f"Please provide all required displacement fields."
)
raise ValidationError(message)
def raise_dtype_error(fname, dtype):
message = (
f"The displacement field {fname} has a wrong dtype ('{dtype}'). "
f"All displacement fields should have dtype 'float16'."
)
raise ValidationError(message)
def raise_shape_error(fname, shape, expected_shape):
message = (
f"The displacement field {fname} has a wrong shape ('{shape[0]}x{shape[1]}x{shape[2]}x{shape[3]}'). "
f"The expected shape of displacement fields for this task is {expected_shape[0]}x{expected_shape[1]}x{expected_shape[2]}x{expected_shape[3]}."
)
raise ValidationError(message)
##### eval val #####
class EvalVal():
def __init__(self):
self.ground_truth_path = DEFAULT_GROUND_TRUTH_PATH
self.predictions_path = DEFAULT_INPUT_PATH
self.output_file = DEFAULT_EVALUATION_OUTPUT_FILE_PATH
self.csv_loader = CSVLoader()
self.nifti_loader = NiftiLoader()
self.numpy_loader = NumpyLoader()
self.curious_lms_loader = CURIOUSLmsLoader()
self.l2r_lms_loader = L2RLmsLoader()
self.pairs_task_01 = DataFrame()
self.imgs_task_01 = DataFrame()
self.lms_task_01 = DataFrame()
self.disp_fields_task_01 = DataFrame()
self.cases_task_01 = DataFrame()
self.pairs_task_02 = DataFrame()
self.imgs_task_02 = DataFrame()
self.lms_task_02 = DataFrame()
self.disp_fields_task_02 = DataFrame()
self.cases_task_02 = DataFrame()
self.pairs_task_03 = DataFrame()
self.segs_task_03 = DataFrame()
self.disp_fields_task_03 = DataFrame()
self.cases_task_03 = DataFrame()
self.pairs_task_04 = DataFrame()
self.segs_task_04 = DataFrame()
self.disp_fields_task_04 = DataFrame()
self.cases_task_04 = DataFrame()
def evaluate(self):
self.load_task_01()
self.merge_ground_truth_and_predictions_task_01()
self.score_task_01()
self.load_task_02()
self.merge_ground_truth_and_predictions_task_02()
self.score_task_02()
self.load_task_03()
self.merge_ground_truth_and_predictions_task_03()
self.score_task_03()
self.load_task_04()
self.merge_ground_truth_and_predictions_task_04()
self.score_task_04()
self.save()
def load_task_01(self):
self.pairs_task_01 = self.load_pairs(DEFAULT_GROUND_TRUTH_PATH / 'task_01' / 'pairs_val.csv')
self.imgs_task_01 = self.load_imgs_task_01()
self.lms_task_01 = self.load_lms_task_01()
self.disp_fields_task_01 = self.load_disp_fields(self.pairs_task_01, DEFAULT_INPUT_PATH / 'task_01', np.array([3, 128, 128, 144]))
def load_task_02(self):
self.pairs_task_02 = self.load_pairs(DEFAULT_GROUND_TRUTH_PATH / 'task_02' / 'pairs_val.csv')
self.imgs_task_02 = self.load_imgs_task_02()
self.lms_task_02 = self.load_lms_task_02()
self.disp_fields_task_02 = self.load_disp_fields(self.pairs_task_02, DEFAULT_INPUT_PATH / 'task_02', np.array([3, 96, 96, 104]))
def load_task_03(self):
self.pairs_task_03 = self.load_pairs(DEFAULT_GROUND_TRUTH_PATH / 'task_03' / 'pairs_val.csv')
self.segs_task_03 = self.load_segs_task_03()
self.disp_fields_task_03 = self.load_disp_fields(self.pairs_task_03, DEFAULT_INPUT_PATH / 'task_03', np.array([3, 96, 80, 128]))
def load_task_04(self):
self.pairs_task_04 = self.load_pairs(DEFAULT_GROUND_TRUTH_PATH / 'task_04' / 'pairs_val.csv')
self.segs_task_04 = self.load_segs_task_04()
self.disp_fields_task_04 = self.load_disp_fields(self.pairs_task_04, DEFAULT_INPUT_PATH / 'task_04', np.array([3, 64, 64, 64]))
def load_imgs_task_01(self):
cases = None
for _, row in self.pairs_task_01.iterrows():
case = self.nifti_loader.load(fname=DEFAULT_GROUND_TRUTH_PATH / 'task_01' / 'EASY-RESECT' / 'NIFTI' / 'Case{}'.format(row['fixed']) / 'Case{}-FLAIR-resize.nii'.format(row['fixed']))
if cases is None:
cases = case
index = [row['fixed']]
else:
cases += case
index += [row['fixed']]
return DataFrame(cases, index=index)
def load_imgs_task_02(self):
cases = None
for _, row in self.pairs_task_02.iterrows():
case = self.nifti_loader.load(fname=DEFAULT_GROUND_TRUTH_PATH / 'task_02' / 'training' / 'lungMasks' / 'case_{:03d}_exp.nii.gz'.format(row['fixed']))
if cases is None:
cases = case
index = [row['fixed']]
else:
cases += case
index += [row['fixed']]
return DataFrame(cases, index=index)
def load_segs_task_03(self):
cases = None
indices = []
for _, row in self.pairs_task_03.iterrows():
indices.append(row['fixed'])
indices.append(row['moving'])
indices = np.array(indices)
for i in np.unique(indices):
case = self.nifti_loader.load(fname=DEFAULT_GROUND_TRUTH_PATH / 'task_03' / 'Training' / 'label' / 'label{:04d}.nii.gz'.format(i))
if cases is None:
cases = case
index = [i]
else:
cases += case
index += [i]
return DataFrame(cases, index=index)
def load_segs_task_04(self):
cases = None
indices = []
for _, row in self.pairs_task_04.iterrows():
indices.append(row['fixed'])
indices.append(row['moving'])
indices = np.array(indices)
for i in np.unique(indices):
case = self.nifti_loader.load(fname=DEFAULT_GROUND_TRUTH_PATH / 'task_04' / 'Training' / 'label' / 'hippocampus_{}.nii.gz'.format(i))
if cases is None:
cases = case
index = [i]
else:
cases += case
index += [i]
return DataFrame(cases, index=index)
def load_lms_task_01(self):
cases = None
for _, row in self.pairs_task_01.iterrows():
case = self.curious_lms_loader.load(fname=DEFAULT_GROUND_TRUTH_PATH / 'task_01' / 'EASY-RESECT' / 'landmarks' / 'Coordinates' / 'Case{}-MRI-beforeUS.tag'.format(row['fixed']))
if cases is None:
cases = [case]
index = [row['fixed']]
else:
cases += [case]
index += [row['fixed']]
return DataFrame(cases, index=index)
def load_lms_task_02(self):
cases = None
for _, row in self.pairs_task_02.iterrows():
case = self.l2r_lms_loader.load(fname=DEFAULT_GROUND_TRUTH_PATH / 'task_02' / 'training' / 'lms' / 'case_{:03d}.txt'.format(row['fixed']))
if cases is None:
cases = [case]
index = [row['fixed']]
else:
cases += [case]
index += [row['fixed']]
return DataFrame(cases, index=index)
def merge_ground_truth_and_predictions_task_01(self):
cases = []
for _, row in self.pairs_task_01.iterrows():
case = {'img' : self.imgs_task_01.loc[row['fixed']],
'lms_fixed' : self.lms_task_01.loc[row['fixed']]['lms_fixed'],
'lms_moving' : self.lms_task_01.loc[row['moving']]['lms_moving'],
'disp_field' : self.disp_fields_task_01.loc[(row['fixed'], row['moving'])]}
cases += [case]
self.cases_task_01 = DataFrame(cases)
def merge_ground_truth_and_predictions_task_02(self):
cases = []
for _, row in self.pairs_task_02.iterrows():
case = {'img' : self.imgs_task_02.loc[row['fixed']],
'lms_fixed' : self.lms_task_02.loc[row['fixed']]['lms_fixed'],
'lms_moving' : self.lms_task_02.loc[row['moving']]['lms_moving'],
'disp_field' : self.disp_fields_task_02.loc[(row['fixed'], row['moving'])]}
cases += [case]
self.cases_task_02 = DataFrame(cases)
def merge_ground_truth_and_predictions_task_03(self):
cases = []
for _, row in self.pairs_task_03.iterrows():
case = {'seg_fixed' : self.segs_task_03.loc[row['fixed']],
'seg_moving' : self.segs_task_03.loc[row['moving']],
'disp_field' : self.disp_fields_task_03.loc[(row['fixed'], row['moving'])]}
cases += [case]
self.cases_task_03 = DataFrame(cases)
def merge_ground_truth_and_predictions_task_04(self):
cases = []
for _, row in self.pairs_task_04.iterrows():
case = {'seg_fixed' : self.segs_task_04.loc[row['fixed']],
'seg_moving' : self.segs_task_04.loc[row['moving']],
'disp_field' : self.disp_fields_task_04.loc[(row['fixed'], row['moving'])]}
cases += [case]
self.cases_task_04 = DataFrame(cases)
def score_task_01(self):
self.cases_results_task_01 = DataFrame()
for idx, case in self.cases_task_01.iterrows():
self.cases_results_task_01 = self.cases_results_task_01.append(self.score_case_task_01(idx=idx, case=case), ignore_index=True)
self.aggregate_results_task_01 = self.score_aggregates_task_01()
def score_task_02(self):
self.cases_results_task_02 = DataFrame()
for idx, case in self.cases_task_02.iterrows():
self.cases_results_task_02 = self.cases_results_task_02.append(self.score_case_task_02(idx=idx, case=case), ignore_index=True)
self.aggregate_results_task_02 = self.score_aggregates_task_02()
def score_task_03(self):
self.cases_results_task_03 = DataFrame()
for idx, case in self.cases_task_03.iterrows():
self.cases_results_task_03 = self.cases_results_task_03.append(self.score_case_task_03(idx=idx, case=case), ignore_index=True)
self.aggregate_results_task_03 = self.score_aggregates_task_03()
def score_task_04(self):
self.cases_results_task_04 = DataFrame()
for idx, case in self.cases_task_04.iterrows():
self.cases_results_task_04 = self.cases_results_task_04.append(self.score_case_task_04(idx=idx, case=case), ignore_index=True)
self.aggregate_results_task_04 = self.score_aggregates_task_04()
def score_case_task_01(self, *, idx, case):
img_path = case['img']['path']
disp_field_path = case['disp_field']['path']
img = self.nifti_loader.load_image(img_path)
affine = img.affine
spacing = img.header.get_zooms()
disp_field = self.numpy_loader.load_image(disp_field_path).astype('float32')
disp_field = np.array([zoom(disp_field[i], 2, order=2) for i in range(3)])
lms_fixed = np.dot(np.linalg.inv(affine), np.concatenate((np.array(case['lms_fixed']), np.ones((len(case['lms_fixed']), 1))), axis=1).transpose()).transpose()[:,:3]
lms_moving = np.dot(np.linalg.inv(affine), np.concatenate((np.array(case['lms_moving']), np.ones((len(case['lms_moving']), 1))), axis=1).transpose()).transpose()[:,:3]
jac_det = (jacobian_determinant(disp_field[np.newaxis, :, :, :, :]) + 3).clip(0.000000001, 1000000000)
log_jac_det = np.log(jac_det)
lms_fixed_disp_x = map_coordinates(disp_field[0], lms_fixed.transpose())
lms_fixed_disp_y = map_coordinates(disp_field[1], lms_fixed.transpose())
lms_fixed_disp_z = map_coordinates(disp_field[2], lms_fixed.transpose())
lms_fixed_disp = np.array((lms_fixed_disp_x, lms_fixed_disp_y, lms_fixed_disp_z)).transpose()
lms_fixed_warped = lms_fixed + lms_fixed_disp
tre = compute_tre(lms_fixed_warped, lms_moving, spacing)
return {'TRE' : tre.mean(),
'LogJacDetStd' : log_jac_det.std()}
def score_case_task_02(self, *, idx, case):
img_path = case['img']['path']
disp_field_path = case['disp_field']['path']
img = self.nifti_loader.load_image(img_path)
spacing = img.header.get_zooms()
disp_field = self.numpy_loader.load_image(disp_field_path).astype('float32')
disp_field = np.array([zoom(disp_field[i], 2, order=2) for i in range(3)])
lms_fixed = np.array(case['lms_fixed'])
lms_moving = np.array(case['lms_moving'])
jac_det = (jacobian_determinant(disp_field[np.newaxis, :, :, :, :]) + 3).clip(0.000000001, 1000000000)
log_jac_det = np.log(jac_det)
lms_fixed_disp_x = map_coordinates(disp_field[0], lms_fixed.transpose())
lms_fixed_disp_y = map_coordinates(disp_field[1], lms_fixed.transpose())
lms_fixed_disp_z = map_coordinates(disp_field[2], lms_fixed.transpose())
lms_fixed_disp = np.array((lms_fixed_disp_x, lms_fixed_disp_y, lms_fixed_disp_z)).transpose()
lms_fixed_warped = lms_fixed + lms_fixed_disp
tre = compute_tre(lms_fixed_warped, lms_moving, spacing)
return {'TRE' : tre.mean(),
'LogJacDetStd' : np.ma.MaskedArray(log_jac_det, 1-img.get_fdata()[2:-2, 2:-2, 2:-2]).std()}
def score_case_task_03(self, *, idx, case):
fixed_path = case['seg_fixed']['path']
moving_path = case['seg_moving']['path']
disp_field_path = case['disp_field']['path']
fixed = self.nifti_loader.load_image(fixed_path).get_fdata()
spacing = self.nifti_loader.load_image(fixed_path).header.get_zooms()
moving = self.nifti_loader.load_image(moving_path).get_fdata()
disp_field = self.numpy_loader.load_image(disp_field_path).astype('float32')
disp_field = np.array([zoom(disp_field[i], 2, order=2) for i in range(3)])
jac_det = (jacobian_determinant(disp_field[np.newaxis, :, :, :, :]) + 3).clip(0.000000001, 1000000000)
log_jac_det = np.log(jac_det)
D, H, W = fixed.shape
identity = np.meshgrid(np.arange(D), np.arange(H), np.arange(W), indexing='ij')
moving_warped = map_coordinates(moving, identity + disp_field, order=0)
# dice
dice = 0
count = 0
for i in range(1, 14):
if ((fixed==i).sum()==0) or ((moving==i).sum()==0):
continue
dice += compute_dice_coefficient((fixed==i), (moving_warped==i))
count += 1
dice /= count
# hd95
hd95 = 0
count = 0
for i in range(1, 14):
if ((fixed==i).sum()==0) or ((moving==i).sum()==0):
continue
hd95 += compute_robust_hausdorff(compute_surface_distances((fixed==i), (moving_warped==i), np.ones(3)), 95.)
count += 1
hd95 /= count
return {'DiceCoefficient' : dice,
'HausdorffDistance95' : hd95,
'LogJacDetStd' : log_jac_det.std()}
def score_case_task_04(self, *, idx, case):
fixed_path = case['seg_fixed']['path']
moving_path = case['seg_moving']['path']
disp_field_path = case['disp_field']['path']
fixed = self.nifti_loader.load_image(fixed_path).get_fdata()
spacing = self.nifti_loader.load_image(fixed_path).header.get_zooms()
moving = self.nifti_loader.load_image(moving_path).get_fdata()
disp_field = self.numpy_loader.load_image(disp_field_path).astype('float32')
jac_det = (jacobian_determinant(disp_field[np.newaxis, :, :, :, :]) + 3).clip(0.000000001, 1000000000)
log_jac_det = np.log(jac_det)
D, H, W = fixed.shape
identity = np.meshgrid(np.arange(D), np.arange(H), | np.arange(W) | numpy.arange |
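# --- Illustrative sketch (not part of the original file) ---
# compute_tre() is called by the scorers above but is not defined in this
# excerpt. A minimal sketch consistent with how it is used (landmarks in voxel
# coordinates, per-axis spacing in physical units) might look like this; the
# helper actually used by the challenge code may differ.
import numpy as np

def compute_tre(lms_warped, lms_target, spacing):
    # Per-landmark Euclidean distance in physical units (e.g. mm).
    return np.linalg.norm((lms_warped - lms_target) * np.asarray(spacing), axis=1)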
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 22 16:06:27 2020
@author: glatt
"""
import random
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import networkx as nx
#COSTANTS
max_E=200 #Max value for interactions matrix
l=8 #Average preys for predator
b=5 #Energy gain for basal
d=2.5 #Energy dissipation for timestep
delta=20 #Energy for new born
P_death=0.002 #Death chance for timestep
E_rep=200 #Energy needed for reproducing
class IBM:
def __init__(self):
self.max_E=200
self.l=8
self.b=5
self.d=2.5
self.delta=20
self.P_death=0.002
self.E_rep=200
#self.default_par=[self.Area,E_rep,P_death,max_E,delta,l,d,b]
        #create an empty dataframe in which each row will hold
        #the energy and the species of the i-th individual
#Individuals[individual_energy][species][ID]
self.Individuals=np.empty((0,3))
self.last_ID=0
        #create a dataframe containing the info on the evolution of the system as a function of time
self.Population_t=np.empty((0,3),dtype=int)
self.N_deaths=0
self.N_births=0
self.G=nx.DiGraph()
"""
#allowed method : "RM" and "CM"
def build_interactions(self):
        '''It builds an interaction matrix for all the species, depending on which method has been chosen'''
if (self.method=="RM"):
for pred in self.Predators:
                #initially, every species other than pred is a potential prey
preys=np.setdiff1d(self.all_species,pred)
                #remove from the predator's possible preys the animals that prey on it
preys=np.setdiff1d(preys,self.get_predators(pred))
                #check whether the max number of preys l is smaller than the number of available preys
if (len(preys)>=self.l):
                    #randomly draw l preys and initialize the predator-prey interaction coefficients
for idx in random.sample(list(preys),k=self.l):
self.C[pred][idx]=np.random.uniform(0,self.max_E)
else:
                    #redefine the max number of preys
max_n=len(preys)
for idx in random.sample(list(preys),max_n):
self.C[pred][idx]=np.random.uniform(0,self.max_E)
elif(self.method=="CM"):
            #defining prey probability as constant/num. of species
prob=self.l/len(self.all_species)
for predator in self.all_species:
#randomly choosing the number of preys
chances=np.random.uniform(0,1,size=len(self.all_species[:predator]))
n_preys=len(chances[chances<prob])
#choosing only preys of species below the predator one
preys=np.random.choice(self.all_species[:predator],size=n_preys,replace=False)
for prey in preys:
self.C[predator][prey]=np.random.uniform(0,self.max_E)
        #Categorize the basal species and the animals so that the rest of the code stays consistent
self.Basals=[]
i=0
for row in self.C:
if (row.any()==0):
self.Basals=np.append(self.Basals,i)
i+=1
self.Predators=np.setdiff1d(self.all_species,self.Basals)
"""
def get_params(self):
        '''It prints and returns all the ecosystem parameters'''
print("Area=",self.Area,",Method:",self.method)
print("E_rep=",self.E_rep,",Death probability=",self.P_death,",Max E=",self.max_E,",delta=",self.delta)
print("l=",self.l,",dissipation=",self.d,",basals growth=",self.b)
return [self.Area,self.max_E,self.l,self.b,self.d,self.delta,self.P_death,self.E_rep]
def set_params(self,par_arr):
        '''The given argument array modifies all the ecosystem parameters in this order: Area, max_E, l, b, d, delta, P_death, E_rep'''
self.Area=par_arr[0]
self.max_E=par_arr[1]
self.l=par_arr[2]
self.b=par_arr[3]
self.d=par_arr[4]
self.delta=par_arr[5]
self.P_death=par_arr[6]
self.E_rep=par_arr[7]
print("Parameters has been succesfully changed")
print("New parameters are:")
self.get_params()
def set_default(self):
'''sets ecosystem parameters with prefixed default values'''
self.set_params(self.default_par)
def species_alive(self):
        '''Return an array filled with all the unique species IDs living in the ecosystem'''
return np.unique(self.Individuals[:,1])
def current_situation(self):
'''It prints out all the information about the evolution and the current status of the ecosystem'''
print("Population number :",len(self.Individuals))
print("Different species alive :",len(self.species_alive()))
if (len(self.Population_t)>0):
print("Animals/Basals number= ",self.Population_t[-1][0],"/",self.Population_t[-1][1])
else:
print("Ecosystem at time=0 is empty; first call Evolve function")
print("Number of births: ",self.N_births)
print("Number of deaths: ",self.N_deaths)
print("Time passed: ",len(self.Population_t))
def add_individual(self,Species=-1,energy=b):
'''If arg Species=-1, then the species added to the ecosystem is randomly extracted from the pool of allowed species. If Species = some_species_ID, then it adds an individual belonging to that species with the desired energy'''
if (energy==b):
energy=self.b
if(Species==-1):
Species=np.random.choice(self.all_species,1)
self.Individuals=np.append(self.Individuals,[[energy,Species,self.last_ID]],axis=0)
self.last_ID+=1
elif( (np.shape(Species)==()) or (Species in self.all_species) or np.shape(Species)==(1,)):
Species=np.reshape(np.array([Species]),(1,))
self.Individuals=np.append(self.Individuals,[[energy,Species,self.last_ID]],axis=0)
self.last_ID+=1
else:
print("Input args error: insert a valid species number")
def get_predators(self,species):
'''It returns an array filled with all the predators of a certain species'''
return np.where(self.C[:,species]!=0)
def get_preys(self,species):
'''It returns an array filled with all the preys of a certain species'''
return np.where(self.C[species]!=0)
def deaths(self):
        '''It computes a random death chance for each living individual, then checks whether any individual's energy is below a certain threshold and, if so, marks that individual as dead'''
        #draw a random number for each individual
chances=np.random.uniform(0,1,size=len(self.Individuals))
n_deaths=len(chances[chances<=self.P_death])
daily_deaths=n_deaths+len(self.Individuals[self.Individuals[:,0]<=0])
self.N_deaths+=daily_deaths
death_ID=np.random.choice(self.Individuals[:,2],n_deaths)
        #for each of the drawn IDs, set its energy to 0 (death)
for ID in death_ID:
idx=int(np.reshape(np.where(self.Individuals[:,2]==ID),()))
self.Individuals[idx][0]=0
        #all individuals with energy<=0 die
self.Individuals=self.Individuals[self.Individuals[:,0]>0]
return daily_deaths
def births(self):
        '''It checks whether any individual has an energy above a certain reproduction threshold and, if so, adds to the ecosystem as many new individuals as there are pregnant individuals'''
pregnant_index=np.where(self.Individuals[:,0]>self.E_rep)
pregnant_index=np.reshape(pregnant_index,(len(pregnant_index[0]),))
aborts=0
        #as long as daily_space is >0 there is guaranteed to be space available for basals reproduction
#daily_space=self.Area-self.basals_counts()
for i in pregnant_index:
#if (self.Individuals[i][1] in self.Basals and not daily_space):
if (self.Individuals[i][1] in self.Basals and self.basals_counts()>=self.Area):
aborts+=1
else:
#print("Species",self.Individuals[i][1],"is reproducing!")
self.Individuals[i][0]-=self.delta
self.add_individual(Species=self.Individuals[i][1],energy=self.delta)
#if (self.Individuals[i][1] in self.Basals):
#daily_space-=1
self.N_births+=len(pregnant_index)-aborts
return (len(pregnant_index)-aborts)
def plot_pop(self,Animals=True,Basals=True,Species=True,Area=False):
        '''Plots the counts of Individuals through time. Arguments can be changed in order to add more curves to the plot.'''
plt.xlabel("Time")
plt.ylabel("Counts")
if (Animals):
plt.plot(range(len(self.Population_t)),self.Population_t[:,0],label="Animals",alpha=0.7)
if (Basals):
plt.plot(range(len(self.Population_t)),self.Population_t[:,1],label="Basals",alpha=0.7)
if(Area):
plt.axhline(y=self.Area,label="Area",linestyle='--')
if (Species):
plt.plot(range(len(self.Population_t)),self.Population_t[:,2],label="Species",alpha=0.3)
plt.legend()
return plt.show()
def food_web(self,draw=False):
        '''When draw=True, it draws a graphic visualization of the food web built from the ecosystem interactions. It also prints a set of network measurements about the food web.'''
self.G=nx.from_numpy_matrix(self.C,create_using=nx.DiGraph)
if (draw):
#The out_degree value for a species represent its number of preys
d = dict(self.G.out_degree)
low, *_, high = sorted(d.values())
norm = mpl.colors.Normalize(vmin=low, vmax=high, clip=True)
mapper = mpl.cm.ScalarMappable(norm=norm, cmap=mpl.cm.coolwarm)
nx.draw_shell(self.G,
nodelist=d,
node_size=500,
node_color=[mapper.to_rgba(i)
for i in d.values()],
with_labels=True,
font_color='white')
plt.show()
out_degree=self.G.out_degree
in_degree=self.G.in_degree
n_predators=len(self.Predators)
print("Average predators for species:",np.average(in_degree,0)[1])
print("Average preys for predator :",( | np.sum(out_degree,0) | numpy.sum |
"""This module handles all the real work to run the quantum simulation."""
from typing import Tuple, Dict, List, Iterable
from collections import defaultdict
from dataclasses import dataclass
import numpy as np
from numpy.random import choice
from .gates import Instruction, SWAP, Parametric
def get_ground_state(num_qubits: int) -> np.ndarray:
""" Build the zero state given a fixed number of qubits.
Args:
num_qubits: number of qubits
Returns:
        A vector of size 2**num_qubits with all zeros except the first element, which is 1.
"""
vec = np.zeros(2**num_qubits)
vec[0] = 1
return vec
def preprocess_parametric(program: List[Instruction],
feed_dict: Dict[str, complex]) -> List[Instruction]:
"""For all parametric instructions in the list, evaluate them given the feed_dict variables.
Args:
program: A list of instructions to parse.
feed_dict: A mapping of string variables to complex replacements.
Returns:
A new list of instructions without any Parametric gates.
"""
evaluate_vectorized = np.vectorize(
lambda cell: complex(cell.evalf(subs=feed_dict)))
ret = []
for instruction in program:
if isinstance(instruction, Parametric):
unitary = evaluate_vectorized(instruction.unitary)
ret.append(
Instruction(targets=instruction.targets,
unitary=unitary,
commutative=instruction.commutative))
else:
ret.append(instruction)
return ret
def _generate_swap_indices(targets: Iterable[int]) -> List[Tuple[int, int]]:
"""Given a list of indices, return the list of swaps required to move all indices towards the lowest index in the given order.
For example, let us assume that we are given [3, 6]. Then in an array, this would look like the following,
_ _ _ 0 _ _ 1
init 0 1 2 3 4 5 6
the '3' index will remain where it is, as it is first in the given order. The '1' must be moved over to the
'0', or the 3 index. To do that, we perform the following swaps,
_ _ _ 0 _ 1 _
swap[5, 6] 0 1 2 3 4 5 6
_ _ _ 0 1 _ _
swap[4, 5] 0 1 2 3 4 5 6
Therefore we would return [(5, 6), (4, 5)]
Here's another example. What if the 3, 6 were swapped? Meaning, our input looked like [6, 3]
_ _ _ 1 _ _ 0
init 0 1 2 3 4 5 6
As before, we look for the lowest value (0) and move it towards the lowest index (3)
_ _ _ 1 _ 0 _
swap[5, 6] 0 1 2 3 4 5 6
_ _ _ 1 0 _ _
swap[4, 5] 0 1 2 3 4 5 6
_ _ _ 0 1 _ _
swap[3, 4] 0 1 2 3 4 5 6
Since the 1 is already in the correct position, all we need to return is
[(5, 6), (4, 5), (3, 4)]
Args:
targets: An ordered iterable of indices [i_1, i_2, ..., i_n] that are meant to appear in the order given.
Returns:
A list of tuples containing flip instructions. All indices within the tuples are guaranteed to be of the form
(a, a + 1) where a is an integer greater than or equal to 0.
"""
swap_list = []
min_target = min(targets)
max_target = max(targets)
offset = max_target - min_target
tmp = np.full((max_target - min_target + 1, ), np.nan)
for idx, target in enumerate(targets):
tmp[target - min_target] = idx
for idx in range(len(targets)):
tmp_idx = np.where(tmp == idx)[0][0]
for jdx in reversed(range(idx + 1, tmp_idx + 1)):
swap_list.append((jdx - 1 + min_target, jdx + min_target))
tmp[jdx], tmp[jdx - 1] = tmp[jdx - 1], tmp[jdx]
return swap_list
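# --- Illustrative check (not part of the original module) ---
# The two worked examples from the docstring above, written as executable
# assertions (call _demo_generate_swap_indices() to verify them):
def _demo_generate_swap_indices():
    assert _generate_swap_indices([3, 6]) == [(5, 6), (4, 5)]
    assert _generate_swap_indices([6, 3]) == [(5, 6), (4, 5), (3, 4)]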
def preprocess_swaps(program: Iterable[Instruction]) -> List[Instruction]:
"""Generate an equivalent list of constructions s.t. all gates have strictly contiguous inputs.
If all the operators have striclty contiguous inputs, then it becomes easier to generate
operations on them using simple rules like I x A x I x I, etc...
This is accomplished by inserting swaps before and after a instruction that has operations on
non contiguous wires. For example,
[Op(5, 3)] -> [Swap(4, 5), Swap(3, 4), Op(3, 4), Swap(3, 4), Swap(4, 5)]
# This will also leave the base case as a no-op.
[Op(3, 4)] -> [Op(3, 4)]
Args:
program: A list of gates.
Returns:
A new list of gates that is algebraically equivalent, but has strictly contiguous inputs.
"""
ret = []
for instruction in program:
# Grab the min target for reference.
min_target = min(instruction.targets)
# Generate a list of swap indices.
swap_indices = _generate_swap_indices(instruction.targets)
# Convert those swap indices into SWAP operations.
swaps = [SWAP(idx, jdx) for (idx, jdx) in swap_indices]
# Assuming the swapping will work, the new instruction should be correctly contiguous.
new_instruction_targets = tuple(
range(min_target, min_target + len(instruction.targets)))
# Build the new operator.
op = Instruction(new_instruction_targets, instruction.unitary,
instruction.commutative)
# The new set of instructions will swap the gates s.t. they line up, then run the operator, and
# finally undo what it just did.
ret += swaps + [op] + list(reversed(swaps))
return ret
def get_operator(total_qubits: int, instruction: Instruction) -> np.ndarray:
"""Given a unitary operator, builds an operator to run on a specific set of contiguous qubits.
Args:
total_qubits: The total number of qubits that the new operator will adhere to.
        instruction: The instruction whose unitary operator and target qubits are used. The target
            qubits must be strictly contiguous, i.e. 2, 3 or 4, 5 NOT 4, 6.
Returns:
A 2 ^ total_qubits x 2 ^ total_qubits operator.
"""
# This formulation assumes that all numbers are sorted and consecutive.
if len(instruction.targets) > 1 and not np.array_equal(
instruction.targets,
list(range(min(instruction.targets),
max(instruction.targets) + 1))):
raise ValueError(
f'Target qubits must be sorted and consecutive. Got {instruction.targets}'
)
    # Make sure that all of the given target indices are less than the total number of qubits.
if max(instruction.targets) >= total_qubits:
raise IndexError('Index out of bounds exception.')
# If the number of states matches the number of rows of the gate, then return the matrix.
if 2**total_qubits == instruction.unitary.shape[0]:
return instruction.unitary
# This is the smallest qubit in the list by construction.
min_qubit_index = instruction.targets[0]
before = instruction.unitary if min_qubit_index == 0 else np.kron(
np.eye(2**min_qubit_index), instruction.unitary)
qubits_after = total_qubits - min_qubit_index - len(instruction.targets)
return np.kron(before, np.eye(2**(qubits_after)))
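# --- Illustrative check (not part of the original module) ---
# For a single-qubit unitary on qubit 1 of a 3-qubit register, get_operator
# pads with identities on both sides, i.e. I (x) U (x) I. Instruction is the
# dataclass imported above from .gates; the call below assumes the positional
# (targets, unitary, commutative) signature used elsewhere in this module.
def _demo_get_operator():
    x_gate = np.array([[0.0, 1.0], [1.0, 0.0]])
    op = get_operator(3, Instruction((1,), x_gate, True))
    assert np.array_equal(op, np.kron(np.kron(np.eye(2), x_gate), np.eye(2)))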
def run_program(program: List[Instruction], n_qubits: int,
initial_state: np.ndarray) -> np.ndarray:
"""Run a program given a list of instructions.
Args:
program: The list of instructions to use.
n_qubits: The max number of qubits on the instruction.
initial_state: The initial state of the simulation.
Returns:
The new state after running the program.
"""
operator = np.eye(len(initial_state))
for instruction in program:
operator = operator @ get_operator(n_qubits, instruction)
return initial_state.dot(operator)
def _format_binary(num: int, padding: int) -> str:
"""Format a number in binary."""
return format(num, f'#0{padding + 2}b')[2:]
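# --- Illustrative check (not part of the original module) ---
# _format_binary left-pads with zeros to the requested width:
def _demo_format_binary():
    assert _format_binary(5, 4) == '0101'
    assert _format_binary(0, 3) == '000'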
def get_counts(state_vector: np.ndarray, num_shots: int) -> Dict[str, int]:
"""Run a monte-carlo simulation to sample a state vector.
Args:
state_vector: The state vector to sample.
num_shots: The number of shots in the simulation.
Returns:
A dictionary of counts for each binary state.
"""
# Technically if this is weighted by the same scalar, we don't need to normalize
# if we really cared about efficiency.
probs = np.abs(state_vector)**2 / np.linalg.norm(state_vector)**2
states = [
_format_binary(idx, int(np.log2(len(state_vector))))
for idx in range(len(state_vector))
]
samples = | choice(states, num_shots, p=probs) | numpy.random.choice |
from ..cumprod.jit import (
mod_cumprod,
)
from ..inverse.fermat.jit import (
mod_inverse,
)
#TODO cut below
import numpy as np
import numba as nb
@nb.njit
def mod_factorial(n: int, mod: int) -> np.ndarray:
a = | np.arange(n) | numpy.arange |
import os, sys, trimesh, matplotlib.pyplot as pyplot, numpy as np, time, random, progressbar, json
from plyfile import PlyData, PlyElement
from json import encoder
encoder.FLOAT_REPR = lambda o: format(o, '.6f')
from subprocess import call
from collections import deque
from imageio import imread
colors = [[0, 0, 1], [1, 0, 0], [0, 1, 0],
[0.5, 0.5, 0], [0.5, 0, 0.5], [0, 0.5, 0.5],
[0.3, 0.6, 0], [0.6, 0, 0.3], [0.3, 0, 0.6],
[0.6, 0.3, 0], [0.3, 0, 0.6], [0.6, 0, 0.3],
[0.8, 0.2, 0.5]]
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
# ----------------------------------------
# Point Cloud Sampling
# ----------------------------------------
def random_sampling(pc, num_sample, replace=None, return_choices=False):
""" Input is NxC, output is num_samplexC
"""
if replace is None: replace = (pc.shape[0] < num_sample)
choices = np.random.choice(pc.shape[0], num_sample, replace=replace)
if return_choices:
return pc[choices], choices
else:
return pc[choices]
# ----------------------------------------
# Point Cloud/Volume Conversions
# ----------------------------------------
def point_cloud_to_volume_batch(point_clouds, vsize=12, radius=1.0, flatten=True):
""" Input is BxNx3 batch of point cloud
Output is Bx(vsize^3)
"""
vol_list = []
for b in range(point_clouds.shape[0]):
vol = point_cloud_to_volume(np.squeeze(point_clouds[b, :, :]), vsize, radius)
if flatten:
vol_list.append(vol.flatten())
else:
vol_list.append(np.expand_dims(np.expand_dims(vol, -1), 0))
if flatten:
return np.vstack(vol_list)
else:
return np.concatenate(vol_list, 0)
def point_cloud_to_volume(points, vsize, radius=1.0):
""" input is Nx3 points.
output is vsize*vsize*vsize
assumes points are in range [-radius, radius]
"""
vol = np.zeros((vsize, vsize, vsize))
voxel = 2 * radius / float(vsize)
locations = (points + radius) / voxel
locations = locations.astype(int)
vol[locations[:, 0], locations[:, 1], locations[:, 2]] = 1.0
return vol
def volume_to_point_cloud(vol):
""" vol is occupancy grid (value = 0 or 1) of size vsize*vsize*vsize
return Nx3 numpy array.
"""
vsize = vol.shape[0]
assert (vol.shape[1] == vsize and vol.shape[1] == vsize)
points = []
for a in range(vsize):
for b in range(vsize):
for c in range(vsize):
if vol[a, b, c] == 1:
points.append(np.array([a, b, c]))
if len(points) == 0:
return np.zeros((0, 3))
points = np.vstack(points)
return points
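# --- Illustrative check (not part of the original module) ---
# Round trip through the occupancy grid: points are snapped to voxel indices,
# so volume_to_point_cloud returns integer voxel coordinates rather than the
# original continuous points.
def _demo_voxelize_roundtrip():
    pts = np.array([[0.0, 0.0, 0.0], [0.5, -0.5, 0.25]])
    vol = point_cloud_to_volume(pts, vsize=4, radius=1.0)
    assert vol.sum() == 2  # two occupied voxels
    assert volume_to_point_cloud(vol).shape == (2, 3)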
def point_cloud_to_volume_v2_batch(point_clouds, vsize=12, radius=1.0, num_sample=128):
""" Input is BxNx3 a batch of point cloud
Output is BxVxVxVxnum_samplex3
Added on Feb 19
"""
vol_list = []
for b in range(point_clouds.shape[0]):
vol = point_cloud_to_volume_v2(point_clouds[b, :, :], vsize, radius, num_sample)
vol_list.append(np.expand_dims(vol, 0))
return np.concatenate(vol_list, 0)
def point_cloud_to_volume_v2(points, vsize, radius=1.0, num_sample=128):
""" input is Nx3 points
output is vsize*vsize*vsize*num_sample*3
assumes points are in range [-radius, radius]
samples num_sample points in each voxel, if there are less than
num_sample points, replicate the points
Added on Feb 19
"""
vol = np.zeros((vsize, vsize, vsize, num_sample, 3))
voxel = 2 * radius / float(vsize)
locations = (points + radius) / voxel
locations = locations.astype(int)
loc2pc = {}
for n in range(points.shape[0]):
loc = tuple(locations[n, :])
if loc not in loc2pc:
loc2pc[loc] = []
loc2pc[loc].append(points[n, :])
for i in range(vsize):
for j in range(vsize):
for k in range(vsize):
if (i, j, k) not in loc2pc:
vol[i, j, k, :, :] = np.zeros((num_sample, 3))
else:
pc = loc2pc[(i, j, k)] # a list of (3,) arrays
pc = np.vstack(pc) # kx3
# Sample/pad to num_sample points
if pc.shape[0] > num_sample:
pc = random_sampling(pc, num_sample, False)
elif pc.shape[0] < num_sample:
pc = np.lib.pad(pc, ((0, num_sample - pc.shape[0]), (0, 0)), 'edge')
# Normalize
pc_center = (np.array([i, j, k]) + 0.5) * voxel - radius
pc = (pc - pc_center) / voxel # shift and scale
vol[i, j, k, :, :] = pc
return vol
def point_cloud_to_image_batch(point_clouds, imgsize, radius=1.0, num_sample=128):
""" Input is BxNx3 a batch of point cloud
Output is BxIxIxnum_samplex3
Added on Feb 19
"""
img_list = []
for b in range(point_clouds.shape[0]):
img = point_cloud_to_image(point_clouds[b, :, :], imgsize, radius, num_sample)
img_list.append(np.expand_dims(img, 0))
return np.concatenate(img_list, 0)
def point_cloud_to_image(points, imgsize, radius=1.0, num_sample=128):
""" input is Nx3 points
output is imgsize*imgsize*num_sample*3
assumes points are in range [-radius, radius]
samples num_sample points in each pixel, if there are less than
num_sample points, replicate the points
Added on Feb 19
"""
img = np.zeros((imgsize, imgsize, num_sample, 3))
pixel = 2 * radius / float(imgsize)
locations = (points[:, 0:2] + radius) / pixel # Nx2
locations = locations.astype(int)
loc2pc = {}
for n in range(points.shape[0]):
loc = tuple(locations[n, :])
if loc not in loc2pc:
loc2pc[loc] = []
loc2pc[loc].append(points[n, :])
for i in range(imgsize):
for j in range(imgsize):
if (i, j) not in loc2pc:
img[i, j, :, :] = np.zeros((num_sample, 3))
else:
pc = loc2pc[(i, j)]
pc = np.vstack(pc)
if pc.shape[0] > num_sample:
pc = random_sampling(pc, num_sample, False)
elif pc.shape[0] < num_sample:
pc = np.lib.pad(pc, ((0, num_sample - pc.shape[0]), (0, 0)), 'edge')
pc_center = (np.array([i, j]) + 0.5) * pixel - radius
pc[:, 0:2] = (pc[:, 0:2] - pc_center) / pixel
img[i, j, :, :] = pc
return img
# ----------------------------------------
# Point cloud IO
# ----------------------------------------
def read_ply(filename):
""" read XYZ point cloud from filename PLY file """
plydata = PlyData.read(filename)
pc = plydata['vertex'].data
pc_array = np.array([[x, y, z] for x, y, z in pc])
return pc_array
def write_ply(points, filename, text=True):
""" input: Nx3, write points to filename as PLY format. """
points = [(points[i, 0], points[i, 1], points[i, 2]) for i in range(points.shape[0])]
vertex = np.array(points, dtype=[('x', 'f4'), ('y', 'f4'), ('z', 'f4')])
el = PlyElement.describe(vertex, 'vertex', comments=['vertices'])
PlyData([el], text=text).write(filename)
def write_ply_color(points, labels, filename, num_classes=None, colormap=pyplot.cm.jet):
""" Color (N,3) points with labels (N) within range 0 ~ num_classes-1 as OBJ file """
labels = labels.astype(int)
N = points.shape[0]
if num_classes is None:
num_classes = np.max(labels) + 1
else:
assert (num_classes > np.max(labels))
vertex = []
# colors = [pyplot.cm.jet(i / float(num_classes)) for i in range(num_classes)]
colors = [colormap(i / float(num_classes)) for i in range(num_classes)]
for i in range(N):
c = colors[labels[i]]
c = [int(x * 255) for x in c]
vertex.append((points[i, 0], points[i, 1], points[i, 2], c[0], c[1], c[2]))
vertex = np.array(vertex,
dtype=[('x', 'f4'), ('y', 'f4'), ('z', 'f4'), ('red', 'u1'), ('green', 'u1'), ('blue', 'u1')])
el = PlyElement.describe(vertex, 'vertex', comments=['vertices'])
PlyData([el], text=True).write(filename)
return colors
def merge_mesh_with_color(meshes):
face_colors = [mesh.visual.face_colors for mesh in meshes]
vertex_colors = [mesh.visual.vertex_colors for mesh in meshes]
vertice_list = [mesh.vertices for mesh in meshes]
faces_list = [mesh.faces for mesh in meshes]
faces_offset = np.cumsum([v.shape[0] for v in vertice_list])
faces_offset = np.insert(faces_offset, 0, 0)[:-1]
vertices = np.vstack(vertice_list)
faces = np.vstack([face + offset for face, offset in zip(faces_list, faces_offset)])
vertex_colors = np.vstack(vertex_colors)
face_colors = np.vstack(face_colors)
# print(vertex_colors.shape, faces.shape, vertices.shape)
# exit(0)
merged_meshes = trimesh.Trimesh(vertices, faces, face_colors=face_colors, vertex_colors=vertex_colors)
return merged_meshes
def write_ply_bbox_color(vertices, vertex_colors, edges, edge_colors, filename, num_classes=None, colormap=pyplot.cm.jet):
""" Color (N,3) points with labels (N) within range 0 ~ num_classes-1 as OBJ file """
vertex = []
for i in range(len(vertices)):
vertex.append((vertices[i, 0], vertices[i, 1], vertices[i, 2], vertex_colors[i, 0],
vertex_colors[i, 1], vertex_colors[i, 2]))
vertex = np.array(vertex,
dtype=[('x', 'f4'), ('y', 'f4'), ('z', 'f4'), ('red', 'u1'), ('green', 'u1'), ('blue', 'u1')])
edge = []
for i in range(len(edges)):
edge.append((edges[i, 0], edges[i, 1], edge_colors[i, 0], edge_colors[i, 1], edge_colors[i, 2]))
edge = np.array(edge,
dtype=[('vertex1', 'i4'), ('vertex2', 'i4'), ('red', 'u1'), ('green', 'u1'), ('blue', 'u1')])
e1 = PlyElement.describe(vertex, 'vertex', comments=['vertices'])
e2 = PlyElement.describe(edge, 'edge', comments=['edges'])
PlyData([e1, e2], text=True).write(filename)
def write_bbox_color_json(scene_bbox, label, out_filename, num_classes=None, colormap=pyplot.cm.jet):
labels = label.astype(int)
if num_classes is None:
num_classes = np.max(labels) + 1
else:
assert (num_classes > np.max(labels))
colors = [colormap(i / float(num_classes)) for i in range(num_classes)]
used_color = {}
ret = []
for i, box in enumerate(scene_bbox):
c = colors[label[i]]
c = (np.array(c) * 255).astype(np.uint8)
item_i = [float(box[0]), float(box[1]), float(box[2]), float(box[3]), float(box[4]), float(box[5]),
int(c[0]), int(c[1]), int(c[2])]
used_color[label[i]] = c
#item_i = [str(_) for _ in item_i]
ret.append(item_i)
with open(out_filename, 'w') as f:
json.dump(ret, f)
return used_color
def write_bbox_color(scene_bbox, label, out_filename, num_classes=None, colormap=pyplot.cm.jet, edge=False):
"""Export scene bbox to meshes
Args:
scene_bbox: (N x 6 numpy array): xyz pos of center and 3 lengths
out_filename: (string) filename
Note:
To visualize the boxes in MeshLab.
1. Select the objects (the boxes)
2. Filters -> Polygon and Quad Mesh -> Turn into Quad-Dominant Mesh
3. Select Wireframe view.
"""
labels = label.astype(int)
if num_classes is None:
num_classes = np.max(labels) + 1
else:
assert (num_classes > np.max(labels))
def convert_box_to_trimesh_fmt(box, color):
ctr = box[:3]
lengths = box[3:]
trns = np.eye(4)
trns[0:3, 3] = ctr
trns[3, 3] = 1.0
mesh = trimesh.creation.box(lengths, trns)
color = np.array(color) * 255
face_colors = np.array([color] * mesh.faces.shape[0], np.uint8)
vertex_colors = np.array([color] * mesh.vertices.shape[0], np.uint8)
#print(face_colors, vertex_colors, box_trimesh_fmt.vertices, box_trimesh_fmt.faces)
#exit(0)
box_visual = trimesh.visual.create_visual(
vertex_colors=vertex_colors,
face_colors=face_colors,
mesh=mesh)
mesh.visual = box_visual
# print(edges.shape)
# exit(0)
# print(box_trimesh_fmt.visual.face_colors)
#print(face_colors)
#print(box_visual.__dict__)
#print(box_trimesh_fmt.visual.__dict__)
#exit(0)
#, facecolors=color, vertex_color=color)
#print(box_trimesh_fmt.__dict__)
#exit(0)
return mesh
colors = [colormap(i / float(num_classes)) for i in range(num_classes)]
scene = []
ret = []
for i, box in enumerate(scene_bbox):
ret.append(colors[label[i]])
scene.append(convert_box_to_trimesh_fmt(box, colors[label[i]]))
mesh = merge_mesh_with_color(scene)
if edge:
sharp = mesh.face_adjacency_angles > np.radians(40)
edges = mesh.face_adjacency_edges[sharp]
assert edges.shape[0] % 12 == 0
edge_colors = mesh.visual.vertex_colors[edges[:, 0]]
#print(edges.shape, edge_colors.shape)
#exit(0)
write_ply_bbox_color(mesh.vertices, mesh.visual.vertex_colors, edges, edge_colors, out_filename)
else:
trimesh.exchange.export.export_mesh(mesh, out_filename, file_type='ply')
#print(mesh_list.visual.mesh.visual.__dict__)
# save to ply file
# ply = trimesh.exchange.ply.export_ply(mesh_list, encoding='ascii')
#trimesh.exchange.export.export_mesh(mesh_list, out_filename, file_type='ply') #, encoding='ascii')
# print(ply)
# exit(0)
# out_filename
return ret
def write_ply_rgb(points, colors, out_filename, num_classes=None):
""" Color (N,3) points with RGB colors (N,3) within range [0,255] as OBJ file """
colors = colors.astype(int)
N = points.shape[0]
fout = open(out_filename, 'w')
for i in range(N):
c = colors[i, :]
fout.write('v %f %f %f %d %d %d\n' % (points[i, 0], points[i, 1], points[i, 2], c[0], c[1], c[2]))
fout.close()
# ----------------------------------------
# Simple Point cloud and Volume Renderers
# ----------------------------------------
def pyplot_draw_point_cloud(points, output_filename):
""" points is a Nx3 numpy array """
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(points[:, 0], points[:, 1], points[:, 2])
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
pyplot.savefig(output_filename)
def pyplot_draw_volume(vol, output_filename):
""" vol is of size vsize*vsize*vsize
output an image to output_filename
"""
points = volume_to_point_cloud(vol)
pyplot_draw_point_cloud(points, output_filename)
# ----------------------------------------
# Simple Point manipulations
# ----------------------------------------
def rotate_point_cloud(points, rotation_matrix=None):
""" Input: (n,3), Output: (n,3) """
# Rotate in-place around Z axis.
if rotation_matrix is None:
rotation_angle = np.random.uniform() * 2 * np.pi
sinval, cosval = np.sin(rotation_angle), np.cos(rotation_angle)
rotation_matrix = np.array([[cosval, sinval, 0],
[-sinval, cosval, 0],
[0, 0, 1]])
ctr = points.mean(axis=0)
rotated_data = np.dot(points - ctr, rotation_matrix) + ctr
return rotated_data, rotation_matrix
def rotate_pc_along_y(pc, rot_angle):
''' Input ps is NxC points with first 3 channels as XYZ
z is facing forward, x is left ward, y is downward
'''
cosval = np.cos(rot_angle)
sinval = np.sin(rot_angle)
rotmat = np.array([[cosval, -sinval], [sinval, cosval]])
pc[:, [0, 2]] = np.dot(pc[:, [0, 2]], np.transpose(rotmat))
return pc
def roty(t):
"""Rotation about the y-axis."""
c = np.cos(t)
s = np.sin(t)
return np.array([[c, 0, s],
[0, 1, 0],
[-s, 0, c]])
def roty_batch(t):
"""Rotation about the y-axis.
t: (x1,x2,...xn)
return: (x1,x2,...,xn,3,3)
"""
input_shape = t.shape
output = np.zeros(tuple(list(input_shape) + [3, 3]))
c = np.cos(t)
s = np.sin(t)
output[..., 0, 0] = c
output[..., 0, 2] = s
output[..., 1, 1] = 1
output[..., 2, 0] = -s
output[..., 2, 2] = c
return output
def rotz(t):
"""Rotation about the z-axis."""
c = np.cos(t)
s = np.sin(t)
return np.array([[c, -s, 0],
[s, c, 0],
[0, 0, 1]])
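# --- Illustrative check (not part of the original module) ---
# rotz(pi/2) rotates +x onto +y (up to floating point error):
def _demo_rotz():
    v = rotz(np.pi / 2).dot(np.array([1.0, 0.0, 0.0]))
    assert np.allclose(v, [0.0, 1.0, 0.0])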
# ----------------------------------------
# BBox
# ----------------------------------------
def bbox_corner_dist_measure(crnr1, crnr2):
""" compute distance between box corners to replace iou
Args:
crnr1, crnr2: Nx3 points of box corners in camera axis (y points down)
output is a scalar between 0 and 1
"""
dist = sys.maxsize
for y in range(4):
rows = ([(x + y) % 4 for x in range(4)] + [4 + (x + y) % 4 for x in range(4)])
d_ = np.linalg.norm(crnr2[rows, :] - crnr1, axis=1).sum() / 8.0
if d_ < dist:
dist = d_
u = sum([np.linalg.norm(x[0, :] - x[6, :]) for x in [crnr1, crnr2]]) / 2.0
measure = max(1.0 - dist / u, 0)
print(measure)
return measure
def point_cloud_to_bbox(points):
""" Extract the axis aligned box from a pcl or batch of pcls
Args:
points: Nx3 points or BxNx3
output is 6 dim: xyz pos of center and 3 lengths
"""
which_dim = len(points.shape) - 2 # first dim if a single cloud and second if batch
mn, mx = points.min(which_dim), points.max(which_dim)
lengths = mx - mn
cntr = 0.5 * (mn + mx)
return np.concatenate([cntr, lengths], axis=which_dim)
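# --- Illustrative check (not part of the original module) ---
# The axis-aligned box of two opposite corners is centred between them, with
# edge lengths equal to the coordinate ranges:
def _demo_point_cloud_to_bbox():
    box = point_cloud_to_bbox(np.array([[0.0, 0.0, 0.0], [2.0, 4.0, 6.0]]))
    assert np.allclose(box, [1.0, 2.0, 3.0, 2.0, 4.0, 6.0])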
def write_bbox(scene_bbox, out_filename):
"""Export scene bbox to meshes
Args:
scene_bbox: (N x 6 numpy array): xyz pos of center and 3 lengths
out_filename: (string) filename
Note:
To visualize the boxes in MeshLab.
1. Select the objects (the boxes)
2. Filters -> Polygon and Quad Mesh -> Turn into Quad-Dominant Mesh
3. Select Wireframe view.
"""
def convert_box_to_trimesh_fmt(box):
ctr = box[:3]
lengths = box[3:]
trns = np.eye(4)
trns[0:3, 3] = ctr
trns[3, 3] = 1.0
box_trimesh_fmt = trimesh.creation.box(lengths, trns)
return box_trimesh_fmt
scene = trimesh.scene.Scene()
for box in scene_bbox:
scene.add_geometry(convert_box_to_trimesh_fmt(box))
mesh_list = trimesh.util.concatenate(scene.dump())
# save to ply file
trimesh.io.export.export_mesh(mesh_list, out_filename, file_type='ply')
return
def write_oriented_bbox(scene_bbox, out_filename):
"""Export oriented (around Z axis) scene bbox to meshes
Args:
scene_bbox: (N x 7 numpy array): xyz pos of center and 3 lengths (dx,dy,dz)
and heading angle around Z axis.
Y forward, X right, Z upward. heading angle of positive X is 0,
heading angle of positive Y is 90 degrees.
out_filename: (string) filename
"""
def heading2rotmat(heading_angle):
pass
rotmat = np.zeros((3, 3))
rotmat[2, 2] = 1
cosval = np.cos(heading_angle)
sinval = np.sin(heading_angle)
rotmat[0:2, 0:2] = np.array([[cosval, -sinval], [sinval, cosval]])
return rotmat
def convert_oriented_box_to_trimesh_fmt(box):
ctr = box[:3]
lengths = box[3:6]
trns = np.eye(4)
trns[0:3, 3] = ctr
trns[3, 3] = 1.0
trns[0:3, 0:3] = heading2rotmat(box[6])
box_trimesh_fmt = trimesh.creation.box(lengths, trns)
return box_trimesh_fmt
scene = trimesh.scene.Scene()
for box in scene_bbox:
scene.add_geometry(convert_oriented_box_to_trimesh_fmt(box))
mesh_list = trimesh.util.concatenate(scene.dump())
# save to ply file
trimesh.io.export.export_mesh(mesh_list, out_filename, file_type='ply')
return
def write_oriented_bbox_camera_coord(scene_bbox, out_filename):
"""Export oriented (around Y axis) scene bbox to meshes
Args:
scene_bbox: (N x 7 numpy array): xyz pos of center and 3 lengths (dx,dy,dz)
and heading angle around Y axis.
Z forward, X rightward, Y downward. heading angle of positive X is 0,
heading angle of negative Z is 90 degrees.
out_filename: (string) filename
"""
def heading2rotmat(heading_angle):
pass
rotmat = np.zeros((3, 3))
rotmat[1, 1] = 1
cosval = np.cos(heading_angle)
sinval = np.sin(heading_angle)
rotmat[0, :] = np.array([cosval, 0, sinval])
rotmat[2, :] = np.array([-sinval, 0, cosval])
return rotmat
def convert_oriented_box_to_trimesh_fmt(box):
ctr = box[:3]
lengths = box[3:6]
trns = np.eye(4)
trns[0:3, 3] = ctr
trns[3, 3] = 1.0
trns[0:3, 0:3] = heading2rotmat(box[6])
box_trimesh_fmt = trimesh.creation.box(lengths, trns)
return box_trimesh_fmt
scene = trimesh.scene.Scene()
for box in scene_bbox:
scene.add_geometry(convert_oriented_box_to_trimesh_fmt(box))
mesh_list = trimesh.util.concatenate(scene.dump())
# save to ply file
trimesh.io.export.export_mesh(mesh_list, out_filename, file_type='ply')
return
def write_lines_as_cylinders(pcl, filename, rad=0.005, res=64):
"""Create lines represented as cylinders connecting pairs of 3D points
Args:
pcl: (N x 2 x 3 numpy array): N pairs of xyz pos
filename: (string) filename for the output mesh (ply) file
rad: radius for the cylinder
res: number of sections used to create the cylinder
"""
scene = trimesh.scene.Scene()
for src, tgt in pcl:
# compute line
vec = tgt - src
M = trimesh.geometry.align_vectors([0, 0, 1], vec, False)
vec = tgt - src # compute again since align_vectors modifies vec in-place!
M[:3, 3] = 0.5 * src + 0.5 * tgt
height = np.sqrt(np.dot(vec, vec))
scene.add_geometry(trimesh.creation.cylinder(radius=rad, height=height, sections=res, transform=M))
mesh_list = trimesh.util.concatenate(scene.dump())
trimesh.io.export.export_mesh(mesh_list, '%s.ply' % (filename), file_type='ply')
def normalize_pts(pts):
out = | np.array(pts, dtype=np.float32) | numpy.array |
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 28 20:32:25 2022
@author: brili
"""
import cv2
from time import time
import numpy as np
import sys
import os
curr_dir = os.path.dirname(os.path.abspath(__file__))
trt_main_complete_path = os.path.join(curr_dir, '..', '..', 'understand_trt_complete', 'tensorrt_expl_complete')
sys.path.insert(0, trt_main_complete_path)
print('sys path: ', sys.path)
from trt_main_complete import YOLOX_runner
from trt_main_complete import COCO_CLASSES
import argparse
class WebcamViewer:
def __init__(self,
rtsp_url: str,
model_path: str,
font=cv2.FONT_HERSHEY_SIMPLEX,
fontScale = 0.35,
bg_fps=[ | np.array([[10,10],[250,10],[250,40],[10,40]]) | numpy.array |
"""Test for helper.py"""
import pickle
import numpy as np
import pytest
import torch
from sklearn.datasets import make_classification
class TestSliceDict:
def assert_dicts_equal(self, d0, d1):
assert d0.keys() == d1.keys()
for key in d0.keys():
assert np.allclose(d0[key], d1[key])
@pytest.fixture
def data(self):
X, y = make_classification(100, 20, n_informative=10, random_state=0)
return X.astype(np.float32), y
@pytest.fixture(scope='session')
def sldict_cls(self):
from scripts.study_case.ID_12.skorch.helper import SliceDict
return SliceDict
@pytest.fixture
def sldict(self, sldict_cls):
return sldict_cls(
f0=np.arange(4),
f1=np.arange(12).reshape(4, 3),
)
def test_init_inconsistent_shapes(self, sldict_cls):
with pytest.raises(ValueError) as exc:
sldict_cls(f0=np.ones((10, 5)), f1=np.ones((11, 5)))
assert str(exc.value) == (
"Initialized with items of different lengths: 10, 11")
@pytest.mark.parametrize('item', [
np.ones(4),
np.ones((4, 1)),
np.ones((4, 4)),
np.ones((4, 10, 7)),
np.ones((4, 1, 28, 28)),
])
def test_set_item_correct_shape(self, sldict, item):
# does not raise
sldict['f2'] = item
@pytest.mark.parametrize('item', [
np.ones(3),
np.ones((1, 100)),
np.ones((5, 1000)),
np.ones((1, 100, 10)),
np.ones((28, 28, 1, 100)),
])
def test_set_item_incorrect_shape_raises(self, sldict, item):
with pytest.raises(ValueError) as exc:
sldict['f2'] = item
assert str(exc.value) == (
"Cannot set array with shape[0] != 4")
@pytest.mark.parametrize('key', [1, 1.2, (1, 2), [3]])
def test_set_item_incorrect_key_type(self, sldict, key):
with pytest.raises(TypeError) as exc:
sldict[key] = np.ones((100, 5))
assert str(exc.value).startswith("Key must be str, not <")
@pytest.mark.parametrize('item', [
np.ones(3),
np.ones((1, 100)),
np.ones((5, 1000)),
np.ones((1, 100, 10)),
np.ones((28, 28, 1, 100)),
])
def test_update_incorrect_shape_raises(self, sldict, item):
with pytest.raises(ValueError) as exc:
sldict.update({'f2': item})
assert str(exc.value) == (
"Cannot set array with shape[0] != 4")
@pytest.mark.parametrize('item', [123, 'hi', [1, 2, 3]])
def test_set_first_item_no_shape_raises(self, sldict_cls, item):
with pytest.raises(AttributeError):
sldict_cls(f0=item)
@pytest.mark.parametrize('kwargs, expected', [
({}, 0),
(dict(a=np.zeros(12)), 12),
(dict(a=np.zeros(12), b=np.ones((12, 5))), 12),
(dict(a=np.ones((10, 1, 1)), b=np.ones((10, 10)), c=np.ones(10)), 10),
])
def test_len_and_shape(self, sldict_cls, kwargs, expected):
sldict = sldict_cls(**kwargs)
assert len(sldict) == expected
assert sldict.shape == (expected,)
def test_get_item_str_key(self, sldict_cls):
sldict = sldict_cls(a=np.ones(5), b=np.zeros(5))
assert (sldict['a'] == np.ones(5)).all()
assert (sldict['b'] == np.zeros(5)).all()
@pytest.mark.parametrize('sl, expected', [
(slice(0, 1), {'f0': np.array([0]), 'f1': np.array([[0, 1, 2]])}),
(slice(1, 2), {'f0': np.array([1]), 'f1': np.array([[3, 4, 5]])}),
(slice(0, 2), {'f0': np.array([0, 1]),
'f1': np.array([[0, 1, 2], [3, 4, 5]])}),
(slice(0, None), dict(f0=np.arange(4),
f1=np.arange(12).reshape(4, 3))),
(slice(-1, None), {'f0': np.array([3]),
'f1': np.array([[9, 10, 11]])}),
(slice(None, None, -1), dict(f0=np.arange(4)[::-1],
f1=np.arange(12).reshape(4, 3)[::-1])),
])
def test_get_item_slice(self, sldict_cls, sldict, sl, expected):
sliced = sldict[sl]
self.assert_dicts_equal(sliced, sldict_cls(**expected))
def test_slice_list(self, sldict, sldict_cls):
result = sldict[[0, 2]]
expected = sldict_cls(
f0=np.array([0, 2]),
f1=np.array([[0, 1, 2], [6, 7, 8]]))
self.assert_dicts_equal(result, expected)
def test_slice_mask(self, sldict, sldict_cls):
result = sldict[np.array([1, 0, 1, 0]).astype(bool)]
expected = sldict_cls(
f0= | np.array([0, 2]) | numpy.array |
# -*- coding: utf-8 -*-
"""
Created on Fri May 1 19:29:28 2020
@author: Mike
"""
##############################################################################
configfilename = "configfile.csv"
##############################################################################
import numpy as np
import pandas as pd
from datetime import datetime
from tensorflow.keras.layers import Dense, Input, LSTM, Embedding, Dropout, Activation, Flatten
from tensorflow.keras.layers import Bidirectional, GlobalMaxPool1D, BatchNormalization, Conv1D, GlobalMaxPooling1D
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau, CSVLogger
from tensorflow.keras.models import model_from_json
from tensorflow.keras import metrics
#from tensorflow.keras.utils import multi_gpu_model
from tensorflow.keras.optimizers import Nadam, Adam
from sklearn.metrics import mean_squared_error, mean_absolute_error
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
from sklearn.model_selection import train_test_split as split
import tensorflow.keras.backend as K
import tensorflow.keras.callbacks as Kc
# TF2: Mixed precision
#from tensorflow.keras.mixed_precision import experimental as mixed_precision
#mixed_precision.set_policy('mixed_float16')
# TF2: Disable eager execution
import tensorflow as tf
tf.compat.v1.disable_eager_execution()
tf.get_logger().setLevel('INFO')
import time
def AE(y, y_hat):
AE = np.abs(y - y_hat)
return AE
# Aggregated metrics:
# MAE = np.mean(AE)
# MEP = np.mean(EP)
def ear(table, alpha=2):
#absolute error
table["AE"] = AE(table["y"], table["y_pred"])
#temporal decay
table["AE_td"] = table["AE"]/alpha*table["prefix_number"]
# Calculate metrics:
# MAE:
MAE_td = np.mean(table["AE_td"])
#Aggregated metrics:
table["MAE_td"] = MAE_td
return table
def acc(table):
"""
    Evaluates prediction accuracy (absolute error / MAE)
Input:
Inference table with relevant variables
T = "num_events"
t = "event_number"
case = "caseid"
y = "y"
y_pred = "y_pred"
Output:
EP, AE, MEP, MAE, MAEPE
"""
# Calculate metrics:
#Absolute error
table["AE"] = AE(table["y"], table["y_pred"])
# MAE:
MAE = np.mean(table["AE"])
#Aggregated metrics:
table["MAE"] = MAE
return table
def Earliness(Inference_test, parallel=False, EAR=True, TS=True):
start_time = time.time()
"""
Earliness
"""
print("Earliness...")
Inference_test = acc(Inference_test)
Inference_test = ear(Inference_test, alpha=1)
end_time = time.time()
Time_sec = end_time - start_time
print(Time_sec)
return Inference_test
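# --- Illustrative example (not part of the original module) ---
# Earliness() expects a pandas DataFrame with at least the columns "y",
# "y_pred" and "prefix_number"; it adds AE, MAE, AE_td and MAE_td columns
# (the timing prints are ignored here):
def _demo_earliness():
    demo = pd.DataFrame({"y": [10.0, 4.0], "y_pred": [8.0, 5.0], "prefix_number": [1, 2]})
    demo = Earliness(demo)
    assert demo["AE"].tolist() == [2.0, 1.0]
    assert demo["AE_td"].tolist() == [2.0, 2.0]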
"""
Evaluation pipeline
"""
def evaluate_model(data_objects, transform="log"):
print("Initial-model evaluation..")
# Load config-file
configfile = pd.read_csv(configfilename)
Project_dir = configfile.Project_dir.values[0]
RUN = configfile["RUN"][0]
F_dataset = configfile["F_dataset"][0]
# Load inference tables
#Inference_train = data_objects["Inference_train"]
Inference_test = data_objects["Inference_test"]
# Load the model
model = data_objects["model"]
# Load the training data
x_test, y_test = data_objects["x_test"], data_objects["y_test"]
# Predict on inference table
Inference_test["y_pred"] = model.predict(x_test, verbose=1, batch_size=2048)
# Save information about experiment
Inference_test["RUN"] = [RUN]*len(Inference_test)
Inference_test["F_dataset"] = [F_dataset]*len(Inference_test)
# Time information
time_taken = 0
now = datetime.now() # current date and time
timestamp = now.strftime("%Y/%m/%d, %H:%M:%S")
if "train_time" in data_objects:
time_taken = data_objects["train_time"]
# Get model training info
model_params = data_objects["model_params"]
epochs = data_objects["epochs"]
###################################################################
#Log transform inverse
if transform == "log":
Inference_test["y_pred"] = np.exp(Inference_test["y_pred"])-1
#Inference_test["y"] = np.exp(Inference_test["y"])-1
#Range transform inverse
if transform == "range":
Inference_test["y_pred"] = (Inference_test["y_pred"] * (data_objects["y_test_max"] - data_objects["y_test_min"])) + data_objects["y_test_min"]
###################################################################
# Calculate earliness
Inference_test = Earliness(Inference_test, EAR=True)
########## ACCURACY #######################
mae_test = | np.mean(Inference_test["MAE"]) | numpy.mean |
import numpy as np
import keras
import json
import utils
from os import listdir, path
def convert_x_dict_to_list(x_dict, viewport):
viewport["width"] = viewport["width"] if viewport["width"] != 0 else 1
viewport["height"] = viewport["height"] if viewport["height"] != 0 else 1
return np.array([
float(x_dict["x"]) / viewport["width"],
float(x_dict["y"]) / viewport["height"],
float(x_dict["width"]) / viewport["width"],
float(x_dict["height"]) / viewport["height"]
])
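# --- Illustrative check (not part of the original module) ---
# A 100x50 element at (10, 20) inside an 800x600 viewport is expressed as
# fractions of the viewport size:
def _demo_convert_x_dict_to_list():
    rect = {"x": 10, "y": 20, "width": 100, "height": 50}
    out = convert_x_dict_to_list(rect, {"width": 800, "height": 600})
    assert np.allclose(out, [10 / 800.0, 20 / 600.0, 100 / 800.0, 50 / 600.0])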
def convert_y_to_word_vectors(y):
tokensSplit = y.split(".")
vectors = map(lambda tokenWithoutDot: utils.convert_word_to_vector("." + tokenWithoutDot), tokensSplit[1:])
return vectors
def generate_data_for_file(file_path):
Xs = []
decoderXs = []
Ys = []
with open(file_path) as json_file:
data = json.load(json_file)
list_x = np.array(map(lambda rect: convert_x_dict_to_list(rect, data["viewport"]), data["x"]))
y = convert_y_to_word_vectors(data["y"])
y.insert(0, utils.convert_word_to_vector(".start"))
decoderX = []
decoderY = []
for t, word_vector in enumerate(y):
decoderX.append(word_vector)
if t > 0:
decoderY.append(word_vector)
decoderY.append(utils.convert_word_to_vector(".stop"))
return list_x, decoderX, decoderY
def preprocessX(X):
return sorted(X, key=lambda el: (el[1], el[0]), reverse=True)
class DatasetGenerator(keras.utils.Sequence):
def __init__(self, dir_path, batch_size=32):
self.used_data_files = []
self.batch_size = batch_size
self.dir_path = dir_path
self.data_files = listdir(dir_path)
def __len__(self):
"Denotes the number of batches per epoch"
return int(np.floor(len(self.data_files)) / self.batch_size)
def __getitem__(self, index):
files = self.data_files[index * self.batch_size: (index + 1) * self.batch_size]
X, decoderX, y = self.__generate_data(files)
return [X, decoderX], y
def __generate_data(self, files):
Xs = []
decoderXs = []
ys = []
for file_name in files:
self.used_data_files.append(file_name)
if len(self.used_data_files) % 100 == 0:
with open("open_data_files.json", "w+") as f:
json.dump(self.used_data_files, f)
X, decoderX, y = generate_data_for_file(path.join(self.dir_path, file_name))
X = preprocessX(X)
Xs.append(X)
decoderXs.append(decoderX)
ys.append(y)
return | np.array(Xs) | numpy.array |
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import numpy as np
from pyscf import lib
from pyscf import scf
from pyscf.lib import logger
from pyscf.cc import ccsd
from pyscf.cc import uccsd
from pyscf.cc import eom_rccsd
from pyscf.cc import eom_gccsd
from pyscf.cc import addons
########################################
# EOM-IP-CCSD
########################################
class EOMIP(eom_gccsd.EOMIP):
def __init__(self, cc):
gcc = addons.convert_to_gccsd(cc)
eom_gccsd.EOMIP.__init__(self, gcc)
########################################
# EOM-EA-CCSD
########################################
class EOMEA(eom_gccsd.EOMEA):
def __init__(self, cc):
gcc = addons.convert_to_gccsd(cc)
eom_gccsd.EOMEA.__init__(self, gcc)
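# --- Illustrative usage sketch (not part of the original module) ---
# These wrappers convert a UCCSD object to GCCSD and reuse the GCCSD EOM
# solvers. A typical driver might look like the following (a hedged sketch,
# not taken from the PySCF examples; the exact return values of kernel()
# follow the parent eom_gccsd classes):
#
#     from pyscf import gto, scf, cc
#     mol = gto.M(atom='O 0 0 0; H 0 0 1; H 0 1 0', basis='sto-3g')
#     mf = scf.UHF(mol).run()
#     mycc = cc.UCCSD(mf).run()
#     e_ip, v_ip = EOMIP(mycc).kernel(nroots=2)   # ionization energies
#     e_ea, v_ea = EOMEA(mycc).kernel(nroots=2)   # electron-attachment energies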
########################################
# EOM-EE-CCSD
########################################
def eeccsd(eom, nroots=1, koopmans=False, guess=None, eris=None, imds=None):
'''Calculate N-electron neutral excitations via EOM-EE-CCSD.
Kwargs:
nroots : int
Number of roots (eigenvalues) requested
koopmans : bool
Calculate Koopmans'-like (1p1h) excitations only, targeting via
overlap.
guess : list of ndarray
List of guess vectors to use for targeting via overlap.
'''
if eris is None: eris = eom._cc.ao2mo()
if imds is None: imds = eom.make_imds(eris)
spinvec_size = eom.vector_size()
nroots = min(nroots, spinvec_size)
diag_ee, diag_sf = eom.get_diag(imds)
guess_ee = []
guess_sf = []
if guess and guess[0].size == spinvec_size:
raise NotImplementedError
#TODO: initial guess from GCCSD EOM amplitudes
#orbspin = scf.addons.get_ghf_orbspin(eris.mo_coeff)
#nmo = np.sum(eom.nmo)
#nocc = np.sum(eom.nocc)
#for g in guess:
# r1, r2 = eom_gccsd.vector_to_amplitudes_ee(g, nmo, nocc)
# r1aa = r1[orbspin==0][:,orbspin==0]
# r1ab = r1[orbspin==0][:,orbspin==1]
# if abs(r1aa).max() > 1e-7:
# r1 = addons.spin2spatial(r1, orbspin)
# r2 = addons.spin2spatial(r2, orbspin)
# guess_ee.append(eom.amplitudes_to_vector(r1, r2))
# else:
# r1 = spin2spatial_eomsf(r1, orbspin)
# r2 = spin2spatial_eomsf(r2, orbspin)
# guess_sf.append(amplitudes_to_vector_eomsf(r1, r2))
# r1 = r2 = r1aa = r1ab = g = None
#nroots_ee = len(guess_ee)
#nroots_sf = len(guess_sf)
elif guess:
for g in guess:
if g.size == diag_ee.size:
guess_ee.append(g)
else:
guess_sf.append(g)
nroots_ee = len(guess_ee)
nroots_sf = len(guess_sf)
else:
dee = np.sort(diag_ee)[:nroots]
dsf = np.sort(diag_sf)[:nroots]
dmax = np.sort(np.hstack([dee,dsf]))[nroots-1]
nroots_ee = np.count_nonzero(dee <= dmax)
nroots_sf = np.count_nonzero(dsf <= dmax)
guess_ee = guess_sf = None
def eomee_sub(cls, nroots, guess, diag):
ee_sub = cls(eom._cc)
ee_sub.__dict__.update(eom.__dict__)
e, v = ee_sub.kernel(nroots, koopmans, guess, eris, imds, diag=diag)
if nroots == 1:
e, v = [e], [v]
ee_sub.converged = [ee_sub.converged]
return list(ee_sub.converged), list(e), list(v)
e0 = e1 = []
v0 = v1 = []
conv0 = conv1 = []
if nroots_ee > 0:
conv0, e0, v0 = eomee_sub(EOMEESpinKeep, nroots_ee, guess_ee, diag_ee)
if nroots_sf > 0:
conv1, e1, v1 = eomee_sub(EOMEESpinFlip, nroots_sf, guess_sf, diag_sf)
e = np.hstack([e0,e1])
idx = e.argsort()
e = e[idx]
conv = conv0 + conv1
conv = [conv[x] for x in idx]
v = v0 + v1
v = [v[x] for x in idx]
if nroots == 1:
conv = conv[0]
e = e[0]
v = v[0]
eom.converged = conv
eom.e = e
eom.v = v
return eom.e, eom.v
def eomee_ccsd(eom, nroots=1, koopmans=False, guess=None,
eris=None, imds=None, diag=None):
if eris is None: eris = eom._cc.ao2mo()
if imds is None: imds = eom.make_imds(eris)
eom.converged, eom.e, eom.v \
= eom_rccsd.kernel(eom, nroots, koopmans, guess, imds=imds, diag=diag)
return eom.e, eom.v
def eomsf_ccsd(eom, nroots=1, koopmans=False, guess=None,
eris=None, imds=None, diag=None):
'''Spin flip EOM-EE-CCSD
'''
return eomee_ccsd(eom, nroots, koopmans, guess, eris, imds, diag)
amplitudes_to_vector_ee = uccsd.amplitudes_to_vector
vector_to_amplitudes_ee = uccsd.vector_to_amplitudes
def amplitudes_to_vector_eomsf(t1, t2, out=None):
t1ab, t1ba = t1
t2baaa, t2aaba, t2abbb, t2bbab = t2
nocca, nvirb = t1ab.shape
noccb, nvira = t1ba.shape
otrila = np.tril_indices(nocca, k=-1)
otrilb = np.tril_indices(noccb, k=-1)
vtrila = np.tril_indices(nvira, k=-1)
vtrilb = np.tril_indices(nvirb, k=-1)
baaa = np.take(t2baaa.reshape(noccb*nocca,nvira*nvira),
vtrila[0]*nvira+vtrila[1], axis=1)
abbb = np.take(t2abbb.reshape(nocca*noccb,nvirb*nvirb),
vtrilb[0]*nvirb+vtrilb[1], axis=1)
vector = np.hstack((t1ab.ravel(), t1ba.ravel(),
baaa.ravel(), t2aaba[otrila].ravel(),
abbb.ravel(), t2bbab[otrilb].ravel()))
return vector
def vector_to_amplitudes_eomsf(vector, nmo, nocc):
nocca, noccb = nocc
nmoa, nmob = nmo
nvira, nvirb = nmoa-nocca, nmob-noccb
t1ab = vector[:nocca*nvirb].reshape(nocca,nvirb).copy()
t1ba = vector[nocca*nvirb:nocca*nvirb+noccb*nvira].reshape(noccb,nvira).copy()
pvec = vector[t1ab.size+t1ba.size:]
nbaaa = noccb*nocca*nvira*(nvira-1)//2
naaba = nocca*(nocca-1)//2*nvirb*nvira
nabbb = nocca*noccb*nvirb*(nvirb-1)//2
nbbab = noccb*(noccb-1)//2*nvira*nvirb
t2baaa = np.zeros((noccb*nocca,nvira*nvira), dtype=vector.dtype)
t2aaba = np.zeros((nocca*nocca,nvirb*nvira), dtype=vector.dtype)
t2abbb = np.zeros((nocca*noccb,nvirb*nvirb), dtype=vector.dtype)
t2bbab = np.zeros((noccb*noccb,nvira*nvirb), dtype=vector.dtype)
otrila = np.tril_indices(nocca, k=-1)
otrilb = np.tril_indices(noccb, k=-1)
vtrila = np.tril_indices(nvira, k=-1)
vtrilb = np.tril_indices(nvirb, k=-1)
oidxab = np.arange(nocca*noccb, dtype=np.int32)
vidxab = np.arange(nvira*nvirb, dtype=np.int32)
v = pvec[:nbaaa].reshape(noccb*nocca,-1)
lib.takebak_2d(t2baaa, v, oidxab, vtrila[0]*nvira+vtrila[1])
lib.takebak_2d(t2baaa,-v, oidxab, vtrila[1]*nvira+vtrila[0])
v = pvec[nbaaa:nbaaa+naaba].reshape(-1,nvirb*nvira)
lib.takebak_2d(t2aaba, v, otrila[0]*nocca+otrila[1], vidxab)
lib.takebak_2d(t2aaba,-v, otrila[1]*nocca+otrila[0], vidxab)
v = pvec[nbaaa+naaba:nbaaa+naaba+nabbb].reshape(nocca*noccb,-1)
lib.takebak_2d(t2abbb, v, oidxab, vtrilb[0]*nvirb+vtrilb[1])
lib.takebak_2d(t2abbb,-v, oidxab, vtrilb[1]*nvirb+vtrilb[0])
v = pvec[nbaaa+naaba+nabbb:].reshape(-1,nvira*nvirb)
lib.takebak_2d(t2bbab, v, otrilb[0]*noccb+otrilb[1], vidxab)
lib.takebak_2d(t2bbab,-v, otrilb[1]*noccb+otrilb[0], vidxab)
t2baaa = t2baaa.reshape(noccb,nocca,nvira,nvira)
t2aaba = t2aaba.reshape(nocca,nocca,nvirb,nvira)
t2abbb = t2abbb.reshape(nocca,noccb,nvirb,nvirb)
t2bbab = t2bbab.reshape(noccb,noccb,nvira,nvirb)
return (t1ab,t1ba), (t2baaa, t2aaba, t2abbb, t2bbab)
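# Added illustrative check (not from the original source): the spin-flip vector
# stores t1ab, t1ba and only the symmetry-unique elements of the four R2 blocks,
# so packing right after unpacking reproduces the vector exactly.  The shapes
# below are arbitrary example values; `np` is numpy as used throughout this module.
#:nocca, noccb, nvira, nvirb = 3, 2, 4, 5
#:nvec = (nocca*nvirb + noccb*nvira
#:        + noccb*nocca*nvira*(nvira-1)//2 + nocca*(nocca-1)//2*nvirb*nvira
#:        + nocca*noccb*nvirb*(nvirb-1)//2 + noccb*(noccb-1)//2*nvira*nvirb)
#:vec = np.random.rand(nvec)
#:r1, r2 = vector_to_amplitudes_eomsf(vec, (nocca+nvira,noccb+nvirb), (nocca,noccb))
#:assert abs(amplitudes_to_vector_eomsf(r1, r2) - vec).max() < 1e-12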
def spatial2spin_eomsf(rx, orbspin):
'''Convert EOM spatial R1,R2 to spin-orbital R1,R2'''
if len(rx) == 2: # r1
r1ab, r1ba = rx
nocca, nvirb = r1ab.shape
noccb, nvira = r1ba.shape
else:
r2baaa,r2aaba,r2abbb,r2bbab = rx
noccb, nocca, nvira = r2baaa.shape[:3]
nvirb = r2aaba.shape[2]
nocc = nocca + noccb
nvir = nvira + nvirb
idxoa = np.where(orbspin[:nocc] == 0)[0]
idxob = np.where(orbspin[:nocc] == 1)[0]
idxva = np.where(orbspin[nocc:] == 0)[0]
idxvb = np.where(orbspin[nocc:] == 1)[0]
if len(rx) == 2: # r1
r1 = np.zeros((nocc,nvir), dtype=r1ab.dtype)
lib.takebak_2d(r1, r1ab, idxoa, idxvb)
lib.takebak_2d(r1, r1ba, idxob, idxva)
return r1
else:
r2 = np.zeros((nocc**2,nvir**2), dtype=r2aaba.dtype)
idxoaa = idxoa[:,None] * nocc + idxoa
idxoab = idxoa[:,None] * nocc + idxob
idxoba = idxob[:,None] * nocc + idxoa
idxobb = idxob[:,None] * nocc + idxob
idxvaa = idxva[:,None] * nvir + idxva
idxvab = idxva[:,None] * nvir + idxvb
idxvba = idxvb[:,None] * nvir + idxva
idxvbb = idxvb[:,None] * nvir + idxvb
r2baaa = r2baaa.reshape(noccb*nocca,nvira*nvira)
r2aaba = r2aaba.reshape(nocca*nocca,nvirb*nvira)
r2abbb = r2abbb.reshape(nocca*noccb,nvirb*nvirb)
r2bbab = r2bbab.reshape(noccb*noccb,nvira*nvirb)
lib.takebak_2d(r2, r2baaa, idxoba.ravel(), idxvaa.ravel())
lib.takebak_2d(r2, r2aaba, idxoaa.ravel(), idxvba.ravel())
lib.takebak_2d(r2, r2abbb, idxoab.ravel(), idxvbb.ravel())
lib.takebak_2d(r2, r2bbab, idxobb.ravel(), idxvab.ravel())
lib.takebak_2d(r2, r2baaa, idxoab.T.ravel(), idxvaa.T.ravel())
lib.takebak_2d(r2, r2aaba, idxoaa.T.ravel(), idxvab.T.ravel())
lib.takebak_2d(r2, r2abbb, idxoba.T.ravel(), idxvbb.T.ravel())
lib.takebak_2d(r2, r2bbab, idxobb.T.ravel(), idxvba.T.ravel())
return r2.reshape(nocc,nocc,nvir,nvir)
def spin2spatial_eomsf(rx, orbspin):
'''Convert EOM spin-orbital R1,R2 to spatial R1,R2'''
if rx.ndim == 2: # r1
nocc, nvir = rx.shape
else:
nocc, nvir = rx.shape[1:3]
idxoa = np.where(orbspin[:nocc] == 0)[0]
idxob = np.where(orbspin[:nocc] == 1)[0]
idxva = np.where(orbspin[nocc:] == 0)[0]
idxvb = np.where(orbspin[nocc:] == 1)[0]
nocca = len(idxoa)
noccb = len(idxob)
nvira = len(idxva)
nvirb = len(idxvb)
if rx.ndim == 2:
r1ab = lib.take_2d(rx, idxoa, idxvb)
r1ba = lib.take_2d(rx, idxob, idxva)
return r1ab, r1ba
else:
idxoaa = idxoa[:,None] * nocc + idxoa
idxoab = idxoa[:,None] * nocc + idxob
idxoba = idxob[:,None] * nocc + idxoa
idxobb = idxob[:,None] * nocc + idxob
idxvaa = idxva[:,None] * nvir + idxva
idxvab = idxva[:,None] * nvir + idxvb
idxvba = idxvb[:,None] * nvir + idxva
idxvbb = idxvb[:,None] * nvir + idxvb
r2 = rx.reshape(nocc**2,nvir**2)
r2baaa = lib.take_2d(r2, idxoba.ravel(), idxvaa.ravel())
r2aaba = lib.take_2d(r2, idxoaa.ravel(), idxvba.ravel())
r2abbb = lib.take_2d(r2, idxoab.ravel(), idxvbb.ravel())
r2bbab = lib.take_2d(r2, idxobb.ravel(), idxvab.ravel())
r2baaa = r2baaa.reshape(noccb,nocca,nvira,nvira)
r2aaba = r2aaba.reshape(nocca,nocca,nvirb,nvira)
r2abbb = r2abbb.reshape(nocca,noccb,nvirb,nvirb)
r2bbab = r2bbab.reshape(noccb,noccb,nvira,nvirb)
return r2baaa,r2aaba,r2abbb,r2bbab
# Ref: <NAME>, and <NAME>. Chem. Theory Comput. 10, 5567 (2014) Eqs.(9)-(10)
# Note: Last line in Eq. (10) is superfluous.
# See, e.g. Gwaltney, Nooijen, and Bartlett, Chem. Phys. Lett. 248, 189 (1996)
def eomee_ccsd_matvec(eom, vector, imds=None):
if imds is None: imds = eom.make_imds()
t1, t2, eris = imds.t1, imds.t2, imds.eris
t1a, t1b = t1
t2aa, t2ab, t2bb = t2
nocca, noccb, nvira, nvirb = t2ab.shape
nmoa, nmob = nocca+nvira, noccb+nvirb
r1, r2 = vector_to_amplitudes_ee(vector, (nmoa,nmob), (nocca,noccb))
r1a, r1b = r1
r2aa, r2ab, r2bb = r2
#:eris_vvvv = ao2mo.restore(1, np.asarray(eris.vvvv), nvirb)
#:eris_VVVV = ao2mo.restore(1, np.asarray(eris.VVVV), nvirb)
#:eris_vvVV = _restore(np.asarray(eris.vvVV), nvira, nvirb)
#:Hr2aa += lib.einsum('ijef,aebf->ijab', tau2aa, eris_vvvv) * .5
#:Hr2bb += lib.einsum('ijef,aebf->ijab', tau2bb, eris_VVVV) * .5
#:Hr2ab += lib.einsum('iJeF,aeBF->iJaB', tau2ab, eris_vvVV)
tau2aa, tau2ab, tau2bb = uccsd.make_tau(r2, r1, t1, 2)
Hr2aa, Hr2ab, Hr2bb = eom._cc._add_vvvv(None, (tau2aa,tau2ab,tau2bb), eris)
Hr2aa *= .5
Hr2bb *= .5
tau2aa = tau2ab = tau2bb = None
Hr1a = lib.einsum('ae,ie->ia', imds.Fvva, r1a)
Hr1a -= lib.einsum('mi,ma->ia', imds.Fooa, r1a)
Hr1a += np.einsum('me,imae->ia',imds.Fova, r2aa)
Hr1a += np.einsum('ME,iMaE->ia',imds.Fovb, r2ab)
Hr1b = lib.einsum('ae,ie->ia', imds.Fvvb, r1b)
Hr1b -= lib.einsum('mi,ma->ia', imds.Foob, r1b)
Hr1b += np.einsum('me,imae->ia',imds.Fovb, r2bb)
Hr1b += np.einsum('me,mIeA->IA',imds.Fova, r2ab)
Hr2aa += lib.einsum('mnij,mnab->ijab', imds.woooo, r2aa) * .25
Hr2bb += lib.einsum('mnij,mnab->ijab', imds.wOOOO, r2bb) * .25
Hr2ab += lib.einsum('mNiJ,mNaB->iJaB', imds.woOoO, r2ab)
Hr2aa += lib.einsum('be,ijae->ijab', imds.Fvva, r2aa)
Hr2bb += lib.einsum('be,ijae->ijab', imds.Fvvb, r2bb)
Hr2ab += lib.einsum('BE,iJaE->iJaB', imds.Fvvb, r2ab)
Hr2ab += lib.einsum('be,iJeA->iJbA', imds.Fvva, r2ab)
Hr2aa -= lib.einsum('mj,imab->ijab', imds.Fooa, r2aa)
Hr2bb -= lib.einsum('mj,imab->ijab', imds.Foob, r2bb)
Hr2ab -= lib.einsum('MJ,iMaB->iJaB', imds.Foob, r2ab)
Hr2ab -= lib.einsum('mj,mIaB->jIaB', imds.Fooa, r2ab)
#:tau2aa, tau2ab, tau2bb = uccsd.make_tau(r2, r1, t1, 2)
#:eris_ovvv = lib.unpack_tril(np.asarray(eris.ovvv).reshape(nocca*nvira,-1)).reshape(nocca,nvira,nvira,nvira)
#:eris_ovVV = lib.unpack_tril(np.asarray(eris.ovVV).reshape(nocca*nvira,-1)).reshape(nocca,nvira,nvirb,nvirb)
#:eris_OVvv = lib.unpack_tril(np.asarray(eris.OVvv).reshape(noccb*nvirb,-1)).reshape(noccb,nvirb,nvira,nvira)
#:eris_OVVV = lib.unpack_tril(np.asarray(eris.OVVV).reshape(noccb*nvirb,-1)).reshape(noccb,nvirb,nvirb,nvirb)
#:Hr1a += lib.einsum('mfae,imef->ia', eris_ovvv, r2aa)
#:tmpaa = lib.einsum('meaf,ijef->maij', eris_ovvv, tau2aa)
#:Hr2aa+= lib.einsum('mb,maij->ijab', t1a, tmpaa)
#:tmpa = lib.einsum('mfae,me->af', eris_ovvv, r1a)
#:tmpa-= lib.einsum('meaf,me->af', eris_ovvv, r1a)
#:Hr1b += lib.einsum('mfae,imef->ia', eris_OVVV, r2bb)
#:tmpbb = lib.einsum('meaf,ijef->maij', eris_OVVV, tau2bb)
#:Hr2bb+= lib.einsum('mb,maij->ijab', t1b, tmpbb)
#:tmpb = lib.einsum('mfae,me->af', eris_OVVV, r1b)
#:tmpb-= lib.einsum('meaf,me->af', eris_OVVV, r1b)
#:Hr1b += lib.einsum('mfAE,mIfE->IA', eris_ovVV, r2ab)
#:tmpab = lib.einsum('meAF,iJeF->mAiJ', eris_ovVV, tau2ab)
#:Hr2ab-= lib.einsum('mb,mAiJ->iJbA', t1a, tmpab)
#:tmpb-= lib.einsum('meAF,me->AF', eris_ovVV, r1a)
#:Hr1a += lib.einsum('MFae,iMeF->ia', eris_OVvv, r2ab)
#:tmpba =-lib.einsum('MEaf,iJfE->MaiJ', eris_OVvv, tau2ab)
#:Hr2ab+= lib.einsum('MB,MaiJ->iJaB', t1b, tmpba)
#:tmpa-= lib.einsum('MEaf,ME->af', eris_OVvv, r1b)
tau2aa = uccsd.make_tau_aa(r2aa, r1a, t1a, 2)
mem_now = lib.current_memory()[0]
max_memory = max(0, eom.max_memory - mem_now)
tmpa = np.zeros((nvira,nvira))
tmpb = np.zeros((nvirb,nvirb))
blksize = min(nocca, max(ccsd.BLKMIN, int(max_memory*1e6/8/(nvira**3*3))))
for p0, p1 in lib.prange(0, nocca, blksize):
ovvv = eris.get_ovvv(slice(p0,p1)) # ovvv = eris.ovvv[p0:p1]
Hr1a += lib.einsum('mfae,imef->ia', ovvv, r2aa[:,p0:p1])
tmpaa = lib.einsum('meaf,ijef->maij', ovvv, tau2aa)
Hr2aa+= lib.einsum('mb,maij->ijab', t1a[p0:p1], tmpaa)
tmpa+= lib.einsum('mfae,me->af', ovvv, r1a[p0:p1])
tmpa-= lib.einsum('meaf,me->af', ovvv, r1a[p0:p1])
ovvv = tmpaa = None
tau2aa = None
tau2bb = uccsd.make_tau_aa(r2bb, r1b, t1b, 2)
blksize = min(noccb, max(ccsd.BLKMIN, int(max_memory*1e6/8/(nvirb**3*3))))
for p0, p1 in lib.prange(0, noccb, blksize):
OVVV = eris.get_OVVV(slice(p0,p1)) # OVVV = eris.OVVV[p0:p1]
Hr1b += lib.einsum('mfae,imef->ia', OVVV, r2bb[:,p0:p1])
tmpbb = lib.einsum('meaf,ijef->maij', OVVV, tau2bb)
Hr2bb+= lib.einsum('mb,maij->ijab', t1b[p0:p1], tmpbb)
tmpb+= lib.einsum('mfae,me->af', OVVV, r1b[p0:p1])
tmpb-= lib.einsum('meaf,me->af', OVVV, r1b[p0:p1])
OVVV = tmpbb = None
tau2bb = None
tau2ab = uccsd.make_tau_ab(r2ab, r1 , t1 , 2)
blksize = min(nocca, max(ccsd.BLKMIN, int(max_memory*1e6/8/(nvira*nvirb**2*3))))
for p0, p1 in lib.prange(0, nocca, blksize):
ovVV = eris.get_ovVV(slice(p0,p1)) # ovVV = eris.ovVV[p0:p1]
Hr1b += lib.einsum('mfAE,mIfE->IA', ovVV, r2ab[p0:p1])
tmpab = lib.einsum('meAF,iJeF->mAiJ', ovVV, tau2ab)
Hr2ab-= lib.einsum('mb,mAiJ->iJbA', t1a[p0:p1], tmpab)
tmpb-= lib.einsum('meAF,me->AF', ovVV, r1a[p0:p1])
ovVV = tmpab = None
blksize = min(noccb, max(ccsd.BLKMIN, int(max_memory*1e6/8/(nvirb*nvira**2*3))))
for p0, p1 in lib.prange(0, noccb, blksize):
OVvv = eris.get_OVvv(slice(p0,p1)) # OVvv = eris.OVvv[p0:p1]
Hr1a += lib.einsum('MFae,iMeF->ia', OVvv, r2ab[:,p0:p1])
tmpba = lib.einsum('MEaf,iJfE->MaiJ', OVvv, tau2ab)
Hr2ab-= lib.einsum('MB,MaiJ->iJaB', t1b[p0:p1], tmpba)
tmpa-= lib.einsum('MEaf,ME->af', OVvv, r1b[p0:p1])
OVvv = tmpba = None
tau2ab = None
Hr2aa-= lib.einsum('af,ijfb->ijab', tmpa, t2aa)
Hr2bb-= lib.einsum('af,ijfb->ijab', tmpb, t2bb)
Hr2ab-= lib.einsum('af,iJfB->iJaB', tmpa, t2ab)
Hr2ab-= lib.einsum('AF,iJbF->iJbA', tmpb, t2ab)
eris_ovov = np.asarray(eris.ovov)
eris_OVOV = np.asarray(eris.OVOV)
eris_ovOV = np.asarray(eris.ovOV)
tau2aa = uccsd.make_tau_aa(r2aa, r1a, t1a, 2)
tauaa = uccsd.make_tau_aa(t2aa, t1a, t1a)
tmpaa = lib.einsum('menf,ijef->mnij', eris_ovov, tau2aa)
Hr2aa += lib.einsum('mnij,mnab->ijab', tmpaa, tauaa) * 0.25
tau2aa = tauaa = None
tau2bb = uccsd.make_tau_aa(r2bb, r1b, t1b, 2)
taubb = uccsd.make_tau_aa(t2bb, t1b, t1b)
tmpbb = lib.einsum('menf,ijef->mnij', eris_OVOV, tau2bb)
Hr2bb += lib.einsum('mnij,mnab->ijab', tmpbb, taubb) * 0.25
tau2bb = taubb = None
tau2ab = uccsd.make_tau_ab(r2ab, r1 , t1 , 2)
tauab = uccsd.make_tau_ab(t2ab, t1 , t1)
tmpab = lib.einsum('meNF,iJeF->mNiJ', eris_ovOV, tau2ab)
Hr2ab += lib.einsum('mNiJ,mNaB->iJaB', tmpab, tauab)
tau2ab = tauab = None
tmpa = lib.einsum('menf,imef->ni', eris_ovov, r2aa)
tmpa-= lib.einsum('neMF,iMeF->ni', eris_ovOV, r2ab)
tmpb = lib.einsum('menf,imef->ni', eris_OVOV, r2bb)
tmpb-= lib.einsum('mfNE,mIfE->NI', eris_ovOV, r2ab)
Hr1a += lib.einsum('na,ni->ia', t1a, tmpa)
Hr1b += lib.einsum('na,ni->ia', t1b, tmpb)
Hr2aa+= lib.einsum('mj,imab->ijab', tmpa, t2aa)
Hr2bb+= lib.einsum('mj,imab->ijab', tmpb, t2bb)
Hr2ab+= lib.einsum('MJ,iMaB->iJaB', tmpb, t2ab)
Hr2ab+= lib.einsum('mj,mIaB->jIaB', tmpa, t2ab)
tmp1a = np.einsum('menf,mf->en', eris_ovov, r1a)
tmp1a-= np.einsum('mfne,mf->en', eris_ovov, r1a)
tmp1a-= np.einsum('neMF,MF->en', eris_ovOV, r1b)
tmp1b = np.einsum('menf,mf->en', eris_OVOV, r1b)
tmp1b-= np.einsum('mfne,mf->en', eris_OVOV, r1b)
tmp1b-= np.einsum('mfNE,mf->EN', eris_ovOV, r1a)
tmpa = np.einsum('en,nb->eb', tmp1a, t1a)
tmpa+= lib.einsum('menf,mnfb->eb', eris_ovov, r2aa)
tmpa-= lib.einsum('meNF,mNbF->eb', eris_ovOV, r2ab)
tmpb = np.einsum('en,nb->eb', tmp1b, t1b)
tmpb+= lib.einsum('menf,mnfb->eb', eris_OVOV, r2bb)
tmpb-= lib.einsum('nfME,nMfB->EB', eris_ovOV, r2ab)
Hr2aa+= lib.einsum('eb,ijae->ijab', tmpa, t2aa)
Hr2bb+= lib.einsum('eb,ijae->ijab', tmpb, t2bb)
Hr2ab+= lib.einsum('EB,iJaE->iJaB', tmpb, t2ab)
Hr2ab+= lib.einsum('eb,iJeA->iJbA', tmpa, t2ab)
eris_ovov = eris_ovOV = eris_OVOV = None
Hr2aa-= lib.einsum('mbij,ma->ijab', imds.wovoo, r1a)
Hr2bb-= lib.einsum('mbij,ma->ijab', imds.wOVOO, r1b)
Hr2ab-= lib.einsum('mBiJ,ma->iJaB', imds.woVoO, r1a)
Hr2ab-= lib.einsum('MbJi,MA->iJbA', imds.wOvOo, r1b)
Hr1a-= 0.5*lib.einsum('mnie,mnae->ia', imds.wooov, r2aa)
Hr1a-= lib.einsum('mNiE,mNaE->ia', imds.woOoV, r2ab)
Hr1b-= 0.5*lib.einsum('mnie,mnae->ia', imds.wOOOV, r2bb)
Hr1b-= lib.einsum('MnIe,nMeA->IA', imds.wOoOv, r2ab)
tmpa = lib.einsum('mnie,me->ni', imds.wooov, r1a)
tmpa-= lib.einsum('nMiE,ME->ni', imds.woOoV, r1b)
tmpb = lib.einsum('mnie,me->ni', imds.wOOOV, r1b)
tmpb-= lib.einsum('NmIe,me->NI', imds.wOoOv, r1a)
Hr2aa+= lib.einsum('ni,njab->ijab', tmpa, t2aa)
Hr2bb+= lib.einsum('ni,njab->ijab', tmpb, t2bb)
Hr2ab+= lib.einsum('ni,nJaB->iJaB', tmpa, t2ab)
Hr2ab+= lib.einsum('NI,jNaB->jIaB', tmpb, t2ab)
for p0, p1 in lib.prange(0, nvira, nocca):
Hr2aa+= lib.einsum('ejab,ie->ijab', imds.wvovv[p0:p1], r1a[:,p0:p1])
Hr2ab+= lib.einsum('eJaB,ie->iJaB', imds.wvOvV[p0:p1], r1a[:,p0:p1])
for p0, p1 in lib.prange(0, nvirb, noccb):
Hr2bb+= lib.einsum('ejab,ie->ijab', imds.wVOVV[p0:p1], r1b[:,p0:p1])
Hr2ab+= lib.einsum('EjBa,IE->jIaB', imds.wVoVv[p0:p1], r1b[:,p0:p1])
Hr1a += np.einsum('maei,me->ia',imds.wovvo,r1a)
Hr1a += np.einsum('MaEi,ME->ia',imds.wOvVo,r1b)
Hr1b += np.einsum('maei,me->ia',imds.wOVVO,r1b)
Hr1b += np.einsum('mAeI,me->IA',imds.woVvO,r1a)
Hr2aa+= lib.einsum('mbej,imae->ijab', imds.wovvo, r2aa) * 2
Hr2aa+= lib.einsum('MbEj,iMaE->ijab', imds.wOvVo, r2ab) * 2
Hr2bb+= lib.einsum('mbej,imae->ijab', imds.wOVVO, r2bb) * 2
Hr2bb+= lib.einsum('mBeJ,mIeA->IJAB', imds.woVvO, r2ab) * 2
Hr2ab+= lib.einsum('mBeJ,imae->iJaB', imds.woVvO, r2aa)
Hr2ab+= lib.einsum('MBEJ,iMaE->iJaB', imds.wOVVO, r2ab)
Hr2ab+= lib.einsum('mBEj,mIaE->jIaB', imds.woVVo, r2ab)
Hr2ab+= lib.einsum('mbej,mIeA->jIbA', imds.wovvo, r2ab)
Hr2ab+= lib.einsum('MbEj,IMAE->jIbA', imds.wOvVo, r2bb)
Hr2ab+= lib.einsum('MbeJ,iMeA->iJbA', imds.wOvvO, r2ab)
Hr2aa *= .5
Hr2bb *= .5
Hr2aa = Hr2aa - Hr2aa.transpose(0,1,3,2)
Hr2aa = Hr2aa - Hr2aa.transpose(1,0,2,3)
Hr2bb = Hr2bb - Hr2bb.transpose(0,1,3,2)
Hr2bb = Hr2bb - Hr2bb.transpose(1,0,2,3)
vector = amplitudes_to_vector_ee((Hr1a,Hr1b), (Hr2aa,Hr2ab,Hr2bb))
return vector
def eomsf_ccsd_matvec(eom, vector, imds=None):
'''Spin flip EOM-CCSD'''
if imds is None: imds = eom.make_imds()
t1, t2, eris = imds.t1, imds.t2, imds.eris
t1a, t1b = t1
t2aa, t2ab, t2bb = t2
nocca, noccb, nvira, nvirb = t2ab.shape
nmoa, nmob = nocca+nvira, noccb+nvirb
r1, r2 = vector_to_amplitudes_eomsf(vector, (nmoa,nmob), (nocca,noccb))
r1ab, r1ba = r1
r2baaa, r2aaba, r2abbb, r2bbab = r2
Hr1ab = np.einsum('ae,ie->ia', imds.Fvvb, r1ab)
Hr1ab -= np.einsum('mi,ma->ia', imds.Fooa, r1ab)
Hr1ab += np.einsum('me,imae->ia', imds.Fovb, r2abbb)
Hr1ab += np.einsum('me,imae->ia', imds.Fova, r2aaba)
Hr1ba = np.einsum('ae,ie->ia', imds.Fvva, r1ba)
Hr1ba -= np.einsum('mi,ma->ia', imds.Foob, r1ba)
Hr1ba += np.einsum('me,imae->ia', imds.Fova, r2baaa)
Hr1ba += np.einsum('me,imae->ia', imds.Fovb, r2bbab)
Hr2baaa = .5 *lib.einsum('nMjI,Mnab->Ijab', imds.woOoO, r2baaa)
Hr2aaba = .25*lib.einsum('mnij,mnAb->ijAb', imds.woooo, r2aaba)
Hr2abbb = .5 *lib.einsum('mNiJ,mNAB->iJAB', imds.woOoO, r2abbb)
Hr2bbab = .25*lib.einsum('MNIJ,MNaB->IJaB', imds.wOOOO, r2bbab)
Hr2baaa += lib.einsum('be,Ijae->Ijab', imds.Fvva , r2baaa)
Hr2baaa -= lib.einsum('mj,imab->ijab', imds.Fooa*.5, r2baaa)
Hr2baaa -= lib.einsum('MJ,Miab->Jiab', imds.Foob*.5, r2baaa)
Hr2bbab -= lib.einsum('mj,imab->ijab', imds.Foob , r2bbab)
Hr2bbab += lib.einsum('BE,IJaE->IJaB', imds.Fvvb*.5, r2bbab)
Hr2bbab += lib.einsum('be,IJeA->IJbA', imds.Fvva*.5, r2bbab)
Hr2aaba -= lib.einsum('mj,imab->ijab', imds.Fooa , r2aaba)
Hr2aaba += lib.einsum('be,ijAe->ijAb', imds.Fvva*.5, r2aaba)
Hr2aaba += lib.einsum('BE,ijEa->ijBa', imds.Fvvb*.5, r2aaba)
Hr2abbb += lib.einsum('BE,iJAE->iJAB', imds.Fvvb , r2abbb)
Hr2abbb -= lib.einsum('mj,imab->ijab', imds.Foob*.5, r2abbb)
Hr2abbb -= lib.einsum('mj,mIAB->jIAB', imds.Fooa*.5, r2abbb)
tau2baaa = np.einsum('ia,jb->ijab', r1ba, t1a)
tau2baaa = tau2baaa - tau2baaa.transpose(0,1,3,2)
tau2abbb = np.einsum('ia,jb->ijab', r1ab, t1b)
tau2abbb = tau2abbb - tau2abbb.transpose(0,1,3,2)
tau2aaba = np.einsum('ia,jb->ijab', r1ab, t1a)
tau2aaba = tau2aaba - tau2aaba.transpose(1,0,2,3)
tau2bbab = np.einsum('ia,jb->ijab', r1ba, t1b)
tau2bbab = tau2bbab - tau2bbab.transpose(1,0,2,3)
tau2baaa += r2baaa
tau2bbab += r2bbab
tau2abbb += r2abbb
tau2aaba += r2aaba
#:eris_ovvv = lib.unpack_tril(np.asarray(eris.ovvv).reshape(nocca*nvira,-1)).reshape(nocca,nvira,nvira,nvira)
#:Hr1ba += lib.einsum('mfae,Imef->Ia', eris_ovvv, r2baaa)
#:tmp1aaba = lib.einsum('meaf,Ijef->maIj', eris_ovvv, tau2baaa)
#:Hr2baaa += lib.einsum('mb,maIj->Ijab', t1a , tmp1aaba)
mem_now = lib.current_memory()[0]
max_memory = max(0, eom.max_memory - mem_now)
blksize = min(nocca, max(ccsd.BLKMIN, int(max_memory*1e6/8/(nvira**3*3))))
for p0,p1 in lib.prange(0, nocca, blksize):
ovvv = eris.get_ovvv(slice(p0,p1)) # ovvv = eris.ovvv[p0:p1]
Hr1ba += lib.einsum('mfae,Imef->Ia', ovvv, r2baaa[:,p0:p1])
tmp1aaba = lib.einsum('meaf,Ijef->maIj', ovvv, tau2baaa)
Hr2baaa += lib.einsum('mb,maIj->Ijab', t1a[p0:p1], tmp1aaba)
ovvv = tmp1aaba = None
#:eris_OVVV = lib.unpack_tril(np.asarray(eris.OVVV).reshape(noccb*nvirb,-1)).reshape(noccb,nvirb,nvirb,nvirb)
#:Hr1ab += lib.einsum('MFAE,iMEF->iA', eris_OVVV, r2abbb)
#:tmp1bbab = lib.einsum('MEAF,iJEF->MAiJ', eris_OVVV, tau2abbb)
#:Hr2abbb += lib.einsum('MB,MAiJ->iJAB', t1b , tmp1bbab)
blksize = min(noccb, max(ccsd.BLKMIN, int(max_memory*1e6/8/(nvirb**3*3))))
for p0, p1 in lib.prange(0, noccb, blksize):
OVVV = eris.get_OVVV(slice(p0,p1)) # OVVV = eris.OVVV[p0:p1]
Hr1ab += lib.einsum('MFAE,iMEF->iA', OVVV, r2abbb[:,p0:p1])
tmp1bbab = lib.einsum('MEAF,iJEF->MAiJ', OVVV, tau2abbb)
Hr2abbb += lib.einsum('MB,MAiJ->iJAB', t1b[p0:p1], tmp1bbab)
OVVV = tmp1bbab = None
#:eris_ovVV = lib.unpack_tril(np.asarray(eris.ovVV).reshape(nocca*nvira,-1)).reshape(nocca,nvira,nvirb,nvirb)
#:Hr1ab += lib.einsum('mfAE,imEf->iA', eris_ovVV, r2aaba)
#:tmp1abaa = lib.einsum('meAF,ijFe->mAij', eris_ovVV, tau2aaba)
#:tmp1abbb = lib.einsum('meAF,IJeF->mAIJ', eris_ovVV, tau2bbab)
#:tmp1ba = lib.einsum('mfAE,mE->Af', eris_ovVV, r1ab)
#:Hr2bbab -= lib.einsum('mb,mAIJ->IJbA', t1a*.5, tmp1abbb)
#:Hr2aaba -= lib.einsum('mb,mAij->ijAb', t1a*.5, tmp1abaa)
tmp1ba = np.zeros((nvirb,nvira))
blksize = min(nocca, max(ccsd.BLKMIN, int(max_memory*1e6/8/(nvira*nvirb**2*3))))
for p0,p1 in lib.prange(0, nocca, blksize):
ovVV = eris.get_ovVV(slice(p0,p1)) # ovVV = eris.ovVV[p0:p1]
Hr1ab += lib.einsum('mfAE,imEf->iA', ovVV, r2aaba[:,p0:p1])
tmp1abaa = lib.einsum('meAF,ijFe->mAij', ovVV, tau2aaba)
tmp1abbb = lib.einsum('meAF,IJeF->mAIJ', ovVV, tau2bbab)
tmp1ba += lib.einsum('mfAE,mE->Af', ovVV, r1ab[p0:p1])
Hr2bbab -= lib.einsum('mb,mAIJ->IJbA', t1a[p0:p1]*.5, tmp1abbb)
Hr2aaba -= lib.einsum('mb,mAij->ijAb', t1a[p0:p1]*.5, tmp1abaa)
#:eris_OVvv = lib.unpack_tril(np.asarray(eris.OVvv).reshape(noccb*nvirb,-1)).reshape(noccb,nvirb,nvira,nvira)
#:Hr1ba += lib.einsum('MFae,IMeF->Ia', eris_OVvv, r2bbab)
#:tmp1baaa = lib.einsum('MEaf,ijEf->Maij', eris_OVvv, tau2aaba)
#:tmp1babb = lib.einsum('MEaf,IJfE->MaIJ', eris_OVvv, tau2bbab)
#:tmp1ab = lib.einsum('MFae,Me->aF', eris_OVvv, r1ba)
#:Hr2aaba -= lib.einsum('MB,Maij->ijBa', t1b*.5, tmp1baaa)
#:Hr2bbab -= lib.einsum('MB,MaIJ->IJaB', t1b*.5, tmp1babb)
tmp1ab = np.zeros((nvira,nvirb))
blksize = min(noccb, max(ccsd.BLKMIN, int(max_memory*1e6/8/(nvirb*nvira**2*3))))
for p0, p1 in lib.prange(0, noccb, blksize):
OVvv = eris.get_OVvv(slice(p0,p1)) # OVvv = eris.OVvv[p0:p1]
Hr1ba += lib.einsum('MFae,IMeF->Ia', OVvv, r2bbab[:,p0:p1])
tmp1baaa = lib.einsum('MEaf,ijEf->Maij', OVvv, tau2aaba)
tmp1babb = lib.einsum('MEaf,IJfE->MaIJ', OVvv, tau2bbab)
tmp1ab+= lib.einsum('MFae,Me->aF', OVvv, r1ba[p0:p1])
Hr2aaba -= lib.einsum('MB,Maij->ijBa', t1b[p0:p1]*.5, tmp1baaa)
Hr2bbab -= lib.einsum('MB,MaIJ->IJaB', t1b[p0:p1]*.5, tmp1babb)
Hr2baaa += lib.einsum('aF,jIbF->Ijba', tmp1ab , t2ab)
Hr2bbab -= lib.einsum('aF,IJFB->IJaB', tmp1ab*.5, t2bb)
Hr2abbb += lib.einsum('Af,iJfB->iJBA', tmp1ba , t2ab)
Hr2aaba -= lib.einsum('Af,ijfb->ijAb', tmp1ba*.5, t2aa)
Hr2baaa -= lib.einsum('MbIj,Ma->Ijab', imds.wOvOo, r1ba )
Hr2bbab -= lib.einsum('MBIJ,Ma->IJaB', imds.wOVOO, r1ba*.5)
Hr2abbb -= lib.einsum('mBiJ,mA->iJAB', imds.woVoO, r1ab )
Hr2aaba -= lib.einsum('mbij,mA->ijAb', imds.wovoo, r1ab*.5)
Hr1ab -= 0.5*lib.einsum('mnie,mnAe->iA', imds.wooov, r2aaba)
Hr1ab -= lib.einsum('mNiE,mNAE->iA', imds.woOoV, r2abbb)
Hr1ba -= 0.5*lib.einsum('MNIE,MNaE->Ia', imds.wOOOV, r2bbab)
Hr1ba -= lib.einsum('MnIe,Mnae->Ia', imds.wOoOv, r2baaa)
tmp1ab = lib.einsum('MnIe,Me->nI', imds.wOoOv, r1ba)
tmp1ba = lib.einsum('mNiE,mE->Ni', imds.woOoV, r1ab)
Hr2baaa += lib.einsum('nI,njab->Ijab', tmp1ab*.5, t2aa)
Hr2bbab += lib.einsum('nI,nJaB->IJaB', tmp1ab , t2ab)
Hr2abbb += lib.einsum('Ni,NJAB->iJAB', tmp1ba*.5, t2bb)
Hr2aaba += lib.einsum('Ni,jNbA->ijAb', tmp1ba , t2ab)
for p0, p1 in lib.prange(0, nvira, nocca):
Hr2baaa += lib.einsum('ejab,Ie->Ijab', imds.wvovv[p0:p1], r1ba[:,p0:p1]*.5)
Hr2bbab += lib.einsum('eJaB,Ie->IJaB', imds.wvOvV[p0:p1], r1ba[:,p0:p1] )
for p0, p1 in lib.prange(0, nvirb, noccb):
Hr2abbb += lib.einsum('EJAB,iE->iJAB', imds.wVOVV[p0:p1], r1ab[:,p0:p1]*.5)
Hr2aaba += lib.einsum('EjAb,iE->ijAb', imds.wVoVv[p0:p1], r1ab[:,p0:p1] )
Hr1ab += np.einsum('mAEi,mE->iA', imds.woVVo, r1ab)
Hr1ba += np.einsum('MaeI,Me->Ia', imds.wOvvO, r1ba)
Hr2baaa += lib.einsum('mbej,Imae->Ijab', imds.wovvo, r2baaa)
Hr2baaa += lib.einsum('MbeJ,Miae->Jiab', imds.wOvvO, r2baaa)
Hr2baaa += lib.einsum('MbEj,IMaE->Ijab', imds.wOvVo, r2bbab)
Hr2bbab += lib.einsum('MBEJ,IMaE->IJaB', imds.wOVVO, r2bbab)
Hr2bbab += lib.einsum('MbeJ,IMeA->IJbA', imds.wOvvO, r2bbab)
Hr2bbab += lib.einsum('mBeJ,Imae->IJaB', imds.woVvO, r2baaa)
Hr2aaba += lib.einsum('mbej,imAe->ijAb', imds.wovvo, r2aaba)
Hr2aaba += lib.einsum('mBEj,imEa->ijBa', imds.woVVo, r2aaba)
Hr2aaba += lib.einsum('MbEj,iMAE->ijAb', imds.wOvVo, r2abbb)
Hr2abbb += lib.einsum('MBEJ,iMAE->iJAB', imds.wOVVO, r2abbb)
Hr2abbb += lib.einsum('mBEj,mIAE->jIAB', imds.woVVo, r2abbb)
Hr2abbb += lib.einsum('mBeJ,imAe->iJAB', imds.woVvO, r2aaba)
eris_ovov = np.asarray(eris.ovov)
eris_OVOV = np.asarray(eris.OVOV)
eris_ovOV = np.asarray(eris.ovOV)
tauaa, tauab, taubb = uccsd.make_tau(t2, t1, t1)
tmp1baaa = lib.einsum('nfME,ijEf->Mnij', eris_ovOV, tau2aaba)
tmp1aaba = lib.einsum('menf,Ijef->mnIj', eris_ovov, tau2baaa)
tmp1abbb = lib.einsum('meNF,IJeF->mNIJ', eris_ovOV, tau2bbab)
tmp1bbab = lib.einsum('MENF,iJEF->MNiJ', eris_OVOV, tau2abbb)
Hr2baaa += 0.5*.5*lib.einsum('mnIj,mnab->Ijab', tmp1aaba, tauaa)
Hr2bbab += .5*lib.einsum('nMIJ,nMaB->IJaB', tmp1abbb, tauab)
Hr2aaba += .5*lib.einsum('Nmij,mNbA->ijAb', tmp1baaa, tauab)
Hr2abbb += 0.5*.5*lib.einsum('MNiJ,MNAB->iJAB', tmp1bbab, taubb)
tauaa = tauab = taubb = None
tmpab = lib.einsum('menf,Imef->nI', eris_ovov, r2baaa)
tmpab -= lib.einsum('nfME,IMfE->nI', eris_ovOV, r2bbab)
tmpba = lib.einsum('MENF,iMEF->Ni', eris_OVOV, r2abbb)
tmpba -= lib.einsum('meNF,imFe->Ni', eris_ovOV, r2aaba)
Hr1ab += np.einsum('NA,Ni->iA', t1b, tmpba)
Hr1ba += np.einsum('na,nI->Ia', t1a, tmpab)
Hr2baaa -= lib.einsum('mJ,imab->Jiab', tmpab*.5, t2aa)
Hr2bbab -= lib.einsum('mJ,mIaB->IJaB', tmpab*.5, t2ab) * 2
Hr2aaba -= lib.einsum('Mj,iMbA->ijAb', tmpba*.5, t2ab) * 2
Hr2abbb -= lib.einsum('Mj,IMAB->jIAB', tmpba*.5, t2bb)
tmp1ab = np.einsum('meNF,mF->eN', eris_ovOV, r1ab)
tmp1ba = np.einsum('nfME,Mf->En', eris_ovOV, r1ba)
tmpab = np.einsum('eN,NB->eB', tmp1ab, t1b)
tmpba = np.einsum('En,nb->Eb', tmp1ba, t1a)
###############################################################################
# WaterTAP Copyright (c) 2021, The Regents of the University of California,
# through Lawrence Berkeley National Laboratory, Oak Ridge National
# Laboratory, National Renewable Energy Laboratory, and National Energy
# Technology Laboratory (subject to receipt of any required approvals from
# the U.S. Dept. of Energy). All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and license
# information, respectively. These files are also available online at the URL
# "https://github.com/watertap-org/watertap/"
#
###############################################################################
import numpy as np
import pyomo.environ as pyo
import sys
import os
import itertools
import warnings
import copy, pprint
import h5py
from scipy.interpolate import griddata
from enum import Enum, auto
from abc import abstractmethod, ABC
from idaes.core.util import get_solver
from idaes.surrogate.pysmo import sampling
from pyomo.common.collections import ComponentSet
from pyomo.common.tee import capture_output
np.set_printoptions(linewidth=200)
# ================================================================
class SamplingType(Enum):
FIXED = auto()
RANDOM = auto()
RANDOM_LHS = auto()
# ================================================================
class _Sample(ABC):
def __init__(self, pyomo_object, *args, **kwargs):
# Check for indexed with single value
if pyomo_object.is_indexed() and len(pyomo_object) == 1:
for _data_obj in pyomo_object.values():
pyomo_object = _data_obj
# Make sure we are a Var() or Param()
if not (pyomo_object.is_parameter_type() or pyomo_object.is_variable_type()):
raise ValueError(f"The sweep parameter needs to be a pyomo Param or Var but {type(pyomo_object)} was provided instead.")
if pyomo_object.is_parameter_type() and not pyomo_object.mutable:
raise ValueError(f"Parameter {pyomo_object} is not mutable, and so cannot be set by parameter_sweep")
self.pyomo_object = pyomo_object
self.setup(*args, **kwargs)
@abstractmethod
def sample(self, num_samples):
pass
@abstractmethod
def setup(self, *args, **kwargs):
pass
# ================================================================
class RandomSample(_Sample):
sampling_type = SamplingType.RANDOM
class FixedSample(_Sample):
sampling_type = SamplingType.FIXED
# ================================================================
class LinearSample(FixedSample):
def sample(self, num_samples):
return np.linspace(self.lower_limit, self.upper_limit, self.num_samples)
def setup(self, lower_limit, upper_limit, num_samples):
self.lower_limit = lower_limit
self.upper_limit = upper_limit
self.num_samples = num_samples
# ================================================================
class UniformSample(RandomSample):
def sample(self, num_samples):
return np.random.uniform(self.lower_limit, self.upper_limit, num_samples)
def setup(self, lower_limit, upper_limit):
self.lower_limit = lower_limit
self.upper_limit = upper_limit
# ================================================================
class NormalSample(RandomSample):
def sample(self, num_samples):
return np.random.normal(self.mean, self.sd, num_samples)
def setup(self, mean, sd):
self.mean = mean
self.sd = sd
# ================================================================
class LatinHypercubeSample(_Sample):
sampling_type = SamplingType.RANDOM_LHS
def sample(self, num_samples):
return [self.lower_limit, self.upper_limit]
def setup(self, lower_limit, upper_limit):
self.lower_limit = lower_limit
self.upper_limit = upper_limit
# ================================================================
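# Example of building a sweep dictionary with the sample classes above
# (illustrative sketch added for clarity; the `m.fs.*` names are placeholders,
# not a shipped WaterTAP flowsheet).  All entries must share one SamplingType;
# mixing e.g. LinearSample and UniformSample raises an error downstream.
# sweep_params = {}
# sweep_params['Feed pressure'] = LinearSample(m.fs.feed_pressure, 1e5, 5e5, 5)
# sweep_params['Membrane area'] = LinearSample(m.fs.membrane_area, 10, 100, 4)
# # random alternative (do not mix with the fixed samples above):
# # sweep_params['Feed pressure'] = UniformSample(m.fs.feed_pressure, 1e5, 5e5)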
def _init_mpi(mpi_comm=None):
if mpi_comm is None:
try:
from mpi4py import MPI
except:
warnings.warn("Could not import mpi4py from current environment (defaulting to serial).")
return None, 0, 1
else:
mpi_comm = MPI.COMM_WORLD
return mpi_comm, mpi_comm.Get_rank(), mpi_comm.Get_size()
# ================================================================
def _strip_extension(file_name, extension):
if file_name.lower().endswith(extension):
return file_name[:-len(extension)]
else:
return file_name
# ================================================================
def _build_combinations(d, sampling_type, num_samples, comm, rank, num_procs):
num_var_params = len(d)
if rank == 0:
param_values = []
for k, v in d.items():
# Build a vector of discrete values for this parameter
p = v.sample(num_samples)
param_values.append(p)
if sampling_type == SamplingType.FIXED:
# Form an array with every possible combination of parameter values
global_combo_array = np.array(np.meshgrid(*param_values, indexing="ij"))
global_combo_array = global_combo_array.reshape(num_var_params, -1).T
elif sampling_type == SamplingType.RANDOM:
sorting = np.argsort(param_values[0])
global_combo_array = np.vstack(param_values).T
global_combo_array = global_combo_array[sorting, :]
elif sampling_type == SamplingType.RANDOM_LHS:
lb = [val[0] for val in param_values]
ub = [val[1] for val in param_values]
lhs = sampling.LatinHypercubeSampling([lb, ub], number_of_samples=num_samples, sampling_type='creation')
global_combo_array = lhs.sample_points()
sorting = np.argsort(global_combo_array[:, 0])
global_combo_array = global_combo_array[sorting, :]
else:
raise ValueError(f"Unknown sampling type: {sampling_type}")
# Test if the global_combo_array is in row-major order
if not global_combo_array.flags.c_contiguous:
# If not, return a copy of this array with row-major memory order
global_combo_array = np.ascontiguousarray(global_combo_array)
else:
if sampling_type == SamplingType.FIXED:
nx = 1
for k, v in d.items():
nx *= v.num_samples
elif sampling_type == SamplingType.RANDOM or sampling_type == SamplingType.RANDOM_LHS:
nx = num_samples
else:
raise ValueError(f"Unknown sampling type: {sampling_type}")
if not float(nx).is_integer():
raise RuntimeError(f"Total number of samples must be integer valued")
nx = int(nx)
# Allocate memory to hold the Bcast array
global_combo_array = np.zeros((nx, num_var_params), dtype=np.float64)
### Broadcast the array to all processes
if num_procs > 1:
comm.Bcast(global_combo_array, root=0)
return global_combo_array
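# Worked example (added note, hypothetical sizes): two FIXED parameters sampled
# with 3 and 4 points give a (3*4, 2) combination array on rank 0, equivalent to
# np.array(np.meshgrid(p0, p1, indexing="ij")).reshape(2, -1).T, and the same
# array is then Bcast to every other rank.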
# ================================================================
def _divide_combinations(global_combo_array, rank, num_procs):
# Split the total list of combinations into NUM_PROCS chunks,
# one per each of the MPI ranks
# divided_combo_array = np.array_split(global_combo_array, num_procs, axis=0)
divided_combo_array = np.array_split(global_combo_array, num_procs)
# Return only this rank's portion of the total workload
local_combo_array = divided_combo_array[rank]
return local_combo_array
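# Example (added note): np.array_split tolerates uneven division, so 10
# combinations over 4 ranks yield local arrays of 3, 3, 2 and 2 rows -- each
# rank receives a contiguous, nearly equal slice of the sweep.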
# ================================================================
def _update_model_values(m, param_dict, values):
for k, item in enumerate(param_dict.values()):
param = item.pyomo_object
if param.is_variable_type():
# Fix the single value to values[k]
param.fix(values[k])
elif param.is_parameter_type():
# Fix the single value to values[k]
param.set_value(values[k])
else:
raise RuntimeError(f"Unrecognized Pyomo object {param}")
# ================================================================
def _aggregate_results(local_results, global_values, comm, num_procs):
if num_procs > 1: # pragma: no cover
local_results = local_results.astype(np.float64)
global_results = np.zeros((np.shape(global_values)[0], np.shape(local_results)[1]), dtype=np.float64)
# Collect the number of result values to be sent from each process
send_counts = np.zeros(num_procs, dtype=np.int64)
comm.Gather(np.int64(np.size(local_results)), send_counts, root=0)
# Collect the global results results onto rank 0
comm.Gatherv(local_results, (global_results, send_counts), root=0)
# Broadcast the results to all ranks
comm.Bcast(global_results, root=0)
else:
global_results = np.copy(local_results)
return global_results
# ================================================================
def _default_optimize(model, options=None, tee=False):
'''
Default optimization function used in parameter_sweep.
Optimizes ``model`` using the IDAES default solver.
Raises a RuntimeError if the TerminationCondition is not optimal
Arguments:
model : A Pyomo ConcreteModel to optimize
options (optional) : Solver options to pass into idaes.core.utils.get_solver.
Default is None
tee (optional) : Whether to display the solver log. Default is False
'''
solver = get_solver(options=options)
results = solver.solve(model, tee=tee)
return results
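# Sketch of a user-supplied replacement (added example; the solver option shown
# is a placeholder): any callable with this signature may be passed to
# parameter_sweep through the optimize_function argument.
# def my_optimize(model, options=None, tee=False):
#     solver = get_solver(options={"tol": 1e-6})
#     results = solver.solve(model, tee=tee)
#     pyo.assert_optimal_termination(results)
#     return results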
# ================================================================
def _process_sweep_params(sweep_params):
sampling_type = None
# Check the list of parameters to make sure they are valid
for k in sweep_params:
# Convert to using Sample class
if isinstance(sweep_params[k], (list, tuple)):
sweep_params[k] = LinearSample(*sweep_params[k])
# Get the type of sampling
current_sampling_type = sweep_params[k].sampling_type
# Check to make sure only one sampling type is provided
if sampling_type is None:
sampling_type = current_sampling_type
elif current_sampling_type != sampling_type:
raise ValueError("Cannot mix sampling types")
return sweep_params, sampling_type
# ================================================================
def _interp_nan_values(global_values, global_results):
global_results_clean = np.copy(global_results)
n_vals = np.shape(global_values)[1]
n_outs = np.shape(global_results)[1]
# Build a mask of all the non-nan saved outputs
# i.e., where the optimization succeeded
mask = np.isfinite(global_results[:, 0])
# Create a list of points where good data is available
x0 = global_values[mask, :]
if np.sum(mask) >= 4:
# Interpolate to get a value for nan points where possible
for k in range(n_outs):
y0 = global_results[mask, k]
yi = griddata(x0, y0, global_values, method='linear', rescale=True).reshape(-1)
global_results_clean[~mask, k] = yi[~mask]
else:
warnings.warn("Too few points to perform interpolation.")
return global_results_clean
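# Worked example (added note, hypothetical values): with corner points
# (0,0)->0, (0,1)->1, (1,0)->1, (1,1)->2 solved and the centre point (0.5,0.5)
# failed (NaN), the four finite points satisfy the >= 4 requirement and the
# linear griddata fill returns ~1.0 for the centre row while leaving the
# finite rows untouched.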
# ================================================================
def _create_local_output_skeleton(model, sweep_params, outputs, num_samples):
output_dict = {}
output_dict["sweep_params"] = {}
output_dict["outputs"] = {}
sweep_param_objs = ComponentSet()
# Store the inputs
for sweep_param in sweep_params.values():
var = sweep_param.pyomo_object
sweep_param_objs.add(var)
output_dict["sweep_params"][var.name] = _create_component_output_skeleton(var, num_samples)
if outputs is None:
outputs = {}
# No outputs are specified, so every Var, Expression, and Objective on the model should be saved
for pyo_obj in model.component_data_objects((pyo.Var, pyo.Expression, pyo.Objective), active=True):
# Only need to save this variable if it isn't one of the value in sweep_params
if pyo_obj not in sweep_param_objs:
output_dict["outputs"][pyo_obj.name] = _create_component_output_skeleton(pyo_obj, num_samples)
outputs[pyo_obj.name] = pyo_obj
else:
# Save only the outputs specified in the outputs dictionary
for short_name, pyo_obj in outputs.items():
output_dict["outputs"][short_name] = _create_component_output_skeleton(pyo_obj, num_samples)
return output_dict, outputs
# ================================================================
def _create_component_output_skeleton(component, num_samples):
comp_dict = {}
comp_dict["value"] = np.zeros(num_samples, dtype=np.float)
if hasattr(component, 'lb'):
comp_dict["lower bound"] = component.lb
if hasattr(component, 'ub'):
comp_dict["upper bound"] = component.lb
if hasattr(component, 'get_units'):
unit_obj = component.get_units()
if unit_obj is not None:
comp_dict["units"] = component.get_units().name
else:
comp_dict["units"] = "None"
return comp_dict
# ================================================================
def _update_local_output_dict(model, sweep_params, case_number, sweep_vals, run_successful, output_dict, outputs):
# Get the inputs
op_ps_dict = output_dict["sweep_params"]
for key, item in sweep_params.items():
var_name = item.pyomo_object.name
op_ps_dict[var_name]['value'][case_number] = item.pyomo_object.value
# Get the outputs from model
if run_successful:
for label, pyo_obj in outputs.items():
output_dict["outputs"][label]["value"][case_number] = pyo.value(pyo_obj)
else:
for label in outputs.keys():
output_dict["outputs"][label]["value"][case_number] = np.nan
# ================================================================
def _create_global_output(local_output_dict, req_num_samples, comm, rank, num_procs):
if num_procs == 1:
global_output_dict = local_output_dict
else: # pragma: no cover
# We make the assumption that the parameter sweep is running the same
# flowsheet num_samples number of times, i.e., the structure of the
# local_output_dict remains the same across all mpi_ranks
local_num_cases = len(local_output_dict["solve_successful"])
# Gather the size of the value array on each MPI rank
sample_split_arr = comm.allgather(local_num_cases)
num_total_samples = sum(sample_split_arr)
# Create the global value array on rank 0
if rank == 0:
global_output_dict = copy.deepcopy(local_output_dict)
# Create a global value array of inputs in the dictionary
for key, item in global_output_dict.items():
if key != "solve_successful":
for subkey, subitem in item.items():
subitem['value'] = np.zeros(num_total_samples, dtype=np.float64)
else:
global_output_dict = local_output_dict
# Finally collect the values
for key, item in local_output_dict.items(): # This probably doesn't work
if key != "solve_successful":
for subkey, subitem in item.items():
comm.Gatherv(sendbuf=subitem["value"],
recvbuf=(global_output_dict[key][subkey]["value"], sample_split_arr),
root=0)
# Trim to the exact number
global_output_dict[key][subkey]["value"] = global_output_dict[key][subkey]["value"][0:req_num_samples]
elif key == "solve_successful":
local_solve_successful = np.fromiter(item, dtype=np.bool_, count=len(item))
if rank == 0:
global_solve_successful = np.empty(num_total_samples, dtype=np.bool_)
else:
global_solve_successful = None
comm.Gatherv(sendbuf=local_solve_successful,
recvbuf=(global_solve_successful, sample_split_arr),
root=0)
if rank == 0:
global_output_dict[key] = global_solve_successful[0:req_num_samples]
return global_output_dict
# ================================================================
def _write_outputs(output_dict, output_directory, h5_results_file, txt_options="metadata"):
if not h5_results_file.endswith(".h5"):
h5_results_file += ".h5"
_write_output_to_h5(output_dict, output_directory, h5_results_file)
# We will also create a companion txt file by default which contains
# the metadata of the h5 file in a user readable format.
txt_fname = _strip_extension(h5_results_file,".h5") + ".txt"
txt_fpath = os.path.join(output_directory, txt_fname)
if "solve_successful" in output_dict.keys():
output_dict.pop("solve_successful")
if txt_options == "metadata":
my_dict = copy.deepcopy(output_dict)
for key, value in my_dict.items():
for subkey, subvalue in value.items():
subvalue.pop('value')
elif txt_options == "keys":
my_dict = {}
for key, value in output_dict.items():
my_dict[key] = list(value.keys())
else:
my_dict = output_dict
with open(txt_fpath, "w") as log_file:
pprint.pprint(my_dict, log_file)
# ================================================================
def _write_output_to_h5(output_dict, output_directory, fname):
fpath = os.path.join(output_directory, fname)
f = h5py.File(fpath, 'w')
for key, item in output_dict.items():
grp = f.create_group(key)
if key != "solve_successful":
for subkey, subitem in item.items():
subgrp = grp.create_group(subkey)
for subsubkey, subsubitem in subitem.items():
if subsubkey == 'lower bound' and subsubitem is None:
subgrp.create_dataset(subsubkey, data=np.finfo('d').min)
elif subsubkey == 'upper bound' and subsubitem is None:
subgrp.create_dataset(subsubkey, data=np.finfo('d').max)
else:
subgrp.create_dataset(subsubkey, data=output_dict[key][subkey][subsubkey])
elif key == 'solve_successful':
grp.create_dataset(key, data=output_dict[key])
f.close()
# ================================================================
def _read_output_h5(filepath):
f = h5py.File(filepath , 'r')
l1_keys = list(f.keys())
output_dict = {}
for key in l1_keys: # Input or Output
if key != 'solve_successful':
output_dict[key] = {}
l2_keys = list(f[key].keys())
for subkey in l2_keys: # Variable name
output_dict[key][subkey] = {}
l3_keys = list(f[key][subkey].keys())
for subsubkey in l3_keys: # variable metadata
output_dict[key][subkey][subsubkey] = f[key][subkey][subsubkey][()]
if subsubkey == "units":
# The strings are recovered in bytes. we choose to convert it to utf-8
output_dict[key][subkey][subsubkey] = output_dict[key][subkey][subsubkey].decode("utf-8")
elif key == 'solve_successful':
output_dict[key] = list(f[key]['solve_successful'][()])
f.close()
return output_dict
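# Example usage (added sketch; the file path and variable name are hypothetical):
# results = _read_output_h5("output/results.h5")
# results["outputs"]["fs.costing.LCOW"]["value"]   # swept output values
# results["solve_successful"]                      # per-sample success flags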
# ================================================================
def _do_param_sweep(model, sweep_params, outputs, local_values, optimize_function, optimize_kwargs,
reinitialize_function, reinitialize_kwargs, reinitialize_before_sweep, comm):
# Initialize space to hold results
local_num_cases = np.shape(local_values)[0]
# Create the output skeleton for storing detailed data
local_output_dict, outputs = _create_local_output_skeleton(model, sweep_params, outputs, local_num_cases)
local_results = np.zeros((local_num_cases, len(outputs)))
local_solve_successful_list = []
# ================================================================
# Run all optimization cases
# ================================================================
for k in range(local_num_cases):
# Update the model values with a single combination from the parameter space
_update_model_values(model, sweep_params, local_values[k, :])
run_successful = False #until proven otherwise
# Forced reinitialization of the flowsheet if enabled
if reinitialize_before_sweep:
try:
assert reinitialize_function is not None
except:
raise ValueError("Reinitialization function was not specified. The model will not be reinitialized.")
else:
reinitialize_function(model, **reinitialize_kwargs)
try:
# Simulate/optimize with this set of parameter
with capture_output():
results = optimize_function(model, **optimize_kwargs)
pyo.assert_optimal_termination(results)
except:
# If the run is infeasible, report nan
local_results[k, :] = np.nan
else:
# If the simulation suceeds, report stats
local_results[k, :] = [pyo.value(outcome) for outcome in outputs.values()]
run_successful = True
# If the initial attempt failed and additional conditions are met, try
# to reinitialize and resolve.
if not run_successful and (reinitialize_function is not None):
try:
reinitialize_function(model, **reinitialize_kwargs)
with capture_output():
results = optimize_function(model, **optimize_kwargs)
pyo.assert_optimal_termination(results)
except:
pass
else:
local_results[k, :] = [pyo.value(outcome) for outcome in outputs.values()]
run_successful = True
# Update the loop based on the reinitialization
_update_local_output_dict(model, sweep_params, k, local_values[k, :], run_successful, local_output_dict, outputs)
local_solve_successful_list.append(run_successful)
local_output_dict["solve_successful"] = local_solve_successful_list
return local_results, local_output_dict
# ================================================================
def _aggregate_local_results(global_values, local_results, local_output_dict,
num_samples, local_num_cases, comm, rank, num_procs):
global_results = _aggregate_results(local_results, global_values, comm, num_procs)
global_output_dict = _create_global_output(local_output_dict, num_samples, comm, rank, num_procs)
return global_results, global_output_dict
# ================================================================
def _save_results(sweep_params, outputs, local_values, global_values, local_results,
global_results, global_output_dict, csv_results_file, h5_results_file,
debugging_data_dir, comm, rank, num_procs, interpolate_nan_outputs):
# Make a directory for saved outputs
if rank == 0:
if csv_results_file is not None:
if not csv_results_file.endswith(".csv"):
csv_results_file += ".csv"
dirname = os.path.dirname(csv_results_file)
if dirname != '':
os.makedirs(dirname, exist_ok=True)
if debugging_data_dir is not None:
os.makedirs(debugging_data_dir, exist_ok=True)
if num_procs > 1:
comm.Barrier()
# Write a header string for all data files
data_header = ','.join(itertools.chain(sweep_params,global_output_dict['outputs']))
if debugging_data_dir is not None:
# Create the local filename and data
fname = os.path.join(debugging_data_dir, f'local_results_{rank:03}.csv')
local_save_data = np.hstack((local_values, local_results))
# Save the local data
np.savetxt(fname, local_save_data, header=data_header, delimiter=', ', fmt='%.6e')
# Create the global filename and data
global_save_data = np.hstack((global_values, global_results))
if rank == 0 and csv_results_file is not None:
# Save the global data
np.savetxt(csv_results_file, global_save_data, header=data_header, delimiter=',', fmt='%.6e')
if interpolate_nan_outputs:
global_results_clean = _interp_nan_values(global_values, global_results)
global_save_data_clean = np.hstack((global_values, global_results_clean))
head, tail = os.path.split(csv_results_file)
if head == '':
interp_file = 'interpolated_%s' % (tail)
else:
interp_file = '%s/interpolated_%s' % (head, tail)
np.savetxt(interp_file, global_save_data_clean, header=data_header, delimiter=',', fmt='%.6e')
if rank == 0 and h5_results_file is not None:
# Save the data of output dictionary
_write_outputs(global_output_dict, dirname, h5_results_file, txt_options="keys")
return global_save_data
# ================================================================
def parameter_sweep(model, sweep_params, outputs=None, csv_results_file=None, h5_results_file=None,
optimize_function=_default_optimize, optimize_kwargs=None, reinitialize_function=None,
reinitialize_kwargs=None, reinitialize_before_sweep=False, mpi_comm=None, debugging_data_dir=None,
interpolate_nan_outputs=False, num_samples=None, seed=None):
'''
This function offers a general way to perform repeated optimizations
of a model for the purposes of exploring a parameter space while
monitoring multiple outputs.
If provided, writes single CSV file to ``results_file`` with all inputs and resulting outputs.
Arguments:
model : A Pyomo ConcreteModel containing a watertap flowsheet, for best
results it should be initialized before being passed to this
function.
sweep_params: A dictionary containing the values to vary with the format
``sweep_params['Short/Pretty-print Name'] =
(model.fs.variable_or_param[index], lower_limit, upper_limit, num_samples)``.
A uniform number of samples ``num_samples`` will be taken between
the ``lower_limit`` and ``upper_limit``.
outputs : An optional dictionary containing "short names" as keys and Pyomo objects
on ``model`` whose values to report as values. E.g.,
``outputs['Short/Pretty-print Name'] = model.fs.variable_or_expression_to_report``.
If not provided, i.e., outputs = None, the default behavior is to save all model
variables, parameters, and expressions which provides very thorough results
at the cost of large file sizes.
csv_results_file (optional) : The path and file name where the results are to be saved;
subdirectories will be created as needed.
h5_results_file (optional) : The file name without the extension where the results are to be saved;
The path is identified from the arguments of `csv_results_file`. This
filename is used when creating the H5 file and the companion text file
which contains the variable names contained within the H5 file.
optimize_function (optional) : A user-defined function to perform the optimization of flowsheet
``model`` and loads the results back into ``model``. The first
argument of this function is ``model``\. The default uses the
default IDAES solver, raising an exception if the termination
condition is not optimal.
optimize_kwargs (optional) : Dictionary of kwargs to pass into every call to
``optimize_function``. The first arg will always be ``model``,
e.g., ``optimize_function(model, **optimize_kwargs)``. The default
uses no kwargs.
reinitialize_function (optional) : A user-defined function to perform the re-initialize the
flowsheet ``model`` if the first call to ``optimize_function``
fails for any reason. After ``reinitialize_function``, the
parameter sweep tool will immediately call
``optimize_function`` again.
reinitialize_kwargs (optional) : Dictionary or kwargs to pass into every call to
``reinitialize_function``. The first arg will always be
``model``, e.g.,
``reinitialize_function(model, **reinitialize_kwargs)``.
The default uses no kwargs.
reinitialize_before_sweep (optional): Boolean option to reinitialize the flow sheet model before
every parameter sweep realization. The default is False.
Note that, if a reinitialize_function is provided, the parameter sweep
will still try to reinitialize and re-solve a failed run regardless of this option.
mpi_comm (optional) : User-provided MPI communicator for parallel parameter sweeps.
If None COMM_WORLD will be used. The default is sufficient for most
users.
debugging_data_dir (optional) : Save results on a per-process basis for parallel debugging
purposes. If None no `debugging` data will be saved.
interpolate_nan_outputs (optional) : When the parameter sweep has finished, interior values
of np.nan will be replaced with a value obtained via
a linear interpolation of their surrounding valid neighbors.
If true, a second output file with the extension "_clean"
will be saved alongside the raw (un-interpolated) values.
num_samples (optional) : If the user is using sampling techniques rather than a linear grid
of values, they need to set the number of samples
seed (optional) : If the user is using a random sampling technique, this sets the seed
Returns:
save_data : An array where the first N columns are the values of the parameters passed
by ``sweep_params`` and the remaining columns are the values of the
simulation identified by the ``outputs`` argument.
'''
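# Example call (added sketch; the flowsheet, variable and output names below
# are placeholders, not a shipped WaterTAP model):
# sweep_params = {'Feed pressure': LinearSample(m.fs.P1.outlet.pressure[0], 10e5, 75e5, 10)}
# outputs = {'LCOW': m.fs.costing.LCOW}
# global_results = parameter_sweep(m, sweep_params, outputs,
#                                  csv_results_file='output/results.csv',
#                                  h5_results_file='results')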
# Get an MPI communicator
comm, rank, num_procs = _init_mpi(mpi_comm)
# Convert sweep_params to LinearSamples
sweep_params, sampling_type = _process_sweep_params(sweep_params)
# Set the seed before sampling
np.random.seed(seed)
# Enumerate/Sample the parameter space
global_values = _build_combinations(sweep_params, sampling_type, num_samples, comm, rank, num_procs)
# divide the workload between processors
local_values = _divide_combinations(global_values, rank, num_procs)
local_num_cases = np.shape(local_values)[0]
"""
This module is an example of a barebones function plugin for napari
It implements the ``napari_experimental_provide_function`` hook specification.
see: https://napari.org/docs/dev/plugins/hook_specifications.html
Replace code below according to your needs.
"""
from __future__ import print_function, division
from typing import TYPE_CHECKING, DefaultDict
from unicodedata import name
import six
# import modules
import sys # input, output, errors, and files
import os # interacting with file systems
import time # getting time
import datetime
import inspect # get passed parameters
import yaml # parameter importing
import json # for importing tiff metadata
try:
import cPickle as pickle # loading and saving python objects
except:
import pickle
import numpy as np # numbers package
import struct # for interpretting strings as binary data
import re # regular expressions
from pprint import pprint # for human readable file output
import traceback # for error messaging
import warnings # error messaging
import copy # not sure this is needed
import h5py # working with HDF5 files
import pandas as pd
import networkx as nx
import collections
# scipy and image analysis
from scipy.signal import find_peaks_cwt # used in channel finding
from scipy.optimize import curve_fit # fitting ring profile
from scipy.optimize import leastsq # fitting 2d gaussian
from scipy import ndimage as ndi # labeling and distance transform
from skimage import io
from skimage import segmentation # used in make_masks and segmentation
from skimage.transform import rotate
from skimage.feature import match_template # used to align images
from skimage.feature import blob_log # used for foci finding
from skimage.filters import threshold_otsu, median # segmentation
from skimage import filters
from skimage import morphology # many functions from this are used in segmentation
from skimage.measure import regionprops # used for creating lineages
from skimage.measure import profile_line # used for ring and nucleoid analysis
from skimage import util, measure, transform, feature
import tifffile as tiff
from sklearn import metrics
# deep learning
import tensorflow as tf # ignore message about how tf was compiled
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras import models
from tensorflow.keras import losses
from tensorflow.keras import utils
from tensorflow.keras import backend as K
# os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # suppress warnings
# Parralelization modules
import multiprocessing
from multiprocessing import Pool
# Plotting for debug
import matplotlib as mpl
font = {'family' : 'sans-serif',
'weight' : 'normal',
'size' : 12}
mpl.rc('font', **font)
mpl.rcParams['pdf.fonttype'] = 42
from matplotlib.patches import Ellipse
from pathlib import Path
import time
import matplotlib.pyplot as plt
# import modules
import os
import glob
import re
import numpy as np
import tifffile as tiff
import pims_nd2
from skimage import io, measure, morphology
import tifffile as tiff
from scipy import stats
from pprint import pprint # for human readable file output
import multiprocessing
from multiprocessing import Pool
import numpy as np
import warnings
from tensorflow.python.keras import models
from enum import Enum
import numpy as np
import multiprocessing
from multiprocessing import Pool
import os
from napari_plugin_engine import napari_hook_implementation
from skimage.filters import threshold_otsu # segmentation
from skimage import morphology # many functions from this are used in segmentation
from skimage import segmentation # used in make_masks and segmentation
from scipy import ndimage as ndi # labeling and distance transform
import matplotlib.gridspec as gridspec
from skimage.exposure import rescale_intensity # for displaying in GUI
from skimage import io, morphology, segmentation
# import mm3_helpers as mm3
import napari
# This is the actual plugin function, where we export our function
# (The functions themselves are defined below)
@napari_hook_implementation
def napari_experimental_provide_function():
# we can return a single function
# or a tuple of (function, magicgui_options)
# or a list of multiple functions with or without options, as shown here:
#return [Segment, threshold, image_arithmetic]
return [Compile, ChannelPicker, Segment]
# 1. First example, a simple function that thresholds an image and creates a labels layer
def threshold(data: "napari.types.ImageData", threshold: int) -> "napari.types.LabelsData":
"""Threshold an image and return a mask."""
return (data > threshold).astype(int)
# print a warning
def warning(*objs):
print(time.strftime("%H:%M:%S WARNING:", time.localtime()), *objs, file=sys.stderr)
# print information
def information(*objs):
print(time.strftime("%H:%M:%S", time.localtime()), *objs, file=sys.stdout)
def julian_day_number():
"""
Need this to solve a bug in pims_nd2.nd2reader.ND2_Reader instance initialization.
The bug is in /usr/local/lib/python2.7/site-packages/pims_nd2/ND2SDK.py in function `jdn_to_datetime_local`, when the year number in the metadata (self._lim_metadata_desc) is not in the correct range. This causes a problem when calling self.metadata.
https://en.wikipedia.org/wiki/Julian_day
"""
dt=datetime.datetime.now()
tt=dt.timetuple()
jdn=(1461.*(tt.tm_year + 4800. + (tt.tm_mon - 14.)/12))/4. + (367.*(tt.tm_mon - 2. - 12.*((tt.tm_mon -14.)/12)))/12. - (3.*((tt.tm_year + 4900. + (tt.tm_mon - 14.)/12.)/100.))/4. + tt.tm_mday - 32075
return jdn
def get_plane(filepath):
pattern = r'(c\d+).tif'
res = re.search(pattern,filepath)
if (res != None):
return res.group(1)
else:
return None
def get_fov(filepath):
pattern = r'xy(\d+)\w*.tif'
res = re.search(pattern,filepath)
if (res != None):
return int(res.group(1))
else:
return None
def get_time(filepath):
pattern = r't(\d+)xy\w+.tif'
res = re.search(pattern,filepath)
if (res != None):
return np.int_(res.group(1))
else:
return None
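# Example (added note): for a hypothetical file name 't0001xy001c1.tif' the
# helpers above give get_plane(...) == 'c1', get_fov(...) == 1 and
# get_time(...) == 1, i.e. they parse the t<frame>xy<fov>c<plane>.tif naming
# convention assumed by this module.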
# loads and image stack from TIFF or HDF5 using mm3 conventions
def load_stack(fov_id, peak_id, color='c1', image_return_number=None):
'''
Loads an image stack.
Supports reading TIFF stacks or HDF5 files.
Parameters
----------
fov_id : int
The FOV id
peak_id : int
The peak (channel) id. Dummy None value in case color='empty'
color : str
The image stack type to return. Can be:
c1 : phase stack
cN : where n is an integer for arbitrary color channel
sub : subtracted images
seg : segmented images
empty : get the empty channel for this fov, slightly different
Returns
-------
image_stack : np.ndarray
The image stack through time. Shape is (t, y, x)
'''
# things are slightly different for empty channels
if 'empty' in color:
if params['output'] == 'TIFF':
img_filename = params['experiment_name'] + '_xy%03d_%s.tif' % (fov_id, color)
with tiff.TiffFile(os.path.join(params['empty_dir'],img_filename)) as tif:
img_stack = tif.asarray()
if params['output'] == 'HDF5':
with h5py.File(os.path.join(params['hdf5_dir'],'xy%03d.hdf5' % fov_id), 'r') as h5f:
img_stack = h5f[color][:]
return img_stack
# load normal images for either TIFF or HDF5
if params['output'] == 'TIFF':
if color[0] == 'c':
img_dir = params['chnl_dir']
elif 'sub' in color:
img_dir = params['sub_dir']
elif 'foci' in color:
img_dir = params['foci_seg_dir']
elif 'seg' in color:
img_dir = params['seg_dir']
img_filename = params['experiment_name'] + '_xy%03d_p%04d_%s.tif' % (fov_id, peak_id, color)
with tiff.TiffFile(os.path.join(img_dir, img_filename)) as tif:
img_stack = tif.asarray()
if params['output'] == 'HDF5':
with h5py.File(os.path.join(params['hdf5_dir'], 'xy%03d.hdf5' % fov_id), 'r') as h5f:
# normal naming
# need to use [:] to get a copy, else it references the closed hdf5 dataset
img_stack = h5f['channel_%04d/p%04d_%s' % (peak_id, peak_id, color)][:]
return img_stack
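# Hedged usage sketch (assumes params, the analysis directories, and the mm3
# naming convention have been set up upstream; the ids below are made up):
#
#   phase_stack = load_stack(fov_id=1, peak_id=33, color='c1')       # (t, y, x)
#   sub_stack   = load_stack(fov_id=1, peak_id=33, color='sub_c1')   # subtracted images
#   empty_stack = load_stack(fov_id=1, peak_id=None, color='empty_c1')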
# load the time table and add it to the global params
def load_time_table():
'''Add the time table dictionary to the params global dictionary.
This is so it can be used during Cell creation.
'''
# try first for yaml, then for pkl
try:
with open(os.path.join(params['ana_dir'], 'time_table.yaml'), 'rb') as time_table_file:
params['time_table'] = yaml.safe_load(time_table_file)
except:
with open(os.path.join(params['ana_dir'], 'time_table.pkl'), 'rb') as time_table_file:
params['time_table'] = pickle.load(time_table_file)
return
# function for loading the channel masks
def load_channel_masks():
'''Load channel masks dictionary. Should be .yaml but try pickle too.
'''
information("Loading channel masks dictionary.")
# try loading from .yaml before .pkl
try:
information('Path:', os.path.join(params['ana_dir'], 'channel_masks.yaml'))
with open(os.path.join(params['ana_dir'], 'channel_masks.yaml'), 'r') as cmask_file:
channel_masks = yaml.safe_load(cmask_file)
except:
warning('Could not load channel masks dictionary from .yaml.')
try:
information('Path:', os.path.join(params['ana_dir'], 'channel_masks.pkl'))
with open(os.path.join(params['ana_dir'], 'channel_masks.pkl'), 'rb') as cmask_file:
channel_masks = pickle.load(cmask_file)
except ValueError:
warning('Could not load channel masks dictionary from .pkl.')
return channel_masks
# function for loading the specs file
def load_specs():
'''Load specs file which indicates which channels should be analyzed, used as empties, or ignored.'''
try:
with open(os.path.join(params['ana_dir'], 'specs.yaml'), 'r') as specs_file:
specs = yaml.safe_load(specs_file)
except:
try:
with open(os.path.join(params['ana_dir'], 'specs.pkl'), 'rb') as specs_file:
specs = pickle.load(specs_file)
except ValueError:
warning('Could not load specs file.')
return specs
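# Hedged illustration of the expected specs structure (peak ids and values are
# made up; per the docstring, 1 = analyze, 0 = use as empty, -1 = ignore):
#
#   specs = load_specs()
#   # specs == {1: {25: 1, 33: 0, 41: -1}, 2: {...}, ...}
#   ana_peaks = [p for p, v in specs[1].items() if v == 1]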
### functions for dealing with raw TIFF images
# get params is the major function which processes raw TIFF images
def get_initial_tif_params(image_filename):
'''This is a function for getting the information
out of an image for later trap identification, cropping, and aligning with Unet. It loads a tiff file and pulls out the image metadata.
it returns a dictionary like this for each image:
'filepath': os.path.join(params['TIFF_dir'], image_filename),
'fov' : image_metadata['fov'], # fov id
't' : image_metadata['t'], # time point
'jd' : image_metadata['jd'], # absolute julian time
'x' : image_metadata['x'], # x position on stage [um]
'y' : image_metadata['y'], # y position on stage [um]
'planes' : plane_list, # list of plane names
'shape' : img_shape # image shape (y, x) in pixels
Called by
mm3_Compile.py __main__
Calls
mm3.extract_metadata
mm3.find_channels
'''
try:
# open up file and get metadata
with tiff.TiffFile(os.path.join(params['TIFF_dir'],image_filename)) as tif:
image_data = tif.asarray()
#print(image_data.shape) # uncomment for debug
#if len(image_data.shape) == 2:
# img_shape = [image_data.shape[0],image_data.shape[1]]
#else:
img_shape = [image_data.shape[1],image_data.shape[2]]
plane_list = [str(i+1) for i in range(image_data.shape[0])]
#print(plane_list) # uncomment for debug
if params['TIFF_source'] == 'elements':
image_metadata = get_tif_metadata_elements(tif)
elif params['TIFF_source'] == 'nd2ToTIFF':
image_metadata = get_tif_metadata_nd2ToTIFF(tif)
else:
image_metadata = get_tif_metadata_filename(tif)
information('Analyzed %s' % image_filename)
# return the file name, the data for the channels in that image, and the metadata
return {'filepath': os.path.join(params['TIFF_dir'], image_filename),
'fov' : image_metadata['fov'], # fov id
't' : image_metadata['t'], # time point
'jd' : image_metadata['jd'], # absolute julian time
'x' : image_metadata['x'], # x position on stage [um]
'y' : image_metadata['y'], # y position on stage [um]
'planes' : plane_list, # list of plane names
'shape' : img_shape} # image shape x y in pixels
except:
warning('Failed get_params for ' + image_filename.split("/")[-1])
print(sys.exc_info()[0])
print(sys.exc_info()[1])
traceback.print_tb(sys.exc_info()[2])
return {'filepath': os.path.join(params['TIFF_dir'],image_filename), 'analyze_success': False}
# get params is the major function which processes raw TIFF images
def get_tif_params(image_filename, find_channels=True):
'''This is a damn important function for getting the information
out of an image. It loads a tiff file, pulls out the image data, and the metadata,
including the location of the channels if flagged.
it returns a dictionary like this for each image:
'filepath': os.path.join(params['TIFF_dir'], image_filename),
'fov' : image_metadata['fov'], # fov id
't' : image_metadata['t'], # time point
'jd' : image_metadata['jd'], # absolute julian time
'x' : image_metadata['x'], # x position on stage [um]
'y' : image_metadata['y'], # y position on stage [um]
'planes' : image_metadata['planes'], # list of plane names
'shape' : img_shape, # image shape (y, x) in pixels
'channels': chnl_loc_dict # dictionary of channel locations; in the case of Unet-based channel segmentation, it's a dictionary of channel labels
Called by
mm3_Compile.py __main__
Calls
mm3.extract_metadata
mm3.find_channels
'''
try:
# open up file and get metadata
with tiff.TiffFile(os.path.join(params['TIFF_dir'],image_filename)) as tif:
image_data = tif.asarray()
if params['TIFF_source'] == 'elements':
image_metadata = get_tif_metadata_elements(tif)
elif params['TIFF_source'] == 'nd2ToTIFF':
image_metadata = get_tif_metadata_nd2ToTIFF(tif)
else:
image_metadata = get_tif_metadata_filename(tif)
# look for channels if flagged
if find_channels:
# fix the image orientation and get the number of planes
image_data = fix_orientation(image_data)
# if the image data has more than 1 plane restrict image_data to phase,
# which should have highest mean pixel data
if len(image_data.shape) > 2:
#ph_index = np.argmax([np.mean(image_data[ci]) for ci in range(image_data.shape[0])])
ph_index = int(params['phase_plane'][1:]) - 1
image_data = image_data[ph_index]
# get shape of single plane
img_shape = [image_data.shape[0], image_data.shape[1]]
# find channels on the processed image
chnl_loc_dict = find_channel_locs(image_data)
information('Analyzed %s' % image_filename)
# return the file name, the data for the channels in that image, and the metadata
return {'filepath': os.path.join(params['TIFF_dir'], image_filename),
'fov' : image_metadata['fov'], # fov id
't' : image_metadata['t'], # time point
'jd' : image_metadata['jd'], # absolute julian time
'x' : image_metadata['x'], # x position on stage [um]
'y' : image_metadata['y'], # y position on stage [um]
'planes' : image_metadata['planes'], # list of plane names
'shape' : img_shape, # image shape x y in pixels
# 'channels' : {1 : {'A' : 1, 'B' : 2}, 2 : {'C' : 3, 'D' : 4}}}
'channels' : chnl_loc_dict} # dictionary of channel locations
except:
warning('Failed get_params for ' + image_filename.split("/")[-1])
print(sys.exc_info()[0])
print(sys.exc_info()[1])
traceback.print_tb(sys.exc_info()[2])
return {'filepath': os.path.join(params['TIFF_dir'],image_filename), 'analyze_success': False}
# finds metadata in a tiff image which has been exported with Nikon Elements.
def get_tif_metadata_elements(tif):
'''This function pulls out the metadata from a tif file and returns it as a dictionary.
This is for tiff files as exported by Nikon Elements as a stacked tiff, one per time point.
tif is an opened tif file (using the package tifffile)
arguments:
fname (tifffile.TiffFile): TIFF file object from which data will be extracted
returns:
dictionary of values:
'jd' (float)
'x' (float)
'y' (float)
'planes' (list of strings)
Called by
mm3.Compile
'''
# image Metadata
idata = { 'fov': -1,
't' : -1,
'jd': -1 * 0.0,
'x': -1 * 0.0,
'y': -1 * 0.0,
'planes': []}
# get the fov and t simply from the file name
idata['fov'] = int(tif.fname.split('xy')[1].split('.tif')[0])
idata['t'] = int(tif.fname.split('xy')[0].split('t')[-1])
# a page is a plane, or stack, in the tiff. The other metadata is hidden down in there.
for page in tif:
for tag in page.tags.values():
#print("Checking tag",tag.name,tag.value)
t = tag.name, tag.value
t_string = u""
time_string = u""
# Interesting tag names: 65330, 65331 (binary data; good stuff), 65332
# we want to work with the tag named 65331
# if the tag name is not in the set of tags we find interesting then skip this cycle of the loop
if tag.name not in ('65331', '65332', 'strip_byte_counts', 'image_width', 'orientation', 'compression', 'new_subfile_type', 'fill_order', 'max_sample_value', 'bits_per_sample', '65328', '65333'):
#print("*** " + tag.name)
#print(tag.value)
pass
#if tag.name == '65330':
# return tag.value
if tag.name in ('65331'):
# make infolist a list of values 0 to 65535 by zipping together pairs of bytes at two-byte intervals (little-endian 16-bit values)
# note that 0X100 is hex for 256
infolist = [a+b*0x100 for a,b in zip(tag.value[0::2], tag.value[1::2])]
# get char values for each element in infolist
for c_entry in range(0, len(infolist)):
# the element corresponds to an ascii char for a letter or bracket (and a few other things)
if infolist[c_entry] < 127 and infolist[c_entry] > 64:
# add the letter to the unicode string t_string
t_string += chr(infolist[c_entry])
#elif infolist[c_entry] == 0:
# continue
else:
t_string += " "
# this block will find the dTimeAbsolute and print the subsequent integers
# index 170 is counting seconds, and rollover of index 170 leads to increment of index 171
# rollover of index 171 leads to increment of index 172
# get the position of the array by finding the index of t_string at which dTimeAbsolute is listed; note that 2*len(dTimeAbsolute)=26
#print(t_string)
arraypos = t_string.index("dXPos") * 2 + 16
xarr = tag.value[arraypos:arraypos+4]
b = ''.join(chr(i) for i in xarr)
idata['x'] = float(struct.unpack('<f', b)[0])
arraypos = t_string.index("dYPos") * 2 + 16
yarr = tag.value[arraypos:arraypos+4]
b = ''.join(chr(i) for i in yarr)
idata['y'] = float(struct.unpack('<f', b)[0])
arraypos = t_string.index("dTimeAbsolute") * 2 + 26
shortarray = tag.value[arraypos+2:arraypos+10]
b = ''.join(chr(i) for i in shortarray)
idata['jd'] = float(struct.unpack('<d', b)[0])
# extract plane names
il = [a+b*0x100 for a,b in zip(tag.value[0::2], tag.value[1::2])]
li = [a+b*0x100 for a,b in zip(tag.value[1::2], tag.value[2::2])]
strings = list(zip(il, li))
allchars = ""
for c_entry in range(0, len(strings)):
if 31 < strings[c_entry][0] < 127:
allchars += chr(strings[c_entry][0])
elif 31 < strings[c_entry][1] < 127:
allchars += chr(strings[c_entry][1])
else:
allchars += " "
allchars = re.sub(' +',' ', allchars)
words = allchars.split(" ")
planes = []
for idx in [i for i, x in enumerate(words) if x == "sOpticalConfigName"]:
planes.append(words[idx+1])
idata['planes'] = planes
return idata
# finds metadata in a tiff image which has been exported with nd2ToTIFF.py.
def get_tif_metadata_nd2ToTIFF(tif):
'''This function pulls out the metadata from a tif file and returns it as a dictionary.
This is for tiff files as exported by the mm3 script mm3_nd2ToTIFF.py. All the metadata
is found in that script and saved in json format to the tiff, so it is simply extracted here
Parameters:
tif: TIFF file object from which data will be extracted
Returns:
dictionary of values:
'fov': int,
't' : int,
'jd' (float)
'x' (float)
'y' (float)
'planes' (list of strings)
Called by
mm3_Compile.get_tif_params
'''
# get the first page of the tiff and pull out image description
# this dictionary should be in the above form
for tag in tif.pages[0].tags:
if tag.name=="ImageDescription":
idata=tag.value
break
#print(idata)
idata = json.loads(idata)
return idata
# Finds metadata from the filename
def get_tif_metadata_filename(tif):
'''This function pulls out the metadata from a tif file and returns it as a dictionary.
This just gets the tiff metadata from the filename and is a backup option when the format of the metadata is not known.
Parameters:
tif: TIFF file object from which data will be extracted
Returns:
dictionary of values:
'fov': int,
't' : int,
'jd' (float)
'x' (float)
'y' (float)
Called by
mm3_Compile.get_tif_params
'''
idata = {'fov' : get_fov(tif.filename), # fov id
't' : get_time(tif.filename), # time point
'jd' : -1 * 0.0, # absolute julian time
'x' : -1 * 0.0, # x position on stage [um]
'y' : -1 * 0.0} # y position on stage [um]
return idata
# make a lookup time table for converting nominal time to elapsed time in seconds
def make_time_table(analyzed_imgs):
'''
Loops through the analyzed images and uses the jd time in the metadata to find the elapsed
time in seconds that each picture was taken. This is later used for more accurate elongation
rate calculation.
Parameters
---------
analyzed_imgs : dict
The output of get_tif_params.
params['use_jd'] : boolean
If set to True, the 'jd' time from the image metadata is used to create the time table. Otherwise the 't' index is used together with the 'seconds_per_time_index' parameter from the parameters.yaml file to convert to seconds.
Returns
-------
time_table : dict
Look up dictionary with keys for the FOV and then the time point.
'''
information('Making time table...')
# initialize
time_table = {}
first_time = float('inf')
# need to go through the data once to find the first time
for iname, idata in six.iteritems(analyzed_imgs):
if params['use_jd']:
if idata['jd'] < first_time:
first_time = idata['jd']
else:
if idata['t'] < first_time:
first_time = idata['t']
# init dictionary for specific times per FOV
if idata['fov'] not in time_table:
time_table[idata['fov']] = {}
for iname, idata in six.iteritems(analyzed_imgs):
if params['use_jd']:
# convert jd time to elapsed time in seconds
t_in_seconds = np.around((idata['jd'] - first_time) * 24*60*60, decimals=0).astype('uint32')
else:
t_in_seconds = np.around((idata['t'] - first_time) * params['moviemaker']['seconds_per_time_index'], decimals=0).astype('uint32')
time_table[int(idata['fov'])][int(idata['t'])] = int(t_in_seconds)
# save to .pkl. This pkl will be loaded into the params
# with open(os.path.join(params['ana_dir'], 'time_table.pkl'), 'wb') as time_table_file:
# pickle.dump(time_table, time_table_file, protocol=pickle.HIGHEST_PROTOCOL)
# with open(os.path.join(params['ana_dir'], 'time_table.txt'), 'w') as time_table_file:
# pprint(time_table, stream=time_table_file)
with open(os.path.join(params['ana_dir'], 'time_table.yaml'), 'w') as time_table_file:
yaml.dump(data=time_table, stream=time_table_file, default_flow_style=False, tags=None)
information('Time table saved.')
return time_table
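# Hedged illustration of the resulting lookup (numbers are made up): the table is
# nested as {fov_id: {t_index: seconds_since_first_image}}, so e.g.
#
#   time_table = make_time_table(analyzed_imgs)
#   # time_table == {1: {1: 0, 2: 120, 3: 240}, 2: {1: 2, 2: 122}, ...}
#   seconds_elapsed = time_table[1][3]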
# saves traps sliced via Unet
def save_tiffs(imgDict, analyzed_imgs, fov_id):
savePath = os.path.join(params['experiment_directory'],
params['analysis_directory'],
params['chnl_dir'])
img_names = [key for key in analyzed_imgs.keys()]
image_params = analyzed_imgs[img_names[0]]
for peak,img in six.iteritems(imgDict):
img = img.astype('uint16')
if not os.path.isdir(savePath):
os.mkdir(savePath)
for planeNumber in image_params['planes']:
channel_filename = os.path.join(savePath, params['experiment_name'] + '_xy{0:0=3}_p{1:0=4}_c{2}.tif'.format(fov_id, peak, planeNumber))
io.imsave(channel_filename, img[:,:,:,int(planeNumber)-1])
# slice_and_write cuts up the image files one at a time and writes them out to tiff stacks
def tiff_stack_slice_and_write(images_to_write, channel_masks, analyzed_imgs):
'''Writes out 4D stacks of TIFF images per channel.
Loads all tiffs from an FOV into memory and then slices all time points at once.
Called by
__main__
'''
# make an array of images and then concatenate them into one big stack
image_fov_stack = []
# go through list of images and get the file path
for n, image in enumerate(images_to_write):
# analyzed_imgs dictionary will be found in main scope. [0] is the key, [1] is jd
image_params = analyzed_imgs[image[0]]
information("Loading %s." % image_params['filepath'].split('/')[-1])
if n == 0:
# declare identification variables for saving using first image
fov_id = image_params['fov']
# load the tif and store it in array
with tiff.TiffFile(image_params['filepath']) as tif:
image_data = tif.asarray()
# channel finding was also done on images after orientation was fixed
image_data = fix_orientation(image_data)
# add additional axis if the image is flat
if len(image_data.shape) == 2:
image_data = np.expand_dims(image_data, 0)
# change axis so it goes Y, X, Plane
image_data = np.rollaxis(image_data, 0, 3)
# add it to list. The images should be in time order
image_fov_stack.append(image_data)
# concatenate the list into one big ass stack
image_fov_stack = np.stack(image_fov_stack, axis=0)
# cut out the channels as per channel masks for this fov
for peak, channel_loc in six.iteritems(channel_masks[fov_id]):
#information('Slicing and saving channel peak %s.' % channel_filename.split('/')[-1])
information('Slicing and saving channel peak %d.' % peak)
# channel masks should only contain ints, but you can use this for hard fix
# for i in range(len(channel_loc)):
# for j in range(len(channel_loc[i])):
# channel_loc[i][j] = int(channel_loc[i][j])
# slice out channel.
# The function should recognize the shape length as 4 and cut all time points
channel_stack = cut_slice(image_fov_stack, channel_loc)
# save a different time stack for all colors
for color_index in range(channel_stack.shape[3]):
# this is the filename for the channel
# # chnl_dir and p will be looked for in the scope above (__main__)
channel_filename = os.path.join(params['chnl_dir'], params['experiment_name'] + '_xy%03d_p%04d_c%1d.tif' % (fov_id, peak, color_index+1))
# save stack
tiff.imsave(channel_filename, channel_stack[:,:,:,color_index], compress=4)
return
# saves traps sliced via Unet to an hdf5 file
def save_hdf5(imgDict, img_names, analyzed_imgs, fov_id, channel_masks):
'''Writes out 4D stacks of images to an HDF5 file.
Called by
mm3_Compile.py
'''
savePath = params['hdf5_dir']
if not os.path.isdir(savePath):
os.mkdir(savePath)
img_times = [analyzed_imgs[key]['t'] for key in img_names]
img_jds = [analyzed_imgs[key]['jd'] for key in img_names]
fov_ids = [analyzed_imgs[key]['fov'] for key in img_names]
# get image_params from first image from current fov
image_params = analyzed_imgs[img_names[0]]
# establish some variables for hdf5 attributes
fov_id = image_params['fov']
x_loc = image_params['x']
y_loc = image_params['y']
image_shape = image_params['shape']
image_planes = image_params['planes']
fov_channel_masks = channel_masks[fov_id]
with h5py.File(os.path.join(savePath,'{}_xy{:0=2}.hdf5'.format(params['experiment_name'],fov_id)), 'w', libver='earliest') as h5f:
# add in metadata for this FOV
# these attributes should be common for all channels
h5f.attrs.create('fov_id', fov_id)
h5f.attrs.create('stage_x_loc', x_loc)
h5f.attrs.create('stage_y_loc', y_loc)
h5f.attrs.create('image_shape', image_shape)
# encoding is because HDF5 has problems with numpy unicode
h5f.attrs.create('planes', [plane.encode('utf8') for plane in image_planes])
h5f.attrs.create('peaks', sorted([key for key in imgDict.keys()]))
# this is for things that change across time, for these create a dataset
img_names = np.asarray(img_names)
img_names = np.expand_dims(img_names, 1)
img_names = img_names.astype('S100')
h5ds = h5f.create_dataset(u'filenames', data=img_names,
chunks=True, maxshape=(None, 1), dtype='S100',
compression="gzip", shuffle=True, fletcher32=True)
h5ds = h5f.create_dataset(u'times', data=np.expand_dims(img_times, 1),
chunks=True, maxshape=(None, 1),
compression="gzip", shuffle=True, fletcher32=True)
h5ds = h5f.create_dataset(u'times_jd', data=np.expand_dims(img_jds, 1),
chunks=True, maxshape=(None, 1),
compression="gzip", shuffle=True, fletcher32=True)
# cut out the channels as per channel masks for this fov
for peak,channel_stack in six.iteritems(imgDict):
channel_stack = channel_stack.astype('uint16')
# create group for this trap
h5g = h5f.create_group('channel_%04d' % peak)
# add attribute for peak_id, channel location
h5g.attrs.create('peak_id', peak)
channel_loc = fov_channel_masks[peak]
h5g.attrs.create('channel_loc', channel_loc)
# save a different dataset for all colors
for color_index in range(channel_stack.shape[3]):
# create the dataset for the image. Review docs for these options.
h5ds = h5g.create_dataset(u'p%04d_c%1d' % (peak, color_index+1),
data=channel_stack[:,:,:,color_index],
chunks=(1, channel_stack.shape[1], channel_stack.shape[2]),
maxshape=(None, channel_stack.shape[1], channel_stack.shape[2]),
compression="gzip", shuffle=True, fletcher32=True)
# h5ds.attrs.create('plane', image_planes[color_index].encode('utf8'))
# write the data even though we have more to write (free up memory)
h5f.flush()
return
# same thing as tiff_stack_slice_and_write but do it for hdf5
def hdf5_stack_slice_and_write(images_to_write, channel_masks, analyzed_imgs):
'''Writes out 4D stacks of TIFF images to an HDF5 file.
Called by
__main__
'''
# make an array of images and then concatenate them into one big stack
image_fov_stack = []
# make arrays for filenames and times
image_filenames = []
image_times = [] # times is still an integer but may be indexed arbitrarily
image_jds = [] # jds = julian dates (times)
# go through list of images, load and fix them, and create arrays of metadata
for n, image in enumerate(images_to_write):
image_name = image[0] # [0] is the key, [1] is jd
# analyzed_imgs dictionary will be found in main scope.
image_params = analyzed_imgs[image_name]
information("Loading %s." % image_params['filepath'].split('/')[-1])
# add information to metadata arrays
image_filenames.append(image_name)
image_times.append(image_params['t'])
image_jds.append(image_params['jd'])
# declare identification variables for saving using first image
if n == 0:
# same across fov
fov_id = image_params['fov']
x_loc = image_params['x']
y_loc = image_params['y']
image_shape = image_params['shape']
image_planes = image_params['planes']
# load the tif and store it in array
with tiff.TiffFile(image_params['filepath']) as tif:
image_data = tif.asarray()
# channel finding was also done on images after orientation was fixed
image_data = fix_orientation(image_data)
# add additional axis if the image is flat
if len(image_data.shape) == 2:
image_data = np.expand_dims(image_data, 0)
# change axis so it goes Y, X, Plane
image_data = np.rollaxis(image_data, 0, 3)
# add it to list. The images should be in time order
image_fov_stack.append(image_data)
# concatenate the list into one big ass stack
image_fov_stack = np.stack(image_fov_stack, axis=0)
# create the HDF5 file for the FOV, first time this is being done.
with h5py.File(os.path.join(params['hdf5_dir'],'xy%03d.hdf5' % fov_id), 'w', libver='earliest') as h5f:
# add in metadata for this FOV
# these attributes should be common for all channels
h5f.attrs.create('fov_id', fov_id)
h5f.attrs.create('stage_x_loc', x_loc)
h5f.attrs.create('stage_y_loc', y_loc)
h5f.attrs.create('image_shape', image_shape)
# encoding is because HDF5 has problems with numpy unicode
h5f.attrs.create('planes', [plane.encode('utf8') for plane in image_planes])
h5f.attrs.create('peaks', sorted(channel_masks[fov_id].keys()))
# this is for things that change across time, for these create a dataset
h5ds = h5f.create_dataset(u'filenames', data=np.expand_dims(image_filenames, 1),
chunks=True, maxshape=(None, 1), dtype='S100',
compression="gzip", shuffle=True, fletcher32=True)
h5ds = h5f.create_dataset(u'times', data=np.expand_dims(image_times, 1),
chunks=True, maxshape=(None, 1),
compression="gzip", shuffle=True, fletcher32=True)
h5ds = h5f.create_dataset(u'times_jd', data=np.expand_dims(image_jds, 1),
chunks=True, maxshape=(None, 1),
compression="gzip", shuffle=True, fletcher32=True)
# cut out the channels as per channel masks for this fov
for peak, channel_loc in six.iteritems(channel_masks[fov_id]):
#information('Slicing and saving channel peak %s.' % channel_filename.split('/')[-1])
information('Slicing and saving channel peak %d.' % peak)
# create group for this channel
h5g = h5f.create_group('channel_%04d' % peak)
# add attribute for peak_id, channel location
h5g.attrs.create('peak_id', peak)
h5g.attrs.create('channel_loc', channel_loc)
# channel masks should only contain ints, but you can use this for a hard fix
# for i in range(len(channel_loc)):
# for j in range(len(channel_loc[i])):
# channel_loc[i][j] = int(channel_loc[i][j])
# slice out channel.
# The function should recognize the shape length as 4 and cut all time points
channel_stack = cut_slice(image_fov_stack, channel_loc)
# save a different dataset for all colors
for color_index in range(channel_stack.shape[3]):
# create the dataset for the image. Review docs for these options.
h5ds = h5g.create_dataset(u'p%04d_c%1d' % (peak, color_index+1),
data=channel_stack[:,:,:,color_index],
chunks=(1, channel_stack.shape[1], channel_stack.shape[2]),
maxshape=(None, channel_stack.shape[1], channel_stack.shape[2]),
compression="gzip", shuffle=True, fletcher32=True)
# h5ds.attrs.create('plane', image_planes[color_index].encode('utf8'))
# write the data even though we have more to write (free up memory)
h5f.flush()
return
def tileImage(img, subImageNumber):
divisor = int(np.sqrt(subImageNumber))
M = img.shape[0]//divisor
N = img.shape[0]//divisor
print(img.shape, M, N, divisor, subImageNumber)
ans = ([img[x:x+M,y:y+N] for x in range(0,img.shape[0],M) for y in range(0,img.shape[1],N)])
tiles=[]
for m in ans:
if m.shape[0]==512 and m.shape[1]==512:
tiles.append(m)
tiles=np.asarray(tiles)
#print(tiles)
return(tiles)
def get_weights(img, subImageNumber):
divisor = int(np.sqrt(subImageNumber))
M = img.shape[0]//divisor
N = img.shape[0]//divisor
weights = np.ones((img.shape[0],img.shape[1]),dtype='uint8')
for i in range(divisor-1):
weights[(M*(i+1))-25:(M*(i+1)+25),:] = 0
weights[:,(N*(i+1))-25:(N*(i+1)+25)] = 0
return(weights)
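# Hedged sketch of how tileImage and get_weights are meant to pair up, assuming a
# 2048x2048 padded frame split into 16 sub-images (4x4 crops of 512x512, which is
# the crop size tileImage keeps as written):
#
#   padded  = np.zeros((2048, 2048), dtype='float32')
#   crops   = tileImage(padded, subImageNumber=16)     # -> (16, 512, 512)
#   weights = get_weights(padded, subImageNumber=16)   # 0 in 50 px bands around crop seams, 1 elsewhere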
def permute_image(img, trap_align_metadata):
# are there three dimensions?
if len(img.shape) == 3:
if img.shape[0] < 3: # for tifs with fewer than three imaging channels, the first dimension separates channels
# img = np.transpose(img, (1,2,0))
img = img[trap_align_metadata['phase_plane_index'],:,:] # grab just the phase channel
else:
img = img[:,:,trap_align_metadata['phase_plane_index']] # grab just the phase channel
return(img)
def imageConcatenatorFeatures(imgStack, subImageNumber = 64):
rowNumPerImage = int(np.sqrt(subImageNumber)) # here I'm assuming our large images are square, with equal number of crops in each dimension
#print(rowNumPerImage)
imageNum = int(imgStack.shape[0]/subImageNumber) # total number of sub-images divided by the number of sub-images in each original large image
iterNum = int(imageNum*rowNumPerImage)
imageDims = int(np.sqrt(imgStack.shape[1]*imgStack.shape[2]*subImageNumber))
featureNum = int(imgStack.shape[3])
bigImg = np.zeros(shape=(imageNum, imageDims, imageDims, featureNum), dtype='float32') # create array to store reconstructed images
featureRowDicts = []
for j in range(featureNum):
rowDict = {}
for i in range(iterNum):
baseNum = int(i*iterNum/imageNum)
# concatenate columns of 256x256 images to build each 256x2048 row
rowDict[i] = np.column_stack((imgStack[baseNum,:,:,j],imgStack[baseNum+1,:,:,j],
imgStack[baseNum+2,:,:,j], imgStack[baseNum+3,:,:,j]))#,
#imgStack[baseNum+4,:,:,j],imgStack[baseNum+5,:,:,j],
#imgStack[baseNum+6,:,:,j],imgStack[baseNum+7,:,:,j]))
featureRowDicts.append(rowDict)
for j in range(featureNum):
for i in range(imageNum):
baseNum = int(i*rowNumPerImage)
# concatenate appropriate 256x2048 rows to build a 2048x2048 image and place it into bigImg
bigImg[i,:,:,j] = np.row_stack((featureRowDicts[j][baseNum],featureRowDicts[j][baseNum+1],
featureRowDicts[j][baseNum+2],featureRowDicts[j][baseNum+3]))#,
#featureRowDicts[j][baseNum+4],featureRowDicts[j][baseNum+5],
#featureRowDicts[j][baseNum+6],featureRowDicts[j][baseNum+7]))
return(bigImg)
def imageConcatenatorFeatures2(imgStack, subImageNumber = 81):
rowNumPerImage = int(np.sqrt(subImageNumber)) # here I'm assuming our large images are square, with equal number of crops in each dimension
imageNum = int(imgStack.shape[0]/subImageNumber) # total number of sub-images divided by the number of sub-images in each original large image
iterNum = int(imageNum*rowNumPerImage)
imageDims = int(np.sqrt(imgStack.shape[1]*imgStack.shape[2]*subImageNumber))
featureNum = int(imgStack.shape[3])
bigImg = np.zeros(shape=(imageNum, imageDims, imageDims, featureNum), dtype='float32') # create array to store reconstructed images
featureRowDicts = []
for j in range(featureNum):
rowDict = {}
for i in range(iterNum):
baseNum = int(i*iterNum/imageNum)
# concatenate columns of 256x256 images to build each 256x2048 row
rowDict[i] = np.column_stack((imgStack[baseNum,:,:,j],imgStack[baseNum+1,:,:,j],
imgStack[baseNum+2,:,:,j], imgStack[baseNum+3,:,:,j],
imgStack[baseNum+4,:,:,j]))#,imgStack[baseNum+5,:,:,j],
#imgStack[baseNum+6,:,:,j],imgStack[baseNum+7,:,:,j],
#imgStack[baseNum+8,:,:,j]))
featureRowDicts.append(rowDict)
for j in range(featureNum):
for i in range(imageNum):
baseNum = int(i*rowNumPerImage)
# concatenate appropriate 256x2048 rows to build a 2048x2048 image and place it into bigImg
bigImg[i,:,:,j] = np.row_stack((featureRowDicts[j][baseNum],featureRowDicts[j][baseNum+1],
featureRowDicts[j][baseNum+2],featureRowDicts[j][baseNum+3],
featureRowDicts[j][baseNum+4]))#,featureRowDicts[j][baseNum+5],
#featureRowDicts[j][baseNum+6],featureRowDicts[j][baseNum+7],
#featureRowDicts[j][baseNum+8]))
return(bigImg)
def get_weights_array(arr=np.zeros((2048,2048)), shiftDistance=128, subImageNumber=64, padSubImageNumber=81):
originalImageWeights = get_weights(arr, subImageNumber=subImageNumber)
shiftLeftWeights = np.pad(originalImageWeights, pad_width=((0,0),(0,shiftDistance)),
mode='constant', constant_values=((0,0),(0,0)))[:,shiftDistance:]
shiftRightWeights = np.pad(originalImageWeights, pad_width=((0,0),(shiftDistance,0)),
mode='constant', constant_values=((0,0),(0,0)))[:,:(-1*shiftDistance)]
shiftUpWeights = np.pad(originalImageWeights, pad_width=((0,shiftDistance),(0,0)),
mode='constant', constant_values=((0,0),(0,0)))[shiftDistance:,:]
shiftDownWeights = np.pad(originalImageWeights, pad_width=((shiftDistance,0),(0,0)),
mode='constant', constant_values=((0,0),(0,0)))[:(-1*shiftDistance),:]
expandedImageWeights = get_weights(np.zeros((arr.shape[0]+2*shiftDistance,arr.shape[1]+2*shiftDistance)), subImageNumber=padSubImageNumber)[shiftDistance:-shiftDistance,shiftDistance:-shiftDistance]
allWeights = np.stack((originalImageWeights, expandedImageWeights, shiftUpWeights, shiftDownWeights, shiftLeftWeights,shiftRightWeights), axis=-1)
stackWeights = np.stack((allWeights,allWeights),axis=0)
stackWeights = np.stack((stackWeights,stackWeights,stackWeights),axis=3)
return(stackWeights)
# predicts locations of channels in an image using deep learning model
def get_frame_predictions(img,model,stackWeights, shiftDistance=256, subImageNumber=16, padSubImageNumber=25, debug=False):
pred = predict_first_image_channels(img, model, shiftDistance=shiftDistance,
subImageNumber=subImageNumber, padSubImageNumber=padSubImageNumber, debug=debug)[0,...]
# print(pred.shape)
if debug:
print(pred.shape)
compositePrediction = np.average(pred, axis=3, weights=stackWeights)
# print(compositePrediction.shape)
padSize = (compositePrediction.shape[0]-img.shape[0])//2
compositePrediction = util.crop(compositePrediction,((padSize,padSize),
(padSize,padSize),
(0,0)))
# print(compositePrediction.shape)
return(compositePrediction)
def apply_median_filter_normalize(imgs):
selem = morphology.disk(3)
for i in range(imgs.shape[0]):
# Store sample
tmpImg = imgs[i,:,:,0]
medImg = median(tmpImg, selem)
tmpImg = medImg/np.max(medImg)
tmpImg = np.expand_dims(tmpImg, axis=-1)
imgs[i,:,:,:] = tmpImg
return(imgs)
def predict_first_image_channels(img, model,
subImageNumber=16, padSubImageNumber=25,
shiftDistance=128, batchSize=1,
debug=False):
imgSize = img.shape[0]
padSize = (2048-imgSize)//2 # how much to pad on each side to get up to 2048x2048?
imgStack = np.pad(img, pad_width=((padSize,padSize),(padSize,padSize)),
mode='constant', constant_values=((0,0),(0,0))) # pad the images to make them 2048x2048
# pad the stack by 128 pixels on each side to get complementary crops that I can run the network on. This
# should help me fill in low-confidence regions where the crop boundaries were for the original image
imgStackExpand = np.pad(imgStack, pad_width=((shiftDistance,shiftDistance),(shiftDistance,shiftDistance)),
mode='constant', constant_values=((0,0),(0,0)))
imgStackShiftRight = np.pad(imgStack, pad_width=((0,0),(0,shiftDistance)),
mode='constant', constant_values=((0,0),(0,0)))[:,shiftDistance:]
imgStackShiftLeft = np.pad(imgStack, pad_width=((0,0),(shiftDistance,0)),
mode='constant', constant_values=((0,0),(0,0)))[:,:-shiftDistance]
imgStackShiftDown = np.pad(imgStack, pad_width=((0,shiftDistance),(0,0)),
mode='constant', constant_values=((0,0),(0,0)))[shiftDistance:,:]
imgStackShiftUp = np.pad(imgStack, pad_width=((shiftDistance,0),(0,0)),
mode='constant', constant_values=((0,0),(0,0)))[:-shiftDistance,:]
#print(imgStackShiftUp.shape)
crops = tileImage(imgStack, subImageNumber=subImageNumber)
print("Crops: ", crops.shape)
crops = np.expand_dims(crops, -1)
data_gen_args = {'batch_size':params['compile']['channel_prediction_batch_size'],
'n_channels':1,
'normalize_to_one':True,
'shuffle':False}
predict_gen_args = {'verbose':1,
'use_multiprocessing':True,
'workers':params['num_analyzers']}
img_generator = TrapSegmentationDataGenerator(crops, **data_gen_args)
predictions = model.predict_generator(img_generator, **predict_gen_args)
prediction = imageConcatenatorFeatures(predictions, subImageNumber=subImageNumber)
#print(prediction.shape)
cropsExpand = tileImage(imgStackExpand, subImageNumber=padSubImageNumber)
cropsExpand = np.expand_dims(cropsExpand, -1)
img_generator = TrapSegmentationDataGenerator(cropsExpand, **data_gen_args)
predictions = model.predict_generator(img_generator, **predict_gen_args)
predictionExpand = imageConcatenatorFeatures2(predictions, subImageNumber=padSubImageNumber)
predictionExpand = util.crop(predictionExpand, ((0,0),(shiftDistance,shiftDistance),(shiftDistance,shiftDistance),(0,0)))
#print(predictionExpand.shape)
cropsShiftLeft = tileImage(imgStackShiftLeft, subImageNumber=subImageNumber)
cropsShiftLeft = np.expand_dims(cropsShiftLeft, -1)
img_generator = TrapSegmentationDataGenerator(cropsShiftLeft, **data_gen_args)
predictions = model.predict_generator(img_generator, **predict_gen_args)
predictionLeft = imageConcatenatorFeatures(predictions, subImageNumber=subImageNumber)
predictionLeft = np.pad(predictionLeft, pad_width=((0,0),(0,0),(0,shiftDistance),(0,0)),
mode='constant', constant_values=((0,0),(0,0),(0,0),(0,0)))[:,:,shiftDistance:,:]
#print(predictionLeft.shape)
cropsShiftRight = tileImage(imgStackShiftRight, subImageNumber=subImageNumber)
cropsShiftRight = np.expand_dims(cropsShiftRight, -1)
img_generator = TrapSegmentationDataGenerator(cropsShiftRight, **data_gen_args)
predictions = model.predict_generator(img_generator, **predict_gen_args)
predictionRight = imageConcatenatorFeatures(predictions, subImageNumber=subImageNumber)
predictionRight = np.pad(predictionRight, pad_width=((0,0),(0,0),(shiftDistance,0),(0,0)),
mode='constant', constant_values=((0,0),(0,0),(0,0),(0,0)))[:,:,:(-1*shiftDistance),:]
#print(predictionRight.shape)
cropsShiftUp = tileImage(imgStackShiftUp, subImageNumber=subImageNumber)
#print(cropsShiftUp.shape)
cropsShiftUp = np.expand_dims(cropsShiftUp, -1)
img_generator = TrapSegmentationDataGenerator(cropsShiftUp, **data_gen_args)
predictions = model.predict_generator(img_generator, **predict_gen_args)
predictionUp = imageConcatenatorFeatures(predictions, subImageNumber=subImageNumber)
predictionUp = np.pad(predictionUp, pad_width=((0,0),(0,shiftDistance),(0,0),(0,0)),
mode='constant', constant_values=((0,0),(0,0),(0,0),(0,0)))[:,shiftDistance:,:,:]
#print(predictionUp.shape)
cropsShiftDown = tileImage(imgStackShiftDown, subImageNumber=subImageNumber)
cropsShiftDown = np.expand_dims(cropsShiftDown, -1)
img_generator = TrapSegmentationDataGenerator(cropsShiftDown, **data_gen_args)
predictions = model.predict_generator(img_generator, **predict_gen_args)
predictionDown = imageConcatenatorFeatures(predictions, subImageNumber=subImageNumber)
predictionDown = np.pad(predictionDown, pad_width=((0,0),(shiftDistance,0),(0,0),(0,0)),
mode='constant', constant_values=((0,0),(0,0),(0,0),(0,0)))[:,:(-1*shiftDistance),:,:]
#print(predictionDown.shape)
allPredictions = np.stack((prediction, predictionExpand,
predictionUp, predictionDown,
predictionLeft, predictionRight), axis=-1)
return(allPredictions)
# takes initial U-net centroids for trap locations, and creates bounding boxes for each trap at the defined height and width
def get_frame_trap_bounding_boxes(trapLabels, trapProps, trapAreaThreshold=2000, trapWidth=27, trapHeight=256):
badTrapLabels = [reg.label for reg in trapProps if reg.area < trapAreaThreshold] # filter out small "trap" regions
goodTraps = trapLabels.copy()
for label in badTrapLabels:
goodTraps[goodTraps == label] = 0 # re-label bad traps as background (0)
goodTrapProps = measure.regionprops(goodTraps)
trapCentroids = [(int(np.round(reg.centroid[0])),int(np.round(reg.centroid[1]))) for reg in goodTrapProps] # get centroids as integers
trapBboxes = []
for centroid in trapCentroids:
rowIndex = centroid[0]
colIndex = centroid[1]
minRow = rowIndex-trapHeight//2
maxRow = rowIndex+trapHeight//2
minCol = colIndex-trapWidth//2
maxCol = colIndex+trapWidth//2
if trapWidth % 2 != 0:
maxCol += 1
coordArray = np.array([minRow,maxRow,minCol,maxCol])
# remove any traps at edges of image
if np.any(coordArray > goodTraps.shape[0]):
continue
if np.any(coordArray < 0):
continue
trapBboxes.append((minRow,minCol,maxRow,maxCol))
return(trapBboxes)
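# Hedged usage sketch (the thresholding/labelling below is illustrative, not the
# pipeline's exact post-processing of the U-net output; trap_prediction is hypothetical):
#
#   trap_labels = measure.label(trap_prediction > 0.5)
#   trap_props  = measure.regionprops(trap_labels)
#   bboxes = get_frame_trap_bounding_boxes(trap_labels, trap_props,
#                                          trapAreaThreshold=2000,
#                                          trapWidth=27, trapHeight=256)
#   # each element of bboxes is (minRow, minCol, maxRow, maxCol)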
# crops trap regions from each full frame using the bounding boxes found above, and determines trap orientation
def crop_traps(fileNames, trapProps, labelledTraps, bboxesDict, trap_align_metadata):
frameNum = trap_align_metadata['frame_count']
channelNum = trap_align_metadata['plane_number']
trapImagesDict = {key:np.zeros((frameNum,
trap_align_metadata['trap_height'],
trap_align_metadata['trap_width'],
channelNum)) for key in bboxesDict}
trapClosedEndPxDict = {}
flipImageDict = {}
trapMask = labelledTraps
for frame in range(frameNum):
if (frame+1) % 20 == 0:
print("Cropping trap regions for frame number {} of {}.".format(frame+1, frameNum))
imgPath = os.path.join(params['experiment_directory'],params['image_directory'],fileNames[frame])
fullFrameImg = io.imread(imgPath)
if len(fullFrameImg.shape) == 3:
if fullFrameImg.shape[0] < 3: # for tifs with fewer than three imaging channels, the first dimension separates channels
fullFrameImg = np.transpose(fullFrameImg, (1,2,0))
trapClosedEndPxDict[fileNames[frame]] = {key:{} for key in bboxesDict.keys()}
for key in trapImagesDict.keys():
bbox = bboxesDict[key][frame]
trapImagesDict[key][frame,:,:,:] = fullFrameImg[bbox[0]:bbox[2],bbox[1]:bbox[3],:]
#tmpImg = np.reshape(fullFrameImg[trapMask==key], (trapHeight,trapWidth,channelNum))
if frame == 0:
medianProfile = np.median(trapImagesDict[key][frame,:,:,0],axis=1) # median intensity profile along the trap (one value per row)
maxIntensityRow = np.argmax(medianProfile)
if maxIntensityRow > trap_align_metadata['trap_height']//2:
flipImageDict[key] = 0
else:
flipImageDict[key] = 1
if flipImageDict[key] == 1:
trapImagesDict[key][frame,:,:,:] = trapImagesDict[key][frame,::-1,:,:]
trapClosedEndPxDict[fileNames[frame]][key]['closed_end_px'] = bbox[0]
trapClosedEndPxDict[fileNames[frame]][key]['open_end_px'] = bbox[2]
else:
trapClosedEndPxDict[fileNames[frame]][key]['closed_end_px'] = bbox[2]
trapClosedEndPxDict[fileNames[frame]][key]['open_end_px'] = bbox[0]
continue
return(trapImagesDict, trapClosedEndPxDict)
# gets shifted bounding boxes to crop traps through time
def shift_bounding_boxes(bboxesDict, shifts, imgSize):
bboxesShiftDict = {}
for key in bboxesDict.keys():
bboxesShiftDict[key] = []
bboxes = bboxesDict[key]
for i in range(shifts.shape[0]):
if i == 0:
bboxesShiftDict[key].append(bboxes)
else:
minRow = bboxes[0]+shifts[i,0]
minCol = bboxes[1]+shifts[i,1]
maxRow = bboxes[2]+shifts[i,0]
maxCol = bboxes[3]+shifts[i,1]
bboxesShiftDict[key].append((minRow,
minCol,
maxRow,
maxCol))
if np.any(np.asarray([minRow,minCol,maxRow,maxCol]) < 0):
print("channel {} removed: out of frame".format(key))
del bboxesShiftDict[key]
break
if np.any(np.asarray([minRow,minCol,maxRow,maxCol]) > imgSize):
print("channel {} removed: out of frame".format(key))
del bboxesShiftDict[key]
break
return(bboxesShiftDict)
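# Hedged usage sketch: shifts is expected to be an (n_frames, 2) array of
# (row, col) offsets per frame, e.g. from image registration; any trap whose
# shifted box leaves the frame is dropped from the returned dictionary.
#
#   shifted_bboxes = shift_bounding_boxes(bboxesDict, shifts, imgSize=2048)  # 2048 is illustrative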
# finds the location of channels in a tif
def find_channel_locs(image_data):
'''Finds the location of channels from a phase contrast image. The channels are returned in
a dictionary where the key is the x position of the channel in pixels and the value is a
dictionary with the open and closed end in pixels in y.
Called by
mm3_Compile.get_tif_params
'''
# declare temp variables from yaml parameter dict.
chan_w = params['compile']['channel_width']
chan_sep = params['compile']['channel_separation']
crop_wp = int(params['compile']['channel_width_pad'] + chan_w/2)
chan_snr = params['compile']['channel_detection_snr']
# Detect peaks in the x projection (i.e. find the channels)
projection_x = image_data.sum(axis=0).astype(np.int32)
# find_peaks_cwt is a function which attempts to find the peaks in a 1-D array by
# convolving it with a wave. here the wave is the default Mexican hat wave
# but the minimum signal to noise ratio is specified
# *** The range here should be a parameter or changed to a fraction.
peaks = find_peaks_cwt(projection_x, np.arange(chan_w-5,chan_w+5), min_snr=chan_snr)
# If the left-most peak position is within half of a channel separation,
# discard the channel from the list.
if peaks[0] < (chan_sep / 2):
peaks = peaks[1:]
# If the difference between the right-most peak position and the right edge
# of the image is less than half of a channel separation, discard the channel.
if image_data.shape[1] - peaks[-1] < (chan_sep / 2):
peaks = peaks[:-1]
# Find the average channel ends for the y-projected image
projection_y = image_data.sum(axis=1)
# find derivative, must use int32 because it was unsigned 16b before.
proj_y_d = np.diff(projection_y.astype(np.int32))
# use the top third to look for closed end, is pixel location of highest deriv
onethirdpoint_y = int(projection_y.shape[0]/3.0)
default_closed_end_px = proj_y_d[:onethirdpoint_y].argmax()
# use bottom third to look for open end, pixel location of lowest deriv
twothirdpoint_y = int(projection_y.shape[0]*2.0/3.0)
default_open_end_px = twothirdpoint_y + proj_y_d[twothirdpoint_y:].argmin()
default_length = default_open_end_px - default_closed_end_px # used for checks
# go through peaks and assign information
# dict for channel dimensions
chnl_loc_dict = {}
# key is peak location, value is dict with {'closed_end_px': px, 'open_end_px': px}
for peak in peaks:
# set defaults
chnl_loc_dict[peak] = {'closed_end_px': default_closed_end_px,
'open_end_px': default_open_end_px}
# redo the previous y projection finding with just this channel
channel_slice = image_data[:, peak-crop_wp:peak+crop_wp]
slice_projection_y = channel_slice.sum(axis = 1)
slice_proj_y_d = np.diff(slice_projection_y.astype(np.int32))
slice_closed_end_px = slice_proj_y_d[:onethirdpoint_y].argmax()
slice_open_end_px = twothirdpoint_y + slice_proj_y_d[twothirdpoint_y:].argmin()
slice_length = slice_open_end_px - slice_closed_end_px
# check if these values make sense. If so, use them. If not, use default
# make sure the length is not more than 15 pixels bigger or smaller than the default
# *** This 15 should probably be a parameter or at least changed to a fraction.
if slice_length + 15 < default_length or slice_length - 15 > default_length:
continue
# make sure ends are greater than 15 pixels from image edge
if slice_closed_end_px < 15 or slice_open_end_px > image_data.shape[0] - 15:
continue
# if you made it to this point then update the entry
chnl_loc_dict[peak] = {'closed_end_px' : slice_closed_end_px,
'open_end_px' : slice_open_end_px}
return chnl_loc_dict
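# Hedged illustration of the returned structure (pixel values are made up):
#
#   chnl_loc_dict = find_channel_locs(image_data)
#   # chnl_loc_dict == {334: {'closed_end_px': 24, 'open_end_px': 270},
#   #                   412: {'closed_end_px': 25, 'open_end_px': 271}, ...}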
# make masks from initial set of images (same images as clusters)
def make_masks(analyzed_imgs):
'''
make_masks goes through the channel locations in the image metadata and builds a consensus
mask for each fov, which it returns as a dictionary named channel_masks.
The keys in this dictionary are fov ids, and the values are another dictionary. That dict's keys are channel locations (peaks) and its values are [2][2] arrays:
[[minrow, maxrow],[mincol, maxcol]] of pixel locations designating the corners of each mask
for each channel on the whole image.
One important consequence of this function is that the channel ids and the size of the
channel slices are decided now. Updates to the masks must coordinate with these values.
Parameters
analyzed_imgs : dict
image information created by get_params
Returns
channel_masks : dict
dictionary of consensus channel masks.
Called By
mm3_Compile.py
Calls
'''
information("Determining initial channel masks...")
# declare temp variables from yaml parameter dict.
crop_wp = int(params['compile']['channel_width_pad'] + params['compile']['channel_width']/2)
chan_lp = int(params['compile']['channel_length_pad'])
# initialize dictionary
channel_masks = {}
# get the size of the images (hope they are the same)
for img_k in analyzed_imgs.keys():
img_v = analyzed_imgs[img_k]
image_rows = img_v['shape'][0] # y (row) pixels
image_cols = img_v['shape'][1] # x (column) pixels
break # just need one; no need to go through the whole dict
# get the fov ids
fovs = []
for img_k in analyzed_imgs.keys():
img_v = analyzed_imgs[img_k]
if img_v['fov'] not in fovs:
fovs.append(img_v['fov'])
# max width and length across all fovs. channels will get expanded by these values
# this is important for later updates to the masks, which should all be the same size
max_chnl_mask_len = 0
max_chnl_mask_wid = 0
# for each fov make a channel_mask dictionary from consensus mask
for fov in fovs:
# initialize the dict and consensus mask
channel_masks_1fov = {} # dict which holds channel masks {peak : [[y1, y2],[x1,x2]],...}
consensus_mask = np.zeros([image_rows, image_cols]) # mask for labeling
# bring up information for each image
for img_k in analyzed_imgs.keys():
img_v = analyzed_imgs[img_k]
# skip this one if it is not of the current fov
if img_v['fov'] != fov:
continue
# for each channel in each image make a single mask
img_chnl_mask = np.zeros([image_rows, image_cols])
# and add the channel mask to it
for chnl_peak, peak_ends in six.iteritems(img_v['channels']):
# pull out the peak location and top and bottom location
# and expand by padding (more padding done later for width)
x1 = max(chnl_peak - crop_wp, 0)
x2 = min(chnl_peak + crop_wp, image_cols)
y1 = max(peak_ends['closed_end_px'] - chan_lp, 0)
y2 = min(peak_ends['open_end_px'] + chan_lp, image_rows)
# add it to the mask for this image
img_chnl_mask[y1:y2, x1:x2] = 1
# add it to the consensus mask
consensus_mask += img_chnl_mask
# Normalize consensus mask between 0 and 1.
consensus_mask = consensus_mask.astype('float32') / float(np.amax(consensus_mask))
# label the connected regions of the consensus mask so each channel gets its own id.
# the [0] is for the label array ([1] is the number of regions)
consensus_mask = ndi.label(consensus_mask)[0]
# go through each label
for label in np.unique(consensus_mask):
if label == 0: # label zero is the background
continue
binary_core = consensus_mask == label
# clean up the rough edges
poscols = np.any(binary_core, axis = 0) # column positions where true (any)
posrows = np.any(binary_core, axis = 1) # row positions where true (any)
# channel_id given by horizontal position
# this is important. later updates to the positions will have to check
# if their channels contain this median value to match up
channel_id = int(np.median(np.where(poscols)[0]))
# store the edge locations of the channel mask in the dictionary. Will be ints
min_row = np.min(np.where(posrows)[0])
max_row = np.max(np.where(posrows)[0])
min_col = np.min(np.where(poscols)[0])
max_col = np.max(np.where(poscols)[0])
# if the min/max cols are within the image bounds,
# add the mask, as 4 points, to the dictionary
if min_col > 0 and max_col < image_cols:
channel_masks_1fov[channel_id] = [[min_row, max_row], [min_col, max_col]]
# find the largest channel width and height while you go round
max_chnl_mask_len = int(max(max_chnl_mask_len, max_row - min_row))
max_chnl_mask_wid = int(max(max_chnl_mask_wid, max_col - min_col))
# add channel_mask dictionary to the fov dictionary, use copy to play it safe
channel_masks[fov] = channel_masks_1fov.copy()
# update all channel masks to be the max size
cm_copy = channel_masks.copy()
for fov, peaks in six.iteritems(channel_masks):
# f_id = int(fov)
for peak, chnl_mask in six.iteritems(peaks):
# p_id = int(peak)
# just add length to the open end (bottom of image, higher row index)
if chnl_mask[0][1] - chnl_mask[0][0] != max_chnl_mask_len:
cm_copy[fov][peak][0][1] = chnl_mask[0][0] + max_chnl_mask_len
# enlarge widths around the middle, but make sure you don't get floats
if chnl_mask[1][1] - chnl_mask[1][0] != max_chnl_mask_wid:
wid_diff = max_chnl_mask_wid - (chnl_mask[1][1] - chnl_mask[1][0])
if wid_diff % 2 == 0:
cm_copy[fov][peak][1][0] = max(chnl_mask[1][0] - wid_diff/2, 0)
cm_copy[fov][peak][1][1] = min(chnl_mask[1][1] + wid_diff/2, image_cols - 1)
else:
cm_copy[fov][peak][1][0] = max(chnl_mask[1][0] - (wid_diff-1)/2, 0)
cm_copy[fov][peak][1][1] = min(chnl_mask[1][1] + (wid_diff+1)/2, image_cols - 1)
# convert all values to ints
chnl_mask[0][0] = int(chnl_mask[0][0])
chnl_mask[0][1] = int(chnl_mask[0][1])
chnl_mask[1][0] = int(chnl_mask[1][0])
chnl_mask[1][1] = int(chnl_mask[1][1])
# cm_copy[fov][peak] = {'y_top': chnl_mask[0][0],
# 'y_bot': chnl_mask[0][1],
# 'x_left': chnl_mask[1][0],
# 'x_right': chnl_mask[1][1]}
# print(type(cm_copy[fov][peak][1][0]), cm_copy[fov][peak][1][0])
# save the channel mask dictionary to a text file and a yaml file
# with open(os.path.join(params['ana_dir'], 'channel_masks.pkl'), 'wb') as cmask_file:
# pickle.dump(cm_copy, cmask_file, protocol=pickle.HIGHEST_PROTOCOL)
with open(os.path.join(params['ana_dir'], 'channel_masks.txt'), 'w') as cmask_file:
pprint(cm_copy, stream=cmask_file)
with open(os.path.join(params['ana_dir'], 'channel_masks.yaml'), 'w') as cmask_file:
yaml.dump(data=cm_copy, stream=cmask_file, default_flow_style=False, tags=None)
information("Channel masks saved.")
return cm_copy
# get each fov_id, peak_id, frame's mask bounding box from bounding boxes arrived at by convolutional neural network
def make_channel_masks_CNN(bboxes_dict):
'''
The keys in the returned dictionary are peak_ids and the value for each is an array of shape (frameNumber,2,2):
Each frame's 2x2 slice of the array holds the given peak_id's [[minrow, maxrow],[mincol, maxcol]].
One important consequence of this function is that the channel ids and the size of the
channel slices are decided now. Updates to the masks must coordinate with these values.
Parameters
bboxes_dict : dict
trap bounding boxes through time for each peak, as produced by the CNN trap detection
Returns
channel_masks : dict
dictionary of consensus channel masks.
Called By
mm3_Compile.py
Calls
'''
# initialize the new channel_masks dict
channel_masks = {}
# reorder elements of tuples in bboxes_dict to match [[minrow, maxrow], [mincol, maxcol]] convention above
peak_ids = [peak_id for peak_id in bboxes_dict.keys()]
peak_ids.sort()
for peak_id in peak_ids:
# get each frame's bounding boxes for the given peak_id
frame_bboxes = bboxes_dict[peak_id]
# allocate a fresh array per peak so entries are not overwritten by later peaks
bbox_array = np.zeros((len(frame_bboxes),2,2), dtype='uint16')
for frame_index in range(len(frame_bboxes)):
# replace the values in bbox_array with the proper ones from frame_bboxes
minrow = frame_bboxes[frame_index][0]
maxrow = frame_bboxes[frame_index][2]
mincol = frame_bboxes[frame_index][1]
maxcol = frame_bboxes[frame_index][3]
bbox_array[frame_index,0,0] = minrow
bbox_array[frame_index,0,1] = maxrow
bbox_array[frame_index,1,0] = mincol
bbox_array[frame_index,1,1] = maxcol
channel_masks[peak_id] = bbox_array
return(channel_masks)
### functions about trimming, padding, and manipulating images
# define function for flipping the images on an FOV by FOV basis
def fix_orientation(image_data):
'''
Fix the orientation. The standard direction for channels to open to is down.
called by
process_tif
get_params
'''
# user parameter indicates how things should be flipped
image_orientation = params['compile']['image_orientation']
# if this is just a phase image, add an extra axis so the rest of the code works the same
flat = False # flag for if the image is flat or multiple levels
if len(image_data.shape) == 2:
image_data = np.expand_dims(image_data, 0)
flat = True
# setting image_orientation to 'auto' will use autodetection
if image_orientation == "auto":
# use 'phase_plane' to find the phase plane in image_data, assuming c1, c2, c3... naming scheme here.
try:
ph_channel = int(re.search('[0-9]', params['phase_plane']).group(0)) - 1
except:
# Pick the plane to analyze with the highest mean px value (should be phase)
ph_channel = np.argmax([np.mean(image_data[ci]) for ci in range(image_data.shape[0])])
# flip based on the index of the highest average row value
# this should be closer to the opening
if np.argmax(image_data[ph_channel].mean(axis = 1)) < image_data[ph_channel].shape[0] / 2:
image_data = image_data[:,::-1,:]
else:
pass # no need to do anything
# flip if up is chosen
elif image_orientation == "up":
return image_data[:,::-1,:]
# do not flip the images if "down" is the specified image orientation
elif image_orientation == "down":
pass
if flat:
image_data = image_data[0] # just return that first layer
return image_data
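# Hedged usage sketch: fix_orientation is typically applied right after reading a
# raw TIFF and before channel detection (image_path below is hypothetical):
#
#   with tiff.TiffFile(image_path) as tif:
#       image_data = fix_orientation(tif.asarray())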
# cuts out channels from the image
def cut_slice(image_data, channel_loc):
'''Takes an image and cuts out the channel based on the slice location
slice location is the list with the peak information, in the form
[[y1, y2],[x1, x2]]. Returns the channel slice as a numpy array.
The numpy array will be a stack if there are multiple planes.
if you want to slice all the channels from a picture with the channel_masks
dictionary use a loop like this:
for channel_loc in channel_masks[fov_id]: # fov_id is the fov of the image
channel_slice = cut_slice(image_pixel_data, channel_loc)
# ... do something with the slice
NOTE: this function will try to determine what the shape of your
image is and slice accordingly. It expects the images are in the order
[t, x, y, c]. It assumes images with three dimensions are [x, y, c] not
[t, x, y].
'''
# case where image is in form [x, y]
if len(image_data.shape) == 2:
# make slice object
channel_slicer = np.s_[channel_loc[0][0]:channel_loc[0][1],
channel_loc[1][0]:channel_loc[1][1]]
# case where image is in form [x, y, c]
elif len(image_data.shape) == 3:
channel_slicer = np.s_[channel_loc[0][0]:channel_loc[0][1],
channel_loc[1][0]:channel_loc[1][1],:]
# case where image in form [t, x , y, c]
elif len(image_data.shape) == 4:
channel_slicer = np.s_[:,channel_loc[0][0]:channel_loc[0][1],
channel_loc[1][0]:channel_loc[1][1],:]
# slice based on appropriate slicer object.
channel_slice = image_data[channel_slicer]
# pad y of channel if slice happened to be outside of image
y_difference = (channel_loc[0][1] - channel_loc[0][0]) - channel_slice.shape[1]
if y_difference > 0:
paddings = [[0, 0], # t
[0, y_difference], # y
[0, 0], # x
[0, 0]] # c
channel_slice = np.pad(channel_slice, paddings, mode='edge')
return channel_slice
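# Hedged usage sketch mirroring the docstring (channel_masks as built by make_masks;
# fov_id and image_fov_stack are assumed to exist in the caller):
#
#   for peak, channel_loc in six.iteritems(channel_masks[fov_id]):
#       channel_stack = cut_slice(image_fov_stack, channel_loc)   # (t, y, x, c)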
# calculate cross correlation between pixels in channel stack
def channel_xcorr(fov_id, peak_id):
'''
Function calculates the cross correlation of images in a
stack to the first image in the stack. The output is an
array that is the length of the stack with the best cross
correlation between that image and the first image.
The very first value should be 1.
'''
pad_size = params['subtract']['alignment_pad']
# Use this number of images to calculate cross correlations
number_of_images = 20
# load the phase contrast images
image_data = load_stack(fov_id, peak_id, color=params['phase_plane'])
# if there are more images than number_of_images, use number_of_images images evenly
# spaced across the range
if image_data.shape[0] > number_of_images:
spacing = int(image_data.shape[0] / number_of_images)
image_data = image_data[::spacing,:,:]
if image_data.shape[0] > number_of_images:
image_data = image_data[:number_of_images,:,:]
# we will compare all images to this one, needs to be padded to account for image drift
first_img = np.pad(image_data[0,:,:], pad_size, mode='reflect')
xcorr_array = [] # array holds cross correlation values
for img in image_data:
# use match_template to find all cross correlations for the
# current image against the first image.
xcorr_array.append(np.max(match_template(first_img, img)))
return xcorr_array
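# Interpretation sketch (not part of the pipeline): the first value returned by
# channel_xcorr should be ~1 (the first image matched against itself), and values
# stay high for channels that do not change (empty traps) while drifting lower for
# channels with growing cells. A downstream check might look like the hypothetical
# snippet below; the 0.97 cutoff is only an illustrative guess.
#
#   xcorrs = channel_xcorr(fov_id=1, peak_id=11)
#   looks_empty = np.mean(xcorrs) > 0.97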
### functions about subtraction
# average empty channels from stacks, making another TIFF stack
def average_empties_stack(fov_id, specs, color='c1', align=True):
'''Takes the fov file name and the peak names of the designated empties,
averages them and saves the image
Parameters
fov_id : int
FOV number
specs : dict
specifies whether a channel should be analyzed (1), used for making
an average empty (0), or ignored (-1).
color : string
Which plane to use.
align : boolean
Flag that is passed to the worker function average_empties, indicates
whether images should be aligned before averaging (use False for fluorescent images)
Returns
True if successful.
Saves empty stack to analysis folder
'''
information("Creating average empty channel for FOV %d." % fov_id)
# get peak ids of empty channels for this fov
empty_peak_ids = []
for peak_id, spec in six.iteritems(specs[fov_id]):
if spec == 0: # 0 means it should be used for empty
empty_peak_ids.append(peak_id)
empty_peak_ids = sorted(empty_peak_ids) # sort for repeatability
# depending on how many empties there are choose what to do
# if there is no empty the user is going to have to copy another empty stack
if len(empty_peak_ids) == 0:
information("No empty channel designated for FOV %d." % fov_id)
return False
# if there is just one then you can just copy that channel
elif len(empty_peak_ids) == 1:
peak_id = empty_peak_ids[0]
information("One empty channel (%d) designated for FOV %d." % (peak_id, fov_id))
# load the one phase contrast as the empties
avg_empty_stack = load_stack(fov_id, peak_id, color=color)
# but if there is more than one empty you need to align and average them per timepoint
elif len(empty_peak_ids) > 1:
# load the image stacks into memory
empty_stacks = [] # list which holds phase image stacks of designated empties
for peak_id in empty_peak_ids:
# load data and append to list
image_data = load_stack(fov_id, peak_id, color=color)
empty_stacks.append(image_data)
information("%d empty channels designated for FOV %d." % (len(empty_stacks), fov_id))
# go through time points and create list of averaged empties
avg_empty_stack = [] # list will be later concatenated into numpy array
time_points = range(image_data.shape[0]) # index is time
for t in time_points:
# get images from one timepoint at a time and send to alignment and averaging
imgs = [stack[t] for stack in empty_stacks]
avg_empty = average_empties(imgs, align=align) # function is in mm3
avg_empty_stack.append(avg_empty)
# concatenate list and then save out to tiff stack
avg_empty_stack = np.stack(avg_empty_stack, axis=0)
# save out data
if params['output'] == 'TIFF':
# make new name and save it
empty_filename = params['experiment_name'] + '_xy%03d_empty_%s.tif' % (fov_id, color)
tiff.imsave(os.path.join(params['empty_dir'],empty_filename), avg_empty_stack, compress=4)
if params['output'] == 'HDF5':
h5f = h5py.File(os.path.join(params['hdf5_dir'],'xy%03d.hdf5' % fov_id), 'r+')
# delete the dataset if it exists (important for debug)
if 'empty_%s' % color in h5f:
del h5f[u'empty_%s' % color]
# the empty channel should be its own dataset
h5ds = h5f.create_dataset(u'empty_%s' % color,
data=avg_empty_stack,
chunks=(1, avg_empty_stack.shape[1], avg_empty_stack.shape[2]),
maxshape=(None, avg_empty_stack.shape[1], avg_empty_stack.shape[2]),
compression="gzip", shuffle=True, fletcher32=True)
# give attribute which says which channels contribute
h5ds.attrs.create('empty_channels', empty_peak_ids)
h5f.close()
information("Saved empty channel for FOV %d." % fov_id)
return True
# averages a list of empty channels
def average_empties(imgs, align=True):
'''
This function averages a set of images (empty channels) and returns a single image
of the same size. It first aligns the images to the first image before averaging.
Alignment is done by enlarging the first image using edge padding.
Subsequent images are then aligned to this image and the offset recorded.
These images are padded such that they are the same size as the first (padded) image but
with the image in the correct (aligned) place. Edge padding is again used.
The images are then placed in a stack and averaged. This image is trimmed so it is the size
of the original images.
Called by
average_empties_stack
'''
aligned_imgs = [] # list contains the aligned, padded images
if align:
# pixel size to use for padding (amount that alignment could be off)
pad_size = params['subtract']['alignment_pad']
for n, img in enumerate(imgs):
# if this is the first image, pad it and add it to the stack
if n == 0:
ref_img = np.pad(img, pad_size, mode='reflect') # padded reference image
aligned_imgs.append(ref_img)
# otherwise align this image to the first padded image
else:
# find correlation between a convolution of img against the padded reference
match_result = match_template(ref_img, img)
# find index of highest correlation (relative to top left corner of img)
y, x = np.unravel_index(np.argmax(match_result), match_result.shape)
# pad img so it aligns and is the same size as reference image
pad_img = np.pad(img, ((y, ref_img.shape[0] - (y + img.shape[0])),
(x, ref_img.shape[1] - (x + img.shape[1]))), mode='reflect')
aligned_imgs.append(pad_img)
else:
# don't align, just link the names to go forward easily
aligned_imgs = imgs
# stack the aligned data along 3rd axis
aligned_imgs = np.dstack(aligned_imgs)
# get a mean image along 3rd axis
avg_empty = np.nanmean(aligned_imgs, axis=2)
# trim off the padded edges (only if images were aligned, otherwise there was no padding)
if align:
avg_empty = avg_empty[pad_size:-1*pad_size, pad_size:-1*pad_size]
# change type back to unsigned 16 bit not floats
avg_empty = avg_empty.astype(dtype='uint16')
return avg_empty
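# Minimal usage sketch (hypothetical, outside the normal pipeline): averaging two
# synthetic empty-channel images with align=False avoids the params lookup and the
# template matching entirely.
#
#   fake_empties = [np.full((100, 30), 500, dtype='uint16'),
#                   np.full((100, 30), 520, dtype='uint16')]
#   avg = average_empties(fake_empties, align=False)   # uint16 array of 510s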
# this function is used when one FOV doesn't have an empty
def copy_empty_stack(from_fov, to_fov, color='c1'):
'''Copy an empty stack from one FOV to another'''
# load empty stack from one FOV
information('Loading empty stack from FOV {} to save for FOV {}.'.format(from_fov, to_fov))
avg_empty_stack = load_stack(from_fov, 0, color='empty_{}'.format(color))
# save out data
if params['output'] == 'TIFF':
# make new name and save it
empty_filename = params['experiment_name'] + '_xy%03d_empty_%s.tif' % (to_fov, color)
tiff.imsave(os.path.join(params['empty_dir'],empty_filename), avg_empty_stack, compress=4)
if params['output'] == 'HDF5':
h5f = h5py.File(os.path.join(params['hdf5_dir'],'xy%03d.hdf5' % to_fov), 'r+')
# delete the dataset if it exists (important for debug)
if 'empty_%s' % color in h5f:
del h5f[u'empty_%s' % color]
# the empty channel should be its own dataset
h5ds = h5f.create_dataset(u'empty_%s' % color,
data=avg_empty_stack,
chunks=(1, avg_empty_stack.shape[1], avg_empty_stack.shape[2]),
maxshape=(None, avg_empty_stack.shape[1], avg_empty_stack.shape[2]),
compression="gzip", shuffle=True, fletcher32=True)
# give attribute which says which channels contribute. Just put 0
h5ds.attrs.create('empty_channels', [0])
h5f.close()
information("Saved empty channel for FOV %d." % to_fov)
# Do subtraction for an fov over many timepoints
def subtract_fov_stack(fov_id, specs, color='c1', method='phase'):
'''
For a given FOV, loads the precomputed empty stack and does subtraction on
all peaks in the FOV designated to be analyzed
Parameters
----------
color : string, 'c1', 'c2', etc.
This is the channel to subtract. It is appended to the word 'empty' to load the matching empty stack.
Called by
mm3_Subtract.py
Calls
mm3.subtract_phase
'''
information('Subtracting peaks for FOV %d.' % fov_id)
# load empty stack; feed a dummy peak number to get the empty
avg_empty_stack = load_stack(fov_id, 0, color='empty_{}'.format(color))
# determine which peaks are to be analyzed
ana_peak_ids = []
for peak_id, spec in six.iteritems(specs[fov_id]):
if spec == 1: # 0 means it should be used for empty, -1 is ignore
ana_peak_ids.append(peak_id)
ana_peak_ids = sorted(ana_peak_ids) # sort for repeatability
information("Subtracting %d channels for FOV %d." % (len(ana_peak_ids), fov_id))
# just return if there are no peaks to analyze
if not ana_peak_ids:
return False
# load images for the peak and get phase images
for peak_id in ana_peak_ids:
information('Subtracting peak %d.' % peak_id)
image_data = load_stack(fov_id, peak_id, color=color)
# make a list for all time points to send to a multiprocessing pool
# list will be the length of image_data, with tuples (image, empty)
subtract_pairs = zip(image_data, avg_empty_stack)
# # set up multiprocessing pool to do subtraction. Should wait until finished
# pool = Pool(processes=params['num_analyzers'])
# if method == 'phase':
# subtracted_imgs = pool.map(subtract_phase, subtract_pairs, chunksize=10)
# elif method == 'fluor':
# subtracted_imgs = pool.map(subtract_fluor, subtract_pairs, chunksize=10)
# pool.close() # tells the process nothing more will be added.
# pool.join() # blocks script until everything has been processed and workers exit
# linear loop for debug
subtracted_imgs = [subtract_phase(subtract_pair) for subtract_pair in subtract_pairs]
# stack them up along a time axis
subtracted_stack = np.stack(subtracted_imgs, axis=0)
# save out the subtracted stack
if params['output'] == 'TIFF':
sub_filename = params['experiment_name'] + '_xy%03d_p%04d_sub_%s.tif' % (fov_id, peak_id, color)
tiff.imsave(os.path.join(params['sub_dir'],sub_filename), subtracted_stack, compress=4) # save it
if fov_id==1 and peak_id<50:
napari.current_viewer().add_image(subtracted_stack, name='Subtracted' + '_xy1_p'+str(peak_id)+'_sub_'+str(color)+'.tif', visible=True)
if params['output'] == 'HDF5':
h5f = h5py.File(os.path.join(params['hdf5_dir'],'xy%03d.hdf5' % fov_id), 'r+')
# put subtracted channel in correct group
h5g = h5f['channel_%04d' % peak_id]
# delete the dataset if it exists (important for debug)
if 'p%04d_sub_%s' % (peak_id, color) in h5g:
del h5g['p%04d_sub_%s' % (peak_id, color)]
h5ds = h5g.create_dataset(u'p%04d_sub_%s' % (peak_id, color),
data=subtracted_stack,
chunks=(1, subtracted_stack.shape[1], subtracted_stack.shape[2]),
maxshape=(None, subtracted_stack.shape[1], subtracted_stack.shape[2]),
compression="gzip", shuffle=True, fletcher32=True)
information("Saved subtracted channel %d." % peak_id)
if params['output'] == 'HDF5':
h5f.close()
return True
# subtracts one phase contrast image from another.
def subtract_phase(image_pair):
'''subtract_phase aligns and subtracts an empty channel image from a channel image.
Modified from subtract_phase_only by jt on 20160511
The subtracted image returned is the same size as the image given. It may however include
data points around the edge that are meaningless but not marked.
We align the empty channel to the phase channel, then subtract.
Parameters
image_pair : tuple of length two: (image, empty_mean)
Returns
channel_subtracted : np.array
The subtracted image
Called by
subtract_fov_stack
'''
# get out data and pad
cropped_channel, empty_channel = image_pair # [channel slice, empty slice]
# this is for aligning the empty channel to the cell channel.
### Pad cropped channel.
pad_size = params['subtract']['alignment_pad'] # pixel size to use for padding (amount that alignment could be off)
padded_chnl = np.pad(cropped_channel, pad_size, mode='reflect')
# ### Align channel to empty using match template.
# use match template to get a correlation array and find the position of maximum overlap
match_result = match_template(padded_chnl, empty_channel)
# get row and column of max correlation value in correlation array
y, x = np.unravel_index(np.argmax(match_result), match_result.shape)
# pad the empty channel according to alignment to be overlayed on padded channel.
empty_paddings = [[y, padded_chnl.shape[0] - (y + empty_channel.shape[0])],
[x, padded_chnl.shape[1] - (x + empty_channel.shape[1])]]
aligned_empty = np.pad(empty_channel, empty_paddings, mode='reflect')
# now trim it off so it is the same size as the original channel
aligned_empty = aligned_empty[pad_size:-1*pad_size, pad_size:-1*pad_size]
### Compute the difference between the empty and channel phase contrast images
# subtract cropped cell image from empty channel.
channel_subtracted = aligned_empty.astype('int32') - cropped_channel.astype('int32')
# channel_subtracted = cropped_channel.astype('int32') - aligned_empty.astype('int32')
# just zero out anything less than 0. This is what Sattar does
channel_subtracted[channel_subtracted < 0] = 0
channel_subtracted = channel_subtracted.astype('uint16') # change back to 16bit
return channel_subtracted
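# Sign convention note: subtract_phase computes (aligned empty) - (cell channel) and
# clips negatives, so dark cells in phase contrast come out bright in the subtracted
# image and the background lands near zero. A minimal call, assuming
# params['subtract']['alignment_pad'] is set, would be:
#
#   sub_img = subtract_phase((channel_img, empty_img))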
# subtract one fluorescence image from another.
def subtract_fluor(image_pair):
''' subtract_fluor does a simple subtraction of one image from another. Unlike subtract_phase,
there is no alignment. Also, the empty channel is subtracted from the full channel.
Parameters
image_pair : tuple of length two: (image, empty_mean)
Returns
channel_subtracted : np.array
The subtracted image.
Called by
subtract_fov_stack
'''
# get out data and pad
cropped_channel, empty_channel = image_pair # [channel slice, empty slice]
# check frame size of cropped channel and background, always keep crop channel size the same
crop_size = np.shape(cropped_channel)[:2]
empty_size = np.shape(empty_channel)[:2]
if crop_size != empty_size:
if crop_size[0] > empty_size[0] or crop_size[1] > empty_size[1]:
pad_row_length = max(crop_size[0] - empty_size[0], 0) # prevent negatives
pad_column_length = max(crop_size[1] - empty_size[1], 0)
empty_channel = np.pad(empty_channel,
[[int(.5*pad_row_length), pad_row_length-int(.5*pad_row_length)],
[int(.5*pad_column_length), pad_column_length-int(.5*pad_column_length)],
[0,0]], 'edge')
# mm3.information('size adjusted 1')
empty_size = np.shape(empty_channel)[:2]
if crop_size[0] < empty_size[0] or crop_size[1] < empty_size[1]:
empty_channel = empty_channel[:crop_size[0], :crop_size[1],]
### Compute the difference between the channel and empty fluorescence images
# subtract the empty channel from the cropped cell image.
channel_subtracted = cropped_channel.astype('int32') - empty_channel.astype('int32')
# channel_subtracted = cropped_channel.astype('int32') - aligned_empty.astype('int32')
# just zero out anything less than 0.
channel_subtracted[channel_subtracted < 0] = 0
channel_subtracted = channel_subtracted.astype('uint16') # change back to 16bit
return channel_subtracted
### functions that deal with segmentation and lineages
# Do segmentation for an channel time stack
def segment_chnl_stack(fov_id, peak_id):
'''
For a given fov and peak (channel), do segmentation for all images in the
subtracted .tif stack.
Called by
mm3_Segment.py
Calls
mm3.segment_image
'''
information('Segmenting FOV %d, channel %d.' % (fov_id, peak_id))
# load subtracted images
sub_stack = load_stack(fov_id, peak_id, color='sub_{}'.format(params['phase_plane']))
# set up multiprocessing pool to do segmentation. Will do everything before going on.
#pool = Pool(processes=params['num_analyzers'])
# send the 3d array to multiprocessing
#segmented_imgs = pool.map(segment_image, sub_stack, chunksize=8)
#pool.close() # tells the process nothing more will be added.
#pool.join() # blocks script until everything has been processed and workers exit
# image by image for debug
segmented_imgs = []
for sub_image in sub_stack:
segmented_imgs.append(segment_image(sub_image))
# stack them up along a time axis
segmented_imgs = np.stack(segmented_imgs, axis=0)
segmented_imgs = segmented_imgs.astype('uint8')
# save out the segmented stack
if params['output'] == 'TIFF':
seg_filename = params['experiment_name'] + '_xy%03d_p%04d_%s.tif' % (fov_id, peak_id, params['seg_img'])
tiff.imsave(os.path.join(params['seg_dir'],seg_filename),
segmented_imgs, compress=5)
if fov_id==1 and peak_id<50:
napari.current_viewer().add_image(segmented_imgs, name='Segmented' + '_xy1_p'+str(peak_id)+'_sub_'+str(params['seg_img'])+'.tif', visible=True)
if params['output'] == 'HDF5':
h5f = h5py.File(os.path.join(params['hdf5_dir'],'xy%03d.hdf5' % fov_id), 'r+')
# put segmented channel in correct group
h5g = h5f['channel_%04d' % peak_id]
# delete the dataset if it exists (important for debug)
if 'p%04d_%s' % (peak_id, params['seg_img']) in h5g:
del h5g['p%04d_%s' % (peak_id, params['seg_img'])]
h5ds = h5g.create_dataset(u'p%04d_%s' % (peak_id, params['seg_img']),
data=segmented_imgs,
chunks=(1, segmented_imgs.shape[1], segmented_imgs.shape[2]),
maxshape=(None, segmented_imgs.shape[1], segmented_imgs.shape[2]),
compression="gzip", shuffle=True, fletcher32=True)
h5f.close()
information("Saved segmented channel %d." % peak_id)
return True
# segmentation algorithm
def segment_image(image):
'''Segments a subtracted image and returns a labeled image
Parameters
image : a ndarray which is an image. This should be the subtracted image
Returns
labeled_image : a ndarray which is also an image. Labeled values, which
should correspond to cells, all have the same integer value starting with 1.
Non labeled area should have value zero.
'''
# load in segmentation parameters
OTSU_threshold = params['segment']['otsu']['OTSU_threshold']
first_opening_size = params['segment']['otsu']['first_opening_size']
distance_threshold = params['segment']['otsu']['distance_threshold']
second_opening_size = params['segment']['otsu']['second_opening_size']
min_object_size = params['segment']['otsu']['min_object_size']
# threshold image
try:
thresh = threshold_otsu(image) # finds optimal OTSU threshold value
except:
return np.zeros_like(image)
threshholded = image > OTSU_threshold*thresh # will create binary image
# if there are no cells, good to clear the border
# because otherwise the Otsu threshold just picks up noise,
# most likely at the side of the image
threshholded = segmentation.clear_border(threshholded)
# Opening = erosion then dilation.
# opening smooths images, breaks isthmuses, and eliminates protrusions.
# "opens" dark gaps between bright features.
morph = morphology.binary_opening(threshholded, morphology.disk(first_opening_size))
# if this image is empty at this point (likely if there were no cells), just return
# zero array
if np.amax(morph) == 0:
return np.zeros_like(image)
### Calculate distance matrix, use as markers for random walker (diffusion watershed)
# Generate the markers based on distance to the background
distance = ndi.distance_transform_edt(morph)
# threshold distance image
distance_thresh = np.zeros_like(distance)
distance_thresh[distance < distance_threshold] = 0
distance_thresh[distance >= distance_threshold] = 1
# do an extra opening on the distance
distance_opened = morphology.binary_opening(distance_thresh,
morphology.disk(second_opening_size))
# remove artifacts connected to image border
cleared = segmentation.clear_border(distance_opened)
# remove small objects. remove_small_objects wants a
# labeled image and will fail if there is only one label. Return zero image in that case.
# could have used try/except but remove_small_objects loves to issue warnings.
cleared, label_num = morphology.label(cleared, connectivity=1, return_num=True)
if label_num > 1:
cleared = morphology.remove_small_objects(cleared, min_size=min_object_size)
else:
# if there is one label or none, just return a zero image
return np.zeros_like(image)
# relabel now that small objects and labels on edges have been cleared
markers = morphology.label(cleared, connectivity=1)
# just break if there is no label
if np.amax(markers) == 0:
return np.zeros_like(image)
# the binary image for the watershed, which uses the unmodified OTSU threshold
threshholded_watershed = threshholded
threshholded_watershed = segmentation.clear_border(threshholded_watershed)
# label using the random walker (diffusion watershed) algorithm
try:
# set anything outside of OTSU threshold to -1 so it will not be labeled
markers[threshholded_watershed == 0] = -1
# here is the main algorithm
labeled_image = segmentation.random_walker(-1*image, markers)
# put negative values back to zero for proper image
labeled_image[labeled_image == -1] = 0
except:
return np.zeros_like(image)
return labeled_image
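# The Otsu pipeline above reads its settings from params['segment']['otsu']. The key
# names come from the lookups at the top of segment_image; the numbers below are only
# placeholders showing the expected structure, not recommended values.
#
#   params['segment']['otsu'] = {
#       'OTSU_threshold': 1.0,       # multiplier applied to the Otsu value
#       'first_opening_size': 2,     # disk radius in pixels
#       'distance_threshold': 2,     # distance-transform cutoff in pixels
#       'second_opening_size': 1,    # disk radius in pixels
#       'min_object_size': 25,       # minimum object area in pixels
#   }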
# loss functions for model
def dice_coeff(y_true, y_pred):
smooth = 1.
# Flatten
y_true_f = tf.reshape(y_true, [-1])
y_pred_f = tf.reshape(y_pred, [-1])
intersection = tf.reduce_sum(y_true_f * y_pred_f)
score = (2. * intersection + smooth) / (tf.reduce_sum(y_true_f) + tf.reduce_sum(y_pred_f) + smooth)
return score
def dice_loss(y_true, y_pred):
loss = 1 - dice_coeff(y_true, y_pred)
return loss
def bce_dice_loss(y_true, y_pred):
loss = losses.binary_crossentropy(y_true, y_pred) + dice_loss(y_true, y_pred)
return loss
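# Worked example of the Dice terms above (numbers only, no TensorFlow needed): with
# smooth = 1 and flattened masks y_true = [1, 1, 0, 0], y_pred = [1, 0, 0, 0], the
# intersection is 1, so dice_coeff = (2*1 + 1) / (2 + 1 + 1) = 0.75 and
# dice_loss = 1 - 0.75 = 0.25; bce_dice_loss adds binary cross entropy on top.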
def tversky_loss(y_true, y_pred):
alpha = 0.5
beta = 0.5
ones = K.ones((512,512,3)) #K.ones(K.shape(y_true))
p0 = y_pred # proba that voxels are class i
p1 = ones-y_pred # proba that voxels are not class i
g0 = y_true
g1 = ones-y_true
num = K.sum(p0*g0, (0,1,2))
den = num + alpha*K.sum(p0*g1,(0,1,2)) + beta*K.sum(p1*g0,(0,1,2))
T = K.sum(num/den) # when summing over classes, T has dynamic range [0 Ncl]
Ncl = K.cast(K.shape(y_true)[-1], 'float32')
return Ncl-T
def cce_tversky_loss(y_true, y_pred):
loss = losses.categorical_crossentropy(y_true, y_pred) + tversky_loss(y_true, y_pred)
return loss
def get_pad_distances(unet_shape, img_height, img_width):
'''Finds padding and trimming sizes to make the input image the same as the size expected by the U-net model.
Padding is done evenly to the top and bottom of the image. Trimming is only done from the right or bottom.
'''
half_width_pad = (unet_shape[1]-img_width)/2
if half_width_pad > 0:
left_pad = int(np.floor(half_width_pad))
right_pad = int(np.ceil(half_width_pad))
right_trim = 0
else:
left_pad = 0
right_pad = 0
right_trim = img_width - unet_shape[1]
half_height_pad = (unet_shape[0]-img_height)/2
if half_height_pad > 0:
top_pad = int(np.floor(half_height_pad))
bottom_pad = int(np.ceil(half_height_pad))
bottom_trim = 0
else:
top_pad = 0
bottom_pad = 0
bottom_trim = img_height - unet_shape[0]
pad_dict = {'top_pad' : top_pad,
'bottom_pad' : bottom_pad,
'right_pad' : right_pad,
'left_pad' : left_pad,
'bottom_trim' : bottom_trim,
'right_trim' : right_trim}
return pad_dict
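# Worked example: a 250x30 channel image fed to a U-net expecting 256x32 inputs gets
# symmetric padding and no trimming, while a wider image gets trimmed from the right.
#
#   get_pad_distances((256, 32), img_height=250, img_width=30)
#   # -> {'top_pad': 3, 'bottom_pad': 3, 'right_pad': 1, 'left_pad': 1,
#   #     'bottom_trim': 0, 'right_trim': 0}
#   get_pad_distances((256, 32), img_height=250, img_width=36)
#   # -> right_pad = left_pad = 0 and right_trim = 4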
#@profile
def segment_cells_unet(ana_peak_ids, fov_id, pad_dict, unet_shape, model):
batch_size = params['segment']['batch_size']
cellClassThreshold = params['segment']['cell_class_threshold']
if cellClassThreshold == 'None': # yaml imports None as a string
cellClassThreshold = False
min_object_size = params['segment']['min_object_size']
# arguments to data generator
# data_gen_args = {'batch_size':batch_size,
# 'n_channels':1,
# 'normalize_to_one':False,
# 'shuffle':False}
# arguments to predict_generator
predict_args = dict(use_multiprocessing=True,
workers=params['num_analyzers'],
verbose=1)
for peak_id in ana_peak_ids:
information('Segmenting peak {}.'.format(peak_id))
img_stack = load_stack(fov_id, peak_id, color=params['phase_plane'])
if params['segment']['normalize_to_one']:
med_stack = np.zeros(img_stack.shape)
selem = morphology.disk(1)
for frame_idx in range(img_stack.shape[0]):
tmpImg = img_stack[frame_idx,...]
med_stack[frame_idx,...] = median(tmpImg, selem)
# robust normalization of peak's image stack to 1
max_val = np.max(med_stack)
img_stack = img_stack/max_val
img_stack[img_stack > 1] = 1
# trim and pad image to correct size
img_stack = img_stack[:, :unet_shape[0], :unet_shape[1]]
img_stack = np.pad(img_stack,
((0,0),
(pad_dict['top_pad'],pad_dict['bottom_pad']),
(pad_dict['left_pad'],pad_dict['right_pad'])),
mode='constant')
img_stack = np.expand_dims(img_stack, -1) # TF expects images to be 4D
# set up image generator
# image_generator = CellSegmentationDataGenerator(img_stack, **data_gen_args)
image_datagen = ImageDataGenerator()
image_generator = image_datagen.flow(x=img_stack,
batch_size=batch_size,
shuffle=False) # keep same order
# predict cell locations. This has multiprocessing built in but I need to mess with the parameters to see how to best utilize it. ***
predictions = model.predict_generator(image_generator, **predict_args)
# post processing
# remove padding including the added last dimension
predictions = predictions[:, pad_dict['top_pad']:unet_shape[0]-pad_dict['bottom_pad'],
pad_dict['left_pad']:unet_shape[1]-pad_dict['right_pad'], 0]
# pad back in case the image had been trimmed
predictions = np.pad(predictions,
((0,0),
(0,pad_dict['bottom_trim']),
(0,pad_dict['right_trim'])),
mode='constant')
if params['segment']['save_predictions']:
pred_filename = params['experiment_name'] + '_xy%03d_p%04d_%s.tif' % (fov_id, peak_id, params['pred_img'])
if not os.path.isdir(params['pred_dir']):
os.makedirs(params['pred_dir'])
int_preds = (predictions * 255).astype('uint8')
tiff.imsave(os.path.join(params['pred_dir'], pred_filename),
int_preds, compress=4)
# binarize and label (if there is a threshold value; otherwise, save a grayscale for debug)
if cellClassThreshold:
predictions[predictions >= cellClassThreshold] = 1
predictions[predictions < cellClassThreshold] = 0
predictions = predictions.astype('uint8')
segmented_imgs = np.zeros(predictions.shape, dtype='uint8')
# process and label each frame of the channel
for frame in range(segmented_imgs.shape[0]):
# get rid of small holes
predictions[frame,:,:] = morphology.remove_small_holes(predictions[frame,:,:], min_object_size)
# get rid of small objects.
predictions[frame,:,:] = morphology.remove_small_objects(morphology.label(predictions[frame,:,:], connectivity=1), min_size=min_object_size)
# remove labels which touch the border
predictions[frame,:,:] = segmentation.clear_border(predictions[frame,:,:])
# relabel now
segmented_imgs[frame,:,:] = morphology.label(predictions[frame,:,:], connectivity=1)
else: # in this case you just want to scale the 0 to 1 float image to 0 to 255
information('Converting predictions to grayscale.')
segmented_imgs = np.around(predictions * 100)
# both binary and grayscale should be 8bit. This may be ensured above and is unnecessary
segmented_imgs = segmented_imgs.astype('uint8')
# save out the segmented stacks
if params['output'] == 'TIFF':
seg_filename = params['experiment_name'] + '_xy%03d_p%04d_%s.tif' % (fov_id, peak_id, params['seg_img'])
tiff.imsave(os.path.join(params['seg_dir'], seg_filename),
segmented_imgs, compress=4)
if params['output'] == 'HDF5':
h5f = h5py.File(os.path.join(params['hdf5_dir'],'xy%03d.hdf5' % fov_id), 'r+')
# put segmented channel in correct group
h5g = h5f['channel_%04d' % peak_id]
# delete the dataset if it exists (important for debug)
if 'p%04d_%s' % (peak_id, params['seg_img']) in h5g:
del h5g['p%04d_%s' % (peak_id, params['seg_img'])]
h5ds = h5g.create_dataset(u'p%04d_%s' % (peak_id, params['seg_img']),
data=segmented_imgs,
chunks=(1, segmented_imgs.shape[1], segmented_imgs.shape[2]),
maxshape=(None, segmented_imgs.shape[1], segmented_imgs.shape[2]),
compression="gzip", shuffle=True, fletcher32=True)
h5f.close()
#@profile
def segment_fov_unet(fov_id, specs, model, color=None):
'''
Segments the channels from one fov using the U-net CNN model.
Parameters
----------
fov_id : int
specs : dict
model : TensorFlow model
'''
information('Segmenting FOV {} with U-net.'.format(fov_id))
if color is None:
color = params['phase_plane']
# load segmentation parameters
unet_shape = (params['segment']['trained_model_image_height'],
params['segment']['trained_model_image_width'])
### determine stitching of images.
# need channel shape, specifically the width. load first for example
# this assumes that all channels are the same size for this FOV, which they should be
for peak_id, spec in six.iteritems(specs[fov_id]):
if spec == 1:
break # just break out with the current peak_id
img_stack = load_stack(fov_id, peak_id, color=color)
img_height = img_stack.shape[1]
img_width = img_stack.shape[2]
pad_dict = get_pad_distances(unet_shape, img_height, img_width)
# determine how many channels we have to analyze for this FOV
ana_peak_ids = []
for peak_id, spec in six.iteritems(specs[fov_id]):
if spec == 1:
ana_peak_ids.append(peak_id)
ana_peak_ids.sort() # sort for repeatability
#ana_peak_ids = ana_peak_ids[:2]
segment_cells_unet(ana_peak_ids, fov_id, pad_dict, unet_shape, model)
information("Finished segmentation for FOV {}.".format(fov_id))
return
def segment_foci_unet(ana_peak_ids, fov_id, pad_dict, unet_shape, model):
# batch_size = params['foci']['batch_size']
focusClassThreshold = params['foci']['focus_threshold']
if focusClassThreshold == 'None': # yaml imports None as a string
focusClassThreshold = False
# arguments to data generator
data_gen_args = {'batch_size':params['foci']['batch_size'],
'n_channels':1,
'normalize_to_one':False,
'shuffle':False}
# arguments to predict_generator
predict_args = dict(use_multiprocessing=False,
# workers=params['num_analyzers'],
verbose=1)
for peak_id in ana_peak_ids:
information('Segmenting foci in peak {}.'.format(peak_id))
# print(peak_id) # debugging a shape error at some traps
img_stack = load_stack(fov_id, peak_id, color=params['foci']['foci_plane'])
# pad image to correct size
img_stack = np.pad(img_stack,
((0,0),
(pad_dict['top_pad'],pad_dict['bottom_pad']),
(pad_dict['left_pad'],pad_dict['right_pad'])),
mode='constant')
img_stack = np.expand_dims(img_stack, -1)
# set up image generator
image_generator = FocusSegmentationDataGenerator(img_stack, **data_gen_args)
# predict foci locations.
predictions = model.predict_generator(image_generator, **predict_args)
# post processing
# remove padding including the added last dimension
predictions = predictions[:, pad_dict['top_pad']:unet_shape[0]-pad_dict['bottom_pad'],
pad_dict['left_pad']:unet_shape[1]-pad_dict['right_pad'], 0]
if params['foci']['save_predictions']:
pred_filename = params['experiment_name'] + '_xy%03d_p%04d_%s.tif' % (fov_id, peak_id, params['pred_img'])
if not os.path.isdir(params['foci_pred_dir']):
os.makedirs(params['foci_pred_dir'])
int_preds = (predictions * 255).astype('uint8')
tiff.imsave(os.path.join(params['foci_pred_dir'], pred_filename),
int_preds, compress=4)
# binarize and label (if there is a threshold value; otherwise, save a grayscale for debug)
if focusClassThreshold:
predictions[predictions >= focusClassThreshold] = 1
predictions[predictions < focusClassThreshold] = 0
predictions = predictions.astype('uint8')
segmented_imgs = np.zeros(predictions.shape, dtype='uint8')
# process and label each frame of the channel
for frame in range(segmented_imgs.shape[0]):
# get rid of small holes
# predictions[frame,:,:] = morphology.remove_small_holes(predictions[frame,:,:], min_object_size)
# get rid of small objects.
# predictions[frame,:,:] = morphology.remove_small_objects(morphology.label(predictions[frame,:,:], connectivity=1), min_size=min_object_size)
# remove labels which touch the border
predictions[frame,:,:] = segmentation.clear_border(predictions[frame,:,:])
# relabel now
segmented_imgs[frame,:,:] = morphology.label(predictions[frame,:,:], connectivity=2)
else: # in this case you just want to scale the 0 to 1 float image to 0 to 255
information('Converting predictions to grayscale.')
segmented_imgs = np.around(predictions * 100)
# both binary and grayscale should be 8bit. This may be ensured above and is unnecessary
segmented_imgs = segmented_imgs.astype('uint8')
# save out the segmented stacks
if params['output'] == 'TIFF':
seg_filename = params['experiment_name'] + '_xy%03d_p%04d_%s.tif' % (fov_id, peak_id, params['seg_img'])
tiff.imsave(os.path.join(params['foci_seg_dir'], seg_filename),
segmented_imgs, compress=4)
if params['output'] == 'HDF5':
h5f = h5py.File(os.path.join(params['hdf5_dir'],'xy%03d.hdf5' % fov_id), 'r+')
# put segmented channel in correct group
h5g = h5f['channel_%04d' % peak_id]
# delete the dataset if it exists (important for debug)
if 'p%04d_%s' % (peak_id, params['seg_img']) in h5g:
del h5g['p%04d_%s' % (peak_id, params['seg_img'])]
h5ds = h5g.create_dataset(u'p%04d_%s' % (peak_id, params['seg_img']),
data=segmented_imgs,
chunks=(1, segmented_imgs.shape[1], segmented_imgs.shape[2]),
maxshape=(None, segmented_imgs.shape[1], segmented_imgs.shape[2]),
compression="gzip", shuffle=True, fletcher32=True)
h5f.close()
def segment_fov_foci_unet(fov_id, specs, model, color=None):
'''
Segments the channels from one fov using the U-net CNN model.
Parameters
----------
fov_id : int
specs : dict
model : TensorFlow model
'''
information('Segmenting FOV {} with U-net.'.format(fov_id))
if color is None:
color = params['phase_plane']
# load segmentation parameters
unet_shape = (params['segment']['trained_model_image_height'],
params['segment']['trained_model_image_width'])
### determine stitching of images.
# need channel shape, specifically the width. load first for example
# this assumes that all channels are the same size for this FOV, which they should be
for peak_id, spec in six.iteritems(specs[fov_id]):
if spec == 1:
break # just break out with the current peak_id
img_stack = load_stack(fov_id, peak_id, color=color)
img_height = img_stack.shape[1]
img_width = img_stack.shape[2]
# find padding and trimming distances
pad_dict = get_pad_distances(unet_shape, img_height, img_width)
# timepoints = img_stack.shape[0]
# determine how many channels we have to analyze for this FOV
ana_peak_ids = []
for peak_id, spec in six.iteritems(specs[fov_id]):
if spec == 1:
ana_peak_ids.append(peak_id)
ana_peak_ids.sort() # sort for repeatability
k = segment_foci_unet(ana_peak_ids, fov_id, pad_dict, unet_shape, model)
information("Finished segmentation for FOV {}.".format(fov_id))
return(k)
# class for image generation for predicting cell locations in phase-contrast images
class CellSegmentationDataGenerator(utils.Sequence):
'Generates data for Keras'
def __init__(self,
img_array,
batch_size=32,
n_channels=1,
shuffle=False,
normalize_to_one=False):
'Initialization'
self.dim = (img_array.shape[1], img_array.shape[2])
self.batch_size = batch_size
self.img_array = img_array
self.img_number = img_array.shape[0]
self.n_channels = n_channels
self.shuffle = shuffle
self.on_epoch_end()
self.normalize_to_one = normalize_to_one
if normalize_to_one:
self.selem = morphology.disk(1)
def __len__(self):
'Denotes the number of batches per epoch'
return(int(np.ceil(self.img_number / self.batch_size)))
def __getitem__(self, index):
'Generate one batch of data'
# Generate indexes of the batch
indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]
# Find list of IDs
array_list_temp = [self.img_array[k,:,:,0] for k in indexes]
# Generate data
X = self.__data_generation(array_list_temp)
return X
def on_epoch_end(self):
'Updates indexes after each epoch'
self.indexes = np.arange(self.img_number)
if self.shuffle == True:
np.random.shuffle(self.indexes)
def __data_generation(self, array_list_temp):
'Generates data containing batch_size samples' # X : (n_samples, *dim, n_channels)
# Initialization
X = np.zeros((self.batch_size, self.dim[0], self.dim[1], self.n_channels))
# Generate data
for i in range(self.batch_size):
# Store sample
try:
tmpImg = array_list_temp[i]
except IndexError:
X = X[:i,...]
break
# ensure image is uint8
if tmpImg.dtype=="uint16":
tmpImg = tmpImg / 2**16 * 2**8
tmpImg = tmpImg.astype('uint8')
if self.normalize_to_one:
with warnings.catch_warnings():
warnings.simplefilter('ignore')
medImg = median(tmpImg, self.selem)
tmpImg = tmpImg/np.max(medImg)
tmpImg[tmpImg > 1] = 1
X[i,:,:,0] = tmpImg
return (X)
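# Usage sketch (hypothetical): this generator expects a 4D array with a trailing
# channel axis, like the img_stack built in segment_cells_unet above, and could be
# swapped in for the Keras ImageDataGenerator used there.
#
#   gen = CellSegmentationDataGenerator(img_stack, batch_size=32,
#                                       normalize_to_one=False, shuffle=False)
#   predictions = model.predict_generator(gen, verbose=1)   # old Keras generator API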
class TemporalCellDataGenerator(utils.Sequence):
'Generates data for Keras'
def __init__(self,
fileName,
batch_size=32,
dim=(32,32,32),
n_channels=1,
n_classes=10,
shuffle=False,
normalize_to_one=False):
'Initialization'
self.dim = dim
self.batch_size = batch_size
self.fileName = fileName
self.n_channels = n_channels
self.n_classes = n_classes
self.shuffle = shuffle
self.on_epoch_end()
self.normalize_to_one = normalize_to_one
if normalize_to_one:
self.selem = morphology.disk(1)
def __len__(self):
'Denotes the number of batches per epoch'
return int(np.ceil(self.batch_size / self.batch_size))
def __getitem__(self, index):
'Generate one batch of data'
# Generate data
X = self.__data_generation()
return X
def on_epoch_end(self):
'Updates indexes after each epoch'
pass
def __data_generation(self):
'Generates data containing batch_size samples' # X : (n_samples, *dim, n_channels)
# Initialization
X = np.zeros((self.batch_size, self.dim[0], self.dim[1], self.dim[2], self.n_channels))
full_stack = io.imread(self.fileName)
if full_stack.dtype=="uint16":
full_stack = full_stack / 2**16 * 2**8
full_stack = full_stack.astype('uint8')
img_height = full_stack.shape[1]
img_width = full_stack.shape[2]
pad_dict = get_pad_distances(self.dim, img_height, img_width)
full_stack = np.pad(full_stack,
((0,0),
(pad_dict['top_pad'],pad_dict['bottom_pad']),
(pad_dict['left_pad'],pad_dict['right_pad'])
),
mode='constant')
full_stack = full_stack.transpose(1,2,0)
# Generate data
for i in range(self.batch_size):
if i == 0:
tmpImg = np.zeros((self.dim[0], self.dim[1], self.dim[2], 1))
tmpImg[:,:,0,0] = full_stack[:,:,0]
for j in range(1,self.dim[2]):
tmpImg[:,:,j,0] = full_stack[:,:,j]
elif i == (self.batch_size - 1):
tmpImg = np.zeros((self.dim[0], self.dim[1], self.dim[2], 1))
tmpImg[:,:,-1,0] = full_stack[:,:,-1]
for j in range(self.dim[2]-1):
tmpImg[:,:,j,0] = full_stack[:,:,j]
else:
tmpImg = np.zeros((self.dim[0], self.dim[1], self.dim[2], 1))
tmpImg[:,:,:,0] = full_stack[:,:,(i-1):(i+2)]
X[i,:,:,:,:] = tmpImg
return X
# class for image generation for predicting cell locations in phase-contrast images
class FocusSegmentationDataGenerator(utils.Sequence):
'Generates data for Keras'
def __init__(self,
img_array,
batch_size=32,
n_channels=1,
shuffle=False,
normalize_to_one=False):
'Initialization'
self.dim = (img_array.shape[1], img_array.shape[2])
self.batch_size = batch_size
self.img_array = img_array
self.img_number = img_array.shape[0]
self.n_channels = n_channels
self.shuffle = shuffle
self.on_epoch_end()
self.normalize_to_one = normalize_to_one
if normalize_to_one:
self.selem = morphology.disk(1)
def __len__(self):
'Denotes the number of batches per epoch'
return(int(np.ceil(self.img_number / self.batch_size)))
def __getitem__(self, index):
'Generate one batch of data'
# Generate indexes of the batch
indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]
# Find list of IDs
array_list_temp = [self.img_array[k,:,:,0] for k in indexes]
# Generate data
X = self.__data_generation(array_list_temp)
return X
def on_epoch_end(self):
'Updates indexes after each epoch'
self.indexes = np.arange(self.img_number)
if self.shuffle == True:
np.random.shuffle(self.indexes)
def __data_generation(self, array_list_temp):
'Generates data containing batch_size samples' # X : (n_samples, *dim, n_channels)
# Initialization
X = np.zeros((self.batch_size, self.dim[0], self.dim[1], self.n_channels), 'uint16')
if self.normalize_to_one:
max_pixels = []
# Generate data
for i in range(self.batch_size):
# Store sample
try:
tmpImg = array_list_temp[i]
if self.normalize_to_one:
# tmpMedian = filters.median(tmpImg, self.selem)
tmpMax = np.max(tmpImg)
max_pixels.append(tmpMax)
except IndexError:
X = X[:i,...]
break
# ensure image is uint8
# if tmpImg.dtype=="uint16":
# tmpImg = tmpImg / 2**16 * 2**8
# tmpImg = tmpImg.astype('uint8')
# if self.normalize_to_one:
# with warnings.catch_warnings():
# warnings.simplefilter('ignore')
# medImg = median(tmpImg, self.selem)
# tmpImg = tmpImg/np.max(medImg)
# tmpImg[tmpImg > 1] = 1
X[i,:,:,0] = tmpImg
if self.normalize_to_one:
channel_max = np.max(max_pixels) / (2**8 - 1)
# print("Channel max: {}".format(channel_max))
# print("Array max: {}".format(np.max(X)))
X = X/channel_max
# print("Normalized array max: {}".format(np.max(X)))
X[X > 1] = 1
return (X)
# class for image generation for predicting trap locations in phase-contrast images
class TrapSegmentationDataGenerator(utils.Sequence):
'Generates data for Keras'
def __init__(self, img_array, batch_size=32,
n_channels=1, normalize_to_one=False, shuffle=False):
'Initialization'
self.dim = (img_array.shape[1], img_array.shape[2])
self.img_number = img_array.shape[0]
self.img_array = img_array
self.batch_size = batch_size
self.n_channels = n_channels
self.shuffle = shuffle
self.on_epoch_end()
self.normalize_to_one = normalize_to_one
if normalize_to_one:
self.selem = morphology.disk(3)
def __len__(self):
'Denotes the number of batches per epoch'
return int(np.ceil(self.img_number / self.batch_size))
def __getitem__(self, index):
'Generate one batch of data'
# Generate indexes of the batch
indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]
# Find list of IDs
array_list_temp = [self.img_array[k,:,:,0] for k in indexes]
# Generate data
X = self.__data_generation(array_list_temp)
return X
def on_epoch_end(self):
'Updates indexes after each epoch'
self.indexes = np.arange(self.img_number)
if self.shuffle == True:
np.random.shuffle(self.indexes)
def __data_generation(self, array_list_temp):
'Generates data containing batch_size samples' # X : (n_samples, *dim, n_channels)
# Initialization
X = np.zeros((self.batch_size, self.dim[0], self.dim[1], self.n_channels))
# Generate data
for i in range(self.batch_size):
# Store sample
try:
tmpImg = array_list_temp[i]
except IndexError:
X = X[:i,...]
break
if self.normalize_to_one:
medImg = median(tmpImg, self.selem)
tmpImg = medImg/np.max(medImg)
X[i,:,:,0] = tmpImg
return (X)
# class for image generation for classifying traps as good, empty, out-of-focus, or defective
class TrapKymographPredictionDataGenerator(utils.Sequence):
'Generates data for Keras'
def __init__(self, list_fileNames, batch_size=32, dim=(32,32,32), n_channels=1,
n_classes=10, shuffle=False):
'Initialization'
self.dim = dim
self.batch_size = batch_size
self.list_fileNames = list_fileNames
self.n_channels = n_channels
self.n_classes = n_classes
self.shuffle = shuffle
self.on_epoch_end()
def __len__(self):
'Denotes the number of batches per epoch'
return int(np.ceil(len(self.list_fileNames) / self.batch_size))
def __getitem__(self, index):
'Generate one batch of data'
# Generate indexes of the batch
indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]
# Find list of IDs
list_fileNames_temp = [self.list_fileNames[k] for k in indexes]
# Generate data
X = self.__data_generation(list_fileNames_temp)
return X
def on_epoch_end(self):
'Updates indexes after each epoch'
self.indexes = np.arange(len(self.list_fileNames))
if self.shuffle == True:
np.random.shuffle(self.indexes)
def __data_generation(self, list_fileNames_temp):
'Generates data containing batch_size samples' # X : (n_samples, *dim, n_channels)
# Initialization
X = np.zeros((self.batch_size, self.dim[0], self.dim[1], self.n_channels))
# Generate data
for i, fName in enumerate(list_fileNames_temp):
# Store sample
tmpImg = io.imread(fName)
tmpImgShape = tmpImg.shape
if tmpImgShape[0] < self.dim[0]:
t_end = tmpImgShape[0]
else:
t_end = self.dim[0]
X[i,:t_end,:,:] = np.expand_dims(tmpImg[:t_end,:,tmpImg.shape[-1]//2], axis=-1)
return X
def absolute_diff(y_true, y_pred):
y_true_sum = K.sum(y_true)
y_pred_sum = K.sum(y_pred)
diff = K.abs(y_pred_sum - y_true_sum)/tf.to_float(tf.size(y_true))
return diff
def all_loss(y_true, y_pred):
loss = losses.binary_crossentropy(y_true, y_pred) + dice_loss(y_true, y_pred) + absolute_diff(y_true, y_pred)
return loss
def absolute_dice_loss(y_true, y_pred):
loss = dice_loss(y_true, y_pred) + absolute_diff(y_true, y_pred)
return loss
def recall_m(y_true, y_pred):
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
recall = true_positives / (possible_positives + K.epsilon())
return recall
def precision_m(y_true, y_pred):
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
precision = true_positives / (predicted_positives + K.epsilon())
return precision
def f1_m(y_true, y_pred):
precision = precision_m(y_true, y_pred)
recall = recall_m(y_true, y_pred)
return 2*((precision*recall)/(precision+recall+K.epsilon()))
def f2_m(y_true, y_pred, beta=2):
precision = precision_m(y_true, y_pred)
recall = recall_m(y_true, y_pred)
numer = (1+beta**2)*recall*precision
denom = recall + (beta**2)*precision + K.epsilon()
return numer/denom
def f_precision_m(y_true, y_pred, beta=0.5):
precision = precision_m(y_true, y_pred)
recall = recall_m(y_true, y_pred)
numer = (1+beta**2)*recall*precision
denom = recall + (beta**2)*precision + K.epsilon()
return numer/denom
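# Worked F-beta example (ignoring the K.epsilon() terms): with precision = 0.8 and
# recall = 0.5, f1_m gives 2*0.8*0.5/1.3 ~= 0.62, f2_m (beta=2) weights recall more
# and gives 5*0.4/3.7 ~= 0.54, and f_precision_m (beta=0.5) weights precision more
# and gives 1.25*0.4/0.7 ~= 0.71.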
# finds lineages for all peaks in a fov
def make_lineages_fov(fov_id, specs):
'''
For a given fov, create the lineages from the segmented images.
Called by
mm3_Segment.py
Calls
mm3.make_lineage_chnl_stack
'''
ana_peak_ids = [] # channels to be analyzed
for peak_id, spec in six.iteritems(specs[fov_id]):
if spec == 1: # 1 means analyze
ana_peak_ids.append(peak_id)
ana_peak_ids = sorted(ana_peak_ids) # sort for repeatability
information('Creating lineage for FOV %d with %d channels.' % (fov_id, len(ana_peak_ids)))
# just return if there are no peaks to analyze
if not ana_peak_ids:
# returning empty dictionary will add nothing to current cells dictionary
return {}
# This is a list of tuples (fov_id, peak_id) to send to the Pool command
fov_and_peak_ids_list = [(fov_id, peak_id) for peak_id in ana_peak_ids]
# set up multiprocessing pool. will complete pool before going on
#pool = Pool(processes=params['num_analyzers'])
# create the lineages for each peak individually
# the output is a list of dictionaries
#lineages = pool.map(make_lineage_chnl_stack, fov_and_peak_ids_list, chunksize=8)
#pool.close() # tells the process nothing more will be added.
#pool.join() # blocks script until everything has been processed and workers exit
# This is the non-parallelized version (useful for debug)
lineages = []
for fov_and_peak_ids in fov_and_peak_ids_list:
lineages.append(make_lineage_chnl_stack(fov_and_peak_ids))
# combine all dictionaries into one dictionary
Cells = {} # create dictionary to hold all information
for cell_dict in lineages: # for all the other dictionaries in the list
Cells.update(cell_dict) # updates Cells with the entries in cell_dict
return Cells
# get number of cells in each frame and total number of pairwise interactions
def get_cell_counts(regionprops_list):
cell_count_list = [len(time_regions) for time_regions in regionprops_list]
interaction_count_list = []
for i,cell_count in enumerate(cell_count_list):
if i+1 == len(cell_count_list):
break
interaction_count_list.append(cell_count*cell_count_list[i+1])
total_cells = np.sum(cell_count_list)
total_interactions = np.sum(interaction_count_list)
return(total_cells, total_interactions, cell_count_list, interaction_count_list)
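# Worked example: for frames containing 2, 3, and 2 regions respectively,
# cell_count_list is [2, 3, 2], interaction_count_list is [2*3, 3*2] = [6, 6],
# total_cells is 7, and total_interactions is 12 (every cell in a frame paired
# with every cell in the following frame).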
# get cells' information for track prediction
def gather_interactions_and_events(regionprops_list):
total_cells, total_interactions, cell_count_list, interaction_count_list = get_cell_counts(regionprops_list)
# instantiate an array with a 2x5 array for each pair of cells'
# min_y, max_y, centroid_y, area, and orientation
# in reality it would be much, much more efficient to
# look this information up in the data generator at run time
# for now, this will work
pairwise_cell_data = np.zeros((total_interactions,2,5,1))
# make a dictionary, the keys of which will be row indices so that we
# can quickly look up which timepoints/cells correspond to which
# rows of our model's output
pairwise_cell_lookup = {}
# populate arrays
interaction_count = 0
cell_count = 0
for frame, frame_regions in enumerate(regionprops_list):
for region in frame_regions:
cell_label = region.label
y,x = region.centroid
bbox = region.bbox
orientation = region.orientation
min_y = bbox[0]
max_y = bbox[2]
area = region.area
cell_label = region.label
cell_info = (min_y, max_y, y, area, orientation)
cell_count += 1
try:
frame_plus_one_regions = regionprops_list[frame+1]
except IndexError as e:
# print(e)
break
for region_plus_one in frame_plus_one_regions:
paired_cell_label = region_plus_one.label
y,x = region_plus_one.centroid
bbox = region_plus_one.bbox
orientation = region_plus_one.orientation # use the paired cell's own orientation
min_y = bbox[0]
max_y = bbox[2]
area = region_plus_one.area
pairwise_cell_data[interaction_count,0,:,0] = cell_info
pairwise_cell_data[interaction_count,1,:,0] = (min_y, max_y, y, area, orientation)
pairwise_cell_lookup[interaction_count] = {'frame':frame, 'cell_label':cell_label, 'paired_cell_label':paired_cell_label}
interaction_count += 1
return(pairwise_cell_data, pairwise_cell_lookup)
# look up which cells are interacting according to the track model
def cell_interaction_lookup(predictions, lookup_table):
'''
Accepts prediction matrix and
'''
frame = []
cell_label = []
paired_cell_label = []
interaction_type = []
# loop over rows of predictions
for row_index in range(predictions.shape[0]):
row_predictions = predictions[row_index]
row_relationship = np.where(row_predictions > 0.95)[0]
if row_relationship.size == 0:
continue
elif row_relationship[0] == 3:
continue
elif row_relationship[0] == 0:
interaction_type.append('migration')
elif row_relationship[0] == 1:
interaction_type.append('child')
elif row_relationship[0] == 2:
interaction_type.append('false_join')
frame.append(lookup_table[row_index]['frame'])
cell_label.append(lookup_table[row_index]['cell_label'])
paired_cell_label.append(lookup_table[row_index]['paired_cell_label'])
track_df = pd.DataFrame(data={'frame':frame,
'cell_label':cell_label,
'paired_cell_label':paired_cell_label,
'interaction_type':interaction_type})
return(track_df)
def get_tracking_model_dict():
model_dict = {}
if not 'migrate_model' in model_dict:
model_dict['migrate_model'] = models.load_model(params['tracking']['migrate_model'],
custom_objects={'all_loss':all_loss,
'f2_m':f2_m})
if not 'child_model' in model_dict:
model_dict['child_model'] = models.load_model(params['tracking']['child_model'],
custom_objects={'bce_dice_loss':bce_dice_loss,
'f2_m':f2_m})
if not 'appear_model' in model_dict:
model_dict['appear_model'] = models.load_model(params['tracking']['appear_model'],
custom_objects={'all_loss':all_loss,
'f2_m':f2_m})
if not 'die_model' in model_dict:
model_dict['die_model'] = models.load_model(params['tracking']['die_model'],
custom_objects={'all_loss':all_loss,
'f2_m':f2_m})
if not 'disappear_model' in model_dict:
model_dict['disappear_model'] = models.load_model(params['tracking']['disappear_model'],
custom_objects={'all_loss':all_loss,
'f2_m':f2_m})
if not 'born_model' in model_dict:
model_dict['born_model'] = models.load_model(params['tracking']['born_model'],
custom_objects={'all_loss':all_loss,
'f2_m':f2_m})
# if not 'zero_cell_model' in model_dict:
# model_dict['zero_cell_model'] = models.load_model(params['tracking']['zero_cell_model'],
# custom_objects={'absolute_dice_loss':absolute_dice_loss,
# 'f2_m':f2_m})
# if not 'one_cell_model' in model_dict:
# model_dict['one_cell_model'] = models.load_model(params['tracking']['one_cell_model'],
# custom_objects={'bce_dice_loss':bce_dice_loss,
# 'f2_m':f2_m})
# if not 'two_cell_model' in model_dict:
# model_dict['two_cell_model'] = models.load_model(params['tracking']['two_cell_model'],
# custom_objects={'all_loss':all_loss,
# 'f2_m':f2_m})
# if not 'geq_three_cell_model' in model_dict:
# model_dict['geq_three_cell_model'] = models.load_model(params['tracking']['geq_three_cell_model'],
# custom_objects={'bce_dice_loss':bce_dice_loss,
# 'f2_m':f2_m})
return(model_dict)
# Creates lineage for a single channel
def make_lineage_chnl_stack(fov_and_peak_id):
'''
Create the lineage for a set of segmented images for one channel. Start by making the
regions in the first time point potential cells. Go forward in time and map regions in
each timepoint to the potential cells in previous time points, building the life of a
cell. Basic checks are used, such as that regions should overlap and grow slightly
rather than shrink too much. If regions do not link back in time, they are discarded.
If two regions map to one previous region, check whether it is a sensible division event.
Parameters
----------
fov_and_peak_ids : tuple.
(fov_id, peak_id)
Returns
-------
Cells : dict
A dictionary of all the cells from this lineage, divided and undivided
'''
# load in parameters
# if leaf regions see no action for longer than this, drop them
lost_cell_time = params['track']['lost_cell_time']
# only cells with y positions below this value will receive the honor of becoming new
# cells, unless they are daughters of current cells
new_cell_y_cutoff = params['track']['new_cell_y_cutoff']
# only regions with labels less than or equal to this value will be considered to start cells
new_cell_region_cutoff = params['track']['new_cell_region_cutoff']
# get the specific ids from the tuple
fov_id, peak_id = fov_and_peak_id
# start time is the first time point for this series of TIFFs.
start_time_index = min(params['time_table'][fov_id].keys())
information('Creating lineage for FOV %d, channel %d.' % (fov_id, peak_id))
# load segmented data
image_data_seg = load_stack(fov_id, peak_id, color=params['track']['seg_img'])
# image_data_seg = load_stack(fov_id, peak_id, color='seg')
# Calculate all data for all time points.
# this list will be length of the number of time points
regions_by_time = [regionprops(label_image=timepoint) for timepoint in image_data_seg] # removed coordinates='xy'
# Set up data structures.
Cells = {} # Dict that holds all the cell objects, divided and undivided
cell_leaves = [] # cell ids of the current leaves of the growing lineage tree
# go through regions by timepoint and build lineages
# timepoints start with the index of the first image
for t, regions in enumerate(regions_by_time, start=start_time_index):
# if there are cell leaves who are still waiting to be linked, but
# too much time has passed, remove them.
for leaf_id in list(cell_leaves): # iterate over a copy since leaves may be removed
if t - Cells[leaf_id].times[-1] > lost_cell_time:
cell_leaves.remove(leaf_id)
# make all the regions leaves if there are no current leaves
if not cell_leaves:
for region in regions:
if region.centroid[0] < new_cell_y_cutoff and region.label <= new_cell_region_cutoff:
# Create cell and put in cell dictionary
cell_id = create_cell_id(region, t, peak_id, fov_id)
Cells[cell_id] = Cell(cell_id, region, t, parent_id=None)
# add the id to list of current leaves
cell_leaves.append(cell_id)
# Determine if the regions are children of current leaves
else:
### create mapping between regions and leaves
leaf_region_map = {}
leaf_region_map = {leaf_id : [] for leaf_id in cell_leaves}
# get the last y position of current leaves and create tuple with the id
current_leaf_positions = [(leaf_id, Cells[leaf_id].centroids[-1][0]) for leaf_id in cell_leaves]
# go through regions, they will come off in Y position order
for r, region in enumerate(regions):
# create tuple which is cell_id of closest leaf, distance
current_closest = (None, float('inf'))
# check this region against all positions of all current leaf regions,
# find the closest one in y.
for leaf in current_leaf_positions:
# calculate distance between region and leaf
y_dist_region_to_leaf = abs(region.centroid[0] - leaf[1])
# if the distance is closer than before, update
if y_dist_region_to_leaf < current_closest[1]:
current_closest = (leaf[0], y_dist_region_to_leaf)
# update map with the closest region
leaf_region_map[current_closest[0]].append((r, y_dist_region_to_leaf))
# go through the current leaf regions.
# limit to the closest two regions if more than two are linked to the leaf
for leaf_id, region_links in six.iteritems(leaf_region_map):
if len(region_links) > 2:
closest_two_regions = sorted(region_links, key=lambda x: x[1])[:2]
# but sort by region order so top region is first
closest_two_regions = sorted(closest_two_regions, key=lambda x: x[0])
# replace value in dictionary
leaf_region_map[leaf_id] = closest_two_regions
# for the discarded regions, put them as new leaves
# if they are near the closed end of the channel
discarded_regions = sorted(region_links, key=lambda x: x[1])[2:]
for discarded_region in discarded_regions:
region = regions[discarded_region[0]]
if region.centroid[0] < new_cell_y_cutoff and region.label <= new_cell_region_cutoff:
cell_id = create_cell_id(region, t, peak_id, fov_id)
Cells[cell_id] = Cell(cell_id, region, t, parent_id=None)
cell_leaves.append(cell_id) # add to leaves
else:
# since the regions are ordered, none of the remaining will pass
break
### iterate over the leaves, looking to see what regions connect to them.
for leaf_id, region_links in six.iteritems(leaf_region_map):
# if there is just one suggested descendant,
# see if it checks out and append the data
if len(region_links) == 1:
region = regions[region_links[0][0]] # grab the region from the list
# check if the pairing makes sense based on size and position
# this function returns true if things are okay
if check_growth_by_region(Cells[leaf_id], region):
# grow the cell by the region in this case
Cells[leaf_id].grow(region, t)
# there may be two daughters, or maybe there is just one child and a new cell
elif len(region_links) == 2:
# grab these two daughters
region1 = regions[region_links[0][0]]
region2 = regions[region_links[1][0]]
# check_division returns 3 if cell divided,
# 1 if first region is just the cell growing and the second is trash
# 2 if the second region is the cell, and the first is trash
# or 0 if it cannot be determined.
check_division_result = check_division(Cells[leaf_id], region1, region2)
if check_division_result == 3:
# create two new cells and divide the mother
daughter1_id = create_cell_id(region1, t, peak_id, fov_id)
daughter2_id = create_cell_id(region2, t, peak_id, fov_id)
Cells[daughter1_id] = Cell(daughter1_id, region1, t,
parent_id=leaf_id)
Cells[daughter2_id] = Cell(daughter2_id, region2, t,
parent_id=leaf_id)
Cells[leaf_id].divide(Cells[daughter1_id], Cells[daughter2_id], t)
# remove mother from current leaves
cell_leaves.remove(leaf_id)
# add the daughter ids to list of current leaves if they pass cutoffs
if region1.centroid[0] < new_cell_y_cutoff and region1.label <= new_cell_region_cutoff:
cell_leaves.append(daughter1_id)
if region2.centroid[0] < new_cell_y_cutoff and region2.label <= new_cell_region_cutoff:
cell_leaves.append(daughter2_id)
# 1 means that daughter 1 is just a continuation of the mother
                    # The other region should become a new leaf if it passes the requirements
elif check_division_result == 1:
Cells[leaf_id].grow(region1, t)
if region2.centroid[0] < new_cell_y_cutoff and region2.label <= new_cell_region_cutoff:
cell_id = create_cell_id(region2, t, peak_id, fov_id)
Cells[cell_id] = Cell(cell_id, region2, t, parent_id=None)
cell_leaves.append(cell_id) # add to leaves
# ditto for 2
elif check_division_result == 2:
Cells[leaf_id].grow(region2, t)
if region1.centroid[0] < new_cell_y_cutoff and region1.label <= new_cell_region_cutoff:
cell_id = create_cell_id(region1, t, peak_id, fov_id)
Cells[cell_id] = Cell(cell_id, region1, t, parent_id=None)
cell_leaves.append(cell_id) # add to leaves
# return the dictionary with all the cells
return Cells
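# A minimal usage sketch (hypothetical helper, not part of the original code):
# the returned dictionary mixes cells that divided with cells that never did,
# so downstream analysis often keeps only the cells whose division statistics
# (sb, sd, tau, ...) were filled in by divide().
def filter_divided_cells_example(Cells):
    '''Return only the cells that completed a division.'''
    return {cell_id: cell for cell_id, cell in Cells.items()
            if cell.division_time is not None}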
### Cell class and related functions
# this is the object that holds all information for a detection
class Detection():
'''
The Detection is a single detection in a single frame.
'''
# initialize (birth) the cell
def __init__(self, detection_id, region, t):
'''The detection must be given a unique detection_id and passed the region
information from the segmentation
Parameters
__________
detection_id : str
detection_id is a string in the form fXpXtXrX
f is 3 digit FOV number
p is 4 digit peak number
t is 4 digit time point
r is region label for that segmentation
Use the function create_detection_id to return a proper string.
region : region properties object
Information about the labeled region from
skimage.measure.regionprops()
'''
# create all the attributes
# id
self.id = detection_id
# identification convenience
self.fov = int(detection_id.split('f')[1].split('p')[0])
self.peak = int(detection_id.split('p')[1].split('t')[0])
self.t = t
self.cell_count = 1
# self.abs_times = [params['time_table'][self.fov][t]] # elapsed time in seconds
if region is not None:
self.label = region.label
self.bbox = region.bbox
self.area = region.area
            # calculating cell length and width by using Feret diameter. These values are in pixels
            length_tmp, width_tmp = feretdiameter(region)
            if length_tmp is None:
warning('feretdiameter() failed for ' + self.id + ' at t=' + str(t) + '.')
self.length = length_tmp
self.width = width_tmp
# calculate cell volume as cylinder plus hemispherical ends (sphere). Unit is px^3
self.volume = (length_tmp - width_tmp) * np.pi * (width_tmp/2)**2 + (4/3) * np.pi * (width_tmp/2)**3
            # angle of the fit ellipsoid and centroid location
self.orientation = region.orientation
self.centroid = region.centroid
else:
self.label = None
self.bbox = None
self.area = None
            # calculating cell length and width by using Feret diameter. These values are in pixels
length_tmp, width_tmp = (None, None)
self.length = None
self.width = None
# calculate cell volume as cylinder plus hemispherical ends (sphere). Unit is px^3
self.volume = None
            # angle of the fit ellipsoid and centroid location
self.orientation = None
self.centroid = None
# this is the object that holds all information for a cell
class Cell():
'''
    The Cell class is one cell that has been born. It is not necessarily a cell that
has divided.
'''
# initialize (birth) the cell
def __init__(self, cell_id, region, t, parent_id=None):
'''The cell must be given a unique cell_id and passed the region
information from the segmentation
Parameters
__________
cell_id : str
cell_id is a string in the form fXpXtXrX
f is 3 digit FOV number
p is 4 digit peak number
t is 4 digit time point at time of birth
r is region label for that segmentation
        Use the function create_cell_id to return a proper string.
region : region properties object
Information about the labeled region from
skimage.measure.regionprops()
parent_id : str
id of the parent if there is one.
'''
# create all the attributes
# id
self.id = cell_id
# identification convenience
self.fov = int(cell_id.split('f')[1].split('p')[0])
self.peak = int(cell_id.split('p')[1].split('t')[0])
self.birth_label = int(cell_id.split('r')[1])
# parent id may be none
self.parent = parent_id
# daughters is updated when cell divides
# if this is none then the cell did not divide
self.daughters = None
# birth and division time
self.birth_time = t
self.division_time = None # filled out if cell divides
# the following information is on a per timepoint basis
self.times = [t]
self.abs_times = [params['time_table'][self.fov][t]] # elapsed time in seconds
self.labels = [region.label]
self.bboxes = [region.bbox]
self.areas = [region.area]
        # calculating cell length and width by using Feret diameter. These values are in pixels
        length_tmp, width_tmp = feretdiameter(region)
        if length_tmp is None:
warning('feretdiameter() failed for ' + self.id + ' at t=' + str(t) + '.')
self.lengths = [length_tmp]
self.widths = [width_tmp]
# calculate cell volume as cylinder plus hemispherical ends (sphere). Unit is px^3
self.volumes = [(length_tmp - width_tmp) * np.pi * (width_tmp/2)**2 +
(4/3) * np.pi * (width_tmp/2)**3]
        # angle of the fit ellipsoid and centroid location
self.orientations = [region.orientation]
self.centroids = [region.centroid]
        # these are special datatypes, as they include information from the daughters for division
# computed upon division
self.times_w_div = None
self.lengths_w_div = None
self.widths_w_div = None
# this information is the "production" information that
# we want to extract at the end. Some of this is for convenience.
# This is only filled out if a cell divides.
self.sb = None # in um
self.sd = None # this should be combined lengths of daughters, in um
self.delta = None
self.tau = None
self.elong_rate = None
self.septum_position = None
self.width = None
self.death = None
def grow(self, region, t):
'''Append data from a region to this cell.
use cell.times[-1] to get most current value'''
self.times.append(t)
self.abs_times.append(params['time_table'][self.fov][t])
self.labels.append(region.label)
self.bboxes.append(region.bbox)
self.areas.append(region.area)
        # calculating cell length and width by using Feret diameter
        length_tmp, width_tmp = feretdiameter(region)
        if length_tmp is None:
warning('feretdiameter() failed for ' + self.id + ' at t=' + str(t) + '.')
self.lengths.append(length_tmp)
self.widths.append(width_tmp)
self.volumes.append((length_tmp - width_tmp) * np.pi * (width_tmp/2)**2 +
(4/3) * np.pi * (width_tmp/2)**3)
self.orientations.append(region.orientation)
self.centroids.append(region.centroid)
def die(self, region, t):
'''
Annotate cell as dying from current t to next t.
'''
self.death = t
def divide(self, daughter1, daughter2, t):
'''Divide the cell and update stats.
        daughter1 and daughter2 are instances of the Cell class.
        daughter1 is the daughter closer to the closed end.'''
        # put the daughter ids into the cell
self.daughters = [daughter1.id, daughter2.id]
# give this guy a division time
self.division_time = daughter1.birth_time
# update times
self.times_w_div = self.times + [self.division_time]
self.abs_times.append(params['time_table'][self.fov][self.division_time])
# flesh out the stats for this cell
# size at birth
self.sb = self.lengths[0] * params['pxl2um']
# force the division length to be the combined lengths of the daughters
self.sd = (daughter1.lengths[0] + daughter2.lengths[0]) * params['pxl2um']
# delta is here for convenience
self.delta = self.sd - self.sb
# generation time. Use more accurate times and convert to minutes
self.tau = np.float64((self.abs_times[-1] - self.abs_times[0]) / 60.0)
# include the data points from the daughters
self.lengths_w_div = [l * params['pxl2um'] for l in self.lengths] + [self.sd]
self.widths_w_div = [w * params['pxl2um'] for w in self.widths] + [((daughter1.widths[0] + daughter2.widths[0])/2) * params['pxl2um']]
# volumes for all timepoints, in um^3
self.volumes_w_div = []
for i in range(len(self.lengths_w_div)):
self.volumes_w_div.append((self.lengths_w_div[i] - self.widths_w_div[i]) *
np.pi * (self.widths_w_div[i]/2)**2 +
(4/3) * np.pi * (self.widths_w_div[i]/2)**3)
# calculate elongation rate.
try:
times = np.float64((np.array(self.abs_times) - self.abs_times[0]) / 60.0)
log_lengths = np.float64(np.log(self.lengths_w_div))
p = np.polyfit(times, log_lengths, 1) # this wants float64
self.elong_rate = p[0] * 60.0 # convert to hours
        except Exception:
            self.elong_rate = np.float64('NaN')
            warning('Elongation rate calculation failed for {}.'.format(self.id))
# calculate the septum position as a number between 0 and 1
# which indicates the size of daughter closer to the closed end
# compared to the total size
self.septum_position = daughter1.lengths[0] / (daughter1.lengths[0] + daughter2.lengths[0])
# calculate single width over cell's life
self.width = np.mean(self.widths_w_div)
# convert data to smaller floats. No need for float64
# see https://docs.scipy.org/doc/numpy-1.13.0/user/basics.types.html
convert_to = 'float16' # numpy datatype to convert to
self.sb = self.sb.astype(convert_to)
self.sd = self.sd.astype(convert_to)
self.delta = self.delta.astype(convert_to)
self.elong_rate = self.elong_rate.astype(convert_to)
self.tau = self.tau.astype(convert_to)
self.septum_position = self.septum_position.astype(convert_to)
self.width = self.width.astype(convert_to)
self.lengths = [length.astype(convert_to) for length in self.lengths]
self.lengths_w_div = [length.astype(convert_to) for length in self.lengths_w_div]
self.widths = [width.astype(convert_to) for width in self.widths]
self.widths_w_div = [width.astype(convert_to) for width in self.widths_w_div]
self.volumes = [vol.astype(convert_to) for vol in self.volumes]
self.volumes_w_div = [vol.astype(convert_to) for vol in self.volumes_w_div]
# note the float16 is hardcoded here
self.orientations = [np.float16(orientation) for orientation in self.orientations]
self.centroids = [(y.astype(convert_to), x.astype(convert_to)) for y, x in self.centroids]
def print_info(self):
'''prints information about the cell'''
print('id = %s' % self.id)
print('times = {}'.format(', '.join('{}'.format(t) for t in self.times)))
print('lengths = {}'.format(', '.join('{:.2f}'.format(l) for l in self.lengths)))
class CellTree():
def __init__(self):
self.cells = {}
self.scores = [] # probably needs to be different
self.score = 0
self.cell_id_list = []
def add_cell(self, cell):
self.cells[cell.id] = cell
self.cell_id_list.append(cell.id)
self.cell_id_list.sort()
def update_score(self):
pass
def get_cell(self, cell_id):
return(self.cells[cell_id])
def get_top_from_cell(self, cell_id):
pass
# this is the object that holds all information for a cell
class CellFromGraph():
'''
The CellFromGraph class is one cell that has been born.
    It is not necessarily a cell that has divided.
'''
# initialize (birth) the cell
def __init__(self, cell_id, region, t, parent=None):
'''The cell must be given a unique cell_id and passed the region
information from the segmentation
Parameters
__________
cell_id : str
cell_id is a string in the form fXpXtXrX
f is 3 digit FOV number
p is 4 digit peak number
t is 4 digit time point at time of birth
r is region label for that segmentation
        Use the function create_cell_id to return a proper string.
region : region properties object
Information about the labeled region from
skimage.measure.regionprops()
parent_id : str
id of the parent if there is one.
'''
# create all the attributes
# id
self.id = cell_id
# identification convenience
self.fov = int(cell_id.split('f')[1].split('p')[0])
self.peak = int(cell_id.split('p')[1].split('t')[0])
self.birth_label = int(region.label)
self.regions = [region]
# parent is a CellFromGraph object, can be None
self.parent = parent
# daughters is updated when cell divides
# if this is none then the cell did not divide
self.daughters = None
# birth and division time
self.birth_time = t
self.division_time = None # filled out if cell divides
# the following information is on a per timepoint basis
self.times = [t]
self.abs_times = [params['time_table'][self.fov][t]] # elapsed time in seconds
self.labels = [region.label]
self.bboxes = [region.bbox]
self.areas = [region.area]
        # calculating cell length and width by using Feret diameter. These values are in pixels
        length_tmp, width_tmp = feretdiameter(region)
        if length_tmp is None:
warning('feretdiameter() failed for ' + self.id + ' at t=' + str(t) + '.')
self.lengths = [length_tmp]
self.widths = [width_tmp]
# calculate cell volume as cylinder plus hemispherical ends (sphere). Unit is px^3
self.volumes = [(length_tmp - width_tmp) * np.pi * (width_tmp/2)**2 +
(4/3) * np.pi * (width_tmp/2)**3]
        # angle of the fit ellipsoid and centroid location
self.orientations = [region.orientation]
self.centroids = [region.centroid]
        # these are special datatypes, as they include information from the daughters for division
# computed upon division
self.times_w_div = None
self.lengths_w_div = None
self.widths_w_div = None
# this information is the "production" information that
# we want to extract at the end. Some of this is for convenience.
# This is only filled out if a cell divides.
self.sb = None # in um
self.sd = None # this should be combined lengths of daughters, in um
self.delta = None
self.tau = None
self.elong_rate = None
self.septum_position = None
self.width = None
self.death = None
self.disappear = None
self.area_mean_fluorescence = {}
self.volume_mean_fluorescence = {}
self.total_fluorescence = {}
self.foci = {}
def __len__(self):
return(len(self.times))
def add_parent(self, parent):
self.parent = parent
def grow(self, region, t):
'''Append data from a region to this cell.
use cell.times[-1] to get most current value'''
self.times.append(t)
self.abs_times.append(params['time_table'][self.fov][t])
self.labels.append(region.label)
self.bboxes.append(region.bbox)
self.areas.append(region.area)
self.regions.append(region)
        # calculating cell length and width by using Feret diameter
        length_tmp, width_tmp = feretdiameter(region)
        if length_tmp is None:
warning('feretdiameter() failed for ' + self.id + ' at t=' + str(t) + '.')
self.lengths.append(length_tmp)
self.widths.append(width_tmp)
self.volumes.append((length_tmp - width_tmp) * np.pi * (width_tmp/2)**2 +
(4/3) * np.pi * (width_tmp/2)**3)
self.orientations.append(region.orientation)
self.centroids.append(region.centroid)
def die(self, region, t):
'''
Annotate cell as dying from current t to next t.
'''
self.death = t
def disappears(self, region, t):
'''
Annotate cell as disappearing from current t to next t.
'''
self.disappear = t
def add_daughter(self, daughter, t):
if self.daughters is None:
self.daughters = [daughter]
else:
self.daughters.append(daughter)
assert len(self.daughters) < 3, "Too many daughter cells in cell {}".format(self.id)
# sort daughters by y position, with smaller y-value first.
# this will cause the daughter closer to the closed end of the trap to be listed first.
self.daughters.sort(key=lambda cell: cell.centroids[0][0])
self.divide(t)
def divide(self, t):
'''Divide the cell and update stats.
        daughter1 is the daughter closer to the closed end.'''
        # put the daughter ids into the cell
# self.daughters = [daughter1.id, daughter2.id]
# give this guy a division time
self.division_time = self.daughters[0].birth_time
# update times
self.times_w_div = self.times + [self.division_time]
self.abs_times.append(params['time_table'][self.fov][self.division_time])
# flesh out the stats for this cell
# size at birth
self.sb = self.lengths[0] * params['pxl2um']
# force the division length to be the combined lengths of the daughters
self.sd = (self.daughters[0].lengths[0] + self.daughters[1].lengths[0]) * params['pxl2um']
# delta is here for convenience
self.delta = self.sd - self.sb
# generation time. Use more accurate times and convert to minutes
self.tau = np.float64((self.abs_times[-1] - self.abs_times[0]) / 60.0)
# include the data points from the daughters
self.lengths_w_div = [l * params['pxl2um'] for l in self.lengths] + [self.sd]
self.widths_w_div = [w * params['pxl2um'] for w in self.widths] + [((self.daughters[0].widths[0] + self.daughters[1].widths[0])/2) * params['pxl2um']]
# volumes for all timepoints, in um^3
self.volumes_w_div = []
for i in range(len(self.lengths_w_div)):
self.volumes_w_div.append((self.lengths_w_div[i] - self.widths_w_div[i]) *
np.pi * (self.widths_w_div[i]/2)**2 +
(4/3) * np.pi * (self.widths_w_div[i]/2)**3)
# calculate elongation rate.
try:
times = np.float64((np.array(self.abs_times) - self.abs_times[0]) / 60.0) # convert times to minutes
log_lengths = np.float64(np.log(self.lengths_w_div))
p = np.polyfit(times, log_lengths, 1) # this wants float64
self.elong_rate = p[0] * 60.0 # convert to hours
        except Exception:
            self.elong_rate = np.float64('NaN')
            warning('Elongation rate calculation failed for {}.'.format(self.id))
# calculate the septum position as a number between 0 and 1
# which indicates the size of daughter closer to the closed end
# compared to the total size
self.septum_position = self.daughters[0].lengths[0] / (self.daughters[0].lengths[0] + self.daughters[1].lengths[0])
# calculate single width over cell's life
self.width = np.mean(self.widths_w_div)
# convert data to smaller floats. No need for float64
# see https://docs.scipy.org/doc/numpy-1.13.0/user/basics.types.html
convert_to = 'float16' # numpy datatype to convert to
self.sb = self.sb.astype(convert_to)
self.sd = self.sd.astype(convert_to)
self.delta = self.delta.astype(convert_to)
self.elong_rate = self.elong_rate.astype(convert_to)
self.tau = self.tau.astype(convert_to)
self.septum_position = self.septum_position.astype(convert_to)
self.width = self.width.astype(convert_to)
self.lengths = [length.astype(convert_to) for length in self.lengths]
self.lengths_w_div = [length.astype(convert_to) for length in self.lengths_w_div]
self.widths = [width.astype(convert_to) for width in self.widths]
self.widths_w_div = [width.astype(convert_to) for width in self.widths_w_div]
self.volumes = [vol.astype(convert_to) for vol in self.volumes]
self.volumes_w_div = [vol.astype(convert_to) for vol in self.volumes_w_div]
# note the float16 is hardcoded here
self.orientations = [np.float16(orientation) for orientation in self.orientations]
self.centroids = [(y.astype(convert_to), x.astype(convert_to)) for y, x in self.centroids]
def add_focus(self, focus, t):
'''Adds a focus to the cell. See function foci_info_unet'''
self.foci[focus.id] = focus
def print_info(self):
'''prints information about the cell'''
print('id = %s' % self.id)
print('times = {}'.format(', '.join('{}'.format(t) for t in self.times)))
print('lengths = {}'.format(', '.join('{:.2f}'.format(l) for l in self.lengths)))
if self.daughters is not None:
print('daughters = {}'.format(', '.join('{}'.format(daughter.id) for daughter in self.daughters)))
if self.parent is not None:
print('parent = {}'.format(self.parent.id))
def make_wide_df(self):
data = {}
data['id'] = self.id
data['fov'] = self.fov
data['trap'] = self.peak
data['parent'] = self.parent
data['child1'] = None
data['child2'] = None
data['division_time'] = self.division_time
data['birth_label'] = self.birth_label
data['birth_time'] = self.birth_time
data['sb'] = self.sb
data['sd'] = self.sd
data['delta'] = self.delta
data['tau'] = self.tau
data['elong_rate'] = self.elong_rate
data['septum_position'] = self.septum_position
data['death'] = self.death
data['disappear'] = self.disappear
if self.daughters is not None:
data['child1'] = self.daughters[0]
if len(self.daughters) == 2:
data['child2'] = self.daughters[1]
df = pd.DataFrame(data, index=[self.id])
return(df)
def make_long_df(self):
data = {}
data['id'] = [self.id]*len(self.times)
data['times'] = self.times
data['length'] = self.lengths
data['volume'] = self.volumes
data['area'] = self.areas
# if a cell divides then there is one extra value in abs_times
if self.division_time is None:
data['seconds'] = self.abs_times
else:
data['seconds'] = self.abs_times[:-1]
# if there is fluorescence data, place it into the dataframe
if len(self.area_mean_fluorescence.keys()) != 0:
for fluorescence_channel in self.area_mean_fluorescence.keys():
data['{}_area_mean_fluorescence'.format(fluorescence_channel)] = self.area_mean_fluorescence[fluorescence_channel]
data['{}_volume_mean_fluorescence'.format(fluorescence_channel)] = self.volume_mean_fluorescence[fluorescence_channel]
data['{}_total_fluorescence'.format(fluorescence_channel)] = self.total_fluorescence[fluorescence_channel]
df = pd.DataFrame(data, index=data['id'])
return(df)
# this is the object that holds all information for a fluorescent focus
# this class can eventually be used in focus tracking, much like the Cell class
# is used for cell tracking
class Focus():
'''
The Focus class holds information on fluorescent foci.
A single focus can be present in multiple different cells.
'''
# initialize the focus
def __init__(self,
cell,
region,
seg_img,
intensity_image,
t):
'''The cell must be given a unique cell_id and passed the region
information from the segmentation
Parameters
__________
cell : a Cell object
region : region properties object
Information about the labeled region from
skimage.measure.regionprops()
seg_img : 2D numpy array
Labelled image of cell segmentations
intensity_image : 2D numpy array
Fluorescence image with foci
'''
# create all the attributes
# id
focus_id = create_focus_id(region,
t,
cell.peak,
cell.fov,
experiment_name=params['experiment_name'])
self.id = focus_id
# identification convenience
self.appear_label = int(region.label)
self.regions = [region]
self.fov = cell.fov
self.peak = cell.peak
# cell is a CellFromGraph object
# cells are added later using the .add_cell method
self.cells = [cell]
# daughters is updated when focus splits
# if this is none then the focus did not split
self.parent = None
self.daughters = None
self.merger_partner = None
# appearance and split time
self.appear_time = t
self.split_time = None # filled out if focus splits
# the following information is on a per timepoint basis
self.times = [t]
self.abs_times = [params['time_table'][cell.fov][t]] # elapsed time in seconds
self.labels = [region.label]
self.bboxes = [region.bbox]
self.areas = [region.area]
        # calculating focus length and width by using Feret diameter.
        # These values are in pixels
        # NOTE: in the future, update to straighten a focus and get straightened length/width
# print(region)
length_tmp = region.major_axis_length
width_tmp = region.minor_axis_length
# length_tmp, width_tmp = feretdiameter(region)
# if length_tmp == None:
# warning('feretdiameter() failed for ' + self.id + ' at t=' + str(t) + '.')
self.lengths = [length_tmp]
self.widths = [width_tmp]
# calculate focus volume as cylinder plus hemispherical ends (sphere). Unit is px^3
self.volumes = [(length_tmp - width_tmp) * np.pi * (width_tmp/2)**2 +
(4/3) * np.pi * (width_tmp/2)**3]
        # angle of the fit ellipsoid and centroid location
self.orientations = [region.orientation]
self.centroids = [region.centroid]
        # special information for foci
self.elong_rate = None
self.disappear = None
self.area_mean_fluorescence = []
self.volume_mean_fluorescence = []
self.total_fluorescence = []
self.median_fluorescence = []
self.sd_fluorescence = []
self.disp_l = []
self.disp_w = []
self.calculate_fluorescence(seg_img, intensity_image, region)
def __len__(self):
return(len(self.times))
def __str__(self):
return(self.print_info())
def add_cell(self, cell):
self.cells.append(cell)
def add_parent_focus(self, parent):
self.parent = parent
def merge(self, partner):
self.merger_partner = partner
def grow(self,
region,
t,
seg_img,
intensity_image,
current_cell):
'''Append data from a region to this focus.
use self.times[-1] to get most current value.'''
if current_cell is not self.cells[-1]:
self.add_cell(current_cell)
self.times.append(t)
self.abs_times.append(params['time_table'][self.cells[-1].fov][t])
self.labels.append(region.label)
self.bboxes.append(region.bbox)
self.areas.append(region.area)
self.regions.append(region)
        # calculating focus length and width by using Feret diameter
length_tmp = region.major_axis_length
width_tmp = region.minor_axis_length
# length_tmp, width_tmp = feretdiameter(region)
# if length_tmp == None:
# warning('feretdiameter() failed for ' + self.id + ' at t=' + str(t) + '.')
self.lengths.append(length_tmp)
self.widths.append(width_tmp)
self.volumes.append((length_tmp - width_tmp) * np.pi * (width_tmp/2)**2 +
(4/3) * np.pi * (width_tmp/2)**3)
self.orientations.append(region.orientation)
self.centroids.append(region.centroid)
self.calculate_fluorescence(seg_img, intensity_image, region)
def calculate_fluorescence(self,
seg_img,
intensity_image,
region):
total_fluor = np.sum(intensity_image[seg_img == region.label])
self.total_fluorescence.append(total_fluor)
self.area_mean_fluorescence.append(total_fluor/self.areas[-1])
self.volume_mean_fluorescence.append(total_fluor/self.volumes[-1])
self.median_fluorescence.append(np.median(intensity_image[seg_img == region.label]))
self.sd_fluorescence.append(np.std(intensity_image[seg_img == region.label]))
# get the focus' displacement from center of cell
# find x and y position relative to the whole image (convert from small box)
# calculate distance of foci from middle of cell (scikit image)
orientation = region.orientation
if orientation < 0:
orientation = np.pi+orientation
cell_idx = self.cells[-1].times.index(self.times[-1]) # final time in self.times is current time
cell_centroid = self.cells[-1].centroids[cell_idx]
focus_centroid = region.centroid
disp_y = (focus_centroid[0]-cell_centroid[0])*np.sin(orientation) - (focus_centroid[1]-cell_centroid[1])*np.cos(orientation)
disp_x = (focus_centroid[0]-cell_centroid[0])*np.cos(orientation) + (focus_centroid[1]-cell_centroid[1])*np.sin(orientation)
# append foci information to the list
self.disp_l = np.append(self.disp_l, disp_y)
        self.disp_w = np.append(self.disp_w, disp_x)
import numpy as np
import scipy.optimize as optimization
import matplotlib.pyplot as plt
try:
from submm_python_routines.KIDs import calibrate
except:
from KIDs import calibrate
from numba import jit # to get working on python 2 I had to downgrade llvmlite pip install llvmlite==0.31.0
# module for fitting resonance curves for kinetic inductance detectors.
# written by <NAME> 12/21/16
# for example see test_fit.py in this directory
# To Do
# I think the error analysis on the fit_nonlinear_iq_with_err probably needs some work
# add in step by step fitting i.e. first amplitude normalization, then cable delay, then i0,q0 subtraction, then phase rotation, then the rest of the fit.
# need to have a fit option that just specifies tau because that never really changes for your cryostat
#Change log
#JDW 2017-08-17 added in a keyword/function to allow for gain variation "amp_var" to be taken out before fitting
#JDW 2017-08-30 added in fitting for magnitude fitting of resonators i.e. not in iq space
#JDW 2018-03-05 added more clever function for guessing x0 for fits
#JDW 2018-08-23 added more clever guessing for resonators with large phi into separate guess functions
J=np.exp(2j*np.pi/3)
Jc=1/J
@jit(nopython=True)
def cardan(a,b,c,d):
'''
    fast analytical root finding: using numba looks like a ~10x speed up
returns only the largest real root
'''
u=np.empty(2,np.complex128)
z0=b/3/a
a2,b2 = a*a,b*b
p=-b2/3/a2 +c/a
q=(b/27*(2*b2/a2-9*c/a)+d)/a
D=-4*p*p*p-27*q*q
r=np.sqrt(-D/27+0j)
u=((-q-r)/2)**(1/3.)#0.33333333333333333333333
v=((-q+r)/2)**(1/3.)#0.33333333333333333333333
w=u*v
w0=np.abs(w+p/3)
w1=np.abs(w*J+p/3)
w2=np.abs(w*Jc+p/3)
if w0<w1:
if w2<w0 : v*=Jc
elif w2<w1 : v*=Jc
else: v*=J
roots = np.asarray((u+v-z0, u*J+v*Jc-z0,u*Jc+v*J-z0))
#print(roots)
where_real = np.where(np.abs(np.imag(roots)) < 1e-15)
#if len(where_real)>1: print(len(where_real))
#print(D)
if D>0: return np.max(np.real(roots)) # three real roots
else: return np.real(roots[np.argsort(np.abs(np.imag(roots)))][0]) #one real root get the value that has smallest imaginary component
#return np.max(np.real(roots[where_real]))
#return np.asarray((u+v-z0, u*J+v*Jc-z0,u*Jc+v*J-z0))
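# A small sanity-check sketch (illustrative coefficients only, not part of the
# original module): the analytic solver above should agree with numpy's generic
# root finder on the largest real root of 4y^3 - 4*yg*y^2 + y - (yg+a).
def _example_cardan_vs_numpy_roots(yg=2.0, a=0.5):
    analytic = cardan(4.0, -4.0*yg, 1.0, -(yg+a))
    roots = np.roots((4.0, -4.0*yg, 1.0, -(yg+a)))
    numeric = np.max(np.real(roots[np.abs(np.imag(roots)) < 1e-9]))
    print(analytic, numeric)  # the two values should agree closely
    return analytic, numeric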
# function to describe the magnitude S21 of a non-linear resonator
@jit(nopython=True)
def nonlinear_mag(x,fr,Qr,amp,phi,a,b0,b1,flin):
    '''
    # x is the frequencies your iq sweep covers
    # fr is the center frequency of the resonator
    # Qr is the quality factor of the resonator
    # amp is Qr/Qc
    # phi is a rotation parameter for an impedance mismatch between the resonator and the readout system
    # a is the non-linearity parameter; bifurcation occurs at a = 0.77
    # b0 DC level of s21 away from resonator
    # b1 frequency dependent gain variation
    # flin is probably the frequency of the resonator when a = 0
    #
    # This is based off of fitting code from MUSIC
    # The idea is we are producing a model that is described by the equation below
    # the first two terms in the large parenthesis and all other terms are familiar to me
    # but I am not sure where the last term comes from though it does seem to be important for fitting
    #
    # |S21|^2 = (b0 + b1*x_lin) * |1 - amp*e^(j phi)/(1 + 2jy) + (amp/2)*(e^(j phi) - 1)|^2
    #
    # where the nonlinearity of y is described by the following equation taken from
    # "Response of superconducting microresonators with nonlinear kinetic inductance"
    # yg = y + a/(1+y^2)  where yg = Qr*xg and xg = (f-fr)/fr
    '''
xlin = (x - flin)/flin
xg = (x-fr)/fr
yg = Qr*xg
y = np.zeros(x.shape[0])
#find the roots of the y equation above
for i in range(0,x.shape[0]):
# 4y^3+ -4yg*y^2+ y -(yg+a)
#roots = np.roots((4.0,-4.0*yg[i],1.0,-(yg[i]+a)))
#roots = cardan(4.0,-4.0*yg[i],1.0,-(yg[i]+a))
#print(roots)
#roots = np.roots((16.,-16.*yg[i],8.,-8.*yg[i]+4*a*yg[i]/Qr-4*a,1.,-yg[i]+a*yg[i]/Qr-a+a**2/Qr)) #more accurate version that doesn't seem to change the fit at al
# only care about real roots
#where_real = np.where(np.imag(roots) == 0)
#where_real = np.where(np.abs(np.imag(roots)) < 1e-10) #analytic version has some floating point error accumulation
y[i] = cardan(4.0,-4.0*yg[i],1.0,-(yg[i]+a))#np.max(np.real(roots[where_real]))
z = (b0 +b1*xlin)*np.abs(1.0 - amp*np.exp(1.0j*phi)/ (1.0 +2.0*1.0j*y) + amp/2.*(np.exp(1.0j*phi) -1.0))**2
return z
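# A minimal sketch (all parameter values below are illustrative assumptions,
# not part of the original module) showing how the |S21|^2 model is evaluated
# over a frequency sweep and inspected.
def _example_nonlinear_mag_model():
    f = np.linspace(99.95e6, 100.05e6, 1001)  # Hz
    # arguments: x, fr, Qr, amp, phi, a, b0, b1, flin
    s21_sq = nonlinear_mag(f, 100.0e6, 20000., 0.5, 0.1, 0.3, 1.0, 0.0, 100.0e6)
    plt.plot(f/1e6, 10*np.log10(s21_sq))  # resonance dip in dB
    plt.xlabel('frequency (MHz)')
    plt.ylabel('|S21|^2 (dB)')
    plt.show()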
@jit(nopython=True)
def linear_mag(x,fr,Qr,amp,phi,b0):
    '''
    # simpler version for quicker fitting when applicable
    # x is the frequencies your iq sweep covers
    # fr is the center frequency of the resonator
    # Qr is the quality factor of the resonator
    # amp is Qr/Qc
    # phi is a rotation parameter for an impedance mismatch between the resonator and the readout system
    # b0 DC level of s21 away from resonator
    #
    # This is based off of fitting code from MUSIC
    # The idea is we are producing a model that is described by the equation below
    # the first two terms in the large parenthesis and all other terms are familiar to me
    # but I am not sure where the last term comes from though it does seem to be important for fitting
    #
    # |S21|^2 = b0 * |1 - amp*e^(j phi)/(1 + 2j*xg*Qr) + (amp/2)*(e^(j phi) - 1)|^2
    #
    # no y, just xg, i.e. no nonlinear kinetic inductance
    '''
if not np.isscalar(fr): #vectorize
x = np.reshape(x,(x.shape[0],1,1,1,1,1))
xg = (x-fr)/fr
z = (b0)*np.abs(1.0 - amp*np.exp(1.0j*phi)/ (1.0 +2.0*1.0j*xg*Qr) + amp/2.*(np.exp(1.0j*phi) -1.0))**2
return z
# function to describe the i q loop of a nonlinear resonator
@jit(nopython=True)
def nonlinear_iq(x,fr,Qr,amp,phi,a,i0,q0,tau,f0):
    '''
    # x is the frequencies your iq sweep covers
    # fr is the center frequency of the resonator
    # Qr is the quality factor of the resonator
    # amp is Qr/Qc
    # phi is a rotation parameter for an impedance mismatch between the resonator and the readout system
    # a is the non-linearity parameter; bifurcation occurs at a = 0.77
    # i0
    # q0 these are constants that describe an overall phase rotation of the iq loop plus a DC gain offset
    # tau cable delay
    # f0 is also the center frequency; not sure why we include this as a secondary parameter, it should be the same as fr
    #
    # This is based off of fitting code from MUSIC
    #
    # The idea is we are producing a model that is described by the equation below
    # the first two terms in the large parenthesis and all other terms are familiar to me
    # but I am not sure where the last term comes from though it does seem to be important for fitting
    #
    # (i0 + j*q0) * e^(-j 2 pi deltaf tau) * (1 - amp*e^(j phi)/(1 + 2jy) + (amp/2)*(e^(j phi) - 1))
    #
    # where the nonlinearity of y is described by the following equation taken from
    # "Response of superconducting microresonators with nonlinear kinetic inductance"
    # yg = y + a/(1+y^2)  where yg = Qr*xg and xg = (f-fr)/fr
    '''
deltaf = (x - f0)
xg = (x-fr)/fr
yg = Qr*xg
y = np.zeros(x.shape[0])
#find the roots of the y equation above
for i in range(0,x.shape[0]):
# 4y^3+ -4yg*y^2+ y -(yg+a)
#roots = np.roots((4.0,-4.0*yg[i],1.0,-(yg[i]+a)))
#roots = np.roots((16.,-16.*yg[i],8.,-8.*yg[i]+4*a*yg[i]/Qr-4*a,1.,-yg[i]+a*yg[i]/Qr-a+a**2/Qr)) #more accurate version that doesn't seem to change the fit at al
# only care about real roots
#where_real = np.where(np.imag(roots) == 0)
#y[i] = np.max(np.real(roots[where_real]))
y[i] = cardan(4.0,-4.0*yg[i],1.0,-(yg[i]+a))
z = (i0 +1.j*q0)* np.exp(-1.0j* 2* np.pi *deltaf*tau) * (1.0 - amp*np.exp(1.0j*phi)/ (1.0 +2.0*1.0j*y) + amp/2.*(np.exp(1.0j*phi) -1.0))
return z
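# A companion sketch to the magnitude example above (values are again
# illustrative assumptions, not part of the original module): evaluate the
# complex model and look at the resonance loop in the IQ plane.
def _example_nonlinear_iq_model():
    f = np.linspace(99.95e6, 100.05e6, 1001)  # Hz
    # arguments: x, fr, Qr, amp, phi, a, i0, q0, tau, f0
    z = nonlinear_iq(f, 100.0e6, 20000., 0.5, 0.1, 0.3, 1.0, 0.5, 3e-9, 100.0e6)
    plt.plot(np.real(z), np.imag(z))
    plt.xlabel('I')
    plt.ylabel('Q')
    plt.show()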
def nonlinear_iq_for_fitter(x,fr,Qr,amp,phi,a,i0,q0,tau,f0,**keywords):
'''
    when using a fitter that can't handle complex numbers
    one needs to return both the real and imaginary components separately
    '''
    # tau is already a positional argument; a 'tau' keyword, if supplied, overrides it
    if ('tau' in keywords):
        tau = keywords['tau']
deltaf = (x - f0)
xg = (x-fr)/fr
yg = Qr*xg
y = np.zeros(x.shape[0])
for i in range(0,x.shape[0]):
#roots = np.roots((4.0,-4.0*yg[i],1.0,-(yg[i]+a)))
#where_real = np.where(np.imag(roots) == 0)
#y[i] = np.max(np.real(roots[where_real]))
y[i] = cardan(4.0,-4.0*yg[i],1.0,-(yg[i]+a))
z = (i0 +1.j*q0)* np.exp(-1.0j* 2* np.pi *deltaf*tau) * (1.0 - amp*np.exp(1.0j*phi)/ (1.0 +2.0*1.0j*y) + amp/2.*(np.exp(1.0j*phi) -1.0))
real_z = np.real(z)
imag_z = np.imag(z)
return np.hstack((real_z,imag_z))
def brute_force_linear_mag_fit(x,z,ranges,n_grid_points,error = None, plot = False,**keywords):
'''
x frequencies Hz
z complex or abs of s21
ranges is the ranges for each parameter i.e. np.asarray(([f_low,Qr_low,amp_low,phi_low,b0_low],[f_high,Qr_high,amp_high,phi_high,b0_high]))
n_grid_points how finely to sample each parameter space.
    this can be very slow for n>10;
    an increase of n by a factor of 2 will take 2**5 times longer
    to marginalize you must minimize over the unwanted axes of sum_dev
    i.e. for fr: np.min(np.min(np.min(np.min(fit['sum_dev'],axis = 4),axis = 3),axis = 2),axis = 1)
'''
if error is None:
error = np.ones(len(x))
fs = np.linspace(ranges[0][0],ranges[1][0],n_grid_points)
Qrs = np.linspace(ranges[0][1],ranges[1][1],n_grid_points)
amps = np.linspace(ranges[0][2],ranges[1][2],n_grid_points)
phis = np.linspace(ranges[0][3],ranges[1][3],n_grid_points)
b0s = np.linspace(ranges[0][4],ranges[1][4],n_grid_points)
evaluated_ranges = np.vstack((fs,Qrs,amps,phis,b0s))
a,b,c,d,e = np.meshgrid(fs,Qrs,amps,phis,b0s,indexing = "ij") #always index ij
evaluated = linear_mag(x,a,b,c,d,e)
data_values = np.reshape(np.abs(z)**2,(abs(z).shape[0],1,1,1,1,1))
error = np.reshape(error,(abs(z).shape[0],1,1,1,1,1))
sum_dev = np.sum(((np.sqrt(evaluated)-np.sqrt(data_values))**2/error**2),axis = 0) # comparing in magnitude space rather than magnitude squared
min_index = np.where(sum_dev == np.min(sum_dev))
index1 = min_index[0][0]
index2 = min_index[1][0]
index3 = min_index[2][0]
index4 = min_index[3][0]
index5 = min_index[4][0]
fit_values = np.asarray((fs[index1],Qrs[index2],amps[index3],phis[index4],b0s[index5]))
fit_values_names = ('f0','Qr','amp','phi','b0')
fit_result = linear_mag(x,fs[index1],Qrs[index2],amps[index3],phis[index4],b0s[index5])
marginalized_1d = np.zeros((5,n_grid_points))
marginalized_1d[0,:] = np.min(np.min(np.min(np.min(sum_dev,axis = 4),axis = 3),axis = 2),axis = 1)
marginalized_1d[1,:] = np.min(np.min(np.min(np.min(sum_dev,axis = 4),axis = 3),axis = 2),axis = 0)
marginalized_1d[2,:] = np.min(np.min(np.min(np.min(sum_dev,axis = 4),axis = 3),axis = 1),axis = 0)
marginalized_1d[3,:] = np.min(np.min(np.min(np.min(sum_dev,axis = 4),axis = 2),axis = 1),axis = 0)
marginalized_1d[4,:] = np.min(np.min(np.min(np.min(sum_dev,axis = 3),axis = 2),axis = 1),axis = 0)
marginalized_2d = np.zeros((5,5,n_grid_points,n_grid_points))
#0 _
#1 x _
#2 x x _
#3 x x x _
#4 x x x x _
# 0 1 2 3 4
marginalized_2d[0,1,:] = marginalized_2d[1,0,:] = np.min(np.min(np.min(sum_dev,axis = 4),axis = 3),axis = 2)
marginalized_2d[2,0,:] = marginalized_2d[0,2,:] = np.min(np.min(np.min(sum_dev,axis = 4),axis = 3),axis = 1)
marginalized_2d[2,1,:] = marginalized_2d[1,2,:] = np.min(np.min(np.min(sum_dev,axis = 4),axis = 3),axis = 0)
marginalized_2d[3,0,:] = marginalized_2d[0,3,:] = np.min(np.min(np.min(sum_dev,axis = 4),axis = 2),axis = 1)
marginalized_2d[3,1,:] = marginalized_2d[1,3,:] = np.min(np.min(np.min(sum_dev,axis = 4),axis = 2),axis = 0)
marginalized_2d[3,2,:] = marginalized_2d[2,3,:] = np.min(np.min(np.min(sum_dev,axis = 4),axis = 1),axis = 0)
marginalized_2d[4,0,:] = marginalized_2d[0,4,:] = np.min(np.min(np.min(sum_dev,axis = 3),axis = 2),axis = 1)
marginalized_2d[4,1,:] = marginalized_2d[1,4,:] = np.min(np.min(np.min(sum_dev,axis = 3),axis = 2),axis = 0)
marginalized_2d[4,2,:] = marginalized_2d[2,4,:] = np.min(np.min(np.min(sum_dev,axis = 3),axis = 1),axis = 0)
marginalized_2d[4,3,:] = marginalized_2d[3,4,:] = np.min(np.min(np.min(sum_dev,axis = 2),axis = 1),axis = 0)
if plot:
levels = [2.3,4.61] #delta chi squared two parameters 68 90 % confidence
fig_fit = plt.figure(-1)
axs = fig_fit.subplots(5, 5)
for i in range(0,5): # y starting from top
for j in range(0,5): #x starting from left
if i > j:
#plt.subplot(5,5,i+1+5*j)
#axs[i, j].set_aspect('equal', 'box')
extent = [evaluated_ranges[j,0],evaluated_ranges[j,n_grid_points-1],evaluated_ranges[i,0],evaluated_ranges[i,n_grid_points-1]]
axs[i,j].imshow(marginalized_2d[i,j,:]-np.min(sum_dev),extent =extent,origin = 'lower', cmap = 'jet')
axs[i,j].contour(evaluated_ranges[j],evaluated_ranges[i],marginalized_2d[i,j,:]-np.min(sum_dev),levels = levels,colors = 'white')
axs[i,j].set_ylim(evaluated_ranges[i,0],evaluated_ranges[i,n_grid_points-1])
axs[i,j].set_xlim(evaluated_ranges[j,0],evaluated_ranges[j,n_grid_points-1])
axs[i,j].set_aspect((evaluated_ranges[j,0]-evaluated_ranges[j,n_grid_points-1])/(evaluated_ranges[i,0]-evaluated_ranges[i,n_grid_points-1]))
if j == 0:
axs[i, j].set_ylabel(fit_values_names[i])
if i == 4:
axs[i, j].set_xlabel("\n"+fit_values_names[j])
if i<4:
axs[i,j].get_xaxis().set_ticks([])
if j>0:
axs[i,j].get_yaxis().set_ticks([])
elif i < j:
fig_fit.delaxes(axs[i,j])
for i in range(0,5):
#axes.subplot(5,5,i+1+5*i)
axs[i,i].plot(evaluated_ranges[i,:],marginalized_1d[i,:]-np.min(sum_dev))
axs[i,i].plot(evaluated_ranges[i,:],np.ones(len(evaluated_ranges[i,:]))*1.,color = 'k')
axs[i,i].plot(evaluated_ranges[i,:],np.ones(len(evaluated_ranges[i,:]))*2.7,color = 'k')
axs[i,i].yaxis.set_label_position("right")
axs[i,i].yaxis.tick_right()
axs[i,i].xaxis.set_label_position("top")
axs[i,i].xaxis.tick_top()
axs[i,i].set_xlabel(fit_values_names[i])
#axs[0,0].set_ylabel(fit_values_names[0])
#axs[4,4].set_xlabel(fit_values_names[4])
axs[4,4].xaxis.set_label_position("bottom")
axs[4,4].xaxis.tick_bottom()
#make a dictionary to return
fit_dict = {'fit_values': fit_values,'fit_values_names':fit_values_names, 'sum_dev': sum_dev, 'fit_result': fit_result,'marginalized_2d':marginalized_2d,'marginalized_1d':marginalized_1d,'evaluated_ranges':evaluated_ranges}#, 'x0':x0, 'z':z}
return fit_dict
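# A minimal usage sketch of the brute-force gridded fit above. The ranges are
# illustrative assumptions bracketing a resonator somewhere inside the sweep,
# not recommended defaults; keep len(x) modest because memory scales as
# len(x)*n_grid_points**5. Not part of the original module.
def _example_brute_force_linear_mag_fit(x, z):
    ranges = np.asarray(([np.min(x), 1000., 0.01, -np.pi, 0.],
                         [np.max(x), 200000., 1.0, np.pi, 2.*np.max(np.abs(z))**2]))
    fit_dict = brute_force_linear_mag_fit(x, z, ranges, n_grid_points=10, plot=True)
    print(fit_dict['fit_values_names'])
    print(fit_dict['fit_values'])
    return fit_dict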
# function for fitting an iq sweep with the above equation
def fit_nonlinear_iq(x,z,**keywords):
'''
    # keywords are
    # bounds ---- which is a 2d tuple of the low and high values to bound the problem by
    # x0 --- initial guess for the fit; this can be very important because the least-squares space over all the parameters is complex
    # amp_norm --- do a normalization for variable amplitude. useful when the transfer function of the cryostat is not flat
    # tau --- forces tau to a specific value
    # tau_guess --- fixes the guess for tau without having to specify all of x0
'''
if ('tau' in keywords):
use_given_tau = True
tau = keywords['tau']
else:
use_given_tau = False
if ('bounds' in keywords):
bounds = keywords['bounds']
else:
#define default bounds
print("default bounds used")
bounds = ([np.min(x),50,.01,-np.pi,0,-np.inf,-np.inf,0,np.min(x)],[np.max(x),200000,1,np.pi,5,np.inf,np.inf,1*10**-6,np.max(x)])
if ('x0' in keywords):
x0 = keywords['x0']
else:
        #define default initial guess
print("default initial guess used")
#fr_guess = x[np.argmin(np.abs(z))]
#x0 = [fr_guess,10000.,0.5,0,0,np.mean(np.real(z)),np.mean(np.imag(z)),3*10**-7,fr_guess]
x0 = guess_x0_iq_nonlinear(x,z,verbose = True)
print(x0)
if ('fr_guess' in keywords):
x0[0] = keywords['fr_guess']
if ('tau_guess' in keywords):
x0[7] = keywords['tau_guess']
#Amplitude normalization?
do_amp_norm = 0
if ('amp_norm' in keywords):
amp_norm = keywords['amp_norm']
if amp_norm == True:
do_amp_norm = 1
elif amp_norm == False:
do_amp_norm = 0
else:
print("please specify amp_norm as True or False")
if do_amp_norm == 1:
z = amplitude_normalization(x,z)
z_stacked = np.hstack((np.real(z),np.imag(z)))
if use_given_tau == True:
del bounds[0][7]
del bounds[1][7]
del x0[7]
fit = optimization.curve_fit(lambda x_lamb,a,b,c,d,e,f,g,h: nonlinear_iq_for_fitter(x_lamb,a,b,c,d,e,f,g,tau,h), x, z_stacked,x0,bounds = bounds)
fit_result = nonlinear_iq(x,fit[0][0],fit[0][1],fit[0][2],fit[0][3],fit[0][4],fit[0][5],fit[0][6],tau,fit[0][7])
x0_result = nonlinear_iq(x,x0[0],x0[1],x0[2],x0[3],x0[4],x0[5],x0[6],tau,x0[7])
else:
fit = optimization.curve_fit(nonlinear_iq_for_fitter, x, z_stacked,x0,bounds = bounds)
fit_result = nonlinear_iq(x,fit[0][0],fit[0][1],fit[0][2],fit[0][3],fit[0][4],fit[0][5],fit[0][6],fit[0][7],fit[0][8])
x0_result = nonlinear_iq(x,x0[0],x0[1],x0[2],x0[3],x0[4],x0[5],x0[6],x0[7],x0[8])
#make a dictionary to return
fit_dict = {'fit': fit, 'fit_result': fit_result, 'x0_result': x0_result, 'x0':x0, 'z':z}
return fit_dict
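# An end-to-end sketch (all resonator parameters are illustrative assumptions,
# not part of the original module): generate a fake IQ sweep from the model and
# then try to recover the parameters with the fitter, relying on its default
# guess and bounds.
def _example_fit_nonlinear_iq():
    f = np.linspace(99.95e6, 100.05e6, 2001)  # Hz
    z = nonlinear_iq(f, 100.0e6, 20000., 0.5, 0.1, 0.3, 1.0, 0.5, 3e-9, 100.0e6)
    fit_dict = fit_nonlinear_iq(f, z)
    # fit_dict['fit'][0] holds the best-fit [fr, Qr, amp, phi, a, i0, q0, tau, f0]
    print(fit_dict['fit'][0])
    return fit_dict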
def fit_nonlinear_iq_sep(fine_x,fine_z,gain_x,gain_z,**keywords):
'''
    # same as the above function but takes fine and gain scans separately
    # keywords are
    # bounds ---- which is a 2d tuple of the low and high values to bound the problem by
    # x0 --- initial guess for the fit; this can be very important because the least-squares space over all the parameters is complex
    # amp_norm --- do a normalization for variable amplitude. useful when the transfer function of the cryostat is not flat
'''
if ('bounds' in keywords):
bounds = keywords['bounds']
else:
#define default bounds
print("default bounds used")
bounds = ([np.min(fine_x),500.,.01,-np.pi,0,-np.inf,-np.inf,1*10**-9,np.min(fine_x)],[np.max(fine_x),1000000,1,np.pi,5,np.inf,np.inf,1*10**-6,np.max(fine_x)])
if ('x0' in keywords):
x0 = keywords['x0']
else:
        #define default initial guess
print("default initial guess used")
#fr_guess = x[np.argmin(np.abs(z))]
#x0 = [fr_guess,10000.,0.5,0,0,np.mean(np.real(z)),np.mean(np.imag(z)),3*10**-7,fr_guess]
x0 = guess_x0_iq_nonlinear_sep(fine_x,fine_z,gain_x,gain_z)
#print(x0)
#Amplitude normalization?
do_amp_norm = 0
if ('amp_norm' in keywords):
amp_norm = keywords['amp_norm']
if amp_norm == True:
do_amp_norm = 1
elif amp_norm == False:
do_amp_norm = 0
else:
print("please specify amp_norm as True or False")
if (('fine_z_err' in keywords) & ('gain_z_err' in keywords)):
use_err = True
fine_z_err = keywords['fine_z_err']
gain_z_err = keywords['gain_z_err']
else:
use_err = False
x = np.hstack((fine_x,gain_x))
z = np.hstack((fine_z,gain_z))
if use_err:
z_err = np.hstack((fine_z_err,gain_z_err))
if do_amp_norm == 1:
z = amplitude_normalization(x,z)
z_stacked = np.hstack((np.real(z),np.imag(z)))
if use_err:
z_err_stacked = np.hstack((np.real(z_err),np.imag(z_err)))
fit = optimization.curve_fit(nonlinear_iq_for_fitter, x, z_stacked,x0,sigma = z_err_stacked,bounds = bounds)
else:
fit = optimization.curve_fit(nonlinear_iq_for_fitter, x, z_stacked,x0,bounds = bounds)
fit_result = nonlinear_iq(x,fit[0][0],fit[0][1],fit[0][2],fit[0][3],fit[0][4],fit[0][5],fit[0][6],fit[0][7],fit[0][8])
x0_result = nonlinear_iq(x,x0[0],x0[1],x0[2],x0[3],x0[4],x0[5],x0[6],x0[7],x0[8])
if use_err:
#only do it for fine data
#red_chi_sqr = np.sum(z_stacked-np.hstack((np.real(fit_result),np.imag(fit_result))))**2/z_err_stacked**2)/(len(z_stacked)-8.)
#only do it for fine data
red_chi_sqr = np.sum((np.hstack((np.real(fine_z),np.imag(fine_z)))-np.hstack((np.real(fit_result[0:len(fine_z)]),np.imag(fit_result[0:len(fine_z)]))))**2/np.hstack((np.real(fine_z_err),np.imag(fine_z_err)))**2)/(len(fine_z)*2.-8.)
#make a dictionary to return
if use_err:
fit_dict = {'fit': fit, 'fit_result': fit_result, 'x0_result': x0_result, 'x0':x0, 'z':z,'fit_freqs':x,'red_chi_sqr':red_chi_sqr}
else:
fit_dict = {'fit': fit, 'fit_result': fit_result, 'x0_result': x0_result, 'x0':x0, 'z':z,'fit_freqs':x}
return fit_dict
# same function but double fits so that it can get error and a proper covariance matrix out
def fit_nonlinear_iq_with_err(x,z,**keywords):
'''
    # keywords are
    # bounds ---- which is a 2d tuple of the low and high values to bound the problem by
    # x0 --- initial guess for the fit; this can be very important because the least-squares space over all the parameters is complex
    # amp_norm --- do a normalization for variable amplitude. useful when the transfer function of the cryostat is not flat
'''
if ('bounds' in keywords):
bounds = keywords['bounds']
else:
#define default bounds
print("default bounds used")
bounds = ([np.min(x),2000,.01,-np.pi,0,-5,-5,1*10**-9,np.min(x)],[np.max(x),200000,1,np.pi,5,5,5,1*10**-6,np.max(x)])
if ('x0' in keywords):
x0 = keywords['x0']
else:
        #define default initial guess
print("default initial guess used")
fr_guess = x[np.argmin(np.abs(z))]
x0 = guess_x0_iq_nonlinear(x,z)
#Amplitude normalization?
do_amp_norm = 0
if ('amp_norm' in keywords):
amp_norm = keywords['amp_norm']
if amp_norm == True:
do_amp_norm = 1
elif amp_norm == False:
do_amp_norm = 0
else:
print("please specify amp_norm as True or False")
if do_amp_norm == 1:
z = amplitude_normalization(x,z)
z_stacked = np.hstack((np.real(z),np.imag(z)))
fit = optimization.curve_fit(nonlinear_iq_for_fitter, x, z_stacked,x0,bounds = bounds)
fit_result = nonlinear_iq(x,fit[0][0],fit[0][1],fit[0][2],fit[0][3],fit[0][4],fit[0][5],fit[0][6],fit[0][7],fit[0][8])
fit_result_stacked = nonlinear_iq_for_fitter(x,fit[0][0],fit[0][1],fit[0][2],fit[0][3],fit[0][4],fit[0][5],fit[0][6],fit[0][7],fit[0][8])
x0_result = nonlinear_iq(x,x0[0],x0[1],x0[2],x0[3],x0[4],x0[5],x0[6],x0[7],x0[8])
# get error
var = np.sum((z_stacked-fit_result_stacked)**2)/(z_stacked.shape[0] - 1)
err = np.ones(z_stacked.shape[0])*np.sqrt(var)
# refit
fit = optimization.curve_fit(nonlinear_iq_for_fitter, x, z_stacked,x0,err,bounds = bounds)
fit_result = nonlinear_iq(x,fit[0][0],fit[0][1],fit[0][2],fit[0][3],fit[0][4],fit[0][5],fit[0][6],fit[0][7],fit[0][8])
x0_result = nonlinear_iq(x,x0[0],x0[1],x0[2],x0[3],x0[4],x0[5],x0[6],x0[7],x0[8])
#make a dictionary to return
fit_dict = {'fit': fit, 'fit_result': fit_result, 'x0_result': x0_result, 'x0':x0, 'z':z}
return fit_dict
# function for fitting an iq sweep with the above equation
def fit_nonlinear_mag(x,z,**keywords):
'''
    # keywords are
    # bounds ---- which is a 2d tuple of the low and high values to bound the problem by
    # x0 --- initial guess for the fit; this can be very important because the least-squares space over all the parameters is complex
    # amp_norm --- do a normalization for variable amplitude. useful when the transfer function of the cryostat is not flat
'''
if ('bounds' in keywords):
bounds = keywords['bounds']
else:
#define default bounds
print("default bounds used")
bounds = ([np.min(x),100,.01,-np.pi,0,-np.inf,-np.inf,np.min(x)],[np.max(x),200000,1,np.pi,5,np.inf,np.inf,np.max(x)])
if ('x0' in keywords):
x0 = keywords['x0']
else:
        #define default initial guess
print("default initial guess used")
fr_guess = x[np.argmin(np.abs(z))]
#x0 = [fr_guess,10000.,0.5,0,0,np.abs(z[0])**2,np.abs(z[0])**2,fr_guess]
x0 = guess_x0_mag_nonlinear(x,z,verbose = True)
fit = optimization.curve_fit(nonlinear_mag, x, np.abs(z)**2 ,x0,bounds = bounds)
fit_result = nonlinear_mag(x,fit[0][0],fit[0][1],fit[0][2],fit[0][3],fit[0][4],fit[0][5],fit[0][6],fit[0][7])
x0_result = nonlinear_mag(x,x0[0],x0[1],x0[2],x0[3],x0[4],x0[5],x0[6],x0[7])
#make a dictionary to return
fit_dict = {'fit': fit, 'fit_result': fit_result, 'x0_result': x0_result, 'x0':x0, 'z':z}
return fit_dict
def fit_nonlinear_mag_sep(fine_x,fine_z,gain_x,gain_z,**keywords):
'''
    # same as above but fine and gain scans are provided separately
    # keywords are
    # bounds ---- which is a 2d tuple of the low and high values to bound the problem by
    # x0 --- initial guess for the fit; this can be very important because the least-squares space over all the parameters is complex
    # amp_norm --- do a normalization for variable amplitude. useful when the transfer function of the cryostat is not flat
'''
if ('bounds' in keywords):
bounds = keywords['bounds']
else:
#define default bounds
print("default bounds used")
bounds = ([np.min(fine_x),100,.01,-np.pi,0,-np.inf,-np.inf,np.min(fine_x)],[np.max(fine_x),1000000,100,np.pi,5,np.inf,np.inf,np.max(fine_x)])
if ('x0' in keywords):
x0 = keywords['x0']
else:
        #define default initial guess
print("default initial guess used")
x0 = guess_x0_mag_nonlinear_sep(fine_x,fine_z,gain_x,gain_z)
if (('fine_z_err' in keywords) & ('gain_z_err' in keywords)):
use_err = True
fine_z_err = keywords['fine_z_err']
gain_z_err = keywords['gain_z_err']
else:
use_err = False
#stack the scans for curvefit
x = np.hstack((fine_x,gain_x))
z = np.hstack((fine_z,gain_z))
if use_err:
z_err = np.hstack((fine_z_err,gain_z_err))
        z_err = np.sqrt(4*np.real(z_err)**2*np.real(z)**2+4*np.imag(z_err)**2*np.imag(z)**2) # propagation of errors; the cross term is left out
fit = optimization.curve_fit(nonlinear_mag, x, np.abs(z)**2 ,x0,sigma = z_err,bounds = bounds)
else:
fit = optimization.curve_fit(nonlinear_mag, x, np.abs(z)**2 ,x0,bounds = bounds)
fit_result = nonlinear_mag(x,fit[0][0],fit[0][1],fit[0][2],fit[0][3],fit[0][4],fit[0][5],fit[0][6],fit[0][7])
x0_result = nonlinear_mag(x,x0[0],x0[1],x0[2],x0[3],x0[4],x0[5],x0[6],x0[7])
#compute reduced chi squared
print(len(z))
if use_err:
#red_chi_sqr = np.sum((np.abs(z)**2-fit_result)**2/z_err**2)/(len(z)-7.)
# only use fine scan for reduced chi squared.
red_chi_sqr = np.sum((np.abs(fine_z)**2-fit_result[0:len(fine_z)])**2/z_err[0:len(fine_z)]**2)/(len(fine_z)-7.)
#make a dictionary to return
if use_err:
fit_dict = {'fit': fit, 'fit_result': fit_result, 'x0_result': x0_result, 'x0':x0, 'z':z,'fit_freqs':x,'red_chi_sqr':red_chi_sqr}
else:
fit_dict = {'fit': fit, 'fit_result': fit_result, 'x0_result': x0_result, 'x0':x0, 'z':z,'fit_freqs':x}
return fit_dict
def amplitude_normalization(x,z):
'''
    # normalize the amplitude variation; requires a gain scan
    # flag frequencies to use in amplitude normalization
'''
index_use = np.where(np.abs(x-np.median(x))>100000) #100kHz away from resonator
poly = np.polyfit(x[index_use],np.abs(z[index_use]),2)
poly_func = np.poly1d(poly)
normalized_data = z/poly_func(x)*np.median(np.abs(z[index_use]))
return normalized_data
def amplitude_normalization_sep(gain_x,gain_z,fine_x,fine_z,stream_x,stream_z):
'''
    # normalize the amplitude variation; requires a gain scan
    # uses the gain scan to normalize; does not use the fine scan
    # flag frequencies to use in amplitude normalization
'''
index_use = np.where(np.abs(gain_x-np.median(gain_x))>100000) #100kHz away from resonator
poly = np.polyfit(gain_x[index_use],np.abs(gain_z[index_use]),2)
poly_func = np.poly1d(poly)
poly_data = poly_func(gain_x)
normalized_gain = gain_z/poly_data*np.median(np.abs(gain_z[index_use]))
normalized_fine = fine_z/poly_func(fine_x)*np.median(np.abs(gain_z[index_use]))
normalized_stream = stream_z/poly_func(stream_x)*np.median(np.abs(gain_z[index_use]))
amp_norm_dict = {'normalized_gain':normalized_gain,
'normalized_fine':normalized_fine,
'normalized_stream':normalized_stream,
'poly_data':poly_data}
return amp_norm_dict
def guess_x0_iq_nonlinear(x,z,verbose = False):
'''
    # this is less robust than guess_x0_iq_nonlinear_sep
    # below; it is recommended to use that instead
    # make sure data is sorted from low to high frequency
'''
sort_index = np.argsort(x)
x = x[sort_index]
z = z[sort_index]
#extract just fine data
df = np.abs(x-np.roll(x,1))
fine_df = np.min(df[np.where(df != 0)])
fine_z_index = np.where(df<fine_df*1.1)
fine_z = z[fine_z_index]
fine_x = x[fine_z_index]
#extract the gain scan
gain_z_index = np.where(df>fine_df*1.1)
gain_z = z[gain_z_index]
gain_x = x[gain_z_index]
gain_phase = np.arctan2(np.real(gain_z),np.imag(gain_z))
#guess f0
fr_guess_index = np.argmin(np.abs(z))
#fr_guess = x[fr_guess_index]
fr_guess_index_fine = np.argmin(np.abs(fine_z))
# below breaks if there is not a right and left side in the fine scan
if fr_guess_index_fine == 0:
fr_guess_index_fine = len(fine_x)//2
elif fr_guess_index_fine == (len(fine_x)-1):
fr_guess_index_fine = len(fine_x)//2
fr_guess = fine_x[fr_guess_index_fine]
#guess Q
mag_max = np.max(np.abs(fine_z)**2)
mag_min = np.min(np.abs(fine_z)**2)
mag_3dB = (mag_max+mag_min)/2.
half_distance = np.abs(fine_z)**2-mag_3dB
right = half_distance[fr_guess_index_fine:-1]
left = half_distance[0:fr_guess_index_fine]
right_index = np.argmin(np.abs(right))+fr_guess_index_fine
left_index = np.argmin(np.abs(left))
Q_guess_Hz = fine_x[right_index]-fine_x[left_index]
Q_guess = fr_guess/Q_guess_Hz
#guess amp
d = np.max(20*np.log10(np.abs(z)))-np.min(20*np.log10(np.abs(z)))
    amp_guess = 0.0037848547850284574+0.11096782437821565*d-0.0055208783469291173*d**2+0.00013900471000261687*d**3-1.3994861426891861e-06*d**4 # polynomial fit to amp versus depth
#guess impedance rotation phi
phi_guess = 0
#guess non-linearity parameter
#might be able to guess this by ratioing the distance between min and max distance between iq points in fine sweep
a_guess = 0
#i0 and iq guess
    if np.max(np.abs(fine_z))==np.max(np.abs(z)): #if the resonator has an impedance mismatch rotation that makes the fine scan greater than the cable delay
i0_guess = np.real(fine_z[np.argmax(np.abs(fine_z))])
q0_guess = np.imag(fine_z[np.argmax(np.abs(fine_z))])
else:
i0_guess = (np.real(fine_z[0])+np.real(fine_z[-1]))/2.
q0_guess = (np.imag(fine_z[0])+np.imag(fine_z[-1]))/2.
    #cable delay guess tau
#y = mx +b
#m = (y2 - y1)/(x2-x1)
#b = y-mx
if len(gain_z)>1: #is there a gain scan?
m = (gain_phase - np.roll(gain_phase,1))/(gain_x-np.roll(gain_x,1))
b = gain_phase -m*gain_x
m_best = np.median(m[~np.isnan(m)])
tau_guess = m_best/(2*np.pi)
else:
tau_guess = 3*10**-9
if verbose == True:
print("fr guess = %.2f MHz" %(fr_guess/10**6))
print("Q guess = %.2f kHz, %.1f" % ((Q_guess_Hz/10**3),Q_guess))
print("amp guess = %.2f" %amp_guess)
print("i0 guess = %.2f" %i0_guess)
print("q0 guess = %.2f" %q0_guess)
print("tau guess = %.2f x 10^-7" %(tau_guess/10**-7))
x0 = [fr_guess,Q_guess,amp_guess,phi_guess,a_guess,i0_guess,q0_guess,tau_guess,fr_guess]
return x0
def guess_x0_mag_nonlinear(x,z,verbose = False):
'''
    # this is less robust than guess_x0_mag_nonlinear_sep
    # below; it is recommended to use that instead
    # make sure data is sorted from low to high frequency
'''
sort_index = np.argsort(x)
x = x[sort_index]
z = z[sort_index]
#extract just fine data
#this will probably break if there is no fine scan
df = np.abs(x-np.roll(x,1))
    fine_df = np.min(df[np.where(df != 0)])
import os, pickle, re, csv
from tqdm import tqdm
import numpy as np
import torch.utils.data
from torchvision.datasets import ImageFolder
import torchvision.transforms as transforms
import PIL
from collections import Counter
import nltk
import json
class Vocabulary(object):
def __init__(self,
vocab_threshold,
vocab_file,
annotations_file,
vocab_from_file=False,
unk_word="[UNK]",
pad_word="[PAD]",
start_word="[BOS]",
end_word="[EOS]"):
"""Initialize the vocabulary.
Args:
vocab_threshold: Minimum word count threshold.
vocab_file: File containing the vocabulary.
start_word: Special word denoting sentence start.
end_word: Special word denoting sentence end.
unk_word: Special word denoting unknown words.
annotations_file: Path for train annotation file.
          vocab_from_file: If False, create vocab from scratch & override any existing vocab_file.
                           If True, load vocab from the existing vocab_file, if it exists.
"""
self.vocab_threshold = vocab_threshold
self.vocab_file = vocab_file
self.unk_word = unk_word
self.pad_word = pad_word
self.start_word=start_word
self.end_word = end_word
self.annotations_file = annotations_file
self.vocab_from_file = vocab_from_file
self.get_vocab()
def get_vocab(self):
"""Load the vocabulary from file OR build the vocabulary from scratch."""
if os.path.exists(self.vocab_file) & self.vocab_from_file:
print('Reading vocabulary from %s file!' % self.vocab_file)
with open(self.vocab_file, 'rb') as f:
vocab = pickle.load(f)
self.word2idx = vocab['word2idx']
self.idx2word = vocab['idx2word']
print('Vocabulary successfully loaded from %s file!' % self.vocab_file)
else:
print("Building voabulary from scratch")
self.build_vocab()
with open(self.vocab_file, 'wb') as f:
pickle.dump({'word2idx': self.word2idx, 'idx2word': self.idx2word}, f)
def build_vocab(self):
"""Populate the dictionaries for converting tokens to integers (and vice-versa)."""
self.init_vocab()
self.add_word(self.unk_word)
self.add_word(self.pad_word)
self.add_captions()
def init_vocab(self):
"""Initialize the dictionaries for converting tokens to integers (and vice-versa)."""
self.word2idx = {}
self.idx2word = {}
self.idx = 0
def add_word(self, word):
"""Add a token to the vocabulary."""
        if word not in self.word2idx:
self.word2idx[word] = self.idx
self.idx2word[self.idx] = word
self.idx += 1
def add_captions(self):
"""Loop over training captions and add all tokens to the vocabulary that meet or exceed the threshold."""
counter = Counter()
with open(self.annotations_file, 'r') as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
print("Tokenizing captions")
for i, row in tqdm(enumerate(csv_reader)):
_, _, caption = row
tokens = nltk.tokenize.word_tokenize(caption.lower())
counter.update(tokens)
words = [word for word, cnt in counter.items() if cnt >= self.vocab_threshold]
for i, word in enumerate(words):
self.add_word(word)
def load_glove(self, filename):
""" returns { word (str) : vector_embedding (torch.FloatTensor) }
"""
glove = {}
with open(filename) as f:
for line in tqdm(f.readlines()):
values = line.strip("\n").split(" ") # space separator
word = values[0]
vector = np.asarray([float(e) for e in values[1:]])
glove[word] = vector
return glove
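    # Note on the expected input (an assumption based on the parser above):
    # each line of a raw GloVe text file is a token followed by glove_dim
    # space-separated floats, e.g.
    #   the 0.418 0.24968 -0.41242 0.1217 ...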
def extract_glove(self, raw_glove_path, vocab_glove_path, glove_dim=300):
if os.path.exists(vocab_glove_path):
print("Pre-extracted embedding matrix exists at %s" % vocab_glove_path)
else:
# Make glove embedding.
print("Loading glove embedding at path : {}.\n".format(raw_glove_path))
glove_full = self.load_glove(raw_glove_path)
print("Glove Loaded, building word2idx, idx2word mapping.\n")
idx2word = {v: k for k, v in self.word2idx.items()}
glove_matrix = np.zeros([len(self.word2idx), glove_dim])
glove_keys = glove_full.keys()
for i in tqdm(range(len(idx2word))):
w = idx2word[i]
                w_embed = glove_full[w] if w in glove_keys else np.random.randn(glove_dim)
import random
import unittest
from unittest.mock import MagicMock, patch, ANY
import numpy as np
import torch
from torch.autograd import Variable
import neurox.interpretation.linear_probe as linear_probe
class TestL1Regularization(unittest.TestCase):
def test_l1_penalty(self):
"L1 Regularization"
tmp = np.random.random((5,5))
expected_penalty = np.sum(np.abs(tmp))
penalty = linear_probe.l1_penalty(Variable(torch.Tensor(tmp)))
self.assertIsInstance(penalty, Variable)
self.assertAlmostEqual(
expected_penalty, penalty.data.item(), places=3
)
class TestL2Regularization(unittest.TestCase):
def test_l2_penalty(self):
"L2 Regularization"
tmp = np.random.random((5,5))
expected_penalty = np.sqrt(np.sum(np.power(tmp, 2)))
penalty = linear_probe.l2_penalty(Variable(torch.Tensor(tmp)))
self.assertIsInstance(penalty, Variable)
self.assertAlmostEqual(
expected_penalty, penalty.data.item(), places=3
)
class TestLinearProbeClass(unittest.TestCase):
def test_linear_probe_init(self):
"Linear Probe Initialization"
probe = linear_probe.LinearProbe(50, 5)
self.assertEqual(probe.linear.in_features, 50)
self.assertEqual(probe.linear.out_features, 5)
@patch('torch.nn.Linear')
def test_linear_probe_forward(self, linear_mock):
"Linear Probe Forward"
probe = linear_probe.LinearProbe(50, 5)
probe.forward(torch.rand((50, 1)))
linear_mock.assert_called_once()
class TestTrainProbe(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.num_examples = 10
cls.num_features = 100
cls.num_classes = 3
cls.X = np.random.random((cls.num_examples, cls.num_features)).astype(np.float32)
        # Ensure y has all class labels at least once for classification
cls.y_classification = np.concatenate((
np.arange(cls.num_classes),
np.random.randint(0, cls.num_classes, size=cls.num_examples - cls.num_classes),
))
cls.y_regression = np.random.random((cls.num_examples)).astype(np.float32)
@patch("torch.optim.Adam.step")
def test_train_classification_probe(self, optimizer_step_fn):
"Basic classification probe training test"
num_epochs = 5
linear_probe._train_probe(self.X, self.y_classification, "classification", num_epochs=num_epochs)
self.assertEqual(optimizer_step_fn.call_count, num_epochs)
def test_train_classification_probe_one_class(self):
"Classification probe with one class test"
y = np.zeros((self.num_examples,))
self.assertRaises(ValueError, linear_probe._train_probe, self.X, y, "classification")
def test_train_probe_invalid_type(self):
"Train probe of invalid type"
self.assertRaises(ValueError, linear_probe._train_probe, self.X, self.y_classification, "invalid-type")
@patch("torch.optim.Adam.step")
def test_train_regression_probe(self, optimizer_step_fn):
"Basic regression probe training test"
num_epochs = 12
linear_probe._train_probe(self.X, self.y_regression, "regression", num_epochs=num_epochs)
self.assertEqual(optimizer_step_fn.call_count, num_epochs)
def test_train_probe_no_regularization(self):
"Probe training with wrong regularization test"
self.assertRaises(
ValueError, linear_probe._train_probe, self.X, self.y_classification, "classification", lambda_l1=None
)
@patch("torch.optim.Adam.step")
def test_train_probe_float16(self, optimizer_step_fn):
"Basic probe training test. Same test as before but different data dtype"
X = np.random.random((self.num_examples, self.num_features)).astype(np.float16)
        # Ensure y has all three class labels at least once
y = np.concatenate((
np.arange(self.num_classes),
np.random.randint(0, self.num_classes, size=self.num_examples - self.num_classes),
))
linear_probe._train_probe(X, y, "classification")
self.assertEqual(optimizer_step_fn.call_count, 10)
class TestEvaluateProbe(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.num_examples = 10
cls.num_features = 100
cls.num_classes = 3
X = np.random.random((cls.num_examples, cls.num_features)).astype(np.float32)
        # Ensure y has all three class labels at least once
y_classification = np.concatenate((
np.arange(cls.num_classes),
np.random.randint(0, cls.num_classes, size=cls.num_examples - cls.num_classes),
))
y_regression = np.random.random((cls.num_examples,)).astype(np.float32)
cls.trained_probe = linear_probe._train_probe(X, y_classification, "classification")
cls.trained_regression_probe = linear_probe._train_probe(X, y_regression, "regression")
def test_evaluate_classification_probe(self):
"Basic classification probe evaluation"
scores = linear_probe.evaluate_probe(
self.trained_probe,
np.random.random((self.num_examples, self.num_features)).astype(np.float32),
np.random.randint(0, self.num_classes, size=self.num_examples),
)
self.assertIn("__OVERALL__", scores)
def test_evaluate_regression_probe(self):
"Basic regresson probe evaluation"
scores = linear_probe.evaluate_probe(
self.trained_regression_probe,
np.random.random((self.num_examples, self.num_features)).astype(np.float32),
np.random.random((self.num_examples,)),
)
self.assertIn("__OVERALL__", scores)
def test_evaluate_probe_with_class_labels(self):
"Evaluation with class labels"
scores = linear_probe.evaluate_probe(
self.trained_probe,
np.random.random((self.num_examples, self.num_features)).astype(np.float32),
np.random.randint(0, self.num_classes, size=self.num_examples),
idx_to_class = {0: "class0", 1: "class1", 2: "class2"}
)
self.assertIn("__OVERALL__", scores)
self.assertIn("class0", scores)
self.assertIn("class1", scores)
def test_evaluate_probe_with_class_labels_float16(self):
"Evaluation with class labels. Same test as before but different data dtype"
scores = linear_probe.evaluate_probe(
self.trained_probe,
np.random.random((self.num_examples, self.num_features)).astype(np.float16),
np.random.randint(0, self.num_classes, size=self.num_examples),
idx_to_class = {0: "class0", 1: "class1", 2: "class2"}
)
self.assertIn("__OVERALL__", scores)
self.assertIn("class0", scores)
self.assertIn("class1", scores)
def test_evaluate_probe_with_return_predictions(self):
"Probe evaluation with returned predictions"
y_true = np.random.randint(0, self.num_classes, size=self.num_examples)
scores, predictions = linear_probe.evaluate_probe(
self.trained_probe,
np.random.random((self.num_examples, self.num_features)).astype(np.float32),
y_true,
return_predictions=True
)
self.assertIn("__OVERALL__", scores)
self.assertIsInstance(predictions, list)
self.assertEqual(len(predictions), self.num_examples)
self.assertIsInstance(predictions[0], tuple)
# Source words should be from 0 to num_examples since no source_tokens
# were given
self.assertListEqual([p[0] for p in predictions], list(range(self.num_examples)))
self.assertNotEqual([p[1] for p in predictions], list(y_true))
class TestGetTopNeurons(unittest.TestCase):
@patch("neurox.interpretation.linear_probe.LinearProbe")
def test_get_top_neurons(self, probe_mock):
"Basic get top neurons test"
# Create a weight matrix with 2 samples and 3 neurons
# In the first sample, more than 50% of the weight mass is covered by
# the first neuron.
# In the second sample, more than 50% of the weight mass is covered by
# the second neuron
mock_weight_matrix = [[5,1,1], [1,10,1]]
probe_mock.parameters.return_value = [torch.Tensor(mock_weight_matrix)]
top_neurons, classwise_top_neurons = linear_probe.get_top_neurons(
probe_mock,
0.5,
{'class0': 0, 'class1': 1}
)
np.testing.assert_array_equal(top_neurons, [0, 1])
np.testing.assert_array_equal(classwise_top_neurons['class0'], [0])
np.testing.assert_array_equal(classwise_top_neurons['class1'], [1])
@patch("neurox.interpretation.linear_probe.LinearProbe")
def test_get_top_neurons_all_selection(self, probe_mock):
"Get top neurons with all selection test"
# Create a weight matrix with 2 samples and 3 neurons
mock_weight_matrix = [[10, 9, 8], [10, 2, 1]]
probe_mock.parameters.return_value = [torch.Tensor(mock_weight_matrix)]
top_neurons, classwise_top_neurons = linear_probe.get_top_neurons(
probe_mock,
1.1, # Percentage is higher than total mass, all neurons will be top neurons
{'class0': 0, 'class1': 1}
)
np.testing.assert_array_equal(top_neurons, [0, 1, 2])
np.testing.assert_array_equal(classwise_top_neurons['class0'], [0, 1, 2])
np.testing.assert_array_equal(classwise_top_neurons['class1'], [0, 1, 2])
class TestGetTopNeuronsHardThreshold(unittest.TestCase):
@patch("neurox.interpretation.linear_probe.LinearProbe")
def test_get_top_neurons_hard_threshold(self, probe_mock):
"Basic get top neurons with hard threshold test"
# Create a weight matrix with 2 samples and 4 neurons
# In the first sample, only the first neuron is higher than
# max_weight (5) / threshold (2) = 2.5
# In the second sample, the second and fourth neuron are higher than
# max_weight(10) / threshold (2) = 5
mock_weight_matrix = [[5,1,2,1], [1,10,1,6]]
probe_mock.parameters.return_value = [torch.Tensor(mock_weight_matrix)]
top_neurons, classwise_top_neurons = linear_probe.get_top_neurons_hard_threshold(
probe_mock,
2,
{'class0': 0, 'class1': 1}
)
np.testing.assert_array_equal(top_neurons, [0, 1, 3])
np.testing.assert_array_equal(classwise_top_neurons['class0'], [0])
np.testing.assert_array_equal(classwise_top_neurons['class1'], [1, 3])
class TestGetBottomNeurons(unittest.TestCase):
@patch("neurox.interpretation.linear_probe.LinearProbe")
def test_get_bottom_neurons(self, probe_mock):
"Basic get bottom neurons test"
# Create a weight matrix with 2 samples and 3 neurons
# In the first sample, the third neuron alone covers the bottom 10% of
# the total weight mass (5+4+1=10)
# In the second sample, the second and third neuron cover the bottom 10%
# of the total weight mass (10+1+1=12)
mock_weight_matrix = [[5,4,1], [10,1,1]]
probe_mock.parameters.return_value = [torch.Tensor(mock_weight_matrix)]
bottom_neurons, classwise_bottom_neurons = linear_probe.get_bottom_neurons(
probe_mock,
0.1,
{'class0': 0, 'class1': 1}
)
np.testing.assert_array_equal(bottom_neurons, [1, 2])
np.testing.assert_array_equal(classwise_bottom_neurons['class0'], [2])
np.testing.assert_array_equal(classwise_bottom_neurons['class1'], [1, 2])
@patch("neurox.interpretation.linear_probe.LinearProbe")
def test_get_bottom_neurons_all_selection(self, probe_mock):
"Get bottom neurons with all selection test"
# Create a weight matrix with 2 samples and 3 neurons
mock_weight_matrix = [[8, 9, 10], [1,2,10]]
probe_mock.parameters.return_value = [torch.Tensor(mock_weight_matrix)]
bottom_neurons, classwise_bottom_neurons = linear_probe.get_bottom_neurons(
probe_mock,
1.1, # Percentage is higher than total mass, all neurons will be bottom neurons
{'class0': 0, 'class1': 1}
)
# All neurons must be bottom neurons
np.testing.assert_array_equal(bottom_neurons, [0, 1, 2])
np.testing.assert_array_equal(classwise_bottom_neurons['class0'], [0, 1, 2])
        np.testing.assert_array_equal(classwise_bottom_neurons['class1'], [0, 1, 2])
import os
import sys
from collections import namedtuple
from itertools import product
import numpy as np
from numpy.matlib import repmat
import pandas as pd
from scipy.stats import t
from ctg.core.config import config
import ctg.core.calculate_abundance as calculate_abundance
# import warnings
# from pandas.core.common import SettingWithCopyWarning
# warnings.simplefilter('error', SettingWithCopyWarning)
'''
The file format below is hardcoded for Amanda's files. This can be changed
later by passing a global config object for the run (or just additional
function arguments).
Currently, the time points should be of the format
construct_id probe_a_id probe_b_id target_a_id target_b_id {NAME}_T{DAYS}_{REP}
In addition, reps are defined to be the first set of levels in the loading functions
'''
def ma_cov(x,y, axis=0):
"""Calculates the covariance from masked numpy arrays"""
return np.ma.mean(x*y, axis=axis) - (np.ma.mean(x, axis=axis)*np.ma.mean(y, axis=axis))
def fit_ac_fc(counts, abundance=None, min_good_tpts=2, min_counts_threshold=10):
"""Wrapper for the ctg api"""
if isinstance(counts, str):
c = Counts.from_file(counts)
else:
c = Counts(counts)
c.min_good_tpts = min_good_tpts
return c.fit_ac_fc(
abundance,
min_counts_threshold=min_counts_threshold
)
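# Hedged usage sketch (illustrative only; the file name below is a placeholder
# and the concrete return value is whatever Counts.fit_ac_fc produces):
#   result = fit_ac_fc("timepoint_counts.txt", min_counts_threshold=10)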
class Counts(object):
"""Counts object to contain information related to the counts"""
def __init__(
self,
dataframe,
mask=None,
names=None,
col=5,
min_good_tpts=2,
):
self.data = dataframe
self.mask = mask
self.min_good_tpts = min_good_tpts
#Separating names and timepoint data
if names is None:
self.names = dataframe.iloc[:, :col]
self.data = dataframe.iloc[:, col:]
else:
self.names = names
self._parse_header()
@classmethod
def from_file(
cls,
file,
names=None,
col=5,
        sep=r'\s+',
**kwargs
):
"""Constructing an Counts object using a text file"""
kwargs['sep'] = sep # Setting default
df = pd.read_csv(file, **kwargs)
return Counts(
df,
names=names,
col=col,
)
def _sanitize_names(self):
"""Leftover compliance from previous pipeline"""
if self.names is None:
raise RuntimeError("Cannot sanitize without providing names!")
good = ~(self.names['target_a_id'] == self.names['target_b_id'])
good_names = self.names.loc[good]
#Log Transforming counts
good_data = self.data.loc[good]
good_data.values[good_data.values == 0] = 1
abundance = good_data.sum(axis=0)
y = np.log2(good_data/abundance)
self.data = y
self.good_names = good_names
if hasattr(self, "abundance_thresholds"):
self.abundance_thresholds = pd.Series(
self.abundance_thresholds.values.ravel() - np.log2(abundance.values),
index=abundance.index
)
def _parse_header(self):
"""Extract number of replicates and timepoints from header"""
container = []
for i in self.data.columns:
arr = i.split('_')
if len(arr) != 3 or arr[1][0] != "T":
raise ValueError("Column headers are expected to be in the form {NAME}_T{DAYS}_{REP}")
container.append((int(arr[2]), int(arr[1][1:]))) # (Rep, Timepoint)
reps = set([i[0] for i in container])
if reps != set(range(1, len(reps) + 1)):
raise ValueError("Expect reps to be integers starting from 1.")
self.n_reps = len(reps)
self.timepoints = [[] for _ in range(self.n_reps)]
indexes = [[] for _ in range(self.n_reps)]
for ind, tup in enumerate(container):
i,j = tup
indexes[i - 1].append(ind)
self.timepoints[i - 1].append(j)
self.data_indexes = indexes
def add_mask(self):
"""Creates a mask for which timepoints did not meet abundance threshold"""
if not hasattr(self, "abundance_thresholds"):
raise ValueError("Cannot create mask without abundance threshold!")
Masker = namedtuple("Masker", ["mask", "bad", "allbad"])
mask = self.data.values > self.abundance_thresholds.values
bad = [mask[:, index].sum(axis=1) < self.min_good_tpts \
for index in self.data_indexes
]
        bad = np.vstack(bad)
import os
import logging
import numpy as np
from PIL import Image
from PIL import ImageOps
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
import tensorflow as tf
tf.get_logger().setLevel(logging.ERROR)
from tensorflow.keras.utils import Sequence, to_categorical
from augmentation import augmentations
##########################################################################
class DataGenerator(Sequence):
def __init__(self,
data,
labels,
img_dim=(32, 32,3),
batch_size=32,
num_classes=10,
shuffle=True,
jsd=True
):
self.data = data
self.labels = labels
self.img_dim = img_dim
self.batch_size = batch_size
self.num_classes = num_classes
self.shuffle = shuffle
self.jsd = jsd
self.augmentations = augmentations
self.on_epoch_end()
def on_epoch_end(self):
self.indices = np.arange(len(self.data))
if self.shuffle:
np.random.shuffle(self.indices)
def apply_op(self, image, op, severity):
image = np.clip(image * 255., 0, 255).astype(np.uint8)
pil_img = Image.fromarray(image) # Convert to PIL.Image
pil_img = op(pil_img, severity)
return np.asarray(pil_img).astype(np.float32) / 255.
def augment_and_mix(self, image, severity=3, width=3, depth=-1, alpha=1.):
"""Perform AugMix augmentations and compute mixture.
Args:
image: Raw input image as ndarray shape (h, w, c)
severity: Severity of underlying augmentation operators (1-10).
width: Width of augmentation chain
depth: Depth of augmentation chain. -1 or (1, 3)
alpha: Probability coefficient for Beta and Dirichlet distributions.
Returns:
mixed: Augmented and mixed image.
"""
ws = np.random.dirichlet([alpha] * width).astype(np.float32)
m = np.float32(np.random.beta(alpha, alpha))
mix = np.zeros_like(image).astype(np.float32)
for i in range(width):
image_aug = image.copy()
depth = depth if depth > 0 else np.random.randint(1, 4)
for _ in range(depth):
op = np.random.choice(self.augmentations)
image_aug = self.apply_op(image_aug, op, severity)
# Preprocessing commutes since all coefficients are convex
mix += ws[i] * image_aug
# mix the image and return
mixed = (1 - m)*image + m*mix
return mixed
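    # Illustrative sketch (an assumption, not taken from this file): with
    # jsd=True a batch is typically built from the clean image plus two
    # independently mixed views,
    #   aug1 = self.augment_and_mix(image)
    #   aug2 = self.augment_and_mix(image)
    # so that a Jensen-Shannon consistency loss can compare all three.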
def __len__(self):
return int(np.ceil(len(self.data) / self.batch_size))
def __getitem__(self, idx):
curr_batch = self.indices[idx*self.batch_size:(idx+1)*self.batch_size]
batch_len = len(curr_batch)
        X_orig = np.zeros((batch_len, *self.img_dim), dtype=np.float32)
from comet_ml import Experiment
import nni
import os
import torch
import numpy as np
import pandas as pd
from core.data_utils import get_all_loaders
from core.cka_utils import calculate_CKA
from core.train_methods import train_task_sequentially, train_task_LMC_offline, eval_single_epoch
from core.utils import save_np_arrays, setup_experiment, log_comet_metric, get_random_string
from core.utils import save_task_model_by_policy, load_task_model_by_policy, flatten_params
from core.utils import assign_weights, get_norm_distance, ContinualMeter, load_model
from core.visualization import plot_contour, get_xy, plot_heat_map, plot_l2_map, plot_accs
from core.visualization import plot_single_interpolation, plot_multi_interpolations
DATASET = 'cifar'
HIDDENS = 512
DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
TRIAL_ID = os.environ.get('NNI_TRIAL_JOB_ID', get_random_string(5))
EXP_DIR = './checkpoints/{}'.format(TRIAL_ID)
config = {
# ---COMMON----
'num_tasks': 20, 'per_task_rotation': 9, 'trial': TRIAL_ID, 'exp_dir': EXP_DIR,\
'memory_size': 100, 'dataset': DATASET, 'device': DEVICE, 'momentum': 0.8,\
'mlp_hiddens': HIDDENS, 'dropout': 0.2, 'lr_decay': 1.0, 'stable_sgd': False,\
# ----Seq Model-----
'seq_lr': 0.1, 'seq_batch_size': 64, 'seq_epochs': 1,\
# ------LMC models------
'lmc_policy': 'offline', 'lmc_interpolation': 'linear',\
'lmc_lr': 0.005, 'lmc_batch_size': 64, 'lcm_init_position': 0.01,\
'lmc_line_samples': 5, 'lmc_epochs': 1,
}
# config = nni.get_next_parameter()
config['per_task_rotation'] = 9
config['mlp_hiddens'] = HIDDENS
config['trial'] = TRIAL_ID
config['dataset'] = DATASET
config['device'] = DEVICE
config['exp_dir'] = EXP_DIR
config['lmc_policy'] = 'offline'
config['lmc_interpolation'] = 'linear'
seq_meter = ContinualMeter('seq_accs', config['num_tasks'])
lmc_meter = ContinualMeter('lmc_accs', config['num_tasks'])
experiment = Experiment(api_key="1UNrcJdirU9MEY0RC3UCU7eAg", \
project_name="iclr-cifar-20", \
workspace="cl-modeconnectivity", disabled=False)
loaders = get_all_loaders(config['dataset'], config['num_tasks'],\
config['lmc_batch_size'], config['seq_batch_size'],\
config['memory_size'], config.get('per_task_rotation'))
def plot_loss_plane(w, eval_loader, path, w_labels, config):
u = w[2] - w[0]
dx = np.linalg.norm(u)
u /= dx
v = w[1] - w[0]
v -= np.dot(u, v) * u
dy = np.linalg.norm(v)
v /= dy
m = load_task_model_by_policy(0, 'init', config['exp_dir'])
m.eval()
    coords = np.stack([get_xy(p, w[0], u, v) for p in w])
# print("coords", coords)
G = 15
margin = 0.2
alphas = np.linspace(0.0 - margin, 1.0 + margin, G)
betas = np.linspace(0.0 - margin, 1.0 + margin, G)
tr_loss = np.zeros((G, G))
grid = np.zeros((G, G, 2))
for i, alpha in enumerate(alphas):
for j, beta in enumerate(betas):
p = w[0] + alpha * dx * u + beta * dy * v
m = assign_weights(m, p).to(DEVICE)
err = eval_single_epoch(m, eval_loader)['loss']
c = get_xy(p, w[0], u, v)
#print(c)
grid[i, j] = [alpha * dx, beta * dy]
tr_loss[i, j] = err
contour = {'grid': grid, 'values': tr_loss, 'coords': coords}
save_np_arrays(contour, path=path)
plot_contour(grid, tr_loss, coords, log_alpha=-5.0, N=7, path=path, w_labels=w_labels, dataset=config['dataset'])
return contour
def get_mode_connections(p1, t1, p2, t2, eval_task, config):
w1 = flatten_params(load_task_model_by_policy(t1, p1, config['exp_dir']))
w2 = flatten_params(load_task_model_by_policy(t2, p2, config['exp_dir']))
loss, acc, ts = calculate_mode_connectivity(w1, w2, loaders['sequential'][eval_task]['val'], config)
save_path = '{}/mc_{}_{}_to_{}_{}_on_{}'.format(config['exp_dir'],p1, t1, p2, t2, eval_task)
res = {'loss': loss, 'acc': acc, 'ts': ts}
save_np_arrays(res, path=save_path)
return res
def plot_mode_connections_for_minima(p1, t1, config, max_task=None):
seq_cons, mtl_cons = [], []
seq_labels, mtl_labels = [], []
segments = []
if max_task is None:
max_task = config['num_tasks']
for t2 in range(t1+1, max_task+1):
seq_con = get_mode_connections(p1, t1, 'seq', t2, t1, config)
mtl_con = get_mode_connections(p1, t1, 'lmc', t2, t1, config)
segments = seq_con['ts']
seq_labels.append(r"$\hat{{w}}_{} \rightarrow \hat{{w}}_{{{}}}$".format(t1, t2))
mtl_labels.append(r"$\hat{{w}}_{} \rightarrow \bar{{w}}_{{{}}}$".format(t1, t2))
seq_cons.append(seq_con['loss'])
mtl_cons.append(mtl_con['loss'])
# print("DEBUG MC >> len(labels)=", len(seq_cons+mtl_cons))
save_path = path='{}/mc_on_{}_max_{}'.format(config['exp_dir'], t1, max_task)
plot_multi_interpolations(x=segments, ys=seq_cons + mtl_cons ,y_labels=seq_labels+mtl_labels, path=save_path)
def plot_graphs(config):
# load models
# models = {'seq': {}, 'mtl': {}, 'lmc': {}}
# for t in range(1, config['num_tasks']+1):
# models['seq'][t] = flatten_params(load_task_model_by_policy(t, 'seq', config['exp_dir']))
# if t >= 2:
# models['mtl'][t] = flatten_params(load_task_model_by_policy(t, 'mtl', config['exp_dir']))
# models['lmc'][t] = flatten_params(load_task_model_by_policy(t, 'lmc', config['exp_dir']))
# acc_fig_path = "{}/accs".format(config['exp_dir'])
# plot_accs(config['num_tasks'], seq_meter.data, lmc_meter.data, acc_fig_path)
# --- task 1 ---
plot_mode_connections_for_minima('seq', 1, config)
# plot_mode_connections_for_minima('seq', 1, config, 2)
# plot_mode_connections_for_minima('seq', 1, config, 3)
# plot_mode_connections_for_minima('seq', 2, config)
plot_mode_connections_for_minima('seq', 5, config)
plot_mode_connections_for_minima('seq', 10, config)
# plot_mode_connections_for_minima('seq', 2, config, 3)
# path = '{}/surface_{}_{}_{}_{}_{}_{}_on_{}'.format(config['exp_dir'], 'seq', 1, 'lmc', 2, 'seq', 2, 1)
# labels = [r"$\hat{w}_1$", r"$\bar{w}_{2}$", r"$\hat{w}_2$"]
# plot_loss_plane([models['seq'][1], models['lmc'][2], models['seq'][2]], loaders['sequential'][1]['val'], path, labels, config)
# path = '{}/surface_{}_{}_{}_{}_{}_{}_on_{}'.format(config['exp_dir'], 'seq', 1, 'lmc', 2, 'seq', 2, 2)
# labels = [r"$\hat{w}_1$", r"$\bar{w}_{2}$", r"$\hat{w}_2$"]
# plot_loss_plane([models['seq'][1], models['lmc'][2], models['seq'][2]], loaders['sequential'][2]['val'], path, labels, config)
# path = '{}/surface_{}_{}_{}_{}_{}_{}_on_{}'.format(config['exp_dir'], 'seq', 1, 'lmc', 3, 'seq', 3, 1)
# labels = [r"$\hat{w}_1$", r"$\bar{w}_{3}$", r"$\hat{w}_3$"]
# plot_loss_plane([models['seq'][1], models['lmc'][3], models['seq'][3]], loaders['sequential'][1]['val'], path, labels, config)
# path = '{}/surface_{}_{}_{}_{}_{}_{}_on_{}'.format(config['exp_dir'], 'seq', 1, 'lmc', 3, 'seq', 3, 3)
# labels = [r"$\hat{w}_1$", r"$W\bar{w}{3}$", r"$\hat{w}_3$"]
# plot_loss_plane([models['seq'][1], models['lmc'][3], models['seq'][3]], loaders['sequential'][3]['val'], path, labels, config)
# path = '{}/surface_{}_{}_{}_{}_{}_{}_on_{}'.format(config['exp_dir'], 'seq', 2, 'lmc', 3, 'seq', 3, 2)
# labels = [r"$\hat{w}_2$", r"$W\bar{w}{3}$", r"$\hat{w}_3$"]
# plot_loss_plane([models['seq'][2], models['lmc'][3], models['seq'][3]], loaders['sequential'][2]['val'], path, labels, config)
def calculate_mode_connectivity(w1, w2, eval_loader, config):
net = load_model('{}/{}.pth'.format(config['exp_dir'], 'init')).to(DEVICE)
loss_history, acc_history, ts = [], [], []
    for t in np.arange(0.0, 1.01, 0.025):
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import numpy as np
from scipy.special import beta as beta_fn
from functools import partial
from scipy.linalg import solve_triangular
def sub2ind(sizes, multi_index):
r"""
Map a d-dimensional index to the scalar index of the equivalent flat
1D array
Examples
--------
.. math::
\begin{bmatrix}
0,0 & 0,1 & 0,2\\
1,0 & 1,1 & 1,2\\
2,0 & 2,1 & 2,2
\end{bmatrix}
\rightarrow
\begin{bmatrix}
0 & 3 & 6\\
1 & 4 & 7\\
2 & 5 & 8
\end{bmatrix}
>>> from pyapprox.utilities import sub2ind
>>> sizes = [3,3]
>>> ind = sub2ind(sizes,[1,0])
>>> print(ind)
1
Parameters
----------
sizes : integer
The number of elems in each dimension. For a 2D index
sizes = [numRows, numCols]
multi_index : np.ndarray (len(sizes))
The d-dimensional index
Returns
-------
scalar_index : integer
The scalar index
See Also
--------
pyapprox.utilities.sub2ind
"""
num_sets = len(sizes)
scalar_index = 0; shift = 1
for ii in range(num_sets):
scalar_index += shift * multi_index[ii]
shift *= sizes[ii]
return scalar_index
def ind2sub(sizes,scalar_index,num_elems):
r"""
Map a scalar index of a flat 1D array to the equivalent d-dimensional index
Examples
--------
.. math::
\begin{bmatrix}
0 & 3 & 6\\
1 & 4 & 7\\
2 & 5 & 8
\end{bmatrix}
\rightarrow
\begin{bmatrix}
0,0 & 0,1 & 0,2\\
1,0 & 1,1 & 1,2\\
2,0 & 2,1 & 2,2
\end{bmatrix}
>>> from pyapprox.utilities import ind2sub
>>> sizes = [3,3]
>>> sub = ind2sub(sizes,1,9)
>>> print(sub)
[1 0]
Parameters
----------
sizes : integer
The number of elems in each dimension. For a 2D index
sizes = [numRows, numCols]
scalar_index : integer
The scalar index
num_elems : integer
The total number of elements in the d-dimensional matrix
Returns
-------
multi_index : np.ndarray (len(sizes))
The d-dimensional index
See Also
--------
pyapprox.utilities.sub2ind
"""
denom = num_elems
num_sets = len(sizes)
multi_index = np.empty((num_sets),dtype=int)
for ii in range(num_sets-1,-1,-1):
denom /= sizes[ii]
        multi_index[ii] = int(scalar_index / denom)
        scalar_index = scalar_index % denom
return multi_index
def cartesian_product(input_sets, elem_size=1):
r"""
Compute the cartesian product of an arbitray number of sets.
The sets can consist of numbers or themselves be lists or vectors. All
the lists or vectors of a given set must have the same number of entries
(elem_size). However each set can have a different number of scalars, lists,
or vectors.
Parameters
----------
input_sets
The sets to be used in the cartesian product.
elem_size : integer
The size of the vectors within each set.
Returns
-------
result : np.ndarray (num_sets*elem_size, num_elems)
The cartesian product. num_elems = np.prod(sizes)/elem_size,
where sizes[ii] = len(input_sets[ii]), ii=0,..,num_sets-1.
result.dtype will be set to the first entry of the first input_set
"""
import itertools
out = []
    ## ::-1 reverses the order to be backwards compatible with the old
    ## function below
for r in itertools.product(*input_sets[::-1]):
out.append(r)
out = np.asarray(out).T[::-1,:]
return out
try:
from pyapprox.cython.utilities import cartesian_product_pyx
# # fused type does not work for np.in32, np.float32, np.int64
# # so envoke cython cast
# if np.issubdtype(input_sets[0][0],np.signedinteger):
# return cartesian_product_pyx(input_sets,1,elem_size)
# if np.issubdtype(input_sets[0][0],np.floating):
# return cartesian_product_pyx(input_sets,1.,elem_size)
# else:
# return cartesian_product_pyx(
# input_sets,input_sets[0][0],elem_size)
# always convert to float then cast back
cast_input_sets = [np.asarray(s,dtype=float) for s in input_sets]
out = cartesian_product_pyx(cast_input_sets,1.,elem_size)
out = np.asarray(out,dtype=input_sets[0].dtype)
return out
except:
print ('cartesian_product extension failed')
num_elems = 1;
num_sets = len(input_sets)
sizes = np.empty((num_sets),dtype=int)
for ii in range(num_sets):
sizes[ii] = input_sets[ii].shape[0]/elem_size
num_elems *= sizes[ii]
#try:
# from pyapprox.weave import c_cartesian_product
# # note c_cartesian_product takes_num_elems as last arg and cython
# # takes elem_size
# return c_cartesian_product(input_sets, elem_size, sizes, num_elems)
#except:
# print ('cartesian_product extension failed')
result = np.empty(
(num_sets*elem_size, num_elems), dtype=type(input_sets[0][0]))
for ii in range(num_elems):
multi_index = ind2sub( sizes, ii, num_elems)
for jj in range(num_sets):
for kk in range(elem_size):
result[jj*elem_size+kk,ii]=\
input_sets[jj][multi_index[jj]*elem_size+kk];
return result
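# Hedged example (added for clarity; assumes the itertools code path above):
#   >>> cartesian_product([np.array([1, 2]), np.array([3, 4])])
#   array([[1, 2, 1, 2],
#          [3, 3, 4, 4]])
# i.e. each column is one member of the product, with the first set varying
# fastest.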
def outer_product(input_sets):
r"""
Construct the outer product of an arbitary number of sets.
Examples
--------
.. math::
\{1,2\}\times\{3,4\}=\{1\times3, 2\times3, 1\times4, 2\times4\} =
\{3, 6, 4, 8\}
Parameters
----------
input_sets
The sets to be used in the outer product
Returns
-------
result : np.ndarray(np.prod(sizes))
The outer product of the sets.
result.dtype will be set to the first entry of the first input_set
"""
out = cartesian_product(input_sets)
return np.prod(out,axis=0)
try:
from pyapprox.cython.utilities import outer_product_pyx
# fused type does not work for np.in32, np.float32, np.int64
# so envoke cython cast
if np.issubdtype(input_sets[0][0],np.signedinteger):
return outer_product_pyx(input_sets,1)
if np.issubdtype(input_sets[0][0],np.floating):
return outer_product_pyx(input_sets,1.)
else:
return outer_product_pyx(input_sets,input_sets[0][0])
except:
print ('outer_product extension failed')
num_elems = 1
num_sets = len(input_sets)
sizes = np.empty((num_sets),dtype=int)
for ii in range(num_sets):
sizes[ii] = len(input_sets[ii])
num_elems *= sizes[ii];
# try:
# from pyapprox.weave import c_outer_product
# return c_outer_product(input_sets)
# except:
# print ('outer_product extension failed')
result = np.empty((num_elems), dtype=type(input_sets[0][0]))
for ii in range(num_elems):
result[ii] = 1.0
multi_index = ind2sub(sizes, ii, num_elems);
for jj in range(num_sets):
result[ii] *= input_sets[jj][multi_index[jj]];
return result
def hash_array(array,decimals=None):
r"""
Hash an array for dictionary or set based lookup
Parameters
----------
array : np.ndarray
The integer array to hash
Returns
-------
key : integer
The hash value of the array
"""
#assert array.ndim==1
#array = np.ascontiguousarray(array)
#array.flags.writeable = False
#return hash(array.data)
if decimals is not None:
array = np.around(array,decimals)
#return hash(array.tostring())
return hash(array.tobytes())
def unique_matrix_rows(matrix):
unique_rows = []
unique_rows_set = set()
for ii in range(matrix.shape[0]):
key = hash_array(matrix[ii,:])
if key not in unique_rows_set:
unique_rows_set.add(key)
unique_rows.append(matrix[ii,:])
return np.asarray(unique_rows)
def remove_common_rows(matrices):
num_cols = matrices[0].shape[1]
unique_rows_dict = dict()
for ii in range(len(matrices)):
matrix = matrices[ii]
assert matrix.shape[1]==num_cols
for jj in range(matrix.shape[0]):
key = hash_array(matrix[jj,:])
if key not in unique_rows_dict:
unique_rows_dict[key] = (ii,jj)
elif unique_rows_dict[key][0]!=ii:
del unique_rows_dict[key]
#else:
# entry is a duplicate entry in the current. Allow this to
# occur but only add one of the duplicates to the unique rows dict
unique_rows = []
for key in list(unique_rows_dict.keys()):
ii,jj = unique_rows_dict[key]
unique_rows.append(matrices[ii][jj,:])
return np.asarray(unique_rows)
def allclose_unsorted_matrix_rows(matrix1,matrix2):
if matrix1.shape!=matrix2.shape:
return False
matrix1_dict = dict()
for ii in range(matrix1.shape[0]):
key = hash_array(matrix1[ii,:])
# allow duplicates of rows
if key not in matrix1_dict:
matrix1_dict[key] = 0
else:
matrix1_dict[key] += 1
matrix2_dict = dict()
for ii in range(matrix2.shape[0]):
key = hash_array(matrix2[ii,:])
# allow duplicates of rows
if key not in matrix2_dict:
matrix2_dict[key] = 0
else:
matrix2_dict[key] += 1
if len(list(matrix1_dict.keys()))!=len(list(matrix2_dict.keys())):
return False
for key in list(matrix1_dict.keys()):
if key not in matrix2_dict:
return False
if matrix2_dict[key]!=matrix1_dict[key]:
return False
return True
def get_2d_cartesian_grid(num_pts_1d, ranges):
r"""
Get a 2d tensor grid with equidistant points.
Parameters
----------
num_pts_1d : integer
The number of points in each dimension
ranges : np.ndarray (4)
The lower and upper bound of each dimension [lb_1,ub_1,lb_2,ub_2]
Returns
-------
grid : np.ndarray (2,num_pts_1d**2)
The points in the tensor product grid.
[x1,x2,...x1,x2...]
[y1,y1,...y2,y2...]
"""
#from math_tools_cpp import cartesian_product_double as cartesian_product
from PyDakota.math_tools import cartesian_product
x1 = np.linspace( ranges[0], ranges[1], num_pts_1d )
x2 = np.linspace( ranges[2], ranges[3], num_pts_1d )
abscissa_1d = []
abscissa_1d.append( x1 )
abscissa_1d.append( x2 )
grid = cartesian_product( abscissa_1d, 1 )
return grid
def invert_permutation_vector( p , dtype=int):
r"""
Returns the "inverse" of a permutation vector. I.e., returns the
permutation vector that performs the inverse of the original
permutation operation.
Parameters
----------
p: np.ndarray
Permutation vector
dtype: type
Data type passed to np.ndarray constructor
Returns
-------
pt: np.ndarray
Permutation vector that accomplishes the inverse of the
permutation p.
"""
N = np.max(p) + 1
pt = np.zeros(p.size,dtype=dtype)
pt[p] = np.arange(N,dtype=dtype)
return pt
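# Hedged example: for p = np.array([2, 0, 1]),
# invert_permutation_vector(p) returns array([1, 2, 0]), so that
# x[p][invert_permutation_vector(p)] recovers x.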
def nchoosek(nn,kk):
try: # SciPy >= 0.19
from scipy.special import comb
except:
from scipy.misc import comb
result = np.asarray(np.round(comb(nn, kk)),dtype=int)
if np.isscalar(result):
result=np.asscalar(result)
return result
def total_degree_space_dimension(dimension, degree):
r"""
Return the number of basis functions in a total degree polynomial space,
i.e. the space of all polynomials with degree at most degree.
Parameters
----------
num_vars : integer
The number of variables of the polynomials
degree :
The degree of the total-degree space
Returns
-------
num_terms : integer
The number of basis functions in the total degree space
"""
#from scipy.special import gammaln
#subspace_dimension = lambda k: int(np.round(np.exp( gammaln(k+d+1) - gammaln(k+1) - gammaln(d+1) )))
return nchoosek(dimension+degree,degree)
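# Worked example: in dimension=2 there are nchoosek(2+3,3)=10 polynomials of
# total degree at most 3 (1 constant, 2 linear, 3 quadratic, 4 cubic terms).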
def total_degree_encompassing_N(dimension, N):
r"""
Returns the smallest integer k such that the dimension of the total
degree-k space is greater than N.
"""
k = 0
while total_degree_subspace_dimension(dimension, k) < N:
k += 1
return k
def total_degree_barrier_indices(dimension, max_degree):
r"""
Returns linear indices that bound total degree spaces
Parameters
----------
dimension: int
Parametric dimension
max_degree: int
Maximum polynomial degree
Returns
-------
degree_barrier_indices: list
List of degree barrier indices up to (including) max_degree.
"""
degree_barrier_indices = [0]
for degree in range(1,max_degree+1):
degree_barrier_indices.append( total_degree_subspace_dimension(dimension, degree) )
return degree_barrier_indices
def total_degree_orthogonal_transformation( coefficients, d ):
r"""
Returns an orthogonal matrix transformation that "matches" the input
coefficients.
Parameters
----------
coefficients: np.ndarray
Length-N vector of expansion coefficients
d: int
Parametric dimension
Returns
-------
Q: np.ndarray
A size N x N orthogonal matrix transformation. The first column
is a unit vector in the direction of coefficients.
"""
from scipy.linalg import qr
N = coefficients.size
degree_barrier_indices = [1]
max_degree = 0
while degree_barrier_indices[-1] < N-1:
max_degree += 1
degree_barrier_indices.append( total_degree_subspace_dimension(d, max_degree) )
q = np.zeros([N, N])
# Assume degree = 0 is just constant
q[0,0] = 1.
for degree in range(1,max_degree+1):
i1 = degree_barrier_indices[degree-1]
i2 = degree_barrier_indices[degree]
M = i2-i1
q[i1:i2,i1:i2] = qr( coefficients[i1:i2].reshape([M, 1]) )[0]
return q
def get_low_rank_matrix(num_rows,num_cols,rank):
r"""
Construct a matrix of size num_rows x num_cols with a given rank.
Parameters
----------
num_rows : integer
The number rows in the matrix
num_cols : integer
The number columns in the matrix
rank : integer
The rank of the matrix
Returns
-------
Amatrix : np.ndarray (num_rows,num_cols)
The low-rank matrix generated
"""
assert rank <= min(num_rows,num_cols)
# Generate a matrix with normally distributed entries
N = max(num_rows,num_cols)
Amatrix = np.random.normal(0,1,(N,N))
# Make A symmetric positive definite
Amatrix = np.dot( Amatrix.T, Amatrix )
# Construct low rank approximation of A
eigvals, eigvecs = np.linalg.eigh( Amatrix.copy() )
# Set smallest eigenvalues to zero. Note eigenvals are in
# ascending order
eigvals[:(eigvals.shape[0]-rank)] = 0.
# Construct rank r A matrix
Amatrix = np.dot(eigvecs,np.dot(np.diag(eigvals),eigvecs.T))
# Resize matrix to have requested size
Amatrix = Amatrix[:num_rows,:num_cols]
return Amatrix
def adjust_sign_svd(U, V, adjust_based_upon_U=True):
r"""
    Ensure uniqueness of the svd by ensuring the first entry of each left
    singular vector is positive. Only works for np.linalg.svd
if full_matrices=False
Parameters
----------
U : (M x M) matrix
left singular vectors of a singular value decomposition of a (M x N)
matrix A.
V : (N x N) matrix
right singular vectors of a singular value decomposition of a (M x N)
matrix A.
adjust_based_upon_U : boolean (default=True)
True - make the first entry of each column of U positive
False - make the first entry of each row of V positive
Returns
-------
U : (M x M) matrix
left singular vectors with first entry of the first
singular vector always being positive.
V : (M x M) matrix
right singular vectors consistent with sign adjustment applied to U.
"""
if U.shape[1] != V.shape[0]:
raise Exception('U.shape[1] must equal V.shape[0]. If using np.linalg.svd set full_matrices=False')
if adjust_based_upon_U:
s = np.sign(U[0,:])
else:
s = np.sign(V[:,0])
U *= s
V *= s[:,np.newaxis]
return U,V
def adjust_sign_eig(U):
r"""
    Ensure uniqueness of the eigenvalue decomposition by ensuring the first
    entry of the first singular vector of U is positive.
Parameters
----------
U : (M x M) matrix
left singular vectors of a singular value decomposition of a (M x M)
matrix A.
Returns
-------
U : (M x M) matrix
left singular vectors with first entry of the first
singular vector always being positive.
"""
s = np.sign(U[0,:])
U *= s
return U
def sorted_eigh(C):
r"""
Compute the eigenvalue decomposition of a matrix C and sort
the eigenvalues and corresponding eigenvectors by decreasing
magnitude.
Warning. This will prioritize large eigenvalues even if they
    are negative. Do not use if you need to distinguish between positive
    and negative eigenvalues.
Input
B: matrix (NxN)
matrix to decompose
Output
e: vector (N)
absolute values of the eigenvalues of C sorted by decreasing
magnitude
W: eigenvectors sorted so that they respect sorting of e
"""
e, W = np.linalg.eigh(C)
e = abs(e)
ind = np.argsort(e)
e = e[ind[::-1]]
W = W[:,ind[::-1]]
s = np.sign(W[0,:])
s[s==0] = 1
W = W*s
return e.reshape((e.size,1)), W
def continue_pivoted_lu_factorization(LU_factor,raw_pivots,current_iter,
max_iters,num_initial_rows=0):
it = current_iter
for it in range(current_iter,max_iters):
# find best pivot
if np.isscalar(num_initial_rows) and (it<num_initial_rows):
#pivot=np.argmax(np.absolute(LU_factor[it:num_initial_rows,it]))+it
pivot = it
elif (not np.isscalar(num_initial_rows) and
(it<num_initial_rows.shape[0])):
pivot=num_initial_rows[it]
else:
pivot = np.argmax(np.absolute(LU_factor[it:,it]))+it
# update pivots vector
#swap_rows(pivots,it,pivot)
raw_pivots[it]=pivot
# apply pivots(swap rows) in L factorization
swap_rows(LU_factor,it,pivot)
# check for singularity
if abs(LU_factor[it,it])<np.finfo(float).eps:
msg = "pivot %1.2e"%abs(LU_factor[it,it])
msg += " is to small. Stopping factorization."
print (msg)
break
# update L_factor
LU_factor[it+1:,it] /= LU_factor[it,it];
# udpate U_factor
col_vector = LU_factor[it+1:,it]
row_vector = LU_factor[it,it+1:]
update = np.outer(col_vector,row_vector)
LU_factor[it+1:,it+1:]-= update
return LU_factor, raw_pivots, it
def unprecondition_LU_factor(LU_factor,precond_weights,num_pivots=None):
r"""
A=LU and WA=XY
Then WLU=XY
We also know Y=WU
So WLU=XWU => WL=XW so L=inv(W)*X*W
and U = inv(W)Y
"""
if num_pivots is None:
num_pivots = np.min(LU_factor.shape)
assert precond_weights.shape[1]==1
assert precond_weights.shape[0]==LU_factor.shape[0]
# left multiply L an U by inv(W), i.e. compute inv(W).dot(L)
# and inv(W).dot(U)
LU_factor = LU_factor.copy()/precond_weights
# right multiply L by W, i.e. compute L.dot(W)
# Do not overwrite columns past num_pivots. If not all pivots have been
# performed the columns to the right of this point contain U factor
for ii in range(num_pivots):
LU_factor[ii+1:,ii]*=precond_weights[ii,0]
return LU_factor
def split_lu_factorization_matrix(LU_factor,num_pivots=None):
r"""
Return the L and U factors of an inplace LU factorization
Parameters
----------
num_pivots : integer
The number of pivots performed. This allows LU in place matrix
to be split during evolution of LU algorithm
"""
if num_pivots is None:
num_pivots = np.min(LU_factor.shape)
L_factor = np.tril(LU_factor)
if L_factor.shape[1]<L_factor.shape[0]:
# if matrix over-determined ensure L is a square matrix
n0 = L_factor.shape[0]-L_factor.shape[1]
L_factor=np.hstack([L_factor,np.zeros((L_factor.shape[0],n0))])
if num_pivots<np.min(L_factor.shape):
n1 = L_factor.shape[0]-num_pivots
n2 = L_factor.shape[1]-num_pivots
L_factor[num_pivots:,num_pivots:] = np.eye(n1,n2)
np.fill_diagonal(L_factor,1.)
U_factor = np.triu(LU_factor)
U_factor[num_pivots:,num_pivots:] = LU_factor[num_pivots:,num_pivots:]
return L_factor, U_factor
def truncated_pivoted_lu_factorization(A,max_iters,num_initial_rows=0,
truncate_L_factor=True):
r"""
    Compute an incomplete pivoted LU decomposition of a matrix.
Parameters
----------
A np.ndarray (num_rows,num_cols)
The matrix to be factored
max_iters : integer
        The maximum number of pivots to perform. Internally max_iters will be
set such that max_iters = min(max_iters,K), K=min(num_rows,num_cols)
num_initial_rows: integer or np.ndarray()
The number of the top rows of A to be chosen as pivots before
any remaining rows can be chosen.
If object is an array then entries are raw pivots which
will be used in order.
Returns
-------
L_factor : np.ndarray (max_iters,K)
The lower triangular factor with a unit diagonal.
K=min(num_rows,num_cols)
U_factor : np.ndarray (K,num_cols)
The upper triangular factor
raw_pivots : np.ndarray (num_rows)
The sequential pivots used to during algorithm to swap rows of A.
pivots can be obtained from raw_pivots using
get_final_pivots_from_sequential_pivots(raw_pivots)
pivots : np.ndarray (max_iters)
The index of the chosen rows in the original matrix A chosen as pivots
"""
num_rows,num_cols = A.shape
min_num_rows_cols = min(num_rows, num_cols)
max_iters = min(max_iters, min_num_rows_cols)
if ( A.shape[1] < max_iters ):
msg = "truncated_pivoted_lu_factorization: "
msg += " A is inconsistent with max_iters. Try deceasing max_iters or "
msg += " increasing the number of columns of A"
raise Exception(msg)
    # Use L to store both L and U during factorization then copy out U in post
# processing
LU_factor = A.copy()
raw_pivots = np.arange(num_rows)#np.empty(num_rows,dtype=int)
LU_factor,raw_pivots,it = continue_pivoted_lu_factorization(
LU_factor,raw_pivots,0,max_iters,num_initial_rows)
if not truncate_L_factor:
return LU_factor, raw_pivots
else:
pivots = get_final_pivots_from_sequential_pivots(
raw_pivots)[:it+1]
L_factor, U_factor = split_lu_factorization_matrix(LU_factor,it+1)
L_factor = L_factor[:it+1,:it+1]
U_factor = U_factor[:it+1,:it+1]
return L_factor, U_factor, pivots
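# Hedged verification sketch (illustrative; assumes a square matrix A and a
# full factorization, i.e. max_iters=A.shape[0]):
#   L, U, pivots = truncated_pivoted_lu_factorization(A, A.shape[0])
#   np.allclose(L.dot(U), A[pivots, :])   # rows of A permuted by the pivots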
def add_columns_to_pivoted_lu_factorization(LU_factor,new_cols,raw_pivots):
r"""
Given factorization PA=LU add new columns to A in unpermuted order and
update LU factorization
Parameters
----------
raw_pivots : np.ndarray (num_pivots)
The pivots applied at each iteration of pivoted LU factorization.
If desired one can use get_final_pivots_from_sequential_pivots to
compute final position of rows after all pivots have been applied.
"""
assert LU_factor.shape[0]==new_cols.shape[0]
assert raw_pivots.shape[0]<=new_cols.shape[0]
num_new_cols = new_cols.shape[1]
num_pivots = raw_pivots.shape[0]
for it in range(num_pivots):
pivot = raw_pivots[it]
swap_rows(new_cols,it,pivot)
# update U_factor
# recover state of col vector from permuted LU factor
# Let (jj,kk) represent iteration and pivot pairs
# then if lu factorization produced sequence of pairs
# (0,4),(1,2),(2,4) then LU_factor[:,0] here will be col_vector
# in LU algorithm with the second and third permutations
# so undo these permutations in reverse order
col_vector = LU_factor[it+1:,it].copy()
for ii in range(num_pivots-it-1):
# (it+1) necessary in two lines below because only dealing
# with compressed col vector which starts at row it in LU_factor
jj=raw_pivots[num_pivots-1-ii]-(it+1)
kk=num_pivots-ii-1-(it+1)
swap_rows(col_vector,jj,kk)
row_vector = new_cols[it,:]
update = np.outer(col_vector,row_vector)
new_cols[it+1:,:] -= update
#new_cols = add_rows_to_pivoted_lu_factorization(
# new_cols[:it+1,:],new_cols[it+1:,:],num_pivots)
LU_factor = np.hstack((LU_factor,new_cols))
return LU_factor
def add_rows_to_pivoted_lu_factorization(LU_factor,new_rows,num_pivots):
assert LU_factor.shape[1]==new_rows.shape[1]
num_new_rows = new_rows.shape[0]
LU_factor_extra = new_rows.copy()
for it in range(num_pivots):
LU_factor_extra[:,it]/=LU_factor[it,it]
col_vector = LU_factor_extra[:,it]
row_vector = LU_factor[it,it+1:]
update = np.outer(col_vector,row_vector)
LU_factor_extra[:,it+1:] -= update
return np.vstack([LU_factor,LU_factor_extra])
def swap_rows(matrix,ii,jj):
temp = matrix[ii].copy()
matrix[ii]=matrix[jj]
matrix[jj]=temp
def pivot_rows(pivots,matrix,in_place=True):
if not in_place:
matrix = matrix.copy()
num_pivots = pivots.shape[0]
assert num_pivots <= matrix.shape[0]
for ii in range(num_pivots):
swap_rows(matrix,ii,pivots[ii])
return matrix
def get_final_pivots_from_sequential_pivots(sequential_pivots,num_pivots=None):
if num_pivots is None:
num_pivots = sequential_pivots.shape[0]
assert num_pivots >= sequential_pivots.shape[0]
pivots = np.arange(num_pivots)
return pivot_rows(sequential_pivots,pivots,False)
def get_tensor_product_quadrature_rule(
degrees,num_vars,univariate_quadrature_rules,transform_samples=None,
density_function=None):
r"""
    If you get an error about the outer product failing, it may be because
    the univariate quadrature rule is returning a weights array for every
    level, i.e. l=0,...,level.
"""
degrees = np.atleast_1d(degrees)
if degrees.shape[0]==1 and num_vars>1:
degrees = np.array([degrees[0]]*num_vars,dtype=int)
if callable(univariate_quadrature_rules):
univariate_quadrature_rules = [univariate_quadrature_rules]*num_vars
x_1d = []; w_1d = []
for ii in range(len(univariate_quadrature_rules)):
x,w = univariate_quadrature_rules[ii](degrees[ii])
x_1d.append(x); w_1d.append(w)
samples = cartesian_product(x_1d,1)
weights = outer_product(w_1d)
if density_function is not None:
weights *= density_function(samples)
if transform_samples is not None:
samples = transform_samples(samples)
return samples, weights
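# Hedged example (the univariate rule below is an assumption, not part of this
# module): a 3x3 Gauss-Legendre tensor grid in two variables.
#   gauss_legendre = lambda n: np.polynomial.legendre.leggauss(n)
#   samples, weights = get_tensor_product_quadrature_rule(
#       [3, 3], 2, gauss_legendre)
#   # samples.shape == (2, 9), weights.shape == (9,)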
def piecewise_quadratic_interpolation(samples,mesh,mesh_vals,ranges):
assert mesh.shape[0]==mesh_vals.shape[0]
vals = np.zeros_like(samples)
samples = (samples-ranges[0])/(ranges[1]-ranges[0])
for ii in range(0,mesh.shape[0]-2,2):
xl=mesh[ii]; xr=mesh[ii+2]
x=(samples-xl)/(xr-xl)
interval_vals = canonical_piecewise_quadratic_interpolation(
x,mesh_vals[ii:ii+3])
# to avoid double counting we set left boundary of each interval to zero
# except for first interval
if ii==0:
interval_vals[(x<0)|(x>1)]=0.
else:
interval_vals[(x<=0)|(x>1)]=0.
vals += interval_vals
return vals
# I = np.argsort(samples)
# sorted_samples = samples[I]
# idx2=0
# for ii in range(0,mesh.shape[0]-2,2):
# xl=mesh[ii]; xr=mesh[ii+2]
# for jj in range(idx2,sorted_samples.shape[0]):
# if ii==0:
# if sorted_samples[jj]>=xl:
# idx1=jj
# break
# else:
# if sorted_samples[jj]>xl:
# idx1=jj
# break
# for jj in range(idx1,sorted_samples.shape[0]):
# if sorted_samples[jj]>xr:
# idx2=jj-1
# break
# if jj==sorted_samples.shape[0]-1:
# idx2=jj
# x=(sorted_samples[idx1:idx2+1]-xl)/(xr-xl)
# interval_vals = canonical_piecewise_quadratic_interpolation(
# x,mesh_vals[ii:ii+3])
# vals[idx1:idx2+1] += interval_vals
# return vals[np.argsort(I)]
def canonical_piecewise_quadratic_interpolation(x,nodal_vals):
r"""
Piecewise quadratic interpolation of nodes at [0,0.5,1]
Assumes all values are in [0,1].
"""
assert x.ndim==1
assert nodal_vals.shape[0]==3
vals = nodal_vals[0]*(1.0-3.0*x+2.0*x**2)+nodal_vals[1]*(4.0*x-4.0*x**2)+\
nodal_vals[2]*(-x+2.0*x**2)
return vals
def discrete_sampling(N,probs,states=None):
r"""
discrete_sampling -- samples iid from a discrete probability measure
x = discrete_sampling(N, prob, states)
Generates N iid samples from a random variable X whose probability mass
function is
    prob(X = states[j]) = prob[j], 0 <= j < len(prob).
    If states is not given, the states are given by 0 <= state <= len(prob)-1.
"""
p = probs.squeeze()/np.sum(probs)
bins = np.digitize(
np.random.uniform(0.,1.,(N,1)), np.hstack((0,np.cumsum(p))))-1
if states is None:
x = bins
else:
assert(states.shape[0] == probs.shape[0])
x = states[bins]
return x.squeeze()
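# Hedged example: five iid draws from P(X=10)=0.2, P(X=20)=0.8
#   x = discrete_sampling(5, np.array([0.2, 0.8]), states=np.array([10, 20]))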
def lists_of_arrays_equal(list1,list2):
if len(list1)!=len(list2):
return False
equal = True
for ll in range(len(list1)):
if not np.allclose(list1[ll],list2[ll]):
return False
return True
def lists_of_lists_of_arrays_equal(list1,list2):
if len(list1)!=len(list2):
return False
equal = True
for ll in range(len(list1)):
for kk in range(len(list1[ll])):
if not np.allclose(list1[ll][kk],list2[ll][kk]):
return False
return True
def beta_pdf(alpha_stat,beta_stat,x):
#scipy implementation is slow
const = 1./beta_fn(alpha_stat,beta_stat)
return const*(x**(alpha_stat-1)*(1-x)**(beta_stat-1))
def pdf_under_affine_map(pdf,loc,scale,y):
return pdf((y-loc)/scale)/scale
def beta_pdf_on_ab(alpha_stat,beta_stat,a,b,x):
#const = 1./beta_fn(alpha_stat,beta_stat)
#const /= (b-a)**(alpha_stat+beta_stat-1)
#return const*((x-a)**(alpha_stat-1)*(b-x)**(beta_stat-1))
from functools import partial
pdf = partial(beta_pdf,alpha_stat,beta_stat)
return pdf_under_affine_map(pdf,a,(b-a),x)
def beta_pdf_derivative(alpha_stat,beta_stat,x):
r"""
x in [0,1]
"""
#beta_const = gamma_fn(alpha_stat+beta_stat)/(
# gamma_fn(alpha_stat)*gamma_fn(beta_stat))
beta_const = 1./beta_fn(alpha_stat,beta_stat)
deriv=0
if alpha_stat>1:
deriv += (alpha_stat-1)*(x**(alpha_stat-2)*(1-x)**(beta_stat-1))
if beta_stat>1:
deriv -= (beta_stat -1)*(x**(alpha_stat-1)*(1-x)**(beta_stat-2))
deriv *= beta_const
return deriv
from scipy.special import erf
def gaussian_cdf(mean,var,x):
return 0.5*(1+erf((x-mean)/(np.sqrt(var*2))))
def gaussian_pdf(mean,var,x,package=np):
r"""
set package=sympy if want to use for symbolic calculations
"""
return package.exp(-(x-mean)**2/(2*var)) / (2*package.pi*var)**.5
def gaussian_pdf_derivative(mean,var,x):
return -gaussian_pdf(mean,var,x)*(x-mean)/var
def pdf_derivative_under_affine_map(pdf_deriv,loc,scale,y):
r"""
Let y=g(x)=x*scale+loc and x = g^{-1}(y) = v(y) = (y-loc)/scale, scale>0
p_Y(y)=p_X(v(y))*|dv/dy(y)|=p_X((y-loc)/scale))/scale
dp_Y(y)/dy = dv/dy(y)*dp_X/dx(v(y))/scale = dp_X/dx(v(y))/scale**2
"""
return pdf_deriv((y-loc)/scale)/scale**2
def gradient_of_tensor_product_function(univariate_functions,
univariate_derivatives,samples):
num_samples = samples.shape[1]
num_vars = len(univariate_functions)
assert len(univariate_derivatives)==num_vars
gradient = np.empty((num_vars,num_samples))
# precompute data which is reused multiple times
function_values = []
for ii in range(num_vars):
function_values.append(univariate_functions[ii](samples[ii,:]))
for ii in range(num_vars):
gradient[ii,:] = univariate_derivatives[ii](samples[ii,:])
for jj in range(ii):
gradient[ii,:] *= function_values[jj]
for jj in range(ii+1,num_vars):
gradient[ii,:] *= function_values[jj]
return gradient
def evaluate_tensor_product_function(univariate_functions,samples):
num_samples = samples.shape[1]
num_vars = len(univariate_functions)
values = np.ones((num_samples))
for ii in range(num_vars):
values *= univariate_functions[ii](samples[ii,:])
return values
def cholesky_decomposition(Amat):
nrows = Amat.shape[0]
assert Amat.shape[1]==nrows
L = np.zeros((nrows,nrows))
for ii in range(nrows):
temp = Amat[ii,ii]-np.sum(L[ii,:ii]**2)
if temp <= 0:
raise Exception ('matrix is not positive definite')
L[ii,ii]=np.sqrt(temp)
L[ii+1:,ii]=\
(Amat[ii+1:,ii]-np.sum(L[ii+1:,:ii]*L[ii,:ii],axis=1))/L[ii,ii]
return L
def pivoted_cholesky_decomposition(A,npivots,init_pivots=None,tol=0.,
error_on_small_tol=False,
pivot_weights=None,
return_full=False,
econ=True):
r"""
Return a low-rank pivoted Cholesky decomposition of matrix A.
If A is positive definite and npivots is equal to the number of rows of A
then L.dot(L.T)==A
To obtain the pivoted form of L set
L = L[pivots,:]
Then P.dot(A).dot(P.T) == L.dot(L.T)
where P is the standard pivot matrix, which can be obtained from the
pivot vector using the function get_pivot_matrix_from_vector.
"""
Amat = A.copy()
nrows = Amat.shape[0]
assert Amat.shape[1]==nrows
assert npivots<=nrows
#L = np.zeros(((nrows,npivots)))
L = np.zeros(((nrows,nrows)))
#diag1 = np.diag(Amat).copy() # returns a copy of diag
diag = Amat.ravel()[::Amat.shape[0]+1] #returns a view of diag
#assert np.allclose(diag,diag1)
pivots = np.arange(nrows)
init_error = np.absolute(diag).sum()
L, pivots, diag, chol_flag, ncompleted_pivots, error = \
continue_pivoted_cholesky_decomposition(
Amat, L, npivots, init_pivots, tol,
error_on_small_tol,
pivot_weights, pivots, diag,
0, init_error, econ)
if not return_full:
return L[:,:ncompleted_pivots], pivots[:ncompleted_pivots], error,\
chol_flag
else:
return L, pivots, error, chol_flag, diag.copy(), init_error, \
ncompleted_pivots
def continue_pivoted_cholesky_decomposition(Amat, L, npivots, init_pivots, tol,
error_on_small_tol,
pivot_weights, pivots, diag,
ncompleted_pivots, init_error,
econ):
Amat = Amat.copy() # Do not overwrite incoming Amat
if econ is False and pivot_weights is not None:
msg = 'pivot weights not used when econ is False'
raise Exception(msg)
chol_flag = 0
assert ncompleted_pivots < npivots
for ii in range(ncompleted_pivots, npivots):
if init_pivots is None or ii >= len(init_pivots):
if econ:
if pivot_weights is None:
pivot = np.argmax(diag[pivots[ii:]])+ii
else:
pivot = np.argmax(
pivot_weights[pivots[ii:]]*diag[pivots[ii:]])+ii
else:
schur_complement = (
Amat[np.ix_(pivots[ii:], pivots[ii:])]-
L[pivots[ii:], :ii].dot(L[pivots[ii:], :ii].T))
schur_diag = np.diagonal(schur_complement)
pivot = np.argmax(
np.linalg.norm(schur_complement, axis=0)**2/schur_diag)
pivot += ii
else:
pivot = np.where(pivots==init_pivots[ii])[0][0]
assert pivot >= ii
swap_rows(pivots, ii, pivot)
if diag[pivots[ii]] <= 0:
msg = 'matrix is not positive definite'
if error_on_small_tol:
raise Exception (msg)
else:
print(msg)
chol_flag = 1
break
L[pivots[ii],ii] = np.sqrt(diag[pivots[ii]])
L[pivots[ii+1:], ii]=(Amat[pivots[ii+1:], pivots[ii]]-
L[pivots[ii+1:], :ii].dot(L[pivots[ii], :ii]))/L[pivots[ii], ii]
diag[pivots[ii+1:]] -= L[pivots[ii+1:], ii]**2
# for jj in range(ii+1,nrows):
# L[pivots[jj],ii]=(Amat[pivots[ii],pivots[jj]]-
# L[pivots[ii],:ii].dot(L[pivots[jj],:ii]))/L[pivots[ii],ii]
# diag[pivots[jj]] -= L[pivots[jj],ii]**2
error = diag[pivots[ii+1:]].sum()/init_error
# print(ii,'error',error)
if error<tol:
msg = 'Tolerance reached. '
msg += f'Iteration:{ii}. Tol={tol}. Error={error}'
# If the matrix has rank r then the error will be machine precision
# In such a case exiting without an error is the right thing to do
if error_on_small_tol:
raise Exception(msg)
else:
chol_flag = 1
print(msg)
break
return L, pivots, diag, chol_flag, ii+1, error
def get_pivot_matrix_from_vector(pivots,nrows):
P = np.eye(nrows)
P = P[pivots,:]
return P
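# Sketch (not part of the original API): exercise the two properties stated in
# the pivoted_cholesky_decomposition docstring for a full-rank factorization.
def _demo_pivoted_cholesky():
    np.random.seed(1)
    tmp = np.random.normal(0., 1., (4, 4))
    Amat = tmp.dot(tmp.T) + np.eye(4)  # symmetric positive definite
    L, pivots, error, chol_flag = pivoted_cholesky_decomposition(
        Amat, Amat.shape[0])
    assert chol_flag == 0
    # with npivots equal to the number of rows, L recovers Amat
    assert np.allclose(L.dot(L.T), Amat)
    # the pivot vector yields an orthogonal (permutation) matrix
    P = get_pivot_matrix_from_vector(pivots, Amat.shape[0])
    assert np.allclose(P.dot(P.T), np.eye(Amat.shape[0]))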
def determinant_triangular_matrix(matrix):
return np.prod(np.diag(matrix))
def get_all_primes_less_than_or_equal_to_n(n):
primes = list()
primes.append(2)
for num in range(3, n+1, 2):
if all(num % i != 0 for i in range(2, int(num**.5 ) + 1)):
primes.append(num)
return np.asarray(primes)
def get_first_n_primes(n):
primes = list()
primes.append(2)
num=3
while len(primes)<n:
if all(num % i != 0 for i in range(2, int(num**.5 ) + 1)):
primes.append(num)
num+=2
return np.asarray(primes)
def halton_sequence(num_vars, index1, index2):
assert index1<index2
assert num_vars<=100
primes = get_first_n_primes(num_vars)
try:
from pyapprox.cython.utilities import halton_sequence_pyx
return halton_sequence_pyx(primes,index1,index2)
except:
print ('halton_sequence extension failed')
pass
num_samples = index2-index1
sequence = np.zeros((num_vars,num_samples))
ones = np.ones(num_vars)
kk=0
for ii in range(index1,index2):
ff = ii*ones
prime_inv = 1./primes
summand = ii*num_vars
while summand>0:
remainder = np.remainder(ff,primes)
sequence[:,kk] += remainder*prime_inv
prime_inv /= primes
ff=ff//primes
summand = ff.sum()
kk+=1
return sequence
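# Example (sketch): the first three 2D Halton points (bases 2 and 3) are the
# columns (1/2, 1/3), (1/4, 2/3), (3/4, 1/9), i.e.
#   np.allclose(halton_sequence(2, 1, 4),
#               np.array([[1/2., 1/4., 3/4.], [1/3., 2/3., 1/9.]]))   # True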
def transformed_halton_sequence(marginal_icdfs,num_vars,num_samples,
start_index=1):
assert start_index>0
# the sample with index 0 is [0,...,0], which can cause problems for icdfs of
# unbounded random variables, so start with index 1 in the Halton sequence
samples = halton_sequence(num_vars, start_index, num_samples+start_index)
if marginal_icdfs is None:
return samples
if callable(marginal_icdfs):
marginal_icdfs = [marginal_icdfs]*num_vars
else:
assert len(marginal_icdfs)==num_vars
for ii in range(num_vars):
samples[ii,:] = marginal_icdfs[ii](samples[ii,:])
return samples
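# Example (sketch, assumes scipy.stats is available): map the Halton points
# through an inverse CDF to get quasi-random standard-normal samples, skipping
# the [0,...,0] point via start_index=1 (the default), e.g.
#   from scipy.stats import norm
#   gauss_samples = transformed_halton_sequence(norm.ppf, 2, 100)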
def approx_fprime(x,func,eps=np.sqrt(np.finfo(float).eps)):
r"""Approx the gradient of a vector valued function at a single
sample using finite_difference
"""
assert x.shape[1]==1
nvars = x.shape[0]
fprime = []
func_at_x = func(x).squeeze()
assert func_at_x.ndim==1
for ii in range(nvars):
x_plus_eps = x.copy()
x_plus_eps[ii] += eps
fprime.append((func(x_plus_eps).squeeze()-func_at_x)/eps)
return np.array(fprime)
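# Example (sketch): Jacobian of f(x) = [x_0**2, 3*x_1] at x = (1, 2)
#   x = np.array([[1.], [2.]])
#   func = lambda x: np.array([x[0, 0]**2, 3.*x[1, 0]])
#   approx_fprime(x, func)   # approximately [[2., 0.], [0., 3.]]
#                            # (one row per variable, one column per output)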
def partial_functions_equal(func1, func2):
if not (isinstance(func1, partial) and isinstance(func2, partial)):
return False
are_equal = all([getattr(func1, attr) == getattr(func2, attr)
for attr in ['func', 'args', 'keywords']])
return are_equal
def get_all_sample_combinations(samples1,samples2):
r"""
For two sample sets of different random variables
loop over all combinations
samples1 vary slowest and samples2 vary fastest
Let samples1 = [[1,2],[2,3]]
samples2 = [[0, 0, 0],[0, 1, 2]]
Then the columns of samples will be
([1, 2, 0, 0])
([1, 2, 0, 1])
([1, 2, 0, 2])
([2, 3, 0, 0])
([2, 3, 0, 1])
([2, 3, 0, 2])
"""
import itertools
samples = []
for r in itertools.product(*[samples1.T,samples2.T]):
samples.append(np.concatenate(r))
return np.asarray(samples).T
def get_correlation_from_covariance(cov):
r"""
Compute the correlation matrix from a covariance matrix
Parameters
----------
cov : np.ndarray (nrows,nrows)
The symmetric covariance matrix
Returns
-------
cor : np.ndarray (nrows,nrows)
The symmetric correlation matrix
Examples
--------
>>> cov = np.asarray([[2,-1],[-1,2]])
>>> get_correlation_from_covariance(cov)
array([[ 1. , -0.5],
[-0.5, 1. ]])
"""
stdev_inv = 1/np.sqrt(np.diag(cov))
cor = stdev_inv[np.newaxis,:]*cov*stdev_inv[:,np.newaxis]
return cor
def compute_f_divergence(density1,density2,quad_rule,div_type,
normalize=False):
r"""
Compute f divergence between two densities
.. math:: \int_\Gamma f\left(\frac{p(z)}{q(z)}\right)q(z)\,dz
Parameters
----------
density1 : callable
The density p(z)
density2 : callable
The density q(z)
normalize : boolean
True - normalize the densities
False - Check that densities are normalized, i.e. integrate to 1
quad_rule : tuple
x,w - quadrature points and weights
x : np.ndarray (num_vars,num_samples)
w : np.ndarray (num_samples)
div_type : string
The type of f divergence (KL,TV,hellinger).
KL - Kullback-Leibler :math:`f(t)=t\log t`
TV - total variation :math:`f(t)=\frac{1}{2}\lvert t-1\rvert`
hellinger - squared Hellinger :math:`f(t)=(\sqrt{t}-1)^2`
"""
x,w=quad_rule
assert w.ndim==1
density1_vals = density1(x).squeeze()
const1 = density1_vals.dot(w)
density2_vals = density2(x).squeeze()
const2 = density2_vals.dot(w)
if normalize:
density1_vals/=const1
density2_vals/=const2
else:
tol=1e-14
#print(const1)
#print(const2)
assert np.allclose(const1,1.0,atol=tol)
assert np.allclose(const2,1.0,atol=tol)
const1,const2=1.0,1.0
# normalize densities. May be needed if density is
# Unnormalized Bayesian Posterior
d1 = lambda x: density1(x)/const1
d2 = lambda x: density2(x)/const2
if div_type=='KL':
# Kullback-Leibler
f = lambda t: t*np.log(t)
elif div_type=='TV':
# Total variation
f = lambda t: 0.5*np.absolute(t-1)
elif div_type=='hellinger':
# Squared hellinger int (p(z)**0.5-q(z)**0.5)**2 dz
# Note some formulations use 0.5 times above integral. We do not
# do that here
f = lambda t: (np.sqrt(t)-1)**2
else:
raise Exception(f'Divergence type {div_type} not supported')
d1_vals,d2_vals = d1(x),d2(x)
I = np.where(d2_vals>1e-15)[0]
ratios = | np.zeros_like(d2_vals) | numpy.zeros_like |
# -*- coding: iso-8859-15 -*-
#
# This software was written by <NAME> (<NAME>)
# Copyright <NAME>
# All rights reserved
# This software is licenced under a 3-clause BSD style license
#
#Redistribution and use in source and binary forms, with or without
#modification, are permitted provided that the following conditions are met:
#
#Redistributions of source code must retain the above copyright notice,
#this list of conditions and the following disclaimer.
#
#Redistributions in binary form must reproduce the above copyright notice,
#this list of conditions and the following disclaimer in the documentation
#and/or other materials provided with the distribution.
#
#Neither the name of the University College London nor the names
#of the code contributors may be used to endorse or promote products
#derived from this software without specific prior written permission.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
#AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
#THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
#PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
#CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
#EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
#PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
#OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
#WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
#OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
#ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
# Developed by <NAME> (MSSL/UCL)
# uvotpy
# (c) 2009-2017, see Licence
from future.builtins import str
from future.builtins import input
from future.builtins import range
__version__ = '2.9.0 20171209'
import sys
import optparse
import numpy as np
import matplotlib.pyplot as plt
try:
from astropy.io import fits as pyfits
from astropy import wcs
except:
import pyfits
import re
import warnings
try:
import imagestats
except:
import stsci.imagestats as imagestats
import scipy
from scipy import interpolate
from scipy.ndimage import convolve
from scipy.signal import boxcar
from scipy.optimize import leastsq
from scipy.special import erf
from numpy import polyfit, polyval
'''
try:
#from uvotpy import uvotplot,uvotmisc,uvotwcs,rationalfit,mpfit,uvotio
import uvotplot
import uvotmisc
import uvotwcs
import rationalfit
import mpfit
import uvotio
except:
pass
'''
from uvotmisc import interpgrid, uvotrotvec, rdTab, rdList
from generate_USNOB1_cat import get_usnob1_cat
import datetime
import os
if __name__ != '__main__':
anchor_preset = list([None,None])
bg_pix_limits = list([-100,-70,70,100])
bg_lower_ = list([None,None]) # (offset, width) in pix, e.g., [20,30], default [50,50]
bg_upper_ = list([None,None]) # (offset, width) in pix, e.g., [20,30], default [50,50]
offsetlimit = None
#set Global parameters
status = 0
do_coi_correction = True # if not set, disable coi_correction
tempnames = list()
tempntags = list()
cval = -1.0123456789
interactive = True
update_curve = True
contour_on_img = False
give_result = False # with this set, a call to getSpec returns all data
give_new_result = False
use_rectext = False
background_method = 'boxcar' # alternatives 'splinefit' 'boxcar'
background_smoothing = [50,7] # 'boxcar' default smoothing in dispersion and across dispersion in pix
background_interpolation = 'linear'
trackcentroiding = True # default (= False will disable track y-centroiding)
global trackwidth
trackwidth = 2.5 # width of extraction region in sigma (alternative default = 1.0) 2.5 was used for flux calibration.
bluetrackwidth = 1.3 # multiplier width of non-order-overlapped extraction region [not yet active]
write_RMF = False
background_source_mag = 18.0
zeroth_blim_offset = 1.0
coi_half_width = None
slit_width = 200
_PROFILE_BACKGROUND_ = False # start with severe sigma-clip of background, before going to smoothing
today_ = datetime.date.today()
datestring = today_.isoformat()[0:4]+today_.isoformat()[5:7]+today_.isoformat()[8:10]
fileversion=1
calmode=True
typeNone = type(None)
senscorr = True # do sensitivity correction
print(66*"=")
print("uvotpy module uvotgetspec version=",__version__)
print("<NAME> (c) 2009-2017, see uvotpy licence.")
print("please use reference provided at http://github.com/PaulKuin/uvotpy")
print(66*"=","\n")
def getSpec(RA,DEC,obsid, ext, indir='./', wr_outfile=True,
outfile=None, calfile=None, fluxcalfile=None,
use_lenticular_image=True,
offsetlimit=None, anchor_offset=None, anchor_position=[None,None],
background_lower=[None,None], background_upper=[None,None],
background_template=None,
fixed_angle=None, spextwidth=13, curved="update",
fit_second=False, predict2nd=True, skip_field_src=False,
optimal_extraction=False, catspec=None,write_RMF=write_RMF,
get_curve=None,fit_sigmas=True,get_sigma_poly=False,
lfilt1=None, lfilt1_ext=None, lfilt2=None, lfilt2_ext=None,
wheelpos=None, interactive=interactive, sumimage=None, set_maglimit=None,
plot_img=True, plot_raw=True, plot_spec=True, zoom=True, highlight=False,
uvotgraspcorr_on=True, ank_c_0offset = False,
update_pnt=True, ifmotion=False, motion_file=None, anchor_x_offset=False,
replace=None,ifextended=False, singleside_bkg = False, fixwidth = False,
clobber=False, chatter=1):
'''Makes all the necessary calls to reduce the data.
Parameters
----------
RA, DEC : float
The Sky position (J2000) in **decimal degrees**
obsid : str
The observation ID number as a **String**. Typically that is
something like "00032331001" and should be part of your
grism filename which is something like "sw00032331001ugu_dt.img"
ext : int
number of the extension to process
kwargs : dict
optional keyword arguments, possible values are:
- **fit_second** : bool
fit the second order. Off since it sometimes causes problems when the
orders overlap completely. Useful for spectra in top part detector
- **background_lower** : list
instead of default background list offset from spectrum as list
of two numbers, like [20, 40]. Distance relative to spectrum
- **background_upper** : list
instead of default background list offset from spectrum as list
of two numbers, like [20, 40]. Distance relative to spectrum
- **offsetlimit** : None,int,[center,range]
Default behaviour is to determine automatically any required offset from
the predicted anchor position to the spectrum, and correct for that.
The automated method may fail in the case of a weak spectrum and strong zeroth
or first order next to the spectrum. Two methods are provided:
(1) provide a number which will be used to limit the allowed offset. If
within that limit no peak is identified, the program will stop and require
you to provide a manual offset value. Try small numbers like 1, -1, 3, etc..
(2) if you already know the approximate y-location of the spectrum at the
anchor x-position in the rotated small image strip around the spectrum, you
can give this with a small allowed range for fine tuning as a list of two
parameter values. The first value in the list must be the y-coordinate
(by default the spectrum falls close to y=100 pixels), the second parameter
the allowed adjustment to a peak value in pixels. For example, [105,2].
This will require no further interactive input, and the spectrum will be
extracted using that offset.
- **wheelpos**: {160,200,955,1000}
filter wheel position for the grism filter mode used. Helpful for
forcing Vgrism or UVgrism input when both are present in the directory.
160:UV Clocked, 200:UV Nominal, 955:V clocked, 1000:V nominal
- **zoom** : bool
when False, the whole extracted region is displayed, including zeroth
order when present.
- **clobber** : bool
When True, overwrite earlier output (see also outfile)
- **write_RMF** : bool
When True, write the rmf file (will take extra time due to large matrix operations)
- **use_lenticular_image** : bool
When True and a lenticular image is present, it is used. If False,
the grism image header WCS-S system will be used for the astrometry,
with an automatic call to uvotgraspcorr for refinement.
- **sumimage** : str
Name summed image generated using ``sum_Extimage()``, will extract spectrum
from summed image.
- **wr_outfile** : bool
If False, no output file is written
- **outfile** : path, str
Name of output file, other than automatically generated.
- **calfile** : path, str
calibration file name
- **fluxcalfile** : path, str
flux calibration file name or "CALDB" or None
- **predict2nd** : bool
predict the second order flux from the first. Overestimates in centre a lot.
- **skip_field_src** : bool
if True do not locate zeroth order positions. Can be used when no
internet connection is available or the USNO-B1 server causes problems.
- **optimal_extraction** : bool, obsolete
Do not use. Better results are obtained with the other implementation.
- **catspec** : path
optional full path to the catalog specification file for uvotgraspcorr.
- **get_curve** : bool or path
True: activate option to supply the curvature coefficients of all
orders by hand.
path: filename with coefficients of curvature
- **uvotgraspcorr_on** : bool
enable/disable rerun of uvotgraspcorr to update the WCS keywords
- **update_pnt** : bool
enable/disable update of the WCS keywords from the attitude file
(this is done prior to running uvotgraspcorr, if that is enabled)
- **fit_sigmas** : bool
fit the sigma of trackwidths if True (not implemented, always on)
- **get_sigma_poly** : bool
option to supply the polynomial for the sigma (not implemented)
- **lfilt1**, **lfilt2** : str
name of the lenticular filter before and after the grism exposure
(now supplied by fileinfo())
- **lfilt1_ext**, **lfilt2_ext** : int
extension of the lenticular filter (now supplied by fileinfo())
- **plot_img** : bool
plot the first figure with the det image
- **plot_raw** : bool
plot the raw spectrum data
- **plot_spec** : bool
plot the flux spectrum
- **highlight** : bool
add contours to the plots to highlight contrasts
- **chatter** : int
verbosity of program
- **set_maglimit** : int
specify a magnitude limit to search for background sources in the USNO-B1 catalog
- **background_template** : numpy 2D array
User-provided background template that will be used instead of
determining the background. Must be in counts. Size and alignment
must exactly match the detector image.
Returns
-------
None, (give_result=True) compounded data (Y0, Y1, Y2, Y3, Y4) which
are explained in the code, or (give_new_result=True) a data dictionary.
Notes
-----
**Quick Start**
`getSpec(ra,dec,obsid, ext,)`
should produce plots and output files
**Which directory?**
The program needs to be started from the CORRECT data directory.
The attitude file [e.g., "sw<OBSID>pat.fits" ]is needed!
A link or copy of the attitude file needs to be present in the directory
or "../../auxil/" directory as well.
**Global parameters**
These parameters can be reset, e.g., during a (i)python session, before calling getSpec.
- **trackwidth** : float
width spectral extraction in units of sigma. The default is trackwidth = 2.5
The alternative default is trackwidth = 1.0 which gives better results for
weak sources, or spectra with nearby contamination. However, the flux
calibration and coincidence-loss correction give currently inconsistent
results. When using trackwidth=1.0, rescale the flux to match trackwidth=2.5
which value was used for flux calibration and coincidence-loss correction.
- **give_result** : bool
set to False since a call to getSpec with this set will return all the
intermediate results. See returns
When the extraction slit is set to be straight ``curved="straight"`` it cuts off the UV part of the
spectrum for spectra located in the top left and bottom right of the image.
History
-------
Version 2011-09-22 NPMK(MSSL) : handle case with no lenticular filter observation
Version 2012-01-15 NPMK(MSSL) : optimal extraction is no longer actively supported until further notice
Version 2013-10-23 NPMK(MSSL) : fixed bug so uvotgraspcorr gives same accuracy as lenticular filter
Version 2014-01-01 NPMK(MSSL) : aperture correction for background added; output dictionary
Version 2014-07-23 NPMK(MSSL) : coi-correction using new calibrared coi-box and factor
Version 2014-08-04 NPMK(MSSL/UCL): expanded offsetlimit parameter with list option to specify y-range.
Version 2015-12-03 NPMK(MSSL/UCL): change input parameter 'get_curve' to accept a file name with coefficients
Version 2016-01-16 NPMK(MSSL/UCL): added options for background; disable automated centroiding of spectrum
Example
-------
from uvotpy.uvotgetspec import getSpec
from uvotpy import uvotgetspec
import os, shutil
indir1 = os.getenv('UVOTPY') +'/test'
indir2 = os.getcwd()+'/test/UVGRISM/00055900056/uvot/image'
shutil.copytree(indir1, os.getcwd()+'/test' )
getSpec( 254.7129625, 34.3148667, '00055900056', 1, offsetlimit=1,indir=indir2, clobber=True )
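The list form of offsetlimit described above can be used in the same call; as
an illustration only (the values are placeholders, not validated for this
dataset), if the spectrum is known to fall near y=105 in the rotated strip:
getSpec( 254.7129625, 34.3148667, '00055900056', 1, offsetlimit=[105,2], indir=indir2, clobber=True )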
'''
# (specfile, lfilt1_, lfilt1_ext_, lfilt2_, lfilt2_ext_, attfile), (method), \
# (Xphi, Yphi, date1), (dist12, ankerimg, ZOpos), expmap, bgimg, bg_limits_used, bgextra = Y0
#
#( (dis,spnet,angle,anker,anker2,anker_field,ank_c), (bg,bg1,bg2,extimg,spimg,spnetimg,offset),
# (C_1,C_2,img), hdr,m1,m2,aa,wav1 ) = Y1
#
#fit,(coef0,coef1,coef2,coef3),(bg_zeroth,bg_first,bg_second,bg_third),(borderup,borderdown),apercorr,expospec=Y2
#
#counts, variance, borderup, borderdown, (fractions,cnts,vars,newsigmas) = Y3
#
#wav2p, dis2p, flux2p, qual2p, dist12p = Y4[0]
#
# where,
#
#(present0,present1,present2,present3),(q0,q1,q2,q3), \
# (y0,dlim0L,dlim0U,sig0coef,sp_zeroth,co_zeroth),(y1,dlim1L,dlim1U,sig1coef,sp_first,co_first),\
# (y2,dlim2L,dlim2U,sig2coef,sp_second,co_second),(y3,dlim3L,dlim3U,sig3coef,sp_third,co_third),\
# (x,xstart,xend,sp_all,quality,co_back) = fit
#
# dis = dispersion with zero at ~260nm[UV]/420nm[V] ; spnet = background-subtracted spectrum from 'spnetimg'
# angle = rotation-angle used to extract 'extimg' ; anker = first order anchor position in DET coordinates
# anker2 = second order anker X,Y position ; anker_field = Xphi,Yphy input angles with respect to reference
# ank_c = X,Y position of axis of rotation (anker) in 'extimg'
# bg = mean background, smoothed, with sources removed
# bg1 = one-sided background, sources removed, smoothed ; bg2 = same for background opposite side
# extimg = image extracted of source and background, 201 pixels wide, all orders.
# spimg = image centered on first order position ; spnetimg = background-subtracted 'spimg'
# offset = offset of spectrum from expected position based on 'anchor' at 260nm[UVG]/420nm[VG], first order
# C_1 = dispersion coefficients [python] first order; C_2 = same for second order
# img = original image ;
# WC_lines positions for selected WC star lines ; hdr = header for image
# m1,m2 = index limits spectrum ; aa = indices spectrum (e.g., dis[aa])
# wav1 = wavelengths for dis[aa] first order (combine with spnet[aa])
#
# when wr_outfile=True the program produces a flux calibrated output file by calling uvotio.
# [fails if output file is already present and clobber=False]
#
# The background must be consistent with the width of the spectrum summed.
from uvotio import fileinfo, rate2flux, readFluxCalFile
from uvotplot import plot_ellipsoid_regions
if (type(RA) == np.ndarray) | (type(DEC) == np.array):
raise IOError("RA, and DEC arguments must be of float type ")
if type(offsetlimit) == list:
if len(offsetlimit) != 2:
raise IOError("offsetlimit list must be [center, distance from center] in pixels")
get_curve_filename = None
a_str_type = type(curved)
if chatter > 4 :
print ("\n*****\na_str_type = ",a_str_type)
print ("value of get_curve = ",get_curve)
print ("type of parameter get_curve is %s\n"%(type(get_curve)) )
print ("type curved = ",type(curved))
if type(get_curve) == a_str_type:
# file name: check this file is present
if os.access(get_curve,os.F_OK):
get_curve_filename = get_curve
get_curve = True
else:
raise IOError(
"ERROR: get_curve *%s* is not a boolean value nor the name of a file that is on the disk."
%(get_curve) )
elif type(get_curve) == bool:
if get_curve:
get_curve_filename = None
print("requires input of curvature coefficients")
elif type(get_curve) == type(None):
get_curve = False
else:
raise IOError("parameter get_curve should by type str or bool, but is %s"%(type(get_curve)))
# check environment
CALDB = os.getenv('CALDB')
if CALDB == '':
print('WARNING: The CALDB environment variable has not been set')
HEADAS = os.getenv('HEADAS')
if HEADAS == '':
print('WARNING: The HEADAS environment variable has not been set')
print('That is needed for the calls to uvot Ftools ')
#SCAT_PRESENT = os.system('which scat > /dev/null')
#if SCAT_PRESENT != 0:
# print('WARNING: cannot locate the scat program \nDid you install WCSTOOLS ?\n')
SESAME_PRESENT = os.system('which sesame > /dev/null')
#if SESAME_PRESENT != 0:
# print 'WARNING: cannot locate the sesame program \nDid you install the cdsclient tools?\n'
# fix some parameters
framtime = 0.0110329 # all grism images are taken in unbinned mode
splineorder=3
getzmxmode='spline'
smooth=50
testparam=None
msg = "" ; msg2 = "" ; msg4 = ""
attime = datetime.datetime.now()
logfile = 'uvotgrism_'+obsid+'_'+str(ext)+'_'+'_'+attime.isoformat()[0:19]+'.log'
if type(fluxcalfile) == bool: fluxcalfile = None
tempnames.append(logfile)
tempntags.append('logfile')
tempnames.append('rectext_spectrum.img')
tempntags.append('rectext')
lfiltnames=np.array(['uvw2','uvm2','uvw1','u','b','v','wh'])
ext_names =np.array(['uw2','um2','uw1','uuu','ubb','uvv','uwh'])
filestub = 'sw'+obsid
histry = ""
for x in sys.argv: histry += x + " "
Y0 = None
Y2 = None
Y3 = None
Y4 = None
Yfit = {}
Yout = {"coi_level":None} # output dictionary (2014-01-01; replace Y0,Y1,Y2,Y3)
lfilt1_aspcorr = "not initialized"
lfilt2_aspcorr = "not initialized"
qflag = quality_flags()
ZOpos = None
# parameters getSpec()
Yout.update({'indir':indir,'obsid':obsid,'ext':ext})
Yout.update({'ra':RA,'dec':DEC,'wheelpos':wheelpos})
if type(sumimage) == typeNone:
if background_template is not None:
# convert background_template to a dictionary
background_template = {'template':np.asarray(background_template),
'sumimg':False}
try:
ext = int(ext)
except:
print("fatal error in extension number: must be an integer value")
# locate related lenticular images
specfile, lfilt1_, lfilt1_ext_, lfilt2_, lfilt2_ext_, attfile = \
fileinfo(filestub,ext,directory=indir,wheelpos=wheelpos,chatter=chatter)
# set some flags and variables
lfiltinput = (lfilt1 != None) ^ (lfilt2 != None)
lfiltpresent = lfiltinput | (lfilt1_ != None) | (lfilt2_ != None)
if (type(lfilt1_) == typeNone) & (type(lfilt2_) == typeNone):
# ensure the output is consistent with no lenticular filter solution
use_lenticular_image = False
# translate
filt_id = {"wh":"wh","v":"vv","b":"bb","u":"uu","uvw1":"w1","uvm2":"m2","uvw2":"w2"}
lfiltflag = False
if ((type(lfilt1) == typeNone)&(type(lfilt1_) != typeNone)):
lfilt1 = lfilt1_
lfilt1_ext = lfilt1_ext_
if chatter > 0: print("lenticular filter 1 from search lenticular images"+lfilt1+"+"+str(lfilt1_ext))
lfiltflag = True
lfilt1_aspcorr = None
try:
hdu_1 = pyfits.getheader(indir+"/sw"+obsid+"u"+filt_id[lfilt1]+"_sk.img",lfilt1_ext)
lfilt1_aspcorr = hdu_1["ASPCORR"]
except:
hdu_1 = pyfits.getheader(indir+"/sw"+obsid+"u"+filt_id[lfilt1]+"_sk.img.gz",lfilt1_ext)
lfilt1_aspcorr = hdu_1["ASPCORR"]
if ((type(lfilt2) == typeNone)&(type(lfilt2_) != typeNone)):
lfilt2 = lfilt2_
lfilt2_ext = lfilt2_ext_
if chatter > 0: print("lenticular filter 2 from search lenticular images"+lfilt2+"+"+str(lfilt2_ext))
lfiltflag = True
lfilt2_aspcorr = None
try:
hdu_2 = pyfits.getheader(indir+"/sw"+obsid+"u"+filt_id[lfilt2]+"_sk.img",lfilt2_ext)
lfilt2_aspcorr = hdu_2["ASPCORR"]
except:
hdu_2 = pyfits.getheader(indir+"/sw"+obsid+"u"+filt_id[lfilt2]+"_sk.img.gz",lfilt2_ext)
lfilt2_aspcorr = hdu_2["ASPCORR"]
# report
if chatter > 4:
msg2 += "getSpec: image parameter values\n"
msg2 += "ra, dec = (%6.1f,%6.1f)\n" % (RA,DEC)
msg2 += "filestub, extension = %s[%i]\n"% (filestub, ext)
if lfiltpresent & use_lenticular_image:
msg2 += "first/only lenticular filter = "+lfilt1+" extension first filter = "+str(lfilt1_ext)+'\n'
msg2 += " Aspect correction keyword : %s\n"%(lfilt1_aspcorr)
if lfilt2_ext != None:
msg2 += "second lenticular filter = "+lfilt2+" extension second filter = "+str(lfilt2_ext)+'\n'
msg2 += " Aspect correction keyword : %s\n"%(lfilt2_aspcorr)
if not use_lenticular_image:
msg2 += "anchor position derived without lenticular filter\n"
msg2 += "spectrum extraction preset width = "+str(spextwidth)+'\n'
#msg2 += "optimal extraction "+str(optimal_extraction)+'\n'
hdr = pyfits.getheader(specfile,int(ext))
if chatter > -1:
msg += '\nuvotgetspec version : '+__version__+'\n'
msg += ' Position RA,DEC : '+str(RA)+' '+str(DEC)+'\n'
msg += ' Start date-time : '+str(hdr['date-obs'])+'\n'
msg += ' grism file : '+specfile.split('/')[-1]+'['+str(ext)+']\n'
msg += ' attitude file : '+attfile.split('/')[-1]+'\n'
if lfiltpresent & use_lenticular_image:
if ((lfilt1 != None) & (lfilt1_ext != None)):
msg += ' lenticular file 1: '+lfilt1+'['+str(lfilt1_ext)+']\n'
msg += ' aspcorr: '+lfilt1_aspcorr+'\n'
if ((lfilt2 != None) & (lfilt2_ext != None)):
msg += ' lenticular file 2: '+lfilt2+'['+str(lfilt2_ext)+']\n'
msg += ' aspcorr: '+lfilt2_aspcorr+'\n'
if not use_lenticular_image:
msg += "anchor position derived without lenticular filter\n"
if not 'ASPCORR' in hdr: hdr['ASPCORR'] = 'UNKNOWN'
Yout.update({'hdr':hdr})
tstart = hdr['TSTART']
tstop = hdr['TSTOP']
wheelpos = hdr['WHEELPOS']
expo = hdr['EXPOSURE']
expmap = [hdr['EXPOSURE']]
Yout.update({'wheelpos':wheelpos})
if 'FRAMTIME' not in hdr:
# compute the frametime from the CCD deadtime and deadtime fraction
#deadc = hdr['deadc']
#deadtime = 600*285*1e-9 # 600ns x 285 CCD lines seconds
#framtime = deadtime/(1.0-deadc)
framtime = 0.0110329
hdr.update('framtime',framtime,comment='frame time computed from deadc ')
Yout.update({'hdr':hdr})
if chatter > 1:
print("frame time computed from deadc - added to hdr")
print("with a value of ",hdr['framtime']," ",Yout['hdr']['framtime'])
if not 'detnam' in hdr:
hdr.update('detnam',str(hdr['wheelpos']))
msg += ' exposuretime : %7.1f \n'%(expo)
maxcounts = 1.1 * expo/framtime
if chatter > 0:
msg += ' wheel position : '+str(wheelpos)+'\n'
msg += ' roll angle : %5.1f\n'% (hdr['pa_pnt'])
msg += 'coincidence loss version: 2 (2014-07-23)\n'
msg += '======================================\n'
try:
if ( (np.abs(RA - hdr['RA_OBJ']) > 0.4) ^ (np.abs(DEC - hdr['DEC_OBJ']) > 0.4) ):
sys.stderr.write("\nWARNING: It looks like the input RA,DEC and target position in header are different fields\n")
except (RuntimeError, TypeError, NameError, KeyError):
pass
msg2 += " cannot read target position from header for verification\n"
if lfiltinput:
# the lenticular filter(s) were specified on the command line.
# check that the lenticular image and grism image are close enough in time.
if type(lfilt1_ext) == typeNone:
lfilt1_ext = int(ext)
lpos = np.where( np.array([lfilt1]) == lfiltnames )
if len(lpos[0]) < 1: sys.stderr.write("WARNING: illegal name for the lenticular filter\n")
lnam = ext_names[lpos]
lfile1 = filestub+lnam[0]+'_sk.img'
hdr_l1 = pyfits.getheader(lfile1,lfilt1_ext)
tstart1 = hdr_l1['TSTART']
tstop1 = hdr_l1['TSTOP']
if not ( (np.abs(tstart-tstop1) < 20) ^ (np.abs(tstart1-tstop) < 20) ):
sys.stderr.write("WARNING: check that "+lfile1+" matches the grism image\n")
if lfilt2 != None:
if type(lfilt2_ext) == typeNone:
lfilt2_ext = lfilt1_ext+1
lpos = np.where( np.array([lfilt2]) == lfiltnames )
if len(lpos[0]) < 1: sys.stderr.write("WARNING: illegal name for the lenticular filter\n")
lnam = ext_names[lpos]
lfile2 = filestub+lnam[0]+'_sk.img'
hdr_l2 = pyfits.getheader(lfile2,lfilt2_ext)
tstart2 = hdr_l2['TSTART']
tstop2 = hdr_l2['TSTOP']
if not ( (np.abs(tstart-tstop2) < 20) ^ (np.abs(tstart2-tstop) < 20) ):
sys.stderr.write("WARNING: check that "+lfile2+" matches the grism image\n")
if (not lfiltpresent) | (not use_lenticular_image):
method = "grism_only"
else:
method = None
if not senscorr: msg += "WARNING: No correction for sensitivity degradation applied.\n"
# get the USNO-B1 catalog data for the field, & find the zeroth orders
if (not skip_field_src):
if chatter > 2: print("============== locate zeroth orders due to field sources =============")
if wheelpos > 500: zeroth_blim_offset = 2.5
ZOpos = find_zeroth_orders(filestub, ext, wheelpos,indir=indir,
set_maglimit=set_maglimit,clobber="yes", chatter=chatter, )
# use for the ftools the downloaded usnob1 catalog in file "search.ub1" using the
# catspec parameter in the calls
if os.access('catalog.spec',os.F_OK) & (catspec == None):
catspec= 'catalog.spec'
# retrieve the input angle relative to the boresight
Xphi, Yphi, date1, msg3, lenticular_anchors = findInputAngle( RA, DEC, filestub, ext,
uvotgraspcorr_on=uvotgraspcorr_on, update_pnt=update_pnt, msg="", \
wheelpos=wheelpos, lfilter=lfilt1, lfilter_ext=lfilt1_ext, \
lfilt2=lfilt2, lfilt2_ext=lfilt2_ext, method=method, \
attfile=attfile, catspec=catspec, indir=indir, chatter=chatter)
Yout.update({"Xphi":Xphi,"Yphi":Yphi})
Yout.update({'lenticular_anchors':lenticular_anchors})
# read the anchor and dispersion out of the wavecal file
anker, anker2, C_1, C_2, angle, calibdat, msg4 = getCalData(Xphi,Yphi,wheelpos, date1, \
calfile=calfile, chatter=chatter)
hdrr = pyfits.getheader(specfile,int(ext))
if (hdrr['aspcorr'] == 'UNKNOWN') & (not lfiltpresent):
msg += "WARNING: No aspect solution found. Anchor uncertainty large.\n"
msg += "first order anchor position on detector in det coordinates:\n"
msg += "anchor1=(%8.2f,%8.2f)\n" % (anker[0],anker[1])
msg += "first order dispersion polynomial (distance anchor, \n"
msg += " highest term first)\n"
for k in range(len(C_1)):
msg += "DISP1_"+str(k)+"=%12.4e\n" % (C_1[k])
msg += "second order anchor position on detector in det coordinates:\n"
msg += "anchor2=(%8.2f,%8.2f)\n" % (anker2[0],anker2[1])
msg += "second order dispersion polynomial (distance anchor2,\n"
msg += " highest term first)\n"
for k in range(len(C_2)):
msg += "DISP2_"+str(k)+"=%12.4e\n" % (C_2[k])
#sys.stderr.write( "first order anchor = %s\n"%(anker))
#sys.stderr.write( "second order anchor = %s\n"%(anker2))
msg += "first order dispersion = %s\n"%(str(C_1))
msg += "second order dispersion = %s\n"%(str(C_2))
if chatter > 1:
sys.stderr.write( "first order dispersion = %s\n"%(str(C_1)) )
sys.stderr.write( "second order dispersion = %s\n"%(str(C_2)) )
msg += "lenticular filter anchor positions (det)\n"
msg += msg3
# override angle
if fixed_angle != None:
msg += "WARNING: overriding calibration file angle for extracting \n\t"\
"spectrum cal: "+str(angle)+'->'+str(fixed_angle)+" \n"
angle = fixed_angle
# override anchor position in det pixel coordinates
if anchor_position[0] != None:
cal_anker = anker
anker = np.array(anchor_position)
msg += "overriding anchor position with value [%8.1f,%8.1f]\n" % (anker[0],anker[1])
anker2 = anker2 -cal_anker + anker
msg += "overriding anchor position 2nd order with value [%8.1f,%8.1f]\n"%(anker2[0],anker2[1])
anker_field = np.array([Xphi,Yphi])
theta=np.zeros(5)+angle # use the angle from first order everywhere.
C_0 = np.zeros(3) # not in calibration file. Use uvotcal/zemax to get.
C_3 = np.zeros(3)
Cmin1 = np.zeros(3)
msg += "field coordinates:\n"
msg += "FIELD=(%9.4f,%9.4f)\n" % (Xphi,Yphi)
# order distance between anchors
dist12 = np.sqrt( (anker[0]-anker2[0])**2 + (anker[1]-anker2[1])**2 )
msg += "order distance 1st-2nd anchors :\n"
msg += "DIST12=%7.1f\n" % (dist12)
Yout.update({"anker":anker,"anker2":anker2,"C_1":C_1,"C_2":C_2,"theta":angle,"dist12":dist12})
# determine x,y locations of certain wavelengths on the image
# TBD: add curvature
if wheelpos < 500:
wavpnt = np.arange(1700,6800,slit_width)
else:
wavpnt = np.arange(2500,6600,slit_width)
dispnt=pixdisFromWave(C_1,wavpnt) # pixel distance to anchor
if chatter > 0: msg2 += 'first order angle at anchor point: = %7.1f\n'%(angle)
crpix = crpix1,crpix2 = hdr['crpix1'],hdr['crpix2']
crpix = np.array(crpix) # centre of image
ankerimg = anker - np.array([1100.5,1100.5])+crpix
xpnt = ankerimg[0] + dispnt*np.cos((180-angle)*np.pi/180)
ypnt = ankerimg[1] + dispnt*np.sin((180-angle)*np.pi/180)
msg += "1st order anchor on image at (%7.1f,%7.1f)\n"%(ankerimg[0],ankerimg[1])
if chatter > 4: msg += "Found anchor point; now extracting spectrum.\n"
if chatter > 2: print("==========Found anchor point; now extracting spectrum ========")
if type(offsetlimit) == typeNone:
if wheelpos > 300:
offsetlimit = 9
sys.stdout.write("automatically set the value for the offsetlimit = "+str(offsetlimit)+'\n')
# find position zeroth order on detector from WCS-S after update from uvotwcs
#if 'hdr' not in Yout:
# hdr = pyfits.getheader(specfile,int(ext))
# Yout.update({'hdr':hdr})
zero_xy_imgpos = [-1,-1]
if chatter > 1: print("zeroth order position on image...")
try:
wS =wcs.WCS(header=hdr,key='S',relax=True,)
zero_xy_imgpos = wS.wcs_world2pix([[RA,DEC]],0)
print("position not corrected for SIP = ", zero_xy_imgpos[0][0],zero_xy_imgpos[0][1])
zero_xy_imgpos = wS.sip_pix2foc(zero_xy_imgpos, 0)[0]
if chatter > 1:
"print zeroth order position on image:",zero_xy_imgpos
except:
pass
Yout.update({'zeroxy_imgpos':zero_xy_imgpos})
# provide some checks on background inputs:
if background_lower[0] != None:
background_lower = np.abs(background_lower)
if np.sum(background_lower) >= (slit_width-10):
background_lower = [None,None]
msg += "WARNING: background_lower set too close to edge image\n Using default\n"
if background_upper[0] != None:
background_upper = np.abs(background_upper)
if np.sum(background_upper) >= (slit_width-10):
background_upper = [None,None]
msg += "WARNING: background_upper set too close to edge image\n Using default\n"
# in case of summary file:
if (not skip_field_src) & (ZOpos == None):
if chatter > 2: print("DEBUG 802 ================== locate zeroth orders due to field sources =============")
if wheelpos > 500: zeroth_blim_offset = 2.5
try:
ZOpos = find_zeroth_orders(filestub, ext, wheelpos,indir=indir,
set_maglimit=set_maglimit,clobber="yes", chatter=chatter, )
except:
if type(sumimage) == typeNone:
print ("exception to call find_zeroth_orders : skip_field_src = ",skip_field_src)
pass
# use for the ftools the downloaded usnob1 catalog in file "search.ub1" using the
# catspec parameter in the calls
if os.access('catalog.spec',os.F_OK) & (catspec == None):
catspec= 'catalog.spec'
if (not skip_field_src):
Xim,Yim,Xa,Yb,Thet,b2mag,matched,ondetector = ZOpos
pivot_ori=np.array([(ankerimg)[0],(ankerimg)[1]])
Y_ZOpos={"Xim":Xim,"Yim":Yim,"Xa":Xa,"Yb":Yb,"Thet":Thet,"b2mag":b2mag,
"matched":matched,"ondetector":ondetector}
Yout.update({"ZOpos":Y_ZOpos})
else:
Yout.update({"ZOpos":None})
# find background, extract straight slit spectrum
if chatter > 3 : print ("DEBUG 827 compute background")
if sumimage != None:
# initialize parameters for extraction summed extracted image
print('reading summed image file : '+sumimage)
print('ext label for output file is set to : ', ext)
Y6 = sum_Extimage (None, sum_file_name=sumimage, mode='read')
extimg, expmap, exposure, wheelpos, C_1, C_2, dist12, anker, \
(coef0, coef1,coef2,coef3,sig0coef,sig1coef,sig2coef,sig3coef), hdr = Y6
if background_template != None:
background_template = {'extimg': background_template,
'sumimg': True}
if (background_template['extimg'].size != extimg.size):
print("ERROR")
print("background_template.size=",background_template['extimg'].size)
print("extimg.size=",extimg.size)
raise IOError("The template does not match the sumimage dimensions")
msg += "order distance 1st-2nd anchors :\n"
msg += "DIST12=%7.1f\n" % (dist12)
for k in range(len(C_1)):
msg += "DISP1_"+str(k)+"=%12.4e\n" % (C_1[k])
msg += "second order dispersion polynomial (distance anchor2,\n"
msg += " highest term first)\n"
for k in range(len(C_2)):
msg += "DISP2_"+str(k)+"=%12.4e\n" % (C_2[k])
print("first order anchor = ",anker)
print("first order dispersion = %s"%(str(C_1)))
print("second order dispersion = %s"%(str(C_2)))
tstart = hdr['tstart']
ank_c = [100,500,0,2000]
if type(offsetlimit) == typeNone:
offset = 0
elif type(offsetlimit) == list:
offset = offsetlimit[0]-96
ank_c[0] = offsetlimit[0]
else:
offset = offsetlimit # for sumimage used offsetlimit to set the offset
ank_c[0] = 96+offsetlimit
dis = np.arange(-500,1500)
img = extimg
# get background
bg, bg1, bg2, bgsig, bgimg, bg_limits_used, bgextra = findBackground(extimg,
background_lower=background_lower,
background_upper=background_upper,)
if singleside_bkg == 'bg1':
bg2 = bg1
elif singleside_bkg == 'bg2':
bg1 = bg2
else:
pass
skip_field_src = True
spnet = bg1 # placeholder
expo = exposure
maxcounts = exposure/0.01
anker2 = anker + [dist12,0]
spimg,spnetimg,anker_field = None, None, (0.,0.)
m1,m2,aa,wav1 = None,None,None,None
if type(outfile) == typeNone:
outfile='sum_image_'
Yfit.update({"coef0":coef0,"coef1":coef1,"coef2":coef2,"coef3":coef3,
"sig0coef":sig0coef,"sig1coef":sig1coef,"sig2coef":sig2coef,"sig3coef":sig3coef} )
Yout.update({"anker":anker,"anker2":None,
"C_1":C_1,"C_2":C_2,
"Xphi":0.0,"Yphi":0.0,
"wheelpos":wheelpos,"dist12":dist12,
"hdr":hdr,"offset":offset})
Yout.update({"background_1":bg1,"background_2":bg2})
dropout_mask = None
Yout.update({"zeroxy_imgpos":[1000,1000]})
else:
# default extraction
if chatter > 2 : print ("DEBUG 894 default extraction")
# start with a quick straight slit extraction
exSpIm = extractSpecImg(specfile,ext,ankerimg,angle,spwid=spextwidth,
background_lower=background_lower, background_upper=background_upper,
template = background_template, x_offset = anchor_x_offset, ank_c_0offset=ank_c_0offset,
offsetlimit=offsetlimit, replace=replace, chatter=chatter, singleside_bkg=singleside_bkg)
dis = exSpIm['dis']
spnet = exSpIm['spnet']
bg = exSpIm['bg']
bg1 = exSpIm['bg1']
bg2 = exSpIm['bg2']
bgsig = exSpIm['bgsigma']
bgimg = exSpIm['bgimg']
bg_limits_used = exSpIm['bg_limits_used']
bgextra = exSpIm['bgextras']
extimg = exSpIm['extimg']
spimg = exSpIm['spimg']
spnetimg = exSpIm['spnetimg']
offset = exSpIm['offset']
ank_c = exSpIm['ank_c']
if background_template != None:
background_template ={"extimg":exSpIm["template_extimg"]}
Yout.update({"template":exSpIm["template_extimg"]})
if exSpIm['dropouts']:
dropout_mask = exSpIm['dropout_mask']
else: dropout_mask = None
Yout.update({"background_1":bg1,"background_2":bg2})
#msg += "1st order anchor offset from spectrum = %7.1f\n"%(offset)
#msg += "anchor position in rotated extracted spectrum (%6.1f,%6.1f)\n"%(ank_c[1],ank_c[0])
calibdat = None # free the memory
if chatter > 2: print("============ straight slit extraction complete =================")
if np.max(spnet) < maxcounts: maxcounts = 2.0*np.max(spnet)
# initial limits spectrum (pixels)
m1 = ank_c[1]-400
if wheelpos > 500: m1 = ank_c[1]-370
if m1 < 0: m1 = 0
if m1 < (ank_c[2]+30): m1 = ank_c[2]+30
m2 = ank_c[1]+2000
if wheelpos > 500: m2 = ank_c[1]+1000
if m2 >= len(dis): m2 = len(dis)-2
if m2 > (ank_c[3]-40): m2=(ank_c[3]-40)
aa = list(range(int(m1),int(m2)))
wav1 = polyval(C_1,dis[aa])
# get grism det image
img = pyfits.getdata(specfile, ext)
if isinstance(replace,np.ndarray):
img = replace
try:
offset = np.asscalar(offset)
except:
pass
Yout.update({"offset":offset})
Zbg = bg, bg1, bg2, bgsig, bgimg, bg_limits_used, bgextra
net = extimg-bgextra[-1]
var = extimg.copy()
dims = np.asarray( img.shape )
dims = np.array([dims[1],dims[0]])
dims2 = np.asarray(extimg.shape)
dims2 = np.array([dims2[1],dims2[0]])
msg += "Lower background from y = %i pix\nLower background to y = %i pix\n" % (bg_limits_used[0],bg_limits_used[1])
msg += "Upper background from y = %i pix\nUpper background to y = %i pix\n" % (bg_limits_used[2],bg_limits_used[3])
msg += "TRACKWID =%4.1f\n" % (trackwidth)
# collect some results:
if sumimage == None:
Y0 = (specfile, lfilt1_, lfilt1_ext_, lfilt2_, lfilt2_ext_, attfile), (method), \
(Xphi, Yphi, date1), (dist12, ankerimg, ZOpos), expmap, bgimg, bg_limits_used, bgextra
else:
Y0 = None, None, None, (dist12, None, None), expmap, bgimg, bg_limits_used, bgextra
angle = 0.0
# curvature from input (TBD how - placeholder with raw_input)
# choose input coef or pick from plot
# choose order to do it for
if (get_curve & interactive) | (get_curve & (get_curve_filename != None)):
if chatter > 3 : print ("DEBUG 978 get user-provided curve coefficients and extract spectrum")
spextwidth = None
# grab coefficients
poly_1 = None
poly_2 = None
poly_3 = None
if get_curve_filename == None:
try:
poly_1 = eval(input("give coefficients of first order polynomial array( [X^3,X^2,X,C] )"))
poly_2 = eval(input("give coefficients of second order polynomial array( [X^2,X,C] )"))
poly_3 = eval(input("give coefficients of third order polynomial array( [X,C] )"))
except:
print("failed")
if (type(poly_1) != list) | (type(poly_2) != list) | (type(poly_3) != list):
print("poly_1 type = ",type(poly_1))
print("poly_2 type = ",type(poly_2))
print("poly_3 type = ",type(poly_3))
raise IOError("the coefficients must be a list")
poly_1 = np.asarray(poly_1)
poly_2 = np.asarray(poly_2)
poly_3 = np.asarray(poly_3)
else:
try:
curfile = rdList(get_curve_filename)
poly_1 = np.array(curfile[0][0].split(','),dtype=float)
poly_2 = np.array(curfile[1][0].split(','),dtype=float)
poly_3 = np.array(curfile[2][0].split(','),dtype=float)
except:
print("There seems to be a problem when readin the coefficients out of the file")
print("The format is a list of coefficient separated by comma's, highest order first")
print("The first line for the first order")
print("The second line for the secons order")
print("The third line for the third order")
print("like, \n1.233e-10,-7.1e-7,3.01e-3,0.0.\n1.233e-5,-2.3e-2,0.03.0\n1.7e-1,0.9\n")
print(get_curve_filename)
print(curfile)
print(poly_1)
print(poly_2)
print(poly_3)
raise IOError("ERROR whilst reading curvature polynomial from file\n")
print("Curvature coefficients were read in...\npoly_1: %s \npoly_2: %s \npoly_3: %s \n"%
(poly_1,poly_2,poly_3))
fitorder, cp2, (coef0,coef1,coef2,coef3), (bg_zeroth,bg_first,\
bg_second,bg_third), (borderup,borderdown), apercorr, expospec, msg, curved \
= curved_extraction(
extimg, ank_c, anker, wheelpos,
ZOpos=ZOpos, skip_field_sources=skip_field_src,
offsetlimit=offsetlimit,
predict_second_order=predict2nd,
background_template=background_template,
angle=angle, offset=offset,
poly_1=poly_1, poly_2=poly_2, poly_3=poly_3,
msg=msg, curved=curved,
outfull=True, expmap=expmap,
fit_second=fit_second,
fit_third=fit_second,
C_1=C_1,C_2=C_2,dist12=dist12,
dropout_mask=dropout_mask, ifmotion=ifmotion,
obsid=obsid,indir=indir,motion_file=motion_file,
ank_c_0offset=ank_c_0offset,
chatter=chatter,ifextended=ifextended,
fixwidth=fixwidth)
# fit_sigmas parameter needs passing
(present0,present1,present2,present3),(q0,q1,q2,q3), (
y0,dlim0L,dlim0U,sig0coef,sp_zeroth,co_zeroth),(
y1,dlim1L,dlim1U,sig1coef,sp_first,co_first),(
y2,dlim2L,dlim2U,sig2coef,sp_second,co_second),(
y3,dlim3L,dlim3U,sig3coef,sp_third,co_third),(
x,xstart,xend,sp_all,quality,co_back) = fitorder
# update the anchor y-coordinate
if chatter > 3 : print ("DEBUG 1048 update anchor coordinate\noriginal ank_c=%s\ny1=%s"%(ank_c,y1))
ank_c[0] = y1[int(ank_c[1])]
Yfit.update({"coef0":coef0,"coef1":coef1,"coef2":coef2,"coef3":coef3,
"bg_zeroth":bg_zeroth,"bg_first":bg_first,"bg_second":bg_second,"bg_third":bg_third,
"borderup":borderup,"borderdown":borderdown,
"sig0coef":sig0coef,"sig1coef":sig1coef,"sig2coef":sig2coef,"sig3coef":sig3coef,
"present0":present0,"present1":present1,"present2":present2,"present3":present3,
"q0":q0,"q1":q1,"q2":q2,"q3":q3,
"y0":y0,"dlim0L":dlim0L,"dlim0U":dlim0U,"sp_zeroth":sp_zeroth,"bg_zeroth":bg_zeroth,"co_zeroth":co_zeroth,
"y1":y1,"dlim1L":dlim1L,"dlim1U":dlim1U,"sp_first": sp_first, "bg_first": bg_first, "co_first": co_first,
"y2":y2,"dlim2L":dlim2L,"dlim2U":dlim2U,"sp_second":sp_second,"bg_second":bg_second,"co_second":co_second,
"y3":y3,"dlim3L":dlim3L,"dlim3U":dlim3U,"sp_third": sp_third, "bg_third": bg_third, "co_third":co_third,
"x":x,"xstart":xstart,"xend":xend,"sp_all":sp_all,"quality":quality,"co_back":co_back,
"apercorr":apercorr,"expospec":expospec})
Yout.update({"ank_c":ank_c,"extimg":extimg,"expmap":expmap})
# curvature from calibration
if spextwidth != None:
if chatter > 3 : print ("DEBUG 1067 get curve coefficients from cal file and extract spectrum ")
fitorder, cp2, (coef0,coef1,coef2,coef3), (bg_zeroth,bg_first,\
bg_second,bg_third), (borderup,borderdown) , apercorr, expospec, msg, curved \
= curved_extraction(
extimg,ank_c,anker, wheelpos,
ZOpos=ZOpos, skip_field_sources=skip_field_src,
offsetlimit=offsetlimit,
background_lower=background_lower,
background_upper=background_upper, \
background_template=background_template,\
angle=angle, offset=offset,
outfull=True, expmap=expmap,
msg = msg, curved=curved,
fit_second=fit_second,
fit_third=fit_second, C_1=C_1,C_2=C_2,dist12=dist12,
dropout_mask=dropout_mask, ifmotion=ifmotion,
obsid=obsid,indir=indir,motion_file=motion_file,
ank_c_0offset=ank_c_0offset,
chatter=chatter,ifextended=ifextended,
fixwidth=fixwidth)
(present0,present1,present2,present3),(q0,q1,q2,q3), \
(y0,dlim0L,dlim0U,sig0coef,sp_zeroth,co_zeroth),(
y1,dlim1L,dlim1U,sig1coef,sp_first,co_first),\
(y2,dlim2L,dlim2U,sig2coef,sp_second,co_second),(
y3,dlim3L,dlim3U,sig3coef,sp_third,co_third),\
(x,xstart,xend,sp_all,quality,co_back) = fitorder
Yfit.update({"coef0":coef0,"coef1":coef1,"coef2":coef2,"coef3":coef3,
"bg_zeroth":bg_zeroth,"bg_first":bg_first,"bg_second":bg_second,"bg_third":bg_third,
"borderup":borderup,"borderdown":borderdown,
"sig0coef":sig0coef,"sig1coef":sig1coef,"sig2coef":sig2coef,"sig3coef":sig3coef,
"present0":present0,"present1":present1,"present2":present2,"present3":present3,
"q0":q0,"q1":q1,"q2":q2,"q3":q3,
"y0":y0,"dlim0L":dlim0L,"dlim0U":dlim0U,"sp_zeroth":sp_zeroth,"bg_zeroth":bg_zeroth,"co_zeroth":co_zeroth,
"y1":y1,"dlim1L":dlim1L,"dlim1U":dlim1U,"sp_first": sp_first, "bg_first": bg_first, "co_first": co_first,
"y2":y2,"dlim2L":dlim2L,"dlim2U":dlim2U,"sp_second":sp_second,"bg_second":bg_second,"co_second":co_second,
"y3":y3,"dlim3L":dlim3L,"dlim3U":dlim3U,"sp_third": sp_third, "bg_third": bg_third, "co_third":co_third,
"x":x,"xstart":xstart,"xend":xend,"sp_all":sp_all,"quality":quality,"co_back":co_back,
"apercorr":apercorr,"expospec":expospec})
ank_c[0] = y1[int(ank_c[1])]
Yout.update({"ank_c":ank_c,"extimg":extimg,"expmap":expmap})
msg += "orders present:"
if present0: msg += "0th order, "
if present1: msg += "first order"
if present2: msg += ", second order"
if present3: msg += ", third order "
print('1224 CCCCCCCCCCCCC', coef1)
print(RA,DEC)
print(anker)
print(ank_c)
msg += '\nparametrized order curvature:\n'
if present0:
for k in range(len(coef0)):
msg += "COEF0_"+str(k)+"=%12.4e\n" % (coef0[k])
if present1:
for k in range(len(coef1)):
msg += "COEF1_"+str(k)+"=%12.4e\n" % (coef1[k])
if present2:
for k in range(len(coef2)):
msg += "COEF2_"+str(k)+"=%12.4e\n" % (coef2[k])
if present3:
for k in range(len(coef3)):
msg += "COEF3_"+str(k)+"=%12.4e\n" % (coef3[k])
msg += '\nparametrized width slit:\n'
if present0:
for k in range(len(sig0coef)):
msg += "SIGCOEF0_"+str(k)+"=%12.4e\n" % (sig0coef[k])
if present1:
for k in range(len(sig1coef)):
msg += "SIGCOEF1_"+str(k)+"=%12.4e\n" % (sig1coef[k])
if present2:
for k in range(len(sig2coef)):
msg += "SIGCOEF2_"+str(k)+"=%12.4e\n" % (sig2coef[k])
if present3:
for k in range(len(sig3coef)):
msg += "SIGCOEF3_"+str(k)+"=%12.4e\n" % (sig3coef[k])
if chatter > 3 : print ("DEBUG 1142 done spectral extraction, now calibrate")
offset = ank_c[0]-slit_width/2
msg += "best fit 1st order anchor offset from spectrum = %7.1f\n"%(offset)
msg += "anchor position in rotated extracted spectrum (%6.1f,%6.1f)\n"%(ank_c[1],y1[int(ank_c[1])])
msg += msg4
Yout.update({"offset":offset})
#2012-02-20 moved updateFitorder to curved_extraction
#if curved == "update":
# fit = fitorder2
#else:
# fit = fitorder
fit = fitorder
if optimal_extraction:
# development dropped, since mod8 causes slit width oscillations
# also requires a good second order flux and coi calibration for
# possible further development of order splitting.
# result in not consistent now.
print("Starting optimal extraction: This can take a few minutes ......\n\t "\
"........\n\t\t .............")
Y3 = get_initspectrum(net,var,fit,160,ankerimg,C_1=C_1,C_2=C_2,dist12=dist12,
predict2nd=predict2nd,
chatter=1)
counts, variance, borderup, borderdown, (fractions,cnts,vars,newsigmas) = Y3
# need to test that C_2 is valid here
if predict2nd:
Y4 = predict_second_order(dis,(sp_first-bg_first), C_1,C_2, dist12, quality,dlim1L, dlim1U,wheelpos)
wav2p, dis2p, flux2p, qual2p, dist12p = Y4[0]
# retrieve the effective area
Y7 = readFluxCalFile(wheelpos,anchor=anker,spectralorder=1,arf=fluxcalfile,msg=msg,chatter=chatter)
EffArea1 = Y7[:-1]
msg = Y7[-1]
Y7 = readFluxCalFile(wheelpos,anchor=anker,spectralorder=2,arf=None,msg=msg,chatter=chatter)
if type(Y7) == tuple:
EffArea2 = Y7[:-1]
else:
if type(Y7) != typeNone: msg = Y7
EffArea2 = None
# note that the output differs depending on parameters given, i.e., arf, anchor
Yout.update({"effarea1":EffArea1,"effarea2":EffArea2})
if interactive:
import matplotlib.pyplot as plt
if (plot_img) & (sumimage == None):
#plt.winter()
# make plot of model on image [figure 1]
#xa = np.where( (dis < 1400) & (dis > -300) )
bga = bg.copy()
fig1 = plt.figure(1); plt.clf()
img[img <=0 ] = 1e-16
plt.imshow(np.log(img),vmin=np.log(bga.mean()*0.1),vmax=np.log(bga.mean()*4))
levs = np.array([5,15,30,60,120,360]) * bg.mean()
if highlight: plt.contour(img,levels=levs)
# plot yellow wavelength marker
# TBD : add curvature
plt.plot(xpnt,ypnt,'+k',markersize=14)
if not skip_field_src:
plot_ellipsoid_regions(Xim,Yim,
Xa,Yb,Thet,b2mag,matched,ondetector,
pivot_ori,pivot_ori,dims,17.,)
if zoom:
#plt.xlim(np.max(np.array([0.,0.])),np.min(np.array([hdr['NAXIS1'],ankerimg[0]+400])))
#plt.ylim(np.max(np.array([0.,ankerimg[1]-400 ])), hdr['NAXIS2'])
plt.xlim(0,2000)
plt.ylim(0,2000)
else:
plt.xlim(0,2000)
plt.ylim(0,2000)
plt.savefig(indir+'/'+obsid+'_map.png',dpi=150)
#plt.show()
plt.close()
if (plot_raw):
#plt.winter()
nsubplots = 2
#if not fit_second: nsubplots=3
# make plot of spectrum [figure 2]
fig2 = plt.figure(2); plt.clf()
plt.subplots_adjust(top=1,hspace=0, wspace=0)
# image slice
ax21 = plt.subplot(nsubplots,1,1)
ac = -ank_c[1]
net[net<=0.] = 1e-16
#plt.imshow(np.log10(net),vmin=-0.8,vmax=0.8, #~FIXME:
# extent=(ac,ac+extimg.shape[1],0,extimg.shape[0]),
# origin='lower',cmap=plt.cm.winter)
plt.imshow(np.log10(net),vmin=-10,vmax=2,
extent=(ac,ac+extimg.shape[1],0,extimg.shape[0]),
origin='lower')#,cmap=plt.cm.winter)
#plt.imshow(extimg,vmin=0,vmax=50,
# extent=(ac,ac+extimg.shape[1],0,extimg.shape[0]),
# origin='lower')#,cmap=plt.cm.winter)
if highlight:
plt.contour(np.log10(net),levels=[1,1.3,1.7,2.0,3.0],
extent=(ac,ac+extimg.shape[1],0,extimg.shape[0]),
origin='lower')
#plt.imshow( extimg,vmin= (bg1.mean())*0.1,vmax= (bg1.mean()+bg1.std())*2, extent=(ac,ac+extimg.shape[1],0,extimg.shape[0]) )
#levels = np.array([5,10,20,40,70,90.])
#levels = spnet[ank_c[2]:ank_c[3]].max() * levels * 0.01
#if highlight: plt.contour(net,levels=levels,extent=(ac,ac+extimg.shape[1],0,extimg.shape[0]))
# cross_section_plot:
cp2 = cp2/np.max(cp2)*100
#plt.plot(ac+cp2+ank_c[1],np.arange(len(cp2)),'k',lw=2,alpha=0.6,ds='steps') #~TODO:
# plot zeroth orders
if not skip_field_src:
pivot= np.array([ank_c[1],ank_c[0]-offset])
#pivot_ori=ankerimg
mlim = 17.
if wheelpos > 500: mlim = 15.5
plot_ellipsoid_regions(Xim,Yim,Xa,Yb,Thet,b2mag,
matched,ondetector,
pivot,pivot_ori,
dims2,mlim,
img_angle=angle-180.0,ax=ax21)
# plot line on anchor location
#plt.plot([ac+ank_c[1],ac+ank_c[1]],[0,slit_width],'k',lw=2)
plt.plot(0,ank_c[0],'kx',markersize=5) #~TODO:
# plot position centre of orders
#if present0: plt.plot(ac+q0[0],y0[q0[0]],'k--',lw=1.2)
#plt.plot( ac+q1[0],y1[q1[0]],'k--',lw=1.2)
#if present2: plt.plot(ac+q2[0],y2[q2[0]],'k--',alpha=0.6,lw=1.2)
#if present3: plt.plot(ac+q3[0],y3[q3[0]],'k--',alpha=0.3,lw=1.2)
# plot borders slit region
if present0:
plt.plot(ac+q0[0],borderup [0,q0[0]],'r-')
plt.plot(ac+q0[0],borderdown[0,q0[0]],'r-')
if present1:
plt.plot(ac+q1[0],borderup [1,q1[0]],'r-',lw=1.2)
plt.plot(ac+q1[0],borderdown[1,q1[0]],'r-',lw=1.2)
if present2:
plt.plot(ac+q2[0],borderup [2,q2[0]],'r-',alpha=0.6,lw=1)
plt.plot(ac+q2[0],borderdown[2,q2[0]],'r-',alpha=0.6,lw=1)
if present3:
plt.plot(ac+q3[0],borderup [3,q3[0]],'r-',alpha=0.3,lw=1.2)
plt.plot(ac+q3[0],borderdown[3,q3[0]],'r-',alpha=0.3,lw=1.2)
# plot limits background
plt_bg = np.ones(len(q1[0]))
if (background_lower[0] == None) & (background_upper[0] == None):
background_lower = [0,50] ; background_upper = [slit_width-50,slit_width]
plt.plot(ac+q1[0],plt_bg*(background_lower[1]),'-k',lw=1.5 )
plt.plot(ac+q1[0],plt_bg*(background_upper[0]),'-k',lw=1.5 )
else:
if background_lower[0] != None:
plt.plot(ac+q1[0],plt_bg*(y1[int(ank_c[1])]-background_lower[0]),'-k',lw=1.5 )
plt.plot(ac+q1[0],plt_bg*(y1[int(ank_c[1])]-background_lower[1]),'-k',lw=1.5 )
elif background_lower[1] != None:
plt.plot(ac+q1[0],plt_bg*(background_lower[1]),'-k',lw=1.5 )
if background_upper[1] != None:
plt.plot(ac+q1[0],plt_bg*(y1[int(ank_c[1])]+background_upper[0]),'-k',lw=1.5 )
plt.plot(ac+q1[0],plt_bg*(y1[int(ank_c[1])]+background_upper[1]),'-k',lw=1.5 )
elif background_upper[0] != None:
plt.plot(ac+q1[0],plt_bg*(background_upper[0]),'-k',lw=1.5 )
# rescale, title
plt.ylim(0,slit_width)
#plt.ylim(50,150)
if not zoom:
xlim1 = ac+ank_c[2]
xlim2 = ac+ank_c[3]
else:
xlim1 = max(ac+ank_c[2], -420)
xlim2 = min(ac+ank_c[3],1400)
plt.xlim(xlim1,xlim2)
plt.title(obsid+'+'+str(ext))
# first order raw data plot
ax22 = plt.subplot(nsubplots,1,2)
plt.rcParams['legend.fontsize'] = 'small'
if curved == 'straight':
p1, = plt.plot( dis[ank_c[2]:ank_c[3]], spnet[ank_c[2]:ank_c[3]],'k',
ds='steps',lw=0.5,alpha=0.5,label='straight')
p2, = plt.plot( dis[ank_c[2]:ank_c[3]],
spextwidth*(bg1[ank_c[2]:ank_c[3]]+bg2[ank_c[2]:ank_c[3]])*0.5,
'b',alpha=0.5,label='background')
plt.legend([p1,p2],['straight','background'],loc=0,)
if curved != "straight":
p3, = plt.plot(x[q1[0]],(sp_first-bg_first)[q1[0]],'r',ds='steps',label='spectrum')
plt.plot(x[q1[0]],(sp_first-bg_first)[q1[0]],'k',alpha=0.2,ds='steps',label='_nolegend_')
p7, = plt.plot(x[q1[0]], bg_first[q1[0]],'y',alpha=0.5,lw=1.1,ds='steps',label='background')
# bad pixels:
qbad = np.where(quality[q1[0]] > 0)
p4, = plt.plot(x[qbad],(sp_first-bg_first)[qbad],'xk',markersize=4)
#p7, = plt.plot(x[q1[0]],(bg_first)[q1[0]],'r-',alpha=0.3,label='curve_bkg')
# annotation
#plt.legend([p3,p4,p7],['spectrum','suspect','background'],loc=0,)
plt.legend([p3,p7],['spectrum','background'],loc=0,)
maxbg = np.max(bg_first[q1[0]][np.isfinite(bg_first[q1[0]])])
topcnt = 1.2 * np.max([np.max(spnet[q1[0]]),maxbg, np.max((sp_first-bg_first)[q1[0]])])
plt.ylim(np.max([ -20, np.min((sp_first-bg_first)[q1[0]])]), np.min([topcnt, maxcounts]))
if optimal_extraction:
p5, = plt.plot(x[q1[0]],counts[1,q1[0]],'g',alpha=0.5,ds='steps',lw=1.2,label='optimal' )
p6, = plt.plot(x[q1[0]],counts[1,q1[0]],'k',alpha=0.5,ds='steps',lw=1.2,label='_nolegend_' )
p7, = plt.plot(x[q1[0]], bg_first[q1[0]],'y',alpha=0.7,lw=1.1,ds='steps',label='background')
plt.legend([p3,p5,p7],['spectrum','optimal','background'],loc=0,)
topcnt = 1.2 * np.max((sp_first-bg_first)[q1[0]])
ylim1,ylim2 = -10, np.min([topcnt, maxcounts])
plt.ylim( ylim1, ylim2 )
#plt.xlim(ank_c[2]-ank_c[1],ank_c[3]-ank_c[1])
plt.xlim(xlim1,xlim2)
plt.ylabel('1st order counts')
'''
# plot second order
ax23 = plt.subplot(nsubplots,1,3)
plt.rcParams['legend.fontsize'] = 'small'
#plt.xlim(ank_c[2],ank_c[3])
if fit_second:
if curved != 'straight':
p1, = plt.plot(x[q2[0]],(sp_second-bg_second)[q2[0]],'r',label='spectrum')
plt.plot(x[q2[0]],(sp_second-bg_second)[q2[0]],'k',alpha=0.2,label='_nolegend_')
p7, = plt.plot(x[q2[0]],(bg_second)[q2[0]],'y',alpha=0.7,lw=1.1,label='background')
qbad = np.where(quality[q2[0]] > 0)
p2, = plt.plot(x[qbad],(sp_second-bg_second)[qbad],'+k',alpha=0.3,label='suspect')
plt.legend((p1,p7,p2),('spectrum','background','suspect'),loc=2)
plt.ylim(np.max([ -100, np.min((sp_second-bg_second)[q2[0]])]), \
np.min([np.max((sp_second-bg_second)[q2[0]]), maxcounts]))
plt.xlim(ank_c[2]-ank_c[1],ank_c[3]-ank_c[1])
if optimal_extraction:
p3, = plt.plot(x[q2[0]],counts[2,q2[0]],'g',alpha=0.5,ds='steps',label='optimal' )
plt.legend((p1,p7,p2,p3),('spectrum','background','suspect','optimal',),loc=2)
#plt.ylim(np.max([ -10,np.min(counts[2,q2[0]]), np.min((sp_second-bg_second)[q2[0]])]),\
# np.min([np.max(counts[2,q2[0]]), np.max((sp_second-bg_second)[q2[0]]), maxcounts]))
plt.ylim( ylim1,ylim2 )
if predict2nd :
p4, = plt.plot(dis2p+dist12,flux2p, ds='steps',label='predicted')
p5, = plt.plot(dis2p[np.where(qual2p != 0)]+dist12,flux2p[np.where(qual2p != 0)],'+k',label='suspect',markersize=4)
if optimal_extraction & fit_second:
plt.legend((p1,p2,p3,p4,p5),('curved','suspect','optimal','predicted','suspect'),loc=2)
#plt.ylim(np.max([ -100,np.min(counts[2,q2[0]]), np.min((sp_second-bg_second)[q2[0]])]),\
# np.min([np.max(counts[2,q2[0]]), np.max((sp_second-bg_second)[q2[0]]), maxcounts]))
plt.ylim( ylim1,ylim2 )
elif optimal_extraction:
plt.legend((p1,p7,p4,p5),('curved','background','predicted','suspect'),loc=2)
plt.ylim(np.max([ -10, np.min((sp_second-bg_second)[q2[0]])]), \
np.min([np.max((sp_second-bg_second)[q2[0]]), maxcounts]))
elif fit_second:
plt.legend((p1,p2,p4,p5),('curved','suspect','predicted','suspect'),loc=2)
plt.ylim(np.max([ -10, np.min((sp_second-bg_second)[q2[0]])]), \
np.min([np.max((sp_second-bg_second)[q2[0]]), maxcounts]))
else:
plt.legend((p4,p5),('predicted','suspect'),loc=2)
plt.ylim(np.max([ -10, np.min((sp_second-bg_second)[q2[0]])]), \
np.min([np.max((sp_second-bg_second)[q2[0]]), maxcounts]))
plt.xlim(ank_c[2]-ank_c[1],ank_c[3]-ank_c[1])
plt.xlim(xlim1,xlim2)
plt.ylabel('2nd order counts')
'''
'''
if fit_second:
ax24 = plt.subplot(nsubplots,1,4)
plt.rcParams['legend.fontsize'] = 'small'
if (len(q3[0]) > 1) & (curved != "xxx"):
p1, = plt.plot(x[q3[0]],(sp_third-bg_third)[q3[0]],'r',label='spectrum')
plt.plot(x[q3[0]],(sp_third-bg_third)[q3[0]],'k',alpha=0.2,label='_nolegend_')
qbad = np.where(quality[q3[0]] > 0)
p2, = plt.plot(x[qbad],(sp_third-bg_third)[qbad],'xk',alpha=0.3,label='suspect')
p3, = plt.plot(x[q3[0]],bg_third[q3[0]],'y',label='background')
plt.legend([p1,p3,p2],['spectrum','background','suspect'],loc=2)
plt.ylim(np.max([ -100, np.min((sp_second-bg_second)[q3[0]])]),\
np.min([np.max((sp_third-bg_third)[q3[0]]), maxcounts]))
if optimal_extraction:
p4, = plt.plot(x[q3[0]],counts[3,q3[0]],'b',alpha=0.5,ds='steps',label='optimal' )
plt.legend([p1,p3,p2,p4],['spectrum','background','suspect','optimal',],loc=2)
#plt.ylim(np.max([ -100,np.min(counts[3,q3[0]]), np.min((sp_second-bg_second)[q3[0]])]),\
# np.min([np.max(counts[3,q3[0]]), np.max((sp_third-bg_third)[q3[0]]), maxcounts]))
plt.ylim( ylim1,ylim2 )
#plt.xlim(ank_c[2]-ank_c[1],ank_c[3]-ank_c[1])
plt.xlim(xlim1,xlim2)
plt.ylabel(u'3rd order counts')
plt.xlabel(u'pixel distance from anchor position')
'''
plt.savefig(indir+'/'+obsid+'_count.png',dpi=150)
#plt.show()
if (plot_spec):
#plt.winter()
# NEED the flux cal applied!
nsubplots = 1
if not fit_second:
nsubplots = 1
fig3 = plt.figure(3)
plt.clf()
wav1 = polyval(C_1,x[q1[0]])
ax31 = plt.subplot(nsubplots,1,1)
if curved != "xxx":
# PSF aperture correction applies on net rate, but background
# needs to be corrected to default trackwidth linearly
rate1 = ((sp_first[q1[0]]-bg_first[q1[0]] ) * apercorr[1,[q1[0]]]
/expospec[1,[q1[0]]]).flatten()
bkgrate1 = ((bg_first)[q1[0]] * (2.5/trackwidth)
/expospec[1,[q1[0]]]).flatten()
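# Illustrative note (not executed): the net source rate gets the PSF aperture
# correction `apercorr`, while the background appears to be rescaled to the
# default track width of 2.5 (sigma) via the factor (2.5/trackwidth), e.g.
# trackwidth=2.5 -> factor 1.0 (unchanged), trackwidth=1.0 -> factor 2.5.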
print("computing flux for plot; frametime =",framtime)
flux1,wav1,coi_valid1 = rate2flux(wav1,rate1, wheelpos,
bkgrate=bkgrate1,
co_sprate = (co_first[q1[0]]/expospec[1,[q1[0]]]).flatten(),
co_bgrate = (co_back [q1[0]]/expospec[1,[q1[0]]]).flatten(),
pixno=x[q1[0]],
#sig1coef=sig1coef, sigma1_limits=[2.6,4.0],
arf1=fluxcalfile, arf2=None, effarea1=EffArea1,
spectralorder=1, swifttime=tstart,
#trackwidth = trackwidth,
anker=anker,
#option=1, fudgespec=1.32,
frametime=framtime,
debug=False,chatter=1)
#flux1_err = 0.5*(rate2flux(,,rate+err,,) - rate2flux(,,rate-err,,))
p1, = plt.plot(wav1[np.isfinite(flux1)],flux1[np.isfinite(flux1)],
color='darkred',label=u'curved')
p11, = plt.plot(wav1[np.isfinite(flux1)&(coi_valid1==False)],
flux1[np.isfinite(flux1)&(coi_valid1==False)],'.',
color='lawngreen',
label="too bright")
# PROBLEM quality flags !!!
qbad1 = np.where((quality[np.array(x[q1[0]],dtype=int)] > 0) & (quality[np.array(x[q1[0]],dtype=int)] < 16))
qbad2 = np.where((quality[np.array(x[q1[0]],dtype=int)] > 0) & (quality[np.array(x[q1[0]],dtype=int)] == qflag.get("bad")))
plt.legend([p1,p11],[u'calibrated spectrum',u'too bright - not calibrated'])
if len(qbad2[0]) > 0:
p2, = plt.plot(wav1[qbad2],flux1[qbad2],
'+k',markersize=4,label=u'bad data')
plt.legend([p1,p2],[u'curved',u'bad data'])
plt.ylabel(u'1st order flux $(erg\ cm^{-2} s^{-1} \AA^{-1})$')
# find reasonable limits flux
get_flux_limit = flux1[int(len(wav1)*0.3):int(len(wav1)*0.7)]
get_flux_limit[get_flux_limit==np.inf] = np.nan
get_flux_limit[get_flux_limit==-np.inf]= np.nan
qf = np.nanmax(get_flux_limit)
if qf > 2e-12:
qf = 2e-12
plt.ylim(0.001*qf,1.2*qf)
plt.xlim(1600,6000)
if optimal_extraction: # no longer supported (2013-04-24)
print("OPTIMAL EXTRACTION IS NO LONGER SUPPORTED")
wav1 = np.polyval(C_1,x[q1[0]])
#flux1 = rate2flux(wav1, counts[1,q1[0]]/expo, wheelpos, spectralorder=1, arf1=fluxcalfile)
flux1,wav1,coi_valid1 = rate2flux(wav1,counts[1,q1[0]]/expo, wheelpos, bkgrate=bkgrate1,
co_sprate = (co_first[q1[0]]/expospec[1,[q1[0]]]).flatten(),
co_bgrate = (co_back [q1[0]]/expospec[1,[q1[0]]]).flatten(),
pixno=x[q1[0]], #sig1coef=sig1coef, sigma1_limits=[2.6,4.0],
arf1=fluxcalfile, arf2=None, spectralorder=1, swifttime=tstart,
#trackwidth = trackwidth,
anker=anker, #option=1, fudgespec=1.32,
frametime=framtime,
debug=False,chatter=1)
p3, = plt.plot(wav1, flux1,'g',alpha=0.5,ds='steps',lw=2,label='optimal' )
p4, = plt.plot(wav1,flux1,'k',alpha=0.5,ds='steps',lw=2,label='_nolegend_' )
#plt.legend([p1,p2,p3],['curved','suspect','optimal'],loc=0,)
plt.legend([p1,p3],['curved','optimal'],loc=0,)
qf = (flux1 > 0.) & (flux1 < 1.0e-11)
plt.ylim( -0.01*np.max(flux1[qf]), 1.2*np.max(flux1[qf]) )
plt.ylabel(u'1st order count rate')
plt.xlim(np.min(wav1)-10,np.max(wav1))
plt.title(obsid+'+'+str(ext))
'''
if fit_second:
ax32 = plt.subplot(nsubplots,1,2)
plt.plot([1650,3200],[0,1])
plt.text(2000,0.4,'NO SECOND ORDER DATA',fontsize=16)
if curved != 'xxx':
wav2 = polyval(C_2,x[q2[0]]-dist12)
rate2 = ((sp_second[q2[0]]-bg_second[q2[0]])*
apercorr[2,[q2[0]]].flatten()/expospec[2,[q2[0]]].flatten() )
bkgrate2 = ((bg_second)[q2[0]] * (2.5/trackwidth)
/expospec[2,[q2[0]]]).flatten()
flux2,wav2,coi_valid2 = rate2flux(wav2, rate2, wheelpos,
bkgrate=bkgrate2,
co_sprate = (co_second[q2[0]]/expospec[2,[q2[0]]]).flatten(),
co_bgrate = (co_back [q2[0]]/expospec[2,[q2[0]]]).flatten(),
pixno=x[q2[0]],
arf1=fluxcalfile, arf2=None,
frametime=framtime, effarea2=EffArea2,
spectralorder=2,swifttime=tstart,
anker=anker2,
debug=False,chatter=1)
#flux1_err = rate2flux(wave,rate_err, wheelpos, spectralorder=1,)
plt.cla()
print('#############################')
print(wav2[100],flux2[100],wav2,flux2)
p1, = plt.plot(wav2,flux2,'r',label='curved')
plt.plot(wav2,flux2,'k',alpha=0.2,label='_nolegend_')
qbad1 = np.where((quality[np.array(x[q2[0]],dtype=int)] > 0) & (quality[np.array(x[q2[0]],dtype=int)] < 16))
p2, = plt.plot(wav2[qbad1],flux2[qbad1],'+k',markersize=4,label='suspect data')
plt.legend(['uncalibrated','suspect data'])
plt.ylabel(u'estimated 2nd order flux')
plt.xlim(1600,3200)
qf = (flux1 > 0.) & (flux1 < 1.0e-11)
if np.sum(qf[0]) > 0:
plt.ylim( -0.01*np.max(flux1[qf]), 1.2*np.max(flux1[qf]) )
#else: plt.ylim(1e-16,2e-12)
else: plt.ylim(1e-12,1e-11)
# final fix to limits of fig 3,1
y31a,y31b = ax31.get_ylim()
setylim = False
if y31a < 1e-16:
y31a = 1e-16
setylim = True
if y31b > 1e-12:
y31b = 1e-12
setylim = True
if setylim: ax31.set_ylim(bottom=y31a,top=y31b)
#
'''
plt.xlabel(u'$\lambda(\AA)$',fontsize=16)
plt.savefig(indir+'/'+obsid+'_flux.png',dpi=150)
# to plot the three figures
#plt.show()
# output parameter
Y1 = ( (dis,spnet,angle,anker,anker2,anker_field,ank_c), (bg,bg1,bg2,extimg,spimg,spnetimg,offset),
(C_1,C_2,img), hdr,m1,m2,aa,wav1 )
# output parameter
Y2 = fit, (coef0,coef1,coef2,coef3), (bg_zeroth,bg_first,
bg_second,bg_third), (borderup,borderdown), apercorr, expospec
Yout.update({"Yfit":Yfit})
# writing output to a file
#try:
if wr_outfile: # write output file
if ((chatter > 0) & (not clobber)): print("trying to write output files")
import uvotio
if (curved == 'straight') & (not optimal_extraction):
ank_c2 = np.copy(ank_c) ; ank_c2[1] -= m1
F = uvotio.wr_spec(RA,DEC,filestub,ext,
hdr,anker,anker_field[0],anker_field[1],
dis[aa],wav1,
spnet[aa]/expo,bg[aa]/expo,
bg1[aa]/expo,bg2[aa]/expo,
offset,ank_c2,extimg, C_1,
history=None,chatter=1,
clobber=clobber,
calibration_mode=calmode,
interactive=interactive)
elif not optimal_extraction:
if fileversion == 2:
Y = Yout
elif fileversion == 1:
Y = (Y0,Y1,Y2,Y4)
F = uvotio.writeSpectrum(RA,DEC,filestub,ext, Y,
fileoutstub=outfile,
arf1=fluxcalfile, arf2=None,
fit_second=fit_second,
write_rmffile=write_RMF, fileversion=1,
used_lenticular=use_lenticular_image,
history=msg,
calibration_mode=calmode,
chatter=chatter,
clobber=clobber )
elif optimal_extraction:
Y = (Y0,Y1,Y2,Y3,Y4)
F = uvotio.OldwriteSpectrum(RA,DEC,filestub,ext, Y, mode=2,
quality=quality, interactive=False,fileout=outfile,
updateRMF=write_rmffile, \
history=msg, chatter=5, clobber=clobber)
#except (RuntimeError, IOError, ValueError):
# print "ERROR writing output files. Try to call uvotio.wr_spec."
# pass
# clean up fake file
if tempntags.__contains__('fakefilestub'):
filestub = tempnames[tempntags.index('fakefilestub')]
os.system('rm '+indir+filestub+'ufk_??.img ')
# update Figure 3 to use the flux...
# TBD
# write the summary
sys.stdout.write(msg)
sys.stdout.write(msg2)
flog = open(logfile,'a')
flog.write(msg)
flog.write(msg2)
flog.close()
#plt.show()
if give_result: return Y0, Y1, Y2, Y3, Y4
if give_new_result: return Yout
def extractSpecImg(file,ext,anker,angle,anker0=None,anker2=None, anker3=None,\
searchwidth=35,spwid=13,offsetlimit=None, fixoffset=None,
background_lower=[None,None], background_upper=[None,None],
template=None, x_offset = False, ank_c_0offset=False, replace=None,
clobber=True,chatter=2,singleside_bkg=False):
'''
extract the grism image of spectral orders plus background
using the reference point at 2600A in first order.
Parameters
----------
file : str
input file location
ext : int
extension of image
anker : list, ndarray
X,Y coordinates of the 2600A (1) point on the image in image coordinates
angle : float
angle of the spectrum at 2600A in first order from zemax e.g., 28.8
searchwidth : float
find spectrum with this possible offset ( in crowded fields
it should be set to a smaller value)
template : dictionary
template for the background.
use_rectext : bool
If True then the HEADAS uvotimgrism program rectext is used to extract the image
This is a better way than using ndimage.rotate() which does some weird smoothing.
offsetlimit : None, float/int, list
if None, search for y-offset predicted anchor to spectrum using searchwidth
if float/int number, search for offset only up to a distance as given from y=100
if list, two elements, no more. [y-value, delta-y] for search of offset.
if delta-y < 1, fixoffset = y-value.
History
-------
2011-09-05 NPMK changed interpolation in rotate to linear, added a mask image to
make sure to keep track of the new pixel area.
2011-09-08 NPMK incorporated rectext as new extraction and removed interactive plot,
curved, and optimize which are now elsewhere.
2014-02-28 Add template for the background as an option
2014-08-04 add option to provide a 2-element list for the offsetlimit to constrain
the offset search range.
'''
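# Illustrative call sketch (commented out, not executed). The file name and
# anchor below are hypothetical placeholders; they only show the three forms
# of `offsetlimit` described in the docstring above:
#
# out = extractSpecImg('sw00032911002ugu_dt.img', 1, [1100.5, 1020.2], 35.1,
#                      offsetlimit=None)      # search over +/- searchwidth around default y
# out = extractSpecImg('sw00032911002ugu_dt.img', 1, [1100.5, 1020.2], 35.1,
#                      offsetlimit=20)        # limit the offset search to 20 pix
# out = extractSpecImg('sw00032911002ugu_dt.img', 1, [1100.5, 1020.2], 35.1,
#                      offsetlimit=[96, 0.5]) # delta-y < 1 : offset fixed at y=96
# dis, spnet = out['dis'], out['spnet']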
import numpy as np
import os, sys
try:
from astropy.io import fits as pyfits
except:
import pyfits
import scipy.ndimage as ndimage
#out_of_img_val = -1.0123456789 now a global
Tmpl = (template != None)
if Tmpl:
if template['sumimg']:
raise IOError("extractSpecImg should not be called when there is sumimage input")
if chatter > 4:
print('extractSpecImg parameters: file, ext, anker, angle')
print(file,ext)
print(anker,angle)
print('searchwidth,chatter,spwid,offsetlimit, :')
print(searchwidth,chatter,spwid,offsetlimit)
img, hdr = pyfits.getdata(file,ext,header=True)
if isinstance(replace,np.ndarray):
img = replace
# wcs_ = wcs.WCS(header=hdr,) # detector coordinates DETX,DETY in mm
# wcsS = wcs.WCS(header=hdr,key='S',relax=True,) # TAN-SIP coordinate type
if Tmpl:
if (img.shape != template['template'].shape) :
print("ERROR")
print("img.shape=", img.shape)
print("background_template.shape=",template['template'].shape)
raise IOError("The template array does not match the image")
wheelpos = hdr['WHEELPOS']
if chatter > 4: print('wheelpos:', wheelpos)
if not use_rectext:
# now we want to extend the image array and place the anchor at the centre
s1 = 0.5*img.shape[0]
s2 = 0.5*img.shape[1]
d1 = -(s1 - anker[1]) # distance of anker to centre img
d2 = -(s2 - anker[0])
n1 = 2.*abs(d1) + img.shape[0] + 400 # extend img with 2.x the distance of anchor
n2 = 2.*abs(d2) + img.shape[1] + 400
#return img, hdr, s1, s2, d1, d2, n1, n2
if 2*int(n1/2) == int(n1): n1 = n1 + 1
if 2*int(n2/2) == int(n2): n2 = n2 + 1
c1 = n1 / 2 - anker[1]
c2 = n2 / 2 - anker[0]
n1 = int(n1)
n2 = int(n2)
c1 = int(c1)
c2 = int(c2)
if chatter > 3: print('array info : ',img.shape,d1,d2,n1,n2,c1,c2)
# the anchor is now centered in array a; initialize a with out_of_img_val
a = np.zeros( (n1,n2), dtype=float) + cval
if Tmpl : a_ = np.zeros( (n1,n2), dtype=float) + cval
# load array in middle
a[c1:c1+img.shape[0],c2:c2+img.shape[1]] = img
if Tmpl: a_[c1:c1+img.shape[0],c2:c2+img.shape[1]] = template['template']
# patch outer regions with something like mean to get rid of artifacts
mask = abs(a - cval) < 1.e-8
# Kludge:
# test image for bad data and make a fix by putting the image average in its place
dropouts = False
aanan = np.isnan(a) # process further for flagging
aagood = np.isfinite(a)
aaave = a[np.where(aagood)].mean()
a[np.where(aanan)] = aaave
if len( np.where(aanan)[0]) > 0 :
dropouts = True
print("extractSpecImg WARNING: BAD IMAGE DATA fixed by setting to mean of good data whole image ")
# now we want to rotate the array to have the dispersion in the x-direction
if angle < 40. :
theta = 180.0 - angle
else: theta = angle
if not use_rectext:
b = ndimage.rotate(a,theta,reshape = False,order = 1,mode = 'constant',cval = cval)
if Tmpl:
b_ = ndimage.rotate(a_,theta,reshape = False,order = 1,mode = 'constant',cval = cval)
if dropouts: #try to rotate the boolean image
aanan = ndimage.rotate(aanan,theta,reshape = False,order = 1,mode = 'constant',)
e2 = int(0.5*b.shape[0])
c = b[e2-int(slit_width/2):e2+int(slit_width/2),:]
if Tmpl: c_ = b_[e2-int(slit_width/2):e2+int(slit_width/2),:]
if dropouts: aanan = aanan[e2-int(slit_width/2):e2+int(slit_width/2),:]
ank_c = [ (c.shape[0]-1)/2+1, (c.shape[1]-1)/2+1 , 0, c.shape[1]] #~TODO:
if x_offset == False:
pass
else:
ank_c[1] += x_offset
if use_rectext:
# history: rectext is a fortran code that maintains proper density of quantity when
# performing a rotation.
# build the command for extracting the image with rectext
outfile= tempnames[tempntags.index('rectext')]
cosangle = np.cos(theta/180.*np.pi)
sinangle = np.sin(theta/180.*np.pi)
# distance anchor to pivot
dx_ank = - (hdr['naxis1']-anker[0])/cosangle + slit_width/2*sinangle #~FIXME: I am not sure if this is "+ 100.*sinangle" or "+ slit_width/2*sinangle"
if np.abs(dx_ank) > 760: dx_ank = 760 # include zeroth order (375 for just first order)
# distance to end spectrum
dx_2 = -anker[0] /cosangle + slit_width/2/sinangle # to lhs edge #~FIXME: I am not sure if this is "+ 100.*sinangle" or "+ slit_width/2*sinangle"
dy_2 = (hdr['naxis2']-anker[1])/sinangle - slit_width/2/cosangle # to top edge #~FIXME: I am not sure if this is "+ 100.*sinangle" or "+ slit_width/2*sinangle"
dx = int(dx_ank + np.array([dx_2,dy_2]).min() ) # length rotated spectrum
dy = slit_width # width rotated spectrum
# pivot x0,y0
x0 = anker[0] - dx_ank*cosangle + dy/2.*sinangle
y0 = anker[1] - dx_ank*sinangle - dy/2.*cosangle
command= "rectext infile="+file+"+"+str(ext)
command+=" outfile="+outfile
command+=" angle="+str(theta)+" width="+str(dx)
command+=" height="+str(dy)+" x0="+str(x0)+" y0="+str(y0)
command+=" null="+str(cval)
command+=" chatter=5 clobber=yes"
print(command)
os.system(command)
c = extimg = pyfits.getdata(outfile,0)
ank_c = np.array([int(slit_width/2),dx_ank,0,extimg.shape[1]])
# out_of_img_val = 0.
if clobber:
os.system("rm "+outfile)
if Tmpl:
raise RuntimeError("background_template cannot be used with the use_rectext option")
# version 2016-01-16 revision:
# the background can be extracted via a method from the strip image
#
# extract the strips with the background on both sides, and the spectral orders
# find optimised place of the spectrum
# first find parts not off the detector -> 'qofd'
eps1 = 1e-15 # remainder after resampling for intel-MAC OSX system (could be jacked up)
qofd = np.where( abs(c[int(slit_width/2),:] - cval) > eps1 )
# define constants for the spectrum in each mode
if wheelpos < 300: # UV grism
disrange = 150 # perhaps make parameter in call?
disscale = 10 # ditto
minrange = disrange/10 # 300 is maximum
maxrange = np.array([disrange*disscale,c.shape[1]-ank_c[1]-2]).min() # 1200 is most of the spectrum
else: # V grism
disrange = 120 # perhaps make parameter in call?
disscale = 5 # ditto
minrange = np.array([disrange/2,ank_c[1]-qofd[0].min() ]).max() # 300 is maximum
maxrange = np.array([disrange*disscale,c.shape[1]-ank_c[1]-2,qofd[0].max()-ank_c[1]]).min() # 600 is most of the spectrum
if chatter > 1:
#print 'image was rotated; anchor in extracted image is ', ank_c[:2]
#print 'limits spectrum are ',ank_c[2:]
print('finding location spectrum from a slice around anchor x-sized:',minrange,':',maxrange)
print('offsetlimit = ', offsetlimit)
d = (c[:,int(ank_c[1]-minrange):int(ank_c[1]+maxrange)]).sum(axis=1).squeeze()
if len(qofd[0]) > 0:
ank_c[2] = min(qofd[0])
ank_c[3] = max(qofd[0])
else:
ank_c[2] = -1
ank_c[3] = -1
# y-position of anchor spectrum in strip image (allowed y = [50,150], but search only in
# the range defined by searchwidth (default=35) )
y_default=int(slit_width/2) # reference y
if (type(offsetlimit) == list):
if (len(offsetlimit)==2):
# sane y_default
if (offsetlimit[0] > 50) & (offsetlimit[0] < 150):
y_default=int(offsetlimit[0]+0.5) # round to nearest pixel
else:
raise IOError(("parameter offsetlimit[0]=%i, must be in range [51,149]."+
"\nIs the aspect correction right (in reference images)?")%(offsetlimit[0]))
if offsetlimit[1] < 1:
fixoffset = offsetlimit[0]-int(slit_width/2)
else:
searchwidth=int(offsetlimit[1]+0.5)
if fixoffset == None:
offset = ( (np.where(d == (d[y_default-searchwidth:y_default+searchwidth]).max() ) )[0] - y_default )
if chatter>0: print('offset found from y=%i is %i '%(y_default ,-offset))
if len(offset) == 0:
print('offset problem: offset set to zero')
offset = 0
offset = offset[0]
if (type(offsetlimit) != list):
if (offsetlimit != None):
if abs(offset) >= offsetlimit:
offset = 0
print('This is larger than the offsetlimit. The offset has been set to 0')
if interactive:
offset = float(input('Please give a value for the offset: '))
else:
offset = fixoffset
if ank_c_0offset == True:
offset = 0
if chatter > 0:
print('offset used is : ', -offset)
if (type(offsetlimit) == list) & (fixoffset == None):
ank_c[0] = offsetlimit[0]-offset
else:
ank_c[0] += offset
print('image was rotated; anchor in extracted image is [', ank_c[0],',',ank_c[1],']')
print('limits spectrum on image in dispersion direction are ',ank_c[2],' - ',ank_c[3])
# Straight slit extraction (most basic extraction, no curvature):
sphalfwid = int(spwid-0.5)/2
splim1 = int(slit_width/2)+offset-sphalfwid+1
splim2 = splim1 + spwid
spimg = c[int(splim1):int(splim2),:]
if chatter > 0:
print('Extraction limits across dispersion: splim1,splim2 = ',splim1,' - ',splim2)
bg, bg1, bg2, bgsigma, bgimg, bg_limits, bgextras = findBackground(c,
background_lower=background_lower, background_upper=background_upper,yloc_spectrum=ank_c[0] )
if singleside_bkg == 'bg1':
bg2 = bg1
elif singleside_bkg == 'bg2':
bg1 = bg2
else:
pass
bgmean = bg
bg = 0.5*(bg1+bg2)
if chatter > 0: print('Background : %10.2f +/- %10.2f (1-sigma error)'%( bgmean,bgsigma))
# define the dispersion with origin at the projected position of the
# 2600 point in first order
dis = np.arange((c.shape[1]),dtype=np.int16) - ank_c[1]
# remove the background
#bgimg_ = 0.* spimg.copy()
#for i in range(bgimg_.shape[0]): bgimg_[i,:]=bg
spnetimg = spimg - bg
spnet = spnetimg.sum(axis=0)
result = {"dis":dis,"spnet":spnet,"bg":bg,"bg1":bg1,
"bg2":bg2,"bgsigma":bgsigma,"bgimg":bgimg,
"bg_limits_used":bg_limits,"bgextras":bgextras,
"extimg":c,"spimg":spimg,"spnetimg":spnetimg,
"offset":offset,"ank_c":ank_c,'dropouts':dropouts}
if dropouts: result.update({"dropout_mask":aanan})
if Tmpl: result.update({"template_extimg":c_})
return result
def sigclip1d_mask(array1d, sigma, badval=None, conv=1e-5, maxloop=30):
"""
sigma clip array around mean, using number of sigmas 'sigma'
after masking the badval given, requiring finite numbers, and
either finish when converged or maxloop is reached.
return good mask
"""
import numpy as np
y = np.asarray(array1d)
if badval != None:
valid = (np.abs(y - badval) > 1e-6) & np.isfinite(y)
else:
valid = np.isfinite(y)
yv = y[valid]
mask = yv < (yv.mean() + sigma * yv.std())
ym_ = yv.mean()
ymean = yv[mask].mean()
yv = yv[mask]
while (np.abs(ym_-ymean) > conv*np.abs(ymean)) & (maxloop > 0):
ym_ = ymean
mask = ( yv < (yv.mean() + sigma * yv.std()) )
yv = yv[mask]
ymean = yv.mean()
maxloop -= 1
valid[valid] = y[valid] < ymean + sigma*yv.std()
return valid
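# Minimal usage sketch (commented out; the data values are made up). Note that
# the clipping above is one-sided: only points above mean + sigma*std are removed.
#
# import numpy as np
# y = np.concatenate([np.random.normal(10., 1., 200), [60., 75.], [cval]])
# good = sigclip1d_mask(y, 3.0, badval=cval)  # rejects the two spikes and cval
# clipped_mean = y[good].mean()               # ~10, insensitive to the spikes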
def background_profile(img, smo1=30, badval=None):
"""
helper routine to determine for the rotated image
(spectrum in rows) the background using sigma clipping.
"""
import numpy as np
from scipy import interpolate
bgimg = img.copy()
nx = bgimg.shape[1] # number of points in direction of dispersion
ny = bgimg.shape[0] # width of the image
# look at the summed rows of the image
u_ysum = []
for i in range(ny):
u_ysum.append(bgimg[i,:].mean())
u_ysum = np.asarray(u_ysum)
u_ymask = sigclip1d_mask(u_ysum, 2.5, badval=badval, conv=1e-5, maxloop=30)
u_ymean = u_ysum[u_ymask].mean()
# look at the summed columns after filtering bad rows
u_yindex = np.where(u_ymask)[0]
u_xsum = []
u_std = []
for i in range(nx):
u_x1 = bgimg[u_yindex, i].squeeze()
# clip u_x1
u_x1mask = sigclip1d_mask(u_x1, 2.5, badval=None, conv=1e-5, maxloop=30)
u_xsum.append(u_x1[u_x1mask].mean())
u_std.append(u_x1[u_x1mask].std())
#print u_x1[u_x1mask]
#if np.isfinite(u_x1mask.mean()) & len(u_x1[u_x1mask])>0:
# print "%8.2f %8.2f %8.2f "%(u_x1[u_x1mask].mean(),u_x1[u_x1mask].std(),u_x1[u_x1mask].max())
# the best background estimate of the typical row is now u_xsum
# fit a smooth spline through the u_xsum values (or boxcar?)
#print "u_x means "
#print u_xsum
u_xsum = np.asarray(u_xsum)
u_std = np.asarray(u_std)
u_xsum_ok = np.isfinite(u_xsum)
bg_tcp = interpolate.splrep(np.arange(nx)[u_xsum_ok],
np.asarray(u_xsum)[u_xsum_ok], s=smo1)
# representative background profile in column
u_x = interpolate.splev(np.arange(nx), bg_tcp, )
return u_xsum, u_x, u_std
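# Usage sketch (commented out): for a rotated image slice `extimg` with the
# spectrum along rows, the routine above returns per-column background measures:
#
# u_xsum, u_x, u_std = background_profile(extimg, smo1=30, badval=cval)
# # u_xsum : sigma-clipped mean background per column
# # u_x    : smoothing-spline representation of u_xsum along the dispersion
# # u_std  : clipped standard deviation per column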
def findBackground(extimg,background_lower=[None,None], background_upper=[None,None],yloc_spectrum=int(slit_width/2),
smo1=None, smo2=None, chatter=2):
'''Extract the background from the image slice containing the spectrum.
Parameters
----------
extimg : 2D array
image containing spectrum. Dispersion approximately along x-axis.
background_lower : list
distance in pixels from `yloc_spectrum` of the limits of the lower background region.
background_upper : list
distance in pixels from `yloc_spectrum` of the limits of the upper background region.
yloc_spectrum : int
pixel `Y` location of spectrum
smo1 : float
smoothing parameter passed to smoothing spline fitting routine. `None` for default.
smo2 : float
smoothing parameter passed to smoothing spline fitting routine. `None` for default.
chatter : int
verbosity
Returns
-------
bg : float
mean background
bg1, bg2 : 1D arrays
bg1 = lower background; bg2 = upper background
inherits size from extimg.shape x-coordinate
bgsig : float
standard deviation of background
bgimg : 2D array
image of the background constructed from bg1 and/or bg2
bg_limits_used : list, length 4
limits used for the background in the following order: lower background, upper background
(bg1_good, bg1_dis, bg1_dis_good, bg2_good, bg2_dis, bg2_dis_good, bgimg_lin) : tuple
various other background measures
Notes
-----
**Global parameter**
- **background_method** : {'boxcar','splinefit','sigmaclip'}
The background image can be computed in one of three ways:
1. 'splinefit': sigma clip image, then fit a smoothing spline to each
row, then average in y for each background region
2. 'boxcar': select the background from the smoothed image created
by method 1 below.
3. 'sigmaclip': do sigma clipping on rows and columns to get column
profile background, then clip image and mask, interpolate over masked
bits.
extimg is the image slice containing the spectrum along the 1-axis (x), centred on the 0-axis (y)
`ank` is the position of the anchor in the image
I create two background images:
1. split the image strip into 40 portions in x, so that the background variation is small
compute the mean
sigma clip (3 sigma) each area to the local mean
replace out-of-image pixels with mean of whole image (2-sigma clipped)
smooth with a boxcar by the smoothing factor
2. compute the background in two regions upper and lower
linearly interpolate in Y between the two regions to create a background image
bg1 = lower background; bg2 = upper background
smo1, smo2 allow one to relax the smoothing factor in computing the smoothing spline fit
History
-------
- 8 Nov 2011 NPM Kuin complete overhaul
things to do: get quality flagging of bad background points, edges perhaps done here?
- 13 Aug 2012: possible problem was seen of very bright sources not getting masked out properly
and causing an error in the background that extends over a large distance due to the smoothing.
The cause is that the sources are more extended than can be handled by this method.
A solution would be to derive a global background
- 30 Sep 2014: background fails in visible grism e.g., 57977004+1 nearby bright spectrum
new method added (4x slower processing) to screen the image using sigma clipping
'''
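# Illustrative call sketch (commented out); the background regions given below
# are hypothetical distances from the spectrum, per the docstring above:
#
# bg, bg1, bg2, bgsig, bgimg, bg_limits, extras = findBackground(extimg,
#      background_lower=[25, 50],  # region 25-50 pix below yloc_spectrum
#      background_upper=[25, 50],  # region 25-50 pix above yloc_spectrum
#      yloc_spectrum=100)
# net = extimg - bgimg             # background-subtracted slice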
import sys
import numpy as np
try:
from convolve import boxcar
except:
from stsci.convolve import boxcar
from scipy import interpolate
import stsci.imagestats as imagestats
# initialize parameters
bgimg = extimg.copy()
out = np.where( (np.abs(bgimg-cval) <= 1e-6) )
in_img = np.where( (np.abs(bgimg-cval) > 1e-6) & np.isfinite(bgimg) )
nx = bgimg.shape[1] # number of points in direction of dispersion
ny = bgimg.shape[0] # width of the image
# sigma screening of background taking advantage of the dispersion being
# basically along the x-axis
if _PROFILE_BACKGROUND_:
bg, u_x, bg_sig = background_profile(bgimg, smo1=30, badval=cval)
u_mask = np.zeros((ny,nx),dtype=bool)
for i in range(ny):
u_mask[i,(bgimg[i,:].flatten() < u_x) &
np.isfinite(bgimg[i,:].flatten())] = True
bkg_sc = np.zeros((ny,nx),dtype=float)
# the following leaves larger disps in the dispersion but less noise;
# tested but not implemented, as it is not as fast and the mean results
# are comparable:
#for i in range(ny):
# uf = interpolate.interp1d(np.where(u_mask[i,:])[0],bgimg[i,u_mask[i,:]],bounds_error=False,fill_value=cval)
# bkg_sc[i,:] = uf(np.arange(nx))
#for i in range(nx):
# ucol = bkg_sc[:,i]
# if len(ucol[ucol != cval]) > 0:
# ucol[ucol == cval] = ucol[ucol != cval].mean()
for i in range(nx):
ucol = bgimg[:,i]
if len(ucol[u_mask[:,i]]) > 0:
ucol[np.where(u_mask[:,i] == False)[0] ] = ucol[u_mask[:,i]].mean()
bkg_sc[:,i] = ucol
if background_method == 'sigmaclip':
return bkg_sc
else:
# continue now with the screened image
bgimg = bkg_sc
kx0 = 0 ; kx1 = nx # default limits for valid lower background
kx2 = 0 ; kx3 = nx # default limits for valid upper background
ny4 = int(0.25*ny) # default width of each default background region
sig1 = 1 # unit for background offset, width
bg_limits_used = [0,0,0,0] # return values used
## in the next section I replace the > 2.5 sigma peaks with the mean
## after subdividing the image strip to allow for the
## change in background level which can be > 2 over the
## image. Off-image parts are set to image mean.
# this works most times in the absence of the sigma screening, but
# can lead to overestimates of the background.
# the call to the imagestats package is only done here; replacing it
# could be considered. It is not critical for the program.
#
xlist = np.linspace(0,bgimg.shape[1],80)
xlist = np.asarray(xlist,dtype=int)
imgstats = imagestats.ImageStats(bgimg[in_img[0],in_img[1]],nclip=3)
bg = imgstats.mean
bgsig = imgstats.stddev
if chatter > 2:
sys.stderr.write( 'background statistics: mean=%10.2f, sigma=%10.2f '%
(imgstats.mean, imgstats.stddev))
# create boolean image flagging good pixels
img_good = np.ones(extimg.shape,dtype=bool)
# flag area out of picture as bad
img_good[out] = False
# replace high values in image with estimate of mean and flag them as not good
for i in range(78):
# after the sigma screening this is a bit of overkill, leave in for now
sub_bg = boxcar(bgimg[:,xlist[i]:xlist[i+2]] , (5,5), mode='reflect', cval=cval)
sub_bg_use = np.where( np.abs(sub_bg - cval) > 1.0e-5 ) # list of coordinates
imgstats = None
if sub_bg_use[0].size > 0:
imgstats = imagestats.ImageStats(sub_bg[sub_bg_use],nclip=3)
# patch values in image (not out of image) with mean if outliers
aval = 2.0*imgstats.stddev
img_clip_ = (
(np.abs(bgimg[:,xlist[i]:xlist[i+2]]-cval) < 1e-6) |
(np.abs(sub_bg - imgstats.mean) > aval) |
(sub_bg <= 0.) | np.isnan(sub_bg) )
bgimg[:,xlist[i]:xlist[i+2]][img_clip_] = imgstats.mean # patch image
img_good[:,xlist[i]:xlist[i+2]][img_clip_] = False # flag patches
# the next section selects the user-selected or default background for further processing
if chatter > 1:
if background_method == 'boxcar':
sys.stderr.write( "BACKGROUND METHOD: %s; background smoothing = %s\n"%
(background_method,background_smoothing))
else:
sys.stderr.write( "BACKGROUND METHOD:%s\n"%(background_method ))
if not ((background_method == 'splinefit') | (background_method == 'boxcar') ):
sys.stderr.write('background method missing; currently reads : %s\n'%(background_method))
if background_method == 'boxcar':
# boxcar smooth in x,y using the global parameter background_smoothing
bgimg = boxcar(bgimg,background_smoothing,mode='reflect',cval=cval)
if background_lower[0] == None:
bg1 = bgimg[0:ny4,:].copy()
bg_limits_used[0]=0
bg_limits_used[1]=ny4
bg1_good = img_good[0:ny4,:]
kx0 = np.min(np.where(img_good[0,:]))+10 # assuming the spectrum is in the top two thirds of the detector
kx1 = np.max(np.where(img_good[0,:]))-10
else:
# no curvature, no second order: limits
bg1_1= np.max(np.array([yloc_spectrum - sig1*background_lower[0],20 ]))
#bg1_0= np.max(np.array([yloc_spectrum - sig1*(background_lower[0]+background_lower[1]),0]))
bg1_0= np.max(np.array([yloc_spectrum - sig1*(background_lower[1]),0]))
bg1 = bgimg[int(bg1_0):int(bg1_1),:].copy()
bg_limits_used[0]=bg1_0
bg_limits_used[1]=bg1_1
bg1_good = img_good[int(bg1_0):int(bg1_1),:]
kx0 = np.min(np.where(img_good[int(bg1_0),:]))+10 # assuming the spectrum is in the top two thirds of the detector
kx1 = np.max(np.where(img_good[int(bg1_0),:]))-10 # corrected for edge effects
#if ((kx2-kx0) < 20):
# print 'not enough valid upper background points'
if background_upper[0] == None:
bg2 = bgimg[-ny4:ny,:].copy()
bg_limits_used[2]=ny-ny4
bg_limits_used[3]=ny
bg2_good = img_good[-ny4:ny,:]
kx2 = np.min(np.where(img_good[ny-1,:]))+10 # assuming the spectrum is in the top two thirds of the detector
kx3 = np.max(np.where(img_good[ny-1,:]))-10
else:
bg2_0= np.min(np.array([yloc_spectrum + sig1*background_upper[0],(slit_width-20) ]))
#bg2_1= np.min(np.array([yloc_spectrum + sig1*(background_upper[0]+background_upper[1]),ny]))
bg2_1= np.min(np.array([yloc_spectrum + sig1*(background_upper[1]),ny]))
bg2 = bgimg[int(bg2_0):int(bg2_1),:].copy()
bg_limits_used[2]=bg2_0
bg_limits_used[3]=bg2_1
bg2_good = img_good[int(bg2_0):int(bg2_1),:]
kx2 = np.min(np.where(img_good[int(bg2_1),:]))+10 # assuming the spectrum is in the top two thirds of the detector
kx3 = np.max(np.where(img_good[int(bg2_1),:]))-10
#if ((kx3-kx2) < 20):
# print 'not enough valid upper background points'
if background_method == 'boxcar':
bg1 = bg1_dis = bg1.mean(0)
bg2 = bg2_dis = bg2.mean(0)
bg1_dis_good = np.zeros(nx,dtype=bool)
bg2_dis_good = np.zeros(nx,dtype=bool)
for i in range(nx):
bg1_dis_good[i] = bool(int(bg1_good[:,i].mean(0)))
bg2_dis_good[i] = bool(int(bg2_good[:,i].mean(0)))
if background_method == 'splinefit':
# mean bg1_dis, bg2_dis across dispersion
bg1_dis = np.zeros(nx) ; bg2_dis = np.zeros(nx)
for i in range(nx):
bg1_dis[i] = bg1[:,i][bg1_good[:,i]].mean()
if not bool(int(bg1_good[:,i].mean())):
bg1_dis[i] = cval
bg2_dis[i] = bg2[:,i][bg2_good[:,i]].mean()
if not bool(int(bg2_good[:,i].mean())):
bg2_dis[i] = cval
# some parts of the background may have been masked out completely, so
# find the good points and the bad points
bg1_dis_good = np.where( np.isfinite(bg1_dis) & (np.abs(bg1_dis - cval) > 1.e-7) )
bg2_dis_good = np.where( np.isfinite(bg2_dis) & (np.abs(bg2_dis - cval) > 1.e-7) )
bg1_dis_bad = np.where( ~(np.isfinite(bg1_dis) & (np.abs(bg1_dis - cval) > 1.e-7)) )
bg2_dis_bad = np.where( ~(np.isfinite(bg2_dis) & (np.abs(bg2_dis - cval) > 1.e-7)) )
# fit a smoothing spline to each background
x = bg1_dis_good[0]
s = len(x) - np.sqrt(2.*len(x))
if smo1 != None: s = smo1
if len(x) > 40: x = x[7:len(x)-7] # clip end of spectrum where there is downturn
w = np.ones(len(x))
tck1 = interpolate.splrep(x,bg1_dis[x],w=w,xb=bg1_dis_good[0][0],xe=bg1_dis_good[0][-1],k=3,s=s)
bg1 = np.ones(nx) * (bg1_dis[x]).mean()
bg1[np.arange(kx0,kx1)] = interpolate.splev(np.arange(kx0,kx1), tck1)
x = bg2_dis_good[0]
s = len(x) - np.sqrt(2.*len(x))
if smo2 != None: s = smo2
if len(x) > 40: x = x[10:len(x)-10] # clip
w = np.ones(len(x))
tck2 = interpolate.splrep(x,bg2_dis[x],w=w,xb=bg2_dis_good[0][0],xe=bg2_dis_good[0][-1],k=3,s=s)
bg2 = np.ones(nx) * (bg2_dis[x]).mean()
bg2[np.arange(kx2,kx3)] = interpolate.splev(np.arange(kx2,kx3), tck2)
# force bg >= 0:
# spline can do weird things ?
negvals = bg1 < 0.0
if negvals.any():
bg1[negvals] = 0.0
if chatter > 1:
print("background 1 set to zero in ",len(np.where(negvals)[0])," points")
negvals = bg2 < 0.0
if negvals.any():
bg2[negvals] = 0.0
if chatter > 1:
print("background 2 set to zero in ",len(np.where(negvals)[0])," points")
# image constructed from linear inter/extra-polation of bg1 and bg2
bgimg_lin = np.zeros(ny*nx).reshape(ny,nx)
dbgdy = (bg2-bg1)/(ny-1)
for i in range(ny):
bgimg_lin[i,:] = bg1 + dbgdy*i
# interpolate background and generate smooth interpolation image
if ( (background_lower[0] == None) & (background_upper[0] == None)):
# default background region
dbgdy = (bg2-bg1)/150.0 # assuming height spectrum 200 and width extraction regions 30 pix each
for i9 in range(bgimg.shape[0]):
bgimg[i9,kx0:kx1] = bg1[kx0:kx1] + dbgdy[kx0:kx1]*(i9-25)
bgimg[i9,0:kx0] = bg2[0:kx0]
bgimg[i9,kx1:nx] = bg2[kx1:nx]
if chatter > 2: print("1..BACKGROUND DEFAULT from BG1 and BG2")
elif ((background_lower[0] != None) & (background_upper[0] == None)):
# set background to lower background region
for i9 in range(bgimg.shape[0]):
bgimg[i9,:] = bg1
if chatter > 2: print("2..BACKGROUND from lower BG1 only")
elif ((background_upper[0] != None) & (background_lower[0] == None)):
# set background to that of upper background region
for i9 in range(bgimg.shape[0]):
bgimg[i9,:] = bg2
if chatter > 2: print("3..BACKGROUND from upper BG2 only")
else:
# linear interpolation of the two background regions
dbgdy = (bg2-bg1)/(background_upper[0]+0.5*background_upper[1]+background_lower[0]+0.5*background_lower[1])
for i9 in range(bgimg.shape[0]):
bgimg[i9,kx0:kx1] = bg1[kx0:kx1] + dbgdy[kx0:kx1]*(i9-int(int(slit_width/2)-(background_lower[0]+0.5*background_lower[1])))
bgimg[i9,0:kx0] = bg2[0:kx0] # assuming that the spectrum is not in the lower left corner
bgimg[i9,kx1:nx] = bg2[kx1:nx]
if chatter > 2: print("4..BACKGROUND from BG1 and BG2")
return bg, bg1, bg2, bgsig, bgimg, bg_limits_used, (bg1_good, bg1_dis,
bg1_dis_good, bg2_good, bg2_dis, bg2_dis_good, bgimg_lin)
def interpol(xx,x,y):
'''
linearly interpolate a function y(x) to return y(xx)
no special treatment of boundaries
2011-12-10 NPMKuin skip all data points which are not finite
'''
import numpy as np
x = np.asarray(x.ravel())
y = np.asarray(y.ravel())
q0 = np.isfinite(x) & np.isfinite(y) # filter out NaN values
q1 = np.where(q0)
if len(q1[0]) == 0:
print("error in arrays to be interpolated")
print("x:",x)
print("y:",y)
print("arg:",xx)
x1 = x[q1[0]]
y1 = y[q1[0]]
q2 = np.where( np.isfinite(xx) ) # filter out NaN values
kk = x1.searchsorted(xx[q2])-1
# should extrapolate if element of k = len(a)
#q = np.where(k == len(a)) ; k[q] = k[q]-1
n = len(kk)
f = np.zeros(n)
f2 = np.zeros(len(xx))
for i in range(n):
k = kk[i]
if k > (len(x1)-2):
k = len(x1) - 2
s = (y1[k+1]-y1[k])/(x1[k+1]-x1[k])
f[i] = y1[k]+s*(xx[q2[0]][i]-x1[k])
f2[q2] = f
f2[~np.isfinite(xx)] = np.nan  # positions where xx is not finite get NaN
return f2
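# Small worked example (commented out):
#
# import numpy as np
# x  = np.array([0., 1., 2., 3.])
# y  = np.array([0., 2., 4., 6.])   # y = 2x
# xx = np.array([0.5, 1.5, 2.25])
# interpol(xx, x, y)                # -> array([1. , 3. , 4.5])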
def hydrogen(n,l):
'''
Return roughly the wavelength of the Hydrogen lines
Lyman spectrum: l=0, n>l+1
Balmer spectrum: l=1, n>2
Paschen spectrum: l=2, n>3
'''
# Rydberg constant in m-1 units
R = 1.097e7
inv_lam = R*(1./(l+1)**2 - 1./n**2)
lam = 1./inv_lam * 1e10
return lam
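# Quick check of the Rydberg formula above (commented out):
#
# hydrogen(2, 0)   # Lyman-alpha   ~ 1215 Angstrom
# hydrogen(3, 1)   # Balmer H-alpha ~ 6563 Angstrom
# hydrogen(4, 1)   # Balmer H-beta  ~ 4862 Angstrom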
def boresight(filter='uvw1',order=1,wave=260,
r2d=77.0,date=0,chatter=0):
''' provide reference positions on the
UVOT filters for mapping and as function of
time for grisms.
This function name is for historical reasons,
and provides a key mapping function for the
spectral extraction.
The correct boresight of the (lenticular) filters
should be gotten from the Swift UVOT CALDB
as maintained by HEASARC. The positions here
are in some cases substantially different from
the boresight in the CALDB. They are reference
positions for the spectral extraction algorithms
rather than boresight.
The grism boresight positions at 260nm (uv grism)
and 420nm (visible grism) in first order are served
in an uncommon format (in DET pixels)
by adding (77,77) to the lenticular filter
RAW coordinate.(see TELDEF file) the grism
boresight was measured in DET coordinates,
not RAW. (offset correction should be 104,78)
Parameters
----------
filter : str
one of {'ug200','uc160','vg1000','vc955',
'wh','v','b','u','uvw1','uvm2','uvw2'}
order : {0,1,2}
order for which the anchor is needed
wave : float
anchor wavelength in nm
r2d : float
additive factor in x,y to anchor position
date: long
format in swift time (s)
if 0 then provide the first order anchor
coordinates of the boresight for mapping
from the lenticular filter position
chatter : int
verbosity
Returns
-------
When *date* = 0:
For translation: The boresight for a filter
(in DET pixels) by adding (77,77) to the
lenticular filter RAW coordinate (see TELDEF file)
the grism boresight was measured in DET
(The default r2d=77 returns the correct
boresight for the grisms in detector
coordinates. To get the grism boresight in
detector image coordinates, subtract (104,78)
typically. The difference is due to the distortion
correction from RAW to DET)
When *date* is non-zero, and *order*=0:
The zeroth order boresight
NOTE:
-----
THE TRANSLATION OF LENTICULAR IMAGE TO GRISM
IMAGE IS ALWAYS THE SAME, INDEPENDENT OF THE
BORESIGHT.
THEREFORE THE BORESIGHT DRIFT DOES NOT AFFECT
THE GRISM ANCHOR POSITIONS AS LONG AS THE DEFAULT
BORESIGHT POSITIONS ARE USED.
[Because those were used for the calibration].
However, the zeroth order "reference" position
drift affects the "uvotgraspcorr" - derived
WCS-S. The positions used
History:
2014-01-04 NPMK : rewrite to inter/extrapolate
the boresight positions
'''
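# Usage sketch (commented out). With date=0 the grism first-order call simply
# returns the reference position above plus the default r2d=77 offset:
#
# boresight('uc160', order=1, date=0)   # -> [1102.1, 1022.3]
# boresight('uvw1', date=3.0e8)         # lenticular anchor, corrected for the
#                                       # interpolated boresight drift at that time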
from scipy.interpolate import interp1d
import numpy as np
filterlist = ['ug200','uc160','vg1000','vc955',
'wh','v','b','u','uvw1','uvm2','uvw2']
if filter == 'list': return filterlist
grismfilters = ['ug200','uc160','vg1000','vc955']
lenticular = ['v','b','u','uvw1','uvm2','uvw2']
#old pixel offset anchor based on pre-2010 data
# dates in swift time, drift [x.y] in pixels
#dates=[209952000,179971200,154483349,139968000,121838400]
#drift=[ [0,0], [+2.4,-2.0], [+3.4,-3.0], [+6.4,-10], [+6.4,-10]]
# data from Frank's plot (email 2 dec 2013, uvw1 filter)
# original plot was in arcsec, but the drift converted
# to pixels. uvw1 seems representative (except for white)
swtime = np.array([
1.25000000e+08, 1.39985684e+08, 1.60529672e+08,
1.89248438e+08, 2.23489068e+08, 2.46907209e+08,
2.66126366e+08, 2.79601770e+08, 2.89763794e+08,
3.01251301e+08, 3.13180634e+08, 3.28423998e+08,
3.43445470e+08, 3.59351249e+08, 3.75257678e+08,
4.50000000e+08])
boredx = (np.array([-1.6, -0.870,0.546,1.174,2.328,2.47,
2.813,3.076,3.400,3.805,4.149,4.656,
5.081,5.607,6.072,8.56 ])-1.9)/0.502
boredy = (np.array([ -0.75,-2.197,-4.857,-6.527,
-7.098,-7.252,-7.142,-7.560,
-7.670,-8.000,-8.043,-8.395,
-8.637,-9.142,-9.670,-11.9])+6.8)/0.502
# I assume the same overall drift for the grism
# boresight (in pixels). Perhaps a scale factor for the
# grism would be closer to 0.56 pix/arcsec
# the range has been extrapolated for better interpolation
# and also to support the near future. The early
# time extrapolation is different from the nearly constant
# boresight in the teldef but within about a pixel.
# I think the extrapolation is more accurate.
fx = interp1d(swtime,boredx,bounds_error=False,fill_value="extrapolate")
fy = interp1d(swtime,boredy,bounds_error=False,fill_value="extrapolate")
# reference anchor positions
reference0 = {'ug200': [1449.22, 707.7],
'uc160': [1494.9 , 605.8], #[1501.4 , 593.7], # ?[1494.9, 605.8],
'vg1000':[1506.8 , 664.3],
'vc955': [1542.5 , 556.4]}
# DO NOT CHANGE THE FOLLOWING VALUES AS THE WAVECAL DEPENDS ON THEM !!!
reference1 = {'ug200': [ 928.53,1002.69],
'uc160': [1025.1 , 945.3 ],
'vg1000':[ 969.3 ,1021.3 ],
'vc955': [1063.7 , 952.6 ]}
if (filter in grismfilters):
if (date > 125000000) and (order == 0):
anchor = reference0[filter]
anchor[0] += r2d-fx(date)
anchor[1] += r2d-fy(date)
return anchor
elif (date > 125000000) and (order == 1):
anchor = reference1[filter]
anchor[0] += r2d-fx(date)
anchor[1] += r2d-fy(date)
return anchor
elif order == 1:
anchor = reference1[filter]
anchor[0] += r2d
anchor[1] += r2d
return anchor
elif order == 0:
raise RuntimeError(
"The zeroth order reference position needs a date")
else:
return reference1[filter]
elif (date > 125000000) and (filter in lenticular):
ref_lent = {'v':[951.74,1049.89],
'b':[951.87,1049.67],
'u':[956.98,1047.84],
'uvw1':[951.20,1049.36],
'uvm2':[949.75,1049.30],
'uvw2':[951.11,1050.18]}
anchor = ref_lent[filter]
anchor[0] += r2d-fx(date)
anchor[1] += r2d-fy(date)
return anchor
elif (date > 122000000) and (filter == 'wh'):
print("approximate static white filter boresight")
if date > 209952000:
return 949.902+r2d, 1048.837+r2d
elif date > 179971200:
return 953.315+r2d, 1048.014+r2d
elif date > 154483349:
return 954.506+r2d, 1043.486+r2d
elif date > 139968000:
return 956.000+r2d, 1039.775+r2d
elif date > 121838400:
return 956.000+r2d, 1039.775+r2d
else: return filterlist
else:
# this is the version used initially *(changed 2 june 2009)
# DO NOT CHANGE THESE VALUES AS THE WAVECAL DEPENDS ON THEM !!!
if filter == 'uvw1': return 954.61+r2d, 1044.66+r2d
elif filter == 'wh' : return 954.51+r2d, 1043.49+r2d
elif filter == 'v' : return 955.06+r2d, 1045.98+r2d
elif filter == 'b' : return 955.28+r2d, 1045.08+r2d
elif filter == 'u' : return 960.06+r2d, 1043.33+r2d
elif filter == 'uvm2': return 953.23+r2d, 1044.90+r2d
elif filter == 'uvw2': return 953.23+r2d, 1044.90+r2d
elif filter == 'w1' : return 954.61+r2d, 1044.66+r2d
elif filter == 'm2' : return 953.23+r2d, 1044.90+r2d
elif filter == 'w2' : return 953.23+r2d, 1044.90+r2d
elif filter == 'ug200':
if order == 1:
if wave == 260: return 928.53+r2d,1002.69+r2d
elif filter == 'uc160':
if order == 1:
if wave == 260: return 1025.1+27+r2d,945.3+r2d
elif filter == 'vg1000':
#elif order == 1: return 948.4+r2d, 1025.9+r2d
if order == 1: return 969.3+r2d, 1021.3+r2d
elif filter == 'vc955':
if order == 1: return 1063.7+r2d, 952.6+r2d
raise IOError("valid filter values are 'wh','v',"\
"'b','u','uvw1','uvm2','uvw2','ug200',"\
"'uc160','vg1000','vc955'\n")
def makeXspecInput(lamdasp,countrate,error,lamda_response=None,chatter=1):
''' Convert the count rate spectrum per pixel into a spectrum
on the given bins of the response function.
Parameters
----------
lamdasp : array
wavelengths spectrum
countrate : array
count rates at wavelengths
error : array
errors at wavelengths
kwargs : dict
- **lamda_response** : array
the wavelength for the response bins
- **chatter** : int
verbosity
Returns
-------
lambda : array
wavelengths of the bins
countrate : array
count rate in the bins
error : array
errors in the bins
Notes
-----
errors are summed as sqrt( sum (errors**2 ) )
'''
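# Illustrative numbers (commented out) for the overlap weighting used below:
# if a response bin spans 2000-2010 A and an input pixel bin spans 2008-2012 A,
# that pixel contributes a fraction
#   factor = (min(2012,2010) - max(2008,2000)) / (2012-2008) = 2/4 = 0.5
# of its count rate to the bin, and the errors are combined in quadrature.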
# calculate bin size response, data
if type(lamda_response) == typeNone:
print('need to read in response matrix file')
print(' please code it up')
return None
new_countrate = np.zeros(len(lamda_response))
new_error = np.zeros(len(lamda_response))
# find bin widths
dlamresp = lamda_response.copy()*0
for i in range(len(dlamresp) -1):
dlamresp[i+1] = lamda_response[i+1] - lamda_response[i]
dlamresp[0] = dlamresp[1] # set width first two data bins equal (could inter/extrapolate the lot)
dlam = lamdasp.copy()*0
for i in range(len(dlam) -1):
dlam[i+1]=lamdasp[i+1] - lamdasp[i]
dlam[0] = dlam[1]
#
for i in range(len(lamda_response)):
# find the pixels to use that have contributions to the bin
lam1 = lamda_response[i] - dlamresp[i]/2.0
lam2 = lamda_response[i] + dlamresp[i]/2.0
if ( (lam1 >= (np.max(lamdasp)+dlam[len(lamdasp)-1])) ^ (lam2 <= (np.min(lamdasp)-dlam[0]))):
# no count data
new_countrate[i] = 0
if ((chatter > 2) & (i < 450) & (i > 400)) :
print(' i = ',i,' lam1 = ',lam1,' lam2 = ', lam2,' <<< counts set to zero ')
print(' i = ',i,' term 1 ',(np.max(lamdasp)-dlam[len(lamdasp)-1]))
print(' i = ',i,' term 2 ',(np.min(lamdasp)+dlam[0] ))
else:
if chatter > 2: print('new bin ',i,' lam = ',lam1,' - ',lam2)
# find the bits to add
k = np.where( (lamdasp+dlam/2 > lam1) & (lamdasp-dlam/2 <= lam2) )
# the countrate in a bin is proportional to its width; make sure only
# the part of the data array that fall within the new bin is added
if chatter > 2:
print('data in ',k[0],' wavelengths ',lamdasp[k[0]])
print('counts are ',countrate[k[0]])
nk = len(k[0])
factor = np.zeros( nk )
for m in range(nk): # now loop over all bins that might contribute
wbin1 = lamdasp[k[0][m]] - dlam[k[0][m]]/2
wbin2 = lamdasp[k[0][m]] + dlam[k[0][m]]/2
# fraction of the original bin width that falls within the limits of the new bin
factor[m] = (np.min(np.array( (wbin2,lam2) )) - np.max(np.array((wbin1 ,lam1))))/ (wbin2-wbin1)
if chatter > 2 :
print(' ... m = ',m,' bin= ',wbin1,' - ',wbin2)
print(' ... trimmed ',np.min(np.array( (wbin2,lam2) )),' - ',np.max(np.array((wbin1 ,lam1))))
new_countrate[i] = (factor * countrate[k[0]]).sum()
new_error[i] = np.sqrt( ( (factor * error[k[0]])**2 ).sum() )
if chatter > 2:
print(' scaled factor = ', factor)
print(' new_countrate = ', new_countrate[i])
#
# check that the total number of counts is the same
print('total counts in = ', countrate.sum())
print('total counts out= ', new_countrate.sum())
#
return lamda_response, new_countrate, new_error
def find_zeroth_orders(filestub, ext, wheelpos, region=False,indir='./',
set_maglimit=None, clobber="NO", chatter=0):
'''
The aim is to identify the zeroth order on the grism image.
This is done as follows:
We run uvotdetect to get the zeroth orders in the detector image.
We also grab the USNO-B1 source list and predict the positions on the image using the WCSS header.
Based on a histogram of minimum distances, a correction is made to the WCSS header, and
also to the USNO-B1 predicted positions.
'''
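# Illustrative call sketch (commented out); the file stub, extension and
# directory are hypothetical placeholders:
#
# find_zeroth_orders('sw00032911002', 1, 160, indir='./', set_maglimit=None,
#                    clobber="NO", chatter=1)
#
# This runs uvotdetect on the grism detector image, projects USNO-B1 sources
# through the WCS-S header, and, when the peak of the histogram of pair-wise
# offsets exceeds one pixel, applies that offset to CRPIX1S/CRPIX2S.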
import os
try:
from astropy.io import fits, ascii
except:
import pyfits as fits
from numpy import array, zeros, log10, where
import datetime
import uvotwcs
from astropy import wcs
if chatter > 0:
print("find_zeroth_orders: determining positions zeroth orders from USNO-B1")
if ((wheelpos == 160) ^ (wheelpos == 200)):
grtype = "ugu"
zp = 19.46 # zeropoint uv nominal zeroth orders for 10 arcsec circular region
else:
grtype = "ugv"
zp = 18.90 # estimated visible grism zeropoint for same
exts = repr(ext)
gfile = os.path.join(indir,filestub+grtype+"_dt.img")
infile = os.path.join(indir,filestub+grtype+"_dt.img["+exts+"]")
outfile = os.path.join(indir,filestub+grtype+"_"+exts+"_detect.fits")
if ((wheelpos == 160) ^ (wheelpos == 200)):
command = "uvotdetect infile="+infile+ " outfile="+outfile + \
' threshold=6 sexargs = "-DEBLEND_MINCONT 0.1" '+ \
" expopt = BETA calibrate=NO expfile=NONE "+ \
" clobber="+clobber+" chatter=0 > /dev/null"
else:
command = "uvotdetect infile="+infile+ " outfile="+outfile + \
' threshold=6 sexargs = "-DEBLEND_MINCONT 0.1" '+ \
" expopt = BETA calibrate=NO expfile=NONE "+ \
" clobber="+clobber+" chatter=0 > /dev/null"
if chatter > 1:
print("find_zeroth_orders: trying to detect the zeroth orders in the grism image")
print(command)
useuvotdetect = True
tt = os.system(command)
if tt != 0:
raise RuntimeError('find_zeroth_orders: uvotdetect had a problem with this image\nIs HEASOFT initialised?')
if not os.access(outfile,os.F_OK):
# so you can provide it another way
useuvotdetect = False
rate = 0
if useuvotdetect:
f = fits.open(outfile)
g = f[1].data
h = f[1].header
refid = g.field('refid')
rate = g.field('rate')
rate_err = g.field('rate_err')
rate_bkg = g.field('rate_bkg') # counts/sec/arcsec**2
x_img = g.field('ux_image')
y_img = g.field('uy_image')
a_img = g.field('ua_image') # semi axis
b_img = g.field('ub_image') # semi axis
theta = g.field('utheta_image') # angle of the detection ellipse
prof_major = g.field('prof_major')
prof_minor = g.field('prof_minor')
prof_theta = g.field('prof_theta')
threshold = g.field('threshold') # sigma
flags = g.field('flags')
f.close()
else:
rate_bkg = array([0.08])
hh = fits.getheader(gfile, ext)
exposure = hh['exposure']
ra = hh['RA_PNT']
dec = hh['DEC_PNT']
if "A_ORDER" in hh:
distortpresent = True
else:
distortpresent = False
if chatter > 1:
print("find_zeroth_orders: pointing position ",ra,dec)
# unfortunately uvotdetect will pick up spurious stuff as well near the spectra
# need real sources.
# get catalog sources (B magnitude most closely matches zeroth order)
CALDB = os.getenv('CALDB')
if CALDB == '':
print('find_zeroth_orders: the CALDB environment variable has not been set')
return None
HEADAS = os.getenv('HEADAS')
if HEADAS == '':
print('find_zeroth_orders: The HEADAS environment variable has not been set')
print('That is needed for the uvot Ftools ')
return None
if set_maglimit == None:
b_background = zp + 2.5*log10( (rate_bkg.std())*1256.6 )
# some typical measure for the image
blim= b_background.mean() + b_background.std() + zeroth_blim_offset
else:
blim = set_maglimit
if blim < background_source_mag: blim = background_source_mag
if np.isnan(blim): blim = 18
# if usno-b1 catalog is present for this position,
# do not retrieve again
if os.access('searchcenter.ub1',os.F_OK):
searchcenterf = open( 'searchcenter.ub1' )
searchcenter= searchcenterf.readline().split(',')
searchcenterf.close()
racen,decen = float(searchcenter[0]),float(searchcenter[1])
if np.abs(ra-racen) + np.abs(dec-decen) < 0.01:
use_previous_search = True
else:
use_previous_search = False
else:
use_previous_search = False
# empty file
if os.access('search.ub1',os.F_OK) :
searchf = open('search.ub1')
stab = searchf.readlines()
searchf.close()
if len(stab) < 3: use_previous_search = False
# retrieve catalog data
if (not os.access('search.ub1',os.F_OK)) | (not use_previous_search):
if (chatter > 4): print ("get_usnob1_cat(%f,%f,%f)"%(ra,dec,blim))
status = get_usnob1_cat(ra, dec, blim)
if status is None:
print('ra={}, dec={}, blim={}'.format(ra, dec, blim))
print("find_zeroth_orders: could not get source list from USNO-B1")
sys.exit()
else:
if chatter > 1:
print("find_zeroth_orders: using the USNO-B1 source list from file search.ub1")
# generate a new catspecfile
_write_catspecfile()
# remove reliance on astropy tables as it fails on debian linux
searchf = open('search.ub1')
stab = searchf.readlines()
searchf.close()
M = len(stab)
ra = []
dec = []
b2mag = []
for row in stab:
row_values = row.split()
if len(row_values) > 6:
ra.append(row_values[1])
dec.append(row_values[2])
b2mag.append(row_values[5])
M = len(ra)
if M == 0:
return
ra = np.asarray(ra,dtype=np.float64)
dec = np.asarray(dec,dtype=np.float64)
b2mag = np.asarray(b2mag,dtype=np.float64)
Xa = zeros(M)
Yb = zeros(M)
Thet= zeros(M)
ondetector = zeros(M,dtype=bool)
matched = zeros(M,dtype=bool)
# now find the image coordinates:
#
wcsS = wcs.WCS(header=hh,key='S',relax=True,) # TAN-SIP coordinate type
Xim,Yim = wcsS.wcs_world2pix(ra,dec,0)
xdim, ydim = hh['naxis1'],hh['naxis2']
wheelpos = hh['wheelpos']
if wheelpos == 200:
q1 = (rate > 2.5*rate_bkg) & (rate < 125*rate_bkg)
defaulttheta = 151.4-180.
bins = np.arange(-29.5,29.5,1)
midbin = np.arange(-29,29,1)
elif wheelpos == 160:
q1 = (rate > 2.5*rate_bkg) & (rate < 125*rate_bkg) & (x_img > 850)
defaulttheta = 144.4-180.
bins = np.arange(-29.5,29.5,1)
midbin = np.arange(-29,29,1)
elif wheelpos == 955:
q1 = (rate > 2.5*rate_bkg) & (rate < 175*rate_bkg) & (x_img > 850)
defaulttheta = 140.5-180
bins = np.arange(-49.5,49.5,1)
midbin = np.arange(-49,49,1)
elif wheelpos == 1000:
q1 = (rate > 2.5*rate_bkg) & (rate < 175*rate_bkg)
defaulttheta = 148.1-180.
bins = np.arange(-49.5,49.5,1)
midbin = np.arange(-49,49,1)
Thet -= defaulttheta
Xa += 17.0
Yb += 5.5
# convert sky coord. to positions (Xim , Yim) , and set flag ondetector
for i in range(M):
if not distortpresent:
# now we need to apply the distortion correction:
Xim[i], Yim[i] = uvotwcs.correct_image_distortion(Xim[i],Yim[i],hh)
ondetector[i] = ((Xim[i] > 8) & (Xim[i] < xdim) & (Yim[i] > 8) & (Yim[i] < ydim-8))
xoff = 0.0
yoff = 0.0
# derive offset :
# find the minimum distances between sources in lists pair-wise
distance = []
distx = []
disty = []
kx = -1
dxlim = 100 # maximum distance in X
dylim = 100 # maximum distance in Y
tol = 5 # tolerance in x and y match
xim = x_img[q1]
yim = y_img[q1]
M2 = int(len(xim)*0.5)
for i2 in range(M2): # loop over the xdetect results
i = 2*i2
i1 = 2*i2+1
if (ondetector[i] and useuvotdetect):
dx = np.abs(Xim - xim[i ])
dy = np.abs(Yim - yim[i ])
dx1 = np.abs(Xim - xim[i1])
dy1 = np.abs(Yim - yim[i1])
op = (dx < dxlim) & (dy < dylim)
if op.sum() != 0:
dis = np.sqrt(dx[op]**2+dy[op]**2)
kx = dis == np.min(dis)
kx = np.arange(len(op))[op][kx]
op1 = (dx1 < dxlim) & (dy1 < dylim)
if op1.sum() != 0:
dis = np.sqrt(dx1[op1]**2+dy1[op1]**2)
kx1 = dis == np.min(dis)
kx1 = np.arange(len(op1))[op1][kx1]
if (np.abs(dx[kx] - dx1[kx1]) < tol ) & (np.abs(dy[kx] - dy1[kx1]) < tol ):
distx.append( Xim[kx] - xim[i ] )
disty.append( Yim[kx] - yim[i ] )
distx.append( Xim[kx1] - xim[i1] )
disty.append( Yim[kx1] - yim[i1] )
if ((type(kx) == int) & (chatter > 3)):
print("Xim: ",Xim[kx])
print("xim:",xim)
print("dx: ",dx)
if len(distx) > 0 :
hisx = np.histogram(distx,bins=bins)
#xoff = hisx[1][:-1][hisx[0] == hisx[0].max()].mean()
xoff = midbin[hisx[0] == hisx[0].max()].mean()
hisy = np.histogram(disty,bins=bins)
#yoff = hisy[1][:-1][hisy[0] == hisy[0].max()].mean()
yoff = midbin[hisy[0] == hisy[0].max()].mean()
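# Illustration (comment only, made-up values): with distx = [3.2, 2.8, 3.1, -7.0]
# the histogram over the unit-wide bins peaks in the bin centred on 3, so
# xoff = 3.0; a single outlier such as -7.0 cannot drag the offset the way a
# plain mean of the distances would.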
# subtract xoff, yoff from Xim, Yim or add to origin ( hh[CRPIX1S],hh[CRPIX2S] ) if offset
# is larger than 1 pix
if (np.sqrt(xoff**2+yoff**2) > 1.0):
if ("forceshi" not in hh):
hh['crpix1s'] += xoff
hh['crpix2s'] += yoff
hh["forceshi"] = "%f,%f"%(xoff,yoff)
hh["forcesh0"] = "%f,%f"%(xoff,yoff)
print("offset (%5.1f,%5.1f) found"%(xoff,yoff))
print("offset found has been applied to the fits header of file: %s\n"%(gfile))
else:
# do not apply the shift to crpix*s for subsequent shifts, but record the overall shift.
# The original shift is in "forcesh0", which actually WAS applied. Both items are needed
# to reconstruct shifts between the pointing image and the source locations (in case
# we allow interactive adjustments of zeroth orders, that would enable pointing updates);
# however, the keyword must be reset at the start of reprocessing (not done now).
xoff_,yoff_ = np.array((hh["forceshi"]).split(','),dtype=float)
hh["forceshi"] = "%f,%f"%(xoff_+xoff,yoff_+yoff)
f = fits.open(gfile,mode='update')
f[ext].header = hh
f.close()
print("find_zeroth_orders result (binary matched offset): \n")
print("\tAfter comparing uvotdetect zeroth order positions to USNO-B1 predicted source positions ")
print("\tthere was found an overall offset equal to (%5.1f.%5.1f) pix "%(xoff,yoff))
Xim -= xoff
Yim -= yoff
else:
# if binary matched offsets don't pan out at all, compute simple offsets
for i in range(len(xim)): # loop over the xdetect results
if (ondetector[i] and useuvotdetect):
dx = np.abs(Xim - xim[i ])
dy = np.abs(Yim - yim[i ])
op = (dx < dxlim) & (dy < dylim)
if op.sum() != 0:
dis = np.sqrt(dx[op]**2+dy[op]**2)
kx = dis == np.min(dis)
kx = np.arange(len(op))[op][kx]
distx.append( Xim[kx] - xim[i ] )
disty.append( Yim[kx] - yim[i ] )
hisx = np.histogram(distx,bins=bins)
#xoff = hisx[1][hisx[0] == hisx[0].max()].mean()
xoff = midbin[hisx[0] == hisx[0].max()].mean()
hisy = np.histogram(disty,bins=bins)
#yoff = hisy[1][hisy[0] == hisy[0].max()].mean()
yoff = midbin[hisy[0] == hisy[0].max()].mean()
if (np.sqrt(xoff**2+yoff**2) > 1.0):
if ("forceshi" not in hh):
hh['crpix1s'] += xoff
hh['crpix2s'] += yoff
hh["forceshi"] = "%f,%f"%(xoff,yoff)
hh["forcesh0"] = "%f,%f"%(xoff,yoff)
print("offset (%5.1f,%5.1f) found"%(xoff,yoff))
print("offset found has been applied to the fits header of file: %s\n"%(gfile))
else:
# do not apply the shift to crpix*s for subsequent shifts, but record the overall shift.
# The original shift is in "forcesh0", which actually WAS applied. Both items are needed
# to reconstruct shifts between the pointing image and the source locations (in case
# we allow interactive adjustments of zeroth orders, that would enable pointing updates);
# however, the keyword must be reset at the start of reprocessing (not done now).
xoff_,yoff_ = np.array((hh["forceshi"]).split(','),dtype=float)
hh["forceshi"] = "%f,%f"%(xoff_+xoff,yoff_+yoff)
f = fits.open(gfile,mode='update')
f[ext].header = hh
f.close()
print("find_zeroth_orders result (simple offset): \n")
print("\tAfter comparing uvotdetect zeroth order positions to USNO-B1 predicted source positions ")
print("\tthere was found an overall offset equal to (%5.1f.%5.1f) pix "%(xoff,yoff))
Xim -= xoff
Yim -= yoff
# find ellipse belonging to source from uvotdetect output, or make up one for all ondetector
xacc = 10
yacc = 6
for i in range(M):
if (ondetector[i] and useuvotdetect):
kx = where ( abs(Xim[i] - x_img) < xacc )
if len(kx[0]) != 0:
kxy = where( abs(Yim[i] - y_img[kx]) < yacc)
if len(kxy[0]) == 1:
k = kx[0][kxy[0][0]]
Xa[i] = prof_major[k]*5.
Yb[i] = prof_minor[k]*5.
Thet[i]= -theta[k]
matched[i] = True
else:
# make up some ellipse axes in pix
Xa[i] = 17.0
Yb[i] = 5.0
if chatter > 0:
print("find_zeroth_orders: there were %i matches found between the uvotdetect sources and the USNO B1 list"%(matched.sum()))
if region:
a = datetime.date.today()
datestr = a.isoformat()[0:4]+a.isoformat()[5:7]+a.isoformat()[8:10]  # renamed to avoid shadowing the datetime module
# make region file for sources on detector
f = open(filestub+'_'+exts+'.reg','w')
f.write('# Region file format: DS9 version 4.1\n')
#f.write('# written by uvotgetspec.findzerothorders python program '+datestr+'\n')
f.write('# Filename: '+infile+'\n')
f.write('global color=green dashlist=8 3 width=1 font="helvetica 10 normal" select=1 highlite=1 dash=0 fixed=0 edit=1 move=1 delete=1 include=1 source=1 \n')
f.write('physical\n')
for i in range(M):
if (ondetector[i] and useuvotdetect):
f.write('ellipse(%12.2f,%12.2f,%12.2f,%12.2f,%12.2f)\n' % (Xim[i],Yim[i],Xa[i],Yb[i],180.-Thet[i]) )
f.close()
# make a second region file for sources with first order on detector [TBD]
# the sources on the detector are Xim[ondetector] etc.,
# matched[ondetector] are those sources which have both been found by uvotdetect and in the catalog
# the complete list also includes sources off the detector which may have first orders on the
# detector when the B magnitude > ~14.
# the ellipse parameters for the sources which have no uvotdetection (matched=False) are some
# arbitrary mean values. They should be scaled to brightness.
return Xim,Yim,Xa,Yb,Thet,b2mag,matched,ondetector
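# Example (sketch): typical use of the values returned above; filestub, ext and
# wheelpos are assumed to come from the caller (see the call hint repeated in
# curved_extraction below):
#   ZOpos = find_zeroth_orders(filestub, ext, wheelpos, clobber="yes")
#   Xim, Yim, Xa, Yb, Thet, b2mag, matched, ondetector = ZOpos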
def spec_curvature(wheelpos,anchor,order=1,):
'''Find the coefficients of the polynomial for the curvature.
Parameters
----------
wheelpos : int, {160,200,955,1000}
grism filter position in filter wheel
anchor : list, array
anchor position in detector coordinates (pixels)
order : int
the desired spectral order
Returns
-------
Provides the polynomial coefficients for y(x).
Notes
-----
The curvature is defined as a function of the pixel coordinate along the dispersion
direction, measured relative to the anchor position in det-img coordinates.
The polynomial returns the offset normal to the dispersion.
- 2011-03-07 <NAME>, initial version
- 2011-08-02 fixed nominal coefficients order=1
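Example
-------
A minimal sketch (the anchor position is illustrative only); the returned
coefficients are for numpy.polyval and give the offset normal to the
dispersion as a function of the pixel offset along the dispersion:
coef1 = spec_curvature(160, [1000., 1100.], order=1)
dy = polyval(coef1, arange(-370., 1150.))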
'''
from scipy import interpolate
from numpy import array
xin = anchor[0] -104
yin = anchor[1] -78
if ((wheelpos == 1000) | (wheelpos == 955)):
# return y = 0 + 0.0*x coefficient
return array([0.,0.])
elif wheelpos == 160:
if order == 1:
tck_c1= [array([0.,0.,0.,0.,2048., 2048., 2048., 2048.]), \
array([0.,0.,0.,0., 2048., 2048., 2048., 2048.]), \
array([ 0.1329227 , -0.28774943, 0.13672294, -0.18436127, -0.19086855,\
0.23071908, -0.21803703, 0.11983982, 0.16678715, -0.2004285 ,\
0.12813155, -0.13855324, -0.1356009 , 0.11504641, -0.10732287,\
0.03374111]),3,3]
tck_c2 = [array([0.,0.,0.,0., 2048., 2048., 2048., 2048.]),\
array([0.,0.,0.,0., 2048., 2048., 2048., 2048.]),\
array([ -3.17463632e-04, 2.53197376e-04, -3.44611897e-04,\
4.81594388e-04, 2.63206764e-04, -3.03314305e-04,\
3.25032065e-04, -2.97050826e-04, -3.06358032e-04,\
3.32952612e-04, -2.79473410e-04, 3.95150704e-04,\
2.56203495e-04, -2.34524716e-04, 2.75320861e-04,\
-6.64416547e-05]),3,3]
tck_c3 = [array([ 0.,0.,0.,0.,2048., 2048., 2048., 2048.]),\
array([ 0.,0.,0.,0.,2048., 2048., 2048., 2048.]),\
array([ -4.14989592e-07, 5.09851884e-07, -4.86551197e-07,\
1.33727326e-07, 4.87557866e-07, -5.51120320e-07,\
5.76975007e-07, -3.29793632e-07, -3.42589204e-07,\
3.00002959e-07, -2.90718693e-07, 5.57782883e-08,\
2.20540397e-07, -1.62674045e-07, 8.70230076e-08,\
-1.13489556e-07]),3,3]
#coef = array([interpolate.bisplev(xin,yin,tck_c3),interpolate.bisplev(xin,yin,tck_c2),\
# interpolate.bisplev(xin,yin,tck_c1), 0.])
coef = array([interpolate.bisplev(xin,yin,tck_c3)*0.5,interpolate.bisplev(xin,yin,tck_c2)*0.5,\
interpolate.bisplev(xin,yin,tck_c1)*0.5, 0.]) #~FIXME:
return coef
elif order == 2:
tck_c0 = [array([ 0., 0., 0., 0., 1134.78683, 2048., 2048., 2048., 2048.]), \
array([ 0., 0., 0., 0., 871.080060, 2048., 2048., 2048., 2048.]), \
array([-110.94246902, 15.02796289, -56.20252149, -12.04954456,\
311.31851187, -31.09148174, -48.44676102, 85.82835905,\
-73.06964994, 99.58445164, 46.47352776, 11.29231744,\
-68.32631894, 88.68570087, -34.78582366, -33.71033771,\
6.89774103, 25.59082616, 23.37354026, 49.61868235,\
-438.17511696, -31.63936231, 28.8779241 , 51.03055925,\
16.46852299]), 3, 3]
tck_c1 = [array([ 0., 0., 0., 0., 2048., 2048., 2048., 2048.]),\
array([ 0., 0., 0., 0., 2048., 2048., 2048., 2048.]),\
array([ 0.52932582, -0.76118033, 0.38401924, -0.189221 , -0.45446129,\
0.73092481, -0.53433133, 0.12702548, 0.21033591, -0.45067611,\
0.32032545, -0.25744487, -0.06022942, 0.22532666, -0.27174491,\
0.03352306]), 3, 3]
tck_c2 = [array([ 0., 0., 0., 0., 2048., 2048., 2048., 2048.]),\
array([ 0., 0., 0., 0., 2048., 2048., 2048., 2048.]),\
array([ -4.46331730e-04, 3.94044533e-04, -1.77072490e-04,\
2.09823843e-04, 3.02872440e-04, -6.23869655e-04,\
5.44400661e-04, -3.70038727e-04, -1.60398389e-04,\
4.90085648e-04, -4.91436626e-04, 4.62904236e-04,\
4.05692472e-05, -2.34521165e-04, 3.04866621e-04,\
-1.25811263e-04]), 3, 3]
#tck_c0 = [array([0.,0., 1132.60995961, 2048.,2048.]),
# array([0.,0., 814.28303687, 2048.,2048.]),
# array([-49.34868162, -0.22692399, -11.06660953, 5.95510567,
# -3.13109456, 37.63588808, -38.7797533 , 24.43177327, 43.27243297]),1,1]
#tck_c1 = [array([ 0., 0., 2048., 2048.]),
# array([ 0., 0., 2048., 2048.]),
# array([ 0.01418938, -0.06999955, -0.00446343, -0.06662488]),1,1]
#tck_c2 = [array([ 0., 0., 2048., 2048.]),
# array([ 0., 0., 2048., 2048.]),
# array([ -9.99564069e-05, 8.89513468e-05, 4.77910984e-05, 1.44368445e-05]),1,1]
coef = array([interpolate.bisplev(xin,yin,tck_c2),interpolate.bisplev(xin,yin,tck_c1),\
interpolate.bisplev(xin,yin,tck_c0)])
return coef
elif order == 3:
# not a particularly good fit.
tck_c0 = [array([0., 0., 1101.24169141, 2048.,2048.]),
array([0., 0., 952.39879838, 2048.,2048.]),
array([ -74.75453915, 7.63095536, -131.36395787, 11.14709189,
-5.52089337, 73.59327202, -57.25048374, 37.8898465 ,
65.90098406]), 1, 1]
tck_c1 = [array([ 0., 0., 2048., 2048.]),
array([ 0., 0., 2048., 2048.]),
array([-0.04768498, -0.02044308, 0.02984554, -0.04408517]), 1, 1]
coef = array([interpolate.bisplev(xin,yin,tck_c1),interpolate.bisplev(xin,yin,tck_c0)])
return coef
elif order == 0:
tck_c0 = [array([ 0., 0., 1075.07521348, 2048. ,2048.]),
array([ 0., 0., 1013.70915889, 2048. ,2048.]),
array([ 130.89087966, 25.49195385, 5.7585513 , -34.68684878,
-52.13229007, -168.75159696, 711.84382717, -364.9631271 ,
374.9961278 ]),1,1]
tck_c1 = [array([ 0., 0., 2048., 2048.]),
array([ 0., 0., 2048., 2048.]),
array([ 0.08258587, -0.06696916, -0.09968132, -0.31579981]),1,1]
coef = array([interpolate.bisplev(xin,yin,tck_c1),interpolate.bisplev(xin,yin,tck_c0)])
return coef
else:
raise ValueError("spec_curvature: unsupported order for wheelpos 160")
elif wheelpos == 200:
if order == 1:
tck_c1 = [array([ 0., 0., 0., 0., 2048., 2048., 2048., 2048.]),\
array([ 0., 0., 0., 0., 2048., 2048., 2048., 2048.]),\
array([-0.00820665, -0.06820851, 0.04475057, -0.06496112, 0.062989 , \
-0.05069771, -0.01397332, 0.03530437, -0.17563673, 0.12602437,\
-0.10312421, -0.02404978, 0.06091811, -0.02879142, -0.06533121,\
0.07355998]), 3, 3]
tck_c2 = [array([ 0., 0., 0., 0., 2048., 2048., 2048., 2048.]),\
array([ 0., 0., 0., 0., 2048., 2048., 2048., 2048.]),\
array([ 1.69259046e-04, -1.67036380e-04, -9.95915869e-05, \
2.87449321e-04, -4.90398133e-04, 3.27190710e-04, \
2.12389405e-04, -3.55245720e-04, 7.41048332e-04, \
-4.68649092e-04, -1.11124841e-04, 6.72174552e-04, \
-3.26167775e-04, 1.15602175e-04, 5.78187743e-04, \
-8.79488201e-04]), 3, 3]
tck_c3 = [array([ 0., 0., 0., 0., 2048., 2048., 2048., 2048.]),\
array([ 0., 0., 0., 0., 2048., 2048., 2048., 2048.]),\
array([ 1.11106098e-07, 2.72305072e-07, -7.24832745e-07,\
4.65025511e-07, -2.35416547e-07, -3.87761080e-07,\
1.05955881e-06, -6.46388216e-07, 3.15103869e-07,\
5.48402086e-07, -1.44488974e-06, 6.52867676e-07,\
1.14004672e-08, -9.48879026e-07, 1.64082320e-06,\
-8.07897628e-07]), 3, 3]
# the linear fit fails at the right side (57020002) but is quite good otherwise:
#tck_c1 = [array([ 0., 0., 2048., 2048.]), array([ 0., 0., 2048., 2048.]),\
# array([-0.02212781, -0.00873168, -0.00377861, -0.02478484]), 1, 1]
#
#tck_c2 = [array([ 0., 0., 2048., 2048.]), array([ 0., 0., 2048., 2048.]),\
# array([ -6.75189230e-05, 6.19498966e-05, 5.22322103e-05, 7.75736030e-05]), 1, 1]
#
#tck_c3 = [array([ 0., 0., 2048., 2048.]), array([ 0., 0., 2048., 2048.]), \
# array([ -1.75056810e-09, -3.61606998e-08, -6.00321832e-09, -1.39611943e-08]), 1, 1]
coef = array([interpolate.bisplev(xin,yin,tck_c3),interpolate.bisplev(xin,yin,tck_c2),\
interpolate.bisplev(xin,yin,tck_c1), 0.])
return coef
elif order == 2:
tck_c0 = [array([0.,0., 956.25596245, 2048.,2048.]),
array([0.,0., 1067.40622524, 2048.,2048.]),
array([ 17.82135471, -4.93884392, 20.55439437, -18.22869669,
13.11429182, 41.2680039 , 9.8050793 , 32.72362507, -6.56524782]), 1, 1]
tck_c1 = [array([ 0., 0., 2048., 2048.]),
array([ 0., 0., 2048., 2048.]),
array([ 0.02362119, -0.03992572, 0.0177935 , -0.10163929]),1, 1]
tck_c2 = [array([ 0., 0., 2048., 2048.]),
array([ 0., 0., 2048., 2048.]),
array([ -6.32035759e-05, 5.28407967e-05, -8.87338917e-06, 8.58873870e-05]),1,1]
coef = array([interpolate.bisplev(xin,yin,tck_c2),interpolate.bisplev(xin,yin,tck_c1),\
interpolate.bisplev(xin,yin,tck_c0)])
return coef
elif order == 3:
tck_c0 = [array([ 0. , 0. , 807.44415249, 2048.,2048.]),
array([ 0. , 0. , 1189.77686531, 2048.,2048.]),
array([-5436.10353688, 218.93823252, -254.71035527, -24.35684969,
23.26131493, 51.66273635, 37.89898456, 46.77095978,
63.22039872]), 1, 1]
tck_c1 = [array([ 0., 0., 2048., 2048.]),
array([ 0., 0., 2048., 2048.]),
array([-0.02591263, -0.03092398, 0.00352404, -0.01171369]), 1, 1]
coef = array([interpolate.bisplev(xin,yin,tck_c1),interpolate.bisplev(xin,yin,tck_c0)])
return coef
elif order == 0:
tck_c0 = [array([0.,0., 798.6983833, 2048., 2048.]),
array([0.,0., 1308.9171309, 2048., 2048.]),
array([ 1244.05322027, 24.35223956, -191.8634177 , -170.68236661,
-4.57013926, 20.35393124, -365.28237355, -235.44828185, -2455.96232688]), 1, 1]
tck_c1 = [array([ 0., 0., 2048., 2048.]),
array([ 0., 0., 2048., 2048.]),
array([ 0.54398146, -0.04547362, -0.63454342, -0.49417562]),1,1]
coef = array([interpolate.bisplev(xin,yin,tck_c1),interpolate.bisplev(xin,yin,tck_c0)])
return coef
else:
raise ValueError("spec_curvature: unsupported order for wheelpos 200")
else:
print('spec_curvature: illegal wheelpos value')
raise ValueError('spec_curvature: illegal wheelpos value')
def get_coi_box(wheelpos):
# provide half-width, length coi-box and factor
# typical angle spectrum varies with wheelpos
# lengths 29,27,31,28 = |3*8/cos(angle)| for angles [144.5,151.4,140.5,148.1] deg at wheelpos = 160,200,955,1000
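# e.g. for wheelpos=160: |3*8 / cos(radians(144.5))| = 24/0.814 ~ 29.5 -> 29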
coistuff = {'160':(7.5,29,1.11),
'200':(7.5,27,1.12),
'955':(6.5,31,1.09),
'1000':(7.0,28,1.13),}
return coistuff[str(wheelpos)]
def curved_extraction(extimg,ank_c,anchor1, wheelpos, expmap=None, offset=0., \
anker0=None, anker2=None, anker3=None, angle=None, offsetlimit=None, \
background_lower=[None,None], background_upper=[None,None],background_template=None,\
trackonly=False, trackfull=False, caldefault=True, curved="noupdate", \
poly_1=None,poly_2=None,poly_3=None, set_offset=False, \
composite_fit=True, test=None, chatter=0, skip_field_sources=False,\
predict_second_order=True, ZOpos=None,outfull=False, msg='',\
fit_second=True,fit_third=True,C_1=None,C_2=None,dist12=None, ifmotion=True,\
dropout_mask=None,obsid=None,indir=None,motion_file=None,ank_c_0offset=False,ifextended=False,fixwidth=False):
'''This routine knows about the curvature of the spectra in the UV filters
can provide the coefficients of the tracks of the orders
can provide a gaussian fit to the orders
extimg = extracted image
ank_c = array( [ X pos anchor, Y pos anchor, start position spectrum, end spectrum]) in extimg
anchor1 = anchor position in original image in det coordinates
wheelpos = filter wheel position
ZOpos variables defining Zeroth Order positions
angle [req with ZOpos]
background_template - if provided, the background will be based on this
dropout_mask from extractSpecImg
override curvature polynomial coefficients with poly_1,poly_2,poly_3
i.e., after a call to updateFitorder()
outputs a new array with the sum over a fixed number of pixels across the spectrum,
used for the coincidence-loss correction; the width of that box is set by coi_half_width
NPMK, 2010-07-09 initial version
2012-02-20 There was a problem with the offset/track y1 position/borderup,borderdown consistency
when using a prescribed offset. Changing handling. Always make a fine yank adjustment < 3 pix.
disabled for now the set_offset (it does not do anything).
2012-02-20 moved the call to updateFitorder() to curved_extraction. The result is that the
spectrum will be extracted using the updated track parameters.
2014-06-02 add support for fixed box extraction coincidence loss.
2014-08-04 add parameter curved_extraction to limit y-positioning extraction slit with list option
2014-08-06 changed code to correctly adjust y1 position
2014-08-25 fixed error in the curvature of the location of the orders other than the first
2016-01-17 trackcentroiding parameter added to disable centroiding
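Example (a hedged sketch only): all inputs here are assumed to come from the
preceding pipeline steps (extractSpecImg for extimg/ank_c/expmap/angle,
find_zeroth_orders for ZOpos); ifmotion=False skips the need for a motion file:
out = curved_extraction(extimg, ank_c, anchor1, 160, expmap=expmap,
      angle=angle, ZOpos=ZOpos, obsid=obsid, indir=indir,
      ifmotion=False, outfull=True, chatter=1)
fitorder, cp2, coefs, bgs, borders, apercorr, expospec, msg, curved = out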
'''
import pylab as plt
from numpy import array, arange, where, zeros, ones, asarray, abs  # the builtin int is used; numpy.int has been removed
from uvotplot import plot_ellipsoid_regions
import uvotmisc
anky,ankx,xstart,xend = ank_c
xstart -= ankx
xend -= ankx
anchor2 = anchor1
if test == 'cal':
from cal3 import get_1stOrderFit, get_2ndOrderFit ,get_3rdOrderFit, get_0thOrderFit
from cal3 import nominaluv, clockeduv
if wheelpos == 160:
curves = clockeduv
elif wheelpos == 200:
curves = nominaluv
else:
print("use straight extraction for V grism modes")
return
if wheelpos > 300:
return
# coincidence loss box
coi_half_width,coilength,coifactor = get_coi_box(wheelpos)
# read the table of coefficients / get the coefficients of the Y(dis) offsets and the limits []
# stored with the array of angles used.
# ZEROTH ORDER CURVATURE
if test == 'notyetcal':
coef0 = get_0thOrderFit(xin=anchor2[0],yin=anchor2[1],curvedata=curves)
else:
coef0 = spec_curvature(wheelpos,anchor2,order=0)
dlim0L=-820
dlim0U=-570
present0=True
if (xstart > dlim0U):
present0=False
coef0 = array([0.,0.])
if (xstart > dlim0L): dlim0L = xstart
# FIRST ORDER CURVATURE
if test == 'cal':
coef1 = get_1stOrderFit(xin=anchor2[0],yin=anchor2[1],curvedata=curves)
else:
coef1 = spec_curvature(wheelpos,anchor2,order=1)
#coef1[0] = -3.08e-9
#coef1[1] = 5.89e-6
#coef1[2] = -9.21e-3
dlim1L=-400
dlim1U=1150
present1=True
if (xstart > dlim1L): dlim1L = xstart
if (xend < dlim1U): dlim1U = xend
# SECOND ORDER CURVATURE
if test == 'cal':
coef2 = get_2ndOrderFit(xin=anchor2[0],yin=anchor2[1],curvedata=curves)
else:
coef2 = spec_curvature(wheelpos,anchor2,order=2)
dlim2L=25
dlim2U=3000
if (xstart > dlim2L): dlim2L = xstart
if (xend < dlim2U): dlim2U = xend
if (xend > dlim2L):
present2=True
else: present2=False
# THIRD ORDER CURVATURE
if test == 'cal':
coef3 = get_3rdOrderFit(xin=anchor2[0],yin=anchor2[1],curvedata=curves)
else:
coef3 = spec_curvature(wheelpos,anchor2,order=3)
dlim3L=425
dlim3U=3000
if (xstart > dlim3L): dlim3L = xstart
if (xend < dlim3U): dlim3U = xend
if (xend > dlim3L):
present3=True
else: present3=False
# good first approximation:
# if wheelpos == 160:
sig0coef=array([4.7])
sig1coef=array([-8.22e-09, 6.773e-04, 3.338])
#sig1coef=array([1.6*(-8.22e-09), 1.6*(6.773e-04), 1.6*3.338]) #~FIXME: try changing sigma
#sig1coef=array([ 3.0])
sig2coef=array([-5.44e-07, 2.132e-03, 3.662])
sig3coef=array([0.0059,1.5])
# override coefficients y(x):
print ("DEBUG 3431 type coef1 is ", type(coef1) )
print ("DEBUG 3432 type poly_1 is ",type(poly_1))
if (type(poly_1) != typeNone): coef1 = poly_1
if (type(poly_2) != typeNone): coef2 = poly_2
if (type(poly_3) != typeNone): coef3 = poly_3
#===================================================================
if chatter > 0:
print('================== curvature fits for y ==============')
print('zeroth order poly: ',coef0)
print('first order poly: ',coef1)
print('second order poly: ',coef2)
print('third order poly: ',coef3)
print('======================================================')
#===================================================================
# remove background
#if cval == None: cval = out_of_img_val = -1.0123456789 cval now global
if chatter > 3 : print ("DEBUG 3453 remove background")
bg, bg1, bg2, bgsig, bgimg, bg_limits, \
(bg1_good, bg1_dis, bg1_dis_good, bg2_good, bg2_dis, bg2_dis_good, bgimg_lin) \
= findBackground(extimg,background_lower=background_lower,
background_upper=background_upper,yloc_spectrum=anky, chatter=2)
if background_template != None:
bgimg = background_template['extimg']
spimg = extimg - bgimg
ny,nx = spimg.shape
# initialise quality array, exposure array for spectrum and flags
quality = zeros(nx,dtype=int)
expospec = zeros(5*nx,dtype=int).reshape(5,nx)
qflag = quality_flags()
# get the mask for zeroth orders in the way
if chatter > 3 : print ("DEBUG 3470 get mask zeroth orders ")
# set bad done while extracting spectra below
set_qual = ((not skip_field_sources) & (ZOpos != None) & (angle != None))
if set_qual:
Xim,Yim,Xa,Yb,Thet,b2mag,matched,ondetector = ZOpos
# find_zeroth_orders(filestub, ext, wheelpos,clobber="yes", )
dims = array([nx,ny])
pivot_ori=array([(anchor1)[0],(anchor1)[1]])
pivot= array([ank_c[1],ank_c[0]])
# map down to 18th magnitude in B2 (use global variable uvotgetspec.background_source_mag)
m_lim = background_source_mag
map_all = plot_ellipsoid_regions(Xim.copy(),Yim.copy(),Xa.copy(),Yb.copy(),Thet.copy(),\
b2mag.copy(),matched.copy(), ondetector,pivot,pivot_ori,dims,m_lim,img_angle=angle-180.0,\
lmap=True,makeplot=False,chatter=chatter)
if chatter > 2:
print("zeroth order map all: shape=",map_all.shape," min, max =",map_all.min(), map_all.max())
# map down to 16th magnitude in B2
m_lim = 16.0
map_strong = plot_ellipsoid_regions(Xim.copy(),Yim.copy(),Xa.copy(),Yb.copy(),Thet.copy(),\
b2mag.copy(),matched.copy(), ondetector,pivot,pivot_ori,dims,m_lim,img_angle=angle-180.0,\
lmap=True,makeplot=False,chatter=chatter)
if chatter > 2:
print("zeroth order map strong: shape=",map_strong.shape," min, max =",map_strong.min(), map_strong.max())
# tracks - defined as yi (delta) = 0 at anchor position (ankx,anky)
if chatter > 3 : print ("DEBUG 3500 set up y arrays ")
# shift to first order anchor
x = array(arange(nx))-ankx
y = zeros(nx)+anky
y0 = zeros(nx)+anky - polyval(coef1,0)
y1 = zeros(nx)+anky - polyval(coef1,0)
y2 = zeros(nx)+anky - polyval(coef1,0)
y3 = zeros(nx)+anky - polyval(coef1,0)
q0 = where((x >= dlim0L) & (x <= dlim0U))
x0 = x[q0]
if present0: y0[q0] += polyval(coef0,x[q0])
q1 = where((x >= dlim1L) & (x <= dlim1U))
x1 = x[q1]
if present1: y1[q1] += polyval(coef1,x[q1])
q2 = where((x >= dlim2L) & (x <= dlim2U))
x2 = x[q2]
if present2: y2[q2] += polyval(coef2,x[q2])
q3 = where((x >= dlim3L) & (x <= dlim3U))
x3 = x[q3]
if present3: y3[q3] += polyval(coef3,x[q3])
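# e.g. the first-order track row at dispersion-offset pixel x is
#   y1[x] = anky - polyval(coef1, 0) + polyval(coef1, x)
# so that, by construction, y1 = anky at the anchor (x = 0).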
if trackcentroiding: # global (default = True)
if chatter > 3 : print ("DEBUG 3522 centroid track")
# refine the offset by determining where the peak in the
# first order falls.
# We NEED a map to exclude zeroth orders that fall on/near the spectrum
ny = int(ny)
cp2 = zeros(ny)
cp2_spimg = zeros(spimg.shape) #~TODO:
delpix = 50
if wheelpos == 200: delpix=25 # the accuracy for the nominal uv anchor is not as good.
offsetset = False
if type(offsetlimit) == list:
offsetval = offsetlimit[0]
delpix = array([abs(offsetlimit[1]),1],dtype=int).max() # at least 1
if offsetlimit[1] < 1.:
offsetset = True
else:
print('curved_extraction: offsetlimit=',offsetlimit,' delpix=',delpix)
eo = int(anky-slit_width/2)
if set_offset:
eo = int(offset-slit_width/2)
for q in q1[0]:
if ((x[q] < 600) & (x[q] > -200) & (quality[q] == 0)):
try:
m0 = 0.5*ny-delpix + eo #int( (ny+1)/4)
m1 = 0.5*ny+delpix + eo #int( 3*(ny+1)/4)+1
yoff = y1[q] - anky # this is just the offset from the anchor since y1[x=0] was set to anky
cp2[int(m0-yoff):int(m1-yoff)] += spimg[int(m0):int(m1),q].flatten()
cp2_spimg[int(m0-yoff):int(m1-yoff),q] += spimg[int(m0):int(m1),q].flatten()
except:
print("skipping slice %5i in adjusting first order y-position"%(q))
pass
fig = plt.figure()
plt.title(obsid)
#plt.show()
#print(np.sum(cp2_spimg[:,1632:1832],axis=1),len(np.sum(cp2_spimg[:,200:400],axis=1)))
plt.plot(arange(slit_width),np.sum(cp2_spimg[:,1032:1232],axis=1)/expmap[0],label='-200-0/1032-1232')
plt.plot(arange(slit_width),np.sum(cp2_spimg[:,1232:1432],axis=1)/expmap[0],label='0-200/1232-1432')
plt.plot(arange(slit_width),np.sum(cp2_spimg[:,1432:1632],axis=1)/expmap[0],label='200-400/1432-1632')
plt.plot(arange(slit_width),np.sum(cp2_spimg[:,1632:1832],axis=1)/expmap[0],label='400-600/1632-1832')
plt.legend()
plt.ylabel('count rate per bin')
plt.title(obsid)
plt.savefig(indir+'/'+obsid+'_wing.png')
#plt.show()
plt.close()
if offsetset:
yof = offsetval - anky
if chatter > 1:
print("spectrum location set with input parameter to: y=%5.1f"%(offsetval))
msg += "spectrum location set with input parameter to: y=%5.1f\n"%(offsetval)
else:
if ifmotion:
motion = abs(obsid2motion(obsid,motion_file)['V'])
(p0,p1,p2), ier = leastsq(Fun4, (cp2.max(),anky,3.2), args=(cp2,arange(slit_width),motion) ) #~FIXME:
sigma_mean=np.mean(polyval(sig1coef,x))
#p3= motion
elif fixwidth:
(p0,p1,p2), ier = leastsq(Fun1, (cp2.max(),anky,3.2), args=(cp2,arange(slit_width)) )
sigma_mean=fixwidth/trackwidth #np.mean(polyval(sig1coef,x))
times = sigma_mean/np.mean(polyval(sig1coef,x))
sig0coef = times*sig0coef
sig1coef = times*sig1coef
sig2coef = times*sig2coef
sig3coef = times*sig3coef
elif ifextended:
(p0,p1,p2), ier = leastsq(Fun1, (cp2.max(),anky,3.2), args=(cp2,arange(slit_width)) )
sigma_mean = p2
times = p2/np.mean(polyval(sig1coef,x))
#times = 1.
#sigma_mean = times*np.mean(polyval(sig1coef,x))
sig0coef = times*sig0coef
sig1coef = times*sig1coef
sig2coef = times*sig2coef
sig3coef = times*sig3coef
else:
(p0,p1), ier = leastsq(Fun1b, (cp2.max(),anky), args=(cp2,arange(slit_width),3.2) )
sigma_mean=np.mean(polyval(sig1coef,x))
#print(p0,p1,p2,p3,sigma_mean)
fig = plt.figure()
if ifmotion:
plt.plot(arange(slit_width),cp2)
plt.plot(arange(slit_width),smeargaussian(arange(slit_width),p0,p1,sigma_mean,motion))
plt.vlines(p1-(trackwidth *sigma_mean+motion/2),0,np.max(cp2),color='k')
plt.vlines(p1+(trackwidth *sigma_mean+motion/2),0,np.max(cp2),color='k')
plt.xlabel('y pixels')
plt.ylabel('total counts')
plt.title(obsid+' motion:'+"%.2f"%motion)
elif fixwidth:
np.savetxt(indir+'/'+obsid+'_fit.txt',np.transpose(np.array([arange(slit_width),cp2])),delimiter=',',fmt='%.2f') #~FIXME:
with open(indir+'/'+obsid+'_fit.txt','r+') as f:
content = f.read()
f.seek(0,0)
f.write('A:'+f'{p0:.2f}'+' mu:'+f'{p1:.2f}'+' sigma:'+f'{p2:.2f}'+'\n'+content)
f.close()
plt.plot(arange(slit_width),cp2)
plt.plot(arange(slit_width),singlegaussian(arange(slit_width),p0,p1,p2))
plt.vlines(p1-(trackwidth *sigma_mean),0,np.max(cp2),color='k')
plt.vlines(p1+(trackwidth *sigma_mean),0,np.max(cp2),color='k')
plt.xlabel('y pixels')
plt.ylabel('total counts')
plt.title(obsid)
else:
plt.plot(arange(slit_width),cp2)
plt.plot(arange(slit_width),singlegaussian(arange(slit_width),p0,p1,sigma_mean))
plt.vlines(p1-(trackwidth *sigma_mean),0,np.max(cp2),color='k')
plt.vlines(p1+(trackwidth *sigma_mean),0,np.max(cp2),color='k')
plt.xlabel('y pixels')
plt.ylabel('total counts')
plt.title(obsid)
plt.savefig(indir+'/'+obsid+'_fit.png')
#plt.show()
plt.close()
yof = (p1-anky)
if ank_c_0offset == True:
yof = 0
if chatter > 1:
print("\n *** cross-spectrum gaussian fit parameters: ",p0,p1)
print("the first anchor fit with gaussian peaks at %5.1f, and the Y correction\nis %5.1f (may not be used)" % (p1,yof))
#### should also estimate the likely wavelength error from the offset distance p1 and print
#msg += "cross-spectrum gaussian fit parameters: (%5.1f ,%5.1f)\n" % (p0,p1)
#msg += "the first anchor fit with gaussian peaks at %5.1f, and the Y correction was %5.1f\n" % (p1,yof)
else:
set_offset = True
offsetset = False
# so now shift the location of the curves to match the first order uv part.
if set_offset:
# ignore computed offset and offsetlimit [,] but used passed offset argument
y0 += offset
y1 += offset
y2 += offset
y3 += offset
print("shifting the y-curve with offset passed by parameter")
else:
# assuming the relative position of the orders is correct, just shift the whole bunch
y0 += yof
y1 += yof
y2 += yof
y3 += yof
if not set_qual:
map = None
print("no zeroth order contamination quality information available ")
quality[:] = qflag['good']
# OUTPUT PARAMETER spectra, background, slit init - full dimension retained
if chatter > 3 : print ("DEBUG 3594 set up spectrum arrays ")
# initialize
sp_all = zeros(nx) + cval # straight slit
bg_all = zeros(nx) + cval # straight slit
# spectrum arrays
sp_zeroth = zeros(nx) + cval # curved extraction
sp_first = zeros(nx) + cval # curved extraction
sp_second = zeros(nx) + cval # curved extraction
sp_third = zeros(nx) + cval # curved extraction
bg_zeroth = zeros(nx) + cval # curved extraction
bg_first = zeros(nx) + cval # curved extraction
bg_second = zeros(nx) + cval # curved extraction
bg_third = zeros(nx) + cval # curved extraction
# coi-area arrays
co_zeroth = zeros(nx) + cval
co_first = zeros(nx) + cval
co_second = zeros(nx) + cval
co_third = zeros(nx) + cval
co_back = zeros(nx) + cval
# quality flag arrays
at1 = zeros(nx,dtype=bool)
at2 = zeros(nx,dtype=bool)
at3 = zeros(nx,dtype=bool)
apercorr = zeros(5*nx).reshape(5,nx) + cval
borderup = zeros(5*nx).reshape(5,nx) + cval
borderdown = zeros(5*nx).reshape(5,nx) + cval
fitorder = (present0,present1,present2,present3),(q0,q1,q2,q3),(
y0,dlim0L,dlim0U,sig0coef,sp_zeroth,co_zeroth),(
y1,dlim1L,dlim1U,sig1coef,sp_first, co_first ),(
y2,dlim2L,dlim2U,sig2coef,sp_second,co_second),(
y3,dlim3L,dlim3U,sig3coef,sp_third,co_third ),(
x,xstart,xend,sp_all,quality,co_back)
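# fitorder packs, per order, the track y(x), the dispersion limits, the width
# coefficients and the (still empty) spectrum and coi-area arrays; it is
# unpacked again below, after updateFitorder(), in exactly this layout.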
if trackonly: # output the coordinates on the extimg image which specify the lay of
# each order
if outfull:
return fitorder, cp2, (coef0,coef1,coef2,coef3), (bg_zeroth,bg_first,
bg_second,bg_third), (borderup,borderdown), apercorr #, expospec, msg, curved
else: return fitorder
if not trackfull:
if (curved == "update") & (not trackcentroiding):
# the hope is, that with more data the calibration can be improved to eliminate this step
#try:
fitorder2, fval, fvalerr = updateFitorder(extimg, fitorder, wheelpos, full=True,
predict2nd=predict_second_order, fit_second=fit_second, fit_third=fit_third,
C_1=C_1, C_2=C_2, d12=dist12, chatter=chatter)
msg += "updated the curvature and width fit parameters\n"
(present0,present1,present2,present3),(q0,q1,q2,q3), (
y0,dlim0L,dlim0U,sig0coef,sp_zeroth,co_zeroth),(
y1,dlim1L,dlim1U,sig1coef,sp_first,co_first ),(
y2,dlim2L,dlim2U,sig2coef,sp_second,co_second),(
y3,dlim3L,dlim3U,sig3coef,sp_third,co_third ),(
x,xstart,xend,sp_all,quality,co_back) = fitorder2
# update the anchor y-coordinate
ank_c[0] = y1[int(ank_c[1])]
#except:
# msg += "WARNING: fit order curvature update has failed\n"
# curved = "curve"
if offsetset & (not trackcentroiding):
mess = "%s\nWARNING Using offsetlimit with parameter *curved = 'update'* \n"\
"WARNING Therefore we updated the curvature, and besides the curvature, the\n"\
"Y-position of the extraction region was updated to y1[ankx]=%5.1f and \n"\
"does not equal the offsetlimit value of %5.1f \n%s"%(30*"=*=",
y1[int(ankx)],offsetlimit[0],30*"=*=")
print(mess)
mess = "Updated the curvature, and besides the curvature, the Y-position \n"\
" of the extraction region was updated to y1[ankx]=%5.1f and does\n"\
" not equal the offsetlimit value of %5.1f \n"%(y1[int(ankx)],offsetlimit[0])
msg += mess+"\n"
# default single track extraction
sphalfwid = 4.*sig1coef[0]
spwid = 2*sphalfwid
splim1 = int(slit_width/2+offset-sphalfwid+1)
splim2 = int(splim1 + spwid)
sp_all = extimg[splim1:splim2,:].sum(axis=0).flatten()
bg_all = bgimg[splim1:splim2,:].sum(axis=0).flatten()
borderup[4,:] = splim2
borderdown[4,:] = splim1
# background for coi-loss box - using a 3x larger sampling region
k1 = int(anky-3*coi_half_width+0.5)
co_back = bgimg[k1:k1+int(6*coi_half_width),:].sum(axis=0)/3.0
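# i.e. sum over 3 x (2*coi_half_width) rows and divide by 3, so co_back is
# normalised to the same box height as the per-order coi sums below.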
if present0:
for i in range(nx):
sphalfwid = trackwidth*polyval(sig0coef,x[i])
spwid = 2*sphalfwid
#splim1 = 100+offset-sphalfwid+1 changes 19-feb-2012
#splim2 = splim1 + spwid
#k1 = splim1+y0[i]-anky
k1 = int(y0[i] - sphalfwid + 0.5)
k2 = k1 + int(spwid+0.5)
k3 = int(y0[i] - coi_half_width + 0.5)
k4 = k1 + int(2*coi_half_width)
if i in q0[0]:
co_zeroth[i] = extimg[k3:k4,i].sum()
sp_zeroth[i] = extimg[k1:k2,i].sum()
bg_zeroth[i] = bgimg[k1:k2,i].sum()
borderup[0,i] = k2
borderdown[0,i] = k1
apercorr[0,i] = x_aperture_correction(k1,k2,sig0coef,x[i],norder=0,wheelpos=wheelpos,fixwidth=fixwidth)
if len(expmap) == 1: expospec[0,i] = expmap[0]
else: expospec[0,i] = expmap[k1:k2,i].mean()
if present1:
#if ifmotion:
# apercorr_value = x_aperture_correction(0,0,sig1coef,100,norder=1,mode='gaussian',
# sigma=p2,motion=motion,tw=trackwidth,ifmotion=ifmotion)
for i in range(nx):
if ifmotion:
sphalfwid = trackwidth *polyval(sig1coef,x[i])+motion/2 #~FIXME:
else:
sphalfwid = trackwidth * polyval(sig1coef,x[i])
# if (x[i] < 30): sphalfwid *= bluetrackwidth
spwid = 2*sphalfwid
#splim1 = 100+offset-sphalfwid+1 changes 19-feb-2012
#splim2 = splim1 + spwid
#k1 = int(splim1+y1[i]-anky+0.5)
k1 = int(y1[i] - sphalfwid + 0.5)
k2 = k1 + int(spwid+0.5)
k3 = int(y1[i] - coi_half_width + 0.5)
k4 = k3 + int(2*coi_half_width) #--TODO:FIXME:
k5 = y1[i]
if i in q1[0]:
co_first[i] = extimg[k3:k4,i].sum()
sp_first[i] = extimg[k1:k2,i].sum()
bg_first[i] = bgimg[k1:k2,i].sum()
borderup[1,i] = k2
borderdown[1,i] = k1
if ifmotion:
apercorr[1,i] = x_aperture_correction(k1,k2,sig1coef,x[i],norder=1,mode='gaussian',
sigma=polyval(sig1coef,x[i]),motion=motion,ifmotion=ifmotion,wheelpos=wheelpos,fixwidth=fixwidth)
# apercorr[1,i] = apercorr_value
else:
apercorr[1,i] = x_aperture_correction(k1,k2,sig1coef,x[i],norder=1,wheelpos=wheelpos,fixwidth=fixwidth)
if len(expmap) == 1: expospec[1,i] = expmap[0]
else: expospec[1,i] = expmap[k1:k2,i].mean()
if dropout_mask is not None:  # avoid elementwise comparison when a mask array is given
at3[i] = dropout_mask[k1:k2,i].any()
if set_qual:
k5 = int(y1[i] - 49 + 0.5)
k6 = k5 + int(98+0.5)
if ny > 20:
# all zeroth orders of sources within coi-distance:
at1[i] = (map_all[i,k3:k4] == False).any()
if ny > 100:
# strong sources: circle 49 pix radius hits the centre of the track
at2[i] = (map_strong[i,k5:k6] == False).any()
quality[at1] = qflag['weakzeroth']
quality[at2] = qflag['zeroth']
quality[at3] = qflag['bad']
if present2:
for i in range(nx):
sphalfwid = trackwidth * polyval(sig2coef,x[i])
spwid = 2*sphalfwid
#splim1 = 100+offset-sphalfwid+1 changes 19-feb-2012
#splim2 = splim1 + spwid
#k1 = int(splim1+y2[i]-anky+0.5)
k1 = int(y2[i] - sphalfwid +0.5)
k2 = k1 + int(spwid+0.5)
k3 = int(y2[i] - coi_half_width + 0.5)
k4 = k1 + int(2*coi_half_width)
if i in q2[0]:
co_second[i] = extimg[k3:k4,i].sum()
sp_second[i] = extimg[k1:k2,i].sum()
bg_second[i] = bgimg[k1:k2,i].sum()
borderup[2,i] = k2
borderdown[2,i] = k1
apercorr[2,i] = x_aperture_correction(k1,k2,sig2coef,x[i],norder=2,wheelpos=wheelpos,fixwidth=fixwidth)
if len(expmap) == 1: expospec[2,i] = expmap[0]
else: expospec[2,i] = expmap[k1:k2,i].mean()
y1_y2 = np.abs(0.5*(k2+k1) - 0.5*(borderup[1,i]+borderdown[1,i]))  # distance between the second and first order track centres
s1_s2 = 0.5*(np.polyval(sig1coef,x[i]) + np.polyval(sig2coef, x[i]) )
if ( y1_y2 < s1_s2) : quality[i] += qflag.get('overlap')
if present3:
for i in range(nx):
sphalfwid = trackwidth * polyval(sig3coef,x[i])
spwid = 2*sphalfwid
#splim1 = 100+offset-sphalfwid+1
#splim2 = splim1 + spwid
#k1 = int(splim1+y3[i]-anky+0.5)
k1 = int(y3[i] - sphalfwid +0.5)
k2 = k1 + int(spwid+0.5)
k3 = int(y3[i] - coi_half_width + 0.5)
k4 = k1 + int(2*coi_half_width)
if i in q3[0]:
co_third[i] = extimg[k3:k4,i].sum(axis=0)
sp_third[i] = extimg[k1:k2,i].sum(axis=0)
bg_third[i] = bgimg[k1:k2,i].sum(axis=0)
borderup[3,i] = k2
borderdown[3,i] = k1
apercorr[3,i] = x_aperture_correction(k1,k2,sig3coef,x[i],norder=3,wheelpos=wheelpos,fixwidth=fixwidth)
if len(expmap) == 1: expospec[3,i] = expmap[0]
else: expospec[3,i] = expmap[k1:k2,i].mean()
# y0,y1,y2,y3 now reflect accurately the center of the slit used.
if chatter > 3 : print ("DEBUG 3792 stacking results in structure fitorder")
fitorder = (present0,present1,present2,present3),(q0,q1,q2,q3), (
y0,dlim0L,dlim0U,sig0coef,sp_zeroth,co_zeroth),(
y1,dlim1L,dlim1U,sig1coef,sp_first, co_first ),(
y2,dlim2L,dlim2U,sig2coef,sp_second,co_second),(
y3,dlim3L,dlim3U,sig3coef,sp_third, co_third ),(
x,xstart,xend,sp_all,quality,co_back)
#~FIXME:
if outfull:
return fitorder, cp2, (coef0,coef1,coef2,coef3), (bg_zeroth,bg_first,
bg_second,bg_third), (borderup,borderdown), apercorr, expospec, msg, curved
else: return fitorder
#===================
# Now calculate the probability distributions across the orders using gaussian fits
# this section was for development only
if trackfull: #~FIXME: # fit the cross profile with gaussians; return the gaussian fit parameters
if chatter > 3 : print ("DEBUG 3810 full-track update with mfit")
# output parameter gfit:
# define output per x[i]: numpy array gfit.shape= (6,nx) of: (x,order,amplitude,y_pix_position,sig,flags)
gfit = np.zeros( 4*6*nx ).reshape(4,6,nx) -1
#check that y1,y2,y3 are full length arrays
if not ( (len(y1) == nx) & (len(y2) == nx) & (len(y3) == nx) ):
print("FATAL error in uvotgetspec.curved_extraction array sizes wrong")
# this parameter allows you to restrict the range along the dispersion being considered
if (test == None) | (test == 'cal'):
ileft = 2
irite = nx -2
else:
ileft = test[0]
irite = test[1]
for i in range(ileft,irite):
if chatter > 3: print("uvotgetspec.curved_extraction [trackfull] fitting i = %2i x=%6.2f"%(i,x[i]))
# do the zeroth order
if i in q0[0]:
Ypos = (array( [y0[i]])).flatten()
Xpos = arange(i-2,i+3)
sigmas = sig0coef
(par, flag), junk = get_components(Xpos,spimg,Ypos,wheelpos,\
caldefault=caldefault,sigmas=sigmas)
flags = str(flag[0])+str(flag[1])+str(flag[2])+str(flag[3])+str(flag[4])+str(flag[5])
iflags = int(flags)
gfit[0,:,i] = [i,0,par[0],par[1],par[2],iflags]
if chatter > 3: print(i, par, flag)
# do the first order
if ((i in q1[0]) & (i not in q2[0])) :
Ypos = array( [y1[i]] ).flatten()
Xpos = arange(i-2,i+3)
sigmas = sig1coef
(par, flag), junk = get_components(Xpos,spimg,Ypos,wheelpos,\
caldefault=caldefault,sigmas=sigmas)
flags = str(flag[0])+str(flag[1])+str(flag[2])+str(flag[3])+str(flag[4])+str(flag[5])
iflags = int(flags)
gfit[1,:,i] = [i,1,par[0],par[1],par[2],iflags]
if chatter > 3: print(i, par, flag)
# do the second order
if ((i in q1[0]) & (i in q2[0]) & (i not in q3[0])):
Ypos = array( [y1[i],y2[i]]).flatten()
Xpos = arange(i-3,i+4)
sigmas = array([ sig1coef[0], sig2coef[0] ])
if chatter > 3: print('++++ second order Xpos:',Xpos,' Ypos: ', Ypos,' wheelpos ',wheelpos)
Z = get_components(Xpos,spimg,Ypos,wheelpos,composite_fit=composite_fit,\
caldefault=caldefault,sigmas=sigmas)
par, flag = Z[0]
flags = str(flag[0])+str(flag[1])+str(flag[2])+str(flag[3])+str(flag[4])+str(flag[5])
iflags = int(flags)
gfit[1,:,i] = [i,1,par[0],par[1],par[2],iflags]
if len(par) == 6:
gfit[2,:,i] = [i,2,par[3],par[4],par[5],iflags]
if chatter > 3: print(i); print(par[0:3]); print(par[3:6]); print(flag)
# do the third order
if ((i in q1[0]) & (i in q2[0]) & (i in q3[0])):
Ypos = array([y1[i],y2[i],y3[i]]).flatten()
Xpos = arange(i-4,i+5)
sigmas = array([sig1coef[0], sig2coef[0], sig3coef[0]])
if chatter > 3: print('+++++ third order Xpos:',Xpos,' Ypos: ', Ypos,' * * * 3 3 3 3 3 * * *')
width = abs( polyval(array([2.0e-05, 0.034, -70]),(anchor2[1]-1200.)))+5.0 # rough limits
try:
Z = get_components(Xpos,spimg,Ypos,wheelpos,chatter=chatter,width=width,\
composite_fit=composite_fit,caldefault=caldefault,sigmas=sigmas)
par, flag = Z[0]
except:
print("failed 3rd order fitting width = ",width)
print("Ypos = ",Ypos)
print("Xpos range ",i-4,i+5, " sigmas = ",sigmas, " wheelpos = ",wheelpos)
print("composite_fit:",composite_fit," caldefault:",caldefault)
print(par)
print(flag)
par = array([0.,y1[i],3.,0.,y2[i],4.,0.,y3[i],6.])
flag = array([9,9,9,9,9,9])
flags = str(flag[0])+str(flag[1])+str(flag[2])+str(flag[3])+str(flag[4])+str(flag[5])
iflags = int(flags)
gfit[1,:,i] = [i,1,par[0],par[1],par[2],iflags]
if len(par) > 4:
gfit[2,:,i] = [i,2,par[3],par[4],par[5],iflags]
if len(par) == 9:
gfit[3,:,i] = [i,3,par[6],par[7],par[8],iflags]
if chatter > 3:
print(i); print(par[0:3]) ; print(par[3:6]) ; print(par[6:9]) ; print(iflags)
# things not covered (properly):
# -- the second order falls on the first and the third order not
# -- one of the orders is not on the detector
# -- order overlap
# -- minus one order
return fitorder, gfit, (bgimg,)
def x_aperture_correction(k1,k2,sigcoef,x,norder=None, mode='best', coi=None, wheelpos=None, sigma=3.2,motion=10, tw=2.5, ifmotion=True, fixwidth=False):
'''Returns the aperture correction factor
parameters
----------
k1,k2 : int
k1 edge of track, k2 opposite track edge
in pixel coordinates
sigcoef : list
polynomial coefficient of the fit to the track width
so that sigma = polyval(sigcoef,x)
x : float
pixel/channel position
norder: int
order of the spectrum
mode : 'best'|'gaussian'
'gaussian' option causes first order to be treated as a gaussian PSF
coi : None
not implemented
wheelpos : 160|200|955|1000
filter wheel position
Notes
-----
The aperture correction is returned for the given sigcoef and position x,
using the measured cumulative profile normal to the dispersion for the
first order (faint spectrum), or Gaussians for orders zero, second, and third.
History:
2012-02-20 Split out in preparation of non-gaussian aperture correction factor
2012-10-06 Dependence on coi-factor identified as a likely parameter
changing the PSF (no further action)
2013-12-15 revised aperture functions, one for each grism (low coi)
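Example
-------
A hedged sketch using the default first-order width coefficients from
curved_extraction; the track edges k1, k2 and the pixel x are illustrative:
sig1coef = np.array([-8.22e-09, 6.773e-04, 3.338])
ac = x_aperture_correction(95, 113, sig1coef, 500., norder=1,
                           wheelpos=160, ifmotion=False)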
'''
import uvotmisc
import scipy
from scipy.interpolate import interp1d, splev
import numpy as np
apercorr = 1.0
if fixwidth:
apercorr = np.ones(np.shape(apercorr)) #~FIXME: I must remove this line to do apercorr
return apercorr
if norder == 0:
apercorr = 1.0/uvotmisc.GaussianHalfIntegralFraction( 0.5*(k2-k1)/np.polyval(sigcoef,x) )
if norder == 1:
# low coi apertures (normalised to 1 at aperture with half-width 2.5 sigma)
# fitted polynomials to the aperture (low-coi)
#for 0<aperture<6 sig
polycoef160 = np.array([ 1.32112392e-03, -2.69269447e-02, 2.10636905e-01,
-7.89493710e-01, 1.43691688e+00, -2.43239325e-02])
polycoef200 = np.array([ 1.29297314e-03, -2.66018405e-02, 2.10241179e-01,
-7.93941262e-01, 1.44678036e+00, -2.51078365e-02])
#y200 = polyval(polycoef200,x)
polycoef1000a = np.array([ 0.00260494, -0.04792046, 0.33581242, -1.11237223, 1.74086898,
-0.04026319]) # for aperture <= 2.2 sig, and for larger:
polycoef1000b = np.array([ 0.00128903, 0.00107042, 0.98446801])
polycoef955 = np.array([ 0.00213156, -0.03953134, 0.28146284, -0.96044626, 1.58429093,
-0.02412411]) # for aperture < 4 sig
# best curves for the apertures (using aperture.py plots WD1657+343)
aper_160_low = {
# half-width in units of sig
"sig": [0.00,0.30,0.51,0.700,0.90,1.000,1.100,1.200,1.400,
1.600,1.800,2.000,2.20,2.5,2.900,3.31,4.11,6.00],
# aperture correction, normalised
"ape": [0.00,0.30,0.52,0.667,0.77,0.818,0.849,0.872,0.921,
0.947,0.968,0.980,0.99,1.0,1.008,1.01,1.01,1.01]
}
aper_200_low = {
"sig": [0.0,0.300,0.510,0.700,0.800,0.900,1.000,1.10,1.20,
1.40, 1.60, 1.80, 2.0, 2.2, 2.5, 2.7, 3.0,4.0,6.0],
"ape": [0.0,0.308,0.533,0.674,0.742,0.780,0.830,0.86,0.89,
0.929,0.959,0.977,0.986,0.991,1.0,1.002,1.003,1.004,1.005 ]
}
aper_1000_low = {
"sig": [0.0, 0.3, 0.5, 0.7, 0.8, 0.9, 1.0, 1.2, 1.4, 1.6, 2.0,2.2,2.5,3.0 ,4.0 ,6.0 ],
"ape": [0.0,0.37,0.55,0.68,0.74,0.80,0.85,0.91,0.96,0.98,0.995,1. ,1. ,1.004,1.01,1.01]
}
aper_955_med = {
"sig": [0.0,0.30,0.60,0.80,1.00,1.30,1.60,1.80,2.00,2.50,3.00, 4.00,6.00],
"ape": [0.0,0.28,0.47,0.64,0.75,0.86,0.93,0.96,0.97,1.00,1.013,1.02,1.02]
}
aper_1000_med = {
"sig": [0.0,0.30,0.50,0.70,0.80,0.90,1.00,1.20,1.40,1.60,
1.80,2.00,2.20,2.50,3.00,4.00,6.00],
"ape": [0.0,0.34,0.46,0.63,0.68,0.73,0.76,0.87,0.90,0.94,
0.96,0.98,0.99,1.00,1.015,1.027,1.036]
}
renormal = 1.0430 # calibration done with aperture correction 1.043 (sig=2.5)
sig = np.polyval(sigcoef,x) # half width parameter sig in pixels
xx = 0.5*(k2-k1)/sig # half track width in units of sig
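# e.g. a slit of half-width 2.5*sig gives xx = 2.5, the normalisation point of
# the aperture tables above, so apercorr = renormal/1.0 ~ 1.043 there.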
if (mode == 'gaussian'):# | (xx > 4.5):
if ifmotion:
apercorr = 1.0/uvotmisc.SmearGaussianHalfIntegralFraction(sigma,motion,tw) #~FIXME:
else:
apercorr = 1.0/uvotmisc.GaussianHalfIntegralFraction( 0.5*(k2-k1)/np.polyval(sigcoef,x) )
elif (wheelpos != None):
# low coi for wheelpos = 160,200; medium coi for wheelpos = 955, 1000
if wheelpos == 160:
if (type(coi) == typeNone) or (coi < 0.1) :
apercf1 = interp1d(aper_160_low['sig'],aper_160_low['ape'],)
apercorr = renormal / apercf1(xx)
if wheelpos == 200:
if (type(coi) == typeNone) or (coi < 0.1) :
apercf2 = interp1d(aper_200_low['sig'],aper_200_low['ape'],)
apercorr = renormal / apercf2(xx)
if wheelpos == 955:
if (type(coi) == typeNone) or (coi < 0.1) :
apercf3 = interp1d(aper_955_med['sig'],aper_955_med['ape'],)
apercorr = renormal / apercf3(xx)
#apercf3 = interp1d([0,6],[0,1],fill_value=(0,1),bounds_error=False)
#apercorr = 1.0/apercf3(xx) # change psf to test if there is apercorr before coi-corr
if wheelpos == 1000:
if (type(coi) == typeNone) or (coi < 0.1) :
apercf4 = interp1d(aper_1000_low['sig'],aper_1000_low['ape'],)
apercorr = renormal / apercf4(xx)
else:
# when xx < 4.5, mode != 'gaussian', and wheelpos == None, use the following
# 2012-02-21 PSF best fit at 3500 from cal_psf aper05+aper08 valid for 0.5 < xx < 4.5
# the function does not rise as steeply so has more prominent wings
tck = (np.array([ 0. , 0. , 0. , 0. , 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8,
0.9, 1. , 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9,
2. , 2.1, 2.2, 2.3, 2.4, 2.5, 2.6, 2.7, 2.8, 2.9, 3. ,
3.1, 3.2, 3.3, 3.4, 3.5, 3.6, 3.7, 3.8, 3.9, 4. , 4.1,
4.2, 4.3, 4.4, 4.5, 4.6, 4.7, 4.8, 5. , 5. , 5. , 5. ]),
np.array([ -6.45497898e-19, 7.97698047e-02, 1.52208991e-01,
2.56482414e-01, 3.31017197e-01, 4.03222197e-01,
4.72064814e-01, 5.37148347e-01, 5.97906198e-01,
6.53816662e-01, 7.04346413e-01, 7.48964617e-01,
7.87816053e-01, 8.21035507e-01, 8.48805502e-01,
8.71348421e-01, 8.88900296e-01, 9.03143354e-01,
9.16085646e-01, 9.28196443e-01, 9.38406001e-01,
9.45971114e-01, 9.51330905e-01, 9.54947930e-01,
9.57278503e-01, 9.58780477e-01, 9.59911792e-01,
9.60934825e-01, 9.62119406e-01, 9.63707446e-01,
9.66045076e-01, 9.69089467e-01, 9.73684854e-01,
9.75257929e-01, 9.77453939e-01, 9.81061451e-01,
9.80798098e-01, 9.82633805e-01, 9.83725248e-01,
9.84876762e-01, 9.85915295e-01, 9.86929684e-01,
9.87938594e-01, 9.88979493e-01, 9.90084808e-01,
9.91288321e-01, 9.92623448e-01, 9.94123703e-01,
9.96388866e-01, 9.98435907e-01, 1.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00]), 3)
apercorr = 1.0/splev( xx, tck,)
if norder == 2:
apercorr = 1.0/uvotmisc.GaussianHalfIntegralFraction( 0.5*(k2-k1)/np.polyval(sigcoef,x) )
if norder == 3:
apercorr = 1.0/uvotmisc.GaussianHalfIntegralFraction( 0.5*(k2-k1)/np.polyval(sigcoef,x) )
return apercorr
def clipmask(f,sigclip=2.5,fpos=False):
'''Provides mask to clip bad data.
Parameters
----------
f : 2D array
kwargs : dict
optional arguments
- **sigclip** : float
clip data at `sigma` standard deviations above the mean
- **fpos** : bool
if True, clip negative values
Returns
-------
mask : 2D array, boolean
Array of same size as image, true where within sigclip standard
deviations of mean.
Notes
-----
By default infinities are clipped.
The mask is iterated until it converges, so the effect of outliers
on the standard deviation is nil. This also means that sigclip needs
to be chosen large enough, or the standard deviation will not be
a good measure of the real noise around the mean.
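Example
-------
A minimal sketch; the image is illustrative:
img = np.random.normal(10., 2., size=(50, 50))
img[5, 5] = 1.e6                      # a single hot pixel
good = clipmask(img, sigclip=2.5)
clean_mean = img[good].mean()         # outlier excluded from the mean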
'''
import numpy as np
bg = f
if fpos:
mask = (np.isfinite(f) & (f >= 0.))
else:
mask = np.isfinite(f)
m0 = len(np.where(mask)[0])
n = 50
bad = True
while (bad & (n > 0)):
n -= 1
mask = abs(f - f[mask].mean()) < sigclip * f[mask].std()
m = len(np.where(mask)[0])
if m == m0: bad = False
else: m0 = m
return mask
def get_components(xpos,ori_img,Ypositions,wheelpos,chatter=0,caldefault=False,\
sigmas=None,noiselevel=None,width=40.0,composite_fit=True, fiterrors = True, \
smoothpix=1, amp2lim=None,fixsig=False,fixpos=False):
''' extract the spectral components for an image slice
at position(s) xpos (dispersion axis) using the Ypositions
of the orders. The value of Ypositions[0] should be the main peak.
Notes: implicit assumption is that the 'y' axis is the pixel number.
if for some reason the data pairs are (z_i,f_meas_i) then the definition of y
changes into z.
if the return value for the centre of the gaussian exceeds some number (sig?),
then the solution is probably suspect. In that case a second fit with sig? held
fixed perhaps should be done.
some tests show that the solution is very sensitive to the first guess of the
position of the peak. It will even find a dip in the noise (neg amplitude)
rather than the main peak or overshoot the peak if the starting guess is too far
off, and fudge sigma to be large.
Error Flag:
flag[0] 0 = ok, 1=solution main peak is offset from Ypositions by more than 'sig' pixels
flag[1] 0 = ok, 1=solution secondary peak is offset from Ypositions by more than 'sig' pixels
flag[2] 0 = ok, 1=solution third peak is offset from Ypositions by more than 'sig' pixels
flag[3] not used
flag[4] number of orders in answer
flag[5] error flag returned by fitting program
noiselevel:
if the fit to the peak has a maximum < noiselevel then the peak will be removed.
fiterrors True implies caldefault=True
smoothpix: the number of pixels along dispersion to smooth over for
fitting gaussians across dispersion
amp2lim: second order prediction of a (minimum, maximum) valid for all xpos
NPMK, 2010-07-15 Fecit
NPMK, 2011-08-16 adding smoothing for improved fitting
NPMK 2011-08-26 replace leastsq with mpfit based routines; clip image outside spectrum width
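Example (a hedged sketch of a single-order fit; spimg and the y-position are
illustrative, and the unpacking assumes the default fiterrors=True):
(params, perror, flag), (y, f_meas) = get_components(1200, spimg, [100.], 160,
       caldefault=True, sigmas=np.array([3.2]))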
'''
import numpy
from numpy import array, arange,transpose, where, abs, min, zeros, atleast_1d, atleast_2d, sqrt
try:
from convolve import boxcar
except:
from stsci.convolve import boxcar
xpos = atleast_1d(xpos)
ori_img = atleast_2d(ori_img)
Ypositions = atleast_1d(Ypositions)
xpos = xpos.flatten()
Ypositions = Ypositions.flatten()
nypos = len(Ypositions)
smoothpix = int(smoothpix)
if smoothpix > 1:
spimg = boxcar(ori_img.copy(),(smoothpix,),mode='reflect')
else: spimg = ori_img
if type(sigmas) == typeNone:
sigmas = array([3.1,4.3,4.6])
if chatter > 4:
print("get_components: input prameter wheelpos ", wheelpos)
print("get_components: input parameter xpos ", xpos)
print("get_components: input parameter Ypositions ", Ypositions)
print("get_components: number of orders : ",nypos)
print("get_components: dimension input image ", spimg.shape)
xpos = xpos[ where(xpos < spimg.shape[1])[0] ] # eliminate elements outside range
if len(xpos) <1:
print("get_components: xpos must be at least one number")
raise ValueError
return
elif len(xpos) == 1:
f_meas = spimg[:,xpos]
f_ori = ori_img[:,xpos]
else:
f_meas = spimg[:,xpos].mean(axis=1)
f_ori = ori_img[:,xpos].mean(axis=1)
f_meas = f_meas.flatten()
f_ori = f_ori.flatten()
f_pos = f_meas >= 0
f_err = 9.99e+9 * numpy.ones(len(f_meas))
f_err[f_pos] = 1.4*sqrt(f_meas[f_pos])
bg_mask = clipmask( f_meas, fpos=True)
f_mask = bg_mask
bg = f_meas[bg_mask].mean()
if type(noiselevel) == typeNone:
noiselevel = f_meas[bg_mask].mean()
if chatter > 3: print("get_components: adopted noiselevel = ", noiselevel)
y = arange(spimg.shape[0],dtype=float) # pixel number
flag = zeros(6, dtype=int )
if caldefault:
if type(sigmas) == typeNone:
print("missing parameter fitorder in uvotgetspec.get_components\n")
else:
# the positions of the centre of the fits are given in Ypositions
sigmaas = atleast_1d(sigmas)
if nypos == 1:
if chatter > 3: print('len Ypositions == 1')
sig0 = sigmaas[0]
p0 = Ypositions[0]
a0 = max(f_meas)
f_mask[int(p0-4*sig0):int(p0+4*sig0)] = True  # cast to int: slice indices must be integers
Z = runfit1(y[f_mask],f_meas[f_mask],f_err[f_mask],bg,a0,p0,sig0,\
fixsig=fixsig,fixpos=fixpos)
flag[5] = Z.status
if Z.status > 0:
[bg0,bg1,a0,p0,sig0] = Z.params
else:
if chatter > 4:
print("runfit1 status:",Z.status)
print("runfit1 params:",Z.params)
if fiterrors: return (Z.params,Z.perror,flag), (y,f_meas) # errors in fit = Z.perror
else: return ((a0,p0,sig0),flag), (y,f_meas)
if nypos == 2:
if chatter > 3: print('len Ypositions == 2')
sig0, sig1 = sigmaas[0], sigmaas[1]
p0, p1 = Ypositions
a0 = 0.9 * max(f_meas)
a1 = 0.5*a0
f_mask[int(p0-4*sig0):int(p0+4*sig0)] = True
f_mask[int(p1-4*sig1):int(p1+4*sig1)] = True
Z = runfit2(y[f_mask],f_meas[f_mask],f_err[f_mask],bg,a0,p0,sig0,a1,p1,sig1,\
fixsig=fixsig,fixpos=fixpos,amp2lim=amp2lim)
flag[5] = Z.status
if Z.status > 0:
[bg0,bg1,a0,p0,sig0,a1,p1,sig1] = Z.params
if fiterrors: return (Z.params,Z.perror,flag), (y,f_meas) # errors in fit = Z.perror
else: return ((a0,p0,sig0,a1,p1,sig1),flag), (y,f_meas)
if nypos == 3:
if chatter > 3: print('len Ypositions == 3')
sig0,sig1,sig2 = sigmaas[:]
p0, p1, p2 = Ypositions
a0 = 0.9* max(f_meas)
a1 = a0
a2 = a1
f_mask[int(p0-4*sig0):int(p0+4*sig0)] = True
f_mask[int(p2-4*sig2):int(p2+4*sig2)] = True
Z = runfit3(y[f_mask],f_meas[f_mask],f_err[f_mask],bg,a0,p0,sig0,a1,p1,sig1,a2,p2,sig2,\
fixsig=fixsig,fixpos=fixpos,amp2lim=amp2lim)
flag[5] = Z.status
if Z.status > 0:
[bg0,bg1,a0,p0,sig0,a1,p1,sig1,a2,p2,sig2] = Z.params
if fiterrors: return (Z.params,Z.perror,flag), (y,f_meas) # errors in fit = Z.perror
else: return ((a0,p0,sig0,a1,p1,sig1,a2,p2,sig2),flag), (y,f_meas)
if wheelpos < 500 :
sig = 6
else:
sig = 4
sig0 = sig
Sig = sig
# width = 40 Maximum order distance - parameter in call ?
# start with fitting using a fixed sig
# to get the peaks fixed do them one by one
if len(Ypositions) < 4 :
# FIT ONE PEAK for all observations
# first guess single gaussian fit parameters
a0 = f_meas.max()
y0 = Ypositions[0]
(p0_,p1), ier = leastsq(Fun1b, (a0,y0), args=(f_meas,y,sig) )
# if the "solution" is wrong use the input as best guess:
if abs(Ypositions[0] - p1) > 15:
p1 = y0
flag[0] = 3
else: # shift the input positions
delpos = p1-Ypositions[0]
Ypositions += delpos
# refine the sigma with fixed centre for the peak
(p0,sig_), ier = leastsq(Fun1a, (p0_,sig), args=(f_meas,y,p1) )
if ((sig_ > 0.1*sig) & (sig_ < 6.* sig)):
sig1 = sig_
else: sig1 = sig
Yout = ((p0,p1,sig1), flag), (y,f_meas)
if chatter > 3:
print("highest peak amplitude=%8.1f, position=%8.1f, sigma=%8.2f, ier flag=%2i "%(p0,p1,sig1,ier))
else:
print('Error in number of orders given in Ypositions')
return
# limit the acceptable range for searching for maxima
q = where( (y < p1+width) & (y > p1-0.5*width) ) # if direction known, one can be set to 3*sig
yq = y[q[0]]
qok = len(q[0]) > 0
if ( (len(Ypositions) > 1) & qok ):
# TWO PEAKS
# double gaussian fit: remove the first peak from the data and fit the residual
f_meas_reduced = f_meas[q] - singlegaussian(yq, p0, p1, sig_)
a0 = f_meas_reduced.max()
y0 = where(f_meas_reduced == a0)[0][0]
Y2 = (p2,p3) , ier = leastsq(Fun1b, (a0,y0) , args=(f_meas_reduced,yq,sig))
if chatter > 3:
print('position order 2: %8.1f shifted to %8.1f'%(p3,p3+y[q][0]))
p3 += y[q][0]
# check that the refined value is not too far off:
if abs(p3 - Ypositions[1]) > 15:
if chatter > 3: print("problem p3 way off p3=",p3)
p3 = Ypositions[1]
flag[1] = 3
Y2 = (p2,sig2), ier = leastsq(Fun1a, (p2,sig1), args=(f_meas_reduced,yq,p3 ))
if not ((sig2 > 0.25*sig1) & (sig2 < 4.* sig1)):
sig2 = sig1
newsig2 = False
else:
# keep sig2
newsig2 = True
if chatter > 3:
print("second highest peak amplitude=%8.1f, position=%8.1f, sigma=%8.2f ; ier flag=%2i "%(p2,p3,sig2, ier))
Yout = ((p0,p1,sig1,p2,p3,sig2),flag), (y,q,f_meas,f_meas_reduced)
if ((len(Ypositions) > 2) & qok ):
# triple gaussian fit: removed the second peak from the data
(p0,p1,sig1,p2,p3,sig2), ier = \
leastsq(Fun2, (p0,p1,sig1,p2,p3,sig2) , args=(f_meas[q],y[q]))
if chatter > 3:
print("fit double gaussian (%8.2f,%8.2f,%8.2f, %8.2f,%8.2f,%8.2f)"%\
(p0,p1,sig1,p2,p3,sig2))
f_meas_reduced = f_meas[q] - doublegaussian(yq,p0,p1,sig1,p2,p3,sig2)
if not newsig2:
y0 = Ypositions[2]
a0 = 10*noiselevel
else:
a0 = f_meas_reduced.max()
y0 = y[q][where(f_meas_reduced == a0)[0][0]]
if chatter > 3: print("third order input fit: amplitude = %8.2f, position = %8.2f"%(a0,y0))
sig3 = 2*sig2
Y3 = (p4,p5), ier = leastsq(Fun1b, (a0,y0) , args=(f_meas_reduced,y[q],sig3))
p5 += y[q][0]
if abs(p5-Ypositions[2]) > 15:
p5 = Ypositions[2]
flag[2] = 3
Y3 = (p4a,sig3), ier = leastsq(Fun1a, (p4,sig3), args=(f_meas_reduced,y[q],p5 ))
if sig3 > 6*sig: sig3 = 2*sig2
if chatter > 3:
print("third highest peak amplitude=%8.1f, position=%8.1f, sigma=%8.2f, ier flag =%i "\
%(p4,p5,sig3,ier))
Yout = ((p0,p1,sig1,p2,p3,sig2,p4,p5,sig3),flag),(y,q,f_meas,f_meas_reduced)  # report the refined third-order width sig3
# now remove odd solutions - TBD: just flagging now
# check that the solutions for the centre are within 'Sig' of the input 'Ypositions'
if chatter > 2:
print("input Ypositions: ", Ypositions)
nposi = len(Ypositions)
if len(Ypositions) < 4 :
dy = min(abs(p1 - Ypositions))
if dy > Sig: flag[0] += 1
if ((len(Ypositions) > 1) & ( len(q[0]) > 0 )):
dy = min(abs(p3 - Ypositions))
if dy > Sig: flag[1] += 1
dy = abs(p3 - p1)
if dy < sig:
flag[1] += 10
ip = where(abs(p3-Ypositions) < 0.9*dy)[0]
indx = list(range(len(Ypositions)))
if len(ip) == 0:
print("problem with fitting peak # 2 ")
else:
indx.pop(ip[-1])
Ypositions = Ypositions[indx]
if p2 < noiselevel:
flag[1] += 20
ip = where(abs(p3-Ypositions) < 0.9*dy)[0]
if len(ip) == 0:
print("problem with fitting peak # 2 ")
else:
indx = list(range(len(Ypositions)))
#return (p0,p1,p2,p3), Ypositions, ip, noiselevel,dy
indx.pop(ip[-1])  # pop expects an integer index
Ypositions = Ypositions[indx]
if ((len(Ypositions) > 2) & qok):
dy = min(abs(p5 - Ypositions))
if dy > Sig: flag[2] += 1
dy = abs(p5 - p1)
if dy < sig:
flag[2] += 10
ip = where(abs(p5-Ypositions) < 0.2*dy)[0]
indx = list(range(len(Ypositions)))
if len(ip) == 0:
print("problem with fitting peak # 2 ")
else:
indx.pop(ip[-1])  # pop expects an integer index
Ypositions = Ypositions[indx]
if p4 < noiselevel:
flag[2] += 20
ip = where(abs(p5-Ypositions) < 0.9*dy)[0]
if chatter > 2: print('ip = ',ip)
indx = list(range(len(Ypositions)))
if len(ip) == 0:
print("problem with fitting peak # 2 ")
else:
indx.pop(ip[-1])
Ypositions = Ypositions[indx]
if flag[1] != 10:
dy = abs(p5 - p3)
if dy < sig:
flag[2] += 100
ip = where(abs(p5-Ypositions) < 0.9*dy)[0]
if len(ip) == 0:
print("problem with fitting peak # 2 ")
else:
indx = list(range(len(Ypositions)))
indx.pop(ip[-1])
Ypositions = Ypositions[indx]
if chatter > 2:
print("flag: ",flag)
print(" initial fit parameters: \n first peak:", p0, p1, sig1)
if nposi > 1: print(" second peak:", p2,p3, sig2)
if nposi > 2: print(" third peak:", p4,p5, sig3)
print(" intermediate Ypositions: ", Ypositions)
if not composite_fit: # bail out at this point
if len(Ypositions) == 1:
Y1 = ((p0,p1,sig), flag), 0
elif len(Ypositions) == 2:
Y1 = ((p0,p1,sig,p2,p3,sig2), flag), 0
elif len(Ypositions) == 3:
Y1 = ((p0,p1,sig,p2,p3,sig2,p4,p5,sig), flag), 0
else:
Y1 = Yout
return Y1
# free sig and refit
if ( len(Ypositions) == 1) :
# first guess single gaussian fit parameters in range given by width parameter
a0 = p0
y0 = p1
if chatter > 3:
print("f_meas :", transpose(f_meas))
print("a0: %8.2f \ny0: %8.2f \nsig0 : %8.2f "%(a0,y0,sig))
print(q)
params_fit, ier = leastsq(Fun1, (a0,y0,sig), args=(f_meas[q],y[q]) )
flag[5] = 1
flag[4] = ier
# remove odd solutions
return (params_fit, flag), (f_meas, y)
elif (qok & (len(Ypositions) == 2) ):
# double gaussian fit
a0 = p0
y0 = p1
a1 = p2
y1 = p3
Y0 = params_fit, ier = leastsq(Fun2, (a0,y0,sig,a1,y1,sig) , args=(f_meas[q],y[q]))
flag[5]=2
flag[4]=ier
# remove odd solutions - TBD
return (params_fit, flag), (f_meas, y, f_meas_reduced, q)
elif (qok & (len(Ypositions) == 3)):
# restricting the fit to a smaller region around the peaks
# reduces the effect of noise broadening the fit.
q = where( (y > p1-3.*sig1) & (y < p5+3*sig3) )  # span all three peaks (was p3, which would cut off the third peak)
# ====
# triple gaussian fit
a0 = p0
y0 = p1
a1 = p2
y1 = p3
a2 = p4
y2 = p5
Y0 = params_fit, ier = leastsq(Fun3, (a0,y0,sig1,a1,y1,sig2,a2,y2,sig3) , args=(f_meas[q],y[q]))
flag[5] = 3 # number of peaks
flag[4] = ier
# remove odd solutions
return (params_fit, flag), (f_meas, y, f_meas_reduced, q)
else:
# error in call
print("Error in get_components Ypositions not 1,2,or 3")
return Yout
def obsid2motion(obsid, file_path):
''' By Zexi
to obtain motion (pixels) from a precreated motion table
'''
import pandas as pd
data=pd.read_csv(file_path,sep=' ',header=0)
data['OBS_ID']=data['OBS_ID'].astype(str)
data['OBS_ID']='000'+data['OBS_ID']
d = data.set_index(['OBS_ID'])
motion_v = d.loc[obsid]['MOTION_V']
motion_p = d.loc[obsid]['MOTION_P']
motion = {'V':motion_v, 'P':motion_p}   # avoid shadowing the builtin 'dict'
return motion
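# --- Hedged usage sketch for obsid2motion (not part of the original module) ---
# The function assumes a whitespace-separated table whose header contains at
# least the columns OBS_ID, MOTION_V and MOTION_P; the file name and numbers
# below are made up purely for illustration.
def _demo_obsid2motion(tmpfile="motion_table_demo.txt"):
    with open(tmpfile, "w") as f:
        f.write("OBS_ID MOTION_V MOTION_P\n")
        f.write("12345678901 2.3 1.1\n")
    # note that obsid2motion prefixes the table OBS_IDs with '000'
    return obsid2motion("00012345678901", tmpfile)   # expected: {'V': 2.3, 'P': 1.1}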
def Fun1(p,y,x):
'''compute the residuals for gaussian fit in get_components '''
a0, x0, sig0 = p
return y - singlegaussian(x,a0,x0,sig0)
def Fun1a(p,y,x,x0):
'''compute the residuals for gaussian fit with fixed centre in get_components '''
a0, sig0 = p
return y - singlegaussian(x,a0,x0,sig0)
def Fun1b(p,y,x,sig0):
'''compute the residuals for gaussian fit with fixed width in get_components '''
a0, x0 = p
return y - singlegaussian(x,a0,x0,sig0)
def Fun1c(p,y,x,x0,sig0):
'''compute the residuals for gaussian fit with fixed centre and width in get_components '''
a0 = p
return y - singlegaussian(x,a0,x0,sig0)
def DFun1(p,y,x):
'''There is something wrong with the return value: it should probably be the matrix of partial derivatives (the Jacobian). '''
a0, x0, sig0 = p
return -Dsinglegaussian(x,a0,x0,sig0)
def Fun2(p,y,x):
'''compute the residuals for gaussian fit in get_components '''
a0, x0, sig0 ,a1,x1,sig1 = p
return y - doublegaussian(x,a0,x0,sig0,a1,x1,sig1)
def Fun2b(p,y,x,sig):
'''compute the residuals for gaussian fit in get_components for fixed sig '''
a0, x0, a1,x1 = p
return y - doublegaussian(x,a0,x0,sig,a1,x1,sig)
def Fun2bb(p,y,x,sig1,sig2):
'''compute the residuals for gaussian fit in get_components for fixed sig1, and sig2 '''
a0, x0, a1,x1 = p
return y - doublegaussian(x,a0,x0,sig1,a1,x1,sig2)
def Fun2bc(p,y,x,x0,x1):
'''compute the residuals for gaussian fit in get_components for fixed centre x0, x1 '''
a0, sig0, a1,sig1 = p
return y - doublegaussian(x,a0,x0,sig0,a1,x1,sig1)
def Fun2c(p,y,x,x0,sig0,x1,sig1):
'''compute the residuals for gaussian fit in get_components for fixed centre x_i and width sig_i '''
a0, a1 = p
return y - doublegaussian(x,a0,x0,sig0,a1,x1,sig1)
def DFun2(p,y,x):
a0, x0, sig0,a1,x1,sig1 = p
return -Ddoublegaussian(x,a0,x0,sig0,a1,x1,sig1)
def Fun3(p,y,x):
'''compute the residuals for gaussian fit in get_components '''
a0, x0, sig0 ,a1,x1,sig1 ,a2,x2,sig2= p
return y - trigaussian(x,a0,x0,sig0,a1,x1,sig1,a2,x2,sig2)
def Fun3b(p,y,x,sig):
'''compute the residuals for gaussian fit in get_components '''
a0,x0,a1,x1,a2,x2 = p
return y - trigaussian(x,a0,x0,sig,a1,x1,sig,a2,x2,sig)
def Fun3bb(p,y,x,sig1,sig2,sig3):
'''compute the residuals for gaussian fit in get_components '''
a0,x0,a1,x1,a2,x2 = p
return y - trigaussian(x,a0,x0,sig1,a1,x1,sig2,a2,x2,sig3)
def Fun3c(p,y,x,x0,sig0,x1,sig1,x2,sig2):
'''compute the residuals for gaussian fit in get_components for fixed centre x_i and width sig_i '''
a0, a1, a2 = p
return y - trigaussian(x,a0,x0,sig0,a1,x1,sig1,a2,x2,sig2)
def DFun3(p,y,x):
a0, x0, sig0,a1,x1,sig1,a2,x2,sig2 = p
return -Dtrigaussian(x,a0,x0,sig0,a1,x1,sig1,a2,x2,sig2)
def Fun4(p,y,x,motion0):
a0, x0, sig0 = p
return y - smeargaussian(x,a0,x0,sig0,motion0)
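# --- Hedged sketch (not in the original code): how the Fun* residual helpers above
# are meant to be used. Each one returns (data - model), which is exactly the
# signature scipy.optimize.leastsq expects. The synthetic data below is illustrative.
def _demo_fit_single_gaussian():
    import numpy as np
    from scipy.optimize import leastsq
    x = np.arange(100, dtype=float)
    y = singlegaussian(x, 10.0, 42.0, 4.0) + 0.1 * np.random.randn(100)
    p_initial = (8.0, 40.0, 3.0)                 # (amplitude, centre, sigma) first guess
    params, ier = leastsq(Fun1, p_initial, args=(y, x))
    return params, ier                           # params should come out close to (10, 42, 4)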
def singlegaussian(x, a0, x0, sig0 ):
'''
The function returns the gaussian function
on array x centred on x0 with width sig0
and amplitude a0
'''
x = np.atleast_1d(x)
f = 0. * x.copy()
q = np.where( np.abs(x-x0) < 4.*sig0 )
f[q] = a0 * np.exp( - ((x[q]-x0)/sig0)**2 )
return f
def Dsinglegaussian(x, a0, x0, sig0):
'''partial derivative of singlegaussian to all parameters'''
f = singlegaussian(x, a0, x0, sig0)
dfda0 = f/a0
dfdx0 = 2*(x-x0)*f/sig0**2    # d/dx0 of exp(-((x-x0)/sig0)**2); the extra x0 factor was a bug
dfdsig0 = 2*f*(x-x0)**2/sig0**3
return dfda0, dfdx0, dfdsig0
def doublegaussian(x, a0, x0, sig0, a1, x1, sig1 ):
'''
The function returns the double gaussian function
on array x centred on x0 and x1 with width sig0 and sig1
and amplitude a0, and a1
'''
x = np.atleast_1d(x)
f1 = 0. * x.copy()
f2 = 0. * x.copy()
q = np.where( np.abs(x-x0) < 4.*sig0 )
f1[q] = a0 * np.exp( - ((x[q]-x0)/sig0)**2 )
q = np.where( np.abs(x-x1) < 4.*sig1)
f2[q] = a1 * np.exp( - ((x[q]-x1)/sig1)**2 )
f = f1+f2
return f
def trigaussian(x, a0, x0, sig0, a1, x1, sig1, a2, x2, sig2 ):
'''
The function returns the triple gaussian function
on array x centred on x0, x1, x2 with width sig0, sig1, sig2
and amplitude a0,a1, a2. :
'''
x = np.atleast_1d(x)
f0 = 0. * x.copy()
f1 = 0. * x.copy()
f2 = 0. * x.copy()
q = np.where(np.abs( x-x0 ) < 4.*sig0)
f0[q] = a0 * np.exp( - ((x[q]-x0)/sig0)**2 )
q = np.where(np.abs( x-x1 ) < 4.*sig1)
f1[q] = a1 * np.exp( - ((x[q]-x1)/sig1)**2 )
q= np.where( np.abs(x-x2) < 4.*sig2)
f2[q] = a2 * np.exp( - ((x[q]-x2)/sig2)**2 )
f = f0 + f1 + f2
return f
def Ddoublegaussian(x, a0, x0, sig0, a1, x1, sig1):
'''partial derivative of doublegaussian to all parameters'''
f = singlegaussian(x, a0, x0, sig0)
dfda0 = f/a0
dfdx0 = 2*(x-x0)*f/sig0**2    # corrected: no extra x0 factor
dfdsig0 = 2*f*(x-x0)**2/sig0**3
f = singlegaussian(x, a1, x1, sig1)
dfda1 = f/a1
dfdx1 = 2*(x-x1)*f/sig1**2    # corrected: no extra x1 factor
dfdsig1 = 2*f*(x-x1)**2/sig1**3
return dfda0, dfdx0, dfdsig0, dfda1, dfdx1, dfdsig1
def gaussPlusPoly(x, a0, x0, sig0, b, n=2):
'''compute function gaussian*polynomial(n) '''
f = singlegaussian(x, a0, x0, sig0 ) * (b[2]+(b[1]+b[0]*x)*x)
return f
def DgaussPlusPoly(x, a0, x0, sig0, b, n=2):
'''compute Jacobian for gaussPlusPoly '''
dfda0, dfdx0, dfdsig0 = (Dsinglegaussian(x, a0, x0, sig0) ) * (b[2]+(b[1]+b[0]*x)*x)
# the polynomial is b[2] + b[1]*x + b[0]*x**2, so the partials w.r.t. b are:
dfdb2 = singlegaussian(x, a0, x0, sig0)
dfdb1 = singlegaussian(x, a0, x0, sig0) * x
dfdb0 = singlegaussian(x, a0, x0, sig0) * x * x
return (dfda0, dfdx0, dfdsig0, dfdb2, dfdb1,dfdb0)
def smeargaussian(x, A, mu, sigma, motion, normalize=True):
t1, t2 = -motion/2, motion/2
m1, m2 = (t1-(x-mu))/(np.sqrt(2)*sigma), (t2-(x-mu))/(np.sqrt(2)*sigma)
n1, n2 = m1*m1, m2*m2
fifth = -(np.exp(-n2)-np.exp(-n1))
sixth = np.sqrt(np.pi/2)*(x-mu)/sigma*(erf(m2)-erf(m1))
forth = fifth + sixth
third = np.exp(np.power((x-mu)/sigma,2)/2)*2*np.power(sigma,2)*forth
secnd = -1/(2*np.power(sigma,2))*third
def first_f(t):
return np.exp(-np.power(t/sigma,2)/2+t*(x-mu)/np.power(sigma,2))
first = first_f(t2)-first_f(t1)
zeroth = np.power(sigma,2)/(x-mu)*(first - secnd)
if normalize == True:
norm = 1./(sigma*np.sqrt(2*np.pi))
else:
norm = 1.
#q = norm/motion*np.exp(-np.power((x-mu)/sigma,2)/2)*zeroth
q = np.exp(-np.power((x-mu)/sigma,2)/2)*zeroth
a1, a2 = t1/(np.sqrt(2)*sigma), t2/(np.sqrt(2)*sigma)
q_max = np.sqrt(np.pi/2)*sigma*(erf(a2)-erf(a1))
q = A*q/q_max
return q
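# --- Hedged sketch (illustrative values only): comparing the smeared profile to a
# plain Gaussian. The expression above divides by (x - mu), so the sample points
# are offset by half a pixel to avoid hitting mu exactly.
def _demo_smeargaussian():
    import numpy as np
    x = np.arange(0.0, 40.0) + 0.5
    smeared = smeargaussian(x, 10.0, 20.0, 3.0, motion=8.0)
    plain = singlegaussian(x, 10.0, 20.0, 3.0)
    return smeared, plain     # the smeared profile is broader, with a comparable peak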
def pixdisFromWave(C_1,wave):
''' find the pixel distance from the given wavelengths for first order uv grism'''
from numpy import polyval, polyfit, linspace, where
if C_1[-2] < 4.5: d = linspace(-370,1300, num=100)
else: d = linspace(-360,550,num=100)
w = polyval(C_1,d)
w1 = min(wave) - 100
w2 = max(wave) + 100
q = where( (w > w1) & (w < w2) )
Cinv = polyfit(w[q],d[q],4)
return polyval(Cinv,wave)
def quality_flags():
'''Definition of quality flags for UVOT grism '''
flags = dict(
good=0, # data good, but may need COI correction
bad=1, # data dropout or bad pixel or user marked bad
zeroth=2, # strong zeroth order too close to/overlaps spectrum
weakzeroth=4, # weak zeroth order too close to/overlaps spectrum
first=8, # other first order overlaps and brighter than BG + 5 sigma of noise
overlap=16, # orders overlap too close to separate (first, second) or (first, second and third)
too_bright=32, # the counts per frame are too large
unknown=-1
)
return flags
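# --- Hedged example (not part of the original code): the flag values are powers of
# two, so conditions are meant to be OR-ed into one integer per pixel and tested
# with a bitwise AND.
def _demo_quality_flags():
    f = quality_flags()
    q = f['zeroth'] | f['first']          # pixel hit by a zeroth order and another first order
    has_zeroth = (q & f['zeroth']) != 0   # True
    is_bad = (q & f['bad']) != 0          # False
    return q, has_zeroth, is_bad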
def plotSecondOrder(dis,C_2,anker,anker2, spnet, scale=False):
'''
The aim of this procedure is to plot
the spectrum with the second order wavelength scale.
Second order brightness scaling (scale = True)
'''
from pylab import plot, polyval
# catch when anker2 = NaN
# tbd.
D = np.sqrt((anker[0]-anker2[0])**2+(anker[1]-anker2[1])**2)
dis2 = dis-D
p = np.where( np.abs(dis2) == np.abs(dis2).min() )
p1 = p[0] - 700
p2 = len(dis2)
aa = list(range(p1,p2))
plot( polyval(C_2,dis2[aa]),spnet[aa])
def secondOrderPSF_FWHM(wavelength, C_2inv, units = 'angstroem'):
''' returns the second order PSF FWHM
in A (or pixels when units = 'pixels')
C_2inv = inverse function of dispersion coefficients for the second order
Although the PSF is horse-shoe shaped, the PSF fit is by a gaussian.
'''
w = [1900.,2000,2100,2200,2300,2530,2900,4000]
FWHM = [5.9,6.5,7.7,8.7,10,14,22,63]
a = np.polyfit(w,FWHM,2)
pix2lam = 1.76 # this could be improved using the actual dispersion relation
# dis = np.polyval(C_2inv,wavelength)
# pix2lam = np.polyval(C_2,dis+1) - np.polyval(C_2,dis)
if units == 'pixels':
return np.polyval(a,wavelength)
elif units == 'angstroem':
return np.polyval(a,wavelength) * pix2lam
def response21_grcal(wave):
'''
to get 2nd order counts per bin multiply first order peak counts/bin with
the result of this function
broad band measurements with band width > resolution
let band width D_lam = (lambda_max-lambda_min)
first order pixel ~ 3.1 A/pix
second order pixel ~ 1.7 A/pix
so first order CR/pix ~ CR1_band / 3.1
and second order CR/pix ~ CR2_band / 1.7
EWratio = CR2_band/CR1_band
so # pix/band = d_lam / 3.1 for first order and d_lam/1.7 for second order
so in second order pix the CR(2)/pix = CR(1)* (d_lam/3.1) / (d_lam/1.7) * EWratio
= CR(1) * (1.7/3.2) * EW ratio
'''
from numpy import array, exp, polyfit, log, polyval
wmean = array([1925.,2225,2650])
EWratio = array([0.80,0.42,0.22]) # ratio of broad band response ground cal nominal
EWratio_err= array([0.01,0.01,0.005]) # error
C1_over_C2 = 3.2/1.7 # ratio of pixel scales (1)/(2)
a = polyfit(wmean,log(EWratio),2) # logarithmic fit
EW2 = exp( polyval(a, wave) ) # return ratio
return EW2/C1_over_C2
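# --- Hedged numeric illustration of the conversion described in the docstring ---
# Near 2225 A the broad-band ratio is ~0.42 and the pixel scales are ~3.2 A/pix
# (first order) and ~1.7 A/pix (second order), so a first-order peak of
# 100 counts/bin corresponds to roughly 100 * (1.7/3.2) * 0.42 ~ 22 counts/bin
# in the second order. response21_grcal should return a comparable factor.
def _demo_response21_grcal():
    import numpy as np
    factor = response21_grcal(np.array([2225.0]))
    return 100.0 * factor       # expected to be of order 22 counts/bin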
def response21_firstcal(wave,wheelpos=160):
'''Second order flux calibration relative to first order based on
effective areas from 2011-12-18 at offset position uv clocked grism
Near the centre (default position) of the detector, the second order
flux is overestimated. A better value there is perhaps half the predicted
value, though the exact number is impossible to determine at present.
'''
import numpy as np
from scipy import interpolate
print("2nd order response based on offset position uv clocked at (1600,1600)_DET \n")
#if wheelpos != 160:
# do whatever
#
# return R21
coef = np.array([ 3.70653066e-06, -9.56213490e-03, 5.77251517e+00])
# ratio (sp_2/\AA)/ (sp_1/\AA)
R21 = 1./np.polyval(coef,wave)
if (np.min(wave) < 1838.):
q = (wave < 1839.)
wav = np.array([1690, 1691, 1692, 1693, 1694, 1695, 1696, 1697, 1698, 1699, 1700,
1701, 1702, 1703, 1704, 1705, 1706, 1707, 1708, 1709, 1710, 1711,
1712, 1713, 1714, 1715, 1716, 1717, 1718, 1719, 1720, 1721, 1722,
1723, 1724, 1725, 1726, 1727, 1728, 1729, 1730, 1731, 1732, 1733,
1734, 1735, 1736, 1737, 1738, 1739, 1740, 1741, 1742, 1743, 1744,
1745, 1746, 1747, 1748, 1749, 1750, 1751, 1752, 1753, 1754, 1755,
1756, 1757, 1758, 1759, 1760, 1761, 1762, 1763, 1764, 1765, 1766,
1767, 1768, 1769, 1770, 1771, 1772, 1773, 1774, 1775, 1776, 1777,
1778, 1779, 1780, 1781, 1782, 1783, 1784, 1785, 1786, 1787, 1788,
1789, 1790, 1791, 1792, 1793, 1794, 1795, 1796, 1797, 1798, 1799,
1800, 1801, 1802, 1803, 1804, 1805, 1806, 1807, 1808, 1809, 1810,
1811, 1812, 1813, 1814, 1815, 1816, 1817, 1818, 1819, 1820, 1821,
1822, 1823, 1824, 1825, 1826, 1827, 1828, 1829, 1830, 1831, 1832,
1833, 1834, 1835, 1836, 1837, 1838, 1839])
ratio = np.array([ 0.258639 , 0.26471343, 0.27042023, 0.27579628, 0.28086127,
0.28533528, 0.28957406, 0.29359907, 0.29742921, 0.3010812 ,
0.30456987, 0.30790845, 0.31110877, 0.3141814 , 0.31713589,
0.31998082, 0.32010247, 0.32081151, 0.32181713, 0.32280622,
0.32377967, 0.32473829, 0.32568282, 0.32661395, 0.32753234,
0.32843857, 0.32933322, 0.33021679, 0.33108977, 0.33195263,
0.33243225, 0.33252353, 0.33262903, 0.33274794, 0.3328795 ,
0.33302301, 0.33317782, 0.33334329, 0.33351887, 0.33370401,
0.3338982 , 0.33410098, 0.3343119 , 0.33458345, 0.33498466,
0.33538817, 0.33579382, 0.33620149, 0.33661104, 0.33702235,
0.3374353 , 0.33891465, 0.34053073, 0.3421217 , 0.34368845,
0.34663769, 0.35000718, 0.35334531, 0.35665266, 0.3599298 ,
0.3631773 , 0.36639568, 0.36958547, 0.37274719, 0.37588132,
0.37898836, 0.38206878, 0.38512304, 0.38815158, 0.39115485,
0.39413328, 0.39708727, 0.40001724, 0.40292359, 0.40616969,
0.40948579, 0.4123554 , 0.41437097, 0.41637511, 0.41836796,
0.42034965, 0.42232032, 0.42428008, 0.42622906, 0.42816739,
0.43009518, 0.43201256, 0.43391964, 0.43581654, 0.43793192,
0.44004629, 0.44215087, 0.44424574, 0.44633099, 0.44840671,
0.45047299, 0.4525299 , 0.45457754, 0.45661598, 0.45864531,
0.4607006 , 0.46279476, 0.46626514, 0.47005637, 0.47383064,
0.47758809, 0.48132887, 0.48505311, 0.48876095, 0.49245253,
0.49612799, 0.49978745, 0.50343106, 0.50705893, 0.5106712 ,
0.514268 , 0.51784944, 0.52141565, 0.52496675, 0.52850286,
0.53264671, 0.53713253, 0.5416131 , 0.54608843, 0.55055849,
0.55502327, 0.55948277, 0.56393697, 0.56838586, 0.57282942,
0.57737607, 0.58315569, 0.58892863, 0.59469489, 0.60045444,
0.60620727, 0.61195337, 0.61769272, 0.6234253 , 0.6291511 ,
0.63488101, 0.64091211, 0.64694134, 0.65296866, 0.65899403,
0.66501741, 0.67103875, 0.67705802, 0.68307519, 0.6890902 ])
func = interpolate.interp1d(wav, ratio, kind='linear', bounds_error=False )
R21[q] = 1./func(wave[q])
return R21
def response21(wave, version='firstcal',wheelpos=160 ):
'''
second over first order response per unit of angstrom
input:
wave : wavelengths (A) at which to evaluate the second over first order ratio
version : 'firstcal' (default) or 'groundcal' calibration
wheelpos : filter wheel position
'''
if version == 'groundcal':
return response21_grcal(wave)
elif version == 'firstcal':
return response21_firstcal(wave)
else:
print('\nFatal Error in call to response21\n')
raise IOError
return
def polyinverse( coef, dis):
''' determine the inverse of the polynomial coefficients
of the same order as in input
so w = polyval(coef, d)
and d = polyval(coefinv, w)
Warning
-------
Accuracy is not always good.
'''
import numpy as np
wav = np.polyval(coef, dis)
norder = np.array([len(coef)-1,len(dis)-1])
norder = np.array([norder.max(),9]).min()
coef_inv = np.polyfit(wav, dis, norder)
return coef_inv
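# --- Hedged sketch: a quick round-trip check of polyinverse, since the docstring
# warns the accuracy is not always good. The dispersion coefficients below are
# toy values chosen only for illustration.
def _demo_polyinverse():
    import numpy as np
    C_1 = [3.2, 2600.0]                      # w = 3.2*d + 2600
    d = np.arange(-370.0, 1150.0)
    coef_inv = polyinverse(C_1, d)
    w = np.polyval(C_1, d)
    residual = np.abs(np.polyval(coef_inv, w) - d).max()
    return residual                          # should be well below one pixel if usable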
def pix_from_wave( disp, wave,spectralorder=1 ):
'''Get the pixel coordinate from wavelengths and dispersion.
Parameters
----------
disp : list
the dispersion polynomial coefficients
wave : array-like
wavelength
kwargs : dict
- **spectralorder** : int
the spectral order number
returns
-------
pix : array-like
pixel distance corresponding to the input wavelengths for the given spectral order
Note
----
polyinverse() was used which is inaccurate
example
-------
d = pix_from_wave([3.2,2600.], lambda )
'''
from scipy import interpolate
import numpy as np
from stsci.convolve import boxcar
wave = np.asarray( wave )
wave = np.atleast_1d(wave)
wone = np.ones(len(wave))
grism = None
if (disp[-1] > 2350.0) & (disp[-1] < 2750.) : grism = 'UV'
if (disp[-1] > 4000.0) & (disp[-1] < 4500.) : grism = 'VIS'
if grism is None:
raise RuntimeError("The dispersion coefficients do not seem correct. Aborting.")
if spectralorder == 1:
# initial guess
dinv = polyinverse( disp, np.arange(-370,1150) )
d = np.polyval(dinv, wave )
if len(wave) < 20:
dp = np.polyval(dinv, wave+10 ) # CRAP polyval!
y = (dp-d)/10.0
y[y <= 0] = y[y > 0].mean()
dpdw = y
else:
fd = interpolate.interp1d(wave,d,bounds_error=False,fill_value=0.3,kind='quadratic')
dp = fd(wave+20)
y = (dp-d)/20.0
y[y <= 0] = y[y > 0].mean()
dpdw = boxcar(y,(100,),mode='reflect')
count = 100
while ((np.abs(np.polyval(disp,d) - wave) > 0.5 * wone).any() and (count > 0)):  # iterate until all residuals are within 0.5 A or the budget runs out
dw = np.polyval(disp,d) - wave
d -= dpdw*dw*0.5
count -= 1
return d
if spectralorder == 2:
# initial guess
dinv = polyinverse( disp, np.arange(-640,1300) )
d = np.polyval(dinv, wave )
dp = np.polyval(dinv, wave+1.0 )
dpdw = dp-d
count = 100
while ((np.abs(np.polyval(disp,d) - wave) > 0.5 * wone).any() and (count > 0)):  # iterate until all residuals are within 0.5 A or the budget runs out
dw = np.polyval(disp,d) - wave
d -= dpdw*dw*0.5
count -= 1
return d
pix = np.polyval( disp, wave )
return
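# --- Hedged usage sketch for pix_from_wave (toy coefficients only) ---
# The linear dispersion below mimics the docstring example; its anchor term of
# ~2600 A makes the UV-grism branch above select grism='UV'. Fewer than 20
# wavelengths are used so the simpler derivative estimate is taken.
def _demo_pix_from_wave():
    import numpy as np
    disp = [3.2, 2600.0]
    wave = np.arange(2700.0, 4000.0, 100.0)
    d = pix_from_wave(disp, wave, spectralorder=1)
    # round trip: np.polyval(disp, d) should reproduce wave to within ~0.5 A
    return np.abs(np.polyval(disp, d) - wave).max()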
def predict_second_order(dis,spnet,C_1,C_2,d12,qual,dismin,dismax,wheelpos):
'''Predict the second order flux in the given wavelength range
Parameters
----------
spnet[dis] : array-like
extracted spectrum of first order (with possibly higher order contributions)
Assume anchor for dis=0, dis in pix units
C_1, C_2 : list, ndarray
dispersion coefficients for the first and second order
d12 : float
distance in pix between anchor and second order reference point
qual[dis] : array-like
quality extracted spectrum
dismin,dismax : float
define the pixel range for the wavelength range of the first order
wheelpos : int {160,200,955,1000}
position filter wheel
calling function
response21 is giving second over first order response for bins determined by dis
polyinverse determines the inverse of the polynomial coefficients
returns
-------
sp2[dis] : array-like
second order flux
wave2[dis] : array-like
second order wavelength
Notes
-----
uses response21() to get the second over first order response for bins determined by dis
polyinverse determines the inverse of the polynomial coefficients
'''
import numpy as np
from numpy import where, searchsorted, polyval   # polyval is used below; numpy's 'int' shadows the builtin and is deprecated
dis = np.asarray(1.0*dis) # ensure floating point array
spnet = np.asarray(spnet)
qual = np.asarray(qual)
wave = np.polyval(C_1,dis)
wmin = np.polyval(C_1,dismin)
wmax = np.polyval(C_1,dismax)
dis2 = dis[where(dis > 1)] - d12
wav2 = np.polyval(C_2,dis2)
n2b = wav2.searchsorted(wmin)
dis2 = dis2[n2b:]
wav2 = wav2[n2b:]
# determine the inverse of the dispersion on the domain with wmin< wav2 < wmax
#C_1inv = polyinverse(C_1,dis )
#C_2inv = polyinverse(C_2,dis2)
# second order limits
wmin2, wmax2 = np.max(np.array([wav2[0],wmin])),wav2[-1]
#compute second order prediction within the limits
# first order points to use to predict second order (range dis and indices)
#dlo, dhi = np.polyval(C_1inv,wmin2), np.polyval(C_1inv,wmax2)
dlo, dhi = pix_from_wave(C_1,wmin2), pix_from_wave(C_1,wmax2)
idlo, idhi = int(dis.searchsorted(dlo)), int(dis.searchsorted(dhi))
wav1cut = wave[idlo:idhi]
dis1cut = dis [idlo:idhi]
qua1cut = qual[idlo:idhi]
# second order dis2 corresponding to wavelength range wav1cut
#dis2cut = polyval(C_2inv,wav1cut)
dis2cut = pix_from_wave(C_2, wav1cut)
# find scale factor (1 pix = x \AA )
pixscale1 = polyval(C_1, dis1cut+1) - polyval(C_1, dis1cut)
pixscale2 = polyval(C_2, dis1cut+1) - | polyval(C_2, dis1cut) | numpy.polyval |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
from concorde.tsp import TSPSolver
import functions as fc
import plots_functions as pf
import file_reader_writer as frw
import time
import sys
import os
from shapely.geometry import LineString
from shapely.geometry import Point
from termcolor import colored
from collections import defaultdict
from concorde.tests.data_utils import get_dataset_path
| np.set_printoptions(threshold=sys.maxsize) | numpy.set_printoptions |
"""Core functions of the PPO algorithm."""
import gym
import numpy as np
import scipy.signal
import tensorflow as tf
EPS = 1e-8
LOG_STD_MAX = 2
LOG_STD_MIN = -20
def distribute_value(value, num_proc):
"""Adjusts training parameters for distributed training.
In distributed training, frequencies expressed in global steps have to be
converted to local steps, i.e. divided by the number of processes.
"""
return max(value // num_proc, 1)
def combined_shape(length, shape=None):
if shape is None:
return (length,)
return (length, shape) if np.isscalar(shape) else (length, *shape)
def discount_cumsum(x, discount):
"""Magic from rllab for computing discounted cumulative sums of vectors."""
return scipy.signal.lfilter([1], [1, float(-discount)], x[::-1], axis=0)[
::-1]
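# A small worked example (illustration only): for x = [1, 1, 1] and discount = 0.9
# the discounted cumulative sums are [1 + 0.9 + 0.81, 1 + 0.9, 1] = [2.71, 1.9, 1.0].
def _demo_discount_cumsum():
    return discount_cumsum(np.array([1.0, 1.0, 1.0]), 0.9)   # -> array([2.71, 1.9 , 1.  ])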
@tf.function
def gaussian_likelihood(value, mu, log_std):
"""Calculates value's likelihood under Gaussian pdf."""
pre_sum = -0.5 * (
((value - mu) / (tf.exp(log_std) + EPS)) ** 2 +
2 * log_std + | np.log(2 * np.pi) | numpy.log |
import numpy as np
from tqdm import tqdm
from collections import defaultdict
from sklearn import metrics
from sklearn.preprocessing import normalize
def finch(feats, finch_step, finch_dis, metric="cosine", do_normalize=True):
if do_normalize:
feats = normalize(feats, norm='l2').astype('float32')
num_track = feats.shape[0]
clusters = np.arange(num_track)
for step in range(finch_step):
print('Step {}'.format(step))
pre_ids = list(set(clusters))
pre_ids.sort()
if step >= 3:
print(pre_ids[-10:])
if len(pre_ids) <= 3:
break
pre_map = defaultdict(list)
for i, x in tqdm(enumerate(clusters)):
pre_map[x].append(i)
# if step>=3:
# print("pre_map before convert: ",pre_map[-10:])
pre_map = {k: np.array(v) for k, v in pre_map.items()}
print('Calculate center features')
if step == 0:
feats_now = feats.copy()
else:
feats_now = np.array([np.sum(feats[pre_map[i]], axis=0) / pre_map[i].size
for i in tqdm(pre_ids)])
print('Search top1')
print("feature_shape_now: ", feats_now.shape)
num_track_now = feats_now.shape[0]
feats_now = normalize(feats_now, norm='l2').astype('float32')
orig_dist = metrics.pairwise.pairwise_distances(feats_now, feats_now, metric=metric)
np.fill_diagonal(orig_dist, float('inf'))
topk_idx = | np.argmin(orig_dist, axis=1) | numpy.argmin |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for regularizers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
class RegularizerTest(tf.test.TestCase):
def test_l1(self):
with self.assertRaises(ValueError):
tf.contrib.layers.l1_regularizer(-1.)
with self.assertRaises(ValueError):
tf.contrib.layers.l1_regularizer(0)
self.assertIsNone(tf.contrib.layers.l1_regularizer(0.)(None))
values = np.array([1., -1., 4., 2.])
weights = tf.constant(values)
with tf.Session() as sess:
result = sess.run(tf.contrib.layers.l1_regularizer(.5)(weights))
self.assertAllClose( | np.abs(values) | numpy.abs |
import os
import pickle
import random
import cv2 as cv
import numpy as np
from tqdm import tqdm
from math import ceil
from numpy.linalg import inv
from natsort import natsorted
from PIL import Image, ImageOps
from config import train_file, valid_file, test_file, image_folder, im_size
debug_identity = False
generate_series = True
### This function is provided by <NAME>'s repository "deep_homography_estimation"
# https://github.com/mez/deep_homography_estimation
# Dataset_Generation_Visualization.ipynb
def process(files, is_test):
# Data gen parameters
size = (im_size, im_size)
if is_test:
#size = (640, 480)
#patch_size = 256
patch_size = im_size
rho = patch_size//4
else:
#size = (320, 240)
#patch_size = 128
size = (im_size//2, im_size//2)
patch_size = im_size
rho = patch_size//4
samples = []
index = 0
for (f1, m, f2) in tqdm(files):
fullpath1 = os.path.join(image_folder, f1)
fullpath2 = os.path.join(image_folder, f2)
#img1 = cv.imread(fullpath1, 0)
#img2 = cv.imread(fullpath2, 0)
#img1 = cv.resize(img1, size)
#img2 = cv.resize(img2, size)
img1 = Image.open(fullpath1)
img2 = Image.open(fullpath2)
img1 = ImageOps.grayscale(img1)
img2 = ImageOps.grayscale(img2)
color = 'black'
img1 = ImageOps.pad(img1, size, color=color)
img2 = ImageOps.pad(img2, size, color=color)
#img1.show()
#img2.show()
img1 = np.asarray(img1)
img2 = np.asarray(img2)
top_point = (rho, rho)
left_point = (rho, patch_size + rho)
bottom_point = (patch_size + rho, patch_size + rho)
right_point = (patch_size + rho, rho)
output = [top_point, left_point, bottom_point, right_point]
print(output)
if generate_series:
num_transforms = 20
for index in range(num_transforms + 1):
offset = size[0] * 2 * index/num_transforms
shift_top = offset
shift_bottom = offset/8
drop_top = offset
drop_bottom = offset/16
four_points = [(0,0),(0,size[0]),(size[0],size[0]),(size[0],0)]
perturbed_four_points = [(-shift_top,-drop_top),(shift_bottom,size[0]-drop_bottom),(size[0]-shift_bottom,size[0]-drop_bottom),(size[0]+shift_top,-drop_top)]
H = cv.getPerspectiveTransform(np.float32(four_points), np.float32(perturbed_four_points))
warped_image = cv.warpPerspective(img1, inv(H), size)
#cv.imshow('img1', img1)
#cv.imshow('warped_image', warped_image)
#cv.waitKey()
training_image = | np.dstack((img1, warped_image)) | numpy.dstack |