prompt (string, lengths 19–879k) | completion (string, lengths 3–53.8k) | api (string, lengths 8–59)
---|---|---
import math
import numpy as np
import matlab.engine
from pyomo.environ import *
from pyomo.dae import *
from pyomo.gdp import *
from pyomo.gdp.plugins.chull import ConvexHull_Transformation
from pyomo.gdp.plugins.bigm import BigM_Transformation
from pyomo.core import Var
from pyomo.dae.plugins.finitedifference import Finite_Difference_Transformation
import hopperUtil
class Hopper:
def __init__(self, N, eng, matlabHopper, name=''):
self.model_disc = []
self.positionMax = 10
self.rotationMax = 2*np.pi
self.velocityMax = 10
self.angularVelocityMax = 10
self.forceMax = 10
self.N = N
self.r = []
self.v = []
self.F = []
self.th = []
self.w = []
self.T = []
self.p = []
self.pd = []
self.R = []
self.dtBounds = (0.05, 0.2)
self.dtNom = 0.1
self.c = []
self.p_MDT = -2
self.P_MDT = -1
self.regions = []
self.base = 10
self.tf = 1
self.nOrientationSectors = 1
self.bodyRadius = 0.25
self.mdt_precision = 1
self.eng = eng
self.matlabHopper = matlabHopper
self.momentOfInertia = self.eng.getDimensionlessMomentOfInertia(self.matlabHopper)
self.hipOffset = self.eng.getHipInBody(self.matlabHopper)
self.footnames = self.hipOffset.keys()
def addPlatform(self, platform_start, platform_end, platform_height, mu, platform_left, platform_right):
self.addRegion(A=np.matrix('-1., 0.,; 1., 0.'),
b=np.matrix('%f; %f' % (-(platform_start+0.1), platform_end-0.1)),
Aeq=np.array([0., 1.]), beq=platform_height, normal=np.matrix('0.; 1.'),
mu=mu)
self.eng.addPlatform(self.matlabHopper, platform_start, platform_end, platform_height, platform_left, platform_right, nargout=0)
def addFreeBlock(self, left=None, right=None, top=None, bottom=None):
Arows = []
brows = []
if left is not None:
Arows.append(np.matrix('-1., 0.'))
brows.append(np.matrix(-left))
if right is not None:
Arows.append(np.matrix('1., 0.'))
brows.append(np.matrix(right))
if top is not None:
Arows.append(np.matrix('0., 1.'))
brows.append(np.matrix(top))
if bottom is not None:
Arows.append(np.matrix('0., -1.'))
brows.append(np.matrix(-bottom))
self.addRegion(A=np.vstack(Arows), b=np.vstack(brows))
def addRegion(self, **kwargs):
self.regions.append(dict.fromkeys(['A', 'b', 'Aeq', 'beq', 'normal', 'mu']))
self.regions[-1]['normal'] = np.matrix('0.; 0.')
self.regions[-1]['mu'] = 0.
for key, value in kwargs.items():
for key2 in self.regions[-1].keys():
if key == key2:
self.regions[-1][key] = value
forMatlab = dict(self.regions[-1])
for key, value in forMatlab.items():
if isinstance(value, type(np.array(0))):
forMatlab[key] = matlab.double(value.tolist())
if value is None:
forMatlab[key] = matlab.double([])
self.eng.addRegion(self.matlabHopper, forMatlab, nargout=0)
def constructVisualizer(self):
self.eng.constructVisualizer(self.matlabHopper, nargout=0)
def playback(self, speed=1.):
self.eng.playback(self.matlabHopper, speed, nargout=0)
def extractTime(self, m):
return np.cumsum([0.]+[m.dt[ti].value for ti in m.t][:-1])
def extractPostition(self, m):
return np.vstack([np.array([m.r[xz, ti].value for ti in m.t]) for xz in m.R2_INDEX])
def extractVelocity(self, m):
return np.vstack([np.array([m.v[xz, ti].value for ti in m.t]) for xz in m.R2_INDEX])
def extractTotalForce(self, m):
return np.vstack([np.array([m.F[xz, ti].value for ti in m.t]) for xz in m.R2_INDEX])
def extractOrientation(self, m):
return np.atleast_2d(np.array([m.th[ti].value for ti in m.t]))  # api: numpy.array
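The addRegion/addFreeBlock methods above encode each terrain region as a half-space set A·x <= b (plus optional equality and friction data). Below is a minimal NumPy sketch of that representation, independent of the MATLAB engine and Pyomo this class requires; the block bounds and the in_region helper are illustrative, not part of the class.

import numpy as np

# Hypothetical free block with left=0, right=2, bottom=0, top=1,
# assembled the same way addFreeBlock stacks rows of A and b.
A = np.array([[-1., 0.],    # -x <= -left
              [ 1., 0.],    #  x <=  right
              [ 0., 1.],    #  y <=  top
              [ 0., -1.]])  # -y <= -bottom
b = np.array([0., 2., 1., 0.])

def in_region(point, A, b, tol=1e-9):
    # The point lies in the region when every inequality A @ x <= b holds.
    return bool(np.all(A @ np.asarray(point, dtype=float) <= b + tol))

print(in_region([1.0, 0.5], A, b))  # True: inside the block
print(in_region([3.0, 0.5], A, b))  # False: to the right of it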
import emcee
import numpy as np
from robo.acquisition_functions.information_gain import InformationGain
class InformationGainPerUnitCost(InformationGain):
def __init__(self, model, cost_model,
lower, upper,
is_env_variable,
sampling_acquisition=None,
n_representer=50):
"""
Information gain per unit cost as described in Swersky et al. [1], which
computes the information gain of a configuration divided by its cost.
This implementation differs slightly from that of Swersky et al. in that
it additionally adds the optimization overhead to the cost. You can
simply set the optimization overhead to 0 to obtain the original
formulation.
[1] <NAME>., <NAME>., and <NAME>.
Multi-task Bayesian optimization.
In Proc. of NIPS 13, 2013.
Parameters
----------
model : Model object
Models the objective function. The model has to be a
Gaussian process.
cost_model : model
Models the cost function. The model has to be a Gaussian Process.
lower : (D) numpy array
Specifies the lower bound of the input space. Each entry
corresponds to one dimension.
upper : (D) numpy array
Specifies the upper bound of the input space. Each entry
corresponds to one dimension.
is_env_variable : (D) numpy array
Specifies which input dimension is an environmental variable. If
the i-th input is an environmental variable, then the i-th entry has
to be 1, and 0 otherwise.
n_representer : int, optional
The number of representer points to discretize the input space and
to compute pmin.
"""
self.cost_model = cost_model
self.n_dims = lower.shape[0]
self.is_env = is_env_variable
super(InformationGainPerUnitCost, self).__init__(model,
lower,
upper,
sampling_acquisition=sampling_acquisition,
Nb=n_representer)
def update(self, model, cost_model, overhead=None):
self.cost_model = cost_model
if overhead is None:
self.overhead = 0
else:
self.overhead = overhead
super(InformationGainPerUnitCost, self).update(model)
def compute(self, X, derivative=False):
"""
Computes the acquisition function value for a single point.
Parameters
----------
X : (1, D) numpy array
The input point for which the acquisition function is computed.
derivative : bool, optional
If True, the derivative with respect to X is also computed.
Returns
-------
acquisition_value: numpy array
The acquisition function value computed for X.
grad : numpy array
The computed gradient of the acquisition function at X. Only
returned if derivative==True
"""
if len(X.shape) == 1:
X = X[np.newaxis, :]
# Predict the log costs for this configuration
log_cost = self.cost_model.predict(X)[0]
if derivative:
raise "Not implemented"
else:
dh = super(InformationGainPerUnitCost, self).compute(X,
derivative=derivative)
# We model the log cost, but we compute
# the information gain per unit cost
# Add the cost it took to pick the last configuration
cost = np.exp(log_cost)
acquisition_value = dh / (cost + self.overhead)
return acquisition_value
def sampling_acquisition_wrapper(self, x):
# Check if sample point is inside the configuration space
lower = self.lower[np.where(self.is_env == 0)]  # api: numpy.where
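The class above divides the information gain returned by the parent InformationGain class by exp(predicted log cost) plus the optimization overhead. Below is a minimal NumPy sketch of that final step in compute(); the gain, log-cost, and overhead numbers are placeholders, not output of RoBO models.

import numpy as np

dh = np.array([0.8, 0.5, 0.3])          # information gain of three candidate configs
log_cost = np.array([0.0, -1.0, -2.3])  # the cost model predicts *log* cost
overhead = 0.5                          # optimization overhead added to the cost

cost = np.exp(log_cost)                 # back to the original cost scale
acquisition_value = dh / (cost + overhead)
print(acquisition_value)                # cheap but informative configs score highest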
import numpy as np
import time
from numpy.linalg import inv
from scipy.optimize import newton
from scipy.linalg.blas import dgemm,sgemm,sgemv
def derivative_minim_sub(y_sub, X_sub, X_subT, G_selected, A_selc, subsample_size):
def smaller_predproc_exponential(param):
h = param
C_inv = inv(h*G_selected+(1-h)*np.identity(subsample_size))  # api: numpy.identity
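The truncated helper above evaluates, for a candidate variance ratio h, the covariance matrix C = h*G + (1-h)*I and its inverse. Below is a toy sketch of that construction; the 3x3 G is invented for illustration.

import numpy as np
from numpy.linalg import inv

subsample_size = 3
G_selected = np.array([[1.0, 0.2, 0.1],
                       [0.2, 1.0, 0.3],
                       [0.1, 0.3, 1.0]])   # toy relatedness/kernel matrix
h = 0.4                                    # candidate variance ratio

C = h * G_selected + (1 - h) * np.identity(subsample_size)
C_inv = inv(C)
print(np.allclose(C_inv @ C, np.identity(subsample_size)))  # True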
"""
Image alignment.
:func:`~apply_transform_wcs()`: align an image based on WCS.
:func:`~apply_transform_stars()`: align an image based on pixel coordinates of
1, 2, or more stars.
"""
from typing import List as TList, Tuple, Union
from numpy import (
array, empty, float32, full, indices, ma, mgrid, ndarray, ones, sqrt,
transpose, zeros)
from numpy.linalg import lstsq
import scipy.ndimage
from astropy.wcs import WCS
__all__ = ['apply_transform_stars', 'apply_transform_wcs']
def apply_transform_stars(img: Union[ndarray, ma.MaskedArray],
src_stars: Union[TList[Tuple[float, float]],
ndarray],
dst_stars: Union[TList[Tuple[float, float]],
ndarray],
ref_width: int, ref_height: int,
prefilter: bool = True) -> ma.MaskedArray:
"""
Align an image based on pixel coordinates of one or more stars
:param img: input image as 2D NumPy array
:param src_stars: list of (X, Y) coordinates of one or more alignment stars
in the image being aligned
:param dst_stars: list of (X, Y) coordinates of the same stars as in
`src_stars` in the reference image
:param ref_width: reference image width in pixels
:param ref_height: reference image height in pixels
:param prefilter: apply spline filter before interpolation
:return: transformed image
"""
nref = min(len(src_stars), len(dst_stars))
src_x, src_y = transpose(src_stars[:nref])
dst_x, dst_y = transpose(dst_stars[:nref])
# Pad the image if smaller than the reference image
h, w = img.shape
avg = img.mean()
if w < ref_width or h < ref_height:
new_img = full([max(h, ref_height), max(w, ref_width)], avg, img.dtype)
if isinstance(img, ma.MaskedArray) and img.mask.any():
new_img[:h, :w] = img.data
mask = ones([ref_height, ref_width], bool)
mask[:h, :w] = img.mask
img = ma.MaskedArray(new_img, mask)
else:
new_img[:h, :w] = img
img = new_img
if isinstance(img, ma.MaskedArray) and img.mask.any():
# scipy.ndimage does not handle masked arrays; fill masked values with
# the global mean and re-mask them after the transformation
mask = img.mask.astype(float32)
img = img.filled(avg)
else:
mask = zeros(img.shape, float32)
if nref == 1:
# Pure shift
offset = [dst_y[0] - src_y[0], dst_x[0] - src_x[0]]
img = scipy.ndimage.shift(
img, offset, mode='nearest', prefilter=prefilter)
mask = scipy.ndimage.shift(mask, offset, cval=True, prefilter=prefilter)
else:
if nref == 2:
# Partial affine transform (shift + rotation + uniform scale)
# [ src_y ] [ A B ] [ dst_y ] [ dy ]
# [ src_x ] = [ -B A ] [ dst_x ] + [ dx ]
src_dy, src_dx = src_y[0] - src_y[1], src_x[0] - src_x[1]
dst_dy, dst_dx = dst_y[0] - dst_y[1], dst_x[0] - dst_x[1]
d = dst_dx**2 + dst_dy**2
if not d:
raise ValueError(
'Both alignment stars have the same coordinates')
a = (src_dy*dst_dy + src_dx*dst_dx)/d
b = (src_dy*dst_dx - src_dx*dst_dy)/d
mat = array([[a, b], [-b, a]])
offset = [src_y[0] - dst_y[0]*a - dst_x[0]*b,
src_x[0] - dst_x[0]*a + dst_y[0]*b]
else:
# Full affine transform
# [ src_y ] [ A B ] [ dst_y ] [ dy ]
# [ src_x ] = [ C D ] [ dst_x ] + [ dx ]
a = transpose([dst_y, dst_x, ones(nref)])
py = lstsq(a, src_y, rcond=None)[0]
px = lstsq(a, src_x, rcond=None)[0]
mat = array([py[:2], px[:2]])
offset = [py[2], px[2]]
img = scipy.ndimage.affine_transform(
img, mat, offset, mode='nearest', prefilter=prefilter)
mask = scipy.ndimage.affine_transform(
mask, mat, offset, cval=True, prefilter=prefilter) > 0.06
# Match the reference image size
if w > ref_width or h > ref_height:
img = img[:ref_height, :ref_width]
mask = mask[:ref_height, :ref_width]
return ma.masked_array(img, mask, fill_value=avg)
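A self-contained usage sketch of apply_transform_stars for the one-star (pure shift) case, run in the same namespace as the function above; the 8x8 toy image and star coordinates are invented.

import numpy as np

img = np.zeros((8, 8), dtype=float)
img[2, 3] = 100.0                       # a single bright "star" at (x=3, y=2)

# The same star sits at (x=4, y=3) in the reference frame, so alignment
# should move the bright pixel down and right by one pixel each.
aligned = apply_transform_stars(img, src_stars=[(3.0, 2.0)],
                                dst_stars=[(4.0, 3.0)],
                                ref_width=8, ref_height=8)
print(np.unravel_index(np.argmax(aligned), aligned.shape))  # (3, 4)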
wcs_grid = {
1: (array([1/2]),
array([1/2])),
2: (array([1/3, 2/3]),
array([1/2, 1/2])),
3: (array([1/4, 1/2, 3/4]),
array([1/3, 2/3, 1/3])),
4: (array([1/3, 2/3, 1/3, 2/3]),
array([1/3, 1/3, 2/3, 2/3])),
5: (array([1/3, 2/3, 1/3, 2/3, 1/2]),
array([1/3, 1/3, 2/3, 2/3, 1/2])),
6: (array([1/4, 1/2, 3/4, 1/4, 1/2, 3/4]),
array([1/3, 1/3, 1/3, 2/3, 2/3, 2/3])),
7: (array([1/4, 1/2, 3/4, 1/4, 1/2, 3/4, 1/2]),
array([1/3, 1/3, 1/3, 2/3, 2/3, 2/3, 1/2])),
8: (array([1/4, 1/2, 3/4, 1/4, 1/2, 3/4, 1/3, 2/3]),
array([1/3, 1/3, 1/3, 2/3, 2/3, 2/3, 1/2, 1/2])),
9: (array([1/4, 1/2, 3/4, 1/4, 1/2, 3/4, 1/4, 1/2, 3/4]),
array([1/4, 1/4, 1/4, 1/2, 1/2, 1/2, 3/4, 3/4, 3/4])),
}
def apply_transform_wcs(img: Union[ndarray, ma.MaskedArray],
src_wcs: WCS, dst_wcs: WCS,
ref_width: int, ref_height: int,
grid_points: int = 0,
prefilter: bool = False) -> ma.MaskedArray:
"""
Align an image based on WCS
:param img: input image as 2D NumPy array
:param src_wcs: WCS of image being aligned
:param dst_wcs: reference image WCS
:param ref_width: reference image width in pixels
:param ref_height: reference image height in pixels
:param grid_points: number of grid points for WCS interpolation::
0: transform using WCS calculated for each pixel
1: offset-only alignment using central pixel
2: shift + rotation + uniform scale (2-star) alignment using two points
>= 3: full affine transform using the given number of fake "alignment
stars" generated from the WCS
:param prefilter: apply spline filter before interpolation
:return: transformed image
"""
# Pad the image if smaller than the reference image
h, w = img.shape
avg = img.mean()
if w < ref_width or h < ref_height:
new_img = full([max(h, ref_height), max(w, ref_width)], avg, img.dtype)
if isinstance(img, ma.MaskedArray) and img.mask.any():
new_img[:h, :w] = img.data
mask = ones(new_img.shape, bool)  # api: numpy.ones
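apply_transform_wcs is cut off above, but the wcs_grid table it relies on maps a requested number of grid points to fractional (x, y) positions; presumably these fractions are scaled by the reference image size to place the fake "alignment stars" the docstring mentions. A hedged sketch of that scaling, run in the module namespace (the frame size is arbitrary, and the exact use inside the truncated function is an assumption):

ref_width, ref_height = 200, 100
fx, fy = wcs_grid[4]                          # fractional positions for a 4-point grid
grid_x, grid_y = fx * ref_width, fy * ref_height
print(list(zip(grid_x, grid_y)))
# roughly [(66.7, 33.3), (133.3, 33.3), (66.7, 66.7), (133.3, 66.7)]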
from __future__ import division, absolute_import, print_function
import sys
from functools import reduce
import numpy as np
import numpy.core.umath as umath
import numpy.core.fromnumeric as fromnumeric
from numpy.testing import TestCase, run_module_suite, assert_, dec
from numpy.ma.testutils import assert_array_equal
from numpy.ma import (
MaskType, MaskedArray, absolute, add, all, allclose, allequal, alltrue,
arange, arccos, arcsin, arctan, arctan2, array, average, choose,
concatenate, conjugate, cos, cosh, count, divide, equal, exp, filled,
getmask, greater, greater_equal, inner, isMaskedArray, less,
less_equal, log, log10, make_mask, masked, masked_array, masked_equal,
masked_greater, masked_greater_equal, masked_inside, masked_less,
masked_less_equal, masked_not_equal, masked_outside,
masked_print_option, masked_values, masked_where, maximum, minimum,
multiply, nomask, nonzero, not_equal, ones, outer, product, put, ravel,
repeat, resize, shape, sin, sinh, sometrue, sort, sqrt, subtract, sum,
take, tan, tanh, transpose, where, zeros,
)
pi = np.pi
def eq(v, w, msg=''):
result = allclose(v, w)
if not result:
print("Not eq:%s\n%s\n----%s" % (msg, str(v), str(w)))
return result
class TestMa(TestCase):
def setUp(self):
x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.])
y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.])
a10 = 10.
m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1]
xm = array(x, mask=m1)
ym = array(y, mask=m2)
z = np.array([-.5, 0., .5, .8])
zm = array(z, mask=[0, 1, 0, 0])
xf = np.where(m1, 1e+20, x)
s = x.shape
xm.set_fill_value(1e+20)
self.d = (x, y, a10, m1, m2, xm, ym, z, zm, xf, s)
def test_testBasic1d(self):
# Test of basic array creation and properties in 1 dimension.
(x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
self.assertFalse(isMaskedArray(x))
self.assertTrue(isMaskedArray(xm))
self.assertEqual(shape(xm), s)
self.assertEqual(xm.shape, s)
self.assertEqual(xm.dtype, x.dtype)
self.assertEqual(xm.size, reduce(lambda x, y:x * y, s))
self.assertEqual(count(xm), len(m1) - reduce(lambda x, y:x + y, m1))
self.assertTrue(eq(xm, xf))
self.assertTrue(eq(filled(xm, 1.e20), xf))
self.assertTrue(eq(x, xm))
def test_testBasic2d(self):
# Test of basic array creation and properties in 2 dimensions.
for s in [(4, 3), (6, 2)]:
(x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
x.shape = s
y.shape = s
xm.shape = s
ym.shape = s
xf.shape = s
self.assertFalse(isMaskedArray(x))
self.assertTrue(isMaskedArray(xm))
self.assertEqual(shape(xm), s)
self.assertEqual(xm.shape, s)
self.assertEqual(xm.size, reduce(lambda x, y:x * y, s))
self.assertEqual(count(xm),
len(m1) - reduce(lambda x, y:x + y, m1))
self.assertTrue(eq(xm, xf))
self.assertTrue(eq(filled(xm, 1.e20), xf))
self.assertTrue(eq(x, xm))
self.setUp()
def test_testArithmetic(self):
# Test of basic arithmetic.
(x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
a2d = array([[1, 2], [0, 4]])
a2dm = masked_array(a2d, [[0, 0], [1, 0]])
self.assertTrue(eq(a2d * a2d, a2d * a2dm))
self.assertTrue(eq(a2d + a2d, a2d + a2dm))
self.assertTrue(eq(a2d - a2d, a2d - a2dm))
for s in [(12,), (4, 3), (2, 6)]:
x = x.reshape(s)
y = y.reshape(s)
xm = xm.reshape(s)
ym = ym.reshape(s)
xf = xf.reshape(s)
self.assertTrue(eq(-x, -xm))
self.assertTrue(eq(x + y, xm + ym))
self.assertTrue(eq(x - y, xm - ym))
self.assertTrue(eq(x * y, xm * ym))
with np.errstate(divide='ignore', invalid='ignore'):
self.assertTrue(eq(x / y, xm / ym))
self.assertTrue(eq(a10 + y, a10 + ym))
self.assertTrue(eq(a10 - y, a10 - ym))
self.assertTrue(eq(a10 * y, a10 * ym))
with np.errstate(divide='ignore', invalid='ignore'):
self.assertTrue(eq(a10 / y, a10 / ym))
self.assertTrue(eq(x + a10, xm + a10))
self.assertTrue(eq(x - a10, xm - a10))
self.assertTrue(eq(x * a10, xm * a10))
self.assertTrue(eq(x / a10, xm / a10))
self.assertTrue(eq(x ** 2, xm ** 2))
self.assertTrue(eq(abs(x) ** 2.5, abs(xm) ** 2.5))
self.assertTrue(eq(x ** y, xm ** ym))
self.assertTrue(eq(np.add(x, y), add(xm, ym)))
self.assertTrue(eq(np.subtract(x, y), subtract(xm, ym)))
self.assertTrue(eq(np.multiply(x, y), multiply(xm, ym)))
with np.errstate(divide='ignore', invalid='ignore'):
self.assertTrue(eq(np.divide(x, y), divide(xm, ym)))
def test_testMixedArithmetic(self):
na = np.array([1])
ma = array([1])
self.assertTrue(isinstance(na + ma, MaskedArray))
self.assertTrue(isinstance(ma + na, MaskedArray))
def test_testUfuncs1(self):
# Test various functions such as sin, cos.
(x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
self.assertTrue(eq(np.cos(x), cos(xm)))
self.assertTrue(eq(np.cosh(x), cosh(xm)))
self.assertTrue(eq(np.sin(x), sin(xm)))
self.assertTrue(eq(np.sinh(x), sinh(xm)))
self.assertTrue(eq(np.tan(x), tan(xm)))
self.assertTrue(eq(np.tanh(x), tanh(xm)))
with np.errstate(divide='ignore', invalid='ignore'):
self.assertTrue(eq(np.sqrt(abs(x)), sqrt(xm)))
self.assertTrue(eq(np.log(abs(x)), log(xm)))
self.assertTrue(eq(np.log10(abs(x)), log10(xm)))
self.assertTrue(eq(np.exp(x), exp(xm)))
self.assertTrue(eq(np.arcsin(z), arcsin(zm)))
self.assertTrue(eq(np.arccos(z), arccos(zm)))
self.assertTrue(eq(np.arctan(z), arctan(zm)))
self.assertTrue(eq(np.arctan2(x, y), arctan2(xm, ym)))
self.assertTrue(eq(np.absolute(x), absolute(xm)))
self.assertTrue(eq(np.equal(x, y), equal(xm, ym)))
self.assertTrue(eq(np.not_equal(x, y), not_equal(xm, ym)))
self.assertTrue(eq(np.less(x, y), less(xm, ym)))
self.assertTrue(eq(np.greater(x, y), greater(xm, ym)))
self.assertTrue(eq(np.less_equal(x, y), less_equal(xm, ym)))
self.assertTrue(eq(np.greater_equal(x, y), greater_equal(xm, ym)))
self.assertTrue(eq(np.conjugate(x), conjugate(xm)))
self.assertTrue(eq(np.concatenate((x, y)), concatenate((xm, ym))))
self.assertTrue(eq(np.concatenate((x, y)), concatenate((x, y))))
self.assertTrue(eq(np.concatenate((x, y)), concatenate((xm, y))))
self.assertTrue(eq(np.concatenate((x, y, x)), concatenate((x, ym, x))))
@dec.skipif('__pypy__' in sys.builtin_module_names)
def test_xtestCount(self):
# Test count
ott = array([0., 1., 2., 3.], mask=[1, 0, 0, 0])
self.assertTrue(count(ott).dtype.type is np.intp)
self.assertEqual(3, count(ott))
self.assertEqual(1, count(1))
self.assertTrue(eq(0, array(1, mask=[1])))
ott = ott.reshape((2, 2))
self.assertTrue(count(ott).dtype.type is np.intp)
assert_(isinstance(count(ott, 0), np.ndarray))
self.assertTrue(count(ott).dtype.type is np.intp)
self.assertTrue(eq(3, count(ott)))
assert_(getmask(count(ott, 0)) is nomask)
self.assertTrue(eq([1, 2], count(ott, 0)))
def test_testMinMax(self):
# Test minimum and maximum.
(x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
xr = np.ravel(x) # max doesn't work if shaped
xmr = ravel(xm)
# true because of careful selection of data
self.assertTrue(eq(max(xr), maximum(xmr)))
self.assertTrue(eq(min(xr), minimum(xmr)))
def test_testAddSumProd(self):
# Test add, sum, product.
(x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
self.assertTrue(eq(np.add.reduce(x), add.reduce(x)))
self.assertTrue(eq(np.add.accumulate(x), add.accumulate(x)))
self.assertTrue(eq(4, sum(array(4), axis=0)))
self.assertTrue(eq(4, sum(array(4), axis=0)))
self.assertTrue(eq(np.sum(x, axis=0), sum(x, axis=0)))
self.assertTrue(eq(np.sum(filled(xm, 0), axis=0), sum(xm, axis=0)))
self.assertTrue(eq(np.sum(x, 0), sum(x, 0)))
self.assertTrue(eq(np.product(x, axis=0), product(x, axis=0)))
self.assertTrue(eq(np.product(x, 0), product(x, 0)))
self.assertTrue(eq(np.product(filled(xm, 1), axis=0),
product(xm, axis=0)))
if len(s) > 1:
self.assertTrue(eq(np.concatenate((x, y), 1),
concatenate((xm, ym), 1)))
self.assertTrue(eq(np.add.reduce(x, 1), add.reduce(x, 1)))
self.assertTrue(eq(np.sum(x, 1), sum(x, 1)))
self.assertTrue(eq(np.product(x, 1), product(x, 1)))
def test_testCI(self):
# Test of conversions and indexing
x1 = np.array([1, 2, 4, 3])
x2 = array(x1, mask=[1, 0, 0, 0])
x3 = array(x1, mask=[0, 1, 0, 1])
x4 = array(x1)
# test conversion to strings
str(x2) # raises?
repr(x2) # raises?
assert_(eq(np.sort(x1), sort(x2, fill_value=0)))
# tests of indexing
assert_(type(x2[1]) is type(x1[1]))
assert_(x1[1] == x2[1])
assert_(x2[0] is masked)
assert_(eq(x1[2], x2[2]))
assert_(eq(x1[2:5], x2[2:5]))
assert_(eq(x1[:], x2[:]))
assert_(eq(x1[1:], x3[1:]))
x1[2] = 9
x2[2] = 9
assert_(eq(x1, x2))
x1[1:3] = 99
x2[1:3] = 99
assert_(eq(x1, x2))
x2[1] = masked
assert_(eq(x1, x2))
x2[1:3] = masked
assert_(eq(x1, x2))
x2[:] = x1
x2[1] = masked
assert_(allequal(getmask(x2), array([0, 1, 0, 0])))
x3[:] = masked_array([1, 2, 3, 4], [0, 1, 1, 0])
assert_(allequal(getmask(x3), array([0, 1, 1, 0])))
x4[:] = masked_array([1, 2, 3, 4], [0, 1, 1, 0])
assert_(allequal(getmask(x4), array([0, 1, 1, 0])))
assert_(allequal(x4, array([1, 2, 3, 4])))
x1 = np.arange(5) * 1.0
x2 = masked_values(x1, 3.0)
assert_(eq(x1, x2))
assert_(allequal(array([0, 0, 0, 1, 0], MaskType), x2.mask))
assert_(eq(3.0, x2.fill_value))
x1 = array([1, 'hello', 2, 3], object)
x2 = np.array([1, 'hello', 2, 3], object)
s1 = x1[1]
s2 = x2[1]
self.assertEqual(type(s2), str)
self.assertEqual(type(s1), str)
self.assertEqual(s1, s2)
assert_(x1[1:1].shape == (0,))
def test_testCopySize(self):
# Tests of some subtle points of copying and sizing.
n = [0, 0, 1, 0, 0]
m = make_mask(n)
m2 = make_mask(m)
self.assertTrue(m is m2)
m3 = make_mask(m, copy=1)
self.assertTrue(m is not m3)
x1 = np.arange(5)
y1 = array(x1, mask=m)
self.assertTrue(y1._data is not x1)
self.assertTrue(allequal(x1, y1._data))
self.assertTrue(y1.mask is m)
y1a = array(y1, copy=0)
self.assertTrue(y1a.mask is y1.mask)
y2 = array(x1, mask=m, copy=0)
self.assertTrue(y2.mask is m)
self.assertTrue(y2[2] is masked)
y2[2] = 9
self.assertTrue(y2[2] is not masked)
self.assertTrue(y2.mask is not m)
self.assertTrue(allequal(y2.mask, 0))
y3 = array(x1 * 1.0, mask=m)
self.assertTrue(filled(y3).dtype is (x1 * 1.0).dtype)
x4 = arange(4)
x4[2] = masked
y4 = resize(x4, (8,))
self.assertTrue(eq(concatenate([x4, x4]), y4))
self.assertTrue(eq(getmask(y4), [0, 0, 1, 0, 0, 0, 1, 0]))
y5 = repeat(x4, (2, 2, 2, 2), axis=0)
self.assertTrue(eq(y5, [0, 0, 1, 1, 2, 2, 3, 3]))
y6 = repeat(x4, 2, axis=0)
self.assertTrue(eq(y5, y6))
def test_testPut(self):
# Test of put
d = arange(5)
n = [0, 0, 0, 1, 1]
m = make_mask(n)
x = array(d, mask=m)
self.assertTrue(x[3] is masked)
self.assertTrue(x[4] is masked)
x[[1, 4]] = [10, 40]
self.assertTrue(x.mask is not m)
self.assertTrue(x[3] is masked)
self.assertTrue(x[4] is not masked)
self.assertTrue(eq(x, [0, 10, 2, -1, 40]))
x = array(d, mask=m)
x.put([0, 1, 2], [-1, 100, 200])
self.assertTrue(eq(x, [-1, 100, 200, 0, 0]))
self.assertTrue(x[3] is masked)
self.assertTrue(x[4] is masked)
def test_testMaPut(self):
(x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
m = [1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1]
i = np.nonzero(m)[0]
put(ym, i, zm)
assert_(all(take(ym, i, axis=0) == zm))
def test_testOddFeatures(self):
# Test of other odd features
x = arange(20)
x = x.reshape(4, 5)
x.flat[5] = 12
assert_(x[1, 0] == 12)
z = x + 10j * x
assert_(eq(z.real, x))
assert_(eq(z.imag, 10 * x))
assert_(eq((z * conjugate(z)).real, 101 * x * x))
z.imag[...] = 0.0
x = arange(10)
x[3] = masked
assert_(str(x[3]) == str(masked))
c = x >= 8
assert_(count(where(c, masked, masked)) == 0)
assert_(shape(where(c, masked, masked)) == c.shape)
z = where(c, x, masked)
assert_(z.dtype is x.dtype)
assert_(z[3] is masked)
assert_(z[4] is masked)
assert_(z[7] is masked)
assert_(z[8] is not masked)
assert_(z[9] is not masked)
assert_(eq(x, z))
z = where(c, masked, x)
assert_(z.dtype is x.dtype)
assert_(z[3] is masked)
assert_(z[4] is not masked)
assert_(z[7] is not masked)
assert_(z[8] is masked)
assert_(z[9] is masked)
z = masked_where(c, x)
assert_(z.dtype is x.dtype)
assert_(z[3] is masked)
assert_(z[4] is not masked)
assert_(z[7] is not masked)
assert_(z[8] is masked)
assert_(z[9] is masked)
assert_(eq(x, z))
x = array([1., 2., 3., 4., 5.])
c = array([1, 1, 1, 0, 0])
x[2] = masked
z = where(c, x, -x)
assert_(eq(z, [1., 2., 0., -4., -5]))
c[0] = masked
z = where(c, x, -x)
assert_(eq(z, [1., 2., 0., -4., -5]))
assert_(z[0] is masked)
assert_(z[1] is not masked)
assert_(z[2] is masked)
assert_(eq(masked_where(greater(x, 2), x), masked_greater(x, 2)))
assert_(eq(masked_where(greater_equal(x, 2), x),
masked_greater_equal(x, 2)))
assert_(eq(masked_where(less(x, 2), x), masked_less(x, 2)))
assert_(eq(masked_where(less_equal(x, 2), x), masked_less_equal(x, 2)))
assert_(eq(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2)))
assert_(eq(masked_where(equal(x, 2), x), masked_equal(x, 2)))
assert_(eq(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2)))
assert_(eq(masked_inside(list(range(5)), 1, 3), [0, 199, 199, 199, 4]))
assert_(eq(masked_outside(list(range(5)), 1, 3), [199, 1, 2, 3, 199]))
assert_(eq(masked_inside(array(list(range(5)),
mask=[1, 0, 0, 0, 0]), 1, 3).mask,
[1, 1, 1, 1, 0]))
assert_(eq(masked_outside(array(list(range(5)),
mask=[0, 1, 0, 0, 0]), 1, 3).mask,
[1, 1, 0, 0, 1]))
assert_(eq(masked_equal(array(list(range(5)),
mask=[1, 0, 0, 0, 0]), 2).mask,
[1, 0, 1, 0, 0]))
assert_(eq(masked_not_equal(array([2, 2, 1, 2, 1],
mask=[1, 0, 0, 0, 0]), 2).mask,
[1, 0, 1, 0, 1]))
assert_(eq(masked_where([1, 1, 0, 0, 0], [1, 2, 3, 4, 5]),
[99, 99, 3, 4, 5]))
atest = ones((10, 10, 10), dtype=np.float32)
btest = zeros(atest.shape, MaskType)
ctest = masked_where(btest, atest)
assert_(eq(atest, ctest))
z = choose(c, (-x, x))
assert_(eq(z, [1., 2., 0., -4., -5]))
assert_(z[0] is masked)
assert_(z[1] is not masked)
assert_(z[2] is masked)
x = arange(6)
x[5] = masked
y = arange(6) * 10
y[2] = masked
c = array([1, 1, 1, 0, 0, 0], mask=[1, 0, 0, 0, 0, 0])
cm = c.filled(1)
z = where(c, x, y)
zm = where(cm, x, y)
assert_(eq(z, zm))
assert_(getmask(zm) is nomask)
assert_(eq(zm, [0, 1, 2, 30, 40, 50]))
z = where(c, masked, 1)
assert_(eq(z, [99, 99, 99, 1, 1, 1]))
z = where(c, 1, masked)
assert_(eq(z, [99, 1, 1, 99, 99, 99]))
def test_testMinMax2(self):
# Test of minimum and maximum.
assert_(eq(minimum([1, 2, 3], [4, 0, 9]), [1, 0, 3]))
assert_(eq(maximum([1, 2, 3], [4, 0, 9]), [4, 2, 9]))
x = arange(5)
y = arange(5) - 2
x[3] = masked
y[0] = masked
assert_(eq(minimum(x, y), where(less(x, y), x, y)))
assert_(eq(maximum(x, y), where(greater(x, y), x, y)))
assert_(minimum(x) == 0)
assert_(maximum(x) == 4)
def test_testTakeTransposeInnerOuter(self):
# Test of take, transpose, inner, outer products
x = arange(24)
y = np.arange(24)
x[5:6] = masked
x = x.reshape(2, 3, 4)
y = y.reshape(2, 3, 4)
assert_(eq(np.transpose(y, (2, 0, 1)), transpose(x, (2, 0, 1))))
assert_(eq(np.take(y, (2, 0, 1), 1), take(x, (2, 0, 1), 1)))
assert_(eq(np.inner(filled(x, 0), filled(y, 0)),
inner(x, y)))
assert_(eq(np.outer(filled(x, 0), filled(y, 0)),
outer(x, y)))
y = array(['abc', 1, 'def', 2, 3], object)
y[2] = masked
t = take(y, [0, 3, 4])
assert_(t[0] == 'abc')
assert_(t[1] == 2)
assert_(t[2] == 3)
def test_testInplace(self):
# Test of inplace operations and rich comparisons
y = arange(10)
x = arange(10)
xm = arange(10)
xm[2] = masked
x += 1
assert_(eq(x, y + 1))
xm += 1
assert_(eq(x, y + 1))
x = arange(10)
xm = arange(10)
xm[2] = masked
x -= 1
assert_(eq(x, y - 1))
xm -= 1
assert_(eq(xm, y - 1))
x = arange(10) * 1.0
xm = arange(10) * 1.0
xm[2] = masked
x *= 2.0
assert_(eq(x, y * 2))
xm *= 2.0
assert_(eq(xm, y * 2))
x = arange(10) * 2
xm = arange(10)
xm[2] = masked
x //= 2
assert_(eq(x, y))
xm //= 2
assert_(eq(x, y))
x = arange(10) * 1.0
xm = arange(10) * 1.0
xm[2] = masked
x /= 2.0
assert_(eq(x, y / 2.0))
xm /= arange(10)
assert_(eq(xm, ones((10,))))
x = arange(10).astype(np.float32)
xm = arange(10)
xm[2] = masked
x += 1.
assert_(eq(x, y + 1.))
def test_testPickle(self):
# Test of pickling
import pickle
x = arange(12)
x[4:10:2] = masked
x = x.reshape(4, 3)
s = pickle.dumps(x)
y = pickle.loads(s)
assert_(eq(x, y))
def test_testMasked(self):
# Test of masked element
xx = arange(6)
xx[1] = masked
self.assertTrue(str(masked) == '--')
self.assertTrue(xx[1] is masked)
self.assertEqual(filled(xx[1], 0), 0)
# don't know why these should raise an exception...
#self.assertRaises(Exception, lambda x,y: x+y, masked, masked)
#self.assertRaises(Exception, lambda x,y: x+y, masked, 2)
#self.assertRaises(Exception, lambda x,y: x+y, masked, xx)
#self.assertRaises(Exception, lambda x,y: x+y, xx, masked)
def test_testAverage1(self):
# Test of average.
ott = array([0., 1., 2., 3.], mask=[1, 0, 0, 0])
self.assertTrue(eq(2.0, average(ott, axis=0)))
self.assertTrue(eq(2.0, average(ott, weights=[1., 1., 2., 1.])))
result, wts = average(ott, weights=[1., 1., 2., 1.], returned=1)
self.assertTrue(eq(2.0, result))
self.assertTrue(wts == 4.0)
ott[:] = masked
self.assertTrue(average(ott, axis=0) is masked)
ott = array([0., 1., 2., 3.], mask=[1, 0, 0, 0])
ott = ott.reshape(2, 2)
ott[:, 1] = masked
self.assertTrue(eq(average(ott, axis=0), [2.0, 0.0]))
self.assertTrue(average(ott, axis=1)[0] is masked)
self.assertTrue(eq([2., 0.], average(ott, axis=0)))
result, wts = average(ott, axis=0, returned=1)
self.assertTrue(eq(wts, [1., 0.]))
def test_testAverage2(self):
# More tests of average.
w1 = [0, 1, 1, 1, 1, 0]
w2 = [[0, 1, 1, 1, 1, 0], [1, 0, 0, 0, 0, 1]]
x = arange(6)
self.assertTrue(allclose(average(x, axis=0), 2.5))
self.assertTrue(allclose(average(x, axis=0, weights=w1), 2.5))
y = array([arange(6), 2.0 * arange(6)])
self.assertTrue(allclose(average(y, None),
np.add.reduce(np.arange(6)) * 3. / 12.))
self.assertTrue(allclose(average(y, axis=0), np.arange(6) * 3. / 2.))
self.assertTrue(allclose(average(y, axis=1),
[average(x, axis=0), average(x, axis=0)*2.0]))
self.assertTrue(allclose(average(y, None, weights=w2), 20. / 6.))
self.assertTrue(allclose(average(y, axis=0, weights=w2),
[0., 1., 2., 3., 4., 10.]))
self.assertTrue(allclose(average(y, axis=1),
[average(x, axis=0), average(x, axis=0)*2.0]))
m1 = zeros(6)
m2 = [0, 0, 1, 1, 0, 0]
m3 = [[0, 0, 1, 1, 0, 0], [0, 1, 1, 1, 1, 0]]
m4 = ones(6)
m5 = [0, 1, 1, 1, 1, 1]
self.assertTrue(allclose(average(masked_array(x, m1), axis=0), 2.5))
self.assertTrue(allclose(average(masked_array(x, m2), axis=0), 2.5))
self.assertTrue(average(masked_array(x, m4), axis=0) is masked)
self.assertEqual(average(masked_array(x, m5), axis=0), 0.0)
self.assertEqual(count(average(masked_array(x, m4), axis=0)), 0)
z = masked_array(y, m3)
self.assertTrue(allclose(average(z, None), 20. / 6.))
self.assertTrue(allclose(average(z, axis=0),
[0., 1., 99., 99., 4.0, 7.5]))
self.assertTrue(allclose(average(z, axis=1), [2.5, 5.0]))
self.assertTrue(allclose(average(z, axis=0, weights=w2),
[0., 1., 99., 99., 4.0, 10.0]))
a = arange(6)
b = arange(6) * 3
r1, w1 = average([[a, b], [b, a]], axis=1, returned=1)
self.assertEqual(shape(r1), shape(w1))
self.assertEqual(r1.shape, w1.shape)
r2, w2 = average(ones((2, 2, 3)), axis=0, weights=[3, 1], returned=1)
self.assertEqual(shape(w2), shape(r2))  # api: numpy.ma.shape
import numpy as np
from scipy.stats import binom
from sklearn.ensemble import IsolationForest
from sklearn.preprocessing import MinMaxScaler
from scipy.special import erf
from learnware.algorithm.anomaly_detect.base import BaseAnomalyDetect
class iForest(BaseAnomalyDetect):
def __init__(self, n_estimators=100,
max_samples="auto",
contamination=0.1,
max_features=1.,
bootstrap=False,
n_jobs=1,
behaviour='old',
random_state=None,
verbose=0):
super(iForest, self).__init__()
self.contamination = contamination
self.n_estimators = n_estimators
self.max_samples = max_samples
self.max_features = max_features
self.bootstrap = bootstrap
self.n_jobs = n_jobs
self.behaviour = behaviour
self.random_state = random_state
self.verbose = verbose
# Detector used internally by the algorithm
self.detector_ = None
self.decision_scores_ = None
self.threshold_ = None
self.labels_ = None
def fit(self, X, y=None):
self.detector_ = IsolationForest(n_estimators=self.n_estimators,
max_samples=self.max_samples,
contamination=self.contamination,
max_features=self.max_features,
bootstrap=self.bootstrap,
n_jobs=self.n_jobs,
random_state=self.random_state,
verbose=self.verbose)
X = self._data_type_transform(X)
self.detector_.fit(X, y=None, sample_weight=None)
self.decision_function(X)
self._decision_threshold_process()
return self
def predict(self, X, return_confidence=False):
X = self._data_type_transform(X)
if self.detector_ is None:
raise EOFError("Detector not found; please fit the training data first.")
pred_score = self.decision_function(X)
prediction = np.ones_like(pred_score, dtype=int)  # api: numpy.ones_like
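predict() above labels points by comparing decision scores against self.threshold_, which _decision_threshold_process (not shown in this snippet) is expected to derive from the contamination fraction. Below is a minimal NumPy sketch of that common convention; the scores are made up, and this is not necessarily the exact rule the class implements.

import numpy as np

contamination = 0.1
decision_scores_ = np.array([0.02, 0.15, 0.40, 0.05, 0.90, 0.07, 0.12, 0.03, 0.65, 0.08])

# Flag the top `contamination` fraction of scores as outliers (label 1, inliers 0).
threshold_ = np.percentile(decision_scores_, 100 * (1 - contamination))
labels_ = (decision_scores_ > threshold_).astype(int)
print(threshold_, labels_)  # one of the ten points ends up flagged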
# This file is part of h5py, a Python interface to the HDF5 library.
#
# http://www.h5py.org
#
# Copyright 2008-2013 <NAME> and contributors
#
# License: Standard 3-clause BSD; see "license.txt" for full license terms
# and contributor agreement.
"""
Tests the h5py.Dataset.__getitem__ method.
This module does not specifically test type conversion. The "type" axis
therefore only tests objects which interact with the slicing system in
unreliable ways; for example, compound and array types.
See test_dataset_getitem_types for type-conversion tests.
Tests are organized into TestCases by dataset shape and type. Test
methods vary by slicing arg type.
1. Dataset shape:
Empty
Scalar
1D
3D
2. Type:
Float
Compound
Array
3. Slicing arg types:
Ellipsis
Empty tuple
Regular slice
Indexing
Index list
Boolean mask
Field names
"""
import sys
import numpy as np
import h5py
from .common import ut, TestCase
class TestEmpty(TestCase):
def setUp(self):
TestCase.setUp(self)
sid = h5py.h5s.create(h5py.h5s.NULL)
tid = h5py.h5t.C_S1.copy()
tid.set_size(10)
dsid = h5py.h5d.create(self.f.id, b'x', tid, sid)
self.dset = h5py.Dataset(dsid)
self.empty_obj = h5py.Empty(np.dtype("S10"))
def test_ndim(self):
""" Verify number of dimensions """
self.assertEqual(self.dset.ndim, 0)
def test_shape(self):
""" Verify shape """
self.assertEqual(self.dset.shape, None)
def test_size(self):
""" Verify shape """
self.assertEqual(self.dset.size, None)
def test_ellipsis(self):
""" Ellipsis -> ValueError """
self.assertEqual(self.dset[...], self.empty_obj)
def test_tuple(self):
""" () -> IOError """
self.assertEqual(self.dset[()], self.empty_obj)
def test_slice(self):
""" slice -> ValueError """
with self.assertRaises(ValueError):
self.dset[0:4]
def test_index(self):
""" index -> ValueError """
with self.assertRaises(ValueError):
self.dset[0]
def test_indexlist(self):
""" index list -> ValueError """
with self.assertRaises(ValueError):
self.dset[[1,2,5]]
def test_mask(self):
""" mask -> ValueError """
mask = np.array(True, dtype='bool')
with self.assertRaises(ValueError):
self.dset[mask]
def test_fieldnames(self):
""" field name -> ValueError """
with self.assertRaises(ValueError):
self.dset['field']
class TestScalarFloat(TestCase):
def setUp(self):
TestCase.setUp(self)
self.data = np.array(42.5, dtype='f')
self.dset = self.f.create_dataset('x', data=self.data)
def test_ndim(self):
""" Verify number of dimensions """
self.assertEqual(self.dset.ndim, 0)
def test_shape(self):
""" Verify shape """
self.assertEqual(self.dset.shape, tuple())
def test_ellipsis(self):
""" Ellipsis -> scalar ndarray """
out = self.dset[...]
self.assertArrayEqual(out, self.data)
def test_tuple(self):
""" () -> bare item """
out = self.dset[()]
self.assertArrayEqual(out, self.data.item())
def test_slice(self):
""" slice -> ValueError """
with self.assertRaises(ValueError):
self.dset[0:4]
def test_index(self):
""" index -> ValueError """
with self.assertRaises(ValueError):
self.dset[0]
# FIXME: NumPy has IndexError instead
def test_indexlist(self):
""" index list -> ValueError """
with self.assertRaises(ValueError):
self.dset[[1,2,5]]
# FIXME: NumPy permits this
def test_mask(self):
""" mask -> ValueError """
mask = np.array(True, dtype='bool')
with self.assertRaises(ValueError):
self.dset[mask]
def test_fieldnames(self):
""" field name -> ValueError (no fields) """
with self.assertRaises(ValueError):
self.dset['field']
class TestScalarCompound(TestCase):
def setUp(self):
TestCase.setUp(self)
self.data = np.array((42.5, -118, "Hello"), dtype=[('a', 'f'), ('b', 'i'), ('c', '|S10')])
self.dset = self.f.create_dataset('x', data=self.data)
def test_ndim(self):
""" Verify number of dimensions """
self.assertEqual(self.dset.ndim, 0)
def test_shape(self):
""" Verify shape """
self.assertEqual(self.dset.shape, tuple())
def test_ellipsis(self):
""" Ellipsis -> scalar ndarray """
out = self.dset[...]
# assertArrayEqual doesn't work with compounds; do manually
self.assertIsInstance(out, np.ndarray)
self.assertEqual(out.shape, self.data.shape)
self.assertEqual(out.dtype, self.data.dtype)
def test_tuple(self):
""" () -> np.void instance """
out = self.dset[()]
self.assertIsInstance(out, np.void)
self.assertEqual(out.dtype, self.data.dtype)
def test_slice(self):
""" slice -> ValueError """
with self.assertRaises(ValueError):
self.dset[0:4]
def test_index(self):
""" index -> ValueError """
with self.assertRaises(ValueError):
self.dset[0]
# FIXME: NumPy has IndexError instead
def test_indexlist(self):
""" index list -> ValueError """
with self.assertRaises(ValueError):
self.dset[[1,2,5]]
# FIXME: NumPy permits this
def test_mask(self):
""" mask -> ValueError """
mask = np.array(True, dtype='bool')
with self.assertRaises(ValueError):
self.dset[mask]
# FIXME: NumPy returns a scalar ndarray
def test_fieldnames(self):
""" field name -> bare value """
out = self.dset['a']
self.assertIsInstance(out, np.float32)
self.assertEqual(out, self.dset['a'])
class TestScalarArray(TestCase):
def setUp(self):
TestCase.setUp(self)
self.dt = np.dtype('(3,2)f')
self.data = np.array([(3.2, -119), (42, 99.8), (3.14, 0)], dtype='f')
self.dset = self.f.create_dataset('x', (), dtype=self.dt)
self.dset[...] = self.data
def test_ndim(self):
""" Verify number of dimensions """
self.assertEqual(self.data.ndim, 2)
self.assertEqual(self.dset.ndim, 0)
def test_shape(self):
""" Verify shape """
self.assertEqual(self.data.shape, (3, 2))
self.assertEqual(self.dset.shape, tuple())
def test_ellipsis(self):
""" Ellipsis -> ndarray promoted to underlying shape """
out = self.dset[...]
self.assertArrayEqual(out, self.data)
def test_tuple(self):
""" () -> same as ellipsis """
out = self.dset[()]
self.assertArrayEqual(out, self.data)
def test_slice(self):
""" slice -> ValueError """
with self.assertRaises(ValueError):
self.dset[0:4]
def test_index(self):
""" index -> ValueError """
with self.assertRaises(ValueError):
self.dset[0]
def test_indexlist(self):
""" index list -> ValueError """
with self.assertRaises(ValueError):
self.dset[[]]
def test_mask(self):
""" mask -> ValueError """
mask = np.array(True, dtype='bool')
with self.assertRaises(ValueError):
self.dset[mask]
def test_fieldnames(self):
""" field name -> ValueError (no fields) """
with self.assertRaises(ValueError):
self.dset['field']
@ut.skipUnless(h5py.version.hdf5_version_tuple >= (1, 8, 7), 'HDF5 1.8.7+ required')
class Test1DZeroFloat(TestCase):
def setUp(self):
TestCase.setUp(self)
self.data = np.ones((0,), dtype='f')
self.dset = self.f.create_dataset('x', data=self.data)
def test_ndim(self):
""" Verify number of dimensions """
self.assertEqual(self.dset.ndim, 1)
def test_shape(self):
""" Verify shape """
self.assertEqual(self.dset.shape, (0,))
def test_ellipsis(self):
""" Ellipsis -> ndarray of matching shape """
self.assertNumpyBehavior(self.dset, self.data, np.s_[...])
def test_tuple(self):
""" () -> same as ellipsis """
self.assertNumpyBehavior(self.dset, self.data, np.s_[()])
def test_slice(self):
""" slice -> ndarray of shape (0,) """
self.assertNumpyBehavior(self.dset, self.data, np.s_[0:4])
def test_slice_stop_less_than_start(self):
self.assertNumpyBehavior(self.dset, self.data, np.s_[7:5])
def test_index(self):
""" index -> out of range """
with self.assertRaises(IndexError):
self.dset[0]
def test_indexlist(self):
""" index list """
self.assertNumpyBehavior(self.dset, self.data, np.s_[[]])
def test_mask(self):
""" mask -> ndarray of matching shape """
mask = np.ones((0,), dtype='bool')
self.assertNumpyBehavior(self.dset, self.data, np.s_[mask])
def test_fieldnames(self):
""" field name -> ValueError (no fields) """
with self.assertRaises(ValueError):
self.dset['field']
class Test1DFloat(TestCase):
def setUp(self):
TestCase.setUp(self)
self.data = np.arange(13).astype('f')
self.dset = self.f.create_dataset('x', data=self.data)
def test_ndim(self):
""" Verify number of dimensions """
self.assertEqual(self.dset.ndim, 1)
def test_shape(self):
""" Verify shape """
self.assertEqual(self.dset.shape, (13,))
def test_ellipsis(self):
self.assertNumpyBehavior(self.dset, self.data, np.s_[...])
def test_tuple(self):
self.assertNumpyBehavior(self.dset, self.data, np.s_[()])
def test_slice_simple(self):
self.assertNumpyBehavior(self.dset, self.data, np.s_[0:4])
def test_slice_zerosize(self):
self.assertNumpyBehavior(self.dset, self.data, np.s_[4:4])
def test_slice_strides(self):
self.assertNumpyBehavior(self.dset, self.data, np.s_[1:7:3])
def test_slice_negindexes(self):
self.assertNumpyBehavior(self.dset, self.data, np.s_[-8:-2:3])
def test_slice_stop_less_than_start(self):
self.assertNumpyBehavior(self.dset, self.data, np.s_[7:5])
def test_slice_outofrange(self):
self.assertNumpyBehavior(self.dset, self.data, np.s_[100:400:3])
def test_slice_backwards(self):
""" we disallow negative steps """
with self.assertRaises(ValueError):
self.dset[::-1]
def test_slice_zerostride(self):
self.assertNumpyBehavior(self.dset, self.data, np.s_[::0])
def test_index_simple(self):
self.assertNumpyBehavior(self.dset, self.data, np.s_[3])
def test_index_neg(self):
self.assertNumpyBehavior(self.dset, self.data, np.s_[-4])
# FIXME: NumPy permits this... it adds a new axis in front
def test_index_none(self):
with self.assertRaises(TypeError):
self.dset[None]
# FIXME: NumPy raises IndexError
# Also this currently raises UnboundLocalError. :(
@ut.expectedFailure
def test_index_illegal(self):
""" Illegal slicing argument """
with self.assertRaises(TypeError):
self.dset[{}]
def test_index_outofrange(self):
with self.assertRaises(IndexError):
self.dset[100]
def test_indexlist_simple(self):
self.assertNumpyBehavior(self.dset, self.data, np.s_[[1,2,5]])
def test_indexlist_numpyarray(self):
self.assertNumpyBehavior(self.dset, self.data, np.s_[np.array([1, 2, 5])])
def test_indexlist_single_index_ellipsis(self):
self.assertNumpyBehavior(self.dset, self.data, np.s_[[0], ...])
def test_indexlist_numpyarray_single_index_ellipsis(self):
self.assertNumpyBehavior(self.dset, self.data, np.s_[np.array([0]), ...])  # api: numpy.array
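The cases above document where h5py's Dataset.__getitem__ follows NumPy and where it deliberately deviates (negative steps, scalar masks, etc.). Below is a small standalone sketch of the 1-D float case using an in-memory HDF5 file; the file name and data are arbitrary.

import numpy as np
import h5py

data = np.arange(13).astype('f')
with h5py.File('demo.h5', 'w', driver='core', backing_store=False) as f:
    dset = f.create_dataset('x', data=data)
    print(dset[...])           # whole array, same as data[...]
    print(dset[3], data[3])    # simple indexing matches NumPy
    print(dset[[1, 2, 5]])     # an increasing index list is supported
    # dset[::-1] raises ValueError: h5py disallows negative slice steps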
import numpy as np
from scipy.ndimage import median_filter
from scipy.ndimage import gaussian_filter1d
import Marsh
import CCF
def getP(data, centroids, aperture_radius, ron, gain, nsigma, polynomial_spacing, polynomial_order, min_column = None, max_column = None, return_flat = False, data_variance = None):
"""
Given a 2D-spectrum, centroids over it and various properties of the noise, this function returns the light
fractions of a spectrum using the algorithm described in detail in Marsh (1989, PASP 101, 1032).
Attributes
----------
data : numpy.array
Array containing the 2D-spectrum.
centroids : numpy.array
Array containing the centroids at each column of the spectra
aperture_radius : double
Aperture radius (measured from the center).
ron : double
Read-out-noise of the detector, in electrons.
gain : double
Gain of the detector, in electrons/ADU.
nsigma : double
Number-of-sigmas to reject outliers.
polynomial_spacing : double
Pixel spacing between polynomials of Marsh's algorithm.
polynomial_order : int
Order of the polynomials that will be fitted to the surface in Marsh's algorithm (N in the paper).
min_column : int
(Optional) Minimum column to consider the calculation from.
max_column : int
(Optional) Maximum column to consider the calculation from.
return_flat : bool
(Optional) If `True`, returns the flattened version of the light fractions. Default is `False`.
data_variance : numpy.array
(Optional) Array containing the variances of each of the points in the 2-D `data` array. If defined, the `ron` and `gain` will be ignored.
Returns
-------
P : numpy.array
Light frations (weights) of the optimal extraction.
"""
# Prepare inputs to the algorithm:
flattened_data = data.flatten().astype('double')
nrows, ncolumns = data.shape
ncentroids = len(centroids)
if min_column is None:
min_column = 0
if max_column is None:
max_column = ncolumns
# Calculate the light fractions (P's):
if data_variance is None:
flattened_P = Marsh.ObtainP(flattened_data,
centroids,
nrows,
ncolumns,
ncentroids,
aperture_radius,
ron,
gain,
nsigma,
polynomial_spacing,
polynomial_order,
0,
min_column,
max_column
)
else:
flat_ones_array = np.ones(nrows * ncolumns).astype('double')
flattened_variance = data_variance.flatten().astype('double')
flattened_P = Marsh.SObtainP(flattened_data,
flat_ones_array,
flattened_variance,
centroids,
nrows,
ncolumns,
ncentroids,
aperture_radius,
ron,
gain,
nsigma,
polynomial_spacing,
polynomial_order,
0,
min_column,
max_column
)
# Obtain the P's back:
P = np.asarray(flattened_P).astype('double')
if not return_flat:
P.resize(nrows, ncolumns)
# Return light fractions:
return P
def getOptimalSpectrum(data, centroids, aperture_radius, ron, gain, nsigma, polynomial_spacing, polynomial_order, min_column = None, max_column = None, P = None, return_P = False, data_variance = None):
"""
Given a 2D-spectrum, this function returns the optimal extracted spectrum using the algorithm detailed in Marsh (1989, PASP 101, 1032).
By default, this function calculates the light fractions individually for each spectrum; if you have a pre-computed one (e.g.,
obtained with the `getP` function), you can ingest that instead which will significantly speed-up the algorithm when ran on
several spectra.
Attributes
----------
data : numpy.array
Array containing the 2D-spectrum.
centroids : numpy.array
Array containing the centroids at each column of the spectra
aperture_radius : double
Aperture radius (measured from the center).
ron : double
Read-out-noise of the detector, in electrons.
gain : double
Gain of the detector, in electrons/ADU.
nsigma : double
Number-of-sigmas to reject outliers.
polynomial_spacing : double
Pixel spacing between polynomials of Marsh's algorithm.
polynomial_order : int
Order of the polynomials that will be fitted to the surface in Marsh's algorithm (N in the paper).
min_column : int
(Optional) Minimum column to consider the calculation from.
max_column : int
(Optional) Maximum column to consider the calculation from.
P : numpy.array
(Optional) Array containing the 2-D light fractions (the P's --- if not ingested, will be obtained using the `getP` function).
return_P : bool
(Optional) If `True`, function also returns the light fractions (P's).
data_variance : numpy.array
(Optional) Array containing the variances of each of the points in the 2-D `data` array. If defined, the `ron` and `gain` will be ignored.
Returns
-------
spectrum : numpy.array
An array with three rows: spectrum[0,:] indicating the columns, spectrum[1,:] the optimally extracted spectra at those columns and
spectrum[2,:] having the *inverse* of the variance of the spectra.
"""
if P is not None:
flattened_P = P.flatten().astype('double')
else:
if data_variance is None:
flattened_P = getP(data, centroids, aperture_radius, ron, gain, nsigma, polynomial_spacing, polynomial_order,
min_column = min_column, max_column = max_column, return_flat = True)
else:
flattened_P = getP(data, centroids, aperture_radius, ron, gain, nsigma, polynomial_spacing, polynomial_order,
min_column = min_column, max_column = max_column, return_flat = True, data_variance = data_variance)
# Prepare inputs:
flattened_data = data.flatten().astype('double')
nrows, ncolumns = data.shape
ncentroids = len(centroids)
if min_column is None:
min_column = 0
if max_column is None:
max_column = ncolumns
# Obtain extracted spectrum:
if data_variance is None:
flattened_spectrum, size = Marsh.ObtainSpectrum(flattened_data,
centroids,
flattened_P,
nrows,
ncolumns,
ncentroids,
aperture_radius,
ron,
gain,
polynomial_spacing,
nsigma,
min_column,
max_column
)
else:
flat_ones_array = np.ones(nrows * ncolumns).astype('double')
flattened_variance = data_variance.flatten().astype('double')
flattened_spectrum, size = Marsh.SObtainSpectrum(flattened_data,
flat_ones_array,
flattened_variance,
centroids,
flattened_P,
nrows,
ncolumns,
ncentroids,
aperture_radius,
ron,
gain,
polynomial_spacing,
nsigma,
min_column,
max_column
)
spectrum = np.asarray(flattened_spectrum)
spectrum.resize(3, size)
# Return results depending on user-input:
if not return_P:
return spectrum
else:
P = np.asarray(flattened_P).astype('double')
P.resize(nrows, ncolumns)
return spectrum, P
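As the docstring notes, the light fractions P only depend on the trace and the noise model, so they can be computed once and reused across frames. Below is a hedged usage sketch run in the same namespace as the function above; the frames, trace, and noise parameters are placeholders, and the compiled Marsh extension must be available for this module to import at all.

import numpy as np

# Hypothetical stack of 2-D spectra that share a single trace.
frames = [np.random.normal(100., 5., (64, 512)) for _ in range(3)]
centroids = np.full(512, 31.5)          # assumed trace centre for every column
ron, gain, nsigma = 5.0, 1.0, 10.0
poly_spacing, poly_order, radius = 0.5, 7, 10.

# Compute P on the first frame, then reuse it for the remaining ones.
spec0, P = getOptimalSpectrum(frames[0], centroids, radius, ron, gain, nsigma,
                              poly_spacing, poly_order, return_P=True)
spectra = [spec0] + [getOptimalSpectrum(f, centroids, radius, ron, gain, nsigma,
                                        poly_spacing, poly_order, P=P)
                     for f in frames[1:]]
print(spectra[0].shape)  # (3, n_columns): columns, fluxes, inverse variances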
def getFastSimpleSpectrum(data, centroids, aperture_radius, min_column = None, max_column = None, return_aperture = False):
"""
Given a 2D-spectrum, this function returns a simple-extracted spectrum. This function is fast to
compute, but it doesn't calculate errors on the spectra.
Attributes
----------
data : numpy.array
Array containing the 2D-spectrum.
centroids : numpy.array
Array containing the centroids at each column of the spectra
aperture_radius : double
Aperture radius (measured from the center).
min_column : int
(Optional) Minimum column to consider the calculation from.
max_column : int
(Optional) Maximum column to consider the calculation from.
return_aperture : bool
(Optional) If the spectral trace (centroids) hits the edges with the aperture, the algorithm will select
a smaller aperture for the extraction. If `True`, this function returns that selected aperture.
"""
# Prepare inputs:
flattened_data = data.flatten().astype('double')
nrows, ncolumns = data.shape
ncentroids = len(centroids)
if min_column is None:
min_column = 0
if max_column is None:
max_column = ncolumns
# Generate spectrum:
flattened_spectrum, aperture = Marsh.SimpleExtraction(flattened_data,
centroids,
nrows,
ncolumns,
ncentroids,
aperture_radius,
min_column,
max_column
)
# Prepare outputs:
spectrum = np.asarray(flattened_spectrum)
if not return_aperture:
return spectrum
else:
return spectrum, aperture
def getSimpleSpectrum(data, x, y, aperture_radius, background_radius=50, error_data=None, correct_bkg=True, method = 'sum', bkg_method = 'all'):
"""
This function takes as inputs two arrays (x,y) that follow the trace,
and returns the added flux over the defined aperture radius (and its error, if an error matrix
is given as well), substracting in the way any background between the aperture radius and the
background radius. The background is calculated by taking the median of the points between the
aperture_radius and the background_radius.
Parameters
----------
data: ndarray
Image from which the spectrum wants to be extracted
x: ndarray
Array with the x-axis of the trace (i.e., the columns, wavelength direction)
y: ndarray
Array with the y-axis of the trace (i.e., rows, spatial direction)
aperture_radius: float
Distance from the center of the trace at which you want to add fluxes.
background_radius: float
Distance from the center of the trace from which you want to calculate the background. The
background region will be between this radius and the aperture_radius.
error_data: ndarray
Image with the errors of each pixel value on the data ndarray above
correct_bkg: boolean
If True, apply background correction; if False, omit it.
method : string
Method used to perform the extraction. Default is `sum`; `average` takes the average of the non-fractional pixels
used to extract the spectrum. This latter one is useful if the input is a wavelength map.
bkg_method : string
Method for the background subtraction. Currently accepts 'all' to use pixels on both sides, 'up' to use pixels "above" the spectrum and
'down' to use pixels "below" the spectrum.
"""
method = method.lower()
# If average method being used, remove background correction:
if method == 'average':
correct_bkg = False
# Create array that will save our fluxes:
flux = np.zeros(len(x))
if error_data is not None:
flux_error = np.zeros(len(x))
max_column = data.shape[0] - 1
for i in range(len(x)):
# Cut the column with which we'll be working with:
column = data[:,int(x[i])]
if error_data is not None:
variance_column = error_data[:,int(x[i])]**2
# Define limits given by the aperture_radius and background_radius variables:
if correct_bkg:
left_side_bkg = np.max([y[i] - background_radius, 0])
right_side_bkg = np.min([max_column, y[i] + background_radius])
left_side_ap = np.max([y[i] - aperture_radius, 0])
right_side_ap = np.min([max_column, y[i] + aperture_radius])
# Extract background, being careful with edges:
if correct_bkg:
bkg_left = column[np.max([0, int(left_side_bkg)]) : np.max([0, int(left_side_ap)])]
bkg_right = column[np.min([int(right_side_ap), max_column]) : np.max([int(right_side_bkg), max_column])]
if bkg_method == 'all':
bkg = np.median(np.append(bkg_left, bkg_right))
elif bkg_method == 'up':
bkg = np.median(bkg_right)
elif bkg_method == 'down':
bkg = np.median(bkg_left)
else:
bkg = 0.
# Subtract it from the column:
column -= bkg
# Perform aperture extraction of the background-subtracted column, being careful with pixelization
# at the edges. First, deal with left (up) side:
l_decimal, l_integer = np.modf(left_side_ap)
l_integer = int(l_integer)
if l_decimal < 0.5:
l_fraction = (0.5 - l_decimal) * column[np.min([l_integer, max_column])]
l_limit = l_integer + 1
if error_data is not None:
l_fraction_variance = ((0.5 - l_decimal)**2) * variance_column[np.min([l_integer, max_column])]
else:
l_fraction = (1. - (l_decimal - 0.5)) * column[np.min([l_integer + 1, max_column])]
l_limit = l_integer + 2
if error_data is not None:
l_fraction_variance = ((1. - (l_decimal - 0.5))**2) * variance_column[np.min([l_integer + 1, max_column])]
# Now right (down) side:
r_decimal, r_integer = np.modf(right_side_ap)
r_integer = int(r_integer)
if r_decimal < 0.5:
r_fraction = (1. - (0.5 - r_decimal)) * column[np.min([max_column, r_integer])]
r_limit = r_integer
if error_data is not None:
r_fraction_variance = ((1. - (0.5 - r_decimal))**2) * variance_column[np.min([max_column, r_integer])]
else:
r_fraction = (r_decimal - 0.5) * column[np.min([max_column, r_integer + 1])]
r_limit = r_integer + 1
if error_data is not None:
r_fraction_variance = ((r_decimal - 0.5)**2) * variance_column[np.min([max_column, r_integer + 1])]
# Save total flux in current column:
if method == 'sum':
flux[i] = l_fraction + r_fraction + np.sum(column[l_limit:r_limit])
elif method == 'average':
flux[i] = np.mean(column[l_limit:r_limit])
else:
raise Exception('Method "'+method+'" currently not supported for aperture extraction. Select either "sum" or "average".')
if error_data is not None:
# Total error is the sum of the variances:
flux_error[i] = np.sqrt(np.sum(variance_column[l_limit:r_limit]) + l_fraction_variance + \
r_fraction_variance)
if error_data is not None:
return flux, flux_error
else:
return flux
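getSimpleSpectrum only relies on NumPy, so it can be exercised on synthetic data in the same namespace as the function above. A usage sketch with an invented Gaussian trace on a flat background:

import numpy as np

# Synthetic 2-D spectrum: a horizontal trace centred on row 25 over a background of 10.
nrows, ncols = 50, 200
rows = np.arange(nrows, dtype=float)[:, None]
image = 10. + 500. * np.exp(-0.5 * ((rows - 25.) / 1.5)**2) * np.ones((1, ncols))

x = np.arange(ncols)        # columns (wavelength direction)
y = np.full(ncols, 25.)     # trace centre on every column
flux = getSimpleSpectrum(image, x, y, aperture_radius=5, background_radius=15)
print(flux[:3])             # ~1880 per column: the integrated Gaussian, background removed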
def gaussian(x, mean = 0., sigma = 1.):
"""
This function returns a gaussian evaluated at x
Parameters
----------
x : numpy.array
Array containing where the gaussian will be evaluated
mean : double
Mean of the gaussian.
sigma : double
Standard-deviation of the gaussian
Returns
-------
Gaussian evaluated at `x`.
"""
norm = 1. / ( np.sqrt(2. * np.pi) * sigma )
return norm * np.exp( - ( (x - mean)**2 ) / (2 * sigma**2) )
def double_gaussian(x, mean1 = -7.9, mean2 = 7.9, sigma1 = 1., sigma2 = 1.):
"""
This function returns the sum of two gaussians evaluated at x. This function reproduces the expected separation of
the "horns" in a NIRISS/SOSS profile.
Parameters
----------
x : numpy.array
Array containing where the gaussian will be evaluated
mean1 : double
Mean of the first gaussian.
mean2 : double
Mean of the second gaussian.
sigma1 : double
Standard-deviation of the first gaussian.
sigma2 : double
Standard-deviation of the second gaussian.
Returns
-------
Double gaussian evaluated at `x`.
"""
return gaussian(x, mean1, sigma1) + gaussian(x, mean2, sigma2)
def get_ccf(x, y, function = 'gaussian', parameters = None, pixelation = False, lag_step = 0.001):
"""
Function that obtains the CCF between input data defined between x and y and a pre-defined function.
Parameters
----------
x : numpy.array
Array containing the values at which each of the y-values are defined.
y : numpy.array
Array containing the input values.
function : string or function
String containing the function against which the CCF wants to be computed. Default is 'gaussian'; can also be 'double gaussian'. Alternatively it can be
a function of choice that needs to be able to be evaluated at `x`.
parameters : list
Parameters for the input function. For the gaussian, the first item identifies the mean, the second the standard deviation. For the double gaussian, the first two
are the mean and standard deviation of the first gaussian, the last two are the mean and standard deviation of the second gaussian. Default is None, in which case
        the 'gaussian' is set to mean 0 and standard deviation of 1, and for the 'double gaussian', the standard deviations are also 1, but the mean of the first and second
gaussians are set to -7.9 and +7.9 --- these are consistent with the distance between SOSS' horns.
pixelation : bool
Boolean deciding whether to apply pixelation effects (i.e., integrating function over a pixel)
lag_step : double
Steps used to lag the `function` along all the input x-values. Default is 0.001.
Returns
-------
ccf : numpy.array
Array containing the cross-correlation function between y and the selected function
"""
# Create array of lags:
lags = np.arange(np.min(x), np.max(x), lag_step)
# Define which functions to use. All are coded in C (see c-codes/Utilities/CCF.c)
if type(function) is str:
if function == 'gaussian':
if parameters is None:
mean, sigma = 0., 1.
else:
mean, sigma = parameters
ccf = CCF.Gaussian(x.astype('double'), y.astype('double'), lags.astype('double'), len(x), len(lags), mean, sigma)
elif function == 'double gaussian':
if parameters is None:
mean1, sigma1, mean2, sigma2 = -7.9, 1., 7.9, 1.
else:
mean1, sigma1, mean2, sigma2 = parameters
ccf = CCF.DoubleGaussian(x.astype('double'), y.astype('double'), lags.astype('double'), len(x), len(lags), mean1, sigma1, mean2, sigma2)
else:
raise Exception('Function '+function+' not available for CCF. Try "gaussian", "double gaussian" or define your own function as input.')
else:
# Create matrix of dimensions [len(lags), len(x)]; each row contains x - lags[i]:
all_lags = np.tile( x.astype('double'), (len(lags), 1) )
all_lags = (all_lags.transpose() - lags).transpose()
# Evaluate input function at those lags:
evaluated_function = function(all_lags)
# Compute CCF in C:
ccf = CCF.AnyFunction(y.astype('double'), evaluated_function.flatten(), len(x), len(lags))
return lags, ccf
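# Hypothetical usage sketch of get_ccf (added for clarity; not part of the original module).
# It assumes the compiled `CCF` extension used above is importable and that `np` is numpy.
# A synthetic gaussian profile is cross-correlated against a gaussian template; the lag at
# which the CCF peaks recovers the (sub-pixel) center of the profile.
def _demo_get_ccf():
    ypix = np.arange(41).astype('double')
    profile = gaussian(ypix, mean=20.3, sigma=2.)
    lags, ccf = get_ccf(ypix, profile, function='gaussian',
                        parameters=[0., 2.], lag_step=0.01)
    return lags[np.argmax(ccf)]  # expected to be close to 20.3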
def trace_spectrum(image, dqflags, xstart, ystart, profile_radius=20, correct_outliers = False, nsigma = 100, median_filter_radius = 5, method = 'ccf', ccf_function = 'gaussian', ccf_parameters = None, ccf_step = 0.001, gaussian_filter = False, gauss_filter_width=10, xend=None, y_tolerance = 2, verbose = False):
"""
Function that non-parametrically traces spectra. There are various methods to trace the spectra. The default method is `ccf`, which performs cross-correlation
to find the trace positions given a user-specified function (default is 'gaussian'; can also be 'double gaussian' or a user-specified function). Tracing happens from columns
`xstart` until `xend` --- default for `xend` is `0`.
Parameters
----------
image: numpy.array
The image that wants to be traced.
dqflags: ndarray
The data quality flags for each pixel in the image. Only pixels with DQ flags of zero will be used
in the tracing.
xstart: float
The x-position (column) on which the tracing algorithm will be started
ystart: float
The estimated y-position (row) of the center of the trace. An estimate within a few pixels is enough (defined by y_tolerance).
profile_radius: float
Expected radius of the profile measured from its center. Only this region will be used to estimate
the trace position of the spectrum.
correct_outliers : bool
        Decide whether to correct outliers on each column. If True, outliers are detected via a median filter.
nsigma : float
Median filters are applied to each column in search of outliers if `correct_outliers` is `True`. `nsigma` defines
how many n-sigma above the noise level the residuals of the median filter and the image should be considered outliers.
median_filter_radius : int
Radius of the median filter in case `correct_outliers` is `True`. Needs to be an odd number. Default is `5`.
method : string
Method by which the tracing is expected to happen. Default is `ccf`; can also be `centroid`, which will use the centroid of each column
to estimate the center of the trace.
ccf_function : string or function
Function to cross-correlate cross-dispersion profiles against. Default is `gaussian` (useful for most instruments) --- can also be `double gaussian` (useful for
e.g., NIRISS/SOSS --- double gaussian separation tailored to that instrument). Alternatively, a function can be passed directly --- this function needs to be
evaluated at a set of arrays `x`, and be centered at `x=0`.
ccf_parameters : list
Parameters of the function against which data will be CCF'ed. For details, see the get_ccf function; default is None, which defaults to the get_ccf defaults.
ccf_step : double
        Step at which the CCF will run. The smaller the step, the more accurate, but also the slower the CCF method is. Default is `0.001`.
gaussian_filter : bool
Flag that defines if each column will be convolved with a gaussian filter (good to smooth profile to match a gaussian better). Default is `False`.
gauss_filter_width : float
Width of the gaussian filter used to perform the centroiding of the first column, if `gaussian_filter` is `True`.
xend: int
        x-position at which tracing ends. If None, trace all the columns to the left of xstart.
y_tolerance: float
        When tracing, if the difference between the trace positions at two contiguous columns is larger than this,
        then assume tracing failed (e.g., cosmic ray).
verbose: boolean
If True, print error messages.
Returns
-------
x : numpy.array
Columns at which the trace position is being calculated.
y : numpy.array
Estimated trace position.
"""
# Define x-axis:
if xend is not None:
if xend < xstart:
x = np.arange(xend, xstart + 1)
indexes = range(len(x))[::-1]
direction = 'left'
else:
x = np.arange(xstart, xend + 1)
indexes = range(len(x))
direction = 'right'
else:
x = np.arange(0, xstart + 1)
# Define y-axis:
y = np.arange(image.shape[0])
# Define status of good/bad for each trace position:
status = np.full(len(x), True, dtype=bool)
# Define array that will save trace at each x:
ytraces = np.zeros(len(x))
first_time = True
for i in indexes:
xcurrent = x[i]
# Perform median filter to identify nasty (i.e., cosmic rays) outliers in the column:
mf = median_filter(image[:,xcurrent], size = median_filter_radius)
if correct_outliers:
residuals = mf - image[:,xcurrent]
mad_sigma = get_mad_sigma(residuals)
column_nsigma = np.abs(residuals) / mad_sigma
else:
column_nsigma = np.zeros(image.shape[0]) * nsigma
# Extract data-quality flags for current column; index good pixels --- mask nans as well:
idx_good = np.where((dqflags[:, xcurrent] == 0) & (~np.isnan(image[:, xcurrent]) & (column_nsigma < nsigma)))[0]
idx_bad = np.where(~((dqflags[:, xcurrent] == 0) & (~np.isnan(image[:, xcurrent]) & (column_nsigma < nsigma))))[0]
if len(idx_good) > 0:
# Replace bad values with the ones in the median filter:
column_data = np.copy(image[:, xcurrent])
column_data[idx_bad] = mf[idx_bad]
if gaussian_filter:
# Convolve column with a gaussian filter; remove median before convolving:
filtered_column = gaussian_filter1d(column_data - \
np.median(column_data), gauss_filter_width)
else:
filtered_column = column_data - np.median(column_data)
# Find trace depending on the method, only within pixels close to profile_radius:
idx = np.where(np.abs(y - ystart) < profile_radius)[0]
if method == 'ccf':
# Run CCF search using only the pixels within profile_radius:
lags, ccf = get_ccf(y[idx], filtered_column[idx], function = ccf_function, parameters = ccf_parameters, lag_step = ccf_step)
                idx_max = np.where(ccf == np.max(ccf))[0]
# NNJ flexible node numbers & option for 1 or 2 hidden layers
import numpy as np
J = 6 ; M = 6 # numbers of nodes in the hidden layers 1 and 2 (set M to zero for just one hidden layer)
def sigmoid(f): # but other activation functions can be tried
return 1.0/(1+ np.exp(-f))
def sigmderiv(z): # derivative of above function but note that z is the value sigmoid(f) not the sigmoid argument f
return z * (1.0 - z)
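# Quick illustrative check (added for clarity; not part of the original script): the analytic
# derivative returned by sigmderiv should agree with a finite-difference derivative of sigmoid.
def _check_sigmderiv(f=0.3, h=1e-6):
    analytic = sigmderiv(sigmoid(f))
    numeric = (sigmoid(f + h) - sigmoid(f - h)) / (2 * h)
    return abs(analytic - numeric)  # expected to be ~0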
class NeuralNetwork:
def __init__(self, x, y):
if M > 0: # ie if there is a second hidden layer
self.z3ea = x # input
self.w32am = np.random.rand(A,M) * 0.1 # initial weights for layer 3 to layer 2 (hidden)
self.w21mj = np.random.rand(M,J) * 0.1 # initial weights for layer 2 to layer 1 (hidden)
else:
            self.z2em = x # input, although the index m plays the role of a here (when M=0)
self.w21mj = np.random.rand(A,J) * 0.1 # initial weights for layer 2 to layer 1 (hidden)
self.w10jp = np.random.rand(J,P) * 0.1 # initial weights for layer 1 to layer 0 (output)
self.t0ep = y # Target training results of e training cases for layer 0 (output): p values each
def feedforward(self):
if M > 0:
self.z2em = sigmoid(np.dot(self.z3ea, self.w32am)) # = sigmoid(f2em)
self.z1ej = sigmoid(np.dot(self.z2em, self.w21mj)) # = sigmoid(f1ej)
self.z0ep = sigmoid(np.dot(self.z1ej, self.w10jp)) # = sigmoid(f0ep)
def backprop(self):
dEp = (self.t0ep - self.z0ep) * sigmderiv(self.z0ep) # = -dE/df0ep
self.w10jp += np.dot(self.z1ej.T, dEp)
dEj = np.dot(dEp, self.w10jp.T) * sigmderiv(self.z1ej) # = -de/df1ej
self.w21mj += np.dot(self.z2em.T, dEj)
if M > 0:
dEm = np.dot(dEj,self.w21mj.T)*sigmderiv(self.z2em) # = -dE/df2em
self.w32am += np.dot(self.z3ea.T, dEm)
def singlerun(self,zInput):
if M > 0:
z2m = sigmoid(np.dot(zInput, self.w32am))
z1j = sigmoid(np.dot(z2m, self.w21mj))
else:
            z1j = sigmoid(np.dot(zInput, self.w21mj))
import pickle
import mnist
import seaborn as sns
import mpmath as mp
import numpy as np
from numpy.testing import assert_array_almost_equal as aae
from matplotlib import pyplot as plt
from scipy.spatial.distance import cdist
from scipy.stats import norm
from scipy.stats import multivariate_normal as mn
import sklearn
from sklearn.utils import check_random_state
from sklearn.utils.extmath import row_norms
import sklearn.cluster
from sklearn.decomposition import PCA
from sklearn.metrics import silhouette_score
#import umap
from _k_init import _k_init
from unionfind import UnionFind
COLORS = [
'#cc5151', '#51cccc', '#337f7f', '#8ecc51', '#7f3333', '#597f33', '#8e51cc',
'#59337f', '#ccad51', '#7f6c33', '#51cc70', '#337f46', '#5170cc', '#33467f',
'#cc51ad', '#7f336c', '#cc7f51', '#7f4f33', '#bccc51', '#757f33', '#60cc51',
'#3c7f33', '#51cc9e', '#337f62', '#519ecc', '#33627f', '#6051cc', '#3c337f'
]
def init_centers(X, n_clusters):
"""
Run k-means++ to initialize centroids.
Since we will be comparing to k-means, it's fair for both methods to have
the same initialization method.
Taken from scikit-learn.
"""
random_state = check_random_state(None)
n_samples = X.shape[0]
x_squared_norms = row_norms(X, squared=True)
centers = _k_init(
X, n_clusters,
random_state=random_state,
x_squared_norms=x_squared_norms)
return centers
class GMM:
def __init__(self, n_clusters, n_steps, eps=1e-20):
self.n_clusters = n_clusters
self.n_steps = n_steps
self.eps = eps
def _initialize(self):
"""
Initializes self.alpha, self.mu, self.sigma, self.w
"""
self.alpha = np.ones((self.n_clusters)) / self.n_clusters
self.mu = self.X[np.random.choice(np.arange(self.n), self.n_clusters)]
self.sigma = np.ones((self.n_clusters, self.d))
self.chunklet_w = np.zeros((self.n_chunklets, self.n_clusters))
#centers = init_centers(X, self.n_clusters)
#dists = cdist(X, centers)
#labels = np.argmin(dists, axis=1)
#unq_labels, self.alpha = np.unique(labels, return_counts=True)
#self.alpha = np.zeros(self.n_clusters)
#self.mu = np.zeros((self.n_clusters, d))
# Using diagonal variance
#self.sigma = np.zeros((self.n_clusters, d))
# for i, lbl in enumerate(unq_labels):
# cur_pts = np.where(labels == lbl)
# self.alpha[i] = cur_pts[0].shape[0]
# # initialize means
# self.mu[i, :] = np.mean(X[cur_pts], axis=0)
# centered = (X[cur_pts] - self.mu[i])**2
# centered = np.sum(centered, axis=0) / centered.shape[0]
# # initialize vars
# self.sigma[i, :] = self.alpha[i] * centered
#self.alpha /= n
# self._validate_sigma()
#self.chunklet_w = np.zeros((self.chunklets.shape[0], self.n_clusters))
def _transitive_closure(self):
self.uf = UnionFind(np.arange(self.n))
for link in self.ml:
self.uf.union(link[0], link[1])
self.chunklets = np.array(
[np.array(list(i)) for i in self.uf.components()])
self.n_chunklets = self.chunklets.shape[0]
self.chunklet_shapes = np.array([i.shape[0] for i in self.chunklets])
self.chunklet_shapes = self.chunklet_shapes.reshape(-1, 1)
self.chunklet_means = np.array(
[np.mean(self.X[i], axis=0) for i in self.chunklets])
assert self.chunklet_means.shape == (self.n_chunklets, self.d)
def fit(self, X, ml):
self.n = X.shape[0]
self.d = X.shape[1]
self.X = X.copy()
self.ml = ml.copy()
self._transitive_closure()
self._initialize()
self.scores = []
self.lls = []
for step in range(self.n_steps):
self.e_step()
self.m_step()
self.scores.append(self.score())
self.lls.append(self.ll)
print(f"Step {step+1} :: LL {self.ll} :: Score {self.scores[-1]}")
if len(self.lls) >= 2 and np.abs(self.lls[-1] - self.lls[-2]) < 1e-2:
print("Converged")
break
def get_labels(self):
chunk_labels = np.argmax(self.chunklet_w, axis=1).astype(np.int)
labels = np.zeros(self.n)
for i, chunk in enumerate(self.chunklets):
labels[chunk] = chunk_labels[i]
return labels.astype(np.int)
def llhood(self):
ll = 0
for i, chunklet in enumerate(self.chunklets):
for j in range(self.n_clusters):
numerator = mn.pdf(
self.X[chunklet], self.mu[j], np.diag(self.sigma[j]))
ll += np.sum(np.log(numerator + self.eps), axis=0) *\
self.chunklet_w[i,j]
ll += np.log(self.alpha[j] + self.eps) * self.chunklet_w[i,j]
return ll
def e_step(self):
self.ll = 0
for i, chunklet in enumerate(self.chunklets):
denominator = 0
numerators = []
for j in range(self.n_clusters):
numerator = mn.pdf(
self.X[chunklet], self.mu[j], np.diag(self.sigma[j]))
self.ll += np.sum(np.log(numerator + self.eps), axis=0) *\
self.chunklet_w[i,j]
self.ll += np.log(self.alpha[j] + self.eps) *\
self.chunklet_w[i,j]
numerator = np.prod(numerator, axis=0)
numerator *= self.alpha[j]
denominator += numerator
self.chunklet_w[i, j] = numerator
self.chunklet_w[i, :] /= (denominator + self.eps)
#assert np.abs(self.chunklet_w[i, :].sum() - 1) < eps,\
# np.abs(self.chunklet_w[i, :].sum())
def m_step(self):
self.alpha = self.chunklet_w.sum(axis=0) / self.n_chunklets
for j in range(self.n_clusters):
den = 0
temp_mu = np.zeros((1, self.d))
numfrac = self.chunklet_w[:, j, np.newaxis] * self.chunklet_shapes
den = np.sum(numfrac, axis=0, keepdims=True)
temp_mu = np.sum(self.chunklet_means * numfrac, axis=0)
self.mu[j] = temp_mu / den
diff_sq = (self.X - self.mu[j])**2
            temp_sigma = np.zeros((1, self.d))
import numpy
import numpy as np
import scipy
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import lal
import lalsimulation
from lal.lal import PC_SI as LAL_PC_SI
import h5py
import warnings
import random
# Calculating the projection of complex vector v on complex vector u
def proj(u, v):
# notice: this algrithm assume denominator isn't zero
return u * numpy.vdot(v,u) / numpy.vdot(u,u)
# Calculating the normalized residual (= a new basis) of a vector vec from known bases
def gram_schmidt(bases, vec):
for i in numpy.arange(0,len(bases)):
vec = vec - proj(bases[i], vec)
return vec/numpy.sqrt(numpy.vdot(vec,vec)) # normalized new basis
# Calculating overlap of two waveforms
def overlap_of_two_waveforms(wf1, wf2):
wf1norm = wf1/numpy.sqrt(numpy.vdot(wf1,wf1)) # normalize the first waveform
wf2norm = wf2/numpy.sqrt(numpy.vdot(wf2,wf2)) # normalize the second waveform
diff = wf1norm - wf2norm
#overlap = 1 - 0.5*(numpy.vdot(diff,diff))
overlap = numpy.real(numpy.vdot(wf1norm, wf2norm))
return overlap
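# Illustrative sketch (added for clarity; not part of the original script): building an
# orthonormal set from a few random complex "waveforms" with the helpers above. Function and
# variable names here are hypothetical; only numpy is assumed.
def _demo_gram_schmidt(n_samples=64, n_vectors=3, seed=0):
    rng = numpy.random.RandomState(seed)
    vectors = rng.randn(n_vectors, n_samples) + 1j * rng.randn(n_vectors, n_samples)
    bases = [vectors[0] / numpy.sqrt(numpy.vdot(vectors[0], vectors[0]))]
    for vec in vectors[1:]:
        bases.append(gram_schmidt(bases, vec))
    # Distinct basis vectors should now be numerically orthogonal:
    return abs(numpy.vdot(bases[0], bases[1]))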
def spherical_to_cartesian(sph):
x = sph[0]*numpy.sin(sph[1])*numpy.cos(sph[2])
y = sph[0]*numpy.sin(sph[1])*numpy.sin(sph[2])
z = sph[0]*numpy.cos(sph[1])
car = [x,y,z]
return car
def get_m1m2_from_mcq(mc, q):
m2 = mc * q ** (-0.6) * (1+q)**0.2
m1 = m2 * q
return numpy.array([m1,m2])
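# Illustrative consistency check (added for clarity; not part of the original script):
# converting (chirp mass, mass ratio) to component masses and back should recover the inputs.
def _check_mcq_roundtrip(mc=1.188, q=1.3):
    m1, m2 = get_m1m2_from_mcq(mc, q)
    mc_back = (m1 * m2)**0.6 / (m1 + m2)**0.2
    return abs(mc_back - mc), abs(m1 / m2 - q)  # both expected to be ~0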
def generate_a_waveform(m1, m2, spin1, spin2, ecc, lambda1, lambda2, iota, phiRef, distance, deltaF, f_min, f_max, waveFlags, approximant):
test_mass1 = m1 * lal.lal.MSUN_SI
test_mass2 = m2 * lal.lal.MSUN_SI
lalsimulation.SimInspiralWaveformParamsInsertTidalLambda1(waveFlags, lambda1)
lalsimulation.SimInspiralWaveformParamsInsertTidalLambda2(waveFlags, lambda2)
[plus_test, cross_test]=lalsimulation.SimInspiralChooseFDWaveform(test_mass1, test_mass2, spin1[0], spin1[1], spin1[2], spin2[0], spin2[1], spin2[2], distance, iota, phiRef, 0, ecc, 0, deltaF, f_min, f_max, 0, waveFlags, approximant)
hp = plus_test.data.data
hp_test = hp[numpy.int(f_min/deltaF):numpy.int(f_max/deltaF)]
return hp_test
def generate_a_waveform_from_mcq(mc, q, spin1, spin2, ecc, lambda1, lambda2, iota, phiRef, distance, deltaF, f_min, f_max, waveFlags, approximant):
m1,m2 = get_m1m2_from_mcq(mc,q)
test_mass1 = m1 * lal.lal.MSUN_SI
test_mass2 = m2 * lal.lal.MSUN_SI
lalsimulation.SimInspiralWaveformParamsInsertTidalLambda1(waveFlags, lambda1)
lalsimulation.SimInspiralWaveformParamsInsertTidalLambda2(waveFlags, lambda2)
[plus_test, cross_test]=lalsimulation.SimInspiralChooseFDWaveform(test_mass1, test_mass2, spin1[0], spin1[1], spin1[2], spin2[0], spin2[1], spin2[2], distance, iota, phiRef, 0, ecc, 0, deltaF, f_min, f_max, 0, waveFlags, approximant)
hp = plus_test.data.data
    hp_test = hp[numpy.int(f_min/deltaF):numpy.int(f_max/deltaF)]
    return hp_test
import numpy as np
from sklearn import model_selection, metrics
from .containers import Data
from .querystrategies import QueryStrategy, SimpleMargin
from .initializations import LDSCentrality as LDS
class ActiveLearningModel(object):
def __init__(self, classifier, query_strategy, eval_metric="auc",
U_proportion=0.9, init_L="random", random_state=None):
'''
:param sklearn.base.BaseEstimator classifier: Classifier to
build the model.
:param QueryStrategy query_strategy: QueryStrategy instance to use.
:param str eval_metric: One of "auc", "accuracy".
:param float U_proportion: proportion of training data to be assigned
the unlabeled set.
:param str init_L: How to initialize L: "random" or "LDS".
:param int random_state: Sets the random_state parameter
of train_test_split.
'''
self.__check_args(classifier, query_strategy, U_proportion)
self.classifier = classifier
self.query_strategy = query_strategy
self.eval_metric = eval_metric
self.U_proportion = U_proportion
self.init_L = init_L
self.random_state = random_state
self.L = Data() # Labeled data.
self.U = Data() # Unlabeled data.
self.T = Data() # Test data.
self.classes = None
def __check_args(self, classifier, query_strategy, U_proportion):
if not isinstance(query_strategy, QueryStrategy):
raise ValueError("query_strategy must be an instance of QueryStrategy.") # noqa
if not 0 < U_proportion < 1:
raise ValueError("U_proportion must be in range (0,1) exclusive. Got {}." # noqa
.format(U_proportion))
if isinstance(query_strategy, SimpleMargin):
if not hasattr(classifier, "decision_function"):
raise ValueError("{} compatible only with discriminative models." # noqa
.format(str(query_strategy)))
def _random_init(self, X, y, U_size):
"""
Initialize the labeled set at random.
:param np.array X: feature matrix
:param np.array y: label vector
:param int U_size: The number of samples to keep unlabeled.
:returns tuple of labeled X, unlabeled X, labeled y, unlabeled y
:rtype tuple(np.array, np.array, np.array, np.array)
"""
split = model_selection.train_test_split(X, y, test_size=U_size,
random_state=self.random_state) # noqa
return split
def _LDS_init(self, X, y, U_size):
"""
Initialize the labeled set using local density score (LDS) sampling.
:param np.array X: feature matrix
:param np.array y: label vector
:param int U_size: The number of samples to keep unlabeled.
:returns tuple of labeled X, unlabeled X, labeled y, unlabeled y
:rtype tuple(np.array, np.array, np.array, np.array)
"""
k = 10
idxs = LDS(k=k, threshold="auto").find_centers(X, y)
mask = np.zeros(X.shape[0], dtype=bool)
mask[idxs] = True
Lx = X[mask, ]
Ux = X[np.logical_not(mask), ]
Ly = y[mask]
Uy = y[np.logical_not(mask), ]
return Lx, Ux, Ly, Uy
def prepare_data(self, train_X, test_X, train_y, test_y):
'''
Splits data into unlabeled, labeled, and test sets
according to self.U_proportion.
:param np.array train_X: Training data features.
:param np.array test_X: Test data features.
:param np.array train_y: Training data labels.
:param np.array test_y: Test data labels.
'''
U_size = int(np.ceil(self.U_proportion * train_X.shape[0]))
if not 0 < U_size < train_X.shape[0]:
raise ValueError("U_proportion must result in non-empty labeled and unlabeled sets.") # noqa
if train_X.shape[0] - U_size <= 1:
raise ValueError("U_proportion must result in a labeled set with > 1 members.") # noqa
if self.init_L == "random":
split = self._random_init(train_X, train_y, U_size)
elif self.init_L == "LDS":
split = self._LDS_init(train_X, train_y, U_size)
self.L.X, self.U.X, self.L.y, self.U.y = split
self.T.X = test_X
self.T.y = test_y
def update_labels(self):
'''
Gets the chosen index from the query strategy,
adds the corresponding data point to L and removes
it from U. Logs which instance is picked from U.
:returns: chosen x and y, for use with partial_train()
:rtype: tuple(numpy.ndarray, numpy.ndarray)
'''
index = self.query_strategy.query(self.U, self.L, self.classifier)
chosen_x = self.U.X[index]
chosen_y = np.array([self.U.y[index]])
self.L.y = np.append(self.L.y, chosen_y, axis=0)
self.L.X = np.vstack((self.L.X, chosen_x))
self.U.X = np.delete(self.U.X, index, axis=0)
self.U.y = np.delete(self.U.y, index, axis=0)
return chosen_x.reshape(1, -1), chosen_y
def train(self):
'''
Trains the classifier on L.
'''
self.classifier.fit(self.L.X, self.L.y)
def partial_train(self, new_x, new_y):
'''
Given a subset of training examples, calls partial_fit.
:param numpy.ndarray new_x: Feature array.
:param numpy.ndarray new_y: Label array.
'''
if self.classes is None:
            self.classes = np.unique(self.U.y)
import numpy as np
import scipy.sparse.linalg as sp_linalg
from six import string_types
def recursive_nmu(array, r=None, max_iter=5e2, tol=1e-3, downdate='minus',
init='svd'):
if r is None:
r = min(array.shape)
array = array.copy()
factors = []
for k in range(r):
u, v = nmu_admm(array, max_iter, tol, init=init)
if np.count_nonzero(u) == 0 or np.count_nonzero(v) == 0:
break
factors.append((u, v))
if k == r - 1:
continue
if downdate == 'minus':
array = np.maximum(0, array - np.dot(u, v))
if downdate == 'hard-col' or downdate == 'hard-both':
array[:, np.squeeze(v > 0)] = 0
if downdate == 'hard-row' or downdate == 'hard-both':
array[np.squeeze(u > 0), :] = 0
if array.max() == 0:
break
return factors
def nmu(array, max_iter=5e2, tol=1e-3, init='svd', ret_errors=False):
u, v = _nmu_initialize(array, init=init)
u_old = u.copy()
v_old = v.copy()
mu = 0
# Alternating optimization
error_u = []
error_v = []
for k in range(int(max_iter)):
# updating mu:
if np.any(u > 0) and np.any(v > 0):
remainder = array - u.dot(v)
mu = np.maximum(0, mu - remainder / (k + 1))
else:
mu /= 2
u = u_old
v = v_old
u_old = u.copy()
v_old = v.copy()
# updating u, v:
aux = array - mu
u = np.maximum(0, aux.dot(v.T))
        u = np.maximum(0, u)
"""
Fusion.
"""
import math
import numpy as np
import sys
import os
sys.path.insert(1, os.path.realpath(__file__ + '../lib/'))
import utils
from common import Timer
import mcubes
import time
# For getting views.
watertight_render = __import__('2_1_watertight_render')
# Whether to use GPU or CPU for depth fusion.
use_gpu = True
if use_gpu:
import libfusiongpu as libfusion
from libfusiongpu import tsdf_gpu as compute_tsdf
else:
import libfusioncpu as libfusion
from libfusioncpu import tsdf_cpu as compute_tsdf
def fusion(depthmaps, Rs):
"""
Fuse the rendered depth maps.
:param depthmaps: depth maps
:type depthmaps: numpy.ndarray
:param Rs: rotation matrices corresponding to views
:type Rs: [numpy.ndarray]
:return: (T)SDF
:rtype: numpy.ndarray
"""
Ks = np.array([
[config['watertight_rendering']['focal_length_x'], 0, config['watertight_rendering']['principal_point_x']],
[0, config['watertight_rendering']['focal_length_y'], config['watertight_rendering']['principal_point_y']],
[0, 0, 1]
])
Ks = Ks.reshape((1, 3, 3))
Ks = np.repeat(Ks, len(depthmaps), axis=0).astype(np.float32)
Ts = []
for i in range(len(Rs)):
Rs[i] = Rs[i]
        Ts.append(np.array(config['watertight_rendering']['mesh_center']))
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2019 <NAME> <<EMAIL>>
#
# Distributed under terms of the GNU-License license.
"""
"""
import numpy as np
def jonswap(w, Hs, Tp):
""" JONSWAP wave spectrum, IEC 61400-3
w: ndarray of shape (n,), frequencies to be sampled at, rad/s
Hs: significant wave height, m
Tp: wave peak period, sec
"""
w = np.squeeze(w)
with np.errstate(divide='ignore'):
wp = 2*np.pi/Tp
gamma = 3.3
        sigma = 0.07 * np.ones(w.shape)
##
"""
The script implement three steps for refining the segmentation results based
on the fitted SSM.
Step 1: Close holes in regions dictated by the SSM
step 2: Remove unconnected components outside the span of the SSM
Step 3: Remove voxels that are further than the 95% of contour distances
Processed segmentation masks are saved in the
"./Results/stat_shape/seg_postprocess" folder.
Author: <NAME> <<EMAIL>>
Last updated: 2021-02-03
"""
import os
import nibabel as nib
import numpy as np
from scipy.ndimage import morphology
from skimage import measure
import collections
import time
## Help functions
def compute_dsc(A, B):
"""
Compute the DICE score of volumes A and B.
"""
numerator = np.sum(np.logical_and(A, np.where(B != 0, 1, 0)))
denominator = (np.sum(A.astype(bool)) + np.sum(B.astype(bool)))
return numerator * 200.0 / denominator
def getLargestCC(segmentation):
"""
Find the largest connected component in volume image.
"""
labels = measure.label(segmentation)
assert (labels.max() != 0) # assume at least 1 CC
largestCC = labels == np.argmax(np.bincount(labels.flat)[1:]) + 1
return largestCC
def surfd(input1, input2, sampling=1, connectivity=1):
"""
    Compute the Hausdorff distance of two image volumes.
"""
input_1 = np.atleast_1d(input1.astype(np.bool))
input_2 = np.atleast_1d(input2.astype(np.bool))
conn = morphology.generate_binary_structure(input_1.ndim, connectivity)
S = input_1 ^ morphology.binary_erosion(input_1, conn)
Sprime = input_2 ^ morphology.binary_erosion(input_2, conn)
dta = morphology.distance_transform_edt(~S, sampling)
dtb = morphology.distance_transform_edt(~Sprime, sampling)
sds = np.concatenate([np.ravel(dta[Sprime != 0]), np.ravel(dtb[S != 0])])
ha = np.multiply(dta, np.where(Sprime != 0, 1, 0))
hb = np.multiply(dtb, np.where(S != 0, 1, 0))
return sds, ha, hb
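# Illustrative sketch (added for clarity; not part of the original script): the helper
# functions above applied to two small synthetic binary masks that overlap almost completely.
def _demo_mask_metrics():
    A = np.zeros((20, 20, 20))
    A[5:15, 5:15, 5:15] = 1
    B = np.zeros((20, 20, 20))
    B[6:16, 5:15, 5:15] = 1
    dsc = compute_dsc(A, B)            # DICE score in percent (~90 here)
    sds, ha, hb = surfd(A, B, sampling=1)
    return dsc, sds.max()              # DICE and the Hausdorff distance (~1 voxel)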
## Data preparation
seg_labels_path = './Data/OpenKnee/seg/'
ssm_labels_path = './Results/stat_shape/'
gt_labels_path = './Data/OpenKnee/ground_truth/'
save_path = './Data/OpenKnee/seg/postprocess/'
seg_labels = [f for f in os.listdir(seg_labels_path) if f.endswith('.nii.gz')]
ssm_labels = [f for f in os.listdir(ssm_labels_path) if f.endswith('.nii.gz')]
gt_labels = [f for f in os.listdir(gt_labels_path) if f.endswith('.nii.gz')]
labels = {
1: 'femur',
2: 'femoral_cart',
3: 'tibia',
4: 'tibial_cart_med',
5: 'tibial_cart_lat',
}
## Main processing
f = open('Results/results.txt', 'w')
for subject in range(len(seg_labels)):
print(seg_labels[subject])
# Load SEGMENTAION mask
seg_img = nib.load(os.path.join(seg_labels_path + seg_labels[subject]))
seg_mask = seg_img.get_fdata()
# Load SSM FIT mask
ssm_img = nib.load(os.path.join(ssm_labels_path + ssm_labels[subject]))
ssm_mask = ssm_img.get_fdata()
# Load SSM Ground Truth (GT) mask
gt_img = nib.load(os.path.join(gt_labels_path + gt_labels[subject]))
gt_mask = gt_img.get_fdata()
hdr = seg_img.header
spacing = hdr.get_zooms()
affine = seg_img.affine
# Apply corrective filters
    Volume = np.zeros(hdr['dim'][1:4])
"""
The bpm module contains the Bpm class used to simulate the light propagation -
within low refractive index variation
and small angle (paraxial approximation) -
using the Beam Propagation Method.
This module was written by <NAME> during a master's
university course of the PAIP master of the Université de Lorraine,
under the supervision of Pr. <NAME>.
The bpm codes are mainly based on a compilation of MATLAB codes initially
developed by <NAME> during his PhD thesis [2],
and later modified at the FEMTO-ST institute of the Université de
Franche-Comté and at the LMOPS laboratory [3] of the
Université de Lorraine.
[1] <NAME>, in Fundamentals of Optical Waveguides,
2nd ed., edited by <NAME> (Academic, Burlington, 2006), pp. 329–397.
[2] "Generation et propagation de reseaux periodiques de solitons spatiaux
dans un milieu de kerr massif" PhD thesis, université de Franche-Comté 1998.
[3] <NAME> et. al., Broadband photonic transport between waveguides by
adiabatic elimination Phys. Rev. A, 97 023811 (2018).
"""
from math import pi, ceil, radians, sqrt, log, sin, cos, acos, asin, exp
import time
from scipy import special
from numpy.fft import fft, ifft, fftshift
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon
import numba
@numba.vectorize([numba.float64(numba.complex128),
numba.float32(numba.complex64)])
def abs2(x):
"""Square modulus of x. Fastest way possible for a numpy array."""
return x.real**2 + x.imag**2
class Bpm():
"""
The Bpm class is used to simulate light propagation -
within small refractive index variation guides
and small angle of propagation (paraxial) -
using the Beam Propagation Method.
Parameters
----------
no : float
Refractive index of the cladding.
lo : float
        Wavelength of the beam in vacuum (µm).
length_z : float
Size of the compute window over z (µm).
dist_z : float
Step over z (µm)
nbr_z_disp : int
Number of points to display over z.
length_x : float
Size of the compute window over x (µm).
dist_x : float
Step over x (µm)
"""
def __init__(self, no, lo,
length_z, dist_z, nbr_z_disp,
length_x, dist_x):
"""
The Bpm class is used to simulate light propagation -
within small refractive index variation guides
and small angle of propagation (paraxial) -
using the Beam Propagation Method.
Parameters
----------
no : float
Refractive index of the cladding
lo : float
            Wavelength of the beam in vacuum (µm).
length_z : float
Size of the compute window over z (µm).
dist_z : float
Step over z (µm).
nbr_z_disp : int
Number of points to display over z.
length_x : float
Size of the compute window over x (µm).
dist_x : float
Step over x (µm).
Notes
-----
This method creates the following variables within the class
:class:`Bpm`:
- All input variables.
- ko: the free space vector (1/µm).
"""
self.no = no
self.lo = lo
self.ko = 2*pi / self.lo # linear wave vector in free space (1/µm)
self.length_z = length_z
self.dist_z = dist_z
self.nbr_z_disp = nbr_z_disp
self.dist_x = dist_x
self.length_x = length_x
def create_x_z(self):
"""
        Create the x, z array and adjust the resolution variables.
Returns
-------
length_z : float
Corrected value due to nbr_z being an int.
nbr_z : int
Number of points computed over z.
nbr_z_disp : int
Corrected value due to pas being an int.
length_x : float
Corrected value due to nbr_x being an int.
nbr_x : int
Number of point over x (µm).
x : array
x values between [-length_x/2, length_x/2-dist_x] center on 0.
Notes
-----
This method creates the following variables within the class
:class:`Bpm`:
- pas : Interval of computed points between each displayed points.
"""
assert self.nbr_z_disp > 0
self.nbr_z = ceil(self.length_z / self.dist_z)
self.length_z = self.nbr_z * self.dist_z
self.pas = ceil(self.length_z / (self.nbr_z_disp * self.dist_z))
self.nbr_z_disp = ceil(self.length_z / (self.pas * self.dist_z))
self.nbr_z_disp += 1 # add 1 for the initial field
self.nbr_z += 1 # add 1 for the initial field
self.nbr_x = ceil(self.length_x / self.dist_x) # nbr points over x
# check if even number
if self.nbr_x % 2 != 0:
self.nbr_x += 1
# check if multiple of 8: speeds up execution
# (was also needed for a obsolete feature)
for _ in range(3):
if self.nbr_x % 8 != 0:
self.nbr_x += 2
else:
break
self.length_x = self.nbr_x * self.dist_x
self.x = np.linspace(-self.length_x/2,
self.length_x/2 - self.dist_x,
self.nbr_x)
return [self.length_z, self.nbr_z, self.nbr_z_disp-1,
self.length_x, self.nbr_x, self.x]
# Guides #
def squared_guide(self, width):
"""
A lambda function than returns a centered rectangular shape.
return 1 if :math:`x >= -width/2` and :math:`x <= width/2`
else return 0.
Parameters
----------
width : float
Waveguide width.
"""
return lambda x: (x >= -width/2) & (x <= width/2)
def gauss_guide(self, width, gauss_pow=1):
"""
A lambda function than return a centered super-Gaussian shape.
:math:`e^{-(x/w)^{2P}}`
        The waist is defined as width/2 and corresponds to the 1/e
relative value.
See :func:`.example_guides_x` for more details.
Parameters
----------
width : float
Waveguide width (µm) at 1/e^2 intensity.
gauss_pow : int, optional
            Index of the super-gaussian guide with 1 being a regular gaussian
            guide and 4 being the conventional super-gaussian guide used to
describe realistic waveguides.
See on en.wikipedia.org/wiki/Gaussian_function
#Higher-order_Gaussian_or_super-Gaussian_function.
1 by Default.
"""
if width == 0:
return lambda x: 0
w = width / 2 # width is diameter and w is radius
        return lambda x: np.exp(-(x / w)**(2*gauss_pow))
"""This example simulates the start-up behavior of the squirrel cage induction motor connected to
an ideal three-phase grid. The state and action space is continuous.
Running the example will create a formatted plot that show the motor's angular velocity, the drive torque,
the applied voltage in three-phase abc-coordinates, and the measured current in field-oriented dq-coordinates.
"""
import numpy as np
import gym_electric_motor as gem
import matplotlib.pyplot as plt
def parameterize_three_phase_grid(amplitude, frequency, initial_phase):
"""This nested function allows to create a function of time, which returns the momentary voltage of the
three-phase grid.
The nested structure allows to parameterize the three-phase grid by amplitude(as a fraction of the DC-link voltage),
frequency (in Hertz) and initial phase (in degree).
"""
omega = frequency * 2 * np.pi # 1/s
phi = 2 * np.pi / 3 # phase offset
phi_initial = initial_phase * 2 * np.pi / 360
def grid_voltage(t):
u_abc = [
amplitude * np.sin(omega * t + phi_initial),
amplitude * np.sin(omega * t + phi_initial - phi),
amplitude * np.sin(omega * t + phi_initial + phi)
]
return u_abc
return grid_voltage
# Create the environment
env = gem.make(
# Choose the squirrel cage induction motor (SCIM) with continuous-control-set
"AbcCont-CC-SCIM-v0",
# Define the numerical solver for the simulation
ode_solver="scipy.ode",
# Define which state variables are to be monitored concerning limit violations
# "()" means, that limit violation will not necessitate an env.reset()
constraints=(),
# Set the sampling time
tau=1e-5
)
tau = env.physical_system.tau
limits = env.physical_system.limits
# reset the environment such that the simulation can be started
(state, reference) = env.reset()
# We define these arrays in order to save our simulation results in them
# Initial state and initial time are directly inserted
STATE = np.transpose(np.array([state * limits]))
TIME = np.array([0])
# Use the previously defined function to parameterize a three-phase grid with an amplitude of
# 80 % of the DC-link voltage and a frequency of 50 Hertz
f_grid = 50 # Hertz
u_abc = parameterize_three_phase_grid(amplitude=0.8, frequency=f_grid, initial_phase=0)
# Set a time horizon to simulate, in this case 60 ms
time_horizon = 0.06
step_horizon = int(time_horizon / tau)
for idx in range(step_horizon):
# calculate the time of this simulation step
time = idx * tau
# apply the voltage as given by the grid
(state, reference), reward, done, _ = env.step(u_abc(time))
# save the results of this simulation step
    STATE = np.append(STATE, np.transpose([state * limits]), axis=1)
# -*- coding: utf-8 -*-
from __future__ import (division, print_function, absolute_import,
unicode_literals)
"""
Small collection of robust statistical estimators based on functions from
<NAME> (Hughes STX) statistics library (called ROBLIB) that have
been incorporated into the AstroIDL User's Library. Function included are:
* biweightMean - biweighted mean estimator
* mean - robust estimator of the mean of a data set
* mode - robust estimate of the mode of a data set using the half-sample
method
* std - robust estimator of the standard deviation of a data set
* checkfit - return the standard deviation and biweights for a fit in order
to determine its quality
* linefit - outlier resistant fit of a line to data
* polyfit - outlier resistant fit of a polynomial to data
For the fitting routines, the coefficients are returned in the same order as
numpy.polyfit, i.e., with the coefficient of the highest power listed first.
For additional information about the original IDL routines, see:
http://idlastro.gsfc.nasa.gov/contents.html#C17
"""
import math
import numpy
__version__ = '0.4'
__revision__ = '$Rev$'
__all__ = ['biweightMean', 'mean', 'mode', 'std', 'checkfit', 'linefit', 'polyfit', '__version__', '__revision__', '__all__']
__iterMax = 25
__delta = 5.0e-7
__epsilon = 1.0e-20
#print("Note that for the outlier rejection, BisquareLimit=3.0 is used")
def biweightMean(inputData, axis=None, dtype=None):
"""
Calculate the mean of a data set using bisquare weighting.
Based on the biweight_mean routine from the AstroIDL User's
Library.
.. versionchanged:: 1.0.3
Added the 'axis' and 'dtype' keywords to make this function more
compatible with numpy.mean()
"""
if axis is not None:
fnc = lambda x: biweightMean(x, dtype=dtype)
y0 = numpy.apply_along_axis(fnc, axis, inputData)
else:
y = inputData.ravel()
if type(y).__name__ == "MaskedArray":
y = y.compressed()
if dtype is not None:
y = y.astype(dtype)
n = len(y)
closeEnough = 0.03*numpy.sqrt(0.5/(n-1))
diff = 1.0e30
nIter = 0
y0 = numpy.median(y)
deviation = y - y0
sigma = std(deviation)
if sigma < __epsilon:
diff = 0
while diff > closeEnough:
nIter = nIter + 1
if nIter > __iterMax:
break
uu = ((y-y0)/(6.0*sigma))**2.0
uu = numpy.where(uu > 1.0, 1.0, uu)
weights = (1.0-uu)**2.0
weights /= weights.sum()
y0 = (weights*y).sum()
deviation = y - y0
prevSigma = sigma
sigma = std(deviation, Zero=True)
if sigma > __epsilon:
diff = numpy.abs(prevSigma - sigma) / prevSigma
else:
diff = 0.0
return y0
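# Illustrative sketch (added for clarity; not part of the original library): the biweighted
# mean is barely affected by a handful of gross outliers, unlike the plain mean.
def _demo_biweightMean():
    data = numpy.concatenate([numpy.random.normal(10.0, 1.0, 1000),
                              numpy.full(20, 500.0)])
    return numpy.mean(data), biweightMean(data)   # ~19.6 versus ~10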
def mean(inputData, Cut=3.0, axis=None, dtype=None):
"""
Robust estimator of the mean of a data set. Based on the
resistant_mean function from the AstroIDL User's Library.
.. versionchanged:: 1.0.3
Added the 'axis' and 'dtype' keywords to make this function more
compatible with numpy.mean()
"""
if axis is not None:
fnc = lambda x: mean(x, dtype=dtype)
dataMean = numpy.apply_along_axis(fnc, axis, inputData)
else:
data = inputData.ravel()
if type(data).__name__ == "MaskedArray":
data = data.compressed()
if dtype is not None:
data = data.astype(dtype)
data0 = numpy.median(data)
maxAbsDev = numpy.median(numpy.abs(data-data0)) / 0.6745
if maxAbsDev < __epsilon:
maxAbsDev = (numpy.abs(data-data0)).mean() / 0.8000
cutOff = Cut*maxAbsDev
good = numpy.where( numpy.abs(data-data0) <= cutOff )
good = good[0]
dataMean = data[good].mean()
dataSigma = math.sqrt( ((data[good]-dataMean)**2.0).sum() / len(good) )
if Cut > 1.0:
sigmaCut = Cut
else:
sigmaCut = 1.0
if sigmaCut <= 4.5:
dataSigma = dataSigma / (-0.15405 + 0.90723*sigmaCut - 0.23584*sigmaCut**2.0 + 0.020142*sigmaCut**3.0)
cutOff = Cut*dataSigma
good = numpy.where( numpy.abs(data-data0) <= cutOff )
good = good[0]
dataMean = data[good].mean()
if len(good) > 3:
dataSigma = math.sqrt( ((data[good]-dataMean)**2.0).sum() / len(good) )
if Cut > 1.0:
sigmaCut = Cut
else:
sigmaCut = 1.0
if sigmaCut <= 4.5:
dataSigma = dataSigma / (-0.15405 + 0.90723*sigmaCut - 0.23584*sigmaCut**2.0 + 0.020142*sigmaCut**3.0)
dataSigma = dataSigma / math.sqrt(len(good)-1)
return dataMean
def mode(inputData, axis=None, dtype=None):
"""
Robust estimator of the mode of a data set using the half-sample mode.
.. versionadded: 1.0.3
"""
if axis is not None:
fnc = lambda x: mode(x, dtype=dtype)
dataMode = numpy.apply_along_axis(fnc, axis, inputData)
else:
# Create the function that we can use for the half-sample mode
def _hsm(data):
if data.size == 1:
return data[0]
elif data.size == 2:
return data.mean()
elif data.size == 3:
i1 = data[1] - data[0]
i2 = data[2] - data[1]
if i1 < i2:
return data[:2].mean()
                elif i2 < i1:
return data[1:].mean()
else:
return data[1]
else:
wMin = data[-1] - data[0]
                N = data.size//2 + data.size % 2
                for i in range(0, N):
w = data[i+N-1] - data[i]
if w < wMin:
wMin = w
j = i
return _hsm(data[j:j+N])
data = inputData.ravel()
if type(data).__name__ == "MaskedArray":
data = data.compressed()
if dtype is not None:
data = data.astype(dtype)
# The data need to be sorted for this to work
data = numpy.sort(data)
# Find the mode
dataMode = _hsm(data)
return dataMode
def std(inputData, Zero=False, axis=None, dtype=None):
"""
Robust estimator of the standard deviation of a data set.
Based on the robust_sigma function from the AstroIDL User's Library.
.. versionchanged:: 1.0.3
Added the 'axis' and 'dtype' keywords to make this function more
compatible with numpy.std()
"""
if axis is not None:
fnc = lambda x: std(x, dtype=dtype)
sigma = numpy.apply_along_axis(fnc, axis, inputData)
else:
data = inputData.ravel()
if type(data).__name__ == "MaskedArray":
data = data.compressed()
if dtype is not None:
data = data.astype(dtype)
if Zero:
data0 = 0.0
else:
data0 = numpy.median(data)
maxAbsDev = numpy.median(numpy.abs(data-data0)) / 0.6745
if maxAbsDev < __epsilon:
maxAbsDev = (numpy.abs(data-data0)).mean() / 0.8000
if maxAbsDev < __epsilon:
sigma = 0.0
return sigma
u = (data-data0) / 6.0 / maxAbsDev
u2 = u**2.0
good = numpy.where( u2 <= 1.0 )
good = good[0]
if len(good) < 3:
print("WARNING: Distribution is too strange to compute standard deviation")
sigma = -1.0
return sigma
numerator = ((data[good]-data0)**2.0 * (1.0-u2[good])**2.0).sum()
nElements = (data.ravel()).shape[0]
denominator = ((1.0-u2[good])*(1.0-5.0*u2[good])).sum()
sigma = nElements*numerator / (denominator*(denominator-1.0))
if sigma > 0:
sigma = math.sqrt(sigma)
else:
sigma = 0.0
return sigma
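# Illustrative sketch (added for clarity; not part of the original library): the robust sigma
# stays close to the true scatter even when a few points are wildly discrepant.
def _demo_robust_std():
    data = numpy.concatenate([numpy.random.normal(0.0, 2.0, 1000),
                              numpy.full(10, 100.0)])
    return numpy.std(data), std(data)   # inflated classical estimate versus ~2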
def checkfit(inputData, inputFit, epsilon, delta, BisquareLimit=3.0):
"""
Determine the quality of a fit and biweights. Returns a tuple
with elements:
0. Robust standard deviation analog
1. Fractional median absolute deviation of the residuals
2. Number of input points given non-zero weight in the calculation
3. Bisquare weights of the input points
4. Residual values scaled by sigma
This function is based on the rob_checkfit routine from the AstroIDL
User's Library.
"""
data = inputData.ravel()
fit = inputFit.ravel()
if type(data).__name__ == "MaskedArray":
data = data.compressed()
if type(fit).__name__ == "MaskedArray":
fit = fit.compressed()
deviation = data - fit
sigma = std(deviation, Zero=True)
if sigma < epsilon:
return (sigma, 0.0, 0, 0.0, 0.0)
toUse = (numpy.where( numpy.abs(fit) > epsilon ))[0]
if len(toUse) < 3:
fracDev = 0.0
else:
fracDev = numpy.median(numpy.abs(deviation[toUse]/fit[toUse]))
if fracDev < delta:
return (sigma, fracDev, 0, 0.0, 0.0)
biweights = numpy.abs(deviation)/(BisquareLimit*sigma)
toUse = (numpy.where(biweights > 1))[0]
if len(toUse) > 0:
biweights[toUse] = 1.0
nGood = len(data) - len(toUse)
scaledResids = (1.0 - biweights**2.0)
scaledResids = scaledResids / scaledResids.sum()
return (sigma, fracDev, nGood, biweights, scaledResids)
def linefit(inputX, inputY, iterMax=25, Bisector=False, BisquareLimit=6.0, CloseFactor=0.03):
"""
Outlier resistance two-variable linear regression function.
Based on the robust_linefit routine in the AstroIDL User's Library.
"""
xIn = inputX.ravel()
yIn = inputY.ravel()
if type(yIn).__name__ == "MaskedArray":
xIn = xIn.compress(numpy.logical_not(yIn.mask))
yIn = yIn.compressed()
n = len(xIn)
x0 = xIn.sum() / n
y0 = yIn.sum() / n
x = xIn - x0
y = yIn - y0
cc = numpy.zeros(2)
ss = numpy.zeros(2)
sigma = 0.0
yFit = yIn
badFit = 0
nGood = n
lsq = 0.0
yp = y
if n > 5:
s = numpy.argsort(x)
u = x[s]
v = y[s]
        nHalf = n//2 - 1
x1 = numpy.median(u[0:nHalf])
x2 = numpy.median(u[nHalf:])
y1 = numpy.median(v[0:nHalf])
y2 = numpy.median(v[nHalf:])
if numpy.abs(x2-x1) < __epsilon:
x1 = u[0]
x2 = u[-1]
y1 = v[0]
y2 = v[-1]
cc[1] = (y2-y1)/(x2-x1)
cc[0] = y1 - cc[1]*x1
yFit = cc[0] + cc[1]*x
sigma, fracDev, nGood, biweights, scaledResids = checkfit(yp, yFit, __epsilon, __delta)
if nGood < 2:
lsq = 1.0
if lsq == 1 or n < 6:
sx = x.sum()
sy = y.sum()
sxy = (x*y).sum()
sxx = (x*x).sum()
d = sxx - sx*sx
if numpy.abs(d) < __epsilon:
return (0.0, 0.0)
ySlope = (sxy - sx*sy) / d
yYInt = (sxx*sy - sx*sxy) / d
if Bisector:
syy = (y*y).sum()
d = syy - sy*sy
if numpy.abs(d) < __epsilon:
return (0.0, 0.0)
tSlope = (sxy - sy*sx) / d
tYInt = (syy*sx - sy*sxy) / d
if numpy.abs(tSlope) < __epsilon:
return (0.0, 0.0)
xSlope = 1.0/tSlope
xYInt = -tYInt / tSlope
if ySlope > xSlope:
a1 = yYInt
b1 = ySlope
r1 = numpy.sqrt(1.0+ySlope**2.0)
a2 = xYInt
b2 = xSlope
r2 = numpy.sqrt(1.0+xSlope**2.0)
else:
a2 = yYInt
b2 = ySlope
r2 = numpy.sqrt(1.0+ySlope**2.0)
a1 = xYInt
b1 = xSlope
r1 = numpy.sqrt(1.0+xSlope**2.0)
yInt = (r1*a2 + r2*a1) / (r1 + r2)
slope = (r1*b2 + r2*b1) / (r1 + r2)
r = numpy.sqrt(1.0+slope**2.0)
if yInt > 0:
r = -r
u1 = slope / r
u2 = -1.0/r
u3 = yInt / r
yp = u1*x + u2*y + u3
yFit = y*0.0
ss = yp
else:
slope = ySlope
yInt = yYInt
yFit = yInt + slope*x
cc[0] = yInt
cc[1] = slope
sigma, fracDev, nGood, biweights, scaledResids = checkfit(yp, yFit, __epsilon, __delta)
if nGood < 2:
cc[0] = cc[0] + y0 - cc[1]*x0
return cc[::-1]
sigma1 = (100.0*sigma)
closeEnough = CloseFactor * numpy.sqrt(0.5/(n-1))
if closeEnough < __delta:
closeEnough = __delta
diff = 1.0e20
nIter = 0
while diff > closeEnough:
nIter = nIter + 1
if nIter > iterMax:
break
sigma2 = sigma1
sigma1 = sigma
sx = (biweights*x).sum()
sy = (biweights*y).sum()
sxy = (biweights*x*y).sum()
sxx = (biweights*x*x).sum()
d = sxx - sx*sx
if numpy.abs(d) < __epsilon:
return (0.0, 0.0)
ySlope = (sxy - sx*sy) / d
yYInt = (sxx*sy - sx*sxy) / d
slope = ySlope
yInt = yYInt
if Bisector:
syy = (biweights*y*y).sum()
d = syy - sy*sy
if numpy.abs(d) < __epsilon:
return (0.0, 0.0)
tSlope = (sxy - sy*sx) / d
tYInt = (syy*sx - sy*sxy) / d
if numpy.abs(tSlope) < __epsilon:
return (0.0, 0.0)
xSlope = 1.0/tSlope
xYInt = -tYInt / tSlope
if ySlope > xSlope:
a1 = yYInt
b1 = ySlope
r1 = numpy.sqrt(1.0+ySlope**2.0)
a2 = xYInt
b2 = xSlope
r2 = numpy.sqrt(1.0+xSlope**2.0)
else:
a2 = yYInt
b2 = ySlope
r2 = numpy.sqrt(1.0+ySlope**2.0)
a1 = xYInt
b1 = xSlope
r1 = numpy.sqrt(1.0+xSlope**2.0)
yInt = (r1*a2 + r2*a1) / (r1 + r2)
slope = (r1*b2 + r2*b1) / (r1 + r2)
r = numpy.sqrt(1.0+slope**2.0)
if yInt > 0:
r = -r
u1 = slope / r
u2 = -1.0/r
u3 = yInt / r
yp = u1*x + u2*y + u3
yFit = y*0.0
ss = yp
else:
yFit = yInt + slope*x
cc[0] = yInt
cc[1] = slope
sigma, fracDev, nGood, biweights, scaledResids = checkfit(yp, yFit, __epsilon, __delta)
if nGood < 2:
badFit = 1
break
diff1 = numpy.abs(sigma1 - sigma)/sigma
diff2 = numpy.abs(sigma2 - sigma)/sigma
if diff1 < diff2:
diff = diff1
else:
diff = diff2
cc[0] = cc[0] + y0 - cc[1]*x0
return cc[::-1]
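# Illustrative sketch (added for clarity; not part of the original library): fitting a line
# through data contaminated by a few gross outliers. linefit returns [slope, intercept],
# following the numpy.polyfit ordering described in the module docstring.
def _demo_linefit():
    x = numpy.linspace(0.0, 10.0, 200)
    y = 3.0 * x + 1.0 + numpy.random.normal(0.0, 0.1, x.size)
    y[::25] += 50.0                    # contaminate a handful of points
    return linefit(x, y)               # expected to stay close to (3.0, 1.0)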
def polyfit(inputX, inputY, order, iterMax=25):
"""
Outlier resistance two-variable polynomial function fitter.
Based on the robust_poly_fit routine in the AstroIDL User's
Library.
Unlike robust_poly_fit, two different polynomial fitters are used
because numpy.polyfit does not support non-uniform weighting of the
data. For the weighted fitting, the SciPy Orthogonal Distance
Regression module (scipy.odr) is used.
"""
from scipy import odr
def polyFunc(B, x, order=order):
out = x*0.0
for i in range(order+1):
out = out + B[i]*x**i
model = odr.Model(polyFunc)
x = inputX.ravel()
y = inputY.ravel()
if type(y).__name__ == "MaskedArray":
x = x.compress(numpy.logical_not(y.mask))
y = y.compressed()
n = len(x)
x0 = x.sum() / n
y0 = y.sum() / n
u = x
v = y
nSeg = order + 2
if (nSeg//2)*2 == nSeg:
nSeg = nSeg + 1
minPts = nSeg*3
if n < 1000:
lsqFit = 1
cc = numpy.polyfit(u, v, order)
yFit = numpy.polyval(cc, u)
else:
        lsqFit = 0
q = numpy.argsort(u)
u = u[q]
v = v[q]
nPerSeg = numpy.zeros(nSeg, dtype=int) + n//nSeg
nLeft = n - nPerSeg[0]*nSeg
nPerSeg[nSeg//2] = nPerSeg[nSeg//2] + nLeft
r = numpy.zeros(nSeg)
s = numpy.zeros(nSeg)
r[0] = numpy.median(u[0:nPerSeg[0]])
s[0] = numpy.median(v[0:nPerSeg[0]])
i2 = nPerSeg[0]-1
for i in range(1,nSeg):
i1 = i2
i2 = i1 + nPerSeg[i]
r[i] = numpy.median(u[i1:i2])
s[i] = numpy.median(v[i1:i2])
cc = numpy.polyfit(r, s, order)
yFit = numpy.polyval(cc, u)
sigma, fracDev, nGood, biweights, scaledResids = checkfit(v, yFit, __epsilon, __delta)
if nGood == 0:
        return cc, numpy.nan
if nGood < minPts:
if lsqFit == 0:
cc = numpy.polyfit(u, v, order)
yFit = numpy.polyval(cc, u)
            sigma, fracDev, nGood, biweights, scaledResids = checkfit(v, yFit, __epsilon, __delta)
if nGood == 0:
return __processPoly(x0, y0, order, cc)
nGood = n - nGood
if nGood < minPts:
                return 0, numpy.nan
closeEnough = 0.03*numpy.sqrt(0.5/(n-1))
if closeEnough < __delta:
closeEnough = __delta
diff = 1.0e10
sigma1 = 100.0*sigma
nIter = 0
while diff > closeEnough:
nIter = nIter + 1
if nIter > iterMax:
break
sigma2 = sigma1
sigma1 = sigma
g = (numpy.where(biweights < 1))[0]
if len(g) < len(biweights):
u = u[g]
v = v[g]
biweights = biweights[g]
try:
## Try the fancy method...
data = odr.RealData(u, v, sy=1.0/biweights)
fit = odr.ODR(data, model, beta0=cc[::-1])
out = fit.run()
cc = out.beta[::-1]
except:
## And then give up when it doesn't work
cc = numpy.polyfit(u, v, order)
yFit = numpy.polyval(cc, u)
sigma, fracDev, nGood, biweights, scaledResids = checkfit(v, yFit, __epsilon, __delta)
if nGood < minPts:
            return cc, numpy.nan
diff1 = numpy.abs(sigma1 - sigma)/sigma
diff2 = numpy.abs(sigma2 - sigma)/sigma
if diff1 < diff2:
diff = diff1
else:
diff = diff2
return cc, sigma
import numpy as np
from scipy import optimize
def gaussian(x,A,x0,err,B):
return A * np.exp(-(x-x0)**2/(2.*err**2)) + B
def fit_gaussian(x,y,p0=None,yerr=None, **kwargs):
assert np.all(np.isfinite(x)) & np.all(np.isfinite(y))
if p0 is None:
        p0 = [np.max(y), np.median(x), (np.max(x)-np.min(x))/2., np.min(y)]
popt, pcov = optimize.curve_fit(gaussian, x, y, p0=p0,
bounds=([0, np.min(x), 0, 0],
[2*np.max(y), np.max(x), 3*(np.max(x)-np.min(x)), np.max(y)]),
sigma=yerr,
**kwargs)
return popt, pcov
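# Illustrative sketch (added for clarity; not part of the original module): recovering the
# parameters of a noisy gaussian line with fit_gaussian. The starting guess passed here is
# hypothetical and simply close to the true values.
def _demo_fit_gaussian(seed=0):
    rng = np.random.RandomState(seed)
    x = np.linspace(4000.0, 4010.0, 200)
    y = gaussian(x, A=5.0, x0=4005.0, err=0.5, B=1.0) + rng.normal(0.0, 0.05, x.size)
    popt, pcov = fit_gaussian(x, y, p0=[5.0, 4005.0, 0.5, 1.0])
    return popt   # approximately [5.0, 4005.0, 0.5, 1.0]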
def gfunc3(x, *theta):
z = (x-theta[1])/theta[2]
return theta[0] * np.exp(-z**2/2.)
def gfunc4(x, *theta):
z = (x-theta[1])/theta[2]
return theta[0] * np.exp(-z**2/2.) + theta[3]
def gfunc5(x, *theta):
z = (x-theta[1])/theta[2]
return theta[0] * np.exp(-z**2/2.) + theta[3] + theta[4]*x
def gfunc6(x, *theta):
z = (x-theta[1])/theta[2]
    return theta[0] * np.exp(-z**2/2.)
import logging
import numpy as np
import io
from typing import Dict
from PIL import Image
logger = logging.getLogger("mlagents.envs")
class BrainInfo:
def __init__(self, visual_observation, vector_observation, text_observations, memory=None,
reward=None, agents=None, local_done=None,
vector_action=None, text_action=None, max_reached=None, action_mask=None):
"""
Describes experience at current step of all agents linked to a brain.
"""
# list of np.ndarray, ndarray's shape = (agent_amount, height, width, 1)
self.visual_observations = visual_observation
# ndarray, shape = (agent_amount, vector_obs_size)
self.vector_observations = vector_observation
# Basically ignore it
self.text_observations = text_observations
# Basically ignore it
self.memories = memory
# list of agents' reward, len = agent_size
self.rewards = reward
# list of boolen, len = agent_size
self.local_done = local_done
# list of boolen, len = agent_size
self.max_reached = max_reached
# list of agent ID
self.agents = agents
# ndarray, shape = (agent_size, action_size), action_size = 1 for discrete action
self.previous_vector_actions = vector_action
# Basically ignore it
self.previous_text_actions = text_action
# Ignore it if not using action branching
self.action_masks = action_mask
@staticmethod
def process_pixels(image_bytes, gray_scale):
"""
Converts byte array observation image into numpy array, re-sizes it,
and optionally converts it to grey scale
:param gray_scale: Whether to convert the image to grayscale.
:param image_bytes: input byte array corresponding to image
:return: processed numpy array of observation from environment
"""
s = bytearray(image_bytes)
image = Image.open(io.BytesIO(s))
s = np.array(image) / 255.0
if gray_scale:
            s = np.mean(s, axis=2)
#!/usr/bin/env python3
''' Script to precompute image features using a Pytorch ResNet CNN, using 36 discretized views
at each viewpoint in 30 degree increments, and the provided camera WIDTH, HEIGHT
and VFOV parameters. '''
import os
import sys
import MatterSim
import argparse
import numpy as np
import json
import math
import h5py
import copy
from PIL import Image
import time
from progressbar import ProgressBar
import torch
import torch.nn.functional as F
import torch.multiprocessing as mp
from utils import load_viewpoint_ids
import timm
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
TSV_FIELDNAMES = ['scanId', 'viewpointId', 'image_w', 'image_h', 'vfov', 'features', 'logits']
VIEWPOINT_SIZE = 36 # Number of discretized views from one viewpoint
FEATURE_SIZE = 768
LOGIT_SIZE = 1000
WIDTH = 640
HEIGHT = 480
VFOV = 60
def build_feature_extractor(model_name, checkpoint_file=None):
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = timm.create_model(model_name, pretrained=(checkpoint_file is None)).to(device)
if checkpoint_file is not None:
state_dict = torch.load(checkpoint_file, map_location=lambda storage, loc: storage)['state_dict']
model.load_state_dict(state_dict)
model.eval()
config = resolve_data_config({}, model=model)
img_transforms = create_transform(**config)
return model, img_transforms, device
def build_simulator(connectivity_dir, scan_dir):
sim = MatterSim.Simulator()
sim.setNavGraphPath(connectivity_dir)
sim.setDatasetPath(scan_dir)
sim.setCameraResolution(WIDTH, HEIGHT)
sim.setCameraVFOV(math.radians(VFOV))
sim.setDiscretizedViewingAngles(True)
sim.setDepthEnabled(False)
sim.setPreloadingEnabled(False)
sim.setBatchSize(1)
sim.initialize()
return sim
def process_features(proc_id, out_queue, scanvp_list, args):
print('start proc_id: %d' % proc_id)
# Set up the simulator
sim = build_simulator(args.connectivity_dir, args.scan_dir)
# Set up PyTorch CNN model
torch.set_grad_enabled(False)
model, img_transforms, device = build_feature_extractor(args.model_name, args.checkpoint_file)
for scan_id, viewpoint_id in scanvp_list:
# Loop all discretized views from this location
images = []
for ix in range(VIEWPOINT_SIZE):
if ix == 0:
sim.newEpisode([scan_id], [viewpoint_id], [0], [math.radians(-30)])
elif ix % 12 == 0:
sim.makeAction([0], [1.0], [1.0])
else:
sim.makeAction([0], [1.0], [0])
state = sim.getState()[0]
assert state.viewIndex == ix
image = np.array(state.rgb, copy=True) # in BGR channel
image = Image.fromarray(image[:, :, ::-1]) #cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
images.append(image)
images = torch.stack([img_transforms(image).to(device) for image in images], 0)
fts, logits = [], []
for k in range(0, len(images), args.batch_size):
b_fts = model.forward_features(images[k: k+args.batch_size])
b_logits = model.head(b_fts)
b_fts = b_fts.data.cpu().numpy()
b_logits = b_logits.data.cpu().numpy()
fts.append(b_fts)
logits.append(b_logits)
        fts = np.concatenate(fts, 0)
from os import read
import os
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d
import matplotlib.pyplot as plt
import numpy as np
import datetime
import sys
from tqdm import tqdm
import cppsolver as cs
from ..solver import Solver, Solver_jac
from ..preprocess import Reading_Data, LM_data, LM_data_2mag
from ..filter import lowpass_filter, mean_filter, median_filter, Magnet_KF, Magnet_UKF, Magnet_KF_cpp
from ..preprocess import read_data
def ang_convert(x):
a = x//(2*np.pi)
result = x-a*(2*np.pi)
if result > np.pi:
result -= np.pi * 2
return result
def sigmoid(x):
return 1 / (1 + np.exp(-x))
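# Illustrative check (added for clarity; not part of the original script): ang_convert wraps
# any angle into (-pi, pi], which keeps the orientation parameters bounded during tracking.
def _check_ang_convert():
    angles = [0.1, np.pi + 0.1, -np.pi - 0.1, 7 * np.pi]
    return [ang_convert(a) for a in angles]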
def show_track_1mag_csv_cpp(reading_path, cali_path, gt_path, pSensor, My_M, use_kalman=False):
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.set_xlim([-10, 15])
ax.set_ylim([-10, 15])
ax.set_zlim([0, 25])
# ax.set_title("Reconstructed Magnet Position")
ax.set_xlabel('x(cm)')
ax.set_ylabel('y(cm)')
ax.set_zlabel('z(cm)')
# M_choice = [0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8]
# M_choice = [0.8, 1, 1.2, 1.4]
M_choice = [2]
reading_data = Reading_Data(data_path=reading_path, cali_path=cali_path)
data = reading_data.readings
lm_data = LM_data(gt_path)
# set the origin of the gt
lm_data.offset = np.array([-1.5614192, -0.31039926, 0.90800506])
result_parameter = []
color = ['r', 'b', 'g', 'y', 'm']
for index, M in enumerate(M_choice):
# model = Solver(1)
# model = Finexus_Solver(-5e-2, -5e-2, 8e-2)
pred_position = []
changingM = []
changingG = []
changingTheta = []
changingPhy = []
directions = []
SNR = []
cut = 5
starting_point = lm_data.get_gt(reading_data.tstamps[cut])[0]
if use_kalman:
kf_params = np.array([40 / np.sqrt(2) * 1e-6, 40 / np.sqrt(2) * 1e-6, 0, np.log(
My_M), 1e-2 * starting_point[0], 1e-2 * starting_point[1], 1e-2 * starting_point[2], 0, 0])
model = Magnet_KF_cpp(
1, pSensor, [0.8, 0.8, 1.5]*pSensor.shape[0], kf_params, dt=1/17, ord=3)
else:
params = np.array([40 / np.sqrt(2) * 1e-6, 40 / np.sqrt(2) * 1e-6, 0, np.log(
My_M), 1e-2 * starting_point[0], 1e-2 * starting_point[1], 1e-2 * starting_point[2], 0, 0])
params = np.array([40 / np.sqrt(2) * 1e-6, 40 / np.sqrt(2) * 1e-6, 0, np.log(
My_M), 1e-2 * (-2), 1e-2 * (2), 1e-2 * (20), 0, 0])
for i in tqdm(range(cut, data.shape[0] - cut)):
# fix m value and gx gy gz
datai = data[i].reshape(-1, 3)
if use_kalman:
model.predict()
result = model.update(datai)
else:
result = cs.solve_1mag(
datai.reshape(-1), pSensor.reshape(-1), params)
params = result.copy()
[x0, y0, z0, Gx, Gy, Gz] = [
result[4] * 1e2, result[5] * 1e2,
result[6] * 1e2, result[0],
result[1], result[2]
]
# [m, theta, phy] = [np.exp(result['m0'].value), np.pi * sigmoid(
# result['theta0'].value), np.pi * np.tanh(result['phy0'].value)]
[m, theta, phy, direction] = [
np.exp(result[3]),
ang_convert(result[7]),
ang_convert(result[8]),
np.array([np.sin(ang_convert(result[7]))*np.cos(ang_convert(result[8])),
np.sin(ang_convert(result[7]))*np.sin(ang_convert(result[8])), np.cos(ang_convert(result[7]))]),
]
# [x, y, z, m] = [result['X'].value*1e2, result['Y'].value*1e2,
# result['Z'].value*1e2, result['m'].value]
G = np.array([Gx, Gy, Gz])
noise = np.linalg.norm(G, 2)
signal = np.linalg.norm(datai - G, 2)
pred_position.append(x0)
pred_position.append(y0)
pred_position.append(z0)
changingM.append(m)
changingTheta.append(theta)
changingPhy.append(phy)
changingG.append([Gx, Gy, Gz])
directions.append(direction)
changingG = np.array(changingG)
changingM = np.array(changingM)
changingTheta = np.array(changingTheta)
changingPhy = np.array(changingPhy)
changingAng = np.stack([changingTheta, changingPhy], axis=0).T
directions = np.stack(directions, axis=0)
pred_position = np.array(pred_position).reshape(-1, 3)
compare_label = [' ', '(fixing G)']
ax.plot(pred_position[:, 0],
pred_position[:, 1],
pred_position[:, 2],
c=color[index % len(color)],
label='Magnet')
print(np.mean(pred_position, axis=0))
# sensor position
ax.scatter(1e2 * pSensor[:, 0],
1e2 * pSensor[:, 1],
1e2 * pSensor[:, 2],
c='r',
s=1,
alpha=0.5)
# calculate loss
gt_route = []
losses = {}
losses_count = {}
gt_directions = []
losses_angle = {}
losses_count_angle = {}
for i in range(pred_position.shape[0]):
# Get gt
gt = lm_data.get_gt(reading_data.tstamps[i + cut])
gt_pos = gt[0]
gt_route.append(gt_pos)
gt_direction = gt[1]
gt_directions.append(gt_direction)
# calculate loss
dis = np.linalg.norm(gt_pos - np.mean(pSensor, axis=0), 2)
loss1 = np.linalg.norm(gt_pos - pred_position[i], 2)
loss2 = np.arccos(np.dot(gt_direction, directions[i]))
# store route loss
if not dis in losses.keys():
losses[dis] = loss1
losses_count[dis] = 1
else:
losses[dis] += loss1
losses_count[dis] += 1
# store ang loss
if not dis in losses_angle.keys():
losses_angle[dis] = loss2
losses_count_angle[dis] = 1
else:
losses_angle[dis] += loss2
losses_count_angle[dis] += 1
gt_route = np.stack(gt_route, axis=0)
gt_directions = np.stack(gt_directions, axis=0)
ax.plot(gt_route[:, 0],
gt_route[:, 1],
gt_route[:, 2],
c='b',
alpha=0.5,
linewidth=2,
label='Ground Truth')
plt.legend()
# store the gt route and the reconstructed route
tmp = reading_path.split('/')
file_name = tmp[-1].split('.')[0] + '.npz'
tmp.pop(0)
tmp.pop(-1)
result_path = os.path.join('result', 'reconstruction_result', *tmp)
if not os.path.exists(result_path):
os.makedirs(result_path)
np.savez(os.path.join(result_path, file_name),
gt=gt_route,
result=pred_position, gt_ang=gt_directions, result_ang=directions, G=changingG)
fig5 = plt.figure()
plt.title("Reconstuct Loss")
plot_loss_data = []
for dis in sorted(losses.keys()):
plot_loss_data.append(dis)
plot_loss_data.append(losses[dis] / losses_count[dis])
plot_loss_data = np.array(plot_loss_data).reshape(-1, 2)
plt.plot(plot_loss_data[:, 0],
plot_loss_data[:, 1], label='Position loss')
plt.legend()
fig6 = plt.figure()
plt.title("Reconstuct angle Loss")
plot_loss_data = []
for dis in sorted(losses_angle.keys()):
plot_loss_data.append(dis)
plot_loss_data.append(losses_angle[dis] / losses_count_angle[dis])
plot_loss_data = np.array(plot_loss_data).reshape(-1, 2)
plt.plot(plot_loss_data[:, 0], plot_loss_data[:, 1], label='Ang loss')
plt.legend()
fig2 = plt.figure()
plt.title("Magnet Moment")
# plt.ylim(0, 10)
plt.plot(changingM, label='M')
plt.legend()
fig3 = plt.figure()
plt.title("G")
plt.plot(changingG[:, 0], label='Gx')
plt.plot(changingG[:, 1], label='Gy')
plt.plot(changingG[:, 2], label='Gz')
plt.legend()
fig4 = plt.figure()
plt.title("orientation")
plt.ylim(-5, 5)
plt.plot(changingTheta, label='theta')
plt.plot(changingPhy, label='phy')
plt.legend()
plt.show()
# plt.savefig("result/result.jpg", dpi=900)
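# Illustrative invocation (paths and sensor layout are placeholders, not from
# the original source; the CSV formats expected by Reading_Data and LM_data
# are assumed to match the project's own data files):
#
# _pSensor = np.array([[0.05, 0.05, 0.0], [-0.05, 0.05, 0.0],
#                      [0.05, -0.05, 0.0], [-0.05, -0.05, 0.0]])
# show_track_1mag_csv_cpp('data/reading.csv', 'data/cali.csv', 'data/gt.csv',
#                         _pSensor, My_M=2.0, use_kalman=False)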
def show_track_2mag_csv_cpp(reading_path, cali_path, gt_path, pSensor, My_M, use_kalman=False):
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.set_xlim([-25, 25])
ax.set_ylim([-25, 25])
# ax.set_zlim([-2, 30])
ax.set_title("Reconstructed Magnet Position")
ax.set_xlabel('x(cm)')
ax.set_ylabel('y(cm)')
ax.set_zlabel('z(cm)')
# M_choice = [0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8]
# M_choice = [0.8, 1, 1.2, 1.4]
M_choice = [2]
reading_data = Reading_Data(data_path=reading_path, cali_path=cali_path)
data = reading_data.readings
lm_data = LM_data_2mag(gt_path)
# set the origin of the gt
lm_data.offset = np.array([-1.5614192, -0.31039926, 0.90800506])
result_parameter = []
color = ['r', 'b', 'g', 'y', 'm']
for index, M in enumerate(M_choice):
pred_position = []
changingM = []
changingG = []
changingTheta = []
changingPhy = []
changingTheta2 = []
changingPhy2 = []
changingDir = []
changingDir2 = []
SNR = []
cut = 0
starting_point = lm_data.get_gt(reading_data.tstamps[cut])
params = {
'X0': 1e-2 * starting_point[0][0],
'Y0': 1e-2 * starting_point[0][1],
'Z0': 1e-2 * starting_point[0][2],
'm0': np.log(My_M),
'theta0': 0.1,
'phy0': 0.1,
'X1': 1e-2 * starting_point[2][0],
'Y1': 1e-2 * starting_point[2][1],
'Z1': 1e-2 * starting_point[2][2],
'm1': np.log(My_M),
'theta1': 0.1,
'phy1': 0.1,
'gx': 0,
'gy': 0,
'gz': 0,
}
params = np.array([
40 / np.sqrt(2) * 1e-6, 40 / np.sqrt(2) * 1e-6, 0, np.log(My_M),
1e-2 * starting_point[0][0], 1e-2 *
starting_point[0][1], 1e-2 * starting_point[0][2], 0, 0,
1e-2 * starting_point[2][0], 1e-2 *
starting_point[2][1], 1e-2 * starting_point[2][2], 0, 0,
])
params = np.array([
40 / np.sqrt(2) * 1e-6, 40 / np.sqrt(2) * 1e-6, 0, np.log(3),
1e-2 * 11, 1e-2 * 1, 1e-2 * (-2), np.pi*0.5, np.pi*0.5,
1e-2 * 5, 1e-2 * (7), 1e-2 * (-4), np.pi*0.5, np.pi*0.25,
])
for i in tqdm(range(cut, data.shape[0] - cut)):
# if i > 5:
# gt = lm_data.get_gt(reading_data.tstamps[i])
# params[4:7] = gt[0]*1e-2
# params[9:12] = gt[2]*1e-2
datai = data[i].reshape(-1, 3)
result = cs.solve_2mag(
datai.reshape(-1), pSensor.reshape(-1), params)
params = result.copy()
result_parameter.append(result)
# print('the m is ', result['m0'])
[x0, y0, z0, x1, y1, z1, Gx, Gy, Gz] = [
result[4] * 1e2, result[5] * 1e2, result[6] * 1e2, result[9] *
1e2, result[10] * 1e2, result[11] *
1e2, result[0],
result[1], result[2]
]
# [m, theta, phy] = [np.exp(result['m0'].value), np.pi * sigmoid(
# result['theta0'].value), np.pi * np.tanh(result['phy0'].value)]
[m, theta1, phy1, theta2, phy2] = [
np.exp(result[3]),
ang_convert(result[7]),
ang_convert(result[8]),
ang_convert(result[12]),
ang_convert(result[13]),
]
# [x, y, z, m] = [result['X'].value*1e2, result['Y'].value*1e2,
# result['Z'].value*1e2, result['m'].value]
G = np.array([Gx, Gy, Gz])
noise = np.linalg.norm(G, 2)
signal = np.linalg.norm(datai - G, 2)
pred_position.append(x0)
pred_position.append(y0)
pred_position.append(z0)
pred_position.append(x1)
pred_position.append(y1)
pred_position.append(z1)
changingM.append(m)
changingTheta.append(theta1)
changingPhy.append(phy1)
changingDir.append(np.array([np.sin(theta1)*np.cos(phy1), np.sin(
theta1)*np.sin(phy1), np.cos(theta1), np.sin(theta2)*np.cos(phy2), np.sin(
theta2)*np.sin(phy2), np.cos(theta2)]))
changingTheta2.append(theta2)
changingPhy2.append(phy2)
changingG.append([Gx, Gy, Gz])
changingG = np.array(changingG)
changingM = np.array(changingM)
changingTheta = np.array(changingTheta)
changingPhy = np.array(changingPhy)
changingAng = np.stack([changingTheta, changingPhy], axis=0).T
changingTheta2 = np.array(changingTheta2)
changingPhy2 = np.array(changingPhy2)
changingAng2 = np.stack([changingTheta2, changingPhy2], axis=0).T
changingDir = np.stack(changingDir, axis=0)
pred_position = np.array(pred_position).reshape(-1, 6)
compare_label = [' ', '(fixing G)']
ax.scatter(pred_position[:, 0],
pred_position[:, 1],
pred_position[:, 2],
s=1,
c=color[index % len(color)],
label='Magnet 1')
ax.scatter(pred_position[:, 3],
pred_position[:, 4],
pred_position[:, 5],
s=1,
c=color[index % len(color)],
label='Magnet 2')
# sensor position
ax.scatter(1e2 * pSensor[:, 0],
1e2 * pSensor[:, 1],
1e2 * pSensor[:, 2],
c='r',
s=1,
alpha=0.5)
# calculate loss
gt_route = []
gt_angle = []
losses1 = {}
losses_count1 = {}
losses1_ang = {}
losses_count1_ang = {}
losses2 = {}
losses_count2 = {}
losses2_ang = {}
losses_count2_ang = {}
for i in range(pred_position.shape[0]):
# mag one
gt = lm_data.get_gt(reading_data.tstamps[i + cut])
if lm_data.idx == 1:
gt_pos1 = gt[0]
gt_ang1 = gt[1]
gt_pos2 = gt[2]
gt_ang2 = gt[3]
else:
gt_pos1 = gt[2]
gt_ang1 = gt[3]
gt_pos2 = gt[0]
gt_ang2 = gt[1]
dis = np.linalg.norm(gt_pos1 - np.mean(pSensor, axis=0), 2)
loss = np.linalg.norm(gt_pos1 - pred_position[i][:3], 2)
ang = np.array([np.sin(changingAng[i][0])*np.cos(changingAng[i][1]), np.sin(
changingAng[i][0])*np.sin(changingAng[i][1]), np.cos(changingAng[i][0])])
loss_angle = np.arccos(np.dot(gt_ang1, changingDir[i, :3]))
if not dis in losses1.keys():
losses1[dis] = loss
losses_count1[dis] = 1
else:
losses1[dis] += loss
losses_count1[dis] += 1
if not dis in losses1_ang.keys():
losses1_ang[dis] = loss_angle
losses_count1_ang[dis] = 1
else:
losses1_ang[dis] += loss_angle
losses_count1_ang[dis] += 1
# mag two
dis = np.linalg.norm(gt_pos2 - np.mean(pSensor, axis=0), 2)
loss = np.linalg.norm(gt_pos2 - pred_position[i][3:], 2)
ang2 = np.array([np.sin(changingAng2[i][0])*np.cos(changingAng2[i][1]), np.sin(
changingAng2[i][0])*np.sin(changingAng2[i][1]), np.cos(changingAng2[i][0])])
"""
First created on Mon Aug 13 10:01:03 2018
Main code for the creation of the image for Zernike analysis;
Other moduls avaliable are:
Zernike_Cutting_Module
Zernike_Analysis_Module
Versions:
Oct 31, 2018; 0.1 -> 0.11 fixed FRD effect
Nov 1, 2018; 0.11 -> 0.12 added correct edges to the detector; fixed wrong behavior for misalignment
Nov 2, 2018; 0.12 -> 0.13 added lorentzian wings to the illumination of the pupil
Nov 3, 2018; 0.13 -> 0.13b fixed edges of detector when det_vert is not 1
Nov 12, 2018; 0.13b -> 0.13c changed parameter describing hexagonal effect "f" from 0.1 to 0.2
Nov 12, 2018; 0.13c -> 0.14 changed illumination description modifying entrance -> exit pupil illumination
Nov 29, 2018; 0.14 -> 0.14b added fixed scattering slope, deduced from large image in focus
Dec 16, 2018; 0.14b -> 0.14c allparameters_proposal_err from list to array
Dec 18, 2018; 0.14c -> 0.14d strutFrac upper limit to 0.13 in create_parInit
Dec 23, 2018; 0.14d -> 0.15 refactoring so that x_ilum and y_ilum is one
Dec 26, 2018; 0.15 -> 0.15b when in focus, create exactly 10x oversampling
Dec 31, 2018; 0.15b -> 0.16 major rewrite of downsampling algorithm
Jan 8, 2019; 0.16 -> 0.17 added support for zmax=22
Jan 14, 2019; 0.17 -> 0.18 fixed bug with downsampling algorithm - I was just taking central values
Jan 15, 2019; 0.18 -> 0.19 added simple algorithm to interpolate between 1/10 pixels in the best position
Feb 15, 2019; 0.19 -> 0.20 updated analysis for the new data
Feb 21, 2019; 0.20 -> 0.20b test parameter for showing globalparameters outside their limits
Feb 22, 2019; 0.20 -> 0.21 added support for Zernike higher than 22
Feb 22, 2019; 0.21 -> 0.21b added support for return image along side likelihood
Apr 17, 2019; 0.21b -> 0.21c changed defintion of residuals from (model-data) to (data-model)
Jun 4, 2019; 0.21c -> 0.21d slight cleaning of the code, no functional changes
Jun 26, 2019; 0.21d -> 0.21e included variable ``dataset'',
which denotes which data we are using in the analysis
Jul 29, 2019; 0.21e -> 0.21f changed the spread of parameters when drawing initial solutions, based on data
Sep 11, 2019; 0.21f -> 0.21g globalparameters_flat_6<1 to globalparameters_flat_6<=1
Oct 10, 2019: 0.21g -> 0.21h scattered_light_kernel saving option
Oct 31, 2019: 0.21h -> 0.22 (re)introduced small amount of apodization (PIPE2D-463)
Oct 31, 2019: 0.22 -> 0.22b introduced verbosity
Nov 07, 2019: 0.22b -> 0.22c nan values can pass through find_single_realization_min_cut
Nov 08, 2019: 0.22c -> 0.22d changes to resizing and centering
Nov 13, 2019: 0.22d -> 0.23 major changes to centering - chief ray in the center of oversampled image
Nov 15, 2019: 0.23 -> 0.24 change likelihood definition
Dec 16, 2019: 0.24 -> 0.24a added illumination with z4,z11,z22=0
Jan 14, 2020: 0.24a -> 0.24b added verbosity in find_single_realization_min_cut function
Jan 31, 2020: 0.24b -> 0.25 added support for data containing spots from two wavelengths
Feb 11, 2020: 0.25 -> 0.26 proper bilinear interpolation of the spots
Feb 17, 2020: 0.26 -> 0.26a increased speed when save parameter=0
Feb 18, 2020: 0.26a -> 0.26b mask image going through subpixel interpolation
Feb 19, 2020: 0.26b -> 0.26c normalization of sci image takes into account mask
Mar 1, 2020: 0.26c -> 0.27 apodization scales with the size of input images
Mar 4, 2020: 0.27 -> 0.28 (re-)introduced custom size of pupil image
Mar 6, 2020: 0.28 -> 0.28b refactored cut_square function (making it much faster)
Mar 8, 2020: 0.28b -> 0.28c set limit in grating factor to 120000 in generating code
Apr 1, 2020: 0.28c -> 0.28d svd_invert function
May 6, 2020: 0.28d -> 0.28e clarified and expanded comments in postprocessing part
Jun 28, 2020: 0.28e -> 0.29 added multi analysis
Jul 02, 2020: 0.29 -> 0.30 added internal fitting for flux
Jul 02, 2020: 0.30 -> 0.30a lnlike_Neven_multi_same_spot can accept both 1d and 2d input
Jul 07, 2020: 0.30a -> 0.30b added threading time information
Jul 09, 2020: 0.30b -> 0.30c expwf_grid changed to complex64 from complex128
Jul 09, 2020: 0.30c -> 0.30d changed all float64 to float32
Jul 16, 2020: 0.30d -> 0.31 moved all fft to scipy.signal.fftconvolve
Jul 20, 2020: 0.31 -> 0.32 introduced renormalization_of_var_sum for multi_var analysis
Jul 26, 2020: 0.32 -> 0.32a only changed last value of allparameters if len()==42
Aug 10, 2020: 0.32a -> 0.33 added extra Zernike to parInit
Aug 12, 2020: 0.33 -> 0.33a changed iters to 6 in fluxfit
Sep 08, 2020: 0.33a -> 0.33b added test_run to help with debugging
Oct 05, 2020: 0.33b -> 0.33c trying to always output flux multiplier when fit_for_flux
Oct 06, 2020: 0.33c -> 0.34 added possibility to specify position of created psf
Oct 13, 2020: 0.34 -> 0.34b added finishing step of centering, done with Nelder-Mead
Oct 22, 2020: 0.34b -> 0.35 added class that does Tokovinin multi analysis
Nov 03, 2020: 0.35 -> 0.35a create parInit up to z=22, with larger parametrization
Nov 05, 2020: 0.35a -> 0.35b return same value if Tokovinin does not work
Nov 16, 2020: 0.35b -> 0.35c modified movement of parameters
Nov 17, 2020: 0.35c -> 0.35d small fixes in check_global_parameters with parameters 0 and 1
Nov 19, 2020: 0.35d -> 0.36 realized that vertical strut is different than others -
first, simplest implementation
Nov 19, 2020: 0.36 -> 0.36a modified parInit movements for multi (mostly reduced)
Dec 05, 2020: 0.36a -> 0.37 misalignment and variable strut size
Dec 13, 2020: 0.37 -> 0.37a changed weights in multi_same_spot
Jan 17, 2021: 0.37a -> 0.37b accept True as input for simulation00
Jan 25, 2021: 0.37b -> 0.37c fixed fillCrop function in PsfPosition, slice limits need to be integers
Jan 26, 2021: 0.37c -> 0.38 PIPE2D-701, fixed width of struts implementation
Jan 28, 2021: 0.38 -> 0.39 added flux mask in chi**2 calculation
Jan 28, 2021: 0.39 -> 0.39b lowered allowed values for pixel_effect and fiber_r
Feb 08, 2021: 0.39b -> 0.4 fixed bilinear interpolation for secondary, x and y confusion
Feb 25, 2021: 0.4 -> 0.40a added directory for work on Tiger
Mar 05, 2021: 0.40a -> 0.41 introduced create_custom_var function
Mar 08, 2021: 0.41 -> 0.41a added suport for saving intermediate images to tiger
Mar 24, 2021: 0.41a -> 0.41b added support for masked images in find_centroid_of_flux
Mar 26, 2021: 0.41b -> 0.41c added create_custom_var function as a separate function
Mar 26, 2021: 0.41c -> 0.41d semi-implemented custom variance function in Tokovinin algorithm
Mar 26, 2021: 0.41d -> 0.41e model_multi_out has correct input parameters now
Apr 01, 2021: 0.41e -> 0.42 changed bug/feature in checking wide_43 and wide_42 parameters
Apr 02, 2021: 0.42 -> 0.43 changed width of slit shadow and slit holder shadow
Apr 04, 2021: 0.43 -> 0.44 implemented f_multiplier_factor
Apr 04, 2021: 0.44 -> 0.44a implemented possibility for using np.abs(chi) as likelihood
Apr 08, 2021: 0.44a -> 0.44b propagated change from 0.44a to Tokovinin algorithm
Apr 12, 2021: 0.44b -> 0.44c modified renormalization factors for abs(chi) value
Apr 13, 2021: 0.44c -> 0.44d fixed bug in the estimate of mean_value_of_background
Apr 14, 2021: 0.44d -> 0.44e mean_value_of_background estimated from sci or var data
Apr 22, 2021: 0.44e -> 0.44f introduced multi_background_factor
Apr 27, 2021: 0.44f -> 0.45 Tokovinin now works much quicker with multi_background_factor
(create_simplified_H updated)
Apr 29, 2021: 0.45 -> 0.45a many changes in order to run create_simplified_H efficently
May 07, 2021: 0.45a -> 0.45b if Premodel analysis failed, return 15 values
May 08, 2021: 0.45b -> 0.45c changed that images of same size do not crash out_images creation
May 14, 2021: 0.45c -> 0.45d create_parInit, changed from <> to <= and >=
May 18, 2021: 0.45d -> 0.45e testing focus constrain in Tokovinin
May 19, 2021: 0.45e -> 0.45f expanded verbosity messages in Tokovinin algorithm
May 19, 2021: 0.45f -> 0.45g testing [8., 8., 8., 8., 1., 8., 8., 8., 8.] renormalization
May 20, 2021: 0.45g -> 0.45h do not use multi_background for image in or near focus
May 27, 2021: 0.45h -> 0.45i reordered variables in LN_PFS_single, in preparation for wv analysis
May 27, 2021: 0.45i -> 0.46 changed oversampling to be always 10
Jun 08, 2021: 0.46 -> 0.46a changed to Psf_position to be able to take only_chi and center of flux
Jun 08, 2021: 0.46a -> 0.46b changed normalization so that in focus it is identical to the pipeline
Jun 15, 2021: 0.46b -> 0.46c change limit on the initial cut of the oversampled image,
in order to handle bluer data
Jun 19, 2021: 0.46c -> 0.46d changed skimage.transform.resize to resize,
to avoid skimage.transform not being available in LSST
Jun 20, 2021: 0.46d -> 0.46e changed scipy.signal to signal,
and require that optPsf_cut_downsampled_scattered size is int /
no change to unit test
Jun 24, 2021: 0.46e -> 0.47 removed resize and introduced galsim resizing in Psf_position,
to be consistent with LSST pipeline
Jun 25, 2021: 0.47 -> 0.47a introduced galsim resizing in the first downsampling from natural resolution
to default=10 oversampling also
Jul 11, 2021: 0.47a -> 0.47b changed a minus factor in secondary position estimation
Jul 12, 2021: 0.47b -> 0.47c initial offset in positioning had a wrong +- sign in front
Jul 23, 2021: 0.47c -> 0.47d (only) added comments and explanations
Jul 26, 2021: 0.47d -> 0.47e changed default oversampling to 11
Jul 27, 2021: 0.47e -> 0.47f offset done in galsim, but downsampling via resize function
Aug 26, 2021: 0.47f -> 0.47g direct minimization when use_center_of_flux=True
Aug 30, 2021: 0.47g -> 0.48 offset done in LSST code now
Sep 02, 2021: 0.48 -> 0.48a done cleaning offset code (PIPE2D-880)
Sep 15, 2021: 0.48a -> 0.48b removed minor bug where array_of_var_sum was called too early,
and could fail if nan value was present
Sep 27, 2021: 0.48b -> 0.48c added explicit bool conversion to double_sources
Oct 05, 2021: 0.48c -> 0.48d further explicit bool(double_sources) conversion in ln_pfs_single
Oct 08, 2021: 0.48d -> 0.48e Pep8 cleaning
Oct 15, 2021: 0.48e -> 0.48f forced a randomseed number in create_parInit function
Oct 25, 2021: 0.48f -> 0.49 set half of init values in create_parInit to be same as init value
Oct 26, 2021: 0.49 -> 0.49a modified create_custom_var that it does lin fit if 2nd degree fit is convex
Oct 28, 2021: 0.49a -> 0.49b modified create_custom_var so that it does not fall below min(var) value
Nov 01, 2021: 0.49b -> 0.49c create_custom_var does not change var image from step to step anymore
Nov 02, 2021: 0.49c -> 0.49d eliminated std varianble from create_simplified_H
Nov 03, 2021: 0.49d -> 0.49e PIPE2D-930; fixed reusing list_of_variance in Tokovinin
Nov 03, 2021: 0.49e -> 0.50 PIPE2D-931; modified creation of polyfit for variance image higher up
so it is done only once per sci/var/mask image combination
Nov 20, 2021: 0.50 -> 0.50a Hilo modifications
Dec 06, 2021: 0.50a -> 0.51 Zernike_estimation_preparation class
Dec 09, 2021: 0.51 -> 0.51a introduced `fixed_single_spot`
Feb 11, 2022: 0.51a -> 0.51b unified index parameter allowed to vary
Mar 18, 2022: 0.51b -> 0.51c introduced div_same par, controlling how many particles are same
Mar 24, 2022: 0.51c -> 0.51d multiple small changes, for running same illum in fiber
Apr 03, 2022: 0.51d -> 0.51e test is now analysis_type_fiber == "fixed_fiber_par"
May 05, 2022: 0.51e -> 0.51f added documentation
May 09, 2022: 0.51f -> 0.51g replaced print with logging
May 24, 2022: 0.51g -> 0.51h small changes to output testing directory
May 26, 2022: 0.51h -> 0.51i linting fixes
Jun 01, 2022: 0.51i -> 0.52 im1.setCenter(0,0), to be compatible with galsim 2.3.4
@author: <NAME>
@contact: <EMAIL>
@web: www.ncaplar.com
"""
########################################
# standard library imports
# from __future__ import absolute_import, division, logging.info_function
from functools import partial
from typing import Tuple, Iterable
# import matplotlib
# from matplotlib.colors import LogNorm
# import matplotlib.pyplot as plt
import lmfit
from scipy.linalg import svd
from scipy import signal
from scipy.ndimage.filters import gaussian_filter
import scipy.fftpack
import scipy.misc
from scipy.special import erf
from astropy.convolution import Gaussian2DKernel
from astropy.convolution import Tophat2DKernel
import lsst.afw.math
import lsst.afw.image
import lsst.afw
import lsst
import galsim
import traceback
# import platform
import threading
# from multiprocessing import current_process
import numpy as np
import os
import time
# import sys
import math
import socket
import sys
import pickle
import logging
os.environ["MKL_NUM_THREADS"] = "1"
os.environ["NUMEXPR_NUM_THREADS"] = "1"
os.environ["OMP_NUM_THREADS"] = "1"
np.set_printoptions(suppress=True)
np.seterr(divide='ignore', invalid='ignore')
# logging.info(np.__config__)
########################################
# Related third party imports
# none at the moment
########################################
# Local application/library specific imports
# galsim
galsim.GSParams.maximum_fft_size = 12000
# lsst
# astropy
# import astropy
# import astropy.convolution
# scipy
# import scipy
# import skimage.transform
# import scipy.optimize as optimize
# for svd_invert function
# lmfit
# matplotlib
# needed for resizing routines
# for distributing image creation in Tokovinin algorithm
########################################
__all__ = [
'PupilFactory',
'Pupil',
'ZernikeFitterPFS',
'LN_PFS_multi_same_spot',
'LN_PFS_single',
'LNP_PFS',
'find_centroid_of_flux',
'create_parInit',
'PFSPupilFactory',
'custom_fftconvolve',
'stepK',
'maxK',
'sky_scale',
'sky_size',
'remove_pupil_parameters_from_all_parameters',
'resize',
'_interval_overlap',
'svd_invert',
'Tokovinin_multi',
'create_popt_for_custom_var',
'create_custom_var_from_popt',
'Zernike_estimation_preparation']
__version__ = "0.52"
# classes Pupil, PupilFactory and PFSPupilFactory have different form of documentation,
# compared to other classes as they have been imported from code written by <NAME>
class Pupil(object):
"""!Pupil obscuration function.
"""
def __init__(self, illuminated, size, scale):
"""!Construct a Pupil
@param[in] illuminated 2D numpy array indicating which parts of
the pupil plane are illuminated.
@param[in] size Size of pupil plane array in meters. Note
that this may be larger than the actual
diameter of the illuminated pupil to
accommodate zero-padding.
@param[in] scale Sampling interval of pupil plane array in
meters.
"""
self.illuminated = illuminated
self.size = size
self.scale = scale
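# Minimal sketch (illustrative values only, not from the original source) of
# how the Pupil container can be built directly; real pupils are produced by
# the factory classes below:
#
# _illum = np.ones((1024, 1024), dtype=np.float32)
# _pupil_example = Pupil(illuminated=_illum, size=0.14, scale=0.14 / 1024)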
class PupilFactory(object):
"""!Pupil obscuration function factory for use with Fourier optics.
Based on the code by <NAME>, developed for HSC camera
Contains functions that can create various obscurations in the camera
"""
def __init__(
self,
pupilSize,
npix,
input_angle,
detFrac,
strutFrac,
slitFrac,
slitFrac_dy,
x_fiber,
y_fiber,
effective_ilum_radius,
frd_sigma,
frd_lorentz_factor,
det_vert,
wide_0=0,
wide_23=0,
wide_43=0,
misalign=0,
verbosity=0):
"""Construct a PupilFactory.
Parameters
----------
pupilSize: `float`
Size of the exit pupil [m]
npix: `int`
Constructed Pupils will be npix x npix
input_angle: `float`
Angle of the pupil (for all practical purposes fixed at np.pi/2)
detFrac: `float`
Value determining how much of the exit pupil is obscured by the
central obscuration (detector)
strutFrac: `float`
Value determining how much of the exit pupil is obscured
by a single strut
slitFrac: `float`
Value determining how much of the exit pupil is obscured by slit
slitFrac_dy: `float`
Value determining what is the vertical position of the slit
in the exit pupil
x_fiber: `float`
Position of the fiber misalignment in the x direction
y_fiber: `float`
Position of the fiber misalignment in the y direction
effective_ilum_radius: `float`
Fraction of the maximal radius of the illumination
of the exit pupil that is actually illuminated
frd_sigma: `float`
Sigma of Gaussian convolving only outer edge, mimicking FRD
frd_lorentz_factor: `float`
Strength of the lorentzian factor describing wings
det_vert: `float`
Multiplicative factor determining vertical size
of the detector obscuration
wide_0: `float`
Widening of the strut at 0 degrees
wide_23: `float`
Widening of the strut at the top-left corner
wide_43: `float`
Widening of the strut at the bottom-left corner
misalign: `float`
Describing the amount of misalignment
verbosity: `int`
How verbose during evaluation (1 = full verbosity)
"""
self.verbosity = verbosity
if self.verbosity == 1:
logging.info('Entering PupilFactory class')
logging.info('Entering PupilFactory class')
self.pupilSize = pupilSize
self.npix = npix
self.input_angle = input_angle
self.detFrac = detFrac
self.strutFrac = strutFrac
self.pupilScale = pupilSize / npix
self.slitFrac = slitFrac
self.slitFrac_dy = slitFrac_dy
self.effective_ilum_radius = effective_ilum_radius
self.frd_sigma = frd_sigma
self.frd_lorentz_factor = frd_lorentz_factor
self.det_vert = det_vert
self.wide_0 = wide_0
self.wide_23 = wide_23
self.wide_43 = wide_43
self.misalign = misalign
u = (np.arange(npix, dtype=np.float32) - (npix - 1) / 2) * self.pupilScale
self.u, self.v = np.meshgrid(u, u)
@staticmethod
def _pointLineDistance(p0, p1, p2):
"""Compute the right-angle distance between the points given by `p0`
and the line that passes through `p1` and `p2`.
@param[in] p0 2-tuple of numpy arrays (x,y coords)
@param[in] p1 2-tuple of scalars (x,y coords)
@param[in] p2 2-tuple of scalars (x,y coords)
@returns numpy array of distances; shape congruent to p0[0]
"""
x0, y0 = p0
x1, y1 = p1
x2, y2 = p2
dy21 = y2 - y1
dx21 = x2 - x1
return np.abs(dy21 * x0 - dx21 * y0 + x2 * y1 - y2 * x1) / np.hypot(dy21, dx21)
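# Worked example (illustrative): the distance from the origin to the horizontal
# line passing through (0, 1) and (1, 1) is 1.
# >>> PupilFactory._pointLineDistance((np.array([0.]), np.array([0.])), (0., 1.), (1., 1.))
# array([1.])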
def _fullPupil(self):
"""Make a fully-illuminated Pupil.
@returns Pupil
"""
illuminated = np.ones(self.u.shape, dtype=np.float32)
return Pupil(illuminated, self.pupilSize, self.pupilScale)
def _cutCircleInterior(self, pupil, p0, r):
"""Cut out the interior of a circular region from a Pupil.
@param[in,out] pupil Pupil to modify in place
@param[in] p0 2-tuple indicating region center
@param[in] r Circular region radius
"""
r2 = (self.u - p0[0])**2 + (self.v - p0[1])**2
pupil.illuminated[r2 < r**2] = False
def _cutCircleExterior(self, pupil, p0, r):
"""Cut out the exterior of a circular region from a Pupil.
@param[in,out] pupil Pupil to modify in place
@param[in] p0 2-tuple indicating region center
@param[in] r Circular region radius
"""
r2 = (self.u - p0[0])**2 + (self.v - p0[1])**2
pupil.illuminated[r2 > r**2] = False
def _cutEllipseExterior(self, pupil, p0, r, b, thetarot):
"""Cut out the exterior of a circular region from a Pupil.
@param[in,out] pupil Pupil to modify in place
@param[in] p0 2-tuple indicating region center
@param[in] r Ellipse region radius = major axis
@param[in] b Ellipse region radius = minor axis
@param[in] thetarot Ellipse region rotation
"""
r2 = (self.u - p0[0])**2 + (self.v - p0[1])**2
theta = np.arctan(self.u / self.v) + thetarot
pupil.illuminated[r2 > r**2 * b**2 / (b**2 * (np.cos(theta))**2 + r**2 * (np.sin(theta))**2)] = False
def _cutSquare(self, pupil, p0, r, angle, det_vert):
"""Cut out the interior of a circular region from a Pupil.
@param[in,out] pupil Pupil to modify in place
@param[in] p0 2-tuple indicating region center
@param[in] r half length of the square side
@param[in] angle angle that the camera is rotated
@param[in] det_vert multiplicative factor that distorts the square into a rectangle
"""
pupil_illuminated_only1 = np.ones_like(pupil.illuminated, dtype=np.float32)
time_start_single_square = time.time()
###########################################################
# Central square
if det_vert is None:
det_vert = 1
x21 = -r / 2 * det_vert * 1
x22 = +r / 2 * det_vert * 1
y21 = -r / 2 * 1
y22 = +r / 2 * 1
i_max = self.npix / 2 - 0.5
i_min = -i_max
i_y_max = int(np.round((x22 + p0[1]) / self.pupilScale - (i_min)))
i_y_min = int(np.round((x21 + p0[1]) / self.pupilScale - (i_min)))
i_x_max = int(np.round((y22 + p0[0]) / self.pupilScale - (i_min)))
i_x_min = int(np.round((y21 + p0[0]) / self.pupilScale - (i_min)))
assert angle == np.pi / 2
# angleRad = angle
camX_value_for_f_multiplier = p0[0]
camY_value_for_f_multiplier = p0[1]
# logging.info(camX_value_for_f_multiplier,camY_value_for_f_multiplier)
camY_Max = 0.02
f_multiplier_factor = (-camX_value_for_f_multiplier * 100 / 3) * \
(np.abs(camY_value_for_f_multiplier) / camY_Max) + 1
# f_multiplier_factor=1
if self.verbosity == 1:
logging.info('f_multiplier_factor for size of detector triangle is: ' + str(f_multiplier_factor))
pupil_illuminated_only0_in_only1 = np.zeros((i_y_max - i_y_min, i_x_max - i_x_min))
u0 = self.u[i_y_min:i_y_max, i_x_min:i_x_max]
v0 = self.v[i_y_min:i_y_max, i_x_min:i_x_max]
# factor controlling how big the triangle in the corner of the detector is
f = 0.2
f_multiplier = f_multiplier_factor / 1
###########################################################
# Lower right corner
x21 = -r / 2
x22 = +r / 2
y21 = -r / 2 * det_vert
y22 = +r / 2 * det_vert
f_lr = np.copy(f) * (1 / f_multiplier)
angleRad21 = -np.pi / 4
triangle21 = [[p0[0] + x22, p0[1] + y21],
[p0[0] + x22, p0[1] + y21 - y21 * f_lr],
[p0[0] + x22 - x22 * f_lr, p0[1] + y21]]
p21 = triangle21[0]
y22 = (triangle21[1][1] - triangle21[0][1]) / np.sqrt(2)
y21 = 0
x21 = (triangle21[2][0] - triangle21[0][0]) / np.sqrt(2)
x22 = -(triangle21[2][0] - triangle21[0][0]) / np.sqrt(2)
pupil_illuminated_only0_in_only1[((v0 - p21[1]) * np.cos(-angleRad21)
- (u0 - p21[0]) * np.sin(-angleRad21) < y22)] = True
###########################################################
# Upper left corner
x21 = -r / 2 * 1
x22 = +r / 2 * 1
y21 = -r / 2 * det_vert
y22 = +r / 2 * det_vert
# angleRad12 = -np.pi / 4
f_ul = np.copy(f) * (1 / f_multiplier)
triangle12 = [[p0[0] + x21, p0[1] + y22],
[p0[0] + x21, p0[1] + y22 - y22 * f_ul],
[p0[0] + x21 - x21 * f_ul, p0[1] + y22]]
p21 = triangle12[0]
y22 = 0
y21 = (triangle12[1][1] - triangle12[0][1]) / np.sqrt(2)
x21 = -(triangle12[2][0] - triangle12[0][0]) / np.sqrt(2)
x22 = +(triangle12[2][0] - triangle12[0][0]) / np.sqrt(2)
pupil_illuminated_only0_in_only1[((v0 - p21[1]) * np.cos(-angleRad21)
- (u0 - p21[0]) * np.sin(-angleRad21) > y21)] = True
###########################################################
# Upper right corner
x21 = -r / 2 * 1
x22 = +r / 2 * 1
y21 = -r / 2 * det_vert
y22 = +r / 2 * det_vert
f_ur = np.copy(f) * f_multiplier
triangle22 = [[p0[0] + x22, p0[1] + y22],
[p0[0] + x22, p0[1] + y22 - y22 * f_ur],
[p0[0] + x22 - x22 * f_ur, p0[1] + y22]]
p21 = triangle22[0]
y22 = -0
y21 = +(triangle22[1][1] - triangle22[0][1]) / np.sqrt(2)
x21 = +(triangle22[2][0] - triangle22[0][0]) / np.sqrt(2)
x22 = -(triangle22[2][0] - triangle22[0][0]) / np.sqrt(2)
pupil_illuminated_only0_in_only1[((u0 - p21[0]) * np.cos(-angleRad21)
+ (v0 - p21[1]) * np.sin(-angleRad21) > x21)] = True
###########################################################
# Lower left corner
x21 = -r / 2 * 1
x22 = +r / 2 * 1
y21 = -r / 2 * det_vert
y22 = +r / 2 * det_vert
f_ll = np.copy(f) * f_multiplier
triangle11 = [[p0[0] + x21, p0[1] + y21],
[p0[0] + x21, p0[1] + y21 - y21 * f_ll],
[p0[0] + x21 - x21 * f_ll, p0[1] + y21]]
p21 = triangle11[0]
y22 = -(triangle11[1][1] - triangle11[0][1]) / np.sqrt(2)
y21 = 0
x21 = +(triangle11[2][0] - triangle11[0][0]) / np.sqrt(2)
x22 = +(triangle11[2][0] - triangle11[0][0]) / np.sqrt(2)
pupil_illuminated_only0_in_only1[((u0 - p21[0]) * np.cos(-angleRad21)
+ (v0 - p21[1]) * np.sin(-angleRad21) < x22)] = True
pupil_illuminated_only1[i_y_min:i_y_max, i_x_min:i_x_max] = pupil_illuminated_only0_in_only1
pupil.illuminated = pupil.illuminated * pupil_illuminated_only1
time_end_single_square = time.time()
if self.verbosity == 1:
logging.info('Time for cutting out the square is '
+ str(time_end_single_square - time_start_single_square))
def _cutRay(self, pupil, p0, angle, thickness, angleunit=None, wide=0):
"""Cut out a ray from a Pupil.
@param[in,out] pupil Pupil to modify in place
@param[in] p0 2-tuple indicating ray starting point
@param[in] angle Ray angle measured CCW from +x.
@param[in] thickness Thickness of cutout
@param[in] angleunit If None, changes internal units to radians
@param[in] wide Controls the widening of the strut as
a function of the distance from the origin
"""
if angleunit is None:
angleRad = angle.asRadians()
else:
angleRad = angle
# the 1 is arbitrary, just need something to define another point on
# the line
p1 = (p0[0] + 1, p0[1] + np.tan(angleRad))
d = PupilFactory._pointLineDistance((self.u, self.v), p0, p1)
radial_distance = 14.34 * np.sqrt((self.u - p0[0])**2 + (self.v - p0[1])**2)
pupil.illuminated[(d < 0.5 * thickness * (1 + wide * radial_distance))
& ((self.u - p0[0]) * np.cos(angleRad)
+ (self.v - p0[1]) * np.sin(angleRad) >= 0)] = False
def _addRay(self, pupil, p0, angle, thickness, angleunit=None):
"""Add a ray from a Pupil.
@param[in,out] pupil Pupil to modify in place
@param[in] p0 2-tuple indicating ray starting point
@param[in] angle Ray angle measured CCW from +x.
@param[in] thickness Thickness of cutout
"""
if angleunit is None:
angleRad = angle.asRadians()
else:
angleRad = angle
# the 1 is arbitrary, just need something to define another point on
# the line
p1 = (p0[0] + 1, p0[1] + np.tan(angleRad))
d = PupilFactory._pointLineDistance((self.u, self.v), p0, p1)
pupil.illuminated[(d < 0.5 * thickness)
& ((self.u - p0[0]) * np.cos(angleRad)
+ (self.v - p0[1]) * np.sin(angleRad) >= 0)] = True
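# Hedged sketch (not from the original source) of how the cut methods compose:
# the factory builds a fully illuminated pupil and then removes regions, e.g.
# an outer aperture cut followed by a single strut shadow. All numeric values
# below are placeholders chosen only to show the interface.
#
# _factory = PupilFactory(pupilSize=0.14, npix=512, input_angle=np.pi / 2,
#                         detFrac=0.65, strutFrac=0.07, slitFrac=0.05,
#                         slitFrac_dy=0.0, x_fiber=0.0, y_fiber=0.0,
#                         effective_ilum_radius=0.85, frd_sigma=0.02,
#                         frd_lorentz_factor=0.5, det_vert=1.0)
# _pup = _factory._fullPupil()
# _factory._cutCircleExterior(_pup, (0.0, 0.0), 0.07)
# _factory._cutRay(_pup, (0.0, 0.0), np.pi / 2, 0.005, 'rad')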
class PFSPupilFactory(PupilFactory):
"""Pupil obscuration function factory for PFS
Based on the code by <NAME>, initially developed for HSC camera
Invokes PupilFactory to create obscurations of the camera
Adds various illumination effects which are specific to the spectrographs
"""
def __init__(
self,
pupilSize,
npix,
input_angle,
detFrac,
strutFrac,
slitFrac,
slitFrac_dy,
x_fiber,
y_fiber,
effective_ilum_radius,
frd_sigma,
frd_lorentz_factor,
det_vert,
slitHolder_frac_dx,
wide_0=0,
wide_23=0,
wide_43=0,
misalign=0,
verbosity=0):
"""!Construct a PupilFactory.
Parameters
----------
pupilSize: `float`
Size of the exit pupil [m]
npix: `int`
Constructed Pupils will be npix x npix
input_angle: `float`
Angle of the pupil (for all practical purposes fixed at np.pi/2)
detFrac: `float`
Value determining how much of the exit pupil is obscured by the
central obscuration (detector)
strutFrac: `float`
Value determining how much of the exit pupil is obscured
by a single strut
slitFrac: `float`
Value determining how much of the exit pupil is obscured by slit
slitFrac_dy: `float`
Value determining what is the vertical position of the slit
in the exit pupil
x_fiber: `float`
Position of the fiber misalignment in the x direction
y_fiber: `float`
Position of the fiber misalignment in the y direction
effective_ilum_radius: `float`
Fraction of the maximal radius of the illumination
of the exit pupil that is actually illuminated
frd_sigma: `float`
Sigma of Gaussian convolving only outer edge, mimicking FRD
frd_lorentz_factor: `float`
Strength of the lorentzian factor describing wings
det_vert: `float`
Multiplicative factor determining vertical size
of the detector obscuration
wide_0: `float`
Widening of the strut at 0 degrees
wide_23: `float`
Widening of the strut at the top-left corner
wide_43: `float`
Widening of the strut at the bottom-left corner
misalign: `float`
Describing the amount of misalignment
verbosity: `int`
How verbose during evaluation (1 = full verbosity)
"""
self.verbosity = verbosity
if self.verbosity == 1:
logging.info('Entering PFSPupilFactory class')
PupilFactory.__init__(
self,
pupilSize,
npix,
input_angle,
detFrac,
strutFrac,
slitFrac,
slitFrac_dy,
x_fiber,
y_fiber,
effective_ilum_radius,
frd_sigma,
frd_lorentz_factor,
det_vert,
verbosity=self.verbosity,
wide_0=wide_0,
wide_23=wide_23,
wide_43=wide_43,
misalign=misalign)
self.x_fiber = x_fiber
self.y_fiber = y_fiber
self.slitHolder_frac_dx = slitHolder_frac_dx
self._spiderStartPos = [np.array([0., 0.]), np.array([0., 0.]), np.array([0., 0.])]
self._spiderAngles = [0, np.pi * 2 / 3, np.pi * 4 / 3]
self.effective_ilum_radius = effective_ilum_radius
self.wide_0 = wide_0
self.wide_23 = wide_23
self.wide_43 = wide_43
self.misalign = misalign
def getPupil(self, point):
"""!Calculate a Pupil at a given point in the focal plane.
@param point Point2D indicating focal plane coordinates.
@returns Pupil
"""
if self.verbosity == 1:
logging.info('Entering getPupil (function inside PFSPupilFactory)')
# called subaruRadius as it was taken from the code fitting pupil for HSC on Subaru
subaruRadius = (self.pupilSize / 2) * 1
detFrac = self.detFrac # linear fraction
hscRadius = detFrac * subaruRadius
slitFrac = self.slitFrac # linear fraction
subaruSlit = slitFrac * subaruRadius
strutFrac = self.strutFrac # linear fraction
subaruStrutThick = strutFrac * subaruRadius
# y-position of the slit
slitFrac_dy = self.slitFrac_dy
# relic from the HSC code
# See DM-8589 for more detailed description of following parameters
# d(lensCenter)/d(theta) in meters per degree
# lensRate = 0.0276 * 3600 / 128.9 * subaruRadius
# d(cameraCenter)/d(theta) in meters per degree
hscRate = 2.62 / 1000 * subaruRadius
hscPlateScale = 380
thetaX = point[0] * hscPlateScale
thetaY = point[1] * hscPlateScale
pupil = self._fullPupil()
camX = thetaX * hscRate
camY = thetaY * hscRate
# creating FRD effects
single_element = np.linspace(-1, 1, len(pupil.illuminated), endpoint=True, dtype=np.float32)
u_manual = np.tile(single_element, (len(single_element), 1))
v_manual = np.transpose(u_manual)
center_distance = np.sqrt((u_manual - self.x_fiber * hscRate * hscPlateScale * 12)
** 2 + (v_manual - self.y_fiber * hscRate * hscPlateScale * 12)**2)
frd_sigma = self.frd_sigma
sigma = 2 * frd_sigma
pupil_frd = (1 / 2 * (scipy.special.erf((-center_distance + self.effective_ilum_radius) / sigma)
+ scipy.special.erf((center_distance + self.effective_ilum_radius) / sigma)))
################
# Adding misaligment in this section
time_misalign_start = time.time()
position_of_center_0 = np.where(center_distance == np.min(center_distance))
position_of_center = [position_of_center_0[1][0], position_of_center_0[0][0]]
position_of_center_0_x = position_of_center_0[0][0]
position_of_center_0_y = position_of_center_0[1][0]
distances_to_corners = np.array([np.sqrt(position_of_center[0]**2 + position_of_center[1]**2),
np.sqrt((len(pupil_frd) - position_of_center[0])**2
+ position_of_center[1]**2),
np.sqrt((position_of_center[0])**2
+ (len(pupil_frd) - position_of_center[1])**2),
np.sqrt((len(pupil_frd) - position_of_center[0])**2
+ (len(pupil_frd) - position_of_center[1])**2)])
max_distance_to_corner = np.max(distances_to_corners)
threshold_value = 0.5
left_from_center = np.where(pupil_frd[position_of_center_0_x]
[0:position_of_center_0_y] < threshold_value)[0]
right_from_center = \
np.where(pupil_frd[position_of_center_0_x][position_of_center_0_y:] < threshold_value)[0] +\
position_of_center_0_y
up_from_center = \
np.where(pupil_frd[:, position_of_center_0_y][position_of_center_0_x:] < threshold_value)[0] +\
position_of_center_0_x
down_from_center = np.where(pupil_frd[:, position_of_center_0_y]
[:position_of_center_0_x] < threshold_value)[0]
if len(left_from_center) > 0:
size_of_05_left = position_of_center_0_y - np.max(left_from_center)
else:
size_of_05_left = 0
if len(right_from_center) > 0:
size_of_05_right = np.min(right_from_center) - position_of_center_0_y
else:
size_of_05_right = 0
if len(up_from_center) > 0:
size_of_05_up = np.min(up_from_center) - position_of_center_0_x
else:
size_of_05_up = 0
if len(down_from_center) > 0:
size_of_05_down = position_of_center_0_x - np.max(down_from_center)
else:
size_of_05_down = 0
sizes_4_directions = np.array([size_of_05_left, size_of_05_right, size_of_05_up, size_of_05_down])
max_size = np.max(sizes_4_directions)
imageradius = max_size
radiusvalues = np.linspace(
0, int(
np.ceil(max_distance_to_corner)), int(
np.ceil(max_distance_to_corner)) + 1)
sigtotp = sigma * 550
dif_due_to_mis_class = Pupil_misalign(radiusvalues, imageradius, sigtotp, self.misalign)
dif_due_to_mis = dif_due_to_mis_class()
scaling_factor_pixel_to_physical = max_distance_to_corner / np.max(center_distance)
distance_int = np.round(center_distance * scaling_factor_pixel_to_physical).astype(int)
pupil_frd_with_mis = pupil_frd + dif_due_to_mis[distance_int]
pupil_frd_with_mis[pupil_frd_with_mis > 1] = 1
time_misalign_end = time.time()
if self.verbosity == 1:
logging.info('Time to execute illumination considerations due to misalignment '
+ str(time_misalign_end - time_misalign_start))
####
pupil_lorentz = (np.arctan(2 * (self.effective_ilum_radius - center_distance) / (4 * sigma))
+ np.arctan(2 * (self.effective_ilum_radius + center_distance) / (4 * sigma))) /\
(2 * np.arctan((2 * self.effective_ilum_radius) / (4 * sigma)))
pupil_frd = np.copy(pupil_frd_with_mis)
pupil.illuminated = (pupil_frd + 1 * self.frd_lorentz_factor
* pupil_lorentz) / (1 + self.frd_lorentz_factor)
# Cut out the acceptance angle of the camera
self._cutCircleExterior(pupil, (0.0, 0.0), subaruRadius)
# Cut out detector shadow
self._cutSquare(pupil, (camX, camY), hscRadius, self.input_angle, self.det_vert)
# No vignetting of this kind for the spectroscopic camera
# self._cutCircleExterior(pupil, (lensX, lensY), lensRadius)
# Cut out spider shadow
for pos, angle in zip(self._spiderStartPos, self._spiderAngles):
x = pos[0] + camX
y = pos[1] + camY
if angle == 0:
# logging.info('cutRay applied to strut at angle '+str(angle))
self._cutRay(pupil, (x, y), angle, subaruStrutThick, 'rad', self.wide_0)
if angle == np.pi * 2 / 3:
# logging.info('cutRay applied to strut at angle '+str(angle))
self._cutRay(pupil, (x, y), angle, subaruStrutThick, 'rad', self.wide_23)
if angle == np.pi * 4 / 3:
# logging.info('cutRay applied to strut at angle '+str(angle))
self._cutRay(pupil, (x, y), angle, subaruStrutThick, 'rad', self.wide_43)
# cut out slit shadow
self._cutRay(pupil, (2, slitFrac_dy / 18), -np.pi, subaruSlit * 1.05, 'rad')
# cut out slit holder shadow
# subaruSlit/3 is roughly the width of the holder
self._cutRay(pupil, (self.slitHolder_frac_dx / 18, 1), -np.pi / 2, subaruSlit * 0.3, 'rad')
if self.verbosity == 1:
logging.info('Finished with getPupil')
return pupil
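# Illustrative call (parameter values are assumptions chosen only to show the
# interface; the values actually used in fits come from initParams below):
#
# _pfs_factory = PFSPupilFactory(pupilSize=0.14, npix=1024, input_angle=np.pi / 2,
#                                detFrac=0.65, strutFrac=0.07, slitFrac=0.05,
#                                slitFrac_dy=0.0, x_fiber=0.0, y_fiber=0.0,
#                                effective_ilum_radius=0.85, frd_sigma=0.02,
#                                frd_lorentz_factor=0.5, det_vert=1.0,
#                                slitHolder_frac_dx=0.0)
# _pupil = _pfs_factory.getPupil(np.array([0.0, 0.0]))
# _fraction_illuminated = np.mean(_pupil.illuminated)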
class Pupil_misalign(object):
"""Apply misaligment correction to the illumination of the pupil
Developed by <NAME> (Caltech)
Copied here without modifications
"""
def __init__(self, radiusvalues, imageradius, sigtotp, misalign):
self.radiusvalues = radiusvalues
self.imageradius = imageradius
self.sigtotp = sigtotp
self.misalign = misalign
def wapp(self, A):
# Approximation function by <NAME> to approximate and correct for the
# widening of width due to the angular misalignment convolution. This
# is used to basically scale the contribution of angular misalignment and FRD
# A = angmis/sigFRD
wappA = np.sqrt(1 + A * A * (1 + A * A) / (2 + 1.5 * A * A))
return wappA
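# In formula form (assuming A = angmis / sigFRD as documented above):
# wapp(A) = sqrt(1 + A^2 * (1 + A^2) / (2 + 1.5 * A^2)),
# so wapp(0) = 1 and wapp(A) ~ A / sqrt(1.5) for large A.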
def fcorr(self, x, A):
# The function scaled so that it keeps the same (approximate) width value
# after angular convolution
correctedfam = self.fcon(x * self.wapp(A), A)
return correctedfam
def fcon(self, x, A):
# For more detail about this method, see "Analyzing Radial Profiles for FRD
# and Angular Misalignment", by <NAME>unn, 16/06/13.
wt = [0.1864, 0.1469, 0.1134, 0.1066, 0.1134, 0.1469, 0.1864] # from <NAME>'s white paper,
# wt contains the normalized integrals under the angular misalignment
# convolution kernel, i.e., C(1-(x/angmisp)^2)^{-1/2} for |x|<angmisp and 0
# elsewhere. Note that the edges' centers are at +/- a, so they are
# integrated over an effective half of the length of the others.
temp = np.zeros(np.size(x))
for index in range(7):
temp = temp + wt[index] * self.ndfc(x + (index - 3) / 3 * A)
angconvolved = temp
return angconvolved
def ndfc(self, x):
# Standard model dropoff from a Gaussian convolution, normalized to brightness 1,
# radius (rh) 0, and sigTOT 1
# logging.info(len(x))
ndfcfun = 1 - (0.5 * erf(x / np.sqrt(2)) + 0.5)
return ndfcfun
def FA(self, r, rh, sigTOT, A):
# Function that takes all significant variables of the dropoff and
# normalizes the curve to be comparable to ndfc
# r = vector of radius values, in steps of pixels
# rh = radius of half-intensity. Effectively the size of the radius of the dropoff
# sigTOT = total width of the convolution kernel that recreates the width of the dropoff
# between 85% and 15% illumination. Effectively just think of this as sigma
# A = angmis/sigFRD, that is, the ratio between the angular misalignment
# and the sigma due to only FRD. Usually this is on the order of 1-3.
FitwithAngle = self.fcorr((r - rh) / sigTOT, A)
return FitwithAngle
def __call__(self):
no_mis = self.FA(self.radiusvalues, self.imageradius, self.sigtotp, 0)
with_mis = self.FA(self.radiusvalues, self.imageradius, self.sigtotp, self.misalign)
dif_due_to_mis = with_mis - no_mis
return dif_due_to_mis
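# Hedged example (values are placeholders, not from the original source): the
# class is evaluated by calling the instance, which returns the radial
# correction added to the FRD-only illumination profile in getPupil above.
#
# _radii = np.linspace(0, 600, 601)
# _mis = Pupil_misalign(_radii, imageradius=300, sigtotp=25, misalign=2)
# _correction = _mis()  # array with the same length as _radii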
class ZernikeFitterPFS(object):
"""Create a model images for PFS
Despite its name, it does not actually ``fits'' the paramters describing the donuts,
it ``just'' creates the images
The final image is made by the convolution of
1. an OpticalPSF (constructed using FFT)
2. an input fiber image
3. and other convolutions such as CCD charge diffusion
The OpticalPSF part includes
1.1. description of pupil
1.2. specification of an arbitrary number of zernike wavefront aberrations
This code uses lmfit to initialize the parameters.
Calls Psf_position
Calls Pupil classes (which ones?)
Called by LN_PFS_Single (function constructModelImage_PFS_naturalResolution)
"""
def __init__(self, image=np.ones((20, 20)), image_var=np.ones((20, 20)),
image_mask=None, pixelScale=20.76, wavelength=794,
diam_sic=139.5327e-3, npix=1536, pupilExplicit=None,
wf_full_Image=None,
ilum_Image=None, dithering=1, save=None,
pupil_parameters=None, use_pupil_parameters=None, use_optPSF=None, use_wf_grid=None,
zmaxInit=None, extraZernike=None, simulation_00=None, verbosity=None,
double_sources=None, double_sources_positions_ratios=None, test_run=None,
explicit_psf_position=None, use_only_chi=False, use_center_of_flux=False,
PSF_DIRECTORY=None, *args):
"""
Parameters
----------
image: `np.array`, (N, N)
image that you wish to model
if you do not pass the image that you wish to compare,
the algorithm will default to creating 20x20 image that has
value of '1' everywhere
image_var: `np.array`, (N, N)
variance image
if you do not pass the variance image,
the algorithm will default to creating 20x20 image that has
value of '1' everywhere
image_mask: `np.array`, (N, N)
mask image
pixelScale: `float`
pixel scale in arcseconds
This is size of the pixel in arcsec for PFS red arm in focus
calculated with http://www.wilmslowastro.com/software/formulae.htm
pixel size in microns/focal length in mm x 206.3
pixel size = 15 microns, focal length = 149.2 mm
(138 aperture x 1.1 f number)
wavelength: `float`
wavelength of the psf [nm]
if you do not pass the value for wavelength it will default to 794 nm,
which is roughly in the middle of the red detector
diam_sic: `float`
size of the exit pupil [m]
Exit pupil size in focus, default is 139.5237e-3 meters
(taken from Zemax)
npix: `int`
size of 2d array containing exit pupil illumination
pupilExplicit: `np.array`, (Np, Np)
if available, uses this image for pupil instead of
creating it from supplied parameters
wf_full_Image: `np.array`, (Np, Np)
wavefront image
if available, uses this image for wavefront instead of
creating it from supplied parameters
dithering: `int`
dithering scale (most likely 1 or 2)
save: `int`
if 1, save various intermediate results, for testing purposes
needs to set up also PSF_DIRECTORY
use_optPSF: `np.array`, (Np, Np)
if provided skip creation of optical psf, only do postprocessing
use_wf_grid: `np.array`, (Ny, Nx)
if provided, use this explicit wavefront map
zmaxInit: `int`
highest Zernike order (11 or 22)
extraZernike: `np.array`, (N)
if provided, simulated Zernike orders higher than 22
simulation_00: `np.array`, (2,)
places optical center at the center of the final image
verbosity: `int`
verbosity during evaluations
double_sources:
is there a second source present in the image
double_sources_positions_ratios: `np.array`, (2,)
initial guess for the position and strength of the second source
explicit_psf_position: `np.array`, (2,)
explicit position where to place optical psf
use_only_chi: `bool`
if True, fit to minimize np.abs(chi), and not chi**2
use_center_of_flux: `bool`
if True, fit to minimize the distance between the center of flux
for the model and the input image
PSF_DIRECTORY: `str`
where will intermediate outputs be saved for testing purposes
Notes
----------
Creates a model image that is fitted to the input science image
The model image is made by the convolution of
1. an OpticalPSF (constructed using FFT)
created with _getOptPsf_naturalResolution
The OpticalPSF part includes
1.1. description of pupil
created with get_Pupil
1.2. specification of an arbitrary number of
zernike wavefront aberrations,
which are input to galsim.phase_screens.OpticalScreen
2. an input fiber image and other convolutions such as
CCD charge diffusion created with _optPsf_postprocessing
This code uses lmfit to initialize the parameters.
Calls class PsfPosition
Calls class PFSPupilFactory
Examples
----------
Simple example with initial parameters, changing only one parameter
>>> zmax = 22
>>> single_image_analysis = ZernikeFitterPFS(zmaxInit = zmax,
verbosity=1)
>>> single_image_analysis.initParams()
>>> single_image_analysis.params['detFrac'] =\
lmfit.Parameter(name='detFrac', value=0.70)
>>> resulting_image, psf_pos =\
single_image_analysis.constructModelImage_PFS_naturalResolution()
"""
self.image = image
self.image_var = image_var
if image_mask is None:
image_mask = np.zeros(image.shape)
self.image_mask = image_mask
self.wavelength = wavelength
self.diam_sic = diam_sic
self.npix = npix
self.dithering = dithering
self.pixelScale = pixelScale
self.pixelScale_effective = self.pixelScale / dithering
if save in (None, 0):
save = None
else:
save = 1
self.save = save
self.use_optPSF = use_optPSF
# puilExplicit can be used to pass explicitly the image of the pupil
# instead of creating it from the supplied parameters
if pupilExplicit is None:
pupilExplicit = False
self.pupilExplicit = pupilExplicit
self.pupil_parameters = pupil_parameters
self.use_pupil_parameters = use_pupil_parameters
self.args = args
self.use_wf_grid = use_wf_grid
self.zmax = zmaxInit
self.simulation_00 = simulation_00
if self.simulation_00:
self.simulation_00 = 1
self.extraZernike = extraZernike
self.verbosity = verbosity
self.double_sources = double_sources
self.double_sources_positions_ratios = double_sources_positions_ratios
self.test_run = test_run
self.explicit_psf_position = explicit_psf_position
self.use_only_chi = use_only_chi
self.use_center_of_flux = use_center_of_flux
self.flux = float(np.sum(image))
try:
if not explicit_psf_position:
self.explicit_psf_position = None
except BaseException:
pass
self.PSF_DIRECTORY = PSF_DIRECTORY
############################################################
if self.PSF_DIRECTORY is None:
# names of default directories where I often work
if socket.gethostname() == 'IapetusUSA':
self.PSF_DIRECTORY = '/Volumes/Saturn_USA/PFS/'
elif socket.gethostname() == 'pfsa-usr01-gb.subaru.nao.ac.jp' or \
socket.gethostname() == 'pfsa-usr02-gb.subaru.nao.ac.jp':
self.PSF_DIRECTORY = '/work/ncaplar/'
else:
self.PSF_DIRECTORY = '/tigress/ncaplar/PFS/'
if self.PSF_DIRECTORY is not None:
self.TESTING_FOLDER = self.PSF_DIRECTORY + 'Testing/'
self.TESTING_PUPIL_IMAGES_FOLDER = self.TESTING_FOLDER + 'Pupil_Images/'
self.TESTING_WAVEFRONT_IMAGES_FOLDER = self.TESTING_FOLDER + 'Wavefront_Images/'
self.TESTING_FINAL_IMAGES_FOLDER = self.TESTING_FOLDER + 'Final_Images/'
if self.verbosity == 1:
# check the versions of the most important libraries
logging.info('np.__version__' + str(np.__version__))
logging.info('scipy.__version__' + str(scipy.__version__))
def initParams(
self,
z4Init=None,
detFracInit=None,
strutFracInit=None,
focalPlanePositionInit=None,
slitFracInit=None,
slitFrac_dy_Init=None,
wide_0Init=None,
wide_23Init=None,
wide_43Init=None,
radiometricEffectInit=None,
radiometricExponentInit=None,
x_ilumInit=None,
y_ilumInit=None,
pixel_effectInit=None,
backgroundInit=None,
x_fiberInit=None,
y_fiberInit=None,
effective_ilum_radiusInit=None,
frd_sigmaInit=None,
frd_lorentz_factorInit=None,
misalignInit=None,
det_vertInit=None,
slitHolder_frac_dxInit=None,
grating_linesInit=None,
scattering_slopeInit=None,
scattering_amplitudeInit=None,
fiber_rInit=None,
fluxInit=None):
"""Initialize lmfit Parameters object.
Allows to set up all parameters describing the pupil and
Zernike parameter (up to z22) explicitly. If any value is not passed,
it will be substituted by a default value (specified below).
Parameters
----------
zmax: `int`
Total number of Zernike aberrations used (11 or 22)
Possible to add more with extra_zernike parameter
z4Init: `float`
Initial Z4 aberration value in waves (that is 2*np.pi*wavelengths)
# pupil parameters
detFracInit: `float`
Value determining how much of the exit pupil is obscured by the
central obscuration (detector)
strutFracInit: `float`
Value determining how much of the exit pupil is obscured
by a single strut
focalPlanePositionInit: (`float`, `float`)
2-tuple for position of the central obscuration(detector)
in the focal plane
slitFracInit: `float`
Value determining how much of the exit pupil is obscured by slit
slitFrac_dy_Init: `float`
Value determining what is the vertical position of the slit
in the exit pupil
# parameters describing individual struts
wide_0Init: `float`
Parameter describing widening of the strut at 0 degrees
wide_23Init: `float`
Parameter describing widening of the top-left strut
wide_43Init: `float`
Parameter describing widening of the bottom-left strut
#non-uniform illumination
radiometricEffectInit: `float`
parameter describing non-uniform illumination of the pupil
(1-params['radiometricEffect']**2*r**2)**\
(params['radiometricExponent']) [DEPRECATED]
radiometricExponentInit: `float`
parameter describing non-uniform illumination of the pupil
(1-params['radiometricEffect']**2*r**2)\
**(params['radiometricExponent'])
x_ilumInit: `float`
x-position of the center of illumination
of the exit pupil [DEPRECATED]
y_ilumInit: `float`
y-position of the center of illumination
of the exit pupil [DEPRECATED]
# illumination due to fiber, parameters
        x_fiberInit: `float`
            position of the fiber misalignment in the x direction
        y_fiberInit: `float`
            position of the fiber misalignment in the y direction
effective_ilum_radiusInit: `float`
fraction of the maximal radius of the illumination
of the exit pupil that is actually illuminated
        frd_sigmaInit: `float`
            sigma of Gaussian convolving only outer edge, mimicking FRD
        frd_lorentz_factorInit: `float`
            strength of the lorentzian factor describing wings
            of the pupil illumination
        misalignInit: `float`
            amount of misalignment in the illumination
        # further pupil parameters
        det_vertInit: `float`
            multiplicative factor determining vertical size
            of the detector obscuration
        slitHolder_frac_dxInit: `float`
            dx position of slit holder
# convolving (postprocessing) parameters
        grating_linesInit: `int`
            number of effective lines in the grating
scattering_slopeInit: `float`
slope of scattering
scattering_amplitudeInit: `float`
amplitude of scattering compared to optical PSF
pixel_effectInit: `float`
sigma describing charge diffusion effect [in units of 15 microns]
fiber_rInit: `float`
radius of perfect tophat fiber, as seen on the detector
[in units of 15 microns]
fluxInit: `float`
total flux in generated image compared to input image
(needs to be 1 or very close to 1)
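        Examples
        ----------
        A minimal usage sketch (illustrative only; assumes ``model`` is an
        already-constructed ZernikeFitterPFS instance, and any parameter not
        passed falls back to the defaults listed above):
        >>> model.initParams(z4Init=0.5, detFracInit=0.66)  # doctest: +SKIP
        >>> model.params['z4'].value  # doctest: +SKIP
        0.5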
"""
if self.verbosity == 1:
logging.info(' ')
logging.info('Initializing ZernikeFitterPFS')
logging.info('Verbosity parameter is: ' + str(self.verbosity))
logging.info('Highest Zernike polynomial is (zmax): ' + str(self.zmax))
params = lmfit.Parameters()
# Zernike parameters
z_array = []
if z4Init is None:
params.add('z4', 0.0)
else:
params.add('z4', z4Init)
for i in range(5, self.zmax + 1):
params.add('z{}'.format(i), 0.0)
# pupil parameters
if detFracInit is None:
params.add('detFrac', 0.65)
else:
params.add('detFrac', detFracInit)
if strutFracInit is None:
params.add('strutFrac', 0.07)
else:
params.add('strutFrac', strutFracInit)
if focalPlanePositionInit is None:
params.add('dxFocal', 0.0)
params.add('dyFocal', 0.0)
else:
params.add('dxFocal', focalPlanePositionInit[0])
params.add('dyFocal', focalPlanePositionInit[1])
if slitFracInit is None:
params.add('slitFrac', 0.05)
else:
params.add('slitFrac', slitFracInit)
if slitFrac_dy_Init is None:
params.add('slitFrac_dy', 0)
else:
params.add('slitFrac_dy', slitFrac_dy_Init)
# parameters dsecribing individual struts
if wide_0Init is None:
params.add('wide_0', 0)
else:
params.add('wide_0', wide_0Init)
if wide_23Init is None:
params.add('wide_23', 0)
else:
params.add('wide_23', wide_23Init)
if wide_43Init is None:
params.add('wide_43', 0)
else:
params.add('wide_43', wide_43Init)
# non-uniform illumination
if radiometricExponentInit is None:
params.add('radiometricExponent', 0.25)
else:
params.add('radiometricExponent', radiometricExponentInit)
if radiometricEffectInit is None:
params.add('radiometricEffect', 0)
else:
params.add('radiometricEffect', radiometricEffectInit)
if x_ilumInit is None:
params.add('x_ilum', 1)
else:
params.add('x_ilum', x_ilumInit)
if y_ilumInit is None:
params.add('y_ilum', 1)
else:
params.add('y_ilum', y_ilumInit)
# illumination due to fiber, parameters
        if x_fiberInit is None:
params.add('x_fiber', 1)
else:
params.add('x_fiber', x_fiberInit)
if y_fiberInit is None:
params.add('y_fiber', 0)
else:
params.add('y_fiber', y_fiberInit)
if effective_ilum_radiusInit is None:
params.add('effective_ilum_radius', 0.9)
else:
params.add('effective_ilum_radius', effective_ilum_radiusInit)
if frd_sigmaInit is None:
params.add('frd_sigma', 0.02)
else:
params.add('frd_sigma', frd_sigmaInit)
if frd_lorentz_factorInit is None:
params.add('frd_lorentz_factor', 0.5)
else:
params.add('frd_lorentz_factor', frd_lorentz_factorInit)
if misalignInit is None:
params.add('misalign', 0)
else:
params.add('misalign', misalignInit)
# further pupil parameters
if det_vertInit is None:
params.add('det_vert', 1)
else:
params.add('det_vert', det_vertInit)
if slitHolder_frac_dxInit is None:
params.add('slitHolder_frac_dx', 0)
else:
params.add('slitHolder_frac_dx', slitHolder_frac_dxInit)
# convolving (postprocessing) parameters
if grating_linesInit is None:
params.add('grating_lines', 100000)
else:
params.add('grating_lines', grating_linesInit)
if scattering_slopeInit is None:
params.add('scattering_slope', 2)
else:
params.add('scattering_slope', scattering_slopeInit)
if scattering_amplitudeInit is None:
params.add('scattering_amplitude', 10**-2)
else:
params.add('scattering_amplitude', scattering_amplitudeInit)
if pixel_effectInit is None:
params.add('pixel_effect', 0.35)
else:
params.add('pixel_effect', pixel_effectInit)
if fiber_rInit is None:
params.add('fiber_r', 1.8)
else:
params.add('fiber_r', fiber_rInit)
if fluxInit is None:
params.add('flux', 1)
else:
params.add('flux', fluxInit)
self.params = params
self.optPsf = None
self.z_array = z_array
def constructModelImage_PFS_naturalResolution(
self,
params=None,
shape=None,
pixelScale=None,
use_optPSF=None,
extraZernike=None,
return_intermediate_images=False):
"""Construct model image given the set of parameters
Parameters
----------
params : `lmfit.Parameters` object or python dictionary
Parameters describing model; None to use self.params
shape : `(int, int)`
Shape for model image; None to use the shape of self.maskedImage
pixelScale : `float`
Pixel scale in arcseconds to use for model image;
None to use self.pixelScale.
use_optPSF : `bool`
If True, use previously generated optical PSF,
skip _getOptPsf_naturalResolution, and conduct only postprocessing
extraZernike : `np.array`, (N,)
Zernike parameteres beyond z22
return_intermediate_images : `bool`
If True, return intermediate images created during the run
This is in order to help with debugging and inspect
the images created during the process
        Returns
----------
(if not return_intermediate_images)
optPsf_final : `np.array`, (N, N)
Final model image
psf_position : np.array, (2,)
Position where image is centered
(if return_intermediate_images)
optPsf_final : `np.array`, (N, N)
Final model image
ilum : `np.array`, (N, N)
Illumination array
wf_grid_rot : `np.array`, (N, N)
Wavefront array
psf_position : np.array, (2,)
Position where image is centered
Notes
----------
Calls _getOptPsf_naturalResolution and optPsf_postprocessing
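        Examples
        ----------
        A minimal usage sketch (illustrative only; assumes ``model`` has been
        constructed and ``model.initParams()`` has already been called):
        >>> optPsf_final, psf_position = model.constructModelImage_PFS_naturalResolution()  # doctest: +SKIP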
"""
if self.verbosity == 1:
logging.info(' ')
logging.info('Entering constructModelImage_PFS_naturalResolution')
if params is None:
params = self.params
if shape is None:
shape = self.image.shape
if pixelScale is None:
pixelScale = self.pixelScale
        logging.info('pixelScale: ' + str(pixelScale))
try:
parameter_values = params.valuesdict()
except AttributeError:
parameter_values = params
use_optPSF = self.use_optPSF
if extraZernike is None:
pass
else:
extraZernike = list(extraZernike)
self.extraZernike = extraZernike
# if you did not pass pure optical psf image, create one here
if use_optPSF is None:
# change outputs depending on if you want intermediate results
if not return_intermediate_images:
optPsf = self._getOptPsf_naturalResolution(
parameter_values, return_intermediate_images=return_intermediate_images)
else:
optPsf, ilum, wf_grid_rot = self._getOptPsf_naturalResolution(
parameter_values, return_intermediate_images=return_intermediate_images)
else:
# if you claimed to have supplied optical psf image,
# but none is provided still create one
if self.optPsf is None:
if not return_intermediate_images:
optPsf = self._getOptPsf_naturalResolution(
parameter_values, return_intermediate_images=return_intermediate_images)
else:
optPsf, ilum, wf_grid_rot = self._getOptPsf_naturalResolution(
parameter_values, return_intermediate_images=return_intermediate_images)
self.optPsf = optPsf
else:
optPsf = self.optPsf
# at the moment, no difference in optPsf_postprocessing depending on return_intermediate_images
optPsf_final, psf_position = self._optPsf_postprocessing(
optPsf, return_intermediate_images=return_intermediate_images)
if self.save == 1:
np.save(self.TESTING_FINAL_IMAGES_FOLDER + 'optPsf', optPsf)
np.save(self.TESTING_FINAL_IMAGES_FOLDER + 'optPsf_final', optPsf_final)
else:
pass
        if self.verbosity == 1:
            logging.info('Finished with constructModelImage_PFS_naturalResolution')
            logging.info(' ')
        if not return_intermediate_images:
            return optPsf_final, psf_position
        if return_intermediate_images:
            return optPsf_final, ilum, wf_grid_rot, psf_position
def _optPsf_postprocessing(self, optPsf, return_intermediate_images=False):
"""Apply postprocessing to the pure optical psf image
Parameters
----------
optPsf : `np.array`, (N, N)
Optical image, only psf
return_intermediate_images : `bool`
If True, return intermediate images created during the run
This is potentially in order to help with debugging and inspect
the images created during the process
Returns
----------
(At the moment, the output is the same no matter what
return_intermediate_images is, but there is a possibility
to add intermediate outputs)
optPsf_final : `np.array`, (N, N)
Final model image
psf_position : `np.array`, (2,)
Position where the image is centered
Notes
----------
Takes optical psf and ``postprocesses`` it to generate final image.
The algorithm first reduces the oversampling and cuts the central part
of the image. This is done to speed up the calculations.
Then we apply various effects that are separate from
the pure optical PSF considerations.
We then finish with the centering algorithm to move our created image
        to fit the input science image, invoking the PsfPosition class.
The effects we apply are
1. scattered light
function apply_scattered_light
2. convolution with fiber
function convolve_with_fiber
        3. CCD diffusion
function convolve_with_CCD_diffusion
4. grating effects
function convolve_with_grating
5. centering
via class PsfPosition
"""
time_start_single = time.time()
if self.verbosity == 1:
logging.info(' ')
logging.info('Entering optPsf_postprocessing')
params = self.params
shape = self.image.shape
        # all of the parameters needed for the creation of the image
        param_values = params.valuesdict()
# how much is my generated image oversampled compared to final image
oversampling_original = (self.pixelScale_effective) / self.scale_ModelImage_PFS_naturalResolution
if self.verbosity == 1:
logging.info('Shape of optPsf: ' + str(optPsf.shape))
logging.info('Value of oversampling_original: ' + str(oversampling_original))
# determine the size, so that from the huge generated image we can cut out
# only the central portion (1.4 times larger than the size of actual
# image)
size_of_central_cut = int(oversampling_original * self.image.shape[0] * 1.4)
if size_of_central_cut > optPsf.shape[0]:
# if larger than size of image, cut the image
# fail if not enough space
size_of_central_cut = optPsf.shape[0]
if self.verbosity == 1:
logging.info('size_of_central_cut modified to ' + str(size_of_central_cut))
assert int(oversampling_original * self.image.shape[0] * 1.0) < optPsf.shape[0]
assert size_of_central_cut <= optPsf.shape[0]
if self.verbosity == 1:
logging.info('size_of_central_cut: ' + str(size_of_central_cut))
# cut part which you need to form the final image
# set oversampling to 1 so you are not resizing the image, and dx=0 and
# dy=0 so that you are not moving around, i.e., you are cutting the
# central region
optPsf_cut = PsfPosition.cut_Centroid_of_natural_resolution_image(
image=optPsf, size_natural_resolution=size_of_central_cut + 1, oversampling=1, dx=0, dy=0)
if self.verbosity == 1:
logging.info('optPsf_cut.shape' + str(optPsf_cut.shape))
        # we want to reduce oversampling to roughly 10 to make things computationally easier
        # if oversampling_original is smaller than 20 (in the case of dithered images),
        # make the resolution coarser by a factor of 2
        # otherwise set it to 11
if oversampling_original < 20:
oversampling = np.round(oversampling_original / 2)
else:
oversampling = 11
if self.verbosity == 1:
logging.info('oversampling:' + str(oversampling))
        # what will be the size of the image after you resize it from the
        # ``oversampling_original'' to the ``oversampling'' ratio
        size_of_optPsf_cut_downsampled = int(
            np.round(size_of_central_cut / (oversampling_original / oversampling)))
if self.verbosity == 1:
logging.info('size_of_optPsf_cut_downsampled: ' + str(size_of_optPsf_cut_downsampled))
# make sure that optPsf_cut_downsampled is an array which has an odd size
# - increase size by 1 if needed
if (size_of_optPsf_cut_downsampled % 2) == 0:
im1 = galsim.Image(optPsf_cut, copy=True, scale=1)
im1.setCenter(0, 0)
interpolated_image = galsim._InterpolatedImage(im1, x_interpolant=galsim.Lanczos(5, True))
optPsf_cut_downsampled = interpolated_image.\
drawImage(nx=size_of_optPsf_cut_downsampled + 1, ny=size_of_optPsf_cut_downsampled + 1,
scale=(oversampling_original / oversampling), method='no_pixel').array
else:
im1 = galsim.Image(optPsf_cut, copy=True, scale=1)
im1.setCenter(0, 0)
interpolated_image = galsim._InterpolatedImage(im1, x_interpolant=galsim.Lanczos(5, True))
optPsf_cut_downsampled = interpolated_image.\
drawImage(nx=size_of_optPsf_cut_downsampled, ny=size_of_optPsf_cut_downsampled,
scale=(oversampling_original / oversampling), method='no_pixel').array
if self.verbosity == 1:
logging.info('optPsf_cut_downsampled.shape: ' + str(optPsf_cut_downsampled.shape))
# gives middle point of the image to used for calculations of scattered light
# mid_point_of_optPsf_cut_downsampled = int(optPsf_cut_downsampled.shape[0] / 2)
# gives the size of one pixel in optPsf_downsampled in microns
# one physical pixel is 15 microns
# effective size is 15 / dithering
# size_of_pixels_in_optPsf_cut_downsampled = (15 / self.dithering) / oversampling
# size of the created optical PSF images in microns
# size_of_optPsf_cut_in_Microns = size_of_pixels_in_optPsf_cut_downsampled * \
# (optPsf_cut_downsampled.shape[0])
# if self.verbosity == 1:
# logging.info('size_of_optPsf_cut_in_Microns: ' + str(size_of_optPsf_cut_in_Microns))
if self.verbosity == 1:
logging.info('Postprocessing parameters are:')
logging.info(str(['grating_lines', 'scattering_slope', 'scattering_amplitude',
'pixel_effect', 'fiber_r']))
logging.info(str([param_values['grating_lines'], param_values['scattering_slope'],
param_values['scattering_amplitude'], param_values['pixel_effect'],
param_values['fiber_r']]))
##########################################
# 1. scattered light
optPsf_cut_downsampled_scattered = self.apply_scattered_light(optPsf_cut_downsampled,
oversampling,
param_values['scattering_slope'],
param_values['scattering_amplitude'],
dithering=self.dithering)
##########################################
# 2. convolution with fiber
optPsf_cut_fiber_convolved = self.convolve_with_fiber(optPsf_cut_downsampled_scattered,
oversampling,
param_values['fiber_r'],
dithering=self.dithering)
##########################################
        # 3. CCD diffusion
optPsf_cut_pixel_response_convolved = self.convolve_with_CCD_diffusion(optPsf_cut_fiber_convolved,
oversampling,
param_values['pixel_effect'],
dithering=self.dithering)
##########################################
# 4. grating effects
optPsf_cut_grating_convolved = self.convolve_with_grating(optPsf_cut_pixel_response_convolved,
oversampling,
self.wavelength,
param_values['grating_lines'],
dithering=self.dithering)
##########################################
# 5. centering
# This is the part which creates the final image
# the algorithm finds the best downsampling combination automatically
if self.verbosity == 1:
logging.info('Are we invoking double sources (1 or True if yes): ' + str(self.double_sources))
logging.info('Double source position/ratio is:' + str(self.double_sources_positions_ratios))
# initialize the class which does the centering -
# TODO: the separation between the class and the main function in the class,
# ``find_single_realization_min_cut'', is a bit blurry and unsatisfactory
# this needs to be improved
single_Psf_position = PsfPosition(optPsf_cut_grating_convolved,
int(round(oversampling)),
shape[0],
simulation_00=self.simulation_00,
verbosity=self.verbosity,
save=self.save,
PSF_DIRECTORY=self.PSF_DIRECTORY)
time_end_single = time.time()
if self.verbosity == 1:
logging.info('Time for postprocessing up to single_Psf_position protocol is: '
+ str(time_end_single - time_start_single))
# run the code for centering
time_start_single = time.time()
optPsf_final, psf_position =\
single_Psf_position.find_single_realization_min_cut(optPsf_cut_grating_convolved,
int(round(oversampling)),
shape[0],
self.image,
self.image_var,
self.image_mask,
v_flux=param_values['flux'],
double_sources=self.double_sources,
double_sources_positions_ratios= # noqa: E251
self.double_sources_positions_ratios,
verbosity=self.verbosity,
explicit_psf_position= # noqa: E251
self.explicit_psf_position,
use_only_chi=self.use_only_chi,
use_center_of_flux=self.use_center_of_flux)
time_end_single = time.time()
if self.verbosity == 1:
logging.info('Time for single_Psf_position protocol is '
+ str(time_end_single - time_start_single))
if self.verbosity == 1:
            logging.info('Successfully created optPsf_final')
if self.save == 1:
np.save(self.TESTING_FINAL_IMAGES_FOLDER + 'optPsf_cut', optPsf_cut)
np.save(self.TESTING_FINAL_IMAGES_FOLDER + 'optPsf_cut_downsampled', optPsf_cut_downsampled)
np.save(self.TESTING_FINAL_IMAGES_FOLDER + 'optPsf_cut_downsampled_scattered',
optPsf_cut_downsampled_scattered)
np.save(self.TESTING_FINAL_IMAGES_FOLDER + 'optPsf_cut_fiber_convolved',
optPsf_cut_fiber_convolved)
np.save(self.TESTING_FINAL_IMAGES_FOLDER + 'optPsf_cut_pixel_response_convolved',
optPsf_cut_pixel_response_convolved)
np.save(self.TESTING_FINAL_IMAGES_FOLDER + 'optPsf_cut_grating_convolved',
optPsf_cut_grating_convolved)
if self.verbosity == 1:
logging.info('Finished with optPsf_postprocessing')
logging.info(' ')
# TODO: at the moment, the output is the same but there is a possibility to add intermediate outputs
if not return_intermediate_images:
return optPsf_final, psf_position
if return_intermediate_images:
return optPsf_final, psf_position
def apply_scattered_light(self, image, oversampling,
scattering_slope, scattering_amplitude, dithering):
"""Add scattered light to optical psf
Parameters
----------
image : `np.array`, (N, N)
input image
oversampling: `int`
how oversampled is `image`
scattering_slope: `float`
slope of the scattered light
scattering_amplitude: `float`
amplitude of the scattered light
dithering: `int`
dithering
Returns
----------
        image_scattered : `np.array`, (N, N)
            input image with the scattered-light component added
Notes
----------
Assumes that one physical pixel is 15 microns
so that effective size of the pixels is 15 / dithering
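        As an illustrative worked example of that convention, with dithering=1
        and an oversampling of 10 the implied pixel size in microns is
        >>> (15 / 1) / 10
        1.5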
"""
size_of_pixels_in_image = (15 / self.dithering) / oversampling
# size of the created optical PSF images in microns
size_of_image_in_Microns = size_of_pixels_in_image * \
(image.shape[0])
# create grid to apply scattered light
pointsx = np.linspace(-(size_of_image_in_Microns - size_of_pixels_in_image) / 2,
(size_of_image_in_Microns - size_of_pixels_in_image) / 2,
num=image.shape[0],
dtype=np.float32)
pointsy = np.linspace(-(size_of_image_in_Microns - size_of_pixels_in_image) / 2,
(size_of_image_in_Microns - size_of_pixels_in_image) / 2,
num=image.shape[0]).astype(np.float32)
xs, ys = np.meshgrid(pointsx, pointsy)
r0 = np.sqrt((xs - 0) ** 2 + (ys - 0) ** 2) + .01
# creating scattered light
scattered_light_kernel = (r0**(-scattering_slope))
scattered_light_kernel[r0 < 7.5] = 7.5**(-scattering_slope)
scattered_light_kernel[scattered_light_kernel == np.inf] = 0
scattered_light_kernel = scattered_light_kernel * \
(scattering_amplitude) / (10 * np.max(scattered_light_kernel))
# convolve the psf with the scattered light kernel to create scattered light component
scattered_light = signal.fftconvolve(image, scattered_light_kernel, mode='same')
# add back the scattering to the image
image_scattered = image + scattered_light
return image_scattered
def convolve_with_fiber(self, image, oversampling, fiber_r, dithering):
"""Convolve optical psf with a fiber
Parameters
----------
image : `np.array`, (N, N)
input image
oversampling: `int`
how oversampled is `image`
fiber_r: `float`
radius of the fiber in pixel units
dithering: `int`
dithering
Returns
----------
image_fiber_convolved : `np.array`, (N, N)
image convolved with the fiber image
Notes
----------
"""
fiber = Tophat2DKernel(oversampling * fiber_r * dithering,
mode='oversample').array
# create array with zeros with size of the current image, which we will
# fill with fiber array in the middle
fiber_padded = np.zeros_like(image, dtype=np.float32)
mid_point_of_image = int(image.shape[0] / 2)
fiber_array_size = fiber.shape[0]
# fill the zeroes image with fiber here
fiber_padded[int(mid_point_of_image - fiber_array_size / 2) + 1:
int(mid_point_of_image + fiber_array_size / 2) + 1,
int(mid_point_of_image - fiber_array_size / 2) + 1:
int(mid_point_of_image + fiber_array_size / 2) + 1] = fiber
# convolve with the fiber
image_fiber_convolved = signal.fftconvolve(image, fiber_padded, mode='same')
return image_fiber_convolved
def convolve_with_CCD_diffusion(self, image, oversampling, pixel_effect, dithering):
"""Convolve optical psf with a ccd diffusion effect
Parameters
----------
image : `np.array`, (N, N)
input image
oversampling: `int`
how oversampled is `image`
pixel_effect: `float`
sigma of gaussian kernel convolving image
dithering: `int`
dithering
Returns
----------
image_pixel_response_convolved : `np.array`, (N, N)
image convolved with the ccd diffusion kernel
Notes
----------
Pixels are not perfect detectors
Charge diffusion in our optical CCDs, can be well described with a Gaussian
sigma that is around 7 microns (<NAME> - private communication).
        This is controlled in our code by the ``pixel_effect`` parameter
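        As an illustrative worked example, a 7 micron diffusion sigma expressed
        in the [units of 15 microns] convention of ``pixel_effect`` is roughly
        >>> round(7 / 15, 2)
        0.47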
"""
pixel_gauss = Gaussian2DKernel(oversampling * pixel_effect * dithering).array.astype(np.float32)
pixel_gauss_padded = np.pad(pixel_gauss, int((len(image) - len(pixel_gauss)) / 2),
'constant', constant_values=0)
# assert that gauss_padded array did not produce empty array
assert np.sum(pixel_gauss_padded) > 0
image_pixel_response_convolved = signal.fftconvolve(image, pixel_gauss_padded, mode='same')
return image_pixel_response_convolved
def convolve_with_grating(self, image, oversampling, wavelength, grating_lines, dithering):
"""Convolve optical psf with a grating effect
Parameters
----------
image : `np.array`, (N, N)
input image
oversampling: `int`
how oversampled is `image`
wavelength: `float`
central wavelength of the spot
grating_lines: `int`
effective number of grating lines in the spectrograph
dithering: `int`
dithering
Returns
----------
image_grating_convolved : `np.array`, (N, N)
image convolved with the grating effect
Notes
----------
This code assumes that 15 microns covers wavelength range of 0.07907 nm
(assuming that 4300 pixels in real detector uniformly covers 340 nm)
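        An illustrative check of that wavelength-per-pixel figure:
        >>> round(340 / 4300, 5)
        0.07907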
"""
grating_kernel = np.ones((image.shape[0], 1), dtype=np.float32)
for i in range(len(grating_kernel)):
grating_kernel[i] = Ifun16Ne((i - int(image.shape[0] / 2)) * 0.07907 * 10**-9
/ (dithering * oversampling) + wavelength * 10**-9,
wavelength * 10**-9, grating_lines)
grating_kernel = grating_kernel / np.sum(grating_kernel)
image_grating_convolved = signal.fftconvolve(image, grating_kernel, mode='same')
return image_grating_convolved
def _get_Pupil(self):
"""Create an image of the pupil
        Parameters
        ----------
        None; the pupil model is read from the current values in ``self.params``
        (an `lmfit.Parameters` object describing the pupil)
Returns
----------
pupil : `pupil`
Instance of class PFSPupilFactory
Notes
----------
Calls PFSPupilFactory class
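        Examples
        ----------
        A minimal sketch (illustrative only; this method is called internally
        once ``self.params`` has been initialized via initParams):
        >>> pupil = model._get_Pupil()  # doctest: +SKIP
        >>> pupil.illuminated.shape  # doctest: +SKIP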
"""
if self.verbosity == 1:
logging.info(' ')
logging.info('Entering _get_Pupil (function inside ZernikeFitterPFS)')
if self.verbosity == 1:
logging.info('Size of the pupil (npix): ' + str(self.npix))
Pupil_Image = PFSPupilFactory(
pupilSize=self.diam_sic,
npix=self.npix,
input_angle=np.pi / 2,
detFrac=self.params['detFrac'].value,
strutFrac=self.params['strutFrac'].value,
slitFrac=self.params['slitFrac'].value,
slitFrac_dy=self.params['slitFrac_dy'].value,
x_fiber=self.params['x_fiber'].value,
y_fiber=self.params['y_fiber'].value,
effective_ilum_radius=self.params['effective_ilum_radius'].value,
frd_sigma=self.params['frd_sigma'].value, # noqa: E
frd_lorentz_factor=self.params['frd_lorentz_factor'].value,
det_vert=self.params['det_vert'].value,
slitHolder_frac_dx=self.params['slitHolder_frac_dx'].value,
wide_0=self.params['wide_0'].value,
wide_23=self.params['wide_23'].value,
wide_43=self.params['wide_43'].value,
misalign=self.params['misalign'].value,
verbosity=self.verbosity)
point = [self.params['dxFocal'].value, self.params['dyFocal'].value] # noqa: E
pupil = Pupil_Image.getPupil(point)
if self.save == 1:
np.save(self.TESTING_PUPIL_IMAGES_FOLDER + 'pupil.illuminated',
pupil.illuminated.astype(np.float32))
if self.verbosity == 1:
logging.info('Finished with _get_Pupil')
return pupil
def _getOptPsf_naturalResolution(self, params, return_intermediate_images=False):
"""Returns optical PSF, given the initialized parameters
Parameters
----------
params : `lmfit.Parameters` object or python dictionary
            Parameters describing the model
return_intermediate_images : `bool`
If True, return intermediate images created during the run
This is in order to help with debugging and inspect
the images created during the process
Returns
----------
(if not return_intermediate_images)
img_apod : `np.array`
            Psf image, only optical components considered
(if return_intermediate_images)
# return the image, pupil, illumination applied to the pupil
img_apod : `np.array`
            Psf image, only optical components considered
ilum : `np.array`
Image showing the illumination of the pupil
wf_grid_rot : `np.array`
Image showing the wavefront across the pupil
Notes
----------
called by constructModelImage_PFS_naturalResolution
"""
if self.verbosity == 1:
logging.info(' ')
logging.info('Entering _getOptPsf_naturalResolution')
################################################################################
# pupil and illumination of the pupil
################################################################################
time_start_single_1 = time.time()
if self.verbosity == 1:
logging.info('use_pupil_parameters: ' + str(self.use_pupil_parameters))
            logging.info('pupil_parameters if you are explicitly passing use_pupil_parameters: '
+ str(self.pupil_parameters))
        # the index ``i`` below only enters through the ``.format(i)`` calls used when
        # constructing the ``pupil_parameters`` array; the linter warning
        # ('...'.format(...) has unused arguments at position(s): 0) is expected, since the
        # key strings contain no placeholders
i = 4
if self.use_pupil_parameters is None:
pupil_parameters = np.array([params['detFrac'.format(i)], # noqa: E
params['strutFrac'.format(i)], # noqa: E
params['dxFocal'.format(i)], # noqa: E
params['dyFocal'.format(i)], # noqa: E
params['slitFrac'.format(i)], # noqa: E
params['slitFrac_dy'.format(i)], # noqa: E
params['x_fiber'.format(i)], # noqa: E
params['y_fiber'.format(i)], # noqa: E
params['effective_ilum_radius'.format(i)], # noqa: E
params['frd_sigma'.format(i)], # noqa: E
params['frd_lorentz_factor'.format(i)], # noqa: E
params['det_vert'.format(i)], # noqa: E
params['slitHolder_frac_dx'.format(i)], # noqa: E
params['wide_0'.format(i)], # noqa: E
params['wide_23'.format(i)], # noqa: E
params['wide_43'.format(i)], # noqa: E
params['misalign'.format(i)]]) # noqa: E
self.pupil_parameters = pupil_parameters
else:
pupil_parameters = np.array(self.pupil_parameters)
diam_sic = self.diam_sic
if self.verbosity == 1:
logging.info(['detFrac', 'strutFrac', 'dxFocal', 'dyFocal', 'slitFrac', 'slitFrac_dy'])
logging.info(['x_fiber', 'y_fiber', 'effective_ilum_radius', 'frd_sigma',
'frd_lorentz_factor', 'det_vert', 'slitHolder_frac_dx'])
logging.info(['wide_0', 'wide_23', 'wide_43', 'misalign'])
logging.info('set of pupil_parameters I. : ' + str([params['detFrac'], params['strutFrac'],
params['dxFocal'], params['dyFocal'],
params['slitFrac'], params['slitFrac_dy']]))
logging.info('set of pupil_parameters II. : ' + str([params['x_fiber'], params['y_fiber'],
params['effective_ilum_radius'],
                                                              params['frd_sigma'],
params['frd_lorentz_factor'],
params['det_vert'],
params['slitHolder_frac_dx']]))
logging.info('set of pupil_parameters III. : ' + str([params['wide_0'], params['wide_23'],
params['wide_43'], params['misalign']]))
time_start_single_2 = time.time()
        # initialize galsim.Aperture class
pupil = self._get_Pupil()
aper = galsim.Aperture(
diam=pupil.size,
pupil_plane_im=pupil.illuminated.astype(np.float32),
pupil_plane_scale=pupil.scale,
pupil_plane_size=None)
if self.verbosity == 1:
if self.pupilExplicit is None:
logging.info('Requested pupil size is (pupil.size) [m]: ' + str(pupil.size))
logging.info('One pixel has size of (pupil.scale) [m]: ' + str(pupil.scale))
logging.info('Requested pupil has so many pixels (pupil_plane_im): '
+ str(pupil.illuminated.astype(np.int16).shape))
else:
logging.info('Supplied pupil size is (diam_sic) [m]: ' + str(self.diam_sic))
logging.info('One pixel has size of (diam_sic/npix) [m]: ' + str(self.diam_sic / self.npix))
logging.info('Requested pupil has so many pixels (pupilExplicit): '
+ str(self.pupilExplicit.shape))
time_end_single_2 = time.time()
if self.verbosity == 1:
logging.info('Time for _get_Pupil function is ' + str(time_end_single_2 - time_start_single_2))
time_start_single_3 = time.time()
# create array with pixels=1 if the area is illuminated and 0 if it is obscured
ilum = np.array(aper.illuminated, dtype=np.float32)
assert np.sum(ilum) > 0, str(self.pupil_parameters)
# gives size of the illuminated image
lower_limit_of_ilum = int(ilum.shape[0] / 2 - self.npix / 2)
higher_limit_of_ilum = int(ilum.shape[0] / 2 + self.npix / 2)
if self.verbosity == 1:
logging.info('lower_limit_of_ilum: ' + str(lower_limit_of_ilum))
logging.info('higher_limit_of_ilum: ' + str(higher_limit_of_ilum))
if self.pupilExplicit is None:
ilum[lower_limit_of_ilum:higher_limit_of_ilum,
lower_limit_of_ilum:higher_limit_of_ilum] = ilum[lower_limit_of_ilum:higher_limit_of_ilum,
lower_limit_of_ilum:higher_limit_of_ilum] *\
pupil.illuminated
else:
ilum[lower_limit_of_ilum:higher_limit_of_ilum,
lower_limit_of_ilum:higher_limit_of_ilum] = ilum[lower_limit_of_ilum:higher_limit_of_ilum,
lower_limit_of_ilum:higher_limit_of_ilum] *\
self.pupilExplicit.astype(np.float32)
if self.verbosity == 1:
            logging.info('Size after padding zeros to 2x size '
                         + 'and extra padding to get size suitable for FFT: '
                         + str(ilum.shape))
# maximum extent of pupil image in units of radius of the pupil, needed for next step
size_of_ilum_in_units_of_radius = ilum.shape[0] / self.npix
if self.verbosity == 1:
logging.info('size_of_ilum_in_units_of_radius: ' + str(size_of_ilum_in_units_of_radius))
        # do not calculate the ``radiometric effect'' (difference between entrance and exit pupil)
        # if the parameters are too small to make any difference
        # if that is the case just declare ``ilum_radiometric'' to be the same as ilum,
        # i.e., the illumination of the exit pupil is the same as the illumination of the entrance pupil
if params['radiometricExponent'] < 0.01 or params['radiometricEffect'] < 0.01:
if self.verbosity == 1:
                logging.info('skipping ``radiometric effect\'\' ')
ilum_radiometric = ilum
else:
if self.verbosity == 1:
logging.info('radiometric parameters are: ')
logging.info('x_ilum,y_ilum,radiometricEffect,radiometricExponent'
+ str([params['x_ilum'], params['y_ilum'],
params['radiometricEffect'], params['radiometricExponent']]))
# add the change of flux between the entrance and exit pupil
# end product is radiometricEffectArray
points = np.linspace(-size_of_ilum_in_units_of_radius,
size_of_ilum_in_units_of_radius, num=ilum.shape[0])
xs, ys = np.meshgrid(points, points)
_radius_coordinate = np.sqrt(
(xs - params['x_ilum'] * params['dxFocal']) ** 2
+ (ys - params['y_ilum'] * params['dyFocal']) ** 2)
            # change in v_0.14
            # illumination to which the radiometric effect has been applied, describing
            # the difference between entrance and exit pupil
radiometricEffectArray = (1 + params['radiometricEffect']
* _radius_coordinate**2)**(-params['radiometricExponent'])
ilum_radiometric = np.nan_to_num(radiometricEffectArray * ilum, 0)
# this is where you can introduce some apodization in the pupil image by using the line below
        # the apodization sigma is set so that in focus it is 0.75
# for larger images, scale according to the size of the input image which is to be FFT-ed
# 0.75 is an arbitrary number
apodization_sigma = ((len(ilum_radiometric)) / 1158)**0.875 * 0.75
# apodization_sigma=0.75
time_start_single_4 = time.time()
# old code where I applied Gaussian to the whole ilum image
# ilum_radiometric_apodized = gaussian_filter(ilum_radiometric, sigma=apodization_sigma)
# cut out central region, apply Gaussian on the center region and return to the full size image
        # done to speed up the calculation
# noqa: E128 in order to keep informative names
ilum_radiometric_center_region =\
ilum_radiometric[(lower_limit_of_ilum - int(np.ceil(3 * apodization_sigma))):
(higher_limit_of_ilum + int(np.ceil(3 * apodization_sigma))),
(lower_limit_of_ilum - int(np.ceil(3 * apodization_sigma))):
(higher_limit_of_ilum + int(np.ceil(3 * apodization_sigma)))]
ilum_radiometric_center_region_apodized = gaussian_filter(
ilum_radiometric_center_region, sigma=apodization_sigma)
ilum_radiometric_apodized = np.copy(ilum_radiometric)
ilum_radiometric_apodized[(lower_limit_of_ilum - int(np.ceil(3 * apodization_sigma))):
(higher_limit_of_ilum + int(np.ceil(3 * apodization_sigma))),
(lower_limit_of_ilum - int(np.ceil(3 * apodization_sigma))):
(higher_limit_of_ilum + int(np.ceil(3 * apodization_sigma)))] =\
ilum_radiometric_center_region_apodized # noqa E:122
time_end_single_4 = time.time()
if self.verbosity == 1:
logging.info('Time to apodize the pupil: ' + str(time_end_single_4 - time_start_single_4))
logging.info('type(ilum_radiometric_apodized)' + str(type(ilum_radiometric_apodized[0][0])))
# put pixels for which amplitude is less than 0.01 to 0
r_ilum_pre = np.copy(ilum_radiometric_apodized)
r_ilum_pre[ilum_radiometric_apodized > 0.01] = 1
r_ilum_pre[ilum_radiometric_apodized < 0.01] = 0
ilum_radiometric_apodized_bool = r_ilum_pre.astype(bool)
# manual creation of aper.u and aper.v (mimicking steps which were automatically done in galsim)
# this gives position information about each point in the exit pupil so we can apply wavefront to it
# aperu_manual=[]
# for i in range(len(ilum_radiometric_apodized_bool)):
# aperu_manual.append(np.linspace(-diam_sic*(size_of_ilum_in_units_of_radius/2),
# diam_sic*(size_of_ilum_in_units_of_radius/2),len(ilum_radiometric_apodized_bool), endpoint=True))
single_line_aperu_manual = np.linspace(-diam_sic * (size_of_ilum_in_units_of_radius / 2), diam_sic * (
size_of_ilum_in_units_of_radius / 2), len(ilum_radiometric_apodized_bool), endpoint=True)
aperu_manual = np.tile(
single_line_aperu_manual,
len(single_line_aperu_manual)).reshape(
len(single_line_aperu_manual),
len(single_line_aperu_manual))
# full grid
# u_manual=np.array(aperu_manual)
u_manual = aperu_manual
v_manual = np.transpose(aperu_manual)
# select only parts of the grid that are actually illuminated
u = u_manual[ilum_radiometric_apodized_bool]
v = v_manual[ilum_radiometric_apodized_bool]
time_end_single_3 = time.time()
if self.verbosity == 1:
logging.info('Time for postprocessing pupil after _get_Pupil '
+ str(time_end_single_3 - time_start_single_3))
time_end_single_1 = time.time()
if self.verbosity == 1:
logging.info('Time for pupil and illumination calculation is '
+ str(time_end_single_1 - time_start_single_1))
################################################################################
# wavefront
################################################################################
# create wavefront across the exit pupil
time_start_single = time.time()
if self.verbosity == 1:
logging.info('')
logging.info('Starting creation of wavefront')
aberrations_init = [0.0, 0, 0.0, 0.0]
aberrations = aberrations_init
# list of aberrations where we set z4, z11, z22 etc...
# This is only for testing purposes to study behaviour of non-focus terms
aberrations_0 = list(np.copy(aberrations_init))
for i in range(4, self.zmax + 1):
aberrations.append(params['z{}'.format(i)])
if i in [4, 11, 22]:
aberrations_0.append(0)
else:
aberrations_0.append(params['z{}'.format(i)])
        # if you have passed aberrations above Zernike 22, join them with the lower
        # order aberrations here
if self.extraZernike is None:
pass
else:
aberrations_extended = np.concatenate((aberrations, self.extraZernike), axis=0)
if self.verbosity == 1:
logging.info('diam_sic [m]: ' + str(diam_sic))
logging.info('aberrations: ' + str(aberrations))
logging.info('aberrations moved to z4=0: ' + str(aberrations_0))
logging.info('aberrations extra: ' + str(self.extraZernike))
logging.info('wavelength [nm]: ' + str(self.wavelength))
if self.extraZernike is None:
optics_screen = galsim.phase_screens.OpticalScreen(
diam=diam_sic, aberrations=aberrations, lam_0=self.wavelength)
if self.save == 1:
                # only create the fake screen with aberrations set to 0 if we are going to save,
                # i.e., if we are presenting the results
optics_screen_fake_0 = galsim.phase_screens.OpticalScreen(
diam=diam_sic, aberrations=aberrations_0, lam_0=self.wavelength)
else:
optics_screen = galsim.phase_screens.OpticalScreen(
diam=diam_sic, aberrations=aberrations_extended, lam_0=self.wavelength)
if self.save == 1:
                # only create the fake screen with aberrations set to 0 if we are going to save,
                # i.e., if we are presenting the results
optics_screen_fake_0 = galsim.phase_screens.OpticalScreen(
diam=diam_sic, aberrations=aberrations_0, lam_0=self.wavelength)
screens = galsim.PhaseScreenList(optics_screen)
if self.save == 1:
            # only create the fake screen with aberrations set to 0 if we are going to save,
            # i.e., if we are presenting the results
screens_fake_0 = galsim.PhaseScreenList(optics_screen_fake_0)
time_end_single = time.time()
################################################################################
# combining pupil illumination and wavefront
################################################################################
# apply wavefront to the array describing illumination
# logging.info(self.use_wf_grid)
if self.use_wf_grid is None:
wf = screens.wavefront(u, v, None, 0)
if self.save == 1:
wf_full = screens.wavefront(u_manual, v_manual, None, 0)
wf_grid = np.zeros_like(ilum_radiometric_apodized_bool, dtype=np.float32)
wf_grid[ilum_radiometric_apodized_bool] = (wf / self.wavelength)
wf_grid_rot = wf_grid
else:
            # if you want to pass an explicit wavefront, it goes here
wf_grid = self.use_wf_grid
wf_grid_rot = wf_grid
if self.save == 1:
            # only create fake images with aberrations set to 0 if we are going to save,
            # i.e., if we are testing the results
if self.verbosity == 1:
logging.info('creating wf_full_fake_0')
wf_full_fake_0 = screens_fake_0.wavefront(u_manual, v_manual, None, 0)
# exponential of the wavefront
expwf_grid = np.zeros_like(ilum_radiometric_apodized_bool, dtype=np.complex64)
expwf_grid[ilum_radiometric_apodized_bool] =\
ilum_radiometric_apodized[ilum_radiometric_apodized_bool] *\
np.exp(2j * np.pi * wf_grid_rot[ilum_radiometric_apodized_bool])
if self.verbosity == 1:
logging.info('Time for wavefront and wavefront/pupil combining is '
+ str(time_end_single - time_start_single))
################################################################################
        # execute the FFT
################################################################################
######################################################################
time_start_single = time.time()
ftexpwf = np.fft.fftshift(scipy.fftpack.fft2(np.fft.fftshift(expwf_grid)))
img_apod = np.abs(ftexpwf)**2
time_end_single = time.time()
if self.verbosity == 1:
logging.info('Time for FFT is ' + str(time_end_single - time_start_single))
######################################################################
# size in arcseconds of the image generated by the code
scale_ModelImage_PFS_naturalResolution = sky_scale(
size_of_ilum_in_units_of_radius * self.diam_sic, self.wavelength)
self.scale_ModelImage_PFS_naturalResolution = scale_ModelImage_PFS_naturalResolution
if self.save == 1:
if socket.gethostname() == 'IapetusUSA' or socket.gethostname() == 'tiger2-sumire.princeton.edu' \
or socket.gethostname() == 'pfsa-usr01-gb.subaru.nao.ac.jp' or \
socket.gethostname() == 'pfsa-usr02-gb.subaru.nao.ac.jp':
np.save(self.TESTING_PUPIL_IMAGES_FOLDER + 'aperilluminated', aper.illuminated)
np.save(self.TESTING_PUPIL_IMAGES_FOLDER + 'ilum', ilum)
np.save(self.TESTING_PUPIL_IMAGES_FOLDER + 'ilum_radiometric', ilum_radiometric)
np.save(self.TESTING_PUPIL_IMAGES_FOLDER + 'ilum_radiometric_apodized',
ilum_radiometric_apodized)
np.save(self.TESTING_PUPIL_IMAGES_FOLDER + 'ilum_radiometric_apodized_bool',
ilum_radiometric_apodized_bool)
np.save(self.TESTING_WAVEFRONT_IMAGES_FOLDER + 'u_manual', u_manual)
np.save(self.TESTING_WAVEFRONT_IMAGES_FOLDER + 'v_manual', v_manual)
np.save(self.TESTING_WAVEFRONT_IMAGES_FOLDER + 'u', u)
| np.save(self.TESTING_WAVEFRONT_IMAGES_FOLDER + 'v', v) | numpy.save |
import random
import numpy as np
from scipy.ndimage import rotate
from scipy import ndimage
import paddle
class Compose:
"""
Do transformation on input data with corresponding pre-processing and augmentation operations.
The shape of input data to all operations is [DXHXW].
Args:
transforms (list): A list contains data pre-processing or augmentation.
Raises:
TypeError: When 'transforms' is not a list.
ValueError: when the length of 'transforms' is less than 1.
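    Example:
        A minimal sketch of a flip-only pipeline on a 3D numpy volume
        (illustrative values only):
        >>> pipeline = Compose([RandomHorizontalFlip(prob=1.0)])
        >>> volume = np.zeros((4, 8, 8), dtype=np.float32)
        >>> pipeline(volume)[0].shape
        (4, 8, 8)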
"""
def __init__(self, transforms):
if not isinstance(transforms, list):
raise TypeError('The transforms must be a list!')
self.transforms = transforms
def __call__(self, im, label=None):
"""
Args:
im (np.ndarray): It is 3D (DxHxW).
label (np.ndarray): It is 3D (DxHxW).
Returns:
(tuple). A tuple including image, image info, and label after transformation.
"""
        if im is None:
            raise ValueError('The input image must not be None')
for op in self.transforms:
outputs = op(im, label)
im = outputs[0]
if len(outputs) == 2:
label = outputs[1]
if label is None:
return (im,)
else:
return (im, label)
class RandomHorizontalFlip:
"""
Image can be either 3D (DxHxW) or 4D (CxDxHxW).
Flip an image horizontally with a certain probability.
Args:
prob (float, optional): A probability of horizontally flipping. Default: 0.5.
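    Example:
        A minimal sketch with prob=1.0 so the flip always triggers
        (illustrative values only):
        >>> flip = RandomHorizontalFlip(prob=1.0)
        >>> im = np.arange(8, dtype=np.float32).reshape(2, 2, 2)
        >>> float(flip(im)[0][0, 0, 0])
        1.0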
"""
def __init__(self, prob=0.5):
self.prob = prob
def __call__(self, im, label=None):
assert im.ndim in [3, 4], 'Supports only 3D (DxHxW) or 4D (CxDxHxW) images'
if random.random() < self.prob:
if im.ndim == 3:
im = np.flip(im,2)
if label is not None:
label = np.flip(label,2)
else:
channels = [np.flip(im[c], 2) for c in range(im.shape[0])]
im = np.stack(channels, axis=0)
if label is not None:
channels = [np.flip(label[c], 2) for c in range(label.shape[0])]
label = np.stack(channels, axis=0)
if label is None:
return (im, )
else:
return (im, label)
class RandomVerticalFlip:
"""
Image can be either 3D (DxHxW) or 4D (CxDxHxW).
Flip an image vertically with a certain probability.
Args:
        prob (float, optional): A probability of vertical flipping. Default: 0.5.
"""
def __init__(self, prob=0.5):
self.prob = prob
def __call__(self, im, label=None):
assert im.ndim in [3, 4], 'Supports only 3D (DxHxW) or 4D (CxDxHxW) images'
if random.random() < self.prob:
if im.ndim == 3:
im = np.flip(im,1)
if label is not None:
label = np.flip(label,1)
else:
channels = [np.flip(im[c], 1) for c in range(im.shape[0])]
im = np.stack(channels, axis=0)
if label is not None:
channels = [np.flip(label[c], 1) for c in range(label.shape[0])]
label = np.stack(channels, axis=0)
if label is None:
return (im, )
else:
return (im, label)
class Resize3D:
"""
    Resample an image.
Args:
target_size (list|tuple, optional): The target size of image. Default: (32,256,256).
Raises:
TypeError: When 'target_size' type is neither list nor tuple.
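    Example:
        A minimal sketch of the constructor and of the per-axis zoom factor it
        implies (desired size / current size), e.g. for a depth of 64 resampled
        down to 32 (illustrative values only):
        >>> Resize3D(target_size=(32, 256, 256)).target_size
        (32, 256, 256)
        >>> 32 / 64  # depth_factor passed to ndimage.zoom
        0.5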
"""
def __init__(self, target_size=(32,256,256), model='constant',order=1):
self.model = model
self.order=order
if isinstance(target_size, list) or isinstance(target_size, tuple):
if len(target_size) != 3:
raise ValueError(
'`target_size` should include 3 elements, but it is {}'.
format(target_size))
else:
raise TypeError(
"Type of `target_size` is invalid. It should be list or tuple, but it is {}"
.format(type(target_size)))
self.target_size = target_size
def __call__(self, im, label=None):
"""
Args:
im (np.ndarray): The Image data.3D (DxHxW) or 4D (CxDxHxW)
label (np.ndarray, optional): The label data. Default: None.
Returns:
(tuple). When label is None, it returns (im, ), otherwise it returns (im, label),
Raises:
TypeError: When the 'img' type is not numpy.
            ValueError: When the length of "im" shape is not 3 or 4.
"""
if not isinstance(im, np.ndarray):
raise TypeError("Resize: image type is not numpy.")
        if im.ndim not in (3, 4):
            raise ValueError('Resize: image must be 3-dimensional (DxHxW) or 4-dimensional (CxDxHxW).')
if im.ndim == 3:
desired_depth = depth = self.target_size[0]
desired_width = width = self.target_size[1]
desired_height = height = self.target_size[2]
current_depth = im.shape[0]
current_width = im.shape[1]
current_height = im.shape[2]
depth = current_depth / desired_depth
width = current_width / desired_width
height = current_height / desired_height
depth_factor = 1 / depth
width_factor = 1 / width
height_factor = 1 / height
im = ndimage.zoom(im, (depth_factor,width_factor, height_factor), order=self.order,mode=self.model)
if label is not None:
label = ndimage.zoom(label, (depth_factor,width_factor, height_factor), order=0,mode='nearest', cval=0.0)
else:
channels = [ndimage.zoom(im[c], (depth_factor,width_factor, height_factor), order=self.order,mode=self.model) for c
in range(im.shape[0])]
im = | np.stack(channels, axis=0) | numpy.stack |
# flake8: noqa
import math
import numpy as np
import torch
from catalyst.metrics.functional._average_precision import (
average_precision,
binary_average_precision,
mean_average_precision,
)
def test_binary_average_precision_base():
"""
Tests for catalyst.binary_average_precision metric.
"""
outputs = torch.Tensor([0.1, 0.4, 0.35, 0.8])
targets = torch.Tensor([0, 0, 1, 1])
assert torch.isclose(
binary_average_precision(outputs, targets), torch.tensor(0.8333), atol=1e-3
)
def test_binary_average_precision_weighted():
"""
Tests for catalyst.binary_average_precision metric.
"""
target = torch.Tensor([0, 1, 0, 1])
output = torch.Tensor([0.1, 0.2, 0.3, 4])
weight = torch.Tensor([0.5, 1.0, 2.0, 0.1])
ap = binary_average_precision(outputs=output, targets=target, weights=weight)
val = (1 * 0.1 / 0.1 + 0 * 2.0 / 2.1 + 1.1 * 1 / 3.1 + 0 * 1 / 4) / 2.0
assert math.fabs(ap - val) < 0.01, "ap test1 failed"
ap = binary_average_precision(outputs=output, targets=target, weights=None)
val = (1 * 1.0 / 1.0 + 0 * 1.0 / 2.0 + 2 * 1.0 / 3.0 + 0 * 1.0 / 4.0) / 2.0
assert math.fabs(ap - val) < 0.01, "ap test2 failed"
target = torch.Tensor([0, 1, 0, 1])
output = torch.Tensor([4, 3, 2, 1])
weight = torch.Tensor([1, 2, 3, 4])
ap = binary_average_precision(outputs=output, targets=target, weights=weight)
val = (0 * 1.0 / 1.0 + 1.0 * 2.0 / 3.0 + 2.0 * 0 / 6.0 + 6.0 * 1.0 / 10.0) / 2.0
assert math.fabs(ap - val) < 0.01, "ap test3 failed"
ap = binary_average_precision(outputs=output, targets=target, weights=None)
val = (0 * 1.0 + 1 * 1.0 / 2.0 + 0 * 1.0 / 3.0 + 2 * 1.0 / 4.0) / 2.0
assert math.fabs(ap - val) < 0.01, "ap test4 failed"
target = torch.Tensor([0, 1, 0, 1])
output = torch.Tensor([1, 4, 2, 3])
weight = torch.Tensor([1, 2, 3, 4])
ap = binary_average_precision(outputs=output, targets=target, weights=weight)
val = (4 * 1.0 / 4.0 + 6 * 1.0 / 6.0 + 0 * 6.0 / 9.0 + 0 * 6.0 / 10.0) / 2.0
assert math.fabs(ap - val) < 0.01, "ap test5 failed"
ap = binary_average_precision(outputs=output, targets=target, weights=None)
val = (1 * 1.0 + 2 * 1.0 / 2.0 + 0 * 1.0 / 3.0 + 0 * 1.0 / 4.0) / 2.0
assert math.fabs(ap - val) < 0.01, "ap test6 failed"
target = torch.Tensor([0, 0, 0, 0])
output = torch.Tensor([1, 4, 2, 3])
weight = torch.Tensor([1.0, 0.1, 0.0, 0.5])
ap = binary_average_precision(outputs=output, targets=target, weights=weight)
val = 0.0
assert math.fabs(ap - val) < 0.01, "ap test7 failed"
ap = binary_average_precision(outputs=output, targets=target, weights=None)
val = 0.0
assert math.fabs(ap - val) < 0.01, "ap test8 failed"
target = torch.Tensor([1, 1, 0])
output = torch.Tensor([3, 1, 2])
weight = torch.Tensor([1, 0.1, 3])
ap = binary_average_precision(outputs=output, targets=target, weights=weight)
val = (1 * 1.0 / 1.0 + 1 * 0.0 / 4.0 + 1.1 / 4.1) / 2.0
assert math.fabs(ap - val) < 0.01, "ap test9 failed"
ap = binary_average_precision(outputs=output, targets=target, weights=None)
val = (1 * 1.0 + 0 * 1.0 / 2.0 + 2 * 1.0 / 3.0) / 2.0
assert math.fabs(ap - val) < 0.01, "ap test10 failed"
# Test multiple K's
target = torch.Tensor([[0, 1, 0, 1], [0, 1, 0, 1]]).transpose(0, 1)
output = torch.Tensor([[0.1, 0.2, 0.3, 4], [4, 3, 2, 1]]).transpose(0, 1)
weight = torch.Tensor([[1.0, 0.5, 2.0, 3.0]]).transpose(0, 1)
ap = binary_average_precision(outputs=output, targets=target, weights=weight)
assert (
math.fabs(
ap.sum()
- torch.Tensor(
[
(1 * 3.0 / 3.0 + 0 * 3.0 / 5.0 + 3.5 * 1 / 5.5 + 0 * 3.5 / 6.5) / 2.0,
(0 * 1.0 / 1.0 + 1 * 0.5 / 1.5 + 0 * 0.5 / 3.5 + 1 * 3.5 / 6.5) / 2.0,
]
).sum()
)
< 0.01
), "ap test11 failed"
ap = binary_average_precision(outputs=output, targets=target, weights=None)
assert (
math.fabs(
ap.sum()
- torch.Tensor(
[
(1 * 1.0 + 0 * 1.0 / 2.0 + 2 * 1.0 / 3 + 0 * 1.0 / 4.0) / 2.0,
(0 * 1.0 + 1 * 1.0 / 2.0 + 0 * 1.0 / 3.0 + 2.0 * 1.0 / 4.0) / 2.0,
]
).sum()
)
< 0.01
), "ap test12 failed"
def test_average_precision():
"""
Tests for catalyst.metrics.average_precision metric.
"""
# # check everything is relevant
y_pred = [0.5, 0.2, 0.3, 0.8]
y_true = [1.0, 1.0, 1.0, 1.0]
k = 4
avg_precision = average_precision(torch.Tensor([y_pred]), torch.Tensor([y_true]), k)
assert avg_precision[0] == 1
    # # check if everything is relevant for 3 users
y_pred = [0.5, 0.2, 0.3, 0.8]
y_true = [1.0, 1.0, 1.0, 1.0]
k = 4
avg_precision = average_precision(
torch.Tensor([y_pred, y_pred, y_pred]), torch.Tensor([y_true, y_true, y_true]), k
)
assert torch.equal(avg_precision, torch.ones(3))
# # check everything is irrelevant
y_pred = [0.5, 0.2, 0.3, 0.8]
y_true = [0.0, 0.0, 0.0, 0.0]
k = 4
avg_precision = average_precision(torch.Tensor([y_pred]), torch.Tensor([y_true]), k)
assert avg_precision[0] == 0
    # # check if everything is irrelevant for 3 users
y_pred = [0.5, 0.2, 0.3, 0.8]
y_true = [0.0, 0.0, 0.0, 0.0]
k = 4
avg_precision = average_precision(
torch.Tensor([y_pred, y_pred, y_pred]), torch.Tensor([y_true, y_true, y_true]), k
)
assert torch.equal(avg_precision, torch.zeros(3))
# # check 4
y_pred1 = [4.0, 2.0, 3.0, 1.0]
y_pred2 = [1.0, 2.0, 3.0, 4.0]
y_true1 = [0.0, 1.0, 1.0, 1.0]
y_true2 = [0.0, 1.0, 0.0, 0.0]
k = 4
y_pred_torch = torch.Tensor([y_pred1, y_pred2])
y_true_torch = torch.Tensor([y_true1, y_true2])
avg_precision = average_precision(y_pred_torch, y_true_torch, k)
assert np.isclose(avg_precision[0], 0.6389, atol=1e-3)
assert np.isclose(avg_precision[1], 0.333, atol=1e-3)
# check 5
    # Stanford Introduction to Information Retrieval primer
y_pred1 = np.arange(9, -1, -1)
y_true1 = [1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0]
y_pred2 = np.arange(9, -1, -1)
y_true2 = [0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0]
k = 10
y_pred_torch = torch.Tensor([y_pred1, y_pred2])
y_true_torch = torch.Tensor([y_true1, y_true2])
avg_precision = average_precision(y_pred_torch, y_true_torch, k)
assert np.isclose(avg_precision[0], 0.6222, atol=1e-3)
assert np.isclose(avg_precision[1], 0.4429, atol=1e-3)
def test_mean_avg_precision():
"""
Tests for catalyst.mean_avg_precision metric.
"""
# check 1
    # Stanford Introduction to Information Retrieval primer
y_pred1 = np.arange(9, -1, -1)
y_true1 = [1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0]
y_pred2 = np.arange(9, -1, -1)
y_true2 = [0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0]
y_pred_torch = torch.Tensor([y_pred1, y_pred2])
y_true_torch = torch.Tensor([y_true1, y_true2])
top_k = [10]
map_at10 = mean_average_precision(y_pred_torch, y_true_torch, top_k)[0]
assert np.allclose(map_at10, 0.5325, atol=1e-3)
# check 2
# map_at1: (1.0 + 0.0) / 2 = 0.5
# map_at3: ((1 + 0.67)/2 + 0.5) / 2 = 0.6675
# map_at5: ((1 + 0.67)/2 + (0.5 + 0.4)/2) / 2 = 0.6425
# map_at10: ((1 + 0.67 + 0.5 + 0.44 + 0.5)/5 + (0.5 + 0.4 + 0.43)/3 ) / 2 = 0.53
y_pred1 = [9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
y_pred2 = [9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
y_true1 = [1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0]
y_true2 = [0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0]
y_pred_torch = torch.Tensor([y_pred1, y_pred2])
y_true_torch = torch.Tensor([y_true1, y_true2])
top_k = [1, 3, 5, 10]
map_k = mean_average_precision(y_pred_torch, y_true_torch, top_k)
map_at1 = map_k[0]
map_at3 = map_k[1]
map_at5 = map_k[2]
map_at10 = map_k[3]
assert np.allclose(map_at1, 0.5, atol=1e-3)
assert np.allclose(map_at3, 0.6675, atol=1e-3)
assert np.allclose(map_at5, 0.6425, atol=1e-3)
assert | np.allclose(map_at10, 0.5325, atol=1e-3) | numpy.allclose |
r"""
####################################################################################################
tellurium 2.2.1
-+++++++++++++++++- Python Environment for Modeling and Simulating Biological Systems
.+++++++++++++++.
.+++++++++++++. Homepage: http://tellurium.analogmachine.org/
-//++++++++++++/. -:/-` Documentation: https://tellurium.readthedocs.io/en/latest/index.html
.----:+++++++/.++ .++++/ Forum: https://groups.google.com/forum/#!forum/tellurium-discuss
:+++++: .+:` .--++ Bug reports: https://github.com/sys-bio/tellurium/issues
-+++- ./+:-://. Repository: https://github.com/sys-bio/tellurium
.+. `...`
SED-ML simulation experiments: http://www.sed-ml.org/
# Change back to the original (with 'getName') when libsedml is fixed
sedmlDoc: L1V4
inputType: 'SEDML_STRING'
workingDir: 'C:\Users\Lucian\Desktop\tellurium'
saveOutputs: 'False'
outputDir: 'None'
plottingEngine: '<MatplotlibEngine>'
Windows-10-10.0.19041-SP0
python 3.8.3 (tags/v3.8.3:6f8c832, May 13 2020, 22:37:02) [MSC v.1924 64 bit (AMD64)]
####################################################################################################
"""
import tellurium as te
from roadrunner import Config
from tellurium.sedml.mathml import *
from tellurium.sedml.tesedml import process_trace, terminate_trace, fix_endpoints
import numpy as np
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d
try:
import libsedml
except ImportError:
import tesedml as libsedml
import pandas
import os.path
Config.LOADSBMLOPTIONS_RECOMPILE = True
workingDir = r'C:\Users\Lucian\Desktop\tellurium'
# --------------------------------------------------------
# Models
# --------------------------------------------------------
# Model <model0>
model0 = te.loadSBMLModel(os.path.join(workingDir, 'hill.xml'))
# --------------------------------------------------------
# Tasks
# --------------------------------------------------------
# Task <task0>
# not part of any DataGenerator: task0
# Task <task1>
task1 = []
# Task: <task0>
task0 = [None]
model0.setIntegrator('cvode')
if model0.conservedMoietyAnalysis == True: model0.conservedMoietyAnalysis = False
__range__uniform_linear_for_n = np.linspace(start=1.0, stop=15.0, num=26)
for __k__uniform_linear_for_n, __value__uniform_linear_for_n in enumerate(__range__uniform_linear_for_n):
model0.reset()
model0['n'] = __value__uniform_linear_for_n
model0.timeCourseSelections = ['n', 'time', '[S2]']
model0.reset()
task0[0] = model0.simulate(start=0.0, end=35.0, steps=30)
task1.extend(task0)
# --------------------------------------------------------
# DataGenerators
# --------------------------------------------------------
# DataGenerator <plot_0_0_0>
__var__task1_____time = np.column_stack([sim['time'] for sim in task1])
if len(__var__task1_____time.shape) == 1:
__var__task1_____time.shape += (1,)
plot_0_0_0 = __var__task1_____time
# DataGenerator <plot_0_0_1>
__var__task1_____n = np.column_stack([sim['n'] for sim in task1])
if len(__var__task1_____n.shape) == 1:
__var__task1_____n.shape += (1,)
plot_0_0_1 = __var__task1_____n
# DataGenerator <plot_0_0_2>
__var__task1_____S2 = | np.column_stack([sim['[S2]'] for sim in task1]) | numpy.column_stack |
""" Test functions for linalg module
"""
import os
import sys
import itertools
import traceback
import textwrap
import subprocess
import pytest
import numpy as np
from numpy import array, single, double, csingle, cdouble, dot, identity, matmul
from numpy import multiply, atleast_2d, inf, asarray
from numpy import linalg
from numpy.linalg import matrix_power, norm, matrix_rank, multi_dot, LinAlgError
from numpy.linalg.linalg import _multi_dot_matrix_chain_order
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_array_equal,
assert_almost_equal, assert_allclose, suppress_warnings,
assert_raises_regex, HAS_LAPACK64,
)
from numpy.testing._private.utils import requires_memory
def consistent_subclass(out, in_):
# For ndarray subclass input, our output should have the same subclass
# (non-ndarray input gets converted to ndarray).
return type(out) is (type(in_) if isinstance(in_, np.ndarray)
else np.ndarray)
old_assert_almost_equal = assert_almost_equal
def assert_almost_equal(a, b, single_decimal=6, double_decimal=12, **kw):
if asarray(a).dtype.type in (single, csingle):
decimal = single_decimal
else:
decimal = double_decimal
old_assert_almost_equal(a, b, decimal=decimal, **kw)
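# Illustration of the dtype-aware tolerance above (added for clarity; not part of the
# original suite): single-precision inputs are compared to 6 decimals and
# double-precision inputs to 12, so, for example,
#   assert_almost_equal(np.float32(1), np.float32(1) + np.float32(1e-7))  # passes at 6 decimals
#   assert_almost_equal(np.float64(1), np.float64(1) + np.float64(1e-7))  # fails at 12 decimals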
def get_real_dtype(dtype):
return {single: single, double: double,
csingle: single, cdouble: double}[dtype]
def get_complex_dtype(dtype):
return {single: csingle, double: cdouble,
csingle: csingle, cdouble: cdouble}[dtype]
def get_rtol(dtype):
# Choose a safe rtol
if dtype in (single, csingle):
return 1e-5
else:
return 1e-11
# used to categorize tests
all_tags = {
'square', 'nonsquare', 'hermitian', # mutually exclusive
'generalized', 'size-0', 'strided' # optional additions
}
class LinalgCase:
def __init__(self, name, a, b, tags=set()):
"""
A bundle of arguments to be passed to a test case, with an identifying
name, the operands a and b, and a set of tags to filter the tests
"""
assert_(isinstance(name, str))
self.name = name
self.a = a
self.b = b
self.tags = frozenset(tags) # prevent shared tags
def check(self, do):
"""
Run the function `do` on this test case, expanding arguments
"""
do(self.a, self.b, tags=self.tags)
def __repr__(self):
return f'<LinalgCase: {self.name}>'
def apply_tag(tag, cases):
"""
Add the given tag (a string) to each of the cases (a list of LinalgCase
objects)
"""
assert tag in all_tags, "Invalid tag"
for case in cases:
case.tags = case.tags | {tag}
return cases
#
# Base test cases
#
np.random.seed(1234)
CASES = []
# square test cases
CASES += apply_tag('square', [
LinalgCase("single",
array([[1., 2.], [3., 4.]], dtype=single),
array([2., 1.], dtype=single)),
LinalgCase("double",
array([[1., 2.], [3., 4.]], dtype=double),
array([2., 1.], dtype=double)),
LinalgCase("double_2",
array([[1., 2.], [3., 4.]], dtype=double),
array([[2., 1., 4.], [3., 4., 6.]], dtype=double)),
LinalgCase("csingle",
array([[1. + 2j, 2 + 3j], [3 + 4j, 4 + 5j]], dtype=csingle),
array([2. + 1j, 1. + 2j], dtype=csingle)),
LinalgCase("cdouble",
array([[1. + 2j, 2 + 3j], [3 + 4j, 4 + 5j]], dtype=cdouble),
array([2. + 1j, 1. + 2j], dtype=cdouble)),
LinalgCase("cdouble_2",
array([[1. + 2j, 2 + 3j], [3 + 4j, 4 + 5j]], dtype=cdouble),
array([[2. + 1j, 1. + 2j, 1 + 3j], [1 - 2j, 1 - 3j, 1 - 6j]], dtype=cdouble)),
LinalgCase("0x0",
np.empty((0, 0), dtype=double),
np.empty((0,), dtype=double),
tags={'size-0'}),
LinalgCase("8x8",
np.random.rand(8, 8),
np.random.rand(8)),
LinalgCase("1x1",
np.random.rand(1, 1),
np.random.rand(1)),
LinalgCase("nonarray",
[[1, 2], [3, 4]],
[2, 1]),
])
# non-square test-cases
CASES += apply_tag('nonsquare', [
LinalgCase("single_nsq_1",
array([[1., 2., 3.], [3., 4., 6.]], dtype=single),
array([2., 1.], dtype=single)),
LinalgCase("single_nsq_2",
array([[1., 2.], [3., 4.], [5., 6.]], dtype=single),
array([2., 1., 3.], dtype=single)),
LinalgCase("double_nsq_1",
array([[1., 2., 3.], [3., 4., 6.]], dtype=double),
array([2., 1.], dtype=double)),
LinalgCase("double_nsq_2",
array([[1., 2.], [3., 4.], [5., 6.]], dtype=double),
array([2., 1., 3.], dtype=double)),
LinalgCase("csingle_nsq_1",
array(
[[1. + 1j, 2. + 2j, 3. - 3j], [3. - 5j, 4. + 9j, 6. + 2j]], dtype=csingle),
array([2. + 1j, 1. + 2j], dtype=csingle)),
LinalgCase("csingle_nsq_2",
array(
[[1. + 1j, 2. + 2j], [3. - 3j, 4. - 9j], [5. - 4j, 6. + 8j]], dtype=csingle),
array([2. + 1j, 1. + 2j, 3. - 3j], dtype=csingle)),
LinalgCase("cdouble_nsq_1",
array(
[[1. + 1j, 2. + 2j, 3. - 3j], [3. - 5j, 4. + 9j, 6. + 2j]], dtype=cdouble),
array([2. + 1j, 1. + 2j], dtype=cdouble)),
LinalgCase("cdouble_nsq_2",
array(
[[1. + 1j, 2. + 2j], [3. - 3j, 4. - 9j], [5. - 4j, 6. + 8j]], dtype=cdouble),
array([2. + 1j, 1. + 2j, 3. - 3j], dtype=cdouble)),
LinalgCase("cdouble_nsq_1_2",
array(
[[1. + 1j, 2. + 2j, 3. - 3j], [3. - 5j, 4. + 9j, 6. + 2j]], dtype=cdouble),
array([[2. + 1j, 1. + 2j], [1 - 1j, 2 - 2j]], dtype=cdouble)),
LinalgCase("cdouble_nsq_2_2",
array(
[[1. + 1j, 2. + 2j], [3. - 3j, 4. - 9j], [5. - 4j, 6. + 8j]], dtype=cdouble),
array([[2. + 1j, 1. + 2j], [1 - 1j, 2 - 2j], [1 - 1j, 2 - 2j]], dtype=cdouble)),
LinalgCase("8x11",
np.random.rand(8, 11),
np.random.rand(8)),
LinalgCase("1x5",
np.random.rand(1, 5),
np.random.rand(1)),
LinalgCase("5x1",
np.random.rand(5, 1),
np.random.rand(5)),
LinalgCase("0x4",
np.random.rand(0, 4),
np.random.rand(0),
tags={'size-0'}),
LinalgCase("4x0",
np.random.rand(4, 0),
np.random.rand(4),
tags={'size-0'}),
])
# hermitian test-cases
CASES += apply_tag('hermitian', [
LinalgCase("hsingle",
array([[1., 2.], [2., 1.]], dtype=single),
None),
LinalgCase("hdouble",
array([[1., 2.], [2., 1.]], dtype=double),
None),
LinalgCase("hcsingle",
array([[1., 2 + 3j], [2 - 3j, 1]], dtype=csingle),
None),
LinalgCase("hcdouble",
array([[1., 2 + 3j], [2 - 3j, 1]], dtype=cdouble),
None),
LinalgCase("hempty",
np.empty((0, 0), dtype=double),
None,
tags={'size-0'}),
LinalgCase("hnonarray",
[[1, 2], [2, 1]],
None),
LinalgCase("matrix_b_only",
array([[1., 2.], [2., 1.]]),
None),
LinalgCase("hmatrix_1x1",
np.random.rand(1, 1),
None),
])
#
# Gufunc test cases
#
def _make_generalized_cases():
new_cases = []
for case in CASES:
if not isinstance(case.a, np.ndarray):
continue
a = np.array([case.a, 2 * case.a, 3 * case.a])
if case.b is None:
b = None
else:
b = np.array([case.b, 7 * case.b, 6 * case.b])
new_case = LinalgCase(case.name + "_tile3", a, b,
tags=case.tags | {'generalized'})
new_cases.append(new_case)
a = np.array([case.a] * 2 * 3).reshape((3, 2) + case.a.shape)
if case.b is None:
b = None
else:
b = np.array([case.b] * 2 * 3).reshape((3, 2) + case.b.shape)
new_case = LinalgCase(case.name + "_tile213", a, b,
tags=case.tags | {'generalized'})
new_cases.append(new_case)
return new_cases
CASES += _make_generalized_cases()
#
# Generate stride combination variations of the above
#
def _stride_comb_iter(x):
"""
Generate cartesian product of strides for all axes
"""
if not isinstance(x, np.ndarray):
yield x, "nop"
return
stride_set = [(1,)] * x.ndim
stride_set[-1] = (1, 3, -4)
if x.ndim > 1:
stride_set[-2] = (1, 3, -4)
if x.ndim > 2:
stride_set[-3] = (1, -4)
for repeats in itertools.product(*tuple(stride_set)):
new_shape = [abs(a * b) for a, b in zip(x.shape, repeats)]
slices = tuple([slice(None, None, repeat) for repeat in repeats])
# new array with different strides, but same data
xi = np.empty(new_shape, dtype=x.dtype)
xi.view(np.uint32).fill(0xdeadbeef)
xi = xi[slices]
xi[...] = x
xi = xi.view(x.__class__)
assert_(np.all(xi == x))
yield xi, "stride_" + "_".join(["%+d" % j for j in repeats])
# generate also zero strides if possible
if x.ndim >= 1 and x.shape[-1] == 1:
s = list(x.strides)
s[-1] = 0
xi = np.lib.stride_tricks.as_strided(x, strides=s)
yield xi, "stride_xxx_0"
if x.ndim >= 2 and x.shape[-2] == 1:
s = list(x.strides)
s[-2] = 0
xi = np.lib.stride_tricks.as_strided(x, strides=s)
yield xi, "stride_xxx_0_x"
if x.ndim >= 2 and x.shape[:-2] == (1, 1):
s = list(x.strides)
s[-1] = 0
s[-2] = 0
xi = np.lib.stride_tricks.as_strided(x, strides=s)
yield xi, "stride_xxx_0_0"
def _make_strided_cases():
new_cases = []
for case in CASES:
for a, a_label in _stride_comb_iter(case.a):
for b, b_label in _stride_comb_iter(case.b):
new_case = LinalgCase(case.name + "_" + a_label + "_" + b_label, a, b,
tags=case.tags | {'strided'})
new_cases.append(new_case)
return new_cases
CASES += _make_strided_cases()
#
# Test different routines against the above cases
#
class LinalgTestCase:
TEST_CASES = CASES
def check_cases(self, require=set(), exclude=set()):
"""
Run func on each of the cases with all of the tags in require, and none
of the tags in exclude
"""
for case in self.TEST_CASES:
# filter by require and exclude
if case.tags & require != require:
continue
if case.tags & exclude:
continue
try:
case.check(self.do)
except Exception as e:
msg = f'In test case: {case!r}\n\n'
msg += traceback.format_exc()
raise AssertionError(msg) from e
class LinalgSquareTestCase(LinalgTestCase):
def test_sq_cases(self):
self.check_cases(require={'square'},
exclude={'generalized', 'size-0'})
def test_empty_sq_cases(self):
self.check_cases(require={'square', 'size-0'},
exclude={'generalized'})
class LinalgNonsquareTestCase(LinalgTestCase):
def test_nonsq_cases(self):
self.check_cases(require={'nonsquare'},
exclude={'generalized', 'size-0'})
def test_empty_nonsq_cases(self):
self.check_cases(require={'nonsquare', 'size-0'},
exclude={'generalized'})
class HermitianTestCase(LinalgTestCase):
def test_herm_cases(self):
self.check_cases(require={'hermitian'},
exclude={'generalized', 'size-0'})
def test_empty_herm_cases(self):
self.check_cases(require={'hermitian', 'size-0'},
exclude={'generalized'})
class LinalgGeneralizedSquareTestCase(LinalgTestCase):
@pytest.mark.slow
def test_generalized_sq_cases(self):
self.check_cases(require={'generalized', 'square'},
exclude={'size-0'})
@pytest.mark.slow
def test_generalized_empty_sq_cases(self):
self.check_cases(require={'generalized', 'square', 'size-0'})
class LinalgGeneralizedNonsquareTestCase(LinalgTestCase):
@pytest.mark.slow
def test_generalized_nonsq_cases(self):
self.check_cases(require={'generalized', 'nonsquare'},
exclude={'size-0'})
@pytest.mark.slow
def test_generalized_empty_nonsq_cases(self):
self.check_cases(require={'generalized', 'nonsquare', 'size-0'})
class HermitianGeneralizedTestCase(LinalgTestCase):
@pytest.mark.slow
def test_generalized_herm_cases(self):
self.check_cases(require={'generalized', 'hermitian'},
exclude={'size-0'})
@pytest.mark.slow
def test_generalized_empty_herm_cases(self):
self.check_cases(require={'generalized', 'hermitian', 'size-0'},
exclude={'none'})
def dot_generalized(a, b):
a = asarray(a)
if a.ndim >= 3:
if a.ndim == b.ndim:
# matrix x matrix
new_shape = a.shape[:-1] + b.shape[-1:]
elif a.ndim == b.ndim + 1:
# matrix x vector
new_shape = a.shape[:-1]
else:
raise ValueError("Not implemented...")
r = np.empty(new_shape, dtype=np.common_type(a, b))
for c in itertools.product(*map(range, a.shape[:-2])):
r[c] = dot(a[c], b[c])
return r
else:
return dot(a, b)
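# Intent of dot_generalized, illustrated (an inferred example, not from the original
# file): it emulates matmul-style stacking for the "generalized" cases by applying
# plain dot() over each leading index, e.g.
#   a.shape == (3, 2, 2), b.shape == (3, 2, 2)  ->  result shape (3, 2, 2)
#   a.shape == (3, 2, 2), b.shape == (3, 2)     ->  result shape (3, 2)
# while plain 2-D inputs fall through to an ordinary dot(a, b).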
def identity_like_generalized(a):
a = asarray(a)
if a.ndim >= 3:
r = np.empty(a.shape, dtype=a.dtype)
r[...] = identity(a.shape[-2])
return r
else:
return identity(a.shape[0])
class SolveCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
# kept apart from TestSolve for use for testing with matrices.
def do(self, a, b, tags):
x = linalg.solve(a, b)
assert_almost_equal(b, dot_generalized(a, x))
assert_(consistent_subclass(x, b))
class TestSolve(SolveCases):
@pytest.mark.parametrize('dtype', [single, double, csingle, cdouble])
def test_types(self, dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
assert_equal(linalg.solve(x, x).dtype, dtype)
def test_0_size(self):
class ArraySubclass(np.ndarray):
pass
# Test system of 0x0 matrices
a = np.arange(8).reshape(2, 2, 2)
b = np.arange(6).reshape(1, 2, 3).view(ArraySubclass)
expected = linalg.solve(a, b)[:, 0:0, :]
result = linalg.solve(a[:, 0:0, 0:0], b[:, 0:0, :])
assert_array_equal(result, expected)
assert_(isinstance(result, ArraySubclass))
# Test errors for non-square and only b's dimension being 0
assert_raises(linalg.LinAlgError, linalg.solve, a[:, 0:0, 0:1], b)
assert_raises(ValueError, linalg.solve, a, b[:, 0:0, :])
# Test broadcasting error
b = np.arange(6).reshape(1, 3, 2) # broadcasting error
assert_raises(ValueError, linalg.solve, a, b)
assert_raises(ValueError, linalg.solve, a[0:0], b[0:0])
# Test zero "single equations" with 0x0 matrices.
b = np.arange(2).reshape(1, 2).view(ArraySubclass)
expected = linalg.solve(a, b)[:, 0:0]
result = linalg.solve(a[:, 0:0, 0:0], b[:, 0:0])
assert_array_equal(result, expected)
assert_(isinstance(result, ArraySubclass))
b = np.arange(3).reshape(1, 3)
assert_raises(ValueError, linalg.solve, a, b)
assert_raises(ValueError, linalg.solve, a[0:0], b[0:0])
assert_raises(ValueError, linalg.solve, a[:, 0:0, 0:0], b)
def test_0_size_k(self):
# test zero multiple equation (K=0) case.
class ArraySubclass(np.ndarray):
pass
a = np.arange(4).reshape(1, 2, 2)
b = np.arange(6).reshape(3, 2, 1).view(ArraySubclass)
expected = linalg.solve(a, b)[:, :, 0:0]
result = linalg.solve(a, b[:, :, 0:0])
assert_array_equal(result, expected)
assert_(isinstance(result, ArraySubclass))
# test both zero.
expected = linalg.solve(a, b)[:, 0:0, 0:0]
result = linalg.solve(a[:, 0:0, 0:0], b[:, 0:0, 0:0])
assert_array_equal(result, expected)
assert_(isinstance(result, ArraySubclass))
class InvCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
def do(self, a, b, tags):
a_inv = linalg.inv(a)
assert_almost_equal(dot_generalized(a, a_inv),
identity_like_generalized(a))
assert_(consistent_subclass(a_inv, a))
class TestInv(InvCases):
@pytest.mark.parametrize('dtype', [single, double, csingle, cdouble])
def test_types(self, dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
assert_equal(linalg.inv(x).dtype, dtype)
def test_0_size(self):
# Check that all kinds of 0-sized arrays work
class ArraySubclass(np.ndarray):
pass
a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass)
res = linalg.inv(a)
assert_(res.dtype.type is np.float64)
assert_equal(a.shape, res.shape)
assert_(isinstance(res, ArraySubclass))
a = np.zeros((0, 0), dtype=np.complex64).view(ArraySubclass)
res = linalg.inv(a)
assert_(res.dtype.type is np.complex64)
assert_equal(a.shape, res.shape)
assert_(isinstance(res, ArraySubclass))
class EigvalsCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
def do(self, a, b, tags):
ev = linalg.eigvals(a)
evalues, evectors = linalg.eig(a)
assert_almost_equal(ev, evalues)
class TestEigvals(EigvalsCases):
@pytest.mark.parametrize('dtype', [single, double, csingle, cdouble])
def test_types(self, dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
assert_equal(linalg.eigvals(x).dtype, dtype)
x = np.array([[1, 0.5], [-1, 1]], dtype=dtype)
assert_equal(linalg.eigvals(x).dtype, get_complex_dtype(dtype))
def test_0_size(self):
# Check that all kinds of 0-sized arrays work
class ArraySubclass(np.ndarray):
pass
a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass)
res = linalg.eigvals(a)
assert_(res.dtype.type is np.float64)
assert_equal((0, 1), res.shape)
# This is just for documentation, it might make sense to change:
assert_(isinstance(res, np.ndarray))
a = np.zeros((0, 0), dtype=np.complex64).view(ArraySubclass)
res = linalg.eigvals(a)
assert_(res.dtype.type is np.complex64)
assert_equal((0,), res.shape)
# This is just for documentation, it might make sense to change:
assert_(isinstance(res, np.ndarray))
class EigCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
def do(self, a, b, tags):
evalues, evectors = linalg.eig(a)
assert_allclose(dot_generalized(a, evectors),
np.asarray(evectors) * np.asarray(evalues)[..., None, :],
rtol=get_rtol(evalues.dtype))
assert_(consistent_subclass(evectors, a))
class TestEig(EigCases):
@pytest.mark.parametrize('dtype', [single, double, csingle, cdouble])
def test_types(self, dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
w, v = np.linalg.eig(x)
assert_equal(w.dtype, dtype)
assert_equal(v.dtype, dtype)
x = np.array([[1, 0.5], [-1, 1]], dtype=dtype)
w, v = np.linalg.eig(x)
assert_equal(w.dtype, get_complex_dtype(dtype))
assert_equal(v.dtype, get_complex_dtype(dtype))
def test_0_size(self):
# Check that all kinds of 0-sized arrays work
class ArraySubclass(np.ndarray):
pass
a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass)
res, res_v = linalg.eig(a)
assert_(res_v.dtype.type is np.float64)
assert_(res.dtype.type is np.float64)
assert_equal(a.shape, res_v.shape)
assert_equal((0, 1), res.shape)
# This is just for documentation, it might make sense to change:
assert_(isinstance(a, np.ndarray))
a = np.zeros((0, 0), dtype=np.complex64).view(ArraySubclass)
res, res_v = linalg.eig(a)
assert_(res_v.dtype.type is np.complex64)
assert_(res.dtype.type is np.complex64)
assert_equal(a.shape, res_v.shape)
assert_equal((0,), res.shape)
# This is just for documentation, it might make sense to change:
assert_(isinstance(a, np.ndarray))
class SVDBaseTests:
hermitian = False
@pytest.mark.parametrize('dtype', [single, double, csingle, cdouble])
def test_types(self, dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
u, s, vh = linalg.svd(x)
assert_equal(u.dtype, dtype)
assert_equal(s.dtype, get_real_dtype(dtype))
assert_equal(vh.dtype, dtype)
s = linalg.svd(x, compute_uv=False, hermitian=self.hermitian)
assert_equal(s.dtype, get_real_dtype(dtype))
class SVDCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
def do(self, a, b, tags):
u, s, vt = linalg.svd(a, False)
assert_allclose(a, dot_generalized(np.asarray(u) * np.asarray(s)[..., None, :],
np.asarray(vt)),
rtol=get_rtol(u.dtype))
assert_(consistent_subclass(u, a))
assert_(consistent_subclass(vt, a))
class TestSVD(SVDCases, SVDBaseTests):
def test_empty_identity(self):
""" Empty input should put an identity matrix in u or vh """
x = np.empty((4, 0))
u, s, vh = linalg.svd(x, compute_uv=True, hermitian=self.hermitian)
assert_equal(u.shape, (4, 4))
assert_equal(vh.shape, (0, 0))
assert_equal(u, np.eye(4))
x = np.empty((0, 4))
u, s, vh = linalg.svd(x, compute_uv=True, hermitian=self.hermitian)
assert_equal(u.shape, (0, 0))
assert_equal(vh.shape, (4, 4))
assert_equal(vh, np.eye(4))
class SVDHermitianCases(HermitianTestCase, HermitianGeneralizedTestCase):
def do(self, a, b, tags):
u, s, vt = linalg.svd(a, False, hermitian=True)
assert_allclose(a, dot_generalized(np.asarray(u) * np.asarray(s)[..., None, :],
np.asarray(vt)),
rtol=get_rtol(u.dtype))
def hermitian(mat):
axes = list(range(mat.ndim))
axes[-1], axes[-2] = axes[-2], axes[-1]
return np.conj(np.transpose(mat, axes=axes))
assert_almost_equal(np.matmul(u, hermitian(u)), np.broadcast_to(np.eye(u.shape[-1]), u.shape))
assert_almost_equal(np.matmul(vt, hermitian(vt)), np.broadcast_to(np.eye(vt.shape[-1]), vt.shape))
assert_equal(np.sort(s)[..., ::-1], s)
assert_(consistent_subclass(u, a))
assert_(consistent_subclass(vt, a))
class TestSVDHermitian(SVDHermitianCases, SVDBaseTests):
hermitian = True
class CondCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
# cond(x, p) for p in (None, 2, -2)
def do(self, a, b, tags):
c = asarray(a) # a might be a matrix
if 'size-0' in tags:
assert_raises(LinAlgError, linalg.cond, c)
return
# +-2 norms
s = linalg.svd(c, compute_uv=False)
assert_almost_equal(
linalg.cond(a), s[..., 0] / s[..., -1],
single_decimal=5, double_decimal=11)
assert_almost_equal(
linalg.cond(a, 2), s[..., 0] / s[..., -1],
single_decimal=5, double_decimal=11)
assert_almost_equal(
linalg.cond(a, -2), s[..., -1] / s[..., 0],
single_decimal=5, double_decimal=11)
# Other norms
cinv = np.linalg.inv(c)
assert_almost_equal(
linalg.cond(a, 1),
abs(c).sum(-2).max(-1) * abs(cinv).sum(-2).max(-1),
single_decimal=5, double_decimal=11)
assert_almost_equal(
linalg.cond(a, -1),
abs(c).sum(-2).min(-1) * abs(cinv).sum(-2).min(-1),
single_decimal=5, double_decimal=11)
assert_almost_equal(
linalg.cond(a, np.inf),
abs(c).sum(-1).max(-1) * abs(cinv).sum(-1).max(-1),
single_decimal=5, double_decimal=11)
assert_almost_equal(
linalg.cond(a, -np.inf),
abs(c).sum(-1).min(-1) * abs(cinv).sum(-1).min(-1),
single_decimal=5, double_decimal=11)
assert_almost_equal(
linalg.cond(a, 'fro'),
np.sqrt((abs(c)**2).sum(-1).sum(-1)
* (abs(cinv)**2).sum(-1).sum(-1)),
single_decimal=5, double_decimal=11)
class TestCond(CondCases):
def test_basic_nonsvd(self):
# Smoketest the non-svd norms
A = array([[1., 0, 1], [0, -2., 0], [0, 0, 3.]])
assert_almost_equal(linalg.cond(A, inf), 4)
assert_almost_equal(linalg.cond(A, -inf), 2/3)
assert_almost_equal(linalg.cond(A, 1), 4)
assert_almost_equal(linalg.cond(A, -1), 0.5)
assert_almost_equal(linalg.cond(A, 'fro'), np.sqrt(265 / 12))
def test_singular(self):
# Singular matrices have infinite condition number for
# positive norms, and negative norms shouldn't raise
# exceptions
As = [np.zeros((2, 2)), np.ones((2, 2))]
p_pos = [None, 1, 2, 'fro']
p_neg = [-1, -2]
for A, p in itertools.product(As, p_pos):
# Inversion may not hit exact infinity, so just check the
# number is large
assert_(linalg.cond(A, p) > 1e15)
for A, p in itertools.product(As, p_neg):
linalg.cond(A, p)
def test_nan(self):
# nans should be passed through, not converted to infs
ps = [None, 1, -1, 2, -2, 'fro']
p_pos = [None, 1, 2, 'fro']
A = np.ones((2, 2))
A[0,1] = np.nan
for p in ps:
c = linalg.cond(A, p)
assert_(isinstance(c, np.float_))
assert_(np.isnan(c))
A = np.ones((3, 2, 2))
A[1,0,1] = np.nan
for p in ps:
c = linalg.cond(A, p)
assert_(np.isnan(c[1]))
if p in p_pos:
assert_(c[0] > 1e15)
assert_(c[2] > 1e15)
else:
assert_(not np.isnan(c[0]))
assert_(not np.isnan(c[2]))
def test_stacked_singular(self):
# Check behavior when only some of the stacked matrices are
# singular
np.random.seed(1234)
A = np.random.rand(2, 2, 2, 2)
A[0,0] = 0
A[1,1] = 0
for p in (None, 1, 2, 'fro', -1, -2):
c = linalg.cond(A, p)
assert_equal(c[0,0], np.inf)
assert_equal(c[1,1], np.inf)
assert_(np.isfinite(c[0,1]))
assert_(np.isfinite(c[1,0]))
class PinvCases(LinalgSquareTestCase,
LinalgNonsquareTestCase,
LinalgGeneralizedSquareTestCase,
LinalgGeneralizedNonsquareTestCase):
def do(self, a, b, tags):
a_ginv = linalg.pinv(a)
# `a @ a_ginv == I` does not hold if a is singular
dot = dot_generalized
assert_almost_equal(dot(dot(a, a_ginv), a), a, single_decimal=5, double_decimal=11)
assert_(consistent_subclass(a_ginv, a))
class TestPinv(PinvCases):
pass
class PinvHermitianCases(HermitianTestCase, HermitianGeneralizedTestCase):
def do(self, a, b, tags):
a_ginv = linalg.pinv(a, hermitian=True)
# `a @ a_ginv == I` does not hold if a is singular
dot = dot_generalized
assert_almost_equal(dot(dot(a, a_ginv), a), a, single_decimal=5, double_decimal=11)
assert_(consistent_subclass(a_ginv, a))
class TestPinvHermitian(PinvHermitianCases):
pass
class DetCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
def do(self, a, b, tags):
d = linalg.det(a)
(s, ld) = linalg.slogdet(a)
if asarray(a).dtype.type in (single, double):
ad = asarray(a).astype(double)
else:
ad = asarray(a).astype(cdouble)
ev = linalg.eigvals(ad)
assert_almost_equal(d, multiply.reduce(ev, axis=-1))
assert_almost_equal(s * np.exp(ld), multiply.reduce(ev, axis=-1))
s = np.atleast_1d(s)
ld = np.atleast_1d(ld)
m = (s != 0)
assert_almost_equal(np.abs(s[m]), 1)
assert_equal(ld[~m], -inf)
class TestDet(DetCases):
def test_zero(self):
assert_equal(linalg.det([[0.0]]), 0.0)
assert_equal(type(linalg.det([[0.0]])), double)
assert_equal(linalg.det([[0.0j]]), 0.0)
assert_equal(type(linalg.det([[0.0j]])), cdouble)
assert_equal(linalg.slogdet([[0.0]]), (0.0, -inf))
assert_equal(type(linalg.slogdet([[0.0]])[0]), double)
assert_equal(type(linalg.slogdet([[0.0]])[1]), double)
assert_equal(linalg.slogdet([[0.0j]]), (0.0j, -inf))
assert_equal(type(linalg.slogdet([[0.0j]])[0]), cdouble)
assert_equal(type(linalg.slogdet([[0.0j]])[1]), double)
@pytest.mark.parametrize('dtype', [single, double, csingle, cdouble])
def test_types(self, dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
assert_equal(np.linalg.det(x).dtype, dtype)
ph, s = np.linalg.slogdet(x)
assert_equal(s.dtype, get_real_dtype(dtype))
assert_equal(ph.dtype, dtype)
def test_0_size(self):
a = np.zeros((0, 0), dtype=np.complex64)
res = linalg.det(a)
assert_equal(res, 1.)
assert_(res.dtype.type is np.complex64)
res = linalg.slogdet(a)
assert_equal(res, (1, 0))
assert_(res[0].dtype.type is np.complex64)
        assert_(res[1].dtype.type is np.float32)
import pytest
from molsysmt.multitool import get_form as get_form
from molsysmt import get
from molsysmt import convert
import numpy as np
import pickle
with open('data/1tcd.pickle', 'rb') as f:
expected_values = pickle.load(f)
args = [
# 'data/1tcd.pdb',
# convert('data/1tcd.pdb', to_form='molsysmt.MolSys'),
# convert('data/1tcd.pdb', to_form='molsysmt.Topology'),
# convert('data/1tcd.pdb', to_form='openmm.Topology'),
# convert('data/1tcd.pdb', to_form='openmm.Modeller'),
# convert('data/1tcd.pdb', to_form='openmm.PDBFile'),
# convert('data/1tcd.pdb', to_form='pdbfixer.PDBFixer'),
convert('data/1tcd.pdb', to_form='mdtraj.Topology'),
convert('data/1tcd.pdb', to_form='mdtraj.Trajectory')
]
# Group
@pytest.mark.parametrize("item", args, ids=get_form)
def test_atom_index_from_group_1(item):
output = get(item, target='group', atom_index=True)
    # use the builtin all() here: np.all() over a generator object is always truthy
    assert all(np.all(output[ii]==expected_values['atom_index_from_group_1'][ii]) for ii in range(output.shape[0]))
@pytest.mark.parametrize("item", args, ids=get_form)
def test_atom_index_from_group_2(item):
output = get(item, target='group', indices=range(50,60), atom_index=True)
    assert all(np.all(output[ii]==expected_values['atom_index_from_group_2'][ii]) for ii in range(output.shape[0]))
@pytest.mark.parametrize("item", args, ids=get_form)
def test_atom_name_from_group_1(item):
output = get(item, target='group', atom_name=True)
    assert all(np.all(output[ii]==expected_values['atom_name_from_group_1'][ii]) for ii in range(output.shape[0]))
@pytest.mark.parametrize("item", args, ids=get_form)
def test_atom_name_from_group_2(item):
output = get(item, target='group', indices=range(50,60), atom_name=True)
    assert all(np.all(output[ii]==expected_values['atom_name_from_group_2'][ii]) for ii in range(output.shape[0]))
@pytest.mark.parametrize("item", args, ids=get_form)
def test_atom_id_from_group_1(item):
output = get(item, target='group', atom_id=True)
    assert all(np.all(output[ii]==expected_values['atom_id_from_group_1'][ii]) for ii in range(output.shape[0]))
@pytest.mark.parametrize("item", args, ids=get_form)
def test_atom_id_from_group_2(item):
output = get(item, target='group', indices=range(50,60), atom_id=True)
    assert all(np.all(output[ii]==expected_values['atom_id_from_group_2'][ii]) for ii in range(output.shape[0]))
@pytest.mark.parametrize("item", args, ids=get_form)
def test_atom_type_from_group_1(item):
output = get(item, target='group', atom_type=True)
    assert all(np.all(output[ii]==expected_values['atom_type_from_group_1'][ii]) for ii in range(output.shape[0]))
@pytest.mark.parametrize("item", args, ids=get_form)
def test_atom_type_from_group_2(item):
output = get(item, target='group', indices=range(50,60), atom_type=True)
    assert all(np.all(output[ii]==expected_values['atom_type_from_group_2'][ii]) for ii in range(output.shape[0]))
@pytest.mark.parametrize("item", args, ids=get_form)
def test_group_index_from_group_1(item):
output = get(item, target='group', group_index=True)
assert np.all(output==expected_values['group_index_from_group_1'])
@pytest.mark.parametrize("item", args, ids=get_form)
def test_group_index_from_group_2(item):
output = get(item, target='group', indices=range(50,60), group_index=True)
assert np.all(output==expected_values['group_index_from_group_2'])
@pytest.mark.parametrize("item", args, ids=get_form)
def test_group_name_from_group_1(item):
output = get(item, target='group', group_name=True)
assert np.all(output==expected_values['group_name_from_group_1'])
@pytest.mark.parametrize("item", args, ids=get_form)
def test_group_name_from_group_2(item):
output = get(item, target='group', indices=range(50,60), group_name=True)
assert np.all(output==expected_values['group_name_from_group_2'])
@pytest.mark.parametrize("item", args, ids=get_form)
def test_group_id_from_group_1(item):
output = get(item, target='group', group_id=True)
assert np.all(output==expected_values['group_id_from_group_1'])
@pytest.mark.parametrize("item", args, ids=get_form)
def test_group_id_from_group_2(item):
output = get(item, target='group', indices=range(50,60), group_id=True)
assert np.all(output==expected_values['group_id_from_group_2'])
@pytest.mark.parametrize("item", args, ids=get_form)
def test_group_type_from_group_1(item):
output = get(item, target='group', group_type=True)
assert np.all(output==expected_values['group_type_from_group_1'])
@pytest.mark.parametrize("item", args, ids=get_form)
def test_group_type_from_group_2(item):
output = get(item, target='group', indices=range(50,60), group_type=True)
assert np.all(output==expected_values['group_type_from_group_2'])
@pytest.mark.parametrize("item", args, ids=get_form)
def test_component_index_from_group_1(item):
output = get(item, target='group', component_index=True)
assert np.all(output==expected_values['component_index_from_group_1'])
@pytest.mark.parametrize("item", args, ids=get_form)
def test_component_index_from_group_2(item):
output = get(item, target='group', indices=range(50,60), component_index=True)
assert np.all(output==expected_values['component_index_from_group_2'])
@pytest.mark.parametrize("item", args, ids=get_form)
def test_component_name_from_group_1(item):
output = get(item, target='group', component_name=True)
assert np.all(output==expected_values['component_name_from_group_1'])
@pytest.mark.parametrize("item", args, ids=get_form)
def test_component_name_from_group_2(item):
output = get(item, target='group', indices=range(50,60), component_name=True)
assert np.all(output==expected_values['component_name_from_group_2'])
@pytest.mark.parametrize("item", args, ids=get_form)
def test_component_id_from_group_1(item):
output = get(item, target='group', component_id=True)
assert np.all(output==expected_values['component_id_from_group_1'])
@pytest.mark.parametrize("item", args, ids=get_form)
def test_component_id_from_group_2(item):
output = get(item, target='group', indices=range(50,60), component_id=True)
assert np.all(output==expected_values['component_id_from_group_2'])
@pytest.mark.parametrize("item", args, ids=get_form)
def test_component_type_from_group_1(item):
output = get(item, target='group', component_type=True)
    assert np.all(output==expected_values['component_type_from_group_1'])
"""Base class for modeling portfolio and measuring its performance.
The job of the `Portfolio` class is to create a series of positions allocated
against a cash component, produce an equity curve, incorporate basic transaction costs
and produce a set of statistics about its performance. In particular it outputs
position/profit metrics and drawdown information.
## Workflow
`Portfolio` class can be instantiated using main price of the asset, initial capital,
records of filled orders, and cash and shares balances (as a result of filling orders).
It also accepts many other parameters such as annualization factor.
* Order records are used to track trades and positions, and to measure their performance.
* Main price, initial capital, and balances are used to compute risk and performance metrics.
To simplify creation of order records and keeping track of balances, it exposes several convenience methods
with prefix `from_`. For example, you can use the `Portfolio.from_signals` method to create and fill orders
based on entry and exit signals. Alternatively, you can use `Portfolio.from_order_func` to define
a custom order function. The results are then automatically passed to the constructor method of
`Portfolio`, and you will receive a portfolio instance ready to be used for performance measurements.
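For instance, a minimal sketch (assuming `price` is a price Series like the one defined
below and `entries`/`exits` are boolean Series of the same shape):

```python-repl
>>> portfolio = vbt.Portfolio.from_signals(price, entries, exits, init_capital=100)
>>> portfolio.total_profit  # performance metric derived from the filled orders
```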
## Properties
The `Portfolio` class offers numerous properties for measuring the performance of a strategy.
They can be categorized as follows:
* Time series indexed by time, such as `Portfolio.returns`.
* Metrics indexed by columns, such as `Portfolio.total_profit`.
* Group objects with own time series and metrics, such as `Portfolio.positions`.
### Caching
Each property is cached, thus properties can effectively build upon each other, without side effects.
!!! note
Due to caching, `Portfolio` class is meant to be atomic and immutable, thus each public attribute
is marked as read-only. To change any parameter, you need to create a new `Portfolio` instance.
## Indexing
In addition, you can use pandas indexing on the `Portfolio` class itself, which forwards
indexing operation to each `__init__` argument with pandas type:
```python-repl
>>> import vectorbt as vbt
>>> import numpy as np
>>> import pandas as pd
>>> from numba import njit
>>> from datetime import datetime
>>> price = pd.Series([1, 2, 3, 2, 1], index=pd.Index([
... datetime(2020, 1, 1),
... datetime(2020, 1, 2),
... datetime(2020, 1, 3),
... datetime(2020, 1, 4),
... datetime(2020, 1, 5)
... ]), name='a')
>>> orders = pd.DataFrame({
... 'a': [np.inf, 0, 0, 0, 0],
... 'b': [1, 1, 1, 1, -np.inf],
... 'c': [np.inf, -np.inf, np.inf, -np.inf, np.inf]
... }, index=price.index)
>>> portfolio = vbt.Portfolio.from_orders(price, orders, init_capital=100)
>>> portfolio.equity
a b c
2020-01-01 100.0 100.0 100.000000
2020-01-02 200.0 101.0 200.000000
2020-01-03 300.0 103.0 200.000000
2020-01-04 200.0 100.0 133.333333
2020-01-05 100.0 96.0 133.333333
>>> portfolio['a'].equity
2020-01-01 100.0
2020-01-02 200.0
2020-01-03 300.0
2020-01-04 200.0
2020-01-05 100.0
Name: a, dtype: float64
```
!!! note
Changing index (time axis) is not supported."""
import numpy as np
import pandas as pd
from vectorbt import defaults
from vectorbt.utils import checks
from vectorbt.utils.decorators import cached_property
from vectorbt.base import reshape_fns
from vectorbt.base.indexing import PandasIndexer
from vectorbt.base.array_wrapper import ArrayWrapper
from vectorbt.generic import nb as generic_nb
from vectorbt.portfolio import nb
from vectorbt.portfolio.enums import SizeType, AccumulateExitMode, ConflictMode
from vectorbt.records import Orders, Trades, Positions, Drawdowns
def _indexing_func(obj, pd_indexing_func):
"""Perform indexing on `Portfolio`."""
if obj.wrapper.ndim == 1:
raise TypeError("Indexing on Series is not supported")
n_rows = len(obj.wrapper.index)
n_cols = len(obj.wrapper.columns)
col_mapper = obj.wrapper.wrap(np.broadcast_to(np.arange(n_cols), (n_rows, n_cols)))
col_mapper = pd_indexing_func(col_mapper)
if not pd.Index.equals(col_mapper.index, obj.wrapper.index):
raise NotImplementedError("Changing index (time axis) is not supported")
new_cols = col_mapper.values[0]
# Array-like params
def index_arraylike_param(param):
if np.asarray(param).ndim > 0:
param = reshape_fns.broadcast_to_axis_of(param, obj.main_price, 1)
param = param[new_cols]
return param
factor_returns = obj.factor_returns
if factor_returns is not None:
if checks.is_frame(factor_returns):
factor_returns = reshape_fns.broadcast_to(factor_returns, obj.main_price)
factor_returns = pd_indexing_func(factor_returns)
# Create new Portfolio instance
return obj.__class__(
pd_indexing_func(obj.main_price),
obj.init_capital.iloc[new_cols],
pd_indexing_func(obj.orders), # Orders class supports indexing
pd_indexing_func(obj.cash),
pd_indexing_func(obj.shares),
freq=obj.freq,
year_freq=obj.year_freq,
levy_alpha=index_arraylike_param(obj.levy_alpha),
risk_free=index_arraylike_param(obj.risk_free),
required_return=index_arraylike_param(obj.required_return),
cutoff=index_arraylike_param(obj.cutoff),
factor_returns=factor_returns,
incl_unrealized_stats=obj.incl_unrealized_stats
)
class Portfolio(PandasIndexer):
"""Class for modeling portfolio and measuring its performance.
Args:
main_price (pandas_like): Main price of the asset.
init_capital (float or pd.Series): The initial capital.
Each element must correspond to a column in `main_price`.
orders (vectorbt.records.orders.Orders): Order records.
cash (pandas_like): Cash held at each time step.
Must have the same metadata as `main_price`.
shares (pandas_like): Shares held at each time step.
Must have the same metadata as `main_price`.
freq (any): Index frequency in case `main_price.index` is not datetime-like.
year_freq (any): Year frequency for working with returns.
levy_alpha (float or array_like): Scaling relation (Levy stability exponent).
Single value or value per column.
risk_free (float or array_like): Constant risk-free return throughout the period.
Single value or value per column.
required_return (float or array_like): Minimum acceptance return of the investor.
Single value or value per column.
cutoff (float or array_like): Decimal representing the percentage cutoff for the
bottom percentile of returns.
Single value or value per column.
factor_returns (array_like): Benchmark return to compare returns against. Will broadcast.
By default it's `None`, but it's required by some return-based metrics.
incl_unrealized_stats (bool): Whether to include unrealized metrics in `Portfolio.stats`.
!!! note
Use class methods with `from_` prefix to build a portfolio.
The `__init__` method is reserved for indexing purposes.
All array objects must have the same metadata as `main_price`."""
def __init__(self, main_price, init_capital, orders, cash, shares, freq=None,
year_freq=None, levy_alpha=None, risk_free=None, required_return=None,
cutoff=None, factor_returns=None, incl_unrealized_stats=False):
# Perform checks
checks.assert_type(main_price, (pd.Series, pd.DataFrame))
if checks.is_frame(main_price):
checks.assert_type(init_capital, pd.Series)
checks.assert_same(main_price.columns, init_capital.index)
else:
checks.assert_ndim(init_capital, 0)
checks.assert_same_meta(main_price, cash)
checks.assert_same_meta(main_price, shares)
# Store passed arguments
self._main_price = main_price
self._init_capital = init_capital
self._orders = orders
self._cash = cash
self._shares = shares
self._incl_unrealized_stats = incl_unrealized_stats
freq = main_price.vbt(freq=freq).freq
if freq is None:
raise ValueError("Couldn't parse the frequency of index. You must set `freq`.")
self._freq = freq
year_freq = main_price.vbt.returns(year_freq=year_freq).year_freq
        if year_freq is None:
raise ValueError("You must set `year_freq`.")
self._year_freq = year_freq
# Parameters
self._levy_alpha = defaults.portfolio['levy_alpha'] if levy_alpha is None else levy_alpha
self._risk_free = defaults.portfolio['risk_free'] if risk_free is None else risk_free
self._required_return = defaults.portfolio['required_return'] if required_return is None else required_return
self._cutoff = defaults.portfolio['cutoff'] if cutoff is None else cutoff
self._factor_returns = defaults.portfolio['factor_returns'] if factor_returns is None else factor_returns
# Supercharge
PandasIndexer.__init__(self, _indexing_func)
self.wrapper = ArrayWrapper.from_obj(main_price, freq=freq)
# ############# Class methods ############# #
@classmethod
def from_signals(cls, main_price, entries, exits, size=np.inf, size_type=SizeType.Shares,
entry_price=None, exit_price=None, init_capital=None, fees=None, fixed_fees=None,
slippage=None, accumulate=None, accumulate_exit_mode=None, conflict_mode=None,
broadcast_kwargs={}, freq=None, **kwargs):
"""Build portfolio from entry and exit signals.
For each signal in `entries`, buys `size` of shares for `entry_price` to enter
the position. For each signal in `exits`, sells everything for `exit_price`
to exit the position. Accumulation of orders is disabled by default.
For more details, see `vectorbt.portfolio.nb.simulate_from_signals_nb`.
Args:
main_price (pandas_like): Main price of the asset, such as close. Will broadcast.
entries (array_like): Boolean array of entry signals. Will broadcast.
exits (array_like): Boolean array of exit signals. Will broadcast.
size (float or array_like): The amount of shares to order. Will broadcast.
To buy/sell everything, set the size to `np.inf`.
size_type (int or array_like): See `vectorbt.portfolio.enums.SizeType`.
Only `SizeType.Shares` and `SizeType.Cash` are supported.
entry_price (array_like): Entry price. Defaults to `main_price`. Will broadcast.
exit_price (array_like): Exit price. Defaults to `main_price`. Will broadcast.
init_capital (float or array_like): The initial capital. Will broadcast.
Allowed is either a single value or value per column.
fees (float or array_like): Fees in percentage of the order value. Will broadcast.
fixed_fees (float or array_like): Fixed amount of fees to pay per order. Will broadcast.
slippage (float or array_like): Slippage in percentage of price. Will broadcast.
accumulate (bool): If `accumulate` is `True`, entering the market when already
in the market will be allowed to increase a position.
accumulate_exit_mode: See `vectorbt.portfolio.enums.AccumulateExitMode`.
conflict_mode: See `vectorbt.portfolio.enums.ConflictMode`.
broadcast_kwargs: Keyword arguments passed to `vectorbt.base.reshape_fns.broadcast`.
freq (any): Index frequency in case `main_price.index` is not datetime-like.
**kwargs: Keyword arguments passed to the `__init__` method.
For defaults, see `vectorbt.defaults.portfolio`.
All time series will be broadcasted together using `vectorbt.base.reshape_fns.broadcast`.
At the end, they will have the same metadata.
Example:
Portfolio from various signal sequences:
```python-repl
>>> entries = pd.DataFrame({
... 'a': [True, False, False, False, False],
... 'b': [True, False, True, False, True],
... 'c': [True, True, True, True, True]
            ... }, index=price.index)
>>> exits = pd.DataFrame({
... 'a': [False, False, False, False, False],
... 'b': [False, True, False, True, False],
... 'c': [True, True, True, True, True]
            ... }, index=price.index)
>>> portfolio = vbt.Portfolio.from_signals(
... price, entries, exits, size=10, init_capital=100,
... fees=0.0025, fixed_fees=1., slippage=0.001)
>>> portfolio.orders.records
col idx size price fees side
0 0 0 10.0 1.001 1.025025 0
1 1 0 10.0 1.001 1.025025 0
2 1 1 10.0 1.998 1.049950 1
3 1 2 10.0 3.003 1.075075 0
4 1 3 10.0 1.998 1.049950 1
5 1 4 10.0 1.001 1.025025 0
6 2 0 10.0 1.001 1.025025 0
>>> portfolio.equity
a b c
2020-01-01 98.964975 98.964975 98.964975
2020-01-02 108.964975 107.895025 108.964975
2020-01-03 118.964975 106.789950 118.964975
2020-01-04 108.964975 95.720000 108.964975
2020-01-05 98.964975 94.684975 98.964975
```
"""
# Get defaults
if entry_price is None:
entry_price = main_price
if exit_price is None:
exit_price = main_price
if init_capital is None:
init_capital = defaults.portfolio['init_capital']
if size is None:
size = defaults.portfolio['size']
if size_type is None:
size_type = defaults.portfolio['size_type']
if isinstance(size_type, str):
size_type = getattr(SizeType, size_type)
if fees is None:
fees = defaults.portfolio['fees']
if fixed_fees is None:
fixed_fees = defaults.portfolio['fixed_fees']
if slippage is None:
slippage = defaults.portfolio['slippage']
if accumulate is None:
accumulate = defaults.portfolio['accumulate']
if accumulate_exit_mode is None:
accumulate_exit_mode = defaults.portfolio['accumulate_exit_mode']
if isinstance(accumulate_exit_mode, str):
accumulate_exit_mode = getattr(AccumulateExitMode, accumulate_exit_mode)
if conflict_mode is None:
conflict_mode = defaults.portfolio['conflict_mode']
if isinstance(conflict_mode, str):
conflict_mode = getattr(ConflictMode, conflict_mode)
# Perform checks
checks.assert_type(main_price, (pd.Series, pd.DataFrame))
checks.assert_dtype(entries, np.bool_)
checks.assert_dtype(exits, np.bool_)
# Broadcast inputs
# Only main_price is broadcasted, others can remain unchanged thanks to flexible indexing
keep_raw = (False, True, True, True, True, True, True, True, True, True, True)
main_price, entries, exits, size, size_type, entry_price, \
exit_price, fees, fixed_fees, slippage, init_capital = \
reshape_fns.broadcast(
main_price, entries, exits, size, size_type, entry_price, exit_price, fees,
fixed_fees, slippage, init_capital, **broadcast_kwargs,
writeable=True, keep_raw=keep_raw)
target_shape = (main_price.shape[0], main_price.shape[1] if main_price.ndim > 1 else 1)
# Perform calculation
order_records, cash, shares = nb.simulate_from_signals_nb(
target_shape,
init_capital,
entries,
exits,
size,
size_type,
entry_price,
exit_price,
fees,
fixed_fees,
slippage,
accumulate,
accumulate_exit_mode,
conflict_mode,
is_2d=main_price.ndim == 2
)
# Bring to the same meta
cash = main_price.vbt.wrap(cash)
shares = main_price.vbt.wrap(shares)
orders = Orders(order_records, main_price, freq=freq)
if checks.is_series(main_price):
init_capital = init_capital.item(0)
else:
init_capital = np.broadcast_to(init_capital, (target_shape[1],))
init_capital = main_price.vbt.wrap_reduced(init_capital)
return cls(main_price, init_capital, orders, cash, shares, freq=freq, **kwargs)
@classmethod
def from_orders(cls, main_price, order_size, size_type=None, order_price=None,
init_capital=None, fees=None, fixed_fees=None, slippage=None,
broadcast_kwargs={}, freq=None, **kwargs):
"""Build portfolio from orders.
Starting with initial capital `init_capital`, at each time step, orders the number
of shares specified in `order_size` for `order_price`.
For more details, see `vectorbt.portfolio.nb.simulate_from_orders_nb`.
Args:
main_price (pandas_like): Main price of the asset, such as close. Will broadcast.
order_size (float or array_like): The amount of shares to order. Will broadcast.
If the size is positive, this is the number of shares to buy.
If the size is negative, this is the number of shares to sell.
To buy/sell everything, set the size to `np.inf`.
size_type (int or array_like): See `vectorbt.portfolio.enums.SizeType`.
order_price (array_like): Order price. Defaults to `main_price`. Will broadcast.
init_capital (float or array_like): The initial capital. Will broadcast.
Allowed is either a single value or value per column.
fees (float or array_like): Fees in percentage of the order value. Will broadcast.
fixed_fees (float or array_like): Fixed amount of fees to pay per order. Will broadcast.
slippage (float or array_like): Slippage in percentage of price. Will broadcast.
broadcast_kwargs: Keyword arguments passed to `vectorbt.base.reshape_fns.broadcast`.
freq (any): Index frequency in case `main_price.index` is not datetime-like.
**kwargs: Keyword arguments passed to the `__init__` method.
For defaults, see `vectorbt.defaults.portfolio`.
All time series will be broadcasted together using `vectorbt.base.reshape_fns.broadcast`.
At the end, they will have the same metadata.
Example:
Portfolio from various order sequences:
```python-repl
>>> portfolio = vbt.Portfolio.from_orders(price, orders,
... init_capital=100, fees=0.0025, fixed_fees=1., slippage=0.001)
>>> portfolio.orders.records
col idx size price fees side
0 0 0 98.654463 1.001 1.246883 0
1 1 0 1.000000 1.001 1.002502 0
2 1 1 1.000000 2.002 1.005005 0
3 1 2 1.000000 3.003 1.007507 0
4 1 3 1.000000 2.002 1.005005 0
5 1 4 4.000000 0.999 1.009990 1
6 2 0 98.654463 1.001 1.246883 0
7 2 1 98.654463 1.998 1.492779 1
8 2 2 64.646521 3.003 1.485334 0
9 2 3 64.646521 1.998 1.322909 1
10 2 4 126.398131 1.001 1.316311 0
>>> portfolio.equity
a b c
2020-01-01 98.654463 98.996498 98.654463
2020-01-02 197.308925 98.989493 195.618838
2020-01-03 295.963388 99.978985 193.939564
2020-01-04 197.308925 95.971980 127.840840
2020-01-05 98.654463 90.957990 126.398131
```
"""
# Get defaults
if order_price is None:
order_price = main_price
if size_type is None:
size_type = defaults.portfolio['size_type']
if isinstance(size_type, str):
size_type = getattr(SizeType, size_type)
if init_capital is None:
init_capital = defaults.portfolio['init_capital']
if fees is None:
fees = defaults.portfolio['fees']
if fixed_fees is None:
fixed_fees = defaults.portfolio['fixed_fees']
if slippage is None:
slippage = defaults.portfolio['slippage']
# Perform checks
checks.assert_type(main_price, (pd.Series, pd.DataFrame))
# Broadcast inputs
# Only main_price is broadcasted, others can remain unchanged thanks to flexible indexing
keep_raw = (False, True, True, True, True, True, True, True)
main_price, order_size, size_type, order_price, fees, fixed_fees, slippage, init_capital = \
reshape_fns.broadcast(
main_price, order_size, size_type, order_price, fees, fixed_fees, slippage, init_capital,
**broadcast_kwargs, writeable=True, keep_raw=keep_raw)
target_shape = (main_price.shape[0], main_price.shape[1] if main_price.ndim > 1 else 1)
# Perform calculation
order_records, cash, shares = nb.simulate_from_orders_nb(
target_shape, init_capital, order_size, size_type, order_price,
fees, fixed_fees, slippage, is_2d=main_price.ndim == 2)
# Bring to the same meta
cash = main_price.vbt.wrap(cash)
shares = main_price.vbt.wrap(shares)
orders = Orders(order_records, main_price, freq=freq)
if checks.is_series(main_price):
init_capital = init_capital.item(0)
else:
            init_capital = np.broadcast_to(init_capital, (target_shape[1],))
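        # The method is truncated here in this copy; by analogy with from_signals above,
        # it would plausibly continue with (inferred, not verbatim):
        #   init_capital = main_price.vbt.wrap_reduced(init_capital)
        #   return cls(main_price, init_capital, orders, cash, shares, freq=freq, **kwargs)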
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 17 14:36:33 2020
@author: similarities
"""
import numpy as np
import matplotlib.pyplot as plt
class optical_Transfer_matrix:
def __init__(self, x, Theta):
        self.working_array = np.array([[1,0], [0,1]])
# (c) Copyright [2018-2021] Micro Focus or one of its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# |_ |~) _ _| _ /~\ _ |.
# |_)\/ |_)(_|(_|| \_/|_|(_|||
# /
# ____________ ______
# / __ `\ / /
# | \/ / / /
# |______ / / /
# |____/ / /
# _____________ / /
# \ / / /
# \ / / /
# \_______/ / /
# ______ / /
# \ / / /
# \ / / /
# \/ / /
# / /
# / /
# \ /
# \ /
# \/
# _
# \ / _ __|_. _ _ |_)
# \/ (/_| | |(_(_|| \/
# /
# VerticaPy is a Python library with scikit-like functionality to use to conduct
# data science projects on data stored in Vertica, taking advantage Vertica’s
# speed and built-in analytics and machine learning features. It supports the
# entire data science life cycle, uses a ‘pipeline’ mechanism to sequentialize
# data transformation operations, and offers beautiful graphical options.
#
# VerticaPy aims to solve all of these problems. The idea is simple: instead
# of moving data around for processing, VerticaPy brings the logic to the data.
#
#
# Modules
#
# Standard Python Modules
import os, warnings
import numpy as np
# VerticaPy Modules
from verticapy import vDataFrame
from verticapy.learn.mlplot import *
from verticapy.learn.model_selection import *
from verticapy.utilities import *
from verticapy.toolbox import *
from verticapy.errors import *
from verticapy.learn.metrics import *
##
# ___ ___ ___ ___ ______ ________ _______ ___
# |" \ /" ||" \ /" | / " \ |" "\ /" "||" |
# \ \ // / \ \ // | // ____ \ (. ___ :)(: ______)|| |
# \\ \/. ./ /\\ \/. | / / ) :)|: \ ) || \/ | |: |
# \. // |: \. |(: (____/ // (| (___\ || // ___)_ \ |___
# \\ / |. \ /: | \ / |: :)(: "|( \_|: \
# \__/ |___|\__/|___| \"_____/ (________/ \_______) \_______)
#
#
# ---#
class vModel:
"""
---------------------------------------------------------------------------
Main Class for Vertica Model
"""
# ---#
def __repr__(self):
"""
---------------------------------------------------------------------------
Returns the model Representation.
"""
try:
rep = ""
if self.type not in (
"DBSCAN",
"NearestCentroid",
"VAR",
"SARIMAX",
"LocalOutlierFactor",
"KNeighborsRegressor",
"KNeighborsClassifier",
"CountVectorizer",
):
name = self.tree_name if self.type in ("KernelDensity") else self.name
try:
version(cursor=self.cursor, condition=[9, 0, 0])
executeSQL(
self.cursor,
"SELECT GET_MODEL_SUMMARY(USING PARAMETERS model_name = '{}')".format(
name
),
"Summarizing the model.",
)
except:
executeSQL(
self.cursor,
"SELECT SUMMARIZE_MODEL('{}')".format(name),
"Summarizing the model.",
)
return self.cursor.fetchone()[0]
elif self.type == "DBSCAN":
rep = "=======\ndetails\n=======\nNumber of Clusters: {}\nNumber of Outliers: {}".format(
self.n_cluster_, self.n_noise_
)
elif self.type == "LocalOutlierFactor":
rep = "=======\ndetails\n=======\nNumber of Errors: {}".format(
self.n_errors_
)
elif self.type == "NearestCentroid":
rep = "=======\ndetails\n=======\n" + self.centroids_.__repr__()
elif self.type == "VAR":
rep = "=======\ndetails\n======="
for idx, elem in enumerate(self.X):
rep += "\n\n # " + str(elem) + "\n\n" + self.coef_[idx].__repr__()
rep += "\n\n===============\nAdditional Info\n==============="
rep += "\nInput Relation : {}".format(self.input_relation)
rep += "\nX : {}".format(", ".join(self.X))
rep += "\nts : {}".format(self.ts)
elif self.type == "SARIMAX":
rep = "=======\ndetails\n======="
rep += "\n\n# Coefficients\n\n" + self.coef_.__repr__()
if self.ma_piq_:
rep += "\n\n# MA PIQ\n\n" + self.ma_piq_.__repr__()
rep += "\n\n===============\nAdditional Info\n==============="
rep += "\nInput Relation : {}".format(self.input_relation)
rep += "\ny : {}".format(self.y)
rep += "\nts : {}".format(self.ts)
if self.exogenous:
rep += "\nExogenous Variables : {}".format(
", ".join(self.exogenous)
)
if self.ma_avg_:
rep += "\nMA AVG : {}".format(self.ma_avg_)
elif self.type == "CountVectorizer":
rep = "=======\ndetails\n======="
if self.vocabulary_:
voc = [str(elem) for elem in self.vocabulary_]
if len(voc) > 100:
voc = voc[0:100] + [
"... ({} more)".format(len(self.vocabulary_) - 100)
]
rep += "\n\n# Vocabulary\n\n" + ", ".join(voc)
if self.stop_words_:
rep += "\n\n# Stop Words\n\n" + ", ".join(
[str(elem) for elem in self.stop_words_]
)
rep += "\n\n===============\nAdditional Info\n==============="
rep += "\nInput Relation : {}".format(self.input_relation)
rep += "\nX : {}".format(", ".join(self.X))
if self.type in (
"DBSCAN",
"NearestCentroid",
"LocalOutlierFactor",
"KNeighborsRegressor",
"KNeighborsClassifier",
):
rep += "\n\n===============\nAdditional Info\n==============="
rep += "\nInput Relation : {}".format(self.input_relation)
rep += "\nX : {}".format(", ".join(self.X))
if self.type in (
"NearestCentroid",
"KNeighborsRegressor",
"KNeighborsClassifier",
):
rep += "\ny : {}".format(self.y)
return rep
except:
return "<{}>".format(self.type)
# ---#
def deploySQL(self, X: list = []):
"""
---------------------------------------------------------------------------
Returns the SQL code needed to deploy the model.
Parameters
----------
X: list, optional
List of the columns used to deploy the model. If empty, the model
predictors will be used.
Returns
-------
str
the SQL code needed to deploy the model.
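        Example
        -------
        A minimal sketch (illustrative; the generated function name depends on the
        model type and the column names on the data used to fit the model):
            model.deploySQL()
            # "PREDICT_<FUN>(col1, col2 USING PARAMETERS model_name = '<name>', match_by_pos = 'true')"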
"""
if self.type not in ("DBSCAN", "LocalOutlierFactor"):
name = self.tree_name if self.type in ("KernelDensity") else self.name
check_types([("X", X, [list],)])
X = [str_column(elem) for elem in X]
fun = self.get_model_fun()[1]
sql = "{}({} USING PARAMETERS model_name = '{}', match_by_pos = 'true')"
return sql.format(fun, ", ".join(self.X if not (X) else X), name)
else:
raise FunctionError(
"Method 'deploySQL' for '{}' doesn't exist.".format(self.type)
)
# ---#
def drop(self):
"""
---------------------------------------------------------------------------
Drops the model from the Vertica DB.
"""
with warnings.catch_warnings(record=True) as w:
drop_model(
self.name, self.cursor,
)
# ---#
def features_importance(
self, ax=None, tree_id: int = None, show: bool = True, **style_kwds,
):
"""
---------------------------------------------------------------------------
Computes the model features importance.
Parameters
----------
ax: Matplotlib axes object, optional
The axes to plot on.
tree_id: int
Tree ID in case of Tree Based models.
show: bool
If set to True, draw the features importance.
**style_kwds
Any optional parameter to pass to the Matplotlib functions.
Returns
-------
tablesample
An object containing the result. For more information, see
utilities.tablesample.
"""
if self.type in (
"RandomForestClassifier",
"RandomForestRegressor",
"KernelDensity",
):
check_types([("tree_id", tree_id, [int])])
name = self.tree_name if self.type in ("KernelDensity") else self.name
version(cursor=self.cursor, condition=[9, 1, 1])
tree_id = "" if not (tree_id) else ", tree_id={}".format(tree_id)
query = "SELECT predictor_name AS predictor, ROUND(100 * importance_value / SUM(importance_value) OVER (), 2)::float AS importance, SIGN(importance_value)::int AS sign FROM (SELECT RF_PREDICTOR_IMPORTANCE ( USING PARAMETERS model_name = '{}'{})) VERTICAPY_SUBTABLE ORDER BY 2 DESC;".format(
name, tree_id,
)
print_legend = False
elif self.type in (
"LinearRegression",
"LogisticRegression",
"LinearSVC",
"LinearSVR",
"SARIMAX",
):
if self.type == "SARIMAX":
relation = (
self.transform_relation.replace("[VerticaPy_y]", self.y)
.replace("[VerticaPy_ts]", self.ts)
.replace(
"[VerticaPy_key_columns]", ", ".join(self.exogenous + [self.ts])
)
.format(self.input_relation)
)
else:
relation = self.input_relation
version(cursor=self.cursor, condition=[8, 1, 1])
query = "SELECT predictor, ROUND(100 * importance / SUM(importance) OVER(), 2) AS importance, sign FROM "
query += "(SELECT stat.predictor AS predictor, ABS(coefficient * (max - min))::float AS importance, SIGN(coefficient)::int AS sign FROM "
query += '(SELECT LOWER("column") AS predictor, min, max FROM (SELECT SUMMARIZE_NUMCOL({}) OVER() '.format(
", ".join(self.X)
)
query += " FROM {}) VERTICAPY_SUBTABLE) stat NATURAL JOIN ({})".format(
relation, self.coef_.to_sql()
)
query += " coeff) importance_t ORDER BY 2 DESC;"
print_legend = True
else:
raise FunctionError(
"Method 'features_importance' for '{}' doesn't exist.".format(self.type)
)
executeSQL(self.cursor, query, "Computing Features Importance.")
result = self.cursor.fetchall()
coeff_importances, coeff_sign = {}, {}
for elem in result:
coeff_importances[elem[0]] = elem[1]
coeff_sign[elem[0]] = elem[2]
if show:
plot_importance(
coeff_importances,
coeff_sign,
print_legend=print_legend,
ax=ax,
**style_kwds,
)
importances = {"index": ["importance", "sign"]}
for elem in coeff_importances:
importances[elem] = [coeff_importances[elem], coeff_sign[elem]]
return tablesample(values=importances).transpose()
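    # Usage sketch (illustrative values; the returned tablesample holds, for
    # each predictor, its relative importance in percent and the sign of its
    # effect on the response):
    #
    #   fi = model.features_importance(show=False)
    #   # fi["importance"] -> e.g. [52.3, 31.1, 16.6]
    #   # fi["sign"]       -> e.g. [1, -1, 1]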
# ---#
def get_attr(self, attr_name: str = ""):
"""
---------------------------------------------------------------------------
Returns the model attribute.
Parameters
----------
attr_name: str, optional
Attribute Name.
Returns
-------
tablesample
model attribute
"""
if self.type not in ("DBSCAN", "LocalOutlierFactor", "VAR", "SARIMAX"):
name = self.tree_name if self.type in ("KernelDensity") else self.name
version(cursor=self.cursor, condition=[8, 1, 1])
result = to_tablesample(
query="SELECT GET_MODEL_ATTRIBUTE(USING PARAMETERS model_name = '{}'{})".format(
name, ", attr_name = '{}'".format(attr_name) if attr_name else "",
),
cursor=self.cursor,
title="Getting Model Attributes.",
)
return result
elif self.type in ("DBSCAN"):
if attr_name == "n_cluster":
return self.n_cluster_
elif attr_name == "n_noise":
return self.n_noise_
elif not (attr_name):
result = tablesample(
values={
"attr_name": ["n_cluster", "n_noise"],
"value": [self.n_cluster_, self.n_noise_],
},
name="Attributes",
)
return result
else:
raise ParameterError("Attribute '' doesn't exist.".format(attr_name))
elif self.type in ("LocalOutlierFactor"):
if attr_name == "n_errors":
return self.n_errors_
elif not (attr_name):
result = tablesample(
values={"attr_name": ["n_errors"], "value": [self.n_errors_]},
)
return result
else:
raise ParameterError("Attribute '' doesn't exist.".format(attr_name))
elif self.type in ("SARIMAX"):
if attr_name == "coef":
return self.coef_
elif attr_name == "ma_avg":
return self.ma_avg_
elif attr_name == "ma_piq":
return self.ma_piq_
elif not (attr_name):
result = tablesample(
values={"attr_name": ["coef", "ma_avg", "ma_piq"]},
)
return result
else:
raise ParameterError("Attribute '' doesn't exist.".format(attr_name))
elif self.type in ("VAR"):
if attr_name == "coef":
return self.coef_
elif not (attr_name):
result = tablesample(values={"attr_name": ["coef"]},)
return result
else:
raise ParameterError("Attribute '' doesn't exist.".format(attr_name))
elif self.type in ("KernelDensity"):
if attr_name == "map":
return self.map_
elif not (attr_name):
result = tablesample(values={"attr_name": ["map"]},)
return result
else:
raise ParameterError("Attribute '' doesn't exist.".format(attr_name))
else:
raise FunctionError(
"Method 'get_attr' for '{}' doesn't exist.".format(self.type)
)
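    # Usage sketch (attribute names depend on the underlying Vertica model;
    # the ones below are only examples):
    #
    #   model.get_attr()             # summary of all available attributes
    #   model.get_attr("details")    # e.g. coefficients of a linear model
    #   model.get_attr("centers")    # e.g. cluster centers of a KMeans model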
# ---#
def get_model_fun(self):
"""
---------------------------------------------------------------------------
Returns the Vertica associated functions.
Returns
-------
tuple
(FIT, PREDICT, INVERSE)
"""
if self.type in ("LinearRegression", "SARIMAX"):
return ("LINEAR_REG", "PREDICT_LINEAR_REG", "")
elif self.type == "LogisticRegression":
return ("LOGISTIC_REG", "PREDICT_LOGISTIC_REG", "")
elif self.type == "LinearSVC":
return ("SVM_CLASSIFIER", "PREDICT_SVM_CLASSIFIER", "")
elif self.type == "LinearSVR":
return ("SVM_REGRESSOR", "PREDICT_SVM_REGRESSOR", "")
elif self.type in ("RandomForestRegressor", "KernelDensity"):
return ("RF_REGRESSOR", "PREDICT_RF_REGRESSOR", "")
elif self.type == "RandomForestClassifier":
return ("RF_CLASSIFIER", "PREDICT_RF_CLASSIFIER", "")
elif self.type == "NaiveBayes":
return ("NAIVE_BAYES", "PREDICT_NAIVE_BAYES", "")
elif self.type == "KMeans":
return ("KMEANS", "APPLY_KMEANS", "")
elif self.type == "BisectingKMeans":
return ("BISECTING_KMEANS", "APPLY_BISECTING_KMEANS", "")
elif self.type == "PCA":
return ("PCA", "APPLY_PCA", "APPLY_INVERSE_PCA")
elif self.type == "SVD":
return ("SVD", "APPLY_SVD", "APPLY_INVERSE_SVD")
elif self.type == "Normalizer":
return ("NORMALIZE_FIT", "APPLY_NORMALIZE", "REVERSE_NORMALIZE")
elif self.type == "OneHotEncoder":
return ("ONE_HOT_ENCODER_FIT", "APPLY_ONE_HOT_ENCODER", "")
else:
return ("", "", "")
# ---#
def get_params(self):
"""
---------------------------------------------------------------------------
Returns the model Parameters.
Returns
-------
dict
model parameters
"""
return self.parameters
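    # Usage sketch (keys and values are illustrative and depend on the model
    # type):
    #
    #   model.get_params()
    #   # -> {'penalty': 'none', 'tol': 1e-06, 'max_iter': 100, 'solver': 'newton'}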
# ---#
def plot(
self, max_nb_points: int = 100, ax=None, **style_kwds,
):
"""
---------------------------------------------------------------------------
Draws the Model.
Parameters
----------
max_nb_points: int
Maximum number of points to display.
ax: Matplotlib axes object, optional
The axes to plot on.
**style_kwds
Any optional parameter to pass to the Matplotlib functions.
Returns
-------
ax
Matplotlib axes object
"""
check_types([("max_nb_points", max_nb_points, [int, float],)])
if self.type in (
"LinearRegression",
"LogisticRegression",
"LinearSVC",
"LinearSVR",
):
coefficients = self.coef_.values["coefficient"]
if self.type == "LogisticRegression":
return logit_plot(
self.X,
self.y,
self.input_relation,
coefficients,
self.cursor,
max_nb_points,
ax=ax,
**style_kwds,
)
elif self.type == "LinearSVC":
return svm_classifier_plot(
self.X,
self.y,
self.input_relation,
coefficients,
self.cursor,
max_nb_points,
ax=ax,
**style_kwds,
)
else:
return regression_plot(
self.X,
self.y,
self.input_relation,
coefficients,
self.cursor,
max_nb_points,
ax=ax,
**style_kwds,
)
elif self.type in ("KMeans", "BisectingKMeans", "DBSCAN"):
if self.type != "DBSCAN":
vdf = vdf_from_relation(self.input_relation, cursor=self.cursor)
self.predict(vdf, name="kmeans_cluster")
catcol = "kmeans_cluster"
else:
vdf = vdf_from_relation(self.name, cursor=self.cursor)
catcol = "dbscan_cluster"
if 2 <= len(self.X) <= 3:
return vdf.scatter(
columns=self.X,
catcol=catcol,
max_cardinality=100,
max_nb_points=max_nb_points,
ax=ax,
**style_kwds,
)
else:
raise Exception("Clustering Plots are only available in 2D or 3D.")
elif self.type in ("PCA", "SVD"):
if 2 <= self.parameters["n_components"] or (
self.parameters["n_components"] <= 0 and len(self.X) > 1
):
X = [
"col{}".format(i + 1)
for i in range(min(max(self.parameters["n_components"], 2), 3))
]
return self.transform().scatter(
columns=X, max_nb_points=max_nb_points, ax=ax, **style_kwds,
)
else:
raise Exception("Decomposition Plots are not available in 1D")
elif self.type in ("LocalOutlierFactor"):
query = "SELECT COUNT(*) FROM {}".format(self.name)
tablesample = 100 * min(
float(max_nb_points / self.cursor.execute(query).fetchone()[0]), 1
)
return lof_plot(
self.name, self.X, "lof_score", self.cursor, 100, ax=ax, **style_kwds,
)
else:
raise FunctionError(
"Method 'plot' for '{}' doesn't exist.".format(self.type)
)
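    # Usage sketch (hypothetical models; the kind of figure depends on the
    # model type, as dispatched above):
    #
    #   lr_model.plot(max_nb_points=500)   # regression line / logit / SVM plane
    #   kmeans_model.plot()                # 2D or 3D scatter colored by cluster
    #   pca_model.plot()                   # scatter of the first components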
# ---#
def set_cursor(self, cursor):
"""
---------------------------------------------------------------------------
Sets a new DB cursor. This can be very useful if the connection to the
DB is lost.
Parameters
----------
cursor: DBcursor
New cursor.
Returns
-------
model
self
"""
check_cursor(cursor)
cursor.execute("SELECT 1;")
self.cursor = cursor
return self
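    # Usage sketch (assumes a vertica_python connection; 'conn_info' is a
    # hypothetical dictionary of connection parameters):
    #
    #   import vertica_python
    #   new_conn = vertica_python.connect(**conn_info)
    #   model.set_cursor(new_conn.cursor())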
# ---#
def set_params(self, parameters: dict = {}):
"""
---------------------------------------------------------------------------
Sets the parameters of the model.
Parameters
----------
parameters: dict, optional
New parameters.
"""
try:
self.parameters
except:
self.parameters = {}
model_parameters = {}
default_parameters = default_model_parameters(self.type)
if self.type in ("LinearRegression", "LogisticRegression", "SARIMAX", "VAR"):
if "solver" in parameters:
check_types([("solver", parameters["solver"], [str],)])
assert str(parameters["solver"]).lower() in [
"newton",
"bfgs",
"cgd",
], ParameterError(
"Incorrect parameter 'solver'.\nThe optimizer must be in (Newton | BFGS | CGD), found '{}'.".format(
parameters["solver"]
)
)
model_parameters["solver"] = parameters["solver"]
elif "solver" not in self.parameters:
model_parameters["solver"] = default_parameters["solver"]
else:
model_parameters["solver"] = self.parameters["solver"]
if "penalty" in parameters and self.type in (
"LinearRegression",
"LogisticRegression",
):
check_types([("penalty", parameters["penalty"], [str],)])
assert str(parameters["penalty"]).lower() in [
"none",
"l1",
"l2",
"enet",
], ParameterError(
"Incorrect parameter 'penalty'.\nThe regularization must be in (None | L1 | L2 | ENet), found '{}'.".format(
parameters["penalty"]
)
)
model_parameters["penalty"] = parameters["penalty"]
elif (
self.type in ("LinearRegression", "LogisticRegression")
and "penalty" not in self.parameters
):
model_parameters["penalty"] = default_parameters["penalty"]
elif self.type in ("LinearRegression", "LogisticRegression"):
model_parameters["penalty"] = self.parameters["penalty"]
if "max_iter" in parameters:
check_types([("max_iter", parameters["max_iter"], [int, float],)])
assert 0 <= parameters["max_iter"], ParameterError(
"Incorrect parameter 'max_iter'.\nThe maximum number of iterations must be positive."
)
model_parameters["max_iter"] = parameters["max_iter"]
elif "max_iter" not in self.parameters:
model_parameters["max_iter"] = default_parameters["max_iter"]
else:
model_parameters["max_iter"] = self.parameters["max_iter"]
if "l1_ratio" in parameters and self.type in (
"LinearRegression",
"LogisticRegression",
):
check_types([("l1_ratio", parameters["l1_ratio"], [int, float],)])
assert 0 <= parameters["l1_ratio"] <= 1, ParameterError(
"Incorrect parameter 'l1_ratio'.\nThe ENet Mixture must be between 0 and 1."
)
model_parameters["l1_ratio"] = parameters["l1_ratio"]
elif (
self.type in ("LinearRegression", "LogisticRegression")
and "l1_ratio" not in self.parameters
):
model_parameters["l1_ratio"] = default_parameters["l1_ratio"]
elif self.type in ("LinearRegression", "LogisticRegression"):
model_parameters["l1_ratio"] = self.parameters["l1_ratio"]
if "C" in parameters and self.type in (
"LinearRegression",
"LogisticRegression",
):
check_types([("C", parameters["C"], [int, float],)])
assert 0 <= parameters["C"], ParameterError(
"Incorrect parameter 'C'.\nThe regularization parameter value must be positive."
)
model_parameters["C"] = parameters["C"]
elif (
self.type in ("LinearRegression", "LogisticRegression")
and "C" not in self.parameters
):
model_parameters["C"] = default_parameters["C"]
elif self.type in ("LinearRegression", "LogisticRegression"):
model_parameters["C"] = self.parameters["C"]
if "tol" in parameters:
check_types([("tol", parameters["tol"], [int, float],)])
assert 0 <= parameters["tol"], ParameterError(
"Incorrect parameter 'tol'.\nThe tolerance parameter value must be positive."
)
model_parameters["tol"] = parameters["tol"]
elif "tol" not in self.parameters:
model_parameters["tol"] = default_parameters["tol"]
else:
model_parameters["tol"] = self.parameters["tol"]
if "p" in parameters and self.type in ("SARIMAX", "VAR"):
check_types([("p", parameters["p"], [int, float],)])
assert 0 <= parameters["p"], ParameterError(
"Incorrect parameter 'p'.\nThe order of the AR part must be positive."
)
model_parameters["p"] = parameters["p"]
elif self.type in ("SARIMAX", "VAR") and "p" not in self.parameters:
model_parameters["p"] = default_parameters["p"]
elif self.type in ("SARIMAX", "VAR"):
model_parameters["p"] = self.parameters["p"]
if "q" in parameters and self.type == "SARIMAX":
check_types([("q", parameters["q"], [int, float],)])
assert 0 <= parameters["q"], ParameterError(
"Incorrect parameter 'q'.\nThe order of the MA part must be positive."
)
model_parameters["q"] = parameters["q"]
elif self.type == "SARIMAX" and "q" not in self.parameters:
model_parameters["q"] = default_parameters["q"]
elif self.type == "SARIMAX":
model_parameters["q"] = self.parameters["q"]
if "d" in parameters and self.type == "SARIMAX":
check_types([("d", parameters["d"], [int, float],)])
assert 0 <= parameters["d"], ParameterError(
"Incorrect parameter 'd'.\nThe order of the I part must be positive."
)
model_parameters["d"] = parameters["d"]
elif self.type == "SARIMAX" and "d" not in self.parameters:
model_parameters["d"] = default_parameters["d"]
elif self.type == "SARIMAX":
model_parameters["d"] = self.parameters["d"]
if "P" in parameters and self.type == "SARIMAX":
check_types([("P", parameters["P"], [int, float],)])
assert 0 <= parameters["P"], ParameterError(
"Incorrect parameter 'P'.\nThe seasonal order of the AR part must be positive."
)
model_parameters["P"] = parameters["P"]
elif self.type == "SARIMAX" and "P" not in self.parameters:
model_parameters["P"] = default_parameters["P"]
elif self.type == "SARIMAX":
model_parameters["P"] = self.parameters["P"]
if "Q" in parameters and self.type == "SARIMAX":
check_types([("Q", parameters["Q"], [int, float],)])
assert 0 <= parameters["Q"], ParameterError(
"Incorrect parameter 'Q'.\nThe seasonal order of the MA part must be positive."
)
model_parameters["Q"] = parameters["Q"]
elif self.type == "SARIMAX" and "Q" not in self.parameters:
model_parameters["Q"] = default_parameters["Q"]
elif self.type == "SARIMAX":
model_parameters["Q"] = self.parameters["Q"]
if "D" in parameters and self.type == "SARIMAX":
check_types([("D", parameters["D"], [int, float],)])
assert 0 <= parameters["D"], ParameterError(
"Incorrect parameter 'D'.\nThe seasonal order of the I part must be positive."
)
model_parameters["D"] = parameters["D"]
elif self.type == "SARIMAX" and "D" not in self.parameters:
model_parameters["D"] = default_parameters["D"]
elif self.type == "SARIMAX":
model_parameters["D"] = self.parameters["D"]
if "s" in parameters and self.type == "SARIMAX":
check_types([("s", parameters["s"], [int, float],)])
assert 0 <= parameters["s"], ParameterError(
"Incorrect parameter 's'.\nThe Span of the seasonality must be positive."
)
model_parameters["s"] = parameters["s"]
elif self.type == "SARIMAX" and "s" not in self.parameters:
model_parameters["s"] = default_parameters["s"]
elif self.type == "SARIMAX":
model_parameters["s"] = self.parameters["s"]
if "max_pik" in parameters and self.type == "SARIMAX":
check_types([("max_pik", parameters["max_pik"], [int, float],)])
assert 0 <= parameters["max_pik"], ParameterError(
"Incorrect parameter 'max_pik'.\nThe Maximum number of inverse MA coefficients took during the computation must be positive."
)
model_parameters["max_pik"] = parameters["max_pik"]
elif self.type == "SARIMAX" and "max_pik" not in self.parameters:
model_parameters["max_pik"] = default_parameters["max_pik"]
elif self.type == "SARIMAX":
model_parameters["max_pik"] = self.parameters["max_pik"]
if "papprox_ma" in parameters and self.type == "SARIMAX":
check_types([("papprox_ma", parameters["papprox_ma"], [int, float],)])
assert 0 <= parameters["papprox_ma"], ParameterError(
"Incorrect parameter 'papprox_ma'.\nThe Maximum number of AR(P) used to approximate the MA during the computation must be positive."
)
model_parameters["papprox_ma"] = parameters["papprox_ma"]
elif self.type == "SARIMAX" and "papprox_ma" not in self.parameters:
model_parameters["papprox_ma"] = default_parameters["papprox_ma"]
elif self.type == "SARIMAX":
model_parameters["papprox_ma"] = self.parameters["papprox_ma"]
elif self.type in ("KernelDensity"):
if "bandwidth" in parameters:
check_types([("bandwidth", parameters["bandwidth"], [int, float],)])
assert 0 <= parameters["bandwidth"], ParameterError(
"Incorrect parameter 'bandwidth'.\nThe bandwidth must be positive."
)
model_parameters["bandwidth"] = parameters["bandwidth"]
elif "bandwidth" not in self.parameters:
model_parameters["bandwidth"] = default_parameters["bandwidth"]
else:
model_parameters["bandwidth"] = self.parameters["bandwidth"]
if "kernel" in parameters:
check_types(
[
(
"kernel",
parameters["kernel"],
["gaussian", "logistic", "sigmoid", "silverman"],
)
]
)
assert parameters["kernel"] in [
"gaussian",
"logistic",
"sigmoid",
"silverman",
], ParameterError(
"Incorrect parameter 'kernel'.\nThe parameter 'kernel' must be in [gaussian|logistic|sigmoid|silverman], found '{}'.".format(
parameters["kernel"]
)
)
model_parameters["kernel"] = parameters["kernel"]
elif "kernel" not in self.parameters:
model_parameters["kernel"] = default_parameters["kernel"]
else:
model_parameters["kernel"] = self.parameters["kernel"]
if "max_leaf_nodes" in parameters:
check_types(
[
(
"max_leaf_nodes",
parameters["max_leaf_nodes"],
[int, float],
False,
)
]
)
assert 1 <= parameters["max_leaf_nodes"] <= 1e9, ParameterError(
"Incorrect parameter 'max_leaf_nodes'.\nThe maximum number of leaf nodes must be between 1 and 1e9, inclusive."
)
model_parameters["max_leaf_nodes"] = parameters["max_leaf_nodes"]
elif "max_leaf_nodes" not in self.parameters:
model_parameters["max_leaf_nodes"] = default_parameters[
"max_leaf_nodes"
]
else:
model_parameters["max_leaf_nodes"] = self.parameters["max_leaf_nodes"]
if "max_depth" in parameters:
check_types([("max_depth", parameters["max_depth"], [int],)])
assert 1 <= parameters["max_depth"] <= 100, ParameterError(
"Incorrect parameter 'max_depth'.\nThe maximum depth for growing each tree must be between 1 and 100, inclusive."
)
model_parameters["max_depth"] = parameters["max_depth"]
elif "max_depth" not in self.parameters:
model_parameters["max_depth"] = default_parameters["max_depth"]
else:
model_parameters["max_depth"] = self.parameters["max_depth"]
if "min_samples_leaf" in parameters:
check_types(
[
(
"min_samples_leaf",
parameters["min_samples_leaf"],
[int, float],
False,
)
]
)
assert 1 <= parameters["min_samples_leaf"] <= 1e6, ParameterError(
"Incorrect parameter 'min_samples_leaf'.\nThe minimum number of samples each branch must have after splitting a node must be between 1 and 1e6, inclusive."
)
model_parameters["min_samples_leaf"] = parameters["min_samples_leaf"]
elif "min_samples_leaf" not in self.parameters:
model_parameters["min_samples_leaf"] = default_parameters[
"min_samples_leaf"
]
else:
model_parameters["min_samples_leaf"] = self.parameters[
"min_samples_leaf"
]
if "nbins" in parameters:
check_types([("nbins", parameters["nbins"], [int, float],)])
assert 2 <= parameters["nbins"], ParameterError(
"Incorrect parameter 'nbins'.\nThe number of bins to use for continuous features must be greater than 2."
)
model_parameters["nbins"] = parameters["nbins"]
elif "nbins" not in self.parameters:
model_parameters["nbins"] = default_parameters["nbins"]
else:
model_parameters["nbins"] = self.parameters["nbins"]
if "p" in parameters:
check_types([("p", parameters["p"], [int, float],)])
assert 0 < parameters["p"], ParameterError(
"Incorrect parameter 'p'.\nThe p of the p-distance must be strictly positive."
)
model_parameters["p"] = parameters["p"]
elif "p" not in self.parameters:
model_parameters["p"] = default_parameters["p"]
else:
model_parameters["p"] = self.parameters["p"]
if "xlim" in parameters:
check_types([("xlim", parameters["xlim"], [list],)])
model_parameters["xlim"] = parameters["xlim"]
elif "xlim" not in self.parameters:
model_parameters["xlim"] = default_parameters["xlim"]
else:
model_parameters["xlim"] = self.parameters["xlim"]
elif self.type in ("RandomForestClassifier", "RandomForestRegressor"):
if "n_estimators" in parameters:
check_types([("n_estimators", parameters["n_estimators"], [int],)])
assert 0 <= parameters["n_estimators"] <= 1000, ParameterError(
"Incorrect parameter 'n_estimators'.\nThe number of trees must be lesser than 1000."
)
model_parameters["n_estimators"] = parameters["n_estimators"]
elif "n_estimators" not in self.parameters:
model_parameters["n_estimators"] = default_parameters["n_estimators"]
else:
model_parameters["n_estimators"] = self.parameters["n_estimators"]
if "max_features" in parameters:
check_types(
[
(
"max_features",
parameters["max_features"],
[int, float, str],
False,
)
]
)
if isinstance(parameters["max_features"], str):
assert str(parameters["max_features"]).lower() in [
"max",
"auto",
], ParameterError(
"Incorrect parameter 'init'.\nThe maximum number of features to test must be in (max | auto) or an integer, found '{}'.".format(
parameters["max_features"]
)
)
model_parameters["max_features"] = parameters["max_features"]
elif "max_features" not in self.parameters:
model_parameters["max_features"] = default_parameters["max_features"]
else:
model_parameters["max_features"] = self.parameters["max_features"]
if "max_leaf_nodes" in parameters:
check_types(
[
(
"max_leaf_nodes",
parameters["max_leaf_nodes"],
[int, float],
False,
)
]
)
assert 1 <= parameters["max_leaf_nodes"] <= 1e9, ParameterError(
"Incorrect parameter 'max_leaf_nodes'.\nThe maximum number of leaf nodes must be between 1 and 1e9, inclusive."
)
model_parameters["max_leaf_nodes"] = parameters["max_leaf_nodes"]
elif "max_leaf_nodes" not in self.parameters:
model_parameters["max_leaf_nodes"] = default_parameters[
"max_leaf_nodes"
]
else:
model_parameters["max_leaf_nodes"] = self.parameters["max_leaf_nodes"]
if "sample" in parameters:
check_types([("sample", parameters["sample"], [int, float],)])
assert 0 <= parameters["sample"] <= 1, ParameterError(
"Incorrect parameter 'sample'.\nThe portion of the input data set that is randomly picked for training each tree must be between 0.0 and 1.0, inclusive."
)
model_parameters["sample"] = parameters["sample"]
elif "sample" not in self.parameters:
model_parameters["sample"] = default_parameters["sample"]
else:
model_parameters["sample"] = self.parameters["sample"]
if "max_depth" in parameters:
check_types([("max_depth", parameters["max_depth"], [int],)])
assert 1 <= parameters["max_depth"] <= 100, ParameterError(
"Incorrect parameter 'max_depth'.\nThe maximum depth for growing each tree must be between 1 and 100, inclusive."
)
model_parameters["max_depth"] = parameters["max_depth"]
elif "max_depth" not in self.parameters:
model_parameters["max_depth"] = default_parameters["max_depth"]
else:
model_parameters["max_depth"] = self.parameters["max_depth"]
if "min_samples_leaf" in parameters:
check_types(
[
(
"min_samples_leaf",
parameters["min_samples_leaf"],
[int, float],
False,
)
]
)
assert 1 <= parameters["min_samples_leaf"] <= 1e6, ParameterError(
"Incorrect parameter 'min_samples_leaf'.\nThe minimum number of samples each branch must have after splitting a node must be between 1 and 1e6, inclusive."
)
model_parameters["min_samples_leaf"] = parameters["min_samples_leaf"]
elif "min_samples_leaf" not in self.parameters:
model_parameters["min_samples_leaf"] = default_parameters[
"min_samples_leaf"
]
else:
model_parameters["min_samples_leaf"] = self.parameters[
"min_samples_leaf"
]
if "min_info_gain" in parameters:
check_types(
[
(
"min_info_gain",
parameters["min_info_gain"],
[int, float],
False,
)
]
)
assert 0 <= parameters["min_info_gain"] <= 1, ParameterError(
"Incorrect parameter 'min_info_gain'.\nThe minimum threshold for including a split must be between 0.0 and 1.0, inclusive."
)
model_parameters["min_info_gain"] = parameters["min_info_gain"]
elif "min_info_gain" not in self.parameters:
model_parameters["min_info_gain"] = default_parameters["min_info_gain"]
else:
model_parameters["min_info_gain"] = self.parameters["min_info_gain"]
if "nbins" in parameters:
check_types([("nbins", parameters["nbins"], [int, float],)])
assert 2 <= parameters["nbins"] <= 1000, ParameterError(
"Incorrect parameter 'nbins'.\nThe number of bins to use for continuous features must be between 2 and 1000, inclusive."
)
model_parameters["nbins"] = parameters["nbins"]
elif "nbins" not in self.parameters:
model_parameters["nbins"] = default_parameters["nbins"]
else:
model_parameters["nbins"] = self.parameters["nbins"]
elif self.type in ("NaiveBayes",):
if "alpha" in parameters:
check_types([("alpha", parameters["alpha"], [int, float],)])
assert 0 <= parameters["alpha"], ParameterError(
"Incorrect parameter 'alpha'.\nThe smoothing factor must be positive."
)
model_parameters["alpha"] = parameters["alpha"]
elif "alpha" not in self.parameters:
model_parameters["alpha"] = default_parameters["alpha"]
else:
model_parameters["alpha"] = self.parameters["alpha"]
if "nbtype" in parameters:
check_types([("nbtype", parameters["nbtype"], [str],)])
if isinstance(parameters["nbtype"], str):
assert str(parameters["nbtype"]).lower() in [
"bernoulli",
"categorical",
"multinomial",
"gaussian",
"auto",
], ParameterError(
"Incorrect parameter 'nbtype'.\nThe Naive Bayes type must be in (bernoulli | categorical | multinomial | gaussian | auto), found '{}'.".format(
parameters["init"]
)
)
model_parameters["nbtype"] = parameters["nbtype"]
elif "nbtype" not in self.parameters:
model_parameters["nbtype"] = default_parameters["nbtype"]
else:
model_parameters["nbtype"] = self.parameters["nbtype"]
elif self.type in ("KMeans", "BisectingKMeans"):
if "max_iter" in parameters:
check_types([("max_iter", parameters["max_iter"], [int, float],)])
assert 0 <= parameters["max_iter"], ParameterError(
"Incorrect parameter 'max_iter'.\nThe maximum number of iterations must be positive."
)
model_parameters["max_iter"] = parameters["max_iter"]
elif "max_iter" not in self.parameters:
model_parameters["max_iter"] = default_parameters["max_iter"]
else:
model_parameters["max_iter"] = self.parameters["max_iter"]
if "tol" in parameters:
check_types([("tol", parameters["tol"], [int, float],)])
assert 0 <= parameters["tol"], ParameterError(
"Incorrect parameter 'tol'.\nThe tolerance parameter value must be positive."
)
model_parameters["tol"] = parameters["tol"]
elif "tol" not in self.parameters:
model_parameters["tol"] = default_parameters["tol"]
else:
model_parameters["tol"] = self.parameters["tol"]
if "n_cluster" in parameters:
check_types([("n_cluster", parameters["n_cluster"], [int, float],)])
assert 1 <= parameters["n_cluster"] <= 10000, ParameterError(
"Incorrect parameter 'n_cluster'.\nThe number of clusters must be between 1 and 10000, inclusive."
)
model_parameters["n_cluster"] = parameters["n_cluster"]
elif "n_cluster" not in self.parameters:
model_parameters["n_cluster"] = default_parameters["n_cluster"]
else:
model_parameters["n_cluster"] = self.parameters["n_cluster"]
if "init" in parameters:
check_types([("init", parameters["init"], [str, list],)])
if isinstance(parameters["init"], str):
if self.type in ("BisectingKMeans",):
assert str(parameters["init"]).lower() in [
"random",
"kmeanspp",
"pseudo",
], ParameterError(
"Incorrect parameter 'init'.\nThe initialization method of the clusters must be in (random | kmeanspp | pseudo) or a list of the initial clusters position, found '{}'.".format(
parameters["init"]
)
)
else:
assert str(parameters["init"]).lower() in [
"random",
"kmeanspp",
], ParameterError(
"Incorrect parameter 'init'.\nThe initialization method of the clusters must be in (random | kmeanspp) or a list of the initial clusters position, found '{}'.".format(
parameters["init"]
)
)
model_parameters["init"] = parameters["init"]
elif "init" not in self.parameters:
model_parameters["init"] = default_parameters["init"]
else:
model_parameters["init"] = self.parameters["init"]
if "bisection_iterations" in parameters:
check_types(
[
(
"bisection_iterations",
parameters["bisection_iterations"],
[int, float],
False,
)
]
)
assert (
1 <= parameters["bisection_iterations"] <= 1000000
), ParameterError(
"Incorrect parameter 'bisection_iterations'.\nThe number of iterations the bisecting k-means algorithm performs for each bisection step must be between 1 and 1e6, inclusive."
)
model_parameters["bisection_iterations"] = parameters[
"bisection_iterations"
]
elif (
self.type == "BisectingKMeans"
and "bisection_iterations" not in self.parameters
):
model_parameters["bisection_iterations"] = default_parameters[
"bisection_iterations"
]
elif self.type == "BisectingKMeans":
model_parameters["bisection_iterationss"] = self.parameters[
"bisection_iterations"
]
if "split_method" in parameters:
check_types([("split_method", parameters["split_method"], [str],)])
assert str(parameters["split_method"]).lower() in [
"size",
"sum_squares",
], ParameterError(
"Incorrect parameter 'split_method'.\nThe split method must be in (size | sum_squares), found '{}'.".format(
parameters["split_method"]
)
)
model_parameters["split_method"] = parameters["split_method"]
elif (
self.type == "BisectingKMeans" and "split_method" not in self.parameters
):
model_parameters["split_method"] = default_parameters["split_method"]
elif self.type == "BisectingKMeans":
model_parameters["split_method"] = self.parameters["split_method"]
if "min_divisible_cluster_size" in parameters:
check_types(
[
(
"min_divisible_cluster_size",
parameters["min_divisible_cluster_size"],
[int, float],
False,
)
]
)
assert 2 <= parameters["min_divisible_cluster_size"], ParameterError(
"Incorrect parameter 'min_divisible_cluster_size'.\nThe minimum number of points of a divisible cluster must be greater than or equal to 2."
)
model_parameters["min_divisible_cluster_size"] = parameters[
"min_divisible_cluster_size"
]
elif (
self.type == "BisectingKMeans"
and "min_divisible_cluster_size" not in self.parameters
):
model_parameters["min_divisible_cluster_size"] = default_parameters[
"min_divisible_cluster_size"
]
elif self.type == "BisectingKMeans":
model_parameters["min_divisible_cluster_size"] = self.parameters[
"min_divisible_cluster_size"
]
if "distance_method" in parameters:
check_types(
[("distance_method", parameters["distance_method"], [str],)]
)
assert str(parameters["distance_method"]).lower() in [
"euclidean"
], ParameterError(
"Incorrect parameter 'distance_method'.\nThe distance method must be in (euclidean), found '{}'.".format(
parameters["distance_method"]
)
)
model_parameters["distance_method"] = parameters["distance_method"]
elif (
self.type == "BisectingKMeans"
and "distance_method" not in self.parameters
):
model_parameters["distance_method"] = default_parameters[
"distance_method"
]
elif self.type == "BisectingKMeans":
model_parameters["distance_method"] = self.parameters["distance_method"]
elif self.type in ("LinearSVC", "LinearSVR"):
if "tol" in parameters:
check_types([("tol", parameters["tol"], [int, float],)])
assert 0 <= parameters["tol"], ParameterError(
"Incorrect parameter 'tol'.\nThe tolerance parameter value must be positive."
)
model_parameters["tol"] = parameters["tol"]
elif "tol" not in self.parameters:
model_parameters["tol"] = default_parameters["tol"]
else:
model_parameters["tol"] = self.parameters["tol"]
if "C" in parameters:
check_types([("C", parameters["C"], [int, float],)])
assert 0 <= parameters["C"], ParameterError(
"Incorrect parameter 'C'.\nThe weight for misclassification cost must be positive."
)
model_parameters["C"] = parameters["C"]
elif "C" not in self.parameters:
model_parameters["C"] = default_parameters["C"]
else:
model_parameters["C"] = self.parameters["C"]
if "max_iter" in parameters:
check_types([("max_iter", parameters["max_iter"], [int, float],)])
assert 0 <= parameters["max_iter"], ParameterError(
"Incorrect parameter 'max_iter'.\nThe maximum number of iterations must be positive."
)
model_parameters["max_iter"] = parameters["max_iter"]
elif "max_iter" not in self.parameters:
model_parameters["max_iter"] = default_parameters["max_iter"]
else:
model_parameters["max_iter"] = self.parameters["max_iter"]
if "fit_intercept" in parameters:
check_types([("fit_intercept", parameters["fit_intercept"], [bool],)])
model_parameters["fit_intercept"] = parameters["fit_intercept"]
elif "fit_intercept" not in self.parameters:
model_parameters["fit_intercept"] = default_parameters["fit_intercept"]
else:
model_parameters["fit_intercept"] = self.parameters["fit_intercept"]
if "intercept_scaling" in parameters:
check_types(
[
(
"intercept_scaling",
parameters["intercept_scaling"],
[float],
False,
)
]
)
assert 0 <= parameters["intercept_scaling"], ParameterError(
"Incorrect parameter 'intercept_scaling'.\nThe Intercept Scaling parameter value must be positive."
)
model_parameters["intercept_scaling"] = parameters["intercept_scaling"]
elif "intercept_scaling" not in self.parameters:
model_parameters["intercept_scaling"] = default_parameters[
"intercept_scaling"
]
else:
model_parameters["intercept_scaling"] = self.parameters[
"intercept_scaling"
]
if "intercept_mode" in parameters:
check_types([("intercept_mode", parameters["intercept_mode"], [str],)])
assert str(parameters["intercept_mode"]).lower() in [
"regularized",
"unregularized",
], ParameterError(
"Incorrect parameter 'intercept_mode'.\nThe Intercept Mode must be in (size | sum_squares), found '{}'.".format(
parameters["intercept_mode"]
)
)
model_parameters["intercept_mode"] = parameters["intercept_mode"]
elif "intercept_mode" not in self.parameters:
model_parameters["intercept_mode"] = default_parameters[
"intercept_mode"
]
else:
model_parameters["intercept_mode"] = self.parameters["intercept_mode"]
if ("class_weight" in parameters) and self.type in ("LinearSVC"):
check_types(
[("class_weight", parameters["class_weight"], [list, tuple],)]
)
model_parameters["class_weight"] = parameters["class_weight"]
elif self.type in ("LinearSVC",) and "class_weight" not in self.parameters:
model_parameters["class_weight"] = default_parameters["class_weight"]
elif self.type in ("LinearSVC",):
model_parameters["class_weight"] = self.parameters["class_weight"]
if ("acceptable_error_margin" in parameters) and self.type in ("LinearSVR"):
check_types(
[
(
"acceptable_error_margin",
parameters["acceptable_error_margin"],
[int, float],
False,
)
]
)
assert 0 <= parameters["acceptable_error_margin"], ParameterError(
"Incorrect parameter 'acceptable_error_margin'.\nThe Acceptable Error Margin parameter value must be positive."
)
model_parameters["acceptable_error_margin"] = parameters[
"acceptable_error_margin"
]
elif (
self.type in ("LinearSVR",)
and "acceptable_error_margin" not in self.parameters
):
model_parameters["acceptable_error_margin"] = default_parameters[
"acceptable_error_margin"
]
elif self.type in ("LinearSVR",):
model_parameters["acceptable_error_margin"] = self.parameters[
"acceptable_error_margin"
]
elif self.type in ("PCA", "SVD"):
if ("scale" in parameters) and self.type in ("PCA"):
check_types([("scale", parameters["scale"], [bool],)])
model_parameters["scale"] = parameters["scale"]
elif self.type in ("PCA",) and "scale" not in self.parameters:
model_parameters["scale"] = default_parameters["scale"]
elif self.type in ("PCA",):
model_parameters["scale"] = self.parameters["scale"]
if "method" in parameters:
check_types([("method", parameters["method"], [str],)])
assert str(parameters["method"]).lower() in ["lapack"], ParameterError(
"Incorrect parameter 'method'.\nThe decomposition method must be in (lapack), found '{}'.".format(
parameters["method"]
)
)
model_parameters["method"] = parameters["method"]
elif "method" not in self.parameters:
model_parameters["method"] = default_parameters["method"]
else:
model_parameters["method"] = self.parameters["method"]
if "n_components" in parameters:
check_types(
[("n_components", parameters["n_components"], [int, float],)]
)
assert 0 <= parameters["n_components"], ParameterError(
"Incorrect parameter 'n_components'.\nThe number of components must be positive. If it is equal to 0, all the components will be considered."
)
model_parameters["n_components"] = parameters["n_components"]
elif "n_components" not in self.parameters:
model_parameters["n_components"] = default_parameters["n_components"]
else:
model_parameters["n_components"] = self.parameters["n_components"]
elif self.type in ("OneHotEncoder",):
if "extra_levels" in parameters:
check_types([("extra_levels", parameters["extra_levels"], [dict],)])
model_parameters["extra_levels"] = parameters["extra_levels"]
elif "extra_levels" not in self.parameters:
model_parameters["extra_levels"] = default_parameters["extra_levels"]
else:
model_parameters["extra_levels"] = self.parameters["extra_levels"]
if "drop_first" in parameters:
check_types([("drop_first", parameters["drop_first"], [bool],)])
model_parameters["drop_first"] = parameters["drop_first"]
elif "drop_first" not in self.parameters:
model_parameters["drop_first"] = default_parameters["drop_first"]
else:
model_parameters["drop_first"] = self.parameters["drop_first"]
if "ignore_null" in parameters:
check_types([("ignore_null", parameters["ignore_null"], [bool],)])
model_parameters["ignore_null"] = parameters["ignore_null"]
elif "ignore_null" not in self.parameters:
model_parameters["ignore_null"] = default_parameters["ignore_null"]
else:
model_parameters["ignore_null"] = self.parameters["ignore_null"]
if "separator" in parameters:
check_types([("separator", parameters["separator"], [str],)])
model_parameters["separator"] = parameters["separator"]
elif "separator" not in self.parameters:
model_parameters["separator"] = default_parameters["separator"]
else:
model_parameters["separator"] = self.parameters["separator"]
if "null_column_name" in parameters:
check_types(
[("null_column_name", parameters["null_column_name"], [str],)]
)
model_parameters["null_column_name"] = parameters["null_column_name"]
elif "null_column_name" not in self.parameters:
model_parameters["null_column_name"] = default_parameters[
"null_column_name"
]
else:
model_parameters["null_column_name"] = self.parameters[
"null_column_name"
]
if "column_naming" in parameters:
check_types([("column_naming", parameters["column_naming"], [str],)])
assert str(parameters["column_naming"]).lower() in [
"indices",
"values",
"values_relaxed",
], ParameterError(
"Incorrect parameter 'column_naming'.\nThe column_naming method must be in (indices | values | values_relaxed), found '{}'.".format(
parameters["column_naming"]
)
)
model_parameters["column_naming"] = parameters["column_naming"]
elif "column_naming" not in self.parameters:
model_parameters["column_naming"] = default_parameters["column_naming"]
else:
model_parameters["column_naming"] = self.parameters["column_naming"]
elif self.type in ("Normalizer",):
if "method" in parameters:
check_types([("method", parameters["method"], [str],)])
assert str(parameters["method"]).lower() in [
"zscore",
"robust_zscore",
"minmax",
], ParameterError(
"Incorrect parameter 'method'.\nThe normalization method must be in (zscore | robust_zscore | minmax), found '{}'.".format(
parameters["method"]
)
)
model_parameters["method"] = parameters["method"]
elif "method" not in self.parameters:
model_parameters["method"] = default_parameters["method"]
else:
model_parameters["method"] = self.parameters["method"]
elif self.type in ("DBSCAN",):
if "eps" in parameters:
check_types([("eps", parameters["eps"], [int, float],)])
assert 0 < parameters["eps"], ParameterError(
"Incorrect parameter 'eps'.\nThe radius of a neighborhood must be strictly positive."
)
model_parameters["eps"] = parameters["eps"]
elif "eps" not in self.parameters:
model_parameters["eps"] = default_parameters["eps"]
else:
model_parameters["eps"] = self.parameters["eps"]
if "p" in parameters:
check_types([("p", parameters["p"], [int, float],)])
assert 0 < parameters["p"], ParameterError(
"Incorrect parameter 'p'.\nThe p of the p-distance must be strictly positive."
)
model_parameters["p"] = parameters["p"]
elif "p" not in self.parameters:
model_parameters["p"] = default_parameters["p"]
else:
model_parameters["p"] = self.parameters["p"]
if "min_samples" in parameters:
check_types([("min_samples", parameters["min_samples"], [int, float],)])
assert 0 < parameters["min_samples"], ParameterError(
"Incorrect parameter 'min_samples'.\nThe minimum number of points required to form a dense region must be strictly positive."
)
model_parameters["min_samples"] = parameters["min_samples"]
elif "min_samples" not in self.parameters:
model_parameters["min_samples"] = default_parameters["min_samples"]
else:
model_parameters["min_samples"] = self.parameters["min_samples"]
elif self.type in (
"NearestCentroid",
"KNeighborsClassifier",
"KNeighborsRegressor",
"LocalOutlierFactor",
):
if "p" in parameters:
check_types([("p", parameters["p"], [int, float],)])
assert 0 < parameters["p"], ParameterError(
"Incorrect parameter 'p'.\nThe p of the p-distance must be strictly positive."
)
model_parameters["p"] = parameters["p"]
elif "p" not in self.parameters:
model_parameters["p"] = default_parameters["p"]
else:
model_parameters["p"] = self.parameters["p"]
if ("n_neighbors" in parameters) and (self.type != "NearestCentroid"):
check_types([("n_neighbors", parameters["n_neighbors"], [int, float],)])
assert 0 < parameters["n_neighbors"], ParameterError(
"Incorrect parameter 'n_neighbors'.\nThe number of neighbors must be strictly positive."
)
model_parameters["n_neighbors"] = parameters["n_neighbors"]
elif (
self.type != "NearestCentroid" and "n_neighbors" not in self.parameters
):
model_parameters["n_neighbors"] = default_parameters["n_neighbors"]
elif self.type != "NearestCentroid":
model_parameters["n_neighbors"] = self.parameters["n_neighbors"]
elif self.type in ("CountVectorizer",):
if "max_df" in parameters:
check_types([("max_df", parameters["max_df"], [int, float],)])
assert 0 <= parameters["max_df"] <= 1, ParameterError(
"Incorrect parameter 'max_df'.\nIt must be between 0 and 1, inclusive."
)
model_parameters["max_df"] = parameters["max_df"]
elif "max_df" not in self.parameters:
model_parameters["max_df"] = default_parameters["max_df"]
else:
model_parameters["max_df"] = self.parameters["max_df"]
if "min_df" in parameters:
check_types([("min_df", parameters["min_df"], [int, float],)])
assert 0 <= parameters["min_df"] <= 1, ParameterError(
"Incorrect parameter 'min_df'.\nIt must be between 0 and 1, inclusive."
)
model_parameters["min_df"] = parameters["min_df"]
elif "min_df" not in self.parameters:
model_parameters["min_df"] = default_parameters["min_df"]
else:
model_parameters["min_df"] = self.parameters["min_df"]
if "lowercase" in parameters:
check_types([("lowercase", parameters["lowercase"], [bool],)])
model_parameters["lowercase"] = parameters["lowercase"]
elif "lowercase" not in self.parameters:
model_parameters["lowercase"] = default_parameters["lowercase"]
else:
model_parameters["lowercase"] = self.parameters["lowercase"]
if "ignore_special" in parameters:
check_types([("ignore_special", parameters["ignore_special"], [bool],)])
model_parameters["ignore_special"] = parameters["ignore_special"]
elif "ignore_special" not in self.parameters:
model_parameters["ignore_special"] = default_parameters[
"ignore_special"
]
else:
model_parameters["ignore_special"] = self.parameters["ignore_special"]
if "max_text_size" in parameters:
check_types(
[
(
"max_text_size",
parameters["max_text_size"],
[int, float],
False,
)
]
)
assert 0 < parameters["max_text_size"], ParameterError(
"Incorrect parameter 'max_text_size'.\nThe maximum text size must be positive."
)
model_parameters["max_text_size"] = parameters["max_text_size"]
elif "max_text_size" not in self.parameters:
model_parameters["max_text_size"] = default_parameters["max_text_size"]
else:
model_parameters["max_text_size"] = self.parameters["max_text_size"]
if "max_features" in parameters:
check_types(
[("max_features", parameters["max_features"], [int, float],)]
)
model_parameters["max_features"] = parameters["max_features"]
elif "max_features" not in self.parameters:
model_parameters["max_features"] = default_parameters["max_features"]
else:
model_parameters["max_features"] = self.parameters["max_features"]
from verticapy.learn.linear_model import Lasso, Ridge, LinearRegression
from verticapy.learn.tree import (
DecisionTreeClassifier,
DecisionTreeRegressor,
DummyTreeClassifier,
DummyTreeRegressor,
)
if isinstance(self, Lasso):
model_parameters["penalty"] = "l1"
if "l1_ratio" in model_parameters:
del model_parameters["l1_ratio"]
elif isinstance(self, Ridge):
model_parameters["penalty"] = "l2"
if "l1_ratio" in model_parameters:
del model_parameters["l1_ratio"]
elif isinstance(self, LinearRegression):
model_parameters["penalty"] = "none"
if "l1_ratio" in model_parameters:
del model_parameters["l1_ratio"]
if "C" in model_parameters:
del model_parameters["C"]
elif isinstance(
self,
(
DecisionTreeClassifier,
DecisionTreeRegressor,
DummyTreeClassifier,
DummyTreeRegressor,
),
):
model_parameters["n_estimators"] = 1
model_parameters["sample"] = 1.0
if isinstance(self, (DummyTreeClassifier, DummyTreeRegressor)):
model_parameters["max_features"] = "max"
model_parameters["max_leaf_nodes"] = 1e9
model_parameters["max_depth"] = 100
model_parameters["min_samples_leaf"] = 1
model_parameters["min_info_gain"] = 0.0
self.parameters = model_parameters
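    # Usage sketch (parameter names must be valid for the model type; the
    # values below are illustrative for a linear model):
    #
    #   model.set_params({"max_iter": 500, "tol": 1e-6})
    #   model.get_params()["max_iter"]   # -> 500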
# ---#
def shapExplainer(self):
"""
---------------------------------------------------------------------------
Creates the Model shapExplainer. Only linear models are supported.
Returns
-------
shap.Explainer
the shap Explainer.
"""
try:
import shap
except:
raise ImportError(
"The shap module seems to not be installed in your environment.\nTo be able to use this method, you'll have to install it.\n[Tips] Run: 'pip3 install shap' in your terminal to install the module."
)
if self.type in (
"LinearRegression",
"LogisticRegression",
"LinearSVC",
"LinearSVR",
):
vdf = vdf_from_relation(self.input_relation, cursor=self.cursor)
cov_matrix = vdf.cov(self.X, show=False)
if len(self.X) == 1:
cov_matrix = np.array([[1]])
elif len(self.X) == 2:
cov_matrix = np.array([[1, cov_matrix], [cov_matrix, 1]])
else:
cov_matrix = cov_matrix.to_numpy()
data = (vdf.avg(self.X).to_numpy(), cov_matrix)
model = self.to_sklearn()
with warnings.catch_warnings(record=True) as w:
return shap.LinearExplainer(
model, data, feature_perturbation="correlation_dependent"
)
else:
raise FunctionError(
"The method 'to_shapExplainer' is not available for model type '{}'.".format(
self.type
)
)
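    # Usage sketch (requires the optional 'shap' package; the feature values
    # passed to the explainer below are hypothetical):
    #
    #   explainer = model.shapExplainer()
    #   shap_values = explainer.shap_values(np.array([[35.0, 50000.0]]))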
# ---#
def to_sklearn(self):
"""
---------------------------------------------------------------------------
Converts the Vertica Model to sklearn model.
Returns
-------
object
sklearn model.
"""
import verticapy.learn.linear_model as lm
import verticapy.learn.svm as svm
import verticapy.learn.naive_bayes as vnb
import verticapy.learn.cluster as vcl
import verticapy.learn.ensemble as vens
import verticapy.learn.neighbors as vng
import verticapy.learn.preprocessing as vpp
import verticapy.learn.decomposition as vdcp
try:
import sklearn
except:
raise ImportError(
"The scikit-learn module seems to not be installed in your environment.\nTo be able to use this method, you'll have to install it.\n[Tips] Run: 'pip3 install scikit-learn' in your terminal to install the module."
)
params = self.get_params()
if self.type in (
"LinearRegression",
"LogisticRegression",
"LinearSVC",
"LinearSVR",
):
import sklearn.linear_model as sklm
import sklearn.svm as sksvm
if isinstance(self, lm.LinearRegression):
model = sklm.LinearRegression()
elif isinstance(self, lm.ElasticNet):
model = sklm.ElasticNet(
alpha=params["C"],
l1_ratio=params["l1_ratio"],
max_iter=params["max_iter"],
tol=params["tol"],
)
elif isinstance(self, lm.Lasso):
model = sklm.Lasso(max_iter=params["max_iter"], tol=params["tol"],)
elif isinstance(self, lm.Ridge):
model = sklm.Ridge(max_iter=params["max_iter"], tol=params["tol"],)
elif isinstance(self, lm.LogisticRegression):
if "C" not in params:
params["C"] = 1.0
if "l1_ratio" not in params:
params["l1_ratio"] = None
model = sklm.LogisticRegression(
penalty=params["penalty"].lower(),
C=float(1 / params["C"]),
l1_ratio=params["l1_ratio"],
max_iter=params["max_iter"],
tol=params["tol"],
)
elif isinstance(self, svm.LinearSVC):
if params["intercept_mode"] == "regularized":
params["penalty"] = "l2"
else:
params["penalty"] = "l1"
model = sksvm.LinearSVC(
penalty=params["penalty"],
C=params["C"],
fit_intercept=params["fit_intercept"],
intercept_scaling=params["intercept_scaling"],
max_iter=params["max_iter"],
tol=params["tol"],
)
elif isinstance(self, svm.LinearSVR):
if params["intercept_mode"] == "regularized":
params["loss"] = "epsilon_insensitive"
else:
params["loss"] = "squared_epsilon_insensitive"
model = sksvm.LinearSVR(
loss=params["loss"],
C=params["C"],
fit_intercept=params["fit_intercept"],
intercept_scaling=params["intercept_scaling"],
max_iter=params["max_iter"],
tol=params["tol"],
)
if isinstance(self, (lm.LogisticRegression, svm.LinearSVC)):
model.classes_ = np.array([0, 1])
model.coef_ = np.array([self.coef_["coefficient"][1:]])
model.intercept_ = self.coef_["coefficient"][0]
try:
model.n_iter_ = self.get_attr("iteration_count")["iteration_count"][0]
except:
model.n_iter_ = 1
elif self.type in ("Normalizer", "OneHotEncoder"):
import sklearn.preprocessing as skpp
if isinstance(self, (vpp.Normalizer,)):
attr = self.get_attr("details")
if "avg" in attr.values:
model = skpp.StandardScaler()
model.mean_ = np.array(attr["avg"])
model.scale_ = np.array(attr["std_dev"])
model.var_ = model.scale_ ** 2
model.n_features_in_ = len(self.X)
model.n_samples_seen_ = np.array(
vdf_from_relation(
self.input_relation, cursor=self.cursor
).count(columns=self.X)["count"]
)
elif "median" in attr.values:
model = skpp.RobustScaler()
model.center_ = np.array(attr["median"])
model.scale_ = np.array(attr["mad"])
model.n_features_in_ = len(self.X)
elif "min" in attr.values:
model = skpp.MinMaxScaler()
model.data_min_ = np.array(attr["min"])
model.data_max_ = np.array(attr["max"])
model.data_range_ = np.array(attr["max"]) - np.array(attr["min"])
model.scale_ = 1 / model.data_range_
model.min_ = 0 - model.data_min_ * model.scale_
model.n_features_in_ = len(self.X)
self.cursor.execute(
"SELECT COUNT(*) FROM {} WHERE {}".format(
self.input_relation,
" AND ".join(
["{} IS NOT NULL".format(elem) for elem in self.X]
),
)
)
model.n_samples_seen_ = self.cursor.fetchone()[0]
elif isinstance(self, (vpp.OneHotEncoder,)):
drop = None
model = skpp.OneHotEncoder()
model.drop_idx_ = None
if self.parameters["drop_first"]:
model.drop_idx_ = np.array([0 for elem in range(len(self.X))])
params = self.param_
vdf = vdf_from_relation(self.input_relation, cursor=self.cursor)
categories = []
for column in self.X:
idx = []
for i in range(len(params["category_name"])):
if str_column(params["category_name"][i]) == str_column(
column
) and (
not (self.parameters["ignore_null"])
or params["category_level"][i] != None
):
idx += [i]
cat_tmp = []
for j, i in enumerate(idx):
elem = params["category_level"][i]
if vdf[column].dtype() == "int":
try:
elem = int(elem)
except:
pass
cat_tmp += [elem]
categories += [np.array(cat_tmp)]
model.categories_ = categories
elif self.type in ("PCA", "SVD"):
import sklearn.decomposition as skdcp
if isinstance(self, (vdcp.PCA,)):
model = skdcp.PCA(n_components=params["n_components"])
model.components_ = []
all_pc = self.get_attr("principal_components")
for idx, elem in enumerate(all_pc.values):
if idx > 0:
model.components_ += [np.array(all_pc.values[elem])]
model.components_ = np.array(model.components_)
model.explained_variance_ratio_ = np.array(
self.get_attr("singular_values")["explained_variance"]
)
model.explained_variance_ = np.array(
self.get_attr("singular_values")["explained_variance"]
)
model.singular_values_ = np.array(
self.get_attr("singular_values")["value"]
)
model.mean_ = np.array(self.get_attr("columns")["mean"])
model.n_components_ = params["n_components"]
model.n_features_ = len(self.X)
model.n_samples_ = self.get_attr("counters")["counter_value"][0]
model.noise_variance_ = 0.0
elif isinstance(self, (vdcp.SVD,)):
model = skdcp.TruncatedSVD(n_components=params["n_components"])
model.components_ = []
all_pc = self.get_attr("right_singular_vectors")
for idx, elem in enumerate(all_pc.values):
if idx > 0:
model.components_ += [np.array(all_pc.values[elem])]
model.components_ = np.array(model.components_)
#!/usr/bin/env python
from copy import copy
import rasterio
import matplotlib.pyplot as plt
import numpy as np
import numpy.ma as ma
import pandas as pd
from rasterio.plot import show
import re
import pdb
import projections.pd_utils as pd_utils
from projections.lu.luh2 import LU
shape = (567, 1440)
bounds = (-180, -58, 180, 83.75)
palette = copy(plt.cm.viridis)
#palette.set_over('g', 1.0)
palette.set_under('r', 1.0)
palette.set_bad('k', 1.0)
palette2 = copy(plt.cm.viridis)
palette2.set_over('b', 1.0)
palette2.set_under('r', 1.0)
palette2.set_bad('k', 1.0)
def rcs(height, res, left, bottom, right, top):
er = 6378137.0
lats = np.linspace(top, bottom + res[1], height)
# NOTE: this expression was truncated in the source; the closing terms below are
# reconstructed from the standard per-latitude cell-area formula (km^2) and are an assumption.
vec = ((np.sin(np.radians(lats + res[1] / 2.0)) -
        np.sin(np.radians(lats - res[1] / 2.0))) *
       np.radians(res[0]) * er ** 2 / 1e6)
return vec.reshape((vec.shape[0], 1))
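# Usage sketch (illustrative; per-latitude cell areas for the grid defined by
# 'shape' and 'bounds' above, assuming a 0.25-degree resolution):
#
#   cell_areas = rcs(shape[0], (0.25, 0.25), *bounds)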
import pytest
import numpy as np
import os.path
import sofa
import scipy.io.wavfile as wavfile
from pyfar.samplings import SphericalVoronoi
from pyfar import Orientations
from pyfar import Coordinates
from pyfar import FrequencyData, TimeData
import pyfar.classes.filter as fo
import pyfar.signals
from pyfar.testing import stub_utils
@pytest.fixture
def sine_stub():
"""Sine signal stub.
To be used in cases, when a dependence on the Signal class is prohibited,
but a correct, fixed relation of the time signal and the spectrum is
needed.
Returns
-------
signal : Signal
Stub of sine signal
"""
frequency = 441
sampling_rate = 44100
n_samples = 10000
fft_norm = 'rms'
cshape = (1,)
time, freq, frequency = stub_utils.sine_func(
frequency, sampling_rate, n_samples, fft_norm, cshape)
signal = stub_utils.signal_stub(
time, freq, sampling_rate, fft_norm)
return signal
@pytest.fixture
def sine_stub_odd():
"""Sine signal stub, odd number of samples
To be used in cases, when a dependence on the Signal class is prohibited,
but a correct, fixed relation of the time signal and the spectrum is
needed.
Returns
-------
signal : Signal
Stub of sine signal
"""
frequency = 441
sampling_rate = 44100
n_samples = 9999
fft_norm = 'rms'
cshape = (1,)
time, freq, frequency = stub_utils.sine_func(
frequency, sampling_rate, n_samples, fft_norm, cshape)
signal = stub_utils.signal_stub(
time, freq, sampling_rate, fft_norm)
return signal
@pytest.fixture
def impulse_stub():
"""Delta impulse signal stub.
To be used in cases, when a dependence on the Signal class is prohibited,
but a correct, fixed relation of the time signal and the spectrum is
needed.
Returns
-------
signal : Signal
Stub of impulse signal
"""
delay = 0
sampling_rate = 44100
n_samples = 10000
fft_norm = 'none'
cshape = (1,)
time, freq = stub_utils.impulse_func(
delay, n_samples, fft_norm, cshape)
signal = stub_utils.signal_stub(
time, freq, sampling_rate, fft_norm)
return signal
@pytest.fixture
def noise_stub():
"""Gaussian white noise signal stub.
To be used in cases, when a dependence on the Signal class is prohibited,
but a correct, fixed relation of the time signal and the spectrum is
needed.
Returns
-------
signal : Signal
Stub of noise signal
"""
sigma = 1
n_samples = int(1e5)
cshape = (1,)
sampling_rate = 44100
fft_norm = 'rms'
time, freq = stub_utils.noise_func(sigma, n_samples, cshape)
signal = stub_utils.signal_stub(
time, freq, sampling_rate, fft_norm)
return signal
@pytest.fixture
def noise_stub_odd():
"""Gaussian white noise signal stub, odd number of samples.
To be used in cases, when a dependence on the Signal class is prohibited,
but a correct, fixed relation of the time signal and the spectrum is
needed.
Returns
-------
signal : Signal
Stub of noise signal
"""
sigma = 1
n_samples = int(1e5 - 1)
cshape = (1,)
sampling_rate = 44100
fft_norm = 'rms'
time, freq = stub_utils.noise_func(sigma, n_samples, cshape)
signal = stub_utils.signal_stub(
time, freq, sampling_rate, fft_norm)
return signal
@pytest.fixture
def sine():
"""Sine signal.
Returns
-------
signal : Signal
Sine signal
"""
frequency = 441
n_samples = 10000
sampling_rate = 44100
amplitude = 1
signal = pyfar.signals.sine(
frequency, n_samples, amplitude=amplitude,
sampling_rate=sampling_rate)
return signal
@pytest.fixture
def sine_short():
"""Short sine signal where the first frequency is > 20 Hz.
This is used for testing plot._line._lower_frequency_limit.
Returns
-------
signal : Signal
Sine signal
"""
frequency = 441
n_samples = 100
sampling_rate = 44100
amplitude = 1
signal = pyfar.signals.sine(
frequency, n_samples, amplitude=amplitude,
sampling_rate=sampling_rate)
return signal
@pytest.fixture
def impulse():
"""Delta impulse signal.
Returns
-------
signal : Signal
Impulse signal
"""
n_samples = 10000
delay = 0
amplitude = 1
sampling_rate = 44100
signal = pyfar.signals.impulse(
n_samples, delay=delay, amplitude=amplitude,
sampling_rate=sampling_rate)
return signal
@pytest.fixture
def impulse_group_delay():
"""Delayed delta impulse signal with analytical group delay.
Returns
-------
signal : Signal
Impulse signal
group_delay : ndarray
Group delay of impulse signal
"""
n_samples = 10000
delay = 0
amplitude = 1
sampling_rate = 44100
signal = pyfar.signals.impulse(
n_samples, delay=delay, amplitude=amplitude,
sampling_rate=sampling_rate)
group_delay = delay * np.ones_like(signal.freq, dtype=float)
return signal, group_delay
@pytest.fixture
def impulse_group_delay_two_channel():
"""Delayed 2 channel delta impulse signal with analytical group delay.
Returns
-------
signal : Signal
Impulse signal
group_delay : ndarray
Group delay of impulse signal
"""
n_samples = 10000
    delay = np.atleast_1d([1000, 2000])
import numpy as np
import pytest
from scipy import sparse
from xugrid import connectivity
@pytest.fixture(scope="function")
def triangle_mesh():
fill_value = -1
# Two triangles
faces = np.array(
[
[0, 1, 2],
[1, 3, 2],
]
)
return faces, fill_value
@pytest.fixture(scope="function")
def mixed_mesh():
fill_value = -1
# Triangle, quadrangle
faces = np.array(
[
[0, 1, 2, fill_value],
[1, 3, 4, 2],
]
)
return faces, fill_value
def test_neighbors():
i = [0, 0, 0, 1, 1, 1]
j = [0, 1, 2, 1, 3, 2]
coo_content = (j, (i, j))
A = sparse.coo_matrix(coo_content).tocsr()
A = connectivity.AdjacencyMatrix(A.indices, A.indptr, A.nnz)
assert np.array_equal(connectivity.neighbors(A, 0), [0, 1, 2])
assert np.array_equal(connectivity.neighbors(A, 1), [1, 2, 3])
def test_to_ij(triangle_mesh, mixed_mesh):
faces, fill_value = triangle_mesh
actual_i, actual_j = connectivity._to_ij(faces, fill_value, invert=False)
expected_i = [0, 0, 0, 1, 1, 1]
expected_j = [0, 1, 2, 1, 3, 2]
assert np.array_equal(actual_i, expected_i)
assert np.array_equal(actual_j, expected_j)
# Inverted
actual_i, actual_j = connectivity._to_ij(faces, fill_value, invert=True)
assert np.array_equal(actual_i, expected_j)
assert np.array_equal(actual_j, expected_i)
faces, fill_value = mixed_mesh
actual_i, actual_j = connectivity._to_ij(faces, fill_value, invert=False)
expected_i = [0, 0, 0, 1, 1, 1, 1]
expected_j = [0, 1, 2, 1, 3, 4, 2]
assert np.array_equal(actual_i, expected_i)
assert np.array_equal(actual_j, expected_j)
# Inverted
actual_i, actual_j = connectivity._to_ij(faces, fill_value, invert=True)
assert np.array_equal(actual_i, expected_j)
assert np.array_equal(actual_j, expected_i)
def test_to_sparse(mixed_mesh):
faces, fill_value = mixed_mesh
csr = connectivity._to_sparse(faces, fill_value, invert=False, sort_indices=True)
    expected_j = np.array([0, 1, 2, 1, 2, 3, 4])
from pppr import aabb
import numpy as np
from pak.datasets.MOT import MOT16
from pak import utils
from pppr import aabb
from time import time
from cselect import color as cs
# ===========================================
# Helper functions
# ===========================================
def remove_negative_pairs(Dt, W, H, is_gt_trajectory=False):
"""
...
is_gt_trajectory: {boolean} if true than the
structure of the data is slightly different
"""
result = []
if is_gt_trajectory:
for frame, pid, x, y, w, h in Dt:
if x >= 0 and y >= 0 and x + w < W and y + h < H:
result.append((frame, pid, x, y, w, h))
else:
if Dt.shape[1] == 7:
for frame, pid, x, y, w, h, score in Dt:
if x >= 0 and y >= 0 and x + w < W and y + h < H:
result.append((frame, pid, x, y, w, h, score))
else:
for frame, x, y, w, h, score in Dt:
if x >= 0 and y >= 0 and x + w < W and y + h < H:
result.append((frame, x, y, w, h, score))
return np.array(result)
def get_visible_pedestrains(Y_gt, frame):
Y_gt_frame1 = utils.extract_eq(Y_gt, col=0, value=frame)
#Y_gt_frame1 = utils.extract_eq(Y_gt_frame1, col=7, value=1)
#Y_gt_frame1 = utils.extract_eq(Y_gt_frame1, col=8, value=1)
return Y_gt_frame1
def get_visible_pedestrains_det(Y_det, frame):
Y_det_frame1 = utils.extract_eq(Y_det, col=0, value=frame)
return Y_det_frame1
def get_center(d):
""" full detection has 7 parameters:
full_detection: (frame, pid, x, y, w, h, score)
"""
x, y, w, h = d[2], d[3], d[4], d[5]
return x+w/2, y+h/2
# ===========================================
# Experiments implementation
# ===========================================
verbose = False
class MOT16_Experiments:
def __init__(self, folder):
""" For the experiments we need MOT16-02 and
MOT16-11 for the analysis
The detections will have the following structure:
0: frame_nbr
1: person id
2: detection top-left x position
3: detection top-left y position
4: detection bb width
5: detection bb height
6: detection output score
"""
global verbose
mot16 = MOT16(folder, verbose=verbose)
mot16_02 = mot16.get_train("MOT16-02", memmapped=True)
mot16_11 = mot16.get_train("MOT16-11", memmapped=True)
self.mot16_02_X = mot16_02[0]
self.mot16_11_X = mot16_11[0]
detections_per_video = []
gt_per_video = []
true_detections_per_video = []
true_detections_per_video_no_pid = []
color_lookups_per_video = []
for X, Y_det, Y_gt in [mot16_02, mot16_11]:
# --- run for each video ---
# this is not the most efficient way but not important atm..
_, H, W, _ = X.shape
Y_gt = MOT16.simplify_gt(Y_gt)
gt_bbs = []
all_detections = []
detections_per_video.append(all_detections)
true_detections = []
true_detections_per_video.append(true_detections)
true_detections_no_pid = []
true_detections_per_video_no_pid.append(true_detections_no_pid)
gt_per_video.append(gt_bbs)
frames = X.shape[0]
TIMING_start = time()
for frame in range(1, frames+1):
y_gt = get_visible_pedestrains(Y_gt, frame)
y_det = get_visible_pedestrains_det(Y_det, frame)
for ped_ in y_gt:
j, pid, l_gt, t_gt, w_gt, h_gt = ped_
gt_bbs.append((j, pid, l_gt, t_gt, w_gt, h_gt))
for ped in y_det:
i, _,l, t, w, h, score, _, _,_ = ped
if l >= 0 and t >= 0 and l + w < W and \
t + h < H:
all_detections.append(
np.array([i, l, t, w, h, score])
)
for ped_ in y_gt:
j, pid, l_gt, t_gt, w_gt, h_gt = ped_
assert(i == j)
if aabb.IoU((l,t,w,h), (l_gt,t_gt,w_gt,h_gt)) > 0.5:
true_detections.append(
np.array([i, pid, l, t, w, h, score]))
true_detections_no_pid.append(
np.array([i, l, t, w, h, score]))
TIMING_end = time()
if verbose:
print("Handling " + str(frames) + " frames in " + \
str(TIMING_end - TIMING_start) + " seconds")
# --- figure out coloring ---
Y = np.array(true_detections)
U = np.unique(Y[:,1])
Color_lookup = {}
Colors = cs.lincolor(len(U), random_sat=True, random_val=True)
#Colors = cs.poisson_disc_sampling_Lab(len(U))
            Colors = np.array(Colors, 'float32')
from problem2 import *
import numpy as np
import sys
from sklearn.datasets import make_classification
'''
Unit test 2:
This file includes unit tests for problem2.py.
You could test the correctness of your code by typing `nosetests -v test2.py` in the terminal.
'''
#-------------------------------------------------------------------------
def test_python_version():
''' ----------- Problem 2 (50 points in total)--------------'''
assert sys.version_info[0]==3 # require python 3
#-------------------------------------------------------------------------
def test_compute_z1():
'''(2 point) compute_z1'''
    x = np.mat('1.; 2.; 3.')
import torch
import numpy as np
import matplotlib.pyplot as plt
from sklearn import metrics
from transformers import AdamW
from transformers import get_linear_schedule_with_warmup
from tqdm import tqdm
from load_data import traindataloader, valdataloader
from model import Bert_CoSENT
SAVED_DIR = '../saved_model'
EPOCHS = 10
BERT_PATH = '../bert-base-chinese'
WARMUP_PROPORTION = 0.1
METHOD = 'mean_pooling'
device = "cuda" if torch.cuda.is_available() else 'cpu'
model = Bert_CoSENT.from_pretrained(BERT_PATH)
model.to(device)
total_steps = len(traindataloader) * EPOCHS
optimizer = AdamW(model.parameters(), lr=5e-5)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=int(WARMUP_PROPORTION * total_steps), num_training_steps=total_steps)
def cal_loss(simi_scores, labels):
    # simi_scores and labels are both 1-D tensors; simi_scores is the cosine similarity of each sentence pair, labels is the corresponding pair label
neg_indices = torch.nonzero(labels != 1, as_tuple=True)
pos_indices = torch.nonzero(labels != 0, as_tuple=True)
neg = simi_scores[neg_indices]
pos = simi_scores[pos_indices]
neg = neg[:, None]
pos = pos[None, :]
    # take the differences: negative-pair score minus positive-pair score
diff = neg-pos
diff = diff.view(1,-1)
diff = torch.cat((torch.tensor([[0]]).float().to(device), diff), dim=1)
return torch.logsumexp(diff, 1)
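# Added illustration (not part of the original training script; the helper name and the
# dummy scores below are hypothetical): a minimal sanity check for cal_loss. With one
# positive pair scored 0.9 and one negative pair scored 0.1 the loss reduces to
# logsumexp([0, 0.1 - 0.9]) = log(1 + exp(-0.8)) ~= 0.371.
def _cal_loss_sanity_check():
    scores = torch.tensor([0.9, 0.1]).to(device)  # cosine similarities of two sentence pairs
    labels = torch.tensor([1, 0]).to(device)      # first pair positive, second negative
    return cal_loss(scores, labels)               # expected: tensor close to 0.371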
loss_vals = []
for epoch in range(EPOCHS):
model.train()
epoch_loss= []
pbar = tqdm(traindataloader)
pbar.set_description("[Epoch {}]".format(epoch))
for batch in pbar:
input_ids_1, attention_mask_1, input_ids_2, attention_mask_2, labels = batch['input_ids_1'].to(device), batch['attention_mask_1'].to(device), batch['input_ids_2'].to(device), batch['attention_mask_2'].to(device), batch['labels'].to(device)
model.zero_grad()
simi_scores = model(input_ids_1, attention_mask_1, input_ids_2, attention_mask_2, METHOD)
loss = cal_loss(simi_scores, labels)
loss.backward()
epoch_loss.append(loss.item())
optimizer.step()
scheduler.step()
pbar.set_postfix(loss=loss.item())
loss_vals.append(np.mean(epoch_loss))
model.eval()
for t in [0.5, 0.6, 0.7, 0.8, 0.9, 0.95]:
predict_all = np.array([], dtype=int)
labels_all = np.array([], dtype=int)
with torch.no_grad():
for batch in valdataloader:
input_ids_1, attention_mask_1, input_ids_2, attention_mask_2, labels = batch['input_ids_1'].to(device), batch['attention_mask_1'].to(device), batch['input_ids_2'].to(device), batch['attention_mask_2'].to(device), batch['labels'].to(device)
pred = model.predict(input_ids_1, attention_mask_1, input_ids_2, attention_mask_2, t, METHOD)
predict_all = np.append(predict_all, pred)
truth = labels.cpu().numpy()
                labels_all = np.append(labels_all, truth)
"""
-----------------------------------------------------------------------------
Name : WIFI Diag
Author : <NAME>
Date : 20 September 2020
------------------------------------------------------------------------------
"""
"""
Example: python PcaplibFiles.py --input "11ax.pcapng","sta1.pcap"
"""
import datetime
import pyshark
import pandas as pd
from bokeh.plotting import figure, output_file, show, save
from bokeh.io.export import get_screenshot_as_png, export_png, export_svgs, export_svg
import matplotlib.pyplot as plt
from plotly.offline import iplot, init_notebook_mode
import plotly.express as px
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import base64
from io import BytesIO
from htmlText import *
from Dataplot import Plot
import shutil
import argparse
import logging
import numpy as np
import os
def PacketHistogram(subtype_list, Managementls, Controlls, Data_framels, count):
    # Create a dictionary of Management Frame : {Subtype}, Control Frame : {Subtype}, Data Frame : {Subtype}
Type_Subtype = {"Management Frame": [Managementls], "Control Frame": [Controlls], "Data Frame": [Data_framels]}
Type_list = []
Sub_list = []
pack_list = []
per_list = []
    # Count the total number of packets of each subtype within each type,
    # e.g. how many packets of a given subtype fall under the Management/Control/Data frame types
for Type, Subtype in Type_Subtype.items():
liskeys = []
for key in subtype_list.values():
if (key in liskeys):
continue
val = Subtype[0].count(key)
liskeys.append(key)
# liskeys.append(key)
if (val != 0):
# Type_list = [Type,key,val,(val*100)/count]
Type_list.append(str(Type))
Sub_list.append(key)
pack_list.append(val)
per_list.append((round((val * 100) / count, 2)))
# Type_list.append("")
Sub = Sub_list
NewSubList = Sub_list
# NewSubList.append("Sum: ")
#
# pack_list.append(sum(pack_list))
NewPerList = per_list
# NewPerList.append(sum(NewPerList))
# print(len(subtype_list),len(NewSubList),len(pack_list),print(NewPerList))
df_Type = pd.DataFrame(({" Type ": Type_list, " Subtype ": NewSubList, " Total Packets ": pack_list, "Percentage": NewPerList}))
# print("df_Type",df_Type)
df_Type = df_Type.to_html(index=False)
# NewPerList.pop()
# NewSubList.pop()
plot = Plot()
path = plot.bar(datax=NewSubList,datay=NewPerList,title="Type/SubType plot",xaxis="Subtype",yaxis="Percentage",figname="Type")
htmltable(" Packet Type histogram", df_Type, str(path), "0", "0","Summary ")
def RateHistogram(DataRate, PhyType, SignalStrength, count):
countUniqueData = []
perUniqueData = []
countUniquePhy = []
perUniquePhy = []
countUniqueSignal = []
perUniqueSignal = []
# This is for Data Table Histogram
uniqueData = np.unique(DataRate)
for i in uniqueData:
countUniqueData.append(DataRate.count(i))
uniqueData = [i for i in uniqueData]
# uniqueData.append("Sum: ")
# countUniqueData.append(sum(countUniqueData))
dictRate = (dict(zip(uniqueData, countUniqueData, )))
for c in countUniqueData:
perUniqueData.append(round((c * 100) / count, 2))
df_Rate = pd.DataFrame({" Rate MBPS ": [i for i in dictRate.keys()], " Total Packets ": [j for j in dictRate.values()], " Percentage ": [k for k in perUniqueData]})
# df_Rate = df_Rate.T
# df_Rate.columns = df_Rate.iloc[0]
# df_Rate = df_Rate.drop(df_Rate.iloc[0].index.name)
df_Rate = df_Rate.to_html(index=False)
# uniqueData.pop()
# perUniqueData.pop()
plot1 = Plot()
path = plot1.bar(datax=uniqueData, datay=perUniqueData, title="Rate plot", xaxis="Rate MBPS", yaxis="Percentage",
figname="rate")
htmltable(" Encoding rate histogram.", df_Rate, str(path), "0", "0","Summary ")
# This is for Phy Histogram
uniquePhy = np.unique(PhyType)
for j in uniquePhy:
countUniquePhy.append(PhyType.count(j))
uniquePhy = [i for i in uniquePhy]
# uniquePhy.append("Sum: ")
# countUniquePhy.append(sum(countUniquePhy))
dictPhy = (dict(zip(uniquePhy, countUniquePhy)))
for d in countUniquePhy:
perUniquePhy.append(round((d * 100) / count, 2))
df_Phy = pd.DataFrame({" PHY ": [i for i in dictPhy.keys()], " Total Packets ": [j for j in dictPhy.values()]," Percentage ": [k for k in perUniquePhy]})
# print("df_Phy",df_Phy)
# df_Phy = df_Phy.to_html()
# df_Phy = df_Phy.T
# df_Phy.columns = df_Phy.iloc[0]
# df_Phy = df_Phy.drop(df_Phy.iloc[0].index.name)
df_Phy = df_Phy.to_html(index=False)
dictphys = [i for i in dictPhy.keys()]
# dictphys.pop()
# perUniquePhy.pop()
plot2 = Plot()
path = plot2.bar(datax=dictphys, datay=perUniquePhy, title="Phy plot", xaxis="Subtype", yaxis="Percentage",
figname="Phy")
htmltable(" Phy Histogram.",df_Phy,str(path),"0","0","Summary ")
# This is for Signal Histogram
uniqueSignal = np.unique(SignalStrength)
for k in uniqueSignal:
countUniqueSignal.append(SignalStrength.count(k))
uniqueSignal = [i for i in uniqueSignal]
# uniqueSignal.append("Sum: ")
# countUniqueSignal.append(sum(countUniqueSignal))
dictSig = (dict(zip(uniqueSignal, countUniqueSignal)))
for e in countUniqueSignal:
perUniqueSignal.append(round((e * 100) / count, 2))
# perUniqueSignal.append(sum(perUniqueSignal))
# pd.DataFrame.reset_index(drop=True,inplace=True)
# df_Sig = pd.DataFrame({"Signal": [i for i in dictSig.keys()], "Packet to Packet": [j for j in dictSig.values()],
# "Percentage": [k for k in perUniqueSignal]})
# pd.DataFrame.reset_index(drop=True,inplace=True)
# print([k for k in dictSig.keys()])
# print([i for i in dictSig.values()])
# print("perUniqueSignal",perUniqueSignal)
# pd.DataFrame
df_Sig = pd.DataFrame({" Signal ":[k for k in dictSig.keys()]," Total Packets ":[i for i in dictSig.values()]," Percentage ":[j for j in perUniqueSignal]})
# df_Sig = df_Sig.T
# df_Sig.columns = df_Sig.iloc[0]
# df_Sig = df_Sig.drop(df_Sig.iloc[0].index.name)
# df_Sig.columns.name = None
# df_Sig.index.name = "Signal"
# print("df_Sig",df_Sig)
# print("df_Sig",df_Sig)
# df_Sig = df_Sig.to_html()
# df_Sig = df_Sig.transpose()
df_Sig = df_Sig.to_html(index=False)
# perUniqueSignal.pop()
dictSigs = [i for i in dictSig.keys()]
# dictSigs.pop()
plot3 = Plot()
path = plot3.bar(datax=dictSigs, datay=perUniqueSignal, title="Signal plot", xaxis="Signal", yaxis="Percentage",
figname="Signal")
htmltable(" Signal Histogram.", df_Sig, str(path), "0", "0","Summary ")
# print(dictSigs,perUniqueSignal)
def PHY_BW_MCS_NCS(MCSIndex, vMCS, Bandwidth, vBW, PHY, vPHY, Spatial_Stream, vNCS, count):
countUniqueMCSIndex = []
countUniqueBandwidth = []
countUniquePHY = []
countUniqueSpatial_stream = []
perUniqueMCS = []
perUniqueBW = []
perUniquePHY = []
perUniqueNCS = []
uniqueMCSIndex = np.unique(MCSIndex)
uniqueBandwidth = ((np.unique(Bandwidth)))
# uniquePHY = ((np.unique(PHY)))
uniqueSpatial_stream = ((np.unique(Spatial_Stream)))
for countMCS in uniqueMCSIndex:
countUniqueMCSIndex.append(MCSIndex.count(countMCS))
for cnt in countUniqueMCSIndex:
perUniqueMCS.append(round((cnt * 100) / count, 2))
dictMCS = dict(zip(uniqueMCSIndex,countUniqueMCSIndex))
df_MCS = pd.DataFrame({" MCS ": [k for k in dictMCS.keys()], " Total Packets ": [i for i in dictMCS.values()]," Percentage ":[j for j in perUniqueMCS]})
# df_MCS = df_MCS.T
# df_MCS.columns = df_MCS.iloc[0]
# df_MCS = df_MCS.drop(df_MCS.iloc[0].index.name)
# print("df_MCS", df_MCS)
df_MCS = df_MCS.to_html(index=False)
dictMCSs = [i for i in dictMCS.keys()]
plot4 = Plot()
path = plot4.bar(datax=dictMCSs, datay=perUniqueMCS, title="MCS plot", xaxis="MCS", yaxis="Percentage",
figname="MCS")
PacketInfo = ("Data packets having MCS field: "+str(vMCS)+"<br>")
htmltable("Data MCS Histogram.", df_MCS, str(path), "0", "0",PacketInfo)
# print(uniqueMCSIndex, countUniqueMCSIndex)
for countBandwidth in uniqueBandwidth:
countUniqueBandwidth.append(Bandwidth.count(countBandwidth))
for cnt in countUniqueBandwidth:
perUniqueBW.append(round((cnt * 100) / count, 2))
dictBW = dict(zip(uniqueBandwidth, countUniqueBandwidth))
df_BW = pd.DataFrame({" Bandwidth ": [k for k in dictBW.keys()], " Total Packets ": [i for i in dictBW.values()]," Percentage ":[j for j in perUniqueBW]})
# df_BW = df_BW.T
# df_BW.columns = df_BW.iloc[0]
# df_BW = df_BW.drop(df_BW.iloc[0].index.name)
# print("df_BW", df_BW)
df_BW = df_BW.to_html(index=False)
dictBWs = [i for i in dictBW.keys()]
plot5 = Plot()
path = plot5.bar(datax=dictBWs, datay=perUniqueBW, title="Bandwidth plot", xaxis="Bandwidth", yaxis="Percentage",
figname="Bandwidth")
PacketInfo = ("Data packets having BW field: " + str(vBW) + "<br>")
htmltable("Data Bandwidth Histogram.", df_BW, str(path), "0", "0",PacketInfo)
# print(uniqueBandwidth, countUniqueBandwidth)
"""
#For PHY
for countPHY in uniquePHY:
countUniquePHY.append(PHY.count(countPHY))
for cnt in countUniquePHY:
perUniquePHY.append(round((cnt * 100) / count, 2))
dictPHY = dict(zip(uniquePHY, countUniquePHY))
df_PHY = pd.DataFrame({"PHY": [k for k in dictPHY.keys()], "Packet": [i for i in dictPHY.values()],"Percentage":[j for j in perUniquePHY]})
df_PHY = df_PHY.T
df_PHY.columns = df_PHY.iloc[0]
df_PHY = df_PHY.drop(df_PHY.iloc[0].index.name)
print("df_PHY", df_PHY)
"""
for countNCS in uniqueSpatial_stream:
countUniqueSpatial_stream.append(Spatial_Stream.count(countNCS))
for cnt in countUniqueSpatial_stream:
perUniqueNCS.append(round((cnt * 100) / count, 2))
dictNCS = dict(zip(uniqueSpatial_stream, countUniqueSpatial_stream))
df_NCS = pd.DataFrame({" NSS ": [k for k in dictNCS.keys()], " Total Packets ": [i for i in dictNCS.values()]," Percentage ":[j for j in perUniqueNCS]})
# df_NCS = df_NCS.T
# df_NCS.columns = df_NCS.iloc[0]
# df_NCS = df_NCS.drop(df_NCS.iloc[0].index.name)
# print("df_NCS", df_NCS)
# df_NCS = df_NCS.T
df_NCS = df_NCS.to_html(index=False)
dictNCSs = [i for i in dictNCS.keys()]
plot6 = Plot()
path = plot6.bar(datax=dictNCSs, datay=perUniqueNCS, title="NCS plot", xaxis="Spatial stream", yaxis="Percentage",
figname="NSS")
PacketInfo = ("Data packets having NSS field: " + str(vNCS) + "<br>")
htmltable("Data NSS Histogram.", df_NCS, str(path), "0", "0",PacketInfo)
def RateAMPDU(AMPDU,count):
countAMPDU = []
# print("IN AMPDU")
# print("AMPDU: ",AMPDU)
countUniqueAMPDU = []
perUniqueAMPDU = []
chainCountAMPDU = []
uniqueAMPDU = np.unique(AMPDU)
uniqueAMPDU = [i for i in uniqueAMPDU]
# print("uniqueAMPDU",uniqueAMPDU)
#
# print("len(uniqueAMPDU)",len(uniqueAMPDU))
for countAMPDU in uniqueAMPDU:
countUniqueAMPDU.append(AMPDU.count(countAMPDU))
# print("countUniqueAMPDU",countUniqueAMPDU)
# print("len(countUniqueAMPDU)",len(countUniqueAMPDU))
chainUniqueAMPDU = np.unique(countUniqueAMPDU)
chainUniqueAMPDU = [i for i in chainUniqueAMPDU]
# print("chainUniqueAMPDU", chainUniqueAMPDU)
# print("len(chainUniqueAMPDU)", len(chainUniqueAMPDU))
for Acount in chainUniqueAMPDU:
chainCountAMPDU.append(countUniqueAMPDU.count(Acount))
# print(" len(chainCountAMPDU): ", len(chainCountAMPDU))
# print("chainCountAMPDU",chainCountAMPDU)
    UniqueChainCountAMPDU = np.unique(chainCountAMPDU)
import warnings
import networkx as nx
import numpy as np
import scipy.sparse as sp
from sklearn import metrics
class DataUtils:
def __init__(self, graph_file):
with np.load(graph_file, allow_pickle=True) as loader:
loader = dict(loader)
self.A = sp.csr_matrix((loader['adj_data'], loader['adj_indices'],
loader['adj_indptr']), shape=loader['adj_shape'])
self.X = sp.csr_matrix((loader['attr_data'], loader['attr_indices'],
loader['attr_indptr']), shape=loader['attr_shape'])
self.labels = loader['labels']
self.val_edges = loader['val_edges']
self.val_ground_truth = loader['val_ground_truth']
self.test_edges = loader['test_edges']
self.test_ground_truth = loader['test_ground_truth']
self.g = nx.from_scipy_sparse_matrix(self.A)
self.num_of_nodes = self.g.number_of_nodes()
self.num_of_edges = self.g.number_of_edges()
self.edges_raw = self.g.edges(data=True)
# edges_arr = np.array([(a, b) for a, b, c in self.edges_raw])
# self.edges_is_hom = self.labels[edges_arr[:, 0]] == self.labels[edges_arr[:, 1]]
self.nodes_raw = self.g.nodes(data=True)
self.edge_distribution = np.array([attr['weight'] for _, _, attr in self.edges_raw], dtype=np.float32)
self.edge_distribution /= np.sum(self.edge_distribution)
self.edge_sampling = AliasSampling(prob=self.edge_distribution)
self.node_negative_distribution = np.power(
np.array([self.g.degree(node, weight='weight') for node, _ in self.nodes_raw], dtype=np.float32), 0.75)
self.node_negative_distribution /= np.sum(self.node_negative_distribution)
self.node_sampling = AliasSampling(prob=self.node_negative_distribution)
self.node_index = {}
self.node_index_reversed = {}
for index, (node, _) in enumerate(self.nodes_raw):
self.node_index[node] = index
self.node_index_reversed[index] = node
self.edges = [(self.node_index[u], self.node_index[v]) for u, v, _ in self.edges_raw]
def fetch_next_batch(self, labels_to_use, batch_size=16, K=10):
u_i = []
u_j = []
label = []
is_hom = []
for edge_index in self.edge_sampling.sampling(batch_size):
edge = self.edges[edge_index]
if self.labels[edge[0]] in labels_to_use and self.labels[edge[1]] in labels_to_use:
if self.g.__class__ == nx.Graph:
if np.random.rand() > 0.5:
edge = (edge[1], edge[0])
u_i.append(edge[0])
u_j.append(edge[1])
label.append(1)
is_hom.append(self.labels[edge[0]] == self.labels[edge[1]])
for i in range(K):
while True:
negative_node = self.node_sampling.sampling()
if self.labels[negative_node] in labels_to_use:
if not self.g.has_edge(
self.node_index_reversed[negative_node],
self.node_index_reversed[edge[0]]):
break
u_i.append(edge[0])
u_j.append(negative_node)
label.append(-1)
is_hom.append(self.labels[edge[0]] == self.labels[negative_node])
return u_i, u_j, label, is_hom
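    # Added usage sketch (illustrative only; the file name and label set are assumptions):
    #   data = DataUtils("graph.npz")
    #   u_i, u_j, label, is_hom = data.fetch_next_batch(labels_to_use={0, 1}, batch_size=32, K=5)
    # Each sampled edge yields one (u_i, u_j) pair with label +1 plus K negative pairs with
    # label -1, where negatives are drawn from the degree^0.75 node distribution via alias sampling.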
def embedding_mapping(self, embedding):
return {node: embedding[self.node_index[node]] for node, _ in self.nodes_raw}
class AliasSampling:
# Reference: LINE source code from https://github.com/snowkylin/line
# Reference: https://en.wikipedia.org/wiki/Alias_method
def __init__(self, prob):
self.n = len(prob)
        self.U = np.array(prob)
# -*- coding:utf-8 -*-
import wave
import numpy as np
import scipy.io.wavfile
import scipy.signal
from pylab import *
def autocorr(x, nlags=None):
"""自己相関関数を求める
x: 信号
nlags: 自己相関関数のサイズ(lag=0からnlags-1まで)
引数がなければ(lag=0からlen(x)-1まですべて)
"""
N = len(x)
if nlags == None: nlags = N
r = np.zeros(nlags)
for lag in range(nlags):
for n in range(N - lag):
r[lag] += x[n] * x[n + lag]
return r
def LevinsonDurbin(r, lpcOrder):
"""Levinson-Durbinのアルゴリズム
k次のLPC係数からk+1次のLPC係数を再帰的に計算して
LPC係数を求める"""
# LPC係数(再帰的に更新される)
# a[0]は1で固定のためlpcOrder個の係数を得るためには+1が必要
a = np.zeros(lpcOrder + 1)
e = np.zeros(lpcOrder + 1)
    # case k = 1
a[0] = 1.0
a[1] = - r[1] / r[0]
e[1] = r[0] + r[1] * a[1]
lam = - r[1] / r[0]
    # recursively derive the order-(k+1) solution from the order-k one
for k in range(1, lpcOrder):
        # update lambda
lam = 0.0
for j in range(k + 1):
lam -= a[j] * r[k + 1 - j]
lam /= e[k]
        # update a
        # update a from U and V
U = [1]
U.extend([a[i] for i in range(1, k + 1)])
U.append(0)
V = [0]
V.extend([a[i] for i in range(k, 0, -1)])
V.append(1)
a = np.array(U) + lam * np.array(V)
        # update e
e[k + 1] = e[k] * (1.0 - lam * lam)
return a, e[-1]
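# Added usage sketch (not part of the original script; the order and test signal are
# arbitrary illustrative choices): estimate LPC coefficients for a short synthetic tone.
def _lpc_example(order=8):
    fs = 8000.0
    t = np.arange(0, 0.02, 1.0 / fs)
    s = np.sin(2 * np.pi * 440.0 * t)   # 20 ms of a 440 Hz tone
    r = autocorr(s, order + 1)          # autocorrelation lags 0..order
    a, e = LevinsonDurbin(r, order)     # LPC coefficients a (a[0] = 1) and residual energy e
    return a, e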
class Identifer(object):
def __init__(self):
        ## ----*----- constructor -----*----- ##
self.rate = 8000
def read_wavfile(self, file):
        ## -----*----- load a wav file -----*----- ##
wf = wave.open(file, 'r')
fs = wf.getframerate()
x = wf.readframes(wf.getnframes())
        x = np.frombuffer(x, dtype="int16") / 32768.0  # normalize to (-1, 1)
wf.close()
return x, float(fs)
def preEmphasis(self, signal, p):
        ## -----*----- pre-emphasis filter -----*----- ##
# 係数 (0.97, -p) のFIRフィルタを作成
return scipy.signal.lfilter([1.0, -p], 1, signal)
def spectrum(self, s, a, e, fs, file):
        ## -----*----- compute the amplitude spectrum of the LPC coefficients -----*----- ##
fscale = np.fft.fftfreq(self.rate, d=1.0 / fs)[:self.rate // 2]
        # log spectrum of the original signal
spec = np.abs(np.fft.fft(s, self.rate))
logspec = 20 * np.log10(spec)
        # LPC log spectrum
w, h = scipy.signal.freqz(np.sqrt(e), a, self.rate, "whole")
        lpcspec = np.abs(h)
from netCDF4 import Dataset,num2date
import pdb
import numpy as np
import matplotlib.pyplot as plt
from datetime import datetime#,timedelta
#import xarray
#lon = np.arange(-19.888889,12.99967+1/9.,1/9.)
#lat = np.arange(40.066669,65+1/15.,1/15.)
#import os
from scipy.stats import chi2
from CurrUncertEllipses import *
def main():
curr_uncert_prob_threshold_perc_data_in_xsd_table()
fig = curr_uncert_prob_threshold_perc_data_in_xsd_figure()
def curr_uncert_prob_threshold_perc_data_in_xsd_figure():
# Calculate and plot the percentage of data within an uncertainty ellipse
# of a given size (in terms of standard deviations).
#
# As well deriving these values for the chi-squared distribution table, two
# numerical methods are used (See Tinker et al. (2022) for details).
#
# Produces Appendix Figure 7 in Tinker et al. 2022
#Array of Standard deviations
n_std_mat = np.arange(0,3.2,0.1)
# precentage of data within ellipse of a given standard deviation size, using the:
#Statistical theoretical method (using chi squared probabilty tables)
stat_sd_plev_mat = data_within_xsd_chi_sq(n_std_mat = n_std_mat)
#Gaussian distribution method (Integrating a bivariate Gaussian distribution within the ellipse)
gauss_sd_plev_mat = data_within_xsd_gauss_integ(n_std_mat = n_std_mat)
    #Random data method (asking what proportion of a random bivariate Gaussian data set falls within an ellipse).
rand_sd_plev_mat = data_within_xsd_random_cnt(n_std_mat = n_std_mat,npnts = 10000)# 100 = 1min, 1000 = 1 min # 10000 = 2 mins
print('Start plotting',datetime.now())
fig = plt.figure()
fig.set_figheight(4)
fig.set_figwidth(6.0)
plt.subplots_adjust(top=0.95,bottom=0.15,left=0.15,right=0.95,hspace=0.2,wspace=0.2)
plt.plot([0,nstd_cutoff(90),nstd_cutoff(90)],[90,90,0],'0.75')
plt.plot([0,nstd_cutoff(95),nstd_cutoff(95)],[95,95,0],'0.75')
plt.text(0.1,95,'95%', ha = 'left', va = 'center')
plt.text(0.1,90,'90%', ha = 'left', va = 'center')
plt.text(nstd_cutoff(90),5,'%.2f'%nstd_cutoff(90), ha = 'center', va = 'center')
plt.text(nstd_cutoff(95),5,'%.2f'%nstd_cutoff(95), ha = 'center', va = 'center')
plt.plot(n_std_mat, 100.*rand_sd_plev_mat.mean(axis = 1),'r', lw = 2, label = 'Random')
plt.plot(n_std_mat, 100.*rand_sd_plev_mat.mean(axis = 1) + 2*rand_sd_plev_mat.std(axis = 1),'r-', lw = 1)
plt.plot(n_std_mat, 100.*rand_sd_plev_mat.mean(axis = 1) - 2*rand_sd_plev_mat.std(axis = 1),'r-', lw = 1)
plt.plot(n_std_mat, 100.*gauss_sd_plev_mat.mean(axis = 1),'b', lw = 2, label = 'Distr Integ')
plt.plot(n_std_mat, 100.*gauss_sd_plev_mat.mean(axis = 1) + 2*gauss_sd_plev_mat.std(axis = 1),'b-', lw = 1)
plt.plot(n_std_mat, 100.*gauss_sd_plev_mat.mean(axis = 1) - 2*gauss_sd_plev_mat.std(axis = 1),'b-', lw = 1)
plt.plot(n_std_mat, 100.*stat_sd_plev_mat,'k--', lw = 2, label = 'Chi Sq')
plt.xlabel('Size of uncertainty ellipse\n(number of standard deviation)')
plt.ylabel('% Data within uncertainty ellipse')
plt.ylim([0,100])
plt.xlim([0,3])
plt.legend()
print('Return handle',datetime.now())
return fig
def curr_uncert_prob_threshold_perc_data_in_xsd_table():
# Produce a table of probabilty thresholds for ellipse size.
# Produces Appendix Table 1 in Tinker et al. 2022
perc_lev_mat = np.array([50, 75,90, 95, 97.5, 99,99.5 ])
p_lev = 1-(perc_lev_mat/100.)
chi_sq_table_vals_mat = nstd_cutoff(perc_lev_mat)**2
nstd_thresh_size_mat = nstd_cutoff(perc_lev_mat)
print('')
print('------------------------------------------------------------------------------------')
print('')
print('Uncertainty Ellipse size (in standard deviations) and data coverage (%),,Chi Squared Distribution Table (with 2 degrees of freedom),,')
print('Percentage of data within Uncertainty Ellipse,Size of uncertainty ellipse (# standard deviations),Critical value,Probability of exceeding the critical value')
for ii,jj,kk,ll in zip(perc_lev_mat,nstd_thresh_size_mat,p_lev,chi_sq_table_vals_mat,):print('%.1f%%,%.4f,%.3f,%.3f'%(ii,jj,kk,ll))
print('')
print('------------------------------------------------------------------------------------')
print('')
def nstd_cutoff(percent_val):
#For a given percentage value, how big (in standard deviations)
    #must the ellipse be to capture that percentage of data
#
# Based on the Chi-squared inverse survival function
nstd = np.sqrt(chi2.isf(1-percent_val/100, 2))
return nstd
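# Illustrative values (added note): nstd_cutoff(90) ~= 2.146 and nstd_cutoff(95) ~= 2.448,
# i.e. the square roots of the 2-dof chi-squared critical values 4.605 and 5.991.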
def data_within_xsd_chi_sq(n_std_mat = np.arange(0,3,0.1)):
# following:
#https://www.visiondummy.com/2014/04/draw-error-ellipse-representing-covariance-matrix
# To calculate the amount of data within an ellipse of size x std devs,
# we can use the chi squared probabilty table.
#chi squared probability table:
#https://people.richland.edu/james/lecture/m170/tbl-chi.html
#c&p 2df row and headers:
chi2_prob_2df = np.array([0.010,0.020,0.051,0.103,0.211,4.605,5.991,7.378,9.210,10.597])
chi2_prob_plev = np.array([0.995, 0.99, 0.975, 0.95, 0.90, 0.10, 0.05, 0.025, 0.01, 0.005])
# this can be created with python scipy.stats chi2:
# https://stackoverflow.com/questions/32301698/how-to-build-a-chi-square-distribution-table
chi_sq_prob_2df_table = chi2.isf(chi2_prob_plev, 2)
    # plotting 1-chi2_prob_plev against np.sqrt(chi_sq_prob_2df_table) gives you the
    # required number of std devs (the square root of the critical value) to encapsulate
    # x % of data (1-chi2_prob_plev).
# for a given array of standard deviations, we can use this approach to
# calculate the percentage data within the corresponding ellipse.
# rather than using the inverse survival function, we now use the
# survival function
chi2_pval_mat = 1-chi2.sf(n_std_mat**2, 2)
return chi2_pval_mat #, chi2_prob_plev, chi_sq_prob_2df_table
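# Worked example (added illustration): chi2.sf(n**2, 2) = exp(-n**2 / 2), so the fraction of
# bivariate-Gaussian data inside the 1, 2 and 3 standard-deviation ellipses is
# 1 - exp(-0.5) ~= 39.3%, 1 - exp(-2) ~= 86.5% and 1 - exp(-4.5) ~= 98.9%,
# the 2-D analogue of the univariate 68-95-99.7 rule.
def _chi_sq_worked_example():
    return data_within_xsd_chi_sq(np.array([1.0, 2.0, 3.0]))  # ~ [0.393, 0.865, 0.989]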
def data_within_xsd_gauss_integ_val(U_mean = 0.,U_var = 1.,V_mean = 0.,V_var = 1.,UV_corr = 0.5, n_std = 1.96, plotting = False, verbose = True, npnt_counting = 151, n_std_limits = 2):
# To calculate the amount of data within an ellipse of size x std devs,
# we can integrate a bivariate gaussian distribution surface within the ellipse.
# We do this numerically, so this is a semi-numerical semi-analytical method.
#
    # We create a discretised bivariate Gaussian distribution surface
    # (for given means, variances and covariance (actually correlation)).
# We find the (near constant) value of the surface around the ellipse, and
# then (numerically) integrate the values of the surface that are greater
# than this value.
#Covariance from Pearsons Correlation.
UV_cov = UV_corr*np.sqrt(U_var)*np.sqrt(V_var)
#details of the ellipse
X_elip_amp,Y_elip_amp,X_elip_phi,Y_elip_phi,X_elip_phi_cos,Y_elip_phi_cos = confidence_ellipse_uv_stats_parametric_equation(U_mean,V_mean,U_var, V_var, UV_cov)
twoaltone = np.array(([-1,1]))
ang = np.linspace(-np.pi,np.pi, 100)
#limits of the Gaussian surface
Xlim_val = n_std_limits*n_std*(X_elip_amp)
Ylim_val = n_std_limits*n_std*(Y_elip_amp)
if Xlim_val <(4*(X_elip_amp)):Xlim_val = (4*(X_elip_amp))
if Ylim_val <(4*(Y_elip_amp)):Ylim_val = (4*(Y_elip_amp))
Xlim = Xlim_val*twoaltone+U_mean
Ylim = Ylim_val*twoaltone+V_mean
# x and y mesh for the surface
tmpx_test = np.linspace(np.min((Xlim)),np.max((Xlim)),npnt_counting)
tmpy_test = np.linspace(np.min((Ylim)),np.max((Ylim)),npnt_counting)
tmpx_test_mat,tmpy_test_mat = np.meshgrid(tmpx_test,tmpy_test)
tmpdx = np.diff(tmpx_test).mean()
tmpdy = np.diff(tmpy_test).mean()
# the uncertainty ellipse
Xo = n_std*(X_elip_amp*np.sin(ang + X_elip_phi))+U_mean
Yo = n_std*(Y_elip_amp*np.sin(ang + Y_elip_phi))+V_mean
    #Calculate the Gaussian surface over the x and y mesh, and around the ellipse
gauss = gauss_func_2d(tmpx_test_mat,tmpy_test_mat,U_mean,V_mean,U_var,V_var,UV_cov)[0]
gauss_ell = gauss_func_2d(Xo,Yo,U_mean,V_mean,U_var,V_var,UV_cov)[0]
    # find the distribution values that are greater than the (mean)
# ellipse distribution value
ind_inside_ell = gauss>=gauss_ell.mean()
    # The infinite bivariate distribution surface should integrate to 1.
    # By integrating the full discretised distribution, we get an idea of the
# error term
p_val_full_decrete_dist = gauss.sum()*tmpdx*tmpdy
# Integrating the values greater than the ellipse values is equivalent to
# integrating the values within the ellipse.
p_val = gauss[ind_inside_ell].sum()*tmpdx*tmpdy
if plotting:
ax = []
ax.append(plt.subplot(2,2,1))
plt.pcolormesh(tmpx_test_mat,tmpy_test_mat,gauss)
plt.contour(tmpx_test_mat,tmpy_test_mat,gauss, [gauss_ell.mean()], colors = 'y')
plt.plot(Xo,Yo,'r--')
ax.append(plt.subplot(2,2,2))
plt.pcolormesh(tmpx_test_mat,tmpy_test_mat,ind_inside_ell)
plt.contour(tmpx_test_mat,tmpy_test_mat,gauss, [gauss_ell.mean()], colors = 'y')
plt.plot(Xo,Yo,'r--')
ax.append(plt.subplot(2,2,3))
plt.pcolormesh(tmpx_test_mat,tmpy_test_mat,)
plt.contour(tmpx_test_mat,tmpy_test_mat,gauss, [gauss_ell.mean()], colors = 'y')
plt.plot(Xo,Yo,'r--')
if verbose: print(n_std, p_val)
return p_val, p_val_full_decrete_dist
#plt.show()
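# Quick numerical check (added illustration): with the default arguments the integrated
# probability should be close to the analytical 2-dof chi-squared value
# 1 - exp(-1.96**2 / 2) ~= 0.854, up to the discretisation error of the grid, and the
# full-grid integral p_val_full_decrete_dist should be close to 1.
def _gauss_integ_quick_check():
    return data_within_xsd_gauss_integ_val(n_std=1.96, verbose=False)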
def data_within_xsd_random_cnt_val(U_mean = 0,U_var = 1,V_mean = 0,V_var = 1,UV_corr=0., npnts = 100000,n_std_mat = np.arange(0,3,0.01)):
# To calculate the amount of data within an ellipse of size x std devs,
# we can create a random data with a bivariate normal distribution for a
# given set of means, variance and covariance (actually correlation).
# We can then fit an ellipse to these data (for a given number of standard
# deviations), and calucate the precentage of points within the ellipse.
# We then cycle through a range of standard deviations (n_std_mat)
#Covariance from Pearsons Correlation.
UV_cov = UV_corr*np.sqrt(U_var)*np.sqrt(V_var)
    #Create a random data set with a bivariate normal distribution
U_mat,V_mat = np.random.multivariate_normal([U_mean,V_mean], [[U_var,UV_cov],[UV_cov,V_var]], npnts).T
    #cycle through a range of ellipse sizes of varying standard deviations
n_perc_joint_mat = n_std_mat.copy()*0.
for ni,n_std in enumerate(n_std_mat):
#for a given standard deviation:
        #find the uncertainty ellipse, and details of it:
X_elip_amp,Y_elip_amp,X_elip_phi,Y_elip_phi,X_elip_phi_cos,Y_elip_phi_cos = confidence_ellipse_uv_mat_parametric_equation(U_mat.reshape(-1,1,1),V_mat.reshape(-1,1,1), n_std = n_std)
qmax,qmin, ecc, theta_max, zero_ang = ellipse_parameters_from_parametric_equation(X_elip_amp,Y_elip_amp,X_elip_phi,Y_elip_phi,U_mean,V_mean)
# find the ellipse foci (important for asking whether a point is within an ellipse or not)
foci_max,foci_x_1,foci_y_1,foci_x_2,foci_y_2 = find_parameteric_ellipse_foci(qmax, qmin,theta_max,U_mean,V_mean,n_std)
# Ask which of our random data set are within the ellipse
pnt_inside_ell_sig_1,foci_pnt_foci_dist_sig = point_inside_parameteric_ellipse(U_mat.reshape(-1,1,1),V_mat.reshape(-1,1,1),n_std, foci_x_1,foci_y_1,foci_x_2,foci_y_2,qmax)
# Record the percentage of data within our ellipse.
n_perc_joint_mat[ni] = pnt_inside_ell_sig_1.sum()/pnt_inside_ell_sig_1.size
    # Repeat for a univariate normal distribution.
    # ask which points are within x standard deviations of the mean
n_perc_single_mat = n_std_mat.copy()*0.
U_std = U_mat.std()
for ni,n_std in enumerate(n_std_mat):n_perc_single_mat[ni] = (np.abs((U_mat[:]-U_mean)/U_std)<=n_std).sum()/U_mat.size#((np.abs(U_mat)/U_std)<n_std).sum()/U_mat.size
return n_perc_joint_mat, n_perc_single_mat
################################################################################
def data_within_xsd_gauss_integ(n_std_mat = np.arange(0,3,0.1), U_mean_mat = np.arange(-1.5,1.,0.5), V_mean_mat = np.arange(-1.5,2,0.5), U_var_mat = np.arange(0.25,1.5,0.25), V_var_mat = np.arange(0.25,1.5,0.25), UV_corr_mat = np.arange(-0.75,1.00,0.25)):
# To calculate the amount of data within an ellipse of size x std devs,
# we can integrate a bivariate gaussian distribution surface within the ellipse.
#
    # Here we cycle through a range of values of the mean, variance and covariance
# (actually correlations) and apply data_within_xsd_gauss_integ_val to
# create an ensemble of results, to show that there is very little dependence
# on the shape and location of the ellipse.
uv_ms_c_lst = [(U_mean,U_var,V_mean,V_var,UV_corr) for U_mean in U_mean_mat for V_mean in V_mean_mat for U_var in U_var_mat for V_var in V_var_mat for UV_corr in UV_corr_mat]
uv_ms_c_mat =np.array(uv_ms_c_lst)
print('Start Gaussian method',datetime.now()) # 2min run time
gauss_sd_plev_lst = []
for (U_mean,U_var,V_mean,V_var,UV_corr) in uv_ms_c_lst[:]:
gauss_sd_plev_lst_curr_it = []
for n_std in n_std_mat:
gauss_sd_plev_lst_curr_it.append(data_within_xsd_gauss_integ_val(U_mean = U_mean,U_var = U_var,V_mean = V_mean,V_var = V_var,UV_corr=UV_corr,n_std = n_std, plotting = False, verbose = False)[0])
gauss_sd_plev_lst.append(gauss_sd_plev_lst_curr_it)
gauss_sd_plev_mat = np.array(gauss_sd_plev_lst)
print('Stop Gaussian method',datetime.now())
return gauss_sd_plev_mat.T
def data_within_xsd_random_cnt(n_std_mat = np.arange(0,3,0.1), npnts = 10000):
import argparse
import os
import csv
import random
import logging
from tqdm import tqdm, trange
import numpy as np
import torch
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
TensorDataset)
from pytorch_pretrained_bert import (OpenAIGPTDoubleHeadsModel, OpenAIGPTTokenizer,
OpenAIAdam, cached_path, WEIGHTS_NAME, CONFIG_NAME)
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO)
logger = logging.getLogger(__name__)
###############################################################################
def accuracy(out, labels):
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)
# Copyright 2021 The T5X Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for t5x.models."""
import functools
from unittest import mock
from absl import logging
from absl.testing import absltest
from absl.testing import parameterized
import flax
from flax import traverse_util
import jax
import jax.numpy as jnp
import numpy as np
import t5.data.tasks # pylint:disable=unused-import
from t5x import decoding
from t5x import models
from t5x import partitioning
from t5x import trainer as trainer_lib
from t5x import utils
import tensorflow as tf
# Parse absl flags test_srcdir and test_tmpdir.
jax.config.parse_flags_with_absl()
PartitionSpec = partitioning.PartitionSpec
class ModelsTest(parameterized.TestCase):
def test_remove_prefix(self):
sequences = np.array([[1, 2, 3, 4, 5, 6, 7, 0], [6, 7, 8, 9, 10, 11, 0, 0]])
prefix_lengths = np.array([2, 4])
expected = [[3, 4, 5, 6, 7, 0, 0, 0], [10, 11, 0, 0, 0, 0, 0, 0]]
remove_prefix = jax.jit(models.remove_prefix)
actual = remove_prefix(sequences, prefix_lengths)
np.testing.assert_array_equal(actual, expected)
def test_remove_prefix_zero_len_prefix(self):
sequences = np.array([[1, 2, 3, 4, 5, 6, 7, 0], [6, 7, 8, 9, 10, 11, 0, 0]])
prefix_lengths = np.array([0, 0])
remove_prefix = jax.jit(models.remove_prefix)
actual = remove_prefix(sequences, prefix_lengths)
# The expected output is the original sequences.
np.testing.assert_array_equal(actual, sequences)
BATCH_SIZE, ENCODER_LEN, MAX_DECODE_LEN, EMBED_DIM = 2, 3, 4, 5
class EncoderDecoderModelTest(parameterized.TestCase):
@parameterized.named_parameters(
dict(
testcase_name='no_types',
shapes={
'encoder_input_tokens': [1, 512],
'decoder_input_tokens': [1, 62]
},
types=None),
dict(
testcase_name='int32',
shapes={
'encoder_input_tokens': [1, 512],
'decoder_input_tokens': [1, 62]
},
types={
'encoder_input_tokens': jnp.int32,
'decoder_input_tokens': jnp.int32
}),
dict(
testcase_name='float32',
shapes={
'encoder_input_tokens': [1, 512],
'decoder_input_tokens': [1, 62]
},
types={
'encoder_input_tokens': jnp.int32,
'decoder_input_tokens': jnp.int32
}),
)
def test_get_initial_variables_shapes_and_types(self, shapes, types):
mock_transformer = mock.Mock()
mock_transformer.init.return_value = {'params': {}}
mock_optimizer_def = mock.Mock()
rng = mock.Mock()
def mock_init(self):
self.module = mock_transformer
self.optimizer_def = mock_optimizer_def
with mock.patch.object(
models.EncoderDecoderModel, '__init__', new=mock_init):
model = models.EncoderDecoderModel()
model.get_initial_variables(rng, shapes, types)
if types is None:
encoder_input = jnp.ones(
shapes['encoder_input_tokens'], dtype=jnp.float32)
decoder_input = jnp.ones(
shapes['decoder_input_tokens'], dtype=jnp.float32)
else:
encoder_input = jnp.ones(
shapes['encoder_input_tokens'], dtype=types['encoder_input_tokens'])
decoder_input = jnp.ones(
shapes['decoder_input_tokens'], dtype=types['decoder_input_tokens'])
# Using `.assert_called_once_with` doesn't work because the simple
# comparison it does for the array arguments fail (truth value of an array
# is ambiguous).
called_with = mock_transformer.init.call_args
self.assertEqual(called_with[0][0], rng)
np.testing.assert_allclose(called_with[0][1], encoder_input)
np.testing.assert_allclose(called_with[0][2], decoder_input)
np.testing.assert_allclose(called_with[0][3], decoder_input)
self.assertEqual(mock_transformer.init.call_args[1], {
'decode': False,
'enable_dropout': False
})
def test_score_batch(self):
encoder_input_tokens = jnp.ones((2, 3))
# For this test, decoder input and target tokens are dummy values.
decoder_input_tokens = jnp.array([[1, 2, 1, 0], [0, 1, 0, 2]])
decoder_target_tokens = jnp.array([[1, 2, 1, 0], [0, 1, 0, 2]])
decoder_loss_weights = jnp.array([[1, 1, 1, 0], [0, 1, 0, 1]])
logits = jnp.arange(0, 24).reshape((2, 4, 3))
params = {'foo': jnp.zeros(3)}
mock_transformer = mock.Mock()
mock_transformer.apply.return_value = logits
mock_transformer.dtype = jnp.float32
batch = {
'encoder_input_tokens': encoder_input_tokens,
'decoder_input_tokens': decoder_input_tokens,
'decoder_target_tokens': decoder_target_tokens,
'decoder_loss_weights': decoder_loss_weights
}
def mock_init(self):
self.module = mock_transformer
with mock.patch.object(
models.EncoderDecoderModel, '__init__', new=mock_init):
model = models.EncoderDecoderModel()
res = model.score_batch(params, batch)
mock_transformer.apply.assert_called_with({'params': params},
encoder_input_tokens,
decoder_input_tokens,
decoder_target_tokens,
encoder_segment_ids=None,
decoder_segment_ids=None,
encoder_positions=None,
decoder_positions=None,
decode=False,
enable_dropout=False,
rngs=None,
mutable=False)
np.testing.assert_allclose(res, [-3.222973, -1.815315], rtol=1e-4)
@parameterized.parameters(
{'decode_fn': decoding.beam_search},
{'decode_fn': functools.partial(decoding.temperature_sample, topk=4)})
def test_predict_batch(self, decode_fn):
batch_size, encoder_len, max_decode_len, emb_dim = 2, 3, 4, 5
batch = {
'encoder_input_tokens':
np.zeros((batch_size, encoder_len), dtype=np.int32),
'decoder_input_tokens':
np.zeros((batch_size, max_decode_len), dtype=np.int32)
}
# These dummy logits represent the probability distribution where all the
# probability mass is in one item (i.e., degenerate distribution). For
# batch element 0, it is vocabulary index 2.
# We test `_predict_step` to avoid having to define a task and its
# vocabulary.
dummy_logits = jnp.expand_dims(
jnp.array([[-1e7, -1e7, 0, -1e7], [-1e7, -1e7, -1e7, 0]]), axis=1)
class MockModule:
def __init__(self):
self.dtype = jnp.float32
def apply(self, *args, method=None, **kwargs):
del args, kwargs
if method is None: # use for module.`__call__`
return (dummy_logits, {'cache': {}})
else:
return method()
def encode(self):
return jnp.zeros((batch_size, encoder_len, emb_dim))
def decode(self):
return (dummy_logits, {'cache': {}})
def mock_init(self):
self.module = MockModule()
self.module.scan_layers = False
self._input_vocabulary = mock.Mock(eos_id=1)
self._output_vocabulary = mock.Mock(eos_id=1)
self._decode_fn = decode_fn
with mock.patch.object(
models.EncoderDecoderModel, '__init__', new=mock_init):
model = models.EncoderDecoderModel()
actual = model.predict_batch({}, batch)
# The predicted token for the first batch element is always 2 and it is 3
# for the second batch element.
expected = [[2] * max_decode_len, [3] * max_decode_len]
np.testing.assert_array_equal(actual, expected)
@parameterized.named_parameters(
dict(
testcase_name='int32',
batch={
'encoder_input_tokens':
np.zeros((BATCH_SIZE, ENCODER_LEN), dtype=np.int32),
'decoder_input_tokens':
np.zeros((BATCH_SIZE, MAX_DECODE_LEN), dtype=np.int32)
}),
dict(
testcase_name='float32',
batch={
'encoder_input_tokens':
np.zeros((BATCH_SIZE, ENCODER_LEN), dtype=np.float32),
'decoder_input_tokens':
              np.zeros((BATCH_SIZE, MAX_DECODE_LEN), dtype=np.float32)
import numpy as np
import matplotlib.pyplot as plt
def euclidean(vector1,vector2):
return np.sqrt(np.sum(np.power(vector1-vector2,2)))
def randomcent(k):
n = dataset.shape[1]
cent = np.mat(np.zeros((k, n)))
for j in range(n):
lolimit = min(dataset[:, j])
datarange = float(max(dataset[:, j]) -lolimit)
cent[:,j] = np.array(lolimit + datarange * np.random.rand(k, 1))
return cent
def k_means(dataset,k,cent=randomcent):
m=dataset.shape[0]
centreplot=cent(k)
evaluate=np.mat(np.zeros([m,2]))
clusterchange = True
while clusterchange:
clusterchange = False
for i in range(m):
mineuclid = np.inf
mincentidx=-1
for j in range(k):
eucliJ=euclidean(dataset[i,], centreplot[j, :])
if eucliJ<mineuclid:
mincentidx=j
mineuclid=eucliJ
if evaluate[i, 0] != mincentidx:
clusterchange = True
evaluate[i, :] = [mincentidx, mineuclid **2]
for elfa in range(k):
ptsInClust = dataset[np.nonzero(evaluate[:, 0].getA() == elfa)[0]]
centreplot[elfa, :] = np.mean(ptsInClust, axis=0)
return evaluate
def affPoint(pt1,pt2,alpha):
return np.exp(-alpha*(np.linalg.norm(pt1-pt2)**2))
def calc_affinity(Mat):
Msize = Mat.shape[0]
affMat = np.zeros([Msize,Msize])
for i in range(Msize):
for j in range(Msize):
affMat[i,j] = affPoint(Mat[i,:],Mat[j,:],0.5)
return affMat
def calcLapM(affMat,mode=1):
if mode== 0:
LaplMat = np.diag(np.sum(affMat,axis=1)) - affMat #row
elif mode ==1:
D = np.diag(np.sqrt(np.sum(affMat, axis=1)))
LaplMat = np.dot(np.dot(np.linalg.inv(D), affMat), D)
return LaplMat
def calc_eigen(Mat):
eigVal,eigeVec = np.linalg.eig(Mat)
return eigeVec
def project(Mat,vec):
Msize = Mat.shape
ProjMat = np.dot(Mat,vec)
return ProjMat
def NormFeature(FMat):
rows = FMat.shape[0]
for k in range(rows):
FMat[k,:] = FMat[k,:] / np.sqrt(np.sum(FMat[k:]**2))
return FMat
def spectrum_clustring(Mat,mode):
affMat = calc_affinity(Mat)
LaplaseMat = calcLapM(affMat,mode)
eigVec = calc_eigen(LaplaseMat)
#prjval = project(Mat,eigVec)
return eigVec[:,:10]
if __name__=='__main__':
sampleNo = 200
mu1 = np.array([[1, -1]])
sigma1 = np.array([[1, 0], [0, 1]])
R1 = np.linalg.cholesky(sigma1)
s1 = np.dot(np.random.randn(sampleNo, 2), R1) + mu1
mu2 = np.array([[5.5, -4.5]])
sigma2 = np.array([[1, 0], [0, 1]])
R2 = np.linalg.cholesky(sigma2)
s2 = np.dot(np.random.randn(sampleNo, 2), R2) + mu2
mu3 = np.array([[1, 4]])
sigma3 = np.array([[1, 0], [0, 1]])
R3 = np.linalg.cholesky(sigma3)
s3 = np.dot(np.random.randn(sampleNo, 2), R3) + mu3
mu4 = np.array([[6, 4.5]])
sigma4 = np.array([[1, 0], [0, 1]])
    R4 = np.linalg.cholesky(sigma4)
'''
Created: 31 August 2016
Last Updated: 9 March 2018
<NAME>
<EMAIL>
Texas A&M University
<NAME>
<EMAIL>
University of Michigan, Ann Arbor, MI 48109 (former student)
-----
Much of the code was inherited from a plotting packaged developed by Bennett.
As it has merged with CyMiniAna, other features have been added to improve modularity.
'''
import os
import sys
import ROOT
from math import log10
from copy import deepcopy
from collections import OrderedDict
os.environ['PATH'] = os.environ['PATH']+':/usr/texbin'+':/Library/TeX/texbin' # LaTeX support
from hepPlotter import HepPlotter
import hepPlotterTools as hpt
import hepPlotterLabels as hpl
import numpy as np
import matplotlib
mpl_version = matplotlib.__version__
from matplotlib import rc
from matplotlib import pyplot as plt
from matplotlib import gridspec
from matplotlib.colors import LogNorm
from matplotlib.ticker import AutoMinorLocator
fontProperties = {}
if mpl_version.startswith('1.5') or mpl_version.startswith('2'):
fontProperties = {}
else:
fontProperties = {'family':'sans-serif','sans-serif':['Helvetica']}
class HepPlotterDataMC(HepPlotter):
def __init__(self):
"""
@param typeOfPlot Set the kind of plot: histogram or efficiency
"""
HepPlotter.__init__(self,"histogram",1)
self.stackSignal = False
self.plotLUMI = True
self.blind = False
self.ymaxScale = 1.3
self.drawStatUncertainty = True # draw stat uncertainty separately
self.drawSystUncertainty = False # draw syst uncertainty separately
self.drawStatSystUncertainty = False # draw stat+syst uncertainty
self.drawTotalUncertainty = False # draw a total uncertainty (that you can pass into the plotting)
self.totaluncertainty = None # total uncertainty to plot (pass this in directly as an array)
self.statColor = '#66b266' # '#336633' <- use alpha=0.5 with this
self.systColor = '#99cc99' # '#4d994d' <- use alpha=0.5 with this
self.statSystColor = '#cce5cc' # '#99cc99' <- use alpha=0.5 with this (=#c1e1c0)
self.legendLoc = 1
return
def initialize(self):
"""Initialize some things."""
HepPlotter.initialize(self)
self.sampleTypes = {'background':[],
'signal':[],
'data':[],
'systematic':[]} # systematic is for plotting single systematic uncertainties
self.labels = hpl.variable_labels()
self.sample_labels = hpl.sample_labels()
self.systematics = OrderedDict()
self.uncertainty_handles = []
self.uncertainty_labels = []
return
def Add(self,data,name='',weights=None,sampleType='background',file=None,systematics=None):
"""
Add histogram data for this figure.
@param data histogram or list/array of values to plot
@param name name of sample ('ttbar','wjets',etc.)
@param weights weights (if any) for histogram
@param sampleType Kind of sample ('background','signal', or 'data')
@param file The root file of histograms to get systematic uncertainties
@param systematics The total systematic uncertainty for a given sample
(histogram or array of bin content from histogram)
"""
self.names.append(name)
if isinstance(data,ROOT.TH1):
self.hists2plot[name] = hpt.hist2list(data,name=name,reBin=self.rebin)
else:
self.hists2plot[name] = hpt.data2list(data)
self.weights[name] = weights
self.sampleTypes[sampleType].append( name )
self.systematics[name] = systematics
return
def execute(self):
"""
Execute the plot. Pass arguments concerning the data in the following way:
return the Figure object to the user (they can edit it if they please)
"""
totpred = 0.0
fig = plt.figure()
gs = matplotlib.gridspec.GridSpec(2,1,height_ratios=[3,1],hspace=0.0)
self.ax1 = fig.add_subplot(gs[0])
self.ax2 = fig.add_subplot(gs[1],sharex=self.ax1)
plt.setp(self.ax1.get_xticklabels(),visible=False)
## -- Loop over samples for data plot [should only be one entry, but loop for protection]
if not self.blind:
totDataError=None
for name in self.sampleTypes['data']:
data = self.hists2plot[name]['data']
data = np.asarray([i if i else float('NaN') for i in data])
error = self.hists2plot[name]['error']
binning = self.hists2plot[name]['bins']
bin_center = self.hists2plot[name]['center']
bin_width = self.hists2plot[name]['width']
if totDataError is None:
totDataError = np.square(error)
else:
totDataError += np.square(error)
p,c,b = self.ax1.errorbar(bin_center,data,yerr=error,capsize=0,
fmt="o",mec="k",mfc="k",color="k",
label=self.sample_labels[name].label,zorder=100)
self.histograms[name] = data
            self.uncertainties[name] = np.sqrt(totDataError)
import h5py as h5
import numpy as np
from tqdm.autonotebook import tqdm #
import torch
from copy import deepcopy
import cv2
import imagesize
from functools import partial
from os import path
from data.data_conversions_3d import (
kinematic_tree,
rotmat2expmap,
euler_to_rotation_matrix,
normalization_stats,
revert_output_format,
apply_affine_transform,
camera_projection
)
from lib.utils import JointModel, t2p, t3p, t4p, t5p, make_joint_img
from data.base_dataset import BaseDataset
__data_split__ = {
"train": ["S1", "S5", "S6", "S7", "S8"],
"test": ["S9", "S11"],
}
__actionID_to_action__ = {
2: "Directions",
3: "Discussion",
4: "Eating",
5: "Greeting",
6: "Phoning",
7: "Posing",
8: "Purchases",
9: "Sitting",
10: "SittingDown",
11: "Smoking",
12: "TakingPhoto",
13: "Waiting",
14: "Walking",
15: "WalkingDog",
16: "WalkTogether",
}
class Human36mDataset(BaseDataset):
def __init__(
self, transforms, data_keys, seq_length, mode="train", **kwargs
):
assert mode in ["train", "test"]
self.small_joint_model = (
kwargs["small_joint_model"]
if "small_joint_model" in kwargs.keys()
else False
)
self.action_split_type = kwargs["action_split_type"] if "action_split_type" in kwargs else "default"
self.valid_keypoint_types = [
"angle_euler",
"norm_keypoints",
"keypoints_3d",
"keypoints_3d_univ",
"angle_expmap",
"angle_world_euler",
"angle_world_expmap",
"keypoints_3d_world",
]
if "keypoint_type" in kwargs:
assert kwargs["keypoint_type"] in self.valid_keypoint_types
self.keypoint_key = kwargs["keypoint_type"]
else:
self.keypoint_key = None
if self.small_joint_model:
joint_model = JointModel(
body=[25, 17, 6, 1],
right_lines=[(3, 2), (2, 1), (1, 25), (25, 26), (26, 30)],
left_lines=[(8, 7), (7, 6), (6, 17), (17, 18), (18, 22)],
head_lines=[],
face=[],
rshoulder=25,
lshoulder=17,
headup=15,
kps_to_use=[1, 2, 3, 6, 7, 8, 15, 17, 18, 22, 25, 26, 30],
total_relative_joints=[
[0, 1],
[1, 2],
[3, 4],
[4, 5],
[0, 3],
[3, 7],
[0, 10],
[7, 10],
[7, 8],
[8, 9],
[10, 11],
[11, 12],
],
right_hand=[],
left_hand=[],
head_part=[],
kp_to_joint=[
"r_hip",
"r_knee",
"r_foot",
"l_hip",
"l_knee",
"l_foot",
"head",
"l_shoulder",
"l_elbow",
"l_hand",
"r_shoulder",
"r_elbow",
"r_hand",
],
kps_to_change=[1, 2, 3, 6, 7, 8, 15, 17, 18, 22, 25, 26, 30],
kps_to_change_rel=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
norm_T=[
t3p,
t4p,
partial(t2p, ids=[25, 26]),
partial(t2p, ids=[26, 30]),
partial(t2p, ids=[17, 18]),
partial(t2p, ids=[18, 22]),
partial(t2p, ids=[1, 2]),
partial(t2p, ids=[2, 3]),
partial(t2p, ids=[6, 7]),
partial(t2p, ids=[7, 8]),
],
)
else:
# detailed joint model
joint_model = JointModel(
body=[1, 25, 13, 17, 6]
if self.keypoint_key != "keypoints_3d_world"
else [0, 14, 8, 11, 3],
right_lines=[(3, 2), (2, 1), (1, 25), (25, 26), (26, 27)]
if self.keypoint_key != "keypoints_3d_world"
else [(0, 1), (1, 2), (0, 14), (14, 15), (15, 16)],
left_lines=[(8, 7), (7, 6), (6, 17), (17, 18), (18, 19)]
if self.keypoint_key != "keypoints_3d_world"
else [(3, 4), (4, 5), (3, 11), (11, 12), (12, 13)],
head_lines=[(13, 14), (14, 15)]
if self.keypoint_key != "keypoints_3d_world"
else [(8, 9), (9, 10)],
face=[],
rshoulder=25,
lshoulder=17,
headup=15,
kps_to_use=[
1, # 0
2,
3, # 2
6,
7, # 4
8,
11, # 6
12,
13, # 8
14,
15, # 10
17,
18, # 12
19,
25, # 14
26,
27, # 16
],
total_relative_joints=[
[0, 1],
[1, 2],
[3, 4],
[4, 5],
[3, 6],
[0, 6],
[6, 7],
[7, 8],
[8, 9],
[9, 10],
[8, 11],
[8, 14],
[11, 12],
[12, 13],
[14, 15],
[15, 16],
],
right_hand=[],
left_hand=[],
head_part=[],
kp_to_joint=[
"r_hip",
"r_knee",
"r_foot",
"l_hip",
"l_knee",
"l_foot",
"pelvis",
"thorax",
"neck",
"nose",
"head",
"l_shoulder",
"l_elbow",
"l_wirst",
"r_shoulder",
"r_elbow",
"r_wrist",
],
kps_to_change=[],
kps_to_change_rel=[],
norm_T=[
t3p, # head
t5p, # body
partial(t2p, ids=[25, 26]), # right upper arm
partial(t2p, ids=[26, 30]), # right lower arm
partial(t2p, ids=[17, 18]), # left upper arm
partial(t2p, ids=[18, 22]), # left lower arm
partial(t2p, ids=[1, 2]), # right upper leg
partial(t2p, ids=[2, 3]), # right lower leg
partial(t2p, ids=[6, 7]), # left upper leg
partial(t2p, ids=[7, 8]), # left lower leg
],
)
self.debug = "debug" in kwargs.keys() and kwargs["debug"]
super().__init__(
transforms, mode, seq_length, data_keys, joint_model, **kwargs
)
self._output_dict.update(
{
"intrinsics": self._get_intrinsic_params,
"intrinsics_paired": partial(
self._get_intrinsic_params, use_map_ids=True
),
"extrinsics": self._get_extrinsic_params,
"extrinsics_paired": partial(
self._get_extrinsic_params, use_map_ids=True
),
}
)
# self.datapath = (
# "/export/scratch/compvis/datasets/human3.6M/processed/all/"
# )
# sequence matching only enabled when using 2d keypoints
if not (
self.keypoint_key == "norm_keypoints" or self.keypoint_key is None
):
self.prepare_seq_matching = False
self.use_matched_map_ids = True
self.train_synthesis = (
kwargs["train_synthesis"] if "train_synthesis" in kwargs else False
)
self.use_3d_for_stickman = (
kwargs["use_3d_for_stickman"]
if "use_3d_for_stickman" in kwargs
else False
)
if self.use_3d_for_stickman:
self._output_dict["stickman"] = self._get_stickman_from_3d
assert self.keypoint_key in [
"angle_world_expmap",
"keypoints_3d_world",
]
if self.keypoint_key == "keypoints_3d_world":
assert not self.small_joint_model
assert self.train_synthesis
self.label_type = "action"
self.single_app = "target_apperance" in kwargs
self.target_app = (
kwargs["target_appearance"] if self.single_app else None
)
# adjust action categories used for training
if "all_actions" in kwargs and kwargs["all_actions"]:
self.actions_to_use = list(__actionID_to_action__.values())
else:
self.actions_to_use = (
kwargs["actions_to_use"]
if "actions_to_use" in kwargs
and len(kwargs["actions_to_use"]) > 1
else [
"Greeting",
"Purchases",
"SittingDown",
"TakingPhoto",
"Walking",
"WalkingDog",
"WalkTogether",
]
)
self.actions_to_discard = (
kwargs["actions_to_discard"]
if "actions_to_discard" in kwargs
else None
)
self.prepare_seq_matching = (
kwargs["prepare_seq_matching"]
if "prepare_seq_matching" in kwargs
else False
)
# if self.motion_based_sampling:
# assert not self.prepare_seq_matching
# load data (default = h36m_full)
dataset_version = "dataset_version" in kwargs
dataset_version = (
kwargs["dataset_version"] if dataset_version else "h36m_full"
)
self._load_data(self.datapath, dataset_version)
# set appearance mapping ids
if mode == "train":
self.datadict["map_ids"] = np.zeros_like(
self.datadict["p_ids"], dtype=np.int
)
self.complete_datadict["map_ids"] = np.zeros_like(
self.complete_datadict["p_ids"], dtype=np.int
)
elif mode == "test":
ids = np.linspace(
0,
len(self.datadict["p_ids"]),
len(self.datadict["p_ids"]),
endpoint=False,
dtype=np.int64,
)
# if not self.single_app:
np.random.shuffle(ids)
self.datadict["map_ids"] = ids
ids_c = np.linspace(
0,
len(self.complete_datadict["p_ids"]),
len(self.complete_datadict["p_ids"]),
endpoint=False,
dtype=np.int64,
)
# if not self.single_app:
np.random.shuffle(ids_c)
self.complete_datadict["map_ids"] = ids_c
# self.transforms = transforms
# set unique appearance image for inference
if self.single_app:
# this contains the name of the image path of the target image
target_app_path = self._get_target_app_image()
target_id = np.where(self.datadict["img_paths"] == target_app_path)[
0
]
self.datadict["map_ids"] = np.full_like(
self.datadict["map_ids"], target_id
)
if self.use_matched_map_ids: # or self.motion_based_sampling:
self.matched_map_ids = np.zeros_like(self.datadict["map_ids"])
unique_aids = np.unique(self.datadict["action"])
for id in unique_aids:
valid_ids = np.nonzero(np.equal(self.datadict["action"], id))[0]
map_ids = deepcopy(valid_ids)
np.random.shuffle(map_ids)
self.matched_map_ids[valid_ids] = map_ids
# get sequence lengths per video
self._get_sequence_end_ids()
self._get_sequence_start_ids()
self._check_seq_len_and_frame_lag()
self.action_id_to_action = __actionID_to_action__
# if self.motion_based_sampling:
# # compute motion scores which are required for motion based sampling
# self._compute_motion_scores()
self.resample_map_ids()
# compute image_shapes
uniqe_vids, first_vid_occs = np.unique(
self.datadict["v_ids"], return_index=True
)
self.image_shapes = {
vid: imagesize.get(self.datadict["img_paths"][fo])
for (vid, fo) in zip(uniqe_vids, first_vid_occs)
}
print(
f"Constructed Human3.6m Dataset in {self.mode}-mode, which consists of {self.__len__()} samples."
)
def get_test_app_images(self) -> dict:
return {
"S1": path.join(
self.datapath,
"S1/Walking-2/imageSequence/54138969/img_000189.jpg",
),
"S5": path.join(
self.datapath,
"S5/Walking-1/imageSequence/55011271/img_000048.jpg",
),
"S6": path.join(
self.datapath,
"S6/Walking-2/imageSequence/55011271/img_000206.jpg",
),
"S7": path.join(
self.datapath,
"S7/Walking-2/imageSequence/58860488/img_000277.jpg",
),
"S8": path.join(
self.datapath,
"S8/Walking-1/imageSequence/60457274/img_000001.jpg",
),
"S9": path.join(
self.datapath,
"S9/Walking-1/imageSequence/58860488/img_000321.jpg",
),
"S11": path.join(
self.datapath,
"S11/Walking-2/imageSequence/58860488/img_000193.jpg",
),
}
def __len__(self):
return self.datadict["img_paths"].shape[0]
def _get_target_app_image(self):
        if self.target_app not in self.get_test_app_images():
raise TypeError(
f"The target appearance has to be a string object in {list(self.get_test_app_images().keys())}."
)
return self.get_test_app_images()[self.target_app]
### dataset loading ###
def _load_data(self, basepath, version):
if version == "h36m_small20":
self._load_h36m_small20(basepath)
elif version == "h36m_full":
self._load_h36m_full(basepath)
else:
raise Exception(f"Dataset version not valid.")
# human3.6m full dataset
def _load_h36m_full(self, basepath):
# load and convert meta data
# if self.normalize_keypoints:
attribute_mapping = {
"frame_path": "img_paths",
"pose_2d": "keypoints",
"subject": "p_ids",
"frame": "f_ids", # frame id, 0,....,len(seq)
"action": "action",
"subaction": "subaction",
"pose_normalized_2d": "norm_keypoints",
"camera": "camera_id",
# "angle_3d": "angle_euler",
"image_size": "image_size",
# "intrinsics": "intrinsics",
"intrinsics_univ": "intrinsics_univ",
"pose_3d": "keypoints_3d",
# "pose_3d_univ": "keypoints_3d_univ",
# "angle_3d_expmap": "angle_expmap",
# "angle_3d_world": "angle_world_euler",
"pose_3d_world": "keypoints_3d_world",
# "angle_expmap_world": "angle_world_expmap",
# "extrinsics": "extrinsics",
"extrinsics_univ": "extrinsics_univ",
}
h5_file = path.join(basepath, "annot_export.h5")
# else:
# attribute_mapping = {
# "frame_path": "img_paths",
# "keypoints": "keypoints",
# "subject": "p_ids",
# "fid": "f_ids", # frame id, 0,....,len(seq)
# # 'frame': 'frame', # original frame name, e.g. 0001.png
# "action": "action",
# "subaction": "subaction",
# }
# h5_file = path.join(basepath, "annot.h5")
with h5.File(h5_file, "r") as f:
for k in tqdm(f.keys(), desc="Constructing Human3.6m dataset..."):
if k not in self.valid_keypoint_types or k == self.keypoint_key:
# if self.debug:
# self.datadict[attribute_mapping[k]] = np.asarray(f[k])[:100000]
# else:
# self.datadict[attribute_mapping[k]] = np.asarray(f[k])
self.datadict[k] = np.asarray(f[k])
# load kinematic tree
if self.debug:
# load small dataset
n_samples_per_person = len(__actionID_to_action__) * 100
unique_pids, pids_first = np.unique(
self.datadict["p_ids"], return_index=True
)
unique_aids = np.unique(self.datadict["action"])
ids = np.zeros(
n_samples_per_person * unique_pids.shape[0], dtype=np.int
)
count = 0
for pid in tqdm(
unique_pids,
desc=f"Debug-mode: Generating small data set which contains {ids.shape[0]} samples",
):
for aid in unique_aids:
ids[count : count + 100] = np.nonzero(
np.logical_and(
self.datadict["action"] == aid,
self.datadict["p_ids"] == pid,
)
)[0][:100]
count += 100
self.datadict = {
key: self.datadict[key][ids]
for key in self.datadict
if self.datadict[key].size > 0
}
self.kinematic_tree = kinematic_tree()
# get unique person ids
self.person_ids = list(np.unique(self.datadict["p_ids"]))
# add base path to img_paths
base_path_tmp = "/" + path.join(
*self.datapath.split("/")[
0 : np.where(np.array(self.datapath.split("/")) == "processed")[
0
][0]
]
)
self.datadict["img_paths"] = [
path.join(base_path_tmp, p.decode("utf-8"))
for p in self.datadict["img_paths"]
]
self.datadict["img_paths"] = np.asarray(self.datadict["img_paths"])
# for k in self.datadict:
# self.datadict[k] = np.asarray(self.datadict[k])
self.datadict["f_ids"] = self.datadict["f_ids"] - 1
self.complete_datadict = deepcopy(self.datadict)
# reduce dataset size if world coordinates (angles or poses) are used
if "world" in self.keypoint_key and not self.train_synthesis:
target_cam_id = np.unique(self.datadict["camera_id"])[0]
t_sample_ids = self.datadict["camera_id"] == target_cam_id
for key in self.datadict:
if self.datadict[key].size > 0:
self.datadict[key] = self.datadict[key][t_sample_ids]
pre_vids = (
1000000 * self.datadict["camera_id"]
+ 10000 * self.datadict["action"]
+ 1000 * self.datadict["subaction"]
+ self.datadict["p_ids"]
)
vid_mapping = {u: i for i, u in enumerate(np.unique(pre_vids))}
self.datadict["v_ids"] = np.full_like(pre_vids, -1)
for key in vid_mapping:
self.datadict["v_ids"][pre_vids == key] = vid_mapping[key]
pre_vids_c = (
1000000 * self.complete_datadict["camera_id"]
+ 10000 * self.complete_datadict["action"]
+ 1000 * self.complete_datadict["subaction"]
+ self.complete_datadict["p_ids"]
)
vid_mapping = {u: i for i, u in enumerate(np.unique(pre_vids_c))}
self.complete_datadict["v_ids"] = np.full_like(pre_vids_c, -1)
for key in vid_mapping:
self.complete_datadict["v_ids"][pre_vids_c == key] = vid_mapping[
key
]
assert not np.any(self.datadict["v_ids"] == -1)
        if self.keypoint_key is not None and (
            "angle" in self.keypoint_key
            or self.keypoint_key == "keypoints_3d_world"
        ):
keypoints_shape = self.datadict[self.keypoint_key].shape
if self.keypoint_key == "keypoints_3d_world":
self.datadict[self.keypoint_key] = (
self.datadict[self.keypoint_key] / 1000.0
) # m to comply with test setting
self.datadict["extrinsics_univ"][:, :, -1] = (
self.datadict["extrinsics_univ"][:, :, -1] / 1000
) # translation mm to m
self.complete_datadict[self.keypoint_key] = (
self.complete_datadict[self.keypoint_key] / 1000.0
) # m to comply with test setting
self.complete_datadict["extrinsics_univ"][:, :, -1] = (
self.complete_datadict["extrinsics_univ"][:, :, -1] / 1000
)
self.datadict[self.keypoint_key] = self.datadict[
self.keypoint_key
][:, self.joint_model.kps_to_use]
self.complete_datadict[
self.keypoint_key
] = self.complete_datadict[self.keypoint_key][
:, self.joint_model.kps_to_use
]
self.datadict[self.keypoint_key] = self.datadict[
self.keypoint_key
].reshape(keypoints_shape[0], -1)
self.complete_datadict[
self.keypoint_key
] = self.complete_datadict[self.keypoint_key].reshape(
self.complete_datadict[self.keypoint_key].shape[0], -1
)
self.data_mean, self.data_std, self.dim_to_ignore, self.dim_to_use = normalization_stats(
self.datadict[self.keypoint_key]
)
# normalize keypoints
self.datadict[self.keypoint_key] = self.__normalize_poses(
self.datadict[self.keypoint_key]
)
self.complete_datadict[self.keypoint_key] = self.__normalize_poses(
self.complete_datadict[self.keypoint_key]
)
self.maxs_normalized = np.expand_dims(
np.expand_dims(
np.expand_dims(
np.asarray(
[
np.amax(
self.datadict[self.keypoint_key][:, ::3]
),
np.amax(
self.datadict[self.keypoint_key][:, 1::3]
),
np.amax(
self.datadict[self.keypoint_key][:, 2::3]
),
]
),
axis=0,
),
axis=0,
),
axis=0,
)
self.mins_normalized = np.expand_dims(
np.expand_dims(
np.expand_dims(
np.asarray(
[
np.amin(
self.datadict[self.keypoint_key][:, ::3]
),
np.amin(
self.datadict[self.keypoint_key][:, 1::3]
),
np.amin(
self.datadict[self.keypoint_key][:, 2::3]
),
]
),
axis=0,
),
axis=0,
),
axis=0,
)
# get data split
if self.overall_split:
print("Using overall datasplit....")
self._make_overall_split()
else:
split_indices = set(self._get_split_full()[self.mode])
for k, v in tqdm(
self.datadict.items(),
desc="Selecting desired subset of overall data...",
):
self.datadict[k] = np.asarray(
[p for i, p in enumerate(v) if i in split_indices]
)
# select or discard individual action categories
if (
self.actions_to_discard is not None
or self.actions_to_use is not None
):
if (
self.actions_to_discard is not None
and self.actions_to_use is not None
):
raise ValueError(
"Please only consider actions_to_use OR actions_to_discard"
)
if self.actions_to_discard is not None: # discard actions ...
indices_to_use = [
i
for i, e in enumerate(self.datadict["action"])
if __actionID_to_action__[e] not in self.actions_to_discard
]
elif self.actions_to_use is not None: # select actions ...
indices_to_use = [
i
for i, e in enumerate(self.datadict["action"])
if __actionID_to_action__[e] in self.actions_to_use
]
# filter
indices_to_use_set = set(indices_to_use)
for k, v in tqdm(
self.datadict.items(), desc="Filtering actions..."
):
self.datadict[k] = np.asarray(
[p for i, p in enumerate(v) if i in indices_to_use_set]
)
print(
f'Actions to be used: {[__actionID_to_action__[i] for i in np.unique(self.datadict["action"]).tolist()]}'
)
if self.prepare_seq_matching:
self.pose_encodings = self.pose_encodings[
np.asarray(indices_to_use)
]
# get sequences per action if required
if self.prepare_seq_matching:
seqs_per_action = dict()
v_id_curr = self.datadict["v_ids"][0]
curr_seq = []
for k, v_id in tqdm(
enumerate(self.datadict["v_ids"]),
desc="Get sequences per action...",
):
if v_id == v_id_curr:
curr_seq.append(k)
action_id = self.datadict["action"][k]
else:
if action_id not in seqs_per_action.keys():
seqs_per_action[action_id] = []
seqs_per_action[action_id].append(curr_seq)
curr_seq = []
v_id_curr = v_id
self.seqs_per_action = seqs_per_action
def _get_split_full(self):
if self.use_person_split:
_data_split_int_ = {"train": [1, 5, 6, 7, 8], "test": [9, 11]}
target_data = self.datadict["p_ids"]
else:
if self.action_split_type == "generalize_sitting":
_data_split_int_ = {
"train": [2, 4, 5, 6, 7, 8, 9, 11, 12, 13, 14, 16],
"test": [9, 8, 10],
}
elif self.action_split_type == "generalize_walking":
_data_split_int_ = {
"train": [2, 4, 5, 6, 7, 8, 9, 11, 12, 13, 14, 16],
"test": [14, 15, 16],
}
else:
_data_split_int_ = {
"train": [2, 4, 5, 6, 7, 8, 9, 11, 12, 13, 14, 16],
"test": [8, 12, 13, 14],
}
target_data = self.datadict["action"]
split_indices_train = [
i
for i, e in enumerate(target_data)
if e in _data_split_int_["train"]
]
split_indices_test = [
i
for i, e in enumerate(target_data)
if e in _data_split_int_["test"]
]
return {"train": split_indices_train, "test": split_indices_test}
def __normalize_poses(self, poses):
poses_out = np.divide((poses - self.data_mean), self.data_std)
poses_out = poses_out[:, self.dim_to_use]
return poses_out
def _get_stickman_from_3d(self, ids):
stickmans = []
for i in ids:
ang = self.datadict[self.keypoint_key][i]
intr = self.datadict["intrinsics_univ"][i]
extr = self.datadict["extrinsics_univ"][i]
imsize = self.datadict["image_size"][i]
ang = revert_output_format(
np.expand_dims(ang, axis=0),
self.data_mean,
self.data_std,
self.dim_to_ignore,
)
if self.keypoint_key != "keypoints_3d_world":
kps3d_w = convert_to_3d(ang, self.kinematic_tree, swap_yz=False)
else:
kps3d_w = ang.reshape(
ang.shape[0], len(self.joint_model.kps_to_use), 3
)
kps3d_c = apply_affine_transform(kps3d_w.squeeze(axis=0), extr)
kps2d = camera_projection(kps3d_c, intr)
scale_x = float(self.spatial_size) / imsize[0]
scale_y = float(self.spatial_size) / imsize[1]
kps_rescaled = np.multiply(
kps2d[:, :2], np.asarray([scale_x, scale_y], dtype=np.float)
)
stickman = make_joint_img(
[self.spatial_size, self.spatial_size],
kps_rescaled,
self.joint_model,
line_colors=self.line_colors,
scale_factor=self.stickman_scale,
)
stickmans.append(self.stickman_transforms(stickman))
return torch.stack(stickmans, dim=0).squeeze()
def _get_keypoints(self, ids, use_map_ids=False):
kpts = []
if use_map_ids:
ids = self._sample_valid_seq_ids(
[self.datadict["map_ids"][ids[0]], len(ids) - 1]
)
if self.keypoint_key is None:
key = "norm_keypoints"
else:
key = self.keypoint_key
for id in ids:
if self.keypoint_key == "keypoints_3d_world":
kps = self.datadict[key][id]
else:
kps = self.datadict[key][id]
# if key == "angles_3d":
# # convert to expmap format
# kps = self.__euler2expmap(kps)
if self.train_reg:
# keypoints need to be converted to normalized image coordinates
kps3d_w = revert_output_format(
np.expand_dims(kps, axis=0),
self.data_mean,
self.data_std,
self.dim_to_ignore,
)
kps3d_w = kps3d_w.reshape(kps3d_w.shape[0], len(self.joint_model.kps_to_use), 3)
extr = self.datadict["extrinsics_univ"][id]
intr = self.datadict["intrinsics_univ"][id]
imsize = self.datadict["image_size"][id]
# to camera
kps3d_c = apply_affine_transform(kps3d_w.squeeze(axis=0), extr)
# to image
kps2d = camera_projection(kps3d_c, intr)
# normalize
kps = np.divide(kps2d[:, :2], imsize)
kpts.append(kps)
kpts = np.stack(kpts, axis=0).squeeze()
# if self.keypoint_key == "keypoints_3d_world":
# kpts = kpts.reshape(kpts.shape[0],-1)
return kpts
def _get_intrinsic_params(self, ids, use_map_ids=False):
if use_map_ids:
ids = self._sample_valid_seq_ids(
[self.datadict["map_ids"][ids[0]], len(ids) - 1]
)
cam_params = self.datadict["intrinsics_univ"][ids].squeeze()
return cam_params
def _get_extrinsic_params(self, ids, use_map_ids=False):
if use_map_ids:
ids = self._sample_valid_seq_ids(
[self.datadict["map_ids"][ids[0]], len(ids) - 1]
)
extrs = self.datadict["extrinsics_univ"][ids]
return extrs
def _euler2expmap(angles, kin_tree):
"""
    Transforms an angle array from Euler to exponential-map (expmap) representation.
    :param angles: array of shape (n_samples, 78) with per-joint Euler angles
    :param kin_tree: kinematic tree dict providing "posInd", "rotInd" and "expmapInd"
    :return: array of shape (n_samples, 99) with expmap parameters
"""
black_list = {"sample": [], "joint_id": [], "input_angle": []}
posInd = kin_tree["posInd"]["ids"]
rotInd = kin_tree["rotInd"]
expmap_ind = kin_tree["expmapInd"]
# order = self.kinematic_tree["order"]
expmaps = np.zeros((angles.shape[0], 99), dtype=np.float)
for n, ang in enumerate(tqdm(angles)):
expmaps[n, posInd] = ang[posInd]
for i, pack in enumerate(zip(rotInd, expmap_ind)):
ri, ei = pack
# if ri = []
if not ri:
ea = np.asarray([0.0, 0.0, 0.0])
else:
ea = ang[ri]
R = euler_to_rotation_matrix(ea, deg=True)
try:
expmaps[n, ei] = rotmat2expmap(R)
except ValueError as e:
print("Error : Quaternion not unit quaternion!")
black_list["sample"].append(n)
black_list["joint_id"].append(i)
black_list["input_angle"].append(ea)
return expmaps # black_list
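# Illustrative sketch (not called anywhere in this file): how a single joint's
# Euler angles become an expmap vector, mirroring one iteration of the loop
# above; it assumes euler_to_rotation_matrix and rotmat2expmap are importable
# exactly as they are used in _euler2expmap.
def _example_single_joint_expmap(euler_angles_deg=(30.0, 0.0, 45.0)):
    ea = np.asarray(euler_angles_deg)
    R = euler_to_rotation_matrix(ea, deg=True)  # 3x3 rotation matrix
    return rotmat2expmap(R)  # 3-vector: rotation axis scaled by the rotation angle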
def eval_black_list(dataset, save_path):
from data.data_conversions_3d import fkl, camera_projection
import pandas as pd
from lib.utils import add_joints_to_img
parent = dataset.kinematic_tree["parent"]
offset = dataset.kinematic_tree["offset"]
rotInd = dataset.kinematic_tree["rotInd"]
expmapInd = dataset.kinematic_tree["expmapInd"]
posInd = dataset.kinematic_tree["posInd"]["ids"]
eulers = dataset.datadict["angle_euler"]
df = pd.read_csv("black_list_expmap.csv")
corrupted_sample_ids = np.asarray(df["sample"])
corrupted_poses = eulers[corrupted_sample_ids]
healthy_ones = eulers[corrupted_sample_ids + 1]
img_paths = dataset.datadict["img_paths"][corrupted_sample_ids]
img_paths_valid = dataset.datadict["img_paths"][corrupted_sample_ids + 1]
camera_parameters = dataset.datadict["intrinsics_univ"]
for i, tup in enumerate(
tqdm(
zip(
img_paths,
corrupted_poses,
camera_parameters,
healthy_ones,
img_paths_valid,
)
)
):
# cam params are the same for valid and invalid samples
img_p, pose, cam_params, pose_valid, img_valid_p = tup
as_exp = _euler2expmap(
np.expand_dims(pose, axis=0), dataset.kinematic_tree
)
as_exp_valid = _euler2expmap(
np.expand_dims(pose_valid, axis=0), dataset.kinematic_tree
)
as_kps = fkl(
np.squeeze(as_exp, axis=0),
parent,
offset,
rotInd,
expmapInd,
posInd,
)
as_kps = as_kps.reshape((32, 3))
as_kps_valid = fkl(
np.squeeze(as_exp_valid, axis=0),
parent,
offset,
rotInd,
expmapInd,
posInd,
)
as_kps_valid = as_kps_valid.reshape((32, 3))
projected = camera_projection(as_kps, cam_params)
projected_valid = camera_projection(as_kps_valid, cam_params)
img = cv2.imread(img_p)
img_valid = cv2.imread(img_valid_p)
# img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = add_joints_to_img(
img,
projected[dataset.joint_model.kps_to_use],
dataset.joint_model.total_relative_joints,
)
img_valid = add_joints_to_img(
img_valid,
projected_valid[dataset.joint_model.kps_to_use],
dataset.joint_model.total_relative_joints,
color_kps=[[0, 255, 0]],
color_joints=[[0, 255, 0]],
)
cv2.imwrite(
path.join(save_path, f"corrupted_sample_nr{i + 1}.jpg"), img
)
cv2.imwrite(
path.join(save_path, f"valid_sample_nr{i + 1}.jpg"), img_valid
)
def get_intrinsic_mat(params):
"""
    Return the linear (pinhole) projection matrix without distortion parameters.
    :param params: intrinsic parameters ordered as [f_x, c_x, f_y, c_y]
    :return: 3x3 camera matrix K
"""
return np.asarray(
[
[params[0], 0.0, params[1]],
[0.0, params[2], params[3]],
[0.0, 0.0, 1.0],
]
)
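# Illustrative sketch (not called anywhere in this file): projecting a single
# 3d point given in camera coordinates with the intrinsic matrix built above;
# the parameter ordering [f_x, c_x, f_y, c_y] follows get_intrinsic_mat.
def _example_pinhole_projection():
    K = get_intrinsic_mat([1000.0, 500.0, 1000.0, 500.0])
    point_cam = np.asarray([0.1, -0.2, 2.0])  # x, y, z in the camera frame
    uvw = K @ point_cam
    return uvw[:2] / uvw[2]  # pixel coordinates: [550., 400.]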
def estimate_extrinsics(dataset):
"""
Estimate Extrinsic parameters from world to cam point correspondences
:param dataset:
:return:
"""
# extrinsics are matrices M of shape (3,4) for every datapoint --> M = [R,t] where R=rotation matrix and t = translation vector
camera_extrinsics_univ = np.zeros(
(dataset.datadict["keypoints_3d_univ"].shape[0], 3, 4), dtype=np.float
)
camera_extrinsics = np.zeros(
(dataset.datadict["keypoints_3d"].shape[0], 3, 4), dtype=np.float
)
for i, vid in enumerate(
tqdm(
np.unique(dataset.datadict["v_ids"]),
desc="Estimate extrinsics per video",
)
):
ids = dataset.datadict["v_ids"] == vid
kps3d_c = dataset.datadict["keypoints_3d"][ids]
kps3d_c_univ = dataset.datadict["keypoints_3d_univ"][ids]
kps3d_w = dataset.datadict["keypoints_3d_world"][ids]
kps3d_c = np.reshape(kps3d_c, (-1, 3))
kps3d_c_univ = np.reshape(kps3d_c_univ, (-1, 3))
kps3d_w = np.reshape(kps3d_w, (-1, 3))
_, M, _ = cv2.estimateAffine3D(
kps3d_w, kps3d_c, ransacThreshold=10, confidence=0.999
)
_, M_univ, _ = cv2.estimateAffine3D(
kps3d_w, kps3d_c_univ, ransacThreshold=10, confidence=0.999
)
# returned values correspond to [R,t]^T
camera_extrinsics[ids] = M
camera_extrinsics_univ[ids] = M_univ
return camera_extrinsics_univ, camera_extrinsics
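# Illustrative sketch (not called anywhere in this file): applying one of the
# estimated (3, 4) extrinsic matrices M = [R | t] to world-space keypoints to
# obtain camera-space keypoints; this is presumably what apply_affine_transform
# does elsewhere in the repository (an assumption based on its usage above).
def _example_apply_extrinsics(M, kps3d_w):
    # kps3d_w: array of shape (n_keypoints, 3) in world coordinates
    return kps3d_w @ M[:, :3].T + M[:, 3]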
if __name__ == "__main__":
import torchvision.transforms as tt
from os import path, makedirs
from PIL import Image
from torch.utils.data import DataLoader, WeightedRandomSampler
from lib.logging import create_video_3d
from skvideo import io as vio
from data.data_conversions_3d import (
fkl,
camera_projection,
apply_affine_transform,
revert_output_format,
convert_to_3d,
)
from lib.utils import parallel_data_prefetch, add_joints_to_img
import yaml
save_path = "./test_data/human36m_full"
makedirs(save_path, exist_ok=True)
with open("../config/test_datasets.yaml", "r") as f:
config = yaml.load(f, Loader=yaml.FullLoader)
if config["general"]["mode"] == "visualize_projection":
transforms = tt.Compose([tt.ToTensor()])
print("preparing dataset...")
dataset = Human36mDataset(
transforms,
data_keys=["keypoints"], # "kp_change", "keypoints"
mode="test",
**config["data"]
)
# complete data as euler angles
angles_euler = dataset.datadict["angle_euler"]
parent = dataset.kinematic_tree["parent"]
offset = dataset.kinematic_tree["offset"]
rotInd = dataset.kinematic_tree["rotInd"]
expmapInd = dataset.kinematic_tree["expmapInd"]
posInd = dataset.kinematic_tree["posInd"]["ids"]
# visualize as a test
time_points = np.random.choice(
np.arange(0, len(dataset) - 50), 5, replace=False
)
for nr, i in enumerate(tqdm(time_points, leave=False)):
#
            frame_ids = np.arange(i, i + 50)
# Parallel test script for initializing problem with preexisting array
# Standard modules
import sys
# Other modules
import logging
import numpy as np
import h5py
# Athena modules
import scripts.utils.athena as athena
sys.path.insert(0, '../../vis/python')
import athena_read # noqa
athena_read.check_nan_flag = True
logger = logging.getLogger('athena' + __name__[7:]) # set logger name based on module
# Parameters
filename_input = 'initial_data.hdf5'
filename_output = 'from_array.cons.00000.athdf'
dataset_cons = 'cons'
dataset_b1 = 'b1'
dataset_b2 = 'b2'
dataset_b3 = 'b3'
nb1 = 4
nx1 = 4
nx2 = 6
nx3 = 4
gamma = 5.0/3.0
num_ranks = 3
# Prepare Athena++
def prepare(**kwargs):
logger.debug('Running test ' + __name__)
# Configure and compile code
athena.configure('b',
'mpi',
'hdf5', 'h5double',
prob='from_array',
**kwargs)
athena.make()
# Calculate initial field values
b1 = np.empty((nx3, nx2, nb1 * nx1 + 1))
b1[...] = np.arange(nx2)[None, :, None] - np.arange(nx3)[:, None, None]
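    # Broadcasting gives b1[k, j, i] = j - k: the initial face-centered field
    # varies linearly with the x2 and x3 cell indices and is constant along x1.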
    b1_input = np.empty((nb1, nx3, nx2, nx1 + 1))
"""
Keras implementation of CapsNet in Hinton's paper Dynamic Routing Between Capsules.
The current version may only work with the TensorFlow backend; re-writing it as plain TF code would be straightforward.
Adapting it to other backends should be easy, but I have not tested this.
Usage:
python capsulenet.py
python capsulenet.py --epochs 50
python capsulenet.py --epochs 50 --routings 3
... ...
Result:
        Validation accuracy > 99.5% after 20 epochs. Converges to 99.66% after 50 epochs.
About 110 seconds per epoch on a single Nvidia GTX 1070 GPU card
"""
import keras.backend as K
def cosine_distance(vests):
x, y = vests
x = K.l2_normalize(x, axis=-1)
y = K.l2_normalize(y, axis=-1)
return -K.mean(x * y, axis=-1, keepdims=True)
def cos_dist_output_shape(shapes):
shape1, shape2 = shapes
return (shape1[0],1)
def cosine_similarity(y_true, y_pred):
distance = Lambda(cosine_distance, output_shape=cos_dist_output_shape)([y_pred, y_true])
return distance
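# Plain-numpy sketch (not used by the model) of what the cosine_distance Lambda
# above computes: with L2-normalised rows, the negative *mean* of the elementwise
# product equals -cos(theta) / dim rather than -cos(theta) itself.
def _cosine_distance_numpy(x, y):
    import numpy as np
    xn = x / np.linalg.norm(x, axis=-1, keepdims=True)
    yn = y / np.linalg.norm(y, axis=-1, keepdims=True)
    return -np.mean(xn * yn, axis=-1, keepdims=True)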
import os
import cv2
import glob
import ntpath
import random
import numpy as np
import pandas as pd
from PIL import Image
import tensorflow as tf
from keras import layers, models, optimizers, losses
from keras.layers import Lambda
import matplotlib.pyplot as plt
from utils import combine_images
from keras.utils import to_categorical
from capsulelayers import CapsuleLayer, PrimaryCap, Length, Mask
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
K.set_image_data_format('channels_last')
mean = -1 # Dummy Values
log_variance = -1 # Dummy Values
# Change this dataset
dataset_path = "../Dataset/"
input_image_height, input_image_width = (28, 28)
def CapsNet(input_shape, n_class, routings):
"""
A Capsule Network on MNIST.
:param input_shape: data shape, 3d, [width, height, channels]
:param n_class: number of classes
:param routings: number of routing iterations
:return: Two Keras Models, the first one used for training, and the second one for evaluation.
`eval_model` can also be used for training.
"""
x = layers.Input(shape=input_shape)
# Layer 1: Just a conventional Conv2D layer
conv1 = layers.Conv2D(filters=256, kernel_size=9, strides=1, padding='valid', activation='relu', name='conv1')(x)
# Layer 2: Conv2D layer with `squash` activation, then reshape to [None, num_capsule, dim_capsule]
primarycaps = PrimaryCap(conv1, dim_capsule=8, n_channels=32, kernel_size=9, strides=2, padding='valid')
# Layer 3: Capsule layer. Routing algorithm works here.
global mean
mean = CapsuleLayer(num_capsule=n_class, dim_capsule=16, routings=routings,
name='mean')(primarycaps)
global log_variance
log_variance = CapsuleLayer(num_capsule=n_class, dim_capsule=16, routings=routings,
name='log_variance')(primarycaps)
# reparameterization trick
# instead of sampling from Q(z|X), sample eps = N(0,I)
# z = z_mean + sqrt(variance)*eps
def reparam(args):
"""
Reparameterization trick by sampling from an isotropic unit Gaussian.
# Arguments:
args (tensor): mean and log of variance of Q(z|X)
# Returns:
z (tensor): sampled latent vector
"""
mean, log_variance = args
batch = K.shape(mean)[0]
dim = K.int_shape(mean)[1]
# by default, random_normal has mean=0 and std=1.0
epsilon = K.random_normal(shape=K.shape(mean))
return (mean + K.exp(0.5 * log_variance) * epsilon)
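    # Numerical intuition: with mean = 0.5 and log_variance = 0.0 each sampled z is
    # 0.5 + 1.0 * eps, i.e. a draw from N(0.5, 1); gradients can flow through mean
    # and log_variance because the randomness lives only in eps.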
# use reparameterization trick to push the sampling out as input
# note that "output_shape" isn't necessary with the TensorFlow backend
z = Lambda(reparam, name='z')([mean, log_variance])
# Layer 4: This is an auxiliary layer to replace each capsule with its length. Just to match the true label's shape.
# If using tensorflow, this will not be necessary. :)
out_caps = Length(name='capsnet')(z)
# Hierarchy output section
#----------------------------------------------------------------------------------------------------------------------------
y = layers.Input(shape=((n_class),))
masked_by_y = Mask()([z, y]) # The true label is used to mask the output of capsule layer. For training
masked = Mask()(z) # Mask using the capsule with maximal length. For prediction
longest_vector_train = masked_by_y
longest_vector_eval = masked
# Keep adding hierarchies
# Face hierarchy
face = layers.Dense(units=9,activation='relu',name='face')
face_train = face(longest_vector_train)
face_eval = face(longest_vector_eval)
face_output = layers.Dense(units=1,activation='softmax',name='face_output')
face_output_train = face_output(face_train)
face_output_eval = face_output(face_eval)
eyes = layers.Dense(units=1,activation='relu',name='eyes')
eyes_train = eyes(face_train)
eyes_eval = eyes(face_eval)
mouth = layers.Dense(units=1,activation='relu',name='mouth')
mouth_train = mouth(face_train)
mouth_eval = mouth(face_eval)
snout = layers.Dense(units=1,activation='relu',name='snout')
snout_train = snout(face_train)
snout_eval = snout(face_eval)
ears = layers.Dense(units=1,activation='relu',name='ears')
ears_train = ears(face_train)
ears_eval = ears(face_eval)
whiskers = layers.Dense(units=1,activation='relu',name='whiskers')
whiskers_train = whiskers(face_train)
whiskers_eval = whiskers(face_eval)
nose = layers.Dense(units=1,activation='relu',name='nose') # NEW
nose_train = nose(face_train)
nose_eval = nose(face_eval)
teeth = layers.Dense(units=1,activation='relu',name='teeth') # NEW
teeth_train = teeth(face_train)
teeth_eval = teeth(face_eval)
beak = layers.Dense(units=1,activation='relu',name='beak') # NEW
beak_train = beak(face_train)
beak_eval = beak(face_eval)
tongue = layers.Dense(units=1,activation='relu',name='tongue') # NEW
tongue_train = tongue(face_train)
tongue_eval = tongue(face_eval)
# Body hierarchy
body = layers.Dense(units=12,activation='relu',name='body')
body_train = body(longest_vector_train)
body_eval = body(longest_vector_eval)
body_output = layers.Dense(units=1,activation='softmax',name='body_output')
body_output_train = body_output(body_train)
body_output_eval = body_output(body_eval)
wings = layers.Dense(units=1,activation='relu',name='wings') # NEW
wings_train = wings(body_train)
wings_eval = wings(body_eval)
paws = layers.Dense(units=1,activation='relu',name='paws')
paws_train = paws(body_train)
paws_eval = paws(body_eval)
tail = layers.Dense(units=1,activation='relu',name='tail')
tail_train = tail(body_train)
tail_eval = tail(body_eval)
legs = layers.Dense(units=1,activation='relu',name='legs') # NEW
legs_train = legs(body_train)
legs_eval = legs(body_eval)
surface = layers.Dense(units=1,activation='relu',name='surface') # NEW
surface_train = surface(body_train)
surface_eval = surface(body_eval)
arm_rest = layers.Dense(units=1,activation='relu',name='arm_rest') # NEW
arm_rest_train = arm_rest(body_train)
arm_rest_eval = arm_rest(body_eval)
base = layers.Dense(units=1,activation='relu',name='base') # NEW
base_train = base(body_train)
base_eval = base(body_eval)
pillows = layers.Dense(units=1,activation='relu',name='pillows') # NEW
pillows_train = pillows(body_train)
pillows_eval = pillows(body_eval)
cushions = layers.Dense(units=1,activation='relu',name='cushions') # NEW
cushions_train = cushions(body_train)
cushions_eval = cushions(body_eval)
drawer = layers.Dense(units=1,activation='relu',name='drawer') # NEW
drawer_train = drawer(body_train)
drawer_eval = drawer(body_eval)
knob = layers.Dense(units=1,activation='relu',name='knob') # NEW
knob_train = knob(body_train)
knob_eval = knob(body_eval)
mattress = layers.Dense(units=1,activation='relu',name='mattress') # NEW
mattress_train = mattress(body_train)
mattress_eval = mattress(body_eval)
# Colour hierarchy
colour = layers.Dense(units=8,activation='relu',name='colour')
colour_train = colour(longest_vector_train)
colour_eval = colour(longest_vector_eval)
colour_output = layers.Dense(units=1,activation='softmax',name='colour_output')
colour_output_train = colour_output(colour_train)
colour_output_eval = colour_output(colour_eval)
brown = layers.Dense(units=1,activation='relu',name='brown')
brown_train = brown(colour_train)
brown_eval = brown(colour_eval)
black = layers.Dense(units=1,activation='relu',name='black')
black_train = black(colour_train)
black_eval = black(colour_eval)
grey = layers.Dense(units=1,activation='relu',name='grey')
grey_train = grey(colour_train)
grey_eval = grey(colour_eval)
white = layers.Dense(units=1,activation='relu',name='white')
white_train = white(colour_train)
white_eval = white(colour_eval)
purple = layers.Dense(units=1,activation='relu',name='purple') # NEW
purple_train = purple(colour_train)
purple_eval = purple(colour_eval)
pink = layers.Dense(units=1,activation='relu',name='pink') # NEW
pink_train = pink(colour_train)
pink_eval = pink(colour_eval)
yellow = layers.Dense(units=1,activation='relu',name='yellow') # NEW
yellow_train = yellow(colour_train)
yellow_eval = yellow(colour_eval)
turqoise = layers.Dense(units=1,activation='relu',name='turqoise') # NEW
turqoise_train = turqoise(colour_train)
turqoise_eval = turqoise(colour_eval)
# Alternate / Unknown hierarchy
unknown = layers.Dense(units=1,activation='relu',name='unknown') # NEW
unknown_train = unknown(longest_vector_train)
unknown_eval = unknown(longest_vector_eval)
# Now, build both the models
hierarchy_train_model = models.Model([x, y], [out_caps,face_output_train,eyes_train,mouth_train,snout_train,ears_train,whiskers_train,nose_train,teeth_train,beak_train,tongue_train,body_output_train,wings_train,paws_train,tail_train,legs_train,surface_train,arm_rest_train,base_train,pillows_train,cushions_train,drawer_train,knob_train,mattress_train,colour_output_train,brown_train,black_train,grey_train,white_train,purple_train,pink_train,yellow_train,turqoise_train,unknown_train])
hierarchy_eval_model = models.Model(x, [out_caps,face_output_eval,eyes_eval,mouth_eval,snout_eval,ears_eval,whiskers_eval,nose_eval,teeth_eval,beak_eval,tongue_eval,body_output_eval,wings_eval,paws_eval,tail_eval,legs_eval,surface_eval,arm_rest_eval,base_eval,pillows_eval,cushions_eval,drawer_eval,knob_eval,mattress_eval,colour_output_eval,brown_eval,black_eval,grey_eval,white_eval,purple_eval,pink_eval,yellow_eval,turqoise_eval,unknown_eval])
#------------------------------------------------------------------------------------------------------------------------------
# Decoder network.
y = layers.Input(shape=(n_class,))
masked_by_y = Mask()([z, y]) # The true label is used to mask the output of capsule layer. For training
masked = Mask()(z) # Mask using the capsule with maximal length. For prediction
# Shared Decoder model in training and prediction
decoder = models.Sequential(name='decoder')
decoder.add(layers.Dense(256, activation='relu', input_dim=16*n_class))
decoder.add(layers.Dense(512, activation='relu'))
    decoder.add(layers.Dense(np.prod(input_shape), activation='sigmoid'))
# Copyright 2017 <NAME> (<EMAIL>)
# Copyright 2021 <NAME>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from math import pi
import numpy as np
class NFOV:
def __init__(self, height=400, width=800, FOV=None):
self.FOV = FOV or [0.45, 0.45]
self.PI = pi
self.PI_2 = pi * 0.5
self.PI2 = pi * 2.0
self.height = height
self.width = width
self.screen_points = self._get_screen_img()
def _get_coord_rad_for_point(self, center_point):
return (center_point * 2 - 1) * np.array([self.PI, self.PI_2])
def _get_coord_rad(self):
return (self.screen_points * 2 - 1) * np.array([self.PI, self.PI_2]) * (
np.ones(self.screen_points.shape) * self.FOV)
def _get_screen_img(self):
xx, yy = np.meshgrid(np.linspace(0, 1, self.width), np.linspace(0, 1, self.height))
return np.array([xx.ravel(), yy.ravel()]).T
def _calcSphericaltoGnomonic(self, convertedScreenCoord):
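        """
        Inverse gnomonic projection: for plane coordinates (x, y),
        rho = sqrt(x^2 + y^2) and c = arctan(rho); together with the view-centre
        longitude/latitude stored in self.cp this yields the spherical lon/lat of
        every screen pixel, rescaled at the end to the [0, 1] range.
        """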
x = convertedScreenCoord.T[0]
y = convertedScreenCoord.T[1]
rou = np.sqrt(x ** 2 + y ** 2)
c = np.arctan(rou)
sin_c = np.sin(c)
cos_c = np.cos(c)
lat = np.arcsin(cos_c * np.sin(self.cp[1]) + (y * sin_c * np.cos(self.cp[1])) / rou)
lon = self.cp[0] + np.arctan2(x * sin_c, rou * np.cos(self.cp[1]) * cos_c - y * np.sin(self.cp[1]) * sin_c)
lat = (lat / self.PI_2 + 1.) * 0.5
lon = (lon / self.PI + 1.) * 0.5
return np.array([lon, lat]).T
def _bilinear_interpolation(self, screen_coord, dt):
uf = np.mod(screen_coord.T[0], 1) * self.frame_width # long - width
vf = np.mod(screen_coord.T[1], 1) * self.frame_height # lat - height
x0 = np.floor(uf).astype(int) # coord of pixel to bottom left
y0 = np.floor(vf).astype(int)
x2 = np.add(x0, np.ones(uf.shape).astype(int)) # coords of pixel to top right
y2 = np.add(y0, np.ones(vf.shape).astype(int))
base_y0 = np.multiply(y0, self.frame_width)
base_y2 = np.multiply(y2, self.frame_width)
A_idx = np.add(base_y0, x0)
B_idx = np.add(base_y2, x0)
C_idx = np.add(base_y0, x2)
        D_idx = np.add(base_y2, x2)
import cv2
import os
import numpy as np
import torch
import argparse
import sys
import scipy.io as io
from shutil import copyfile
import itertools
from reconstruction import NMFCRenderer
def mkdirs(paths):
for path in paths:
if not os.path.exists(path):
os.makedirs(path)
def save_results(nmfcs, eye_landmarks, source_images_paths, args):
assert len(nmfcs) == len(source_images_paths), \
'Rendered NMFC and original source sequence have different lengths.'
if eye_landmarks is not None:
assert len(eye_landmarks) == len(source_images_paths), \
'Adapted eye landmark sequence and original source sequence have different lengths.'
save_nmfcs_dir = os.path.join(args.dataset_path, 'test',
'source_nmfcs', args.target_id + '_' + args.source_id)
save_images_dir = os.path.join(args.dataset_path, 'test',
'source_images', args.target_id + '_' + args.source_id)
mkdirs([save_nmfcs_dir, save_images_dir])
if eye_landmarks is not None:
# Save them as 70 landmarks, even they are actually only eye landmarks.
save_landmarks70_dir = os.path.join(args.dataset_path, 'test',
'source_landmarks70', args.target_id + '_' + args.source_id)
mkdirs([save_landmarks70_dir])
for i, source_images_path in enumerate(source_images_paths):
frame_name = os.path.basename(source_images_path)
copyfile(source_images_path, os.path.join(save_images_dir, frame_name))
cv2.imwrite(os.path.join(save_nmfcs_dir, frame_name), nmfcs[i])
if eye_landmarks is not None:
np.savetxt(os.path.join(save_landmarks70_dir, os.path.splitext(frame_name)[0] + '.txt'), eye_landmarks[i])
def smoothen_signal(S, window_size=15):
left_p = window_size // 2
right_p = window_size // 2 if window_size % 2 == 1 else window_size // 2 - 1
window = np.ones(int(window_size))/float(window_size) # kernel-filter
S = np.array(S)
# Padding
left_padding = np.stack([S[0]] * left_p, axis=0)
right_padding = np.stack([S[-1]] * right_p, axis=0)
S_padded = np.concatenate([left_padding, S, right_padding])
if len(S_padded.shape) == 1:
S = np.convolve(S_padded, window, 'valid')
else:
for coord in range(S_padded.shape[1]):
S[:, coord] = np.convolve(S_padded[:, coord], window, 'valid')
return S
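# Illustrative sketch (not called anywhere in this script): the moving-average
# smoothing above applied to a noisy ramp with a small window; the output has
# the same length as the input because the signal is edge-padded first.
def _example_smoothen_signal():
    noisy_ramp = np.linspace(0.0, 1.0, 50) + np.random.normal(0.0, 0.05, 50)
    smoothed = smoothen_signal(noisy_ramp, window_size=5)
    return noisy_ramp, smoothed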
def compute_cam_params(s_cam_params, t_cam_params, args):
cam_params = s_cam_params
if not args.no_scale_or_translation_adaptation:
mean_S_target = np.mean([params[0] for params in t_cam_params])
mean_S_source = np.mean([params[0] for params in s_cam_params])
S = [params[0] * (mean_S_target / mean_S_source)
for params in s_cam_params]
# Smoothen scale
S = smoothen_signal(S)
# Normalised Translation for source and target.
nT_target = [params[2] / params[0] for params in t_cam_params]
nT_source = [params[2] / params[0] for params in s_cam_params]
cam_params = [(s, params[1], s * t) \
for s, params, t in zip(S, s_cam_params, nT_source)]
if not args.no_translation_adaptation:
mean_nT_target = np.mean(nT_target, axis=0)
mean_nT_source = np.mean(nT_source, axis=0)
std_nT_target = np.std(nT_target, axis=0)
# Allow camera translation two standard deviation away from the one on target video.
upper_limit = mean_nT_target + std_nT_target * 2
lower_limit = mean_nT_target - std_nT_target * 2
nT = [np.maximum(np.minimum(t - mean_nT_source + mean_nT_target,
upper_limit), lower_limit) for t in nT_source]
# Smoothen translation
nT = smoothen_signal(nT)
cam_params = [(s, params[1], s * t) \
for s, params, t in zip(S, s_cam_params, nT)]
return cam_params
def read_params(params_type, path, speaker_id):
if params_type == 'id':
path = os.path.join(path, speaker_id + '.txt')
if os.path.exists(path):
return np.loadtxt(path), None
if params_type == 'exp' or params_type == 'cam':
txt_files = []
params = []
parts = os.listdir(path)
base_part = os.path.join(path, speaker_id)
for part in sorted(parts):
dir = os.path.join(path, part)
if base_part in dir:
txt_files.extend([os.path.join(dir, txt) \
for txt in sorted(os.listdir(dir))])
for f in txt_files:
if os.path.exists(f):
if params_type == 'exp':
params.append(np.loadtxt(f))
else:
S = np.loadtxt(f, max_rows=1)
R = np.loadtxt(f, skiprows=1, max_rows=3)
T = np.loadtxt(f, skiprows=4)
params.append((S, R, T))
return params, txt_files
def read_eye_landmarks(path, speaker_id):
txt_files = []
eye_landmarks_left = []
eye_landmarks_right = []
parts = os.listdir(path)
base_part = os.path.join(path, speaker_id)
for part in sorted(parts):
dir = os.path.join(path, part)
if base_part in dir:
txt_files.extend([os.path.join(dir, txt) \
for txt in sorted(os.listdir(dir))])
for f in txt_files:
if os.path.exists(f):
left = np.concatenate([np.loadtxt(f)[36:42], np.loadtxt(f)[68:69]], axis=0)
right = np.concatenate([np.loadtxt(f)[42:48], np.loadtxt(f)[69:70]], axis=0)
eye_landmarks_left.append(left) # Left eye
eye_landmarks_right.append(right) # Right eye
return [eye_landmarks_left, eye_landmarks_right]
def search_eye_centres(nmfcs, prev_arg_mins=None):
points = [np.array([192, 180, 81]), # Left eye NMFC code
np.array([192, 180, 171])] # Right eye NMFC code
ret = []
arg_mins = []
if prev_arg_mins is None:
prev_arg_mins = [None, None]
for point, prev_arg_min in zip(points, prev_arg_mins):
centres = []
for n, nmfc in enumerate(nmfcs):
min_dst = 99999999
if prev_arg_min is None:
lim_i_l, lim_i_h = 0, nmfc.shape[0]-1
lim_j_l, lim_j_h = 0, nmfc.shape[1]-1
else:
lim_i_l, lim_i_h = prev_arg_min[0]-20, prev_arg_min[0]+20
lim_j_l, lim_j_h = prev_arg_min[1]-20, prev_arg_min[1]+20
# Check bounds
lim_i_l = min(max(lim_i_l, 0), nmfc.shape[0]-1)
lim_i_h = min(max(lim_i_h, 0), nmfc.shape[0]-1)
lim_j_l = min(max(lim_j_l, 0), nmfc.shape[1]-1)
lim_j_h = min(max(lim_j_h, 0), nmfc.shape[1]-1)
for i in range(lim_i_l, lim_i_h):
for j in range(lim_j_l, lim_j_h):
dst = sum(abs(nmfc[i,j,:] - point))
if dst < min_dst:
min_dst = dst
arg_min = np.array([i, j])
            centres.append(np.flip(arg_min))  # flip, since landmarks are (width, height)
prev_arg_min = arg_min
arg_mins.append(arg_min)
ret.append(centres)
return ret, arg_mins
def smoothen_eye_landmarks(eye_landmarks, window_size=1):
window_size = max(min(window_size, len(eye_landmarks)), 1)
left_p = window_size // 2
right_p = window_size // 2 if window_size % 2 == 1 else window_size // 2 - 1
window = np.ones(int(window_size))/float(window_size) # kernel-filter
eye_landmarks = np.array(eye_landmarks)
# Padding
left_padding = np.stack([eye_landmarks[0]] * left_p, axis=0) if left_p > 0 else None
right_padding = np.stack([eye_landmarks[-1]] * right_p, axis=0) if right_p > 0 else None
eye_landmarks_padded = eye_landmarks
if left_padding is not None:
eye_landmarks_padded = np.concatenate([left_padding, eye_landmarks_padded])
if right_padding is not None:
eye_landmarks_padded = np.concatenate([eye_landmarks_padded, right_padding])
for land in range(eye_landmarks.shape[1]):
for coord in range(eye_landmarks.shape[2]):
eye_landmarks[:, land, coord] = np.convolve(eye_landmarks_padded[:, land, coord], window, 'valid')
return eye_landmarks
def compute_eye_landmarks_ratio(eye_landmarks_source, eye_landmarks_target):
dsts = []
for eye_landmarks in [eye_landmarks_source, eye_landmarks_target]:
each_eye_dsts = []
for each_eye_landmarks in eye_landmarks:
dst = 0
for each_eye_landmark in each_eye_landmarks:
eye_width = np.linalg.norm(each_eye_landmark[0,:] - each_eye_landmark[3,:])
dst += (abs(each_eye_landmark[1, 1] - each_eye_landmark[4, 1]) + \
abs(each_eye_landmark[2, 1] - each_eye_landmark[5, 1])) #/ eye_width
each_eye_dsts.append(dst / len(each_eye_landmarks))
dsts.append(each_eye_dsts)
left_eye_ratio = dsts[1][0] / dsts[0][0]
right_eye_ratio = dsts[1][1] / dsts[0][1]
return [left_eye_ratio, right_eye_ratio]
def adapt_eye_landmarks(eye_landmarks, eye_centres, eye_ratios, s_cam_params, cam_params):
new_eye_landmarks = []
ratios = [cam_param[0] / s_cam_param[0]
for s_cam_param, cam_param in zip(s_cam_params, cam_params)]
for each_eye_landmarks, each_eye_centres, each_eye_ratios in zip(eye_landmarks, eye_centres, eye_ratios):
new_each_eye_landmarks = []
for each_eye_landmark, each_eye_centre, ratio in zip(each_eye_landmarks, each_eye_centres, ratios):
            mean = np.mean(each_eye_landmark, axis=0, keepdims=True)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 28 17:38:35 2020
@author: ls
"""
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
#plt.rcParams['font.family']='Times New Roman'
plt.rcParams['font.size']=12
import sys
fimo=sys.argv[1]
dataframe=pd.read_csv(fimo,sep='\t')
columns=dataframe.columns.tolist()
ids=dataframe[columns[0]]
target_motifs=[i for i in ids]
def get_ex_no_dataframe(dataframe,target_motifs):
result_dic={}
for target_motif in target_motifs:
data_targets=dataframe.loc[dataframe['#pattern name']==target_motif]
data_targets_ex=data_targets.loc[data_targets['sequence name'].isin(list(set([i for i in data_targets['sequence name'] if 'ae' in i])))]
data_targets_no=data_targets.loc[data_targets['sequence name'].isin(list(set([i for i in data_targets['sequence name'] if 'aw' in i])))]
result_dic[target_motif]=(data_targets_ex,data_targets_no)
return result_dic
def calculate_ratio(data_targets_ex):
dic={}
coord=[(i,j) for i,j in zip(data_targets_ex['start'],data_targets_ex['stop'])]
gene_numbers_ex=len(list(set([i for i in data_targets_ex['sequence name']])))
if gene_numbers_ex==0:
for i in range(1,1001):
dic[i]=0
else:
for i in range(1,1001):
dic[i]=0
for z in coord:
if (i>=z[0])&(i<=z[1]):
dic[i]+=1
else:
continue
dic[i]=dic[i]/gene_numbers_ex
return dic
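# Illustrative sketch (not executed): calculate_ratio on a tiny hand-made fimo
# table. Two overlapping hits on one unique gene give a ratio of 2.0 at the
# position covered by both hits.
def _example_calculate_ratio():
    toy = pd.DataFrame({'sequence name': ['gene1ae', 'gene1ae'],
                        'start': [10, 11],
                        'stop': [12, 13]})
    return calculate_ratio(toy)[11]  # -> 2.0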
#def
target_motifs_dics=get_ex_no_dataframe(dataframe,target_motifs)
for motif in target_motifs_dics.keys():
print('fetching genes with %s'%motif)
dat=target_motifs_dics[motif][0]
with open(fimo+'_%s.txt'%motif,'w') as file:
for i in dat['sequence name']:
file.write(i[:-2]+'\n')
def get_data_visualize(target_motifs__dics):
visualization_result={}
for motif in target_motifs__dics.keys():
dic_ex=calculate_ratio(target_motifs__dics[motif][0])
dic_no=calculate_ratio(target_motifs__dics[motif][1])
visualization_result[motif]=(dic_ex,dic_no)
return visualization_result
visualization_results=get_data_visualize(target_motifs_dics)
for i in visualization_results.keys():
data_ex,data_no=visualization_results[i][0],visualization_results[i][1]
plt.figure(figsize=(6.68,3),dpi=600)
    plt.plot(np.arange(1, 1001), [data_ex[i] * 100 for i in np.arange(1, 1001)])
# -*- coding: utf-8 -*-
"""
Last update: 30/11/2021
<NAME>
"""
import os
try:
import numpy as np
from scipy.interpolate import griddata
except ModuleNotFoundError as error:
if error.name in ('numpy','scipy'):
print('\n'+error.msg+'\nPlease use PIP to install: "pip install '+error.name+'"\n')
def roundup(x):
"""Return x (int,float) rounded up to nearest 100.0"""
return np.ceil(x/100)*100
def tth2Q(tth,lambd):
Q = 4*np.pi*np.sin(np.radians(tth/2))/lambd
return(Q)
def Q2tth(Q,lambd):
    tth = 2*np.degrees(np.arcsin(Q*lambd/(4*np.pi)))
return(tth)
def Q2d(Q,lambd):
d = 2*np.pi/Q
return d
def d2tth(d,lambd):
tth = np.degrees(np.arcsin(lambd/(2*d)))*2
return tth
def tth2d(tth,lambd):
d = lambd/(np.sin(np.radians(tth)/2) * 2)
return d
def r2tth(x,dist):
"""Convert a numpy array of azimuthal radii to 2 theta"""
return np.arctan(x/dist)*180/np.pi
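# Illustrative sketch (not called anywhere in this module): round-tripping a
# scattering angle through Q, using a Cu K-alpha wavelength of 1.5406 Angstrom
# as an example value (an assumption, not something this module prescribes).
def _example_tth_Q_roundtrip():
    lambd = 1.5406
    Q = tth2Q(20.0, lambd)      # ~1.42 reciprocal Angstrom
    d = Q2d(Q, lambd)           # ~4.4 Angstrom
    return Q2tth(Q, lambd), d   # the first value recovers ~20.0 degrees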
def findFiles(path,extension='.dat'):
files = [path+'/'+f for f in os.listdir(path) if f.endswith(extension)]
files.sort()
return files
def commonName(names):
"""return common name from list of names"""
name = names[0]
for n in names[1:]:
while not name in n:
name = name[:-1]
# strip common endings
for s in [' ','(','.','/','\\','_']:
name = name.strip(s)
return name
def scaleArray(a,scale='linear',retain_sign=False):
if scale == 'linear' or np.all(a==0.0):
return a
sign = np.sign(a)
a = np.abs(a)
if scale == 'log10':
        a = np.log10(a, where=a != 0)
#!/usr/bin/python
# -*- coding: utf-8 -*-
################################################################################
#
# RMG - Reaction Mechanism Generator
#
# Copyright (c) 2002-2017 Prof. <NAME> (<EMAIL>),
# Prof. <NAME> (<EMAIL>) and the RMG Team (<EMAIL>)
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the 'Software'),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
################################################################################
"""
This module contains functionality for working with kinetics family functional
groups, including support for using group additivity to estimate rate
coefficients.
"""
import logging
import math
import numpy
from copy import deepcopy
from rmgpy.data.base import Database, Entry, Group, LogicNode, getAllCombinations, makeLogicNode
from rmgpy.kinetics import Arrhenius, ArrheniusEP, KineticsData
from rmgpy.species import Species
from rmgpy.quantity import constants
from rmgpy.exceptions import KineticsError, UndeterminableKineticsError, DatabaseError
################################################################################
class KineticsGroups(Database):
"""
A class for working with an RMG kinetics family group additivity values.
"""
def __init__(self,
entries=None,
top=None,
label='',
name='',
shortDesc='',
longDesc='',
forwardTemplate=None,
forwardRecipe=None,
reverseTemplate=None,
reverseRecipe=None,
forbidden=None
):
Database.__init__(self, entries, top, label, name, shortDesc, longDesc)
self.numReactants = 0
def __repr__(self):
return '<KineticsGroups "{0}">'.format(self.label)
def loadEntry(self, index, label, group, kinetics, reference=None, referenceType='', shortDesc='', longDesc='',nodalDistance=None):
"""
nodalDistance is the distance between a given entry and its parent specified by a float
"""
if group[0:3].upper() == 'OR{' or group[0:4].upper() == 'AND{' or group[0:7].upper() == 'NOT OR{' or group[0:8].upper() == 'NOT AND{':
item = makeLogicNode(group)
else:
item = Group().fromAdjacencyList(group)
if label in self.entries:
raise DatabaseError("Duplicate group name {label} found in kinetics groups for {family} family.".format(label=label,family=self.label))
self.entries[label] = Entry(
index = index,
label = label,
item = item,
data = kinetics,
reference = reference,
referenceType = referenceType,
shortDesc = shortDesc,
longDesc = longDesc.strip(),
nodalDistance=nodalDistance
)
def getReactionTemplate(self, reaction):
"""
For a given `reaction` with properly-labeled :class:`Molecule` objects
as the reactants, determine the most specific nodes in the tree that
describe the reaction.
"""
# Get forward reaction template and remove any duplicates
forwardTemplate = self.top[:]
temporary = []
symmetricTree = False
for entry in forwardTemplate:
if entry not in temporary:
temporary.append(entry)
else:
# duplicate node found at top of tree
# eg. R_recombination: ['Y_rad', 'Y_rad']
assert len(forwardTemplate)==2 , 'Can currently only do symmetric trees with nothing else in them'
symmetricTree = True
forwardTemplate = temporary
# Descend reactant trees as far as possible
template = []
for entry in forwardTemplate:
# entry is a top-level node that should be matched
group = entry.item
# Identify the atom labels in a group if it is not a logical node
atomList = []
if not isinstance(entry.item, LogicNode):
atomList = group.getLabeledAtoms()
for reactant in reaction.reactants:
if isinstance(reactant, Species):
reactant = reactant.molecule[0]
# Match labeled atoms
# Check that this reactant has each of the atom labels in this group. If it is a LogicNode, the atomList is empty and
# it will proceed directly to the descendTree step.
if not all([reactant.containsLabeledAtom(label) for label in atomList]):
continue # don't try to match this structure - the atoms aren't there!
# Match structures
atoms = reactant.getLabeledAtoms()
# Descend the tree, making sure to match atomlabels exactly using strict = True
matched_node = self.descendTree(reactant, atoms, root=entry, strict=True)
if matched_node is not None:
template.append(matched_node)
#else:
# logging.warning("Couldn't find match for {0} in {1}".format(entry,atomList))
# logging.warning(reactant.toAdjacencyList())
# Get fresh templates (with duplicate nodes back in)
forwardTemplate = self.top[:]
if self.label.lower().startswith('r_recombination'):
forwardTemplate.append(forwardTemplate[0])
# Check that we were able to match the template.
# template is a list of the actual matched nodes
# forwardTemplate is a list of the top level nodes that should be matched
if len(template) != len(forwardTemplate):
# print 'len(template):', len(template)
# print 'len(forwardTemplate):', len(forwardTemplate)
msg = 'Unable to find matching template for reaction {0} in reaction family {1}.'.format(str(reaction), str(self))
msg += 'Trying to match {0} but matched {1}'.format(str(forwardTemplate),str(template))
# print 'reactants'
# for reactant in reaction.reactants:
# print reactant.toAdjacencyList() + '\n'
# print 'products'
# for product in reaction.products:
# print product.toAdjacencyList() + '\n'
raise UndeterminableKineticsError(reaction, message=msg)
return template
def estimateKineticsUsingGroupAdditivity(self, template, referenceKinetics, degeneracy=1):
"""
Determine the appropriate kinetics for a reaction with the given
`template` using group additivity.
Returns just the kinetics.
"""
# Start with the generic kinetics of the top-level nodes
# Make a copy so we don't modify the original
kinetics = deepcopy(referenceKinetics)
# Now add in more specific corrections if possible
for node in template:
entry = node
comment_line = "Matched node "
while entry.data is None and entry not in self.top:
# Keep climbing tree until you find a (non-top) node with data.
comment_line += "{0} >> ".format(entry.label)
entry = entry.parent
if entry.data is not None and entry not in self.top:
kinetics = self.__multiplyKineticsData(kinetics, entry.data)
comment_line += "{0} ({1})".format(entry.label, entry.longDesc.split('\n')[0])
elif entry in self.top:
comment_line += "{0} (Top node)".format(entry.label)
kinetics.comment += comment_line + '\n'
# Also include reaction-path degeneracy
kinetics.changeRate(degeneracy)
kinetics.comment += "Multiplied by reaction path degeneracy {0}".format(degeneracy)
return kinetics
def __multiplyKineticsData(self, kinetics1, kinetics2):
"""
Multiply two kinetics objects `kinetics1` and `kinetics2` of the same
class together, returning their product as a new kinetics object of
that class. Currently this only works for :class:`KineticsData`, :class:`ArrheniusEP` or
:class:`Arrhenius` objects.
"""
if isinstance(kinetics1, KineticsData) and isinstance(kinetics2, KineticsData):
if len(kinetics1.Tdata.value_si) != len(kinetics2.Tdata.value_si) or any([T1 != T2 for T1, T2 in zip(kinetics1.Tdata.value_si, kinetics2.Tdata.value_si)]):
raise KineticsError('Cannot multiply these KineticsData objects because they have different temperature points.')
kinetics = KineticsData(
Tdata = (kinetics1.Tdata.value, kinetics1.Tdata.units),
kdata = (kinetics1.kdata.value * kinetics2.kdata.value, kinetics1.kdata.units),
)
elif isinstance(kinetics1, Arrhenius) and isinstance(kinetics2, Arrhenius):
assert kinetics1.A.units == kinetics2.A.units
assert kinetics1.T0.units == kinetics2.T0.units
assert kinetics1.T0.value == kinetics2.T0.value
kinetics = Arrhenius(
A = (kinetics1.A.value * kinetics2.A.value, kinetics1.A.units),
n = (kinetics1.n.value + kinetics2.n.value, kinetics1.n.units),
Ea = (kinetics1.Ea.value_si + kinetics2.Ea.value_si, 'J/mol'),
T0 = (kinetics1.T0.value, kinetics1.T0.units),
)
elif isinstance(kinetics1,ArrheniusEP) and isinstance(kinetics2,ArrheniusEP):
assert kinetics1.A.units == kinetics2.A.units
kinetics = ArrheniusEP(
A = (kinetics1.A.value * kinetics2.A.value, kinetics1.A.units),
n = (kinetics1.n.value + kinetics2.n.value, kinetics1.n.units),
alpha = kinetics1.alpha+kinetics2.alpha,
E0 = (kinetics1.E0.value_si + kinetics2.E0.value_si, 'J/mol'),
)
elif isinstance(kinetics1,Arrhenius) and isinstance(kinetics2,ArrheniusEP):
assert kinetics1.A.units == kinetics2.A.units
assert kinetics1.T0.units == 'K'
assert kinetics1.T0.value == 1.0
kinetics = ArrheniusEP(
A = (kinetics1.A.value * kinetics2.A.value, kinetics1.A.units),
n = (kinetics1.n.value + kinetics2.n.value, kinetics1.n.units),
alpha = kinetics2.alpha,
E0 = (kinetics1.Ea.value_si + kinetics2.E0.value_si, 'J/mol'),
)
elif isinstance(kinetics1,ArrheniusEP) and isinstance(kinetics2,Arrhenius):
assert kinetics1.A.units == kinetics2.A.units
assert 'K' == kinetics2.T0.units
assert 1.0 == kinetics2.T0.value
kinetics = ArrheniusEP(
A = (kinetics1.A.value * kinetics2.A.value, kinetics1.A.units),
n = (kinetics1.n.value + kinetics2.n.value, kinetics1.n.units),
alpha = kinetics1.alpha,
E0 = (kinetics1.E0.value_si + kinetics2.Ea.value_si, 'J/mol'),
)
else:
raise KineticsError('Unable to multiply kinetics types "{0}" and "{1}".'.format(kinetics1.__class__, kinetics2.__class__))
if kinetics1.Tmin is not None and kinetics2.Tmin is not None:
kinetics.Tmin = kinetics1.Tmin if kinetics1.Tmin.value_si > kinetics2.Tmin.value_si else kinetics2.Tmin
elif kinetics1.Tmin is not None and kinetics2.Tmin is None:
kinetics.Tmin = kinetics1.Tmin
elif kinetics1.Tmin is None and kinetics2.Tmin is not None:
kinetics.Tmin = kinetics2.Tmin
if kinetics1.Tmax is not None and kinetics2.Tmax is not None:
kinetics.Tmax = kinetics1.Tmax if kinetics1.Tmax.value_si < kinetics2.Tmax.value_si else kinetics2.Tmax
elif kinetics1.Tmax is not None and kinetics2.Tmax is None:
kinetics.Tmax = kinetics1.Tmax
elif kinetics1.Tmax is None and kinetics2.Tmax is not None:
kinetics.Tmax = kinetics2.Tmax
if kinetics1.Pmin is not None and kinetics2.Pmin is not None:
kinetics.Pmin = kinetics1.Pmin if kinetics1.Pmin.value_si > kinetics2.Pmin.value_si else kinetics2.Pmin
elif kinetics1.Pmin is not None and kinetics2.Pmin is None:
kinetics.Pmin = kinetics1.Pmin
elif kinetics1.Pmin is None and kinetics2.Pmin is not None:
kinetics.Pmin = kinetics2.Pmin
if kinetics1.Pmax is not None and kinetics2.Pmax is not None:
kinetics.Pmax = kinetics1.Pmax if kinetics1.Pmax.value_si < kinetics2.Pmax.value_si else kinetics2.Pmax
elif kinetics1.Pmax is not None and kinetics2.Pmax is None:
kinetics.Pmax = kinetics1.Pmax
elif kinetics1.Pmax is None and kinetics2.Pmax is not None:
kinetics.Pmax = kinetics2.Pmax
if kinetics1.comment == '': kinetics.comment = kinetics2.comment
elif kinetics2.comment == '': kinetics.comment = kinetics1.comment
else: kinetics.comment = kinetics1.comment + ' + ' + kinetics2.comment
return kinetics
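# Illustrative check of the multiplication rule above (a sketch, not original code):
# for two Arrhenius expressions k_i = A_i*(T/T0)^n_i*exp(-Ea_i/(R*T)) with matching
# A units and T0 = 1 K, the product has A = A1*A2, n = n1+n2, Ea = Ea1+Ea2, e.g.
#
#   k1 = Arrhenius(A=(1e13, 's^-1'), n=0.5, Ea=(10.0, 'kJ/mol'), T0=(1.0, 'K'))
#   k2 = Arrhenius(A=(2.0, 's^-1'), n=0.0, Ea=(5.0, 'kJ/mol'), T0=(1.0, 'K'))
#   # multiplying them yields A = 2e13, n = 0.5, Ea = 15 kJ/mol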
def generateGroupAdditivityValues(self, trainingSet, kunits, method='Arrhenius'):
"""
Generate the group additivity values using the given `trainingSet`,
a list of 2-tuples of the form ``(template, kinetics)``. You must also
specify the `kunits` for the family and the `method` to use when
generating the group values. Returns ``True`` if the group values have
changed significantly since the last time they were fitted, or ``False``
otherwise.
"""
# keep track of previous values so we can detect if they change
old_entries = dict()
for label,entry in self.entries.items():
if entry.data is not None:
old_entries[label] = entry.data
# Determine a complete list of the entries in the database, sorted as in the tree
groupEntries = self.top[:]
for entry in self.top:
groupEntries.extend(self.descendants(entry))
# Determine a unique list of the groups we will be able to fit parameters for
groupList = []
for template, kinetics in trainingSet:
for group in template:
if group not in self.top:
groupList.append(group)
groupList.extend(self.ancestors(group)[:-1])
groupList = list(set(groupList))
groupList.sort(key=lambda x: x.index)
if method == 'KineticsData':
# Fit a discrete set of k(T) data points by training against k(T) data
Tdata = numpy.array([300,400,500,600,800,1000,1500,2000])
# Initialize dictionaries of fitted group values and uncertainties
groupValues = {}; groupUncertainties = {}; groupCounts = {}; groupComments = {}
for entry in groupEntries:
groupValues[entry] = []
groupUncertainties[entry] = []
groupCounts[entry] = []
groupComments[entry] = set()
# Generate least-squares matrix and vector
A = []; b = []
kdata = []
for template, kinetics in trainingSet:
if isinstance(kinetics, (Arrhenius, KineticsData)):
kd = [kinetics.getRateCoefficient(T) for T in Tdata]
elif isinstance(kinetics, ArrheniusEP):
kd = [kinetics.getRateCoefficient(T, 0) for T in Tdata]
else:
raise Exception('Unexpected kinetics model of type {0} for template {1}.'.format(kinetics.__class__, template))
kdata.append(kd)
# Create every combination of each group and its ancestors with each other
combinations = []
for group in template:
groups = [group]; groups.extend(self.ancestors(group))
combinations.append(groups)
combinations = getAllCombinations(combinations)
# Add a row to the matrix for each combination
for groups in combinations:
Arow = [1 if group in groups else 0 for group in groupList]
Arow.append(1)
brow = [math.log10(k) for k in kd]
A.append(Arow); b.append(brow)
for group in groups:
groupComments[group].add("{0!s}".format(template))
if len(A) == 0:
logging.warning('Unable to fit kinetics groups for family "{0}"; no valid data found.'.format(self.label))
return
A = numpy.array(A)
b = numpy.array(b)
kdata = numpy.array(kdata)
x, residues, rank, s = numpy.linalg.lstsq(A, b)
for t, T in enumerate(Tdata):
# Determine error in each group (on log scale)
stdev = numpy.zeros(len(groupList)+1, numpy.float64)
count = numpy.zeros(len(groupList)+1, int)
for index in range(len(trainingSet)):
template, kinetics = trainingSet[index]
kd = math.log10(kdata[index,t])
km = x[-1,t] + sum([x[groupList.index(group),t] for group in template if group in groupList])
variance = (km - kd)**2
for group in template:
groups = [group]; groups.extend(self.ancestors(group))
for g in groups:
if g not in self.top:
ind = groupList.index(g)
stdev[ind] += variance
count[ind] += 1
stdev[-1] += variance
count[-1] += 1
stdev = numpy.sqrt(stdev / (count - 1))
import scipy.stats
ci = scipy.stats.t.ppf(0.975, count - 1) * stdev
# Update dictionaries of fitted group values and uncertainties
for entry in groupEntries:
if entry == self.top[0]:
groupValues[entry].append(10**x[-1,t])
groupUncertainties[entry].append(10**ci[-1])
groupCounts[entry].append(count[-1])
elif entry in groupList:
index = groupList.index(entry)
groupValues[entry].append(10**x[index,t])
groupUncertainties[entry].append(10**ci[index])
groupCounts[entry].append(count[index])
else:
groupValues[entry] = None
groupUncertainties[entry] = None
groupCounts[entry] = None
# Store the fitted group values and uncertainties on the associated entries
for entry in groupEntries:
if groupValues[entry] is not None:
entry.data = KineticsData(Tdata=(Tdata,"K"), kdata=(groupValues[entry],kunits))
if not any(numpy.isnan(numpy.array(groupUncertainties[entry]))):
entry.data.kdata.uncertainties = numpy.array(groupUncertainties[entry])
entry.data.kdata.uncertaintyType = '*|/'
entry.shortDesc = "Group additive kinetics."
entry.longDesc = "Fitted to {0} rates.\n".format(groupCounts[entry])
entry.longDesc += "\n".join(groupComments[entry])
else:
entry.data = None
elif method == 'Arrhenius':
# Fit Arrhenius parameters (A, n, Ea) by training against k(T) data
Tdata = numpy.array([300,400,500,600,800,1000,1500,2000])
logTdata = numpy.log(Tdata)
Tinvdata = 1000. / (constants.R * Tdata)
A = []; b = []
kdata = []
for template, kinetics in trainingSet:
if isinstance(kinetics, (Arrhenius, KineticsData)):
kd = [kinetics.getRateCoefficient(T) for T in Tdata]
elif isinstance(kinetics, ArrheniusEP):
kd = [kinetics.getRateCoefficient(T, 0) for T in Tdata]
else:
raise Exception('Unexpected kinetics model of type {0} for template {1}.'.format(kinetics.__class__, template))
kdata.append(kd)
# Create every combination of each group and its ancestors with each other
combinations = []
for group in template:
groups = [group]; groups.extend(self.ancestors(group))
combinations.append(groups)
combinations = getAllCombinations(combinations)
# Add a row to the matrix for each combination at each temperature
for t, T in enumerate(Tdata):
logT = logTdata[t]
Tinv = Tinvdata[t]
for groups in combinations:
Arow = []
for group in groupList:
if group in groups:
Arow.extend([1,logT,-Tinv])
else:
Arow.extend([0,0,0])
Arow.extend([1,logT,-Tinv])
brow = math.log(kd[t])
A.append(Arow); b.append(brow)
if len(A) == 0:
logging.warning('Unable to fit kinetics groups for family "{0}"; no valid data found.'.format(self.label))
return
A = numpy.array(A)
b = numpy.array(b)
kdata = numpy.array(kdata)
x, residues, rank, s = numpy.linalg.lstsq(A, b)
import cv2
import sys, os, glob, re
import json
from os.path import join, dirname, abspath, realpath, isdir
from os import makedirs
import numpy as np
from shutil import rmtree
from ipdb import set_trace
from .bench_utils.bbox_helper import rect_2_cxy_wh, cxy_wh_2_rect
def center_error(rects1, rects2):
"""Center error.
Args:
rects1 (numpy.ndarray): An N x 4 numpy array, each line represent a rectangle
(left, top, width, height).
rects2 (numpy.ndarray): An N x 4 numpy array, each line represent a rectangle
(left, top, width, height).
"""
centers1 = rects1[..., :2] + (rects1[..., 2:] - 1) / 2
centers2 = rects2[..., :2] + (rects2[..., 2:] - 1) / 2
errors = np.sqrt(np.sum(np.power(centers1 - centers2, 2), axis=-1))
return errors
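# Hedged usage sketch (not in the original file): both arguments are N x 4 arrays of
# (left, top, width, height) boxes, for example:
#
#   import numpy as np
#   gt = np.array([[10., 10., 20., 20.]])
#   pred = np.array([[13., 14., 20., 20.]])
#   center_error(gt, pred)  # -> array([5.]), since the centers differ by (3, 4)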
def _intersection(rects1, rects2):
r"""Rectangle intersection.
Args:
rects1 (numpy.ndarray): An N x 4 numpy array, each line represent a rectangle
(left, top, width, height).
rects2 (numpy.ndarray): An N x 4 numpy array, each line represent a rectangle
(left, top, width, height).
"""
assert rects1.shape == rects2.shape
x1 = np.maximum(rects1[..., 0], rects2[..., 0])
y1 = np.maximum(rects1[..., 1], rects2[..., 1])
x2 = np.minimum(rects1[..., 0] + rects1[..., 2],
rects2[..., 0] + rects2[..., 2])
y2 = np.minimum(rects1[..., 1] + rects1[..., 3],
rects2[..., 1] + rects2[..., 3])
w = np.maximum(x2 - x1, 0)
h = np.maximum(y2 - y1, 0)
return np.stack([x1, y1, w, h]).T
def rect_iou(rects1, rects2, bound=None):
r"""Intersection over union.
Args:
rects1 (numpy.ndarray): An N x 4 numpy array, each line represent a rectangle
(left, top, width, height).
rects2 (numpy.ndarray): An N x 4 numpy array, each line represent a rectangle
(left, top, width, height).
bound (numpy.ndarray): A 4 dimensional array, denotes the bound
(min_left, min_top, max_width, max_height) for ``rects1`` and ``rects2``.
"""
assert rects1.shape == rects2.shape
if bound is not None:
# bounded rects1
rects1[:, 0] = np.clip(rects1[:, 0], 0, bound[0])
rects1[:, 1] = np.clip(rects1[:, 1], 0, bound[1])
rects1[:, 2] = np.clip(rects1[:, 2], 0, bound[0] - rects1[:, 0])
rects1[:, 3] = np.clip(rects1[:, 3], 0, bound[1] - rects1[:, 1])
# bounded rects2
rects2[:, 0] = np.clip(rects2[:, 0], 0, bound[0])
rects2[:, 1] = np.clip(rects2[:, 1], 0, bound[1])
rects2[:, 2] = np.clip(rects2[:, 2], 0, bound[0] - rects2[:, 0])
rects2[:, 3] = np.clip(rects2[:, 3], 0, bound[1] - rects2[:, 1])
rects_inter = _intersection(rects1, rects2)
areas_inter = np.prod(rects_inter[..., 2:], axis=-1)
areas1 = np.prod(rects1[..., 2:], axis=-1)
areas2 = np.prod(rects2[..., 2:], axis=-1)
areas_union = areas1 + areas2 - areas_inter
eps = np.finfo(float).eps
ious = areas_inter / (areas_union + eps)
ious = np.clip(ious, 0.0, 1.0)
return ious
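# Hedged usage sketch (not in the original file): identical boxes give an IoU of 1,
# while two 10x10 boxes shifted by half a width overlap with IoU 1/3:
#
#   import numpy as np
#   a = np.array([[0., 0., 10., 10.]])
#   b = np.array([[5., 0., 10., 10.]])
#   rect_iou(a, b)  # -> array([0.333...]) (intersection 50, union 150)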
def overlap_ratio(rect1, rect2):
'''
Compute overlap ratio between two rects
- rect: 1d array of [x,y,w,h] or
2d array of N x [x,y,w,h]
'''
if rect1.ndim==1:
rect1 = rect1[None,:]
if rect2.ndim==1:
rect2 = rect2[None,:]
left = np.maximum(rect1[:,0], rect2[:,0])
right = np.minimum(rect1[:,0]+rect1[:,2], rect2[:,0]+rect2[:,2])
top = np.maximum(rect1[:,1], rect2[:,1])
bottom = np.minimum(rect1[:,1]+rect1[:,3], rect2[:,1]+rect2[:,3])
intersect = np.maximum(0,right - left) * np.maximum(0,bottom - top)
union = rect1[:,2]*rect1[:,3] + rect2[:,2]*rect2[:,3] - intersect
iou = np.clip(intersect / union, 0, 1)
return iou
def calc_curves(ious, center_errors, nbins_iou, nbins_ce):
ious = np.asarray(ious, float)[:, np.newaxis]
center_errors = np.asarray(center_errors, float)[:, np.newaxis]
thr_iou = np.linspace(0, 1, nbins_iou)[np.newaxis, :]
thr_ce = np.arange(0, nbins_ce)[np.newaxis, :]
bin_iou = np.greater(ious, thr_iou)
bin_ce = np.less_equal(center_errors, thr_ce)
succ_curve = np.mean(bin_iou, axis=0)
prec_curve = np.mean(bin_ce, axis=0)
return succ_curve, prec_curve
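# Hedged usage sketch (not in the original file): calc_curves turns per-frame IoUs and
# center errors into the usual OTB-style success and precision curves. `gt_rects` and
# `result_rects` below are illustrative N x 4 arrays of (left, top, width, height).
#
#   ious = rect_iou(gt_rects, result_rects)        # one IoU per frame
#   errors = center_error(gt_rects, result_rects)  # one center error per frame
#   succ_curve, prec_curve = calc_curves(ious, errors, nbins_iou=21, nbins_ce=51)
#   auc = succ_curve.mean()                        # common scalar summary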
def compute_success_overlap(gt_bb, result_bb):
thresholds_overlap = np.arange(0, 1.05, 0.05)
n_frame = len(gt_bb)
success = np.zeros(len(thresholds_overlap))
iou = overlap_ratio(gt_bb, result_bb)
for i in range(len(thresholds_overlap)):
success[i] = sum(iou > thresholds_overlap[i]) / float(n_frame)
return success
def compute_success_error(gt_center, result_center):
thresholds_error = np.arange(0, 51, 1)
n_frame = len(gt_center)
success = np.zeros(len(thresholds_error))
dist = np.sqrt(np.sum(np.power(gt_center - result_center, 2), axis=1))
for i in range(len(thresholds_error)):
success[i] = sum(dist <= thresholds_error[i]) / float(n_frame)
return success
def get_result_bb(arch, seq):
result_path = join(arch, seq + '.txt')
temp = np.loadtxt(result_path, delimiter=',').astype(float)
return np.array(temp)
# pylint: disable=invalid-name
# pylint: disable=unused-argument
# pylint: disable=too-many-locals
"""
Mie scattering in pure python.
`miepython` is a pure Python module to calculate light scattering by
non-absorbing, partially-absorbing, or perfectly conducting spheres. Mie theory
is used, following the procedure in given by Wiscombe in
http://opensky.ucar.edu/islandora/object/technotes:232 and validated
against his results.
This code provides functions for calculating the extinction efficiency,
scattering efficiency, backscattering, and scattering asymmetry. Moreover,
a set of angles can be given to calculate the scattering for a sphere.
"""
from __future__ import division
import numpy as np
__all__ = ('generate_mie_costheta',
'i_par',
'i_per',
'i_unpolarized',
'mie',
'mie_S1_S2',
'mie_cdf',
'mie_mu_with_uniform_cdf')
def _Lentz_Dn(z, N):
"""
Compute the logarithmic derivative of the Ricatti-Bessel function.
Args:
z: function argument
N: order of Ricatti-Bessel function
Returns:
This returns the Ricatti-Bessel function of order N with argument z
using the continued fraction technique of Lentz, Appl. Opt., 15,
668-671, (1976).
"""
zinv = 2.0 / z
alpha = (N + 0.5) * zinv
aj = -(N + 1.5) * zinv
alpha_j1 = aj + 1 / alpha
alpha_j2 = aj
ratio = alpha_j1 / alpha_j2
runratio = alpha * ratio
while abs(abs(ratio) - 1.0) > 1e-12:
aj = zinv - aj
alpha_j1 = 1.0 / alpha_j1 + aj
alpha_j2 = 1.0 / alpha_j2 + aj
ratio = alpha_j1 / alpha_j2
zinv *= -1
runratio = ratio * runratio
return -N / z + runratio
def _D_downwards(z, N):
"""
Compute the logarithmic derivative by downwards recurrence.
Args:
z: function argument
N: order of Ricatti-Bessel function
Returns:
All the Ricatti-Bessel function values for orders from 0 to N for an
argument z using the downwards recurrence relations.
"""
D = np.zeros(N, dtype=complex)
last_D = _Lentz_Dn(z, N)
for n in range(N, 0, -1):
last_D = n / z - 1.0 / (last_D + n / z)
D[n - 1] = last_D
return D
def _D_upwards(z, N):
"""
Compute the logarithmic derivative by upwards recurrence.
Args:
z: function argument
N: order of Ricatti-Bessel function
Returns:
All the Ricatti-Bessel function values for orders from 0 to N for an
argument z using the upwards recurrence relations.
"""
D = np.zeros(N, dtype=complex)
exp = np.exp(-2j * z)
D[1] = -1 / z + (1 - exp) / ((1 - exp) / z - 1j * (1 + exp))
for n in range(2, N):
D[n] = 1 / (n / z - D[n - 1]) - n / z
return D
def _D_calc(m, x, N):
"""
Compute the logarithmic derivative using best method.
Args:
m: the complex index of refraction of the sphere
x: the size parameter of the sphere
N: order of Ricatti-Bessel function
Returns:
The values of the Ricatti-Bessel function for orders from 0 to N.
"""
n = m.real
kappa = abs(m.imag)
if n < 1 or n > 10 or kappa > 10 or x*kappa >= 3.9 - 10.8 * n + 13.78 * n**2:
return _D_downwards(m*x, N)
return _D_upwards(m*x, N)
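# Hedged usage sketch (not part of the original module): the logarithmic derivative
# feeds the Mie a_n/b_n coefficients below. For a weakly absorbing sphere the upward
# recurrence branch is taken, e.g.
#
#   m, x = 1.5 - 0.01j, 10.0
#   N = int(x + 4.05 * x**0.33333 + 2.0) + 1
#   D = _D_calc(m, x, N + 1)  # complex array of length N + 1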
def _mie_An_Bn(m, x):
"""
Compute arrays of Mie coefficients A and B for a sphere.
This estimates the size of the arrays based on Wiscombe's formula. The length
of the arrays is chosen so that the error when the series are summed is
around 1e-6.
Args:
m: the complex index of refraction of the sphere
x: the size parameter of the sphere
Returns:
An, Bn: arrays of Mie coefficents
"""
nstop = int(x + 4.05 * x**0.33333 + 2.0) + 1
if m.real > 0.0:
D = _D_calc(m, x, nstop + 1)
a = np.zeros(nstop - 1, dtype=complex)
b = np.zeros(nstop - 1, dtype=complex)
psi_nm1 = np.sin(x)
import numpy as np
class Event(object):
def __init__(self, data):
self.name, self.sample, self.var_type = data[0:3]
self.qualA, self.qualB, self.qual_res = [float(x) for x in data[3:6]]
self.uniqA, self.uniqB, self.uniq_res = [float(x) for x in data[6:9]]
self.spanA, self.spanB, self.span_res = [float(x) for x in data[9:12]]
self.global_cov = float(data[12])
self.localA, self.localB, self.local_res = [float(x) for x in data[13:16]]
self.gcA, self.gcB, self.gc_res = [float(x) for x in data[16:19]]
self.alignA, self.alignB, self.align_res = [float(x) for x in data[19:22]]
@classmethod
def init_link(cls, clusters, name):
qualA = np.mean([x.qualA for x in clusters])
qualB = np.mean([x.qualB for x in clusters])
qual_res = np.mean([x.qual_res for x in clusters])
uniqA = np.mean([x.uniqA for x in clusters])
uniqB = np.mean([x.uniqB for x in clusters])
uniq_res = np.mean([x.uniq_res for x in clusters])
spanA = np.mean([abs(x.spanA) for x in clusters])
spanB = np.mean([abs(x.spanB) for x in clusters])
span_res = np.mean([x.span_res for x in clusters])
global_cov = np.mean([x.global_cov for x in clusters])
localA = np.mean([x.localA for x in clusters])
localB = np.mean([x.localB for x in clusters])
local_res = np.mean([x.local_res for x in clusters])
gcA = np.mean([x.gcA for x in clusters])
gcB = np.mean([x.gcB for x in clusters])
gc_res = np.mean([x.gc_res for x in clusters])
alignA = np.mean([x.alignA for x in clusters])
alignB = np.mean([x.alignB for x in clusters])
align_res = np.mean([x.align_res for x in clusters])
"""Algebraic routines."""
import warnings
from typing import Optional, Tuple
import numpy as np
import scipy.linalg
from .basics import Array
def multiply_tensor_and_matrix(a: Array, b: Array) -> Array:
"""Multiply a 3D tensor with a 2D matrix in a loop to exploit speed gains from optimized 2D multiplication."""
n1, n2, n3 = a.shape[0], a.shape[1], b.shape[1]
multiplied = np.zeros((n1, n2, n3), a.dtype)
for i in range(n1):
multiplied[i] = a[i] @ b
return multiplied
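# Hedged usage sketch (not part of the original module): `a` has shape (n1, n2, k) and
# `b` has shape (k, n3); the loop stacks n1 ordinary matrix products.
#
#   import numpy as np
#   a = np.random.rand(5, 3, 4)
#   b = np.random.rand(4, 2)
#   out = multiply_tensor_and_matrix(a, b)  # shape (5, 3, 2), equivalent to a @ b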
def multiply_matrix_and_tensor(a: Array, b: Array) -> Array:
"""Multiply a 2D matrix with a 3D tensor in a loop to exploit speed gains from optimized 2D multiplication."""
n1, n2, n3 = b.shape[0], a.shape[0], b.shape[2]
multiplied = np.zeros((n1, n2, n3), a.dtype)
for i in range(n1):
multiplied[i] = a @ b[i]
return multiplied
def precisely_identify_collinearity(x: Array, atol: float, rtol: float) -> Tuple[Array, bool]:
"""Compute the QR decomposition of a matrix and identify which diagonal elements of the upper diagonal matrix are
within absolute and relative tolerances.
"""
try:
with warnings.catch_warnings():
warnings.filterwarnings('error')
r = scipy.linalg.qr(x, mode='r')[0] if x.size > 0 else x
collinear = np.abs(r.diagonal()) < atol + rtol * x.std(axis=0)
successful = True
except (ValueError, scipy.linalg.LinAlgError, scipy.linalg.LinAlgWarning):
collinear = np.zeros(x.shape[1], bool)
successful = False
return collinear, successful
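# Hedged usage sketch (not part of the original module): a column that is a linear
# combination of the others produces a near-zero R diagonal entry and gets flagged.
#
#   import numpy as np
#   x = np.random.rand(100, 3)
#   x = np.column_stack([x, 2.0 * x[:, 0]])  # duplicate (scaled) column
#   flags, ok = precisely_identify_collinearity(x, atol=1e-10, rtol=1e-10)
#   # when the QR succeeds (ok is True), flags[-1] is expected to be True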
def precisely_compute_eigenvalues(x: Array) -> Tuple[Array, bool]:
"""Compute the eigenvalues of a real symmetric matrix."""
try:
with warnings.catch_warnings():
warnings.filterwarnings('error')
eigenvalues = scipy.linalg.eigvalsh(x) if x.size > 0 else x.flatten()
successful = True
except (ValueError, scipy.linalg.LinAlgError, scipy.linalg.LinAlgWarning):
eigenvalues = np.full_like(np.diag(x), np.nan)
"""
This module is an example of a barebones function plugin for napari
It implements the ``napari_experimental_provide_function`` hook specification.
see: https://napari.org/docs/dev/plugins/hook_specifications.html
Replace code below according to your needs.
"""
from __future__ import print_function, division
from typing import TYPE_CHECKING, DefaultDict
from unicodedata import name
import six
# import modules
import sys # input, output, errors, and files
import os # interacting with file systems
import time # getting time
import datetime
import inspect # get passed parameters
import yaml # parameter importing
import json # for importing tiff metadata
try:
import cPickle as pickle # loading and saving python objects
except:
import pickle
import numpy as np # numbers package
import struct # for interpretting strings as binary data
import re # regular expressions
from pprint import pprint # for human readable file output
import traceback # for error messaging
import warnings # error messaging
import copy # not sure this is needed
import h5py # working with HDF5 files
import pandas as pd
import networkx as nx
import collections
# scipy and image analysis
from scipy.signal import find_peaks_cwt # used in channel finding
from scipy.optimize import curve_fit # fitting ring profile
from scipy.optimize import leastsq # fitting 2d gaussian
from scipy import ndimage as ndi # labeling and distance transform
from skimage import io
from skimage import segmentation # used in make_masks and segmentation
from skimage.transform import rotate
from skimage.feature import match_template # used to align images
from skimage.feature import blob_log # used for foci finding
from skimage.filters import threshold_otsu, median # segmentation
from skimage import filters
from skimage import morphology # many segmentation functions are used from this
from skimage.measure import regionprops # used for creating lineages
from skimage.measure import profile_line # used for ring and nucleoid analysis
from skimage import util, measure, transform, feature
import tifffile as tiff
from sklearn import metrics
# deep learning
import tensorflow as tf # ignore message about how tf was compiled
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras import models
from tensorflow.keras import losses
from tensorflow.keras import utils
from tensorflow.keras import backend as K
# os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # suppress warnings
# Parallelization modules
import multiprocessing
from multiprocessing import Pool
# Plotting for debug
import matplotlib as mpl
font = {'family' : 'sans-serif',
'weight' : 'normal',
'size' : 12}
mpl.rc('font', **font)
mpl.rcParams['pdf.fonttype'] = 42
from matplotlib.patches import Ellipse
from pathlib import Path
import time
import matplotlib.pyplot as plt
# import modules
import os
import glob
import re
import numpy as np
import tifffile as tiff
import pims_nd2
from skimage import io, measure, morphology
import tifffile as tiff
from scipy import stats
from pprint import pprint # for human readable file output
import multiprocessing
from multiprocessing import Pool
import numpy as np
import warnings
from tensorflow.python.keras import models
from enum import Enum
import numpy as np
import multiprocessing
from multiprocessing import Pool
import os
from napari_plugin_engine import napari_hook_implementation
from skimage.filters import threshold_otsu # segmentation
from skimage import morphology # many segmentation functions are used from this
from skimage import segmentation # used in make_masks and segmentation
from scipy import ndimage as ndi # labeling and distance transform
import matplotlib.gridspec as gridspec
from skimage.exposure import rescale_intensity # for displaying in GUI
from skimage import io, morphology, segmentation
# import mm3_helpers as mm3
import napari
# This is the actual plugin function, where we export our function
# (The functions themselves are defined below)
@napari_hook_implementation
def napari_experimental_provide_function():
# we can return a single function
# or a tuple of (function, magicgui_options)
# or a list of multiple functions with or without options, as shown here:
#return [Segment, threshold, image_arithmetic]
return [Compile, ChannelPicker, Segment]
# 1. First example, a simple function that thresholds an image and creates a labels layer
def threshold(data: "napari.types.ImageData", threshold: int) -> "napari.types.LabelsData":
"""Threshold an image and return a mask."""
return (data > threshold).astype(int)
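# Hedged usage sketch (not part of the original plugin): outside napari the threshold
# function can be exercised directly on a plain array.
#
#   import numpy as np
#   img = np.array([[0, 50], [120, 200]])
#   threshold(img, 100)  # -> array([[0, 0], [1, 1]])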
# print a warning
def warning(*objs):
print(time.strftime("%H:%M:%S WARNING:", time.localtime()), *objs, file=sys.stderr)
# print information
def information(*objs):
print(time.strftime("%H:%M:%S", time.localtime()), *objs, file=sys.stdout)
def julian_day_number():
"""
Need this to solve a bug in pims_nd2.nd2reader.ND2_Reader instance initialization.
The bug is in /usr/local/lib/python2.7/site-packages/pims_nd2/ND2SDK.py in function `jdn_to_datetime_local`, when the year number in the metadata (self._lim_metadata_desc) is not in the correct range. This causes a problem when calling self.metadata.
https://en.wikipedia.org/wiki/Julian_day
"""
dt=datetime.datetime.now()
tt=dt.timetuple()
jdn=(1461.*(tt.tm_year + 4800. + (tt.tm_mon - 14.)/12))/4. + (367.*(tt.tm_mon - 2. - 12.*((tt.tm_mon -14.)/12)))/12. - (3.*((tt.tm_year + 4900. + (tt.tm_mon - 14.)/12.)/100.))/4. + tt.tm_mday - 32075
return jdn
def get_plane(filepath):
pattern = r'(c\d+).tif'
res = re.search(pattern,filepath)
if (res != None):
return res.group(1)
else:
return None
def get_fov(filepath):
pattern = r'xy(\d+)\w*.tif'
res = re.search(pattern,filepath)
if (res != None):
return int(res.group(1))
else:
return None
def get_time(filepath):
pattern = r't(\d+)xy\w+.tif'
res = re.search(pattern,filepath)
if (res != None):
return np.int_(res.group(1))
else:
return None
# loads and image stack from TIFF or HDF5 using mm3 conventions
def load_stack(fov_id, peak_id, color='c1', image_return_number=None):
'''
Loads an image stack.
Supports reading TIFF stacks or HDF5 files.
Parameters
----------
fov_id : int
The FOV id
peak_id : int
The peak (channel) id. Dummy None value in case color='empty'
color : str
The image stack type to return. Can be:
c1 : phase stack
cN : where n is an integer for arbitrary color channel
sub : subtracted images
seg : segmented images
empty : get the empty channel for this fov, slightly different
Returns
-------
image_stack : np.ndarray
The image stack through time. Shape is (t, y, x)
'''
# things are slightly different for empty channels
if 'empty' in color:
if params['output'] == 'TIFF':
img_filename = params['experiment_name'] + '_xy%03d_%s.tif' % (fov_id, color)
with tiff.TiffFile(os.path.join(params['empty_dir'],img_filename)) as tif:
img_stack = tif.asarray()
if params['output'] == 'HDF5':
with h5py.File(os.path.join(params['hdf5_dir'],'xy%03d.hdf5' % fov_id), 'r') as h5f:
img_stack = h5f[color][:]
return img_stack
# load normal images for either TIFF or HDF5
if params['output'] == 'TIFF':
if color[0] == 'c':
img_dir = params['chnl_dir']
elif 'sub' in color:
img_dir = params['sub_dir']
elif 'foci' in color:
img_dir = params['foci_seg_dir']
elif 'seg' in color:
img_dir = params['seg_dir']
img_filename = params['experiment_name'] + '_xy%03d_p%04d_%s.tif' % (fov_id, peak_id, color)
with tiff.TiffFile(os.path.join(img_dir, img_filename)) as tif:
img_stack = tif.asarray()
if params['output'] == 'HDF5':
with h5py.File(os.path.join(params['hdf5_dir'], 'xy%03d.hdf5' % fov_id), 'r') as h5f:
# normal naming
# need to use [:] to get a copy, else it references the closed hdf5 dataset
img_stack = h5f['channel_%04d/p%04d_%s' % (peak_id, peak_id, color)][:]
return img_stack
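# Hedged usage sketch (not part of the original plugin): load_stack assumes the global
# `params` dictionary has already been populated (output format, directories, experiment
# name). A typical call for FOV 1, peak 11 might look like this; the color strings are
# illustrative.
#
#   phase_stack = load_stack(1, 11, color='c1')   # shape (t, y, x)
#   sub_stack = load_stack(1, 11, color='sub_c1')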
# load the time table and add it to the global params
def load_time_table():
'''Add the time table dictionary to the params global dictionary.
This is so it can be used during Cell creation.
'''
# try first for yaml, then for pkl
try:
with open(os.path.join(params['ana_dir'], 'time_table.yaml'), 'rb') as time_table_file:
params['time_table'] = yaml.safe_load(time_table_file)
except:
with open(os.path.join(params['ana_dir'], 'time_table.pkl'), 'rb') as time_table_file:
params['time_table'] = pickle.load(time_table_file)
return
# function for loading the channel masks
def load_channel_masks():
'''Load channel masks dictionary. Should be .yaml but try pickle too.
'''
information("Loading channel masks dictionary.")
# try loading from .yaml before .pkl
try:
information('Path:', os.path.join(params['ana_dir'], 'channel_masks.yaml'))
with open(os.path.join(params['ana_dir'], 'channel_masks.yaml'), 'r') as cmask_file:
channel_masks = yaml.safe_load(cmask_file)
except:
warning('Could not load channel masks dictionary from .yaml.')
try:
information('Path:', os.path.join(params['ana_dir'], 'channel_masks.pkl'))
with open(os.path.join(params['ana_dir'], 'channel_masks.pkl'), 'rb') as cmask_file:
channel_masks = pickle.load(cmask_file)
except ValueError:
warning('Could not load channel masks dictionary from .pkl.')
return channel_masks
# function for loading the specs file
def load_specs():
'''Load specs file which indicates which channels should be analyzed, used as empties, or ignored.'''
try:
with open(os.path.join(params['ana_dir'], 'specs.yaml'), 'r') as specs_file:
specs = yaml.safe_load(specs_file)
except:
try:
with open(os.path.join(params['ana_dir'], 'specs.pkl'), 'rb') as specs_file:
specs = pickle.load(specs_file)
except ValueError:
warning('Could not load specs file.')
return specs
### functions for dealing with raw TIFF images
# get params is the major function which processes raw TIFF images
def get_initial_tif_params(image_filename):
'''This is a function for getting the information
out of an image for later trap identification, cropping, and aligning with Unet. It loads a tiff file and pulls out the image metadata.
it returns a dictionary like this for each image:
'filename': image_filename,
'fov' : image_metadata['fov'], # fov id
't' : image_metadata['t'], # time point
'jdn' : image_metadata['jdn'], # absolute julian time
'x' : image_metadata['x'], # x position on stage [um]
'y' : image_metadata['y'], # y position on stage [um]
'plane_names' : image_metadata['plane_names'] # list of plane names
Called by
mm3_Compile.py __main__
Calls
mm3.extract_metadata
mm3.find_channels
'''
try:
# open up file and get metadata
with tiff.TiffFile(os.path.join(params['TIFF_dir'],image_filename)) as tif:
image_data = tif.asarray()
#print(image_data.shape) # uncomment for debug
#if len(image_data.shape) == 2:
# img_shape = [image_data.shape[0],image_data.shape[1]]
#else:
img_shape = [image_data.shape[1],image_data.shape[2]]
plane_list = [str(i+1) for i in range(image_data.shape[0])]
#print(plane_list) # uncomment for debug
if params['TIFF_source'] == 'elements':
image_metadata = get_tif_metadata_elements(tif)
elif params['TIFF_source'] == 'nd2ToTIFF':
image_metadata = get_tif_metadata_nd2ToTIFF(tif)
else:
image_metadata = get_tif_metadata_filename(tif)
information('Analyzed %s' % image_filename)
# return the file name, the data for the channels in that image, and the metadata
return {'filepath': os.path.join(params['TIFF_dir'], image_filename),
'fov' : image_metadata['fov'], # fov id
't' : image_metadata['t'], # time point
'jd' : image_metadata['jd'], # absolute julian time
'x' : image_metadata['x'], # x position on stage [um]
'y' : image_metadata['y'], # y position on stage [um]
'planes' : plane_list, # list of plane names
'shape' : img_shape} # image shape x y in pixels
except:
warning('Failed get_params for ' + image_filename.split("/")[-1])
print(sys.exc_info()[0])
print(sys.exc_info()[1])
print(traceback.print_tb(sys.exc_info()[2]))
return {'filepath': os.path.join(params['TIFF_dir'],image_filename), 'analyze_success': False}
# get params is the major function which processes raw TIFF images
def get_tif_params(image_filename, find_channels=True):
'''This is a damn important function for getting the information
out of an image. It loads a tiff file, pulls out the image data, and the metadata,
including the location of the channels if flagged.
it returns a dictionary like this for each image:
'filename': image_filename,
'fov' : image_metadata['fov'], # fov id
't' : image_metadata['t'], # time point
'jdn' : image_metadata['jdn'], # absolute julian time
'x' : image_metadata['x'], # x position on stage [um]
'y' : image_metadata['y'], # y position on stage [um]
'plane_names' : image_metadata['plane_names'] # list of plane names
'channels': cp_dict, # dictionary of channel locations, in the case of Unet-based channel segmentation, it's a dictionary of channel labels
Called by
mm3_Compile.py __main__
Calls
mm3.extract_metadata
mm3.find_channels
'''
try:
# open up file and get metadata
with tiff.TiffFile(os.path.join(params['TIFF_dir'],image_filename)) as tif:
image_data = tif.asarray()
if params['TIFF_source'] == 'elements':
image_metadata = get_tif_metadata_elements(tif)
elif params['TIFF_source'] == 'nd2ToTIFF':
image_metadata = get_tif_metadata_nd2ToTIFF(tif)
else:
image_metadata = get_tif_metadata_filename(tif)
# look for channels if flagged
if find_channels:
# fix the image orientation and get the number of planes
image_data = fix_orientation(image_data)
# if the image data has more than 1 plane restrict image_data to phase,
# which should have highest mean pixel data
if len(image_data.shape) > 2:
#ph_index = np.argmax([np.mean(image_data[ci]) for ci in range(image_data.shape[0])])
ph_index = int(params['phase_plane'][1:]) - 1
image_data = image_data[ph_index]
# get shape of single plane
img_shape = [image_data.shape[0], image_data.shape[1]]
# find channels on the processed image
chnl_loc_dict = find_channel_locs(image_data)
information('Analyzed %s' % image_filename)
# return the file name, the data for the channels in that image, and the metadata
return {'filepath': os.path.join(params['TIFF_dir'], image_filename),
'fov' : image_metadata['fov'], # fov id
't' : image_metadata['t'], # time point
'jd' : image_metadata['jd'], # absolute julian time
'x' : image_metadata['x'], # x position on stage [um]
'y' : image_metadata['y'], # y position on stage [um]
'planes' : image_metadata['planes'], # list of plane names
'shape' : img_shape, # image shape x y in pixels
# 'channels' : {1 : {'A' : 1, 'B' : 2}, 2 : {'C' : 3, 'D' : 4}}}
'channels' : chnl_loc_dict} # dictionary of channel locations
except:
warning('Failed get_params for ' + image_filename.split("/")[-1])
print(sys.exc_info()[0])
print(sys.exc_info()[1])
print(traceback.print_tb(sys.exc_info()[2]))
return {'filepath': os.path.join(params['TIFF_dir'],image_filename), 'analyze_success': False}
# finds metadata in a tiff image which has been exported with Nikon Elements.
def get_tif_metadata_elements(tif):
'''This function pulls out the metadata from a tif file and returns it as a dictionary.
This is for tiff files as exported by Nikon Elements as a stacked tiff, one per time point.
tif is an opened tif file (using the package tifffile)
arguments:
fname (tifffile.TiffFile): TIFF file object from which data will be extracted
returns:
dictionary of values:
'jdn' (float)
'x' (float)
'y' (float)
'plane_names' (list of strings)
Called by
mm3.Compile
'''
# image Metadata
idata = { 'fov': -1,
't' : -1,
'jd': -1 * 0.0,
'x': -1 * 0.0,
'y': -1 * 0.0,
'planes': []}
# get the fov and t simply from the file name
idata['fov'] = int(tif.fname.split('xy')[1].split('.tif')[0])
idata['t'] = int(tif.fname.split('xy')[0].split('t')[-1])
# a page is a plane, or stack, in the tiff. The other metadata is hidden down in there.
for page in tif:
for tag in page.tags.values():
#print("Checking tag",tag.name,tag.value)
t = tag.name, tag.value
t_string = u""
time_string = u""
# Interesting tag names: 65330, 65331 (binary data; good stuff), 65332
# we want to work with the tag named 65331
# if the tag name is not in the set of tags we find interesting then skip this cycle of the loop
if tag.name not in ('65331', '65332', 'strip_byte_counts', 'image_width', 'orientation', 'compression', 'new_subfile_type', 'fill_order', 'max_sample_value', 'bits_per_sample', '65328', '65333'):
#print("*** " + tag.name)
#print(tag.value)
pass
#if tag.name == '65330':
# return tag.value
if tag.name in ('65331'):
# make infolist a list of the tag values 0 to 65535 by zipping up a paired list of two bytes, at two-byte intervals
# note that 0X100 is hex for 256
infolist = [a+b*0x100 for a,b in zip(tag.value[0::2], tag.value[1::2])]
# get char values for each element in infolist
for c_entry in range(0, len(infolist)):
# the element corresponds to an ascii char for a letter or bracket (and a few other things)
if infolist[c_entry] < 127 and infolist[c_entry] > 64:
# add the letter to the unicode string t_string
t_string += chr(infolist[c_entry])
#elif infolist[c_entry] == 0:
# continue
else:
t_string += " "
# this block will find the dTimeAbsolute and print the subsequent integers
# index 170 is counting seconds, and rollover of index 170 leads to increment of index 171
# rollover of index 171 leads to increment of index 172
# get the position of the array by finding the index of the t_string at which dTimeAbsolute is listed; note that 2*len(dTimeAbsolute)=26
#print(t_string)
arraypos = t_string.index("dXPos") * 2 + 16
xarr = tag.value[arraypos:arraypos+4]
b = ''.join(chr(i) for i in xarr)
idata['x'] = float(struct.unpack('<f', b)[0])
arraypos = t_string.index("dYPos") * 2 + 16
yarr = tag.value[arraypos:arraypos+4]
b = ''.join(chr(i) for i in yarr)
idata['y'] = float(struct.unpack('<f', b)[0])
arraypos = t_string.index("dTimeAbsolute") * 2 + 26
shortarray = tag.value[arraypos+2:arraypos+10]
b = ''.join(chr(i) for i in shortarray)
idata['jd'] = float(struct.unpack('<d', b)[0])
# extract plane names
il = [a+b*0x100 for a,b in zip(tag.value[0::2], tag.value[1::2])]
li = [a+b*0x100 for a,b in zip(tag.value[1::2], tag.value[2::2])]
strings = list(zip(il, li))
allchars = ""
for c_entry in range(0, len(strings)):
if 31 < strings[c_entry][0] < 127:
allchars += chr(strings[c_entry][0])
elif 31 < strings[c_entry][1] < 127:
allchars += chr(strings[c_entry][1])
else:
allchars += " "
allchars = re.sub(' +',' ', allchars)
words = allchars.split(" ")
planes = []
for idx in [i for i, x in enumerate(words) if x == "sOpticalConfigName"]:
planes.append(words[idx+1])
idata['planes'] = planes
return idata
# finds metadata in a tiff image which has been exported with nd2ToTIFF.py.
def get_tif_metadata_nd2ToTIFF(tif):
'''This function pulls out the metadata from a tif file and returns it as a dictionary.
This is for tiff files as exported by the mm3 function mm3_nd2ToTIFF.py. All the metadata
is found in that script and saved in json format to the tiff, so it is simply extracted here
Parameters:
tif: TIFF file object from which data will be extracted
Returns:
dictionary of values:
'fov': int,
't' : int,
'jdn' (float)
'x' (float)
'y' (float)
'planes' (list of strings)
Called by
mm3_Compile.get_tif_params
'''
# get the first page of the tiff and pull out image description
# this dictionary should be in the above form
for tag in tif.pages[0].tags:
if tag.name=="ImageDescription":
idata=tag.value
break
#print(idata)
idata = json.loads(idata)
return idata
# Finds metadata from the filename
def get_tif_metadata_filename(tif):
'''This function pulls out the metadata from a tif file and returns it as a dictionary.
This just gets the tiff metadata from the filename and is a backup option for when the format of the metadata is not known.
Parameters:
tif: TIFF file object from which data will be extracted
Returns:
dictionary of values:
'fov': int,
't' : int,
'jdn' (float)
'x' (float)
'y' (float)
Called by
mm3_Compile.get_tif_params
'''
idata = {'fov' : get_fov(tif.filename), # fov id
't' : get_time(tif.filename), # time point
'jd' : -1 * 0.0, # absolute julian time
'x' : -1 * 0.0, # x position on stage [um]
'y' : -1 * 0.0} # y position on stage [um]
return idata
# make a lookup time table for converting nominal time to elapsed time in seconds
def make_time_table(analyzed_imgs):
'''
Loops through the analyzed images and uses the jd time in the metadata to find the elapsed
time in seconds that each picture was taken. This is later used for more accurate elongation
rate calculation.
Parameters
----------
analyzed_imgs : dict
The output of get_tif_params.
params['use_jd'] : boolean
If set to True, 'jd' time will be used from the image metadata to use to create time table. Otherwise the 't' index will be used, and the parameter 'seconds_per_time_index' will be used from the parameters.yaml file to convert to seconds.
Returns
-------
time_table : dict
Look up dictionary with keys for the FOV and then the time point.
'''
information('Making time table...')
# initialize
time_table = {}
first_time = float('inf')
# need to go through the data once to find the first time
for iname, idata in six.iteritems(analyzed_imgs):
if params['use_jd']:
if idata['jd'] < first_time:
first_time = idata['jd']
else:
if idata['t'] < first_time:
first_time = idata['t']
# init dictionary for specific times per FOV
if idata['fov'] not in time_table:
time_table[idata['fov']] = {}
for iname, idata in six.iteritems(analyzed_imgs):
if params['use_jd']:
# convert jd time to elapsed time in seconds
t_in_seconds = np.around((idata['jd'] - first_time) * 24*60*60, decimals=0).astype('uint32')
else:
t_in_seconds = np.around((idata['t'] - first_time) * params['moviemaker']['seconds_per_time_index'], decimals=0).astype('uint32')
time_table[int(idata['fov'])][int(idata['t'])] = int(t_in_seconds)
# save to .pkl. This pkl will be loaded into the params
# with open(os.path.join(params['ana_dir'], 'time_table.pkl'), 'wb') as time_table_file:
# pickle.dump(time_table, time_table_file, protocol=pickle.HIGHEST_PROTOCOL)
# with open(os.path.join(params['ana_dir'], 'time_table.txt'), 'w') as time_table_file:
# pprint(time_table, stream=time_table_file)
with open(os.path.join(params['ana_dir'], 'time_table.yaml'), 'w') as time_table_file:
yaml.dump(data=time_table, stream=time_table_file, default_flow_style=False, tags=None)
information('Time table saved.')
return time_table
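# Hedged usage sketch (not part of the original plugin): make_time_table is run after
# get_tif_params has been applied to every raw image; the file list below is illustrative.
#
#   analyzed_imgs = {fn: get_tif_params(fn) for fn in found_files}
#   time_table = make_time_table(analyzed_imgs)
#   # time_table[fov][t] then gives elapsed seconds since the first image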
# saves traps sliced via Unet
def save_tiffs(imgDict, analyzed_imgs, fov_id):
savePath = os.path.join(params['experiment_directory'],
params['analysis_directory'],
params['chnl_dir'])
img_names = [key for key in analyzed_imgs.keys()]
image_params = analyzed_imgs[img_names[0]]
for peak,img in six.iteritems(imgDict):
img = img.astype('uint16')
if not os.path.isdir(savePath):
os.mkdir(savePath)
for planeNumber in image_params['planes']:
channel_filename = os.path.join(savePath, params['experiment_name'] + '_xy{0:0=3}_p{1:0=4}_c{2}.tif'.format(fov_id, peak, planeNumber))
io.imsave(channel_filename, img[:,:,:,int(planeNumber)-1])
# slice_and_write cuts up the image files one at a time and writes them out to tiff stacks
def tiff_stack_slice_and_write(images_to_write, channel_masks, analyzed_imgs):
'''Writes out 4D stacks of TIFF images per channel.
Loads all tiffs from and FOV into memory and then slices all time points at once.
Called by
__main__
'''
# make an array of images and then concatenate them into one big stack
image_fov_stack = []
# go through list of images and get the file path
for n, image in enumerate(images_to_write):
# analyzed_imgs dictionary will be found in main scope. [0] is the key, [1] is jd
image_params = analyzed_imgs[image[0]]
information("Loading %s." % image_params['filepath'].split('/')[-1])
if n == 0:
# declare identification variables for saving using first image
fov_id = image_params['fov']
# load the tif and store it in array
with tiff.TiffFile(image_params['filepath']) as tif:
image_data = tif.asarray()
# channel finding was also done on images after orientation was fixed
image_data = fix_orientation(image_data)
# add additional axis if the image is flat
if len(image_data.shape) == 2:
image_data = np.expand_dims(image_data, 0)
# change axis so it goes Y, X, Plane
image_data = np.rollaxis(image_data, 0, 3)
# add it to list. The images should be in time order
image_fov_stack.append(image_data)
# concatenate the list into one big ass stack
image_fov_stack = np.stack(image_fov_stack, axis=0)
# cut out the channels as per channel masks for this fov
for peak, channel_loc in six.iteritems(channel_masks[fov_id]):
#information('Slicing and saving channel peak %s.' % channel_filename.split('/')[-1])
information('Slicing and saving channel peak %d.' % peak)
# channel masks should only contain ints, but you can use this for hard fix
# for i in range(len(channel_loc)):
# for j in range(len(channel_loc[i])):
# channel_loc[i][j] = int(channel_loc[i][j])
# slice out channel.
# The function should recognize the shape length as 4 and cut all time points
channel_stack = cut_slice(image_fov_stack, channel_loc)
# save a different time stack for all colors
for color_index in range(channel_stack.shape[3]):
# this is the filename for the channel
# # chnl_dir and p will be looked for in the scope above (__main__)
channel_filename = os.path.join(params['chnl_dir'], params['experiment_name'] + '_xy%03d_p%04d_c%1d.tif' % (fov_id, peak, color_index+1))
# save stack
tiff.imsave(channel_filename, channel_stack[:,:,:,color_index], compress=4)
return
# saves traps sliced via Unet to an hdf5 file
def save_hdf5(imgDict, img_names, analyzed_imgs, fov_id, channel_masks):
'''Writes out 4D stacks of images to an HDF5 file.
Called by
mm3_Compile.py
'''
savePath = params['hdf5_dir']
if not os.path.isdir(savePath):
os.mkdir(savePath)
img_times = [analyzed_imgs[key]['t'] for key in img_names]
img_jds = [analyzed_imgs[key]['jd'] for key in img_names]
fov_ids = [analyzed_imgs[key]['fov'] for key in img_names]
# get image_params from first image from current fov
image_params = analyzed_imgs[img_names[0]]
# establish some variables for hdf5 attributes
fov_id = image_params['fov']
x_loc = image_params['x']
y_loc = image_params['y']
image_shape = image_params['shape']
image_planes = image_params['planes']
fov_channel_masks = channel_masks[fov_id]
with h5py.File(os.path.join(savePath,'{}_xy{:0=2}.hdf5'.format(params['experiment_name'],fov_id)), 'w', libver='earliest') as h5f:
# add in metadata for this FOV
# these attributes should be common for all channel
h5f.attrs.create('fov_id', fov_id)
h5f.attrs.create('stage_x_loc', x_loc)
h5f.attrs.create('stage_y_loc', y_loc)
h5f.attrs.create('image_shape', image_shape)
# encoding is because HDF5 has problems with numpy unicode
h5f.attrs.create('planes', [plane.encode('utf8') for plane in image_planes])
h5f.attrs.create('peaks', sorted([key for key in imgDict.keys()]))
# this is for things that change across time, for these create a dataset
img_names = np.asarray(img_names)
img_names = np.expand_dims(img_names, 1)
img_names = img_names.astype('S100')
h5ds = h5f.create_dataset(u'filenames', data=img_names,
chunks=True, maxshape=(None, 1), dtype='S100',
compression="gzip", shuffle=True, fletcher32=True)
h5ds = h5f.create_dataset(u'times', data=np.expand_dims(img_times, 1),
chunks=True, maxshape=(None, 1),
compression="gzip", shuffle=True, fletcher32=True)
h5ds = h5f.create_dataset(u'times_jd', data=np.expand_dims(img_jds, 1),
chunks=True, maxshape=(None, 1),
compression="gzip", shuffle=True, fletcher32=True)
# cut out the channels as per channel masks for this fov
for peak,channel_stack in six.iteritems(imgDict):
channel_stack = channel_stack.astype('uint16')
# create group for this trap
h5g = h5f.create_group('channel_%04d' % peak)
# add attribute for peak_id, channel location
h5g.attrs.create('peak_id', peak)
channel_loc = fov_channel_masks[peak]
h5g.attrs.create('channel_loc', channel_loc)
# save a different dataset for all colors
for color_index in range(channel_stack.shape[3]):
# create the dataset for the image. Review docs for these options.
h5ds = h5g.create_dataset(u'p%04d_c%1d' % (peak, color_index+1),
data=channel_stack[:,:,:,color_index],
chunks=(1, channel_stack.shape[1], channel_stack.shape[2]),
maxshape=(None, channel_stack.shape[1], channel_stack.shape[2]),
compression="gzip", shuffle=True, fletcher32=True)
# h5ds.attrs.create('plane', image_planes[color_index].encode('utf8'))
# write the data even though we have more to write (free up memory)
h5f.flush()
return
# same thing as tiff_stack_slice_and_write but do it for hdf5
def hdf5_stack_slice_and_write(images_to_write, channel_masks, analyzed_imgs):
'''Writes out 4D stacks of TIFF images to an HDF5 file.
Called by
__main__
'''
# make an array of images and then concatenate them into one big stack
image_fov_stack = []
# make arrays for filenames and times
image_filenames = []
image_times = [] # times is still an integer but may be indexed arbitrarily
image_jds = [] # jds = julian dates (times)
# go through list of images, load and fix them, and create arrays of metadata
for n, image in enumerate(images_to_write):
image_name = image[0] # [0] is the key, [1] is jd
# analyzed_imgs dictionary will be found in main scope.
image_params = analyzed_imgs[image_name]
information("Loading %s." % image_params['filepath'].split('/')[-1])
# add information to metadata arrays
image_filenames.append(image_name)
image_times.append(image_params['t'])
image_jds.append(image_params['jd'])
# declare identification variables for saving using first image
if n == 0:
# same across fov
fov_id = image_params['fov']
x_loc = image_params['x']
y_loc = image_params['y']
image_shape = image_params['shape']
image_planes = image_params['planes']
# load the tif and store it in array
with tiff.TiffFile(image_params['filepath']) as tif:
image_data = tif.asarray()
# channel finding was also done on images after orientation was fixed
image_data = fix_orientation(image_data)
# add additional axis if the image is flat
if len(image_data.shape) == 2:
image_data = np.expand_dims(image_data, 0)
# change axis so it goes Y, X, Plane
image_data = np.rollaxis(image_data, 0, 3)
# add it to list. The images should be in time order
image_fov_stack.append(image_data)
# concatenate the list into one big ass stack
image_fov_stack = np.stack(image_fov_stack, axis=0)
# create the HDF5 file for the FOV, first time this is being done.
with h5py.File(os.path.join(params['hdf5_dir'],'xy%03d.hdf5' % fov_id), 'w', libver='earliest') as h5f:
# add in metadata for this FOV
# these attributes should be common for all channel
h5f.attrs.create('fov_id', fov_id)
h5f.attrs.create('stage_x_loc', x_loc)
h5f.attrs.create('stage_y_loc', y_loc)
h5f.attrs.create('image_shape', image_shape)
# encoding is because HDF5 has problems with numpy unicode
h5f.attrs.create('planes', [plane.encode('utf8') for plane in image_planes])
h5f.attrs.create('peaks', sorted(channel_masks[fov_id].keys()))
# this is for things that change across time, for these create a dataset
h5ds = h5f.create_dataset(u'filenames', data=np.expand_dims(image_filenames, 1),
chunks=True, maxshape=(None, 1), dtype='S100',
compression="gzip", shuffle=True, fletcher32=True)
h5ds = h5f.create_dataset(u'times', data=np.expand_dims(image_times, 1),
chunks=True, maxshape=(None, 1),
compression="gzip", shuffle=True, fletcher32=True)
h5ds = h5f.create_dataset(u'times_jd', data=np.expand_dims(image_jds, 1),
chunks=True, maxshape=(None, 1),
compression="gzip", shuffle=True, fletcher32=True)
# cut out the channels as per channel masks for this fov
for peak, channel_loc in six.iteritems(channel_masks[fov_id]):
#information('Slicing and saving channel peak %s.' % channel_filename.split('/')[-1])
information('Slicing and saving channel peak %d.' % peak)
# create group for this channel
h5g = h5f.create_group('channel_%04d' % peak)
# add attribute for peak_id, channel location
h5g.attrs.create('peak_id', peak)
h5g.attrs.create('channel_loc', channel_loc)
# channel masks should only contain ints, but you can use this for a hard fix
# for i in range(len(channel_loc)):
# for j in range(len(channel_loc[i])):
# channel_loc[i][j] = int(channel_loc[i][j])
# slice out channel.
# The function should recognize the shape length as 4 and cut all time points
channel_stack = cut_slice(image_fov_stack, channel_loc)
# save a different dataset for all colors
for color_index in range(channel_stack.shape[3]):
# create the dataset for the image. Review docs for these options.
h5ds = h5g.create_dataset(u'p%04d_c%1d' % (peak, color_index+1),
data=channel_stack[:,:,:,color_index],
chunks=(1, channel_stack.shape[1], channel_stack.shape[2]),
maxshape=(None, channel_stack.shape[1], channel_stack.shape[2]),
compression="gzip", shuffle=True, fletcher32=True)
# h5ds.attrs.create('plane', image_planes[color_index].encode('utf8'))
# write the data even though we have more to write (free up memory)
h5f.flush()
return
def tileImage(img, subImageNumber):
divisor = int(np.sqrt(subImageNumber))
M = img.shape[0]//divisor
N = img.shape[1]//divisor
print(img.shape, M, N, divisor, subImageNumber)
ans = ([img[x:x+M,y:y+N] for x in range(0,img.shape[0],M) for y in range(0,img.shape[1],N)])
tiles=[]
for m in ans:
if m.shape[0]==512 and m.shape[1]==512:
tiles.append(m)
tiles=np.asarray(tiles)
#print(tiles)
return(tiles)
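# Hedged usage sketch (not part of the original plugin): tileImage splits a square image
# into subImageNumber equal tiles and keeps only the 512x512 ones.
#
#   import numpy as np
#   big = np.zeros((2048, 2048), dtype='uint16')
#   tiles = tileImage(big, subImageNumber=16)  # -> array of shape (16, 512, 512)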
def get_weights(img, subImageNumber):
divisor = int(np.sqrt(subImageNumber))
M = img.shape[0]//divisor
N = img.shape[1]//divisor
weights = np.ones((img.shape[0],img.shape[1]),dtype='uint8')
for i in range(divisor-1):
weights[(M*(i+1))-25:(M*(i+1)+25),:] = 0
weights[:,(N*(i+1))-25:(N*(i+1)+25)] = 0
return(weights)
def permute_image(img, trap_align_metadata):
# are there three dimensions?
if len(img.shape) == 3:
if img.shape[0] < 3: # for tifs with fewer than three imaging channels, the first dimension separates channels
# img = np.transpose(img, (1,2,0))
img = img[trap_align_metadata['phase_plane_index'],:,:] # grab just the phase channel
else:
img = img[:,:,trap_align_metadata['phase_plane_index']] # grab just the phase channel
return(img)
def imageConcatenatorFeatures(imgStack, subImageNumber = 64):
rowNumPerImage = int(np.sqrt(subImageNumber)) # here I'm assuming our large images are square, with equal number of crops in each dimension
#print(rowNumPerImage)
imageNum = int(imgStack.shape[0]/subImageNumber) # total number of sub-images divided by the number of sub-images in each original large image
iterNum = int(imageNum*rowNumPerImage)
imageDims = int(np.sqrt(imgStack.shape[1]*imgStack.shape[2]*subImageNumber))
featureNum = int(imgStack.shape[3])
bigImg = np.zeros(shape=(imageNum, imageDims, imageDims, featureNum), dtype='float32') # create array to store reconstructed images
featureRowDicts = []
for j in range(featureNum):
rowDict = {}
for i in range(iterNum):
baseNum = int(i*iterNum/imageNum)
# concatenate columns of 256x256 images to build each 256x2048 row
rowDict[i] = np.column_stack((imgStack[baseNum,:,:,j],imgStack[baseNum+1,:,:,j],
imgStack[baseNum+2,:,:,j], imgStack[baseNum+3,:,:,j]))#,
#imgStack[baseNum+4,:,:,j],imgStack[baseNum+5,:,:,j],
#imgStack[baseNum+6,:,:,j],imgStack[baseNum+7,:,:,j]))
featureRowDicts.append(rowDict)
for j in range(featureNum):
for i in range(imageNum):
baseNum = int(i*rowNumPerImage)
# concatenate appropriate 256x2048 rows to build a 2048x2048 image and place it into bigImg
bigImg[i,:,:,j] = np.row_stack((featureRowDicts[j][baseNum],featureRowDicts[j][baseNum+1],
featureRowDicts[j][baseNum+2],featureRowDicts[j][baseNum+3]))#,
#featureRowDicts[j][baseNum+4],featureRowDicts[j][baseNum+5],
#featureRowDicts[j][baseNum+6],featureRowDicts[j][baseNum+7]))
return(bigImg)
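# Example (assumed shapes): reassemble U-net predictions for a single
# 2048x2048 frame that was tiled into 16 crops of 512x512 with 3 feature
# channels. The column/row stacking above is hard-coded for 4 crops per
# row/column, i.e. subImageNumber = 16.
#   preds = np.zeros((16, 512, 512, 3), dtype='float32')
#   big = imageConcatenatorFeatures(preds, subImageNumber=16)
#   big.shape  # -> (1, 2048, 2048, 3)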
def imageConcatenatorFeatures2(imgStack, subImageNumber = 81):
rowNumPerImage = int(np.sqrt(subImageNumber)) # here I'm assuming our large images are square, with equal number of crops in each dimension
imageNum = int(imgStack.shape[0]/subImageNumber) # total number of sub-images divided by the number of sub-images in each original large image
iterNum = int(imageNum*rowNumPerImage)
imageDims = int(np.sqrt(imgStack.shape[1]*imgStack.shape[2]*subImageNumber))
featureNum = int(imgStack.shape[3])
bigImg = np.zeros(shape=(imageNum, imageDims, imageDims, featureNum), dtype='float32') # create array to store reconstructed images
featureRowDicts = []
for j in range(featureNum):
rowDict = {}
for i in range(iterNum):
baseNum = int(i*iterNum/imageNum)
# concatenate columns of 256x256 images to build each 256x2048 row
rowDict[i] = np.column_stack((imgStack[baseNum,:,:,j],imgStack[baseNum+1,:,:,j],
imgStack[baseNum+2,:,:,j], imgStack[baseNum+3,:,:,j],
imgStack[baseNum+4,:,:,j]))#,imgStack[baseNum+5,:,:,j],
#imgStack[baseNum+6,:,:,j],imgStack[baseNum+7,:,:,j],
#imgStack[baseNum+8,:,:,j]))
featureRowDicts.append(rowDict)
for j in range(featureNum):
for i in range(imageNum):
baseNum = int(i*rowNumPerImage)
# concatenate appropriate 256x2048 rows to build a 2048x2048 image and place it into bigImg
bigImg[i,:,:,j] = np.row_stack((featureRowDicts[j][baseNum],featureRowDicts[j][baseNum+1],
featureRowDicts[j][baseNum+2],featureRowDicts[j][baseNum+3],
featureRowDicts[j][baseNum+4]))#,featureRowDicts[j][baseNum+5],
#featureRowDicts[j][baseNum+6],featureRowDicts[j][baseNum+7],
#featureRowDicts[j][baseNum+8]))
return(bigImg)
def get_weights_array(arr=np.zeros((2048,2048)), shiftDistance=128, subImageNumber=64, padSubImageNumber=81):
originalImageWeights = get_weights(arr, subImageNumber=subImageNumber)
shiftLeftWeights = np.pad(originalImageWeights, pad_width=((0,0),(0,shiftDistance)),
mode='constant', constant_values=((0,0),(0,0)))[:,shiftDistance:]
shiftRightWeights = np.pad(originalImageWeights, pad_width=((0,0),(shiftDistance,0)),
mode='constant', constant_values=((0,0),(0,0)))[:,:(-1*shiftDistance)]
shiftUpWeights = np.pad(originalImageWeights, pad_width=((0,shiftDistance),(0,0)),
mode='constant', constant_values=((0,0),(0,0)))[shiftDistance:,:]
shiftDownWeights = np.pad(originalImageWeights, pad_width=((shiftDistance,0),(0,0)),
mode='constant', constant_values=((0,0),(0,0)))[:(-1*shiftDistance),:]
expandedImageWeights = get_weights(np.zeros((arr.shape[0]+2*shiftDistance,arr.shape[1]+2*shiftDistance)), subImageNumber=padSubImageNumber)[shiftDistance:-shiftDistance,shiftDistance:-shiftDistance]
allWeights = np.stack((originalImageWeights, expandedImageWeights, shiftUpWeights, shiftDownWeights, shiftLeftWeights,shiftRightWeights), axis=-1)
stackWeights = np.stack((allWeights,allWeights),axis=0)
stackWeights = np.stack((stackWeights,stackWeights,stackWeights),axis=3)
return(stackWeights)
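# Example (assumed 2048x2048 frames): build the weight stack used to blend the
# six shifted/padded U-net predictions. Pixels within 25 px of an internal
# crop boundary get weight 0 for that prediction, so one of the shifted
# predictions covers them instead.
#   w = get_weights_array(np.zeros((2048, 2048)), shiftDistance=256,
#                         subImageNumber=16, padSubImageNumber=25)
#   w.shape  # -> (2, 2048, 2048, 3, 6)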
# predicts locations of channels in an image using deep learning model
def get_frame_predictions(img,model,stackWeights, shiftDistance=256, subImageNumber=16, padSubImageNumber=25, debug=False):
pred = predict_first_image_channels(img, model, shiftDistance=shiftDistance,
subImageNumber=subImageNumber, padSubImageNumber=padSubImageNumber, debug=debug)[0,...]
# print(pred.shape)
if debug:
print(pred.shape)
compositePrediction = np.average(pred, axis=3, weights=stackWeights)
# print(compositePrediction.shape)
padSize = (compositePrediction.shape[0]-img.shape[0])//2
compositePrediction = util.crop(compositePrediction,((padSize,padSize),
(padSize,padSize),
(0,0)))
# print(compositePrediction.shape)
return(compositePrediction)
def apply_median_filter_normalize(imgs):
selem = morphology.disk(3)
for i in range(imgs.shape[0]):
# Store sample
tmpImg = imgs[i,:,:,0]
medImg = median(tmpImg, selem)
tmpImg = medImg/np.max(medImg)
tmpImg = np.expand_dims(tmpImg, axis=-1)
imgs[i,:,:,:] = tmpImg
return(imgs)
def predict_first_image_channels(img, model,
subImageNumber=16, padSubImageNumber=25,
shiftDistance=128, batchSize=1,
debug=False):
imgSize = img.shape[0]
padSize = (2048-imgSize)//2 # how much to pad on each side to get up to 2048x2048?
imgStack = np.pad(img, pad_width=((padSize,padSize),(padSize,padSize)),
mode='constant', constant_values=((0,0),(0,0))) # pad the images to make them 2048x2048
    # pad the stack by shiftDistance pixels (128 by default) on each side to get complementary crops that I can run the network on. This
# should help me fill in low-confidence regions where the crop boundaries were for the original image
imgStackExpand = np.pad(imgStack, pad_width=((shiftDistance,shiftDistance),(shiftDistance,shiftDistance)),
mode='constant', constant_values=((0,0),(0,0)))
imgStackShiftRight = np.pad(imgStack, pad_width=((0,0),(0,shiftDistance)),
mode='constant', constant_values=((0,0),(0,0)))[:,shiftDistance:]
imgStackShiftLeft = np.pad(imgStack, pad_width=((0,0),(shiftDistance,0)),
mode='constant', constant_values=((0,0),(0,0)))[:,:-shiftDistance]
imgStackShiftDown = np.pad(imgStack, pad_width=((0,shiftDistance),(0,0)),
mode='constant', constant_values=((0,0),(0,0)))[shiftDistance:,:]
imgStackShiftUp = np.pad(imgStack, pad_width=((shiftDistance,0),(0,0)),
mode='constant', constant_values=((0,0),(0,0)))[:-shiftDistance,:]
#print(imgStackShiftUp.shape)
crops = tileImage(imgStack, subImageNumber=subImageNumber)
print("Crops: ", crops.shape)
crops = np.expand_dims(crops, -1)
data_gen_args = {'batch_size':params['compile']['channel_prediction_batch_size'],
'n_channels':1,
'normalize_to_one':True,
'shuffle':False}
predict_gen_args = {'verbose':1,
'use_multiprocessing':True,
'workers':params['num_analyzers']}
img_generator = TrapSegmentationDataGenerator(crops, **data_gen_args)
predictions = model.predict_generator(img_generator, **predict_gen_args)
prediction = imageConcatenatorFeatures(predictions, subImageNumber=subImageNumber)
#print(prediction.shape)
cropsExpand = tileImage(imgStackExpand, subImageNumber=padSubImageNumber)
cropsExpand = np.expand_dims(cropsExpand, -1)
img_generator = TrapSegmentationDataGenerator(cropsExpand, **data_gen_args)
predictions = model.predict_generator(img_generator, **predict_gen_args)
predictionExpand = imageConcatenatorFeatures2(predictions, subImageNumber=padSubImageNumber)
predictionExpand = util.crop(predictionExpand, ((0,0),(shiftDistance,shiftDistance),(shiftDistance,shiftDistance),(0,0)))
#print(predictionExpand.shape)
cropsShiftLeft = tileImage(imgStackShiftLeft, subImageNumber=subImageNumber)
cropsShiftLeft = np.expand_dims(cropsShiftLeft, -1)
img_generator = TrapSegmentationDataGenerator(cropsShiftLeft, **data_gen_args)
predictions = model.predict_generator(img_generator, **predict_gen_args)
predictionLeft = imageConcatenatorFeatures(predictions, subImageNumber=subImageNumber)
predictionLeft = np.pad(predictionLeft, pad_width=((0,0),(0,0),(0,shiftDistance),(0,0)),
mode='constant', constant_values=((0,0),(0,0),(0,0),(0,0)))[:,:,shiftDistance:,:]
#print(predictionLeft.shape)
cropsShiftRight = tileImage(imgStackShiftRight, subImageNumber=subImageNumber)
cropsShiftRight = np.expand_dims(cropsShiftRight, -1)
img_generator = TrapSegmentationDataGenerator(cropsShiftRight, **data_gen_args)
predictions = model.predict_generator(img_generator, **predict_gen_args)
predictionRight = imageConcatenatorFeatures(predictions, subImageNumber=subImageNumber)
predictionRight = np.pad(predictionRight, pad_width=((0,0),(0,0),(shiftDistance,0),(0,0)),
mode='constant', constant_values=((0,0),(0,0),(0,0),(0,0)))[:,:,:(-1*shiftDistance),:]
#print(predictionRight.shape)
cropsShiftUp = tileImage(imgStackShiftUp, subImageNumber=subImageNumber)
#print(cropsShiftUp.shape)
cropsShiftUp = np.expand_dims(cropsShiftUp, -1)
img_generator = TrapSegmentationDataGenerator(cropsShiftUp, **data_gen_args)
predictions = model.predict_generator(img_generator, **predict_gen_args)
predictionUp = imageConcatenatorFeatures(predictions, subImageNumber=subImageNumber)
predictionUp = np.pad(predictionUp, pad_width=((0,0),(0,shiftDistance),(0,0),(0,0)),
mode='constant', constant_values=((0,0),(0,0),(0,0),(0,0)))[:,shiftDistance:,:,:]
#print(predictionUp.shape)
cropsShiftDown = tileImage(imgStackShiftDown, subImageNumber=subImageNumber)
cropsShiftDown = np.expand_dims(cropsShiftDown, -1)
img_generator = TrapSegmentationDataGenerator(cropsShiftDown, **data_gen_args)
predictions = model.predict_generator(img_generator, **predict_gen_args)
predictionDown = imageConcatenatorFeatures(predictions, subImageNumber=subImageNumber)
predictionDown = np.pad(predictionDown, pad_width=((0,0),(shiftDistance,0),(0,0),(0,0)),
mode='constant', constant_values=((0,0),(0,0),(0,0),(0,0)))[:,:(-1*shiftDistance),:,:]
#print(predictionDown.shape)
allPredictions = np.stack((prediction, predictionExpand,
predictionUp, predictionDown,
predictionLeft, predictionRight), axis=-1)
return(allPredictions)
# takes initial U-net centroids for trap locations, and creates bounding boxes for each trap at the defined height and width
def get_frame_trap_bounding_boxes(trapLabels, trapProps, trapAreaThreshold=2000, trapWidth=27, trapHeight=256):
badTrapLabels = [reg.label for reg in trapProps if reg.area < trapAreaThreshold] # filter out small "trap" regions
goodTraps = trapLabels.copy()
for label in badTrapLabels:
goodTraps[goodTraps == label] = 0 # re-label bad traps as background (0)
goodTrapProps = measure.regionprops(goodTraps)
trapCentroids = [(int(np.round(reg.centroid[0])),int(np.round(reg.centroid[1]))) for reg in goodTrapProps] # get centroids as integers
trapBboxes = []
for centroid in trapCentroids:
rowIndex = centroid[0]
colIndex = centroid[1]
minRow = rowIndex-trapHeight//2
maxRow = rowIndex+trapHeight//2
minCol = colIndex-trapWidth//2
maxCol = colIndex+trapWidth//2
if trapWidth % 2 != 0:
maxCol += 1
coordArray = np.array([minRow,maxRow,minCol,maxCol])
# remove any traps at edges of image
if np.any(coordArray > goodTraps.shape[0]):
continue
if np.any(coordArray < 0):
continue
trapBboxes.append((minRow,minCol,maxRow,maxCol))
return(trapBboxes)
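# Example usage (illustrative; assumes skimage.measure is imported as `measure`
# as elsewhere in this module and that `trap_prediction` is a 2D probability
# map produced by the trap U-net):
#   labels = measure.label(trap_prediction > 0.5)
#   props = measure.regionprops(labels)
#   bboxes = get_frame_trap_bounding_boxes(labels, props, trapAreaThreshold=2000,
#                                          trapWidth=27, trapHeight=256)
# Each returned box is (minRow, minCol, maxRow, maxCol); boxes that would
# extend past the image edge are discarded.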
# this function performs image alignment as defined by the shifts passed as an argument
def crop_traps(fileNames, trapProps, labelledTraps, bboxesDict, trap_align_metadata):
frameNum = trap_align_metadata['frame_count']
channelNum = trap_align_metadata['plane_number']
trapImagesDict = {key:np.zeros((frameNum,
trap_align_metadata['trap_height'],
trap_align_metadata['trap_width'],
channelNum)) for key in bboxesDict}
trapClosedEndPxDict = {}
flipImageDict = {}
trapMask = labelledTraps
for frame in range(frameNum):
if (frame+1) % 20 == 0:
print("Cropping trap regions for frame number {} of {}.".format(frame+1, frameNum))
imgPath = os.path.join(params['experiment_directory'],params['image_directory'],fileNames[frame])
fullFrameImg = io.imread(imgPath)
if len(fullFrameImg.shape) == 3:
            if fullFrameImg.shape[0] < 3: # for tifs with fewer than three imaging channels, the first dimension separates channels
fullFrameImg = np.transpose(fullFrameImg, (1,2,0))
trapClosedEndPxDict[fileNames[frame]] = {key:{} for key in bboxesDict.keys()}
for key in trapImagesDict.keys():
bbox = bboxesDict[key][frame]
trapImagesDict[key][frame,:,:,:] = fullFrameImg[bbox[0]:bbox[2],bbox[1]:bbox[3],:]
#tmpImg = np.reshape(fullFrameImg[trapMask==key], (trapHeight,trapWidth,channelNum))
if frame == 0:
medianProfile = np.median(trapImagesDict[key][frame,:,:,0],axis=1) # get intensity of middle column of trap
maxIntensityRow = np.argmax(medianProfile)
if maxIntensityRow > trap_align_metadata['trap_height']//2:
flipImageDict[key] = 0
else:
flipImageDict[key] = 1
if flipImageDict[key] == 1:
trapImagesDict[key][frame,:,:,:] = trapImagesDict[key][frame,::-1,:,:]
trapClosedEndPxDict[fileNames[frame]][key]['closed_end_px'] = bbox[0]
trapClosedEndPxDict[fileNames[frame]][key]['open_end_px'] = bbox[2]
else:
trapClosedEndPxDict[fileNames[frame]][key]['closed_end_px'] = bbox[2]
trapClosedEndPxDict[fileNames[frame]][key]['open_end_px'] = bbox[0]
continue
return(trapImagesDict, trapClosedEndPxDict)
# gets shifted bounding boxes to crop traps through time
def shift_bounding_boxes(bboxesDict, shifts, imgSize):
bboxesShiftDict = {}
for key in bboxesDict.keys():
bboxesShiftDict[key] = []
bboxes = bboxesDict[key]
for i in range(shifts.shape[0]):
if i == 0:
bboxesShiftDict[key].append(bboxes)
else:
minRow = bboxes[0]+shifts[i,0]
minCol = bboxes[1]+shifts[i,1]
maxRow = bboxes[2]+shifts[i,0]
maxCol = bboxes[3]+shifts[i,1]
bboxesShiftDict[key].append((minRow,
minCol,
maxRow,
maxCol))
if np.any(np.asarray([minRow,minCol,maxRow,maxCol]) < 0):
print("channel {} removed: out of frame".format(key))
del bboxesShiftDict[key]
break
if np.any(np.asarray([minRow,minCol,maxRow,maxCol]) > imgSize):
print("channel {} removed: out of frame".format(key))
del bboxesShiftDict[key]
break
return(bboxesShiftDict)
# finds the location of channels in a tif
def find_channel_locs(image_data):
'''Finds the location of channels from a phase contrast image. The channels are returned in
    a dictionary where the key is the x position of the channel in pixels and the value is a
    dictionary with the open and closed ends in pixels in y.
Called by
mm3_Compile.get_tif_params
'''
# declare temp variables from yaml parameter dict.
chan_w = params['compile']['channel_width']
chan_sep = params['compile']['channel_separation']
crop_wp = int(params['compile']['channel_width_pad'] + chan_w/2)
chan_snr = params['compile']['channel_detection_snr']
# Detect peaks in the x projection (i.e. find the channels)
projection_x = image_data.sum(axis=0).astype(np.int32)
# find_peaks_cwt is a function which attempts to find the peaks in a 1-D array by
# convolving it with a wave. here the wave is the default Mexican hat wave
# but the minimum signal to noise ratio is specified
# *** The range here should be a parameter or changed to a fraction.
peaks = find_peaks_cwt(projection_x, np.arange(chan_w-5,chan_w+5), min_snr=chan_snr)
# If the left-most peak position is within half of a channel separation,
# discard the channel from the list.
if peaks[0] < (chan_sep / 2):
peaks = peaks[1:]
    # If the difference between the right-most peak position and the right edge
# of the image is less than half of a channel separation, discard the channel.
if image_data.shape[1] - peaks[-1] < (chan_sep / 2):
peaks = peaks[:-1]
# Find the average channel ends for the y-projected image
projection_y = image_data.sum(axis=1)
# find derivative, must use int32 because it was unsigned 16b before.
proj_y_d = np.diff(projection_y.astype(np.int32))
# use the top third to look for closed end, is pixel location of highest deriv
onethirdpoint_y = int(projection_y.shape[0]/3.0)
default_closed_end_px = proj_y_d[:onethirdpoint_y].argmax()
# use bottom third to look for open end, pixel location of lowest deriv
twothirdpoint_y = int(projection_y.shape[0]*2.0/3.0)
default_open_end_px = twothirdpoint_y + proj_y_d[twothirdpoint_y:].argmin()
default_length = default_open_end_px - default_closed_end_px # used for checks
# go through peaks and assign information
# dict for channel dimensions
chnl_loc_dict = {}
# key is peak location, value is dict with {'closed_end_px': px, 'open_end_px': px}
for peak in peaks:
# set defaults
chnl_loc_dict[peak] = {'closed_end_px': default_closed_end_px,
'open_end_px': default_open_end_px}
# redo the previous y projection finding with just this channel
channel_slice = image_data[:, peak-crop_wp:peak+crop_wp]
slice_projection_y = channel_slice.sum(axis = 1)
slice_proj_y_d = np.diff(slice_projection_y.astype(np.int32))
slice_closed_end_px = slice_proj_y_d[:onethirdpoint_y].argmax()
slice_open_end_px = twothirdpoint_y + slice_proj_y_d[twothirdpoint_y:].argmin()
slice_length = slice_open_end_px - slice_closed_end_px
# check if these values make sense. If so, use them. If not, use default
        # make sure the slice length is within 15 pixels of the default length
# *** This 15 should probably be a parameter or at least changed to a fraction.
if slice_length + 15 < default_length or slice_length - 15 > default_length:
continue
# make sure ends are greater than 15 pixels from image edge
if slice_closed_end_px < 15 or slice_open_end_px > image_data.shape[0] - 15:
continue
# if you made it to this point then update the entry
chnl_loc_dict[peak] = {'closed_end_px' : slice_closed_end_px,
'open_end_px' : slice_open_end_px}
return chnl_loc_dict
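# Example of the returned structure (values are hypothetical): one entry per
# detected channel, keyed by the channel's x position (peak) in pixels.
#   chnl_loc_dict = find_channel_locs(image_data)
#   # {412: {'closed_end_px': 28, 'open_end_px': 310},
#   #  523: {'closed_end_px': 30, 'open_end_px': 312}, ...}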
# make masks from initial set of images (same images as clusters)
def make_masks(analyzed_imgs):
'''
    make_masks goes through the channel locations in the image metadata and builds a consensus
    mask for each image per fov, which it returns as a dictionary named channel_masks.
    The keys in this dictionary are fov ids, and each value is another dictionary. That dict's
    keys are channel locations (peaks) and its values are [2][2] arrays:
[[minrow, maxrow],[mincol, maxcol]] of pixel locations designating the corner of each mask
for each channel on the whole image
    One important consequence of this function is that the channel ids and the size of the
    channel slices are decided now. Updates to the mask must coordinate with these values.
Parameters
analyzed_imgs : dict
image information created by get_params
Returns
channel_masks : dict
dictionary of consensus channel masks.
Called By
mm3_Compile.py
Calls
'''
information("Determining initial channel masks...")
# declare temp variables from yaml parameter dict.
crop_wp = int(params['compile']['channel_width_pad'] + params['compile']['channel_width']/2)
chan_lp = int(params['compile']['channel_length_pad'])
    # initialize the channel_masks dictionary
channel_masks = {}
# get the size of the images (hope they are the same)
for img_k in analyzed_imgs.keys():
img_v = analyzed_imgs[img_k]
        image_rows = img_v['shape'][0] # number of rows (y pixels)
        image_cols = img_v['shape'][1] # number of columns (x pixels)
        break # just need one image's shape; avoids loading the whole dict
# get the fov ids
fovs = []
for img_k in analyzed_imgs.keys():
img_v = analyzed_imgs[img_k]
if img_v['fov'] not in fovs:
fovs.append(img_v['fov'])
# max width and length across all fovs. channels will get expanded by these values
    # this is important for later updates to the masks, which should all be the same size
max_chnl_mask_len = 0
max_chnl_mask_wid = 0
# for each fov make a channel_mask dictionary from consensus mask
for fov in fovs:
        # initialize the dict and consensus mask
channel_masks_1fov = {} # dict which holds channel masks {peak : [[y1, y2],[x1,x2]],...}
        consensus_mask = np.zeros([image_rows, image_cols])
"""Build lipid grids derived from COMFrame objects.
Classes and functions to implement lipid COM gridding and analysis for
lipid bilayers. This module defines versions that build grids off of
COMFrame objects and is meant primarily for internal use by the
BilayerAnalyzer class. The gridding and analysis procedures are based on
the descriptions given in Gapsys et al. J Comput Aided Mol Des (2013)
27:845-858, which is itself a modified version of the GridMAT-MD method
by Allen et al. Vol. 30, No. 12 Journal of Computational Chemistry.
However, I have currently left out bits of the extra functionality like
the handling of embedded proteins.
"""
# TODO (<EMAIL>): Add embedded protein functionality to lipid grid.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from builtins import object
from six.moves import range
import numpy as np
from scipy.ndimage.filters import gaussian_filter
# pybilt imports
from pybilt.common.running_stats import RunningStats
def grid_curvature(x_vals, y_vals, zgrid):
"""Compute the Mean and Gaussian curvature across a grid.
Args:
x_vals (np.array): The bin labels along the x-axis of the gridded data.
        y_vals (np.array): The bin labels along the y-axis of the gridded data.
zgrid (np.array): The 2d grid of bin values.
Returns:
tuple: Returns a 2 item tuple with the 2d numpy arrays of the curvatures with
format (mean curvature, Gaussian curvature).
"""
nxb = len(x_vals)
nyb = len(y_vals)
x_incr = x_vals[1]-x_vals[0]
y_incr = y_vals[1]-y_vals[0]
# print("x_incr {} y_incr {}".format(x_incr, y_incr))
[sy, sx] = np.gradient(zgrid, y_incr, x_incr)
[syy, syx] = np.gradient(sy, y_incr, x_incr)
[sxy, sxx] = np.gradient(sx, y_incr, x_incr)
#now get curvatures
curv_mean_u = np.zeros((nxb, nyb))
curv_gauss_u = np.zeros((nxb, nyb))
for ix in range(nxb):
for iy in range(nyb):
#upper
sx_c = sx[ix, iy]
sy_c = sy[ix, iy]
ssx = sxx[ix, iy]
ssy = syy[ix, iy]
ssxy = sxy[ix, iy]
sx_v = np.array([x_incr, 0.0, sx_c])
sy_v = np.array([0.0, y_incr, sy_c])
ssx_v = np.array([x_incr, 0.0, ssx])
ssy_v = np.array([0.0, y_incr, ssy])
ssxy_v = np.array([0.0, y_incr, ssxy])
E = np.dot(sx_v, sx_v)
F = np.dot(sx_v, sy_v)
G = np.dot(sy_v, sy_v)
n = np.cross(sx_v, sy_v)
n /=np.linalg.norm(n)
L = np.dot(ssx_v, n)
M = np.dot(ssxy_v, n)
N = np.dot(ssy_v, n)
#mean curvature
            J = (E*N+G*L-2.0*F*M)/(2.0*(E*G-F**2))
#Gaussian curvature
K = (L*N-M**2)/(E*G-F**2)
curv_mean_u[ix, iy] = J
curv_gauss_u[ix, iy] = K
# print("ix: {} iy: {} J: {} K: {}".format(ix,iy,J,K))
return (curv_mean_u, curv_gauss_u)
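# Minimal sanity check (assumed flat surface): a constant-height grid should
# give (numerically) zero mean and Gaussian curvature everywhere, since all
# second derivatives vanish.
#   x = np.linspace(0.0, 10.0, 20)
#   y = np.linspace(0.0, 10.0, 20)
#   z = np.full((20, 20), 5.0)
#   Jm, Kg = grid_curvature(x, y, z)
#   np.allclose(Jm, 0.0) and np.allclose(Kg, 0.0)  # -> True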
def grid_surface_area(x_vals, y_vals, zgrid):
"""Compute the surface area across a regular 2d grid.
Args:
x_vals (np.array): The bin labels along the x-axis of the gridded data.
        y_vals (np.array): The bin labels along the y-axis of the gridded data.
zgrid (np.array): The 2d grid of bin values.
Returns:
        float: Returns the surface area estimate.
"""
nxb = len(x_vals)
nyb = len(y_vals)
x_incr = x_vals[1]-x_vals[0]
y_incr = y_vals[1]-y_vals[0]
# print("x_incr {} y_incr {}".format(x_incr, y_incr))
[sy, sx] = np.gradient(zgrid, y_incr, x_incr)
#now get curvatures
sa = 0.0
for ix in range(nxb):
for iy in range(nyb):
#upper
sx_c = sx[ix, iy]
sy_c = sy[ix, iy]
sx_v = np.array([1.0, 0.0, sx_c])
sy_v = np.array([0.0, 1.0, sy_c])
cross = np.cross(sx_v, sy_v)
dA = np.sqrt(np.dot(cross, cross))*x_incr*y_incr
sa += dA
return sa
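# Minimal sanity check (assumed flat surface): for a flat grid the gradients
# are zero, so each grid point contributes one x_incr*y_incr patch and the
# estimate reduces to the projected area.
#   x = np.linspace(0.0, 10.0, 11)   # x_incr = 1.0
#   y = np.linspace(0.0, 10.0, 11)   # y_incr = 1.0
#   grid_surface_area(x, y, np.zeros((11, 11)))  # -> 121.0 (11*11 unit patches)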
class LipidGrid2d(object):
"""A 2d lipid grid object.
This object is used by the LipidGrids object to construct a 2d grid for
a bilayer leaflet and assign lipids to it using the coordinates derived
from a COMFrame representation object.
Attributes:
frame (COMFrame): Stores a local copy of the COMFrame object from
which the the coordinate data and lipid type data is derived from.
x_nbins (int): The number of grid bins in the 'x' dimension.
y_nbins (int): The number of grid bins in the 'y' dimension.
x_min (float): The lower boundary of the grid range in the 'x'
dimension.
x_max (float): The upper boundary of the grid range in the 'x'
dimension.
x_incr (float): The size of grid boxes (or spacing between grid
points) in the 'x' dimension.
x_centers (np.array): The center points of the grid boxes in the 'x'
dimension.
x_edges (np.array): The edges of the grid boxes in the 'x' dimension.
y_min (float): The lower boundary of the grid range in the 'y'
dimension.
y_max (float): The upper boundary of the grid range in the 'y'
dimension.
y_incr (float): The size of grid boxes (or spacing between grid
points) in the 'y' dimension.
y_centers (np.array): The center points of the grid boxes in the 'y'
dimension.
y_edges (np.array): The edges of the grid boxes in the 'y' dimension.
lipid_grid (np.array): A 2d array of size x_nbins*y_nbins that stores
the index of lipids from the COMFrame that are assigned to each
grid box.
lipid_grid_z (np.array): A 2d array of size x_nbins*y_nbins that stores
the z coordinate of the lipids assigned to each grid box.
"""
def __init__(self, com_frame, com_frame_indices, plane, nxbins=50,
nybins=50):
"""Initialize the LipidGrid2d object.
Args:
com_frame (COMFrame): The instance of COMFrame from which to
pull the coordinates for lipids to use when building the grid.
com_frame_indices (list): A list COMFrame lipid indices to
include when building the grid.
plane (list): The indices from the 3d coordinates for the
coordinates that correspond to the bilayer lateral plane.
nxbins (Optional[int]): The number of bins along the 'x'
dimension, i.e. along the dimension corresponding to plane[0].
Defaults to 50.
            nybins (Optional[int]): The number of bins along the 'y'
dimension, i.e. along the dimension corresponding to
plane[1]. Defaults to 50.
"""
# store the frame and leaflet
self.frame = com_frame
# self.leaflet = ms_leaflet
# get the x and y indices
ix = plane[0]
iy = plane[1]
iz = [i for i in [0, 1, 2] if i not in plane][0]
        # get the box dimensions
box = com_frame.box[plane]
boxx = box[ix]
boxy = box[iy]
# save the numbers of bins
self.x_nbins = nxbins
self.y_nbins = nybins
        # initialize the edges and centers of the gridpoints
# x
self.x_min = 0.0
self.x_max = boxx
self.x_edges = np.linspace(self.x_min, self.x_max, (nxbins + 1),
endpoint=True)
self.x_incr = self.x_edges[1] - self.x_edges[0]
x_incr_h = self.x_incr / 2.0
self.x_centers = np.zeros(nxbins)
self._x_nedges = len(self.x_edges)
for i in range(1, self._x_nedges):
j = i-1
self.x_centers[j] = self.x_edges[j] + x_incr_h
# y
self.y_min = 0.0
self.y_max = boxy
self.y_edges = np.linspace(self.y_min, self.y_max, (nybins + 1),
endpoint=True)
self.y_incr = self.y_edges[1] - self.y_edges[0]
y_incr_h = self.y_incr / 2.0
self.y_centers = np.zeros(nybins)
self.y_nedges = len(self.y_edges)
        for i in range(1, self.y_nedges):
j = i - 1
self.y_centers[j] = self.y_edges[j] + y_incr_h
self.x_length = self.x_max - self.x_min
self.y_length = self.y_max - self.y_min
# get the lipid indices for this leaflet
indices = com_frame_indices
# now assign lipids to the gridpoints
        self.lipid_grid = np.zeros((nxbins, nybins), dtype=int)
self.lipid_grid_z = np.zeros((nxbins, nybins))
bxh = boxx / 2.0
byh = boxy / 2.0
cx = 0
for x in self.x_centers:
cy = 0
for y in self.y_centers:
r_min = 1.0e10
i_min = 0
z_min = 0.0
#check lipid COMs
for i in indices:
xi = com_frame.lipidcom[i].com[ix]
yi = com_frame.lipidcom[i].com[iy]
zi = com_frame.lipidcom[i].com_unwrap[iz]
#print "iz ",iz," zi ",zi
dx = x - xi
dy = y - yi
#Minimum image -- coordinates must be pre-wrapped
if np.absolute(dx) > bxh:
dx = boxx - np.absolute(x - bxh) - np.absolute(xi -
bxh)
                    if np.absolute(dy) > byh:
dy = boxy - np.absolute(y - byh) - np.absolute(yi -
byh)
rxy = np.sqrt(dx**2 + dy**2)
if rxy < r_min:
r_min = rxy
i_min = i
z_min = zi
self.lipid_grid[cx,cy] = i_min
self.lipid_grid_z[cx,cy] = z_min
cy += 1
cx += 1
def get_index_at(self, ix, iy):
"""Returns the COMFrame index of the lipid at the specified position
in the lipid_grid.
Args:
ix (int): The 'x' index in the lipid_grid.
            iy (int): The 'y' index in the lipid_grid.
Returns:
int: The index of the lipid.
"""
return self.lipid_grid[ix, iy]
def get_z_at(self, ix, iy):
"""Returns the z coordinate of the lipid at the specified position
in the lipid_grid.
Args:
ix (int): The 'x' index in the lipid_grid.
            iy (int): The 'y' index in the lipid_grid.
Returns:
float: The z coordinate of the lipid.
"""
return self.lipid_grid_z[ix, iy]
def z_perturb_grid(self):
"""Returns the array with z coordinates shifted by the mean.
Returns:
np.array: The mean shifted z coordinate array.
"""
z_grid = self.lipid_grid_z
z_avg = z_grid.mean()
z_pert = z_grid - z_avg
return z_pert
# Outputs the grid as an xyz coordinate file
def write_xyz(self, xyz_name):
"""Write out the lipid grid as an xyz coordinate file.
Args:
xyz_name (str): File path and name for the output file.
"""
# Open up the file to write to
xyz_out = open(xyz_name, "w")
npoints = self.x_nbins*self.y_nbins
comment = "Leaflet Grid " + self.leaflet.name
xyz_out.write(str(npoints))
xyz_out.write("\n")
xyz_out.write(comment)
xyz_out.write("\n")
cx=0
for x in self.x_centers:
cy=0
for y in self.y_centers:
#get the z coordinate
z = self.lipid_grid_z[cx,cy]
#get the lipid resname
ic = self.lipid_grid[cx,cy]
oname = self.frame.lipidcom[ic].type
#write to file
line = str(oname)+" "+str(x)+" "+str(y)+" "+str(z)
xyz_out.write(line)
xyz_out.write("\n")
cy+=1
cx+=1
xyz_out.close()
return
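# Example usage (a sketch; assumes a COMFrame instance `frame` and a list of
# leaflet lipid indices `upper_indices` are already available):
#   grid = LipidGrid2d(frame, upper_indices, plane=[0, 1], nxbins=50, nybins=50)
#   grid.get_index_at(0, 0)   # COMFrame index of the lipid assigned to bin (0, 0)
#   grid.get_z_at(0, 0)       # its unwrapped z coordinate
#   grid.write_xyz('upper_leaflet_grid.xyz')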
class LipidGrids(object):
def __init__(self, com_frame, leaflets, plane, nxbins=50, nybins=50):
#store the frame and leaflet
self.frame = com_frame
self.leaflets = leaflets
self.plane = plane
self.norm = [i for i in [0,1,2] if i not in plane][0]
self.nbins_x = nxbins
self.nbins_y = nybins
self.leaf_grid = {}
self.myframe = com_frame.mdnumber
#initialize the grids
#upper
upper_indices = leaflets['upper'].get_member_indices()
self.leaf_grid['upper'] = LipidGrid2d(com_frame, upper_indices,
plane, nxbins=nxbins,
nybins=nybins)
#lower
lower_indices = leaflets['lower'].get_member_indices()
self.leaf_grid['lower'] = LipidGrid2d(com_frame, lower_indices,
plane, nxbins=nxbins,
nybins=nybins)
return
def thickness_grid(self):
tgrid = np.zeros((self.nbins_x, self.nbins_y))
for ix in range(self.nbins_x):
for iy in range(self.nbins_y):
zu = self.leaf_grid['upper'].get_z_at(ix, iy)
zl = self.leaf_grid['lower'].get_z_at(ix, iy)
dz = zu - zl
tgrid[ix,iy] = dz
if dz < 0.0:
print("Warning!!--MD frame number ",self.myframe," --Value thickness less than zero (",dz,") at grid point ",ix," ",iy)
return tgrid
def average_thickness(self, return_grid=False):
trun = RunningStats()
tgrid = self.thickness_grid()
for ix in range(self.nbins_x):
for iy in range(self.nbins_y):
tc = tgrid[ix, iy]
trun.push(tc)
avg_out = (trun.mean(), trun.deviation())
if return_grid:
return avg_out, tgrid
else:
return avg_out
def map_to_grid(self, com_values_dict, leaflet='both'):
do_leaflet = []
if leaflet == "both":
do_leaflet.append('upper')
do_leaflet.append('lower')
elif leaflet == "upper" or leaflet == "lower":
do_leaflet.append(leaflet)
else:
#unknown option--use default "both"
print("!! Warning - request for unknown leaflet name \'",leaflet,"\' from the LeafletGrids of frame ",self.myframe)
print("!! the options are \"upper\", \"lower\", or \"both\"--using the default \"both\"")
do_leaflet.append('upper')
do_leaflet.append('lower')
out_dict = {}
for leaf in do_leaflet:
            out_dict[leaf] = np.zeros((self.nbins_x,self.nbins_y))
import cv2
import numpy as np
import numpy.matlib  # needed for np.matlib.repmat in kuwahara_filter
import math
from scipy.signal import convolve2d
import matplotlib.pyplot as plt
from collections import deque
from skimage.segmentation import slic
from skimage import morphology
import random
from scipy.ndimage import label,sum
from functools import reduce
# Many functions have been adapted from <NAME> : https://www.peterkovesi.com/matlabfns/
def plotPoints(img,points, color = 'red', size=10):
implot = plt.imshow(img)
# put a blue dot at (10, 20)
points_x = points[:,0]
points_y = points[:,1]
plt.scatter([points_x], [points_y],c=color,s=size)
plt.show()
def plotHist(img):
# hist,bins = np.histogram(img.flatten(),256,[0,256])
plt.hist(img.flatten(),256,[0,256], color = 'r')
plt.xlim([0,256])
plt.legend(('cdf','histogram'), loc = 'upper left')
plt.show()
def normalise(im, reqmean = 0, reqvar = 1):
    # NOTE: the mean/variance normalisation is currently disabled below, so this
    # only casts the image to float32; reqmean and reqvar are unused.
    im = np.array(im,dtype = np.float32)
    #im = im - np.mean(im)
    #im = im / np.std(im)
    # n = reqmean + im * np.sqrt(reqvar);
    return im
def canny(i_image,isigma):
image = gaussfilt(i_image,isigma)
Ix,Iy = derivative5(image)
Ix_2 = np.multiply(Ix,Ix)
Iy_2 = np.multiply(Iy,Iy)
gradient = np.sqrt(Ix_2 + Iy_2) # Gradient magnitude.
orientation = np.arctan2(-Iy, Ix) # Angles -pi to + pi.
orientation[orientation<0] = orientation[orientation<0]+np.pi; # Map angles to 0-pi.
orientation = orientation*180/np.pi;
return gradient,orientation
def gaussfilt(img,sigma):
sze = int(math.ceil(6*sigma))
if(sze%2 == 0):
sze = sze+1
h = fspecial_gauss2D((sze,sze),sigma)
# conv2(image, mask) is the same as filter2(rot90(mask,2), image)
image = convolve2d(img,h,'same')
return image
def fspecial_gauss2D(shape=(3,3),sigma=0.5):
"""
2D gaussian mask - should give the same result as MATLAB's
fspecial('gaussian',[shape],[sigma])
"""
m,n = [(ss-1.)/2. for ss in shape]
y,x = np.ogrid[-m:m+1,-n:n+1]
h = np.exp( -(x*x + y*y) / (2.*sigma*sigma) )
h[ h < np.finfo(h.dtype).eps*h.max() ] = 0
sumh = h.sum()
if sumh != 0:
h /= sumh
return h
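# Example: the kernel is normalised to sum to 1, so it can be used directly
# with convolve2d as a smoothing filter (values here are the standard 3x3
# Gaussian with sigma = 0.5).
#   h = fspecial_gauss2D((3, 3), 0.5)
#   h.sum()  # -> 1.0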
def derivative5(i_image):
    # 5 tap 1st derivative coefficients. These are optimal if you are just
# seeking the 1st derivatives
# Copyright (c) 2010 <NAME>
p = np.array([0.037659,0.249153,0.426375,0.249153,0.037659], dtype = np.float32)
d1 =np.array([0.109604,0.276691,0.000000,-0.276691,-0.109604],dtype = np.float32)
a = p[:,np.newaxis]*d1.transpose()
b = d1[:,np.newaxis]*p.transpose()
Ix = convolve2d(i_image,a,'same')
Iy = convolve2d(i_image,b,'same')
return Ix,Iy
def floodfill(bw, r, c, N):
filled = np.zeros(bw.shape)
theStack = deque(zip(r, c))
m, n = bw.shape
while len(theStack) > 0:
x, y = theStack.pop()
if x < 0:
x = 0
if x >= n:
x = n - 1
if y < 0:
y = 0
if y >= m:
y = m - 1
if filled[x, y] == 1:
continue
if bw[x, y] == 0:
continue
filled[x, y] = 1
theStack.append((x + 1, y)) # right
theStack.append((x - 1, y)) # left
theStack.append((x, y + 1)) # down
theStack.append((x, y - 1)) # up
if (N == 8):
theStack.append((x + 1, y + 1)) # d right
theStack.append((x - 1, y - 1)) # d left
theStack.append((x - 1, y + 1)) # down
theStack.append((x + 1, y - 1)) # up
return filled
class Pixel:
value = 0
i = 0
j = 0
distance = 0
label = 0
def __init__(self,distance,i,j,label):
self.distance = distance
self.i = i
self.j = j
self.label = label
def propagate(img,mask,seeds,ilambda):
labels_out = np.copy(seeds)
dists = np.full(img.shape,np.inf)
dists[seeds>0] = 0
pq = deque([])
total_seeds = seeds.max()+1
for i in range(1,total_seeds):
# Get all pixel coordinates from pixels that are seeds
listpx, listpy = np.where(seeds==i)
for x,y in zip(listpx,listpy):
push_neighs_on_queue(pq,0.0,x,y ,img,ilambda,i,labels_out, mask)
while(len(pq)>0):
p = pq.popleft()
if(dists[p.i,p.j]>p.distance):
dists[p.i,p.j] = p.distance
labels_out[p.i,p.j] = p.label
push_neighs_on_queue(pq, p.distance,p.i,p.j, img, ilambda, labels_out[p.i,p.j], labels_out, mask)
return dists,labels_out
def clamped_fetch(img,i,j):
m,n = img.shape
if i < 0:
i = 0
if i >= n:
i = n-1
if j < 0:
j = 0
if j >= m:
j = m-1
return img[i,j]
def difference(img,i1,j1,i2,j2,ilambda):
pixel_diff = 0
#s1 = integrate(ii,i1-1,j1-1,i1+1,j1+1)
#s2 = integrate(ii,i2-1,j2-1,i2+1,j2+1)
#pixel_diff = np.abs(s1-s2)
dEucl = (i1-i2)*(i1-i2) + (j1-j2)*(j1-j2)
#fdist =np.sqrt((pixel_diff * pixel_diff +dEucl*dEucl*ilambda*ilambda)) # / (1.0 +ilambda ))
return int(dEucl*ilambda)
#return np.sqrt((pixel_diff * pixel_diff +ilambda *dEucl) / (1.0 +ilambda ))
#return (sqrt(pixel_diff * pixel_diff + (fabs((double) i1 - i2) + fabs((double) j1 - j2)) * lambda * lambda ));
def push_neighs_on_queue(pq,distance,i,j,img,ilambda,label, labels_out, mask):
# 4-connected
m,n = img.shape
if (i > 0):
val = labels_out[i-1,j]
if (val==0 and mask[i-1, j]>0):
delta_d = difference(img, i, j, i-1, j, ilambda) # if the neighbor was not labeled, do pushing
pix = Pixel(distance + delta_d, i-1, j, label)
pq.append(pix)
if (j > 0):
val = labels_out[i,j-1]
if val==0 and mask[i, j-1]!=0 :
delta_d = difference(img,i,j,i,j-1,ilambda)
pix = Pixel(distance + delta_d, i, j-1, label)
pq.append(pix)
if i<(n-1):
val = labels_out[i+1,j]
if (val==0 and mask[i+1, j]!=0) :
delta_d = difference(img, i, j, i+1, j , ilambda)
pix = Pixel(distance + delta_d, i+1, j , label)
pq.append(pix)
if (j < (m-1)):
val = labels_out[i,j+1]
if val==0 and (mask[i, j+1]!=0):
delta_d = difference(img, i, j, i, j + 1, ilambda)
pix = Pixel(distance + delta_d, i, j + 1, label)
pq.append(pix)
# 8-connected
if (i > 0) and (j > 0):
val = labels_out[i-1,j-1]
if(val==0 and mask[i-1, j-1]!=0):
delta_d = difference(img, i, j, i-1, j - 1, ilambda)
pix = Pixel(distance + delta_d, i-1, j - 1, label)
pq.append(pix)
if (i < (n-1) and (j > 0)):
val=labels_out[i+1,j-1]
if (val==0 and (mask[i+1, j-1])!=0):
delta_d = difference(img, i, j, i+1, j - 1, ilambda)
pix = Pixel(distance + delta_d, i+1, j - 1, label)
pq.append(pix)
if (i > 0) and j < (m-1):
val =labels_out[i-1,j+1]
if (val==0 and mask[i-1, j+1]!=0 ):
delta_d = difference(img, i, j, i-1, j + 1, ilambda)
pix = Pixel(distance + delta_d, i-1, j + 1, label)
pq.append(pix)
if (i < (n-1) and j < (m-1)):
val=labels_out[i+1,j+1]
if val==0 and (mask[i+1, j+1]!=0):
delta_d = difference(img, i, j, i+1, j + 1, ilambda)
pix = Pixel(distance + delta_d, i+1, j + 1, label)
pq.append(pix)
return
def integral_image(x):
"""Integral image / summed area table.
The integral image contains the sum of all elements above and to the
left of it, i.e.:
.. math::
S[m, n] = \sum_{i \leq m} \sum_{j \leq n} X[i, j]
Parameters
----------
x : ndarray
Input image.
Returns
-------
S : ndarray
Integral image / summed area table.
References
----------
.. [1] <NAME>, "Summed-area tables for texture mapping,"
ACM SIGGRAPH Computer Graphics, vol. 18, 1984, pp. 207-212.
"""
return x.cumsum(1).cumsum(0)
def integrate(ii, r0, c0, r1, c1):
"""Use an integral image to integrate over a given window.
Parameters
----------
ii : ndarray
Integral image.
r0, c0 : int
Top-left corner of block to be summed.
r1, c1 : int
Bottom-right corner of block to be summed.
Returns
-------
S : int
Integral (sum) over the given window.
"""
S = 0
S += clamped_fetch(ii,r1,c1)
if (r0 - 1 >= 0) and (c0 - 1 >= 0):
S += clamped_fetch(ii,r0-1,c0-1)
if (r0 - 1 >= 0):
S -= clamped_fetch(ii,r0-1,c1)
if (c0 - 1 >= 0):
S -= clamped_fetch(ii,r1,c0-1)
return S
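# Example: summing a block of ones with the integral image. The window is
# inclusive of both corners, so the 3x3 block from (2,2) to (4,4) sums to 9.
#   ii = integral_image(np.ones((10, 10)))
#   integrate(ii, 2, 2, 4, 4)  # -> 9.0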
def softmax(y):
s = np.exp(y)
y_prob = s / np.sum(s)
return y_prob
def remove_borders(img,border):
# remove borders
m,n = img.shape
img[:border, :] = 0
img[-border:, :] = 0
img[:, :border] = 0
img[:, -border:] = 0
return img
def ridgeorient(im,gradientsigma,blocksigma,orientsmoothsigma, rel = 0.01):
# Arguments: im - A normalised input image.
# gradientsigma - Sigma of the derivative of Gaussian
# used to compute image gradients.
# blocksigma - Sigma of the Gaussian weighting used to
# sum the gradient moments.
# orientsmoothsigma - Sigma of the Gaussian used to smooth
# the final orientation vector field.
    #                       Optional: if omitted it defaults to 0
# Returns: orientim - The orientation image in radians.
# Orientation values are +ve clockwise
# and give the direction *along* the
# ridges.
# reliability - Measure of the reliability of the
# orientation measure. This is a value
# between 0 and 1. I think a value above
# about 0.5 can be considered 'reliable'.
# reliability = 1 - Imin./(Imax+.001);
# coherence - A measure of the degree to which the local
# area is oriented.
# coherence = ((Imax-Imin)./(Imax+Imin)).^2;
rows,cols = im.shape
# Calculate image gradients.
sze = int(np.fix(6*gradientsigma))
if(sze%2 == 0):
sze = sze+1
h = fspecial_gauss2D((sze,sze),gradientsigma)
fx,fy = np.gradient(h) # Gradient of Gausian.
Gx = convolve2d(im, fx,'same') # Gradient of the image in x
Gy = convolve2d(im, fy, 'same') # ... and y
# Estimate the local ridge orientation at each point by finding the
# principal axis of variation in the image gradients.
Gxx = np.multiply(Gx,Gx) # Covariance data for the image gradients
Gxy = np.multiply(Gx,Gy)
Gyy = np.multiply(Gy,Gy)
# Now smooth the covariance data to perform a weighted summation of the data.
sze = int(np.fix(6*blocksigma))
if(sze%2 == 0):
sze = sze+1
h = fspecial_gauss2D((sze,sze),blocksigma)
Gxx = convolve2d(Gxx, h,'same');
Gxy = 2*convolve2d(Gxy,h,'same');
Gyy = convolve2d(Gyy,h,'same');
# Analytic solution of principal direction
Gxy_2 = np.multiply(Gxy,Gxy)
Gm = Gxx-Gyy
Gm = np.multiply(Gm,Gm)
denom = np.sqrt(Gxy_2 + Gm) + np.spacing(1)
sin2theta = np.divide(Gxy,denom) # Sine and cosine of doubled angles
cos2theta = np.divide(Gxx-Gyy,denom)
sze = int(np.fix(6*orientsmoothsigma))
if(sze%2 == 0):
sze = sze+1
h = fspecial_gauss2D((sze,sze),orientsmoothsigma)
cos2theta = convolve2d(cos2theta,h,'same')# Smoothed sine and cosine of
sin2theta = convolve2d(sin2theta,h,'same'); # doubled angles
orientim = np.pi/2 + np.arctan2(sin2theta,cos2theta)/2;
# Calculate 'reliability' of orientation data. Here we calculate the
# area moment of inertia about the orientation axis found (this will
# be the minimum inertia) and an axis perpendicular (which will be
# the maximum inertia). The reliability measure is given by
# 1.0-min_inertia/max_inertia. The reasoning being that if the ratio
# of the minimum to maximum inertia is close to one we have little
# orientation information.
Imin = (Gyy+Gxx)/2
Imin = Imin - np.multiply((Gxx-Gyy),cos2theta)/2 - np.multiply(Gxy,sin2theta)/2
Imax = Gyy+Gxx - Imin
reliability = 1 - np.divide(Imin,(Imax+.001))
# aux = Imax+Imin
# aux = np.multiply(aux,aux)
# coherence = np.divide((Imax-Imin),aux)
# Finally mask reliability to exclude regions where the denominator
# in the orientation calculation above was small. Here I have set
# the value to 0.001, adjust this if you feel the need
reliability = np.multiply(reliability,(denom>rel))
return orientim,reliability
def SWT(i_img, edgeImage, orientim, stroke_width=20, angle=np.pi / 6):
orientim = np.radians(orientim)
im = gaussfilt(i_img, 1)
Ix, Iy = derivative5(im)
Ix_2 = np.multiply(Ix, Ix)
Iy_2 = np.multiply(Iy, Iy)
g_mag = np.sqrt(Ix_2 + Iy_2) # Gradient magnitude.
Ix = np.divide(Ix, g_mag)
Iy = np.divide(Iy, g_mag)
cres = 0
prec = 0.4
mSWT = -np.ones(i_img.shape)
count = 1
h_stroke = stroke_width * 0.5
rows, cols = i_img.shape
for i in range(rows):
for j in range(cols):
if (edgeImage[i, j] > 0):
count = 0
points_x = []
points_y = []
points_x.append(j)
points_y.append(i)
count += 1
curX = float(j) + 0.5
curY = float(i) + 0.5
cres = 0
while cres < stroke_width:
curX = curX + Ix[i, j] * prec # find directionality increments x or y
curY = curY + Iy[i, j] * prec
cres = cres + 1
curPixX = int(math.floor(curX))
curPixY = int(math.floor(curY))
if (curPixX < 0 or curPixX > cols - 1 or curPixY < 0 or curPixY > rows - 1):
break
points_x.append(curPixX)
points_y.append(curPixY)
count += 1
if (edgeImage[curPixY, curPixX] > 0 and count < 21):
ang_plus = orientim[i, j] + angle
if (ang_plus > np.pi):
ang_plus = np.pi
ang_minus = orientim[i, j] - angle
if (ang_minus < 0):
ang_minus = 0
if ((orientim[curPixY, curPixX] < ang_plus) and (
orientim[curPixY, curPixX] > ang_minus) and count > h_stroke):
dist = math.sqrt((curPixX - j) * (curPixX - j) + (curPixY - i) * (curPixY - i))
for k in range(count - 1):
if (mSWT[points_y[k], points_x[k]] < 0):
mSWT[points_y[k], points_x[k]] = dist
else:
mSWT[points_y[k], points_x[k]] = np.min([dist, mSWT[points_y[k], points_x[k]]])
if (count > stroke_width):
break
return mSWT
def SWT_Total(i_image, edges, orientation, stroke_width, angle=np.pi / 6):
inv_iim = 255 - i_image # needed for shadowing
swtim = SWT(i_image, edges, orientation, stroke_width, angle) # one image
swtinv_im = SWT(inv_iim, edges, orientation, stroke_width, angle) # the inverse
swtim[np.nonzero(swtim < 0)] = 0
swtinv_im[np.nonzero(swtinv_im < 0)] = 0
swt_end = swtim
indexes = np.nonzero(swtim == 0)
swt_end[indexes] = swtinv_im[indexes]
return swt_end
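# Example pipeline (illustrative parameter values, not prescribed by this
# module): run the SWT on both the image and its inverse so that dark-on-light
# and light-on-dark strokes are both captured, then merge the two maps.
#   grad, orient = canny(img, 1.5)                  # orientation in degrees, 0-180
#   nm = nonmaxsup_python(grad, orient, radius=1.2)
#   edges = autocanny(nm, 0.075)                    # keep roughly 7.5% of pixels as edges
#   swt = SWT_Total(img, edges, orient, stroke_width=20)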
def hysthresh(image,T1,T2):
if T1 < T2 : # T1 and T2 reversed - swap values
tmp = T1
T1 = T2
T2 = tmp
aboveT2 = image > T2; # Edge points above lower threshold.
[aboveT1r,aboveT1c] = np.nonzero(image > T1); # Row and colum coords of points above upper threshold.
# Obtain all connected regions in aboveT2 that include a point that has a
# value above T1
bw = floodfill(aboveT2, aboveT1r, aboveT1c, 8)
return bw
def cleanswt2(swt,edges):
    # remove small connected components (shorter than ~5% of the image width)
    mask = swt > 0
    labeled, nr_objects = label(mask)
    w, h = swt.shape
    max_pix = (0.05 * w)
    for i in range(1, nr_objects + 1):
        numpix = np.sum(labeled == i)
        if(numpix < max_pix):
            swt[labeled == i] = 0
    swt[edges > 0] = np.max(swt)
    return swt
def autocanny(nm,canthresh):
m,n = nm.shape
im_size = np.array([m,n])
med = float(np.median(nm[nm > 0]))
max_factor = 0.95 * np.max(nm)
factor_a = max_factor
factor_b_p = 0.4*med
bwedge = []
value = 0
msize = m*n
max_pix = int(msize*canthresh)
iter = 0
while (value < max_pix and iter<50):
iter = iter+1
bwedge = hysthresh(nm, factor_a * med, factor_b_p)
value = np.sum(bwedge)
factor_a = factor_a * 0.9
if (factor_a < 1e-15):
break
c1 = 0
alpha_1 = 0.01
alpha_2 = 0.01
inv = True
iter = 0
while (np.abs(value-max_pix)>200 and iter<20):
bwedge = hysthresh(nm, factor_a * med, factor_b_p)
value = np.sum(bwedge)
iter = iter+1
if(value<max_pix):
if(inv):
alpha_1 = 0.01
inv = False
factor_a = factor_a - alpha_1
c1 = c1 + 1
if(c1==2):
alpha_1 = alpha_1 * 2
c1 = 0
else:
if(not inv):
alpha_2 = 0.01
inv = True
c1 = c1 - 1
factor_a = factor_a + alpha_2
if(c1 == -2 ):
alpha_2 = alpha_2 * 2
c1 = 0
return bwedge
def autocanny2(prepro, nm, canthresh, blocksize):
m,n = prepro.shape
im_size = np.array([m,n])
size_pixels = im_size / blocksize
size_pixels = int(size_pixels[0] * size_pixels[1])
# Clustering of image
segments = slic(prepro, n_segments=size_pixels, sigma=1.5, compactness=0.08, start_label=0)
num_labels = np.max(segments) + 1
med = float(np.median(nm[nm > 0]))
max_factor = 0.95 * np.max(nm)
factor_a = max_factor
factor_b_p = 0.4*med
bwedge = []
value = 0
msize = m*n
max_pix = int(msize*canthresh)
while (value < max_pix):
bwedge = hysthresh(nm, factor_a * med, factor_b_p)
value = np.sum(bwedge)
factor_a = factor_a * 0.9
if (factor_a < 1e-15):
break
f = []
f.append(factor_a)
factor_original = factor_a
c1 = 0
alpha_1 = 0.01
alpha_2 = 0.01
inv = True
iter = 0
while (np.abs(value-max_pix)>200 and iter<20):
bwedge = hysthresh(nm, factor_a * med, factor_b_p)
value = np.sum(bwedge)
iter = iter+1
if(value<max_pix):
if(inv):
alpha_1 = 0.01
inv = False
factor_a = factor_a - alpha_1
c1 = c1 + 1
if(c1==2):
alpha_1 = alpha_1 * 2
c1 = 0
else:
if(not inv):
alpha_2 = 0.01
inv = True
c1 = c1 - 1
factor_a = factor_a + alpha_2
if(c1 == -2 ):
alpha_2 = alpha_2 * 2
c1 = 0
f.append(factor_a)
expected_density = (msize * canthresh) / size_pixels # Expected
label_counter = 0
for i in range(num_labels):
label_density = np.sum(bwedge[np.where(segments == i)])
if (label_density < 2 * expected_density):
nm[segments == i]= 0
else:
bwedge[np.where(segments == i)] = 0;
label_counter = label_counter + 1
subsize = label_counter * blocksize * blocksize
max_pix = (subsize/(msize*1.0))*canthresh
factor_a = max_factor
value = 0
bwedge2 = np.zeros((m,n))
while (value < max_pix):
bwedge2 = hysthresh(nm, factor_a * med, factor_b_p);
value = np.sum(bwedge2)/subsize
factor_a = factor_a * 0.9;
if (factor_a < 1e-15):
break
f = []
f.append(factor_a)
factor_original = factor_a
c1 = 0
alpha_1 = 0.01
alpha_2 = 0.01
inv = True
iter = 0
while (np.abs(value-max_pix)>0.001 and iter<20):
bwedge2 = hysthresh(nm, factor_a * med, factor_b_p)
value = np.sum(bwedge2)/subsize
iter = iter+1
if(value<max_pix):
if(inv):
alpha_1 = 0.01
inv = False
factor_a = factor_a - alpha_1
c1 = c1 + 1
if(c1==2):
alpha_1 = alpha_1 * 2
c1 = 0
else:
if(not inv):
alpha_2 = 0.01
inv = True
c1 = c1 - 1
factor_a = factor_a + alpha_2
if(c1 == -2 ):
alpha_2 = alpha_2 * 2
c1 = 0
f.append(factor_a)
bwedge = np.logical_or(bwedge, bwedge2)
return bwedge
def kuwahara_filter(input,winsize):
# Kuwahara filters an image using the Kuwahara filter
"""
filtered = Kuwahara(original, windowSize)
    filters the image with a given windowSize and yields the result in filtered.
    It uses: variance = (mean of squares) - (square of mean).
filtered = Kuwahara(original, 5);
    Description : The Kuwahara filter works on a window divided into 4 overlapping subwindows.
    In each subwindow the mean and the variance are computed. The output value (located at the center of the window)
    is set to the mean of the subwindow with the smallest variance.
References:
http: // www.ph.tn.tudelft.nl / DIPlib / docs / FIP.pdf
http: // www.incx.nec.co.jp / imap - vision / library / wouter / kuwahara.html
:param input:
:param winsize:
:return:
"""
input = np.array(input,dtype = np.float64)
m,n = input.shape
if (winsize%4) != 1 :
return
    # use integer division so the kernel shapes / repeat counts stay ints under Python 3
    tmpAvgKerRow = np.concatenate((np.ones((1, (winsize - 1) // 2 + 1)), np.zeros((1, (winsize - 1) // 2))), axis=1)
    tmpPadder = np.zeros((1, winsize))
    tmpavgker = np.matlib.repmat(tmpAvgKerRow, (winsize - 1) // 2 + 1, 1)
    tmpavgker = np.concatenate((tmpavgker, np.matlib.repmat(tmpPadder, (winsize - 1) // 2, 1)))
    tmpavgker = tmpavgker / np.sum(tmpavgker)
    # tmpavgker is the 'north-west' subwindow averaging kernel
t1,t2 = tmpavgker.shape
avgker = np.zeros((t1,t2,4))
avgker[:,:, 0] = tmpavgker # North - west(a)
avgker[:,:, 1] = np.fliplr(tmpavgker) # North - east(b)
avgker[:,:, 3] = np.flipud(tmpavgker) # South - east(c)
avgker[:,:, 2] = np.fliplr(np.flipud(tmpavgker)) # South - west(d)
squaredImg = input**2
avgs = np.zeros((m,n,4))
stddevs = np.zeros((m,n,4))
## Calculation of averages and variances on subwindows
for k in range(0,4):
avgs[:,:, k] = convolve2d(input, avgker[:,:, k], 'same') # mean
stddevs[:,:, k] = convolve2d(squaredImg, avgker[:,:, k], 'same') # mean
stddevs[:,:, k] = stddevs[:,:, k]-avgs[:,:, k]**2 # variance
# minima = np.min(stddevs, axis=2)
indices = np.argmin(stddevs,axis = 2)
filtered = np.zeros(input.shape)
for k in range(m) :
for i in range(n):
filtered[k, i] = avgs[k, i, indices[k, i]]
return filtered
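# Example usage (assumed 5x5 window; note that np.matlib.repmat above relies on
# the `import numpy.matlib` at the top of this module):
#   smoothed = kuwahara_filter(gray_image, 5)
# Each output pixel is the mean of whichever of the four overlapping
# subwindows around it has the smallest variance, which smooths noise while
# preserving edges.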
def nonmaxsup_python(gradient,orientation,radius = 1.2):
"""
# Input:
# inimage - Image to be non-maxima suppressed.
# orient - Image containing feature normal orientation angles in degrees
# (0-180), angles positive anti-clockwise.
# radius - Distance in pixel units to be looked at on each side of each
# pixel when determining whether it is a local maxima or not.
# This value cannot be less than 1.
# (Suggested value about 1.2 - 1.5)
# Returns:
# im - Non maximally suppressed image.
#
# Notes:
# The suggested radius value is 1.2 - 1.5 for the following reason. If the
# radius parameter is set to 1 there is a chance that a maxima will not be
# identified on a broad peak where adjacent pixels have the same value. To
# overcome this one typically uses a radius value of 1.2 to 1.5. However
# under these conditions there will be cases where two adjacent pixels will
# both be marked as maxima. Accordingly there is a final morphological
# thinning step to correct this.
# This function is slow. It uses bilinear interpolation to estimate
# intensity values at ideal, real-valued pixel locations on each side of
# pixels to determine if they are local maxima.
# Copyright (c) 1996-2013 <NAME>
"""
im = np.zeros(gradient.shape)
if(radius<1):
return
iradius = int(math.ceil(radius))
# Precalculate x and y offsets relative to centre pixel for each orientation angle
angle = range(0,181,1)
angle = (np.array(angle)*np.pi)/180 # Array of angles in 1 degree increments (but in radians).
xoff = radius*np.cos(angle) # x and y offset of points at specified radius and angle
yoff = radius*np.sin(angle) # from each reference position.
hfrac = xoff - np.floor(xoff) # Fractional offset of xoff relative to integer location
vfrac = yoff - np.floor(yoff) # Fractional offset of yoff relative to integer location
orient = np.fix(orientation) # Orientations start at 0 degrees but arrays start
# with index 1.
orient = np.array(orient,dtype=np.int16)
# Now run through the image interpolating grey values on each side
# of the centre pixel to be used for the non-maximal suppression.
[rows,cols] = gradient.shape
nrow = range(iradius+1,rows - iradius)
ncol = range(iradius+1,cols - iradius)
for elr in nrow:
for elc in ncol:
ori = orient[elr,elc] # Index into precomputed arrays
x = elc + xoff[ori] # x, y location on one side of the point in question
y = elr - yoff[ori]
fx = int(np.floor(x)) # Get integer pixel locations that surround location x,y
cx = int(np.ceil(x))
fy = int(np.floor(y))
cy = int(np.ceil(y))
tl = gradient[fy,fx] # Value at top left integer pixel location.
tr = gradient[fy,cx] # top right
bl = gradient[cy,fx] # bottom left
br = gradient[cy,cx] # bottom right
upperavg = tl + hfrac[ori]*(tr - tl) # Now use bilinear interpolation to
loweravg = bl + hfrac[ori]*(br - bl) # estimate value at x,y
v1 = upperavg + vfrac[ori]*(loweravg - upperavg)
if (gradient[elr, elc] > v1): # We need to check the value on the other side...
x = elc - xoff[ori] # x, y location on the `other side' of the point in question
y = elr + yoff[ori]
fx = int(np.floor(x))
cx = int(np.ceil(x))
fy = int(np.floor(y))
cy = int(np.ceil(y))
tl = gradient[fy,fx] # % Value at top left integer pixel location.
tr = gradient[fy,cx] # % top right
bl = gradient[cy,fx] # % bottom left
br = gradient[cy,cx] # % bottom right
upperavg = tl + hfrac[ori]*(tr - tl)
loweravg = bl + hfrac[ori]*(br - bl)
v2 = upperavg + vfrac[ori]*(loweravg - upperavg)
if (gradient[elr,elc] > v2): # This is a local maximum.
im[elr, elc] = gradient[elr, elc] # Record value in the output
# Finally thin the 'nonmaximally suppressed' image by pointwise
# multiplying itself with a morphological skeletonization of itself.
# I know it is oxymoronic to thin a nonmaximally supressed image but
# fixes the multiple adjacent peaks that can arise from using a radius
# value > 1.
#
# skel = bwmorph(im>0,'skel',Inf);
#
im2 = (im>0).astype(np.int8)
skel= morphology.skeletonize(im2)
im = np.multiply(im,skel)
return im
# NOTE: this redefines the floodfill defined earlier in this module; this
# version defaults to 8-connectivity and omits the explicit bounds clamping.
def floodfill(bw, r, c, N=8):
filled = np.zeros(bw.shape)
theStack = deque(zip(r, c))
while len(theStack) > 0:
x, y = theStack.pop()
if filled[x, y] == 1:
continue
if bw[x, y] == 0:
continue
filled[x, y] = 1
theStack.append((x + 1, y)) # right
theStack.append((x - 1, y)) # left
theStack.append((x, y + 1)) # down
theStack.append((x, y - 1)) # up
if (N == 8):
theStack.append((x + 1, y + 1)) # d right
theStack.append((x - 1, y - 1)) # d left
theStack.append((x - 1, y + 1)) # down
theStack.append((x + 1, y - 1)) # up
return filled
def borderEnhancer(img,filtersize):
# Estimate the local mean of f.
prod_fs = reduce(lambda x, y: x * y, filtersize, 1)
localMean = convolve2d(img,np.ones(filtersize),'same') / prod_fs;
# Estimate of the local variance of f.
img_2 = np.multiply(img,img)
localMean_2 = localMean*localMean
    localVar = convolve2d(img_2, np.ones(filtersize), 'same') / prod_fs - localMean_2  # variance = E[x^2] - (E[x])^2
import glob
import os
import subprocess,shlex,shutil
import sys
from astropy.io import fits
from spectral_cube import SpectralCube
import numpy as np
################
#Parameters
#define the number of subcubes per axis
splitfactor=7
#specify source cube location
sourcefile='/avatar/nickill/smc/diagnostic_cubes/smc_masked_0.07.fits'
#Naomis original smc cube: '/avatar/naomi/ASKAP/SMC/SB_8906/SMC_8906.lsr.K.fits'
cubenameprefix='/avatar/nickill/smc/grid_cubes/smc_grid7x7_masked'
wholecube=fits.open(sourcefile)
print(wholecube[0].shape)
###################
###################
##Find dimensions
xlen=len(wholecube[0].data[0,:,0])
ylen=len(wholecube[0].data[0,0,:])
xax=[]
for i in np.arange(splitfactor+1):
#!/usr/bin/python3
import pytz, torch, math, os
from datetime import datetime, timedelta
import torch.nn.functional as F
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import trading_agents.dc011.nets as nets
import trading_agents.dc011.dataloader as dataloader
import matplotlib.pyplot as plt
from .timefeatures import time_features
import coin_wizard.historical_pair_data as hist
from coin_wizard.technical_indicators import TechnicalIndicators
utc = pytz.utc
### Global settings ###
verbose = False
stance_dict = {
0: 'long ',
1: 'short',
2: 'wait ',
}
# granularities = ['M5', 'M15', 'H1', 'H4']
# granularities = ['M5', 'M15', 'H1', 'H4', 'D']
granularities = ['M15', 'H1', 'H4']
macd_amplifier = [8, 4, 2]
# granularities = ['M5', 'M15', 'H1', 'H4', 'D']
granularity_time_delta = {
"M1": timedelta(seconds=60),
"M5": timedelta(seconds=60*5),
"M15": timedelta(seconds=60*15),
"M30": timedelta(seconds=60*30),
"H1": timedelta(seconds=60*60),
"H4": timedelta(seconds=60*240),
"D": timedelta(seconds=60*60*24),
}
# input_period_list = [64, 64, 64]
# input_period_list = [64, 64, 64, 64]
# input_period_list = [96, 96, 96, 96]
# input_period_list = [64, 64, 64, 64, 32]
# input_period_list = [96, 128, 96, 96, 96, 32]
# input_period_list = [256, 256, 256]
input_period_list = [288, 288, 288]
# input_period_list = [384, 384, 384]
# input_period_list = [480, 480, 480]
cci_trigger_granularity = 0
decode_inference_granularity = 2
# decode_inference_len = 64
decode_inference_len = 72
decode_prediction_granularity = 0
decode_predict_len = 48
# decode_predict_len = 24
decode_predict_resolution = 24
lamb = np.log(1-0.5)/np.log(np.exp(-decode_predict_len))
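# Since np.log(np.exp(-decode_predict_len)) == -decode_predict_len (no underflow
# at these magnitudes), this reduces to lamb = -np.log(0.5) / decode_predict_len,
# i.e. roughly 0.0144 when decode_predict_len is 48.
#   np.isclose(lamb, np.log(2) / decode_predict_len)  # -> True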
cuda_enabled = True
# cuda_enabled = False
selected_net = 'backup.net'
# selected_net = 'DC_2021_09_05_23_13.net'
plot_range = 0.0026
### Indicator settings ###
# moving_average = 9
momentum_period=10
rsi_period=14
cci_period=14
### Train settings ###
load_selected_net = True
learning_rate = 0.00001
fine_tune = True
# learning_rate = learning_rate * 0.23960349927
primary_intrument = 'eurusd'
primary_intrument_trade = 'EUR_USD'
primary_intrument_index = 0
# primary_intrument = 'usdchf'
# primary_intrument_trade = 'USD_CHF'
# primary_intrument_index = 9
# selected_instrument_list = ['eurusd', 'gbpusd', 'audusd', 'usdcad', 'eurjpy', 'usdjpy']
# selected_instrument_datascaler_list = [ dataloader.DataScaler(np.array(1.1924490551226474), np.array(0.09608276821936781)),
# dataloader.DataScaler(np.array(1.428601840627313), np.array(0.14754311756904043)),
# dataloader.DataScaler(np.array(0.8109898637830456), np.array(0.12188596874201743)),
# dataloader.DataScaler(np.array(1.222725673225676), np.array(0.13292115495331883)),
# dataloader.DataScaler(np.array(125.33177145959843), np.array(10.895352856678667)),
# dataloader.DataScaler(np.array(105.68095956301106), np.array(11.318548392932305))]
selected_instrument_list = ['eurusd', 'gbpusd', 'audusd', 'eurjpy', 'nzdusd',
'usdcad', 'usdjpy', 'gbpaud', 'euraud', 'gbpcad',
'gbpnzd', 'nzdchf', 'cadchf', 'eurcad', 'gbpchf',
'audjpy', 'eurnok', 'usdtry', 'audnzd', 'audchf',
'sgdjpy', 'xagusd', 'xauusd', 'zarjpy', 'usdzar',
'gbpjpy', 'usdczk', 'audcad', 'cadjpy', 'chfjpy',
'eurgbp', 'usdnok', 'xauaud', 'xaugbp', 'xaueur',
'eurczk', 'nzdcad', 'usdsgd', 'usdchf', 'eurtry',
]
selected_instrument_datascaler_list = [
dataloader.DataScaler(np.array(1.2226727737949628), np.array(0.0015582402708278174), (np.array(0.8245795527757817), np.array(0.0010407336554503633))),
dataloader.DataScaler(np.array(1.45501340226881), np.array(0.0018410780600621854), (np.array(0.6944530971657596), np.array(0.000897687910107338))),
dataloader.DataScaler(np.array(0.8409227146528752), np.array(0.0013779465889715195), (np.array(1.2173862015744954), np.array(0.001985033987049325))),
dataloader.DataScaler(np.array(123.22119330754076), np.array(0.18733760914037093), (np.array(0.008186416107584096), np.array(1.2785196787446727e-05))),
dataloader.DataScaler(np.array(0.7346609054141837), np.array(0.0012690734157646652), (np.array(1.3730623095712402), np.array(0.0023793789604519824))),
dataloader.DataScaler(np.array(1.1841049275904305), np.array(0.001393785442655719), (np.array(0.8579560513445401), np.array(0.0010185741021392022))),
dataloader.DataScaler(np.array(101.70687582712374), np.array(0.1280890673058432), (np.array(0.010023415791657853), np.array(1.2729417314643028e-05))),
dataloader.DataScaler(np.array(1.7489657289994534), np.array(0.0027142330190730547), (np.array(0.5766111839612996), np.array(0.0008861522209003131))),
dataloader.DataScaler(np.array(1.4704677095197058), np.array(0.002112831837152215), (np.array(0.6851559784596104), np.array(0.0009821517239837786))),
dataloader.DataScaler(np.array(1.7050919335627375), np.array(0.0022733263216393822), (np.array(0.5894682775027766), np.array(0.0007849350028570728))),
dataloader.DataScaler(np.array(1.982708837715431), np.array(0.003268181944023001), (np.array(0.5065921940839548), np.array(0.0008249428984395599))),
dataloader.DataScaler(np.array(0.7022315989619157), np.array(0.0011926923591010603), (np.array(1.4323212856562635), np.array(0.0024494270546285674))),
dataloader.DataScaler(np.array(0.8205161447191238), np.array(0.0011933777762369428), (np.array(1.235808265135375), np.array(0.001775612282246314))),
dataloader.DataScaler(np.array(1.433110284870101), np.array(0.0018295687141570555), (np.array(0.7002372294521236), np.array(0.0008987075010232095))),
dataloader.DataScaler(np.array(1.3918337555225828), np.array(0.0018957498566576404), (np.array(0.7246585175431945), np.array(0.0009914910750076992))),
dataloader.DataScaler(np.array(84.04944948208193), np.array(0.15862065209357476), (np.array(0.011988386059928918), np.array(2.3033473135028014e-05))),
dataloader.DataScaler(np.array(8.854169837315494), np.array(0.01078380023386487), (np.array(0.11441015554546918), np.array(0.00013404402277801653))),
dataloader.DataScaler(np.array(3.42330615833745), np.array(0.006855786304454206), (np.array(0.36742037340075556), np.array(0.0006249576373701751))),
dataloader.DataScaler(np.array(1.1399203007488543), np.array(0.001232440516230078), (np.array(0.8834800696059951), np.array(0.0009485977308032728))),
dataloader.DataScaler(np.array(0.8033054645248916), np.array(0.0013308023296079174), (np.array(1.2689265265399547), np.array(0.0021012245214045134))),
dataloader.DataScaler(np.array(76.38232800119137), np.array(0.10013714825059432), (np.array(0.013265018486138017), np.array(1.803955839518167e-05))),
dataloader.DataScaler(np.array(21.14781348972715), np.array(0.09095701770204394), (np.array(0.051400577718606504), np.array(0.00019655292246843287))),
dataloader.DataScaler(np.array(1389.8507780231075), np.array(3.1098342142082243), (np.array(0.0007356900144239168), np.array(1.6037441284644798e-06))),
dataloader.DataScaler(np.array(8.852639836823544), np.array(0.023127164682104996), (np.array(0.11628637971154769), np.array(0.00031377956361584126))),
dataloader.DataScaler(np.array(12.073743261105015), np.array(0.028976414893401654), (np.array(0.08882246487819616), np.array(0.00020644898537478375))),
dataloader.DataScaler(np.array(146.94883903543672), np.array(0.24138156009816492), (np.array(0.006905001306824298), np.array(1.154016358952686e-05))),
dataloader.DataScaler(np.array(21.73129795531456), np.array(0.031955581139748), (np.array(0.04658759402623105), np.array(6.991211397683505e-05))),
dataloader.DataScaler(np.array(0.9773231643543236), np.array(0.0012336168367192166), (np.array(1.0252740581476445), np.array(0.00130321477799189))),
dataloader.DataScaler(np.array(85.99703161581871), np.array(0.14533326332574023), (np.array(0.011695720742257734), np.array(1.9930311892417374e-05))),
dataloader.DataScaler(np.array(106.12622235088332), np.array(0.1505194604840329), (np.array(0.009589886755925096), np.array(1.425809533480594e-05))),
dataloader.DataScaler(np.array(0.842727828980084), np.array(0.0010011659819559372), (np.array(1.191358333333061), np.array(0.0014221490580598507))),
dataloader.DataScaler(np.array(7.347265629033329), np.array(0.012873449871549071), (np.array(0.14107633230771463), np.array(0.000245065577215033))),
dataloader.DataScaler(np.array(1540.9470141007052), np.array(3.5130337186000578), (np.array(0.000654562148216634), np.array(1.503670051191871e-06))),
dataloader.DataScaler(np.array(900.9406697635376), np.array(2.03870964411316), (np.array(0.001129281475217387), np.array(2.559468239382574e-06))),
dataloader.DataScaler(np.array(1081.3723723600124), | np.array(2.3237191677445015) | numpy.array |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# dphutils.py
"""
This is for small utility functions that don't have a proper home yet
Copyright (c) 2016, <NAME>
"""
import subprocess
import numpy as np
import scipy as sp
import re
import io
import os
import requests
import tifffile as tif
from scipy.fftpack.helper import next_fast_len
from scipy.optimize import minimize_scalar, minimize
from scipy.ndimage.fourier import fourier_gaussian
from scipy.ndimage._ni_support import _normalize_sequence
from scipy.signal import signaltools as sig
from scipy.special import zeta
from scipy.stats import nbinom
from .lm import curve_fit
from .rolling_ball import rolling_ball_filter
import tqdm
import matplotlib.pyplot as plt
# from .llc import jit_filter_function, jit_filter1d_function
try:
import pyfftw
from pyfftw.interfaces.numpy_fft import fftshift, ifftshift, fftn, ifftn, rfftn, irfftn
# Turn on the cache for optimum performance
pyfftw.interfaces.cache.enable()
FFTW = True
except ImportError:
from numpy.fft import fftshift, ifftshift, fftn, ifftn, rfftn, irfftn
FFTW = False
import logging
logger = logging.getLogger(__name__)
eps = np.finfo(float).eps
def get_git(path="."):
try:
# we slice to remove trailing new line.
cmd = ["git", "--git-dir=" + os.path.join(path, ".git"), "describe", "--long", "--always"]
return subprocess.check_output(cmd).decode()[:-1]
except (subprocess.CalledProcessError, FileNotFoundError) as e:
logger.error(e)
logger.error(" ".join(cmd))
return "Unknown"
def generate_meta_data():
pass
def bin_ndarray(ndarray, new_shape=None, bin_size=None, operation="sum"):
"""
Bins an ndarray in all axes based on the target shape, by summing or
averaging.
Number of output dimensions must match number of input dimensions and
new axes must divide old ones.
Parameters
----------
ndarray : array like object (can be dask array)
new_shape : iterable (optional)
The new size to bin the data to
bin_size : scalar or iterable (optional)
The size of the new bins
Returns
-------
binned array.
"""
if new_shape is None:
# if new shape isn't passed then calculate it
if bin_size is None:
# if bin_size isn't passed then raise error
raise ValueError("Either new shape or bin_size must be passed")
# pull old shape
old_shape = np.array(ndarray.shape)
# calculate new shape, integer division!
new_shape = old_shape // bin_size
# calculate the crop window
crop = tuple(slice(None, -r) if r else slice(None) for r in old_shape % bin_size)
# crop the input array
ndarray = ndarray[crop]
# proceed as before
operation = operation.lower()
if operation not in {"sum", "mean"}:
raise ValueError("Operation not supported.")
if ndarray.ndim != len(new_shape):
raise ValueError(f"Shape mismatch: {ndarray.shape} -> {new_shape}")
compression_pairs = [(d, c // d) for d, c in zip(new_shape, ndarray.shape)]
flattened = [l for p in compression_pairs for l in p]
ndarray = ndarray.reshape(flattened)
for i in range(len(new_shape)):
op = getattr(ndarray, operation)
ndarray = op(-1 * (i + 1))
return ndarray
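# Illustrative example (added, not part of the original module): binning a 4x4
# array down to 2x2 with "sum" adds up non-overlapping 2x2 blocks.
#
#   a = np.arange(16).reshape(4, 4)
#   bin_ndarray(a, new_shape=(2, 2), operation="sum")
#   # -> array([[10, 18],
#   #           [42, 50]])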
def scale(data, dtype=None):
"""
Scales data to [0.0, 1.0] range, unless an integer dtype is specified
in which case the data is scaled to fill the bit depth of the dtype.
Parameters
----------
data : numeric type
Data to be scaled, can contain nan
dtype : integer dtype
Specify the bit depth to fill
Returns
-------
scaled_data : numeric type
Scaled data
Examples
--------
>>> from numpy.random import randn
>>> a = randn(10)
>>> b = scale(a)
>>> b.max()
1.0
>>> b.min()
0.0
>>> b = scale(a, dtype = np.uint16)
>>> b.max()
65535
>>> b.min()
0
"""
if np.issubdtype(data.dtype, np.complexfloating):
raise TypeError("`scale` is not defined for complex values")
dmin = np.nanmin(data)
dmax = np.nanmax(data)
if np.issubdtype(dtype, np.integer):
tmin = np.iinfo(dtype).min
tmax = np.iinfo(dtype).max
else:
tmin = 0.0
tmax = 1.0
return ((data - dmin) / (dmax - dmin) * (tmax - tmin) + tmin).astype(dtype)
def scale_uint16(data):
"""Convenience function to scale data to the uint16 range."""
return scale(data, np.uint16)
def radial_profile(data, center=None, binsize=1.0):
"""Take the radial average of a 2D data array
Adapted from http://stackoverflow.com/a/21242776/5030014
Parameters
----------
data : ndarray (2D)
the 2D array for which you want to calculate the radial average
center : sequence
the center about which you want to calculate the radial average
binsize : float
Size of the radial bins; values less than one have questionable utility
Returns
-------
radial_mean : ndarray
a 1D radial average of data
radial_std : ndarray
a 1D radial standard deviation of data
Examples
--------
>>> radial_profile(np.ones((11, 11)))
(array([ 1., 1., 1., 1., 1., 1., 1., 1.]), array([ 0., 0., 0., 0., 0., 0., 0., 0.]))
"""
# test if the data is complex
if np.iscomplexobj(data):
# if it is complex, call this function on the real and
# imaginary parts and return the complex sum.
real_prof, real_std = radial_profile(np.real(data), center, binsize)
imag_prof, imag_std = radial_profile(np.imag(data), center, binsize)
return real_prof + imag_prof * 1j, np.sqrt(real_std ** 2 + imag_std ** 2)
# or do mag and phase
# mag_prof, mag_std = radial_profile(np.abs(data), center, binsize)
# phase_prof, phase_std = radial_profile(np.angle(data), center, binsize)
# return mag_prof * np.exp(phase_prof * 1j), mag_std * np.exp(phase_std * 1j)
# pull the data shape
idx = np.indices((data.shape))
if center is None:
# find the center
center = np.array(data.shape) // 2
else:
# make sure center is an array.
center = np.asarray(center)
# calculate the radius from center
idx2 = idx - center[(Ellipsis,) + (np.newaxis,) * (data.ndim)]
r = np.sqrt(np.sum([i ** 2 for i in idx2], 0))
# convert to int
r = np.round(r / binsize).astype(np.int)
# sum the values at equal r
tbin = np.bincount(r.ravel(), data.ravel())
# sum the squares at equal r
tbin2 = np.bincount(r.ravel(), (data ** 2).ravel())
# find how many equal r's there are
nr = np.bincount(r.ravel())
# calculate the radial mean
# NOTE: because nr could be zero (for missing bins) the results will
# have NaN for binsize != 1
radial_mean = tbin / nr
# calculate the radial std
radial_std = np.sqrt(tbin2 / nr - radial_mean ** 2)
# return them
return radial_mean, radial_std
def mode(data):
"""Quickly find the mode of data
up to 1000 times faster than scipy mode
but not nearly as feature rich
Note: we can vectorize this to work on different
axes with numba"""
# will not work with negative numbers (for now)
return np.bincount(data.ravel()).argmax()
def slice_maker(xs, ws):
"""
A utility function to generate slices for later use.
Parameters
----------
xs : sequence of ints
Center positions of the slice along each dimension
ws : int or sequence of ints
Width of the slice along each dimension (a scalar is broadcast to all dimensions)
Returns
-------
slices : tuple
A tuple of slice objects, one per entry of `xs`, in the same order as the
input dimensions.
Notes
-----
The method will automatically coerce slices into acceptable bounds.
Examples
--------
>>> slice_maker((30,20),10)
(slice(25, 35, None), slice(15, 25, None))
>>> slice_maker((30,20),25)
(slice(18, 43, None), slice(8, 33, None))
"""
# normalize inputs
xs = np.asarray(xs)
ws = np.asarray(_normalize_sequence(ws, len(xs)))
if not np.isrealobj((xs, ws)):
raise TypeError("`slice_maker` only accepts real input")
if np.any(ws < 0):
raise ValueError(f"width cannot be negative, width = {ws}")
# ensure integers
xs = np.rint(xs).astype(int)
ws = np.rint(ws).astype(int)
# use _calc_pad
toreturn = []
for x, w in zip(xs, ws):
half2, half1 = _calc_pad(0, w)
xstart = x - half1
xend = x + half2
assert xstart <= xend, "xstart > xend"
if xend <= 0:
xstart, xend = 0, 0
# the max calls are to make slice_maker play nice with edges.
toreturn.append(slice(max(0, xstart), xend))
# return a tuple of slices
return tuple(toreturn)
def fft_pad(array, newshape=None, mode="median", **kwargs):
"""Pad an array to prep it for fft"""
# pull the old shape
oldshape = array.shape
if newshape is None:
# update each dimension to a 5-smooth hamming number
newshape = tuple(next_fast_len(n) for n in oldshape)
else:
if isinstance(newshape, int):
newshape = tuple(newshape for n in oldshape)
else:
newshape = tuple(newshape)
# generate padding and slices
padding, slices = padding_slices(oldshape, newshape)
return np.pad(array[slices], padding, mode=mode, **kwargs)
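# Illustrative example (added, not from the original module): with no explicit
# newshape each axis is rounded up to the next fast FFT length and the new cells
# are filled with the array median by default.
#
#   padded = fft_pad(np.ones((11, 17)))
#   # padded.shape == tuple(next_fast_len(n) for n in (11, 17))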
def padding_slices(oldshape, newshape):
"""This function takes the old shape and the new shape and calculates
the required padding or cropping.
Can be used to generate the slices needed to undo fft_pad above"""
# generate pad widths from new shape
padding = tuple(
_calc_pad(o, n) if n is not None else _calc_pad(o, o) for o, n in zip(oldshape, newshape)
)
# Make a crop list, if any of the padding is negative
slices = tuple(_calc_crop(s1, s2) for s1, s2 in padding)
# leave 0 pad width where it was cropped
padding = [(max(s1, 0), max(s2, 0)) for s1, s2 in padding]
return padding, slices
# def easy_rfft(data, axes=None):
# """utility method that includes fft shifting"""
# return fftshift(
# rfftn(
# ifftshift(
# data, axes=axes
# ), axes=axes
# ), axes=axes)
# def easy_irfft(data, axes=None):
# """utility method that includes fft shifting"""
# return ifftshift(
# irfftn(
# fftshift(
# data, axes=axes
# ), axes=axes
# ), axes=axes)
# add np.pad docstring
fft_pad.__doc__ += np.pad.__doc__
def _calc_crop(s1, s2):
"""Calc the cropping from the padding"""
a1 = abs(s1) if s1 < 0 else None
a2 = s2 if s2 < 0 else None
return slice(a1, a2, None)
def _calc_pad(oldnum, newnum):
""" Calculate the proper padding for fft_pad
We have three cases:
old number even new number even
>>> _calc_pad(10, 16)
(3, 3)
old number odd new number even
>>> _calc_pad(11, 16)
(2, 3)
old number odd new number odd
>>> _calc_pad(11, 17)
(3, 3)
old number even new number odd
>>> _calc_pad(10, 17)
(4, 3)
same numbers
>>> _calc_pad(17, 17)
(0, 0)
from larger to smaller.
>>> _calc_pad(17, 10)
(-4, -3)
"""
# how much do we need to add?
width = newnum - oldnum
# calculate one side, smaller
pad_s = width // 2
# calculate the other, bigger
pad_b = width - pad_s
# if oldnum is odd and newnum is even
# we want to pull things backward
if oldnum % 2:
pad1, pad2 = pad_s, pad_b
else:
pad1, pad2 = pad_b, pad_s
return pad1, pad2
# If we have fftw installed than make a better fftconvolve
if FFTW:
def fftconvolve(in1, in2, mode="same", threads=1):
"""Same as above but with pyfftw added in"""
in1 = np.asarray(in1)
in2 = np.asarray(in2)
if in1.ndim == in2.ndim == 0: # scalar inputs
return in1 * in2
elif not in1.ndim == in2.ndim:
raise ValueError("in1 and in2 should have the same dimensionality")
elif in1.size == 0 or in2.size == 0: # empty arrays
return np.array([])
s1 = np.array(in1.shape)
s2 = np.array(in2.shape)
complex_result = np.issubdtype(in1.dtype, complex) or np.issubdtype(in2.dtype, complex)
shape = s1 + s2 - 1
# Check that input sizes are compatible with 'valid' mode
if sig._inputs_swap_needed(mode, s1, s2):
# Convolution is commutative; order doesn't have any effect on output
in1, s1, in2, s2 = in2, s2, in1, s1
# Speed up FFT by padding to optimal size for FFTPACK
fshape = [next_fast_len(int(d)) for d in shape]
fslice = tuple([slice(0, int(sz)) for sz in shape])
# Pre-1.9 NumPy FFT routines are not threadsafe. For older NumPys, make
# sure we only call rfftn/irfftn from one thread at a time.
if not complex_result and (sig._rfft_mt_safe or sig._rfft_lock.acquire(False)):
try:
sp1 = rfftn(in1, fshape, threads=threads)
sp2 = rfftn(in2, fshape, threads=threads)
ret = irfftn(sp1 * sp2, fshape, threads=threads)[fslice].copy()
finally:
if not sig._rfft_mt_safe:
sig._rfft_lock.release()
else:
# If we're here, it's either because we need a complex result, or we
# failed to acquire _rfft_lock (meaning rfftn isn't threadsafe and
# is already in use by another thread). In either case, use the
# (threadsafe but slower) SciPy complex-FFT routines instead.
sp1 = fftn(in1, fshape, threads=threads)
sp2 = fftn(in2, fshape, threads=threads)
ret = ifftn(sp1 * sp2, threads=threads)[fslice].copy()
if not complex_result:
ret = ret.real
if mode == "full":
return ret
elif mode == "same":
return sig._centered(ret, s1)
elif mode == "valid":
return sig._centered(ret, s1 - s2 + 1)
else:
raise ValueError("Acceptable mode flags are 'valid'," " 'same', or 'full'.")
# fftconvolve.__doc__ = "DPH Utils: " + sig.fftconvolve.__doc__
else:
fftconvolve = sig.fftconvolve
def fftconvolve_fast(data, kernel, **kwargs):
"""A faster version of fft convolution
In this case the kernel ifftshifted before FFT but the data is not.
This can be done because the effect of fourier convolution is to
"wrap" around the data edges so whether we ifftshift before FFT
and then fftshift after it makes no difference so we can skip the
step entirely.
"""
# TODO: add error checking like in the above and add functionality
# for complex inputs. Also could add options for different types of
# padding.
dshape = np.array(data.shape)
kshape = np.array(kernel.shape)
# find maximum dimensions
maxshape = np.max((dshape, kshape), 0)
# calculate a nice shape
fshape = [next_fast_len(int(d)) for d in maxshape]
# pad out with reflection
pad_data = fft_pad(data, fshape, "reflect")
# calculate padding
padding = tuple(_calc_pad(o, n) for o, n in zip(data.shape, pad_data.shape))
# so that we can calculate the cropping, maybe this should be integrated
# into `fft_pad` ...
fslice = tuple(slice(s, -e) if e != 0 else slice(s, None) for s, e in padding)
if kernel.shape != pad_data.shape:
# it's been assumed that the background of the kernel has already been
# removed and that the kernel has already been centered
kernel = fft_pad(kernel, pad_data.shape, mode="constant")
k_kernel = rfftn(ifftshift(kernel), pad_data.shape, **kwargs)
k_data = rfftn(pad_data, pad_data.shape, **kwargs)
convolve_data = irfftn(k_kernel * k_data, pad_data.shape, **kwargs)
# return data with same shape as original data
return convolve_data[fslice]
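# Illustrative example (added, not from the original module): blurring an image
# with a centred Gaussian kernel. Because the routine ifftshifts the kernel
# internally, the kernel passed in should have its peak at the array centre.
#
#   img = np.random.rand(64, 64)
#   y, x = np.mgrid[-32:32, -32:32]
#   kern = np.exp(-(x**2 + y**2) / (2 * 3.0**2))
#   blurred = fftconvolve_fast(img, kern / kern.sum())
#   # blurred.shape == img.shape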
def win_nd(size, win_func=sp.signal.hann, **kwargs):
"""
A function to make a multidimensional version of a window function
Parameters
----------
size : tuple of ints
size of the output window
win_func : callable
Default is the Hanning window
**kwargs : key word arguments to be passed to win_func
Returns
-------
w : ndarray
window function
"""
ndim = len(size)
newshapes = tuple(
[tuple([1 if i != j else k for i in range(ndim)]) for j, k in enumerate(size)]
)
# Initialize to return
toreturn = 1.0
# cross product the 1D windows together
for newshape in newshapes:
toreturn = toreturn * win_func(max(newshape), **kwargs).reshape(newshape)
# return
return toreturn
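# Illustrative example (added, not from the original module): a 2D Hann window is
# the outer product of two 1D Hann windows, so the edges taper smoothly to zero.
#
#   w = win_nd((128, 256))
#   # w.shape == (128, 256)
#   # np.allclose(w, np.outer(sp.signal.hann(128), sp.signal.hann(256))) -> True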
def anscombe(data):
"""Apply Anscombe transform to data
https://en.wikipedia.org/wiki/Anscombe_transform
"""
return 2 * np.sqrt(data + 3 / 8)
def anscombe_inv(data):
"""Apply inverse Anscombe transform to data
https://en.wikipedia.org/wiki/Anscombe_transform
"""
part0 = 1 / 4 * data ** 2
part1 = 1 / 4 * np.sqrt(3 / 2) / data
part2 = -11 / 8 / (data ** 2)
part3 = 5 / 8 * np.sqrt(3 / 2) / (data ** 3)
return part0 + part1 + part2 + part3 - 1 / 8
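# Illustrative note (added, not from the original module): anscombe() approximately
# stabilises the variance of Poisson data at 1, while anscombe_inv() is the
# closed-form approximation of the exact *unbiased* inverse, so it intentionally
# differs from the algebraic inverse (data**2 / 4 - 3 / 8).
#
#   counts = np.random.poisson(20.0, size=10000).astype(float)
#   np.var(anscombe(counts))                 # ~1.0
#   anscombe_inv(anscombe(counts)).mean()    # ~20, approximately unbiased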
def fft_gaussian_filter(img, sigma):
"""FFT gaussian convolution
Parameters
----------
img : ndarray
Image to convolve with a gaussian kernel
sigma : int or sequence
The sigma(s) of the gaussian kernel in _real space_
Returns
-------
filt_img : ndarray
The filtered image
"""
# This doesn't help agreement but it will make things faster
# pull the shape
s1 = np.array(img.shape)
# s2 = np.array([int(s * 4) for s in _normalize_sequence(sigma, img.ndim)])
shape = s1 # + s2 - 1
# calculate a nice shape
fshape = [next_fast_len(int(d)) for d in shape]
# pad out with reflection
pad_img = fft_pad(img, fshape, "reflect")
# calculate the padding
padding = tuple(_calc_pad(o, n) for o, n in zip(img.shape, pad_img.shape))
# so that we can calculate the cropping, maybe this should be integrated
# into `fft_pad` ...
fslice = tuple(slice(s, -e) if e != 0 else slice(s, None) for s, e in padding)
# fourier transfrom and apply the filter
kimg = rfftn(pad_img, fshape)
filt_kimg = fourier_gaussian(kimg, sigma, pad_img.shape[-1])
# inverse FFT and return.
return irfftn(filt_kimg, fshape)[fslice]
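# Illustrative example (added, not from the original module):
#
#   out = fft_gaussian_filter(np.random.rand(256, 256), sigma=5)
#   # out.shape == (256, 256); the result closely matches a direct spatial-domain
#   # Gaussian filter with the same sigma, up to edge handling and kernel truncation.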
def multi_exp(xdata, *args):
"""Power and exponent"""
odd = len(args) % 2
if odd:
offset = args[-1]
else:
offset = 0
res = np.ones_like(xdata, dtype=float) * offset
for i in range(0, len(args) - odd, 2):
a, k = args[i : i + 2]
res += a * np.exp(-k * xdata)
return res
def multi_exp_jac(xdata, *args):
"""Power and exponent jacobian"""
odd = len(args) % 2
tostack = []
for i in range(0, len(args) - odd, 2):
a, k = args[i : i + 2]
tostack.append( | np.exp(-k * xdata) | numpy.exp |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 21 12:27:21 2020
@author: dhulls
"""
from os import sys
import pathlib
import numpy as np
import pandas as pd
import seaborn as sns
import random
from scipy.stats import lognorm
from scipy.stats import norm
from scipy.stats import rayleigh
from scipy.stats import uniform
import matplotlib.pyplot as plt
from UQpy.SampleMethods import MH
from UQpy.Distributions import Distribution
import time
from UQpy.Distributions import Normal
from UQpy.SampleMethods import MMH
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
tfb = tfp.bijectors
tfd = tfp.distributions
tfk = tfp.math.psd_kernels
tf.enable_v2_behavior()
from LimitStateFunctions import LimitStateFunctions as LSF
from ML_TF import ML_TF
from DrawRandom import DrawRandom as DR
from pyDOE import *
Ndim = 1
LS1 = LSF()
DR1 = DR()
num_s = 500
value = 17 # (1)
# value = 1.8
## Training GP
# uniform(loc=-5,scale=10).rvs()
lhd = lhs(1, samples=200, criterion='maximin')
lhd = uniform(loc=-5,scale=10).ppf(lhd)
y_LF_GP = np.empty(1, dtype = float)
y_HF_GP = np.empty(1, dtype = float)
inp_GPtrain = np.empty(1, dtype = float)
Ninit_GP = 50
for ii in np.arange(0,Ninit_GP,1):
inp = np.array(lhd[ii,0]).reshape(1)
inpp = inp[None, :]
inp_GPtrain = np.concatenate((inp_GPtrain, inp))
y_LF_GP = np.concatenate((y_LF_GP, LS1.Scalar_LS2_LF(inpp)))
y_HF_GP = np.concatenate((y_HF_GP, LS1.Scalar_LS2_HF(inpp)))
inp_GPtrain = np.delete(inp_GPtrain, 0)
y_LF_GP = np.delete(y_LF_GP, 0)
y_HF_GP = np.delete(y_HF_GP, 0)
ML = ML_TF(obs_ind = inp_GPtrain[:,None], obs = (y_HF_GP-y_LF_GP), amp_init=1., len_init=1., var_init=1., num_iters = 1000)
amp1, len1, var1 = ML.GP_train()
# samples1 = ML.GP_predict(amplitude_var = amp1, length_scale_var=len1, observation_noise_variance_var=var1, pred_ind = inp_GPtrain[:,None], num_samples=num_s)
x_req = np.array(lhd[np.arange((Ninit_GP+1),200,1),0]).reshape(len(np.array(lhd[np.arange((Ninit_GP+1),200,1),0])),1)
samples1 = ML.GP_predict(amplitude_var = amp1, length_scale_var=len1, observation_noise_variance_var=var1, pred_ind = x_req, num_samples=num_s)
LF_req = LS1.Scalar_LS2_LF(x_req)
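# Editor's note (added comment): the quantity below is a learning (U) function of the
# AK-MCS type: the absolute predicted response (low-fidelity value plus the mean GP
# correction) divided by the GP standard deviation. Points with U < 2 lie close to the
# limit state relative to the model uncertainty and are added to the training set in
# the loop that follows.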
u_req = (np.abs(LF_req + np.mean(np.array(samples1),axis=0)))/np.std(np.array(samples1),axis=0)
HF_req = LS1.Scalar_LS2_HF(x_req)
ind_req = np.rot90(np.where(u_req<2))
for ii in np.arange(0,len(ind_req),1):
inp = np.array(lhd[(Ninit_GP+1+ind_req[ii]),0]).reshape(1)
inpp = inp[None, :]
inp_GPtrain = np.concatenate((inp_GPtrain, inp))
y_LF_GP = np.concatenate((y_LF_GP, LS1.Scalar_LS2_LF(inpp)))
y_HF_GP = np.concatenate((y_HF_GP, LS1.Scalar_LS2_HF(inpp)))
ML = ML_TF(obs_ind = inp_GPtrain[:,None], obs = (y_HF_GP-y_LF_GP), amp_init=amp1, len_init=len1, var_init=var1, num_iters = 30)
amp1, len1, var1 = ML.GP_train()
## Subset simulation with HF-LF and GP
# Ninit_GP = 50
# y_LF_GP = np.empty(1, dtype = float)
# y_HF_GP = np.empty(1, dtype = float)
# inp_GPtrain = np.empty(1, dtype = float)
# for ii in np.arange(0,Ninit_GP,1):
# inp = (DR1.StandardNormal_Indep(N=Ndim))
# inpp = inp[None, :]
# inp_GPtrain = np.concatenate((inp_GPtrain, inp))
# y_LF_GP = np.concatenate((y_LF_GP, LS1.Scalar_LS2_LF(inpp)))
# y_HF_GP = np.concatenate((y_HF_GP, LS1.Scalar_LS2_HF(inpp)))
# ML = ML_TF(obs_ind = inp_GPtrain[:,None], obs = (y_HF_GP-y_LF_GP))
# amp1, len1, var1 = ML.GP_train()
# samples1 = ML.GP_predict(amplitude_var = amp1, length_scale_var=len1, observation_noise_variance_var=var1, pred_ind = inp_GPtrain[:,None], num_samples=num_s)
uni = uniform()
Nsub = 1500
Psub = 0.1
Nlim = 3
y1 = np.zeros((Nsub,Nlim))
y1_lim = np.zeros(Nlim)
y1_lim[Nlim-1] = value
inp1 = np.zeros((Nsub,1,Nlim))
rv = norm(loc=0,scale=1)
u_lim_vec = np.array([2,2,2,2,2,2])
u_GP = np.empty(1, dtype = float)
var_GP = np.empty(1, dtype = float)
var_GP[0] = var1.numpy().reshape(1)
subs_info = np.empty(1, dtype = float)
subs_info[0] = np.array(0).reshape(1)
LF_plus_GP = np.empty(1, dtype = float)
GP_pred = np.empty(1, dtype = float)
for ii in np.arange(0,Nsub,1):
inp = DR1.StandardNormal_Indep(N=Ndim)
inpp = inp[None,:]
LF = LS1.Scalar_LS2_LF(inpp)
inp1[ii,:,0] = inp
samples1 = ML.GP_predict(amplitude_var = amp1, length_scale_var=len1, observation_noise_variance_var=var1, pred_ind = inpp, num_samples=num_s)
GP_diff = np.mean(np.array(samples1),axis=0)
u_check = (np.abs(LF + GP_diff))/np.std(np.array(samples1),axis=0)
u_GP = np.concatenate((u_GP, u_check))
u_lim = u_lim_vec[0]
if u_check > u_lim:
y1[ii,0] = LF + GP_diff
else:
y1[ii,0] = LS1.Scalar_LS2_HF(inpp)
inp_GPtrain = np.concatenate((inp_GPtrain, inp))
y_LF_GP = np.concatenate((y_LF_GP, LF))
y_HF_GP = np.concatenate((y_HF_GP, y1[ii,0].reshape(1)))
LF_plus_GP = np.concatenate((LF_plus_GP, (LF + np.array(GP_diff).reshape(1))))
GP_pred = np.concatenate((GP_pred, (np.array(GP_diff).reshape(1))))
# ML = ML_TF(obs_ind = (np.array(inp_GPtrain))[:,:,0], obs = (np.array(y_HF_GP)[:,:,0]-np.array(y_LF_GP)[:,:,0])[:,0])
ML = ML_TF(obs_ind = inp_GPtrain[:,None], obs = (y_HF_GP-y_LF_GP), amp_init=amp1, len_init=len1, var_init=var1, num_iters = 30)
amp1, len1, var1 = ML.GP_train()
var_GP = np.concatenate((var_GP, var1.numpy().reshape(1)))
subs_info = np.concatenate((subs_info, np.array(0).reshape(1)))
# inpp = np.zeros(Ndim)
for kk in np.arange(1,Nlim,1):
y1[0:(int(Psub*Nsub)-1),kk] = | np.sort(y1[:,kk-1]) | numpy.sort |
def detval(input, gt, cfg):
import numpy as np
import json
from utils.polygon_wrapper import iod
from utils.polygon_wrapper import area_of_intersection
from utils.polygon_wrapper import area
import os
input_json_path = input#os.path.join(cfg.ADDRESS.OUTPUT_DIR, 'result.json')
gt_json_path = gt#os.path.join(cfg.ADDRESS.DETECTION.TRAIN_GT_DIR, 'train_labels.json')
global val_result
def input_reading(polygons):
det = []
for polygon in polygons:
polygon['points'] = np.array(polygon['points'])
det.append(polygon)
return det
def gt_reading(gt_dict, img_key):
polygons = gt_dict[img_key]
gt = []
for polygon in polygons:
polygon['points'] = np.array(polygon['points'])
gt.append(polygon)
return gt
def detection_filtering(detections, groundtruths, threshold=0.5):
"""
Ignore detections that overlap ground-truth regions marked as do-not-care ('###')
"""
before_filter_num = len(detections)
for gt_id, gt in enumerate(groundtruths):
if (gt['transcription'] == '###') and (gt['points'].shape[1] > 1):
gt_x = list(map(int, np.squeeze(gt['points'][:, 0])))
gt_y = list(map(int, np.squeeze(gt['points'][:, 1])))
for det_id, detection in enumerate(detections):
det_x = list(map(int, np.squeeze(detection['points'][:, 0])))
det_y = list(map(int, np.squeeze(detection['points'][:, 1])))
det_gt_iou = iod(det_x, det_y, gt_x, gt_y)
if det_gt_iou > threshold:
detections[det_id] = []
detections[:] = [item for item in detections if item != []]
if before_filter_num - len(detections) > 0:
print("Ignore {} illegal detections".format(before_filter_num - len(detections)))
return detections
def gt_filtering(groundtruths):
before_filter_num = len(groundtruths)
for gt_id, gt in enumerate(groundtruths):
if gt['transcription'] == '###' or gt['points'].shape[0] < 3:
groundtruths[gt_id] = []
groundtruths[:] = [item for item in groundtruths if item != []]
if before_filter_num - len(groundtruths) > 0:
print("Ignore {} illegal groundtruths".format(before_filter_num - len(groundtruths)))
return groundtruths
def generate_json(cfg):
if cfg.BASE.MODEL == 'TEXTNET':
from model.detection_model.TextSnake_pytorch.util import global_data
val_result = global_data._get_det_value()
with open(os.path.join(cfg.ADDRESS.DET_RESULT_DIR, 'result.json'), 'w') as f:
json.dump(val_result, f)
def sigma_calculation(det_x, det_y, gt_x, gt_y):
"""
sigma = inter_area / gt_area
"""
return np.round((area_of_intersection(det_x, det_y, gt_x, gt_y) / area(gt_x, gt_y)), 2)
def tau_calculation(det_x, det_y, gt_x, gt_y):
"""
tau = inter_area / det_area
"""
return np.round((area_of_intersection(det_x, det_y, gt_x, gt_y) / area(det_x, det_y)), 2)
def one_to_one(local_sigma_table, local_tau_table, local_accumulative_recall,
local_accumulative_precision, global_accumulative_recall, global_accumulative_precision,
gt_flag, det_flag):
"""
Args:
local_sigma_table: recall overlap (intersection area / gt area) for every gt-detection pair
local_tau_table: precision overlap (intersection area / detection area) for every gt-detection pair
local_accumulative_recall: running recall numerator for the current image
local_accumulative_precision: running precision numerator for the current image
global_accumulative_recall: running recall numerator over all images
global_accumulative_precision: running precision numerator over all images
gt_flag: flags marking ground truths that have already been matched
det_flag: flags marking detections that have already been matched
Returns:
The updated accumulators and match flags.
"""
for gt_id in range(num_gt):
qualified_sigma_candidates = np.where(local_sigma_table[gt_id, :] > tr)
num_qualified_sigma_candidates = qualified_sigma_candidates[0].shape[0]
qualified_tau_candidates = np.where(local_tau_table[gt_id, :] > tp)
num_qualified_tau_candidates = qualified_tau_candidates[0].shape[0]
if (num_qualified_sigma_candidates == 1) and (num_qualified_tau_candidates == 1):
global_accumulative_recall = global_accumulative_recall + 1.0
global_accumulative_precision = global_accumulative_precision + 1.0
local_accumulative_recall = local_accumulative_recall + 1.0
local_accumulative_precision = local_accumulative_precision + 1.0
gt_flag[0, gt_id] = 1
matched_det_id = np.where(local_sigma_table[gt_id, :] > tr)
det_flag[0, matched_det_id] = 1
return local_accumulative_recall, local_accumulative_precision, global_accumulative_recall, global_accumulative_precision, gt_flag, det_flag
def one_to_many(local_sigma_table, local_tau_table, local_accumulative_recall,
local_accumulative_precision, global_accumulative_recall, global_accumulative_precision,
gt_flag, det_flag):
for gt_id in range(num_gt):
# skip the following if the groundtruth was matched
if gt_flag[0, gt_id] > 0:
continue
non_zero_in_sigma = np.where(local_sigma_table[gt_id, :] > 0)
num_non_zero_in_sigma = non_zero_in_sigma[0].shape[0]
if num_non_zero_in_sigma >= k:
# search for all detections that overlap with this groundtruth
qualified_tau_candidates = np.where((local_tau_table[gt_id, :] >= tp) & (det_flag[0, :] == 0))
num_qualified_tau_candidates = qualified_tau_candidates[0].shape[0]
if num_qualified_tau_candidates == 1:
if local_tau_table[gt_id, qualified_tau_candidates] >= tp and local_sigma_table[gt_id, qualified_tau_candidates] >= tr:
# became an one-to-one case
global_accumulative_recall = global_accumulative_recall + 1.0
global_accumulative_precision = global_accumulative_precision + 1.0
local_accumulative_recall = local_accumulative_recall + 1.0
local_accumulative_precision = local_accumulative_precision + 1.0
gt_flag[0, gt_id] = 1
det_flag[0, qualified_tau_candidates] = 1
elif np.sum(local_sigma_table[gt_id, qualified_tau_candidates]) >= tr:
gt_flag[0, gt_id] = 1
det_flag[0, qualified_tau_candidates] = 1
global_accumulative_recall = global_accumulative_recall + fsc_k
global_accumulative_precision = global_accumulative_precision + num_qualified_tau_candidates * fsc_k
local_accumulative_recall = local_accumulative_recall + fsc_k
local_accumulative_precision = local_accumulative_precision + num_qualified_tau_candidates * fsc_k
return local_accumulative_recall, local_accumulative_precision, global_accumulative_recall, global_accumulative_precision, gt_flag, det_flag
def many_to_many(local_sigma_table, local_tau_table, local_accumulative_recall,
local_accumulative_precision, global_accumulative_recall, global_accumulative_precision,
gt_flag, det_flag):
for det_id in range(num_det):
# skip the following if the detection was matched
if det_flag[0, det_id] > 0:
continue
non_zero_in_tau = np.where(local_tau_table[:, det_id] > 0)
num_non_zero_in_tau = non_zero_in_tau[0].shape[0]
if num_non_zero_in_tau >= k:
# search for all groundtruths that overlap with this detection
qualified_sigma_candidates = np.where((local_sigma_table[:, det_id] >= tp) & (gt_flag[0, :] == 0))
num_qualified_sigma_candidates = qualified_sigma_candidates[0].shape[0]
if num_qualified_sigma_candidates == 1:
if local_tau_table[qualified_sigma_candidates, det_id] >= tp and local_sigma_table[qualified_sigma_candidates, det_id] >= tr:
# became an one-to-one case
global_accumulative_recall = global_accumulative_recall + 1.0
global_accumulative_precision = global_accumulative_precision + 1.0
local_accumulative_recall = local_accumulative_recall + 1.0
local_accumulative_precision = local_accumulative_precision + 1.0
gt_flag[0, qualified_sigma_candidates] = 1
det_flag[0, det_id] = 1
elif np.sum(local_tau_table[qualified_sigma_candidates, det_id]) >= tp:
det_flag[0, det_id] = 1
gt_flag[0, qualified_sigma_candidates] = 1
global_accumulative_recall = global_accumulative_recall + num_qualified_sigma_candidates * fsc_k
global_accumulative_precision = global_accumulative_precision + fsc_k
local_accumulative_recall = local_accumulative_recall + num_qualified_sigma_candidates * fsc_k
local_accumulative_precision = local_accumulative_precision + fsc_k
return local_accumulative_recall, local_accumulative_precision, global_accumulative_recall, global_accumulative_precision, gt_flag, det_flag
# Initial config
global_tp = 0
global_fp = 0
global_fn = 0
global_sigma = []
global_tau = []
tr = 0.7
tp = 0.6
fsc_k = 0.8
k = 2
# load json file as dict
generate_json(cfg)
with open(input_json_path, 'r') as f:
input_dict = json.load(f)
with open(gt_json_path, 'r') as f:
gt_dict = json.load(f)
for input_img_key, input_cnts in input_dict.items():
print(input_img_key)
detections = input_reading(input_cnts)
groundtruths = gt_reading(gt_dict, input_img_key.replace('res', 'gt'))
detections = detection_filtering(detections, groundtruths) # filters detections overlapping with DC area
groundtruths = gt_filtering(groundtruths)
local_sigma_table = np.zeros((len(groundtruths), len(detections)))
local_tau_table = np.zeros((len(groundtruths), len(detections)))
for gt_id, gt in enumerate(groundtruths):
if len(detections) > 0:
gt_x = list(map(int, np.squeeze(gt['points'][:, 0])))
gt_y = list(map(int, np.squeeze(gt['points'][:, 1])))
for det_id, detection in enumerate(detections):
det_x = list(map(int, | np.squeeze(detection['points'][:, 0]) | numpy.squeeze |
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""test cases for Logistic distribution"""
import numpy as np
from scipy import stats
import mindspore.context as context
import mindspore.nn as nn
import mindspore.nn.probability.distribution as msd
from mindspore import Tensor
from mindspore import dtype
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
class Prob(nn.Cell):
"""
Test class: probability of Logistic distribution.
"""
def __init__(self):
super(Prob, self).__init__()
self.l = msd.Logistic(np.array([3.0]), np.array([[2.0], [4.0]]), dtype=dtype.float32)
def construct(self, x_):
return self.l.prob(x_)
def test_pdf():
"""
Test pdf.
"""
logistic_benchmark = stats.logistic(np.array([3.0]), np.array([[2.0], [4.0]]))
expect_pdf = logistic_benchmark.pdf([1.0, 2.0]).astype(np.float32)
pdf = Prob()
output = pdf(Tensor([1.0, 2.0], dtype=dtype.float32))
tol = 1e-6
assert (np.abs(output.asnumpy() - expect_pdf) < tol).all()
class LogProb(nn.Cell):
"""
Test class: log probability of Logistic distribution.
"""
def __init__(self):
super(LogProb, self).__init__()
self.l = msd.Logistic(np.array([3.0]), np.array([[2.0], [4.0]]), dtype=dtype.float32)
def construct(self, x_):
return self.l.log_prob(x_)
def test_log_likelihood():
"""
Test log_pdf.
"""
logistic_benchmark = stats.logistic(np.array([3.0]), np.array([[2.0], [4.0]]))
expect_logpdf = logistic_benchmark.logpdf([1.0, 2.0]).astype(np.float32)
logprob = LogProb()
output = logprob(Tensor([1.0, 2.0], dtype=dtype.float32))
tol = 1e-6
assert (np.abs(output.asnumpy() - expect_logpdf) < tol).all()
class Basics(nn.Cell):
"""
Test class: mean/sd/mode of Logistic distribution.
"""
def __init__(self):
super(Basics, self).__init__()
self.l = msd.Logistic(np.array([3.0]), np.array([2.0, 4.0]), dtype=dtype.float32)
def construct(self):
return self.l.mean(), self.l.sd(), self.l.mode()
def test_basics():
"""
Test mean/standard deviation/mode.
"""
basics = Basics()
mean, sd, mode = basics()
expect_mean = [3.0, 3.0]
expect_sd = np.pi * np.array([2.0, 4.0]) / np.sqrt(np.array([3.0]))
tol = 1e-6
assert (np.abs(mean.asnumpy() - expect_mean) < tol).all()
assert (np.abs(mode.asnumpy() - expect_mean) < tol).all()
assert (np.abs(sd.asnumpy() - expect_sd) < tol).all()
class Sampling(nn.Cell):
"""
Test class: sample of Logistic distribution.
"""
def __init__(self, shape, seed=0):
super(Sampling, self).__init__()
self.l = msd.Logistic(np.array([3.0]), np.array([[2.0], [4.0]]), seed=seed, dtype=dtype.float32)
self.shape = shape
def construct(self, mean=None, sd=None):
return self.l.sample(self.shape, mean, sd)
def test_sample():
"""
Test sample.
"""
shape = (2, 3)
seed = 10
mean = Tensor([2.0], dtype=dtype.float32)
sd = Tensor([2.0, 2.0, 2.0], dtype=dtype.float32)
sample = Sampling(shape, seed=seed)
output = sample(mean, sd)
assert output.shape == (2, 3, 3)
class CDF(nn.Cell):
"""
Test class: cdf of Logistic distribution.
"""
def __init__(self):
super(CDF, self).__init__()
self.l = msd.Logistic(np.array([3.0]), np.array([[2.0], [4.0]]), dtype=dtype.float32)
def construct(self, x_):
return self.l.cdf(x_)
def test_cdf():
"""
Test cdf.
"""
logistic_benchmark = stats.logistic( | np.array([3.0]) | numpy.array |
#!/usr/bin/env python
"""Extension of the BumpHunter algorithm to 2D distributions"""
import concurrent.futures as thd
import itertools
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import colors as mcl
from scipy.special import gammainc as G  # Need G(a,b), the regularized lower incomplete gamma function
from scipy.stats import norm
from pyBumpHunter.bumphunter_1dim import BumpHunterInterface
from .util import deprecated, deprecated_arg
# THE super BumpHunter2D class
class BumpHunter2D(BumpHunterInterface):
"""
The BumpHunter2D class provides an extension of the BumpHunter algorithm to 2D histograms.
Currently, only rectangular scan windows are supported.
Also, note that 2D signal injection is not yet implemented.
It comes with a 2D version of the methods and parameters of the one-dimensional BumpHunter class.
List of inner parameter variables :
rang :
[x,y]-axis ranges of the histograms. Also define the ranges in which the scan will be performed.
mode :
String specifying if the algorithm must look for an excess or a deficit in the data.
Can be either 'excess' or 'deficit'.
width_min :
Minimum [x,y] values of the scan window width that should be tested (in number of bins).
width_max :
Maximum [x,y] values of the scan window width that should be tested (in number of bins).
Can be either None or an array-like of 2 positive integers.
If None, the value is set to the total number of bins of the histograms divided by 2.
width_step :
Number of bins by which the scan window width is increased at each step.
scan_step :
Number of bins by which the position of the scan window is shifted at each step.
Can be an array-like of length 2 whose entries are either {'full', 'half'} or positive integers.
If 'full', the window will be shifted by a number of bins equal to its width.
If 'half', the window will be shifted by a number of bins equal to max(1,width//2).
npe :
Number of pseudo-data distributions to be sampled from the reference background distribution.
bins :
Define the bins of the histograms. Can be either a [integer,integer] or a 2D array-like of floats.
If integer ([N,M]), N*M bins of equal width will be considered.
If 2D array-like of float (a), a number of bins equal to (a.shape[0]-1)*(a.shape[1]-1) with the values of a
as edges will be considered (variable width bins allowed).
weights :
Weights for the background distribution. Can be either None or an array-like of float.
If array-like of floats, each background event will be weighted accordingly when making histograms.
The size of the array-like must be equal to the number of bkg events.
If None, no weights will be considered.
nworker :
Number of threads to be run in parallel when scanning all the histograms (data and pseudo-data).
If less or equal to 1, then parallelism will be disabled.
seed :
Seed for the random number generator.
use_sideband :
Boolean specifying if side-band normalization should be applied when computing p-values.
sigma_limit :
The minimum significance required after injection.
str_min :
The minimum signal strength to inject in the background (first iteration).
str_step :
Increase of the signal strength to be injected in the background at each iteration.
str_scale :
Specify how the signal strength should vary.
If 'log', the signal strength will vary according to a log scale starting from 10**str_min.
If 'lin', the signal will vary according to a linear scale starting from str_min with a step of str_step.
signal_exp :
Expected number of signal used to compute the signal strength.
If None, the signal strength is not computed.
flip_sig :
Boolean specifying if the signal should be flipped when running in deficit mode.
Ignored in excess mode.
List of inner results variables :
global_Pval :
Global p-value obtained from the test statistic distribution.
significance :
Significance corresponding to the global p-value from the test statistic distribution.
res_ar :
Array-like container containing all the local p-values calculated during the last BumpHunter scan.
The index 0 (res_ar[0]) corresponds to the scan of the data and the other indices correspond to the pseudo-data.
For each index, there is a Numpy array of python lists containing all the p-values of all windows obtained for a given distribution.
The numpy array has dimension (Nwidth), with Nwidth the number of window widths tested.
Each python list has dimension (Nstep), with Nstep the number of scan steps for a given width (different for every value of width).
min_Pval_ar :
Array containing the minimum p-values obtained for the data (index=0) and the pseudo-data (index>0).
min_loc_ar :
Array containing the positions of the windows for which the minimum p-value has been found for the data (index=0) and pseudo-data (index>0).
min_width_ar :
Array containing the width of the windows for which the minimum p-value has been found for the data (index=0) and pseudo-data (index>0).
signal_eval :
Number of signal events evaluated from the last scan.
signal_min :
Minimum number of signal events one must inject in the data in order to reach the required significance.
signal_ratio :
Ratio signal_min/signal_exp (signal strength).
data_inject :
Data obtained after injecting signal events in the background.
sigma_ar :
Numpy array containing the significance values obtained at each step of the injection.
"""
# Initializer method
@deprecated_arg("useSideBand", "use_sideband")
@deprecated_arg("Nworker", "nworker")
@deprecated_arg("Npe", "npe")
def __init__(
self,
rang=None,
mode: str="excess",
width_min=None,
width_max=None,
width_step=[1, 1],
scan_step=[1, 1],
npe: int=100,
bins=[20, 20],
weights=None,
nworker: int=4,
sigma_limit: float=5,
str_min: float=0.5,
str_step: float=0.25,
str_scale: str="lin",
signal_exp=None,
flip_sig: bool=True,
seed=None,
use_sideband: bool=False,
Npe=None,
Nworker=None,
useSideBand=None,
):
"""
Arguments:
rang :
[x,y]-axis ranges of the histograms. Also define the ranges in which the scan will be performed.
Can be either None or an array-like of float with shape (2,2).
If None, the range is set automatically to include all the data given.
Default to None.
mode :
String specifying if the algorithm must look for an excess or a deficit in the data.
Can be either 'excess' or 'deficit'.
Default to 'excess'.
width_min :
Minimum [x,y] values of the scan window width that should be tested (in number of bins).
Can be either None or an array-like of 2 positive integers.
If None, it will be set to [1,1].
Default to None.
width_max :
Maximum [x,y] values of the scan window width that should be tested (in number of bins).
Can be either None or an array-like of 2 positive integers.
If None, the value is set to the total number of bins of the histograms divided by 2. Default to none.
width_step :
Number of bins by which the scan window width is increased at each step.
Default to [1,1].
scan_step :
Number of bins by which the position of the scan window is shifted at each step.
Can be an array-like of length 2 whose entries are either 'full', 'half' or positive integers.
If 'full', the window will be shifted by a number of bins equal to its width.
If 'half', the window will be shifted by a number of bins equal to max(1,width//2).
Default to [1,1].
npe :
Number of pseudo-data distributions to be sampled from the reference background distribution.
Default to 100.
bins :
Define the bins of the histograms. Can be either a pair of integers or a 2D array-like of floats.
If a pair of integers ([N,M]), N*M bins of equal width will be considered.
If 2D array-like of float (a), a number of bins equal to (a.shape[0]-1)*(a.shape[1]-1) with the values of a as edges will be considered (variable width bins allowed).
Default to [20,20].
weights :
Weights for the background distribution.
Can be either None or an array-like of float.
If array-like of floats, each background event will be weighted accordingly when making histograms.
The size of the array-like must be the same as that of bkg.
If None, no weights will be considered.
Default to None.
nworker :
Number of threads to be run in parallel when scanning all the histograms (data and pseudo-data).
If less or equal to 1, then parallelism will be disabled.
Default to 4.
sigma_limit :
The minimum significance required after injection.
Default to 5.
str_min :
The minimum signal strength to inject in the background (first iteration).
Default to 0.5.
str_step :
Increase of the signal strength to be injected in the background at each iteration.
Default to 0.25.
str_scale :
Specify how the signal strength should vary.
If 'log', the signal strength will vary according to a log scale starting from 10**str_min
If 'lin', the signal will vary according to a linear scale starting from str_min with a step of str_step.
Default to 'lin'.
signal_exp :
Expected number of signal used to compute the signal strength.
If None, the signal strength is not computed.
Default to None.
flip_sig :
Boolean specifying if the signal should be flipped when running in deficit mode.
Ignored in excess mode.
Default to True.
seed :
Seed for the random number generator.
Default to None.
use_sideband :
Boolean specifying if the side-band normalization should be applied.
Default to False.
Npe : *Deprecated*
Same as npe. This argument is deprecated and will be removed in future versions.
Nworker : *Deprecated*
Same as nworker. This argument is deprecated and will be removed in future versions.
useSideBand : *Deprecated*
Same as use_sideband. This argument is deprecated and will be removed in future versions.
"""
# legacy deprecation
if useSideBand is not None:
use_sideband = useSideBand
if Nworker is not None:
nworker = Nworker
if Npe is not None:
npe = Npe
if width_min is None:
width_min = [1, 1]
# Initialize all inner parameter variables
self.rang = rang
self.mode = mode
self.width_min = width_min
self.width_max = width_max
self.width_step = width_step
self.scan_step = scan_step
self.npe = npe
self.bins = bins
self.weights = weights
self.nworker = nworker
self.sigma_limit = sigma_limit
self.str_min = str_min
self.str_step = str_step
self.str_scale = str_scale
self.signal_exp = signal_exp
self.flip_sig = flip_sig
self.seed = seed
self.use_sideband = use_sideband
# Initialize all inner result variables
self.reset()
return
## Private methods
# Method that performs a scan of a given data histogram and compares it to a reference background histogram.
# This method is used by the BumpHunter class methods and is not intended to be used directly.
def _scan_hist(self, hist, ref, w_ar, ih: int):
"""Scan a distribution and compute the p-value associated to every scan window.
The algorithm follows the BumpHunter algorithm. It also evaluates the number of signal events for the data histogram.
Arguments :
hist :
The data histogram (as obtained with the numpy.histogram2d function).
ref :
The reference (background) histogram (as obtained with the numpy.histogram2d function).
w_ar :
Array containing all the values of width to be tested.
ih :
Index of the distribution to be scanned.
ih==0 refers to the data distribution and ih>0 refers to the ih-th pseudo-data distribution.
Results stored in inner variables :
res :
Numpy array of python lists containing all the p-values of all windows computed during the scan.
The numpy array has dimension (Nwidth), with Nwidth the number of window widths tested.
Each python list has dimension (Nstep), with Nstep the number of scan steps for a given width (different for every value of width).
min_Pval :
Minimum p-value obtained during the scan (float).
min_loc :
Position of the window corresponding to the minimum p-value ([integer,integer]).
min_width :
Width of the window corresponding to the minimum p-value ([integer,integer]).
"""
# Create the results array
res = np.empty(w_ar.shape[0], dtype=object)
min_Pval, min_loc = np.empty(w_ar.shape[0]), np.empty(
w_ar.shape[0], dtype=object
)
signal_eval = np.empty(w_ar.shape[0])
if self.use_sideband:
ref_total = ref.sum()
hist_total = hist.sum()
# Loop over all the window widths
i = 0
for w in w_ar:
# Auto-adjust scan step if specified
scan_stepp = [0, 0]
if self.scan_step[0] == "full":
scan_stepp[0] = w
elif self.scan_step[0] == "half":
scan_stepp[0] = max(1, w // 2)
else:
scan_stepp[0] = self.scan_step[0]
if self.scan_step[1] == "full":
scan_stepp[1] = w
elif self.scan_step[1] == "half":
scan_stepp[1] = max(1, w // 2)
else:
scan_stepp[1] = self.scan_step[1]
# Define position range
posx = np.arange(0, ref.shape[0] - w[0] + 1, scan_stepp[0])
posy = np.arange(0, ref.shape[1] - w[1] + 1, scan_stepp[1])
pos = np.array([[p[0], p[1]] for p in itertools.product(posx, posy)])
# Check that there is at least one interval to check for width w
# If not, we must set dummy values in order to avoid crashes
if pos.size == 0:
res[i] = np.array([1.0])
min_Pval[i] = 1.0
min_loc[i] = [0, 0]
signal_eval[i] = 0
i += 1
continue
# Initialize local p-value array for width w
res[i] = np.ones(pos.shape[0])
# Count events in all windows of width w
# FIXME any better way to do it ?? Without loop ?? FIXME
Nref = np.array(
[ref[p[0] : p[0] + w[0], p[1] : p[1] + w[1]].sum() for p in pos]
)
Nhist = np.array(
[hist[p[0] : p[0] + w[0], p[1] : p[1] + w[1]].sum() for p in pos]
)
# Apply side-band normalization if required
if self.use_sideband == True:
Nref *= (hist_total - Nhist) / (ref_total - Nref)
# Calculate all local p-values for width w
if self.mode == "excess":
res[i][(Nhist > Nref) & (Nref > 0)] = G(
Nhist[(Nhist > Nref) & (Nref > 0)],
Nref[(Nhist > Nref) & (Nref > 0)],
)
elif self.mode == "deficit":
res[i][Nhist < Nref] = 1.0 - G(
Nhist[Nhist < Nref] + 1, Nref[Nhist < Nref]
)
if self.use_sideband == True:
res[i][
res[i] < 1e-300
] = 1e-300 # prevent issue with very low p-value, sometimes induced by normalisation in the tail
# Get the minimum p-value and associated position for width w
min_Pval[i] = res[i].min()
min_loc[i] = pos[res[i].argmin()]
signal_eval[i] = Nhist[res[i].argmin()] - Nref[res[i].argmin()]
i += 1
# Get the minimum p-value and associated window among all widths
min_width = w_ar[min_Pval.argmin()]
min_loc = min_loc[min_Pval.argmin()]
# Evaluate the number of signal events (for data only)
if ih == 0:
self.signal_eval = signal_eval[min_Pval.argmin()]
min_Pval = min_Pval.min()
# Save the results in inner variables and return
self.res_ar[ih] = res
self.min_Pval_ar[ih] = min_Pval
self.min_loc_ar[ih] = [int(min_loc[0]), int(min_loc[1])]
self.min_width_ar[ih] = [int(min_width[0]), int(min_width[1])]
return
## Variable management methods
# Reset method
def reset(self):
"""
Reset all the inner result parameters for this BumpHunter instance.
Use with caution.
"""
self.global_Pval = 0
self.significance = 0
self.res_ar = []
self.min_Pval_ar = []
self.min_loc_ar = []
self.min_width_ar = []
self.t_ar = []
self.signal_eval = 0
self.signal_min = 0
self.signal_ratio = None
self.data_inject = []
return
@deprecated("Use `reset` instead.")
def Reset(self, *args, **kwargs):
return self.reset(*args, **kwargs)
# Export/import parameters/results
def save_state(self):
"""
Save the current state (all parameters and results) of a BumpHunter instance into a dict variable.
Returns:
state :
The dict containing all the parameters and results of this BumpHunter instance.
The keys of the dict entries correspond to the names of their associated parameters/results as defined in the BumpHunter class.
"""
state = dict()
# Save parameters
state["mode"] = self.mode
state["rang"] = self.rang
state["bins"] = self.bins
state["weights"] = self.weights
state["width_min"] = self.width_min
state["width_max"] = self.width_max
state["width_step"] = self.width_step
state["scan_step"] = self.scan_step
state["npe"] = self.npe
state["nworker"] = self.nworker
state["seed"] = self.seed
state["sigma_limit"] = self.sigma_limit
state["str_min"] = self.str_min
state["str_step"] = self.str_step
state["str_scale"] = self.str_scale
state["signal_exp"] = self.signal_exp
state["sig_flip"] = self.flip_sig
state["use_sideband"] = self.use_sideband
# Save results
state["global_Pval"] = self.global_Pval
state["significance"] = self.significance
state["res_ar"] = self.res_ar
state["min_Pval_ar"] = self.min_Pval_ar
state["min_loc_ar"] = self.min_loc_ar
state["min_width_ar"] = self.min_width_ar
state["t_ar"] = self.t_ar
state["signal_eval"] = self.signal_eval
state["signal_min"] = self.signal_min
state["signal_ratio"] = self.signal_ratio
state["data_inject"] = self.data_inject
return state
@deprecated("Use `save_state` instead.")
def SaveState(self, *args, **kwargs):
return self.save_state(*args, **kwargs)
def load_state(self, state: dict):
"""
Load all the parameters and results of a previous BumpHunter instance that were saved using the SaveState method.
Arguments :
state :
A dict containing all the parameters/results of a previous BumpHunter instance.
If a parameter or a result field is missing, it will be set to its default value.
"""
# Load parameters
if "mode" in state.keys():
self.mode = state["mode"]
else:
self.mode = "excess"
if "rang" in state.keys():
self.rang = state["rang"]
else:
self.rang = None
if "bins" in state.keys():
self.bins = state["bins"]
else:
self.bins = [20, 20]
if "weights" in state.keys():
self.rang = state["weights"]
else:
self.rang = None
if "width_min" in state.keys():
self.width_min = state["width_min"]
else:
self.width_min = [1, 1]
if "width_max" in state.keys():
self.width_max = state["width_max"]
else:
self.width_max = None
if "width_step" in state.keys():
self.width_step = state["width_step"]
else:
self.width_step = [1, 1]
if "scan_step" in state.keys():
self.scan_step = state["scan_step"]
else:
self.scan_step = [1, 1]
if "npe" in state.keys():
self.npe = state["npe"]
else:
self.npe = 100
if "nworker" in state.keys():
self.nworker = state["nworker"]
else:
self.nworker = 4
if "seed" in state.keys():
self.seed = state["seed"]
else:
self.seed = None
if "use_sideband" in state.keys():
self.use_sideband = state["use_sideband"]
else:
self.use_sideband = False
if "sigma_limit" in state.keys():
self.sigma_limit = state["sigma_limit"]
else:
self.sigma_limit = 5
if "str_min" in state.keys():
self.str_min = state["str_min"]
else:
self.str_min = 0.5
if "str_step" in state.keys():
self.str_step = state["str_step"]
else:
self.str_step = 0.25
if "str_scale" in state.keys():
self.str_scale = state["str_scale"]
else:
self.str_scale = "lin"
if "signal_exp" in state.keys():
self.signal_exp = state["signal_exp"]
else:
self.signal_exp = None
if "sig_flip" in state.keys():
self.sig_flip = state["sig_flip"]
else:
self.sig_flip = True
# Load results
self.reset()
if "global_Pval" in state.keys():
self.global_Pval = state["global_Pval"]
if "significance" in state.keys():
self.significance = state["significance"]
if "res_ar" in state.keys():
self.res_ar = state["res_ar"]
if "min_Pval_ar" in state.keys():
self.min_Pval_ar = state["min_Pval_ar"]
if "min_loc_ar" in state.keys():
self.min_loc_ar = state["min_loc_ar"]
if "min_width_ar" in state.keys():
self.min_width_ar = state["min_width_ar"]
if "t_ar" in state.keys():
self.t_ar = state["t_ar"]
if "signal_eval" in state.keys():
self.signal_eval = state["signal_eval"]
if "signal_min" in state.keys():
self.signal_min = state["signal_min"]
if "signal_ratio" in state.keys():
self.signal_ratio = state["signal_ratio"]
if "data_inject" in state.keys():
self.data_inject = state["data_inject"]
return
@deprecated("Use `load_state` instead.")
def LoadState(self, *args, **kwargs):
return self.load_state(*args, **kwargs)
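# A minimal usage sketch for the state export/import methods above (the
# variable names are hypothetical; `hunter` is assumed to be an
# already-configured instance of this class):
#     state = hunter.save_state()      # plain dict, safe to pickle
#     fresh = type(hunter)()           # new instance with default settings
#     fresh.load_state(state)          # restores parameters and results
# Any key missing from `state` falls back to its default value, as documented
# in load_state.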
## Scan methods
# Method that performs the scan on the data and on every pseudo-experiment (in parallel threads).
# For each scan, the p-value and the test statistic t are computed and stored in the result arrays.
def bump_scan(self, data, bkg, is_hist: bool=False, do_pseudo: bool=True):
"""
Function that performs the full BumpHunter algorithm presented in https://arxiv.org/pdf/1101.0390.pdf without sidebands.
This includes the generation of pseudo-data, the calculation of the BumpHunter p-value associated with the data and with all pseudo-experiments, as well as the calculation of the test statistic t.
The results are stored in the inner result variables of this BumpHunter instance.
Arguments :
data :
Numpy array containing the data distribution.
This distribution will be transformed into a binned histogram and the algorithm will look for the most significant excess.
bkg :
Numpy array containing the background reference distribution.
This distribution will be transformed into a binned histogram and the algorithm will compare it to data while looking for a bump.
is_hist :
Boolean specifying if the given data and background are already in histogram form.
If True, the data and background are treated as already binned histograms.
Default to False.
do_pseudo :
Boolean specifying if pseudo-data should be generated.
If False, then the BumpHunter statistics distribution kept in memory is used to compute the global p-value and significance.
If there is nothing in memory, the global p-value and significance will not be computed.
Default to True.
Result inner variables :
global_Pval :
Global p-value obtained from the test statistic distribution.
res_ar :
Array of containers with all the p-values calculated during the scan of the data (index=0) and of the pseudo-data (index>0).
For more details about how the p-values are sorted in the containers, please refer to the doc of the function _scan_hist.
min_Pval_ar :
Array containing the minimum p-values obtained for the data (index=0) and the pseudo-data (index>0).
min_loc_ar :
Array containing the positions of the windows for which the minimum p-value has been found for the data (index=0) and pseudo-data (index>0).
min_width_ar :
Array containing the widths of the windows for which the minimum p-value has been found for the data (index=0) and pseudo-data (index>0).
signal_eval :
Number of signal events evaluated from the last scan.
"""
# Set the seed if required (or reset it if None)
np.random.seed(self.seed)
# Generate the background and data histograms
print("Generating histograms")
if not is_hist:
bkg_hist, Hbinx, Hbiny = np.histogram2d(
bkg[:, 0],
bkg[:, 1],
bins=self.bins,
weights=self.weights,
range=self.rang,
)
data_hist = np.histogram2d(
data[:, 0], data[:, 1], bins=self.bins, range=self.rang
)[0]
else:
if self.weights is None:
bkg_hist = bkg
else:
bkg_hist = bkg * self.weights
data_hist = data
Hbins = self.bins
# Generate all the pseudo-data histograms
if do_pseudo:
pseudo_hist = bkg_hist.flatten()
pseudo_hist = np.random.poisson(
lam=np.tile(pseudo_hist, (self.npe, 1)).transpose(),
size=(pseudo_hist.size, self.npe),
)
pseudo_hist = np.reshape(
pseudo_hist, (bkg_hist.shape[0], bkg_hist.shape[1], self.npe)
)
# Set width_max if it is given as None
if self.width_max is None:
self.width_max = [data_hist.shape[0] // 2, data_hist.shape[1] // 2]
# Initialize all result containers
if do_pseudo:
self.min_Pval_ar = np.empty(self.npe + 1)
self.min_loc_ar = np.empty(self.npe + 1, dtype=object)
self.min_width_ar = np.empty(self.npe + 1, dtype=object)
self.res_ar = np.empty(self.npe + 1, dtype=object)
else:
if self.res_ar == []:
self.min_Pval_ar = np.empty(1)
self.min_loc_ar = np.empty(1, dtype=object)
self.min_width_ar = np.empty(1, dtype=object)
self.res_ar = np.empty(1, dtype=object)
# Create the arrays of all window widths to scan
wx = np.arange(self.width_min[0], self.width_max[0] + 1, self.width_step[0])
wy = | np.arange(self.width_min[1], self.width_max[1] + 1, self.width_step[1]) | numpy.arange |
"""
Nonparametric survival estimators
Author: <NAME> (georgechen [at symbol] cmu.edu)
This file contains the following classes and various helper functions (all of
these implement both Kaplan-Meier and Nelson-Aalen versions):
- BasicSurvival : basic Kaplan-Meier and Nelson-Aalen estimators that do not
account for feature vectors
- KNNSurvival : k-NN survival estimation
- KNNWeightedSurvival: weighted k-NN survival estimation
- KernelSurvival : kernel survival estimation
- RandomSurvivalForest : a heavily modified version of Wrymm's random survival
forest code (the version last updated Feb 28, 2017)
[https://github.com/Wrymm/Random-Survival-Forests]; changes are discussed
below
- RandomSurvivalForestANN : kernel survival estimation where the kernel is
learned using a random survival forest (ANN stands for "adaptive nearest
neighbors"; one can interpret this is either an adaptive kernel method or an
adaptive nearest neighbors method where the neighbors are weighted)
- CDFRegressionKNNWeightedSurvival : implements the "cdf-reg" two-step method
mentioned in the ICML paper:
<NAME>. Nearest Neighbor and Kernel Survival Analysis:
Nonasymptotic Error Bounds and Strong Consistency Rates. ICML 2019.
Random survival forests are by <NAME>, <NAME>, <NAME>, and <NAME>: "Random survival forests" (Annals of Applied
Stats 2008); see also Ishwaran and Kogalur's "Random survival forests for R"
article in Rnews (2007) and their R package "randomForestSRC".
Setup
-----
Be sure to compile the cython code by running:
python setup_random_survival_forest_cython.py build_ext --inplace
* * * * *
Main changes to Wrymm's code (the version last updated Feb 28, 2017):
- the log-rank splitting score denominator calculation appeared to be missing a
Y_i factor (prior to taking the square root); this has been fixed
- the log-rank splitting score code is implemented in cython
- Wrymm's code only splits on medians of feature values rather than optimizing
for the best split; I have added both an exhaustive split option (tries
every split threshold among the observed feature values) and a random split
option (Ishwaran et al suggest in their Annals of Applied Stats paper that
this randomized strategy actually works quite well)
- Wrymm's code has `min_samples_split` refer to what scikit-learn calls
`min_samples_leaf`; I switched the variable name to match that of
scikit-learn and also introduced what scikit-learn calls `min_samples_split`
as a parameter
- many survival probabilities are computed at once for a given feature vector
(i.e., rather than computing the probability of a subject surviving beyond
one choice of time, compute the probabilities of a subject surviving beyond a
collection of different times)
- added code to predict subject-specific cumulative hazard functions
- randomization can now be made deterministic by providing either an integer
random seed or a numpy RandomState instance
- pandas has been removed to speed up the code
- parallelism is now supported both in fitting and prediction
"""
from collections import Counter
import functools
import pickle
import numpy as np
from joblib import Parallel, delayed
from lifelines.utils import concordance_index
from sklearn.neighbors import NearestNeighbors
from random_survival_forest_cython import logrank
class RandomSurvivalForest():
def __init__(self, n_estimators=100, max_features='sqrt', max_depth=None,
min_samples_split=2, min_samples_leaf=1, split='logrank',
split_threshold_mode='exhaustive', random_state=None,
n_jobs=None, oob_score=False, feature_importance=False):
"""
A random survival forest survival probability estimator. This is very
similar to the usual random forest that is used for regression and
classification. However, in a random survival forest, the prediction
task is to estimate the survival probability function for a test
feature vector. Training data can have right-censoring. For details,
see any introductory text on survival analysis.
Parameters
----------
n_estimators : int, optional (default=100)
Number of trees.
max_features : int, string, optional (default='sqrt')
Number of features chosen per tree. Allowable string choices are
'sqrt' (max_features=ceil(sqrt(n_features))) and 'log2'
(max_features=ceil(log2(n_features))).
max_depth : int, optional (default=None)
Maximum depth of each tree. If None, then each tree is grown
until other termination criteria are met (see `min_samples_split`
and `min_samples_leaf` parameters).
min_samples_split : int, optional (default=2)
A node must have at least this many samples to be split.
min_samples_leaf : int, float, optional (default=1)
Both sides of a split must have at least this many samples
(or in the case of a fraction, at least a fraction of samples)
for the split to happen. Otherwise, the node is turned into a
leaf node.
split : string, optional (default='logrank')
Currently only the log-rank splitting criterion is supported.
split_threshold_mode : string, optional (default='exhaustive')
If 'exhaustive', then we compute the split score for every observed
feature value as a possible threshold (this can be very expensive).
If 'median', then for any feature, we always split on the median
value observed for that feature (this is the only supported option
in Wrymm's original random survival analysis code).
If 'random', then for any feature, we randomly choose a split
threshold among the observed feature values (this is recommended by
the random survival forest authors if fast computation is desired).
random_state : int, numpy RandomState instance, None, optional
(default=None)
If an integer, then a new numpy RandomState is created with the
integer as the random seed. If a numpy RandomState instance is
provided, then it is used as the pseudorandom number generator. If
None is specified, then a new numpy RandomState is created without
providing a seed.
n_jobs : int, None, optional (default=None)
Number of cores to use with joblib's Parallel. This is the same
`n_jobs` parameter as for Parallel. Setting `n_jobs` to -1 uses all
the cores.
oob_score : boolean, optional (default=False)
Whether to compute an out-of-bag (OOB) accuracy estimate (as with
the original random survival forest paper, this is done using
c-index with cumulative hazard estimates). The OOB estimate is
computed during model fitting (via fit()), and the resulting
c-index estimate is stored in the attribute `oob_score_`.
feature_importance : boolean, optional (default=False)
Whether to compute feature importances (requires `oob_score` to
be set to True). Feature importances are computed during the
model fitting (via fit()), and the resulting feature importances are
stored in the attribute `feature_importances_`.
"""
self.n_estimators = n_estimators
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.max_features = max_features
self.split_threshold_mode = split_threshold_mode
self.n_jobs = n_jobs
self.oob_score = oob_score
self.feature_importance = feature_importance
self.column_names = None
self.oob_score_ = None
self.feature_importances_ = None
if random_state is None:
self.random_state = np.random.RandomState()
elif type(random_state) == int:
self.random_state = np.random.RandomState(random_state)
else:
self.random_state = random_state
if split == 'logrank':
self.split_score_function = logrank
else:
raise NotImplementedError('Unsupported split criterion '
+ '"{0}"'.format(split))
def save(self, filename):
data = {'n_estimators': self.n_estimators,
'max_depth': self.max_depth,
'min_samples_split': self.min_samples_split,
'min_samples_leaf': self.min_samples_leaf,
'max_features': self.max_features,
'split_threshold_mode': self.split_threshold_mode,
'n_jobs': self.n_jobs,
'oob_score': self.oob_score,
'feature_importance': self.feature_importance,
'column_names': list(self.column_names),
'oob_score_': self.oob_score_}
if self.feature_importances_ is not None:
data['feature_importances_'] = self.feature_importances_.tolist()
else:
data['feature_importances_'] = None
data['trees'] = \
[_convert_to_not_use_numpy(tree) for tree in self.trees]
data['tree_bootstrap_indices'] = \
[indices.tolist() for indices in self.tree_bootstrap_indices]
with open(filename, 'wb') as f:
pickle.dump(data, f)
@staticmethod
def load(filename):
with open(filename, 'rb') as f:
data = pickle.load(f)
rsf = \
RandomSurvivalForest(n_estimators=data['n_estimators'],
max_features=data['max_features'],
max_depth=data['max_depth'],
min_samples_split=data['min_samples_split'],
min_samples_leaf=data['min_samples_leaf'],
split='logrank',
split_threshold_mode='exhaustive',
random_state=None,
n_jobs=data['n_jobs'],
oob_score=data['oob_score'],
feature_importance=data['feature_importance'])
rsf.column_names = data['column_names']
rsf.oob_score_ = data['oob_score_']
if data['feature_importances_'] is None:
rsf.feature_importances_ = None
else:
rsf.feature_importances_ = np.array(data['feature_importances_'])
rsf.trees = [_convert_to_use_numpy(tree) for tree in data['trees']]
rsf.tree_bootstrap_indices = \
np.array([indices for indices in data['tree_bootstrap_indices']])
for tree in rsf.trees:
_label_leaves(tree)
return rsf
def fit(self, X, y, column_names=None):
"""
Fits the random survival forest to training data.
Parameters
----------
X : 2D numpy array, shape = [n_samples, n_features]
Feature vectors.
y : 2D numpy array, shape = [n_samples, 2]
Survival labels (first column is for observed times, second column
is for event indicators). The i-th row corresponds to the i-th row
in `X`.
column_names : list, None, optional (default=None)
Names for features can be specified. This is only for display
purposes when using the `draw` method. If set to None, then
`column_names` is just set to be a range of integers indexing the
columns from 0.
Returns
-------
None
"""
if column_names is None:
self.column_names = list(range(X.shape[1]))
else:
self.column_names = column_names
assert len(column_names) == X.shape[1]
if type(self.max_features) == str:
if self.max_features == 'sqrt':
max_features = int(np.ceil(np.sqrt(X.shape[1])))
elif self.max_features == 'log2':
max_features = int(np.ceil(np.log2(X.shape[1])))
else:
raise NotImplementedError('Unsupported max features choice '
+ '"{0}"'.format(self.max_features))
else:
max_features = self.max_features
self.tree_bootstrap_indices = []
sort_indices = np.argsort(y[:, 0])
X = X[sort_indices].astype(np.float64)
y = y[sort_indices].astype(np.float64)
random_state = self.random_state
for tree_idx in range(self.n_estimators):
bootstrap_indices = np.sort(random_state.choice(X.shape[0],
X.shape[0],
replace=True))
self.tree_bootstrap_indices.append(bootstrap_indices)
with Parallel(n_jobs=self.n_jobs) as parallel:
self.trees = \
parallel(
delayed(_build_tree)(
X[self.tree_bootstrap_indices[tree_idx]],
y[self.tree_bootstrap_indices[tree_idx]],
0, self.max_depth, max_features,
self.split_score_function, self.min_samples_split,
self.min_samples_leaf, self.split_threshold_mode,
np.random.RandomState(random_state.randint(4294967296)))
for tree_idx in range(self.n_estimators))
if self.oob_score:
parallel_args = []
oob_masks = []
for tree_idx, bootstrap_indices \
in enumerate(self.tree_bootstrap_indices):
oob_mask = np.ones(X.shape[0], dtype=bool)
for idx in bootstrap_indices:
oob_mask[idx] = 0
if oob_mask.sum() > 0:
X_oob = X[oob_mask]
if len(X_oob.shape) == 1:
X_oob = X_oob.reshape(1, -1)
parallel_args.append((tree_idx, X_oob))
oob_masks.append(
(oob_mask,
{original_idx: new_idx
for new_idx, original_idx
in enumerate(np.where(oob_mask)[0])}))
sorted_unique_times = np.unique(y[:, 0])
results = parallel(
delayed(_predict_tree)(
self.trees[tree_idx], 'cum_haz', X_oob,
sorted_unique_times, True)
for (tree_idx, X_oob) in parallel_args)
num_unique_times = len(sorted_unique_times)
cum_hazard_scores = []
oob_y = []
for idx in range(X.shape[0]):
num = 0.
den = 0.
for tree_idx2, (oob_mask, forward_map) \
in enumerate(oob_masks):
if oob_mask[idx]:
num += results[tree_idx2][forward_map[idx]].sum()
den += 1
if den > 0:
cum_hazard_scores.append(num / den)
oob_y.append(y[idx])
cum_hazard_scores = np.array(cum_hazard_scores)
oob_y = np.array(oob_y)
self.oob_score_ = concordance_index(oob_y[:, 0],
-cum_hazard_scores,
oob_y[:, 1])
if self.feature_importance:
self.feature_importances_ = []
for col_idx in range(X.shape[1]):
vimp_results = \
parallel(
delayed(_predict_tree_vimp)(
self.trees[tree_idx], 'cum_haz',
X_oob, sorted_unique_times, True,
col_idx,
np.random.RandomState(
random_state.randint(4294967296)))
for (tree_idx, X_oob)
in parallel_args)
cum_hazard_scores = []
oob_y = []
for idx in range(X.shape[0]):
num = 0.
den = 0.
for tree_idx2, (oob_mask, forward_map) \
in enumerate(oob_masks):
if oob_mask[idx]:
num += vimp_results[tree_idx2][
forward_map[idx]].sum()
den += 1
if den > 0:
cum_hazard_scores.append(num / den)
oob_y.append(y[idx])
if len(cum_hazard_scores) > 0:
cum_hazard_scores = np.array(cum_hazard_scores)
oob_y = np.array(oob_y)
vimp = self.oob_score_ - \
concordance_index(oob_y[:, 0],
-cum_hazard_scores,
oob_y[:, 1])
else:
vimp = np.nan
self.feature_importances_.append(vimp)
self.feature_importances_ \
= np.array(self.feature_importances_)
for tree in self.trees:
_label_leaves(tree)
def predict_leaf_ids(self, X):
results = Parallel(n_jobs=self.n_jobs)(
delayed(_predict_tree_leaf_id)(self.trees[tree_idx], X)
for tree_idx in range(self.n_estimators))
return np.array(results).T
def predict_surv(self, X, times, presorted_times=False,
use_kaplan_meier=True):
"""
Computes the forest's survival probability function estimate for each
feature vector evaluated at user-specified times.
Parameters
----------
X : 2D numpy array, shape = [n_samples, n_features]
Feature vectors.
times : 1D numpy array
Times to compute the survival probability function at.
presorted_times : boolean, optional (default=False)
Flag for whether `times` is already sorted.
use_kaplan_meier : boolean, optional (default=True)
In the original random survival forests paper, only the cumulative
hazard function H(t|x) is predicted from the leaves rather than the
survival function S(t|x). One can back out the survival function
from the cumulative hazard function since S(t|x)=exp(-H(t|x)).
If this flag is set to True, then we have the forest predict S(t|x)
using Kaplan-Meier estimates at the leaves (instead of the
default of predicting H(t|x) with Nelson-Aalen estimates at the
leaves), and average the trees' S(t|x) estimates.
Returns
-------
output : 2D numpy array
Survival probability function evaluated at each of the times
specified in `times` for each feature vector. The i-th row
corresponds to the i-th feature vector.
"""
if use_kaplan_meier:
results = Parallel(n_jobs=self.n_jobs)(
delayed(_predict_tree)(self.trees[tree_idx], 'surv', X, times,
presorted_times)
for tree_idx in range(self.n_estimators))
return functools.reduce(lambda x, y: x + y, results) \
/ self.n_estimators
else:
return np.exp(-self.predict_cum_haz(X, times, presorted_times))
def predict_cum_haz(self, X, times, presorted_times=False,
use_kaplan_meier=False, surv_eps=1e-12):
"""
Computes the forest's cumulative hazard function estimate for each
feature vector evaluated at user-specified times.
Parameters
----------
X : 2D numpy array, shape = [n_samples, n_features]
Feature vectors.
times : 1D numpy array
Times to compute the cumulative hazard function at.
presorted_times : boolean, optional (default=False)
Flag for whether `times` is already sorted.
use_kaplan_meier : boolean, optional (default=False)
In the original random survival forests paper, only the cumulative
hazard function H(t|x) is predicted from the leaves rather than the
survival function S(t|x). One can back out the cumulative hazard
function from the survival function since H(t|x)=-log(S(t|x)).
If this flag is set to True, then we have the forest predict S(t|x)
first using Kaplan-Meier estimates at the leaves (instead of the
default of predicting H(t|x) with Nelson-Aalen estimates at the
leaves), and then we back out an estimate for H(t|x).
surv_eps : float, optional (default=1e-12)
If `use_kaplan_meier` is set to True, then we clip the estimated
survival function so that any value less than `surv_eps` is set to
`surv_eps`. This makes it so that when we take the negative log of
the survival function, we don't take logs of 0.
Returns
-------
output : 2D numpy array
Cumulative hazard function evaluated at each of the times
specified in `times` for each feature vector. The i-th row
corresponds to the i-th feature vector.
"""
if use_kaplan_meier:
surv = self.predict_surv(X, times, presorted_times, True)
return -np.log(np.clip(surv, surv_eps, 1.))
else:
results = Parallel(n_jobs=self.n_jobs)(
delayed(_predict_tree)(self.trees[tree_idx], 'cum_haz', X, times,
presorted_times)
for tree_idx in range(self.n_estimators))
return functools.reduce(lambda x, y: x + y, results) \
/ self.n_estimators
def _print_with_depth(self, string, depth):
"""
Auxiliary function to print a string with indentation dependent on
depth.
"""
print("{0}{1}".format(" " * depth, string))
def _print_tree(self, tree, current_depth=0):
"""
Auxiliary function to print a survival tree.
"""
if 'surv' in tree:
self._print_with_depth(tree['times'], current_depth)
return
self._print_with_depth(
"{0} > {1}".format(self.column_names[tree['feature']],
tree['threshold']),
current_depth)
self._print_tree(tree['left'], current_depth + 1)
self._print_tree(tree['right'], current_depth + 1)
def draw(self):
"""
Prints out each tree of the random survival forest.
"""
for tree_idx, tree in enumerate(self.trees):
print("==========================================\nTree",
tree_idx)
self._print_tree(tree)
class BasicSurvival():
def __init__(self):
self.tree = None
def fit(self, y):
self.tree = _fit_leaf(y)
def predict_surv(self, times, presorted_times=False,
limit_from_left=False):
"""
Computes the Kaplan-Meier survival probability function estimate at
user-specified times.
Parameters
----------
times : 1D numpy array (default=None)
Times to compute the survival probability function at.
presorted_times : boolean, optional (default=False)
Flag for whether `times` is already sorted.
limit_from_left : boolean, optional (default=False)
Flag for whether to output the function evaluated at a time just to
the left, i.e., instead of outputting f(t) where f is the survival
probability function estimate, output:
f(t-) := limit as t' approaches t from the left of f(t').
Returns
-------
output : 1D numpy array
Survival probability function evaluated at each of the times
specified in `times`.
"""
return _predict_leaf(self.tree, 'surv', times, presorted_times,
limit_from_left)
def predict_cum_haz(self, times, presorted_times=False,
limit_from_left=False):
"""
Computes the Nelson-Aalen cumulative hazard function estimate at
user-specified times.
Parameters
----------
times : 1D numpy array
Times to compute the cumulative hazard function at.
presorted_times : boolean, optional (default=False)
Flag for whether `times` is already sorted.
limit_from_left : boolean, optional (default=False)
Flag for whether to output the function evaluated at a time just to
the left, i.e., instead of outputting f(t) where f is the
cumulative hazard function estimate, output:
f(t-) := limit as t' approaches t from the left of f(t').
Returns
-------
output : 1D numpy array
Cumulative hazard function evaluated at each of the times
specified in `times`.
"""
return _predict_leaf(self.tree, 'cum_haz', times, presorted_times,
limit_from_left)
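# A minimal usage sketch for BasicSurvival (hypothetical data; no feature
# vectors are involved, only survival labels):
#     km = BasicSurvival()
#     km.fit(y)                                    # y: (n, 2) [time, event]
#     times = np.linspace(0., y[:, 0].max(), 50)
#     S = km.predict_surv(times)                   # Kaplan-Meier curve
#     H = km.predict_cum_haz(times)                # Nelson-Aalen curve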
class KNNSurvival():
def __init__(self, *args, **kwargs):
"""
Arguments are the same as for `sklearn.neighbors.NearestNeighbors`.
The simplest usage of this class is to use a single argument, which is
`n_neighbors` for the number of nearest neighbors (Euclidean distance
is assumed in this case). If you want to parallelize across different
search queries, use the `n_jobs` keyword parameter (-1 to use all
cores). To use other distances and for other details, please refer to
the documentation for sklearn's `NearestNeighbors` class.
*Important:* The prediction methods for this class use unweighted
k-nearest neighbors, where "k" is set equal to the `n_neighbors`
parameter.
"""
self.NN_index_args = args
self.NN_index_kwargs = kwargs
self.NN_index = None
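# For example (a sketch with hypothetical arrays; the keyword arguments are
# forwarded verbatim to sklearn's NearestNeighbors):
#     knn = KNNSurvival(n_neighbors=50, n_jobs=-1)
#     knn.fit(X_train, y_train)
#     S = knn.predict_surv(X_test, times)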
def fit(self, X, y):
"""
Constructs a nearest-neighbor index given training data (so that for
a future data point, we can use the nearest-neighbor index to quickly
find what the closest training data are to the future point).
Parameters
----------
X : 2D numpy array, shape = [n_samples, n_features]
Feature vectors.
y : 2D numpy array, shape = [n_samples, 2]
Survival labels (first column is for observed times, second column
is for event indicators). The i-th row corresponds to the i-th row
in `X`.
Returns
-------
None
"""
self.train_y = y
self.NN_index = NearestNeighbors(*self.NN_index_args,
**self.NN_index_kwargs)
self.NN_index.fit(X)
def predict_surv(self, X, times, presorted_times=False,
limit_from_left=False, n_neighbors=None):
"""
Computes the k-NN Kaplan-Meier survival probability function estimate
at user-specified times.
*Important:* The default number of nearest neighbors to use is whatever
was specified in `args` or `kwargs` when creating an instance of this
class (the "k" in k-NN)!
Parameters
----------
X : 2D numpy array, shape = [n_samples, n_features]
Feature vectors.
times : 1D numpy array
Times to compute the survival probability function at.
presorted_times : boolean, optional (default=False)
Flag for whether `times` is already sorted.
limit_from_left : boolean, optional (default=False)
Flag for whether to output the function evaluated at a time just to
the left, i.e., instead of outputting f(t) where f is the survival
probability function estimate, output:
f(t-) := limit as t' approaches t from the left of f(t').
n_neighbors : int, None, optional (default=None)
Number of nearest neighbors to use. If set to None then the number
used is whatever was passed into `args` or `kwargs` when creating
an instance of this class.
Returns
-------
output : 2D numpy array
Survival probability function evaluated at each of the times
specified in `times` for each feature vector. The i-th row
corresponds to the i-th feature vector.
"""
indices = self.NN_index.kneighbors(X, n_neighbors=n_neighbors,
return_distance=False)
train_y = self.train_y
return np.array([_predict_leaf(_fit_leaf(train_y[idx]), 'surv', times,
presorted_times, limit_from_left)
for idx in indices])
def predict_cum_haz(self, X, times, presorted_times=False,
limit_from_left=False, n_neighbors=None):
"""
Computes the k-NN Nelson-Aalen cumulative hazard function estimate at
user-specified times.
*Important:* The default number of nearest neighbors to use is whatever
was specified in `args` or `kwargs` when creating an instance of this
class (the "k" in k-NN)!
Parameters
----------
X : 2D numpy array, shape = [n_samples, n_features]
Feature vectors.
times : 1D numpy array
Times to compute the cumulative hazard function at.
presorted_times : boolean, optional (default=False)
Flag for whether `times` is already sorted.
limit_from_left : boolean, optional (default=False)
Flag for whether to output the function evaluated at a time just to
the left, i.e., instead of outputting f(t) where f is the
cumulative hazard function estimate, output:
f(t-) := limit as t' approaches t from the left of f(t').
n_neighbors : int, None, optional (default=None)
Number of nearest neighbors to use. If set to None then the number
used is whatever was passed into `args` or `kwargs` when creating
an instance of this class.
Returns
-------
output : 2D numpy array
Cumulative hazard function evaluated at each of the times
specified in `times` for each feature vector. The i-th row
corresponds to the i-th feature vector.
"""
indices = self.NN_index.kneighbors(X, n_neighbors=n_neighbors,
return_distance=False)
train_y = self.train_y
return np.array([_predict_leaf(_fit_leaf(train_y[idx]), 'cum_haz',
times, presorted_times, limit_from_left)
for idx in indices])
class KNNWeightedSurvival():
def __init__(self, *args, **kwargs):
"""
Arguments are the same as for `sklearn.neighbors.NearestNeighbors`.
The simplest usage of this class is to use a single argument, which is
`n_neighbors` for the number of nearest neighbors (Euclidean distance
is assumed in this case). If you want to parallelize across different
search queries, use the `n_jobs` keyword parameter (-1 to use all
cores). To use other distances and for other details, please refer to
the documentation for sklearn's `NearestNeighbors` class.
*Important:* The prediction methods for this class use weighted
k-nearest neighbors, where "k" is set equal to the `n_neighbors`
parameter. The weights are specified through a kernel function K. In
particular, the i-th nearest neighbor X_i for a test point x is given a
weight of:
K( (distance between x and X_i) / (distance between x and X_k) ).
"""
self.NN_index_args = args
self.NN_index_kwargs = kwargs
self.NN_index = None
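# For example, a triangular kernel can be passed to the prediction methods
# below (a sketch; `wknn`, `X_test` and `times` are hypothetical, and any
# nonnegative function of the normalized distance works):
#     tri = lambda s: max(1.0 - s, 0.0)
#     S = wknn.predict_surv(X_test, times, kernel_function=tri)
# With kernel_function=None the weights are uniform, which reduces to the
# unweighted k-NN estimator of KNNSurvival.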
def fit(self, X, y):
"""
Constructs a nearest-neighbor index given training data (so that for
a future data point, we can use the nearest-neighbor index to quickly
find what the closest training data are to the future point).
Parameters
----------
X : 2D numpy array, shape = [n_samples, n_features]
Feature vectors.
y : 2D numpy array, shape = [n_samples, 2]
Survival labels (first column is for observed times, second column
is for event indicators). The i-th row corresponds to the i-th row
in `X`.
Returns
-------
None
"""
self.train_y = y
self.NN_index = NearestNeighbors(*self.NN_index_args,
**self.NN_index_kwargs)
self.NN_index.fit(X)
def predict_surv(self, X, times, presorted_times=False,
limit_from_left=False, n_neighbors=None,
kernel_function=None):
"""
Computes the weighted k-NN Kaplan-Meier survival probability function
estimate at user-specified times.
*Important:* The default number of nearest neighbors to use is whatever
was specified in `args` or `kwargs` when creating an instance of this
class (the "k" in k-NN)!
Parameters
----------
X : 2D numpy array, shape = [n_samples, n_features]
Feature vectors.
times : 1D numpy array
Times to compute the survival probability function at.
presorted_times : boolean, optional (default=False)
Flag for whether `times` is already sorted.
limit_from_left : boolean, optional (default=False)
Flag for whether to output the function evaluated at a time just to
the left, i.e., instead of outputting f(t) where f is the survival
probability function estimate, output:
f(t-) := limit as t' approaches t from the left of f(t').
n_neighbors : int, None, optional (default=None)
Number of nearest neighbors to use. If set to None then the number
used is whatever was passed into `args` or `kwargs` when creating
an instance of this class.
kernel_function : function, None, optional (default=None)
Kernel function to use. None corresponds to unweighted k-NN
survival analysis. If a function is specified, then the weighting
function used is of the form
"kernel(distance / distance to k-th nearest neighbor)".
Returns
-------
output : 2D numpy array
Survival probability function evaluated at each of the times
specified in `times` for each feature vector. The i-th row
corresponds to the i-th feature vector.
"""
if kernel_function is None:
kernel_function = lambda s: 1
dists, indices = self.NN_index.kneighbors(X, n_neighbors=n_neighbors,
return_distance=True)
train_y = self.train_y
output = []
n_times = len(times)
for dist, idx in zip(dists, indices):
max_dist = np.max(dist)
weights = np.array([kernel_function(d / max_dist) for d in dist])
zero_weight = (weights == 0)
if zero_weight.sum() > 0:
weights_subset = weights[~zero_weight]
if weights_subset.size > 0:
output.append(
_predict_leaf(
_fit_leaf_weighted(train_y[idx[~zero_weight]],
weights_subset),
'surv', times, presorted_times, limit_from_left))
else:
output.append(np.ones(n_times))
else:
output.append(
_predict_leaf(
_fit_leaf_weighted(train_y[idx],
weights),
'surv', times, presorted_times, limit_from_left))
return np.array(output)
def predict_cum_haz(self, X, times, presorted_times=False,
limit_from_left=False, n_neighbors=None,
kernel_function=None):
"""
Computes the weighted k-NN Nelson-Aalen cumulative hazard function
estimate at user-specified times.
*Important:* The default number of nearest neighbors to use is whatever
was specified in `args` or `kwargs` when creating an instance of this
class (the "k" in k-NN)!
Parameters
----------
X : 2D numpy array, shape = [n_samples, n_features]
Feature vectors.
times : 1D numpy array
Times to compute the cumulative hazard function at.
presorted_times : boolean, optional (default=False)
Flag for whether `times` is already sorted.
limit_from_left : boolean, optional (default=False)
Flag for whether to output the function evaluated at a time just to
the left, i.e., instead of outputting f(t) where f is the
cumulative hazard function estimate, output:
f(t-) := limit as t' approaches t from the left of f(t').
n_neighbors : int, None, optional (default=None)
Number of nearest neighbors to use. If set to None then the number
used is whatever was passed into `args` or `kwargs` when creating
an instance of this class.
kernel_function : function, None, optional (default=None)
Kernel function to use. None corresponds to unweighted k-NN
survival analysis. If a function is specified, then the weighting
function used is of the form
"kernel(distance / distance to k-th nearest neighbor)".
Returns
-------
output : 2D numpy array
Cumulative hazard function evaluated at each of the times
specified in `times` for each feature vector. The i-th row
corresponds to the i-th feature vector.
"""
if kernel_function is None:
kernel_function = lambda s: 1
dists, indices = self.NN_index.kneighbors(X, n_neighbors=n_neighbors,
return_distance=True)
train_y = self.train_y
output = []
n_times = len(times)
for dist, idx in zip(dists, indices):
max_dist = np.max(dist)
weights = np.array([kernel_function(d / max_dist) for d in dist])
zero_weight = (weights == 0)
if zero_weight.sum() > 0:
weights_subset = weights[~zero_weight]
if weights_subset.size > 0:
output.append(
_predict_leaf(
_fit_leaf_weighted(train_y[idx[~zero_weight]],
weights_subset),
'cum_haz', times, presorted_times, limit_from_left))
else:
output.append(np.zeros(n_times))
else:
output.append(
_predict_leaf(
_fit_leaf_weighted(train_y[idx],
weights),
'cum_haz', times, presorted_times, limit_from_left))
return np.array(output)
class KernelSurvival():
def __init__(self, *args, **kwargs):
"""
Arguments are the same as for `sklearn.neighbors.NearestNeighbors`.
The simplest usage of this class is to use a single argument, which is
`radius` for fixed-radius near-neighbor search (Euclidean distance is
assumed in this case). Put another way, any training data point farther
than `radius` away from a test point is assumed to contribute 0 weight
toward prediction for the test point. If you want to parallelize across
different search queries, use the `n_jobs` keyword parameter (-1 to use
all cores). To use other distances and for other details, please refer
to the documentation for sklearn's `NearestNeighbors` class.
"""
self.NN_index_args = args
self.NN_index_kwargs = kwargs
self.NN_index = None
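# For example (a sketch with hypothetical arrays):
#     ks = KernelSurvival(radius=2.0, n_jobs=-1)
#     ks.fit(X_train, y_train)
#     S_box = ks.predict_surv(X_test, times)   # box kernel within radius 2.0
#     epa = lambda s: max(1.0 - s * s, 0.0)    # Epanechnikov-style kernel
#     S_epa = ks.predict_surv(X_test, times, kernel_function=epa)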
def fit(self, X, y):
"""
Constructs a nearest-neighbor index given training data (so that for
a future data point, we can use the nearest-neighbor index to quickly
find what the closest training data are to the future point).
Parameters
----------
X : 2D numpy array, shape = [n_samples, n_features]
Feature vectors.
y : 2D numpy array, shape = [n_samples, 2]
Survival labels (first column is for observed times, second column
is for event indicators). The i-th row corresponds to the i-th row
in `X`.
Returns
-------
None
"""
self.train_y = y
self.NN_index = NearestNeighbors(*self.NN_index_args,
**self.NN_index_kwargs)
self.NN_index.fit(X)
def predict_surv(self, X, times, presorted_times=False,
limit_from_left=False, radius=None,
kernel_function=None):
"""
Computes the kernel Kaplan-Meier survival probability function estimate
at user-specified times.
*Important:* The default radius to use is whatever was specified in
`args` or `kwargs` when creating an instance of this class!
Parameters
----------
X : 2D numpy array, shape = [n_samples, n_features]
Feature vectors.
times : 1D numpy array
Times to compute the survival probability function at.
presorted_times : boolean, optional (default=False)
Flag for whether `times` is already sorted.
limit_from_left : boolean, optional (default=False)
Flag for whether to output the function evaluated at a time just to
the left, i.e., instead of outputting f(t) where f is the survival
probability function estimate, output:
f(t-) := limit as t' approaches t from the left of f(t').
radius : float, None, optional (default=None)
Neighbors farther than this distance from a test point have kernel
weight 0.
kernel_function : function, None, optional (default=None)
Kernel function to use. None corresponds to fixed-radius near
neighbors kernel survival analysis (i.e., a box kernel that
becomes 0 after `radius` distance away). If a function is
specified, then the weighting function used is of the form
"kernel(distance / radius)".
Returns
-------
output : 2D numpy array
Survival probability function evaluated at each of the times
specified in `times` for each feature vector. The i-th row
corresponds to the i-th feature vector.
"""
if radius is None:
radius = self.NN_index.radius
if kernel_function is None:
kernel_function = lambda s: 1 # box kernel (i.e., uniform weights)
dists, indices = self.NN_index.radius_neighbors(X, radius=radius,
return_distance=True)
train_y = self.train_y
output = []
n_times = len(times)
for dist, idx in zip(dists, indices):
if dist.size > 0:
weights = np.array([kernel_function(d / radius) for d in dist])
zero_weight = (weights == 0)
if zero_weight.sum() > 0:
weights_subset = weights[~zero_weight]
if weights_subset.size > 0:
output.append(
_predict_leaf(
_fit_leaf_weighted(train_y[idx[~zero_weight]],
weights_subset),
'surv', times, presorted_times,
limit_from_left))
else:
output.append(np.ones(n_times))
else:
output.append(
_predict_leaf(
_fit_leaf_weighted(train_y[idx],
weights),
'surv', times, presorted_times, limit_from_left))
else:
output.append(np.ones(n_times))
return np.array(output)
def predict_cum_haz(self, X, times, presorted_times=False,
limit_from_left=False, radius=None,
kernel_function=None):
"""
Computes the kernel Nelson-Aalen cumulative hazard function estimate at
user-specified times.
*Important:* The default radius to use is whatever was specified in
`args` or `kwargs` when creating an instance of this class!
Parameters
----------
X : 2D numpy array, shape = [n_samples, n_features]
Feature vectors.
times : 1D numpy array
Times to compute the cumulative hazard function at.
presorted_times : boolean, optional (default=False)
Flag for whether `times` is already sorted.
limit_from_left : boolean, optional (default=False)
Flag for whether to output the function evaluated at a time just to
the left, i.e., instead of outputting f(t) where f is the
cumulative hazard function estimate, output:
f(t-) := limit as t' approaches t from the left of f(t').
radius : float, None, optional (default=None)
Neighbors farther than this distance from a test point have kernel
weight 0.
kernel_function : function, None, optional (default=None)
Kernel function to use. None corresponds to fixed-radius near
neighbors kernel survival analysis (i.e., a box kernel that
becomes 0 after `radius` distance away). If a function is
specified, then the weighting function used is of the form
"kernel(distance / radius)".
Returns
-------
output : 2D numpy array
Cumulative hazard function evaluated at each of the times specified
in `times` for each feature vector. The i-th row corresponds to the
i-th feature vector.
"""
if radius is None:
radius = self.NN_index.radius
if kernel_function is None:
kernel_function = lambda s: 1 # box kernel (i.e., uniform weights)
dists, indices = self.NN_index.radius_neighbors(X, radius=radius,
return_distance=True)
train_y = self.train_y
output = []
n_times = len(times)
for dist, idx in zip(dists, indices):
if dist.size > 0:
weights = np.array([kernel_function(d / radius) for d in dist])
zero_weight = (weights == 0)
if zero_weight.sum() > 0:
weights_subset = weights[~zero_weight]
if weights_subset.size > 0:
output.append(
_predict_leaf(
_fit_leaf_weighted(train_y[idx[~zero_weight]],
weights_subset),
'cum_haz', times, presorted_times,
limit_from_left))
else:
output.append(np.zeros(n_times))
else:
output.append(
_predict_leaf(
_fit_leaf_weighted(train_y[idx],
weights),
'cum_haz', times, presorted_times,
limit_from_left))
else:
output.append(np.zeros(n_times))
return np.array(output)
class RandomSurvivalForestANN():
def __init__(self, n_estimators=100, max_features='sqrt', max_depth=None,
min_samples_split=2, min_samples_leaf=1, split='logrank',
split_threshold_mode='exhaustive', random_state=None,
n_jobs=None):
"""
A modified version of the random survival forest survival probability
estimator. From a theoretical standpoint, tree construction works the
same way so each survival tree is associated with the same training
subjects as regular random survival forests. However, what needs to be
stored at each leaf is different in that instead of computing survival
probability or cumulative hazard function estimates per tree, we
instead use the learned tree only for identifying the adaptive nearest
neighbors (which have weights!). These weighted nearest neighbors
found per test point are then used to make a survival probability or
cumulative hazard function estimate using kernel variants of the
Kaplan-Meier and Nelson-Aalen estimators.
Parameters
----------
n_estimators : int, optional (default=100)
Number of trees.
max_features : int, string, optional (default='sqrt')
Number of features chosen per tree. Allowable string choices are
'sqrt' (max_features=ceil(sqrt(n_features))) and 'log2'
(max_features=ceil(log2(n_features))).
max_depth : int, optional (default=None)
Maximum depth of each tree. If None, then each tree is grown
until other termination criteria are met (see `min_samples_split`
and `min_samples_leaf` parameters).
min_samples_split : int, optional (default=2)
A node must have at least this many samples to be split.
min_samples_leaf : int, float, optional (default=1)
Both sides of a split must have at least this many samples
(or in the case of a fraction, at least a fraction of samples)
for the split to happen. Otherwise, the node is turned into a
leaf node.
split : string, optional (default='logrank')
Currently only the log-rank splitting criterion is supported.
split_threshold_mode : string, optional (default='exhaustive')
If 'exhaustive', then we compute the split score for every observed
feature value as a possible threshold (this can be very expensive).
If 'median', then for any feature, we always split on the median
value observed for that feature (this is the only supported option
in Wrymm's original random survival analysis code).
If 'random', then for any feature, we randomly choose a split
threshold among the observed feature values (this is recommended by
the random survival forest authors if fast computation is desired).
random_state : int, numpy RandomState instance, None, optional
(default=None)
If an integer, then a new numpy RandomState is created with the
integer as the random seed. If a numpy RandomState instance is
provided, then it is used as the pseudorandom number generator. If
None is specified, then a new numpy RandomState is created without
providing a seed.
n_jobs : int, None, optional (default=None)
Number of cores to use with joblib's Parallel. This is the same
`n_jobs` parameter as for Parallel. Setting `n_jobs` to -1 uses all
the cores.
"""
self.n_estimators = n_estimators
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.max_features = max_features
self.split_threshold_mode = split_threshold_mode
self.n_jobs = n_jobs
self.column_names = None
if random_state is None:
self.random_state = np.random.RandomState()
elif type(random_state) == int:
self.random_state = np.random.RandomState(random_state)
else:
self.random_state = random_state
if split == 'logrank':
self.split_score_function = logrank
else:
raise NotImplementedError('Unsupported split criterion '
+ '"{0}"'.format(split))
def save(self, filename):
data = {'n_estimators': self.n_estimators,
'max_depth': self.max_depth,
'min_samples_split': self.min_samples_split,
'min_samples_leaf': self.min_samples_leaf,
'max_features': self.max_features,
'split_threshold_mode': self.split_threshold_mode,
'n_jobs': self.n_jobs,
'oob_score': self.oob_score,
'feature_importance': self.feature_importance,
'column_names': list(self.column_names),
'oob_score_': self.oob_score_}
if self.feature_importances_ is not None:
data['feature_importances_'] = self.feature_importances_.tolist()
else:
data['feature_importances_'] = None
data['trees'] = \
[_convert_to_not_use_numpy(tree) for tree in self.trees]
data['tree_bootstrap_indices'] = \
[indices.tolist() for indices in self.tree_bootstrap_indices]
with open(filename, 'wb') as f:
pickle.dump(data, f)
@staticmethod
def load(filename):
with open(filename, 'rb') as f:
data = pickle.load(f)
rsf = \
RandomSurvivalForest(n_estimators=data['n_estimators'],
max_features=data['max_features'],
max_depth=data['max_depth'],
min_samples_split=data['min_samples_split'],
min_samples_leaf=data['min_samples_leaf'],
split='logrank',
split_threshold_mode='exhaustive',
random_state=None,
n_jobs=data['n_jobs'],
oob_score=data['oob_score'],
feature_importance=data['feature_importance'])
rsf.column_names = data['column_names']
rsf.oob_score_ = data['oob_score_']
if data['feature_importances_'] is None:
rsf.feature_importances_ = None
else:
rsf.feature_importances_ = np.array(data['feature_importances_'])
rsf.trees = [_convert_to_use_numpy(tree) for tree in data['trees']]
rsf.tree_bootstrap_indices = \
np.array([indices for indices in data['tree_bootstrap_indices']])
return rsf
def fit(self, X, y, column_names=None):
"""
Fits the random survival forest to training data.
Parameters
----------
X : 2D numpy array, shape = [n_samples, n_features]
Feature vectors.
y : 2D numpy array, shape = [n_samples, 2]
Survival labels (first column is for observed times, second column
is for event indicators). The i-th row corresponds to the i-th row
in `X`.
column_names : list, None, optional (default=None)
Names for features can be specified. This is only for display
purposes when using the `draw` method. If set to None, then
`column_names` is just set to be a range of integers indexing the
columns from 0.
Returns
-------
None
"""
if column_names is None:
self.column_names = list(range(X.shape[1]))
else:
self.column_names = column_names
assert len(column_names) == X.shape[1]
if type(self.max_features) == str:
if self.max_features == 'sqrt':
max_features = int(np.ceil(np.sqrt(X.shape[1])))
elif self.max_features == 'log2':
max_features = int(np.ceil(np.log2(X.shape[1])))
else:
raise NotImplementedError('Unsupported max features choice '
+ '"{0}"'.format(self.max_features))
else:
max_features = self.max_features
self.tree_bootstrap_indices = []
sort_indices = np.argsort(y[:, 0])
X = X[sort_indices].astype(np.float64)
y = y[sort_indices].astype(np.float64)
self.train_y = y
random_state = self.random_state
for tree_idx in range(self.n_estimators):
bootstrap_indices = np.sort(random_state.choice(X.shape[0],
X.shape[0],
replace=True))
self.tree_bootstrap_indices.append(bootstrap_indices)
with Parallel(n_jobs=self.n_jobs) as parallel:
self.trees = \
parallel(
delayed(_build_tree_ANN)(
X[self.tree_bootstrap_indices[tree_idx]],
y[self.tree_bootstrap_indices[tree_idx]],
self.tree_bootstrap_indices[tree_idx],
0, self.max_depth, max_features,
self.split_score_function, self.min_samples_split,
self.min_samples_leaf, self.split_threshold_mode,
np.random.RandomState(random_state.randint(4294967296)))
for tree_idx in range(self.n_estimators))
def predict_surv(self, X, times, presorted_times=False,
use_kaplan_meier=True):
"""
Computes the forest's survival probability function estimate for each
feature vector evaluated at user-specified times.
Parameters
----------
X : 2D numpy array, shape = [n_samples, n_features]
Feature vectors.
times : 1D numpy array
Times to compute the survival probability function at.
presorted_times : boolean, optional (default=False)
Flag for whether `times` is already sorted.
use_kaplan_meier : boolean, optional (default=True)
If this flag is set to True, then we have the forest predict S(t|x)
using a conditional Kaplan-Meier estimator. Otherwise, we have the
forest predict H(t|x) using a conditional Nelson-Aalen estimator
and then back out an estimate of S(t|x) via S(t|x)=exp(-H(t|x)).
Returns
-------
output : 2D numpy array
Survival probability function evaluated at each of the times
specified in `times` for each feature vector. The i-th row
corresponds to the i-th feature vector.
"""
if use_kaplan_meier:
# step 1: find adaptive nearest neighbors
results = Parallel(n_jobs=self.n_jobs)(
delayed(_compute_tree_ANN)(self.trees[tree_idx], X)
for tree_idx in range(self.n_estimators))
# step 2: aggregate adaptive nearest neighbors
output = []
y = self.train_y
for i in range(len(X)):
histogram = Counter()
total = 0
for t in range(self.n_estimators):
for j in results[t][i]:
histogram[j] += 1
total += len(results[t][i])
nearest_neighbors = sorted(histogram.keys())
weights = [histogram[j] / total for j in nearest_neighbors]
output.append(
_predict_leaf(
_fit_leaf_weighted(y[np.array(nearest_neighbors,
dtype=int)],
np.array(weights)),
'surv', times, presorted_times))
return np.array(output)
else:
return np.exp(-self.predict_cum_haz(X, times, presorted_times))
def predict_cum_haz(self, X, times, presorted_times=False,
use_kaplan_meier=False, surv_eps=1e-12):
"""
Computes the forest's cumulative hazard function estimate for each
feature vector evaluated at user-specified times.
Parameters
----------
X : 2D numpy array, shape = [n_samples, n_features]
Feature vectors.
times : 1D numpy array
Times to compute the cumulative hazard function at.
presorted_times : boolean, optional (default=False)
Flag for whether `times` is already sorted.
use_kaplan_meier : boolean, optional (default=False)
If this flag is set to True, then we have the forest predict S(t|x)
first using a conditional Kaplan-Meier estimate and then back out
an estimate of H(t|x) via H(t|x)=-log(S(t|x)).
surv_eps : float, optional (default=1e-12)
If `use_kaplan_meier` is set to True, then we clip the estimated
survival function so that any value less than `surv_eps` is set to
`surv_eps`. This makes it so that when we take the negative log of
the survival function, we don't take logs of 0.
Returns
-------
output : 2D numpy array
Cumulative hazard function evaluated at each of the times
specified in `times` for each feature vector. The i-th row
corresponds to the i-th feature vector.
"""
if use_kaplan_meier:
surv = self.predict_surv(X, times, presorted_times, True)
return -np.log(np.clip(surv, surv_eps, 1.))
else:
# step 1: find adaptive nearest neighbors
results = Parallel(n_jobs=self.n_jobs)(
delayed(_compute_tree_ANN)(self.trees[tree_idx], X)
for tree_idx in range(self.n_estimators))
# step 2: aggregate adaptive nearest neighbors
output = []
y = self.train_y
for i in range(len(X)):
histogram = Counter()
total = 0
for t in range(self.n_estimators):
for j in results[t][i]:
histogram[j] += 1
total += len(results[t][i])
nearest_neighbors = sorted(histogram.keys())
weights = [histogram[j] / total for j in nearest_neighbors]
output.append(
_predict_leaf(
_fit_leaf_weighted(y[np.array(nearest_neighbors,
dtype=int)],
np.array(weights)),
'cum_haz', times, presorted_times))
return np.array(output)
def _print_with_depth(self, string, depth):
"""
Auxiliary function to print a string with indentation dependent on
depth.
"""
print("{0}{1}".format(" " * depth, string))
def _print_tree(self, tree, current_depth=0):
"""
Auxiliary function to print a survival tree.
"""
if 'surv' in tree:
self._print_with_depth(tree['times'], current_depth)
return
self._print_with_depth(
"{0} > {1}".format(self.column_names[tree['feature']],
tree['threshold']),
current_depth)
self._print_tree(tree['left'], current_depth + 1)
self._print_tree(tree['right'], current_depth + 1)
def draw(self):
"""
Prints out each tree of the random survival forest.
"""
for tree_idx, tree in enumerate(self.trees):
print("==========================================\nTree",
tree_idx)
self._print_tree(tree)
def _find_best_feature_split(X, y, max_features, split_score_function,
min_samples_split, min_samples_leaf,
split_threshold_mode, random_state):
"""
Finds the best single feature to split on and the split threshold to use.
Parameters
----------
X : 2D numpy array, shape = [n_samples, n_features]
Feature vectors.
y : 2D numpy array, shape = [n_samples, 2]
Survival labels (first column is for observed times, second column is
for event indicators). The i-th row corresponds to the i-th row in `X`.
max_features : int
Number of randomly chosen features that we find a split for.
split_score_function : function
Function that computes a split score. Look at `logrank` for an example.
min_samples_split : int
See documentation for RandomSurvivalForest's `__init__` function.
min_samples_leaf : int, float
See documentation for RandomSurvivalForest's `__init__` function.
split_threshold_mode : string
See documentation for RandomSurvivalForest's `__init__` function.
random_state : numpy RandomState instance
Pseudorandom number generator.
*Warning*: for this function, `random_state` actually does have to be a
numpy RandomState instance. This is for computational efficiency
reasons, so as not to keep having to sanity-check the input.
Returns
-------
None, or (feature column index as integer, split threshold as float, mask
for which data go into the left branch)
"""
num_features = X.shape[1]
if max_features >= num_features:
candidate_features = list(range(num_features))
else:
candidate_features = list(random_state.choice(num_features,
max_features,
replace=False))
num_candidate_features = len(candidate_features)
X_slice = X[:, candidate_features].copy()
drop_features = []
    keep_feature_mask = np.ones(num_candidate_features, dtype=bool)
for idx in range(num_candidate_features):
nan_mask = np.isnan(X_slice[:, idx])
num_nans = nan_mask.sum()
if num_nans > 0:
not_nan_mask = ~nan_mask
if np.any(not_nan_mask):
# impute
X_slice[nan_mask, idx] = \
random_state.choice(X_slice[not_nan_mask, idx],
num_nans)
else:
drop_features.append(idx)
keep_feature_mask[idx] = 0
num_drop_features = len(drop_features)
num_candidate_features -= num_drop_features
if num_candidate_features == 0:
return None
if num_drop_features > 0:
X_slice = X_slice[:, keep_feature_mask]
for idx in drop_features[::-1]:
del candidate_features[idx]
if split_threshold_mode == 'exhaustive':
score_arg_pairs \
= [(split_score_function(X_slice[:, idx], y, split_threshold,
min_samples_split, min_samples_leaf),
(col_idx,
split_threshold,
X_slice[:, idx] <= split_threshold))
for idx, col_idx in enumerate(candidate_features)
for split_threshold in np.sort(np.unique(X_slice[:, idx]))]
argmax = np.argmax([score for score, arg in score_arg_pairs])
best_score, best_arg = score_arg_pairs[argmax]
if best_score == 0:
return None
else:
return best_arg
elif split_threshold_mode == 'median':
max_score = -np.inf
best_arg = None
for idx, col_idx in enumerate(candidate_features):
split_threshold = np.median(X_slice[:, idx])
score = split_score_function(X_slice[:, idx], y, split_threshold,
min_samples_split, min_samples_leaf)
if score > max_score:
max_score = score
best_arg = (col_idx, split_threshold,
X_slice[:, idx] <= split_threshold)
if max_score == 0:
return None
else:
return best_arg
elif split_threshold_mode == 'random':
max_score = -np.inf
best_arg = None
for idx, col_idx in enumerate(candidate_features):
split_threshold = random_state.choice(X_slice[:, idx])
score = split_score_function(X_slice[:, idx], y, split_threshold,
min_samples_split, min_samples_leaf)
if score > max_score:
max_score = score
best_arg = (col_idx, split_threshold,
X_slice[:, idx] <= split_threshold)
if max_score == 0:
return None
else:
return best_arg
else:
raise NotImplementedError('Unsupported split threshold strategy '
+ '"{0}"'.format(split_threshold_mode))
def _fit_leaf(y):
"""
Computes leaf node information given survival labels (observed times and
event indicators).
Parameters
----------
y : 2D numpy array, shape=[n_samples, 2]
The two columns correspond to observed times and event indicators.
Returns
-------
tree : dictionary
The leaf node information stored as a dictionary. Specifically, the
key-value pairs of this dictionary are as follows:
- 'times': stores the sorted unique observed times
- 'event_counts': in the same order as `times`, the number of events
at each unique observed time
- 'at_risk_counts': in the same order as `times`, the number of
subjects at risk at each unique observed time
- 'surv': in the same order as `times`, the Kaplan-Meier survival
probability estimate at each unique observed time
- 'cum_haz': in the same order as `times`, the Nelson-Aalen cumulative
hazard estimate at each unique observed time
"""
if len(y.shape) == 1:
y = y.reshape(1, -1)
    sorted_unique_observed_times = np.unique(y[:, 0])
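    # Hedged completion: the remainder of this function was missing from the
    # extracted source. The code below computes the quantities promised in the
    # docstring (event/at-risk counts, Kaplan-Meier survival, Nelson-Aalen
    # cumulative hazard), in the order of the sorted unique observed times.
    num_times = len(sorted_unique_observed_times)
    event_counts = np.zeros(num_times)
    at_risk_counts = np.zeros(num_times)
    for idx, t in enumerate(sorted_unique_observed_times):
        at_time_t = (y[:, 0] == t)
        event_counts[idx] = np.sum(y[at_time_t, 1])
        at_risk_counts[idx] = np.sum(y[:, 0] >= t)
    # at least one subject is at risk at each of its own observed times,
    # so the division below is safe
    hazards = event_counts / at_risk_counts
    surv = np.cumprod(1. - hazards)
    cum_haz = np.cumsum(hazards)
    return {'times': sorted_unique_observed_times,
            'event_counts': event_counts,
            'at_risk_counts': at_risk_counts,
            'surv': surv,
            'cum_haz': cum_haz}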
#
#
# 0=================================0
# | Project Name |
# 0=================================0
#
#
# ----------------------------------------------------------------------------------------------------------------------
#
# Implements: Support Vector Machine
#
# ----------------------------------------------------------------------------------------------------------------------
#
# YUWEI CAO - 2020/11/13 13:05 PM
#
#
# ----------------------------------------
# Import packages and constant
# ----------------------------------------
import os
import h5py
import numpy as np
from glob import glob
from sklearn.svm import LinearSVC
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
# %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
# use seaborn plotting defaults
import seaborn as sns; sns.set()
def ResizeDataset(path, percentage, n_classes, shuffle):
if path == 'cache/modelnet40/features/':
original_name = ['train0.h5', 'train1.h5', 'train2.h5',
'train3.h5', 'train4.h5']
else:
original_name = ['train0.h5', 'train1.h5', 'train2.h5',
'train3.h5', 'train4.h5', 'train5.h5', 'train6.h5', 'train7.h5']
for h5_name in original_name:
ori_name = os.path.join(path, h5_name)
out_file_name= ori_name + "_" + str(percentage)+ "_resized.h5"
if os.path.exists(out_file_name):
os.remove(out_file_name)
fw = h5py.File(out_file_name, 'w', libver='latest')
dset = fw.create_dataset("data", (1,1024,),maxshape=(None,1024), dtype='<f4')
dset_l = fw.create_dataset("label",(1,),maxshape=(None,),dtype='uint8')
fw.swmr_mode = True
        f = h5py.File(ori_name, 'r')
data = f['data'][:]
cls_label = f['label'][:]
#data shuffle
if shuffle:
idx = np.arange(len(cls_label))
np.random.shuffle(idx)
data,cls_label = data[idx, ...], cls_label[idx]
class_dist= np.zeros(n_classes)
for c in range(len(data)):
class_dist[cls_label[c]]+=1
log_string('Ori data to size of :', np.sum(class_dist))
log_string ('class distribution of this dataset :',class_dist)
class_dist_new= (percentage*class_dist/100).astype(int)
for i in range(n_classes):
if class_dist_new[i]<1:
class_dist_new[i]=1
class_dist_count=np.zeros(n_classes)
data_count=0
for c in range(len(data)):
label_c=cls_label[c]
if(class_dist_count[label_c] < class_dist_new[label_c]):
class_dist_count[label_c]+=1
new_shape = (data_count+1,1024,)
dset.resize(new_shape)
dset_l.resize((data_count+1,))
dset[data_count,:] = data[c]
dset_l[data_count] = cls_label[c]
dset.flush()
dset_l.flush()
data_count+=1
log_string('Finished resizing data to size of :', np.sum(class_dist_new))
log_string ('class distribution of resized dataset :',class_dist_new)
        fw.close()
# Read in the list of categories in MODELNET40
def get_category_names():
shape_names_file = os.path.join('modelnet40_ply_hdf5_2048', 'shape_names.txt')
shape_names = [line.rstrip() for line in open(shape_names_file)]
return shape_names
class SVM(object):
def __init__(self, feature_dir, percent=100):
self.feature_dir = feature_dir
self.test_path = glob(os.path.join(self.feature_dir, 'test*.h5'))
if(percent<100):
ResizeDataset(path = self.feature_dir, percentage=percent, n_classes=40, shuffle=True)
self.train_path = glob(os.path.join(self.feature_dir, 'train*%s_resized.h5'%percent))
else:
self.train_path = glob(os.path.join(self.feature_dir, 'train*.h5'))
log_string(str(self.train_path))
log_string("Loading feature dataset...")
train_data = []
train_label = []
for path in self.train_path:
log_string("Loading path: " + str(path))
f = h5py.File(path, 'r+')
data = f['data'][:].astype('float32')
label = f['label'][:].astype('int64')
f.close()
train_data.append(data)
train_label.append(label)
self.train_data = np.concatenate(train_data, axis=0)
        self.train_label = np.concatenate(train_label, axis=0)
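        # Hedged continuation: the remainder of this constructor was missing
        # from the extracted source. Load the held-out test features in the
        # same way as the training features above.
        test_data = []
        test_label = []
        for path in self.test_path:
            log_string("Loading path: " + str(path))
            f = h5py.File(path, 'r')
            data = f['data'][:].astype('float32')
            label = f['label'][:].astype('int64')
            f.close()
            test_data.append(data)
            test_label.append(label)
        self.test_data = np.concatenate(test_data, axis=0)
        self.test_label = np.concatenate(test_label, axis=0)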
# Author: <NAME>
import numpy as np
import pyvista as pv
from pyvistaqt import BackgroundPlotter
#from pyrateoptics import raytracer
from pyrateoptics.raytracer.localcoordinates import LocalCoordinates
from pyrateoptics.raytracer.aperture import BaseAperture, create_aperture
from pyrateoptics.raytracer.globalconstants import standard_wavelength, canonical_ex, canonical_ey
# add method to class "RayBundle"
from pyrateoptics.raytracer.ray import RayBundle
def draw3d(self, plotter, color="blue", plane_normal=canonical_ex,
up=canonical_ey, **kwargs):
# normalizing plane_normal, up direction
plane_normal = plane_normal / np.linalg.norm(plane_normal)
up = up / np.linalg.norm(up)
ez = np.cross(plane_normal, up)
(num_points, num_dims, num_rays) = np.shape(self.x)
if num_rays == 0:
return
# arrange num_ray copies of simple vectors in appropriate form
plane_normal = np.repeat(plane_normal[:, np.newaxis], num_rays, axis=1)
ez = np.repeat(ez[:, np.newaxis], num_rays, axis=1)
up = np.repeat(up[:, np.newaxis], num_rays, axis=1)
ptlist = [self.x[i] for i in np.arange(num_points)]
validity = [self.valid[i] for i in np.arange(num_points)]
#just for debugging
#print("len(ptlist)", len(ptlist))
#print("ptlist", ptlist[0].shape, ptlist[1].shape)
#print("ptlist", ptlist[0][:,0], ptlist[1][:,0])
#print("a", ptlist[1:][0].shape, ptlist[:-1][0].shape)
#print("ptlist", ptlist[1:], ptlist[:-1])
Line_LL = []
for (pt1, pt2, todraw) in zip(ptlist[1:], ptlist[:-1], validity[1:]):
# perform in-plane projection
pt1inplane = pt1 - np.sum(pt1 * plane_normal, axis=0) * plane_normal
pt2inplane = pt2 - np.sum(pt2 * plane_normal, axis=0) * plane_normal
# calculate y-components
ypt1 = np.sum(pt1inplane * up, axis=0)
ypt2 = np.sum(pt2inplane * up, axis=0)
# calculate z-components
zpt1 = np.sum(pt1inplane * ez, axis=0)
zpt2 = np.sum(pt2inplane * ez, axis=0)
y = np.vstack((ypt1, ypt2))[:, todraw]
z = np.vstack((zpt1, zpt2))[:, todraw]
#ax.plot(z, y, color=color, **kwargs)
#print("pt-shape", pt1.shape, pt2.shape)
#print("pt", pt1[:,0], pt2[:,0])
Line_L = []
for i in range(0, len(pt1[0])):
line = pv.Line(pointa=np.array(pt1[:,i].real),pointb=np.array(pt2[:,i].real))
Line_L.append(line)
            if i == 0:
                Mesh = Line_L[0]
            else:
                Mesh += Line_L[i]
Line_L = np.array(Line_L)
# for i in range(0, len(pt1[0])):
# plotter.add_mesh(Line_L[i], color=color, opacity = 0.75)
plotter.add_mesh(Mesh, color=color, opacity = 0.75)
RayBundle.draw3d = draw3d
from pyrateoptics.raytracer.ray import RayPath
def draw3d(self, plotter, color="blue",
plane_normal=canonical_ex, up=canonical_ey,
do_not_draw_raybundles=[], **kwargs):
"""
Draw raybundles.
"""
# TODO: exclude different raybundles from drawing
"""
print(self.raybundles)
xdraw_list = []
kdraw_list = []
edraw_list = []
valid_list = []
for (ind, r) in enumerate(self.raybundles):
if ind not in do_not_draw_raybundles:
(numpts, numdims, numrays) = r.x.shape
xdraw_list += [r.x[i] for i in np.arange(numpts)]
kdraw_list += [r.k[i] for i in np.arange(numpts)]
edraw_list += [r.Efield[i] for i in np.arange(numpts)]
valid_list += [r.valid[i] for i in np.arange(numpts)]
# r.draw2d(ax, color=color,
# plane_normal=plane_normal, up=up, **kwargs)
# ugly construction to perform a nice drawing of the raybundle
r_draw = RayBundle(x0=xdraw_list[0],
k0=kdraw_list[0],
Efield0=edraw_list[0])
r_draw.x = np.array(xdraw_list)
r_draw.k = np.array(kdraw_list)
r_draw.Efield = np.array(edraw_list)
r_draw.valid = np.array(valid_list)
r_draw.draw2d(ax, color=color,
plane_normal=plane_normal, up=up, **kwargs)
"""
for r in self.raybundles:
r.draw3d(plotter, color=color, plane_normal=plane_normal,
up=up, **kwargs)
RayPath.draw3d = draw3d
# add a method draw3D to the class Surface
from pyrateoptics.raytracer.surface import Surface
def draw3d(self, plotter, vertices=50,
inyzplane=True,
color="white",
style="points", style_swapped_lines=False, c = np.array([0,0,255]), **kwargs):
"""
:param plotter (plotter object)
:param vertices (int), vertices in xy for aperture sampling
:param inyzplane (bool), cuts globalpts in yz plane before projection
on plane_normal
:param color (string), "red", "blue", "grey", "green", ...
:param plane_normal (1D numpy array of float), new x projection axis
:param up (1D numpy array), invariant y axis, z = x x y
:param style (string), "points", "meander"
"""
sizelimit = 1000.0
failsafevalue = 11.0
if self.aperture is None:
effsemidia = failsafevalue
# TODO: choose max ray height of all bundles instead
# (cosmetic but absolutely necessary for beauty)
else:
if self.aperture.get_typical_dimension() <= sizelimit:
            # TODO: aperture types Object and Image to distinguish
# from very large normal apertures
effsemidia = self.aperture.get_typical_dimension()
else:
effsemidia = failsafevalue
xl = effsemidia * np.linspace(-1, 1, num=vertices)
yl = effsemidia * np.linspace(-1, 1, num=vertices)
X, Y = np.meshgrid(xl, yl)
if style_swapped_lines:
X[::2, :] = X[::2, ::-1]
x = X.flatten()
y = Y.flatten()
isinap = self.aperture.are_points_in_aperture(x, y)
xinap = x[isinap]
yinap = y[isinap]
zinap = np.zeros_like(xinap)
localpts_aperture = np.row_stack((xinap, yinap, zinap))
localpts_shape =\
self.shape.lc.returnOtherToActualPoints(localpts_aperture,
self.aperture.lc)
xinap_shape = localpts_shape[0, :]
yinap_shape = localpts_shape[1, :]
zinap_shape = self.shape.getSag(xinap_shape, yinap_shape)
    localpts_shape = np.row_stack((xinap_shape, yinap_shape, zinap_shape))
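    # Hedged completion: the remainder of this function was missing from the
    # extracted source. Only the "points" style is sketched here: the sampled
    # shape points are lifted into global coordinates (method name assumed from
    # the pyrateoptics LocalCoordinates API) and handed to the pyvista plotter
    # as a point cloud.
    globalpts = self.shape.lc.returnLocalToGlobalPoints(localpts_shape)
    cloud = pv.PolyData(np.ascontiguousarray(globalpts.T.real))
    plotter.add_mesh(cloud, color=color, point_size=3.0,
                     render_points_as_spheres=True)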
import numpy as np
import scipy.signal as sp
import matplotlib.pyplot as plt
def one_sided_fft(t, x):
full_amplitude_spectrum = np.abs(np.fft.fft(x))/x.size
full_freqs = np.fft.fftfreq(x.size, np.mean(np.ediff1d(t)))
oneinds = np.where(full_freqs >= 0.0)
one_sided_freqs = full_freqs[oneinds]
one_sided_amplitude_spectrum = 2*full_amplitude_spectrum[oneinds]
return one_sided_freqs, one_sided_amplitude_spectrum
def power_spectrum(t, x):
onef, oneamps = one_sided_fft(t, x)
return onef, oneamps**2
def lomb_scargle_pspec(t, x):
tstep = np.mean(np.ediff1d(t))
freqs = np.fft.fftfreq(x.size, tstep)
idxx = np.argsort(freqs)
one_sided_freqs = freqs[idxx]
one_sided_freqs = one_sided_freqs[one_sided_freqs > 0]
# KLUDGE TO KEEP PERIODOGRAM FROM CRASHING
one_sided_freqs = one_sided_freqs+0.00001 * \
        np.random.random(one_sided_freqs.size)
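    # Hedged completion: the remainder of this function was missing from the
    # extracted source. Evaluate the Lomb-Scargle periodogram at the positive
    # frequencies (sp is scipy.signal, imported at the top of the file); the
    # normalization below is an assumption.
    pgram = sp.lombscargle(t, x - np.mean(x), 2 * np.pi * one_sided_freqs)
    return one_sided_freqs, pgram * 4 / x.size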
#!/usr/bin/env python
# coding: utf-8
# In[5]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from libsvm.svmutil import *
from sklearn import svm
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
from timeit import default_timer as timer
#Reading files
data_points_train = pd.read_csv('2019MT60763.csv', header = None, nrows = 3000)
data = np.array((data_points_train.sort_values(data_points_train.columns[25])).values)
dp = np.array(data)
class_label = dp[:,25]
# counting no of occurence of labels of each class
unique, counts = np.unique(class_label, return_counts=True)
dict(zip(unique, counts))
#print(counts)
# for 25 features
# FOR CLASSES {0,1}
text_x = dp[:631,:25]
text_t = dp[:631,25]
# for cross_validation
tp_x_1 = np.append(dp[:100,:25],dp[306:406,:25],axis=0)
tp_t_1 = np.append(dp[:100,25],dp[306:406,25],axis=0)
tp_x_2 = np.append(dp[101:201,:25],dp[407:507,:25],axis=0)
tp_t_2 = np.append(dp[101:201,25],dp[407:507,25],axis=0)
tp_x_3 = np.append(dp[202:305,:25],dp[508:631,:25],axis=0)
tp_t_3 = np.append(dp[202:305,25],dp[508:631,25],axis=0)
PIPE = Pipeline([('scaler', StandardScaler()), ('SVM', svm.SVC(kernel='linear'))])
parameters = {'SVM__C':np.logspace(0, 1, 10)}
G = GridSearchCV(PIPE, param_grid=parameters, cv=5)
G.fit(text_x, text_t)
print ('Training score',G.score(text_x, text_t))
print (G.best_params_)
G.fit(tp_x_1,tp_t_1)
x = G.score(tp_x_2, tp_t_2)
x+=G.score(tp_x_3, tp_t_3)
G.fit(tp_x_2,tp_t_2)
x+=G.score(tp_x_3, tp_t_3)
x+=G.score(tp_x_1, tp_t_1)
G.fit(tp_x_3,tp_t_3)
x+=G.score(tp_x_2, tp_t_2)
x+=G.score(tp_x_1, tp_t_1)
print('Cross_validation score',x/6)
print(((svm.SVC(kernel = 'linear', C = 1)).fit(text_x,text_t)).support_)
fig = plt.figure(1)
c = np.logspace(0, 1, 10)
matrix = np.zeros((10,3))
for i in range (10):
svc = svm.SVC(kernel='linear',C = c[i])
svc.fit(text_x, text_t)
matrix[i][0] = i
matrix[i][1] = svc.score(text_x, text_t)
svc.fit(tp_x_1,tp_t_1)
x1 = svc.score(tp_x_2, tp_t_2)
x1+=svc.score(tp_x_3, tp_t_3)
svc.fit(tp_x_2,tp_t_2)
x1+=svc.score(tp_x_3, tp_t_3)
x1+=svc.score(tp_x_1, tp_t_1)
svc.fit(tp_x_3,tp_t_3)
x1+=svc.score(tp_x_2, tp_t_2)
x1+=svc.score(tp_x_1, tp_t_1)
matrix[i][2] = x1/6
plt.plot(matrix[:,0:1],matrix[:,1:2],label = 'cross_validation score')
plt.plot(matrix[:,0:1],matrix[:,2:3],label = 'Training score')
plt.title('C vs Accuracy')
plt.xlabel('C')
plt.ylabel('Accuracy')
plt.xscale('log')
plt.legend()
plt.show()
PIPE = Pipeline([('scaler', StandardScaler()), ('SVM', svm.SVC(kernel='rbf'))])
parameters = {'SVM__C':np.logspace(0, 1, 10), 'SVM__gamma':np.logspace(0, 1, 10)}
G = GridSearchCV(PIPE, param_grid=parameters, cv=5)
G.fit(text_x, text_t)
print ('Training score',G.score(text_x, text_t))
print (G.best_params_)
G.fit(tp_x_1,tp_t_1)
y = G.score(tp_x_2, tp_t_2)
y+=G.score(tp_x_3, tp_t_3)
G.fit(tp_x_2,tp_t_2)
y+=G.score(tp_x_3, tp_t_3)
y+=G.score(tp_x_1, tp_t_1)
G.fit(tp_x_3,tp_t_3)
y+=G.score(tp_x_2, tp_t_2)
y+=G.score(tp_x_1, tp_t_1)
print('Cross_validation score',y/6)
print(((svm.SVC(kernel = 'rbf', C = 1.29,gamma = 1)).fit(text_x,text_t)).support_)
puto = np.zeros((100,1))
luto = np.zeros((100,1))
c = np.logspace(0, 1, 10)
g = np.logspace(0, 1, 10)
for i in range (10):
for j in range(10):
svc = svm.SVC(kernel='rbf',C = c[i],gamma = g[j])
svc.fit(text_x, text_t)
puto[10*i+j][0] = svc.score(text_x, text_t)
svc.fit(tp_x_1,tp_t_1)
y1 = svc.score(tp_x_2, tp_t_2)
y1+=svc.score(tp_x_3, tp_t_3)
svc.fit(tp_x_2,tp_t_2)
y1+=svc.score(tp_x_3, tp_t_3)
y1+=svc.score(tp_x_1, tp_t_1)
svc.fit(tp_x_3,tp_t_3)
y1+=svc.score(tp_x_2, tp_t_2)
y1+=svc.score(tp_x_1, tp_t_1)
luto[10*i+j][0] = y1/6
g, c = np.meshgrid(g, c)
graph = np.ravel(puto)
patrix = np.ravel(luto)
patrix = patrix.reshape(c.shape)
fig, p = plt.subplots()
k = p.contourf(c, g, patrix)
cbar = fig.colorbar(k)
plt.title('Accuracy v/s C and gamma (cross-validation)')
plt.xlabel('C')
plt.ylabel('gamma')
plt.xscale('log')
plt.yscale('log')
plt.show()
graph = graph.reshape(c.shape)
fig, p = plt.subplots()
k = p.contourf(c, g, graph)
cbar = fig.colorbar(k)
plt.title('Accuracy v/s C and gamma (training)')
plt.xlabel('C')
plt.ylabel('gamma')
plt.xscale('log')
plt.yscale('log')
plt.show()
start = timer()
PIPE = Pipeline([('scaler', StandardScaler()), ('SVM', svm.SVC(kernel='poly'))])
parameters = {'SVM__C':np.logspace(0, 1, 10), 'SVM__gamma':np.logspace(0, 1, 10),'SVM__degree':[1,5]}
G = GridSearchCV(PIPE, param_grid=parameters, cv=5)
G.fit(text_x, text_t)
print ('Training score',G.score(text_x, text_t))
print (G.best_params_)
G.fit(tp_x_1,tp_t_1)
z = G.score(tp_x_2, tp_t_2)
z+=G.score(tp_x_3, tp_t_3)
G.fit(tp_x_2,tp_t_2)
z+=G.score(tp_x_3, tp_t_3)
z+=G.score(tp_x_1, tp_t_1)
G.fit(tp_x_3,tp_t_3)
z+=G.score(tp_x_2, tp_t_2)
z+=G.score(tp_x_1, tp_t_1)
print('Cross_validation score',z/6)
end = timer()
print('TIME',end - start)
print(((svm.SVC(kernel = 'poly', C = 1,gamma = 1,degree = 1)).fit(text_x,text_t)).support_)
suto = np.zeros((100,1))
nuto = np.zeros((100,1))
c = np.logspace(0, 1, 10)
g = np.logspace(0, 1, 10)
for i in range (10):
for j in range(10):
svc = svm.SVC(kernel='poly',C = c[i],gamma = g[j],degree = 1)
svc.fit(text_x, text_t)
suto[10*i+j][0] = svc.score(text_x, text_t)
svc.fit(tp_x_1,tp_t_1)
z1 = svc.score(tp_x_2, tp_t_2)
z1+=svc.score(tp_x_3, tp_t_3)
svc.fit(tp_x_2,tp_t_2)
z1+=svc.score(tp_x_3, tp_t_3)
z1+=svc.score(tp_x_1, tp_t_1)
svc.fit(tp_x_3,tp_t_3)
z1+=svc.score(tp_x_2, tp_t_2)
z1+=svc.score(tp_x_1, tp_t_1)
nuto[10*i+j][0] = z1/6
g, c = np.meshgrid(g, c)
trix = np.ravel(suto)
prix = np.ravel(nuto)
prix = prix.reshape(c.shape)
fig, p = plt.subplots()
k = p.contourf(c, g, prix)
cbar = fig.colorbar(k)
plt.xlabel('C')
plt.ylabel('gamma')
plt.title('Contour plot for Accuracy v/s C and gamma (cross-validation)')
plt.xscale('log')
plt.yscale('log')
plt.show()
# training
trix = trix.reshape(c.shape)
fig, p = plt.subplots()
k = p.contourf(c, g, trix)
cbar = fig.colorbar(k)
plt.xlabel('C')
plt.ylabel('gamma')
plt.title('Contour plot for Accuracy v/s C and gamma (training)')
plt.xscale('log')
plt.yscale('log')
plt.show()
PIPE = Pipeline([('scaler', StandardScaler()), ('SVM', svm.SVC(kernel='sigmoid'))])
parameters = {'SVM__C':np.logspace(0, 1, 10), 'SVM__gamma':np.logspace(0, 1, 10)}
G = GridSearchCV(PIPE, param_grid=parameters, cv=5)
G.fit(text_x, text_t)
print ('Training score',G.score(text_x, text_t))
print (G.best_params_)
G.fit(tp_x_1,tp_t_1)
f = G.score(tp_x_2, tp_t_2)
f+=G.score(tp_x_3, tp_t_3)
G.fit(tp_x_2,tp_t_2)
f+=G.score(tp_x_3, tp_t_3)
f+=G.score(tp_x_1, tp_t_1)
G.fit(tp_x_3,tp_t_3)
f+=G.score(tp_x_2, tp_t_2)
f+=G.score(tp_x_1, tp_t_1)
print('Cross_validation score',f/6)
print(((svm.SVC(kernel = 'sigmoid', C = 10,gamma = 1)).fit(text_x,text_t)).support_)
jito = np.zeros((100,1))
kito = np.zeros((100,1))
c = np.logspace(0, 1, 10)
g = np.logspace(0, 1, 10)
for i in range (10):
for j in range(10):
svc = svm.SVC(kernel='sigmoid',C = c[i],gamma = g[j])
svc.fit(text_x, text_t)
jito[10*i+j][0] = svc.score(text_x, text_t)
svc.fit(tp_x_1,tp_t_1)
f1 = svc.score(tp_x_2, tp_t_2)
f1+=svc.score(tp_x_3, tp_t_3)
svc.fit(tp_x_2,tp_t_2)
f1+=svc.score(tp_x_3, tp_t_3)
f1+=svc.score(tp_x_1, tp_t_1)
svc.fit(tp_x_3,tp_t_3)
f1+=svc.score(tp_x_2, tp_t_2)
f1+=svc.score(tp_x_1, tp_t_1)
kito[10*i+j][0] = f1/6
g, c = np.meshgrid(g, c)
tatrix = np.ravel(jito)
katrix = np.ravel(kito)
katrix = katrix.reshape(c.shape)
fig, p = plt.subplots()
k = p.contourf(c, g, katrix)
cbar = fig.colorbar(k)
plt.title('Accuracy v/s C and gamma (cross-validation)')
plt.xlabel('C')
plt.ylabel('gamma')
plt.xscale('log')
plt.yscale('log')
plt.show()
tatrix = tatrix.reshape(c.shape)
fig, p = plt.subplots()
k = p.contourf(c, g, tatrix)
cbar = fig.colorbar(k)
plt.title('Accuracy v/s C and gamma (training)')
plt.xlabel('C')
plt.ylabel('gamma')
plt.xscale('log')
plt.yscale('log')
plt.show()
# In[5]:
# FOR CLASSES {2,3}
text_x_2 = (dp[632:1230,:25])
text_t_2 = (dp[632:1230,25])
# for cross_validation
tp_x_1 = np.append(dp[632:732,:25],dp[943:1043,:25],axis=0)
tp_t_1 = np.append(dp[632:732,25],dp[943:1043,25],axis=0)
tp_x_2 = np.append(dp[732:832,:25],dp[1043:1143,:25],axis=0)
tp_t_2 = np.append(dp[732:832,25],dp[1043:1143,25],axis=0)
tp_x_3 = np.append(dp[832:942,:25],dp[1143:1230,:25],axis=0)
tp_t_3 = np.append(dp[832:942,25],dp[1143:1230,25],axis=0)
PIPE = Pipeline([('scaler', StandardScaler()), ('SVM', svm.SVC(kernel='linear'))])
parameters = {'SVM__C':np.logspace(0, 1, 10)}
G = GridSearchCV(PIPE, param_grid=parameters, cv=5)
G.fit(text_x_2, text_t_2)
print ('Training score',G.score(text_x_2, text_t_2))
print (G.best_params_)
G.fit(tp_x_1,tp_t_1)
l1 = G.score(tp_x_2, tp_t_2)
l1+=G.score(tp_x_3, tp_t_3)
G.fit(tp_x_2,tp_t_2)
l1+=G.score(tp_x_3, tp_t_3)
l1+=G.score(tp_x_1, tp_t_1)
G.fit(tp_x_3,tp_t_3)
l1+=G.score(tp_x_2, tp_t_2)
l1+=G.score(tp_x_1, tp_t_1)
print('Cross_validation score',l1/6)
print(((svm.SVC(kernel = 'linear', C = 7.74)).fit(text_x_2,text_t_2)).support_)
fig = plt.figure(2)
c = np.logspace(0, 1, 10)
matrix = np.zeros((10,3))
for i in range (10):
svc = svm.SVC(kernel='linear',C = c[i])
svc.fit(text_x_2, text_t_2)
matrix[i][0] = i
matrix[i][1] = svc.score(text_x_2, text_t_2)
svc.fit(tp_x_1,tp_t_1)
l2 = svc.score(tp_x_2, tp_t_2)
l2+=svc.score(tp_x_3, tp_t_3)
svc.fit(tp_x_2,tp_t_2)
l2+=svc.score(tp_x_3, tp_t_3)
l2+=svc.score(tp_x_1, tp_t_1)
svc.fit(tp_x_3,tp_t_3)
l2+=svc.score(tp_x_2, tp_t_2)
l2+=svc.score(tp_x_1, tp_t_1)
matrix[i][2] = l2/6
plt.plot(matrix[:,0:1],matrix[:,1:2],label = 'cross_validation score')
plt.plot(matrix[:,0:1],matrix[:,2:3],label = 'Training score')
plt.title('C vs Accuracy')
plt.xlabel('C')
plt.ylabel('Accuracy')
plt.xscale('log')
plt.legend()
plt.show()
PIPE = Pipeline([('scaler', StandardScaler()), ('SVM', svm.SVC(kernel='rbf'))])
parameters = {'SVM__C':np.logspace(0, 1, 10), 'SVM__gamma':np.logspace(0, 1, 10)}
G = GridSearchCV(PIPE, param_grid=parameters, cv=5)
G.fit(text_x_2, text_t_2)
print ('Training score',G.score(text_x_2, text_t_2))
print (G.best_params_)
G.fit(tp_x_1,tp_t_1)
l3 = G.score(tp_x_2, tp_t_2)
l3+=G.score(tp_x_3, tp_t_3)
G.fit(tp_x_2,tp_t_2)
l3+=G.score(tp_x_3, tp_t_3)
l3+=G.score(tp_x_1, tp_t_1)
G.fit(tp_x_3,tp_t_3)
l3+=G.score(tp_x_2, tp_t_2)
l3+=G.score(tp_x_1, tp_t_1)
print('Cross_validation score',l3/6)
print(((svm.SVC(kernel = 'rbf', C = 1.29,gamma =1)).fit(text_x_2,text_t_2)).support_)
puto = np.zeros((100,1))
luto = np.zeros((100,1))
c = np.logspace(0, 1, 10)
g = np.logspace(0, 1, 10)
for i in range (10):
for j in range(10):
svc = svm.SVC(kernel='rbf',C = c[i],gamma = g[j])
svc.fit(text_x_2, text_t_2)
puto[10*i+j][0] = svc.score(text_x_2, text_t_2)
svc.fit(tp_x_1,tp_t_1)
l4 = svc.score(tp_x_2, tp_t_2)
l4+=svc.score(tp_x_3, tp_t_3)
svc.fit(tp_x_2,tp_t_2)
l4+=svc.score(tp_x_3, tp_t_3)
l4+=svc.score(tp_x_1, tp_t_1)
svc.fit(tp_x_3,tp_t_3)
l4+=svc.score(tp_x_2, tp_t_2)
l4+=svc.score(tp_x_1, tp_t_1)
luto[10*i+j][0] = l4/6
g, c = np.meshgrid(g, c)
graph = np.ravel(puto)
patrix = np.ravel(luto)
patrix = patrix.reshape(c.shape)
fig, p = plt.subplots()
k = p.contourf(c, g, patrix)
cbar = fig.colorbar(k)
plt.title('Accuracy v/s C and gamma (cross-validation)')
plt.xlabel('C')
plt.ylabel('gamma')
plt.xscale('log')
plt.yscale('log')
plt.show()
graph = graph.reshape(c.shape)
fig, p = plt.subplots()
k = p.contourf(c, g, graph)
cbar = fig.colorbar(k)
plt.title('Accuracy v/s C and gamma (training)')
plt.xlabel('C')
plt.ylabel('gamma')
plt.xscale('log')
plt.yscale('log')
plt.show()
start1 = timer()
PIPE = Pipeline([('scaler', StandardScaler()), ('SVM', svm.SVC(kernel='poly'))])
parameters = {'SVM__C':np.logspace(0, 1, 10), 'SVM__gamma':np.logspace(0, 1, 10),'SVM__degree':[1,5]}
G = GridSearchCV(PIPE, param_grid=parameters, cv=5)
G.fit(text_x_2, text_t_2)
print ('Training score',G.score(text_x_2, text_t_2))
print (G.best_params_)
G.fit(tp_x_1,tp_t_1)
l5 = G.score(tp_x_2, tp_t_2)
l5+=G.score(tp_x_3, tp_t_3)
G.fit(tp_x_2,tp_t_2)
l5+=G.score(tp_x_3, tp_t_3)
l5+=G.score(tp_x_1, tp_t_1)
G.fit(tp_x_3,tp_t_3)
l5+=G.score(tp_x_2, tp_t_2)
l5+=G.score(tp_x_1, tp_t_1)
print('Cross_validation score',l5/6)
end1 = timer()
print('TIME',end1 - start1)
print(((svm.SVC(kernel = 'poly', C = 1,gamma =1 ,degree=5)).fit(text_x_2,text_t_2)).support_)
suto = np.zeros((100,1))
nuto = np.zeros((100,1))
c = np.logspace(0, 1, 10)
g = np.logspace(0, 1, 10)
for i in range (10):
for j in range(10):
svc = svm.SVC(kernel='poly',C = c[i],gamma = g[j],degree = 5)
svc.fit(text_x_2, text_t_2)
suto[10*i+j][0] = svc.score(text_x_2, text_t_2)
svc.fit(tp_x_1,tp_t_1)
l6 = svc.score(tp_x_2, tp_t_2)
l6+=svc.score(tp_x_3, tp_t_3)
svc.fit(tp_x_2,tp_t_2)
l6+=svc.score(tp_x_3, tp_t_3)
l6+=svc.score(tp_x_1, tp_t_1)
svc.fit(tp_x_3,tp_t_3)
l6+=svc.score(tp_x_2, tp_t_2)
l6+=svc.score(tp_x_1, tp_t_1)
nuto[10*i+j][0] = l6/6
g, c = np.meshgrid(g, c)
trix = np.ravel(suto)
prix = np.ravel(nuto)
prix = prix.reshape(c.shape)
fig, p = plt.subplots()
k = p.contourf(c, g, prix)
cbar = fig.colorbar(k)
plt.title('Accuracy v/s C and gamma (cross-validation)')
plt.xlabel('C')
plt.ylabel('gamma')
plt.xscale('log')
plt.yscale('log')
plt.show()
trix = trix.reshape(c.shape)
fig, p = plt.subplots()
k = p.contourf(c, g, trix)
cbar = fig.colorbar(k)
plt.title('Accuracy v/s C and gamma (training)')
plt.xlabel('C')
plt.ylabel('gamma')
plt.xscale('log')
plt.yscale('log')
plt.show()
PIPE = Pipeline([('scaler', StandardScaler()), ('SVM', svm.SVC(kernel='sigmoid'))])
parameters = {'SVM__C':np.logspace(0, 1, 10), 'SVM__gamma':np.logspace(0, 1, 10)}
import pandas as pd
import numpy as np
from scipy.interpolate import griddata
from scipy import ndimage
from typing import List, Tuple, Dict, Optional
from sklearn.neighbors import NearestNeighbors
from .data_helper import low_high_quantile
from matplotlib import pyplot as plt
from matplotlib import patches, patheffects
from mpl_toolkits.axes_grid1 import make_axes_locatable
from collections import OrderedDict
import statsmodels.api as sm
from numpy import ma
from matplotlib import cbook
from matplotlib.colors import Normalize
from matplotlib.colors import LinearSegmentedColormap
#colormap from SHAP packakge
red_blue = LinearSegmentedColormap('red_blue', { # #1E88E5 -> #ff0052
'red': ((0.0, 30./255, 30./255),
(1.0, 255./255, 255./255)),
'green': ((0.0, 136./255, 136./255),
(1.0, 13./255, 13./255)),
'blue': ((0.0, 229./255, 229./255),
(1.0, 87./255, 87./255)),
'alpha': ((0.0, 1, 1),
(0.5, 0.3, 0.3),
(1.0, 1, 1))
})
blue_green = LinearSegmentedColormap('blue_green', { # #1E88E5 -> #ff0052
'green': ((0.0, 30./255, 30./255),
(1.0, 255./255, 255./255)),
'red': ((0.0, 50./255, 50./255),
(1.0, 10./255, 10./255)),
'blue': ((0.0, 229./255, 229./255),
(1.0, 87./255, 87./255)),
'alpha': ((0.0, 1, 1),
(0.5, 0.3, 0.3),
(1.0, 1, 1))
})
blue_green_solid = LinearSegmentedColormap('blue_green_solid', { # #1E88E5 -> #ff0052
'green': ((0.0, 30./255, 30./255),
(1.0, 255./255, 255./255)),
'red': ((0.0, 50./255, 50./255),
(1.0, 10./255, 10./255)),
'blue': ((0.0, 229./255, 229./255),
(1.0, 87./255, 87./255)),
'alpha': ((0.0, 1, 1),
(0.5, 1, 1),
(1.0, 1, 1))
})
# setting midpoint for colorbar
# https://stackoverflow.com/questions/7404116/defining-the-midpoint-of-a-colormap-in-matplotlib
class MidPointNorm(Normalize):
def __init__(self, midpoint=0, vmin=None, vmax=None, clip=False):
Normalize.__init__(self,vmin, vmax, clip)
self.midpoint = midpoint
def __call__(self, value, clip=None):
if clip is None:
clip = self.clip
result, is_scalar = self.process_value(value)
self.autoscale_None(result)
vmin, vmax, midpoint = self.vmin, self.vmax, self.midpoint
if not (vmin < midpoint < vmax):
raise ValueError("midpoint must be between maxvalue and minvalue.")
elif vmin == vmax:
result.fill(0) # Or should it be all masked? Or 0.5?
elif vmin > vmax:
raise ValueError("maxvalue must be bigger than minvalue")
else:
vmin = float(vmin)
vmax = float(vmax)
if clip:
mask = ma.getmask(result)
result = ma.array(np.clip(result.filled(vmax), vmin, vmax),
mask=mask)
# ma division is very slow; we can take a shortcut
resdat = result.data
#First scale to -1 to 1 range, than to from 0 to 1.
resdat -= midpoint
resdat[resdat>0] /= abs(vmax - midpoint)
resdat[resdat<0] /= abs(vmin - midpoint)
resdat /= 2.
resdat += 0.5
result = ma.array(resdat, mask=result.mask, copy=False)
if is_scalar:
result = result[0]
return result
def inverse(self, value):
if not self.scaled():
raise ValueError("Not invertible until scaled")
vmin, vmax, midpoint = self.vmin, self.vmax, self.midpoint
        if np.iterable(value):
val = ma.asarray(value)
val = 2 * (val-0.5)
val[val>0] *= abs(vmax - midpoint)
val[val<0] *= abs(vmin - midpoint)
val += midpoint
return val
else:
val = 2 * (val - 0.5)
if val < 0:
return val*abs(vmin-midpoint) + midpoint
else:
return val*abs(vmax-midpoint) + midpoint
def plot_shap_dependence(shapVals_df, df, feature='ProppantIntensity_LBSPerFT',
feature_disp=None, cmap=plt.cm.coolwarm, s=10, title=None, color_bar=True, color_title=None):
feature_disp = feature if feature_disp is None else feature_disp
title = feature_disp if title is None else title
color_title = 'Feature Impact' if color_title is None else color_title
x = df[feature].values
y = shapVals_df[feature].values
cvals =y
clow = np.nanpercentile(cvals, 5)
chigh = np.nanpercentile(cvals, 95)
norm = MidPointNorm(midpoint=0) if color_bar else MidPointNorm(midpoint=0, vmin=clow, vmax=chigh) # setting vmin/vmax will clip cbar
# scalarm = plt.cm.ScalarMappable(cmap=cmap, norm=norm)
# scalarm._A = []
cvals_nans = np.isnan(cvals)
cval_notNan = np.invert(cvals_nans)
fig, ax = plt.subplots(figsize=(8,5))
ax.scatter(x[cvals_nans], y[cvals_nans], s=s, color="#777777", alpha=1, rasterized=len(x) > 500)
mapable = ax.scatter(x[cval_notNan], y[cval_notNan], s=s, c=cvals[cval_notNan], cmap=cmap, alpha=1,
norm=norm, rasterized=len(x) > 500)
if color_bar:
cb = colorbar(mapable, size=0.15)
cb.set_clim(clow, chigh) # setting vmin/vmaqx here will set even color beyond these numbers
# cb = colorbar(scalarm, size=0.15)
cb.set_label(color_title, size=13)
cb.outline.set_visible(False)
cb.set_alpha(1)
ax.set_xlabel(feature_disp, fontsize=14)
ax.set_ylabel('Feature Impact', fontsize=14)
ax.set_title(title, fontsize=14)
return ax
def nan_to_mean(arr:np.ndarray, axis:int=0)->np.ndarray:
    '''Fills NaNs with the mean over the given axis.
    Uses a masked array so that columns consisting entirely of NaNs are still
    handled (np.nanmean() cannot do that). Another option would be some kind
    of spline extrapolation.'''
    data_m = np.ma.masked_invalid(arr, copy=True)
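    # Hedged completion: the remainder of this function was missing from the
    # extracted source. Fill NaNs with the per-axis mean; slices that are all
    # NaN fall back to 0 so the masked mean never propagates NaN.
    means = np.ma.filled(data_m.mean(axis=axis), 0.)
    return np.where(np.isnan(arr), np.expand_dims(means, axis=axis), arr)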
import csv
import numpy as np
import matplotlib.pyplot as plt
anime_data = []
with open("Data/AnimeList.csv", "r", encoding="utf8") as csv_data:
csv_reader = csv.reader(csv_data, delimiter=',')
firstLine = True
for row in csv_reader:
if firstLine: firstLine = False
else:
anime_data.append(row)
anime_meta = [[int(a[0]), float(a[15]), int(a[16]), int(a[19])] for a in anime_data]
anime_members_ranked = sorted(anime_meta, key=lambda m: m[3], reverse=True)
anime_scoredby_ranked = sorted(anime_meta, key=lambda m: m[2], reverse=True)
scoredby = [m[2] for m in anime_meta]
print(max(scoredby), min(scoredby), np.mean(np.array(scoredby)))
import numpy as np
import scipy.special
import itertools
def isposint(n):
"""
Determines whether number n is a positive integer.
:param n: number
:return: bool
"""
return isinstance(n, int) and n > 0
def isdistribution(p):
"""
:param p: a vector representing a discrete probability distribution
:return: True if p is a valid probability distribution
"""
    return np.all(p >= 0.0) and np.isclose(np.sum(p), 1.0)
import math
import os
from typing import List
import numpy as np
from ..utils.cache import get_data_cache_dir
from .base import BaseDataset
def _ellipse2box(major_r, minor_r, angle, center_x, center_y):
tan_t = -(minor_r / major_r) * math.tan(angle)
t = math.atan(tan_t)
x1 = center_x + (
major_r * math.cos(t) * math.cos(angle)
- minor_r * math.sin(t) * math.sin(angle)
)
x2 = center_x + (
major_r * math.cos(t + math.pi) * math.cos(angle)
- minor_r * math.sin(t + math.pi) * math.sin(angle)
)
x_max = max(x1, x2)
x_min = min(x1, x2)
if math.tan(angle) != 0:
tan_t = (minor_r / major_r) * (1 / math.tan(angle))
else:
tan_t = (minor_r / major_r) * (1 / (math.tan(angle) + 0.0001))
t = math.atan(tan_t)
y1 = center_y + (
minor_r * math.sin(t) * math.cos(angle)
+ major_r * math.cos(t) * math.sin(angle)
)
y2 = center_y + (
minor_r * math.sin(t + math.pi) * math.cos(angle)
+ major_r * math.cos(t + math.pi) * math.sin(angle)
)
y_max = max(y1, y2)
y_min = min(y1, y2)
return x_min, y_min, x_max, y_max
def _load_single_annotation_fold(source_path: str, fold_idx: int):
# source_path/FDDB-fold-{:02d}-ellipseList.txt
# TODO check fold idx range
fold_file_name = "FDDB-fold-{:02d}-ellipseList.txt".format(fold_idx)
fold_prefix = "FDDB-folds"
img_file_path = os.path.join(source_path, "{}.jpg")
fold_file_path = os.path.join(source_path, fold_prefix, fold_file_name)
ids = []
targets = []
boxes = []
with open(fold_file_path, "r") as foo:
for line in foo.read().split("\n"):
if os.path.isfile(img_file_path.format(line)):
# indicates img file path
if len(boxes) > 0:
boxes = np.array(boxes)
targets.append(boxes)
ids.append(img_file_path.format(line))
boxes = []
elif line.isnumeric():
# indicates number of face line
pass
elif line != "":
# indicates box
# 123.583300 85.549500 1.265839 269.693400 161.781200 1
major_r, minor_r, angle, cx, cy, _ = [
float(point) for point in line.split(" ") if point != ""
]
box = _ellipse2box(major_r, minor_r, angle, cx, cy)
boxes.append(box)
if len(boxes) > 0:
        boxes = np.array(boxes)
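        targets.append(boxes)
    # Hedged completion: the remainder of this function was missing from the
    # extracted source. Return parallel lists of image paths and per-image
    # bounding-box arrays.
    return ids, targets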
from __future__ import annotations
from collections import defaultdict
from dataclasses import dataclass, astuple
from enum import IntEnum
from functools import reduce
from itertools import product
from math import sqrt
from typing import Generator, NamedTuple
import numpy as np
from tool.runners.python import SubmissionPy
SEA_MONSTER = """
#
# ## ## ###
# # # # # #
""".strip(
"\n"
)
class ThoreSubmission(SubmissionPy):
def run(self, s):
"""
:param s: input in string format
:return: solution flag
"""
puzzle = solve_puzzle(s.strip("\n"))
        monster_lines = SEA_MONSTER.splitlines()
        monster_width = max(len(line) for line in monster_lines)
        # pad rows to equal width so the pattern still parses even if trailing
        # spaces were stripped from the SEA_MONSTER constant
        sea_monster = np.array(
            [[c == "#" for c in line.ljust(monster_width)] for line in monster_lines],
            dtype=bool,
        )
for transformation in Transformation.get_all():
puzzle_transformed = transformation.apply_array(puzzle)
sea_monster_locations = find_pattern(puzzle_transformed, sea_monster)
if sea_monster_locations:
break
return puzzle.sum() - len(sea_monster_locations) * sea_monster.sum()
def find_pattern(puzzle, pattern):
n, p = puzzle.shape
k, l = pattern.shape
return [
(i, j)
for i, j in product(range(n - k + 1), range(p - l + 1))
if (puzzle[i : i + k, j : j + l] & pattern).sum() == pattern.sum()
]
def solve_puzzle(s):
# parse the puzzle pieces
pieces_by_id = {}
for piece_str in s.split("\n\n"):
piece_lines = piece_str.splitlines()
pid = int(piece_lines[0].split(" ")[1][:-1])
piece = np.array(
[[c == "#" for c in line] for line in piece_lines[1:]], dtype=bool
)
pieces_by_id[pid] = piece
n_pieces = len(pieces_by_id)
size = int(sqrt(n_pieces)) # assumption: square puzzle
piece_size = next(iter(pieces_by_id.values())).shape[0] # assumption: square pieces
# list all matching borders
matches = defaultdict(dict)
border_index = {}
for pid, piece in pieces_by_id.items():
for side, border in enumerate(
[
tuple(piece[:, 0]),
tuple(piece[0]),
tuple(piece[:, -1]),
tuple(piece[-1]),
]
):
if border in border_index:
other_pid, other_side = border_index[border]
matches[pid][side] = Match(other_pid, other_side, False)
matches[other_pid][other_side] = Match(pid, side, False)
elif border[::-1] in border_index:
other_pid, other_side = border_index[border[::-1]]
matches[pid][side] = Match(other_pid, other_side, True)
matches[other_pid][other_side] = Match(pid, side, True)
border_index[border] = (pid, side)
corners = [pid for pid in matches if len(matches[pid]) == 2]
# check the assumption that there isn't any "extra" match, and some sanity checks
n_matches = sum(len(d) for d in matches.values())
assert n_matches == 4 * (size - 1) * size, "Wrong number of matches"
assert len(corners) == 4, "Wrong number of corner pieces"
assert len([pid for pid in matches if len(matches[pid]) == 3]) == 4 * (
size - 2
), "Wrong number of side pieces"
assert (
len([pid for pid in matches if len(matches[pid]) == 4]) == (size - 2) ** 2
), "Wrong number of inner pieces"
# take a corner, transform it as necessary and put it at the top left
solution = []
corner_pid = corners[0]
solution.append(
(corner_pid, Transformation.align_top_left_corner(*matches[corner_pid].keys()))
)
# put the pieces one by one, from left to right and top to bottom
for i in range(1, n_pieces):
if i % size == 0:
# new row: match the bottom side of the first piece of the previous row
pid, transformation = solution[i - size]
original_side, flipped = transformation.get_side_after(Side.BOTTOM)
matching_side = matches[pid][original_side]
p, s, f = matching_side
# rotate/transpose the candidate so that the matched side is at the top
solution.append((p, Transformation.from_target(s, Side.TOP, flipped ^ f)))
else:
# continue the row: match the right side of the previous piece
pid, transformation = solution[i - 1]
original_side, flipped = transformation.get_side_after(Side.RIGHT)
matching_side = matches[pid][original_side]
p, s, f = matching_side
# rotate/transpose the candidate so that the matched side is at the left
solution.append((p, Transformation.from_target(s, Side.LEFT, flipped ^ f)))
# reconstruct the puzzle by stitching the border
puzzle = np.zeros(((piece_size - 2) * size,) * 2, dtype=bool)
for i, (pid, transformation) in enumerate(solution):
x, y = i // size, i % size # piece coordinates
transformed_piece = transformation.apply_array(pieces_by_id[pid])
puzzle[
(piece_size - 2) * x : (piece_size - 2) * (x + 1),
(piece_size - 2) * y : (piece_size - 2) * (y + 1),
] = transformed_piece[1:-1, 1:-1]
return puzzle
class Side(IntEnum):
LEFT = 0
TOP = 1
RIGHT = 2
BOTTOM = 3
def __add__(self, rotation):
return Side((int(self) + rotation) % 4)
def __sub__(self, rotation):
return Side((int(self) - rotation) % 4)
class Match(NamedTuple):
pid: int
side: Side
flipped: bool
@dataclass(frozen=True)
class Transformation:
# these three transformations can generate all the rotations/flips
transpose: bool
fliplr: bool
flipud: bool
def apply_array(self, array: np.ndarray) -> np.ndarray:
"""Apply the transformation to an array"""
functions = (
f for f, b in zip((np.transpose, np.fliplr, np.flipud), astuple(self)) if b
)
return reduce(lambda f, g: lambda x: g(f(x)), functions, lambda x: x)(array)
def get_side_after(self, side: Side) -> Side:
"""Apply the transformation and return the new side which replaced side,
and if it's flipped"""
t_side, flipped = side, False
if self.transpose:
t_side = t_side + 1 if side % 2 == 0 else t_side - 1
if self.fliplr:
if side in [Side.LEFT, Side.RIGHT]:
t_side += 2
else:
flipped = True
if self.flipud:
if side in [Side.TOP, Side.BOTTOM]:
t_side += 2
else:
flipped = True
return t_side, flipped
@classmethod
def get_all(cls) -> Generator[Transformation, None, None]:
"""Generate all the possible transformations"""
for transpose, fliplr, flipud in product((False, True), repeat=3):
yield cls(transpose, fliplr, flipud)
@classmethod
def from_target(
cls, source_side: Side, target_side: Side, flip: bool
) -> Transformation:
"""Return the transformation that replace target_side by source_side,
the latter being possibly flipped"""
for transfo in cls.get_all():
if transfo.get_side_after(target_side) == (source_side, flip):
return transfo
@classmethod
def align_top_left_corner(
cls, matching_side1: Side, matching_side2: Side
) -> Transformation:
"""Return the transformation so that the two matching sides are at the
bottom and right, meaning that the piece can be put at the top left corner"""
if {matching_side1, matching_side2} == {Side.RIGHT, Side.BOTTOM}:
return Transformation(False, False, False)
elif {matching_side1, matching_side2} == {Side.RIGHT, Side.TOP}:
return Transformation(False, False, True)
elif {matching_side1, matching_side2} == {Side.LEFT, Side.TOP}:
return Transformation(False, True, True)
elif {matching_side1, matching_side2} == {Side.LEFT, Side.BOTTOM}:
return Transformation(False, True, False)
def test_transformation():
    a = np.arange(9)
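    a = a.reshape(3, 3)
    # Hedged completion: the remainder of this test was missing from the
    # extracted source. Check a few transformations against numpy directly.
    assert (Transformation(False, False, False).apply_array(a) == a).all()
    assert (Transformation(True, False, False).apply_array(a) == a.T).all()
    assert (Transformation(False, True, False).apply_array(a) == np.fliplr(a)).all()
    assert (Transformation(False, False, True).apply_array(a) == np.flipud(a)).all()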
# Copyright 2016 by Raytheon BBN Technologies Corp. All Rights Reserved.
from qgl2.qgl2 import qgl2decl, qreg, QRegister
from qgl2.qgl1 import X90, Id, Y, U90, MEAS, pulseCentered
from qgl2.basic_sequences.helpers import create_cal_seqs, cal_descriptor, delay_descriptor
from qgl2.util import init
from math import pi
@qgl2decl
def HahnEcho(qubit: qreg, pulseSpacings, periods = 0, calRepeats=2):
"""
A single pulse Hahn echo with variable phase of second pi/2 pulse.
Parameters
----------
qubit : logical channel to implement sequence (LogicalChannel)
pulseSpacings : pulse spacings to sweep over; the t in 90-t-180-t-180 (iterable)
periods: number of artificial oscillations
calRepeats : how many times to repeat calibration scalings (default 2)
"""
# Original:
# seqs=[];
# for k in range(len(pulseSpacings)):
# seqs.append([X90(qubit), Id(qubit, pulseSpacings[k]), Y(qubit), Id(qubit,pulseSpacings[k]), \
# U90(qubit,phase=2*pi*periods/len(pulseSpacings)*k), MEAS(qubit)])
# # Tack on the calibration scalings
# seqs += create_cal_seqs((qubit,), calRepeats)
# fileNames = compile_to_hardware(seqs, 'Echo/Echo')
# print(fileNames)
# if showPlot:
# plot_pulse_files(fileNames)
for k in range(len(pulseSpacings)):
init(qubit)
X90(qubit)
# FIXME 9/28/16: Must name the length arg (issue #45)
Id(qubit, length=pulseSpacings[k])
Y(qubit)
Id(qubit, length=pulseSpacings[k])
U90(qubit, phase=2*pi*periods/len(pulseSpacings)*k)
MEAS(qubit)
create_cal_seqs(qubit, calRepeats)
# compileAndPlot('Echo/Echo', showPlot)
@qgl2decl
def CPMG(qubit: qreg, numPulses, pulseSpacing, calRepeats=2):
"""
CPMG pulse train with fixed pulse spacing. Note this pulse spacing is centre to centre,
i.e. it accounts for the pulse width
Parameters
----------
qubit : logical channel to implement sequence (LogicalChannel)
numPulses : number of 180 pulses; should be even (iterable)
pulseSpacing : spacing between the 180's (seconds)
calRepeats : how many times to repeat calibration scalings (default 2)
"""
# Original:
# # First setup the t-180-t block
# CPMGBlock = [Id(qubit, (pulseSpacing-qubit.pulse_params['length'])/2),
# Y(qubit), Id(qubit, (pulseSpacing-qubit.pulse_params['length'])/2)]
# seqs = [[X90(qubit)] + CPMGBlock*rep + [X90(qubit), MEAS(qubit)] for rep in numPulses]
# # Tack on the calibration scalings
# seqs += create_cal_seqs((qubit,), calRepeats)
# fileNames = compile_to_hardware(seqs, 'CPMG/CPMG')
# print(fileNames)
# if showPlot:
# plot_pulse_files(fileNames)
# Create numPulses sequences
for rep in numPulses:
init(qubit)
X90(qubit)
# Repeat the t-180-t block rep times
for _ in range(rep):
pulseCentered(qubit, Id, pulseSpacing)
Y(qubit)
pulseCentered(qubit, Id, pulseSpacing)
X90(qubit)
MEAS(qubit)
# Tack on calibration
create_cal_seqs(qubit, calRepeats)
# compileAndPlot('CPMG/CPMG', showPlot)
# A main for running the sequences here with some typical argument values
# Here it runs all of them; could do a parse_args like main.py
def main():
from pyqgl2.qreg import QRegister
import pyqgl2.test_cl
from pyqgl2.main import compile_function, qgl2_compile_to_hardware
import numpy as np
toHW = True
plotPulses = False # This tries to produce graphics to display
pyqgl2.test_cl.create_default_channelLibrary(toHW, True)
# # To turn on verbose logging in compile_function
# from pyqgl2.ast_util import NodeError
# from pyqgl2.debugmsg import DebugMsg
# NodeError.MUTE_ERR_LEVEL = NodeError.NODE_ERROR_NONE
# DebugMsg.set_level(0)
# Now compile the QGL2 to produce the function that would generate the expected sequence.
# Supply the path to the QGL2, the main function in that file, and a list of the args to that function.
# Can optionally supply saveOutput=True to save the qgl1.py
# file,
# and intermediate_output="path-to-output-file" to save
# intermediate products
# Pass in QRegister(s) NOT real Qubits
q1 = QRegister("q1")
# Axis Descriptor generator functions here
# This is ugly; they're method dependent, but I can't do them in the QGL2 itself
# Additionally, each uses values from the args to the function
# So here we make those arguments be constants so we can use them twice
# without rewriting the values
    hahnSpacings = np.linspace(0, 5e-6, 11)
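    cpmgNumPulses = [0, 2, 4, 8]
    cpmgSpacing = 500e-9
    # Hedged sketch: the rest of this main() was missing from the extracted
    # source. The commented call below assumes the usual QGL2 convention of
    # compile_function(source file path, entry-point name, tuple of args);
    # the file path, argument values, and hardware label are placeholders.
    # resFunction = compile_function(__file__, "HahnEcho",
    #                                (q1, hahnSpacings, 0, 2))
    # seqs = resFunction()
    # if toHW:
    #     qgl2_compile_to_hardware(seqs, filename="Echo/Echo")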
from coopihc.space.Space import Space
from coopihc.space.StateElement import StateElement
from coopihc.space.utils import (
StateNotContainedError,
StateNotContainedWarning,
)
import numpy
import pytest
def test_lists_applied_if_inputs_not_in_list_form():
"""Tests if lists are applied if inputs are not in list form."""
x = StateElement(
values=None,
spaces=Space(
[
numpy.array([-1], dtype=numpy.float32),
numpy.array([1], dtype=numpy.float32),
]
),
)
assert x["values"] == numpy.array([None])
assert x["spaces"] == [
Space(
[
numpy.array([-1], dtype=numpy.float32),
numpy.array([1], dtype=numpy.float32),
]
)
]
def test_clipping_mode_error():
"""Tests that appropriate error is raised by clipping mode."""
with pytest.raises(StateNotContainedError):
x = StateElement(
values=3.0,
spaces=Space(
[
numpy.array([-1], dtype=numpy.float32),
numpy.array([1], dtype=numpy.float32),
]
),
clipping_mode="error",
)
def test_clipping_mode_warning():
"""Tests that appropriate error is raised by clipping mode."""
with pytest.warns(StateNotContainedWarning):
x = StateElement(
values=3.0,
spaces=Space(
[
numpy.array([-1], dtype=numpy.float32),
numpy.array([1], dtype=numpy.float32),
]
),
clipping_mode="warning",
)
with pytest.warns(StateNotContainedWarning):
y = StateElement(
values=-3.0,
spaces=Space(
[
numpy.array([-1], dtype=numpy.float32),
numpy.array([1], dtype=numpy.float32),
]
),
clipping_mode="warning",
)
assert x["values"] == numpy.array([[3]], dtype=numpy.float32)
assert y["values"] == numpy.array([[-3]], dtype=numpy.float32)
def test_clipping():
"""Tests that clipping works."""
x = StateElement(
values=3.0,
spaces=Space(
[
numpy.array([-1], dtype=numpy.float32),
numpy.array([1], dtype=numpy.float32),
]
),
clipping_mode="clip",
)
y = StateElement(
values=-3.0,
spaces=Space(
[
numpy.array([-1], dtype=numpy.float32),
numpy.array([1], dtype=numpy.float32),
]
),
clipping_mode="clip",
)
assert x["values"] == numpy.array([[1]], dtype=numpy.float32)
assert y["values"] == numpy.array([[-1]], dtype=numpy.float32)
def test_clipping_mode():
"""Tests clipping mode (assumes typing priority set to
default (= space))."""
test_clipping_mode_error()
test_clipping_mode_warning()
test_clipping()
def test_typing_priority():
"""Tests clipping mode (assumes typing priority set to
default (= space))."""
x = StateElement(
values=3,
spaces=Space(
[
numpy.array([-1], dtype=numpy.float32),
numpy.array([1], dtype=numpy.float32),
]
),
typing_priority="space",
)
assert x["values"][0].dtype == numpy.float32
x = StateElement(
values=numpy.array([3], dtype=numpy.int16),
spaces=Space(
[
numpy.array([-1], dtype=numpy.float32),
numpy.array([1], dtype=numpy.float32),
]
),
typing_priority="value",
)
assert x["values"][0].dtype == numpy.int16
def test_init_simple():
test_lists_applied_if_inputs_not_in_list_form()
test_clipping_mode()
test_typing_priority()
def test_init_more_complex():
x = StateElement(
values=None,
spaces=[
Space(
[
numpy.array([-1], dtype=numpy.float32),
numpy.array([1], dtype=numpy.float32),
]
),
Space([numpy.array([1, 2, 3], dtype=numpy.int16)]),
Space([numpy.array([-6, -5, -4, -3, -2, -1], dtype=numpy.int16)]),
],
)
# ---------------------- testing values = None mechanism
assert (x["values"] == numpy.array([None, None, None])).all()
# ========================= clipping mode (assumes typing priority set to default (= space))
# ---------------- error
x.clipping_mode = "error"
with pytest.raises(StateNotContainedError):
x["values"] = [0, 2, 0]
with pytest.raises(StateNotContainedError):
x["values"] = [0, 0, -3]
with pytest.raises(StateNotContainedError):
x["values"] = [2, 2, -3]
with pytest.raises(StateNotContainedError):
x["values"] = [-2, -2, 2]
x["values"] = [0, 2, -3]
x.clipping_mode = "warning"
with pytest.warns(StateNotContainedWarning):
x["values"] = [0, 2, 0]
with pytest.warns(StateNotContainedWarning):
x["values"] = [0, 0, -3]
with pytest.warns(StateNotContainedWarning):
x["values"] = [2, 2, -3]
with pytest.warns(StateNotContainedWarning):
x["values"] = [-2, -2, 2]
x["values"] = [0, 2, -3]
x.clipping_mode = "clip"
x["values"] = [0, 2, 0]
print(x["values"])
assert x["values"] == [
numpy.array([[0.0]], dtype=numpy.float32),
numpy.array([[2]], dtype=numpy.int16),
numpy.array([[-1]], dtype=numpy.int16),
]
x["values"] = [0, 0, -3]
assert x["values"] == [
numpy.array([[0.0]], dtype=numpy.float32),
numpy.array([[1]], dtype=numpy.int16),
numpy.array([[-3]], dtype=numpy.int16),
]
x["values"] = [2, 2, -3]
assert x["values"] == [
numpy.array([[1.0]], dtype=numpy.float32),
numpy.array([[2]], dtype=numpy.int16),
numpy.array([[-3]], dtype=numpy.int16),
]
x["values"] = [-2, -2, 2]
assert x["values"] == [
numpy.array([[-1.0]], dtype=numpy.float32),
numpy.array([[1]], dtype=numpy.int16),
numpy.array([[-1]], dtype=numpy.int16),
]
x["values"] = [0, 2, -3]
# ====================== Typing priority
# This test is currently not passed, solve this.
x.clipping_mode = "error"
x.typing_priority = "space"
x["values"] = [0, 2.0, -3.0]
assert x["values"][0].dtype == numpy.float32
assert x["values"][1].dtype == numpy.int16
assert x["values"][2].dtype == numpy.int16
x.typing_priority = "value"
x["values"] = [0, 2.0, -3.0]
assert x["values"][0].dtype == numpy.int64
assert x["values"][1].dtype == numpy.float64
assert x["values"][2].dtype == numpy.float64
def test_init():
"""Tests the initializer functions."""
test_init_simple()
test_init_more_complex()
####### Comparisons
###### __eq__
###### __lt__
###### __gt__
###### __le__
###### __ge__
def test_compare_eq():
x = StateElement(
values=1.0,
spaces=Space(
[
numpy.array([-1], dtype=numpy.float32),
numpy.array([1], dtype=numpy.float32),
]
),
)
y = StateElement(
values=[0, 2, -4],
spaces=[
Space(
[
numpy.array([-1], dtype=numpy.float32),
numpy.array([1], dtype=numpy.float32),
]
),
Space([numpy.array([1, 2, 3], dtype=numpy.int16)]),
Space([numpy.array([-6, -5, -4, -3, -2, -1], dtype=numpy.int16)]),
],
)
assert x == StateElement(
values=1.0,
spaces=Space(
[
numpy.array([-1], dtype=numpy.float32),
numpy.array([1], dtype=numpy.float32),
]
),
)
assert x != StateElement(
values=0.5,
spaces=Space(
[
numpy.array([-1], dtype=numpy.float32),
numpy.array([1], dtype=numpy.float32),
]
),
)
assert x != StateElement(
values=1.0,
spaces=Space(
[
numpy.array([-1], dtype=numpy.float64),
numpy.array([1], dtype=numpy.float64),
]
),
)
assert x == StateElement(
values=numpy.array([1.0], dtype=numpy.float32),
spaces=Space(
[
numpy.array([-1], dtype=numpy.float64),
numpy.array([1], dtype=numpy.float64),
]
),
typing_priority="value",
)
assert y == StateElement(
values=[0, 2, -4],
spaces=[
Space(
[
numpy.array([-1], dtype=numpy.float32),
numpy.array([1], dtype=numpy.float32),
]
),
Space([numpy.array([1, 2, 3], dtype=numpy.int16)]),
Space([numpy.array([-6, -5, -4, -3, -2, -1], dtype=numpy.int16)]),
],
)
assert y != StateElement(
values=[0, 3, -4],
spaces=[
Space(
[
numpy.array([-1], dtype=numpy.float32),
numpy.array([1], dtype=numpy.float32),
]
),
Space([numpy.array([1, 2, 3], dtype=numpy.int16)]),
Space([numpy.array([-6, -5, -4, -3, -2, -1], dtype=numpy.int16)]),
],
)
def test_compare_lt():
pass
def test_compare_gt():
pass
def test_compare_le():
pass
def test_compare_ge():
pass
def test_comparison():
"""Tests the comparison methods."""
test_compare_eq()
test_compare_lt()
test_compare_gt()
test_compare_le()
test_compare_ge()
####### Arithmetic
###### __neg__
###### __add__
###### __radd__
###### __sub__
###### __rsub__
###### __mul__
###### __rmul__
###### __pow__
###### __matmul__
###### __rmatmul__
x = StateElement(
values=0.2,
spaces=Space(
[
numpy.array([-1], dtype=numpy.float32),
numpy.array([1], dtype=numpy.float32),
]
),
)
y = StateElement(
values=0.2,
spaces=Space(
[
numpy.array([-1], dtype=numpy.float32),
            numpy.array([1], dtype=numpy.float32),
        ]
    ),
)
import numpy as np
import scipy.special as ss
import scipy.signal as ss2
import scipy
from numpy import abs, sin, cos, real, exp, pi, sqrt
def psi_s(z, x, beta):
"""
2D longitudinal potential
Eq. (23) from Ref[1] with no constant factor (e*beta**2/2/rho**2).
Ref[1]: <NAME> and <NAME>, PRAB 23, 014402 (2020).
Note that 'x' here corresponds to 'chi = x / rho' in the paper.
"""
#try:
out = (cos(2 * alpha(z, x, beta)) - 1 / (1+x)) / (
kappa(z, x, beta) - beta * (1+x) * sin(2*alpha(z, x, beta)))
#except ZeroDivisionError:
# out = 0
# print(f"Oops! ZeroDivisionError at (z,x)= ({z:5.2f},{x:5.2f}). Returning 0.")
return np.nan_to_num(out)
def psi_x_where_x_equals_zero(z, dx, beta):
"""
Evaluate psi_x close to x = 0
This is a rough approximation of the singularity across x = 0
"""
return (psi_x(z, -dx/2, beta) + psi_x(z, dx/2, beta))/2
@np.vectorize
def ss_ellipf(phi, m):
y = ss.ellipkinc(phi, m)
# y = np.float(y)
return y
@np.vectorize
def ss_ellipe(phi, m):
y = ss.ellipeinc(phi, m)
# y = np.float(y)
return y
def psi_x(z, x, beta):
"""
Eq.(24) from Ref[1] with argument zeta=0 and no constant factor e*beta**2/2/rho**2.
Note that 'x' here corresponds to 'chi = x/rho',
and 'z' here corresponds to 'xi = z/2/rho' in the paper.
"""
# z = np.float(z)
# x = np.float(x)
kap = kappa(z, x, beta)
alp = alpha(z, x, beta)
arg2 = -4 * (1+x) / x**2
try:
T1 = (1/abs(x)/(1 + x) * ((2 + 2*x + x**2) * ss.ellipkinc(alp, arg2)- x**2 * ss.ellipeinc(alp, arg2)))
D = kap**2 - beta**2 * (1 + x)**2 * sin(2*alp)**2
T2 = ((kap**2 - 2*beta** 2 * (1+x)**2 + beta**2 * (1+x) * (2 + 2*x + x**2) * cos(2*alp))/ beta/ (1+x)/ D)
T3 = -kap * sin(2 * alp) / D
T4 = kap * beta ** 2 * (1 + x) * sin(2 * alp) * cos(2 * alp) / D
T5 = 1 / abs(x) * ss.ellipkinc(alp, arg2) # psi_phi without e/rho**2 factor
out = real((T1 + T2 + T3 + T4) - 2 / beta ** 2 * T5)
except ZeroDivisionError:
out = 0
# print(f"Oops! ZeroDivisionError at (z,x)= ({z:5.2f},{x:5.2f}). Returning 0.")
return np.nan_to_num(out)
def nu(x, beta):
"""
    Eq. (6) from Ref[1] (coefficient of alpha**2)
Note that 'x' here corresponds to 'chi = x/rho' in the paper.
"""
return 3 * (1 - beta**2 - beta**2*x) / beta**2 / (1+x)
def eta(z, x, beta):
"""
    Eq. (6) from Ref[1] (coefficient of alpha)
Note that 'x' here corresponds to 'chi = x/rho',
and 'z' here corresponds to 'xi = z/2/rho' in the paper.
"""
return -6 * z / beta**2 / (1+x)
def zeta(z, x, beta):
"""
Eq. (6) from Ref[1] (constant term)
Note that 'x' here corresponds to 'chi = x/rho',
and 'z' here corresponds to 'xi = z/2/rho' in the paper.
"""
return 3 * (4* z**2 - beta**2 * x**2) / 4 / beta**2 / (1+x)
def Omega(z, x, beta):
"""
Eq. (A3) from Ref[1]
Note that 'x' here corresponds to 'chi = x/rho',
and 'z' here corresponds to 'xi = z/2/rho' in the paper.
"""
temp = (eta(z, x, beta)**2/16
- zeta(z, x, beta) * nu(x, beta)/6
+ nu(x, beta)**3/216)
return temp + (temp**2 - (zeta(z, x, beta)/3 + nu(x, beta)**2/36)**3)**(1/2)
def m(z, x, beta):
"""
Eq. (A2) from Ref[1]
Note that 'x' here corresponds to 'chi = x/rho',
and 'z' here corresponds to 'xi = z/2/rho' in the paper.
"""
return (-nu(x, beta)/3
+ (zeta(z, x, beta)/3 + nu(x, beta)**2/36) * Omega(z, x, beta)**(-1/3)
+ Omega(z, x, beta)**(1/3))
def alpha_where_z_equals_zero(x, beta):
"""
Evaluate alpha(z,x) when z is zero.
Eq. (24) from Ref[1] simplifies to a quadratic equation for alpha^2.
"""
b = nu(x,beta)
c = -3*(beta**2 * x**2)/4/beta**2/(1+x)
root1 = (-b + sqrt(b**2 - 4*c))/2
# root2 = (-b - sqrt(b**2 - 4*c))/2
# since b>0, root2 is always negative and discarded
return sqrt(root1)
def alpha_where_z_not_zero(z, x, beta):
"""
Eq. (A4) from Ref[1]
Note that 'x' here corresponds to 'chi = x/rho',
and 'z' here corresponds to 'xi = z/2/rho' in the paper.
"""
arg1 = sqrt(2 * abs(m(z, x, beta)))
arg2 = -2 * (m(z, x, beta) + nu(x, beta))
arg3 = 2 * eta(z, x, beta) / arg1
zsign=np.sign(z)
return np.real(1 / 2 * (zsign*arg1 + sqrt(abs(arg2 -zsign*arg3))))
def alpha_old(z, x, beta):
"""
Eq. (A4) from Ref[1]
Note that 'x' here corresponds to 'chi = x/rho',
and 'z' here corresponds to 'xi = z/2/rho' in the paper.
"""
#return np.where(z==0, alpha_where_z_equals_zero(x, beta), alpha_where_z_not_zero(z, x, beta) )
out = np.empty(x.shape)
z_is_zero = z == 0
ix1 = np.where(z_is_zero)
ix2 = np.where(~z_is_zero)
out[ix1] = alpha_where_z_equals_zero(x[ix1], beta[ix1])
out[ix2] = alpha_where_z_not_zero(z[ix2], x[ix2], beta[ix2])
#print('ix1:', ix1)
#print('ix2:', ix2)
return out
def alpha(z, x, beta):
on_x_axis = z == 0
# Check for scalar, then return the normal functions
if not isinstance(z, np.ndarray):
if on_x_axis:
return alpha_where_z_equals_zero(x, beta)
else:
return alpha_where_z_not_zero(z, x, beta)
# Array z
out = np.empty(z.shape)
ix1 = np.where(on_x_axis)
ix2 = np.where(~on_x_axis)
if len(ix1)==0:
print('ix1:', ix1)
print(z)
# Check for arrays
if isinstance(x, np.ndarray):
x1 = x[ix1]
x2 = x[ix2]
else:
x1 = x
x2 = x
if isinstance(beta, np.ndarray):
beta1 = beta[ix1]
beta2 = beta[ix2]
else:
beta1 = beta
beta2 = beta
out[ix1] = alpha_where_z_equals_zero(x1, beta1)
out[ix2] = alpha_where_z_not_zero(z[ix2], x2, beta2)
return out
@np.vectorize
def alpha_exact(z, x, beta):
"""
Exact alpha calculation using numerical root finding.
For testing only!
Eq. (23) from Ref[1]
"""
f = lambda a: a - beta/2*sqrt(x**2 + 4*(1+x)*np.sin(a)**2 ) - z
res = scipy.optimize.root_scalar(f, bracket=(-1,1))
return res.root
def kappa(z, x, beta):
"""
Eq. (13) from Ref[1] with argument zeta = 0.
Note that 'x' here corresponds to 'chi = x/rho',
and 'z' here corresponds to 'xi = z/2/rho' in the paper.
"""
return (x**2 + 4*(1+x) * sin(alpha(z, x, beta))**2)**(1/2)
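# Illustrative usage sketch (an addition, not part of the original module):
# evaluate the dimensionless potentials at a single (z, x) point. The helper
# name and the chosen gamma are assumptions made purely for demonstration.
def _example_psi_point(gamma=500.0):
    beta_demo = (1 - 1 / gamma**2)**0.5
    z_demo, x_demo = 1e-3, 5e-4  # 'xi = z/2/rho' and 'chi = x/rho'
    return (psi_s(z_demo, x_demo, beta_demo),
            psi_x(z_demo, x_demo, beta_demo),
            alpha(z_demo, x_demo, beta_demo),
            kappa(z_demo, x_demo, beta_demo))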
### Functions below are obsolete
def lambda_p_Gauss(z, x):
"""
The z derivative of a 2D Gaussian G(z,x)
"""
sigmaz = 10e-6
sigmax = 10e-6
return (
1/(2*pi*sigmaz*sigmax)
* exp(-x**2 / 2 / sigmax**2)
* exp(-z**2 / 2 / sigmaz**2)
* (-z / sigmaz**2))
def make_2dgrid(func, zmin, zmax, dz, xmin, xmax, dx):
"""
Make a 2D grid of a function
"""
zvec = np.arange(zmin, zmax, dz)
xvec = np.arange(xmin, xmax, dx)
list2d = [[func(i, j) for j in xvec] for i in zvec]
return np.array(list2d, dtype=float)
def WsOld(gamma, rho, sigmaz, sigmax, dz, dx):
"""
Apply 2D convolution to compute the longitudinal wake Ws on a grid
Also returns the zvec and xvec which define the grid
Still needs to improve the convolution step
"""
beta = (1 - 1 / gamma ** 2) ** (1 / 2)
zvec = np.arange(-5 * sigmaz, 5 * sigmaz, dz)
xvec = | np.arange(-5 * sigmax, 5 * sigmax, dx) | numpy.arange |
# Copyright 2020 The PyMC Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
import copy
import numpy as np
import theano.tensor as tt
from scipy.linalg import cholesky
from scipy.special import logsumexp
from scipy.stats import multivariate_normal, median_abs_deviation
from scipy.optimize import minimize, approx_fprime
from theano import function as theano_function
import arviz as az
import jax
import jax.numpy as jnp
from jax.experimental import optimizers as jax_optimizers
import time
import pymc3 as pm
import pymc3.nfmc.posdef as posdef
from pymc3.tuning.scaling import find_hessian
from pymc3.tuning.starting import find_MAP
from pymc3.backends.ndarray import NDArray, point_list_to_multitrace
from pymc3.blocking import ArrayOrdering, DictToArrayBijection
from pymc3.model import Point, modelcontext, set_data
from pymc3.distributions.distribution import draw_values, to_tuple
from pymc3.sampling import sample_prior_predictive
from pymc3.theanof import (
floatX,
inputvars,
join_nonshared_inputs,
make_shared_replacements,
gradient,
hessian,
)
from pymc3.util import (
check_start_vals,
get_default_varnames,
get_var_name,
update_start_vals,
)
from pymc3.vartypes import discrete_types, typefilter
# SINF code for fitting the normalizing flow.
from pymc3.sinf.GIS import GIS
import torch
# This is a global variable used to store the optimization steps.
# Presumably there's a nicer way to do this.
param_store = []
class NFMC:
"""Sequential type normalizing flow based sampling/global approx."""
def __init__(
self,
draws=500,
init_draws=500,
resampling_draws=500,
init_ess=100,
sample_mode='reinit',
cull_lowp_tol=0.05,
model=None,
init_method='prior',
init_samples=None,
start=None,
init_EL2O='adam',
use_hess_EL2O=False,
mean_field_EL2O=False,
absEL2O=1e-10,
fracEL2O=1e-2,
EL2O_draws=100,
maxiter_EL2O=500,
EL2O_optim_method='L-BFGS-B',
scipy_map_method='L-BFGS-B',
adam_lr=1e-3,
adam_b1=0.9,
adam_b2=0.999,
adam_eps=1.0e-8,
adam_steps=1000,
simulator=None,
model_data=None,
sim_data_cov=None,
sim_size=None,
sim_params=None,
sim_start=None,
sim_optim_method='lbfgs',
sim_tol=0.01,
local_thresh=3,
local_step_size=0.1,
local_grad=True,
init_local=True,
nf_local_iter=0,
max_line_search=100,
random_seed=-1,
chain=0,
frac_validate=0.1,
iteration=None,
final_iteration=None,
alpha=(0,0),
final_alpha=(0.75,0.75),
optim_iter=1000,
ftol=2.220446049250313e-9,
gtol=1.0e-5,
k_trunc=0.25,
verbose=False,
n_component=None,
interp_nbin=None,
KDE=True,
bw_factor_min=0.5,
bw_factor_max=2.5,
bw_factor_num=11,
edge_bins=None,
ndata_wT=None,
MSWD_max_iter=None,
NBfirstlayer=True,
logit=False,
Whiten=False,
batchsize=None,
nocuda=False,
patch=False,
shape=[28,28,1],
redraw=True,
):
self.draws = draws
self.init_draws = init_draws
self.resampling_draws = resampling_draws
self.init_ess = init_ess
self.sample_mode = sample_mode
self.cull_lowp_tol = cull_lowp_tol
self.model = model
# Init method params.
self.init_method = init_method
self.init_samples = init_samples
self.start = start
self.init_EL2O = init_EL2O
self.mean_field_EL2O = mean_field_EL2O
self.use_hess_EL2O = use_hess_EL2O
self.absEL2O = absEL2O
self.fracEL2O = fracEL2O
self.EL2O_draws = EL2O_draws
self.maxiter_EL2O = maxiter_EL2O
self.EL2O_optim_method = EL2O_optim_method
self.scipy_map_method = scipy_map_method
self.adam_lr = adam_lr
self.adam_b1 = adam_b1
self.adam_b2 = adam_b2
self.adam_eps = adam_eps
self.adam_steps = adam_steps
self.simulator = simulator
self.model_data = model_data
self.sim_data_cov = sim_data_cov
self.sim_size = sim_size
self.sim_params = sim_params
self.sim_start = sim_start
self.sim_optim_method = sim_optim_method
self.sim_tol = sim_tol
# Local exploration params.
self.local_thresh = local_thresh
self.local_step_size = local_step_size
self.local_grad = local_grad
self.init_local = init_local
self.nf_local_iter = nf_local_iter
self.max_line_search = max_line_search
self.random_seed = random_seed
self.chain = chain
# Set the torch seed.
if self.random_seed != -1:
np.random.seed(self.random_seed)
torch.manual_seed(self.random_seed)
# Separating out so I can keep track. These are SINF params.
assert 0.0 <= frac_validate <= 1.0
self.frac_validate = frac_validate
self.iteration = iteration
self.final_iteration = final_iteration
self.alpha = alpha
self.final_alpha = final_alpha
self.optim_iter = optim_iter
self.ftol = ftol
self.gtol = gtol
self.k_trunc = k_trunc
self.verbose = verbose
self.n_component = n_component
self.interp_nbin = interp_nbin
self.KDE = KDE
self.bw_factors = np.logspace(bw_factor_min, bw_factor_max, bw_factor_num)
self.edge_bins = edge_bins
self.ndata_wT = ndata_wT
self.MSWD_max_iter = MSWD_max_iter
self.NBfirstlayer = NBfirstlayer
self.logit = logit
self.Whiten = Whiten
self.batchsize = batchsize
self.nocuda = nocuda
self.patch = patch
self.shape = shape
#whether to redraw samples at every iteration, used for BO testing
self.redraw = redraw
self.model = modelcontext(model)
if self.random_seed != -1:
np.random.seed(self.random_seed)
self.variables = inputvars(self.model.vars)
def initialize_var_info(self):
"""Extract variable info for the model instance."""
var_info = OrderedDict()
init = self.model.test_point
for v in self.variables:
var_info[v.name] = (init[v.name].shape, init[v.name].size)
self.var_info = var_info
def initialize_population(self):
"""Create an initial population from the prior distribution."""
population = []
if self.init_samples is None:
init_rnd = sample_prior_predictive(
self.init_draws,
var_names=[v.name for v in self.model.unobserved_RVs],
model=self.model,
)
for i in range(self.init_draws):
point = Point({v.name: init_rnd[v.name][i] for v in self.variables}, model=self.model)
population.append(self.model.dict_to_array(point))
self.prior_samples = np.array(floatX(population))
elif self.init_samples is not None:
self.prior_samples = np.copy(self.init_samples)
self.weighted_samples = np.copy(self.prior_samples)
self.nf_samples = np.copy(self.weighted_samples)
self.get_posterior_logp()
self.get_prior_logp()
self.log_weight = self.posterior_logp - self.prior_logp
self.log_evidence = logsumexp(self.log_weight) - np.log(len(self.log_weight))
self.evidence = np.exp(self.log_evidence)
self.log_weight = self.log_weight - self.log_evidence
self.regularize_weights()
#same as in fitnf but prior~q
self.log_weight_pq_num = self.posterior_logp + 2*self.prior_logp
self.log_weight_pq_den = 3*self.prior_logp
self.log_evidence_pq = logsumexp(self.log_weight_pq_num) - logsumexp(self.log_weight_pq_den)
self.evidence_pq = np.exp(self.log_evidence_pq)
#sum of mean loss (p - q*Z_pq)^2 /N for diagnostic purposes
self.log_mean_loss = np.log(np.mean( ( np.exp(self.posterior_logp) - np.exp(self.prior_logp+self.log_evidence_pq) )**2 ))
self.init_weights_cleanup(lambda x: self.prior_logp(x), lambda x: self.prior_dlogp(x))
self.q_ess = self.calculate_ess(self.log_weight)
self.total_ess = self.calculate_ess(self.sinf_logw)
self.all_logq = np.array([])
self.nf_models = []
def setup_logp(self):
"""Set up the prior and likelihood logp functions, and derivatives."""
shared = make_shared_replacements(self.variables, self.model)
self.prior_logp_func = logp_forw([self.model.varlogpt], self.variables, shared)
self.prior_dlogp_func = logp_forw([gradient(self.model.varlogpt, self.variables)], self.variables, shared)
self.likelihood_logp_func = logp_forw([self.model.datalogpt], self.variables, shared)
self.posterior_logp_func = logp_forw([self.model.logpt], self.variables, shared)
self.posterior_dlogp_func = logp_forw([gradient(self.model.logpt, self.variables)], self.variables, shared)
self.posterior_hessian_func = logp_forw([hessian(self.model.logpt, self.variables)], self.variables, shared)
self.posterior_logp_nojac = logp_forw([self.model.logp_nojact], self.variables, shared)
self.posterior_dlogp_nojac = logp_forw([gradient(self.model.logp_nojact, self.variables)], self.variables, shared)
self.posterior_hessian_nojac = logp_forw([hessian(self.model.logp_nojact, self.variables)], self.variables, shared)
def get_prior_logp(self):
"""Get the prior log probabilities."""
priors = [self.prior_logp_func(sample) for sample in self.nf_samples]
self.prior_logp = np.array(priors).squeeze()
def get_likelihood_logp(self):
"""Get the likelihood log probabilities."""
likelihoods = [self.likelihood_logp_func(sample) for sample in self.nf_samples]
self.likelihood_logp = np.array(likelihoods).squeeze()
def get_posterior_logp(self):
"""Get the posterior log probabilities."""
posteriors = [self.posterior_logp_func(sample) for sample in self.nf_samples]
self.posterior_logp = np.array(posteriors).squeeze()
def optim_target_logp(self, param_vals):
"""Optimization target function"""
return -1.0 * self.posterior_logp_func(param_vals)
def optim_target_dlogp(self, param_vals):
return -1.0 * self.posterior_dlogp_func(param_vals)
def optim_target_logp_nojac(self, param_vals):
"""Optimization target function"""
return -1.0 * self.posterior_logp_nojac(param_vals)
def optim_target_dlogp_nojac(self, param_vals):
return -1.0 * self.posterior_dlogp_nojac(param_vals)
def prior_dlogp(self, param_vals):
dlogps = [self.prior_dlogp_func(val) for val in param_vals]
return np.array(dlogps).squeeze()
def target_logp(self, param_vals):
logps = [self.posterior_logp_func(val) for val in param_vals]
return np.array(logps).squeeze()
def target_dlogp(self, param_vals):
dlogps = [self.posterior_dlogp_func(val) for val in param_vals]
return np.array(dlogps).squeeze()
def target_hessian(self, param_vals):
hessians = [self.posterior_hessian_func(val) for val in param_vals]
return np.array(hessians).squeeze()
def target_logp_nojac(self, param_vals):
logps = [self.posterior_logp_nojac(val) for val in param_vals]
return np.array(logps).squeeze()
def target_dlogp_nojac(self, param_vals):
dlogps = [self.posterior_dlogp_nojac(val) for val in param_vals]
return np.array(dlogps).squeeze()
def target_hessian_nojac(self, param_vals):
hessians = [self.posterior_hessian_nojac(val) for val in param_vals]
return np.array(hessians).squeeze()
def sinf_logq(self, param_vals):
if param_vals.size == 1:
param_vals = np.array([param_vals])
sinf_logq = self.nf_model.evaluate_density(torch.from_numpy(param_vals.astype(np.float32))).numpy().astype(np.float64)
return sinf_logq.item()
def sinf_dlogq(self, param_vals):
if param_vals.size == 1:
param_vals = np.array([param_vals])
sinf_dlogq = self.nf_model.score(torch.from_numpy(param_vals.astype(np.float32))).numpy().astype(np.float64)
return sinf_dlogq.squeeze()
def callback(self, xk):
self.optim_iter_samples = np.append(self.optim_iter_samples, np.array([xk]), axis=0)
def optimize(self, sample):
"""Optimize the prior samples"""
self.optim_iter_samples = np.array([sample])
minimize(self.optim_target_logp, x0=sample, method=self.scipy_map_method,
options={'maxiter': self.optim_iter, 'ftol': self.ftol, 'gtol': self.gtol},
jac=self.optim_target_dlogp, callback=self.callback)
return self.optim_iter_samples
def get_MAP(self, map_method='adam', map_start=None):
"""Get the MAP estimate."""
if map_start is None:
map_start = self.start
if map_method == 'adam':
self.optimization_start()
opt_init, opt_update, get_params = jax_optimizers.adam(step_size=self.adam_lr, b1=self.adam_b1,
b2=self.adam_b2, eps=self.adam_eps)
opt_state = opt_init(map_start)
for i in range(self.adam_steps):
value, opt_state, update_params = self.update_adam(i, opt_state, opt_update, get_params)
target_diff = np.abs((value - np.float64(self.adam_logp(floatX(update_params)))) /
max(value, np.float64(self.adam_logp(floatX(update_params)))))
if target_diff <= self.ftol:
print(f'ADAM converged at step {i}')
break
vars = get_default_varnames(self.model.unobserved_RVs, include_transformed=True)
map_dict = {var.name: value for var, value in zip(vars, self.model.fastfn(vars)(self.bij.rmap(update_params.squeeze())))}
else:
map_dict = find_MAP(start=map_start, model=self.model, method=self.scipy_map_method)
return map_dict
def regularize_weights(self):
"""Apply clipping to importance weights."""
inf_weights = np.isinf(np.exp(self.log_weight))
self.log_weight = np.clip(self.log_weight, a_min=None, a_max=logsumexp(self.log_weight[~inf_weights])
- np.log(len(self.log_weight[~inf_weights])) + self.k_trunc * np.log(len(self.log_weight)))
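# (The cap equals mean(weight) * N**k_trunc, i.e. truncated importance
# sampling; k_trunc = 0.5 would reproduce the usual sqrt(N) truncation.)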
self.weights = np.exp(self.log_weight)
def regularize_weights_pq(self):
"""Apply clipping to pq importance weights."""
inf_weights = np.isinf(np.exp(self.log_weight_pq))
self.log_weight_pq = np.clip(self.log_weight_pq, a_min=None, a_max=logsumexp(self.log_weight_pq[~inf_weights])
- np.log(len(self.log_weight_pq[~inf_weights])) + self.k_trunc * np.log(len(self.log_weight_pq)))
self.weights_pq = np.exp(self.log_weight_pq)
def calculate_ess(self, logw):
"""Calculate ESS given a set of sample weights"""
logw = logw - logsumexp(logw)
ess = np.exp(-logsumexp(2 * logw) - np.log(logw.shape[0]))
return ess
def calculate_weight_variance(self):
"""Calculates the variance of importance weights for a given q."""
return np.var(self.weights)
def shrink_init(self, mu, sigma):
"""Shrinks the initialization until we acheive some ESS."""
while self.q_ess * self.init_draws < self.init_ess:
previous_q_ess = 1.0 * self.q_ess
print(f'Shrinking initialization to improve ESS. Current ESS: {self.q_ess * self.init_draws}')
sigma = sigma / 2
self.weighted_samples = np.random.multivariate_normal(mu, sigma, size=self.init_draws)
self.nf_samples = np.copy(self.weighted_samples)
self.get_posterior_logp()
self.log_weight = self.posterior_logp - multivariate_normal.logpdf(self.nf_samples, mu.squeeze(), sigma, allow_singular=True)
self.log_evidence = logsumexp(self.log_weight) - np.log(len(self.log_weight))
self.evidence = np.exp(self.log_evidence)
self.log_weight = self.log_weight - self.log_evidence
#same as in fitnf but prior~q
self.log_weight_pq_num = self.posterior_logp + 2 * multivariate_normal.logpdf(self.nf_samples, mu.squeeze(), sigma, allow_singular=True)
self.log_weight_pq_den = 3*multivariate_normal.logpdf(self.nf_samples, mu.squeeze(), sigma, allow_singular=True)
self.log_evidence_pq = logsumexp(self.log_weight_pq_num) - logsumexp(self.log_weight_pq_den)
self.evidence_pq = np.exp(self.log_evidence_pq)
self.regularize_weights()
self.q_ess = self.calculate_ess(self.log_weight)
self.total_ess = self.calculate_ess(self.sinf_logw)
return sigma
def init_weights_cleanup(self, logq_func=None, dlogq_func=None):
"""Finish initializing the first importance weights (including possible local exploration)."""
self.sinf_logw = np.copy(self.log_weight)
self.importance_weights = | np.copy(self.weights) | numpy.copy |
import numpy as np
import os
from kcsd import csd_profile as CSD
from kcsd import KCSD2D
from scipy.integrate import simps
from scipy.interpolate import griddata
from figure_properties import *
import matplotlib.pyplot as plt
import matplotlib.cm as cm
def integrate_2d(csd_at, true_csd, ele_pos, h, csd_lims):
csd_x, csd_y = csd_at
xlin = csd_lims[0]
ylin = csd_lims[1]
Ny = ylin.shape[0]
m = | np.sqrt((ele_pos[0] - csd_x)**2 + (ele_pos[1] - csd_y)**2) | numpy.sqrt |
"""
This module provides the `PerformanceMetrics` class and supporting
functionality for tracking and computing model performance.
"""
from collections import defaultdict, namedtuple
import logging
import os
import warnings
import pandas as pd
import numpy as np
from sklearn.metrics import average_precision_score
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
from scipy.stats import rankdata
logger = logging.getLogger("selene")
Metric = namedtuple("Metric", ["fn", "transform", "data"])
"""
A tuple containing a metric function and the results from applying that
metric to some values.
Parameters
----------
fn : types.FunctionType
A metric.
transform : types.FunctionType
A transform function which should be applied to data before measuring metric
data : list(float)
A list holding the results from applying the metric.
Attributes
----------
fn : types.FunctionType
A metric.
transform : types.FunctionType
A transform function which should be applied to data before measuring metric
data : list(float)
A list holding the results from applying the metric.
"""
def visualize_roc_curves(prediction,
target,
output_dir,
target_mask=None,
report_gt_feature_n_positives=50,
style="seaborn-colorblind",
fig_title="Feature ROC curves",
dpi=500):
"""
Output the ROC curves for each feature predicted by a model
as an SVG.
Parameters
----------
prediction : numpy.ndarray
Value predicted by user model.
target : numpy.ndarray
True value that the user model was trying to predict.
output_dir : str
The path to the directory to output the figures. Directories that
do not currently exist will be automatically created.
report_gt_feature_n_positives : int, optional
Default is 50. Do not visualize an ROC curve for a feature with
less than 50 positive examples in `target`.
style : str, optional
Default is "seaborn-colorblind". Specify a style available in
`matplotlib.pyplot.style.available` to use.
fig_title : str, optional
Default is "Feature ROC curves". Set the figure title.
dpi : int, optional
Default is 500. Specify dots per inch (resolution) of the figure.
Returns
-------
None
Outputs the figure in `output_dir`.
"""
os.makedirs(output_dir, exist_ok=True)
import matplotlib
backend = matplotlib.get_backend()
if "inline" not in backend:
matplotlib.use("SVG")
import matplotlib.pyplot as plt
plt.style.use(style)
plt.figure()
n_features = prediction.shape[-1]
for index in range(n_features):
feature_preds = prediction[..., index]
feature_targets = target[..., index]
if target_mask is not None:
feature_mask = target_mask[..., index]
# if mask is n_samples x n_cell_types,
# feature_targets and feature_preds get flattened but that's ok
# b/c each item is a separate sample anyway
feature_targets = feature_targets[feature_mask]
feature_preds = feature_preds[feature_mask]
if len(np.unique(feature_targets)) > 1 and \
np.sum(feature_targets) > report_gt_feature_n_positives:
fpr, tpr, _ = roc_curve(feature_targets, feature_preds)
plt.plot(fpr, tpr, 'r-', color="black", alpha=0.3, lw=1)
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
if fig_title:
plt.title(fig_title)
plt.savefig(os.path.join(output_dir, "roc_curves.svg"),
format="svg",
dpi=dpi)
def visualize_precision_recall_curves(
prediction,
target,
output_dir,
target_mask=None,
report_gt_feature_n_positives=50,
style="seaborn-colorblind",
fig_title="Feature precision-recall curves",
dpi=500):
"""
Output the precision-recall (PR) curves for each feature predicted by
a model as an SVG.
Parameters
----------
prediction : numpy.ndarray
Value predicted by user model.
target : numpy.ndarray
True value that the user model was trying to predict.
output_dir : str
The path to the directory to output the figures. Directories that
do not currently exist will be automatically created.
report_gt_feature_n_positives : int, optional
Default is 50. Do not visualize a PR curve for a feature with
less than 50 positive examples in `target`.
style : str, optional
Default is "seaborn-colorblind". Specify a style available in
`matplotlib.pyplot.style.available` to use.
fig_title : str, optional
Default is "Feature precision-recall curves". Set the figure title.
dpi : int, optional
Default is 500. Specify dots per inch (resolution) of the figure.
Returns
-------
None
Outputs the figure in `output_dir`.
"""
os.makedirs(output_dir, exist_ok=True)
# TODO: fix this
import matplotlib
backend = matplotlib.get_backend()
if "inline" not in backend:
matplotlib.use("SVG")
import matplotlib.pyplot as plt
plt.style.use(style)
plt.figure()
n_features = prediction.shape[-1]
for index in range(n_features):
feature_preds = prediction[..., index]
feature_targets = target[..., index]
if target_mask is not None:
feature_mask = target_mask[..., index]
# if mask is n_samples x n_cell_types,
# feature_targets and feature_preds get flattened but that's ok
# b/c each item is a separate sample anyway
feature_targets = feature_targets[feature_mask]
feature_preds = feature_preds[feature_mask]
if len(np.unique(feature_targets)) > 1 and \
np.sum(feature_targets) > report_gt_feature_n_positives:
precision, recall, _ = precision_recall_curve(
feature_targets, feature_preds)
plt.step(
recall, precision, 'r-',
color="black", alpha=0.3, lw=1, where="post")
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('Recall')
plt.ylabel('Precision')
if fig_title:
plt.title(fig_title)
plt.savefig(os.path.join(output_dir, "precision_recall_curves.svg"),
format="svg",
dpi=dpi)
def compute_score(prediction, target, metric_fn, target_mask=None,
report_gt_feature_n_positives=10):
"""
Using a user-specified metric, computes the distance between
two tensors.
Parameters
----------
prediction : numpy.ndarray
Value predicted by user model.
target : numpy.ndarray
True value that the user model was trying to predict.
metric_fn : types.FunctionType
A metric that can measure the distance between the prediction
and target variables.
target_mask: numpy.ndarray, optional
A mask of shape `target.shape` that indicates which values
should be considered when computing the scores.
report_gt_feature_n_positives : int, optional
Default is 10. The minimum number of positive examples for a
feature in order to compute the score for it.
Returns
-------
average_score, feature_scores : tuple(float, numpy.ndarray)
A tuple containing the average of all feature scores, and a
vector containing the scores for each feature. If there were
no features meeting our filtering thresholds, will return
`(None, [])`.
"""
# prediction_shape:
# batch_size*n_batches, n_cell_types, n_features
n_features = prediction.shape[-1]
n_cell_types = prediction.shape[1]
track_scores = np.ones(shape=(n_cell_types,n_features)) * np.nan
for feature_index in range(n_features):
for cell_type_index in range(n_cell_types):
feature_preds = np.ravel(prediction[:, cell_type_index, feature_index])
feature_targets = | np.ravel(target[:, cell_type_index, feature_index]) | numpy.ravel |
import numpy as np
from scipy.linalg import expm
def cost(seq):
N=len(seq)
dt=2*np.pi/N
sx=1/2 * np.mat([[0,1],\
[1,0]], dtype=complex)
sz=1/2 * np.mat([[1,0],\
[0,-1]], dtype=complex)
U = np.matrix( | np.identity(2, dtype=complex) | numpy.identity |
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
# Author : <NAME>
# github link : https://github.com/amirshnll/Bank-Marketing
# dataset link : http://archive.ics.uci.edu/ml/datasets/Bank+Marketing
# email : <EMAIL>
# # Read data
# In[1]:
import pandas as pd
DataName='bank-full.csv'
print('-----------Loading Data------------------')
data_pd = pd.read_csv(DataName)
print(data_pd.describe)
# # Preprocessing
# In[3]:
print('-----------Preprocessing----------------')
from sklearn.preprocessing import LabelEncoder
import numpy as np
encoder = LabelEncoder()
dic_str={"job":1,"marital":2,"education":3,"default":4,"housing":6,"loan":7,"contact":8,"month":10,"poutcome":15,"y":16}
for col_name, col_idx in dic_str.items():
print("colums: {0} to int".format(dic_str[col_name]))
lbl_txt=np.array(data_pd.iloc[:,col_idx])
data_pd[col_name]=encoder.fit_transform(lbl_txt.reshape(-1, 1))
data_pd
# # normalize
# In[4]:
from sklearn import preprocessing
Data_lable=data_pd.iloc[:,-1]
Data_main=data_pd.iloc[:,:-1]
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
scaler.fit(Data_main)
Data_main=scaler.transform(Data_main)
print(Data_main)
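# Sanity check (illustrative addition, not in the original notebook): after
# MinMaxScaler every feature should lie in [0, 1] up to floating-point rounding.
assert Data_main.min() >= -1e-9 and Data_main.max() <= 1 + 1e-9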
# # 'DT','KNN','NB','MLP','LR'
# In[5]:
import numpy as np
import os
from pytictoc import TicToc
import matplotlib.pyplot as plt
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.naive_bayes import BernoulliNB
from sklearn.linear_model import LogisticRegression
from sklearn.neural_network import MLPClassifier
def run_all_algorithm(Train_data,Test_data,Train_lable,Test_lable,str_out):
print(np.shape(Test_data))
print(np.shape(Train_data))
print(np.shape(Train_lable))
print(np.shape(Test_lable))
algorithms_name=['DT','KNN','NB','MLP','LR']
alg_num=len(algorithms_name)
accuracy_array=np.zeros(alg_num)
precision_array=np.zeros(alg_num)
recall_array=np.zeros(alg_num)
f1_score_array=np.zeros(alg_num)
time_array=np.zeros(alg_num)
t = TicToc()
print('---------------------DT---------------------')
K=0;
t.tic() #Start timer
classifier_DT = DecisionTreeClassifier(max_depth=10,random_state=0)
classifier_DT.fit(Train_data, Train_lable)
Test_predict = classifier_DT.predict(Test_data)
Con_matrix=confusion_matrix(Test_lable, Test_predict)
TimeDT=t.tocvalue() #Time elapsed since t.tic()
classfi_report=classification_report(Test_lable, Test_predict,output_dict=True)
# save to array
accuracy_array[K]=accuracy_score(Test_lable, Test_predict)
precision_array[K]= classfi_report['macro avg']['precision']
recall_array[K]= classfi_report['macro avg']['recall']
f1_score_array[K]= classfi_report['macro avg']['f1-score']
time_array[K]=TimeDT
print('--------------NB----------------')
K+=1;
t.tic() #Start timer
classifier = BernoulliNB()
classifier.fit(Train_data, Train_lable)
Test_predict = classifier.predict(Test_data)
TimeNB=t.tocvalue() #Time elapsed since t.tic()
Con_matrix=confusion_matrix(Test_lable, Test_predict)
classfi_report=classification_report(Test_lable, Test_predict,output_dict=True)
# save to array
accuracy_array[K]=accuracy_score(Test_lable, Test_predict)
precision_array[K]= classfi_report['macro avg']['precision']
recall_array[K]= classfi_report['macro avg']['recall']
f1_score_array[K]= classfi_report['macro avg']['f1-score']
time_array[K]=TimeNB
print('---------------------KNN---------------------')
K+=1;
t.tic() #Start timer
classifier=KNeighborsClassifier(n_neighbors=100)
classifier.fit(Train_data, Train_lable)
Test_predict = classifier.predict(Test_data)
TimeKNN=t.tocvalue() #Time elapsed since t.tic()
Con_matrix=confusion_matrix(Test_lable, Test_predict)
classfi_report=classification_report(Test_lable, Test_predict,output_dict=True)
# save to array
accuracy_array[K]=accuracy_score(Test_lable, Test_predict)
precision_array[K]= classfi_report['macro avg']['precision']
recall_array[K]= classfi_report['macro avg']['recall']
f1_score_array[K]= classfi_report['macro avg']['f1-score']
time_array[K]=TimeKNN
print('---------------------MLP---------------------')
K+=1;
t.tic() #Start timer
classifier=MLPClassifier( solver='adam', random_state=0,hidden_layer_sizes=[15,5], max_iter=200)
classifier.fit(Train_data, Train_lable)
Test_predict = classifier.predict(Test_data)
TimeMLP=t.tocvalue() #Time elapsed since t.tic()
Con_matrix=confusion_matrix(Test_lable, Test_predict)
classfi_report=classification_report(Test_lable, Test_predict,output_dict=True)
# save to array
accuracy_array[K]=accuracy_score(Test_lable, Test_predict)
precision_array[K]= classfi_report['macro avg']['precision']
recall_array[K]= classfi_report['macro avg']['recall']
f1_score_array[K]= classfi_report['macro avg']['f1-score']
time_array[K]=TimeMLP
print('---------------------LogisticRegression---------------------')
K+=1;
t.tic() #Start timer
classifier=LogisticRegression()
classifier.fit(Train_data, Train_lable)
Test_predict = classifier.predict(Test_data)
TimeLR=t.tocvalue() #Time elapsed since t.tic()
Con_matrix=confusion_matrix(Test_lable, Test_predict)
classfi_report=classification_report(Test_lable, Test_predict,output_dict=True)
# save to array
accuracy_array[K]=accuracy_score(Test_lable, Test_predict)
precision_array[K]= classfi_report['macro avg']['precision']
recall_array[K]= classfi_report['macro avg']['recall']
f1_score_array[K]= classfi_report['macro avg']['f1-score']
time_array[K]=TimeLR
H=6
L=8
print('--------------------result--------------------------')
fig1=plt.figure(figsize=(H, L)) #
plt.bar(algorithms_name, accuracy_array,color = ['red', 'green'])
plt.xticks(algorithms_name, rotation=70)
plt.ylabel('percent%')
plt.title('Accuracy of all Algorithms')
plt.xlabel("Algorithm names")
for i, v in enumerate(accuracy_array):
v=round(v,2)
plt.text(i-0.2 , v+0.01 , str(v), color='blue', fontweight='bold')
fig1.show()
plt.savefig(os.path.join(str_out+' accuracy.png'), dpi=300, format='png', bbox_inches='tight') # use format='svg' or 'pdf' for vectorial pictures
fig2=plt.figure(figsize=(H, L)) #
plt.bar(algorithms_name, precision_array,color = ['red', 'green'])
plt.xticks(algorithms_name, rotation=70)
plt.ylabel('percent%')
plt.title('Precision of all Algorithms')
plt.xlabel("Algorithm names")
for i, v in enumerate(precision_array):
v=round(v,2)
plt.text(i-0.2 , v+0.01 , str(v), color='blue', fontweight='bold')
fig2.show()
plt.savefig(os.path.join(str_out+' precision.png'), dpi=300, format='png', bbox_inches='tight') # use format='svg' or 'pdf' for vectorial pictures
fig3=plt.figure(figsize=(H, L)) #
plt.bar(algorithms_name, recall_array,color = ['red', 'green'])
plt.xticks(algorithms_name, rotation=70)
plt.ylabel('percent%')
plt.title('Recall of all Algorithms')
plt.xlabel("Algorithm names")
for i, v in enumerate(recall_array):
v=round(v,2)
plt.text(i-0.2 , v+0.01 , str(v), color='blue', fontweight='bold')
fig3.show()
plt.savefig(os.path.join(str_out+' recall.png'), dpi=300, format='png', bbox_inches='tight') # use format='svg' or 'pdf' for vectorial pictures
fig4=plt.figure(figsize=(H, L)) #
plt.bar(algorithms_name, f1_score_array,color = ['red', 'green'])
plt.xticks(algorithms_name, rotation=70)
plt.ylabel('percent%')
plt.title('f1-score of all Algorithms')
plt.xlabel("Algorithm names")
for i, v in enumerate(f1_score_array):
v=round(v,2)
plt.text(i-0.2 , v+0.01 , str(v), color='blue', fontweight='bold')
fig4.show()
plt.savefig(os.path.join(str_out+' f1_score.png'), dpi=300, format='png', bbox_inches='tight') # use format='svg' or 'pdf' for vectorial pictures
fig5=plt.figure(figsize=(H, L)) #
plt.bar(algorithms_name, time_array,color = ['blue', 'green'])
plt.xticks(algorithms_name, rotation=70)
plt.ylabel('time(s)')
plt.title('time of all Algorithms')
plt.xlabel("Algorithm names")
for i, v in enumerate(time_array):
v=round(v,2)
plt.text(i-0.2 , v+0.01 , str(v), color='blue', fontweight='bold')
plt.savefig(os.path.join(str_out+' time.png'), dpi=300, format='png', bbox_inches='tight') # use format='svg' or 'pdf' for vectorial pictures
fig5.show()
np.savetxt(str_out+'accuracy.csv', accuracy_array, delimiter=',')
np.savetxt(str_out+' precision_array.csv', precision_array, delimiter=',')
| np.savetxt(str_out+'recall_array.csv', recall_array, delimiter=',') | numpy.savetxt |
import numpy.linalg as la
import numpy as np
def is_canonical(ttslice,center,eps=1e-8):
'''
Checks whether ttslice is in canonical form with a specific orthogonality center
and tolerance eps
'''
if center<0:
center=len(ttslice)+center
return is_left_canonical(ttslice[:center],eps) and is_right_canonical(ttslice[center+1:],eps)
def is_right_canonical(ttslice,eps=1e-8):
for m in ttslice[1:]:
mm=m.reshape((m.shape[0],-1))
if not np.allclose([email protected](),np.eye(mm.shape[0],like=mm),atol=eps):
return False
return True
def is_left_canonical(ttslice,eps=1e-8):
for m in ttslice[:-1]:
mm=m.reshape((-1,m.shape[-1]))
if not np.allclose(mm.T.conj()@mm,np.eye(mm.shape[1],like=mm),atol=eps):
return False
return True
def canonicalize(ttslice,center,qr=la.qr):
if center<0:
center=len(ttslice)+center
left_canonicalize(ttslice[:center+1],qr)
right_canonicalize(ttslice[center:],qr)
def shift_orthogonality_center(ttslice,oldcenter,newcenter,qr=la.qr):
'''
Shift the orthogonality center by performing qr decompositions step by step
'''
if oldcenter<0:
oldcenter=len(ttslice)+oldcenter
if newcenter<0:
newcenter=len(ttslice)+newcenter
if oldcenter>newcenter:
sslice=ttslice[newcenter:oldcenter+1]
right_canonicalize(sslice,qr=qr)
ttslice[newcenter:oldcenter+1]=sslice
elif newcenter>oldcenter:
sslice=ttslice[oldcenter:newcenter+1]
left_canonicalize(sslice,qr=qr)
ttslice[oldcenter:newcenter+1]=sslice
def left_canonicalize(ttslice,qr=la.qr):
'''
Bring ttslice to left canonical form, inplace
'''
car=ttslice[0].reshape((-1,ttslice[0].shape[-1]))
cshape=ttslice[0].shape
for i in range(1,len(ttslice)):
nshape=ttslice[i].shape
q,r=qr(car)
cshape=cshape[:-1]+(q.shape[-1],)
ttslice[i-1]=np.reshape(q,cshape)
car=r@ | np.reshape(ttslice[i],(nshape[0],-1)) | numpy.reshape |
#!/usr/bin/env python
from __future__ import division
"""@package etddf
ROS interface script for delta tiering filter
Filter operates in ENU
steps: get this to at least launch by itself
verify it works in sim for static sonar (fast scan) & dynamic agent -> plot the error (associator, no sonar control)
check the controller works statically - may need a correction here
"""
import rospy
from etddf_minau.msg import MeasurementPackage, NetworkEstimate, AssetEstimate, Measurement
from etddf_minau.srv import GetMeasurementPackage
import numpy as np
import tf
np.set_printoptions(suppress=True)
from copy import deepcopy
from std_msgs.msg import Header
from geometry_msgs.msg import PoseWithCovariance, Pose, Point, Quaternion, Twist, Vector3, TwistWithCovariance, PoseWithCovarianceStamped
from nav_msgs.msg import Odometry
from minau.msg import SonarTargetList, SonarTarget
from cuprint.cuprint import CUPrint
from deltatier.kf_filter import KalmanFilter
class ETDDF_Node:
def __init__(self):
self.my_name = rospy.get_param("~my_name")
self.cuprint = CUPrint("{}/etddf".format(self.my_name))
self.blue_agent_names = rospy.get_param("~blue_team_names")
blue_positions = rospy.get_param("~blue_team_positions")
self.topside_name = rospy.get_param("~topside_name")
assert self.topside_name not in self.blue_agent_names
red_agent_name = rospy.get_param("~red_team_name")
self.update_times = []
self.red_agent_exists = red_agent_name != ""
if self.red_agent_exists:
self.red_agent_name = red_agent_name
self.red_agent_id = len(self.blue_agent_names)
self.use_strapdown = rospy.get_param("~use_strapdown")
self.do_correct_strapdown = rospy.get_param("~correct_strapdown")
self.correct_strapdown_next_seq = False
self.position_process_noise = rospy.get_param("~position_process_noise")
self.velocity_process_noise = rospy.get_param("~velocity_process_noise")
self.fast_ci = rospy.get_param("~fast_ci")
self.force_modem_pose = rospy.get_param("~force_modem_pose")
self.meas_variances = {}
self.meas_variances["sonar_range"] = rospy.get_param("~force_sonar_range_var")
self.meas_variances["sonar_az"] = rospy.get_param("~force_sonar_az_var")
self.meas_variances["modem_range"] = rospy.get_param("~force_modem_range_var")
self.meas_variances["modem_az"] = rospy.get_param("~force_modem_az_var")
known_position_uncertainty = rospy.get_param("~known_position_uncertainty")
unknown_position_uncertainty = rospy.get_param("~unknown_position_uncertainty")
self.is_deltatier = rospy.get_param("~is_deltatier")
if self.is_deltatier:
self.delta_multipliers = rospy.get_param("~delta_tiers")
self.delta_codebook_table = {"sonar_range" : rospy.get_param("~sonar_range_start_et_delta"),
"sonar_azimuth" : rospy.get_param("~sonar_az_start_et_delta")}
self.buffer_size = rospy.get_param("~buffer_space")
if self.is_deltatier:
rospy.Service('etddf/get_measurement_package', GetMeasurementPackage, self.get_meas_pkg_callback)
self.kf = KalmanFilter(blue_positions, [], self.red_agent_exists, self.is_deltatier, \
known_posititon_unc=known_position_uncertainty,\
unknown_agent_unc=unknown_position_uncertainty)
self.network_pub = rospy.Publisher("etddf/estimate/network", NetworkEstimate, queue_size=10)
self.asset_pub_dict = {}
for asset in self.blue_agent_names:
self.asset_pub_dict[asset] = rospy.Publisher("etddf/estimate/" + asset, Odometry, queue_size=10)
if self.red_agent_exists:
self.asset_pub_dict[self.red_agent_name] = rospy.Publisher("etddf/estimate/" + self.red_agent_name, Odometry, queue_size=10)
self.last_update_time = rospy.get_rostime()
# Modem & Measurement Packages
rospy.Subscriber("etddf/packages_in", MeasurementPackage, self.meas_pkg_callback, queue_size=1)
# Strapdown configuration
self.update_seq = 0
self.strapdown_correction_period = rospy.get_param("~strapdown_correction_period")
strap_topic = "odometry/filtered/odom"
rospy.Subscriber( strap_topic, Odometry, self.nav_filter_callback, queue_size=1)
self.intersection_pub = rospy.Publisher("set_pose", PoseWithCovarianceStamped, queue_size=1)
self.cuprint("Waiting for strapdown")
rospy.wait_for_message( strap_topic, Odometry)
self.cuprint("Strapdown found")
# Sonar Subscription
rospy.Subscriber("sonar_processing/target_list/associated", SonarTargetList, self.sonar_callback)
self.cuprint("Loaded")
def sonar_callback(self, msg):
# self.cuprint("Receiving sonar meas")
collecting_agent_id = self.blue_agent_names.index(self.my_name)
for st in msg.targets:
collected_agent_id = self.blue_agent_names.index( st.id )
range_meas = st.range_m
azimuth_meas = st.bearing_rad + self.last_orientation_rad
if self.meas_variances["sonar_range"] == -1:
R_range = st.range_variance
else:
R_range = self.meas_variances["sonar_range"]
if self.meas_variances["sonar_az"] == -1:
R_az = st.bearing_variance
else:
R_az = self.meas_variances["sonar_az"]
rounded_range_meas = round(range_meas, 1)
rounded_azimuth_meas = round(np.degrees(azimuth_meas),1)
# self.cuprint("{} r: {} az: {} (deg)".format(st.id, rounded_range_meas, rounded_azimuth_meas))
self.kf.filter_azimuth_tracked(azimuth_meas, R_az, collecting_agent_id, collected_agent_id)
self.kf.filter_range_tracked(range_meas, R_range, collecting_agent_id, collected_agent_id)
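# nav_filter_callback below fuses the onboard strapdown/odometry estimate into
# the shared filter at roughly 1 Hz and, when enabled, pushes the fused pose
# back to the navigation stack on the set_pose topic via correct_strapdown.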
def nav_filter_callback(self, odom):
# Update at specified rate
t_now = rospy.get_rostime()
delta_t_ros = t_now - self.last_update_time
if delta_t_ros < rospy.Duration(1):
return
self.kf.propogate(self.position_process_noise, self.velocity_process_noise)
self.update_times.append(t_now)
# Update orientation
last_orientation_quat = odom.pose.pose.orientation
(r, p, y) = tf.transformations.euler_from_quaternion([last_orientation_quat.x, \
last_orientation_quat.y, last_orientation_quat.z, last_orientation_quat.w])
self.last_orientation_rad = y
orientation_cov = np.array(odom.pose.covariance).reshape(6,6)
if self.use_strapdown:
# last_orientation_dot = odom.twist.twist.angular
# last_orientation_dot_cov = np.array(odom.twist.covariance).reshape(6,6)
# Turn odom estimate into numpy
# Note the velocities are in the base_link frame --> Transform to odom frame # Assume zero pitch/roll
v_baselink = np.array([[odom.twist.twist.linear.x, odom.twist.twist.linear.y, odom.twist.twist.linear.z]]).T
rot_mat = np.array([ # base_link to odom frame
[np.cos(y), -np.sin(y), 0],
[np.sin(y), np.cos(y), 0],
[0, 0, 1]
])
v_odom = rot_mat.dot( v_baselink )
mean = np.array([[odom.pose.pose.position.x, odom.pose.pose.position.y, odom.pose.pose.position.z, \
v_odom[0,0], v_odom[1,0], v_odom[2,0]]]).T
cov_pose = np.array(odom.pose.covariance).reshape(6,6)
cov_twist = np.array(odom.twist.covariance).reshape(6,6)
cov = np.zeros((6,6))
cov[:3,:3] = cov_pose[:3,:3] #+ np.eye(3) * 4 #sim
cov[3:,3:] = rot_mat.dot( cov_twist[:3,:3] ).dot( rot_mat.T ) #+ np.eye(3) * 0.03 #sim
my_id = self.blue_agent_names.index(self.my_name)
x_nav, P_nav = self.kf.intersect_strapdown(mean, cov, my_id, fast_ci=False)
if self.do_correct_strapdown and (self.update_seq % self.strapdown_correction_period == 0):
if x_nav is not None and P_nav is not None:
self.correct_strapdown(odom.header, x_nav, P_nav, last_orientation_quat, orientation_cov)
elif self.correct_strapdown_next_seq:
self.correct_strapdown(odom.header, x_nav, P_nav, last_orientation_quat, orientation_cov)
self.correct_strapdown_next_seq = False
self.publish_estimates(t_now, last_orientation_quat, orientation_cov)
self.last_update_time = t_now
self.update_seq += 1
def correct_strapdown(self, header, x_nav, P_nav, orientation, orientation_cov):
msg = PoseWithCovarianceStamped()
msg.header = header
msg.header.frame_id = "odom"
# Transform
msg.pose.pose.position.x = x_nav[0,0]
msg.pose.pose.position.y = x_nav[1,0]
msg.pose.pose.position.z = x_nav[2,0]
msg.pose.pose.orientation = orientation
new_cov = np.zeros((6,6))
new_cov[:3,:3] = P_nav[:3,:3] # TODO add full cross correlations
new_cov[3:,3:] = orientation_cov[3:,3:]
msg.pose.covariance = list(new_cov.flatten())
self.intersection_pub.publish( msg )
def publish_estimates(self, timestamp, last_orientation_quat, orientation_cov):
ne = NetworkEstimate()
for asset in self.blue_agent_names:
ind = self.blue_agent_names.index(asset)
x_hat_agent, P_agent, _ = self.kf.get_agent_states(ind)
pose_cov = np.zeros((6,6))
pose_cov[:3,:3] = P_agent[:3,:3]
if asset == self.my_name:
pose = Pose(Point(x_hat_agent[0],x_hat_agent[1],x_hat_agent[2]),last_orientation_quat)
pose_cov[3:,3:] = orientation_cov[3:,3:]
elif "red" in asset:
pose_cov = 5*np.eye(6) # Just set single uncertainty
red_agent_depth = -0.7
pose = Pose(Point(x_hat_agent[0],x_hat_agent[1],red_agent_depth), Quaternion(0,0,0,1))
pose_cov[3:,3:] = -np.eye(3)
else:
pose = Pose(Point(x_hat_agent[0],x_hat_agent[1],x_hat_agent[2]), Quaternion(0,0,0,1))
pose_cov[3:,3:] = -np.eye(3)
pwc = PoseWithCovariance(pose, list(pose_cov.flatten()))
twist_cov = -np.eye(6)
twist_cov[:3,:3] = P_agent[3:6,3:6]
tw = Twist()
tw.linear = Vector3(x_hat_agent[3],x_hat_agent[4],x_hat_agent[5])
twc = TwistWithCovariance(tw, list(twist_cov.flatten()))
h = Header(self.update_seq, timestamp, "odom")
o = Odometry(h, "odom", pwc, twc)
ae = AssetEstimate(o, asset)
ne.assets.append(ae)
self.asset_pub_dict[asset].publish(o)
if self.red_agent_exists:
asset = self.red_agent_name
ind = self.blue_agent_names.index(asset)
x_hat_agent, P_agent, _ = self.kf.get_agent_states(ind)
pose_cov[:3,:3] = P_agent[:3,:3]
red_agent_depth = -0.7
pose = Pose(Point(x_hat_agent[0],x_hat_agent[1],red_agent_depth), Quaternion(0,0,0,1))
pose_cov[3:,3:] = -np.eye(3)
pwc = PoseWithCovariance(pose, list(pose_cov.flatten()))
twist_cov = -np.eye(6)
twist_cov[:3,:3] = P_agent[3:6,3:6]
tw = Twist()
tw.linear = Vector3(x_hat_agent[3],x_hat_agent[4],x_hat_agent[5])
twc = TwistWithCovariance(tw, list(twist_cov.flatten()))
h = Header(self.update_seq, timestamp, "odom")
o = Odometry(h, "odom", pwc, twc)
ae = AssetEstimate(o, asset)
ne.assets.append(ae)
self.asset_pub_dict[asset].publish(o)
self.network_pub.publish(ne)
def meas_pkg_callback(self, msg):
# Modem Meas taken by topside
if msg.src_asset == self.topside_name:
self.cuprint("Receiving Surface Modem Measurements")
meas_indices = []
modem_loc = None
# Approximate all modem measurements as being taken at this time
for meas in msg.measurements:
if len(self.force_modem_pose) == 0:
modem_loc = meas.global_pose[:3]
modem_ori = meas.global_pose[3]
else:
modem_loc = self.force_modem_pose[:3]
modem_ori = | np.radians(self.force_modem_pose[3]) | numpy.radians |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import os
import sys
import random
import gc
import pickle
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('seaborn-white')
import seaborn as sns
sns.set_style("white")
get_ipython().run_line_magic('matplotlib', 'inline')
from sklearn.model_selection import train_test_split
from tqdm import tqdm_notebook #, tnrange
#from itertools import chain
from skimage.io import imread, imshow #, concatenate_images
from skimage.transform import resize
from skimage.morphology import label
from keras.models import Model, load_model, save_model
from keras.layers import Input,Dropout,BatchNormalization,Activation,Add
from keras.layers.core import Lambda
from keras.layers.convolutional import Conv2D, Conv2DTranspose
from keras.layers.pooling import MaxPooling2D
from keras.layers.merge import concatenate
from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from keras import backend as K
from keras import optimizers
from keras.callbacks import Callback
import keras.backend as K
import numpy as np
import imgaug as ia
from imgaug import augmenters as iaa
import tensorflow as tf
from tta_wrapper import tta_segmentation
from keras.preprocessing.image import array_to_img, img_to_array, load_img#,save_img
import imgaug
import time
t_start = time.time()
# In[2]:
VERSION = 32
SEED = 42
FOLDS = 5
DEPTH = True
basic_name = f'Unet_resnet_v{VERSION}'
save_model_name = basic_name + '.model'
save_model_name_lov = basic_name + '_lov.model'
submission_file = basic_name + '.csv'
imgaug.seed(SEED)
print(save_model_name)
print(save_model_name_lov)
print(submission_file)
# In[3]:
img_size_ori = 101
img_size_target = 101
def upsample(img):
if img_size_ori == img_size_target:
return img
return resize(img, (img_size_target, img_size_target), mode='constant', preserve_range=True)
def downsample(img):
if img_size_ori == img_size_target:
return img
return resize(img, (img_size_ori, img_size_ori), mode='constant', preserve_range=True)
# In[4]:
# Loading of training/testing ids and depths
train_df = pd.read_csv("../data/raw/train.csv", index_col="id", usecols=[0])
depths_df = pd.read_csv("../data/raw/depths.csv", index_col="id")
train_df = train_df.join(depths_df)
test_df = depths_df[~depths_df.index.isin(train_df.index)]
len(train_df)
# In[5]:
train_df["images"] = [np.array(load_img("../data/raw/train/images/{}.png".format(idx),
color_mode = "grayscale",)) / 255 for idx in tqdm_notebook(train_df.index)]
# In[6]:
train_df["masks"] = [np.array(load_img("../data/raw/train/masks/{}.png".format(idx),
color_mode = "grayscale",)) / 255 for idx in tqdm_notebook(train_df.index)]
# In[7]:
train_df["coverage"] = train_df.masks.map(np.sum) / pow(img_size_ori, 2)
def cov_to_class(val):
for i in range(0, 11):
if val * 10 <= i :
return i
train_df["coverage_class"] = train_df.coverage.map(cov_to_class)
# In[8]:
SUBSET = len(train_df)
train_df = train_df.head(SUBSET)
len(train_df)
# In[9]:
def BatchActivate(x):
x = BatchNormalization()(x)
x = Activation('relu')(x)
return x
def convolution_block(x, filters, size, strides=(1,1), padding='same', activation=True):
x = Conv2D(filters, size, strides=strides, padding=padding)(x)
if activation == True:
x = BatchActivate(x)
return x
def residual_block(blockInput, num_filters=16, batch_activate = False):
x = BatchActivate(blockInput)
x = convolution_block(x, num_filters, (3,3) )
x = convolution_block(x, num_filters, (3,3), activation=False)
x = Add()([x, blockInput])
if batch_activate:
x = BatchActivate(x)
return x
# In[10]:
# Build model
def build_model(input_layer, start_neurons, DropoutRatio = 0.5):
# 101 -> 50
conv1 = Conv2D(start_neurons * 1, (3, 3), activation=None, padding="same")(input_layer)
conv1 = residual_block(conv1,start_neurons * 1)
conv1 = residual_block(conv1,start_neurons * 1, True)
pool1 = MaxPooling2D((2, 2))(conv1)
pool1 = Dropout(DropoutRatio/2)(pool1)
# 50 -> 25
conv2 = Conv2D(start_neurons * 2, (3, 3), activation=None, padding="same")(pool1)
conv2 = residual_block(conv2,start_neurons * 2)
conv2 = residual_block(conv2,start_neurons * 2, True)
pool2 = MaxPooling2D((2, 2))(conv2)
pool2 = Dropout(DropoutRatio)(pool2)
# 25 -> 12
conv3 = Conv2D(start_neurons * 4, (3, 3), activation=None, padding="same")(pool2)
conv3 = residual_block(conv3,start_neurons * 4)
conv3 = residual_block(conv3,start_neurons * 4, True)
pool3 = MaxPooling2D((2, 2))(conv3)
pool3 = Dropout(DropoutRatio)(pool3)
# 12 -> 6
conv4 = Conv2D(start_neurons * 8, (3, 3), activation=None, padding="same")(pool3)
conv4 = residual_block(conv4,start_neurons * 8)
conv4 = residual_block(conv4,start_neurons * 8, True)
pool4 = MaxPooling2D((2, 2))(conv4)
pool4 = Dropout(DropoutRatio)(pool4)
# Middle
convm = Conv2D(start_neurons * 16, (3, 3), activation=None, padding="same")(pool4)
convm = residual_block(convm,start_neurons * 16)
convm = residual_block(convm,start_neurons * 16, True)
# 6 -> 12
deconv4 = Conv2DTranspose(start_neurons * 8, (3, 3), strides=(2, 2), padding="same")(convm)
uconv4 = concatenate([deconv4, conv4])
uconv4 = Dropout(DropoutRatio)(uconv4)
uconv4 = Conv2D(start_neurons * 8, (3, 3), activation=None, padding="same")(uconv4)
uconv4 = residual_block(uconv4,start_neurons * 8)
uconv4 = residual_block(uconv4,start_neurons * 8, True)
# 12 -> 25
#deconv3 = Conv2DTranspose(start_neurons * 4, (3, 3), strides=(2, 2), padding="same")(uconv4)
deconv3 = Conv2DTranspose(start_neurons * 4, (3, 3), strides=(2, 2), padding="valid")(uconv4)
uconv3 = concatenate([deconv3, conv3])
uconv3 = Dropout(DropoutRatio)(uconv3)
uconv3 = Conv2D(start_neurons * 4, (3, 3), activation=None, padding="same")(uconv3)
uconv3 = residual_block(uconv3,start_neurons * 4)
uconv3 = residual_block(uconv3,start_neurons * 4, True)
# 25 -> 50
deconv2 = Conv2DTranspose(start_neurons * 2, (3, 3), strides=(2, 2), padding="same")(uconv3)
uconv2 = concatenate([deconv2, conv2])
uconv2 = Dropout(DropoutRatio)(uconv2)
uconv2 = Conv2D(start_neurons * 2, (3, 3), activation=None, padding="same")(uconv2)
uconv2 = residual_block(uconv2,start_neurons * 2)
uconv2 = residual_block(uconv2,start_neurons * 2, True)
# 50 -> 101
#deconv1 = Conv2DTranspose(start_neurons * 1, (3, 3), strides=(2, 2), padding="same")(uconv2)
deconv1 = Conv2DTranspose(start_neurons * 1, (3, 3), strides=(2, 2), padding="valid")(uconv2)
uconv1 = concatenate([deconv1, conv1])
uconv1 = Dropout(DropoutRatio)(uconv1)
uconv1 = Conv2D(start_neurons * 1, (3, 3), activation=None, padding="same")(uconv1)
uconv1 = residual_block(uconv1,start_neurons * 1)
uconv1 = residual_block(uconv1,start_neurons * 1, True)
#uconv1 = Dropout(DropoutRatio/2)(uconv1)
#output_layer = Conv2D(1, (1,1), padding="same", activation="sigmoid")(uconv1)
output_layer_noActi = Conv2D(1, (1,1), padding="same", activation=None)(uconv1)
output_layer = Activation('sigmoid')(output_layer_noActi)
return output_layer
# In[11]:
def get_iou_vector(A, B):
batch_size = A.shape[0]
metric = []
for batch in range(batch_size):
t, p = A[batch]>0, B[batch]>0
intersection = np.logical_and(t, p)
union = np.logical_or(t, p)
iou = (np.sum(intersection > 0) + 1e-10 )/ (np.sum(union > 0) + 1e-10)
thresholds = np.arange(0.5, 1, 0.05)
s = []
for thresh in thresholds:
s.append(iou > thresh)
metric.append(np.mean(s))
return np.mean(metric)
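# Minimal sanity check (illustrative addition, not in the original notebook):
# identical non-empty masks should score a perfect 1.0 under this metric.
_demo_mask = np.ones((1, 4, 4, 1))
assert abs(get_iou_vector(_demo_mask, _demo_mask) - 1.0) < 1e-6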
def my_iou_metric(label, pred):
return tf.py_func(get_iou_vector, [label, pred>0.5], tf.float64)
def my_iou_metric_2(label, pred):
return tf.py_func(get_iou_vector, [label, pred >0], tf.float64)
# In[12]:
# code download from: https://github.com/bermanmaxim/LovaszSoftmax
def lovasz_grad(gt_sorted):
"""
Computes gradient of the Lovasz extension w.r.t sorted errors
See Alg. 1 in paper
"""
gts = tf.reduce_sum(gt_sorted)
intersection = gts - tf.cumsum(gt_sorted)
union = gts + tf.cumsum(1. - gt_sorted)
jaccard = 1. - intersection / union
jaccard = tf.concat((jaccard[0:1], jaccard[1:] - jaccard[:-1]), 0)
return jaccard
# --------------------------- BINARY LOSSES ---------------------------
def lovasz_hinge(logits, labels, per_image=True, ignore=None):
"""
Binary Lovasz hinge loss
logits: [B, H, W] Variable, logits at each pixel (between -\infty and +\infty)
labels: [B, H, W] Tensor, binary ground truth masks (0 or 1)
per_image: compute the loss per image instead of per batch
ignore: void class id
"""
if per_image:
def treat_image(log_lab):
log, lab = log_lab
log, lab = tf.expand_dims(log, 0), tf.expand_dims(lab, 0)
log, lab = flatten_binary_scores(log, lab, ignore)
return lovasz_hinge_flat(log, lab)
losses = tf.map_fn(treat_image, (logits, labels), dtype=tf.float32)
loss = tf.reduce_mean(losses)
else:
loss = lovasz_hinge_flat(*flatten_binary_scores(logits, labels, ignore))
return loss
def lovasz_hinge_flat(logits, labels):
"""
Binary Lovasz hinge loss
logits: [P] Variable, logits at each prediction (between -\infty and +\infty)
labels: [P] Tensor, binary ground truth labels (0 or 1)
ignore: label to ignore
"""
def compute_loss():
labelsf = tf.cast(labels, logits.dtype)
signs = 2. * labelsf - 1.
errors = 1. - logits * tf.stop_gradient(signs)
errors_sorted, perm = tf.nn.top_k(errors, k=tf.shape(errors)[0], name="descending_sort")
gt_sorted = tf.gather(labelsf, perm)
grad = lovasz_grad(gt_sorted)
#loss = tf.tensordot(tf.nn.relu(errors_sorted), tf.stop_gradient(grad), 1, name="loss_non_void")
loss = tf.tensordot(tf.nn.elu(errors_sorted), tf.stop_gradient(grad), 1, name="loss_non_void")
return loss
# deal with the void prediction case (only void pixels)
loss = tf.cond(tf.equal(tf.shape(logits)[0], 0),
lambda: tf.reduce_sum(logits) * 0.,
compute_loss,
strict=True,
name="loss"
)
return loss
def flatten_binary_scores(scores, labels, ignore=None):
"""
Flattens predictions in the batch (binary case)
Remove labels equal to 'ignore'
"""
scores = tf.reshape(scores, (-1,))
labels = tf.reshape(labels, (-1,))
if ignore is None:
return scores, labels
valid = tf.not_equal(labels, ignore)
vscores = tf.boolean_mask(scores, valid, name='valid_scores')
vlabels = tf.boolean_mask(labels, valid, name='valid_labels')
return vscores, vlabels
def lovasz_loss(y_true, y_pred):
y_true, y_pred = K.cast(K.squeeze(y_true, -1), 'int32'), K.cast(K.squeeze(y_pred, -1), 'float32')
#logits = K.log(y_pred / (1. - y_pred))
logits = y_pred #Jiaxin
loss = lovasz_hinge(logits, y_true, per_image = True, ignore = None)
return loss
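# Usage sketch (assumption: `model` is a Keras model whose final layer outputs raw logits,
# e.g. one built without the sigmoid activation, since lovasz_hinge expects unbounded scores):
# model.compile(loss=lovasz_loss, optimizer="adam", metrics=[my_iou_metric_2])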
# In[13]:
def predict_result(model,x_test,img_size_target): # predict both original and horizontally flipped x
x_test_reflect = np.array([np.fliplr(x) for x in x_test])
preds_test = model.predict(x_test).reshape(-1, img_size_target, img_size_target)
preds_test2_refect = model.predict(x_test_reflect).reshape(-1, img_size_target, img_size_target)
preds_test += np.array([ np.fliplr(x) for x in preds_test2_refect] )
return preds_test/2
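# Usage sketch (the x_valid name is a placeholder, not from the original script): averaging
# predictions over the original and left-right flipped input is a simple form of
# test-time augmentation.
# preds_valid = predict_result(model, x_valid, img_size_target)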
# In[14]:
def add_depth_coord(images):
""" Takes dataset (N, W, H, 1) returns (N, W, H, 3). """
if not DEPTH:
return images
assert(len(images.shape) == 4)
channel1 = np.zeros_like(images)
h = images.shape[1]
for row, const in enumerate(np.linspace(0, 1, h)):
channel1[:, row, ...] = const
channel2 = images * channel1
images = np.concatenate([images, channel1, channel2], axis=-1)
return images
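# Shape sketch: with DEPTH enabled, a batch of shape (N, H, W, 1) becomes (N, H, W, 3) --
# channel 0 is the image, channel 1 a constant per-row ramp in [0, 1] encoding depth,
# and channel 2 the image multiplied by that ramp.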
class SGDRScheduler(Callback):
'''Cosine annealing learning rate scheduler with periodic restarts.
# Usage
```python
schedule = SGDRScheduler(min_lr=1e-5,
max_lr=1e-2,
steps_per_epoch=np.ceil(epoch_size/batch_size),
lr_decay=0.9,
cycle_length=5,
mult_factor=1.5)
model.fit(X_train, Y_train, epochs=100, callbacks=[schedule])
```
# Arguments
min_lr: The lower bound of the learning rate range for the experiment.
max_lr: The upper bound of the learning rate range for the experiment.
steps_per_epoch: Number of mini-batches in the dataset. Calculated as `np.ceil(epoch_size/batch_size)`.
lr_decay: Reduce the max_lr after the completion of each cycle.
Ex. To reduce the max_lr by 20% after each cycle, set this value to 0.8.
cycle_length: Initial number of epochs in a cycle.
mult_factor: Scale epochs_to_restart after each full cycle completion.
# References
Blog post: jeremyjordan.me/nn-learning-rate
Original paper: http://arxiv.org/abs/1608.03983
'''
def __init__(self,
min_lr,
max_lr,
steps_per_epoch,
lr_decay=1,
cycle_length=10,
mult_factor=2):
self.min_lr = min_lr
self.max_lr = max_lr
self.lr_decay = lr_decay
self.batch_since_restart = 0
self.next_restart = cycle_length
self.steps_per_epoch = steps_per_epoch
self.cycle_length = cycle_length
self.mult_factor = mult_factor
self.history = {}
def clr(self):
'''Calculate the learning rate.'''
fraction_to_restart = self.batch_since_restart / (self.steps_per_epoch * self.cycle_length)
lr = self.min_lr + 0.5 * (self.max_lr - self.min_lr) * (1 + np.cos(fraction_to_restart * np.pi))
return lr
def on_train_begin(self, logs={}):
'''Initialize the learning rate to the minimum value at the start of training.'''
logs = logs or {}
K.set_value(self.model.optimizer.lr, self.max_lr)
def on_batch_end(self, batch, logs={}):
'''Record previous batch statistics and update the learning rate.'''
logs = logs or {}
self.history.setdefault('lr', []).append(K.get_value(self.model.optimizer.lr))
for k, v in logs.items():
self.history.setdefault(k, []).append(v)
self.batch_since_restart += 1
K.set_value(self.model.optimizer.lr, self.clr())
def on_epoch_end(self, epoch, logs={}):
'''Check for end of current cycle, apply restarts when necessary.'''
if epoch + 1 == self.next_restart:
self.batch_since_restart = 0
self.cycle_length = np.ceil(self.cycle_length * self.mult_factor)
self.next_restart += self.cycle_length
self.max_lr *= self.lr_decay
self.best_weights = self.model.get_weights()
def on_train_end(self, logs={}):
'''Set weights to the values from the end of the most recent cycle for best performance.'''
self.model.set_weights(self.best_weights)
# In[15]:
#Data augmentation
import cv2
affine_seq = iaa.Sequential([
# General
iaa.SomeOf((1, 2),
[iaa.Fliplr(0.5),
iaa.Affine(rotate=(-10, 10),
translate_percent={"x": (-0.05, 0.05)},
mode='edge'),
# iaa.CropAndPad(percent=((0.0, 0.0), (0.05, 0.0), (0.0, 0.0), (0.05, 0.0)))
]),
# Deformations
iaa.Sometimes(0.3, iaa.PiecewiseAffine(scale=(0.04, 0.08))),
iaa.Sometimes(0.3, iaa.PerspectiveTransform(scale=(0.05, 0.1))),
], random_order=True)
intensity_seq = iaa.Sequential([
iaa.Invert(0.3),
iaa.Sometimes(0.3, iaa.ContrastNormalization((0.5, 1.5))),
iaa.OneOf([
iaa.Noop(),
iaa.Sequential([
iaa.OneOf([
iaa.Add((-10, 10)),
iaa.AddElementwise((-10, 10)),
iaa.Multiply((0.95, 1.05)),
iaa.MultiplyElementwise((0.95, 1.05)),
]),
]),
iaa.OneOf([
iaa.GaussianBlur(sigma=(0.0, 1.0)),
iaa.AverageBlur(k=(2, 5)),
iaa.MedianBlur(k=(3, 5))
])
])
], random_order=False)
def augment(x, y):
sometimes = lambda aug: iaa.Sometimes(0.3, aug)
seq = iaa.Sequential([
iaa.Fliplr(0.5), # horizontally flip
sometimes(iaa.Add((-10, 10))),
# iaa.OneOf([
# iaa.Noop(),
# iaa.PerspectiveTransform(scale=(0.04, 0.08)),
# iaa.Add((-10, 10)),
# iaa.ContrastNormalization((0.75, 1.5)),
# iaa.Emboss(alpha=(0, 1.0), strength=(0, 2.0)),
# iaa.EdgeDetect(alpha=(0, 0.7)),
# iaa.Noop(),
# sometimes(iaa.OneOf([
# iaa.EdgeDetect(alpha=(0, 0.7)),
# iaa.DirectedEdgeDetect(
# alpha=(0, 0.7), direction=(0.0, 1.0)
# ),
# ])),
# ]),
#sometimes(iaa.CropAndPad(
# percent=(-0.2, 0.2),
# pad_mode=["reflect"]
# )),
# sometimes(iaa.Sequential([
# iaa.Crop(percent=(0.2), keep_size=False),
# iaa.Scale({"height": img_size_target, "width": img_size_target}),
# iaa.Pad(percent=(0.2), pad_mode=["reflect"])
# ])),
])._to_deterministic()
images_aug_x = seq.augment_images(x)
images_aug_y = seq.augment_images(y)
return np.array(images_aug_x), np.array(images_aug_y)
# Return augmented images/masks arrays of batch size
def generator(features, labels, batch_size, repeat=1):
# create empty arrays to contain batch of features and labels
batch_features = np.zeros((batch_size, img_size_target, img_size_target, features.shape[3]))
batch_labels = np.zeros((batch_size, img_size_target, img_size_target, labels.shape[3]))
print(batch_features.shape)
while True:
# Fill arrays of batch size with augmented data taken randomly from full passed arrays
indexes = random.sample(range(len(features)), batch_size)*repeat
        # Perform exactly the same augmentation for X and y
random_augmented_images, random_augmented_labels = augment(np.apply_along_axis(np.squeeze, 1, features[indexes]*255).astype(np.uint8),
np.apply_along_axis(np.squeeze, 1, labels[indexes]*255).astype(np.uint8))
yield add_depth_coord(random_augmented_images/255), random_augmented_labels/255
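# Usage sketch (hyperparameters and the x_valid/y_valid names are placeholders, not taken
# from the original script):
# model.fit_generator(generator(x_train, y_train, batch_size=32),
#                     steps_per_epoch=len(x_train) // 32, epochs=50,
#                     validation_data=(add_depth_coord(x_valid), y_valid))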
#x_train = np.array(train_df.images.map(upsample).tolist()).reshape(-1, img_size_target, img_size_target, 3)
#y_train = np.array(train_df.masks.map(upsample).tolist()).reshape(-1, img_size_target, img_size_target, 3)
#x_test= np.array(test_df.images.map(upsample).tolist()).reshape(-1, img_size_target, img_size_target, 3)
x_train = np.array(train_df.images.tolist()).reshape(-1, img_size_target, img_size_target, 1)
y_train = np.array(train_df.masks.tolist()).reshape(-1, img_size_target, img_size_target, 1)
train_cls = np.array(train_df.coverage_class)
gc.collect()
#x_train, y_train, train_cls = augment(train_df)
# In[16]:
#Score the model and do a threshold optimization by the best IoU.
# src: https://www.kaggle.com/aglotero/another-iou-metric
def iou_metric(y_true_in, y_pred_in, print_table=False):
labels = y_true_in
y_pred = y_pred_in
true_objects = 2
pred_objects = 2
    # if all zeros, the original code generates wrong bins [-0.5 0 0.5],
temp1 = np.histogram2d(labels.flatten(), y_pred.flatten(), bins=([0,0.5,1], [0,0.5, 1]))
intersection = temp1[0]
area_true = np.histogram(labels,bins=[0,0.5,1])[0]
area_pred = np.histogram(y_pred, bins=[0,0.5,1])[0]
area_true = np.expand_dims(area_true, -1)
area_pred = np.expand_dims(area_pred, 0)
# Compute union
union = area_true + area_pred - intersection
# Exclude background from the analysis
intersection = intersection[1:,1:]
intersection[intersection == 0] = 1e-9
union = union[1:,1:]
union[union == 0] = 1e-9
# Compute the intersection over union
iou = intersection / union
# Precision helper function
def precision_at(threshold, iou):
matches = iou > threshold
true_positives = np.sum(matches, axis=1) == 1 # Correct objects
        false_positives = np.sum(matches, axis=0) == 0 # Extra predicted objects
        false_negatives = np.sum(matches, axis=1) == 0 # Missed true objects
tp, fp, fn = np.sum(true_positives), np.sum(false_positives), | np.sum(false_negatives) | numpy.sum |
import numpy as np
import torch, os, argparse, random
from sklearn.metrics import roc_auc_score, average_precision_score
from sklearn.model_selection import train_test_split, StratifiedKFold
basedir = os.path.abspath(os.path.dirname(__file__))
os.chdir(basedir)
torch.backends.cudnn.deterministic = True
torch.autograd.set_detect_anomaly(True)
from model import NeoDTI
def set_seed(args):
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
# if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument("--seed", type=int, default=26, help="random seed for initialization")
parser.add_argument("--d", default=1024, type=int, help="the embedding dimension d")
parser.add_argument("--n",default=1.0, type=float, help="global gradient norm to be clipped")
parser.add_argument("--k",default=512, type=int, help="the dimension of reprojection matrices k")
parser.add_argument("--t",default = "o", type=str, help="test scenario", choices=['o', 'homo', 'drug', 'disease', 'sideeffect', 'unique'])
parser.add_argument("--r",default = "ten", type=str, help="positive-negative ratio", choices=['ten', 'all'])
parser.add_argument("--l2-factor",default = 0.1, type=float, help="weight of l2 loss")
parser.add_argument("--lr", default=1e-3, type=float, help='learning rate')
parser.add_argument("--weight-decay", default=0, type=float, help='weight decay of the optimizer')
parser.add_argument("--num-steps", default=3000, type=int, help='number of training steps')
parser.add_argument("--device", choices=[-1,0,1,2,3], default=0, type=int, help='device number (-1 for cpu)')
parser.add_argument("--n-folds", default=10, type=int, help="number of folds for cross validation")
parser.add_argument("--round", default=1, type=int, help="number of rounds of sampling")
parser.add_argument("--test-size", default=0.05, type=float, help="portion of validation data w.r.t. trainval-set")
args = parser.parse_args()
return args
def row_normalize(a_matrix, substract_self_loop):
if substract_self_loop == True:
np.fill_diagonal(a_matrix,0)
a_matrix = a_matrix.astype(float)
row_sums = a_matrix.sum(axis=1)+1e-12
new_matrix = a_matrix / row_sums[:, np.newaxis]
new_matrix[np.isnan(new_matrix) | np.isinf(new_matrix)] = 0.0
return torch.Tensor(new_matrix)
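# Worked sketch: row_normalize(np.array([[1., 3.], [0., 0.]]), False) returns approximately
# tensor([[0.25, 0.75], [0., 0.]]) -- each non-zero row is scaled to sum to 1 and
# all-zero rows stay zero (the 1e-12 term avoids division by zero).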
def train_and_evaluate(args, DTItrain, DTIvalid, DTItest, verbose=True):
set_seed(args)
drug_protein = np.zeros((num_drug,num_protein))
mask = np.zeros((num_drug,num_protein))
for ele in DTItrain:
drug_protein[ele[0],ele[1]] = ele[2]
mask[ele[0],ele[1]] = 1
protein_drug = drug_protein.T
drug_protein_normalize = row_normalize(drug_protein,False).to(device)
protein_drug_normalize = row_normalize(protein_drug,False).to(device)
drug_protein = torch.Tensor(drug_protein).to(device)
mask = torch.Tensor(mask).to(device)
model = NeoDTI(args, num_drug, num_disease, num_protein, num_sideeffect)
model.to(device)
no_decay = ["bias"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": args.weight_decay,
},
{"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
]
optimizer = torch.optim.Adam(optimizer_grouped_parameters, lr=args.lr, weight_decay=args.weight_decay)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'max', factor=0.8, patience=2)
# ground_truth = [] # for evaluation
# ground_truth_test = []
ground_truth_train = [ele[2] for ele in DTItrain]
ground_truth_valid = [ele[2] for ele in DTIvalid]
ground_truth_test = [ele[2] for ele in DTItest]
# for ele in DTIvalid:
# ground_truth.append(ele[2])
# for ele in DTItest:
# ground_truth_test.append(ele[2])
best_valid_aupr = 0
best_valid_auc = 0
test_aupr = 0
test_auc = 0
for i in range(args.num_steps):
model.train()
model.zero_grad()
tloss, dtiloss, results = model(drug_drug_normalize, drug_chemical_normalize, drug_disease_normalize,
drug_sideeffect_normalize, protein_protein_normalize, protein_sequence_normalize,
protein_disease_normalize, disease_drug_normalize, disease_protein_normalize,
sideeffect_drug_normalize, drug_protein_normalize, protein_drug_normalize,
drug_drug, drug_chemical, drug_disease, drug_sideeffect, protein_protein,
protein_sequence, protein_disease, drug_protein, mask)
# print(results)
tloss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), args.n)
optimizer.step()
if i % 25 == 0 and verbose == True:
print('step', i, 'total and dti loss', tloss.item(), dtiloss.item())
model.eval()
pred_list_valid = [results[ele[0],ele[1]] for ele in DTIvalid]
valid_auc = roc_auc_score(ground_truth_valid, pred_list_valid)
valid_aupr = average_precision_score(ground_truth_valid, pred_list_valid)
pred_list_train = [results[ele[0],ele[1]] for ele in DTItrain]
train_auc = roc_auc_score(ground_truth_train, pred_list_train)
train_aupr = average_precision_score(ground_truth_train, pred_list_train)
scheduler.step(train_aupr)
if valid_aupr >= best_valid_aupr:
best_valid_aupr = valid_aupr
best_valid_auc = valid_auc
pred_list_test = [results[ele[0],ele[1]] for ele in DTItest]
test_auc = roc_auc_score(ground_truth_test, pred_list_test)
test_aupr = average_precision_score(ground_truth_test, pred_list_test)
print ('train auc aupr', train_auc, train_aupr, 'valid auc aupr,', valid_auc, valid_aupr, 'test auc aupr', test_auc, test_aupr)
return best_valid_auc, best_valid_aupr, test_auc, test_aupr
if __name__ == '__main__':
args = get_args()
set_seed(args)
device = torch.device("cuda:{}".format(args.device)) if args.device >= 0 else torch.device("cpu")
network_path = '../data/'
print('loading networks ...')
drug_drug = np.loadtxt(network_path+'mat_drug_drug.txt')
true_drug = 708 # First [0:708] are drugs, the rest are compounds retrieved from ZINC15 database
drug_chemical = np.loadtxt(network_path+'Similarity_Matrix_Drugs.txt')
drug_chemical=drug_chemical[:true_drug,:true_drug]
drug_disease = np.loadtxt(network_path+'mat_drug_disease.txt')
drug_sideeffect = np.loadtxt(network_path+'mat_drug_se.txt')
disease_drug = drug_disease.T
sideeffect_drug = drug_sideeffect.T
protein_protein = np.loadtxt(network_path+'mat_protein_protein.txt')
protein_sequence = np.loadtxt(network_path+'Similarity_Matrix_Proteins.txt')
protein_disease = np.loadtxt(network_path+'mat_protein_disease.txt')
disease_protein = protein_disease.T
print('normalize network for mean pooling aggregation')
drug_drug_normalize = row_normalize(drug_drug,True).to(device)
drug_chemical_normalize = row_normalize(drug_chemical,True).to(device)
drug_disease_normalize = row_normalize(drug_disease,False).to(device)
drug_sideeffect_normalize = row_normalize(drug_sideeffect,False).to(device)
protein_protein_normalize = row_normalize(protein_protein,True).to(device)
protein_sequence_normalize = row_normalize(protein_sequence,True).to(device)
protein_disease_normalize = row_normalize(protein_disease,False).to(device)
disease_drug_normalize = row_normalize(disease_drug,False).to(device)
disease_protein_normalize = row_normalize(disease_protein,False).to(device)
sideeffect_drug_normalize = row_normalize(sideeffect_drug,False).to(device)
#define computation graph
num_drug = len(drug_drug_normalize)
num_protein = len(protein_protein_normalize)
num_disease = len(disease_protein_normalize)
num_sideeffect = len(sideeffect_drug_normalize)
drug_drug = torch.Tensor(drug_drug).to(device)
drug_chemical = torch.Tensor(drug_chemical).to(device)
drug_disease = torch.Tensor(drug_disease).to(device)
drug_sideeffect = torch.Tensor(drug_sideeffect).to(device)
protein_protein = torch.Tensor(protein_protein).to(device)
protein_sequence = torch.Tensor(protein_sequence).to(device)
protein_disease = torch.Tensor(protein_disease).to(device)
# prepare drug_protein and mask
test_auc_round = []
test_aupr_round = []
val_auc_round = []
val_aupr_round = []
if args.t == 'o':
dti_o = | np.loadtxt(network_path+'mat_drug_protein.txt') | numpy.loadtxt |
import numpy as np
import matplotlib.pyplot as plt
import time
class SpringSim(object):
def __init__(self, n_balls=5, box_size=5., loc_std=.5, vel_norm=.5,
interaction_strength=.1, noise_var=0.):
self.n_balls = n_balls
self.box_size = box_size
self.loc_std = loc_std
self.vel_norm = vel_norm
self.interaction_strength = interaction_strength
self.noise_var = noise_var
self._spring_types = np.array([0., 0.5, 1.])
self._delta_T = 0.001
self._max_F = 0.1 / self._delta_T
self.dim = 3
def _energy(self, loc, vel, edges):
# disables division by zero warning, since I fix it with fill_diagonal
with np.errstate(divide='ignore'):
K = 0.5 * (vel ** 2).sum()
U = 0
for i in range(loc.shape[1]):
for j in range(loc.shape[1]):
if i != j:
r = loc[:, i] - loc[:, j]
dist = np.sqrt((r ** 2).sum())
U += 0.5 * self.interaction_strength * edges[
i, j] * (dist ** 2) / 2
return U + K
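    # Note on _energy: the double loop visits every ordered pair (i, j) twice, so with the
    # extra /2 the total potential is U = sum over unordered pairs of
    # 0.5 * interaction_strength * edges[i, j] * dist**2, the usual spring energy.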
def _clamp(self, loc, vel):
'''
        :param loc: dim x N location at one time stamp
        :param vel: dim x N velocity at one time stamp
        :return: location and velocity after hitting walls and returning after
        elastically colliding with walls
'''
assert (np.all(loc < self.box_size * 3))
assert (np.all(loc > -self.box_size * 3))
over = loc > self.box_size
loc[over] = 2 * self.box_size - loc[over]
assert (np.all(loc <= self.box_size))
# assert(np.all(vel[over]>0))
vel[over] = -np.abs(vel[over])
under = loc < -self.box_size
loc[under] = -2 * self.box_size - loc[under]
# assert (np.all(vel[under] < 0))
assert (np.all(loc >= -self.box_size))
vel[under] = np.abs(vel[under])
return loc, vel
def _l2(self, A, B):
"""
Input: A is a Nxd matrix
               B is a Mxd matrix
        Output: dist is a NxM matrix where dist[i,j] is the squared norm
between A[i,:] and B[j,:]
i.e. dist[i,j] = ||A[i,:]-B[j,:]||^2
"""
A_norm = (A ** 2).sum(axis=1).reshape(A.shape[0], 1)
B_norm = (B ** 2).sum(axis=1).reshape(1, B.shape[0])
dist = A_norm + B_norm - 2 * A.dot(B.transpose())
return dist
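    # Worked sketch: for A = [[0., 0.]] and B = [[3., 4.]], _l2 returns
    # [[0 + 25 - 2*0]] = [[25.]] = ||A[0] - B[0]||^2, using the expansion
    # ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a.b.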
def sample_trajectory(self, T=10000, sample_freq=10,
spring_prob=[1. / 2, 0, 1. / 2]):
n = self.n_balls
assert (T % sample_freq == 0)
T_save = int(T / sample_freq - 1)
diag_mask = np.ones((n, n), dtype=bool)
np.fill_diagonal(diag_mask, 0)
counter = 0
# Sample edges
edges = np.random.choice(self._spring_types,
size=(self.n_balls, self.n_balls),
p=spring_prob)
edges = np.tril(edges) + np.tril(edges, -1).T
np.fill_diagonal(edges, 0)
# Initialize location and velocity
loc = np.zeros((T_save, self.dim, n))
vel = np.zeros((T_save, self.dim, n))
loc_next = np.random.randn(self.dim, n) * self.loc_std
vel_next = np.random.randn(self.dim, n)
v_norm = np.sqrt((vel_next ** 2).sum(axis=0)).reshape(1, -1)
vel_next = vel_next * self.vel_norm / v_norm
loc[0, :, :], vel[0, :, :] = self._clamp(loc_next, vel_next)
# disables division by zero warning, since I fix it with fill_diagonal
with np.errstate(divide='ignore'):
forces_size = - self.interaction_strength * edges
np.fill_diagonal(forces_size,
0) # self forces are zero (fixes division by zero)
F = (forces_size.reshape(1, n, n) *
np.concatenate((
np.subtract.outer(loc_next[0, :],
loc_next[0, :]).reshape(1, n, n),
np.subtract.outer(loc_next[1, :],
loc_next[1, :]).reshape(1, n, n),
np.subtract.outer(loc_next[2, :],
loc_next[2, :]).reshape(1, n, n)
))).sum(
axis=-1)
F[F > self._max_F] = self._max_F
F[F < -self._max_F] = -self._max_F
vel_next += self._delta_T * F
# run leapfrog
for i in range(1, T):
loc_next += self._delta_T * vel_next
#loc_next, vel_next = self._clamp(loc_next, vel_next)
if i % sample_freq == 0:
loc[counter, :, :], vel[counter, :, :] = loc_next, vel_next
counter += 1
forces_size = - self.interaction_strength * edges
np.fill_diagonal(forces_size, 0)
# assert (np.abs(forces_size[diag_mask]).min() > 1e-10)
F = (forces_size.reshape(1, n, n) *
np.concatenate((
np.subtract.outer(loc_next[0, :],
loc_next[0, :]).reshape(1, n, n),
np.subtract.outer(loc_next[1, :],
loc_next[1, :]).reshape(1, n, n),
np.subtract.outer(loc_next[2, :],
loc_next[2, :]).reshape(1, n, n)
))).sum(
axis=-1)
F[F > self._max_F] = self._max_F
F[F < -self._max_F] = -self._max_F
vel_next += self._delta_T * F
# Add noise to observations
loc += np.random.randn(T_save, self.dim, self.n_balls) * self.noise_var
vel += np.random.randn(T_save, self.dim, self.n_balls) * self.noise_var
return loc, vel, edges
class ChargedParticlesSim(object):
def __init__(self, n_balls=5, box_size=5., loc_std=1., vel_norm=0.5,
interaction_strength=1., noise_var=0.):
self.n_balls = n_balls
self.box_size = box_size
self.loc_std = loc_std
self.loc_std = loc_std * (float(n_balls)/5.) ** (1/3)
print(self.loc_std)
self.vel_norm = vel_norm
self.interaction_strength = interaction_strength
self.noise_var = noise_var
self._charge_types = np.array([-1., 0., 1.])
self._delta_T = 0.001
self._max_F = 0.1 / self._delta_T
self.dim = 3
def _l2(self, A, B):
"""
Input: A is a Nxd matrix
               B is a Mxd matrix
        Output: dist is a NxM matrix where dist[i,j] is the squared norm
between A[i,:] and B[j,:]
i.e. dist[i,j] = ||A[i,:]-B[j,:]||^2
"""
A_norm = (A ** 2).sum(axis=1).reshape(A.shape[0], 1)
B_norm = (B ** 2).sum(axis=1).reshape(1, B.shape[0])
dist = A_norm + B_norm - 2 * A.dot(B.transpose())
return dist
def _energy(self, loc, vel, edges):
# disables division by zero warning, since I fix it with fill_diagonal
with np.errstate(divide='ignore'):
K = 0.5 * (vel ** 2).sum()
U = 0
for i in range(loc.shape[1]):
for j in range(loc.shape[1]):
if i != j:
r = loc[:, i] - loc[:, j]
dist = np.sqrt((r ** 2).sum())
U += 0.5 * self.interaction_strength * edges[
i, j] / dist
return U + K
def _clamp(self, loc, vel):
'''
        :param loc: dim x N location at one time stamp
        :param vel: dim x N velocity at one time stamp
        :return: location and velocity after hitting walls and returning after
        elastically colliding with walls
'''
assert (np.all(loc < self.box_size * 3))
assert ( | np.all(loc > -self.box_size * 3) | numpy.all |
"""
===============
Curve fitting
===============
Demos a simple curve fitting
"""
############################################################
# First generate some data
import numpy as np
# Seed the random number generator for reproducibility
| np.random.seed(0) | numpy.random.seed |
#!/usr/bin/env python3
# Copyright 2021 <NAME>, <NAME>, <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import numpy as np
import os
from networks.tpdi_networks import DFCNetwork
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
import matplotlib as mpl
import seaborn as sns
from matplotlib.colors import ListedColormap
from matplotlib.ticker import FormatStrFormatter
mpl.rcParams['mathtext.fontset'] = 'stix'
mpl.rcParams['font.family'] = 'STIXGeneral'
out_dir = './logs/toy_experiments/fig2'
if not os.path.exists(out_dir):
os.makedirs(out_dir)
np.random.seed(42)
torch.manual_seed(42)
n_in=5
n_hidden=[2]
n_out=2
nb_Q = 2000
nb_J_damped = 100
fit_on = 'total' # 'J', 'total', 'Q'
def rescale(matrix, scale=1.):
matrix_magnitude = np.linalg.norm(matrix)
return scale/matrix_magnitude * matrix
def all_positive_eig(A):
lamb = np.linalg.eigvals(A)
return sum(lamb.real<0) == 0
def all_negative_eig(A):
lamb = np.linalg.eigvals(A)
return sum(lamb.real>0) == 0
def generate_random_Q(jac):
while True:
permutation = np.random.randn(n_out,n_out)
Q_rand = np.matmul(jac.T, permutation)
if all_positive_eig(np.matmul(jac, Q_rand)):
return rescale(Q_rand.flatten())
def compute_damped_jac(jac, damping):
curv = np.matmul(jac, jac.T)
return rescale(np.matmul(jac.T,
np.linalg.inv(curv + damping * np.eye(curv.shape[0]))).flatten())
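# Note: compute_damped_jac returns (rescaled and flattened) J^T (J J^T + damping * I)^{-1},
# the damped pseudoinverse of the Jacobian; as damping -> 0 it approaches the
# Moore-Penrose pseudoinverse, and for large damping it approaches a scaled J^T.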
net = DFCNetwork(n_in=n_in, n_hidden=n_hidden, n_out=n_out,
activation='tanh', initialization='xavier_normal')
x = torch.randn((1, n_in))
net.forward(x)
jac = net.compute_full_jacobian(linear=True).squeeze(0).numpy()
Qs_vectorized = np.zeros((nb_Q, jac.size))
for i in range(nb_Q):
Qs_vectorized[i,:] = generate_random_Q(jac)
J_damped_pinv = np.zeros((nb_J_damped, jac.size))
damping_values = | np.logspace(-5, 2, num=nb_J_damped) | numpy.logspace |
"""
MIT License
Copyright (c) 2021 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import os
import sys
import h5py as h5
import numpy as np
import PIL
import json
from argparse import ArgumentParser
from functools import partial
import shutil
from glob import glob
from pathlib import Path
# 1 split open and closed class
# 2 split unlabeled and labeled in closed class
# 3 extract subset of open set class
def osss_subset(imgs, labels, rng, labeled_ratio=0.2, usage_ratio=0.1, subset_classes=[0]):
N = len(labels)
# np.where(subset_class <= labels, 1, 0)
close_class_mask = np.isin(labels, subset_classes)
open_class_mask = ~np.isin(labels, subset_classes)
print('the number of closed-set sample in original data', np.sum(close_class_mask))
print('the number of open-set sample in original data', np.sum(open_class_mask))
unlabeled_mask = rng.choice([True, False], size=N, p=[1 - labeled_ratio, labeled_ratio])
unlabeled_mask = unlabeled_mask | open_class_mask
new_labels = labels * (1 - unlabeled_mask) + (-1) * unlabeled_mask
usage_mask = rng.choice([True, False], size=N, p=[usage_ratio, 1 - usage_ratio])
mask = close_class_mask | (open_class_mask & usage_mask)
print(np.sum(mask))
return imgs[mask], new_labels[mask], labels[mask]
def class_filtered_dataset(imgs, labels, subset_classes=[0]):
mask = np.isin(labels, subset_classes)
return imgs[mask], labels[mask]
def replace_label(labels, classes):
# Extract out keys and values
k = np.array(list(classes.keys()))
v = np.array(list(classes.values()))
# Get argsort indices
sidx = k.argsort()
ks = k[sidx]
vs = v[sidx]
idx = np.searchsorted(ks, labels)
idx[idx==len(vs)] = 0
mask = ks[idx] == labels
return np.where(mask, vs[idx], len(labels))
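# Worked sketch: replace_label(np.array([5, 7, 5]), {5: 0, 7: 1}) -> array([0, 1, 0]);
# any label missing from `classes` is mapped to the sentinel value len(labels).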
def make_semi_supervised_dataset(src_path, dst_path, config, rng, sub_f, subset_classes=[0]):
with h5.File(src_path, 'r') as f:
labels = f['labels'][...]
imgs = f['imgs'][...]
ss_imgs, ss_labels, old_labels = sub_f(imgs, labels, rng)
ss_labels = replace_label(ss_labels, {**{c:i for i,c in enumerate(subset_classes)}, **{-1:-1}})
old_labels = replace_label(old_labels, {c:i for i,c in enumerate(subset_classes)})
assert old_labels.shape == ss_labels.shape
with h5.File(dst_path, 'w') as df:
df.create_dataset('closedclasses', data=subset_classes, dtype='int')
df.create_dataset('labels2', data=old_labels, dtype='int64',
chunks=(config['chunk_size'],), compression=config['compression'])
df.create_dataset('imgs', data=ss_imgs, dtype='uint8',
chunks=(config['chunk_size'], 3, config['img_size'], config['img_size']), compression=config['compression'])
df.create_dataset('labels', data=ss_labels, dtype='int64',
chunks=(config['chunk_size'],), compression=config['compression'])
def make_filtered_dataset(src_path, dst_path, config, subset_classes=[0]):
with h5.File(src_path, 'r') as f:
labels = f['labels'][...]
imgs = f['imgs'][...]
ss_imgs, ss_labels = class_filtered_dataset(imgs, labels, subset_classes=subset_classes)
ss_labels = replace_label(ss_labels, {c:i for i,c in enumerate(subset_classes)})
with h5.File(dst_path, 'w') as df:
df.create_dataset('closedclasses', data=subset_classes, dtype='int')
df.create_dataset('imgs', data=ss_imgs, dtype='uint8',
chunks=(config['chunk_size'], 3, config['img_size'], config['img_size']), compression=config['compression'])
df.create_dataset('labels', data=ss_labels, dtype='int64',
chunks=(config['chunk_size'],), compression=config['compression'])
def report_stats(hdf5_path):
with h5.File(hdf5_path, 'r') as f:
labels = f['labels'][...]
classes = f['closedclasses'][...]
imgs = f['imgs'][...]
print(f'label shape: {labels.shape}')
print(f'image shape: {imgs.shape}')
print(f'The number of classes: {len(np.unique(labels))}, max: { | np.max(labels) | numpy.max |
from pseas.new_instance_selection.new_instance_selection import NewInstanceSelection
from pseas.model import Model
import numpy as np
class Variance(NewInstanceSelection):
def select(self, challenger_configuration: int, incumbent_configuration: int, perf_matrix: np.ndarray, perf_mask: np.ndarray, model: Model, predicted_perf_matrix: np.ndarray, instance_features: np.ndarray) -> int:
selectables_instances = [i for i in range(perf_matrix.shape[0]) if not np.any(perf_mask[i, :])]
current_configurations = | np.any(perf_mask, axis=0) | numpy.any |
import numpy as np
from .ec import ECMeasurement
from ..data_series import ValueSeries, TimeSeries
from ..exceptions import BuildError, SeriesNotFoundError
from .analysis_tools import (
tspan_passing_through,
calc_sharp_v_scan,
find_signed_sections,
)
from ..plotters.ec_plotter import CVDiffPlotter
from ..tools import deprecate
class CyclicVoltammogram(ECMeasurement):
"""Class for cyclic voltammetry measurements.
Onto ECMeasurement, this adds:
- a property `cycle` which is a ValueSeries on the same TimeSeries as potential,
which counts cycles. "cycle" becomes the Measurement's `sel_str`. Indexing with
integer or iterable selects according to `cycle`.
- functions for quantitatively comparing cycles (like a stripping cycle, base cycle)
- the default plot() is plot_vs_potential()
"""
essential_series_names = ("t", "raw_potential", "raw_current", "cycle")
selector_name = "cycle"
series_constructors = ECMeasurement.series_constructors
series_constructors["scan_rate"] = "_build_scan_rate"
"""Name of the default selector"""
def __init__(self, *args, **kwargs):
"""Only reason to have an __init__ here is to set the default plot()"""
super().__init__(*args, **kwargs)
self.plot = self.plotter.plot_vs_potential # gets the right docstrings! :D
try:
_ = self["cycle"]
except SeriesNotFoundError:
median_potential = 1 / 2 * (np.max(self.U) + np.min(self.U))
self.redefine_cycle(start_potential=median_potential, redox=True)
self.start_potential = None # see `redefine_cycle`
self.redox = None # see `redefine_cycle`
def __getitem__(self, key):
"""Given int list or slice key, return a CyclicVoltammogram with those cycles"""
if isinstance(key, slice):
start, stop, step = key.start, key.stop, key.step
if step is None:
step = 1
key = list(range(start, stop, step))
if isinstance(key, (int, list)):
if isinstance(key, list) and not all([isinstance(i, int) for i in key]):
print("can't get an item of type list unless all elements are int")
print(f"you tried to get key = {key}.")
raise AttributeError
return self.select(key)
return super().__getitem__(key)
def redefine_cycle(self, start_potential=None, redox=None, N_points=5):
"""Build `cycle` which iterates when passing through start_potential
Args:
start_potential (float): The potential in [V] at which the cycle counter will
iterate. If start_potential is not given, the cycle is just the
`selector` inherited from ECMeasurement shifted to start at 0.
redox (bool): True (or 1) for anodic, False (or 0) for cathodic. The
direction in which the potential is scanning through start_potential to
trigger an iteration of `cycle`.
N_points (int): The number of consecutive points for which the potential
needs to be above (redox=True) or below (redox=False) the
start_potential for the new cycle to register.
"""
self.start_potential = start_potential
self.redox = redox
if start_potential is None:
old_cycle_series = self["cycle_number"]
new_cycle_series = ValueSeries(
name="cycle",
unit_name=old_cycle_series.unit_name,
data=old_cycle_series.data - min(old_cycle_series.data),
tseries=old_cycle_series.tseries,
)
else:
cycle_vec = | np.zeros(self.t.shape) | numpy.zeros |
"""Contains the Algorithm class and subclasses as well as support classes and functions for running simulations"""
from distributed import as_completed
from subprocess import run
from subprocess import CalledProcessError, TimeoutExpired
from subprocess import STDOUT
from numpy.core.fromnumeric import mean
from .data import Data
from .pset import PSet
from .pset import Trajectory
from .pset import TimeCourse
from .pset import BNGLModel
from .pset import NetModel, BNGLModel, SbmlModelNoTimeout
from .pset import OutOfBoundsException
from .pset import FailedSimulationError
from .printing import print0, print1, print2, PybnfError
from .objective import ObjectiveCalculator, ConstraintCounter
import logging
import numpy as np
import os
import re
import shutil
import copy
import sys
import traceback
import pickle
from scipy import stats
from glob import glob
from tornado import gen
from distributed.client import _wait
from concurrent.futures._base import CancelledError
logger = logging.getLogger(__name__)
class Result(object):
"""
Container for the results of a single evaluation in the fitting algorithm
"""
def __init__(self, paramset, simdata, name):
"""
Instantiates a Result
:param paramset: The parameters corresponding to this evaluation
:type paramset: PSet
:param simdata: The simulation results corresponding to this evaluation, as a nested dictionary structure.
Top-level keys are model names and values are dictionaries whose keys are action suffixes and values are
Data instances
        :type simdata: dict
        :param name: A unique identifier for this evaluation
        :type name: str
"""
self.pset = paramset
self.simdata = simdata
self.name = name
self.score = None # To be set later when the Result is scored.
self.failed = False
def normalize(self, settings):
"""
Normalizes the Data object in this result, according to settings
:param settings: Config value for 'normalization': a string representing the normalization type, a dict mapping
exp files to normalization type, or None
:return:
"""
if settings is None:
return
for m in self.simdata:
for suff in self.simdata[m]:
if type(settings) == str:
self.simdata[m][suff].normalize(settings)
elif suff in settings:
self.simdata[m][suff].normalize(settings[suff])
def postprocess_data(self, settings):
"""
Postprocess the Data objects in this result with a user-defined Python script
:param settings: A dict that maps a tuple (model, suffix) to a Python filename to load.
That file is expected to contain the definition for the function postprocess(data),
which takes a Data object and returns a processed data object
:return: None
"""
for m, suff in settings:
rawdata = self.simdata[m][suff]
# This could generate all kinds of errors if the user's script is bad. Whatever happens, it's caught
# by the caller of postprocess_data()
# exec(settings[m][suff])
# noinspection PyUnresolvedReferences
# self.simdata[m][suff] = postprocess(rawdata)
# Cleaner attempt - follows good practice and is probably faster, but makes it hard for the user to create
# a new Data object if they want to do that.
# However, they can do that by `dataclass = data.__class__` `newdata = dataclass()`
# Import the user-specified script as a module
import importlib.util
spec = importlib.util.spec_from_file_location("postprocessor", settings[m, suff])
postproc = importlib.util.module_from_spec(spec)
spec.loader.exec_module(postproc)
# Now postproc is the user-defined Python module
self.simdata[m][suff] = postproc.postprocess(rawdata)
def add_result(self, other):
"""
Add simulation data of other models from another Result object into this Result object
:param other: The other Result object
:return:
"""
self.simdata.update(other.simdata)
class FailedSimulation(Result):
def __init__(self, paramset, name, fail_type, einfo=tuple([None, None, None])):
"""
Instantiates a FailedSimulation
:param paramset:
:param log:
:param name:
:param fail_type: 0 - Exceeded walltime, 1 - Other crash
:type fail_type: int
:param einfo:
:type einfo: tuple
"""
super(FailedSimulation, self).__init__(paramset, None, name)
self.fail_type = fail_type
self.failed = True
self.traceback = ''.join(traceback.format_exception(*einfo))
def normalize(self, settings):
return
def postprocess_data(self, settings):
return
def run_job(j, debug=False, failed_logs_dir=''):
"""
Runs the Job j.
This function is passed to Dask instead of j.run_simulation because if you pass j.run_simulation, Dask leaks memory
associated with j.
"""
try:
return j.run_simulation(debug, failed_logs_dir)
except RuntimeError as e:
# Catch the error for running out of threads here - it's the only place outside dask where we can catch it.
if e.args[0] == "can't start new thread":
logger.error("Reached thread limit - can't start new thread")
print0('Too many threads! See "Troubleshooting" in the documentation for how to deal with this problem')
return FailedSimulation(j.params, j.job_id, 1)
else:
raise
class Job:
"""
Container for information necessary to perform a single evaluation in the fitting algorithm
"""
# Seeing these logs for cluster-based fitting requires configuring dask to log to the
# "pybnf.algorithms.job" logger
jlogger = logging.getLogger('pybnf.algorithms.job')
def __init__(self, models, params, job_id, output_dir, timeout, calc_future, norm_settings, postproc_settings,
delete_folder=False):
"""
Instantiates a Job
:param models: The models to evaluate
:type models: list of Model instances
:param params: The parameter set with which to evaluate the model
:type params: PSet
:param job_id: Job identification; also the folder name that the job gets saved to
:type job_id: str
        :param output_dir: path to the directory where I should create my simulation folder
:type output_dir: str
:param calc_future: Future for an ObjectiveCalculator containing the objective function and experimental data,
which we can use to calculate the objective value.
:type calc_future: Future
:param norm_settings: Config value for 'normalization': a string representing the normalization type, a dict
mapping exp files to normalization type, or None
:type norm_settings: Union[str, dict, NoneType]
:param postproc_settings: dict mapping (model, suffix) tuples to the path of a Python postprocessing file to
run on the result.
:param delete_folder: If True, delete the folder and files created after the simulation runs
:type delete_folder: bool
"""
self.models = models
self.params = params
self.job_id = job_id
self.calc_future = calc_future
self.norm_settings = norm_settings
self.postproc_settings = postproc_settings
# Whether to show warnings about missing data if the job includes an objective evaluation. Toggle this after
# construction if needed.
self.show_warnings = False
self.home_dir = os.getcwd() # This is safe because it is called from the scheduler, not the workers.
# Force absolute paths for bngcommand and output_dir, because workers do not get the relative path info.
if output_dir[0] == '/':
self.output_dir = output_dir
else:
self.output_dir = self.home_dir + '/' + output_dir
self.timeout = timeout
# Folder where we save the model files and outputs.
self.folder = '%s/%s' % (self.output_dir, self.job_id)
self.delete_folder = delete_folder
def _name_with_id(self, model):
return '%s_%s' % (model.name, self.job_id)
def _run_models(self):
ds = {}
for model in self.models:
model_file_prefix = self._name_with_id(model)
model_with_params = model.copy_with_param_set(self.params)
ds[model.name] = model_with_params.execute(self.folder, model_file_prefix, self.timeout)
return ds
def _copy_log_files(self, failed_logs_dir):
if failed_logs_dir == '':
self.jlogger.error('Cannot save log files without specified directory')
return
for m in self.models:
lf = '%s/%s.log' % (self.folder, self._name_with_id(m))
if os.path.isfile(lf):
self.jlogger.debug('Copying log file %s' % lf)
shutil.copy(lf, failed_logs_dir)
def run_simulation(self, debug=False, failed_logs_dir=''):
"""Runs the simulation and reads in the result"""
# Force absolute path for failed_logs_dir
if len(failed_logs_dir) > 0 and failed_logs_dir[0] != '/':
failed_logs_dir = self.home_dir + '/' + failed_logs_dir
# The check here is in case dask decides to run the same job twice, both of them can complete.
made_folder = False
failures = 0
while not made_folder:
try:
os.mkdir(self.folder)
self.jlogger.debug('Created folder %s for simulation' % self.folder)
made_folder = True
except OSError:
self.jlogger.warning('Failed to create folder %s, trying again.' % self.folder)
failures += 1
self.folder = '%s/%s_rerun%i' % (self.output_dir, self.job_id, failures)
if failures > 1000:
self.jlogger.error('Job %s failed because it was unable to write to the Simulations folder' %
self.job_id)
return FailedSimulation(self.params, self.job_id, 1)
try:
simdata = self._run_models()
res = Result(self.params, simdata, self.job_id)
except (CalledProcessError, FailedSimulationError):
if debug:
self._copy_log_files(failed_logs_dir)
res = FailedSimulation(self.params, self.job_id, 1)
except TimeoutExpired:
if debug:
self._copy_log_files(failed_logs_dir)
res = FailedSimulation(self.params, self.job_id, 0)
except FileNotFoundError:
self.jlogger.exception('File not found during job %s. This should only happen if the fitting '
'is already done.' % self.job_id)
res = FailedSimulation(self.params, self.job_id, 2, sys.exc_info())
except Exception:
if debug:
self._copy_log_files(failed_logs_dir)
print1('A simulation failed with an unknown error. See the log for details, and consider reporting this '
'as a bug.')
self.jlogger.exception('Unknown error during job %s' % self.job_id)
res = FailedSimulation(self.params, self.job_id, 2, sys.exc_info())
else:
if self.calc_future is not None:
res.normalize(self.norm_settings)
try:
res.postprocess_data(self.postproc_settings)
except Exception:
self.jlogger.exception('User-defined post-processing script failed')
traceback.print_exc()
print0('User-defined post-processing script failed')
res.score = np.inf
else:
res.score = self.calc_future.result().evaluate_objective(res.simdata, res.pset, show_warnings=self.show_warnings)
res.out = simdata
if res.score is None:
# res.score = np.inf
res.out = np.inf
logger.warning('Simulation corresponding to Result %s contained NaNs or Infs' % res.name)
logger.warning('Discarding Result %s as having an infinite objective function value' % res.name)
res.simdata = None
if self.delete_folder:
if os.name == 'nt': # Windows
try:
shutil.rmtree(self.folder)
self.jlogger.debug('Removed folder %s' % self.folder)
except OSError:
self.jlogger.error('Failed to remove folder %s.' % self.folder)
else:
try:
run(['rm', '-rf', self.folder], check=True, timeout=1800)
self.jlogger.debug('Removed folder %s' % self.folder)
except (CalledProcessError, TimeoutExpired):
self.jlogger.error('Failed to remove folder %s.' % self.folder)
return res
class JobGroup:
"""
Represents a group of jobs that are identical replicates to be averaged together for smoothing
"""
def __init__(self, job_id, subjob_ids):
"""
:param job_id: The name of the Job this group is representing
:param subjob_ids: A list of the ids of the identical replicate Jobs.
"""
self.job_id = job_id
self.subjob_ids = subjob_ids
self.result_list = []
self.failed = None
def job_finished(self, res):
"""
Called when one job in this group has finished
:param res: Result object for the completed job
:return: Boolean, whether everything in this job group has finished
"""
# Handle edge cases of failed simulations - if we get one FailedSimulation, we declare the group is done,
# and return a FailedSimulation object as the average
if self.failed:
# JobGroup already finished when a previous failed simulation came in.
return False
if isinstance(res, FailedSimulation):
self.failed = res
return True
if res.name not in self.subjob_ids:
raise ValueError('Job group %s received unwanted result %s' % (self.job_id, res.name))
self.result_list.append(res)
return len(self.result_list) == len(self.subjob_ids)
def average_results(self):
"""
To be called after all results are in for this group.
Averages the results and returns a new Result object containing the averages
:return: New Result object with the job_id of this JobGroup and the averaged Data as the simdata
"""
if self.failed:
self.failed.name = self.job_id
return self.failed
        # Iterate through the models and suffixes in the simdata structure, and calculate the average for each
# Data object it contains
avedata = dict()
for m in self.result_list[0].simdata:
avedata[m] = dict()
for suf in self.result_list[0].simdata[m]:
avedata[m][suf] = Data.average([r.simdata[m][suf] for r in self.result_list])
return Result(self.result_list[0].pset, avedata, self.job_id)
class MultimodelJobGroup(JobGroup):
"""
A JobGroup to handle model-level parallelism
"""
def average_results(self):
"""
To be called after all results are in for this group.
Combines all results from the submodels into a single Result object
:return:
"""
if self.failed:
self.failed.name = self.job_id
return self.failed
# Merge all models into a single Result object
final_result = Result(self.result_list[0].pset, dict(), self.job_id)
for res in self.result_list:
final_result.add_result(res)
return final_result
class custom_as_completed(as_completed):
"""
Subclass created to modify a section of dask.distributed code
By using this subclass instead of as_completed, if you get an exception in a job,
that exception is returned as the result, instead of the job disappearing.
"""
@gen.coroutine
def track_future(self, future):
try:
yield _wait(future)
except CancelledError:
pass
if self.with_results:
try:
result = yield future._result(raiseit=True)
except Exception as e:
result = DaskError(e, traceback.format_exc())
with self.lock:
self.futures[future] -= 1
if not self.futures[future]:
del self.futures[future]
if self.with_results:
self.queue.put_nowait((future, result))
else:
self.queue.put_nowait(future)
self._notify()
class DaskError:
"""
Class representing the result of a job that failed due to a raised exception
"""
def __init__(self, error, tb):
self.error = error
self.traceback = tb
class Algorithm(object):
"""
A superclass containing the structures common to all metaheuristic and MCMC-based algorithms
defined in this software suite
"""
def __init__(self, config):
"""
Instantiates an Algorithm with a Configuration object. Also initializes a
Trajectory instance to track the fitting progress, and performs various additional
configuration that is consistent for all algorithms
:param config: The fitting configuration
:type config: Configuration
"""
self.config = config
self.exp_data = self.config.exp_data
self.objective = self.config.obj
logger.debug('Instantiating Trajectory object')
self.trajectory = Trajectory(self.config.config['num_to_output'])
self.job_id_counter = 0
self.output_counter = 0
self.job_group_dir = dict()
self.fail_count = 0
self.success_count = 0
self.max_iterations = config.config['max_iterations']
logger.debug('Creating output directory')
if not os.path.isdir(self.config.config['output_dir']):
os.mkdir(self.config.config['output_dir'])
if self.config.config['simulation_dir']:
self.sim_dir = self.config.config['simulation_dir'] + '/Simulations'
else:
self.sim_dir = self.config.config['output_dir'] + '/Simulations'
self.res_dir = self.config.config['output_dir'] + '/Results'
self.failed_logs_dir = self.config.config['output_dir'] + '/FailedSimLogs'
# Generate a list of variable names
self.variables = self.config.variables
# Store a list of all Model objects. Change this as needed for compatibility with other parts
logger.debug('Initializing models')
self.model_list = self._initialize_models()
self.bootstrap_number = None
self.best_fit_obj = None
self.calc_future = None # Created during Algorithm.run()
self.refine = False
def reset(self, bootstrap):
"""
Resets the Algorithm, keeping loaded variables and models
:param bootstrap: The bootstrap number (None if not bootstrapping)
:type bootstrap: int or None
:return:
"""
logger.info('Resetting Algorithm for another run')
self.trajectory = Trajectory(self.config.config['num_to_output'])
self.job_id_counter = 0
self.output_counter = 0
self.job_group_dir = dict()
self.fail_count = 0
self.success_count = 0
if bootstrap is not None:
self.bootstrap_number = bootstrap
self.sim_dir = self.config.config['output_dir'] + '/Simulations-boot%s' % bootstrap
self.res_dir = self.config.config['output_dir'] + '/Results-boot%s' % bootstrap
self.failed_logs_dir = self.config.config['output_dir'] + '/FailedSimLogs-boot%s' % bootstrap
for boot_dir in (self.sim_dir, self.res_dir, self.failed_logs_dir):
if os.path.exists(boot_dir):
try:
shutil.rmtree(boot_dir)
except OSError:
logger.error('Failed to remove bootstrap directory '+boot_dir)
os.mkdir(boot_dir)
self.best_fit_obj = None
@staticmethod
def should_pickle(k):
"""
Checks to see if key 'k' should be included in pickling. Currently allows all entries in instance dictionary
except for 'trajectory'
:param k:
:return:
"""
return k not in set(['trajectory', 'calc_future'])
def __getstate__(self):
return {k: v for k, v in self.__dict__.items() if self.should_pickle(k)}
def __setstate__(self, state):
self.__dict__.update(state)
try:
backup_params = 'sorted_params_backup.txt' if not self.refine else 'sorted_params_refine_backup.txt'
self.trajectory = Trajectory.load_trajectory('%s/%s' % (self.res_dir, backup_params),
self.config.variables, self.config.config['num_to_output'])
except IOError:
logger.exception('Failed to load trajectory from file')
print1('Failed to load Results/sorted_params_backup.txt . Still resuming your run, but when I save the '
'best fits, it will only be the ones I\'ve seen since resuming.')
self.trajectory = Trajectory(self.config.config['num_to_output'])
def _initialize_models(self):
"""
Checks initial BNGLModel instances from the Configuration object for models that
can be reinstantiated as NetModel instances
:return: list of Model instances
"""
# Todo: Move to config or BNGL model class?
home_dir = os.getcwd()
os.chdir(self.config.config['output_dir']) # requires creation of this directory prior to function call
logger.debug('Copying list of models')
init_model_list = copy.deepcopy(list(self.config.models.values())) # keeps Configuration object unchanged
final_model_list = []
init_dir = os.getcwd() + '/Initialize'
for m in init_model_list:
if isinstance(m, BNGLModel) and m.generates_network:
logger.debug('Model %s requires network generation' % m.name)
if not os.path.isdir(init_dir):
logger.debug('Creating initialization directory: %s' % init_dir)
os.mkdir(init_dir)
os.chdir(init_dir)
gnm_name = '%s_gen_net' % m.name
default_pset = PSet([var.set_value(var.default_value) for var in self.variables])
m.save(gnm_name, gen_only=True, pset=default_pset)
gn_cmd = [self.config.config['bng_command'], '%s.bngl' % gnm_name]
if os.name == 'nt': # Windows
# Explicitly call perl because the #! line in BNG2.pl is not supported.
gn_cmd = ['perl'] + gn_cmd
try:
with open('%s.log' % gnm_name, 'w') as lf:
print2('Generating network for model %s.bngl' % gnm_name)
run(gn_cmd, check=True, stderr=STDOUT, stdout=lf, timeout=self.config.config['wall_time_gen'])
except CalledProcessError as c:
logger.error("Command %s failed in directory %s" % (gn_cmd, os.getcwd()))
logger.error(c.stdout)
print0('Error: Initial network generation failed for model %s... see BioNetGen error log at '
'%s/%s.log' % (m.name, os.getcwd(), gnm_name))
exit(1)
except TimeoutExpired:
logger.debug("Network generation exceeded %d seconds... exiting" %
self.config.config['wall_time_gen'])
print0("Network generation took too long. Increase 'wall_time_gen' configuration parameter")
exit(1)
except:
tb = ''.join(traceback.format_list(traceback.extract_tb(sys.exc_info())))
logger.debug("Other exception occurred:\n%s" % tb)
print0("Unknown error occurred during network generation, see log... exiting")
exit(1)
finally:
os.chdir(home_dir)
logger.info('Output for network generation of model %s logged in %s/%s.log' %
(m.name, init_dir, gnm_name))
final_model_list.append(NetModel(m.name, m.actions, m.suffixes, m.mutants, nf=init_dir + '/' + gnm_name + '.net'))
final_model_list[-1].bng_command = m.bng_command
else:
logger.info('Model %s does not require network generation' % m.name)
final_model_list.append(m)
os.chdir(home_dir)
return final_model_list
def start_run(self):
"""
Called by the scheduler at the start of a fitting run.
Must return a list of PSets that the scheduler should run.
Algorithm subclasses optionally may set the .name field of the PSet objects to give a meaningful unique
identifier such as 'gen0ind42'. If so, they MUST BE UNIQUE, as this determines the folder name.
Uniqueness will not be checked elsewhere.
:return: list of PSets
"""
raise NotImplementedError("Subclasses must implement start_run()")
def got_result(self, res):
"""
Called by the scheduler when a simulation is completed, with the pset that was run, and the resulting simulation
data
:param res: result from the completed simulation
:type res: Result
:return: List of PSet(s) to be run next or 'STOP' string.
"""
raise NotImplementedError("Subclasses must implement got_result()")
def add_to_trajectory(self, res):
"""
Adds the information from a Result to the Trajectory instance
"""
# Evaluate objective if it wasn't done on workers.
if res.score is None: # Check if the objective wasn't evaluated on the workers
res.normalize(self.config.config['normalization'])
# Do custom postprocessing, if any
try:
res.postprocess_data(self.config.postprocessing)
except Exception:
logger.exception('User-defined post-processing script failed')
traceback.print_exc()
print0('User-defined post-processing script failed')
res.score = np.inf
else:
res.score = self.objective.evaluate_multiple(res.simdata, self.exp_data, res.pset, self.config.constraints)
if res.score is None: # Check if the above evaluation failed
res.score = np.inf
logger.warning('Simulation corresponding to Result %s contained NaNs or Infs' % res.name)
logger.warning('Discarding Result %s as having an infinite objective function value' % res.name)
print1('Simulation data in Result %s has NaN or Inf values. Discarding this parameter set' % res.name)
logger.info('Adding Result %s to Trajectory with score %.4f' % (res.name, res.score))
self.trajectory.add(res.pset, res.score, res.name)
def random_pset(self):
"""
Generates a random PSet based on the distributions and bounds for each parameter specified in the configuration
:return:
"""
logger.debug("Generating a randomly distributed PSet")
pset_vars = []
for var in self.variables:
pset_vars.append(var.sample_value())
return PSet(pset_vars)
def random_latin_hypercube_psets(self, n):
"""
Generates n random PSets with a latin hypercube distribution
More specifically, the uniform_var and loguniform_var variables follow the latin hypercube distribution,
        while lognormal variables are randomized normally.
:param n: Number of psets to generate
:return:
"""
logger.debug("Generating PSets using Latin hypercube sampling")
num_uniform_vars = 0
for var in self.variables:
if var.type == 'uniform_var' or var.type == 'loguniform_var':
num_uniform_vars += 1
# Generate latin hypercube of dimension = number of uniformly distributed variables.
rands = latin_hypercube(n, num_uniform_vars)
psets = []
for row in rands:
# Initialize the variables
# Convert the 0 to 1 random numbers to the required variable range
pset_vars = []
rowindex = 0
for var in self.variables:
if var.type == 'uniform_var':
rescaled_val = var.p1 + row[rowindex]*(var.p2-var.p1)
pset_vars.append(var.set_value(rescaled_val))
rowindex += 1
elif var.type == 'loguniform_var':
rescaled_val = exp10(np.log10(var.p1) + row[rowindex]*(np.log10(var.p2)-np.log10(var.p1)))
pset_vars.append(var.set_value(rescaled_val))
rowindex += 1
else:
pset_vars.append(var.sample_value())
psets.append(PSet(pset_vars))
return psets
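    # Sketch of the sampling scheme: latin_hypercube(n, d) (defined elsewhere in this module)
    # is expected to return an n x d array with one point in each of the n equal-width strata
    # of every uniform dimension; each row is then rescaled into the corresponding variable's
    # [p1, p2] range (in log10 space for loguniform_var), while all other variable types fall
    # back to var.sample_value().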
def make_job(self, params):
"""
Creates a new Job using the specified params, and additional specifications that are already saved in the
Algorithm object
If smoothing is turned on, makes n identical Jobs and a JobGroup
:param params:
:type params: PSet
:return: list of Jobs (of length equal to smoothing setting)
"""
if params.name:
job_id = params.name
else:
self.job_id_counter += 1
job_id = 'sim_%i' % self.job_id_counter
logger.debug('Creating Job %s' % job_id)
if self.config.config['smoothing'] > 1:
# Create multiple identical Jobs for use with smoothing
newjobs = []
newnames = []
for i in range(self.config.config['smoothing']):
thisname = '%s_rep%i' % (job_id, i)
newnames.append(thisname)
# calc_future is supposed to be None here - the workers don't have enough info to calculate the
# objective on their own
newjobs.append(Job(self.model_list, params, thisname,
self.sim_dir, self.config.config['wall_time_sim'], self.calc_future,
self.config.config['normalization'], dict(),
bool(self.config.config['delete_old_files'])))
new_group = JobGroup(job_id, newnames)
for n in newnames:
self.job_group_dir[n] = new_group
return newjobs
elif self.config.config['parallelize_models'] > 1:
# Partition our model list into n different jobs
newjobs = []
newnames = []
model_count = len(self.model_list)
rep_count = self.config.config['parallelize_models']
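# Illustrative example (hypothetical numbers): with model_count=5 and rep_count=2, the slice arithmetic
# below assigns models[0:2] to part 0 and models[2:5] to part 1, so every model appears in exactly one job.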
for i in range(rep_count):
thisname = '%s_part%i' % (job_id, i)
newnames.append(thisname)
# calc_future is supposed to be None here - the workers don't have enough info to calculate the
# objective on their own
newjobs.append(Job(self.model_list[model_count*i//rep_count:model_count*(i+1)//rep_count],
params, thisname, self.sim_dir, self.config.config['wall_time_sim'],
self.calc_future, self.config.config['normalization'], dict(),
bool(self.config.config['delete_old_files'])))
new_group = MultimodelJobGroup(job_id, newnames)
for n in newnames:
self.job_group_dir[n] = new_group
return newjobs
else:
# Create a single job
return [Job(self.model_list, params, job_id,
self.sim_dir, self.config.config['wall_time_sim'], self.calc_future,
self.config.config['normalization'], self.config.postprocessing,
bool(self.config.config['delete_old_files']))]
def output_results(self, name='', no_move=False):
"""
Tells the Trajectory to output a log file now with the current best fits.
This should be called periodically by each Algorithm subclass, and is called by the Algorithm class at the end
of the simulation.
:return:
:param name: Custom string to add to the saved filename. If omitted, we just use a running counter of the
number of times we've outputted.
:param no_move: If True, overrides the config setting delete_old_files=2, and does not move the result to
overwrite sorted_params.txt
:type name: str
"""
if name == '':
name = str(self.output_counter)
self.output_counter += 1
if self.refine:
name = 'refine_%s' % name
filepath = '%s/sorted_params_%s.txt' % (self.res_dir, name)
logger.info('Outputting results to file %s' % filepath)
self.trajectory.write_to_file(filepath)
# If the user has asked for fewer output files, each time we're here, move the new file to
# Results/sorted_params.txt, overwriting the previous one.
if self.config.config['delete_old_files'] >= 2 and not no_move:
logger.debug("Overwriting previous 'sorted_params.txt'")
noname_filepath = '%s/sorted_params.txt' % self.res_dir
if os.path.isfile(noname_filepath):
os.remove(noname_filepath)
os.replace(filepath, noname_filepath)
def backup(self, pending_psets=()):
"""
Create a backup of this algorithm object that can be reloaded later to resume the run
:param pending_psets: Iterable of PSets that are currently submitted as jobs, and will need to get re-submitted
when resuming the algorithm
:return:
"""
logger.info('Saving a backup of the algorithm')
# Save a backup of the PSets
self.output_results(name='backup', no_move=True)
# Pickle the algorithm
# Save to a temporary file first, so we can't get interrupted and left with no backup.
picklepath = '%s/alg_backup.bp' % self.config.config['output_dir']
temppicklepath = '%s/alg_backup_temp.bp' % self.config.config['output_dir']
try:
f = open(temppicklepath, 'wb')
pickle.dump((self, pending_psets), f)
f.close()
os.replace(temppicklepath, picklepath)
except IOError as e:
logger.exception('Failed to save backup of algorithm')
print1('Failed to save backup of the algorithm.\nSee log for more information')
if e.strerror == 'Too many open files':
print0('Too many open files! See "Troubleshooting" in the documentation for how to deal with this '
'problem.')
def get_backup_every(self):
"""
Returns the number of individual simulation results after which the algorithm should be backed up.
Makes a good guess, but could be overridden in a subclass
"""
return self.config.config['backup_every'] * self.config.config['population_size'] * \
self.config.config['smoothing']
def add_iterations(self, n):
"""
Adds n additional iterations to the algorithm.
May be overridden in subclasses that don't use self.max_iterations to track the iteration count
"""
self.max_iterations += n
def run(self, client, resume=None, debug=False):
"""Main loop for executing the algorithm"""
if self.refine:
logger.debug('Setting up Simplex refinement of previous algorithm')
backup_every = self.get_backup_every()
sim_count = 0
logger.debug('Generating initial parameter sets')
if resume:
psets = resume
logger.debug('Resume algorithm with the following PSets: %s' % [p.name for p in resume])
else:
psets = self.start_run()
if not os.path.isdir(self.failed_logs_dir):
os.mkdir(self.failed_logs_dir)
if self.config.config['local_objective_eval'] == 0 and self.config.config['smoothing'] == 1 and \
self.config.config['parallelize_models'] == 1:
calculator = ObjectiveCalculator(self.objective, self.exp_data, self.config.constraints)
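# client.scatter(..., broadcast=True) places a copy of the ObjectiveCalculator on every worker, so each
# simulation can be scored on the worker that ran it; otherwise calc_future stays None and
# add_to_trajectory() evaluates the objective here instead.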
[self.calc_future] = client.scatter([calculator], broadcast=True)
else:
self.calc_future = None
jobs = []
pending = dict() # Maps pending futures to tuple (PSet, job_id).
for p in psets:
jobs += self.make_job(p)
jobs[0].show_warnings = True # For only the first job submitted, show warnings if exp data is unused.
logger.info('Submitting initial set of %d Jobs' % len(jobs))
futures = []
for job in jobs:
f = client.submit(run_job, job, True, self.failed_logs_dir)
futures.append(f)
pending[f] = (job.params, job.job_id)
pool = custom_as_completed(futures, with_results=True, raise_errors=False)
backed_up = True
while True:
if sim_count % backup_every == 0 and not backed_up:
self.backup(set([pending[fut][0] for fut in pending]))
backed_up = True
f, res = next(pool)
if isinstance(res, DaskError):
if isinstance(res.error, PybnfError):
raise res.error # User-targeted error should be raised instead of skipped
logger.error('Job failed with an exception')
logger.error(res.traceback)
res = FailedSimulation(pending[f][0], pending[f][1], 3)
# Handle if this result is one of multiple instances for smoothing
del pending[f]
if self.config.config['smoothing'] > 1 or self.config.config['parallelize_models'] > 1:
group = self.job_group_dir.pop(res.name)
done = group.job_finished(res)
if not done:
continue
res = group.average_results()
sim_count += 1
backed_up = False
if isinstance(res, FailedSimulation):
if res.fail_type >= 1:
self.fail_count += 1
tb = '\n'+res.traceback if res.fail_type == 1 else ''
logger.debug('Job %s failed with code %d%s' % (res.name, res.fail_type, tb))
if res.fail_type >= 1:
print1('Job %s failed' % res.name)
else:
print1('Job %s timed out' % res.name)
if self.success_count == 0 and self.fail_count >= 100:
raise PybnfError('Aborted because all jobs are failing',
'Your simulations are failing to run. Logs from failed simulations are saved in '
'the FailedSimLogs directory. For help troubleshooting this error, refer to '
'https://pybnf.readthedocs.io/en/latest/troubleshooting.html#failed-simulations')
elif isinstance(res, CancelledError):
raise PybnfError('PyBNF has encountered a fatal error. If the error occurred on the initial run, please verify that '
'your model is functional. To resume the run, please restart PyBNF using the -r flag.')
else:
self.success_count += 1
logger.debug('Job %s complete' % res.name)
self.add_to_trajectory(res)
if res.score < self.config.config['min_objective']:
logger.info('Minimum objective value achieved')
print1('Minimum objective value achieved')
break
response = self.got_result(res)
if response == 'STOP':
self.best_fit_obj = self.trajectory.best_score()
logger.info("Stop criterion satisfied with objective function value of %s" % self.best_fit_obj)
print1("Stop criterion satisfied with objective function value of %s" % self.best_fit_obj)
break
else:
new_futures = []
for ps in response:
new_js = self.make_job(ps)
for new_j in new_js:
new_f = client.submit(run_job, new_j, (debug or self.fail_count < 10), self.failed_logs_dir)
pending[new_f] = (ps, new_j.job_id)
new_futures.append(new_f)
logger.debug('Submitting %d new Jobs' % len(new_futures))
pool.update(new_futures)
logger.info("Cancelling %d pending jobs" % len(pending))
client.cancel(list(pending.keys()))
self.output_results('final')
# Copy the best simulations into the results folder
best_name = self.trajectory.best_fit_name()
best_pset = self.trajectory.best_fit()
logger.info('Copying simulation results from best fit parameter set to Results/ folder')
for m in self.config.models:
this_model = self.config.models[m]
to_save = this_model.copy_with_param_set(best_pset)
to_save.save_all('%s/%s_%s' % (self.res_dir, to_save.name, best_name))
if self.config.config['delete_old_files'] == 0:
for simtype, suf in this_model.suffixes:
if simtype == 'simulate':
ext = 'gdat'
else: # parameter_scan
ext = 'scan'
if self.config.config['smoothing'] > 1:
best_name = best_name + '_rep0' # Look for one specific replicate of the data
try:
shutil.copy('%s/%s/%s_%s_%s.%s' % (self.sim_dir, best_name, m, best_name, suf, ext),
'%s' % self.res_dir)
except FileNotFoundError:
logger.error('Cannot find files corresponding to best fit parameter set')
print0('Could not find your best fit gdat file. This could happen if all of the simulations\n'
' in your run failed, or if that gdat file was somehow deleted during the run.')
if self.config.config['delete_old_files'] > 0 and self.config.config['save_best_data']:
# Rerun the best fit parameter set so the gdat file(s) are saved in the Results folder.
logger.info('Rerunning best fit parameter set to save data files.')
# Enable saving files for SBML models
for m in self.model_list:
if isinstance(m, SbmlModelNoTimeout):
m.save_files = True
finaljob = Job(self.model_list, best_pset, 'bestfit',
self.sim_dir, self.config.config['wall_time_sim'], None,
self.config.config['normalization'], self.config.postprocessing,
False)
try:
run_job(finaljob)
except Exception:
logger.exception('Failed to rerun best fit parameter set')
print1('Failed to rerun best fit parameter set. See log for details')
else:
# Copy all gdat and scan to Results
for fname in glob(self.sim_dir+'/bestfit/*.gdat') + glob(self.sim_dir+'/bestfit/*.scan'):
shutil.copy(fname, self.res_dir)
# Disable saving files for SBML models (in case there is future bootstrapping or refinement)
for m in self.model_list:
if isinstance(m, SbmlModelNoTimeout):
m.save_files = False
if self.bootstrap_number is None or self.bootstrap_number == self.config.config['bootstrap']:
try:
os.replace('%s/alg_backup.bp' % self.config.config['output_dir'],
'%s/alg_%s.bp' % (self.config.config['output_dir'],
('finished' if not self.refine else 'refine_finished')))
logger.info('Renamed pickled algorithm backup to alg_%s.bp' %
('finished' if not self.refine else 'refine_finished'))
except OSError:
logger.warning('Tried to move pickled algorithm, but it was not found')
if (isinstance(self, SimplexAlgorithm) or self.config.config['refine'] != 1) and self.bootstrap_number is None:
# End of fitting; delete unneeded files
if self.config.config['delete_old_files'] >= 1:
if os.name == 'nt': # Windows
try:
shutil.rmtree(self.sim_dir)
except OSError:
logger.error('Failed to remove simulations directory '+self.sim_dir)
else:
run(['rm', '-rf', self.sim_dir]) # More likely to succeed than rmtree()
logger.info("Fitting complete")
def cleanup(self):
"""
Called before the program exits due to an exception.
:return:
"""
self.output_results('end')
class ParticleSwarm(Algorithm):
"""
Implements particle swarm optimization.
The implementation roughly follows Moraes et al 2015, although it is reorganized to better suit PyBNF's format.
Note the global convergence criterion discussed in that paper is not used (it would require too long a
computation); instead, the run stops after max_iterations iterations, after adaptive_n_stop "unproductive"
iterations, or (if v_stop is set) when the maximum particle speed drops below v_stop.
"""
def __init__(self, config):
# Former params that are now part of the config
# variable_list, num_particles, max_evals, cognitive=1.5, social=1.5, w0=1.,
# wf=0.1, nmax=30, n_stop=np.inf, absolute_tol=0., relative_tol=0.)
"""
Initial configuration of particle swarm optimizer
:param config: The fitting configuration
:type config: Configuration
The config should contain the following definitions:
population_size - Number of particles in the swarm
max_iterations - Maximum number of iterations. More precisely, the max number of simulations run is this times
the population size.
cognitive - Acceleration toward the particle's own best
social - Acceleration toward the global best
particle_weight - Inertia weight of the particle (default 1)
The following config parameters relate to the complicated method presented in Moraes et al for adjusting the
inertia weight as you go. These are optional, and this feature will be disabled (by setting
particle_weight_final = particle_weight) if these are not included.
It remains to be seen whether this method is at all useful for our applications.
particle_weight_final - Inertia weight at the end of the simulation
adaptive_n_max - Controls how quickly we approach wf - After nmax "unproductive" iterations, we are halfway from
w0 to wf
adaptive_n_stop - End the entire run if we have had this many "unproductive" iterations (should be more than
adaptive_n_max)
adaptive_abs_tol - Tolerance for determining if an iteration was "unproductive". An iteration is unproductive if the
change in global_best is less than absolute_tol + relative_tol * global_best
adaptive_rel_tol - Tolerance 2 for determining if an iteration was "unproductive" (see above)
"""
super(ParticleSwarm, self).__init__(config)
# This default value gets special treatment because if missing, it should take the value of particle_weight,
# disabling the adaptive weight change entirely.
if 'particle_weight_final' not in self.config.config:
self.config.config['particle_weight_final'] = self.config.config['particle_weight']
# Save config parameters
self.c1 = self.config.config['cognitive']
self.c2 = self.config.config['social']
self.max_evals = self.config.config['population_size'] * self.config.config['max_iterations']
self.output_every = self.config.config['population_size'] * self.config.config['output_every']
self.num_particles = self.config.config['population_size']
# Todo: Nice error message if a required key is missing
self.w0 = self.config.config['particle_weight']
self.wf = self.config.config['particle_weight_final']
self.nmax = self.config.config['adaptive_n_max']
self.n_stop = self.config.config['adaptive_n_stop']
self.absolute_tol = self.config.config['adaptive_abs_tol']
self.relative_tol = self.config.config['adaptive_rel_tol']
self.nv = 0 # Counter that controls the current weight. Counts number of "unproductive" iterations.
self.num_evals = 0 # Counter for the total number of results received
# Initialize storage for the swarm data
self.swarm = [] # List of lists of the form [PSet, velocity]. Velocity is stored as a dict with the same keys
# as PSet
self.pset_map = dict()  # Maps each PSet to its particle number, for easy lookup.
self.bests = [[None, np.inf]] * self.num_particles # The best result for each particle: list of the
# form [PSet, objective]
self.global_best = [None, np.inf] # The best result for the whole swarm
self.last_best = np.inf
def reset(self, bootstrap=None):
super(ParticleSwarm, self).reset(bootstrap)
self.nv = 0
self.num_evals = 0
self.swarm = []
self.pset_map = dict()
self.bests = [[None, np.inf]] * self.num_particles
self.global_best = [None, np.inf]
self.last_best = np.inf
def start_run(self):
"""
Start the run by initializing n particles at random positions and velocities
:return:
"""
print2('Running Particle Swarm Optimization with %i particles for %i total simulations' %
(self.num_particles, self.max_evals))
if self.config.config['initialization'] == 'lh':
new_params_list = self.random_latin_hypercube_psets(self.num_particles)
else:
new_params_list = [self.random_pset() for i in range(self.num_particles)]
for i in range(len(new_params_list)):
p = new_params_list[i]
p.name = 'iter0p%i' % i
# As suggested by Engelbrecht 2012, set all initial velocities to 0
new_velocity = dict({v.name: 0. for v in self.variables})
self.swarm.append([p, new_velocity])
self.pset_map[p] = len(self.swarm)-1 # Index of the newly added PSet.
return [particle[0] for particle in self.swarm]
def got_result(self, res):
"""
Updates particle velocity and position after a simulation completes.
:param res: Result object containing the run PSet and the resulting Data.
:return:
"""
paramset = res.pset
score = res.score
self.num_evals += 1
if self.num_evals % self.num_particles == 0:
if (self.num_evals / self.num_particles) % 10 == 0:
print1('Completed %i of %i simulations' % (self.num_evals, self.max_evals))
else:
print2('Completed %i of %i simulations' % (self.num_evals, self.max_evals))
print2('Current best score: %f' % self.global_best[1])
# End of one "pseudoflight", check if it was productive.
if (self.last_best != np.inf and
np.abs(self.last_best - self.global_best[1]) <
self.absolute_tol + self.relative_tol * self.last_best):
self.nv += 1
self.last_best = self.global_best[1]
# Check stop criterion
if self.config.config['v_stop'] > 0:
max_speed = max([abs(v) for p in self.swarm for v in p[1].values()])
if max_speed < self.config.config['v_stop']:
logger.info('Stopping particle swarm because the max speed is %s' % max_speed)
return 'STOP'
if self.num_evals % self.output_every == 0:
self.output_results()
p = self.pset_map.pop(paramset) # Particle number
# Update best scores if needed.
if score <= self.bests[p][1]:
self.bests[p] = [paramset, score]
if score <= self.global_best[1]:
self.global_best = [paramset, score]
# Update own position and velocity
# The order matters - updating velocity first seems to make the best use of our current info.
w = self.w0 + (self.wf - self.w0) * self.nv / (self.nv + self.nmax)
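# Weight interpolation: with nv "unproductive" iterations so far, w = w0 + (wf - w0)*nv/(nv + nmax), so w
# starts at w0 and is halfway to wf after nmax unproductive iterations (and stays at w0 when wf == w0).
# The update below is the standard PSO velocity rule, applied per parameter:
#   v <- w*v + c1*r1*(personal_best - x) + c2*r2*(global_best - x)
# where r1, r2 are fresh uniform(0,1) draws and diff() presumably returns the (possibly log-space) difference.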
self.swarm[p][1] = \
{v.name:
w * self.swarm[p][1][v.name] +
self.c1 * np.random.random() * self.bests[p][0].get_param(v.name).diff(self.swarm[p][0].get_param(v.name)) +
self.c2 * np.random.random() * self.global_best[0].get_param(v.name).diff(self.swarm[p][0].get_param(v.name))
for v in self.variables}
# Manually check to determine if reflection occurred (i.e. attempted assigning of variable outside its bounds)
# If so, update based on reflection protocol and set velocity to 0
new_vars = []
for v in self.swarm[p][0]:
new_vars.append(v.add(self.swarm[p][1][v.name]))
if v.log_space:
new_val = 10.**(np.log10(v.value) + self.swarm[p][1][v.name])
else:
new_val = v.value + self.swarm[p][1][v.name]
if new_val < v.lower_bound or v.upper_bound < new_val:
self.swarm[p][1][v.name] = 0.0
new_pset = PSet(new_vars)
self.swarm[p][0] = new_pset
# This will cause a crash if new_pset happens to be the same as an already running pset in pset_map.
# This could come up in practice if all parameters have hit a box constraint.
# As a simple workaround, perturb the parameters slightly
while new_pset in self.pset_map:
new_pset = PSet([v.add_rand(-1e-6, 1e-6) for v in self.swarm[p][0]])
self.pset_map[new_pset] = p
# Set the new name: the old pset name is iter##p##
# Extract the iter number
iternum = int(re.search('iter([0-9]+)', paramset.name).groups()[0])
new_pset.name = 'iter%ip%i' % (iternum+1, p)
# Check for stopping criteria
if self.num_evals >= self.max_evals or self.nv >= self.n_stop:
return 'STOP'
return [new_pset]
def add_iterations(self, n):
self.max_evals += n * self.config.config['population_size']
class DifferentialEvolutionBase(Algorithm):
def __init__(self, config):
super(DifferentialEvolutionBase, self).__init__(config)
self.mutation_rate = config.config['mutation_rate']
self.mutation_factor = config.config['mutation_factor']
self.max_iterations = config.config['max_iterations']
self.stop_tolerance = config.config['stop_tolerance']
self.strategy = config.config['de_strategy']
options = ('rand1', 'rand2', 'best1', 'best2', 'all1', 'all2')
if self.strategy not in options:
raise PybnfError('Invalid differential evolution strategy "%s". Options are: %s' %
(self.strategy, ','.join(options)))
def new_individual(self, individuals, base_index=None):
"""
Create a new individual for the specified island, according to the set strategy
:param base_index: The index to use for the new individual, or None for a random index.
:return:
"""
# Choose a starting parameter set (either a random one or the base_index specified)
# and others to cross over (always random)
if '1' in self.strategy:
pickn = 3
else:
pickn = 5
# Choose pickn random unique indices, or if base_index was given, choose base_index followed by pickn-1 unique
# indices
picks = np.random.choice(len(individuals), pickn, replace=False)
if base_index is not None:
if base_index in picks:
# If we accidentally picked base_index, replace it with picks[0], preserving uniqueness in our list
iswitch = list(picks).index(base_index)
picks[iswitch] = picks[0]
# Now overwrite picks[0] with base_index. If we have base_index, picks[0] was an "extra pick" we only needed
# in case we sampled base_index and had to replace it.
picks[0] = base_index
base = individuals[picks[0]]
others = [individuals[p] for p in picks[1:]]
# Iterate through parameters; decide whether to mutate or leave the same.
new_pset_vars = []
for p in base:
if np.random.random() < self.mutation_rate:
if '1' in self.strategy:
update_val = self.mutation_factor * others[0].get_param(p.name).diff(others[1].get_param(p.name))
else:
update_val = self.mutation_factor * others[0].get_param(p.name).diff(others[1].get_param(p.name)) +\
self.mutation_factor * others[2].get_param(p.name).diff(others[3].get_param(p.name))
new_pset_vars.append(p.add(update_val))
else:
new_pset_vars.append(p)
return PSet(new_pset_vars)
def start_run(self):
return NotImplementedError("start_run() not implemented in DifferentialEvolutionBase class")
def got_result(self, res):
return NotImplementedError("got_result() not implemented in DifferentialEvolutionBase class")
class DifferentialEvolution(DifferentialEvolutionBase):
"""
Implements the parallelized, island-based differential evolution algorithm
described in Penas et al 2015.
In some cases, I had to make my own decisions for specifics I couldn't find in the original paper. Namely:
At each migration, a user-defined number of individuals are migrated from each island. For each individual, a
random index is chosen; the same index for all islands. A random permutation is used to redistribute individuals
with that index to different islands.
Each island performs its migration individually, on the first callback when all islands are ready for that
migration. It receives individuals from the migration iteration, regardless of what the current iteration is.
This can sometimes lead to wasted effort.
For example, suppose migration is set to occur at iteration 40, but island 1 has reached iteration 42 by the time
all islands reach 40. Individual j on island 1 after iteration 42 gets replaced with individual j on island X
after iteration 40. Some other island Y receives individual j on island 1 after iteration 40.
"""
def __init__(self, config):
"""
Initializes algorithm based on the config object.
The following config keys specify algorithm parameters. For more information, see config_documentation.txt
population_size
num_islands
max_iterations
mutation_rate
mutation_factor
migrate_every
num_to_migrate
"""
super(DifferentialEvolution, self).__init__(config)
self.num_islands = config.config['islands']
self.num_per_island = int(config.config['population_size'] / self.num_islands)
if self.num_per_island < 3:
self.num_per_island = 3
if self.num_islands == 1:
print1('Differential evolution requires a population size of at least 3. Increased the population size '
'to 3.')
logger.warning('Increased population size to minimum allowed value of 3')
else:
print1('Island-based differential evolution requires a population size of at least 3 times '
'the number of islands. Increased the population size to %i.' % (3*self.num_islands))
logger.warning('Increased population size to minimum allowed value of 3 per island')
if config.config['population_size'] % config.config['islands'] != 0:
logger.warning('Reduced population_size to %i to evenly distribute it over %i islands' %
(self.num_islands * self.num_per_island, self.num_islands))
self.migrate_every = config.config['migrate_every']
if self.num_islands == 1:
self.migrate_every = np.inf
self.num_to_migrate = config.config['num_to_migrate']
self.island_map = dict() # Maps each proposed PSet to its location (island, individual_i)
self.iter_num = [0] * self.num_islands # Count the number of completed iterations on each island
self.waiting_count = [] # Count of the number of PSets that are pending evaluation on the current iteration of each island.
self.individuals = [] # Nested list; individuals[i][j] gives individual j on island i.
self.proposed_individuals = [] # Nested list of the same shape, gives individuals proposed for replacement in next generation
self.fitnesses = [] # Nested list of same shape, gives fitness of each individual
self.migration_ready = [0] * self.num_islands # What migration number is each island ready for
self.migration_done = [0] * self.num_islands # What migration number has each island completed
# These variables store data related to individual migrations.
# Each one has migration number as keys. When the first island starts migration, the required entries are
# created. When the last island completes migration, they are deleted to keep these structures small.
self.migration_transit = dict() # Store (PSet, fitness) tuples here that are getting migrated - one list per island
self.migration_indices = dict() # Which individual numbers are migrating in migration i - a single tuple for
# each migration, used for all islands
self.migration_perms = dict() # How do we rearrange between islands on migration i?
# For each migration, a list of num_to_migrate permutations of range(num_islands)
def reset(self, bootstrap=None):
super(DifferentialEvolution, self).reset(bootstrap)
self.island_map = dict()
self.iter_num = [0] * self.num_islands
self.waiting_count = []
self.individuals = []
self.proposed_individuals = []
self.fitnesses = []
self.migration_ready = [0] * self.num_islands
self.migration_done = [0] * self.num_islands
self.migration_transit = dict()
self.migration_indices = dict()
self.migration_perms = dict()
def start_run(self):
if self.num_islands == 1:
print2('Running Differential Evolution with population size %i for up to %i iterations' %
(self.num_per_island, self.max_iterations))
else:
print2('Running island-based Differential Evolution with %i islands of %i individuals each, '
'for up to %i iterations' % (self.num_islands, self.num_per_island, self.max_iterations))
# Initialize random individuals
if self.config.config['initialization'] == 'lh':
psets = self.random_latin_hypercube_psets(self.num_islands*self.num_per_island)
self.proposed_individuals = [psets[i * self.num_per_island: (i + 1) * self.num_per_island]
for i in range(self.num_islands)]
else:
self.proposed_individuals = [[self.random_pset() for i in range(self.num_per_island)]
for j in range(self.num_islands)]
# Initialize the individual list to empty, will be filled with the proposed_individuals once their fitnesses
# are computed.
self.individuals = [[None
for i in range(self.num_per_island)]
for j in range(self.num_islands)]
# Set all fitnesses to Inf, guaranteeing a replacement by the first proposed individual
self.fitnesses = [[np.inf
for i in range(self.num_per_island)]
for j in range(self.num_islands)]
for i in range(len(self.proposed_individuals)):
for j in range(len(self.proposed_individuals[i])):
self.island_map[self.proposed_individuals[i][j]] = (i, j)
if self.num_islands == 1:
self.proposed_individuals[i][j].name = 'gen0ind%i' % j
else:
self.proposed_individuals[i][j].name = 'gen0isl%iind%i' % (i, j)
self.waiting_count = [self.num_per_island] * self.num_islands
return [ind for island in self.proposed_individuals for ind in island]
def got_result(self, res):
"""
Called when a simulation run finishes
This is not thread safe - the Scheduler must ensure only one process at a time enters
this function.
(or, I should rewrite this function to make it thread safe)
:param res: Result object
:return:
"""
pset = res.pset
score = res.score
# Calculate the fitness of this individual, and replace if it is better than the previous one.
island, j = self.island_map.pop(pset)
fitness = score
if fitness <= self.fitnesses[island][j]:
self.individuals[island][j] = pset
self.fitnesses[island][j] = fitness
self.waiting_count[island] -= 1
# Determine if the current iteration is over for the current island
if self.waiting_count[island] == 0:
self.iter_num[island] += 1
if min(self.iter_num) == self.iter_num[island]:
# Last island to complete this iteration
if self.iter_num[island] % self.config.config['output_every'] == 0:
self.output_results()
if self.iter_num[island] % 10 == 0:
print1('Completed %i of %i iterations' % (self.iter_num[island], self.max_iterations))
else:
print2('Completed %i of %i iterations' % (self.iter_num[island], self.max_iterations))
print2('Current population fitnesses:')
for l in self.fitnesses:
print2(sorted(l))
if self.iter_num[island] == self.max_iterations:
# Submit no more jobs for this island
# Once all islands reach this, simulation is over.
if min(self.iter_num) == self.max_iterations:
return 'STOP'
else:
return []
if self.iter_num[island] % self.migrate_every == 0:
# This island prepares for migration
migration_num = int(self.iter_num[island] / self.migrate_every)
if max(self.migration_ready) < migration_num:
# This is the first island to reach this migration.
# Need to set global parameters for this migration.
self.migration_transit[migration_num] = [list() for i in range(self.num_islands)]
self.migration_indices[migration_num] = np.random.choice(range(self.num_per_island),
size=self.num_to_migrate, replace=False)
self.migration_perms[migration_num] = [np.random.permutation(self.num_islands)
for i in range(self.num_to_migrate)]
logger.debug('Island %i just set up the migration.' % island)
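# Illustrative example (hypothetical numbers): with num_islands=3 and num_to_migrate=2, migration_indices
# might be [4, 7] and migration_perms two permutations of (0, 1, 2). Every island then contributes its
# individuals 4 and 7 to migration_transit, and later replaces them with the same-index individuals taken
# from the island given by the corresponding permutation entry.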
# Send the required PSets to migration_transit
for j in self.migration_indices[migration_num]:
self.migration_transit[migration_num][island].append((self.individuals[island][j],
self.fitnesses[island][j]))
# Tell other islands that this one is ready for this migration.
self.migration_ready[island] = migration_num
if self.migration_done[island] < min(self.migration_ready):
# This island performs a migration
logger.debug('Island %i is migrating!' % island)
migration_num = self.migration_done[island] + 1
# Fetch the appropriate new individuals from migration_transit
for migrater_index in range(self.num_to_migrate):
j = self.migration_indices[migration_num][migrater_index] # Index of the individual
newisland = self.migration_perms[migration_num][migrater_index][island]
self.individuals[island][j], self.fitnesses[island][j] = \
self.migration_transit[migration_num][newisland][migrater_index]
logger.debug('Island %i gained new individual with fitness %f' % (island, self.fitnesses[island][j]))
self.migration_done[island] = migration_num
if min(self.migration_done) == migration_num:
# This is the last island to complete this migration
# Delete the migration data to free space.
del self.migration_transit[migration_num]
del self.migration_perms[migration_num]
del self.migration_indices[migration_num]
# Set up the next generation
best = np.argmin(self.fitnesses[island])
for jj in range(self.num_per_island):
if 'best' in self.strategy:
new_pset = self.new_individual(self.individuals[island], best)
elif 'all' in self.strategy:
new_pset = self.new_individual(self.individuals[island], jj)
else:
new_pset = self.new_individual(self.individuals[island])
# If the new pset is a duplicate of one already in the island_map, it will cause problems.
# As a workaround, perturb it slightly.
while new_pset in self.island_map:
new_pset = PSet([v.add(np.random.uniform(-1e-6, 1e-6)) for v in new_pset])
self.proposed_individuals[island][jj] = new_pset
self.island_map[new_pset] = (island, jj)
if self.num_islands == 1:
new_pset.name = 'gen%iind%i' % (self.iter_num[island], jj)
else:
new_pset.name = 'gen%iisl%iind%i' % (self.iter_num[island], island, jj)
self.waiting_count[island] = self.num_per_island
if self.iter_num[island] % 20 == 0:
logger.info('Island %i completed %i iterations' % (island, self.iter_num[island]))
# print(sorted(self.fitnesses[island]))
# Convergence check
if (np.min(self.fitnesses) != 0) and (np.max(self.fitnesses) / np.min(self.fitnesses) < 1. + self.stop_tolerance):
return 'STOP'
# Return a copy, so our internal data structure is not tampered with.
return copy.copy(self.proposed_individuals[island])
else:
# Add no new jobs, wait for this generation to complete.
return []
class AsynchronousDifferentialEvolution(DifferentialEvolutionBase):
"""
Implements a simple asynchronous differential evolution algorithm.
Contains no islands or migrations. Instead, each time a PSet finishes, proposes a new PSet at the same index using
the standard DE formula and whatever the current population happens to be at the time.
"""
def __init__(self, config):
"""
Initializes algorithm based on the config object.
"""
super(AsynchronousDifferentialEvolution, self).__init__(config)
self.population_size = config.config['population_size']
if self.population_size < 3:
self.population_size = 3
self.config.config['population_size'] = 3
print1('Asynchronous differential evolution requires a population size of at least 3. '
'Increasing the population size to 3.')
logger.warning('Increased population_size to the minimum allowed value of 3')
self.sims_completed = 0
self.individuals = [] # List of individuals
self.fitnesses = [] # List of same shape, gives fitness of each individual
def reset(self, bootstrap=None):
super(AsynchronousDifferentialEvolution, self).reset(bootstrap)
self.sims_completed = 0
self.individuals = []
self.fitnesses = []
def start_run(self):
print2('Running Asynchronous Differential Evolution with population size %i for up to %i iterations' %
(self.population_size, self.max_iterations))
# Initialize random individuals
if self.config.config['initialization'] == 'lh':
self.individuals = self.random_latin_hypercube_psets(self.population_size)
else:
self.individuals = [self.random_pset() for i in range(self.population_size)]
# Set all fitnesses to Inf, guaranteeing a replacement by the first proposed individual.
# The first replacement will replace with a copy of the same PSet, with the correct objective calculated.
self.fitnesses = [np.inf for i in range(self.population_size)]
for i in range(len(self.individuals)):
self.individuals[i].name = 'gen0ind%i' % i
return copy.deepcopy(self.individuals)
def got_result(self, res):
"""
Called when a simulation run finishes
:param res: Result object
:return:
"""
pset = res.pset
fitness = res.score
gen = int(re.search(r'(?<=gen)\d+', pset.name).group(0))
j = int(re.search(r'(?<=ind)\d+', pset.name).group(0))
if fitness <= self.fitnesses[j]:
self.individuals[j] = pset
self.fitnesses[j] = fitness
self.sims_completed += 1
# Do various "per iteration" stuff
if self.sims_completed % self.population_size == 0:
iters_complete = self.sims_completed / self.population_size
if iters_complete % self.config.config['output_every'] == 0:
self.output_results()
if iters_complete % 10 == 0:
print1('Completed %i of %i simulations' % (self.sims_completed, self.max_iterations * self.population_size))
else:
print2('Completed %i of %i simulations' % (self.sims_completed, self.max_iterations * self.population_size))
print2('Current population fitnesses:')
print2(sorted(self.fitnesses))
if iters_complete % 20 == 0:
logger.info('Completed %i simulations' % self.sims_completed)
if iters_complete >= self.max_iterations:
return 'STOP'
# Convergence check
if np.max(self.fitnesses) / np.min(self.fitnesses) < 1. + self.stop_tolerance:
return 'STOP'
if 'best' in self.strategy:
best = np.argmin(self.fitnesses)
new_pset = self.new_individual(self.individuals, best)
elif 'all' in self.strategy:
new_pset = self.new_individual(self.individuals, j)
else:
new_pset = self.new_individual(self.individuals)
new_pset.name = 'gen%iind%i' % (gen+1, j)
return [new_pset]
class ScatterSearch(Algorithm):
"""
Implements ScatterSearch as described in the introduction of Penas et al 2017 (but not the fancy parallelized
version from that paper).
Uses the individual combination method described in Egea et al 2009
"""
def __init__(self, config): # variables, popsize, maxiters, saveevery):
super(ScatterSearch, self).__init__(config)
self.popsize = config.config['population_size']
if self.popsize < 3:
print1('Scatter search requires a population size of at least 3. '
'Increasing the population size to 3.')
logger.warning('Increasing population_size to the minimum allowed value of 3')
self.config.config['population_size'] = 3
self.popsize = 3
self.max_iterations = config.config['max_iterations']
if 'reserve_size' in config.config:
self.reserve_size = config.config['reserve_size']
else:
self.reserve_size = self.max_iterations
if 'init_size' in config.config:
self.init_size = config.config['init_size']
if self.init_size < self.popsize:
logger.warning('init_size less than population_size. Setting it equal to population_size.')
print1("Scatter search parameter 'init_size' cannot be less than 'population_size'. "
"Automatically setting it equal to population_size.")
self.init_size = self.popsize
else:
self.init_size = 10*len(self.variables)
if self.init_size < self.popsize:
logger.warning('init_size less than population_size. Setting it equal to population_size.')
self.init_size = self.popsize
self.local_min_limit = config.config['local_min_limit']
self.pending = dict() # {pendingPSet: parentPSet}
self.received = dict()  # {parentPSet: [(donependingPSet, score)]}
self.refs = [] # (refPset, score)
self.stuckcounter = dict()
self.iteration = 0
self.local_mins = [] # (Pset, score) pairs that were stuck for 5 gens, and so replaced.
self.reserve = []
def reset(self, bootstrap=None):
super(ScatterSearch, self).reset(bootstrap)
self.pending = dict()
self.received = dict()
self.refs = []
self.stuckcounter = dict()
self.iteration = 0
self.local_mins = []
self.reserve = []
def start_run(self):
print2('Running Scatter Search with population size %i (%i simulations per iteration) for %i iterations' %
(self.popsize, self.popsize * (self.popsize - 1), self.max_iterations))
# Generate big number = 10 * variable_count (or user's chosen init_size) initial individuals.
if self.config.config['initialization'] == 'lh':
psets = self.random_latin_hypercube_psets(self.init_size)
else:
psets = [self.random_pset() for i in range(self.init_size)]
for i in range(len(psets)):
psets[i].name = 'init%i' % i
# Generate a latin hypercube distributed "reserve". When we need a random new individual, pop one from here
# so we aren't repeating ground. Size of this could be customizable.
# Note that this is not part of the original algorithm description, Eshan made it up
# because otherwise, the "choose a new random point" step of the algorithm can cause useless repetition.
if self.reserve_size > 0:
self.reserve = self.random_latin_hypercube_psets(self.reserve_size)
else:
self.reserve = []
self.pending = {p: None for p in psets}
self.received = {None: []}
return psets
def round_1_init(self):
start_psets = sorted(self.received[None], key=lambda x: x[1])
# Half is the top of the list, half is random.
topcount = int(np.ceil(self.popsize / 2.))
randcount = int(np.floor(self.popsize / 2.))
self.refs = start_psets[:topcount]
randindices = np.random.choice(range(topcount, len(start_psets)), randcount, replace=False)
for i in randindices:
self.refs.append(start_psets[i])
self.stuckcounter = {r[0]: 0 for r in self.refs}
def got_result(self, res):
"""
Called when a simulation run finishes
:param res:
:type res Result
:return:
"""
ps = res.pset
score = res.score
parent = self.pending[ps]
self.received[parent].append((ps, score))
del self.pending[ps]
if len(self.pending) == 0:
# All of this generation done, make the next list of psets
if None in self.received:
# This is the initialization round, special case
self.round_1_init()
else:
# 1) Replace parent with highest scoring child
for i in range(len(self.refs)):
best_child = min(self.received[self.refs[i][0]], key=lambda x: x[1])
if best_child[1] < self.refs[i][1]:
del self.stuckcounter[self.refs[i][0]]
self.stuckcounter[best_child[0]] = 0
self.refs[i] = best_child
else:
self.stuckcounter[self.refs[i][0]] += 1
if self.stuckcounter[self.refs[i][0]] >= self.local_min_limit:
del self.stuckcounter[self.refs[i][0]]
self.local_mins.append(self.refs[i])
# For output. Not the most efficient, but not in a performance-critical section
self.local_mins = sorted(self.local_mins, key=lambda x: x[1])
self.local_mins = self.local_mins[:self.popsize] # So this doesn't get huge
# Pick a new random pset
if len(self.reserve) > 0:
new_pset = self.reserve.pop()
else:
new_pset = self.random_pset()
self.refs[i] = (new_pset, np.inf) # For simplicity, assume its score is awful
self.stuckcounter[new_pset] = 0
# 2) Sort the refs list by quality.
self.refs = sorted(self.refs, key=lambda x: x[1])
logger.info('Iteration %i' % self.iteration)
if self.iteration % 10 == 0:
print1('Completed iteration %i of %i' % (self.iteration, self.max_iterations))
else:
print2('Completed iteration %i of %i' % (self.iteration, self.max_iterations))
print2('Current scores: ' + str([x[1] for x in self.refs]))
print2('Best archived scores: ' + str([x[1] for x in self.local_mins]))
if self.iteration % self.config.config['output_every'] == 0:
self.output_results()
self.iteration += 1
if self.iteration == self.max_iterations:
return 'STOP'
# 3) Do the combination antics to generate new candidates
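# Combination sketch (after Egea et al 2009): for parent pi and helper hi, each parameter is redrawn
# uniformly from the interval [x_pi - d*(1 + alpha*beta), x_pi + d*(1 - alpha*beta)], where d is the
# helper-minus-parent difference, alpha = sign(hi - pi), and beta = (|hi - pi| - 1)/(popsize - 2),
# which skews the sampling box according to the relative ranking of the two members (refs is sorted by quality).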
query_psets = []
for pi in range(self.popsize): # parent index
for hi in range(self.popsize): # helper index
if pi == hi:
continue
new_vars = []
for v in self.variables:
# d = (self.refs[hi][0][v] - self.refs[pi][0][v]) / 2.
d = self.refs[hi][0].get_param(v.name).diff(self.refs[pi][0].get_param(v.name))
alpha = np.sign(hi-pi)
beta = (abs(hi-pi) - 1) / (self.popsize - 2)
# c1 = self.refs[pi][0][v] - d*(1 + alpha*beta)
# c2 = self.refs[pi][0][v] + d*(1 - alpha*beta)
# newval = np.random.uniform(c1, c2)
# newdict[v] = max(min(newval, var[2]), var[1])
new_vars.append(self.refs[pi][0].get_param(v.name).add_rand(-d*(1 + alpha*beta), d*(1 - alpha * beta)))
newpset = PSet(new_vars)
# Check to avoid duplicate PSets. If duplicate, don't have to try again because SS doesn't really
# care about the number of PSets queried.
if newpset not in self.pending:
newpset.name = 'iter%ip%ih%i' % (self.iteration, pi, hi)
query_psets.append(newpset)
self.pending[newpset] = self.refs[pi][0]
else:
logger.debug('Skipping duplicate proposed PSet %s' % str(newpset))
self.received = {r[0]: [] for r in self.refs}
return query_psets
else:
return []
def get_backup_every(self):
"""
Overrides base method because Scatter Search runs n*(n-1) PSets per iteration.
"""
return self.config.config['backup_every'] * self.config.config['population_size'] * \
(self.config.config['population_size']-1) * self.config.config['smoothing']
class BayesianAlgorithm(Algorithm):
"""Superclass for Bayesian MCMC algorithms"""
def __init__(self, config):
super(BayesianAlgorithm, self).__init__(config)
self.num_parallel = config.config['population_size']
self.max_iterations = config.config['max_iterations']
self.step_size = config.config['step_size']
self.iteration = [0] * self.num_parallel # Iteration number that each PSet is on
self.current_pset = None # List of n PSets corresponding to the n independent runs
self.ln_current_P = None # List of n probabilities of those n PSets.
self.burn_in = config.config['burn_in'] # todo: 'auto' option
self.adaptive = config.config['adaptive']
self.sample_every = config.config['sample_every']
self.output_hist_every = config.config['output_hist_every']
# A list of the % credible intervals to save, e.g. [68, 95]
self.credible_intervals = config.config['credible_intervals']
self.num_bins = config.config['hist_bins']
self.wait_for_sync = [False] * self.num_parallel
self.prior = None
self.load_priors()
self.samples_file = self.config.config['output_dir'] + '/Results/samples.txt'
# Check that the iteration range is valid with respect to the burn-in and/or adaptive iterations
def load_priors(self):
"""Builds the data structures for the priors, based on the variables specified in the config."""
self.prior = dict() # Maps each variable to a 4-tuple (space, dist, val1, val2)
# space is 'reg' for regular space, 'log' for log space. dist is 'n' for normal, 'b' for box.
# For normal distribution, val1 = mean, val2 = sigma (in regular or log space as appropriate)
# For box distribution, val1 = min, val2 = max (in regular or log space as appropriate)
for var in self.variables:
if var.type == 'normal_var':
self.prior[var.name] = ('reg', 'n', var.p1, var.p2)
elif var.type == 'lognormal_var':
self.prior[var.name] = ('log', 'n', var.p1, var.p2)
elif var.type == 'uniform_var':
self.prior[var.name] = ('reg', 'b', var.p1, var.p2)
elif var.type == 'loguniform_var':
self.prior[var.name] = ('log', 'b', np.log10(var.p1), np.log10(var.p2))
def start_run(self, setup_samples=True):
if self.config.config['initialization'] == 'lh':
first_psets = self.random_latin_hypercube_psets(self.num_parallel)
else:
first_psets = [self.random_pset() for i in range(self.num_parallel)]
self.ln_current_P = [np.nan]*self.num_parallel # Forces accept on the first run
self.current_pset = [None]*self.num_parallel
if self.config.config['continue_run'] == 1:
self.mle_start = np.loadtxt(self.config.config['output_dir'] + '/adaptive_files/MLE_params.txt')
for n in range(self.num_parallel):
for i,p in enumerate(first_psets[n]):
p.value = self.mle_start[i]
if self.config.config['starting_params'] and self.config.config['continue_run'] != 1:
for n in range(self.num_parallel):
for i,p in enumerate(first_psets[n]):
p.value = self.config.config['starting_params'][i]
for i in range(len(first_psets)):
first_psets[i].name = 'iter0run%i' % i
# Set up the output files
# Can't do this in the constructor because that happens before the output folder is potentially overwritten.
if setup_samples:
with open(self.samples_file, 'w') as f:
f.write('# Name\tLn_probability\t'+first_psets[0].keys_to_string()+'\n')
os.makedirs(self.config.config['output_dir'] + '/Results/Histograms/', exist_ok=True)
return first_psets
def got_result(self, res):
NotImplementedError("got_result() must be implemented in BayesianAlgorithm subclass")
def ln_prior(self, pset):
"""
Returns the value of the prior distribution for the given parameter set
:param pset:
:type pset: PSet
:return: float value of the natural log of the prior distribution evaluated at this parameter set
"""
total = 0.
for v in self.prior:
(space, dist, x1, x2) = self.prior[v]
if space == 'log':
val = np.log10(pset[v])
else:
val = pset[v]
if dist == 'n':
# Normal with mean x1 and standard deviation x2
total += -1. / (2. * x2 ** 2.) * (x1 - val)**2.
else:
# Uniform from x1 to x2
if x1 <= val <= x2:
total += -np.log(x2-x1)
else:
logger.warning('Box-constrained parameter %s reached a value outside the box.' % v)
total += -np.inf
return total
def sample_pset(self, pset, ln_prob):
"""
Adds this pset to the set of sampled psets for the final distribution.
:param pset:
:type pset: PSet
:param ln_prob - The probability of this PSet to record in the samples file.
:type ln_prob: float
"""
with open(self.samples_file, 'a') as f:
f.write(pset.name+'\t'+str(ln_prob)+'\t'+pset.values_to_string()+'\n')
def update_histograms(self, file_ext):
"""
Updates the files that contain histogram points for each variable
:param file_ext: String to append to the save file names
:type file_ext: str
:return:
"""
# Read the samples file into an array, ignoring the first row (header)
# and first 2 columns (pset names, probabilities)
dat_array = np.genfromtxt(self.samples_file, delimiter='\t', dtype=float,
usecols=range(2, len(self.variables)+2))
# Open the file(s) to save the credible intervals
cred_files = []
for i in self.credible_intervals:
f = open(self.config.config['output_dir']+'/Results/credible%i%s.txt' % (i, file_ext), 'w')
f.write('# param\tlower_bound\tupper_bound\n')
cred_files.append(f)
for i in range(len(self.variables)):
v = self.variables[i]
fname = self.config.config['output_dir']+'/Results/Histograms/%s%s.txt' % (v.name, file_ext)
# For log-space variables, we want the histogram in log space
if v.log_space:
histdata = np.log10(dat_array[:, i])
header = 'log10_lower_bound\tlog10_upper_bound\tcount'
else:
histdata = dat_array[:, i]
header = 'lower_bound\tupper_bound\tcount'
hist, bin_edges = np.histogram(histdata, bins=self.num_bins)
result_array = np.stack((bin_edges[:-1], bin_edges[1:], hist), axis=-1)
np.savetxt(fname, result_array, delimiter='\t', header=header)
sorted_data = sorted(dat_array[:, i])
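# Central credible interval sketch (hypothetical numbers): with n = 1000 samples and interval = 95,
# want = 950, so min_index = round(500 - 475) = 25 and max_index = round(500 + 475 - 1) = 974, i.e.
# roughly 2.5% of samples are discarded from each tail.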
for interval, file in zip(self.credible_intervals, cred_files):
n = len(sorted_data)
want = n * (interval/100)
min_index = int(np.round(n/2 - want/2))
max_index = int(np.round(n/2 + want/2 - 1))
file.write('%s\t%s\t%s\n' % (v.name, sorted_data[min_index], sorted_data[max_index]))
for file in cred_files:
file.close()
def cleanup(self):
"""Called when quitting due to error.
Save the histograms in addition to the usual algorithm cleanup"""
super().cleanup()
self.update_histograms('_end')
class DreamAlgorithm(BayesianAlgorithm):
"""
**This algorithm is a work in progress, and does not currently work correctly. In our most recent testing, it
generates incorrect probability distributions**
Implements a variant of the DREAM algorithm as described in Vrugt (2016) Environmental Modelling
and Software.
Adapts Bayesian MCMC to use methods from differential evolution for accelerated convergence and
more efficient sampling of parameter space
"""
def __init__(self, config):
super(DreamAlgorithm, self).__init__(config)
print0('You are running the DREAM algorithm. This is a work in progress, and is not officially supported! In '
'our most recent testing, it generates incorrect probability distributions.')
self.n_dim = len(self.variables)
self.all_idcs = np.arange(self.n_dim)
self.ncr = [(1+x)/self.config.config['crossover_number'] for x in range(self.config.config['crossover_number'])]
self.g_prob = self.config.config['gamma_prob']
self.acceptances = [0]*self.num_parallel
self.acceptance_rates = [0.0]*self.num_parallel
def got_result(self, res):
"""
Called by the scheduler when a simulation is completed, with the pset that was run, and the resulting simulation
data
:param res: PSet that was run in this simulation
:type res: Result
:return: List of PSet(s) to be run next.
"""
pset = res.pset
score = res.score
m = re.search(r'(?<=run)\d+', pset.name)
index = int(m.group(0))
# Calculate posterior of finished job
lnprior = self.ln_prior(pset)
lnlikelihood = -score
lnposterior = lnprior + lnlikelihood
# Metropolis-Hastings criterion
ln_p_accept = np.log(np.random.uniform()) < min(0., lnposterior - self.ln_current_P[index])  # natural log, for consistency with lnposterior
if ln_p_accept: # accept update based on MH criterion
self.current_pset[index] = pset
self.ln_current_P[index] = lnposterior
self.acceptances[index] += 1
# Record that this individual is complete
self.wait_for_sync[index] = True
self.iteration[index] += 1
self.acceptance_rates[index] = self.acceptances[index] / self.iteration[index]
# Update histograms and trajectories if necessary
if self.iteration[index] % self.sample_every == 0 and self.iteration[index] > self.burn_in:
self.sample_pset(self.current_pset[index], self.ln_current_P[index])
if (self.iteration[index] % (self.sample_every * self.output_hist_every) == 0
and self.iteration[index] > self.burn_in):
self.update_histograms('_%i' % self.iteration[index])
# Wait for entire generation to finish
if np.all(self.wait_for_sync):
self.wait_for_sync = [False] * self.num_parallel
if min(self.iteration) >= self.max_iterations:
return 'STOP'
if self.iteration[index] % 10 == 0:
print1('Completed iteration %i of %i' % (self.iteration[index], self.max_iterations))
print2('Acceptance rates: %s\n' % str(self.acceptance_rates))
else:
print2('Completed iteration %i of %i' % (self.iteration[index], self.max_iterations))
logger.info('Completed %i iterations' % self.iteration[index])
print2('Current -Ln Posteriors: %s' % str(self.ln_current_P))
next_gen = []
for i, p in enumerate(self.current_pset):
new_pset = self.calculate_new_pset(i)
if new_pset:
new_pset.name = 'iter%irun%i' % (self.iteration[i], i)
next_gen.append(new_pset)
else:
# If new PSet is outside of variable bounds, keep current PSet and wait for next generation
logger.debug('Proposed PSet %s is invalid. Rejecting and waiting until next iteration' % i)
self.wait_for_sync[i] = True
self.iteration[i] += 1
return next_gen
return []
def calculate_new_pset(self, idx):
"""
Uses differential evolution-like update to calculate new PSet
:param idx: Index of PSet to update
:return:
"""
# Choose individuals (not individual to be updated) for mutation
sel = np.random.choice(self.all_idcs[self.all_idcs != idx], 2, replace=False)
x0 = self.current_pset[idx]
x1 = self.current_pset[sel[0]]
x2 = self.current_pset[sel[1]]
# Sample the probability of modifying a parameter
cr = np.random.choice(self.ncr)
while True:
ds = np.random.uniform(size=self.n_dim) <= cr # sample parameter subspace
if np.any(ds):
break
# Sample whether to jump to the mode (when gamma = 1)
gamma = 1 if np.random.uniform() < self.g_prob else self.step_size
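# Proposal sketch (in the spirit of Vrugt 2016): every dimension is updated as
#   x_new = x_old + zeta + (1 + lambda)*gamma*diff
# where diff = x_r1 - x_r2 for dimensions in the sampled subspace (and 0 otherwise),
# zeta ~ Normal(0, 'zeta'), lambda ~ Uniform(-'lambda', +'lambda'), and gamma equals step_size except
# that with probability gamma_prob it is set to 1 to allow direct jumps between modes.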
new_vars = []
for i, d in enumerate(np.random.permutation(ds)):
k = self.variables[i]
diff = x1.get_param(k.name).diff(x2.get_param(k.name)) if d else 0.0
zeta = np.random.normal(0, self.config.config['zeta'])
lamb = np.random.uniform(-self.config.config['lambda'], self.config.config['lambda'])
# Differential evolution calculation (while satisfying detailed balance)
try:
# Do not reflect the parameter (need to reject if outside bounds)
new_var = x0.get_param(k.name).add(zeta + (1. + lamb) * gamma * diff, False)
new_vars.append(new_var)
except OutOfBoundsException:
logger.debug("Variable %s is outside of bounds")
return None
return PSet(new_vars)
class BasicBayesMCMCAlgorithm(BayesianAlgorithm):
"""
Implements a Bayesian Markov chain Monte Carlo simulation.
This is essentially a non-parallel algorithm, but here, we run n instances in parallel, and pool all results.
This will give you a best fit (which is maybe not great), but more importantly, generates an extra result file
that gives the probability distribution of each variable.
This distribution depends on the prior, which is specified according to the variable initialization rules.
With sa=True, this instead acts as a simulated annealing algorithm with n independent chains.
"""
def __init__(self, config, sa=False): # expdata, objective, priorfile, gamma=0.1):
super(BasicBayesMCMCAlgorithm, self).__init__(config)
self.sa = sa
if sa:
self.cooling = config.config['cooling']
self.beta_max = config.config['beta_max']
self.exchange_every = config.config['exchange_every']
self.pt = self.exchange_every != np.inf
self.reps_per_beta = self.config.config['reps_per_beta']
self.betas_per_group = self.num_parallel // self.reps_per_beta # Number of unique betas considered (in PT)
# The temperature of each replicate
# For MCMC, probably n copies of the same number, unless the user set it up strangely
# For SA, starts all the same (unless set up strangely), and independently decrease during the run
# For PT, contains reps_per_beta copies of the same ascending sequence of betas, e.g.
# [0.6, 0.8, 1., 0.6, 0.8, 1.]. Indices congruent to -1 mod (population_size/reps_per_beta) have the max beta
# (probably 1), and only these replicas are sampled.
self.betas = config.config['beta_list']
self.wait_for_sync = [False] * self.num_parallel
self.prior = None
self.load_priors()
self.attempts = 0
self.accepted = 0
self.exchange_attempts = 0
self.exchange_accepted = 0
self.staged = [] # Used only when resuming a run and adding iterations
def reset(self, bootstrap=None):
super(BasicBayesMCMCAlgorithm, self).reset(bootstrap)
self.current_pset = None
self.ln_current_P = None
self.iteration = [0] * self.num_parallel
self.wait_for_sync = [False] * self.num_parallel
self.samples_file = None
def start_run(self):
"""
Called by the scheduler at the start of a fitting run.
Must return a list of PSets that the scheduler should run.
:return: list of PSets
"""
if self.sa:
print2('Running simulated annealing on %i independent replicates in parallel, for %i iterations each or '
'until 1/T reaches %s' % (self.num_parallel, self.max_iterations, self.beta_max))
else:
if not self.pt:
print2('Running Markov Chain Monte Carlo on %i independent replicates in parallel, for %i iterations each.'
% (self.num_parallel, self.max_iterations))
else:
print2('Running parallel tempering on %i replicates for %i iterations, with replica exchanges performed '
'every %i iterations' % (self.num_parallel, self.max_iterations, self.exchange_every))
print2('Statistical samples will be recorded every %i iterations, after an initial %i-iteration burn-in period'
% (self.sample_every, self.burn_in))
setup_samples = not self.sa
return super(BasicBayesMCMCAlgorithm, self).start_run(setup_samples=setup_samples)
def got_result(self, res):
"""
Called by the scheduler when a simulation is completed, with the pset that was run, and the resulting simulation
data
:param res: PSet that was run in this simulation
:type res: Result
:return: List of PSet(s) to be run next.
"""
pset = res.pset
score = res.score
# Figure out which parallel run this is from based on the .name field.
m = re.search(r'(?<=run)\d+', pset.name)
index = int(m.group(0))
# Calculate the acceptance probability
lnprior = self.ln_prior(pset) # Need something clever for box constraints
lnlikelihood = -score
# Because the P's are so small to start, we express posterior, p_accept, and current_P in ln space
lnposterior = lnprior + lnlikelihood
ln_p_accept = min(0., lnposterior - self.ln_current_P[index])
# Decide whether to accept move.
self.attempts += 1
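# Illustration: with beta = 1 the test below is the standard Metropolis rule. Any improvement
# gives ln_p_accept = 0, i.e. acceptance probability 1; a move that is 2 ln-units worse is
# accepted with probability exp(-2) ~ 0.14. Smaller beta (a hotter chain) flattens the target
# and accepts more downhill moves; the growing beta used in simulated annealing is stricter.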
if np.random.rand() < np.exp(ln_p_accept*self.betas[index]) or np.isnan(self.ln_current_P[index]):
# Accept the move, so update our current PSet and P
self.accepted += 1
self.current_pset[index] = pset
self.ln_current_P[index] = lnposterior
# For simulated annealing, reduce the temperature if this was an unfavorable move.
if self.sa and ln_p_accept < 0.:
self.betas[index] += self.cooling
if self.betas[index] >= self.beta_max:
print2('Finished replicate %i because beta_max was reached.' % index)
logger.info('Finished replicate %i because beta_max was reached.' % index)
if min(self.betas) >= self.beta_max:
logger.info('All annealing replicates have reached the maximum beta value')
return 'STOP'
else:
return []
# Record the current PSet (clarification: what if failed? Sample old again?)
# Using either the newly accepted PSet or the old PSet, propose the next PSet.
proposed_pset = self.try_to_choose_new_pset(index)
if proposed_pset is None:
if np.all(self.wait_for_sync):
# Do the replica exchange, then propose n new psets so all chains resume
self.wait_for_sync = [False] * self.num_parallel
return self.replica_exchange()
elif min(self.iteration) >= self.max_iterations:
print0('Overall move accept rate: %f' % (self.accepted/self.attempts))
if not self.sa:
self.update_histograms('_final')
return 'STOP'
else:
return []
proposed_pset.name = 'iter%irun%i' % (self.iteration[index], index)
# Note self.staged is empty unless we just resumed a run with added iterations and need to restart chains.
if len(self.staged) != 0:
toreturn = [proposed_pset] + self.staged
self.staged = []
return toreturn
return [proposed_pset]
def try_to_choose_new_pset(self, index):
"""
Helper function
Advances the iteration number, and tries to choose a new parameter set for chain index i
If that fails (e.g. due to a box constraint), keeps advancing iteration number and trying again.
If it hits an iteration where it has to stop and wait (a replica exchange iteration or the end), returns None
Otherwise returns the new PSet.
:param index:
:return:
"""
proposed_pset = None
# This part is a loop in case a box constraint makes a move automatically rejected.
loop_count = 0
while proposed_pset is None:
loop_count += 1
if loop_count == 20:
logger.warning('Instance %i spent 20 iterations at the same point' % index)
print1('One of your samples is stuck at the same point for 20+ iterations because it keeps '
'hitting box constraints. Consider using looser box constraints or a smaller '
'step_size.')
if loop_count == 1000:
logger.warning('Instance %i terminated after 1000 iterations at the same point' % index)
print1('Instance %i was terminated after it spent 1000 iterations stuck at the same point '
'because it kept hitting box constraints. Consider using looser box constraints or a '
'smaller step_size.' % index)
self.iteration[index] = self.max_iterations
self.iteration[index] += 1
# Check if it's time to do various things
if not self.sa:
if self.iteration[index] > self.burn_in and self.iteration[index] % self.sample_every == 0 \
and self.should_sample(index):
self.sample_pset(self.current_pset[index], self.ln_current_P[index])
if (self.iteration[index] > self.burn_in
and self.iteration[index] % (self.output_hist_every * self.sample_every) == 0
and self.iteration[index] == min(self.iteration)):
self.update_histograms('_%i' % self.iteration[index])
if self.iteration[index] == min(self.iteration):
if self.iteration[index] % self.config.config['output_every'] == 0:
self.output_results()
if self.iteration[index] % 10 == 0:
print1('Completed iteration %i of %i' % (self.iteration[index], self.max_iterations))
print2('Current move accept rate: %f' % (self.accepted/self.attempts))
if self.exchange_attempts > 0:
print2('Current replica exchange rate: %f' % (self.exchange_accepted / self.exchange_attempts))
else:
print2('Completed iteration %i of %i' % (self.iteration[index], self.max_iterations))
logger.info('Completed %i iterations' % self.iteration[index])
logger.info('Current move accept rate: %f' % (self.accepted/self.attempts))
if self.exchange_attempts > 0:
logger.info('Current replica exchange rate: %f' % (self.exchange_accepted / self.exchange_attempts))
if self.sa:
logger.debug('Current betas: ' + str(self.betas))
print2('Current -Ln Likelihoods: ' + str(self.ln_current_P))
if self.iteration[index] >= self.max_iterations:
logger.info('Finished replicate number %i' % index)
print2('Finished replicate number %i' % index)
return None
if self.iteration[index] % self.exchange_every == 0:
# Need to wait for the rest of the chains to catch up to do replica exchange
self.wait_for_sync[index] = True
return None
proposed_pset = self.choose_new_pset(self.current_pset[index])
return proposed_pset
def should_sample(self, index):
"""
Checks whether this replica index is one that gets sampled.
For mcmc, always True. For pt, must be a replica at the max beta
"""
return (index + 1) % self.betas_per_group == 0 if self.pt else True
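# Example (using the layout described in __init__): with reps_per_beta = 2 and
# betas = [0.6, 0.8, 1.0, 0.6, 0.8, 1.0], betas_per_group = 3, so only indices 2 and 5
# (the beta = 1 replicas) contribute samples; for plain MCMC every index is sampled.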
def choose_new_pset(self, oldpset):
"""
Helper function to perturb the old PSet, generating a new proposed PSet
If the new PSet fails automatically because it violates box constraints, returns None.
:param oldpset: The PSet to be changed
:type oldpset: PSet
:return: the new PSet
"""
delta_vector = {k: np.random.normal() for k in oldpset.keys()}
delta_vector_magnitude = np.sqrt(sum([x ** 2 for x in delta_vector.values()]))
delta_vector_normalized = {k: self.step_size * delta_vector[k] / delta_vector_magnitude for k in oldpset.keys()}
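# The Gaussian direction vector is rescaled to unit length and multiplied by step_size, so each
# proposal moves exactly step_size in a uniformly random direction of the sampling space. The
# kernel is symmetric, which is why no Hastings correction appears in the acceptance rule in
# got_result().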
new_vars = []
for v in oldpset:
# For box constraints, need special treatment to keep correct statistics
# If we tried to leave the box, the move automatically fails, we should increment the iteration counter
# and retry.
# The same could happen if normal_var's try to go below 0
try:
new_var = v.add(delta_vector_normalized[v.name])
except OutOfBoundsException:
logger.debug('Rejected a move because %s=%.2E moved by %f, outside the box constraint [%.2E, %.2E]' %
(v.name, oldpset[v.name], delta_vector_normalized[v.name], v.lower_bound, v.upper_bound))
return None
new_vars.append(new_var)
return PSet(new_vars)
def replica_exchange(self):
"""
Performs replica exchange for parallel tempering.
Then proposes n new parameter sets to resume all chains after the exchange.
:return: List of n PSets to run
"""
logger.info('Performing replica exchange on iteration %i' % self.iteration[0])
# Who exchanges with whom is a little complicated. Each replica tries one exchange with a replica at the next
# beta. But if we have multiple reps per beta, then the exchanges aren't necessarily within the same group of
# reps. We use this random permutation to determine which groups exchange.
for i in range(self.betas_per_group - 1):
permutation = np.random.permutation(range(self.reps_per_beta))
for group in range(self.reps_per_beta):
# Determine the 2 indices we're exchanging, ind_hi and ind_lo
ind_hi = self.betas_per_group * group + i
other_group = permutation[group]
ind_lo = self.betas_per_group * other_group + i + 1
# Consider exchanging index ind_hi (higher T) with ind_lo (lower T)
ln_p_exchange = min(0., -(self.betas[ind_lo]-self.betas[ind_hi]) * (self.ln_current_P[ind_lo]-self.ln_current_P[ind_hi]))
# Scratch work: Should there be a - sign in front? You want to always accept if moving the better answer
# to the lower temperature. ind_lo has lower T so higher beta, so the first term is positive. The second
# term is positive if ind_lo is better. But you want a positive final answer when ind_hi, currently at
# higher T, is better. So you need a - sign.
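# Worked example: take beta_lo = 1.0, beta_hi = 0.6. If the hot replica currently has the
# better posterior (say ln P_hi = -5 vs ln P_lo = -10), ln_p_exchange = min(0, -0.4 * (-5)) = 0,
# so the swap is always accepted and the better parameter set moves to the cold chain. In the
# opposite case (ln P_lo = -5, ln P_hi = -10) the swap is accepted with probability exp(-2) ~ 0.14.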
self.exchange_attempts += 1
if np.random.random() < np.exp(ln_p_exchange):
# Do the exchange
logger.debug('Exchanging individuals %i and %i' % (ind_hi, ind_lo))
self.exchange_accepted += 1
hold_pset = self.current_pset[ind_hi]
hold_p = self.ln_current_P[ind_hi]
self.current_pset[ind_hi] = self.current_pset[ind_lo]
self.ln_current_P[ind_hi] = self.ln_current_P[ind_lo]
self.current_pset[ind_lo] = hold_pset
self.ln_current_P[ind_lo] = hold_p
# Propose new psets - it's more complicated because of going out of box, and other counters.
proposed = []
for j in range(self.num_parallel):
proposed_pset = self.try_to_choose_new_pset(j)
if proposed_pset is None:
if np.all(self.wait_for_sync):
logger.error('Aborting because no changes were made between one replica exchange and the next.')
print0("I seem to have gone from one replica exchange to the next replica exchange without "
"proposing a single valid move. Something is probably wrong for this to happen, so I'm "
"going to stop.")
return 'STOP'
elif min(self.iteration) >= self.max_iterations:
return 'STOP'
else:
# Iteration number got off by 1 because try_to_choose_new_pset() was called twice: once a while ago
# when it reached the exchange point and returned None, and a second time just now.
# Need to correct for that here.
self.iteration[j] -= 1
proposed_pset.name = 'iter%irun%i' % (self.iteration[j], j)
proposed.append(proposed_pset)
return proposed
def cleanup(self):
"""Called when quitting due to error.
Save the histograms in addition to the usual algorithm cleanup"""
super().cleanup()
self.update_histograms('_end')
def add_iterations(self, n):
oldmax = self.max_iterations
self.max_iterations += n
# Any chains that already completed need to be restarted with a new proposed parameter set
for index in range(self.num_parallel):
if self.iteration[index] >= oldmax:
ps = self.try_to_choose_new_pset(index)
if ps:
# Add to a list of new psets to run that will be submitted when the first result comes back.
ps.name = 'iter%irun%i' % (self.iteration[index], index)
logger.debug('Added PSet %s to BayesAlgorithm.staged to resume a chain' % (ps.name))
self.staged.append(ps)
class Adaptive_MCMC(BayesianAlgorithm):
def __init__(self, config): # expdata, objective, priorfile, gamma=0.1):
super(Adaptive_MCMC, self).__init__(config)
# set the params declared in the configuration file
if self.config.config['normalization']:
self.norm = self.config.config['normalization']
else:
self.norm = None
self.time = self.config.config['time_length']
self.adaptive = self.config.config['adaptive']
# The iteration number that the adaptive starts at
self.valid_range = self.burn_in + self.adaptive
# The length of the output arrays and the number of iterations before they are written out
self.arr_length = 1
# set recorders
self.acceptances = 0
self.acceptance_rates = 0
self.attempts = 0
self.factor = [0] * self.num_parallel
self.staged = []
self.alpha = [0] * self.num_parallel
# start lists
self.current_param_set = [0] * self.num_parallel
self.current_param_set_diff = [0] * self.num_parallel
self.scores = np.zeros((self.num_parallel, self.arr_length))
# set arrays for features and graphs
self.parameter_index = np.zeros((self.num_parallel, self.arr_length, len(self.variables)))
self.samples_file = None
self.mu = np.zeros((self.num_parallel, 1, len(self.variables)))
# warm start features
os.makedirs(self.config.config['output_dir'] + '/adaptive_files', exist_ok=True)
os.makedirs(self.config.config['output_dir'] + '/Results/A_MCMC/Runs', exist_ok=True)
os.makedirs(self.config.config['output_dir'] + '/Results/Histograms/', exist_ok=True)
if self.config.config['output_trajectory']:
self.output_columns = []
for i in self.config.config['output_trajectory']:
new = i.replace(',', '')
self.output_columns.append(new)
self.output_run_current = {}
self.output_run_MLE = {}
self.output_run_all = {}
for i in self.output_columns:
for k in self.time.keys():
if '_Cum' in i:
self.output_run_current[k + i] = np.zeros((self.num_parallel, 1, self.time[k]+1))
self.output_run_MLE[k + i] = np.zeros((self.num_parallel, 1, self.time[k]+1))
self.output_run_all[k + i] = np.zeros((self.num_parallel, 1, self.time[k]+1))
else:
self.output_run_current[k + i] = np.zeros((self.num_parallel, 1, self.time[k]+1))
self.output_run_MLE[k + i] = np.zeros((self.num_parallel, 1, self.time[k]+1))
self.output_run_all[k + i] = np.zeros((self.num_parallel, 1, self.time[k]+1))
if self.config.config['output_noise_trajectory']:
self.output_noise_columns = []
for i in self.config.config['output_noise_trajectory']:
new = i.replace(',', '')
self.output_noise_columns.append(new)
self.output_run_noise_current = {}
self.output_run_noise_MLE = {}
self.output_run_noise_all = {}
for i in self.output_noise_columns:
for k in self.time.keys():
if '_Cum' in i:
self.output_run_noise_current[k + i] = np.zeros((self.num_parallel, 1, self.time[k]+1))
self.output_run_noise_MLE[k + i] = np.zeros((self.num_parallel, 1, self.time[k]+1))
self.output_run_noise_all[k + i] = np.zeros((self.num_parallel, 1, self.time[k]+1))
else:
self.output_run_noise_current[k + i] = np.zeros((self.num_parallel, 1, self.time[k]+1))
self.output_run_noise_MLE[k + i] = np.zeros((self.num_parallel, 1, self.time[k]+1))
self.output_run_noise_all[k + i] = np.zeros((self.num_parallel, 1, self.time[k]+1))
if self.config.config['continue_run'] == 1:
self.diff = [self.step_size] * self.num_parallel
self.diff_best = np.loadtxt(self.config.config['output_dir'] + '/adaptive_files/diff.txt')
self.diffMatrix = np.zeros((self.num_parallel, len(self.variables), len(self.variables)))
self.diffMatrix_log = np.zeros((self.num_parallel, len(self.variables), len(self.variables)))
if self.adaptive != 1:
self.mle_best = np.loadtxt(self.config.config['output_dir'] + '/adaptive_files/MLE_params.txt')
self.diffMatrix_best = np.loadtxt(self.config.config['output_dir'] + '/adaptive_files/diffMatrix.txt')
for i in range(self.num_parallel):
self.diffMatrix[i] = np.loadtxt(self.config.config['output_dir'] + '/adaptive_files/diffMatrix.txt')
self.diff[i] = np.loadtxt(self.config.config['output_dir'] + '/adaptive_files/diff.txt')
else:
self.mle_best = np.zeros((self.arr_length, len(self.variables)))
self.diff = [self.step_size] * self.num_parallel
self.diff_best = self.step_size
self.diffMatrix = np.zeros((self.num_parallel, len(self.variables), len(self.variables)))
# make sure that the adaptive and burn-in iterations are less than the max iterations
if self.adaptive + self.burn_in >= self.max_iterations - 1:
raise PybnfError('The max iterations must be at least 2 more than the sum of the adaptive and burn-in iterations.')
''' Used for resuming runs and adding iterations'''
def reset(self, bootstrap=None):
super(Adaptive_MCMC, self).reset(bootstrap)
self.current_pset = None
self.ln_current_P = None
self.iteration = [0] * self.num_parallel
self.wait_for_sync = [False] * self.num_parallel
self.samples_file = None
def start_run(self):
"""
Called by the scheduler at the start of a fitting run.
Must return a list of PSets that the scheduler should run.
:return: list of PSets
"""
print2(
'Running Adaptive Markov Chain Monte Carlo on %i independent replicates in parallel, for %i iterations each.'
% (self.num_parallel, self.max_iterations))
return super(Adaptive_MCMC, self).start_run(setup_samples = False)
def got_result(self, res):
"""
Called by the scheduler when a simulation is completed, with the pset that was run, and the resulting simulation
data
:param res: PSet that was run in this simulation
:type res: Result
:return: List of PSet(s) to be run next.
"""
pset = res.pset
score = res.score
# Figure out which parallel run this is from based on the .name field.
m = re.search(r'(?<=run)\d+', pset.name)
index = int(m.group(0))
lnprior = self.ln_prior(pset)
lnlikelihood = -score
lnposterior = lnlikelihood + lnprior
self.accept = False
self.attempts += 1
# Decide whether to accept move
if lnposterior > self.ln_current_P[index] or np.isnan(self.ln_current_P[index]):
self.accept = True
self.alpha[index] = 1
else:
self.alpha[index] = np.exp((lnposterior-self.ln_current_P[index]))
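# alpha is the Metropolis ratio exp(ln P_new - ln P_old); together with the branch above this
# implements accept-with-probability min(1, P_new / P_old).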
if np.random.random() < self.alpha[index]:
self.accept = True
# if accept then update the lists
if self.accept:
self.current_pset[index] = pset
self.acceptances += 1
self.list_trajactory = []
self.cp = []
for i in self.current_pset[index]:
self.cp.append(i.value)
self.current_param_set[index] = self.cp
# Keep track of the overall best chain and its adaptive features
if lnposterior > max(self.ln_current_P):
self.mle_best = self.current_param_set[index]
self.diffMatrix_best = self.diffMatrix[index]
self.diff_best = self.diff[index]
if self.iteration[index] == 0:
self.mle_best = self.current_param_set[index]
self.diffMatrix_best = np.eye(len(self.variables))
self.diff_best = self.diff[index]
# The order of variable reassignment is very important here
self.ln_current_P[index] = lnposterior
if self.config.config['parallelize_models'] != 1:
res.out = res.simdata
if isinstance(res.out, FailedSimulation):
pass
else:
if self.config.config['output_trajectory']:
for l in self.output_columns:
for i in res.out:
for j in res.out[i]:
if l in res.out[i][j].cols:
if self.norm:
res.out[i][j].normalize(self.norm)
column = res.out[i][j].cols[l]
self.list_trajactory = []
for z in res.out[i][j].data:
self.list_trajactory.append(z.data[column])
if '_Cum' in l:
getFirstValue = np.concatenate((self.list_trajactory[0], np.diff(self.list_trajactory)))
#!/usr/bin/env python
# coding: utf-8
import numpy as np
class Kmeans:
def __init__(self, k=2, max_iter=250, tolerance=0, method='Elkan'):
"""
Initialises a Kmeans clustering algorithm.
Args:
- k: Number of clusters. Defaults to 2.
- max_iter: Maximum number of iterations to be performed before the Kmeans algorithm terminates. Defaults to 250.
- tolerance: Threshold distance change for each centroid to terminate algorithm. Defaults to 0.
When the distance rate of change for each centroid between 2 subsequent iterations is lower than the tolerance, the algorithm terminates.
- method: 'classic' or 'Elkan'. Determines whether the classic Kmeans or Elkan's accelerated Kmeans algorithm will be used. Defaults to 'Elkan'.
"""
assert method in ['classic','Elkan'], "Method argument not valid"
self.k = k
self.max_iter = max_iter
self.tol = tolerance
self.method = method
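# Usage sketch (illustrative only, using the attributes defined in this class):
#   km = Kmeans(k=3, max_iter=100, method='classic')
#   km.fit(points)     # points: array-like of shape (n_samples, n_features)
#   km.centroids       # dict mapping cluster index -> centroid coordinates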
def fit(self, data):
'''
Finds k centroids for a dataset of numeric points.
Args:
- data: Numpy array or pandas DataFrame of numerical values.
'''
pointsArray = np.array(data)
## Initializing k random centroids within the bounds of the data points
self.centroids = {}
self.labels = [0 for point in pointsArray]
initCentroids = []
for dim in range(pointsArray.shape[1]):
dim_min = np.min(pointsArray, axis=0)[dim]
dim_max = np.max(pointsArray, axis=0)[dim]
newCentroid = (dim_max-dim_min)*np.random.random_sample([1,self.k])+dim_min
initCentroids = np.append(initCentroids,newCentroid)
initCentroids = initCentroids.reshape((pointsArray.shape[1],self.k)).T
self.centroids = dict(zip(list(range(self.k)),initCentroids))
## Classic Kmeans
if self.method == 'classic':
for i in range(self.max_iter):
self.classifications = {}
self.pointsClassif = {}
for j in range(self.k):  # separate loop variable so the outer iteration counter is not shadowed
self.classifications[j] = []
self.pointsClassif[j] = []
for point in pointsArray:
distances = [np.linalg.norm(point-self.centroids[centroid]) for centroid in self.centroids]
classification = distances.index(min(distances))
self.classifications[classification].append(point)
prevCentroids = dict(self.centroids)
for classification in self.classifications:
if len(self.classifications[classification]) == 0:
pass
else:
self.centroids[classification] = np.average(self.classifications[classification], axis=0)
import unittest
from unittest.mock import MagicMock, patch
import math
import os
import tempfile
import time
import numpy as np
from PyQt5.QtTest import QTest, QSignalSpy
from PyQt5.QtCore import Qt, QPoint
from extra_foam.config import (
AnalysisType, config, Normalizer, RoiCombo, RoiFom, RoiProjType
)
from extra_foam.gui import mkQApp
from extra_foam.gui.image_tool import ImageToolWindow
from extra_foam.logger import logger
from extra_foam.pipeline.data_model import ImageData, ProcessedData, RectRoiGeom
from extra_foam.pipeline.exceptions import ImageProcessingError
from extra_foam.pipeline.tests import _TestDataMixin
from extra_foam.processes import wait_until_redis_shutdown
from extra_foam.services import Foam
from extra_foam.database import Metadata, MetaProxy
app = mkQApp()
logger.setLevel('CRITICAL')
_tmp_cfg_dir = tempfile.mkdtemp()
def setup_module(module):
from extra_foam import config
module._backup_ROOT_PATH = config.ROOT_PATH
config.ROOT_PATH = _tmp_cfg_dir
def teardown_module(module):
os.rmdir(_tmp_cfg_dir)
from extra_foam import config
config.ROOT_PATH = module._backup_ROOT_PATH
class TestImageTool(unittest.TestCase, _TestDataMixin):
@classmethod
def setUpClass(cls):
config.load('LPD', 'FXE')
cls.foam = Foam().init()
cls.gui = cls.foam._gui
cls.train_worker = cls.foam.train_worker
cls.pulse_worker = cls.foam.pulse_worker
cls._meta = MetaProxy()
actions = cls.gui._tool_bar.actions()
cls._action = actions[3]
assert("Image tool" == cls._action.text())
@classmethod
def tearDownClass(cls):
cls.foam.terminate()
wait_until_redis_shutdown()
os.remove(config.config_file)
def setUp(self):
# construct a fresh ImageToolWindow for each test
self.gui._image_tool = ImageToolWindow(queue=self.gui._queue,
pulse_resolved=self.gui._pulse_resolved,
parent=self.gui)
self.image_tool = self.gui._image_tool
self.view = self.image_tool._corrected_view.imageView
self.view.setImageData(None)
self.view._image = None
def testGeneral(self):
self.assertEqual(10, len(self.image_tool._ctrl_widgets))
self.assertTrue(self.image_tool._pulse_resolved)
self.assertTrue(self.image_tool._image_ctrl_widget._pulse_resolved)
def testUpdateImage(self):
widget = self.image_tool._image_ctrl_widget
# test default values
self.assertFalse(widget.update_image_btn.isEnabled())
self.assertTrue(widget.auto_update_cb.isChecked())
self.assertTrue(self.image_tool._auto_update)
# test enabled and disable "update image" button
widget.auto_update_cb.setChecked(False)
self.assertTrue(widget.update_image_btn.isEnabled())
self.assertFalse(self.image_tool._auto_update)
widget.auto_update_cb.setChecked(True)
self.assertFalse(widget.update_image_btn.isEnabled())
self.assertTrue(self.image_tool._auto_update)
# test update image manually
self.image_tool.updateWidgets = MagicMock()
widget.auto_update_cb.setChecked(False)
widget.update_image_btn.clicked.emit()
self.image_tool.updateWidgets.assert_called_once_with(True)
def testRoiCtrlWidget(self):
roi_ctrls = self.image_tool._corrected_view._roi_ctrl_widget._roi_ctrls
proc = self.pulse_worker._image_roi
self.assertEqual(4, len(roi_ctrls))
proc.update()
for i, ctrl in enumerate(roi_ctrls, 1):
# test real ROI position and size matches the numbers in the GUI
self.assertListEqual([int(ctrl._px_le.text()), int(ctrl._py_le.text())],
list(ctrl._roi.pos()))
self.assertListEqual([int(ctrl._width_le.text()), int(ctrl._height_le.text())],
list(ctrl._roi.size()))
# test default values
self.assertListEqual(RectRoiGeom.INVALID, getattr(proc, f"_geom{i}"))
for ctrl in roi_ctrls:
self.assertFalse(ctrl._activate_cb.isChecked())
self.assertFalse(ctrl._lock_cb.isChecked())
self.assertFalse(ctrl._width_le.isEnabled())
self.assertFalse(ctrl._height_le.isEnabled())
self.assertFalse(ctrl._px_le.isEnabled())
self.assertFalse(ctrl._py_le.isEnabled())
roi1_ctrl = roi_ctrls[0]
roi1 = self.view._rois[0]
self.assertIs(roi1_ctrl._roi, roi1)
# activate ROI1 ctrl
QTest.mouseClick(roi1_ctrl._activate_cb, Qt.LeftButton,
pos=QPoint(2, roi1_ctrl._activate_cb.height()/2))
self.assertTrue(roi1_ctrl._activate_cb.isChecked())
proc.update()
self.assertTupleEqual((int(roi1_ctrl._width_le.text()), int(roi1_ctrl._height_le.text())),
tuple(roi1.size()))
self.assertTupleEqual((int(roi1_ctrl._px_le.text()), int(roi1_ctrl._py_le.text())),
tuple(roi1.pos()))
# use keyClicks to test that the QLineEdit is enabled
roi1_ctrl._width_le.clear()
QTest.keyClicks(roi1_ctrl._width_le, "10")
QTest.keyPress(roi1_ctrl._width_le, Qt.Key_Enter)
roi1_ctrl._height_le.clear()
QTest.keyClicks(roi1_ctrl._height_le, "30")
QTest.keyPress(roi1_ctrl._height_le, Qt.Key_Enter)
self.assertTupleEqual((10, 30), tuple(roi1.size()))
# ROI can be outside of the image
roi1_ctrl._px_le.clear()
QTest.keyClicks(roi1_ctrl._px_le, "-1")
QTest.keyPress(roi1_ctrl._px_le, Qt.Key_Enter)
roi1_ctrl._py_le.clear()
QTest.keyClicks(roi1_ctrl._py_le, "-3")
QTest.keyPress(roi1_ctrl._py_le, Qt.Key_Enter)
self.assertTupleEqual((-1, -3), tuple(roi1.pos()))
proc.update()
self.assertListEqual([-1, -3, 10, 30], proc._geom1)
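# _geom1 is stored as [x, y, w, h] (ROI position followed by its size), matching the numbers
# entered into the ROI control above.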
# lock ROI ctrl
QTest.mouseClick(roi1_ctrl._lock_cb, Qt.LeftButton,
pos=QPoint(2, roi1_ctrl._lock_cb.height()/2))
self.assertTrue(roi1_ctrl._activate_cb.isChecked())
self.assertTrue(roi1_ctrl._lock_cb.isChecked())
self.assertFalse(roi1_ctrl._width_le.isEnabled())
self.assertFalse(roi1_ctrl._height_le.isEnabled())
self.assertFalse(roi1_ctrl._px_le.isEnabled())
self.assertFalse(roi1_ctrl._py_le.isEnabled())
# deactivate ROI ctrl
QTest.mouseClick(roi1_ctrl._activate_cb, Qt.LeftButton,
pos=QPoint(2, roi1_ctrl._activate_cb.height()/2))
self.assertFalse(roi1_ctrl._activate_cb.isChecked())
self.assertTrue(roi1_ctrl._lock_cb.isChecked())
self.assertFalse(roi1_ctrl._width_le.isEnabled())
self.assertFalse(roi1_ctrl._height_le.isEnabled())
self.assertFalse(roi1_ctrl._px_le.isEnabled())
self.assertFalse(roi1_ctrl._py_le.isEnabled())
def testMovingAverageQLineEdit(self):
# TODO: remove it in the future
widget = self.image_tool._image_ctrl_widget
# moving average is disabled
self.assertFalse(widget.moving_avg_le.isEnabled())
@patch("extra_foam.gui.plot_widgets.image_views.ImageAnalysis."
"onThresholdMaskChange")
@patch("extra_foam.gui.mediator.Mediator.onImageThresholdMaskChange")
def testThresholdMask(self, on_mask_mediator, on_mask):
widget = self.image_tool._image_ctrl_widget
widget.threshold_mask_le.clear()
QTest.keyClicks(widget.threshold_mask_le, "1, 10")
QTest.keyPress(widget.threshold_mask_le, Qt.Key_Enter)
on_mask.assert_called_once_with((1, 10))
on_mask_mediator.assert_called_once_with((1, 10))
def testAutoLevel(self):
widget = self.image_tool._image_ctrl_widget
spy = QSignalSpy(self.image_tool._mediator.reset_image_level_sgn)
widget.auto_level_btn.clicked.emit()
self.assertEqual(1, len(spy))
def testReferenceCtrlWidget(self):
widget = self.image_tool._reference_view._ctrl_widget
corrected = self.image_tool._reference_view._corrected
proc = self.pulse_worker._image_proc
data, _ = self.data_with_assembled(1001, (4, 10, 10))
# test setting reference (no image)
QTest.mouseClick(widget._set_ref_btn, Qt.LeftButton)
ref = proc._ref_sub.update(proc._reference)
self.assertIsNone(ref)
# test setting reference
corrected._image = 2 * np.ones((10, 10), np.float32)
QTest.mouseClick(widget._set_ref_btn, Qt.LeftButton)
ref = proc._ref_sub.update(corrected.image.copy())
np.testing.assert_array_equal(corrected.image, ref)
# test setting reference multiple times
for i in range(5):
corrected._image = np.random.rand(10, 10).astype(np.float32)
QTest.mouseClick(widget._set_ref_btn, Qt.LeftButton)
ref = proc._ref_sub.update(None)
np.testing.assert_array_equal(corrected.image, ref)
# test removing reference
QTest.mouseClick(widget._remove_ref_btn, Qt.LeftButton)
ref = proc._ref_sub.update(corrected.image.copy())
self.assertIsNone(ref)
# ------------------------------
# test load and remove reference
# ------------------------------
# Here we test that "proc._ref_sub.update()" works properly. The rest
# is done in the unittests of ImageProcessor.
ref_gt = np.ones([2, 2], dtype=np.float32)
def _read_image_side_effect(fn):
if fn == "reference/file/path":
return ref_gt
# caveat: first establish the connection
proc._cal_sub.update(None, None)
with patch('extra_foam.gui.ctrl_widgets.ref_image_ctrl_widget.read_image',
side_effect=_read_image_side_effect):
with patch('extra_foam.gui.ctrl_widgets.ref_image_ctrl_widget.QFileDialog.getOpenFileName',
return_value=["reference/file/path"]):
QTest.mouseClick(widget._load_ref_btn, Qt.LeftButton)
self.assertEqual("reference/file/path", widget._ref_fp_le.text())
ref = proc._ref_sub.update(None)
np.testing.assert_array_equal(ref, ref_gt)
QTest.mouseClick(widget._remove_ref_btn, Qt.LeftButton)
self.assertEqual("", widget._ref_fp_le.text())
ref = proc._ref_sub.update(ref_gt)
self.assertIsNone(ref)
def testDrawMask(self):
# TODO: test by really drawing something on ImageTool
from extra_foam.ipc import ImageMaskPub
pub = ImageMaskPub()
proc = self.pulse_worker._image_proc
data, _ = self.data_with_assembled(1001, (4, 10, 10))
# trigger the lazily evaluated subscriber
proc.process(data)
mask_gt = np.zeros(data['assembled']['data'].shape[-2:], dtype=np.bool)
# test default
np.testing.assert_array_equal(proc._image_mask, mask_gt)
# test changing mask
pub.add((0, 0, 2, 3))
mask_gt[0:3, 0:2] = True
# test adding mask
n_attempts = 0
# repeat to prevent random failure
while n_attempts < 10:
n_attempts += 1
proc.process(data)
# np.testing.assert_array_equal(mask_gt, proc._image_mask)
if (mask_gt == proc._image_mask).all():
break
time.sleep(0.001)
# add one more mask region
pub.add((1, 1, 2, 3))
proc.process(data)
mask_gt[1:4, 1:3] = True
np.testing.assert_array_equal(mask_gt, proc._image_mask)
# test erasing mask
pub.erase((2, 2, 3, 3))
proc.process(data)
mask_gt[2:5, 2:5] = False
np.testing.assert_array_equal(mask_gt, proc._image_mask)
# test trashing mask
action = self.image_tool._tool_bar.actions()[2]
action.trigger()
proc.process(data)
np.testing.assert_array_equal(np.zeros_like(mask_gt, dtype=np.bool), proc._image_mask)
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 21 14:48:57 2021
@author: <NAME>
"""
import pandas as pd, numpy as np, os, igraph as ig, leidenalg as la
import cvxpy as cp
from sklearn.neighbors import NearestNeighbors, radius_neighbors_graph
from kneed import KneeLocator
from sklearn.utils.validation import check_symmetric
from scipy.sparse import csr_matrix
from matplotlib import pyplot as plt
from sklearn.neighbors import kneighbors_graph
from Bipartite_Ensembling import BGPA
def read_in_data(directory_names, years):
data = {}
for year in years:
data_modes=[]
for directory in directory_names:
for filename in os.listdir(os.path.join('C:\\Users\\Gian Maria\\Desktop\\Unitn\\Iain\\CORRECT_DATA\\Data', directory)):
if year in filename:
datum = pd.read_csv(os.path.join('C:\\Users\\Gian Maria\\Desktop\\Unitn\\Iain\\CORRECT_DATA\\Data',directory, filename), index_col=0)
datum.fillna(value=0, inplace=True)
data_modes.append(datum)
data_modes_index = np.unique(np.concatenate([mode.index for mode in data_modes]))
data_modes = [mode.reindex(data_modes_index) for mode in data_modes]
data_modes = [mode.fillna(value=0) for mode in data_modes]
data[year] = data_modes.copy()
return data
class Leiden_Unimodal:
def __init__(self, obj_type='RB_Mod', resolution=1.0, n_iterations =-1):
obj_types = {'CPM': la.CPMVertexPartition,
'RBER': la.RBERVertexPartition,
'RB_Mod': la.RBConfigurationVertexPartition,
'Mod': la.ModularityVertexPartition,
'Surprise': la.SurpriseVertexPartition
}
self.obj_type = obj_type
self.obj_func = obj_types[obj_type]
self.resolution = resolution
self.n_iterations = n_iterations
def fit_transform(self, graph):
if type(graph) is ig.Graph:
G =graph
else:
G = self._scipy_to_igraph(graph)
if self.obj_type in ['CPM', 'RBER', 'RB_Mod']:
partition = la.find_partition(G, self.obj_func, n_iterations=self.n_iterations,
resolution_parameter=self.resolution)
else:
partition = la.find_partition(G, self.obj_func, n_iterations=self.n_iterations)
self.modularity_ = partition.quality()
self.labels_ = np.array(partition.membership)
return self.labels_
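# Usage sketch (illustrative only):
#   clusterer = Leiden_Unimodal(obj_type='RB_Mod', resolution=1.0)
#   labels = clusterer.fit_transform(adjacency)  # igraph.Graph or scipy sparse matrix
#   clusterer.modularity_                        # quality of the returned partition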
def _scipy_to_igraph(self, matrix):
# matrix.eliminate_zeros()
sources, targets = matrix.nonzero()
weights = matrix[sources, targets]
graph = ig.Graph(n=matrix.shape[0], edges=list(zip(sources, targets)), directed=True, edge_attrs={'weight': weights})
try:
check_symmetric(matrix, raise_exception=True)
graph = graph.as_undirected()
except ValueError:
pass
return graph
class Leiden_Multiplex:
def __init__(self, obj_types=None, resolutions=None, modal_weights=None, n_iterations=-1):
self.obj_types = obj_types
self.resolutions = resolutions
self.modal_weights = modal_weights
self.n_iterations = n_iterations
def fit_transform(self, graphs):
obj_table = {'CPM': la.CPMVertexPartition,
'RBER': la.RBERVertexPartition,
'RB_Mod': la.RBConfigurationVertexPartition,
'Mod': la.ModularityVertexPartition,
'Surprise': la.SurpriseVertexPartition
}
G=[]
for graph in graphs:
if type(graph) is ig.Graph:
G.append(graph)
else:
G.append(self._scipy_to_igraph(graph))
optimiser = la.Optimiser()
partitions = []
for i in range(len(G)):
if self.obj_types is None:
partitions.append(la.RBConfigurationVertexPartition(G[i], resolution_parameter=1.0))
elif self.resolutions is None:
obj = obj_table[self.obj_types[i]]
partitions.append(obj(G[i]))
else:
obj = obj_table[self.obj_types[i]]
partitions.append(obj(G[i], resolution_parameter=self.resolutions[i]))
if self.modal_weights is None:
diff = optimiser.optimise_partition_multiplex(partitions, n_iterations=self.n_iterations)
else:
diff = optimiser.optimise_partition_multiplex(partitions, layer_weights = self.modal_weights, n_iterations=self.n_iterations)
self.modularities = [part.modularity for part in partitions]
self.labels_ = np.array(partitions[0].membership)
return self.labels_
def _scipy_to_igraph(self, matrix):
matrix.eliminate_zeros()
sources, targets = matrix.nonzero()
weights = matrix[sources, targets]
graph = ig.Graph(n=matrix.shape[0], edges=list(zip(sources, targets)), directed=True, edge_attrs={'weight': weights})
try:
check_symmetric(matrix, raise_exception=True)
graph = graph.as_undirected()
except ValueError:
pass
return graph
class MVMC:
def __init__(self, n_iterations=-1, max_clusterings=20,
resolution_tol=1e-2, weight_tol=1e-2, verbose=False):
self.n_iterations = n_iterations
self.max_clusterings = max_clusterings
self.resolution_tol = resolution_tol
self.weight_tol = weight_tol
self.verbose = verbose
def fit_transform(self, graphs):
G=[]
for graph in graphs:
if type(graph) is ig.Graph:
G.append(graph)
else:
G.append(self._scipy_to_igraph(graph))
if self.verbose:
for i in range(len(G)):
print("View Graph {}: num_nodes: {}, num_edges: {}, directed: {}, num_components: {}, num_isolates: {}"
.format(i, G[i].vcount(), G[i].ecount(), G[i].is_directed(),
len(G[i].components(mode='WEAK').sizes()), G[i].components(mode='WEAK').sizes().count(1)))
self.weights = []
self.resolutions =[]
self.best_modularity =-np.inf
self.best_clustering = None
self.best_resolutions = None
self.best_weights = None
self.modularities =[]
self.clusterings =[]
self.final_iteration = 0
self.best_iteration = 0
weights = [1]*len(G)
resolutions =[1]*len(G)
for iterate in range(self.max_clusterings):
partitions = []
for i in range(len(G)):
partitions.append(la.RBConfigurationVertexPartition(G[i], resolution_parameter=resolutions[i]))
optimiser = la.Optimiser()
diff = optimiser.optimise_partition_multiplex(partitions, layer_weights = weights, n_iterations=self.n_iterations)
self.clusterings.append(np.array(partitions[0].membership))
self.modularities.append([part.quality()/(part.graph.ecount() if part.graph.is_directed() else 2*part.graph.ecount())
for part in partitions])
self.weights.append(weights.copy())
self.resolutions.append(resolutions.copy())
self.final_iteration +=1
if self.verbose:
print("--------")
print("Iteration: {} \n Modularities: {} \n Resolutions: {} \n Weights: {}"
.format(self.final_iteration, self.modularities[-1], resolutions, weights))
# if np.sum(np.array(self.weights[-1]) * np.array(self.modularities[-1])) > self.best_modularity:
self.best_clustering = self.clusterings[-1]
self.best_modularity = np.sum(np.array(self.weights[-1]) * np.array(self.modularities[-1]))
self.best_resolutions = self.resolutions[-1]
self.best_weights = self.weights[-1]
self.best_iteration = self.final_iteration
theta_in, theta_out = self._calculate_edge_probabilities(G)
for i in range(len(G)):
resolutions[i] = (theta_in[i] - theta_out[i])/ (np.log(theta_in[i]) - np.log(theta_out[i]))
weights[i] = (np.log(theta_in[i]) - np.log(theta_out[i]))/(np.mean([np.log(theta_in[j]) - np.log(theta_out[j]) for j in range(len(G))]))
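# Each view's resolution is updated to (theta_in - theta_out) / (ln theta_in - ln theta_out),
# and its weight to the view's log-odds ratio ln(theta_in / theta_out) relative to the mean
# over all views, so views whose clusters are better separated receive proportionally more weight.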
if (np.all(np.abs(np.array(self.resolutions[-1])-np.array(resolutions)) <= self.resolution_tol)
and np.all(np.abs(np.array(self.weights[-1])-np.array(weights)) <= self.resolution_tol)):
break
else:
best_iteration = np.argmax([np.sum(np.array(self.weights[i]) * np.array(self.modularities[i]))
for i in range(len(self.modularities))])
self.best_clustering = self.clusterings[best_iteration]
self.best_modularity = np.sum(np.array(self.weights[best_iteration]) * np.array(self.modularities[best_iteration]))
self.best_resolutions = self.resolutions[best_iteration]
self.best_weights = self.weights[best_iteration]
self.best_iteration = best_iteration
if self.verbose:
print("MVMC did not converge, best result found: Iteration: {}, Modularity: {}, Resolutions: {}, Weights: {}"
.format(self.best_iteration, self.best_modularity, self.best_resolutions, self.best_weights))
return self.best_clustering
def _scipy_to_igraph(self, matrix):
matrix.eliminate_zeros()
sources, targets = matrix.nonzero()
weights = list(matrix.data)
graph = ig.Graph(n=matrix.shape[0], edges=list(zip(sources, targets)), directed=True, edge_attrs={'weight': weights})
try:
check_symmetric(matrix, raise_exception=True)
graph = graph.as_undirected()
except ValueError:
pass
if not graph.is_weighted():
graph.es['weight'] = [1.0] * graph.ecount()
return graph
def _calculate_edge_probabilities(self, G):
theta_in =[]
theta_out =[]
clusters = self.clusterings[-1].copy()
for i in range(len(G)):
m_in = 0
m = sum(e['weight'] for e in G[i].es)
kappa =[]
G[i].vs['clusters'] = clusters
for cluster in np.unique(clusters):
nodes = G[i].vs.select(clusters_eq=cluster)
m_in += sum(e['weight'] for e in G[i].subgraph(nodes).es)
if G[i].is_directed():
degree_products = np.outer(np.array(G[i].strength(nodes, mode = 'IN', weights='weight')),
np.array(G[i].strength(nodes, mode = 'OUT', weights='weight')))
np.fill_diagonal(degree_products,0)
kappa.append(np.sum(degree_products, dtype=np.int64))
else:
kappa.append(np.sum(np.array(G[i].strength(nodes, weights='weight')), dtype=np.int64)**2)
if G[i].is_directed():
if m_in <=0:
# Case when there are no internal edges; every node in its own cluster
theta_in.append(1/G[i].ecount())
else:
theta_in.append((m_in)/(np.sum(kappa, dtype=np.int64)/(2*m)))
if m-m_in <=0:
# Case when all edges are internal; 1 cluster or a bunch of disconnected clusters
theta_out.append(1/G[i].ecount())
else:
theta_out.append((m-m_in)/(m - np.sum(kappa, dtype=np.int64)/(2*m)))
# Lint as: python3
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for lingvo Jax transformer layers."""
import itertools
from absl import logging
from absl.testing import absltest
from absl.testing import parameterized
import jax
from jax import numpy as jnp
from jax import test_util
from lingvo.core import batch_major_attention
from lingvo.jax import base_layer
from lingvo.jax import py_utils
from lingvo.jax import test_utils
from lingvo.jax.layers import attentions
from lingvo.jax.layers import transformers
import numpy as np
import tensorflow.compat.v2 as tf
class TransformersTest(test_util.JaxTestCase):
def setUp(self):
super().setUp()
np.random.seed(123456)
tf.random.set_seed(123)
@parameterized.parameters(*list(itertools.product([True, False], repeat=3)))
def test_transformer_layer(self, mask_self_attention, packed_input,
cross_attention):
p = transformers.TransformerLayer.Params().Set(
name='jax_transformer_layer',
input_dims=32,
hidden_dims=128,
num_heads=8,
mask_self_attention=mask_self_attention,
packed_input=packed_input,
cross_attention=cross_attention)
seq_len = np.random.randint(10, 32)
batch_size = 10
transformer_layer = p.Instantiate()
prng_key = jax.random.PRNGKey(seed=123)
initial_vars = transformer_layer.InstantiateVariables(prng_key)
npy_inputs = np.random.normal(
1.0, 0.5, [batch_size, seq_len, p.input_dims]).astype('float32')
inputs = jnp.asarray(npy_inputs)
npy_paddings = np.random.randint(0, 1,
[batch_size, seq_len]).astype('float32')
paddings = jnp.asarray(npy_paddings)
causal_mask = None
segment_mask = None
tf_segment_mask = None
attention_mask = attentions.ConvertPaddingsToMask(paddings)
if mask_self_attention:
causal_mask = attentions.CausalMask(inputs)
attention_mask = jnp.minimum(attention_mask, causal_mask)
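# The masks here appear to be additive attention masks (0.0 for allowed positions, a large
# negative value for disallowed ones), so jnp.minimum keeps the more restrictive of the
# padding mask and the causal mask.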
if packed_input:
segment_ids = np.random.random_integers(0, 2, [batch_size, seq_len])
segment_mask = attentions.SegmentMask(segment_ids, dtype=np.float32)
attention_mask = jnp.minimum(attention_mask, segment_mask)
if mask_self_attention:
tf_segment_mask = batch_major_attention.CausalSegmentMask(
segment_ids, tf.float32)
else:
tf_segment_mask = batch_major_attention.SegmentMask(
segment_ids, segment_ids)
cross_inputs = None
cross_attention_mask = None
tf_cross_inputs = None
tf_cross_paddings = None
tf_cross_segment_mask = None
if cross_attention:
cross_seq_len = np.random.randint(10, 128)
npy_cross_inputs = np.random.normal(
1.0, 0.5, [batch_size, cross_seq_len, p.input_dims]).astype('float32')
cross_inputs = jnp.asarray(npy_cross_inputs)
tf_cross_inputs = tf.constant(npy_cross_inputs, dtype=tf.float32)
npy_cross_paddings = np.random.randint(
0, 1, [batch_size, cross_seq_len]).astype('float32')
cross_paddings = jnp.asarray(npy_cross_paddings)
cross_attention_mask = attentions.ConvertPaddingsToMask(cross_paddings)
tf_cross_paddings = tf.constant(npy_cross_paddings, dtype=tf.float32)
if packed_input:
source_segment_ids = np.random.random_integers(
0, 2, [batch_size, cross_seq_len])
cross_segment_mask = attentions.SegmentMask(
segment_ids, source_segment_ids, dtype=np.float32)
cross_attention_mask = jnp.minimum(cross_attention_mask,
cross_segment_mask)
tf_cross_segment_mask = batch_major_attention.SegmentMask(
segment_ids, source_segment_ids)
with base_layer.JaxContext.NewContext(
prng_key=prng_key, global_step=jnp.array(0, dtype=jnp.uint32)):
outputs, _ = transformer_layer.FProp(
initial_vars,
inputs,
paddings,
attention_mask=attention_mask,
cross_inputs=cross_inputs,
cross_attention_mask=cross_attention_mask)
logging.info('initial_vars in transformer layer = %s', initial_vars)
# Test whether tf Transformer layer returns same output
# Modify initial_vars to use TF compatible params
tf_initial_vars = test_utils.ReplaceJaxAttentionVarsToTf(
initial_vars, cross_attention)
tf_initial_vars = test_utils.ToTfNmap(tf_initial_vars)
logging.info('tf_initial_vars in transformer layer = %s', initial_vars)
tf_p = batch_major_attention.TransformerLayer.Params().Set(
name='tf_transformer_layer',
input_dim=p.input_dims,
num_heads=p.num_heads,
mask_self_atten=mask_self_attention,
packed_input=packed_input,
has_aux_atten=cross_attention)
tf_p.tr_fflayer_tpl.hidden_dim = p.hidden_dims
tf_p.tr_fflayer_tpl.fflayer_tpl.batch_norm = False
tf_p.tr_fflayer_tpl.fflayer_tpl.has_bias = True
tf_transformer_layer = tf_p.Instantiate()
tf_output, _ = tf_transformer_layer.FProp(
tf_initial_vars,
tf.constant(npy_inputs, dtype=tf.float32),
paddings=test_utils.ToTfNmap(npy_paddings),
segment_mask=tf_segment_mask,
aux_vec=tf_cross_inputs,
aux_paddings=tf_cross_paddings,
aux_segment_mask=test_utils.ToTfNmap(tf_cross_segment_mask))
np_outputs = test_utils.ToNp(outputs)
tf_np_outputs = test_utils.ToNp(tf_output)
self.assertAllClose(tf_np_outputs, np_outputs, atol=1e-5)
@parameterized.parameters((True, True), (False, True), (True, False),
(False, False))
def test_transformer_layer_extendstep(self, packed_input, cross_attention):
p = transformers.TransformerLayer.Params().Set(
name='jax_transformer_layer',
input_dims=8,
hidden_dims=32,
num_heads=4,
mask_self_attention=True,
packed_input=packed_input,
cross_attention=cross_attention)
seq_len = 5
batch_size = 4
transformer_layer = p.Instantiate()
prng_key = jax.random.PRNGKey(seed=123)
initial_vars = transformer_layer.InstantiateVariables(prng_key)
initial_states = transformer_layer.InitStates(initial_vars, batch_size,
seq_len)
npy_inputs = np.random.normal(
1.0, 0.5, [batch_size, seq_len, p.input_dims]).astype('float32')
inputs = jnp.asarray(npy_inputs)
npy_paddings = np.random.randint(0, 1,
[batch_size, seq_len]).astype('float32')
paddings = jnp.asarray(npy_paddings)
attention_mask = attentions.ConvertPaddingsToMask(paddings)
segment_mask = None
causal_mask = attentions.CausalMask(inputs)
attention_mask = jnp.minimum(causal_mask, attention_mask)
if packed_input:
segment_ids = np.random.random_integers(0, 2, [batch_size, seq_len])
segment_mask = attentions.SegmentMask(segment_ids, dtype=np.float32)
attention_mask = jnp.minimum(attention_mask, segment_mask)
cross_inputs = None
cross_paddings = None
cross_attention_mask = None
if cross_attention:
cross_seq_len = np.random.randint(10, 32)
npy_cross_inputs = np.random.normal(
1.0, 0.5, [batch_size, cross_seq_len, p.input_dims]).astype('float32')
cross_inputs = jnp.asarray(npy_cross_inputs)
npy_cross_paddings = np.random.randint(
0, 1, [batch_size, cross_seq_len]).astype('float32')
cross_paddings = jnp.asarray(npy_cross_paddings)
cross_attention_mask = attentions.ConvertPaddingsToMask(cross_paddings)
if packed_input:
source_segment_ids = np.random.random_integers(
0, 2, [batch_size, cross_seq_len])
cross_segment_mask = attentions.SegmentMask(
segment_ids, source_segment_ids, dtype=np.float32)
cross_attention_mask = jnp.minimum(cross_attention_mask,
cross_segment_mask)
with base_layer.JaxContext.NewContext(
prng_key=prng_key, global_step=jnp.array(0, dtype=jnp.uint32)):
fprop_outputs, _ = transformer_layer.FProp(
initial_vars,
inputs,
paddings,
attention_mask=attention_mask,
cross_inputs=cross_inputs,
cross_attention_mask=cross_attention_mask)
decoder_outputs = jnp.zeros(shape=[seq_len, batch_size, p.input_dims])
atten_states = initial_states
for t in range(seq_len):
attention_mask_t = attention_mask[:, :, t, :]
cross_attention_mask_t = cross_attention_mask
if cross_attention:
cross_attention_mask_t = cross_attention_mask[:, :, t, :]
cross_attention_mask_t = np.expand_dims(
cross_attention_mask_t, axis=2)
atten_states, encoded = transformer_layer.ExtendStep(
initial_vars,
atten_states,
inputs=inputs[:, t, :],
time_step=t,
attention_mask=attention_mask_t,
cross_inputs=cross_inputs,
cross_attention_mask=cross_attention_mask_t)
decoder_outputs = decoder_outputs.at[t].set(encoded)
decoder_out_transposed = jnp.transpose(decoder_outputs, [1, 0, 2])
logging.info('initial_vars in transformer layer = %s', initial_vars)
np_fprop_outputs = test_utils.ToNp(fprop_outputs)
np_decoder_outputs = test_utils.ToNp(decoder_out_transposed)
self.assertAllClose(np_fprop_outputs, np_decoder_outputs, atol=1e-5)
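# This test decodes one position at a time through ExtendStep, stacks the per-step outputs,
# and asserts they match the full FProp pass within 1e-5, i.e. the cached-state decoding path
# is numerically equivalent to the parallel forward pass.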
@parameterized.parameters((True, True, True), (True, False, True),
(True, True, False), (False, True, True),
(True, False, False), (False, True, False),
(False, False, True), (False, False, False))
def test_stacked_transformer_layer(self, mask_self_attention, packed_input,
cross_attention):
p = transformers.StackedTransformerLayers.Params().Set(
name='jax_stacked_transformer_layer',
model_dims=16,
hidden_dims=64,
num_heads=8,
mask_self_attention=mask_self_attention,
num_layers=4,
packed_input=packed_input,
cross_attention=cross_attention)
seq_len = np.random.randint(10, 32)
batch_size = 10
stacked_transformer_layer = p.Instantiate()
prng_key = jax.random.PRNGKey(seed=123)
initial_vars = stacked_transformer_layer.InstantiateVariables(prng_key)
npy_inputs = np.random.normal(
1.0, 0.5, [batch_size, seq_len, p.model_dims]).astype('float32')
inputs = jnp.asarray(npy_inputs)
npy_paddings = np.random.randint(0, 1,
[batch_size, seq_len]).astype('float32')
paddings = jnp.asarray(npy_paddings)
segment_mask = None
tf_segment_mask = None
if packed_input:
segment_ids = np.random.random_integers(0, 2, [batch_size, seq_len])
segment_mask = attentions.SegmentMask(segment_ids, dtype=np.float32)
if mask_self_attention:
tf_segment_mask = batch_major_attention.CausalSegmentMask(
segment_ids, tf.float32)
else:
tf_segment_mask = batch_major_attention.SegmentMask(
segment_ids, segment_ids)
cross_inputs = None
cross_paddings = None
cross_segment_mask = None
tf_cross_inputs = None
tf_cross_paddings = None
tf_cross_segment_mask = None
if cross_attention:
cross_seq_len = np.random.randint(10, 64)
npy_cross_inputs = np.random.normal(
1.0, 0.5, [batch_size, cross_seq_len, p.model_dims]).astype('float32')
cross_inputs = jnp.asarray(npy_cross_inputs)
tf_cross_inputs = tf.constant(npy_cross_inputs, dtype=tf.float32)
npy_cross_paddings = np.random.randint(
0, 1, [batch_size, cross_seq_len]).astype('float32')
cross_paddings = jnp.asarray(npy_cross_paddings)
tf_cross_paddings = tf.constant(npy_cross_paddings, dtype=tf.float32)
if packed_input:
source_segment_ids = np.random.random_integers(
0, 2, [batch_size, cross_seq_len])
cross_segment_mask = attentions.SegmentMask(
segment_ids, source_segment_ids, dtype=np.float32)
tf_cross_segment_mask = batch_major_attention.SegmentMask(
segment_ids, source_segment_ids)
with base_layer.JaxContext.NewContext(
prng_key=prng_key, global_step=jnp.array(0, dtype=jnp.uint32)):
outputs = stacked_transformer_layer.FProp(
initial_vars,
inputs,
paddings,
segment_mask=segment_mask,
cross_inputs=cross_inputs,
cross_paddings=cross_paddings,
cross_segment_mask=cross_segment_mask)
logging.info('initial_vars in transformer layer = %s', initial_vars)
# Test whether tf Transformer layer returns same output
# Modify initial_vars to use TF compatible params
tf_initial_vars = py_utils.NestedMap()
tf_initial_vars.x_layers = []
for jax_initial_vars in initial_vars.x_layers:
tf_layer_vars = test_utils.ReplaceJaxAttentionVarsToTf(
jax_initial_vars, cross_attention)
tf_initial_vars.x_layers.append(tf_layer_vars)
tf_initial_vars = test_utils.ToTfNmap(tf_initial_vars)
logging.info('tf_initial_vars in transformer layer = %s', initial_vars)
tf_p = batch_major_attention.StackedTransformerLayers.Params().Set(
name='tf_transformer_layer',
mdl_dim=p.model_dims,
hidden_dim=p.hidden_dims,
num_atten_heads=p.num_heads,
mask_self_atten=mask_self_attention,
num_layers=p.num_layers,
packed_input=packed_input,
has_aux_atten=cross_attention)
tf_p.transformer_layer_params_tpl.tr_fflayer_tpl.fflayer_tpl.batch_norm = (
False)
tf_p.transformer_layer_params_tpl.tr_fflayer_tpl.fflayer_tpl.has_bias = True
tf_stacked_transformer_layer = tf_p.Instantiate()
tf_output, _ = tf_stacked_transformer_layer.FProp(
tf_initial_vars,
test_utils.ToTfNmap(npy_inputs),
paddings=test_utils.ToTfNmap(npy_paddings),
segment_mask=test_utils.ToTfNmap(tf_segment_mask),
aux_vec=test_utils.ToTfNmap(tf_cross_inputs),
aux_paddings=test_utils.ToTfNmap(tf_cross_paddings),
aux_segment_mask=test_utils.ToTfNmap(tf_cross_segment_mask))
np_outputs = test_utils.ToNp(outputs)
tf_np_outputs = test_utils.ToNp(tf_output)
self.assertAllClose(tf_np_outputs, np_outputs, atol=1e-5)
@parameterized.parameters(*list(itertools.product([True, False], repeat=3)))
def test_repeated_stacked_xformer_layer(self, mask_self_attention,
packed_input, cross_attention):
model_dims = 16
p1 = transformers.StackedTransformerLayers.Params().Set(
name='jax_stacked_transformer_layer',
model_dims=model_dims,
hidden_dims=64,
num_heads=8,
mask_self_attention=mask_self_attention,
num_layers=4,
packed_input=packed_input,
cross_attention=cross_attention)
p2 = transformers.StackedTransformerLayersRepeated.Params().Set(
name='jax_stacked_transformer_layer_repeated',
model_dims=model_dims,
hidden_dims=64,
num_heads=8,
mask_self_attention=mask_self_attention,
num_layers=4,
packed_input=packed_input,
cross_attention=cross_attention)
seq_len = np.random.randint(10, 32)
batch_size = 10
stacked_transformer_layer = p1.Instantiate()
repeated_transformer_layer = p2.Instantiate()
prng_key = jax.random.PRNGKey(seed=123)
initial_vars = stacked_transformer_layer.InstantiateVariables(prng_key)
repeated_transformer_layer.InstantiateVariableConfigs()
def _StackVars(*args):
args = [x[jnp.newaxis, :] for x in args]
return jnp.vstack(args)
stacked_vars = py_utils.NestedMap(
repeat=py_utils.NestedMap(
sub=tf.nest.map_structure(_StackVars, *initial_vars.x_layers)))
npy_inputs = np.random.normal(
1.0, 0.5, [batch_size, seq_len, model_dims]).astype('float32')
inputs = jnp.asarray(npy_inputs)
npy_paddings = np.random.randint(0, 1,
[batch_size, seq_len]).astype('float32')
paddings = jnp.asarray(npy_paddings)
segment_mask = None
if packed_input:
segment_ids = np.random.random_integers(0, 2, [batch_size, seq_len])
segment_mask = attentions.SegmentMask(segment_ids, dtype=np.float32)
cross_inputs = None
cross_paddings = None
cross_segment_mask = None
if cross_attention:
cross_seq_len = np.random.randint(10, 64)
npy_cross_inputs = np.random.normal(
1.0, 0.5, [batch_size, cross_seq_len, model_dims]).astype('float32')
cross_inputs = jnp.asarray(npy_cross_inputs)
npy_cross_paddings = np.random.randint(
0, 1, [batch_size, cross_seq_len]).astype('float32')
cross_paddings = jnp.asarray(npy_cross_paddings)
if packed_input:
source_segment_ids = np.random.random_integers(
0, 2, [batch_size, cross_seq_len])
cross_segment_mask = attentions.SegmentMask(
segment_ids, source_segment_ids, dtype=np.float32)
with base_layer.JaxContext.NewContext(
prng_key=jax.random.PRNGKey(seed=1234),
global_step=jnp.array(0, dtype=jnp.uint32)):
outputs = stacked_transformer_layer.FProp(
initial_vars,
inputs,
paddings,
segment_mask=segment_mask,
cross_inputs=cross_inputs,
cross_paddings=cross_paddings,
cross_segment_mask=cross_segment_mask)
outputs_repeated = repeated_transformer_layer.FProp(
stacked_vars,
inputs,
paddings,
segment_mask=segment_mask,
cross_inputs=cross_inputs,
cross_paddings=cross_paddings,
cross_segment_mask=cross_segment_mask)
self.assertAllClose(outputs, outputs_repeated)
@parameterized.parameters(*list(itertools.product([True, False], repeat=5)))
def test_stacked_transformer_layer_extendstep(self, packed_input,
cross_attention,
enable_while_loop,
use_repeat_layer, combine_qkv):
if cross_attention and combine_qkv:
self.skipTest('combine_qkv optimization only works for self-attention')
if use_repeat_layer:
layer_params = transformers.StackedTransformerLayersRepeated.Params()
else:
layer_params = transformers.StackedTransformerLayers.Params()
p = layer_params.Set(
name='jax_transformer_layer',
model_dims=8,
hidden_dims=32,
num_heads=2,
mask_self_attention=True,
packed_input=packed_input,
cross_attention=cross_attention,
num_layers=2,
enable_while_loop=enable_while_loop)
p.transformer_layer_params_tpl.tr_atten_tpl.combine_qkv = combine_qkv
seq_len = 5
batch_size = 4
stacked_transformer_layer = p.Instantiate()
prng_key = jax.random.PRNGKey(seed=123)
initial_vars = stacked_transformer_layer.InstantiateVariables(prng_key)
initial_states = stacked_transformer_layer.InitStates(
initial_vars, batch_size, seq_len)
npy_inputs = np.random.normal(
1.0, 0.5, [batch_size, seq_len, p.model_dims]).astype('float32')
inputs = jnp.asarray(npy_inputs)
npy_paddings = np.random.randint(0, 1,
[batch_size, seq_len]).astype('float32')
paddings = jnp.asarray(npy_paddings)
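    # Attention mask derived from the paddings; a per-step slice of it is used
    # in the decode loop below.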
attention_mask = attentions.ConvertPaddingsToMask(paddings)
segment_mask = None
if packed_input:
      segment_ids = np.random.randint(0, 3, [batch_size, seq_len])
segment_mask = attentions.SegmentMask(segment_ids, dtype=np.float32)
cross_inputs = None
cross_paddings = None
cross_segment_mask = None
if cross_attention:
cross_seq_len = np.random.randint(10, 32)
npy_cross_inputs = np.random.normal(
1.0, 0.5, [batch_size, cross_seq_len, p.model_dims]).astype('float32')
cross_inputs = jnp.asarray(npy_cross_inputs)
npy_cross_paddings = np.random.randint(
0, 1, [batch_size, cross_seq_len]).astype('float32')
cross_paddings = jnp.asarray(npy_cross_paddings)
if packed_input:
        source_segment_ids = np.random.randint(
            0, 3, [batch_size, cross_seq_len])
cross_segment_mask = attentions.SegmentMask(
segment_ids, source_segment_ids, dtype=np.float32)
prng_key = jax.random.PRNGKey(seed=123)
global_step = jnp.array(0, dtype=jnp.uint64)
with base_layer.JaxContext.NewContext(
prng_key=prng_key, global_step=global_step):
fprop_outputs = stacked_transformer_layer.FProp(
initial_vars,
inputs,
paddings,
segment_mask=segment_mask,
cross_inputs=cross_inputs,
cross_paddings=cross_paddings,
cross_segment_mask=cross_segment_mask)
decoder_outputs = jnp.zeros(shape=[seq_len, batch_size, p.model_dims])
atten_states = initial_states
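      # Decode one token at a time with ExtendStep and collect the per-step
      # outputs; they should match the full-sequence FProp outputs above.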
for t in range(seq_len):
segment_mask_t = attention_mask[:, :, t, :]
cross_segment_mask_t = cross_segment_mask
if segment_mask is not None:
segment_mask_t = jnp.minimum(segment_mask_t, segment_mask[:, :, t, :])
if cross_segment_mask is not None:
cross_segment_mask_t = cross_segment_mask[:, :, t, :]
atten_states, encoded = stacked_transformer_layer.ExtendStep(
initial_vars,
atten_states,
inputs=inputs[:, t, :],
time_step=t,
segment_mask=segment_mask_t,
cross_inputs=cross_inputs,
cross_paddings=cross_paddings,
cross_segment_mask=cross_segment_mask_t)
decoder_outputs = decoder_outputs.at[t].set(encoded)
decoder_out_transposed = jnp.transpose(decoder_outputs, [1, 0, 2])
logging.info('initial_vars in transformer layer = %s', initial_vars)
np_fprop_outputs = test_utils.ToNp(fprop_outputs)
np_decoder_outputs = test_utils.ToNp(decoder_out_transposed)
self.assertAllClose(np_fprop_outputs, np_decoder_outputs, atol=1e-5)

  @parameterized.parameters((True, True), (False, True), (True, False),
(False, False))
def test_stacked_transformer_layer_while_loop(self, packed_input,
cross_attention):
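    # p1 and p2 use identical configs except enable_while_loop, so the looped
    # and unrolled stacks can be checked against each other on shared
    # variables.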
num_layers = 2
p1 = transformers.StackedTransformerLayers.Params().Set(
name='jax_transformer_layer',
model_dims=8,
hidden_dims=32,
num_heads=2,
mask_self_attention=True,
packed_input=packed_input,
cross_attention=cross_attention,
num_layers=num_layers,
enable_while_loop=False)
p2 = transformers.StackedTransformerLayers.Params().Set(
name='jax_transformer_layer',
model_dims=8,
hidden_dims=32,
num_heads=2,
mask_self_attention=True,
packed_input=packed_input,
cross_attention=cross_attention,
num_layers=num_layers,
enable_while_loop=True)
seq_len = 5
batch_size = 4
layer1 = p1.Instantiate()
layer2 = p2.Instantiate()
prng_key = jax.random.PRNGKey(seed=123)
initial_vars = layer1.InstantiateVariables(prng_key)
layer2.InstantiateVariableConfigs()
npy_inputs = np.random.normal(
1.0, 0.5, [batch_size, seq_len, p1.model_dims]).astype('float32')
inputs = jnp.asarray(npy_inputs)
npy_paddings = np.random.randint(0, 1,
[batch_size, seq_len]).astype('float32')
paddings = jnp.asarray(npy_paddings)
segment_mask = None
if packed_input:
      segment_ids = np.random.randint(0, 3, [batch_size, seq_len])
segment_mask = attentions.SegmentMask(segment_ids, dtype=np.float32)
cross_inputs = None
cross_paddings = None
cross_segment_mask = None
if cross_attention:
cross_seq_len = | np.random.randint(10, 32) | numpy.random.randint |