prompt (string, lengths 19–879k) | completion (string, lengths 3–53.8k) | api (string, lengths 8–59)
---|---|---
import batoid
import numpy as np
from test_helpers import timer, init_gpu, rays_allclose, checkAngle, do_pickle
@timer
def test_properties():
rng = np.random.default_rng(5)
size = 10
for i in range(100):
x = rng.normal(size=size)
y = rng.normal(size=size)
z = rng.normal(size=size)
vx = rng.normal(size=size)
vy = rng.normal(size=size)
vz = rng.normal(size=size)
t = rng.normal(size=size)
w = rng.normal(size=size)
fx = rng.normal(size=size)
vig = rng.choice([True, False], size=size)
fa = rng.choice([True, False], size=size)
cs = batoid.CoordSys(
origin=rng.normal(size=3),
rot=batoid.RotX(rng.normal())@batoid.RotY(rng.normal())
)
rv = batoid.RayVector(x, y, z, vx, vy, vz, t, w, fx, vig, fa, cs)
np.testing.assert_array_equal(rv.x, x)
np.testing.assert_array_equal(rv.y, y)
np.testing.assert_array_equal(rv.z, z)
np.testing.assert_array_equal(rv.r[:, 0], x)
np.testing.assert_array_equal(rv.r[:, 1], y)
np.testing.assert_array_equal(rv.r[:, 2], z)
np.testing.assert_array_equal(rv.vx, vx)
np.testing.assert_array_equal(rv.vy, vy)
np.testing.assert_array_equal(rv.vz, vz)
np.testing.assert_array_equal(rv.v[:, 0], vx)
np.testing.assert_array_equal(rv.v[:, 1], vy)
np.testing.assert_array_equal(rv.v[:, 2], vz)
np.testing.assert_array_equal(rv.k[:, 0], rv.kx)
np.testing.assert_array_equal(rv.k[:, 1], rv.ky)
np.testing.assert_array_equal(rv.k[:, 2], rv.kz)
np.testing.assert_array_equal(rv.t, t)
np.testing.assert_array_equal(rv.wavelength, w)
np.testing.assert_array_equal(rv.flux, fx)
np.testing.assert_array_equal(rv.vignetted, vig)
np.testing.assert_array_equal(rv.failed, fa)
assert rv.coordSys == cs
rv._syncToDevice()
do_pickle(rv)
@timer
def test_positionAtTime():
rng = np.random.default_rng(57)
size = 10_000
x = rng.uniform(-1, 1, size=size)
y = rng.uniform(-1, 1, size=size)
z = rng.uniform(-0.1, 0.1, size=size)
vx = rng.uniform(-0.05, 0.05, size=size)
vy = rng.uniform(-0.05, 0.05, size=size)
vz = np.sqrt(1.0 - vx*vx - vy*vy)
# Try with default t=0 first
rv = batoid.RayVector(x, y, z, vx, vy, vz)
np.testing.assert_equal(rv.x, x)
np.testing.assert_equal(rv.y, y)
np.testing.assert_equal(rv.z, z)
np.testing.assert_equal(rv.vx, vx)
np.testing.assert_equal(rv.vy, vy)
np.testing.assert_equal(rv.vz, vz)
np.testing.assert_equal(rv.t, 0.0)
np.testing.assert_equal(rv.wavelength, 0.0)
for t1 in [0.0, 1.0, -1.1, 2.5]:
np.testing.assert_equal(
rv.positionAtTime(t1),
rv.r + t1 * rv.v
)
# Now add some random t's
t = rng.uniform(-1.0, 1.0, size=size)
rv = batoid.RayVector(x, y, z, vx, vy, vz, t)
np.testing.assert_equal(rv.x, x)
np.testing.assert_equal(rv.y, y)
np.testing.assert_equal(rv.z, z)
np.testing.assert_equal(rv.vx, vx)
np.testing.assert_equal(rv.vy, vy)
np.testing.assert_equal(rv.vz, vz)
np.testing.assert_equal(rv.t, t)
np.testing.assert_equal(rv.wavelength, 0.0)
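# With per-ray reference times t, positionAtTime(t1) propagates each ray from
# its own t to the common time t1, hence the (t1 - rv.t) factor below.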
for t1 in [0.0, 1.4, -1.3, 2.1]:
np.testing.assert_equal(
rv.positionAtTime(t1),
rv.r + rv.v*(t1-rv.t)[:,None]
)
@timer
def test_propagate():
rng = np.random.default_rng(577)
size = 10_000
x = rng.uniform(-1, 1, size=size)
y = rng.uniform(-1, 1, size=size)
z = rng.uniform(-0.1, 0.1, size=size)
vx = rng.uniform(-0.05, 0.05, size=size)
vy = rng.uniform(-0.05, 0.05, size=size)
vz = np.sqrt(1.0 - vx*vx - vy*vy)
# Try with default t=0 first
rv = batoid.RayVector(x, y, z, vx, vy, vz)
for t1 in [0.0, 1.0, -1.1, 2.5]:
rvcopy = rv.copy()
r1 = rv.positionAtTime(t1)
rvcopy.propagate(t1)
np.testing.assert_equal(
rvcopy.r,
r1
)
np.testing.assert_equal(
rvcopy.v,
rv.v
)
np.testing.assert_equal(
rvcopy.t,
t1
)
# Now add some random t's
t = rng.uniform(-1.0, 1.0, size=size)
rv = batoid.RayVector(x, y, z, vx, vy, vz, t)
for t1 in [0.0, 1.0, -1.1, 2.5]:
rvcopy = rv.copy()
r1 = rv.positionAtTime(t1)
rvcopy.propagate(t1)
np.testing.assert_equal(
rvcopy.r,
r1
)
np.testing.assert_equal(
rvcopy.v,
rv.v
)
np.testing.assert_equal(
rvcopy.t,
t1
)
@timer
def test_phase():
rng = np.random.default_rng(5772)
size = 10_000
for n in [1.0, 1.3]:
x = rng.uniform(-1, 1, size=size)
y = rng.uniform(-1, 1, size=size)
z = rng.uniform(-0.1, 0.1, size=size)
vx = rng.uniform(-0.05, 0.05, size=size)
vy = rng.uniform(-0.05, 0.05, size=size)
vz = np.sqrt(1.0/(n*n) - vx*vx - vy*vy)
t = rng.uniform(-1.0, 1.0, size=size)
wavelength = rng.uniform(300e-9, 1100e-9, size=size)
rv = batoid.RayVector(x, y, z, vx, vy, vz, t, wavelength)
# First explicitly check that phase is 0 at position and time of individual
# rays
for i in rng.choice(size, size=10):
np.testing.assert_equal(
rv.phase(rv.r[i], rv.t[i])[i],
0.0
)
# Now use actual formula
# phi = k.(r-r0) - (t-t0)omega
# k = 2 pi v / (lambda |v|^2)
# omega = 2 pi / lambda
# |v| = 1 / n
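# Combining the above: since |v|^2 = 1/n^2,
#   phi = (2 pi / lambda) * (n^2 * v.(r1 - r) - (t1 - t)),
# which is exactly what the loop below evaluates.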
for r1, t1 in [
((0, 0, 0), 0),
((0, 1, 2), 3),
((-1, 2, 4), -1),
((0, 1, -4), -2)
]:
phi = np.einsum("ij,ij->i", rv.v, r1-rv.r)
phi *= n*n
phi -= (t1-rv.t)
phi *= 2*np.pi/wavelength
np.testing.assert_allclose(
rv.phase(r1, t1),
phi,
rtol=0,
atol=1e-7
)
for i in rng.choice(size, size=10):
s = slice(i, i+1)
rvi = batoid.RayVector(
x[s], y[s], z[s],
vx[s], vy[s], vz[s],
t[s].copy(), wavelength[s].copy()
)
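# The checks below rely on the amplitude being exp(1j * phase): advancing the
# evaluation point by a whole number of wavelengths (or the time by a whole
# number of periods) returns 1+0j, half-wavelength/period offsets give -1,
# and quarter offsets land on +/- 1j depending on whether position or time
# is advanced.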
# Move integer number of wavelengths ahead
ti = rvi.t[0]
wi = rvi.wavelength[0]
r1 = rvi.positionAtTime(ti + 5123456789*wi)[0]
a = rvi.amplitude(r1, ti)
np.testing.assert_allclose(a.real, 1.0, rtol=0, atol=2e-5)
np.testing.assert_allclose(a.imag, 0.0, rtol=0, atol=2e-5)
# Half wavelength
r1 = rvi.positionAtTime(ti + 6987654321.5*wi)[0]
a = rvi.amplitude(r1, ti)
np.testing.assert_allclose(a.real, -1.0, rtol=0, atol=2e-5)
np.testing.assert_allclose(a.imag, 0.0, rtol=0, atol=2e-5)
# Quarter wavelength
r1 = rvi.positionAtTime(ti + 0.25*wi)[0]
a = rvi.amplitude(r1, ti)
np.testing.assert_allclose(a.real, 0.0, rtol=0, atol=2e-5)
np.testing.assert_allclose(a.imag, 1.0, rtol=0, atol=2e-5)
# Three-quarters wavelength
r1 = rvi.positionAtTime(ti + 7182738495.75*wi)[0]
a = rvi.amplitude(r1, ti)
np.testing.assert_allclose(a.real, 0.0, rtol=0, atol=2e-5)
np.testing.assert_allclose(a.imag, -1.0, rtol=0, atol=2e-5)
# We can also keep the position the same and change the time in
# half/quarter integer multiples of the period.
a = rvi.amplitude(rvi.r[0], rvi.t[0]+5e9*wi)
np.testing.assert_allclose(a.real, 1.0, rtol=0, atol=1e-5)
np.testing.assert_allclose(a.imag, 0.0, rtol=0, atol=1e-5)
a = rvi.amplitude(rvi.r[0], rvi.t[0]+(5e9+5.5)*wi)
np.testing.assert_allclose(a.real, -1.0, rtol=0, atol=1e-5)
np.testing.assert_allclose(a.imag, 0.0, rtol=0, atol=1e-5)
a = rvi.amplitude(rvi.r[0], rvi.t[0]+(5e9+2.25)*wi)
np.testing.assert_allclose(a.real, 0.0, rtol=0, atol=1e-5)
np.testing.assert_allclose(a.imag, -1.0, rtol=0, atol=1e-5)
a = rvi.amplitude(rvi.r[0], rvi.t[0]+(5e9+1.75)*wi)
np.testing.assert_allclose(a.real, 0.0, rtol=0, atol=1e-5)
np.testing.assert_allclose(a.imag, 1.0, rtol=0, atol=1e-5)
# If we pick a point anywhere along a vector originating at the ray
# position, but orthogonal to its direction of propagation, then we
# should get phase = 0 (mod 2pi).
v1 = np.array([1.0, 0.0, 0.0])
v1 = np.cross(rvi.v[0], v1)
p1 = rvi.r[0] + v1
a = rvi.amplitude(p1, rvi.t[0])
np.testing.assert_allclose(a.real, 1.0, rtol=0, atol=1e-5)
np.testing.assert_allclose(a.imag, 0.0, rtol=0, atol=1e-5)
@timer
def test_sumAmplitude():
import time
rng = np.random.default_rng(57721)
size = 10_000
for n in [1.0, 1.3]:
x = rng.uniform(-1, 1, size=size)
y = rng.uniform(-1, 1, size=size)
z = rng.uniform(-0.1, 0.1, size=size)
vx = rng.uniform(-0.05, 0.05, size=size)
vy = rng.uniform(-0.05, 0.05, size=size)
vz = np.sqrt(1.0/(n*n) - vx*vx - vy*vy)
t = rng.uniform(-1.0, 1.0, size=size)
wavelength = rng.uniform(300e-9, 1100e-9, size=size)
rv = batoid.RayVector(x, y, z, vx, vy, vz, t, wavelength)
satime = 0
atime = 0
for r1, t1 in [
((0, 0, 0), 0),
((0, 1, 2), 3),
((-1, 2, 4), -1),
((0, 1, -4), -2)
]:
at0 = time.time()
s1 = rv.sumAmplitude(r1, t1)
at1 = time.time()
s2 = np.sum(rv.amplitude(r1, t1))
at2 = time.time()
np.testing.assert_allclose(s1, s2, rtol=0, atol=1e-11)
satime += at1-at0
atime += at2-at1
# print(f"sumAplitude() time: {satime}")
# print(f"np.sum(amplitude()) time: {atime}")
@timer
def test_equals():
import time
rng = np.random.default_rng(577215)
size = 10_000
x = rng.uniform(-1, 1, size=size)
y = rng.uniform(-1, 1, size=size)
z = rng.uniform(-0.1, 0.1, size=size)
vx = rng.uniform(-0.05, 0.05, size=size)
vy = rng.uniform(-0.05, 0.05, size=size)
vz = np.sqrt(1.0 - vx*vx - vy*vy)
t = rng.uniform(-1.0, 1.0, size=size)
wavelength = rng.uniform(300e-9, 1100e-9, size=size)
flux = rng.uniform(0.9, 1.1, size=size)
vignetted = rng.choice([True, False], size=size)
failed = rng.choice([True, False], size=size)
args = x, y, z, vx, vy, vz, t, wavelength, flux, vignetted, failed
rv = batoid.RayVector(*args)
rv2 = rv.copy()
assert rv == rv2
for i in range(len(args)):
newargs = [args[i].copy() for i in range(len(args))]
ai = newargs[i]
if ai.dtype == float:
ai[0] = 1.2+ai[0]*3.45
elif ai.dtype == bool:
ai[0] = not ai[0]
# else panic!
rv2 = batoid.RayVector(*newargs)
assert rv != rv2
# Repeat, but force comparison on device
rv2 = rv.copy()
rv._rv.x.syncToDevice()
rv._rv.y.syncToDevice()
rv._rv.z.syncToDevice()
rv._rv.vx.syncToDevice()
rv._rv.vy.syncToDevice()
rv._rv.vz.syncToDevice()
rv._rv.t.syncToDevice()
rv._rv.wavelength.syncToDevice()
rv._rv.flux.syncToDevice()
rv._rv.vignetted.syncToDevice()
rv._rv.failed.syncToDevice()
assert rv == rv2
for i in range(len(args)):
newargs = [args[i].copy() for i in range(len(args))]
ai = newargs[i]
if ai.dtype == float:
ai[0] = 1.2+ai[0]*3.45
elif ai.dtype == bool:
ai[0] = not ai[0]
# else panic!
rv2 = batoid.RayVector(*newargs)
assert rv != rv2
@timer
def test_asGrid():
rng = np.random.default_rng(5772156)
for _ in range(10):
backDist = rng.uniform(9.0, 11.0)
wavelength = rng.uniform(300e-9, 1100e-9)
nx = 1
while (nx%2) == 1:
nx = rng.integers(10, 21)
lx = rng.uniform(1.0, 10.0)
dx = lx/(nx-2)
dirCos = np.array([
rng.uniform(-0.1, 0.1),
rng.uniform(-0.1, 0.1),
rng.uniform(-1.2, -0.8),
])
dirCos /= np.sqrt(np.dot(dirCos, dirCos))
# Some things that should be equivalent
grid1 = batoid.RayVector.asGrid(
backDist=backDist, wavelength=wavelength,
nx=nx, lx=lx, dirCos=dirCos
)
grid2 = batoid.RayVector.asGrid(
backDist=backDist, wavelength=wavelength,
nx=nx, dx=dx, dirCos=dirCos
)
grid3 = batoid.RayVector.asGrid(
backDist=backDist, wavelength=wavelength,
dx=dx, lx=lx, dirCos=dirCos
)
grid4 = batoid.RayVector.asGrid(
backDist=backDist, wavelength=wavelength,
nx=nx, lx=(lx, 0.0), dirCos=dirCos
)
theta_x, theta_y = batoid.utils.dirCosToField(*dirCos)
grid5 = batoid.RayVector.asGrid(
backDist=backDist, wavelength=wavelength,
nx=nx, lx=(lx, 0.0), theta_x=theta_x, theta_y=theta_y
)
rays_allclose(grid1, grid2)
rays_allclose(grid1, grid3)
rays_allclose(grid1, grid4)
rays_allclose(grid1, grid5)
# Check distance to chief ray
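# For even nx the rays are laid out in row-major order with the chief ray at
# grid position (nx//2, nx//2), so its flattened index is (nx//2)*nx + nx//2.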
cridx = (nx//2)*nx+nx//2
obs_dist = np.sqrt(np.dot(grid1.r[cridx], grid1.r[cridx]))
np.testing.assert_allclose(obs_dist, backDist)
np.testing.assert_allclose(grid1.t, 0)
np.testing.assert_allclose(grid1.wavelength, wavelength)
np.testing.assert_allclose(grid1.vignetted, False)
np.testing.assert_allclose(grid1.failed, False)
np.testing.assert_allclose(grid1.vx, dirCos[0])
np.testing.assert_allclose(grid1.vy, dirCos[1])
np.testing.assert_allclose(grid1.vz, dirCos[2])
# Check distribution of points propagated to entrance pupil
pupil = batoid.Plane()
pupil.intersect(grid1)
np.testing.assert_allclose(np.diff(grid1.x)[0], dx)
np.testing.assert_allclose(np.diff(grid1.y)[0], 0, atol=1e-14)
np.testing.assert_allclose(np.diff(grid1.x)[nx-1], -dx*(nx-1))
np.testing.assert_allclose(np.diff(grid1.y)[nx-1], dx)
# Another set, but with odd nx
for _ in range(10):
backDist = rng.uniform(9.0, 11.0)
wavelength = rng.uniform(300e-9, 1100e-9)
while (nx%2) == 0:
nx = rng.integers(10, 21)
lx = rng.uniform(1.0, 10.0)
dx = lx/(nx-1)
dirCos = np.array([
rng.uniform(-0.1, 0.1),
rng.uniform(-0.1, 0.1),
rng.uniform(-1.2, -0.8),
])
dirCos /= np.sqrt(np.dot(dirCos, dirCos))
grid1 = batoid.RayVector.asGrid(
backDist=backDist, wavelength=wavelength,
nx=nx, lx=lx, dirCos=dirCos
)
grid2 = batoid.RayVector.asGrid(
backDist=backDist, wavelength=wavelength,
nx=nx, dx=dx, dirCos=dirCos
)
grid3 = batoid.RayVector.asGrid(
backDist=backDist, wavelength=wavelength,
nx=nx, lx=(lx, 0), dirCos=dirCos
)
# ... but the following is not equivalent, since default is to always
# infer an even nx and ny
# grid4 = batoid.RayVector.asGrid(
# backDist=backDist, wavelength=wavelength,
# dx=1/9, lx=1.0, dirCos=dirCos
# )
rays_allclose(grid1, grid2)
rays_allclose(grid1, grid3)
cridx = (nx*nx-1)//2
obs_dist = np.sqrt(np.dot(grid1.r[cridx], grid1.r[cridx]))
np.testing.assert_allclose(obs_dist, backDist)
np.testing.assert_allclose(grid1.t, 0)
np.testing.assert_allclose(grid1.wavelength, wavelength)
np.testing.assert_allclose(grid1.vignetted, False)
"""Functions to calculate mean squared displacements from trajectory data
This module includes functions to calculate mean squared displacements and
additional measures from input trajectory datasets as calculated by the
Trackmate ImageJ plugin.
"""
import warnings
import random as rand
import pandas as pd
import numpy as np
import numpy.ma as ma
import scipy.stats as stats
from scipy import interpolate
import matplotlib.pyplot as plt
from matplotlib.pyplot import cm
import diff_classifier.aws as aws
def nth_diff(dataframe, n=1, axis=0):
"""Calculates the nth difference between vector elements
Returns a new vector of size N - n containing the nth difference between
vector elements.
Parameters
----------
dataframe : pandas.core.series.Series of int or float
Input data on which differences are to be calculated.
n : int
Function calculates xpos(i) - xpos(i - n) for all values in the pandas
series.
axis : {0, 1}
Axis along which differences are to be calculated. Default is 0. If 0,
input must be a pandas series. If 1, input must be a numpy array.
Returns
-------
diff : pandas.core.series.Series of int or float
Pandas series of size N - n, where N is the original size of dataframe.
Examples
--------
>>> df = np.ones((5, 10))
>>> nth_diff(df, axis=1)
array([[0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0.]])
>>> df = np.ones((5, 10))
>>> nth_diff(df)
array([[0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]])
"""
assert isinstance(n, int), "n must be an integer."
if dataframe.ndim == 1:
length = dataframe.shape[0]
if n <= length:
test1 = dataframe[:-n].reset_index(drop=True)
test2 = dataframe[n:].reset_index(drop=True)
diff = test2 - test1
else:
diff = np.array([np.nan, np.nan])
else:
length = dataframe.shape[0]
if n <= length:
if axis == 0:
test1 = dataframe[:-n, :]
test2 = dataframe[n:, :]
else:
test1 = dataframe[:, :-n]
test2 = dataframe[:, n:]
diff = test2 - test1
else:
diff = np.array([np.nan, np.nan])
return diff
def msd_calc(track, length=10):
"""Calculates mean squared displacement of input track.
Returns a pandas DataFrame containing MSD data calculated from an individual track.
Parameters
----------
track : pandas.core.frame.DataFrame
Contains, at a minimum, 'Frame', 'X', and 'Y' columns.
Returns
-------
new_track : pandas.core.frame.DataFrame
Similar to input track. All missing frames of individual trajectories
are filled in with NaNs, and two new columns, MSDs and Gauss are added:
MSDs, calculated mean squared displacements using the formula
MSD = <(xpos-x0)**2>
Gauss, calculated Gaussianity
Examples
--------
>>> data1 = {'Frame': [1, 2, 3, 4, 5],
... 'X': [5, 6, 7, 8, 9],
... 'Y': [6, 7, 8, 9, 10]}
>>> df = pd.DataFrame(data=data1)
>>> new_track = msd.msd_calc(df, 5)
>>> data1 = {'Frame': [1, 2, 3, 4, 5],
... 'X': [5, 6, 7, 8, 9],
... 'Y': [6, 7, 8, 9, 10]}
>>> df = pd.DataFrame(data=data1)
>>> new_track = msd.msd_calc(df)
"""
meansd = np.zeros(length)
gauss = np.zeros(length)
new_frame = np.linspace(1, length, length)
old_frame = track['Frame']
oldxy = [track['X'], track['Y']]
fxy = [interpolate.interp1d(old_frame, oldxy[0], bounds_error=False,
fill_value=np.nan),
interpolate.interp1d(old_frame, oldxy[1], bounds_error=False,
fill_value=np.nan)]
intxy = [ma.masked_equal(fxy[0](new_frame), np.nan),
ma.masked_equal(fxy[1](new_frame), np.nan)]
data1 = {'Frame': new_frame,
'X': intxy[0],
'Y': intxy[1]
}
new_track = pd.DataFrame(data=data1)
for frame in range(0, length-1):
xy = [np.square(nth_diff(new_track['X'], n=frame+1)),
np.square(nth_diff(new_track['Y'], n=frame+1))]
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=RuntimeWarning)
meansd[frame+1] = np.nanmean(xy[0] + xy[1])
gauss[frame+1] = np.nanmean(xy[0]**2 + xy[1]**2
)/(2*(meansd[frame+1]**2))
new_track['MSDs'] = pd.Series(meansd, index=new_track.index)
new_track['Gauss'] = pd.Series(gauss, index=new_track.index)
return new_track
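# Illustrative check of the formulas above (not part of the library): in the
# docstring example both X and Y increase by 1 each frame, so for lag n the
# displacements are dx = dy = n and
#   MSDs[n] = <dx**2 + dy**2> = 2*n**2,
#   Gauss[n] = <dx**4 + dy**4> / (2*MSDs[n]**2) = 2*n**4 / (8*n**4) = 0.25.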
def all_msds(data):
"""Calculates mean squared displacements of a trajectory dataset
Returns a pandas DataFrame containing MSD data of all tracks in a trajectory
pandas dataframe.
Parameters
----------
data : pandas.core.frame.DataFrame
Contains, at a minimum, 'Frame', 'Track_ID', 'X', and
'Y' columns. Note: this function assumes that frames begin at 1, not 0.
Adjust before feeding data into the function.
Returns
-------
new_data : pandas.core.frame.DataFrame
Similar to input data. All missing frames of individual trajectories
are filled in with NaNs, and two new columns, MSDs and Gauss are added:
MSDs, calculated mean squared displacements using the formula
MSD = <(xpos-x0)**2>
Gauss, calculated Gaussianity
Examples
--------
>>> data1 = {'Frame': [1, 2, 3, 4, 5, 1, 2, 3, 4, 5],
... 'Track_ID': [1, 1, 1, 1, 1, 2, 2, 2, 2, 2],
... 'X': [5, 6, 7, 8, 9, 1, 2, 3, 4, 5],
... 'Y': [6, 7, 8, 9, 10, 2, 3, 4, 5, 6]}
>>> df = pd.DataFrame(data=data1)
>>> all_msds(df)
"""
trackids = data.Track_ID.unique()
partcount = trackids.shape[0]
length = int(max(data['Frame']))
new = {}
new['length'] = partcount*length
new['frame'] = np.zeros(new['length'])
new['ID'] = np.zeros(new['length'])
new['xy'] = [np.zeros(new['length'])
import warnings
from logging import getLogger
from typing import Callable, Union, Iterable, Tuple, Sequence
import numpy as np
from matplotlib.axes import Axes
from matplotlib.colors import to_rgba
from pandas import DataFrame
from scipy import stats
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
from seaborn.utils import iqr, remove_na
from seaborn.categorical import _CategoricalPlotter, _CategoricalScatterPlotter
__all__ = ["half_violinplot", "stripplot", "distplot"]
log = getLogger(__name__)
class _StripPlotter(_CategoricalScatterPlotter):
"""1-d scatterplot with categorical organization."""
def __init__(
self,
x,
y,
hue,
data,
order,
hue_order,
jitter,
dodge,
orient,
color,
palette,
width,
move,
):
"""Initialize the plotter."""
self.establish_variables(x, y, hue, data, orient, order, hue_order)
self.establish_colors(color, palette, 1)
# Set object attributes
self.dodge = dodge
self.width = width
self.move = move
if jitter == 1: # Use a good default for `jitter = True`
jlim = 0.1
else:
jlim = float(jitter)
if self.hue_names is not None and dodge:
jlim /= len(self.hue_names)
self.jitterer = stats.uniform(-jlim, jlim * 2).rvs
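# stats.uniform(loc, scale).rvs samples uniformly from [loc, loc + scale], so
# jitterer draws offsets in [-jlim, +jlim] around each categorical position.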
def draw_stripplot(self, ax, kws):
"""Draw the points onto `ax`."""
# Set the default zorder to 2.1, so that the points
# will be drawn on top of line elements (like in a boxplot)
for i, group_data in enumerate(self.plot_data):
if self.plot_hues is None or not self.dodge:
if self.hue_names is None:
hue_mask = np.ones(group_data.size, bool)
else:
hue_mask = np.array(
[h in self.hue_names for h in self.plot_hues[i]],
bool,
)
# Broken on older numpys
# hue_mask = np.in1d(self.plot_hues[i], self.hue_names)
strip_data = group_data[hue_mask]
# Plot the points in centered positions
cat_pos = self.move + np.ones(strip_data.size) * i
cat_pos += self.jitterer(len(strip_data))
kws.update(c=self.point_colors[i][hue_mask])
if self.orient == "v":
ax.scatter(cat_pos, strip_data, **kws)
else:
ax.scatter(strip_data, cat_pos, **kws)
else:
offsets = self.hue_offsets
for j, hue_level in enumerate(self.hue_names):
hue_mask = self.plot_hues[i] == hue_level
strip_data = group_data[hue_mask]
# Plot the points in centered positions
center = i + offsets[j]
cat_pos = self.move + np.ones(strip_data.size) * center
cat_pos += self.jitterer(len(strip_data))
kws.update(c=self.point_colors[i][hue_mask])
if self.orient == "v":
ax.scatter(cat_pos, strip_data, **kws)
else:
ax.scatter(strip_data, cat_pos, **kws)
def plot(self, ax, kws):
"""Make the plot."""
self.draw_stripplot(ax, kws)
self.add_legend_data(ax)
self.annotate_axes(ax)
if self.orient == "h":
ax.invert_yaxis()
class _Half_ViolinPlotter(_CategoricalPlotter):
def __init__(
self,
x,
y,
hue,
data,
order,
hue_order,
bw,
cut,
scale,
scale_hue,
alpha,
gridsize,
width,
inner,
split,
dodge,
orient,
linewidth,
color,
palette,
saturation,
offset,
):
self.establish_variables(x, y, hue, data, orient, order, hue_order)
self.establish_colors(color, palette, saturation)
self.estimate_densities(bw, cut, scale, scale_hue, gridsize)
self.gridsize = gridsize
self.width = width
self.dodge = dodge
self.offset = offset
self.alpha = alpha
if inner is not None:
if not any(
[
inner.startswith("quart"),
inner.startswith("box"),
inner.startswith("stick"),
inner.startswith("point"),
]
):
err = "Inner style '{}' not recognized".format(inner)
raise ValueError(err)
self.inner = inner
if split and self.hue_names is not None and len(self.hue_names) < 2:
msg = "There must be at least two hue levels to use `split`.'"
raise ValueError(msg)
self.split = split
if linewidth is None:
linewidth = mpl.rcParams["lines.linewidth"]
self.linewidth = linewidth
def estimate_densities(self, bw, cut, scale, scale_hue, gridsize):
"""Find the support and density for all of the data."""
# Initialize data structures to keep track of plotting data
if self.hue_names is None:
support = []
density = []
counts = np.zeros(len(self.plot_data))
max_density = np.zeros(len(self.plot_data))
else:
support = [[] for _ in self.plot_data]
density = [[] for _ in self.plot_data]
size = len(self.group_names), len(self.hue_names)
counts = np.zeros(size)
max_density = np.zeros(size)
for i, group_data in enumerate(self.plot_data):
# Option 1: we have a single level of grouping
# --------------------------------------------
if self.plot_hues is None:
# Strip missing datapoints
kde_data = remove_na(group_data)
# Handle special case of no data at this level
if kde_data.size == 0:
support.append(np.array([]))
density.append(np.array([1.0]))
counts[i] = 0
max_density[i] = 0
continue
# Handle special case of a single unique datapoint
elif np.unique(kde_data).size == 1:
support.append(np.unique(kde_data))
density.append(np.array([1.0]))
counts[i] = 1
max_density[i] = 0
continue
# Fit the KDE and get the used bandwidth size
kde, bw_used = self.fit_kde(kde_data, bw)
# Determine the support grid and get the density over it
support_i = self.kde_support(kde_data, bw_used, cut, gridsize)
density_i = kde.evaluate(support_i)
# Update the data structures with these results
support.append(support_i)
density.append(density_i)
counts[i] = kde_data.size
max_density[i] = density_i.max()
# Option 2: we have nested grouping by a hue variable
# ---------------------------------------------------
else:
for j, hue_level in enumerate(self.hue_names):
# Handle special case of no data at this category level
if not group_data.size:
support[i].append(np.array([]))
density[i].append(np.array([1.0]))
counts[i, j] = 0
max_density[i, j] = 0
continue
# Select out the observations for this hue level
hue_mask = self.plot_hues[i] == hue_level
# Strip missing datapoints
kde_data = remove_na(group_data[hue_mask])
# Handle special case of no data at this level
if kde_data.size == 0:
support[i].append(np.array([]))
density[i].append(np.array([1.0]))
counts[i, j] = 0
max_density[i, j] = 0
continue
# Handle special case of a single unique datapoint
elif np.unique(kde_data).size == 1:
support[i].append(np.unique(kde_data))
density[i].append(np.array([1.0]))
counts[i, j] = 1
max_density[i, j] = 0
continue
# Fit the KDE and get the used bandwidth size
kde, bw_used = self.fit_kde(kde_data, bw)
# Determine the support grid and get the density over it
support_ij = self.kde_support(
kde_data, bw_used, cut, gridsize
)
density_ij = kde.evaluate(support_ij)
# Update the data structures with these results
support[i].append(support_ij)
density[i].append(density_ij)
counts[i, j] = kde_data.size
max_density[i, j] = density_ij.max()
# Scale the height of the density curve.
# For a violinplot the density is non-quantitative.
# The objective here is to scale the curves relative to 1 so that
# they can be multiplied by the width parameter during plotting.
if scale == "area":
self.scale_area(density, max_density, scale_hue)
elif scale == "width":
self.scale_width(density)
elif scale == "count":
self.scale_count(density, counts, scale_hue)
else:
raise ValueError("scale method '{}' not recognized".format(scale))
# Set object attributes that will be used while plotting
self.support = support
self.density = density
def fit_kde(self, x, bw):
"""Estimate a KDE for a vector of data with flexible bandwidth."""
# Allow for the use of old scipy where `bw` is fixed
try:
kde = stats.gaussian_kde(x, bw)
except TypeError:
kde = stats.gaussian_kde(x)
if bw != "scott": # scipy default
msg = (
"Ignoring bandwidth choice, "
"please upgrade scipy to use a different bandwidth."
)
warnings.warn(msg, UserWarning)
# Extract the numeric bandwidth from the KDE object
bw_used = kde.factor
# At this point, bw will be a numeric scale factor.
# To get the actual bandwidth of the kernel, we multiply by the
# unbiased standard deviation of the data, which we will use
# elsewhere to compute the range of the support.
bw_used = bw_used * x.std(ddof=1)
return kde, bw_used
def kde_support(self, x, bw, cut, gridsize):
"""Define a grid of support for the violin."""
support_min = x.min() - bw * cut
support_max = x.max() + bw * cut
return np.linspace(support_min, support_max, gridsize)
def scale_area(self, density, max_density, scale_hue):
"""Scale the relative area under the KDE curve.
This essentially preserves the "standard" KDE scaling, but the
resulting maximum density will be 1 so that the curve can be
properly multiplied by the violin width.
"""
if self.hue_names is None:
for d in density:
if d.size > 1:
d /= max_density.max()
else:
for i, group in enumerate(density):
for d in group:
if scale_hue:
max = max_density[i].max()
else:
max = max_density.max()
if d.size > 1:
d /= max
def scale_width(self, density):
"""Scale each density curve to the same height."""
if self.hue_names is None:
for d in density:
d /= d.max()
else:
for group in density:
for d in group:
d /= d.max()
def scale_count(self, density, counts, scale_hue):
"""Scale each density curve by the number of observations."""
if self.hue_names is None:
if counts.max() == 0:
d = 0
else:
for count, d in zip(counts, density):
d /= d.max()
d *= count / counts.max()
else:
for i, group in enumerate(density):
for j, d in enumerate(group):
if counts[i].max() == 0:
d = 0
else:
count = counts[i, j]
if scale_hue:
scaler = count / counts[i].max()
else:
scaler = count / counts.max()
d /= d.max()
d *= scaler
@property
def dwidth(self):
if self.hue_names is None or not self.dodge:
return self.width / 2
elif self.split:
return self.width / 2
else:
return self.width / (2 * len(self.hue_names))
def draw_violins(self, ax):
"""Draw the violins onto `ax`."""
fill_func = ax.fill_betweenx if self.orient == "v" else ax.fill_between
for i, group_data in enumerate(self.plot_data):
kws = dict(
edgecolor=self.gray, linewidth=self.linewidth, alpha=self.alpha
)
# Option 1: we have a single level of grouping
# --------------------------------------------
if self.plot_hues is None:
support, density = self.support[i], self.density[i]
# Handle special case of no observations in this bin
if support.size == 0:
continue
# Handle special case of a single observation
elif support.size == 1:
val = support.item()
d = density.item()
self.draw_single_observation(ax, i, val, d)
continue
# Draw the violin for this group
grid = np.ones(self.gridsize)
import time
import numpy as np
import scipy.integrate
import scipy.linalg
import ross
from ross.units import Q_, check_units
from .abs_defect import Defect
from .integrate_solver import Integrator
__all__ = [
"Rubbing",
]
class Rubbing(Defect):
"""Contains a rubbing model for applications on finite element models of rotative machinery.
The reference coordenates system is: z-axis throught the shaft center; x-axis and y-axis in the sensors' planes
Parameters
----------
dt : float
Time step.
tI : float
Initial time.
tF : float
Final time.
deltaRUB : float
Distance between the housing and shaft surface.
kRUB : float
Contact stiffness.
cRUB : float
Contact damping.
miRUB : float
Friction coefficient.
posRUB : int
Node where the rubbing is occurring.
speed : float, pint.Quantity
Operational speed of the machine. Default unit is rad/s.
unbalance_magnitude : array
Array with the unbalance magnitude. The unit is kg.m.
unbalance_phase : array
Array with the unbalance phase. The unit is rad.
torque : bool
Set it as True to consider the torque provided by the rubbing, by default False.
print_progress : bool
Set it True, to print the time iterations and the total time spent, by default False.
Returns
-------
A force to be applied on the shaft.
References
----------
.. [1] <NAME>., <NAME>., & <NAME>. (2002). Linear and Nonlinear Rotordynamics: A Modern Treatment with Applications, pp. 215-222.
Examples
--------
>>> from ross.defects.rubbing import rubbing_example
>>> probe1 = (14, 0)
>>> probe2 = (22, 0)
>>> response = rubbing_example()
>>> results = response.run_time_response()
>>> fig = response.plot_dfft(probe=[probe1, probe2], range_freq=[0, 100], yaxis_type="log")
>>> # fig.show()
"""
@check_units
def __init__(
self,
dt,
tI,
tF,
deltaRUB,
kRUB,
cRUB,
miRUB,
posRUB,
speed,
unbalance_magnitude,
unbalance_phase,
torque=False,
print_progress=False,
):
self.dt = dt
self.tI = tI
self.tF = tF
self.deltaRUB = deltaRUB
self.kRUB = kRUB
self.cRUB = cRUB
self.miRUB = miRUB
self.posRUB = posRUB
self.speed = speed
self.speedI = speed
self.speedF = speed
self.DoF = np.arange((self.posRUB * 6), (self.posRUB * 6 + 6))
self.torque = torque
self.unbalance_magnitude = unbalance_magnitude
self.unbalance_phase = unbalance_phase
self.print_progress = print_progress
if len(self.unbalance_magnitude) != len(self.unbalance_phase):
raise Exception(
"The unbalance magnitude vector and phase must have the same size!"
)
def run(self, rotor):
"""Calculates the shaft angular position and the unbalance forces at X / Y directions.
Parameters
----------
rotor : ross.Rotor Object
6 DoF rotor model.
"""
self.rotor = rotor
self.n_disk = len(self.rotor.disk_elements)
if self.n_disk != len(self.unbalance_magnitude):
raise Exception("The number of discs and unbalances must agree!")
self.ndof = rotor.ndof
self.iteration = 0
self.radius = rotor.df_shaft.iloc[self.posRUB].o_d / 2
self.ndofd = np.zeros(len(self.rotor.disk_elements))
for ii in range(self.n_disk):
self.ndofd[ii] = (self.rotor.disk_elements[ii].n) * 6
self.lambdat = 0.00001
# Faxial = 0
# TorqueI = 0
# TorqueF = 0
self.sA = (
self.speedI * np.exp(-self.lambdat * self.tF)
- self.speedF * np.exp(-self.lambdat * self.tI)
) / (np.exp(-self.lambdat * self.tF) - np.exp(-self.lambdat * self.tI))
self.sB = (self.speedF - self.speedI) / (
np.exp(-self.lambdat * self.tF) - np.exp(-self.lambdat * self.tI)
)
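# sA and sB are chosen so that Omega(t) = sA + sB*exp(-lambdat*t) satisfies
# Omega(tI) = speedI and Omega(tF) = speedF, i.e. a smooth exponential ramp
# between the initial and final speeds (here speedI == speedF, so the speed
# is effectively constant).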
# sAT = (
# TorqueI * np.exp(-lambdat * self.tF) - TorqueF * np.exp(-lambdat * self.tI)
# ) / (np.exp(-lambdat * self.tF) - np.exp(-lambdat * self.tI))
# sBT = (TorqueF - TorqueI) / (
# np.exp(-lambdat * self.tF) - np.exp(-lambdat * self.tI)
# )
# self.SpeedV = sA + sB * np.exp(-lambdat * t)
# self.TorqueV = sAT + sBT * np.exp(-lambdat * t)
# self.AccelV = -lambdat * sB * np.exp(-lambdat * t)
# Determining the modal matrix
self.K = self.rotor.K(self.speed)
self.C = self.rotor.C(self.speed)
self.G = self.rotor.G()
self.M = self.rotor.M()
self.Kst = self.rotor.Kst()
V1, ModMat = scipy.linalg.eigh(
self.K,
self.M,
type=1,
turbo=False,
)
ModMat = ModMat[:, :12]
self.ModMat = ModMat
# Modal transformations
self.Mmodal = ((ModMat.T).dot(self.M)).dot(ModMat)
self.Cmodal = ((ModMat.T).dot(self.C)).dot(ModMat)
self.Gmodal = ((ModMat.T).dot(self.G)).dot(ModMat)
self.Kmodal = ((ModMat.T).dot(self.K)).dot(ModMat)
self.Kstmodal = ((ModMat.T).dot(self.Kst)).dot(ModMat)
y0 = np.zeros(24)
t_eval = np.arange(self.tI, self.tF + self.dt, self.dt)
# t_eval = np.arange(self.tI, self.tF, self.dt)
T = t_eval
self.angular_position = (
self.sA * T
- (self.sB / self.lambdat) * np.exp(-self.lambdat * T)
+ (self.sB / self.lambdat)
)
self.Omega = self.sA + self.sB * np.exp(-self.lambdat * T)
self.AccelV = -self.lambdat * self.sB * np.exp(-self.lambdat * T)
self.tetaUNB = np.zeros((len(self.unbalance_phase), len(self.angular_position)))
unbx = np.zeros(len(self.angular_position))
unby = np.zeros(len(self.angular_position))
FFunb = np.zeros((self.ndof, len(t_eval)))
self.forces_rub = np.zeros((self.ndof, len(t_eval)))
for ii in range(self.n_disk):
self.tetaUNB[ii, :] = (
self.angular_position + self.unbalance_phase[ii] + np.pi / 2
)
unbx = self.unbalance_magnitude[ii] * (self.AccelV) * (
np.cos(self.tetaUNB[ii, :])
) - self.unbalance_magnitude[ii] * ((self.Omega**2)) * (
np.sin(self.tetaUNB[ii, :])
)
unby = -self.unbalance_magnitude[ii] * (self.AccelV) * (
np.sin(self.tetaUNB[ii, :])
) - self.unbalance_magnitude[ii] * (self.Omega**2) * (
np.cos(self.tetaUNB[ii, :])
)
FFunb[int(self.ndofd[ii]), :] += unbx
FFunb[int(self.ndofd[ii] + 1), :] += unby
self.Funbmodal = (self.ModMat.T).dot(FFunb)
self.inv_Mmodal = np.linalg.pinv(self.Mmodal)
t1 = time.time()
x = Integrator(
self.tI,
y0,
self.tF,
self.dt,
self._equation_of_movement,
self.print_progress,
)
x = x.rk4()
t2 = time.time()
if self.print_progress:
print(f"Time spent: {t2-t1} s")
self.displacement = x[:12, :]
self.velocity = x[12:, :]
self.time_vector = t_eval
self.response = self.ModMat.dot(self.displacement)
def _equation_of_movement(self, T, Y, i):
"""Calculates the displacement and velocity using state-space representation in the modal domain.
Parameters
----------
T : float
Iteration time.
Y : array
Array of displacement and velocity, in the modal domain.
i : int
Iteration step.
Returns
-------
new_Y : array
Array of the new displacement and velocity, in the modal domain.
"""
positions = Y[:12]
velocity = Y[12:] # velocity in space state
positionsFis = self.ModMat.dot(positions)
velocityFis = self.ModMat.dot(velocity)
Frub, ft = self._rub(positionsFis, velocityFis, self.Omega[i])
self.forces_rub[:, i] = ft
ftmodal = (self.ModMat.T).dot(ft)
# proper equation of movement to be integrated in time
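# In modal coordinates q, the system integrated here is
#   Mmodal q'' + (Cmodal + Omega*Gmodal) q' + (Kmodal + AccelV*Kstmodal) q = f_rub + f_unb,
# written in first-order form: new_X_dot = q' and
# new_V_dot = q'' = Mmodal^-1 (f_rub + f_unb - damping/gyroscopic terms - stiffness terms).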
new_V_dot = (
ftmodal
+ self.Funbmodal[:, i]
- ((self.Cmodal + self.Gmodal * self.Omega[i])).dot(velocity)
- ((self.Kmodal + self.Kstmodal * self.AccelV[i]).dot(positions))
).dot(self.inv_Mmodal)
new_X_dot = velocity
new_Y = np.zeros(24)
new_Y[:12] = new_X_dot
new_Y[12:] = new_V_dot
return new_Y
def _rub(self, positionsFis, velocityFis, ang):
self.F_k = np.zeros(self.ndof)
self.F_c = np.zeros(self.ndof)
self.F_f = np.zeros(self.ndof)
self.y = np.concatenate((positionsFis, velocityFis))
ii = 0 + 6 * self.posRUB # rubbing position
self.radial_displ_node = np.sqrt(
self.y[ii] ** 2 + self.y[ii + 1] ** 2
) # radial displacement
self.radial_displ_vel_node = np.sqrt(
self.y[ii + self.ndof] ** 2 + self.y[ii + 1 + self.ndof] ** 2
) # velocity
self.phi_angle = np.arctan2(self.y[ii + 1], self.y[ii])
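# Contact model: rubbing forces are applied only while the radial displacement
# exceeds the gap deltaRUB. The stiffness and damping forces act along x and y,
# while the friction force is tangential, its sign set by the direction of the
# relative sliding velocity Vt + ang*radius at the contact point.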
if self.radial_displ_node >= self.deltaRUB:
self.F_k[ii] = self._stiffness_force(self.y[ii])
self.F_k[ii + 1] = self._stiffness_force(self.y[ii + 1])
self.F_c[ii] = self._damping_force(self.y[ii + self.ndof])
self.F_c[ii + 1] = self._damping_force(self.y[ii + 1 + self.ndof])
Vt = -self.y[ii + self.ndof + 1] * np.sin(self.phi_angle) + self.y[
ii + self.ndof
] * np.cos(self.phi_angle)
if Vt + ang * self.radius > 0:
self.F_f[ii] = -self._tangential_force(self.F_k[ii], self.F_c[ii])
self.F_f[ii + 1] = self._tangential_force(
self.F_k[ii + 1], self.F_c[ii + 1]
)
if self.torque:
self.F_f[ii + 5] = self._torque_force(
self.F_f[ii], self.F_f[ii + 1], self.y[ii]
)
elif Vt + ang * self.radius < 0:
self.F_f[ii] = self._tangential_force(self.F_k[ii], self.F_c[ii])
self.F_f[ii + 1] = -self._tangential_force(
self.F_k[ii + 1], self.F_c[ii + 1]
)
if self.torque:
self.F_f[ii + 5] = self._torque_force(
self.F_f[ii], self.F_f[ii + 1], self.y[ii]
)
return self._combine_forces(self.F_k, self.F_c, self.F_f)
def _stiffness_force(self, y):
"""Calculates the stiffness force
Parameters
----------
y : float
Displacement value.
Returns
-------
force : numpy.float64
Force magnitude.
"""
force = (
-self.kRUB
* (self.radial_displ_node - self.deltaRUB)
* y
/ abs(self.radial_displ_node)
)
return force
def _damping_force(self, y):
"""Calculates the damping force
Parameters
----------
y : float
Displacement value.
Returns
-------
force : numpy.float64
Force magnitude.
"""
force = (
-self.cRUB
* (self.radial_displ_vel_node)
* y
/ abs(self.radial_displ_vel_node)
)
return force
def _tangential_force(self, F_k, F_c):
"""Calculates the tangential force
Parameters
----------
y : float
Displacement value.
Returns
-------
force : numpy.float64
Force magnitude.
"""
force = self.miRUB * (abs(F_k + F_c))
return force
def _torque_force(self, F_f, F_fp, y):
"""Calculates the torque force
Parameters
----------
y : float
Displacement value.
Returns
-------
force : numpy.float64
Force magnitude.
"""
force = self.radius * (
np.sqrt(F_f**2 + F_fp**2) * y / abs(self.radial_displ_node)
)
return force
def _combine_forces(self, F_k, F_c, F_f):
"""Mounts the final force vector.
Parameters
----------
F_k : numpy.ndarray
Stiffness force vector.
F_c : numpy.ndarray
Damping force vector.
F_f : numpy.ndarray
Tangential force vector.
Returns
-------
Frub : numpy.ndarray
Final force vector for each degree of freedom.
FFrub : numpy.ndarray
Final force vector.
"""
Frub = F_k[self.DoF] + F_c[self.DoF] + F_f[self.DoF]
FFrub = F_k + F_c + F_f
return Frub, FFrub
@property
def forces(self):
pass
def base_rotor_example():
"""Internal routine that create an example of a rotor, to be used in
the associated misalignment problems as a prerequisite.
This function returns an instance of a 6 DoF rotor, with a number of
components attached. As this is not the focus of the example here, but
only a requisite, see the example in "rotor assembly" for additional
information on the rotor object.
Returns
-------
rotor : ross.Rotor Object
An instance of a flexible 6 DoF rotor object.
Examples
--------
>>> rotor = base_rotor_example()
>>> rotor.Ip
0.015118294226367068
"""
steel2 = ross.Material(
name="Steel", rho=7850, E=2.17e11, Poisson=0.2992610837438423
)
# Rotor with 6 DoFs, with internal damping, with 10 shaft elements, 2 disks and 2 bearings.
i_d = 0
o_d = 0.019
n = 33
# fmt: off
L = np.array(
[0 , 25, 64, 104, 124, 143, 175, 207, 239, 271,
303, 335, 345, 355, 380, 408, 436, 466, 496, 526,
556, 586, 614, 647, 657, 667, 702, 737, 772, 807,
842, 862, 881, 914]
)/ 1000
# fmt: on
L = [L[i] - L[i - 1] for i in range(1, len(L))]
shaft_elem = [
ross.ShaftElement6DoF(
material=steel2,
L=l,
idl=i_d,
odl=o_d,
idr=i_d,
odr=o_d,
alpha=8.0501,
beta=1.0e-5,
rotary_inertia=True,
shear_effects=True,
)
for l in L
]
Id = 0.003844540885417
Ip = 0.007513248437500
disk0 = ross.DiskElement6DoF(n=12, m=2.6375, Id=Id, Ip=Ip)
disk1 = ross.DiskElement6DoF(n=24, m=2.6375, Id=Id, Ip=Ip)
kxx1 = 4.40e5
kyy1 = 4.6114e5
kzz = 0
cxx1 = 27.4
cyy1 = 2.505
czz = 0
kxx2 = 9.50e5
kyy2 = 1.09e8
cxx2 = 50.4
cyy2 = 100.4553
bearing0 = ross.BearingElement6DoF(
n=4, kxx=kxx1, kyy=kyy1, cxx=cxx1, cyy=cyy1, kzz=kzz, czz=czz
)
bearing1 = ross.BearingElement6DoF(
n=31, kxx=kxx2, kyy=kyy2, cxx=cxx2, cyy=cyy2, kzz=kzz, czz=czz
)
rotor = ross.Rotor(shaft_elem, [disk0, disk1], [bearing0, bearing1])
return rotor
def rubbing_example():
"""Create an example of a rubbing defect.
This function returns an instance of a rubbing defect. The purpose is to make
available a simple model so that a doctest can be written using it.
Returns
-------
rubbing : ross.Rubbing Object
An instance of a rubbing model object.
Examples
--------
>>> rubbing = rubbing_example()
>>> rubbing.speed
125.66370614359172
"""
rotor = base_rotor_example()
rubbing = rotor.run_rubbing(
dt=0.0001,
tI=0,
tF=0.5,
deltaRUB=7.95e-5,
kRUB=1.1e6,
cRUB=40,
miRUB=0.3,
posRUB=12,
speed=Q_(1200, "RPM"),
unbalance_magnitude=np.array([5e-4, 0])
import talib
import numpy as np
import jtrade.core.instrument.equity as Equity
# ========== TECH OVERLAP INDICATORS **START** ==========
def BBANDS(equity, start=None, end=None, timeperiod=5, nbdevup=2, nbdevdn=2, matype=0):
"""Bollinger Bands
:param timeperiod:
:param nbdevup:
:param nbdevdn:
:param matype:
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
upperband, middleband, lowerband = talib.BBANDS(close, timeperiod=timeperiod, nbdevup=nbdevup, nbdevdn=nbdevdn, matype=matype)
return upperband, middleband, lowerband
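# Illustrative usage (hypothetical names; assumes ``equity.hp`` is an OHLCV
# DataFrame indexed by date, as all wrappers in this module expect):
#   upper, middle, lower = BBANDS(my_equity, start="2020-01-01",
#                                 end="2020-12-31", timeperiod=20)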
def DEMA(equity, start=None, end=None, timeperiod=30):
"""Double Exponential Moving Average
:param timeperiod:
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.DEMA(close, timeperiod=timeperiod)
return real
def EMA(equity, start=None, end=None, timeperiod=30):
"""Exponential Moving Average
NOTE: The EMA function has an unstable period.
:param timeperiod:
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.EMA(close, timeperiod=timeperiod)
return real
def HT_TRENDLINE(equity, start=None, end=None):
"""Hilbert Transform - Instantaneous Trendline
NOTE: The HT_TRENDLINE function has an unstable period.
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.HT_TRENDLINE(close)
return real
def KAMA(equity, start=None, end=None, timeperiod=30):
"""Kaufman Adaptive Moving Average
NOTE: The KAMA function has an unstable period.
:param timeperiod:
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.KAMA(close, timeperiod=timeperiod)
return real
def MA(equity, start=None, end=None, timeperiod=30, matype=0):
"""Moving average
:param timeperiod:
:param matype:
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.MA(close, timeperiod=timeperiod, matype=matype)
return real
def MAMA(equity, start=None, end=None, fastlimit=0, slowlimit=0):
"""MESA Adaptive Moving Average
NOTE: The MAMA function has an unstable period.
:param fastlimit:
:param slowlimit:
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
mama, fama = talib.MAMA(close, fastlimit=fastlimit, slowlimit=slowlimit)
return mama, fama
def MAVP(equity, periods, start=None, end=None, minperiod=2, maxperiod=30, matype=0):
"""Moving average with variable period
:param periods:
:param minperiod:
:param maxperiod:
:param matype:
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.MAVP(close, periods, minperiod=minperiod, maxperiod=maxperiod, matype=matype)
return real
def MIDPOINT(equity, start=None, end=None, timeperiod=14):
"""MidPoint over period
:param timeperiod:
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.MIDPOINT(close, timeperiod=timeperiod)
return real
def MIDPRICE(equity, start=None, end=None, timeperiod=14):
"""Midpoint Price over period
:param timeperiod:
:return:
"""
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
real = talib.MIDPRICE(high, low, timeperiod=timeperiod)
return real
def SAR(equity, start=None, end=None, acceleration=0, maximum=0):
"""Parabolic SAR
:param acceleration:
:param maximum:
:return:
"""
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
real = talib.SAR(high, low, acceleration=acceleration, maximum=maximum)
return real
def SAREXT(equity, start=None, end=None, startvalue=0, offsetonreverse=0, accelerationinitlong=0, accelerationlong=0,
accelerationmaxlong=0, accelerationinitshort=0, accelerationshort=0, accelerationmaxshort=0):
"""Parabolic SAR - Extended
:param startvalue:
:param offsetonreverse:
:param accelerationinitlong:
:param accelerationlong:
:param accelerationmaxlong:
:param accelerationinitshort:
:param accelerationshort:
:param accelerationmaxshort:
:return:
"""
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
real = talib.SAREXT(high, low, startvalue=startvalue, offsetonreverse=offsetonreverse, accelerationinitlong=accelerationinitlong,
accelerationlong=accelerationlong, accelerationmaxlong=accelerationmaxlong, accelerationinitshort=accelerationinitshort,
accelerationshort=accelerationshort, accelerationmaxshort=accelerationmaxshort)
return real
def SMA(equity, start=None, end=None, timeperiod=30):
"""Simple Moving Average
:param timeperiod:
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.SMA(close, timeperiod=timeperiod)
return real
def T3(equity, start=None, end=None, timeperiod=5, vfactor=0):
"""Triple Exponential Moving Average (T3)
NOTE: The T3 function has an unstable period.
:param timeperiod:
:param vfactor:
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.T3(close, timeperiod=timeperiod, vfactor=vfactor)
return real
def TEMA(equity, start=None, end=None, timeperiod=30):
"""Triple Exponential Moving Average
:param timeperiod:
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.TEMA(close, timeperiod=timeperiod)
return real
def TRIMA(equity, start=None, end=None, timeperiod=30):
"""Triangular Moving Average
:param timeperiod:
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.TRIMA(close, timeperiod=timeperiod)
return real
def WMA(equity, start=None, end=None, timeperiod=30):
"""Weighted Moving Average
:param timeperiod:
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.WMA(close, timeperiod=timeperiod)
return real
# ========== TECH OVERLAP INDICATORS **END** ==========
# ========== TECH MOMENTUM INDICATORS **START** ==========
def ADX(equity, start=None, end=None, timeperiod=14):
"""Average Directional Movement Index
NOTE: The ADX function has an unstable period.
:param timeperiod:
:return:
"""
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.ADX(high, low, close, timeperiod=timeperiod)
return real
def ADXR(equity, start=None, end=None, timeperiod=14):
"""Average Directional Movement Index Rating
NOTE: The ADXR function has an unstable period.
:param timeperiod:
:return:
"""
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.ADXR(high, low, close, timeperiod=timeperiod)
return real
def APO(equity, start=None, end=None, fastperiod=12, slowperiod=26, matype=0):
"""Absolute Price Oscillator
:param fastperiod:
:param slowperiod:
:param matype:
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.APO(close, fastperiod=fastperiod, slowperiod=slowperiod, matype=matype)
return real
def AROON(equity, start=None, end=None, timeperiod=14):
"""Aroon
:param timeperiod:
:return:
"""
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
aroondown, aroonup = talib.AROON(high, low, timeperiod=timeperiod)
return aroondown, aroonup
def AROONOSC(equity, start=None, end=None, timeperiod=14):
"""Aroon Oscillator
:param timeperiod:
:return:
"""
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
real = talib.AROONOSC(high, low, timeperiod=timeperiod)
return real
def BOP(equity, start=None, end=None):
"""Balance Of Power
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.BOP(opn, high, low, close)
return real
def CCI(equity, start=None, end=None, timeperiod=14):
"""Commodity Channel Index
:param timeperiod:
:return:
"""
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.CCI(high, low, close, timeperiod=timeperiod)
return real
def CMO(equity, start=None, end=None, timeperiod=14):
"""Chande Momentum Oscillator
NOTE: The CMO function has an unstable period.
:param timeperiod:
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.CMO(close, timeperiod=timeperiod)
return real
def DX(equity, start=None, end=None, timeperiod=14):
"""Directional Movement Index
NOTE: The DX function has an unstable period.
:param timeperiod:
:return:
"""
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.DX(high, low, close, timeperiod=timeperiod)
return real
def MACD(equity, start=None, end=None, fastperiod=12, slowperiod=26, signalperiod=9):
"""Moving Average Convergence/Divergence
:param fastperiod:
:param slowperiod:
:param signalperiod:
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
macd, macdsignal, macdhist = talib.MACD(close, fastperiod=fastperiod, slowperiod=slowperiod, signalperiod=signalperiod)
return macd, macdsignal, macdhist
def MACDEXT(equity, start=None, end=None, fastperiod=12, fastmatype=0, slowperiod=26, slowmatype=0, signalperiod=9, signalmatype=0):
"""MACD with controllable MA type
:param fastperiod:
:param fastmatype:
:param slowperiod:
:param slowmatype:
:param signalperiod:
:param signalmatype:
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
macd, macdsignal, macdhist = talib.MACDEXT(close, fastperiod=fastperiod, fastmatype=fastmatype, slowperiod=slowperiod,
slowmatype=slowmatype, signalperiod=signalperiod, signalmatype=signalmatype)
return macd, macdsignal, macdhist
def MACDFIX(equity, start=None, end=None, signalperiod=9):
"""Moving Average Convergence/Divergence Fix 12/26
:param signalperiod:
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
macd, macdsignal, macdhist = talib.MACDFIX(close, signalperiod=signalperiod)
return macd, macdsignal, macdhist
def MFI(equity, start=None, end=None, timeperiod=14):
"""Money Flow Index
NOTE: The MFI function has an unstable period.
:param timeperiod:
:return:
"""
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
volume = np.array(equity.hp.loc[start:end, 'volume'], dtype='f8')
real = talib.MFI(high, low, close, volume, timeperiod=timeperiod)
return real
def MINUS_DI(equity, start=None, end=None, timeperiod=14):
"""Minus Directional signal
NOTE: The MINUS_DI function has an unstable period.
:param timeperiod:
:return:
"""
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.MINUS_DI(high, low, close, timeperiod=timeperiod)
return real
def MINUS_DM(equity, start=None, end=None, timeperiod=14):
"""Minus Directional Movement
NOTE: The MINUS_DM function has an unstable period.
:param timeperiod:
:return:
"""
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
real = talib.MINUS_DM(high, low, timeperiod=timeperiod)
return real
def MOM(equity, start=None, end=None, timeperiod=10):
"""Momentum
:param timeperiod:
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.MOM(close, timeperiod=timeperiod)
return real
def PLUS_DI(equity, start=None, end=None, timeperiod=14):
"""Plus Directional signal
NOTE: The PLUS_DI function has an unstable period.
:param timeperiod:
:return:
"""
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.PLUS_DI(high, low, close, timeperiod=timeperiod)
return real
def PLUS_DM(equity, start=None, end=None, timeperiod=14):
"""Plus Directional Movement
NOTE: The PLUS_DM function has an unstable period.
:param timeperiod:
:return:
"""
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
# Copyright (C) 2021 Members of the Simons Observatory collaboration.
# Please refer to the LICENSE file in the root of this repository.
import os
import ref
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.transforms as mtransforms
import matplotlib.colors as colors
import matplotlib.cm as cm
from scipy.stats import chisquare
from waferscreen.data_io.explore_io import flagged_data, wafer_str_to_num, res_num_to_str, band_num_to_str,\
chip_id_str_to_chip_id_tuple
from waferscreen.data_io.s21_io import read_s21, ri_to_magphase
from waferscreen.analyze.lambfit import f0_of_I
from waferscreen.data_io.explore_io import CalcMetadataAtNeg95dbm, CalcMetadataAtNeg75dbm
report_markers = ["*", "v", "s", "<", "X",
"p", "^", "D", ">", "P"]
report_colors = ["seagreen", "crimson", "darkgoldenrod", "deepskyblue", "mediumblue", "rebeccapurple"]
def criteria_flagged_summary(flag_table_info, criteria_name, res_numbers, too_low=True):
for res_label in res_numbers:
summary_str = F"Criteria Flag: {criteria_name}"
if too_low:
summary_str += " too low."
else:
summary_str += " too high."
# if the resonator was flagged already for another reason we need to add a new line to the summary.
if res_label in flag_table_info:
flag_table_info[res_label] += "\n" + summary_str
else:
flag_table_info[res_label] = summary_str
return flag_table_info
def chi_squared_plot(ax, f_ghz_mean, chi_squared_for_resonators, res_nums_int, color, markersize, alpha,
x_label, y_label, x_ticks_on, max_chi_squared=None):
# calculate when the values are outside of the acceptance range
if max_chi_squared is None:
upper_bound = float("inf")
else:
upper_bound = float(max_chi_squared)
res_nums_too_high_chi_squared = set()
# turn on/off tick marks
if not x_ticks_on:
ax.tick_params(axis="x", labelbottom=False)
# loop to plot data one point at a time (add an identifying marker for each data point)
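# chi_squared_for_resonators is expected to hold scipy.stats.chisquare-style
# results (only the .statistic attribute is read); points above max_chi_squared
# are recorded and over-plotted with an "x" marker.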
counter = 0
for f_ghz, chi_squared in zip(f_ghz_mean, chi_squared_for_resonators):
res_num = res_nums_int[counter]
marker = report_markers[res_num % len(report_markers)]
ax.plot(f_ghz, chi_squared.statistic, color=color, ls='None', marker=marker, markersize=markersize, alpha=alpha)
if upper_bound < chi_squared.statistic:
res_nums_too_high_chi_squared.add(res_num_to_str(res_num))
ax.plot(f_ghz, chi_squared.statistic, color="black", ls='None', marker="x", markersize=markersize + 2, alpha=1.0)
counter += 1
# boundary and average lines
if max_chi_squared is not None:
ax.axhline(y=max_chi_squared, xmin=0, xmax=1, color='red', linestyle='dashdot')
# tick marks and axis labels
if x_label is None:
if x_ticks_on:
ax.set_xlabel("Average Resonator Center Frequency (GHz)")
else:
ax.set_xlabel(x_label)
if y_label is not None:
ax.set_ylabel(y_label)
# grid on major tick marks
ax.grid(b=True)
ax.set_yscale("log")
return ax, res_nums_too_high_chi_squared
def error_bar_report_plot(ax, xdata, ydata, yerr, res_nums_int, color="black", ls='None', markersize=10, alpha=0.7,
x_label=None, y_label=None, x_ticks_on=True,
min_y=None, max_y=None, average_y=None):
# calculate when the values are outside of the acceptance range
if min_y is None:
lower_bound = float("-inf")
else:
lower_bound = float(min_y)
if max_y is None:
upper_bound = float("inf")
else:
upper_bound = float(max_y)
res_nums_too_low = set()
res_nums_too_high = set()
# turn on/off tick marks
if not x_ticks_on:
ax.tick_params(axis="x", labelbottom=False)
# loop to plot data one point at a time (add an identifying marker for each data point)
counter = 0
for x_datum, y_datum, y_datum_err in zip(xdata, ydata, yerr):
res_num = res_nums_int[counter]
marker = report_markers[res_num % len(report_markers)]
ax.errorbar(x_datum, y_datum, yerr=y_datum_err,
color=color, ls=ls, marker=marker, markersize=markersize, alpha=alpha)
if y_datum < lower_bound:
res_nums_too_low.add(res_num_to_str(res_num))
ax.plot(x_datum, y_datum, color="black", ls=ls, marker="x", markersize=markersize + 2, alpha=1.0)
elif upper_bound < y_datum:
res_nums_too_high.add(res_num_to_str(res_num))
ax.plot(x_datum, y_datum, color="black", ls=ls, marker="x", markersize=markersize + 2, alpha=1.0)
counter += 1
# boundary and average lines
if min_y is not None:
ax.axhline(y=min_y, xmin=0, xmax=1, color='red', linestyle='dashed')
if average_y is not None:
ax.axhline(y=average_y, xmin=0, xmax=1, color='green', linestyle='dotted')
if max_y is not None:
ax.axhline(y=max_y, xmin=0, xmax=1, color='red', linestyle='dashdot')
# tick marks and axis labels
if x_label is None:
if x_ticks_on:
ax.set_xlabel("Average Resonator Center Frequency (GHz)")
else:
ax.set_xlabel(x_label)
if y_label is not None:
ax.set_ylabel(y_label)
# grid on major tick marks
ax.grid(b=True)
return ax, res_nums_too_low, res_nums_too_high
def hist_report_plot(ax, data, bins=10, color="blue", x_label=None, y_label=None, alpha=0.5):
ax.tick_params(axis="y", labelleft=False)
ax.tick_params(axis="x", labelbottom=False)
ax.hist(data, bins=bins, color=color, orientation='horizontal', alpha=alpha)
if x_label is not None:
ax.set_xlabel(x_label)
if y_label is not None:
ax.set_ylabel(y_label)
ax.tick_params(axis='x', # changes apply to the x-axis
which='both', # both major and minor ticks are affected
bottom=False, # ticks along the bottom edge are off
top=False, # ticks along the top edge are off
labelbottom=False)
return ax
def rug_plot(ax, xdata, y_min, y_max, color="blue",
f_ghz_residuals_for_res_plot_shifted=None, ua_arrays_for_resonators=None):
# Lambda fit Residuals zen plot (the lines are reminiscent of the waves in a zen rock garden)
if f_ghz_residuals_for_res_plot_shifted is not None:
norm = colors.Normalize(vmin=0.0, vmax=0.01)
cmap = plt.get_cmap('gist_ncar_r')
scalar_map = cm.ScalarMappable(norm=norm, cmap=cmap)
y_span = y_max - y_min
# # the background color of the plot is an indicator that this feature is triggered.
# ax.set_facecolor("black")
# get the x axis limits prior to this plot
x_min, x_max = ax.get_xlim()
for f_ghz_plot_residuals, ua_array in zip(f_ghz_residuals_for_res_plot_shifted, ua_arrays_for_resonators):
ua_min = np.min(ua_array)
ua_max = np.max(ua_array)
ua_span = ua_max - ua_min
ua_array_normalized_for_plot = (((ua_array - ua_min) / ua_span) * y_span) + y_min
f_ghz_residuals_span = np.max(f_ghz_plot_residuals) - np.min(f_ghz_plot_residuals)
color_val = scalar_map.to_rgba(f_ghz_residuals_span)
ax.plot(f_ghz_plot_residuals, ua_array_normalized_for_plot, color=color_val, linewidth=0.5)
# reset the x limits, do not let the residuals dictate the x limits of this plot
ax.set_xlim((x_min, x_max))
# 'threads' of the rug plot
ax.tick_params(axis="y", labelleft=False)
for f_centers in xdata:
f_len = len(f_centers)
alpha = min(25.0 / f_len, 1.0)
for f_center in list(f_centers):
ax.plot((f_center, f_center), (y_min, y_max), ls='solid', linewidth=0.1, color=color, alpha=alpha)
ax.set_ylim(bottom=0, top=1)
    ax.tick_params(axis='y',  # changes apply to the y-axis
                   which='both',  # both major and minor ticks are affected
                   left=False,  # ticks along the left edge are off
                   right=False,  # ticks along the right edge are off
                   labelleft=False)
    ax.tick_params(axis='x',  # changes apply to the x-axis
                   which='both',  # both major and minor ticks are affected
                   bottom=False,  # ticks along the bottom edge are off
                   top=True,  # ticks along the top edge are on
                   labelbottom=False,
                   labeltop=True)
ax.xaxis.tick_top()
    ax.set_xlabel("Frequency (GHz)")
ax.xaxis.set_label_position('top')
return ax
def band_plot(ax, f_ghz, mag_dbm, f_centers_ghz_all, res_nums, band_str):
# data math
mag_dbm_mean = np.mean(mag_dbm)
plot_s21_mag = mag_dbm - mag_dbm_mean
plot_mag_min = np.min(plot_s21_mag)
plot_mag_max = np.max(plot_s21_mag)
ave_f_centers = [np.mean(f_centers) for f_centers in f_centers_ghz_all]
# band boundary calculations
band_dict = ref.band_params[band_str]
trans = mtransforms.blended_transform_factory(ax.transData, ax.transAxes)
in_band = [band_dict['min_GHz'] <= one_f <= band_dict['max_GHz'] for one_f in f_ghz]
left_of_band = [one_f < band_dict['min_GHz'] for one_f in f_ghz]
right_of_band = [band_dict['max_GHz'] < one_f for one_f in f_ghz]
ax.fill_between((band_dict['min_GHz'], band_dict['max_GHz']), 0, 1,
facecolor='cornflowerblue', alpha=0.5, transform=trans)
# the smurf keep out zones
for keepout_min, keepout_max in ref.smurf_keepout_zones_ghz:
ax.fill_between((keepout_min, keepout_max), 0, 1, facecolor='black', alpha=0.5, transform=trans)
# the spectrum part of the plot
ax.plot(f_ghz[in_band], plot_s21_mag[in_band], color="darkorchid", linewidth=1)
ax.plot(f_ghz[left_of_band], plot_s21_mag[left_of_band], color="black", linewidth=1)
ax.plot(f_ghz[right_of_band], plot_s21_mag[right_of_band], color="black", linewidth=1)
# Key markers and labels
res_labels = [res_num.replace("Res", " ") for res_num in res_nums]
res_nums_int = [int(res_num.replace("Res", "")) for res_num in res_nums]
counter_f_ghz = 0
counter_ave_centers = 0
while counter_ave_centers < len(ave_f_centers) and counter_f_ghz < len(f_ghz):
single_f_ghz = f_ghz[counter_f_ghz]
single_f_center = ave_f_centers[counter_ave_centers]
if single_f_center < single_f_ghz:
            # the marker is plotted when the above condition is true
marker = report_markers[res_nums_int[counter_ave_centers] % len(report_markers)]
ax.plot(single_f_center, 0.0, color='black', alpha=0.5, marker=marker, markersize=10)
ax.text(single_f_center, plot_mag_min, res_labels[counter_ave_centers], color='black', rotation=300,
horizontalalignment='center', verticalalignment='bottom', fontsize=8)
counter_ave_centers += 1
else:
counter_f_ghz += 1
# plot details
ax.tick_params(axis="x", labelbottom=False)
ax.set_ylabel("dB")
ax.tick_params(axis='x', # changes apply to the x-axis
which='both', # both major and minor ticks are affected
bottom=False, # ticks along the bottom edge are off
top=False, # ticks along the top edge are off
labelbottom=False)
ax.set_ylim(bottom=None, top=plot_mag_max)
return ax
def report_key(ax, leglines, leglabels, summary_info, res_flags=None):
ax.tick_params(axis='x', # changes apply to the x-axis
which='both', # both major and minor ticks are affected
bottom=False, # ticks along the bottom edge are off
top=False, # ticks along the top edge are off
labelbottom=False)
    ax.tick_params(axis='y',  # changes apply to the y-axis
                   which='both',  # both major and minor ticks are affected
                   left=False,  # ticks along the left edge are off
                   right=False,  # ticks along the right edge are off
                   labelleft=False)
ax.text(0.5, 1, summary_info, color='black',
horizontalalignment='center', verticalalignment='top', multialignment='left', fontsize=10)
ax.set_xlim(left=0, right=1)
ax.set_ylim(bottom=0, top=1)
# ax.set_title("KEY")
ax.legend(leglines, leglabels, loc=8, numpoints=5, handlelength=3, fontsize=10)
if res_flags is not None:
omitted_res = []
for res_flag in res_flags:
omitted_res.append([res_num_to_str(res_flag.seed_res_num), res_flag.type])
ax.table(omitted_res, loc='center', fontsize=10)
return ax
def report_plot_init(num_of_scatter_hist_x=3, num_of_scatter_hist_y=2):
"""
Three Major Regions
        1) Top: Frequency Rug Plot
        2) Middle: Resonator Spectrum
        3) Bottom: Scatter plots with side histograms
definitions for the axes
:param num_of_scatter_hist_x: int
:param num_of_scatter_hist_y: int
:return:
"""
left = 0.05
bottom = 0.05
right = 0.99
top = 0.95
major12_region_spacing = 0.000
major32_region_spacing = 0.001
major_regions_y = (0.50, top - 0.15)
key_margin_x = 0.85
key_space = 0.003
scatter_hist_little_space = 0.005
scatter_hist_bigger_vspace = 0.005
scatter_hist_bigger_hspace = 0.060
scatter_to_hist_ratio = 1.6
# 0) A plot used as a Key
key_top = top
key_bottom = major_regions_y[0] + major32_region_spacing
key_height = key_top - key_bottom
key_left = key_margin_x + key_space
key_right = right
key_width = key_right - key_left
key_cood = [key_left, key_bottom, key_width, key_height]
# 1) Top: Frequency Rug Plot
rug_top = top
rug_bottom = major_regions_y[1] + major12_region_spacing
rug_height = rug_top - rug_bottom
rug_left = left
rug_right = key_margin_x - key_space
rug_width = rug_right - rug_left
rug_cood = [rug_left, rug_bottom, rug_width, rug_height]
# 2) Middle: Resonator Spectrum
res_spec_top = major_regions_y[1] - major12_region_spacing
res_spec_bottom = major_regions_y[0] + major32_region_spacing
res_spec_height = res_spec_top - res_spec_bottom
res_spec_left = left
res_spec_right = key_margin_x - key_space
res_spec_width = res_spec_right - res_spec_left
res_spec_cood = [res_spec_left, res_spec_bottom, res_spec_width, res_spec_height]
# 3) Bottom:Scatter plots with side histograms
shist_top = major_regions_y[0] - major32_region_spacing
available_plot_y_per_histogram = (((shist_top - bottom) - ((num_of_scatter_hist_y - 1) * scatter_hist_bigger_vspace))
/ num_of_scatter_hist_y)
available_plot_x_per_histogram = (((right - left) - ((num_of_scatter_hist_x - 1) * scatter_hist_bigger_hspace))
/ num_of_scatter_hist_x) - scatter_hist_little_space
scat_width = available_plot_x_per_histogram * scatter_to_hist_ratio / (scatter_to_hist_ratio + 1.0)
hist_width = available_plot_x_per_histogram - scat_width
hist_coords = []
scatter_coords = []
for yhist_index in range(num_of_scatter_hist_y):
shist_bottom = shist_top - available_plot_y_per_histogram
shist_height = shist_top - shist_bottom
scat_left = left
for xhist_index in range(num_of_scatter_hist_x):
hist_left = scat_left + scat_width + scatter_hist_little_space
scatter_coords.append([scat_left, shist_bottom, scat_width, shist_height])
hist_coords.append([hist_left, shist_bottom, hist_width, shist_height])
scat_left = hist_left + hist_width + scatter_hist_bigger_hspace
shist_top = shist_bottom - scatter_hist_bigger_vspace
# initialize the plot
fig = plt.figure(figsize=(25, 10))
ax_key = fig.add_axes(key_cood, frameon=False)
ax_res_spec = fig.add_axes(res_spec_cood, frameon=False)
ax_rug = fig.add_axes(rug_cood, sharex=ax_res_spec, frameon=False)
axes_scatter = [fig.add_axes(scatter_cood, sharex=ax_res_spec, frameon=False) for scatter_cood in scatter_coords]
axes_hist = [fig.add_axes(hist_coord, sharey=ax_scatter, frameon=False)
for hist_coord, ax_scatter in zip(hist_coords, axes_scatter)]
axes_shist = [(ax_scatter, ax_hist) for ax_scatter, ax_hist in zip(axes_scatter, axes_hist)]
return fig, ax_key, ax_res_spec, ax_rug, axes_shist
def f_ghz_from_lambda_fit(lambda_fit, ua_array):
def lamb_fit_these_params(ua):
f_ghz = f0_of_I(ramp_current_amps=ua * 1.0e-6, ramp_current_amps_0=lambda_fit.i0fit,
m=lambda_fit.mfit, f2=lambda_fit.f2fit, P=lambda_fit.pfit, lamb=lambda_fit.lambfit)
return f_ghz
return np.fromiter(map(lamb_fit_these_params, ua_array), dtype=float)
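# A minimal usage sketch (not part of the original module): the lambda-fit values below are
# invented placeholders; the call only illustrates mapping a flux-ramp current array
# (in micro-amps) to resonance frequencies through f0_of_I.
def _example_f_ghz_from_lambda_fit():
    from types import SimpleNamespace
    fake_lambda_fit = SimpleNamespace(i0fit=0.0, mfit=1.0e-4, f2fit=5.0, pfit=1.0e-4, lambfit=0.3)
    ua_array = np.linspace(-30.0, 30.0, num=7)
    return f_ghz_from_lambda_fit(fake_lambda_fit, ua_array)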
def single_lamb_to_report_plot(axes, res_set, color, leglines, leglabels, band_str, flag_table_info, ordered_res_strs,
markersize=8, alpha=0.5):
summary_info = {}
    # check to make sure we have data for all the requested resonator numbers
for res_str in list(ordered_res_strs):
if not hasattr(res_set, res_str):
ordered_res_strs.remove(res_str)
if "lost_res_nums" in summary_info.keys():
summary_info["lost_res_nums"].add(res_str)
else:
summary_info["lost_res_nums"] = {res_str}
# do some data analysis
lamb_values = np.array([res_set.__getattribute__(res_str).lamb_fit.lambfit for res_str in ordered_res_strs])
lamb_value_errs = np.array([res_set.__getattribute__(res_str).lamb_fit.lambfit_err
for res_str in ordered_res_strs])
flux_ramp_pp_khz = np.array([res_set.__getattribute__(res_str).lamb_fit.pfit * 1.0e6
for res_str in ordered_res_strs])
flux_ramp_pp_khz_errs = np.array([res_set.__getattribute__(res_str).lamb_fit.pfit_err * 1.0e6
for res_str in ordered_res_strs])
conversion_factor = (ref.phi_0 / (2.0 * np.pi)) * 1.0e12
fr_squid_mi_pH = np.array([res_set.__getattribute__(res_str).lamb_fit.mfit * conversion_factor
for res_str in ordered_res_strs])
fr_squid_mi_pH_err = np.array([res_set.__getattribute__(res_str).lamb_fit.mfit_err * conversion_factor
for res_str in ordered_res_strs])
port_powers_dbm = np.array([res_set.__getattribute__(res_str).metadata["port_power_dbm"]
for res_str in ordered_res_strs])
# This should be a real calibration not this hacky one size fits all subtraction, I hate that I wrote this
at_res_power_dbm = []
for res_str, port_power_dbm in zip(ordered_res_strs, port_powers_dbm):
wafer_num = res_set.__getattribute__(res_str).metadata["wafer"]
if wafer_num < 12.5:
# with warm 20 dBm attenuator that make the VNA output unleveled
at_res_power_dbm.append(port_power_dbm - 75.0)
else:
# no warm 20 dBm attenuator on the input
at_res_power_dbm.append(port_power_dbm - 55.0)
at_res_power_dbm_mean = np.mean(at_res_power_dbm)
# initialize some useful parameters
f_centers_ghz_all = []
f_centers_ghz_mean = []
f_centers_ghz_std = []
q_i_mean = []
q_i_std = []
q_c_mean = []
q_c_std = []
impedance_ratio_mean = []
impedance_ratio_std = []
non_linear_mean = []
non_linear_std = []
for res_str in ordered_res_strs:
single_lamb = res_set.__getattribute__(res_str)
f_centers_this_lamb = np.array([res_params.fcenter_ghz for res_params in single_lamb.res_fits])
f_centers_ghz_all.append(f_centers_this_lamb)
f_centers_ghz_mean.append(np.mean(f_centers_this_lamb))
f_centers_ghz_std.append(np.std(f_centers_this_lamb))
q_is_this_lamb = np.array([res_params.q_i for res_params in single_lamb.res_fits])
q_i_mean.append(np.mean(q_is_this_lamb))
q_i_std.append(np.std(q_is_this_lamb))
q_cs_this_lamb = np.array([res_params.q_c for res_params in single_lamb.res_fits])
q_c_mean.append(np.mean(q_cs_this_lamb))
        q_c_std.append(np.std(q_cs_this_lamb))
import cv2
import os
import time
import numpy as np
from sklearn.model_selection import train_test_split
from skimage.color import rgb2gray
from skimage.transform import resize as imgresize
from scipy import linalg
import scipy.ndimage as ndi
from scipy.signal import convolve2d
import random
from keras.utils import to_categorical
from tqdm import tqdm
# keras.io
def rotation(x, rg, row_axis=0, col_axis=1, channel_axis=2,
fill_mode='nearest', cval=0.):
""" Like in keras lib, but
rg: random angel
"""
theta = np.deg2rad(rg)
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
h, w = x.shape[int(row_axis)], x.shape[int(col_axis)]
transform_matrix = transform_matrix_offset_center(rotation_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def shift(x, wrg, hrg, row_axis=0, col_axis=1, channel_axis=2,
fill_mode='nearest', cval=0.):
""" Like in keras lib, but
wrg, hrg: random number
"""
h, w = x.shape[row_axis], x.shape[col_axis]
tx = hrg * h
ty = wrg * w
translation_matrix = np.array([[1, 0, tx],
[0, 1, ty],
[0, 0, 1]])
transform_matrix = translation_matrix # no need to do offset
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def zoom(x, zoom_range, row_axis=0, col_axis=1, channel_axis=2,
fill_mode='nearest', cval=0.):
if len(zoom_range) != 2:
raise ValueError('`zoom_range` should be a tuple or list of two'
' floats. Received: ', zoom_range)
zx, zy = zoom_range
zoom_matrix = np.array([[zx, 0, 0],
[0, zy, 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(zoom_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def transform_matrix_offset_center(matrix, x, y):
o_x = float(x) / 2 + 0.5
o_y = float(y) / 2 + 0.5
offset_matrix = np.array([[1, 0, o_x], [0, 1, o_y], [0, 0, 1]])
reset_matrix = np.array([[1, 0, -o_x], [0, 1, -o_y], [0, 0, 1]])
transform_matrix = np.dot(np.dot(offset_matrix, matrix), reset_matrix)
return transform_matrix
def apply_transform(x,
transform_matrix,
channel_axis=2,
fill_mode='nearest',
cval=0.):
x = np.rollaxis(x, channel_axis, 0)
final_affine_matrix = transform_matrix[:2, :2]
final_offset = transform_matrix[:2, 2]
channel_images = [ndi.interpolation.affine_transform(
x_channel,
final_affine_matrix,
final_offset,
order=1,
mode=fill_mode,
cval=cval) for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_axis+1)
return x
def tranformation(x, config):
if config['flag']:
if config['flip']:
x = x[:, ::-1, :]
if config['rot']['flag']:
x = rotation(x, config['rot']['rg'])
if config['shift']['flag']:
            x = shift(x, config['shift']['wrg'], config['shift']['hrg'])
if config['zoom']['flag']:
x = zoom(x, config['zoom']['zoom_range'])
return x
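# A minimal usage sketch (not part of the original module): the config dictionary mirrors the
# one assembled in DataSet._reader below; the numbers are arbitrary and only illustrate
# applying flip + rotation + shift + zoom to a single-channel image.
def _example_tranformation():
    x = np.random.rand(56, 56, 1)
    config = {
        'flag': True,
        'flip': True,
        'rot': {'flag': True, 'rg': 15.0},
        'shift': {'flag': True, 'wrg': 0.1, 'hrg': -0.05},
        'zoom': {'flag': True, 'zoom_range': (0.9, 1.1)},
    }
    return tranformation(x, config)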
def msqr(x1,x2):
return np.mean(np.sqrt(np.power(x1-x2,2)))
def new_frame(x1,x2, op, kernel_size=4):
w, h, _ = np.shape(x1)
new_frame = np.zeros(shape=(w//kernel_size, h//kernel_size, 1))
if op == 'msqr':
x1 = np.asarray(x1)/255
x2 = np.asarray(x2)/255
func = lambda n1, n2: msqr(n1, n2)
else:
raise "Undefined fuction: {0}".format(op)
for i in range(0, w//kernel_size):
for j in range(0, h//kernel_size):
sx1 = x1[i*kernel_size:(i+1)*kernel_size, j*kernel_size:(j+1)*kernel_size,:]
sx2 = x2[i*kernel_size:(i+1)*kernel_size, j*kernel_size:(j+1)*kernel_size,:]
new_frame[i,j,0] = func(sx1,sx2)
return new_frame
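# A minimal usage sketch (not part of the original module): two random RGB frames in [0, 255]
# are compared block by block; with kernel_size=4 a 64x64 input collapses to a 16x16x1 map of
# per-block differences as computed by msqr.
def _example_new_frame():
    frame_a = np.random.randint(0, 256, size=(64, 64, 3))
    frame_b = np.random.randint(0, 256, size=(64, 64, 3))
    return new_frame(frame_a, frame_b, op='msqr', kernel_size=4)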
class DataSet:
def __init__(self,
nframe=16,
fstride=1,
train_size=0.7,
size=[224,224, 3],
random_state=131,
name = "",
filepaths=[],
y=[],
kernel_size=4):
y = to_categorical(y=y, num_classes=6)
train_fp, valid_fp, train_y, valid_y = train_test_split(filepaths,
y,
random_state=random_state,
shuffle=True,
train_size=train_size,
stratify=y
)
self.train_fp = train_fp
self.valid_fp = valid_fp
self.train_y = train_y
self.valid_y = valid_y
self.nframe = nframe
self.fstride = fstride
self.size = size
self.random_state = random_state
self.train_size = train_size
self.name = name
self.kernel_size = kernel_size
def _augmentation(self, img, config={}):
img = rgb2gray(img)
img = img.reshape([self.size[0]//self.kernel_size, self.size[1]//self.kernel_size, 1])
img = tranformation(img, config)
return img
def _reader(self, filepath, aug_config={'flag':False}):
if aug_config['flag']:
rg = random.uniform(-aug_config['rg'], aug_config['rg'])
wrg = random.uniform(-aug_config['wrg'], aug_config['wrg'])
hrg = random.uniform(-aug_config['hrg'], aug_config['hrg'])
zoom_range = np.random.uniform(1-aug_config['zoom'], 1+aug_config['zoom'], 2)
fr = bool(0.5<random.uniform(0, 1))
fs = bool(0.5<random.uniform(0, 1))
fz = bool(0.5<random.uniform(0, 1))
ff = bool(0.5<random.uniform(0, 1))
config = {
'flag':True,
'rot':{
'flag':fr,
'rg': rg
},
'shift':{
'flag':fs,
'wrg': wrg,
'hrg': hrg
},
'zoom':{
'flag':fz,
'zoom_range':zoom_range
},
'flip': ff
}
else:
config = {
'flag':False
}
video = []
cap = cv2.VideoCapture(filepath)
while(cap.isOpened()):
ret, img = cap.read()
if ret==True:
img = self._augmentation(img, config=config)
video.append(img)
else:
break
cap.release()
return np.array(video)
def _vis_video(self, video, y, name='standartName.avi'):
out = cv2.VideoWriter(name,
cv2.VideoWriter_fourcc(*'DIVX'),
10,
(self.size[0]//self.kernel_size, self.size[1]//self.kernel_size),
False)
for frame in video:
frame = frame*255
frame = frame.astype(np.uint8)
print(y)
#cv2.putText(frame, str(y),(10,10), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0,255), 2)
out.write(frame.reshape((self.size[0]//self.kernel_size, self.size[1]//self.kernel_size)))
cv2.imshow('frame', frame)
if cv2.waitKey(25) & 0xFF == ord('q'):
break
time.sleep(0.1)
out.release()
cv2.destroyAllWindows()
def visualizer(self, num=10, type='train', aug_config={'flag':False}):
if type=='train':
gen = self.train_gen(batch_size=num, aug_config=aug_config)
else:
gen = self.valid_gen(batch_size=num)
x, y = next(gen)
for i in range(num):
self._vis_video(video=x[i], y=y[i].argmax(-1), name=str(i)+'_'+str(y[i].argmax(-1))+'.avi')
def make_set(self, op='msqr', name='train'):
if name=='train':
path = self.train_fp
elif name=='valid':
path = self.valid_fp
else:
            raise ValueError('name must be train or valid')
for i, fp in tqdm(enumerate(path),ascii=True, desc='Make {0} Set'.format(name)):
out = cv2.VideoWriter(name+'_set/'+str(i)+'.avi',
cv2.VideoWriter_fourcc(*'DIVX'),
5,
(self.size[0]//self.kernel_size, self.size[1]//self.kernel_size),
False)
i,j=0,1
video = []
cap = cv2.VideoCapture(fp)
pr_img = np.zeros(shape=(self.size[0]//self.kernel_size, self.size[1]//self.kernel_size,1))
            last_img = np.zeros(shape=(self.size[0]//self.kernel_size, self.size[1]//self.kernel_size))
# Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions that operate on voxels."""
import numpy as np
import tensorflow as tf
from tensorflow_graphics.math.interpolation import trilinear
import tensorflow_graphics.projects.radiance_fields.utils as utils
def get_mask_voxels(shape=(1, 128, 128, 128, 1), dtype=np.float32):
"""Generates a mask for a voxel grid by removing the borders."""
  voxels = np.ones(shape, dtype=dtype)
from __future__ import print_function
import cv2
import argparse
import numpy as np
from PIL import Image
import operator
import copy
from keras.preprocessing import image
import tensorflow as tf
from skimage.segmentation import clear_border
from keras.models import load_model
from load import *
#show image
def show_image(img,title):
#cv2.namedWindow(title, cv2.WINDOW_NORMAL)
cv2.imshow(title, img)
cv2.waitKey(0)
cv2.destroyAllWindows()
ap = argparse.ArgumentParser()
args = vars(ap.parse_args())
img = cv2.imread('img/image4.jpg', cv2.IMREAD_GRAYSCALE)
show_image(img,"title")
# noise reduction
def pre_process_image(img, skip_dilate=False):
proc = cv2.GaussianBlur(img.copy(), (9, 9),0)
proc = cv2.adaptiveThreshold(proc, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 5)
proc = cv2.bitwise_not(proc, proc)
if not skip_dilate:
kernel = np.array([[0., 1., 0.], [1., 1., 1.], [0., 1., 0.]],np.uint8)
proc = cv2.dilate(proc, kernel)
return proc
def findCorners(img):
    h, contours, hierarchy = cv2.findContours(img, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
contours = sorted(contours, key=cv2.contourArea, reverse=True)
polygon = contours[0]
# Largest image height_px_1 = box[0][1] - box[3][1]
bottom_right, _ = max(enumerate([pt[0][0] + pt[0][1] for pt in polygon]), key=operator.itemgetter(1))
top_left, _ = min(enumerate([pt[0][0] + pt[0][1] for pt in polygon]), key=operator.itemgetter(1))
bottom_left, _ = min(enumerate([pt[0][0] - pt[0][1] for pt in polygon]), key=operator.itemgetter(1))
top_right, _ = max(enumerate([pt[0][0] - pt[0][1] for pt in polygon]), key=operator.itemgetter(1))
# Return an array of all 4 points using the indices
return [polygon[top_left][0], polygon[top_right][0], polygon[bottom_right][0], polygon[bottom_left][0]]
"""
def findCorners(img):
contours, hierarchy = cv2.findContours(processed, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
contours = sorted(contours, key=cv2.contourArea, reverse=True)
polygon = contours[0]
# Largest image height_px_1 = box[0][1] - box[3][1]
bottom_right, _ = max(enumerate([pt[0][0] + pt[0][1] for pt in polygon]), key=operator.itemgetter(1))
top_left, _ = min(enumerate([pt[0][0] + pt[0][1] for pt in polygon]), key=operator.itemgetter(1))
bottom_left, _ = min(enumerate([pt[0][0] - pt[0][1] for pt in polygon]), key=operator.itemgetter(1))
top_right, _ = max(enumerate([pt[0][0] - pt[0][1] for pt in polygon]), key=operator.itemgetter(1))
# Return an array of all 4 points using the indices
return [polygon[top_left][0], polygon[top_right][0], polygon[bottom_right][0], polygon[bottom_left][0]]
"""
def display_points(in_img, points, radius=5, colour=(255, 255, 255)):
img = in_img.copy()
if len(colour) == 3:
if len(img.shape) == 2:
img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
elif img.shape[2] == 1:
img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
for point in points:
cv2.circle(img, tuple(int(x) for x in point), radius, colour, -1)
show_image(img,"display_points")
return img
# center the sudoku
def distance_between(p1, p2):
a = p2[0] - p1[0]
b = p2[1] - p1[1]
return np.sqrt((a ** 2) + (b ** 2))
def display_rects(in_img, rects, colour=255):
img = in_img.copy()
for rect in rects:
cv2.rectangle(img, tuple(int(x) for x in rect[0]), tuple(int(x) for x in rect[1]), colour)
show_image(img,"display_rects")
return img
def crop_and_warp(img, crop_rect):
top_left, top_right, bottom_right, bottom_left = crop_rect[0], crop_rect[1], crop_rect[2], crop_rect[3]
src = np.array([top_left, top_right, bottom_right, bottom_left], dtype='float32')
side = max([
distance_between(bottom_right, top_right),
distance_between(top_left, bottom_left),
distance_between(bottom_right, bottom_left),
distance_between(top_left, top_right)
])
    dst = np.array([[0, 0], [side - 1, 0], [side - 1, side - 1], [0, side - 1]], dtype='float32')
__author__ = '<NAME>'
import numpy as np
import cv2
import math
def estimate_camera(model3D, fidu_XY, pose_db_on=False):
if pose_db_on:
rmat, tvec = calib_camera(model3D, fidu_XY, pose_db_on=True)
tvec = tvec.reshape(3,1)
else:
rmat, tvec = calib_camera(model3D, fidu_XY)
RT = np.hstack((rmat, tvec))
projection_matrix = model3D.out_A * RT
return projection_matrix, model3D.out_A, rmat, tvec
def calib_camera(model3D, fidu_XY, pose_db_on=False):
    # compute pose using reference 3D points + query 2D points
## np.arange(68)+1 since matlab starts from 1
if pose_db_on:
rvecs = fidu_XY[0:3]
tvec = fidu_XY[3:6]
else:
goodind = np.setdiff1d(np.arange(68)+1, model3D.indbad)
goodind=goodind-1
fidu_XY = fidu_XY[goodind,:]
ret, rvecs, tvec = cv2.solvePnP(model3D.model_TD, fidu_XY, model3D.out_A, None, None, None, False)
rmat, jacobian = cv2.Rodrigues(rvecs, None)
inside = calc_inside(model3D.out_A, rmat, tvec, model3D.size_U[1], model3D.size_U[0], model3D.model_TD)
if(inside == 0):
tvec = -tvec
t = np.pi
RRz180 = np.asmatrix([np.cos(t), -np.sin(t), 0, np.sin(t), np.cos(t), 0, 0, 0, 1]).reshape((3, 3))
rmat = RRz180*rmat
return rmat, tvec
def get_yaw(rmat):
    modelview = np.zeros((3, 4))
modelview[0:3,0:3] = rmat.transpose()
modelview = modelview.reshape(12)
# Code converted from function: getEulerFromRot()
angle_y = -math.asin( modelview[2] ) # Calculate Y-axis angle
C = math.cos( angle_y)
angle_y = math.degrees(angle_y)
if np.absolute(C) > 0.005: # Gimball lock?
trX = modelview[10] / C # No, so get X-axis angle
trY = -modelview[6] / C
angle_x = math.degrees( math.atan2( trY, trX ) )
trX = modelview[0] / C # Get z-axis angle
trY = - modelview[1] / C
angle_z = math.degrees( math.atan2( trY, trX) )
else:
# Gimball lock has occured
angle_x = 0
trX = modelview[5]
trY = modelview[4]
angle_z = math.degrees( math.atan2( trY, trX) )
# Adjust to current mesh setting
angle_x = 180 - angle_x
angle_y = angle_y
angle_z = -angle_z
out_pitch = angle_x
out_yaw = angle_y
out_roll = angle_z
return out_yaw
def get_opengl_matrices(camera_matrix, rmat, tvec, width, height):
projection_matrix = np.asmatrix(np.zeros((4,4)))
near_plane = 0.0001
far_plane = 10000
fx = camera_matrix[0,0]
fy = camera_matrix[1,1]
px = camera_matrix[0,2]
py = camera_matrix[1,2]
projection_matrix[0, 0] = 2.0 * fx / width
projection_matrix[1, 1] = 2.0 * fy / height
projection_matrix[0, 2] = 2.0 * (px / width) - 1.0
projection_matrix[1, 2] = 2.0 * (py / height) - 1.0
projection_matrix[2, 2] = -(far_plane + near_plane) / (far_plane - near_plane)
projection_matrix[3, 2] = -1
projection_matrix[2, 3] = -2.0 * far_plane * near_plane / (far_plane - near_plane)
deg = 180
t = deg*np.pi/180.
RRz=np.asmatrix([np.cos(t), -np.sin(t), 0, np.sin(t), np.cos(t), 0, 0, 0, 1]).reshape((3, 3))
    RRy = np.asmatrix([np.cos(t), 0, np.sin(t), 0, 1, 0, -np.sin(t), 0, np.cos(t)]).reshape((3, 3))
# -*- coding: utf-8 -*-
"""
Created on Sun Feb 14 16:38:27 2021
@author: jbren
"""
from SKEMPI import skempi_final as db
import numpy as np
import protein
import Mutation
import re
PDB_Loc="C:\\Users\\jbren\\OneDrive\\Documents\\Optimus\\SKEMPI2_PDBs\\PDBs"
fasta_Loc='C:\\Users\\jbren\\OneDrive\\Documents\\Optimus\\seq'
fasta_db_loc='C:\\Users\\jbren\\OneDrive\\Documents\\Optimus\\Pfam-A.fasta\\Pfam-A.fasta'
# Use this function to loop over the entire protein complex
def loop_over_proteins (db):
# Returns an array of the protein codes
pdbs= db['#Pdb'].str.slice(start=0, stop=4, step=1)
# Returns only the unique values from the array
uniq_pdbs=pdbs.unique()
i=0
p=protein.ProteinMethods(PDB_Loc)
while i<10: #len(uniq_pdbs):
i+=1
# Use this function to loop over the chains within a PDB
def loop_over_chains (db,RunBlastFlag,ReadBlastFlag):
# Returns only the unique values from the the three columns selected
# see https://stackoverflow.com/questions/26977076/pandas-unique-values-multiple-columns
    # for different ways to do this
    uniq_chains = np.unique(db[['#Pdb', 'Prot1Chain', 'Prot2Chain']])
# -*- coding: utf-8 -*-
"""Console script to generate goals for real_robots"""
import click
import numpy as np
from real_robots.envs import Goal
import gym
import math
basePosition = None
def pairwise_distances(a):
b = a.reshape(a.shape[0], 1, a.shape[1])
return np.sqrt(np.einsum('ijk, ijk->ij', a-b, a-b))
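# A minimal usage sketch (not part of the original script): it checks, on made-up object
# positions, that the einsum-based pairwise_distances matches a naive two-loop
# Euclidean distance matrix.
def _example_pairwise_distances():
    positions = np.random.rand(4, 3)  # 4 objects with xyz coordinates
    fast = pairwise_distances(positions)
    naive = np.array([[np.linalg.norm(p - q) for q in positions] for p in positions])
    assert np.allclose(fast, naive)
    return fast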
def runEnv(env, max_t=1000):
reward = 0
done = False
action = np.zeros(env.action_space.shape[0])
objects = env.robot.used_objects[1:]
positions = np.vstack([env.get_obj_pose(obj) for obj in objects])
still = False
stable = 0
for t in range(max_t):
old_positions = positions
observation, reward, done, _ = env.step(action)
positions = np.vstack([env.get_obj_pose(obj) for obj in objects])
maxPosDiff = 0
maxOrientDiff = 0
for i, obj in enumerate(objects):
posDiff = np.linalg.norm(old_positions[i][:3] - positions[i][:3])
q1 = old_positions[i][3:]
q2 = positions[i][3:]
orientDiff = min(np.linalg.norm(q1 - q2), np.linalg.norm(q1+q2))
maxPosDiff = max(maxPosDiff, posDiff)
maxOrientDiff = max(maxOrientDiff, orientDiff)
if maxPosDiff < 0.0001 and maxOrientDiff < 0.001 and t > 10:
stable += 1
else:
stable = 0
if stable > 20:
still = True
break
pos_dict = {}
for obj in objects:
pos_dict[obj] = env.get_obj_pose(obj)
print("Exiting environment after {} timesteps..".format(t))
if not still:
print("Failed because maxPosDiff:{:.6f},"
"maxOrientDiff:{:.6f}".format(maxPosDiff, maxOrientDiff))
return observation['retina'], pos_dict, not still, t
class Position:
def __init__(self, start_state=None, fixed_state=None, retina=None):
self.start_state = start_state
self.fixed_state = fixed_state
self.retina = retina
def generatePosition(env, obj, fixed=False, tablePlane=None):
if tablePlane is None:
min_x = -.2
max_x = .2
elif tablePlane:
min_x = -.2
max_x = .1 # 0.05 real, .1 prudent
else:
min_x = 0 # 0.05 mustard
max_x = .2
min_y = -.5
max_y = .5
x = np.random.rand()*(max_x-min_x)+min_x
    y = np.random.rand()*(max_y-min_y)+min_y
""" render_fmo.py renders obj file to rgb image with fmo model
Aviable function:
- clear_mash: delete all the mesh in the secene
- scene_setting_init: set scene configurations
- node_setting_init: set node configurations
- render: render rgb image for one obj file and one viewpoint
- render_obj: wrapper function for render() render
- init_all: a wrapper function, initialize all configurations
= set_image_path: reset defualt image output folder
author baiyu
modified by rozumden
"""
import sys
import os
import random
import math
import pickle
import bpy
import glob
import numpy as np
from mathutils import Vector
from mathutils import Euler
import cv2
from PIL import Image
from skimage.draw import line_aa
from scipy import signal
from skimage.measure import regionprops
# import moviepy.editor as mpy
from array2gif import write_gif
abs_path = os.path.abspath(__file__)
sys.path.append(os.path.dirname(abs_path))
from render_helper import *
from settings import *
import settings
import pdb
def renderTraj(pars, H):
## Input: pars is either 2x2 (line) or 2x3 (parabola)
if pars.shape[1] == 2:
pars = np.concatenate( (pars, np.zeros((2,1))),1)
ns = 2
else:
ns = 5
ns = np.max([2, ns])
rangeint = np.linspace(0,1,ns)
for timeinst in range(rangeint.shape[0]-1):
ti0 = rangeint[timeinst]
ti1 = rangeint[timeinst+1]
start = pars[:,0] + pars[:,1]*ti0 + pars[:,2]*(ti0*ti0)
end = pars[:,0] + pars[:,1]*ti1 + pars[:,2]*(ti1*ti1)
start = np.round(start).astype(np.int32)
end = np.round(end).astype(np.int32)
rr, cc, val = line_aa(start[0], start[1], end[0], end[1])
valid = np.logical_and(np.logical_and(rr < H.shape[0], cc < H.shape[1]), np.logical_and(rr > 0, cc > 0))
rr = rr[valid]
cc = cc[valid]
val = val[valid]
if len(H.shape) > 2:
H[rr, cc, 0] = 0
H[rr, cc, 1] = 0
H[rr, cc, 2] = val
else:
H[rr, cc] = val
return H
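# A minimal usage sketch (not part of the original script): the start point and displacement
# below are arbitrary; pars[:, 0] is the starting pixel and pars[:, 1] the displacement, so
# this draws a single anti-aliased segment into a blank accumulator image.
def _example_renderTraj():
    pars = np.array([[20.0, 60.0],
                     [30.0, 80.0]])  # start at (20, 30), move by (60, 80)
    return renderTraj(pars, np.zeros((128, 128)))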
def open_log(temp_folder = g_temp): # redirect output to log file
logfile = os.path.join(temp_folder,'blender_render.log')
try:
os.remove(logfile)
except OSError:
pass
open(logfile, 'a').close()
old = os.dup(1)
sys.stdout.flush()
os.close(1)
os.open(logfile, os.O_WRONLY)
return old
def close_log(old): # disable output redirection
os.close(1)
os.dup(old)
os.close(old)
def clear_mesh():
""" clear all meshes in the secene
"""
bpy.ops.object.select_all(action='DESELECT')
for obj in bpy.data.objects:
if obj.type == 'MESH':
obj.select = True
bpy.ops.object.delete()
for block in bpy.data.meshes:
if block.users == 0:
bpy.data.meshes.remove(block)
for block in bpy.data.materials:
if block.users == 0:
bpy.data.materials.remove(block)
for block in bpy.data.textures:
if block.users == 0:
bpy.data.textures.remove(block)
for block in bpy.data.images:
if block.users == 0:
bpy.data.images.remove(block)
def scene_setting_init(use_gpu):
"""initialize blender setting configurations
"""
sce = bpy.context.scene.name
bpy.data.scenes[sce].render.engine = g_engine_type
bpy.data.scenes[sce].cycles.film_transparent = g_use_film_transparent
#output
bpy.data.scenes[sce].render.image_settings.color_mode = g_rgb_color_mode
bpy.data.scenes[sce].render.image_settings.color_depth = g_rgb_color_depth
bpy.data.scenes[sce].render.image_settings.file_format = g_rgb_file_format
bpy.data.scenes[sce].render.use_overwrite = g_depth_use_overwrite
bpy.data.scenes[sce].render.use_file_extension = g_depth_use_file_extension
if g_ambient_light:
world = bpy.data.worlds['World']
world.use_nodes = True
bg = world.node_tree.nodes['Background']
bg.inputs[0].default_value[:3] = g_bg_color
bg.inputs[1].default_value = 1.0
#dimensions
bpy.data.scenes[sce].render.resolution_x = g_resolution_x
bpy.data.scenes[sce].render.resolution_y = g_resolution_y
bpy.data.scenes[sce].render.resolution_percentage = g_resolution_percentage
if use_gpu:
bpy.data.scenes[sce].render.engine = 'CYCLES' #only cycles engine can use gpu
bpy.data.scenes[sce].render.tile_x = g_hilbert_spiral
bpy.data.scenes[sce].render.tile_x = g_hilbert_spiral
bpy.context.user_preferences.addons['cycles'].preferences.devices[0].use = False
bpy.context.user_preferences.addons['cycles'].preferences.devices[1].use = True
ndev = len(bpy.context.user_preferences.addons['cycles'].preferences.devices)
print('Number of devices {}'.format(ndev))
for ki in range(2,ndev):
bpy.context.user_preferences.addons['cycles'].preferences.devices[ki].use = False
bpy.context.user_preferences.addons['cycles'].preferences.compute_device_type = 'CUDA'
# bpy.types.CyclesRenderSettings.device = 'GPU'
bpy.data.scenes[sce].cycles.device = 'GPU'
def node_setting_init():
bpy.context.scene.use_nodes = True
tree = bpy.context.scene.node_tree
links = tree.links
for node in tree.nodes:
tree.nodes.remove(node)
render_layer_node = tree.nodes.new('CompositorNodeRLayers')
image_output_node = tree.nodes.new('CompositorNodeOutputFile')
image_output_node.base_path = g_syn_rgb_folder
links.new(render_layer_node.outputs[0], image_output_node.inputs[0])
# image_output_node = bpy.context.scene.node_tree.nodes[1]
image_output_node.base_path = g_temp
image_output_node.file_slots[0].path = 'image-######.png' # blender placeholder #
def render(obj_path, viewpoint, temp_folder):
"""render rbg image
render a object rgb image by a given camera viewpoint and
choose random image as background, only render one image
at a time.
Args:
obj_path: a string variable indicate the obj file path
viewpoint: a vp parameter(contains azimuth,elevation,tilt angles and distance)
"""
vp = viewpoint
cam_location = camera_location(vp.azimuth, vp.elevation, vp.distance)
cam_rot = camera_rot_XYZEuler(vp.azimuth, vp.elevation, vp.tilt)
cam_obj = bpy.data.objects['Camera']
cam_obj.location[0] = cam_location[0]
cam_obj.location[1] = cam_location[1]
cam_obj.location[2] = cam_location[2]
cam_obj.rotation_euler[0] = cam_rot[0]
cam_obj.rotation_euler[1] = cam_rot[1]
cam_obj.rotation_euler[2] = cam_rot[2]
if not os.path.exists(g_syn_rgb_folder):
os.mkdir(g_syn_rgb_folder)
obj = bpy.data.objects['model_normalized']
ni = g_fmo_steps
maxlen = 0.5
maxrot = 1.57/6
tri = 0
# rot_base = np.array([math.pi/2,0,0])
while tri <= g_max_trials:
do_repeat = False
tri += 1
if not g_apply_texture:
for oi in range(len(bpy.data.objects)):
if bpy.data.objects[oi].type == 'CAMERA' or bpy.data.objects[oi].type == 'LAMP':
continue
for tempi in range(len(bpy.data.objects[oi].data.materials)):
if bpy.data.objects[oi].data.materials[tempi].alpha != 1.0:
return True, True ## transparent object
los_start = Vector((random.uniform(-maxlen/10, maxlen/10), random.uniform(-maxlen, maxlen), random.uniform(-maxlen, maxlen)))
loc_step = Vector((random.uniform(-maxlen/10, maxlen/10), random.uniform(-maxlen, maxlen), random.uniform(-maxlen, maxlen)))/ni
rot_base = np.array((random.uniform(0, 2*math.pi), random.uniform(0, 2*math.pi), random.uniform(0, 2*math.pi)))
rot_step = np.array((random.uniform(-maxrot, maxrot), random.uniform(-maxrot, maxrot), random.uniform(-maxrot, maxrot)))/ni
old = open_log(temp_folder)
for ki in [0, ni-1]+list(range(1,ni-1)):
for oi in range(len(bpy.data.objects)):
if bpy.data.objects[oi].type == 'CAMERA' or bpy.data.objects[oi].type == 'LAMP':
continue
bpy.data.objects[oi].location = los_start + loc_step*ki
bpy.data.objects[oi].rotation_euler = Euler(rot_base + (rot_step*ki))
bpy.context.scene.frame_set(ki + 1)
bpy.ops.render.render(write_still=True) #start rendering
if ki == 0 or ki == (ni-1):
Mt = cv2.imread(os.path.join(bpy.context.scene.node_tree.nodes[1].base_path,'image-{:06d}.png'.format(ki+1)),cv2.IMREAD_UNCHANGED)[:,:,-1] > 0
is_border = ((Mt[0,:].sum()+Mt[-1,:].sum()+Mt[:,0].sum()+Mt[:,-1].sum()) > 0) or Mt.sum()==0
if is_border:
if ki == 0:
close_log(old)
return False, True ## sample different starting viewpoint
else:
do_repeat = True ## just sample another motion direction
if do_repeat:
break
close_log(old)
if do_repeat == False:
break
if do_repeat: ## sample different starting viewpoint
return False, True
return False, False
def make_fmo(path, gt_path, video_path):
n_im = 5
background_images = os.listdir(g_background_image_path)
seq_name = random.choice(background_images)
seq_images = glob.glob(os.path.join(g_background_image_path,seq_name,"*.jpg"))
if len(seq_images) <= n_im:
seq_images = glob.glob(os.path.join(g_background_image_path,seq_name,"*.png"))
seq_images.sort()
bgri = random.randint(n_im,len(seq_images)-1)
bgr_path = seq_images[bgri]
B0 = cv2.imread(bgr_path)/255
B = cv2.resize(B0, dsize=(int(g_resolution_x*g_resolution_percentage/100), int(g_resolution_y*g_resolution_percentage/100)), interpolation=cv2.INTER_CUBIC)
B[B > 1] = 1
B[B < 0] = 0
FH = np.zeros(B.shape)
MH = np.zeros(B.shape[:2])
pars = np.array([[(B.shape[0]-1)/2-1, (B.shape[1]-1)/2-1], [1.0, 1.0]]).T
FM = np.zeros(B.shape[:2]+(4,g_fmo_steps,))
centroids = np.zeros((2,g_fmo_steps))
for ki in range(g_fmo_steps):
FM[:,:,:,ki] = cv2.imread(os.path.join(gt_path,'image-{:06d}.png'.format(ki+1)),cv2.IMREAD_UNCHANGED)/g_rgb_color_max
props = regionprops((FM[:,:,-1,ki]>0).astype(int))
if len(props) != 1:
return False
centroids[:,ki] = props[0].centroid
for ki in range(g_fmo_steps):
F = FM[:,:,:-1,ki]*FM[:,:,-1:,ki]
M = FM[:,:,-1,ki]
if ki < g_fmo_steps-1:
pars[:,1] = centroids[:,ki+1] - centroids[:,ki]
H = renderTraj(pars, np.zeros(B.shape[:2]))
H /= H.sum()*g_fmo_steps
for kk in range(3):
FH[:,:,kk] += signal.fftconvolve(H, F[:,:,kk], mode='same')
MH += signal.fftconvolve(H, M, mode='same')
Im = FH + (1 - MH)[:,:,np.newaxis]*B
Im[Im > 1] = 1
Im[Im < 0] = 0
if g_skip_low_contrast:
Diff = np.sum(np.abs(Im - B),2)
        meanval = np.mean(Diff[MH > 0.05])
import sys
import logging
from log import logging_conf
import time
import numpy as np
import matplotlib.pyplot as plt
from brica import VirtualTimeScheduler, Timing
from matchernet.ekf import BundleEKFContinuousTime, MatcherEKF
from matchernet import fn
from matchernet.observer import Observer
from matchernet.state_space_model_2d import StateSpaceModel2Dim
from matchernet import utils
from matchernet.utils import print_flush
logging_conf.set_logger_config("./log/logging.json")
logger = logging.getLogger(__name__)
mu0 = np.array([0, 1.0], dtype=np.float32)
# Copyright 2020 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test evaluating_rewards.tabular."""
from typing import Tuple
import hypothesis
from hypothesis import strategies as st
from hypothesis.extra import numpy as hp_numpy
import numpy as np
import pytest
from evaluating_rewards.distances import tabular
# pylint:disable=no-value-for-parameter
# pylint gets confused with hypothesis draw magic
@st.composite
def distribution(draw, shape) -> np.ndarray:
"""Search strategy for a probability distribution of given shape."""
nonneg_elements = st.floats(min_value=0, max_value=1, allow_nan=False, allow_infinity=False)
arr = draw(hp_numpy.arrays(np.float128, shape, elements=nonneg_elements, fill=st.nothing()))
hypothesis.assume(np.any(arr > 0))
    return arr / np.sum(arr)
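# A minimal usage sketch (not part of the original test module): a hypothesis test built on the
# strategy above, checking that every generated array is a valid probability distribution.
@hypothesis.given(dist=distribution(shape=(5,)))
def test_distribution_strategy_is_normalized(dist):
    assert np.all(dist >= 0)
    assert np.allclose(np.sum(dist), 1.0)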
from quad import QuadratureInfo
import matplotlib.pyplot as plt
import numpy as np
# import numpy.linalg as la
from kernels import eval_sp_dp_QBX, eval_target, bvp, sommerfeld
from kernels import Images_Integral
plt.gca().set_aspect("equal")
xs = -2
ys = 2
k = 10.2
alpha = k # CFIE parameter
beta = 2.04
interval = 15
C = 1
m = int(np.floor(np.log(k / ys * C) / np.log(2)))
import string
import sys
import numpy as np
import sklearn
from datetime import datetime
import buffering
import pathfinder
import utils
from configuration import config, set_configuration
import logger
import app
import torch
import os
import cPickle
from torch.autograd import Variable
import argparse
parser = argparse.ArgumentParser(description='Evaluate dataset on trained model.')
save_dir = "../data/temp/"
parser.add_argument("config_name", type=str, help="Config name")
parser.add_argument("eval", type=str, help="test/valid/feat/train/test_tta/valid_tta")
parser.add_argument("--dump", type=int, default=0, help="Should we store the predictions in raw format")
parser.add_argument("--best", type=int, default=0, help="Should we use the best model instead of the last model")
args = parser.parse_args()
config_name = args.config_name
set_configuration('configs', config_name)
all_tta_feat = args.eval == 'all_tta_feat'
feat = args.eval == 'feat'
train = args.eval == 'train'
train_tta = args.eval == 'train_tta'
train_tta_feat = args.eval == 'train_tta_feat'
valid = args.eval == 'valid'
valid_tta = args.eval == 'valid_tta'
valid_tta_feat = args.eval == 'valid_tta_feat'
valid_tta_majority = args.eval == 'valid_tta_majority'
test = args.eval == 'test'
test_tta = args.eval == 'test_tta'
test_tta_feat = args.eval == 'test_tta_feat'
test_tta_majority = args.eval == 'test_tta_majority'
dump = args.dump
best = args.best
# metadata
metadata_dir = utils.get_dir_path('models', pathfinder.METADATA_PATH)
metadata_path = utils.find_model_metadata(metadata_dir, config_name, best=best)
metadata = utils.load_pkl(metadata_path)
expid = metadata['experiment_id']
if best:
expid += "-best"
print("logs")
# logs
logs_dir = utils.get_dir_path('logs', pathfinder.METADATA_PATH)
sys.stdout = logger.Logger(logs_dir + '/%s-test.log' % expid)
sys.stderr = sys.stdout
print("prediction path")
# predictions path
predictions_dir = utils.get_dir_path('model-predictions', pathfinder.METADATA_PATH)
outputs_path = predictions_dir + '/' + expid
if valid_tta_feat or test_tta_feat or all_tta_feat or train_tta_feat:
outputs_path += '/features'
utils.auto_make_dir(outputs_path)
if dump:
prediction_dump = os.path.join(outputs_path, expid + "_" + args.eval + "_predictions.p")
print('Build model')
model = config().build_model()
model.l_out.load_state_dict(metadata['param_values'])
model.l_out.cuda()
model.l_out.eval()
criterion = config().build_objective()
if test:
data_iterator = config().test_data_iterator
elif feat:
data_iterator = config().feat_data_iterator
def get_preds_targs(data_iterator):
print('Data')
print('n', sys.argv[2], ': %d' % data_iterator.nsamples)
validation_losses = []
preds = []
targs = []
ids = []
for n, (x_chunk, y_chunk, id_chunk) in enumerate(buffering.buffered_gen_threaded(data_iterator.generate())):
inputs, labels = Variable(torch.from_numpy(x_chunk).cuda(), volatile=True), Variable(
torch.from_numpy(y_chunk).cuda(), volatile=True)
predictions = model.l_out(inputs)
loss = criterion(predictions, labels)
validation_losses.append(loss.cpu().data.numpy()[0])
targs.append(y_chunk)
if feat:
for idx, img_id in enumerate(id_chunk):
np.savez(open(outputs_path + '/' + str(img_id) + '.npz', 'w'), features=predictions[idx])
preds.append(predictions.cpu().data.numpy())
# print id_chunk, targets, loss
if n % 50 == 0:
print(n, 'batches processed')
ids.append(id_chunk)
preds = np.concatenate(preds)
targs = np.concatenate(targs)
ids = np.stack(ids)
print('Validation loss', np.mean(validation_losses))
return preds, targs, ids
def get_preds_targs_tta(data_iterator, aggregation="mean", threshold=0.5):
print('Data')
print('n', sys.argv[2], ': %d' % data_iterator.nsamples)
# validation_losses = []
preds = []
targs = []
ids = []
for n, (x_chunk, y_chunk, id_chunk) in enumerate(buffering.buffered_gen_threaded(data_iterator.generate())):
# load chunk to GPU
# if n == 10:
# break
inputs, labels = Variable(torch.from_numpy(x_chunk).cuda(), volatile=True), Variable(
torch.from_numpy(y_chunk).cuda(), volatile=True)
predictions = model.l_out(inputs)
predictions = predictions.cpu().data.numpy()
if aggregation == "majority":
final_prediction = np.zeros((predictions.shape[1],))
for dim in range(predictions.shape[1]):
count = np.bincount(predictions[:, dim] > threshold, minlength=2)
final_prediction[dim] = 1 if count[1] >= predictions.shape[0] / 2.0 else 0
elif aggregation == "mean":
final_prediction = np.mean(predictions, axis=0)
# avg_loss = np.mean(loss, axis=0)
# validation_losses.append(avg_loss)
targs.append(y_chunk[0])
ids.append(id_chunk)
preds.append(final_prediction)
if n % 1000 == 0:
print(n, 'batches processed')
preds = np.stack(preds)
targs = np.stack(targs)
ids = np.stack(ids)
# print 'Validation loss', np.mean(validation_losses)
return preds, targs, ids
def get_preds_targs_tta_feat(data_iterator, prelabel=''):
print('Data')
print('n', sys.argv[2], ': %d' % data_iterator.nsamples)
for n, (x_chunk, y_chunk, id_chunk) in enumerate(buffering.buffered_gen_threaded(data_iterator.generate())):
# load chunk to GPU
# if n == 10:
# break
inputs, labels = Variable(torch.from_numpy(x_chunk).cuda(), volatile=True), Variable(
torch.from_numpy(y_chunk).cuda(), volatile=True)
predictions = model.l_out(inputs, feat=True)
predictions = predictions.cpu().data.numpy()
# final_prediction = np.mean(predictions.cpu().data.numpy(), axis=0)
# avg_loss = np.mean(loss, axis=0)
# validation_losses.append(avg_loss)
# print(predictions.shape)
# print(id_chunk)
for i in range(predictions.shape[0]):
file = open(os.path.join(outputs_path, prelabel + str(id_chunk) + "_" + str(i) + ".npy"), "wb")
np.save(file, predictions[i])
file.close()
if n % 1000 == 0:
print(n, 'batches processed')
if train_tta_feat:
train_it = config().tta_train_data_iterator
get_preds_targs_tta_feat(train_it)
if all_tta_feat:
all_it = config().tta_all_data_iterator
get_preds_targs_tta_feat(all_it)
if train or train_tta:
if train:
train_it = config().trainset_valid_data_iterator
preds, targs, ids = get_preds_targs(train_it)
elif train_tta:
train_it = config().tta_train_data_iterator
preds, targs, ids = get_preds_targs_tta(train_it)
if dump:
file = open(prediction_dump, "wb")
cPickle.dump([preds, targs, ids], file)
file.close()
if valid_tta_feat:
valid_it = config().tta_valid_data_iterator
get_preds_targs_tta_feat(valid_it)
if valid or valid_tta or valid_tta_majority:
if valid:
valid_it = config().valid_data_iterator
preds, targs, ids = get_preds_targs(valid_it)
elif valid_tta:
valid_it = config().tta_valid_data_iterator
preds, targs, ids = get_preds_targs_tta(valid_it)
elif valid_tta_majority:
valid_it = config().tta_valid_data_iterator
preds, targs, ids = get_preds_targs_tta(valid_it, aggregation="majority", threshold=0.53)
if dump:
file = open(prediction_dump, "wb")
cPickle.dump([preds, targs, ids], file)
file.close()
tps = [np.sum(qpreds[:, i] * targs[:, i]) for i in range(17)]
    fps = [np.sum(qpreds[:, i] * (1 - targs[:, i])) for i in range(17)]
from labelmodels.label_model import ClassConditionalLabelModel, LearningConfig, init_random
import numpy as np
from scipy import sparse
import torch
from torch import nn
class HMM(ClassConditionalLabelModel):
"""A generative label model that treats a sequence of true class labels as a
Markov chain, as in a hidden Markov model, and treats all labeling functions
as conditionally independent given the corresponding true class label, as
in a Naive Bayes model.
Proposed for crowdsourced sequence annotations in: <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>. Aggregating and Predicting
Sequence Labels from Crowd Annotations. In Annual Meeting of the Association
for Computational Linguistics, 2017.
"""
def __init__(self, num_classes, num_lfs, init_acc=.9, acc_prior=1,
balance_prior=1):
"""Constructor.
Initializes labeling function accuracies using optional argument and all
other model parameters uniformly.
:param num_classes: number of target classes, i.e., binary
classification = 2
:param num_lfs: number of labeling functions to model
:param init_acc: initial estimated labeling function accuracy, must
be a float in [0,1]
:param acc_prior: strength of regularization of estimated labeling
function accuracies toward their initial values
"""
super().__init__(num_classes, num_lfs, init_acc, acc_prior)
self.start_balance = nn.Parameter(torch.zeros([num_classes]))
self.transitions = nn.Parameter(torch.zeros([num_classes, num_classes]))
self.balance_prior = balance_prior
def forward(self, votes, seq_starts):
"""
Computes log likelihood of sequence of labeling function outputs for
each (sequence) example in batch.
For efficiency, this function prefers that votes is an instance of
scipy.sparse.coo_matrix. You can avoid a conversion by passing in votes
with this class.
:param votes: m x n matrix in {0, ..., k}, where m is the sum of the
lengths of the sequences in the batch, n is the number of
labeling functions and k is the number of classes
:param seq_starts: vector of length l of row indices in votes indicating
the start of each sequence, where l is the number of
sequences in the batch. So, votes[seq_starts[i]] is
the row vector of labeling function outputs for the
first element in the ith sequence
:return: vector of length l, where element is the log-likelihood of the
corresponding sequence of outputs in votes
"""
jll = self._get_labeling_function_likelihoods(votes)
norm_start_balance = self._get_norm_start_balance()
norm_transitions = self._get_norm_transitions()
for i in range(0, votes.shape[0]):
if i in seq_starts:
jll[i] += norm_start_balance
else:
joint_class_pair = jll[i-1, :].clone().unsqueeze(1)
joint_class_pair = joint_class_pair.repeat(1, self.num_classes)
joint_class_pair += norm_transitions
jll[i] += joint_class_pair.logsumexp(0)
seq_ends = [x - 1 for x in seq_starts] + [votes.shape[0]-1]
seq_ends.remove(-1)
mll = torch.logsumexp(jll[seq_ends], dim=1)
return mll
def estimate_label_model(self, votes, seq_starts, config=None):
"""Estimates the parameters of the label model based on observed
labeling function outputs.
Note that a minibatch's size refers to the number of sequences in the
minibatch.
:param votes: m x n matrix in {0, ..., k}, where m is the sum of the
lengths of the sequences in the data, n is the number of
labeling functions and k is the number of classes
:param seq_starts: vector of length l of row indices in votes indicating
the start of each sequence, where l is the number of
sequences in the batch. So, votes[seq_starts[i]] is
the row vector of labeling function outputs for the
first element in the ith sequence
:param config: optional LearningConfig instance. If None, initialized
with default constructor
"""
if config is None:
config = LearningConfig()
# Initializes random seed
init_random(config.random_seed)
# Converts to CSR and integers to standardize input
votes = sparse.csr_matrix(votes, dtype=np.int)
seq_starts = np.array(seq_starts, dtype=np.int)
batches = self._create_minibatches(
votes, seq_starts, config.batch_size, shuffle_seqs=True)
self._do_estimate_label_model(batches, config)
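    # A minimal usage sketch (not part of the original module), with invented shapes:
    #   model = HMM(num_classes=3, num_lfs=5)
    #   votes: scipy.sparse matrix of shape (num_tokens, 5) with entries in {0, ..., 3}
    #   seq_starts: e.g. [0, 17, 40], the row index where each sequence begins
    #   model.estimate_label_model(votes, seq_starts)
    #   labels, label_probs = model.get_most_probable_labels(votes, seq_starts)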
def get_most_probable_labels(self, votes, seq_starts):
"""
Computes the most probable underlying sequence of labels given function
outputs
:param votes: m x n matrix in {0, ..., k}, where m is the sum of the
lengths of the sequences in the data, n is the number of
labeling functions and k is the number of classes
:param seq_starts: vector of length l of row indices in votes indicating
the start of each sequence, where l is the number of
sequences in the batch. So, votes[seq_starts[i]] is
the row vector of labeling function outputs for the
first element in the ith sequence
:return: vector of length m, where element is the most likely predicted labels
"""
# Converts to CSR and integers to standardize input
votes = sparse.csr_matrix(votes, dtype=np.int)
seq_starts = np.array(seq_starts, dtype=np.int)
out = np.ndarray((votes.shape[0],), dtype=np.int)
out_prob = np.ndarray((votes.shape[0],), dtype=object)
offset = 0
for votes, seq_starts in self._create_minibatches(votes, seq_starts, 32):
jll = self._get_labeling_function_likelihoods(votes)
norm_start_balance = self._get_norm_start_balance()
norm_transitions = self._get_norm_transitions()
T = votes.shape[0]
bt = torch.zeros([T, self.num_classes])
bts = torch.zeros([T, self.num_classes, self.num_classes])
for i in range(0, T):
if i in seq_starts:
jll[i] += norm_start_balance
else:
p = jll[i-1].clone().unsqueeze(1).repeat(
1, self.num_classes) + norm_transitions
jll[i] += torch.max(p, dim=0)[0]
bt[i, :] = torch.argmax(p, dim=0)
bts[i, :, :] = p
jll = torch.exp(jll)
seq_ends = [x - 1 for x in seq_starts] + [votes.shape[0] - 1]
res = []
res_prob = []
j = T-1
while j >= 0:
if j in seq_ends:
res.append(torch.argmax(jll[j, :]).item())
res_prob.append(jll[j,:].detach().numpy())
if j in seq_starts:
j -= 1
continue
res.append(int(bt[j, res[-1]].item()))
res_prob.append(torch.exp(bts[j,:,res[-1]]).detach().numpy())
j -= 1
res = [x + 1 for x in res]
res.reverse()
res_prob.reverse()
for i in range(len(res)):
out[offset + i] = res[i]
out_prob[offset + i] = res_prob[i]
offset += len(res)
return out, out_prob
def get_label_distribution(self, votes, seq_starts):
"""Returns the unary and pairwise marginals over true labels estimated
by the model.
:param votes: m x n matrix in {0, ..., k}, where m is the sum of the
lengths of the sequences in the data, n is the number of
labeling functions and k is the number of classes
:param seq_starts: vector of length l of row indices in votes indicating
the start of each sequence, where l is the number of
sequences in the batch. So, votes[seq_starts[i]] is
the row vector of labeling function outputs for the
first element in the ith sequence
:return: p_unary, p_pairwise where p_unary is a m x k matrix representing
the marginal distributions over individual labels, and p_pairwise
is a m x k x k tensor representing pairwise marginals over the
ith and (i+1)th labels. For the last element in a sequence, the
k x k matrix will be all zeros.
"""
# Converts to CSR and integers to standardize input
        votes = sparse.csr_matrix(votes, dtype=int)
        seq_starts = np.array(seq_starts, dtype=int)
out_unary = np.zeros((votes.shape[0], self.num_classes))
        out_pairwise = np.zeros((votes.shape[0], self.num_classes, self.num_classes))
# importing dependencies
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix # For computing accuracy
class BaseRegressor():
def __init__(self, num_feats, learning_rate=0.1, tol=0.001, max_iter=100, batch_size=12):
# initializing parameters
self.W = np.random.randn(num_feats + 1).flatten()
# assigning hyperparameters
self.lr = learning_rate
self.tol = tol
self.max_iter = max_iter
self.batch_size = batch_size
self.num_feats = num_feats
# defining list for storing loss history
self.loss_history_train = []
self.loss_history_val = []
def calculate_gradient(self, X, y):
pass
def loss_function(self, y_true, y_pred):
pass
def make_prediction(self, X):
pass
def train_model(self, X_train, y_train, X_val, y_val):
# Padding data with vector of ones for bias term
X_train = np.hstack([X_train, np.ones((X_train.shape[0], 1))])
X_val = np.hstack([X_val, np.ones((X_val.shape[0], 1))])
        # Defining initial values for while loop
prev_update_size = 1
iteration = 1
# Gradient descent
while prev_update_size > self.tol and iteration < self.max_iter:
# Shuffling the training data for each epoch of training
shuffle_arr = np.concatenate([X_train, np.expand_dims(y_train, 1)], axis=1)
# In place shuffle
np.random.shuffle(shuffle_arr)
X_train = shuffle_arr[:, :-1]
y_train = shuffle_arr[:, -1].flatten()
num_batches = int(X_train.shape[0]/self.batch_size) + 1
X_batch = np.array_split(X_train, num_batches)
y_batch = np.array_split(y_train, num_batches)
# Generating list to save the param updates per batch
update_size_epoch = []
            # Iterating through batches (full for loop is one epoch of training).
            # Batch-local names are used so the full training set X_train/y_train
            # is not overwritten before the next epoch's shuffle.
            for X_mb, y_mb in zip(X_batch, y_batch):
                # Making prediction on batch
                y_pred = self.make_prediction(X_mb)
                # Calculating loss
                loss_train = self.loss_function(X_mb, y_mb)
                # Adding current loss to loss history record
                self.loss_history_train.append(loss_train)
                # Storing previous weights and bias
                prev_W = self.W
                # Calculating gradient of loss function with respect to each parameter
                grad = self.calculate_gradient(X_mb, y_mb)
                # Updating parameters
                new_W = prev_W - self.lr * grad
                self.W = new_W
                # Saving step size
                update_size_epoch.append(np.abs(new_W - prev_W))
# Validation pass
loss_val = self.loss_function(X_val, y_val)
self.loss_history_val.append(loss_val)
# Defining step size as the average over the past epoch
prev_update_size = np.mean(np.array(update_size_epoch))
# Updating iteration number
iteration += 1
def plot_loss_history(self):
"""
Plots the loss history after training is complete.
"""
loss_hist = self.loss_history_train
loss_hist_val = self.loss_history_val
assert len(loss_hist) > 0, "Need to run training before plotting loss history"
fig, axs = plt.subplots(2, figsize=(8,8))
fig.suptitle('Loss History')
axs[0].plot(np.arange(len(loss_hist)), loss_hist)
axs[0].set_title('Training Loss')
axs[1].plot(np.arange(len(loss_hist_val)), loss_hist_val)
axs[1].set_title('Validation Loss')
plt.xlabel('Steps')
axs[0].set_ylabel('Train Loss')
axs[1].set_ylabel('Val Loss')
fig.tight_layout()
plt.show()
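# Example usage (a sketch; X_tr, y_tr, X_val, y_val are assumed to be numpy
# arrays prepared by the caller, with num_feats feature columns in X):
#   model = LogisticRegression(num_feats=X_tr.shape[1], learning_rate=0.05,
#                              max_iter=200, batch_size=32)
#   model.train_model(X_tr, y_tr, X_val, y_val)
#   model.plot_loss_history()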
# import required modules
class LogisticRegression(BaseRegressor):
def __init__(self, num_feats, learning_rate=0.1, tol=0.0001, max_iter=100, batch_size=12):
super().__init__(num_feats, learning_rate, tol, max_iter, batch_size)
def set_W(self, W):
"""
Set initial value of W (for unit testing purposes)
Params:
W (np.ndarray): weight vector with bias term (initialized)
"""
self.W = W
def get_W(self) -> np.ndarray:
"""
Returns self.W for unit testing purposes
Returns:
            self.W (np.ndarray): current weight vector, including the bias term
"""
return self.W
def get_accuracy(self, X, y) -> float:
"""
Returns the accuracy of predictions given true labels
Params:
X (np.ndarray): feature values
y (np.array): labels corresponding to X
Returns:
Accuracy of predictions on dataset X defined as (TN+TP)/(TN+TP+FN+FP)
"""
y_pred = self.make_prediction(X)
y_pred[y_pred>=0.5] = 1
y_pred[y_pred<0.5] = 0
cf = confusion_matrix(y, y_pred)
# Compute accuracy of predictions
accuracy = (cf[0,0] + cf[1,1]) / np.sum(cf)
return accuracy
def calculate_gradient(self, X, y) -> np.ndarray:
"""
TODO: write function to calculate gradient of the
logistic loss function to update the weights
Params:
X (np.ndarray): feature values
y (np.array): labels corresponding to X
Returns:
gradients for given loss function (np.ndarray)
"""
# X.shape = 1600 X 7
# y.shape = 1600 X 1
num_labels = y.shape[0]
y_pred = self.make_prediction(X) # y_pred.shape = 1600 X 1
grad = (1/num_labels) * (X.T @ (y_pred - y)) # grad.shape = 7 X 1
return grad
def loss_function(self, X, y) -> float:
"""
TODO: get y_pred from input X and implement binary cross
entropy loss function. Binary cross entropy loss assumes that
the classification is either 1 or 0, not continuous, making
it more suited for (binary) classification.
Params:
X (np.ndarray): feature values
y (np.array): labels corresponding to X
Returns:
average loss
"""
# X.shape = 1600 X 7
# y.shape = 1600 X 1
y_pred = self.make_prediction(X) # y_pred.shape = 1600 X 1
        y_0 = (1-y) * np.log(1-y_pred)
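        # A plausible completion (a sketch only; the row is truncated here):
        # combine both terms into the averaged binary cross-entropy.
        #   y_1 = y * np.log(y_pred)
        #   return -np.mean(y_1 + y_0)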
# *** tensorrt校准模块 ***
import os
import torch
import torch.nn.functional as F
import tensorrt as trt
import pycuda.driver as cuda
import pycuda.autoinit
import numpy as np
import ctypes
import logging
import util_trt
logger = logging.getLogger(__name__)
ctypes.pythonapi.PyCapsule_GetPointer.restype = ctypes.c_char_p
ctypes.pythonapi.PyCapsule_GetPointer.argtypes = [ctypes.py_object, ctypes.c_char_p]
# calibrator
class Calibrator(trt.IInt8EntropyCalibrator2):
def __init__(self, input_layers, stream, cache_file=""):
trt.IInt8EntropyCalibrator2.__init__(self)
self.input_layers = input_layers
self.stream = stream
self.d_input = cuda.mem_alloc(self.stream.calibration_data.nbytes)
self.cache_file = cache_file
stream.reset()
def get_batch_size(self):
return self.stream.batch_size
def get_batch(self, bindings, names):
batch = self.stream.next_batch()
if not batch.size:
return None
cuda.memcpy_htod(self.d_input, batch)
for i in self.input_layers[0]:
assert names[0] != i
bindings[0] = int(self.d_input)
return bindings
def read_calibration_cache(self):
# If there is a cache, use it instead of calibrating again. Otherwise, implicitly return None.
if os.path.exists(self.cache_file):
with open(self.cache_file, "rb") as f:
logger.info("Using calibration cache to save time: {:}".format(self.cache_file))
return f.read()
def write_calibration_cache(self, cache):
with open(self.cache_file, "wb") as f:
logger.info("Caching calibration data for future use: {:}".format(self.cache_file))
f.write(cache)
# calibration_stream
# mnist/cifar...
class ImageBatchStream():
def __init__(self, dataset, transform, batch_size, img_size, max_batches):
self.transform = transform
self.batch_size = batch_size
self.max_batches = max_batches
self.dataset = dataset
self.calibration_data = np.zeros((batch_size,) + img_size, dtype=np.float32) # This is a data holder for the calibration
self.batch_count = 0
def reset(self):
self.batch_count = 0
def next_batch(self):
if self.batch_count < self.max_batches:
for i in range(self.batch_size):
x = self.dataset[i + self.batch_size * self.batch_count]
x = util_trt.to_numpy(x).astype(dtype=np.float32)
if self.transform:
x = self.transform(x)
self.calibration_data[i] = x.data
self.batch_count += 1
return np.ascontiguousarray(self.calibration_data, dtype=np.float32)
else:
return np.array([])
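# Example wiring (a sketch, not part of this module): the batch stream feeds
# the calibrator, which is attached to a TensorRT builder config before
# building an INT8 engine. dataset and transform are caller-supplied.
#   stream = ImageBatchStream(dataset, transform, batch_size=8,
#                             img_size=(3, 224, 224), max_batches=10)
#   calib = Calibrator(["input"], stream, cache_file="calibration.cache")
#   # config.set_flag(trt.BuilderFlag.INT8); config.int8_calibrator = calib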
'''
# ocr
class OCRBatchStream():
def __init__(self, dataset, transform, batch_size, img_size, max_batches):
self.transform = transform
self.batch_size = batch_size
self.img_size = img_size
self.max_batches = max_batches
self.dataset = dataset
self.args = load_default_param()
self.calibration_data = np.zeros((self.batch_size, *self.img_size), dtype=np.float32) # This is a data holder for the calibration
self.batch_count = 0
def reset(self):
self.batch_count = 0
def next_batch(self):
if self.batch_count < self.max_batches:
for i in range(self.batch_size):
x = self.dataset[i + self.batch_size * self.batch_count]['img']
x = torch.FloatTensor(x)
x = util_trt.to_numpy(x).astype(dtype=np.float32)
x = np.transpose(x ,(1, 2, 0))
# ----------------- resize -----------------
select_size_list = self.args.select_size_list
resize_size = select_resize_size(x, select_size_list)
input_img, ori_scale_img = resize_img(x, resize_size, self.args)
# ----------------- crop -----------------
sub_imgs, sub_img_indexes, sub_img_tensors = crop_img_trt(input_img, resize_size, self.args)
for k in range(len(sub_img_tensors)):
if sub_img_tensors[k].shape[2] not in select_size_list or sub_img_tensors[k].shape[3] not in select_size_list:
print('size pad error!', sub_img_tensors[k].shape)
sys.exit()
for k in range(len(sub_img_tensors)):
if len(sub_img_tensors[k].shape) == 3 and sub_img_tensors[k].shape[0] != 3:
sub_img_tensors[k] = np.transpose(sub_img_tensors[k], (2, 0, 1))
sub_img_tensors[k] = sub_img_tensors[k][np.newaxis, ...].copy()
x = sub_img_tensors[k]
# You should implement your own data pipeline for writing the calibration_data
if self.transform:
x = self.transform(x)
self.calibration_data[i] = x.data
self.batch_count += 1
return np.ascontiguousarray(self.calibration_data, dtype=np.float32)
else:
return np.array([])
'''
# segmentation
class SegBatchStream():
def __init__(self, dataset, transform, batch_size, img_size, max_batches):
self.transform = transform
self.batch_size = batch_size
self.img_size = img_size
self.max_batches = max_batches
self.dataset = dataset
self.calibration_data = np.zeros((self.batch_size, *self.img_size), dtype=np.float32) # This is a data holder for the calibration
self.batch_count = 0
def reset(self):
self.batch_count = 0
def next_batch(self):
if self.batch_count < self.max_batches:
for i in range(self.batch_size):
x = self.dataset[i + self.batch_size * self.batch_count]['img_data'][0]
x = F.interpolate(x, size=(self.img_size[1], self.img_size[2]))
x = util_trt.to_numpy(x).astype(dtype=np.float32)
if self.transform:
x = self.transform(x)
self.calibration_data[i] = x.data
self.batch_count += 1
return np.ascontiguousarray(self.calibration_data, dtype=np.float32)
else:
            return np.array([])
"""
The very first version of this program; nothing has been modified.
"""
import numpy as np
from numpy import genfromtxt
import matplotlib.pyplot as plt
import pylab as pl
import os
from scipy import signal,interpolate
from scipy.signal import find_peaks
from scipy.fftpack import fft,ifft
from scipy.signal import butter, lfilter
import random
def Phase_difference(unwarp_phase):
phase_diff = np.zeros((len(unwarp_phase),))
for i in range(len(unwarp_phase)-1):
phase_diff[i] = unwarp_phase[i+1] - unwarp_phase[i]
return phase_diff
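# Note: for indices 0 .. len-2 this matches np.diff(unwarp_phase); the last
# element is left at 0. An equivalent vectorized sketch:
#   phase_diff = np.append(np.diff(unwarp_phase), 0.0)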
def Remove_impulse_noise(phase_diff, thr):
removed_noise = np.zeros((len(phase_diff),))
for i in range(1, len(phase_diff)-1):
forward = phase_diff[i] - phase_diff[i-1]
backward = phase_diff[i] - phase_diff[i+1]
#print(forward, backward)
if (forward > thr and backward > thr) or (forward < -thr and backward < -thr):
            removed_noise[i] = phase_diff[i-1] + (phase_diff[i+1] - phase_diff[i-1])/2
        else:
            removed_noise[i] = phase_diff[i]
return removed_noise
def Amplify_signal(removed_noise):
return removed_noise*1.0
def butter_bandpass(lowcut, highcut, fs, order=5):
nyq = 0.5 * fs
low = lowcut / nyq
high = highcut / nyq
b, a = butter(order, [low, high], btype='band')
return b, a
def butter_bandpass_filter(data, lowcut, highcut, fs, order=5):
b, a = butter_bandpass(lowcut, highcut, fs, order=order)
y = lfilter(b, a, data)
return y
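# Example (illustrative values only; the sampling rate and band edges are
# assumptions): isolate a roughly heart-rate band from the phase signal.
#   fs = 20.0
#   heart_band = butter_bandpass_filter(phase_diff, 0.8, 2.0, fs, order=5)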
def normolize(data):
output=(data-np.min(data))/(np.max(data)-np.min(data))
return output
def MLR(data,delta):
data_s=np.copy(data)
mean=np.copy(data)
m=np.copy(data)
b=np.copy(data)
#calculate m
for t in range(len(data)):
# constraint
if ((t-delta)<0 or (t+delta)>(len(data)-1)):
None
# if the sliding window is in the boundary
else:
mean[t]=(np.sum(data[int(t-delta):int(t+delta+1)]))/(2*delta+1)
# calaulate the sigma
mtmp=0
for j in range(-delta,delta+1):
mtmp=mtmp+(j*(data[j+t]-mean[t]))
m[t] = 3*mtmp/(delta*(2*delta+1)*(delta+1))
b[t] = mean[t]-(t*m[t])
for t in range(len(data)):
# constraint
# if the sliding window is in the boundary
if ((t-2*delta)>=0 and (t+2*delta)<=(len(data)-1)):
# calaulate smooth ECG
tmp=0
for i in range(-delta,delta+1):
tmp=tmp+(t*m[t+i]+b[t+i])
# print(i)
data_s[t]=tmp/(2*delta+1)
else:
data_s[t]=data[t]
return data_s
def feature_detection(data):
data_v=np.copy(data)
feature_peak, _ = find_peaks(data)
feature_valley, _ = find_peaks(-data)
data_v=np.multiply(np.square(data),np.sign(data))
return feature_peak,feature_valley,data_v
def feature_compress(feature_peak,feature_valley,time_thr,signal):
feature_compress_peak=np.empty([1,0])
feature_compress_valley=np.empty([1,0])
# sort all the feature
feature=np.append(feature_peak,feature_valley)
feature=np.sort(feature)
# grouping the feature
ltera=0
while(ltera < (len(feature)-1)):
# record start at valley or peak (peak:0 valley:1)
i, = np.where(feature_peak == feature[ltera])
if(i.size==0):
start=1
else:
start=0
ltera_add=ltera
while(feature[ltera_add+1]-feature[ltera_add]<time_thr):
# skip the feature which is too close
ltera_add=ltera_add+1
#break the loop if it is out of boundary
if(ltera_add >= (len(feature)-1)):
break
# record end at valley or peak (peak:0 valley:1)
i, = np.where(feature_peak == feature[ltera_add])
if(i.size==0):
end=1
else:
end=0
# if it is too close
if (ltera!=ltera_add):
# situation1: began with valley end with valley
if(start==1 and end==1):
# using the lowest feature as represent
                tmp=(np.min(signal[feature[ltera:ltera_add]])
from __future__ import print_function
"""
Classes that provide support functions for minis_methods,
including fitting, smoothing, filtering, and some analysis.
Test run timing:
cb: 0.175 s (with cython version of algorithm); misses overlapping events
aj: 0.028 s, plus gets overlapping events
July 2017
Note: all values are MKS (Seconds, plus Volts, Amps)
per acq4 standards...
"""
import numpy as np
import scipy.signal
from dataclasses import dataclass, field
import traceback
from typing import Union, List
import timeit
from scipy.optimize import curve_fit
import lmfit
import pylibrary.tools.digital_filters as dfilt
from pylibrary.tools.cprint import cprint
@dataclass
class Filtering:
LPF_applied: bool=False
HPF_applied: bool=False
LPF_frequency: Union[float, None]= None
HPF_frequency: Union[float, None]= None
def def_empty_list():
return [0] # [0.0003, 0.001] # in seconds (was 0.001, 0.010)
def def_empty_list2():
return [[None]] # [0.0003, 0.001] # in seconds (was 0.001, 0.010)
@dataclass
class AverageEvent:
"""
The AverageEvent class holds the averaged events from all
traces/trials
"""
averaged : bool= False # set flags in case of no events found
avgeventtb:Union[List, np.ndarray] = field(
default_factory=def_empty_list)
avgevent: Union[List, np.ndarray] =field(
default_factory=def_empty_list)
Nevents: int = 0
avgnpts: int = 0
fitted :bool = False
fitted_tau1 :float = np.nan
fitted_tau2 :float = np.nan
Amplitude :float = np.nan
avg_fiterr :float = np.nan
risetenninety:float = np.nan
decaythirtyseven:float = np.nan
@dataclass
class Summaries:
"""
The Summaries dataclass holdes the results of the
individual events that were detected,
as well as the results of various fits
and the averge fit
"""
onsets: Union[List, np.ndarray] = field(
default_factory=def_empty_list2)
peaks: Union[List, np.ndarray] = field(
default_factory=def_empty_list)
smpkindex: Union[List, np.ndarray] = field(
default_factory=def_empty_list)
smoothed_peaks : Union[List, np.ndarray] = field(
default_factory=def_empty_list)
amplitudes : Union[List, np.ndarray] = field(
default_factory=def_empty_list)
Qtotal : Union[List, np.ndarray] = field(
default_factory=def_empty_list)
individual_events: bool = False
average: object = AverageEvent()
allevents: Union[List, np.ndarray] = field(
default_factory=def_empty_list)
event_trace_list : Union[List] = field(
default_factory=def_empty_list)
class MiniAnalyses:
def __init__(self):
"""
Base class for Clements-Bekkers and Andrade-Jonas methods
Provides template generation, and summary analyses
Allows use of common methods between different algorithms
"""
self.verbose = False
self.ntraces = 1
self.filtering = Filtering()
self.risepower = 4.0
self.min_event_amplitude = 5.0e-12 # pA default
self.Criterion = [None]
self.template = None
self.template_tmax = 0.
self.analysis_window=[None, None] # specify window or entire data set
super().__init__()
def setup(
self,
ntraces: int = 1,
tau1: Union[float, None] = None,
tau2: Union[float, None] = None,
template_tmax: float = 0.05,
dt_seconds: Union[float, None] = None,
delay: float = 0.0,
sign: int = 1,
eventstartthr: Union[float, None] = None,
risepower: float = 4.0,
min_event_amplitude: float = 5.0e-12,
threshold:float = 2.5,
global_SD:Union[float, None] = None,
analysis_window:[Union[float, None], Union[float, None]] = [None, None],
lpf:Union[float, None] = None,
hpf:Union[float, None] = None,
notch:Union[float, None] = None,
) -> None:
"""
Just store the parameters - will compute when needed
Use of globalSD and threshold:
if glboal SD is None, we use the threshold as it.
If Global SD has a value, then we use that rather than the
current trace SD for threshold determinations
"""
cprint('r', 'SETUP***')
assert sign in [-1, 1] # must be selective, positive or negative events only
self.ntraces = ntraces
self.Criterion = [[] for x in range(ntraces)]
self.sign = sign
self.taus = [tau1, tau2]
self.dt_seconds = dt_seconds
self.template_tmax = template_tmax
self.idelay = int(delay / self.dt_seconds) # points delay in template with zeros
self.template = None # reset the template if needed.
self.eventstartthr = eventstartthr
self.risepower = risepower
self.min_event_amplitude = min_event_amplitude
self.threshold = threshold
self.sdthr = self.threshold # for starters
self.analysis_window = analysis_window
self.lpf = lpf
self.hpf = hpf
self.notch = notch
self.reset_filtering()
def set_sign(self, sign: int = 1):
self.sign = sign
def set_dt_seconds(self, dt_seconds:Union[None, float] = None):
self.dt_seconds = dt_seconds
def set_risepower(self, risepower: float = 4):
if risepower > 0 and risepower <= 8:
self.risepower = risepower
else:
raise ValueError("Risepower must be 0 < n <= 8")
# def set_notch(self, notches):
# if isinstance(nothce, float):
# notches = [notches]
# elif isinstance(notches, None):
# self.notch = None
# self.Notch_applied = False
# return
# elif isinstance(notches, list):
# self.notch = notches
# else:
# raise ValueError("set_notch: Notch must be list, float or None")
def _make_template(self):
"""
Private function: make template when it is needed
"""
tau_1, tau_2 = self.taus # use the predefined taus
t_psc = np.arange(0, self.template_tmax, self.dt_seconds)
self.t_template = t_psc
Aprime = (tau_2 / tau_1) ** (tau_1 / (tau_1 - tau_2))
self.template = np.zeros_like(t_psc)
tm = (
1.0
/ Aprime
* (
(1 - (np.exp(-t_psc / tau_1))) ** self.risepower
* np.exp((-t_psc / tau_2))
)
)
# tm = 1./2. * (np.exp(-t_psc/tau_1) - np.exp(-t_psc/tau_2))
if self.idelay > 0:
self.template[self.idelay :] = tm[: -self.idelay] # shift the template
else:
self.template = tm
if self.sign > 0:
self.template_amax = np.max(self.template)
else:
self.template = -self.template
self.template_amax = np.min(self.template)
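    # Example (hypothetical parameter values): a 2 ms rise / 10 ms decay
    # template sampled at 0.1 ms; setup() stores the parameters and
    # _make_template() fills self.template and self.t_template.
    #   m = MiniAnalyses()
    #   m.setup(ntraces=1, tau1=0.002, tau2=0.010, dt_seconds=1e-4,
    #           template_tmax=0.05, sign=-1)
    #   m._make_template()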
def reset_filtering(self):
self.filtering.LPF_applied = False
self.filtering.HPF_applied = False
self.filtering.Notch_applied = False
def LPFData(
self, data: np.ndarray, lpf: Union[float, None] = None, NPole: int = 8
) -> np.ndarray:
assert (not self.filtering.LPF_applied) # block repeated application of filtering
cprint('y', f"minis_methods_common, LPF data: {lpf:f}")
# old_data = data.copy()
if lpf is not None :
# cprint('y', f" ... lpf at {lpf:f}")
if lpf > 0.49 / self.dt_seconds:
raise ValueError(
"lpf > Nyquist: ", lpf, 0.49 / self.dt_seconds, self.dt_seconds, 1.0 / self.dt_seconds
)
data = dfilt.SignalFilter_LPFButter(data, lpf, 1./self.dt_seconds, NPole=8)
self.filtering.LPF = lpf
self.filtering.LPF_applied = True
# import matplotlib.pyplot as mpl
# print(old_data.shape[0]*self.dt_seconds)
# tb = np.arange(0, old_data.shape[0]*self.dt_seconds, self.dt_seconds)
# print(tb.shape)
# mpl.plot(tb, old_data, 'b-')
# mpl.plot(tb, data, 'k-')
# mpl.show()
# exit()
return data
def HPFData(self, data:np.ndarray, hpf: Union[float, None] = None, NPole: int = 8) -> np.ndarray:
assert (not self.filtering.HPF_applied) # block repeated application of filtering
if hpf is None or hpf == 0.0 :
return data
if len(data.shape) == 1:
ndata = data.shape[0]
else:
ndata = data.shape[1]
nyqf = 0.5 * ndata * self.dt_seconds
# cprint('y', f"minis_methods: hpf at {hpf:f}")
if hpf < 1.0 / nyqf: # duration of a trace
raise ValueError(
"hpf < Nyquist: ",
hpf,
"nyquist",
1.0 / nyqf,
"ndata",
ndata,
"dt in seconds",
self.dt_seconds,
"sampelrate",
1.0 / self.dt,
)
data = dfilt.SignalFilter_HPFButter(data-data[0], hpf, 1.0 / self.dt_seconds, NPole=4)
self.filtering.HPF = hpf
self.filtering.HPF_applied = True
return data
# def NotchData(self, data:np.ndarray, notch: Union[list, None] = None) -> np.ndarray:
# assert (not self.filtering.notch_applied) # block repeated application of filtering
# if notch is None or len(notch) == 0 :
# return data
# if len(data.shape) == 1:
# ndata = data.shape[0]
# else:
# ndata = data.shape[1]
#
# data[i] = dfilt.NotchFilter(
# data[i]-data[0],
# notch,
# Q=20.0,
# samplefreq=1.0 / self.dt_seconds,
# )
# self.filtering.notch = notch
# self.filtering.Notch_applied = True
#
# return data
def prepare_data(self, data):
"""
This function prepares the incoming data for the mini analyses.
1. Clip the data in time (remove sections with current or voltage steps)
2. Filter the data (LPF, HPF)
"""
# cprint('r', 'Prepare data')
self.timebase = np.arange(0.0, data.shape[0] * self.dt_seconds, self.dt_seconds)
if self.analysis_window[1] is not None:
jmax = np.argmin(np.fabs(self.timebase - self.analysis_window[1]))
else:
jmax = len(self.timebase)
if self.analysis_window[0] is not None:
            jmin = np.argmin(np.fabs(self.timebase - self.analysis_window[0]))
else:
jmin = 0
data = data[jmin:jmax]
if self.verbose:
if self.lpf is not None:
cprint('y', f"minis_methods_common, prepare_data: LPF: {self.lpf:.1f} Hz")
else:
cprint('r', f"minis_methods_common, no LPF applied")
if self.hpf is not None:
cprint('y', f"minis_methods_common, prepare_data: HPF: {self.hpf:.1f} Hz")
else:
cprint('r', f"minis_methods_common, no HPF applied")
if isinstance(self.lpf, float):
data = self.LPFData(data, lpf=self.lpf)
if isinstance(self.hpf, float):
data = self.HPFData(data, hpf=self.hpf)
# if isinstance(self.notch, list):
# data = self.HPFData(data, notch=self.notch)
self.data = data
self.timebase = self.timebase[jmin:jmax]
def moving_average(self, a, n: int = 3) -> (np.array, int):
ret = np.cumsum(a, dtype=float)
ret[n:] = ret[n:] - ret[:-n]
# return ret[n - 1 :] / n, n
return ret[int(n/2):] / n, n # re-align array
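    # Worked example (verifiable by hand): with a = [1, 2, 3, 4, 5] and n = 3
    # this returns (array([1., 2., 3., 4.]), 3); interior values are true
    # 3-point means, while the leading value is a partial-window edge term.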
def remove_outliers(self, x:np.ndarray, scale:float=3.0) -> np.ndarray:
a = np.array(x)
upper_quartile = np.percentile(a, 75)
lower_quartile = np.percentile(a, 25)
IQR = (upper_quartile - lower_quartile) * scale
quartileSet = (lower_quartile - IQR, upper_quartile + IQR)
result = np.where(((a >= quartileSet[0]) & (a <= quartileSet[1])), a, np.nan)
# import matplotlib.pyplot as mpl
# mpl.plot(x)
# mpl.plot(result)
# mpl.show()
return result
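    # remove_outliers keeps values inside [Q1 - scale*IQR, Q3 + scale*IQR] and
    # replaces everything outside that band with NaN, so nanmean/nanstd style
    # statistics downstream simply ignore the flagged points.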
def summarize(self, data, order: int = 11, verbose: bool = False) -> None:
"""
compute intervals, peaks and ampitudes for all found events in a
trace or a group of traces
filter out events that are less than min_event_amplitude
"""
i_decay_pts = int(2.0 * self.taus[1] / self.dt_seconds) # decay window time (points) Units all seconds
assert i_decay_pts > 5
self.Summary = Summaries() # a single summary class is created
ndata = len(data)
# set up arrays : note construction to avoid "same memory but different index" problem
self.Summary.onsets = [[] for x in range(ndata)]
self.Summary.peaks = [[] for x in range(ndata)]
self.Summary.smoothed_peaks = [[] for x in range(ndata)]
self.Summary.smpkindex = [[] for x in range(ndata)]
self.Summary.amplitudes = [[] for x in range(ndata)]
self.Summary.filtered_traces = [[] for x in range(ndata)]
avgwin = (
5 # int(1.0/self.dt_seconds) # 5 point moving average window for peak detection
)
mwin = int((0.50) / self.dt_seconds)
if self.sign > 0:
nparg = np.greater
else:
nparg = np.less
self.intervals = []
self.timebase = np.arange(0., data.shape[1]*self.dt_seconds, self.dt_seconds)
nrejected_too_small = 0
for itrial, dataset in enumerate(data): # each trial/trace
if len(self.onsets[itrial]) == 0: # original events
continue
# cprint('c', f"Onsets found: {len(self.onsets[itrial]):d} in trial {itrial:d}")
acceptlist_trial = []
self.intervals.append(np.diff(self.timebase[self.onsets[itrial]])) # event intervals
# cprint('y', f"Summarize: trial: {itrial:d} onsets: {len(self.onsets[itrial]):d}")
# print('onsets: ', self.onsets[itrial])
ev_accept = []
for j, onset in enumerate(self.onsets[itrial]): # for all of the events in this trace
if self.sign > 0 and self.eventstartthr is not None:
if dataset[onset] < self.eventstartthr:
# print('pos sign: data onset < eventstartthr')
continue
if self.sign < 0 and self.eventstartthr is not None:
if dataset[onset] > -self.eventstartthr:
# print('neg sign: data onset > eventstartthr')
continue
event_data = dataset[onset : (onset + mwin)] # get this event
# print('onset, mwin: ', onset, mwin)
svwinlen = event_data.shape[0]
if svwinlen > 11:
svn = 11
else:
svn = svwinlen
if (
svn % 2 == 0
                ): # if even, decrease by 1 point to meet odd requirement for savgol_filter
svn -= 1
if svn > 3: # go ahead and filter
p = scipy.signal.argrelextrema(
scipy.signal.savgol_filter(
event_data, svn, 2
),
nparg,
order=order,
)[0]
else: # skip filtering
p = scipy.signal.argrelextrema(
event_data,
nparg,
order=order,
)[0]
# print('len(p): ', len(p), svn, event_data)
if len(p) > 0:
# print('p, idecay onset: ', len(p), i_decay_pts, onset)
i_end = i_decay_pts + onset # distance from peak to end
i_end = min(dataset.shape[0], i_end) # keep within the array limits
if j < len(self.onsets[itrial]) - 1:
if i_end > self.onsets[itrial][j + 1]:
i_end = (
self.onsets[itrial][j + 1] - 1
) # only go to next event start
windowed_data = dataset[onset : i_end]
# print('onset, iend: ', onset, i_end)
# import matplotlib.pyplot as mpl
# fx, axx = mpl.subplots(1,1)
# axx.plot(self.timebase[onset:i_end], dataset[onset:i_end], 'g-')
# mpl.show()
move_avg, n = self.moving_average(
windowed_data,
n=min(avgwin, len(windowed_data)),
)
# print('moveavg: ', move_avg)
# print(avgwin, len(windowed_data))
# print('windowed_data: ', windowed_data)
if self.sign > 0:
smpk = np.argmax(move_avg) # find peak of smoothed data
rawpk = np.argmax(windowed_data) # non-smoothed
else:
smpk = np.argmin(move_avg)
rawpk = np.argmin(windowed_data)
if self.sign*(move_avg[smpk] - windowed_data[0]) < self.min_event_amplitude:
nrejected_too_small += 1
# print(f"Event too small: {1e12*self.sign*(move_avg[smpk] - windowed_data[0]):6.1f} vs. thresj: {1e12*self.min_event_amplitude:6.1f} pA")
continue # filter out events smaller than the amplitude
else:
# print('accept: ', j)
ev_accept.append(j)
# cprint('m', f"Extending for trial: {itrial:d}, {len(self.Summary.onsets[itrial]):d}, onset={onset}")
self.Summary.onsets[itrial].append(onset)
self.Summary.peaks[itrial].append(onset + rawpk)
self.Summary.amplitudes[itrial].append(windowed_data[rawpk])
self.Summary.smpkindex[itrial].append(onset + smpk)
self.Summary.smoothed_peaks[itrial].append(move_avg[smpk])
acceptlist_trial.append(j)
self.onsets[itrial] = self.onsets[itrial][ev_accept] # reduce to the accepted values only
# self.Summary.smoothed_peaks = np.array(self.Summary.smoothed_peaks)
# self.Summary.amplitudes = np.array(self.Summary.amplitudes)
print(f"Rejected {nrejected_too_small:6d} events (threshold = {1e12*self.min_event_amplitude:6.1f} pA)")
self.average_events(
data,
)
# print(self.Summary.average.avgevent)
if self.Summary.average.averaged:
self.fit_average_event(
tb=self.Summary.average.avgeventtb,
avgevent=self.Summary.average.avgevent,
initdelay=0.,
debug=False)
else:
if verbose:
print("No events found")
return
def measure_events(self, data:object, eventlist: list) -> dict:
# compute simple measurements of events (area, amplitude, half-width)
#
# cprint('r', 'MEASURE EVENTS')
assert data.ndim == 1
self.measured = False
# treat like averaging
tdur = np.max((np.max(self.taus) * 5.0, 0.010)) # go 5 taus or 10 ms past event
tpre = 0.0 # self.taus[0]*10.
self.avgeventdur = tdur
self.tpre = tpre
self.avgnpts = int((tpre + tdur) / self.dt_seconds) # points for the average
npre = int(tpre / self.dt_seconds) # points for the pre time
npost = int(tdur / self.dt_seconds)
avg = np.zeros(self.avgnpts)
avgeventtb = np.arange(self.avgnpts) * self.dt_seconds
# assert True == False
allevents = np.zeros((len(eventlist), self.avgnpts))
k = 0
pkt = 0 # np.argmax(self.template) # accumulate
meas = {"Q": [], "A": [], "HWup": [], "HWdown": [], "HW": []}
for j, i in enumerate(eventlist):
ix = i + pkt # self.idelay
if (ix + npost) < len(self.data) and (ix - npre) >= 0:
allevents[k, :] = data[ix - npre : ix + npost]
k = k + 1
if k > 0:
allevents = allevents[0:k, :] # trim unused
for j in range(k):
ev_j = scipy.signal.savgol_filter(
self.sign * allevents[j, :], 7, 2, mode="nearest"
) # flip sign if negative
ai = np.argmax(ev_j)
if ai == 0:
continue # skip events where max is first point
q = np.sum(ev_j) * tdur
meas["Q"].append(q)
meas["A"].append(ev_j[ai])
hw_up = self.dt_seconds * np.argmin(np.fabs((ev_j[ai] / 2.0) - ev_j[:ai]))
hw_down = self.dt_seconds * np.argmin(np.fabs(ev_j[ai:] - (ev_j[ai] / 2.0)))
meas["HWup"].append(hw_up)
meas["HWdown"].append(hw_down)
meas["HW"].append(hw_up + hw_down)
self.measured = True
self.Summary.allevents = allevents
else:
self.measured = False
self.Summary.allevents = None
return meas
def average_events(self, data: np.ndarray) -> tuple:
"""
compute average event with length of template
Parameters
----------
eventlist : list
List of event onset indices into the arrays
Expect a 2-d list (traces x onsets)
"""
# cprint('r', 'AVERAGE EVENTS')
self.Summary.average.averaged = False
tdur = np.max((np.max(self.taus) * 5.0, 0.010)) # go 5 taus or 10 ms past event
tpre = 1e-3 # self.taus[0]*10.
avgeventdur = tdur
self.tpre = tpre
avgnpts = int((tpre + tdur) / self.dt_seconds) # points for the average
npre = int(tpre / self.dt_seconds) # points for the pre time
npost = int(tdur / self.dt_seconds)
print('npre, npost avgnpts: ', npre, npost, avgnpts)
avg = np.zeros(avgnpts)
avgeventtb = np.arange(avgnpts) * self.dt_seconds
n_events = sum([len(events) for events in self.Summary.onsets])
allevents = np.zeros((n_events, avgnpts))
event_trace = [[]]*n_events
k = 0
pkt = 0
n_incomplete_events = 0
for itrace, onsets in enumerate(self.Summary.onsets):
# cprint('c', f"Trace: {itrace: d}, # onsets: {len(onsets):d}")
for j, event_onset in enumerate(onsets):
ix = event_onset + pkt # self.idelay
# print('itrace, ix, npre, npost: ', itrace, ix, npre, npost, data[itrace].shape[0])
if (ix + npost) < data[itrace].shape[0] and (ix - npre) >= 0:
allevents[k, :] = data[itrace, (ix - npre) : (ix + npost)]
allevents[k, :] -= np.mean(allevents[k, 0:npre])
else:
allevents[k, :] = np.nan*allevents[k,:]
n_incomplete_events += 1
event_trace[k] = [itrace, j]
k = k + 1
if n_incomplete_events > 0:
cprint("y", f"{n_incomplete_events:d} were excluded because they were incomplete (too close to end of trace)")
# tr_incl = [u[0] for u in event_trace]
# print(set(tr_incl), len(set(tr_incl)), len(event_trace))
# exit()
# print('k: ', k)
if k > 0:
self.Summary.average.averaged = True
self.Summary.average.avgnpts = avgnpts
self.Summary.average.Nevents = k
self.Summary.allevents = allevents
self.Summary.average.avgeventtb = avgeventtb
avgevent = np.nanmean(allevents, axis=0)
# print(allevents)
# import matplotlib.pyplot as mpl
# f, ax = mpl.subplots(1,1)
# ax.plot(allevents.T, 'k', alpha=0.3)
# ax.plot(avgevent, 'r', linewidth=3)
# mpl.show()
# print(avgevent)
# exit(1)
self.Summary.average.avgevent = avgevent# - np.mean(avgevent[:3])
self.Summary.event_trace_list = event_trace
return
else:
self.Summary.average.avgnpts = 0
self.Summary.average.avgevent = []
self.Summary.average.allevents = []
self.Summary.average.avgeventtb = []
self.Summary.average.averaged = False
self.Summary.event_trace_list = []
return
def average_events_subset(self, data: np.ndarray, eventlist:list) -> tuple:
"""
compute average event with length of template
Parameters
----------
data:
1-d numpy array of the data
eventlist : list
List of event onset indices into the arrays
Expect a 1-d list (traces x onsets)
"""
assert data.ndim == 1
# cprint('r', 'AVERAGE EVENTS')
tdur = np.max((np.max(self.taus) * 5.0, 0.010)) # go 5 taus or 10 ms past event
tpre = 0.0 # self.taus[0]*10.
avgeventdur = tdur
self.tpre = tpre
avgnpts = int((tpre + tdur) / self.dt_seconds) # points for the average
npre = int(tpre / self.dt_seconds) # points for the pre time
npost = int(tdur / self.dt_seconds)
avg = np.zeros(avgnpts)
avgeventtb = np.arange(avgnpts) * self.dt_seconds
n_events = sum([len(events) for events in self.Summary.onsets])
allevents = np.zeros((n_events, avgnpts))
event_trace = [None]*n_events
k = 0
pkt = 0
for itrace, event_onset in enumerate(eventlist):
# cprint('c', f"Trace: {itrace: d}, # onsets: {len(onsets):d}")
ix = event_onset + pkt # self.idelay
# print('itrace, ix, npre, npost: ', itrace, ix, npre, npost)
if (ix + npost) < data.shape[0] and (ix - npre) >= 0:
allevents[k, :] = data[(ix - npre) : (ix + npost)]
k = k + 1
return np.mean(allevents, axis=0), avgeventtb, allevents
def doubleexp(
self,
p: list,
x: np.ndarray,
y: Union[None, np.ndarray],
risepower: float,
fixed_delay: float = 0.0,
mode: int = 0,
) -> np.ndarray:
"""
Calculate a double expoential EPSC-like waveform with the rise to a power
to make it sigmoidal
"""
# fixed_delay = p[3] # allow to adjust; ignore input value
ix = np.argmin(np.fabs(x - fixed_delay))
tm = np.zeros_like(x)
tm[ix:] = p[0] * (1.0 - np.exp(-(x[ix:] - fixed_delay) / p[1])) ** risepower
tm[ix:] *= np.exp(-(x[ix:] - fixed_delay) / p[2])
if mode == 0:
return tm - y
elif mode == 1:
return np.linalg.norm(tm - y)
elif mode == -1:
return tm
else:
raise ValueError(
"doubleexp: Mode must be 0 (diff), 1 (linalg.norm) or -1 (just value)"
)
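    # In math form (restating the code above): for t >= fixed_delay,
    #   f(t) = p[0] * (1 - exp(-(t - fixed_delay) / p[1]))**risepower
    #               * exp(-(t - fixed_delay) / p[2])
    # so p = [amplitude, rise tau, decay tau]; mode selects the residual
    # vector (0), its norm (1), or the raw waveform (-1).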
def risefit(
self,
p: list,
x: np.ndarray,
y: Union[None, np.ndarray],
risepower: float,
mode: int = 0,
) -> np.ndarray:
"""
Calculate a delayed EPSC-like waveform rise shape with the rise to a power
to make it sigmoidal, and an adjustable delay
input data should only be the rising phase.
p is in order: [amplitude, tau, delay]
"""
assert mode in [-1, 0, 1]
# if np.isnan(p[0]):
# try:
# x = 1./p[0]
# except Exception as e:
# track = traceback.format_exc()
# print(track)
# exit(0)
# # assert not np.isnan(p[0])
ix = np.argmin(np.fabs(x - p[2]))
tm = np.zeros_like(x)
expf = (x[ix:] - p[2]) / p[1]
pclip = 1.0e3
nclip = 0.0
try:
expf[expf > pclip] = pclip
expf[expf < -nclip] = -nclip
except:
print(pclip, nclip)
print(expf)
exit(1)
tm[ix:] = p[0] * (1.0 - np.exp(-expf)) ** risepower
if mode == 0:
return tm - y
elif mode == 1:
return np.linalg.norm(tm - y)
elif mode == -1:
return tm
else:
raise ValueError(
"doubleexp: Mode must be 0 (diff), 1 (linalg.norm) or -1 (just value)"
)
def decayexp(
self,
p: list,
x: np.ndarray,
y: Union[None, np.ndarray],
fixed_delay: float = 0.0,
mode: int = 0,
):
"""
Calculate an exponential decay (falling phase fit)
"""
tm = p[0] * np.exp(-(x - fixed_delay) / p[1])
if mode == 0:
return tm - y
elif mode == 1:
return np.linalg.norm(tm - y)
elif mode == -1:
return tm
else:
raise ValueError(
"doubleexp: Mode must be 0 (diff), 1 (linalg.norm) or -1 (just value)"
)
def fit_average_event(
self,
tb,
avgevent,
debug: bool = False,
label: str = "",
inittaus: List = [0.001, 0.005],
initdelay: Union[float, None] = None,
) -> None:
"""
Fit the averaged event to a double exponential epsc-like function
Operates on the AverageEvent data structure
"""
# tsel = np.argwhere(self.avgeventtb > self.tpre)[0] # only fit data in event, not baseline
tsel = 0 # use whole averaged trace
self.tsel = tsel
self.tau1 = inittaus[0]
self.tau2 = inittaus[1]
self.tau2_range = 10.0
self.tau1_minimum_factor = 5.0
time_past_peak = 2.5e-4
self.fitted_tau1 = np.nan
self.fitted_tau2 = np.nan
self.Amplitude = np.nan
# peak_pos = np.argmax(self.sign*self.avgevent[self.tsel:])
# decay_fit_start = peak_pos + int(time_past_peak/self.dt_seconds)
# init_vals = [self.sign*10., 1.0, 4., 0.]
# init_vals_exp = [20., 5.0]
# bounds_exp = [(0., 0.5), (10000., 50.)]
cprint('m', 'Fitting average event')
res, rdelay = self.event_fitter(
tb,
avgevent,
time_past_peak=time_past_peak,
initdelay=initdelay,
debug=debug,
label=label,
)
# print('rdelay: ', rdelay)
if res is None:
cprint('r', 'average fit result is None')
self.fitted = False
return
self.fitresult = res.x
self.Amplitude = self.fitresult[0]
self.fitted_tau1 = self.fitresult[1]
self.fitted_tau2 = self.fitresult[2]
self.bfdelay = rdelay
self.avg_best_fit = self.doubleexp(
self.fitresult,
tb[self.tsel :],
np.zeros_like(tb[self.tsel :]),
risepower=self.risepower,
mode=0,
fixed_delay=self.bfdelay,
)
self.avg_best_fit = self.sign * self.avg_best_fit
fiterr = np.linalg.norm(self.avg_best_fit -
avgevent[self.tsel :])
self.avg_fiterr = fiterr
ave = self.sign * avgevent
ipk = np.argmax(ave)
pk = ave[ipk]
p10 = 0.1 * pk
p90 = 0.9 * pk
p37 = 0.37 * pk
try:
i10 = np.argmin(np.fabs(ave[:ipk] - p10))
except:
self.fitted = False
return
i90 = np.argmin(np.fabs(ave[:ipk] - p90))
i37 = np.argmin(np.fabs(ave[ipk:] - p37))
self.risetenninety = self.dt_seconds * (i90 - i10)
self.decaythirtyseven = self.dt_seconds * (i37 - ipk)
self.Qtotal = self.dt_seconds * np.sum(avgevent[self.tsel :])
self.fitted = True
def fit_individual_events(self, onsets: np.ndarray) -> None:
"""
Fitting individual events
Events to be fit are selected from the entire event pool as:
1. events that are completely within the trace, AND
2. events that do not overlap other events
Fit events are further classified according to the fit error
"""
if (
not self.averaged or not self.fitted
): # averaging should be done first: stores events for convenience and gives some tau estimates
print("Require fit of averaged events prior to fitting individual events")
raise (ValueError)
time_past_peak = 0.75 # msec - time after peak to start fitting
# allocate arrays for results. Arrays have space for ALL events
# okevents, notok, and evok are indices
nevents = len(self.Summary.allevents) # onsets.shape[0]
self.ev_fitamp = np.zeros(nevents) # measured peak amplitude from the fit
self.ev_A_fitamp = np.zeros(
nevents
) # fit amplitude - raw value can be quite different than true amplitude.....
self.ev_tau1 = np.zeros(nevents)
self.ev_tau2 = np.zeros(nevents)
self.ev_1090 = np.zeros(nevents)
self.ev_2080 = np.zeros(nevents)
self.ev_amp = np.zeros(nevents) # measured peak amplitude from the event itself
self.ev_Qtotal = np.zeros(
nevents
) # measured charge of the event (integral of current * dt)
self.fiterr = np.zeros(nevents)
self.bfdelay = np.zeros(nevents)
self.best_fit = np.zeros((nevents, self.avgeventtb.shape[0]))
self.best_decay_fit = np.zeros((nevents, self.avgeventtb.shape[0]))
self.tsel = 0
self.tau2_range = 10.0
self.tau1_minimum_factor = 5.0
# prescreen events
minint = self.avgeventdur # msec minimum interval between events.
self.fitted_events = (
[]
) # events that can be used (may not be all events, but these are the events that were fit)
for i in range(nevents):
te = self.timebase[onsets[i]] # get current event
try:
tn = self.timebase[onsets[i + 1]] # check time to next event
if tn - te < minint: # event is followed by too soon by another event
continue
except:
pass # just handle trace end condition
try:
tp = self.timebase[onsets[i - 1]] # check previous event
if (
te - tp < minint
): # if current event too close to a previous event, skip
continue
self.fitted_events.append(i) # passes test, include in ok events
except:
pass
for n, i in enumerate(self.fitted_events):
try:
max_event = np.max(self.sign * self.Summary.allevents[i, :])
except:
print("minis_methods eventfitter")
print("fitted: ", self.fitted_events)
print("i: ", i)
print("allev: ", self.Summary.allevents)
print("len allev: ", len(self.Summary.allevents), len(onsets))
                raise ValueError('Fit failed')
res, rdelay = self.event_fitter(
                self.avgeventtb, self.Summary.allevents[i, :], time_past_peak=time_past_peak
)
if res is None: # skip events that won't fit
continue
self.fitresult = res.x
# lmfit version - fails for odd reason
# dexpmodel = Model(self.doubleexp)
# params = dexpmodel.make_params(A=-10., tau_1=0.5, tau_2=4.0, dc=0.)
# self.fitresult = dexpmodel.fit(self.avgevent[tsel:], params, x=self.avgeventtb[tsel:])
self.ev_A_fitamp[i] = self.fitresult[0]
self.ev_tau1[i] = self.fitresult[1]
self.ev_tau2[i] = self.fitresult[2]
self.bfdelay[i] = rdelay
self.fiterr[i] = self.doubleexp(
self.fitresult,
self.avgeventtb,
self.sign * self.Summary.allevents[i, :],
risepower=self.risepower,
fixed_delay=self.bfdelay[i],
mode=1,
)
self.best_fit[i] = self.doubleexp(
self.fitresult,
self.avgeventtb,
                np.zeros_like(self.avgeventtb)
"""
Outlier outlier_plotting in seaborn.
"""
import warnings
import numpy as np
import seaborn as sns
from matplotlib import pyplot as plt
# noinspection PyProtectedMember
from seaborn.categorical import _CategoricalPlotter
import pandas as pd
from typing import Union, List, Tuple
def _plot_outliers(ax, outliers, plot_extents: np.ndarray, orient: str = 'v',
group: int = 0, padding: float = .05, outlier_hues: List = None, fmt: str = '.2g'):
def _vals_to_str(vals, val_categories):
def _format_val(val):
return ("{:" + fmt + "}").format(val)
if val_categories is None:
vals = sorted(vals, reverse=True)
return '\n'.join([_format_val(val) for val in vals])
texts = []
df = pd.DataFrame({'val': vals, 'cat': val_categories})
for cat in sorted(df.cat.unique()):
cat_vals = df[df.cat == cat].val
texts.append(str(cat) + ':\t' + '\n\t'.join([_format_val(val) for val in cat_vals]))
return '\n'.join(texts).expandtabs()
def _add_margin(lo: float, hi: float, mrg: float = .1, rng: Union[None, float] = None):
rng = hi - lo if rng is None else rng
return lo - mrg * rng, hi + mrg * rng
def _set_limits(t: plt.text):
def _get_bbox_pos():
plt.gcf().canvas.draw()
return t.get_window_extent().inverse_transformed(plt.gca().transData)
old_extents = ax.get_ylim() if is_v else ax.get_xlim()
val_coords = np.array(_get_bbox_pos()).T[dim_sel]
new_extents = [np.min([val_coords, old_extents]), np.max([val_coords, old_extents])]
lim_setter = ax.set_ylim if is_v else ax.set_xlim
        # if the new extents are more than padding + .5 (relative) larger than the old extents, assume _get_bbox_pos failed.
if (np.diff(new_extents) / np.diff(old_extents)) > 1 + padding + .5:
warnings.warn('Determining text position failed, cannot set new plot extent. '
'Please modify margin if any text is cut off.'.format(padding))
new_extents = old_extents
lim_setter(new_extents)
def _plot_text(is_low: bool):
is_relevant = outliers < vmin if is_low else outliers > vmax
points = outliers[is_relevant]
point_hues = None if outlier_hues is None else outlier_hues[is_relevant]
if not len(points) > 0:
return
val_pos = _add_margin(vmin, vmax, padding)[0 if is_low else 1]
props = dict(boxstyle='round', facecolor='wheat', alpha=0.1)
text = _vals_to_str(points, point_hues)
if is_v:
t = ax.text(group, val_pos, text, ha='center', multialignment='right',
va='top' if is_low else 'bottom', bbox=props)
else:
t = ax.text(val_pos, group, text, va='center', multialignment='right',
ha='right' if is_low else 'left', bbox=props)
_set_limits(t)
is_v = orient == 'v'
dim_sel = 1 if is_v else 0
vmin, vmax = plot_extents[dim_sel]
_plot_text(True), _plot_text(False)
def _add_margins(ax: plt.Axes, plot_data: np.ndarray, cutoff_lo: float, cutoff_hi: float, orient: str, margin: float):
def _quantized_abs_ceil(x, q=0.5):
return np.ceil(np.abs(x) / q) * q * np.sign(x)
if orient == 'v':
old_extents, lim_setter = ax.get_ylim(), ax.set_ylim
ax.set_xlim(*list(map(_quantized_abs_ceil, ax.get_xlim())))
else:
old_extents, lim_setter = ax.get_xlim(), ax.set_xlim
ax.set_ylim(*list(map(_quantized_abs_ceil, ax.get_ylim())))
if np.min(plot_data) < cutoff_lo:
lim_setter([old_extents[0] - margin * np.diff(old_extents), None])
if np.max(plot_data) > cutoff_hi:
lim_setter([None, old_extents[1] + margin * np.diff(old_extents)])
def _get_inlier_data(data: pd.Series, plot_data, cutoff_lo: float, cutoff_hi: float) -> \
Union[pd.Series, pd.DataFrame]:
inlier_data = data[np.logical_and(cutoff_lo <= plot_data, plot_data <= cutoff_hi)]
if len(inlier_data) == 0:
raise UserWarning('No inliers in data, please modify inlier_range!')
return inlier_data.reset_index(drop=True)
def handle_outliers(data: Union[pd.DataFrame, pd.Series, np.ndarray, None] = None,
x: Union[pd.Series, np.ndarray, str, None] = None,
y: Union[pd.Series, np.ndarray, str, None] = None,
hue: Union[pd.Series, np.ndarray, str, None] = None, plotter: callable = sns.swarmplot,
inlier_range: float = 1.5, padding: float = .05,
margin: float = .1, fmt='.2g', **kwargs) -> plt.axes:
"""
Remove outliers from the plot and show them as text boxes. Works well with `sns.violinplot`, `sns.swarmplot`,
`sns.boxplot` and the like. Does *not* work with axis grids.
data: pd.DataFrame
Dataset for outlier_plotting. Expected to be long-form.
x: str
names of x variables in data
y: str
names of y variables in data
hue: str
names of hue variables in data. Not fully supported.
plotter: callable
`seaborn` outlier_plotting function that works with long-form data.
inlier_range: float
Proportion of the IQR past the low and high quartiles to extend the original plot. Points outside this range
will be identified as outliers.
padding: float
Padding in % of figure size between original plot and text boxes.
margin: float
Margin in % of figure size between text boxes and axis extent.
fmt: str
String formatting code to use when adding annotations.
kwargs: key, value mappings
Other keyword arguments are passed through to the plotter.
Returns
-------
ax: matplotlib Axes
The Axes object with the plot drawn onto it.
"""
def _get_info() -> Tuple[Union[str, None], Union[str, None], List, str, Union[str, None]]:
cp: _CategoricalPlotter = _CategoricalPlotter()
cp.establish_variables(x=x, y=y, data=data, hue=hue)
orientation = 'h' if plotter == sns.kdeplot else cp.orient
return cp.value_label, cp.group_label, cp.group_names, orientation, cp.hue_title
def _get_plot_data() -> np.ndarray:
if data is not None:
return data[value_label].values if value_label is not None else np.array(data)
ret = kwargs[_which_data_var()]
return ret.values if type(ret) == pd.Series else ret
def _which_data_var() -> str:
if data is not None:
return 'data'
else:
if x is not None and y is not None:
return 'y' if orient == 'v' else 'x'
return 'y' if x is None else 'x'
def _which_group_var() -> str:
if _which_data_var() == 'data':
return 'data'
return 'x' if _which_data_var() == 'y' else 'y'
def _get_cutoffs(a: np.array, quantiles=(.25, .75)) -> Tuple[float, float]:
quartiles = np.quantile(a, list(quantiles))
        iqr = np.diff(quartiles)
"""
Base module for the DMD: `fit` method must be implemented in inherited classes
"""
from __future__ import division
from builtins import object
from builtins import range
from os.path import splitext
import warnings
import pickle
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from past.utils import old_div
from .dmdoperator import DMDOperator
mpl.rcParams["figure.max_open_warning"] = 0
class DMDBase(object):
"""
Dynamic Mode Decomposition base class.
:param svd_rank: the rank for the truncation; If 0, the method computes the
        optimal rank and uses it for truncation; if positive integer, the
method uses the argument for the truncation; if float between 0 and 1,
the rank is the number of the biggest singular values that are needed
to reach the 'energy' specified by `svd_rank`; if -1, the method does
not compute truncation.
:type svd_rank: int or float
:param int tlsq_rank: rank truncation computing Total Least Square. Default
is 0, that means no truncation.
:param bool exact: flag to compute either exact DMD or projected DMD.
Default is False.
:param opt: If True, amplitudes are computed like in optimized DMD (see
:func:`~dmdbase.DMDBase._compute_amplitudes` for reference). If
False, amplitudes are computed following the standard algorithm. If
`opt` is an integer, it is used as the (temporal) index of the snapshot
used to compute DMD modes amplitudes (following the standard
algorithm).
The reconstruction will generally be better in time instants near the
chosen snapshot; however increasing `opt` may lead to wrong results
when the system presents small eigenvalues. For this reason a manual
        selection of the number of eigenvalues considered for the analysis may
be needed (check `svd_rank`). Also setting `svd_rank` to a value
between 0 and 1 may give better results. Default is False.
:type opt: bool or int
:param rescale_mode: Scale Atilde as shown in
10.1016/j.jneumeth.2015.10.010 (section 2.4) before computing its
eigendecomposition. None means no rescaling, 'auto' means automatic
rescaling using singular values, otherwise the scaling factors.
:type rescale_mode: {'auto'} or None or numpy.ndarray
:param bool forward_backward: If True, the low-rank operator is computed
like in fbDMD (reference: https://arxiv.org/abs/1507.02264). Default is
False.
:param sorted_eigs: Sort eigenvalues (and modes/dynamics accordingly) by
magnitude if `sorted_eigs='abs'`, by real part (and then by imaginary
part to break ties) if `sorted_eigs='real'`. Default: False.
:type sorted_eigs: {'real', 'abs'} or False
:cvar dict original_time: dictionary that contains information about the
time window where the system is sampled:
- `t0` is the time of the first input snapshot;
- `tend` is the time of the last input snapshot;
- `dt` is the delta time between the snapshots.
:cvar dict dmd_time: dictionary that contains information about the time
window where the system is reconstructed:
- `t0` is the time of the first approximated solution;
- `tend` is the time of the last approximated solution;
- `dt` is the delta time between the approximated solutions.
"""
def __init__(
self,
svd_rank=0,
tlsq_rank=0,
exact=False,
opt=False,
rescale_mode=None,
forward_backward=False,
sorted_eigs=False,
):
self._Atilde = DMDOperator(
svd_rank=svd_rank,
exact=exact,
rescale_mode=rescale_mode,
forward_backward=forward_backward,
sorted_eigs=sorted_eigs,
)
self._tlsq_rank = tlsq_rank
self.original_time = None
self.dmd_time = None
self._opt = opt
self._b = None # amplitudes
self._snapshots = None
self._snapshots_shape = None
@property
def opt(self):
return self._opt
@property
def tlsq_rank(self):
return self._tlsq_rank
@property
def svd_rank(self):
return self.operator._svd_rank
@property
def rescale_mode(self):
return self.operator._rescale_mode
@property
def exact(self):
return self.operator._exact
@property
def forward_backward(self):
return self.operator._forward_backward
@property
def dmd_timesteps(self):
"""
Get the timesteps of the reconstructed states.
:return: the time intervals of the original snapshots.
:rtype: numpy.ndarray
"""
return np.arange(
self.dmd_time["t0"],
self.dmd_time["tend"] + self.dmd_time["dt"],
self.dmd_time["dt"],
)
@property
def original_timesteps(self):
"""
Get the timesteps of the original snapshot.
:return: the time intervals of the original snapshots.
:rtype: numpy.ndarray
"""
return np.arange(
self.original_time["t0"],
self.original_time["tend"] + self.original_time["dt"],
self.original_time["dt"],
)
@property
def modes(self):
"""
Get the matrix containing the DMD modes, stored by column.
:return: the matrix containing the DMD modes.
:rtype: numpy.ndarray
"""
return self.operator.modes
@property
def atilde(self):
"""
Get the reduced Koopman operator A, called A tilde.
:return: the reduced Koopman operator A.
:rtype: numpy.ndarray
"""
return self.operator.as_numpy_array
@property
def operator(self):
"""
Get the instance of DMDOperator.
:return: the instance of DMDOperator
:rtype: DMDOperator
"""
return self._Atilde
@property
def eigs(self):
"""
Get the eigenvalues of A tilde.
:return: the eigenvalues from the eigendecomposition of `atilde`.
:rtype: numpy.ndarray
"""
return self.operator.eigenvalues
def _translate_eigs_exponent(self, tpow):
"""
Transforms the exponent of the eigenvalues in the dynamics formula
according to the selected value of `self.opt` (check the documentation
for `opt` in :func:`__init__ <dmdbase.DMDBase.__init__>`).
:param tpow: the exponent(s) of Sigma in the original DMD formula.
:type tpow: int or np.ndarray
:return: the exponent(s) adjusted according to `self.opt`
:rtype: int or np.ndarray
"""
if isinstance(self.opt, bool):
amplitudes_snapshot_index = 0
else:
amplitudes_snapshot_index = self.opt
if amplitudes_snapshot_index < 0:
# we take care of negative indexes: -n becomes T - n
return tpow - (self.snapshots.shape[1] + amplitudes_snapshot_index)
else:
return tpow - amplitudes_snapshot_index
@property
def dynamics(self):
"""
Get the time evolution of each mode.
.. math::
\\mathbf{x}(t) \\approx
\\sum_{k=1}^{r} \\boldsymbol{\\phi}_{k} \\exp \\left( \\omega_{k} t
\\right) b_{k} = \\sum_{k=1}^{r} \\boldsymbol{\\phi}_{k} \\left(
\\lambda_{k} \\right)^{\\left( t / \\Delta t \\right)} b_{k}
:return: the matrix that contains all the time evolution, stored by
row.
:rtype: numpy.ndarray
"""
temp = np.repeat(
self.eigs[:, None], self.dmd_timesteps.shape[0], axis=1
)
tpow = old_div(
self.dmd_timesteps - self.original_time["t0"],
self.original_time["dt"],
)
# The new formula is x_(k+j) = \Phi \Lambda^k \Phi^(-1) x_j.
# Since j is fixed, for a given snapshot "u" we have the following
# formula:
# x_u = \Phi \Lambda^{u-j} \Phi^(-1) x_j
# Therefore tpow must be scaled appropriately.
tpow = self._translate_eigs_exponent(tpow)
return np.power(temp, tpow) * self.amplitudes[:, None]
@property
def reconstructed_data(self):
"""
Get the reconstructed data.
:return: the matrix that contains the reconstructed snapshots.
:rtype: numpy.ndarray
"""
return self.modes.dot(self.dynamics)
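    # Example usage (a sketch with a concrete subclass such as pydmd.DMD,
    # which implements fit(); X holds one snapshot per column):
    #   dmd = DMD(svd_rank=2)
    #   dmd.fit(X)
    #   dmd.dmd_time['tend'] *= 2           # extend the reconstruction window
    #   X_hat = dmd.reconstructed_data      # modes @ dynamics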
@property
def snapshots(self):
"""
Get the original input data.
:return: the matrix that contains the original snapshots.
:rtype: numpy.ndarray
"""
return self._snapshots
@property
def frequency(self):
"""
Get the amplitude spectrum.
:return: the array that contains the frequencies of the eigenvalues.
:rtype: numpy.ndarray
"""
return np.log(self.eigs).imag / (2 * np.pi * self.original_time["dt"])
@property
def growth_rate(self): # To check
"""
Get the growth rate values relative to the modes.
:return: the Floquet values
:rtype: numpy.ndarray
"""
return self.eigs.real / self.original_time["dt"]
@property
def amplitudes(self):
"""
Get the coefficients that minimize the error between the original
        system and the reconstructed one. For further information, see
`dmdbase._compute_amplitudes`.
:return: the array that contains the amplitudes coefficient.
:rtype: numpy.ndarray
"""
return self._b
def fit(self, X):
"""
Abstract method to fit the snapshots matrices.
Not implemented, it has to be implemented in subclasses.
"""
raise NotImplementedError(
"Subclass must implement abstract method {}.fit".format(
self.__class__.__name__
)
)
def save(self, fname):
"""
Save the object to `fname` using the pickle module.
:param str fname: the name of file where the reduced order model will
be saved.
Example:
>>> from pydmd import DMD
>>> dmd = DMD(...) # Construct here the rom
>>> dmd.fit(...)
>>> dmd.save('pydmd.dmd')
"""
with open(fname, "wb") as output:
pickle.dump(self, output, pickle.HIGHEST_PROTOCOL)
@staticmethod
def load(fname):
"""
Load the object from `fname` using the pickle module.
:return: The `ReducedOrderModel` loaded
Example:
>>> from pydmd import DMD
>>> dmd = DMD.load('pydmd.dmd')
>>> print(dmd.reconstructed_data)
"""
with open(fname, "rb") as output:
dmd = pickle.load(output)
return dmd
@staticmethod
def _col_major_2darray(X):
"""
Private method that takes as input the snapshots and stores them into a
2D matrix, by column. If the input data is already formatted as 2D
array, the method saves it, otherwise it also saves the original
snapshots shape and reshapes the snapshots.
:param X: the input snapshots.
        :type X: numpy.ndarray or iterable of numpy.ndarray
        :return: the 2D matrix that contains the flattened snapshots, the shape
of original snapshots.
:rtype: numpy.ndarray, tuple
"""
# If the data is already 2D ndarray
if isinstance(X, np.ndarray) and X.ndim == 2:
snapshots = X
snapshots_shape = None
else:
input_shapes = [np.asarray(x).shape for x in X]
if len(set(input_shapes)) != 1:
raise ValueError("Snapshots have not the same dimension.")
snapshots_shape = input_shapes[0]
snapshots = np.transpose([np.asarray(x).flatten() for x in X])
# check condition number of the data passed in
cond_number = np.linalg.cond(snapshots)
if cond_number > 10e4:
warnings.warn(
"Input data matrix X has condition number {}. "
"""Consider preprocessing data, passing in augmented data
matrix, or regularization methods.""".format(
cond_number
)
)
return snapshots, snapshots_shape
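    # Illustrative example (assumed shapes, not part of the original source): passing a
    # list of ten 64x64 snapshot arrays returns a (4096, 10) matrix together with
    # snapshots_shape == (64, 64); passing an already-2D array returns it unchanged with
    # snapshots_shape == None.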
def _optimal_dmd_matrixes(self):
# compute the vandermonde matrix
vander = np.vander(self.eigs, len(self.dmd_timesteps), True)
# perform svd on all the snapshots
U, s, V = np.linalg.svd(self._snapshots, full_matrices=False)
P = np.multiply(
np.dot(self.modes.conj().T, self.modes),
np.conj(np.dot(vander, vander.conj().T)),
)
tmp = np.linalg.multi_dot([U, np.diag(s), V]).conj().T
q = np.conj(np.diag(np.linalg.multi_dot([vander, tmp, self.modes])))
return P, q
def _compute_amplitudes(self):
"""
Compute the amplitude coefficients. If `self.opt` is False the
amplitudes are computed by minimizing the error between the modes and
the first snapshot; if `self.opt` is True the amplitudes are computed
by minimizing the error between the modes and all the snapshots, at the
        expense of a higher computational cost.
This method uses the class variables self._snapshots (for the
snapshots), self.modes and self.eigs.
:return: the amplitudes array
:rtype: numpy.ndarray
References for optimal amplitudes:
Jovanovic et al. 2014, Sparsity-promoting dynamic mode decomposition,
https://hal-polytechnique.archives-ouvertes.fr/hal-00995141/document
"""
if isinstance(self.opt, bool) and self.opt:
# b optimal
a = np.linalg.solve(*self._optimal_dmd_matrixes())
else:
if isinstance(self.opt, bool):
amplitudes_snapshot_index = 0
else:
amplitudes_snapshot_index = self.opt
a = np.linalg.lstsq(
self.modes,
self._snapshots.T[amplitudes_snapshot_index],
rcond=None,
)[0]
return a
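    # Sketch of the optimal-amplitudes branch above (illustrative, assuming a fitted
    # instance): P and q are the normal-equation terms of
    #   min_b || snapshots - modes @ diag(b) @ vander ||_F   (Jovanovic et al. 2014),
    # so the optimal amplitudes come from solving the linear system P b = q:
    #
    #   P, q = self._optimal_dmd_matrixes()
    #   b = np.linalg.solve(P, q)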
def _enforce_ratio(self, goal_ratio, supx, infx, supy, infy):
"""
Computes the right value of `supx,infx,supy,infy` to obtain the desired
ratio in :func:`plot_eigs`. Ratio is defined as
::
dx = supx - infx
dy = supy - infy
max(dx,dy) / min(dx,dy)
:param float goal_ratio: the desired ratio.
:param float supx: the old value of `supx`, to be adjusted.
:param float infx: the old value of `infx`, to be adjusted.
:param float supy: the old value of `supy`, to be adjusted.
:param float infy: the old value of `infy`, to be adjusted.
:return tuple: a tuple which contains the updated values of
`supx,infx,supy,infy` in this order.
"""
dx = supx - infx
if dx == 0:
dx = 1.0e-16
dy = supy - infy
if dy == 0:
dy = 1.0e-16
ratio = max(dx, dy) / min(dx, dy)
if ratio >= goal_ratio:
if dx < dy:
goal_size = dy / goal_ratio
supx += (goal_size - dx) / 2
infx -= (goal_size - dx) / 2
elif dy < dx:
goal_size = dx / goal_ratio
supy += (goal_size - dy) / 2
infy -= (goal_size - dy) / 2
return (supx, infx, supy, infy)
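    # Worked example (illustrative): with dx = 1, dy = 10 and goal_ratio = 8 the ratio
    # 10 exceeds the goal, so goal_size = 10 / 8 = 1.25 and supx/infx are each moved
    # outward by (1.25 - 1) / 2 = 0.125, giving dx = 1.25 and a final ratio of exactly 8.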
def _plot_limits(self, narrow_view):
if narrow_view:
supx = max(self.eigs.real) + 0.05
infx = min(self.eigs.real) - 0.05
supy = max(self.eigs.imag) + 0.05
infy = min(self.eigs.imag) - 0.05
return self._enforce_ratio(8, supx, infx, supy, infy)
else:
return np.max(np.ceil(np.absolute(self.eigs)))
def plot_eigs(
self,
show_axes=True,
show_unit_circle=True,
figsize=(8, 8),
title="",
narrow_view=False,
dpi=None,
filename=None,
):
"""
Plot the eigenvalues.
        :param bool show_axes: if True, the axes will be shown in the plot.
            Default is True.
        :param bool show_unit_circle: if True, the circle with unit radius
            centered at the origin will be shown. Default is True.
        :param tuple(int,int) figsize: tuple in inches defining the figure
            size. Default is (8, 8).
        :param str title: title of the plot.
        :param bool narrow_view: if True, the plot will show only the smallest
            rectangular area which contains all the eigenvalues, with a padding
            of 0.05. Not compatible with `show_axes=True`. Default is False.
        :param int dpi: if not None, the given value is passed to
            ``plt.figure``.
        :param str filename: if specified, the plot is saved at `filename`.
"""
if self.eigs is None:
raise ValueError(
"The eigenvalues have not been computed."
"You have to perform the fit method."
)
if dpi is not None:
plt.figure(figsize=figsize, dpi=dpi)
else:
plt.figure(figsize=figsize)
plt.title(title)
plt.gcf()
ax = plt.gca()
(points,) = ax.plot(
self.eigs.real, self.eigs.imag, "bo", label="Eigenvalues"
)
if narrow_view:
supx, infx, supy, infy = self._plot_limits(narrow_view)
# set limits for axis
ax.set_xlim((infx, supx))
ax.set_ylim((infy, supy))
# x and y axes
if show_axes:
endx = np.min([supx, 1.0])
ax.annotate(
"",
xy=(endx, 0.0),
xytext=(np.max([infx, -1.0]), 0.0),
arrowprops=dict(arrowstyle=("->" if endx == 1.0 else "-")),
)
endy = np.min([supy, 1.0])
ax.annotate(
"",
xy=(0.0, endy),
xytext=(0.0, np.max([infy, -1.0])),
arrowprops=dict(arrowstyle=("->" if endy == 1.0 else "-")),
)
else:
# set limits for axis
limit = self._plot_limits(narrow_view)
ax.set_xlim((-limit, limit))
ax.set_ylim((-limit, limit))
# x and y axes
if show_axes:
ax.annotate(
"",
xy=(np.max([limit * 0.8, 1.0]), 0.0),
xytext=(np.min([-limit * 0.8, -1.0]), 0.0),
arrowprops=dict(arrowstyle="->"),
)
ax.annotate(
"",
xy=(0.0, np.max([limit * 0.8, 1.0])),
xytext=(0.0, np.min([-limit * 0.8, -1.0])),
arrowprops=dict(arrowstyle="->"),
)
plt.ylabel("Imaginary part")
plt.xlabel("Real part")
if show_unit_circle:
unit_circle = plt.Circle(
(0.0, 0.0),
1.0,
color="green",
fill=False,
label="Unit circle",
linestyle="--",
)
ax.add_artist(unit_circle)
# Dashed grid
gridlines = ax.get_xgridlines() + ax.get_ygridlines()
for line in gridlines:
line.set_linestyle("-.")
ax.grid(True)
# legend
if show_unit_circle:
ax.add_artist(
plt.legend(
[points, unit_circle],
["Eigenvalues", "Unit circle"],
loc="best",
)
)
else:
ax.add_artist(plt.legend([points], ["Eigenvalues"], loc="best"))
ax.set_aspect("equal")
if filename:
plt.savefig(filename)
else:
plt.show()
def plot_modes_2D(
self,
index_mode=None,
filename=None,
x=None,
y=None,
order="C",
figsize=(8, 8),
):
"""
Plot the DMD Modes.
:param index_mode: the index of the modes to plot. By default, all
the modes are plotted.
:type index_mode: int or sequence(int)
:param str filename: if specified, the plot is saved at `filename`.
:param numpy.ndarray x: domain abscissa.
:param numpy.ndarray y: domain ordinate
:param order: read the elements of snapshots using this index order,
and place the elements into the reshaped array using this index
order. It has to be the same used to store the snapshot. 'C' means
to read/ write the elements using C-like index order, with the last
axis index changing fastest, back to the first axis index changing
slowest. 'F' means to read / write the elements using Fortran-like
index order, with the first index changing fastest, and the last
index changing slowest. Note that the 'C' and 'F' options take no
account of the memory layout of the underlying array, and only
refer to the order of indexing. 'A' means to read / write the
elements in Fortran-like index order if a is Fortran contiguous in
memory, C-like order otherwise.
:type order: {'C', 'F', 'A'}, default 'C'.
:param tuple(int,int) figsize: tuple in inches defining the figure
size. Default is (8, 8).
"""
if self.modes is None:
raise ValueError(
"The modes have not been computed."
"You have to perform the fit method."
)
if x is None and y is None:
if self._snapshots_shape is None:
raise ValueError(
"No information about the original shape of the snapshots."
)
if len(self._snapshots_shape) != 2:
raise ValueError(
"The dimension of the input snapshots is not 2D."
)
# If domain dimensions have not been passed as argument,
# use the snapshots dimensions
if x is None and y is None:
            x = np.arange(self._snapshots_shape[0])
import pytest
import numpy as np
import lentil
def test_boundary():
mask = lentil.hexagon((256, 256), 100, rotate=True)
bounds = lentil.boundary(mask)
assert np.array_equal(bounds, [28, 228, 42, 214])
def test_rebin():
img = np.array([[1, 1, 2, 2], [1, 1, 2, 2], [3, 3, 4, 4], [3, 3, 4, 4]])
factor = 2
img_rebinned = lentil.util.rebin(img, factor)
assert np.array_equal(img_rebinned, np.array([[4, 8], [12, 16]]))
def test_rebin_cube():
img = np.zeros((2, 4, 4))
img[0] = np.array([[1, 1, 2, 2], [1, 1, 2, 2], [3, 3, 4, 4], [3, 3, 4, 4]])
img[1] = np.array([[2, 2, 4, 4], [2, 2, 4, 4], [6, 6, 8, 8], [6, 6, 8, 8]])
factor = 2
img_rebinned = lentil.util.rebin(img, factor)
img_rebinned_expected = np.zeros((2, 2, 2))
img_rebinned_expected[0] = np.array([[4, 8], [12, 16]])
img_rebinned_expected[1] = np.array([[8, 16], [24, 32]])
assert np.array_equal(img_rebinned, img_rebinned_expected)
def test_rescale_unitary():
a = np.random.uniform(low=0, high=1, size=(100, 100))
b = lentil.util.rescale(a, scale=0.5, order=3, mode='nearest', unitary=True)
assert np.isclose(np.sum(a), np.sum(b))
def test_pad():
img = np.ones((3, 3))
out = lentil.util.pad(img, (5, 5))
truth = np.zeros((5, 5))
truth[1:4, 1:4] = 1
    assert np.array_equal(out, truth)
import copy
import numpy as np
import open3d as o3d
from tqdm import tqdm
from scipy import stats
import utils_o3d as utils
def remove_ground_plane(pcd, z_thresh=-2.7):
cropped = copy.deepcopy(pcd)
cropped_points = np.array(cropped.points)
cropped_points = cropped_points[cropped_points[:, -1] > z_thresh]
pcd_final = o3d.geometry.PointCloud()
pcd_final.points = o3d.utility.Vector3dVector(cropped_points)
return pcd_final
def remove_y_plane(pcd, y_thresh=5):
cropped = copy.deepcopy(pcd)
cropped_points = np.array(cropped.points)
cropped_points = cropped_points[cropped_points[:, 0] < y_thresh]
cropped_points[:, -1] = -cropped_points[:, -1]
pcd_final = o3d.geometry.PointCloud()
pcd_final.points = o3d.utility.Vector3dVector(cropped_points)
return pcd_final
def compute_features(pcd, voxel_size, normals_nn=100, features_nn=120, downsample=True):
normals_radius = voxel_size * 2
features_radius = voxel_size * 4
# Downsample the point cloud using Voxel grids
if downsample:
print(':: Input size:', np.array(pcd.points).shape)
pcd_down = utils.downsample_point_cloud(pcd, voxel_size)
print(':: Downsample with a voxel size %.3f' % voxel_size)
print(':: Downsample size', np.array(pcd_down.points).shape)
else: pcd_down = copy.deepcopy(pcd)
# Estimate normals
print(':: Estimate normal with search radius %.3f' % normals_radius)
pcd_down.estimate_normals(
o3d.geometry.KDTreeSearchParamHybrid(radius=normals_radius, max_nn=normals_nn))
# Compute FPFH features
print(':: Compute FPFH feature with search radius %.3f' % features_radius)
features = o3d.registration.compute_fpfh_feature(pcd_down,
o3d.geometry.KDTreeSearchParamHybrid(radius=features_radius, max_nn=features_nn))
return pcd_down, features
def match_features(pcd0, pcd1, feature0, feature1, thresh=None, display=False):
pcd0, pcd1 = copy.deepcopy(pcd0), copy.deepcopy(pcd1)
print(':: Input size 0:', np.array(pcd0.points).shape)
print(':: Input size 1:', np.array(pcd1.points).shape)
print(':: Features size 0:', np.array(feature0.data).shape)
print(':: Features size 1:', np.array(feature1.data).shape)
utils.paint_uniform_color(pcd0, color=[1, 0.706, 0])
utils.paint_uniform_color(pcd1, color=[0, 0.651, 0.929])
scores, indices = [], []
fpfh_tree = o3d.geometry.KDTreeFlann(feature1)
for i in tqdm(range(len(pcd0.points)), desc=':: Feature Matching'):
[_, idx, _] = fpfh_tree.search_knn_vector_xd(feature0.data[:, i], 1)
scores.append(np.linalg.norm(pcd0.points[i] - pcd1.points[idx[0]]))
indices.append([i, idx[0]])
scores, indices = np.array(scores), np.array(indices)
median = np.median(scores)
if thresh is None: thresh = median
inliers_idx = np.where(scores <= thresh)[0]
pcd0_idx = indices[inliers_idx, 0]
pcd1_idx = indices[inliers_idx, 1]
print(':: Score stats: Min=%0.3f, Max=%0.3f, Median=%0.3f, N<Thresh=%d' % (
np.min(scores), np.max(scores), median, len(inliers_idx)))
if display:
for i, j in zip(pcd0_idx, pcd1_idx):
pcd0.colors[i] = [1, 0, 0]
pcd1.colors[j] = [1, 0, 0]
utils.display([pcd0, pcd1])
return pcd0_idx, pcd1_idx
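# Illustrative end-to-end usage of the helpers above (hypothetical file names and voxel
# size; not part of the original script):
#
#   pcd0 = o3d.io.read_point_cloud("scan0.pcd")
#   pcd1 = o3d.io.read_point_cloud("scan1.pcd")
#   pcd0, pcd1 = remove_ground_plane(pcd0), remove_ground_plane(pcd1)
#   down0, feat0 = compute_features(pcd0, voxel_size=0.2)
#   down1, feat1 = compute_features(pcd1, voxel_size=0.2)
#   idx0, idx1 = match_features(down0, down1, feat0, feat1)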
def estimate_scale(pcd0, pcd1, pcd0_idx, pcd1_idx, top_percent=1.0,
ransac_iters=5000, sample_size=50):
points0 = np.asarray(pcd0.points)[pcd0_idx]
points1 = np.asarray(pcd1.points)[pcd1_idx]
mean0 = np.mean(points0, axis=0)
mean1 = np.mean(points1, axis=0)
top_count = int(top_percent * len(pcd0_idx))
assert top_count > sample_size, 'top_count <= sample_size'
scales = []
for i in tqdm(range(ransac_iters), desc=':: Scale Estimation RANSAC'):
args = np.random.choice(top_count, sample_size, replace=False)
points0_r = points0[args]
points1_r = points1[args]
score0 = np.sum((points0_r - mean0) ** 2, axis=1)
score1 = np.sum((points1_r - mean1) ** 2, axis=1)
        scale = np.sqrt(np.mean(score1) / np.mean(score0))
#!/usr/bin/env python3
# Date: 2020/01/05
# Author: Armit
# Basic idea of principal component analysis (PCA): eigenvalues and eigenvectors.
# Build the covariance matrix of all the features, then take the eigenvectors along the
# directions of that matrix's largest eigenvalues; these are the principal components.
# The first principal component is always orthogonal to the second, and so on, so the
# subspace they finally span is "rectangular" as well.
import random
import numpy as np
import matplotlib.pyplot as plt
def get_data(N=100, begin=0, end=10):
fx = lambda x: 1.7 * x + 0.4
data = np.array([np.array([x, fx(x) + random.random() * random.randrange(4)])
for x in np.linspace(begin, end, N)
for _ in range(random.randrange(6))])
return data
def pca(data, top_n_feat=100): # data = [(ft1, ft2, ..., ftm), ...]
    # Subtract the mean (center the data)
    data_mean = np.mean(data, axis=0)
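    # NOTE: the original file breaks off here; the lines below are an assumed sketch of
    # the remaining standard PCA steps described in the comments at the top of this
    # file, not the author's original code.
    data_centered = data - data_mean                    # center the data
    cov_mat = np.cov(data_centered, rowvar=False)       # covariance matrix of the features
    eig_vals, eig_vecs = np.linalg.eigh(cov_mat)        # eigen-decomposition (symmetric matrix)
    top_idx = np.argsort(eig_vals)[::-1][:top_n_feat]   # directions with the largest eigenvalues
    components = eig_vecs[:, top_idx]                   # principal components
    low_dim = data_centered @ components                # projection onto the principal axes
    recon = low_dim @ components.T + data_mean          # reconstruction in the original space
    return low_dim, recon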
import numpy as np
def plot_correlation_matrix(matrix_colors, x_labels, y_labels, pdf_file_name='',
title='correlation', vmin=-1, vmax=1, color_map='RdYlGn', x_label='', y_label='', top=20,
matrix_numbers=None, print_both_numbers=True):
"""Create and plot correlation matrix.
:param matrix_colors: input correlation matrix
:param list x_labels: Labels for histogram x-axis bins
:param list y_labels: Labels for histogram y-axis bins
:param str pdf_file_name: if set, will store the plot in a pdf file
:param str title: if set, title of the plot
:param float vmin: minimum value of color legend (default is -1)
:param float vmax: maximum value of color legend (default is +1)
:param str x_label: Label for histogram x-axis
:param str y_label: Label for histogram y-axis
:param str color_map: color map passed to matplotlib pcolormesh. (default is 'RdYlGn')
:param int top: only print the top 20 characters of x-labels and y-labels. (default is 20)
    :param matrix_numbers: input matrix used for plotting numbers. (default is matrix_colors)
    :param bool print_both_numbers: if True, annotate each cell with the numbers from
        both matrix_numbers and matrix_colors.
    """
# basic matrix checks
assert matrix_colors.shape[0] == len(y_labels), 'matrix_colors shape inconsistent with number of y-labels'
assert matrix_colors.shape[1] == len(x_labels), 'matrix_colors shape inconsistent with number of x-labels'
if matrix_numbers is None:
matrix_numbers = matrix_colors
print_both_numbers = False # only one set of numbers possible
else:
assert matrix_numbers.shape[0] == len(y_labels), 'matrix_numbers shape inconsistent with number of y-labels'
assert matrix_numbers.shape[1] == len(x_labels), 'matrix_numbers shape inconsistent with number of x-labels'
# import matplotlib here to prevent import before setting backend in
# core.execution.eskapade_run
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from matplotlib import colors
fig, ax = plt.subplots(figsize=(7, 5))
# cmap = 'RdYlGn' #'YlGn'
norm = colors.Normalize(vmin=vmin, vmax=vmax)
img = ax.pcolormesh(matrix_colors, cmap=color_map, edgecolor='w', linewidth=1, norm=norm)
# set x-axis properties
def tick(lab):
"""Get tick."""
if isinstance(lab, (float, int)):
lab = 'NaN' if np.isnan(lab) else '{0:.1f}'.format(lab)
lab = str(lab)
if len(lab) > top:
lab = lab[:17] + '...'
return lab
# reduce default fontsizes in case too many labels?
nlabs = max(len(y_labels), len(x_labels))
fontsize_factor = 1
if nlabs >= 10:
fontsize_factor = 0.55
if nlabs >= 20:
fontsize_factor = 0.25
# make plot look pretty
ax.set_title(title, fontsize=14 * fontsize_factor)
ax.set_yticks(np.arange(len(y_labels)) + 0.5)
ax.set_xticks(np.arange(len(x_labels)) + 0.5)
ax.set_yticklabels([tick(lab) for lab in y_labels], rotation='horizontal', fontsize=10 * fontsize_factor)
ax.set_xticklabels([tick(lab) for lab in x_labels], rotation='vertical', fontsize=10 * fontsize_factor)
if x_label:
ax.set_xlabel(x_label, fontsize=12 * fontsize_factor)
if y_label:
ax.set_ylabel(y_label, fontsize=12 * fontsize_factor)
fig.colorbar(img)
# annotate with correlation values
numbers_set = [matrix_numbers] if not print_both_numbers else [matrix_numbers, matrix_colors]
for i, _ in enumerate(x_labels):
for j, _ in enumerate(y_labels):
point_color = float(matrix_colors[j][i])
white_cond = (point_color < 0.7 * vmin) or (point_color >= 0.7 * vmax) or np.isnan(point_color)
y_offset = 0.5
for m, matrix in enumerate(numbers_set):
if print_both_numbers:
if m == 0:
y_offset = 0.7
elif m == 1:
y_offset = 0.25
point = float(matrix[j][i])
                if np.isnan(point):
import networkx as nx
import matplotlib.pyplot as plt
import numpy as np
import IPython.display as dis
import matplotlib.animation as animation
from PIL import Image
import os
import pandas as pd
import seaborn as sns
import geopandas as gpd
import libpysal
import pysal
# from libpysal.weights.contiguity import Queen
from libpysal.weights import Queen, Rook, KNN, Kernel
from splot.libpysal import plot_spatial_weights
from shapely.ops import cascaded_union
class Graph:
def __init__(self, G):
self.nodes = np.array(G.nodes())
edges_arr = list(G.edges())
self.edges = self.extract_edges(self.nodes, edges_arr, G)
self.opinions = np.random.randint(2, size=self.nodes.size)
# self.level = self.edges[0].shape[-1]
        self.voting_prefferences = np.ones((self.nodes.size, 2))
from acados_settings import acados_settings
import time
import os
import numpy as np
import matplotlib.pyplot as plt
from plotFnc import *
from utils import *
from trajectory import *
# mpc and simulation parameters
Tf = 1 # prediction horizon
N = 100 # number of discretization steps
Ts = Tf / N # sampling time[s]
T_hover = 2 # hovering time[s]
T_traj = 20.00 # trajectory time[s]
T = T_hover + T_traj # total simulation time
# constants
g = 9.81 # m/s^2
# measurement noise bool
noisy_measurement = False
# input noise bool
noisy_input = False
# extended kalman filter bool
# extended_kalman_filter = False
# generate circulare trajectory with velocties
traj_with_vel = False
# use a single reference point
ref_point = False
# import trajectory with positions and velocities and inputs
import_trajectory = True
# bool to save measurements and inputs as .csv files
save_data = True
# load model and acados_solver
model, acados_solver, acados_integrator = acados_settings(Ts, Tf, N)
# dimensions
nx = model.x.size()[0]
nu = model.u.size()[0]
ny = nx + nu
N_hover = int(T_hover * N / Tf)
N_traj = int(T_traj * N / Tf)
Nsim = int(T * N / Tf)
# initialize data structs
simX = np.ndarray((Nsim+1, nx))
simU = np.ndarray((Nsim, nu))
tot_comp_sum = 0
tcomp_max = 0
# set initial condition for acados integrator
xcurrent = model.x0.reshape((nx,))
simX[0, :] = xcurrent
## creating or extracting trajectory
# circular trajectory
if ref_point == False and import_trajectory == False:
# creating a reference trajectory
show_ref_traj = False
radius = 1 # m
freq = 6 * np.pi/10 # frequency
# without velocity
if traj_with_vel == False:
x, y, z = trajectory_generator3D(
xcurrent, N_hover, N_traj, N, radius, show_ref_traj)
ref_traj = np.stack((x, y, z), 1)
else:
# with velocity
x, y, z, vx, vy, vz = trajectory_generotaor3D_with_vel(
xcurrent, N_hover, model, radius, freq, T_traj, Tf, Ts)
ref_traj = np.stack((x, y, z, vx, vy, vz), 1)
# reference point
elif ref_point == True and import_trajectory == False:
X0 = xcurrent
x_ref_point = 0
y_ref_point = 1.2
z_ref_point = 1.0
X_ref = np.array([x_ref_point, y_ref_point, z_ref_point])
# imported trajectory
elif ref_point == False and import_trajectory == True:
T, ref_traj, ref_U, w_ref = readTrajectory(T_hover, N, Ts)
Nsim = int((T-Tf) * N / Tf)
predX = np.ndarray((Nsim+1, nx))
simX = np.ndarray((Nsim+1, nx))
simU = np.ndarray((Nsim, nu))
simX[0, :] = xcurrent
'''
elif ref_point == False and import_trajectory == True:
T, ref_traj, ref_U = readTrajectory(T_hover, N)
Nsim = int((T-Tf) * N / Tf)
predX = np.ndarray((Nsim+1, nx))
simX = np.ndarray((Nsim+1, nx))
simU = np.ndarray((Nsim, nu))
simX[0, :] = xcurrent
'''
# N_steps, x, y, z = trajectory_generator(T, Nsim, traj, show_ref_traj)
# ref_traj = np.stack((x, y, z), 1)
# closed loop
for i in range(Nsim):
# updating references
if ref_point == False and import_trajectory == False:
if traj_with_vel == False:
for j in range(N):
yref = np.array([x[i+j], y[i+j], z[i+j], 1.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, model.params.m * g, 0.0, 0.0, 0.0])
acados_solver.set(j, "yref", yref)
yref_N = np.array([x[i+N], y[i+N], z[i+N], 1.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
acados_solver.set(N, "yref", yref_N)
else:
for j in range(N):
yref = np.array([x[i+j], y[i+j], z[i+j], 1.0, 0.0, 0.0, 0.0,
vx[i+j], vy[i+j], vz[i+j], model.params.m * g, 0.0, 0.0, 0.0])
acados_solver.set(j, "yref", yref)
yref_N = np.array([x[i+N], y[i+N], z[i+N], 1.0,
0.0, 0.0, 0.0, vx[i+N], vy[i+N], vz[i+N]])
acados_solver.set(N, "yref", yref_N)
elif ref_point == True and import_trajectory == False:
for j in range(N):
if i < N_hover:
yref = np.array([X0[0], X0[1], X0[2], 1.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, model.params.m * g, 0.0, 0.0, 0.0])
acados_solver.set(j, "yref", yref)
else:
yref = np.array([x_ref_point, y_ref_point, z_ref_point, 1.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, model.params.m * g, 0.0, 0.0, 0.0])
acados_solver.set(j, "yref", yref)
if i < N_hover:
yref_N = np.array(
[X0[0], X0[1], X0[2], 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
acados_solver.set(N, "yref", yref_N)
else:
yref_N = np.array(
[x_ref_point, y_ref_point, z_ref_point, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
acados_solver.set(N, "yref", yref_N)
elif ref_point == False and import_trajectory == True:
# if i == Nsim-5:
# print(f'i={i}')
for j in range(N):
x = ref_traj[i+j, 0]
y = ref_traj[i+j, 1]
z = ref_traj[i+j, 2]
qw = ref_traj[i+j, 3]
qx = ref_traj[i+j, 4]
qy = ref_traj[i+j, 5]
qz = ref_traj[i+j, 6]
vx = ref_traj[i+j, 7]
vy = ref_traj[i+j, 8]
vz = ref_traj[i+j, 9]
T_ref = ref_U[i+j, 0] * 2 # Thrust
wx_ref = w_ref[i+j,0] # Angular rate around x
wy_ref = w_ref[i+j,1] # Angular rate around y
wz_ref = w_ref[i+j,2] # Angular rate around z
yref = np.array([x, y, z, qw, qx, qy, qz, vx, vy,
vz, T_ref, wx_ref, wy_ref, wz_ref])
acados_solver.set(j, "yref", yref)
x_e = ref_traj[i+N, 0]
y_e = ref_traj[i+N, 1]
z_e = ref_traj[i+N, 2]
qw_e = ref_traj[i+N, 3]
qx_e = ref_traj[i+N, 4]
qy_e = ref_traj[i+N, 5]
qz_e = ref_traj[i+N, 6]
vx_e = ref_traj[i+N, 7]
vy_e = ref_traj[i+N, 8]
vz_e = ref_traj[i+N, 9]
yref_N = np.array([x_e, y_e, z_e, qw_e, qx_e,
qy_e, qz_e, vx_e, vy_e, vz_e])
acados_solver.set(N, "yref", yref_N)
# solve ocp for a fixed reference
acados_solver.set(0, "lbx", xcurrent)
acados_solver.set(0, "ubx", xcurrent)
comp_time = time.time()
status = acados_solver.solve()
if status != 0:
print("acados returned status {} in closed loop iteration {}.".format(status, i))
# manage timings
elapsed = time.time() - comp_time
tot_comp_sum += elapsed
if elapsed > tcomp_max:
tcomp_max = elapsed
# get solution from acados_solver
u0 = acados_solver.get(0, "u")
# x4 = acados_solver.get(4, "x") # used to compensate for delays
# storing results from acados solver
simU[i, :] = u0
# add noise to measurement
if noisy_measurement == True:
xcurrent = add_measurement_noise(xcurrent)
# simulate the system
acados_integrator.set("x", xcurrent)
acados_integrator.set("u", u0)
status = acados_integrator.solve()
if status != 0:
raise Exception(
'acados integrator returned status {}. Exiting.'.format(status))
# get state
xcurrent = acados_integrator.get("x")
# make sure that the quaternion is unit
# xcurrent = ensure_unit_quat(xcurrent)
# store state
simX[i+1, :] = xcurrent
# root mean squared error on each axis
# rmse_x, rmse_y, rmse_z = rmseX(simX, ref_traj)
# print the computation times
print("Total computation time: {}".format(tot_comp_sum))
print("Average computation time: {}".format(tot_comp_sum / Nsim))
print("Maximum computation time: {}".format(tcomp_max))
simX_euler = np.zeros((simX.shape[0], 3))
for i in range(simX.shape[0]):
simX_euler[i, :] = quaternion_to_euler(simX[i, 3:7])
simX_euler = R2D(simX_euler)
# print(simX_euler)
if import_trajectory == False:
    t = np.arange(0, T, Ts)
import os
import re
import numpy as np
import GCRCatalogs
import multiprocessing
import time
import scipy.spatial as scipy_spatial
from lsst.utils import getPackageDir
from lsst.sims.utils import defaultSpecMap
from lsst.sims.photUtils import BandpassDict, Bandpass, Sed, CosmologyObject
__all__ = ["disk_re", "bulge_re", "sed_filter_names_from_catalog", "sed_from_galacticus_mags"]
_galaxy_sed_dir = os.path.join(getPackageDir('sims_sed_library'), 'galaxySED')
disk_re = re.compile(r'sed_(\d+)_(\d+)_disk$')
bulge_re = re.compile(r'sed_(\d+)_(\d+)_bulge$')
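# Illustrative example (not part of the original source): a catalog quantity named
# 'sed_1000_246_disk' matches disk_re with groups ('1000', '246'), i.e. a SED-defining
# bandpass starting at 1000 Angstrom = 100.0 nm with a width of 24.6 nm after the
# 0.1 conversion applied in sed_filter_names_from_catalog below.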
def sed_filter_names_from_catalog(catalog):
"""
Takes an already-loaded GCR catalog and returns the names, wavelengths,
and widths of the SED-defining bandpasses
Parameters
----------
catalog -- is a catalog loaded with GCR.load_catalog()
Returns
-------
A dict keyed to 'bulge' and 'disk'. The values in this dict will
be dicts keyed to 'filter_name', 'wav_min', 'wav_width'. The
corresponding values are:
filter_name -- list of the names of the columns defining the SED
wav_min -- list of the minimum wavelengths of SED-defining bandpasses (in nm)
wav_width -- list of the widths of the SED-defining bandpasses (in nm)
All outputs will be returned in order of increasing wav_min
"""
all_quantities = catalog.list_all_quantities()
bulge_names = []
bulge_wav_min = []
bulge_wav_width = []
disk_names = []
disk_wav_min = []
disk_wav_width = []
for qty_name in all_quantities:
disk_match = disk_re.match(qty_name)
if disk_match is not None:
disk_names.append(qty_name)
disk_wav_min.append(0.1*float(disk_match[1])) # 0.1 converts to nm
disk_wav_width.append(0.1*float(disk_match[2]))
bulge_match = bulge_re.match(qty_name)
if bulge_match is not None:
bulge_names.append(qty_name)
bulge_wav_min.append(0.1*float(bulge_match[1]))
bulge_wav_width.append(0.1*float(bulge_match[2]))
disk_wav_min = np.array(disk_wav_min)
disk_wav_width = np.array(disk_wav_width)
disk_names = np.array(disk_names)
    sorted_dex = np.argsort(disk_wav_min)
import numpy as np
def multitask_rollout(
env,
agent,
max_path_length=np.inf,
render=False,
render_kwargs=None,
observation_key=None,
desired_goal_key=None,
get_action_kwargs=None,
return_dict_obs=False,
):
if render_kwargs is None:
render_kwargs = {}
if get_action_kwargs is None:
get_action_kwargs = {}
dict_obs = []
dict_next_obs = []
observations = []
actions = []
rewards = []
terminals = []
agent_infos = []
env_infos = []
next_observations = []
path_length = 0
agent.reset()
o = env.reset()
if render:
env.render(**render_kwargs)
while path_length < max_path_length:
dict_obs.append(o)
goal = o[desired_goal_key]
if observation_key:
o = o[observation_key]
        new_obs = np.hstack((o, goal))
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 19 13:16:25 2015
@author: hbanks
Brevity required, prudence preferred
"""
import os
import io
import glob
import errno
import copy
import json
import time
import warnings
import numpy as np
from scipy.optimize import curve_fit
import scipy.interpolate as spi
import scipy.optimize as spo
import scipy.integrate as intgt
import scipy.fftpack as fft
import scipy.special as spl
import matplotlib.pyplot as plt
import scipy.ndimage as ndimage
import itertools as itt
import multiprocessing as mp
import sys
sys.path.append('/Users/marketing/Desktop/HSG-turbo/')
import hsganalysis.QWPProcessing as qwp
from hsganalysis.QWPProcessing.extractMatrices import makeT,saveT
np.set_printoptions(linewidth=500)
# One of the main results is the HighSidebandCCD.sb_results array. These are the
# various mappings between index and real value
# I deally, this code should be converted to pandas to avoid this issue,
# but that's outside the scope of current work.
# [sb number, Freq (eV), Freq error (eV), Gauss area (arb.), Area error, Gauss linewidth (eV), Linewidth error (eV)]
# [ 0 , 1 , 2, , 3 , 4 , 5 , 6 ]
class sbarr(object):
SBNUM = 0
CENFREQ = 1
CENFREQERR = 2
AREA = 3
AREAERR = 4
WIDTH = 5
WIDTHERR = 6
####################
# Objects
####################
class CCD(object):
def __init__(self, fname, spectrometer_offset=None):
"""
This will read the appropriate file and make a basic CCD object. Fancier
things will be handled with the sub classes.
Creates:
self.parameters = Dictionary holding all of the information from the
data file, which comes from the JSON encoded header in the data
file
self.description = string that is the text box from data taking GUI
self.raw_data = raw data output by measurement software, wavelength vs.
data, errors. There may be text for some of the entries
corresponding to text used for Origin imports, but they
should appear as np.nan
self.ccd_data = semi-processed 1600 x 3 array of photon energy vs. data with standard error of mean at that pixel
calculated by taking multiple images. Standard error is calculated from
the data collection software
Most subclasses should make a self.proc_data, which will do whatever
processing is required to the ccd_data, such as normalizing, taking ratios,
etc.
:param fname: file name where the data is saved
:type fname: str
:param spectrometer_offset: if the spectrometer won't go where it's told, use this to correct the wavelengths (nm)
:type spectrometer_offset: float
"""
self.fname = fname
# Checking restrictions from Windows path length limits. Check if you can
# open the file:
try:
with open(fname) as f: pass
except FileNotFoundError:
# Couldn't find the file. Could be you passed the wrong one, but I'm
# finding with a large number of subfolders for polarimetry stuff,
# you end up exceeding Windows' filelength limit.
            # Haven't tested on Mac or UNC mounted drives (e.g. \\128.x.x.x\Sherwin\)
fname = r"\\?\\" + os.path.abspath(fname)
# Read in the JSON-formatted parameter string.
# The lines are all prepended by '#' for easy numpy importing
# so loop over all those lines
with open(fname, 'r') as f:
param_str = ''
line = f.readline()
while line[0] == '#':
### changed 09/17/18
# This line assumed there was a single '#'
# param_str += line[1:]
                # while this one handles several (because I found old files
# which had '## <text>...'
param_str += line.replace("#", "")
line = f.readline()
# Parse the JSON string
try:
self.parameters = json.loads(param_str)
except json.JSONDecodeError:
# error from _really_ old data where comments were dumped after a
# single-line json dumps
self.parameters=json.loads(param_str.splitlines()[0])
# Spec[trometer] steps are set to define the same physical data, but taken at
# different spectrometer center wavelengths. This value is used later
# for stitching these scans together
try:
self.parameters["spec_step"] = int(self.parameters["spec_step"])
except (ValueError, KeyError):
            # If there isn't a spec_step parameter, default it to 0
self.parameters["spec_step"] = 0
# Slice through 3 to get rid of comments/origin info.
# Would likely be better to check np.isnan() and slicing out those nans.
# I used flipup so that the x-axis is an increasing function of frequency
self.raw_data = np.flipud(np.genfromtxt(fname, comments='#', delimiter=',')[3:])
        # The camera chip is 1600 pixels wide. This line was redundant with the [3:]
# slice above and served to make sure there weren't extra stray bad lines
# hanging around.
#
        # This should also be updated some day to compensate for any horizontal binning
# on the chip, or masking out points that are bad (cosmic ray making it
# through processing, room lights or monitor lines interfering with signal)
self.ccd_data = np.array(self.raw_data[:1600, :])
# Check to see if the spectrometer offset is set. This isn't specified
# during data collection. This is a value that can be appended
# when processing if it's realized the data is offset.
# This allows the offset to be specified and kept with the data file itself,
# instead of trying to do it in individual processing scripts
#
# It's allowed as a kwarg parameter in this script for trying to determine
# what the correct offset should be
if spectrometer_offset is not None or "offset" in self.parameters:
try:
self.ccd_data[:, 0] += float(self.parameters["offset"])
except:
self.ccd_data[:, 0] += spectrometer_offset
# Convert from nm to eV
# self.ccd_data[:, 0] = 1239.84 / self.ccd_data[:, 0]
self.ccd_data[:, 0] = photon_converter["nm"]["eV"](self.ccd_data[:, 0])
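# Illustrative usage of the class above (hypothetical file name; assumes the file has
# the JSON-encoded header described in the docstring):
#   ccd = CCD("hsg_spectrum_0001.txt", spectrometer_offset=None)
#   energy_eV, counts, std_err = ccd.ccd_data.T   # photon energy (eV), signal, standard error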
class Photoluminescence(CCD):
def __init__(self, fname):
"""
This object handles PL-type data. The only distinction from the parent class
is that the CCD data gets normalized to the exposure time to make different
exposures directly comparable.
creates:
self.proc_data = self.ccd_data divided by the exposure time
units: PL counts / second
:param fname: name of the file
:type fname: str
"""
super(Photoluminescence, self).__init__(fname)
# Create a copy of the array , and then normalize the signal and the errors
# by the exposure time
        self.proc_data = np.array(self.ccd_data)
import torch
import os
from torch.distributions import Normal
import gym
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import cv2
from itertools import permutations
import h5py
from sklearn.feature_selection import mutual_info_regression
import matplotlib.ticker as ticker
from a2c_ppo_acktr.envs import FetchWrapper #TODO remove fetch and add meta-world instead
from a2c_ppo_acktr.utils import load_expert
#TODO remove any 'fetch' related thing from the repo
from a2c_ppo_acktr.utils import generate_latent_codes
class Base:
def __init__(self, args, env, actor_critic, filename, obsfilt, vae_data):
if args.fetch_env:
self.env = FetchWrapper(env)
else:
self.env = env
self.actor_critic = actor_critic
self.args = args
self.obsfilt = obsfilt
self.filename = filename
self.vae_data = vae_data
self.vae_mus = vae_data[0]
assert not args.vanilla, 'Vanilla GAIL benchmarking not implemented'
self.max_episode_steps = self._get_max_episode_steps()
def resolve_latent_code(self, states, actions, i):
def _get_sog(args, actor_critic):
from a2c_ppo_acktr.algo.sog import OneHotSearch, BlockCoordinateSearch
if args.latent_optimizer == 'bcs':
SOG = BlockCoordinateSearch
elif args.latent_optimizer == 'ohs':
SOG = OneHotSearch
else:
raise NotImplementedError
return SOG(actor_critic, args)
device = self.args.device
if self.args.sog_gail:
sog = _get_sog(self.args, self.actor_critic)
return sog.resolve_latent_code(torch.from_numpy(self.obsfilt(states[i].cpu().numpy(), update=False)).float().to(device), actions[i].to(device))[:1]
elif self.args.vae_gail:
return self.vae_mus[i]
elif self.args.infogail:
return generate_latent_codes(self.args, count=1, eval=True)
else:
raise NotImplementedError
def _get_max_episode_steps(self):
return {
'Circles-v0': 1000,
'AntDir-v0': 200,
'HalfCheetahVel-v0': 200,
'FetchReach-v0': 50,
'HopperVel-v0': 1000,
'Walker-v0': 1000,
'HumanoidDir-v0': 1000,
}.get(self.args.env_name, 200)
class Play(Base):
def __init__(self, **kwargs):
super(Play, self).__init__(**kwargs)
if self.args.fetch_env:
self.max_episode_steps = self.env.env._max_episode_steps
else:
max_episode_time = 10
dt = kwargs['env'].dt
self.max_episode_steps = int(max_episode_time / dt)
def play(self):
args = self.args
fourcc = cv2.VideoWriter_fourcc(*'MJPG')
if args.fetch_env:
video_size = (500, 500)
else:
video_size = (250, 250)
video_writer = cv2.VideoWriter(f'{self.filename}.avi', fourcc, 1 / self.env.dt, video_size)
s = self.env.reset()
if (args.fetch_env or args.mujoco) and args.continuous and args.env_name != 'HalfCheetahVel-v0':
expert = torch.load(args.expert_filename, map_location=args.device)
count = 30 if args.fetch_env else 5 # for fetch, try 30 of the goals from the expert trajectories
####### recover expert embeddings #######
expert_len = len(expert['states'])
sample_idx = torch.randint(low=0, high=expert_len, size=(count,))
states, actions, desired_goals = [expert.get(key, [None]*expert_len)[sample_idx] for key in ('states', 'actions', 'desired_goal')] # only keep the trajectories specified by `sample_idx`
latent_codes = [self.resolve_latent_code(states, actions, i) for i in range(len(states))]
else:
# if 'Humanoid' in args.env_name and args.continuous:
# expert = load_expert(args.expert_filename, device=args.device)
# ####### recover expert embeddings #######
# count = 5
# sample_idx = torch.randint(low=0, high=len(expert['states']), size=(count,))
# states, actions = [expert[key][sample_idx] for key in ('states', 'actions')] # only keep the trajectories specified by `sample_idx`
# latent_codes = [self.resolve_latent_code(torch.from_numpy(self.obsfilt(states.cpu().numpy(), update=False)).float().to(args.device), actions, i) for i in len(states)]
# expert = load_expert(args.expert_filename, device=args.device)
# ####### recover expert embeddings #######
# latent_codes = list()
# possible_angles = torch.unique(expert['angles'])
# sog = self._get_sog()
# for angle in possible_angles:
# sample_idx = expert['angles'] == angle
# states, actions = [expert[key][sample_idx] for key in ('states', 'actions')] # only keep the trajectories specified by `sample_idx`
# from tqdm import tqdm
# mode_list = []
# for (traj_states, traj_actions) in tqdm(zip(states, actions)):
# mode_list.append(sog.resolve_latent_code(torch.from_numpy(self.obsfilt(traj_states.cpu().numpy(), update=False)).float().to(args.device), traj_actions)[0])
# latent_codes.append(torch.stack(mode_list).mean(0))
count = None
if args.vae_gail and args.env_name == 'HalfCheetahVel-v0':
count = 30
latent_codes = generate_latent_codes(args, count=count, vae_data=self.vae_data, eval=True)
for j, latent_code in enumerate(latent_codes):
latent_code = latent_code
episode_reward = 0
if args.fetch_env:
self.env.set_desired_goal(desired_goals[j].cpu().numpy())
self.env._max_env_steps = 100
# print(desired_goals[j])
print(f'traj #{j+1}/{len(latent_codes)}')
for step in range(self.max_episode_steps):
s = self.obsfilt(s, update=False)
s_tensor = torch.tensor(s, dtype=torch.float32, device=args.device)[None]
with torch.no_grad():
_, actions_tensor, _ = self.actor_critic.act(s_tensor, latent_code, deterministic=True)
action = actions_tensor[0].cpu().numpy()
s, r, done, _ = self.env.step(action)
episode_reward += r
if done:
break
I = self.env.render(mode='rgb_array')
I = cv2.cvtColor(I, cv2.COLOR_RGB2BGR)
I = cv2.resize(I, video_size)
video_writer.write(I)
if args.fetch_env:
pass
# achieved_goal = self.env.unwrapped.sim.data.get_site_xpos("robot0:grip").copy()
# success = self.env.unwrapped._is_success(achieved_goal, self.env.unwrapped.goal)
# print('success' if success else 'failed')
else:
print(f"episode reward:{episode_reward:3.3f}")
s = self.env.reset()
video_writer.write(np.zeros([*video_size, 3], dtype=np.uint8))
self.env.close()
video_writer.release()
cv2.destroyAllWindows()
class Plot(Base):
def plot(self):
return {'Circles-v0': self._circles_ellipses,
'Ellipses-v0': self._circles_ellipses,
'HalfCheetahVel-v0': self._halfcheetahvel,
'AntDir-v0': self._ant,
'FetchReach-v1': self._fetch,
'Walker2dVel-v0': self._walker_hopper,
'HopperVel-v0': self._walker_hopper,
'HumanoidDir-v0': self._humanoid,
}.get(self.args.env_name, lambda: None)()
def _circles_ellipses(self):
args, actor_critic, filename = self.args, self.actor_critic, self.filename
fig = plt.figure(figsize=(2, 3), dpi=300)
plt.set_cmap('gist_rainbow')
# plotting the actual circles/ellipses
if args.env_name == 'Circles-v0':
for r in args.radii:
t = np.linspace(0, 2 * np.pi, 200)
plt.plot(r * np.cos(t), r * np.sin(t) + r, color='#d0d0d0')
elif args.env_name == 'Ellipses-v0':
for rx, ry in np.array(args.radii).reshape(-1, 2):
t = np.linspace(0, 2 * np.pi, 200)
plt.plot(rx * np.cos(t), ry * np.sin(t) + ry, color='#d0d0d0')
max_r = np.max(np.abs(args.radii))
plt.axis('equal')
# plt.axis('off')
# Turn off tick labels
ax = fig.add_subplot(1, 1, 1)
ax.xaxis.set_major_locator(ticker.MultipleLocator(10.00))
ax.yaxis.set_major_locator(ticker.MultipleLocator(10.00))
ax.xaxis.set_major_formatter(ticker.NullFormatter())
ax.xaxis.set_minor_formatter(ticker.NullFormatter())
ax.yaxis.set_major_formatter(ticker.NullFormatter())
ax.yaxis.set_minor_formatter(ticker.NullFormatter())
plt.xlim([-1 * max_r, 1 * max_r])
plt.ylim([-1.5 * max_r, 2.5 * max_r])
import gym_sog
env = gym.make(args.env_name, args=args)
obs = env.reset()
device = next(actor_critic.parameters()).device
count = 3
latent_codes = generate_latent_codes(args, count=count, vae_data=self.vae_data, eval=True)
# generate rollouts and plot them
for j, latent_code in enumerate(latent_codes):
latent_code = latent_code.unsqueeze(0)
for i in range(self.max_episode_steps):
# randomize latent code at each step in case of vanilla gail
if args.vanilla:
latent_code = generate_latent_codes(args)
# interacting with env
with torch.no_grad():
# an extra 0'th dimension is because actor critic works with "environment vectors" (see the training code)
obs = self.obsfilt(obs, update=False)
obs_tensor = torch.tensor(obs, dtype=torch.float32, device=device)[None]
_, actions_tensor, _ = actor_critic.act(obs_tensor, latent_code, deterministic=True)
action = actions_tensor[0].cpu().numpy()
obs, _, _, _ = env.step(action)
# plotting the trajectory
plt.plot(env.loc_history[:, 0], env.loc_history[:, 1], color=plt.cm.Dark2.colors[j])
if args.vanilla:
break # one trajectory in vanilla mode is enough. if not, then rollout for each separate latent code
else:
obs = env.reset()
env.close()
plt.savefig(filename + '.png')
plt.close()
def _fetch(self):
args = self.args
actor_critic = self.actor_critic
obsfilt = self.obsfilt
filename = self.filename
# TODO
expert = load_expert(args.expert_filename)
count = 100 # how many number of expert trajectories
sample_idx = np.random.randint(low=0, high=len(expert['states']), size=(count,))
# sample_idx = np.arange(len(expert['states']))
states, actions, desired_goals = [expert[key][sample_idx] for key in ('states', 'actions', 'desired_goal')] # only keep the trajectories specified by `sample_idx`
# init env
env = gym.make(args.env_name)
# init plots
matplotlib.rcParams['legend.fontsize'] = 10
fig = plt.figure(figsize=(3,3), dpi=300)
ax = fig.gca(projection='3d')
normalize = lambda x: (x - np.array([1.34183226, 0.74910038, 0.53472284]))/.15 # map the motion range to the unit cube
s = self.env.reset()
for i in range(len(states)):
env.unwrapped.goal = desired_goals[i].cpu().numpy()
latent_code = self.resolve_latent_code(states, actions, i)
achieved_goals = np.zeros([env._max_episode_steps, 3])
s = env.reset()['observation']
for step in range(env._max_episode_steps):
s = obsfilt(s, update=False)
s_tensor = torch.tensor(s, dtype=torch.float32, device=args.device)[None]
with torch.no_grad():
_, actions_tensor, _ = actor_critic.act(s_tensor, latent_code, deterministic=True)
action = actions_tensor[0].cpu().numpy()
obs, _, _, _ = env.step(action)
s = obs['observation']
achieved_goals[step] = normalize(obs['achieved_goal'])
ax.plot(*achieved_goals.T)
if not args.infogail:
ax.scatter(*normalize(desired_goals).T)
ax.set_xlim([-1,1])
ax.set_ylim([-1,1])
ax.set_zlim([-1,1])
plt.savefig(f'{filename}.png')
def _halfcheetahvel(self):
device = self.args.device
filename = self.filename
if self.args.vae_gail:
# 2100 x 1 or 2100 x 20
latent_codes = self.vae_mus
# 30 x 70 x 1 x 1 or 30 x 70 x 1 x 20
latent_codes = latent_codes.reshape(30, 70, 1, -1)
# latent_codes = latent_codes[:, :num_repeats]
x = np.linspace(1.5, 3, 30)
else:
num_codes, num_repeats = 100, 30
cdf = np.linspace(.1, .9, num_codes)
m = Normal(torch.tensor([0.0]), torch.tensor([1.0]))
# num_codes
latent_codes = m.icdf(torch.tensor(cdf, dtype=torch.float32)).to(device)
# num_codes x num_repeats x 1 x 1
latent_codes = latent_codes[:, None, None, None].expand(-1, num_repeats, -1, -1)
x = cdf
vel_mean = []
vel_std = []
for j, latent_code_group in enumerate(latent_codes):
print(f'{j+1} / {len(latent_codes)}')
vels = []
for k, latent_code in enumerate(latent_code_group):
print(f'\t - {k+1} / {len(latent_code_group)}')
s = self.env.reset()
for step in range(self.max_episode_steps):
s = self.obsfilt(s, update=False)
s_tensor = torch.tensor(s, dtype=torch.float32, device=device)[None]
with torch.no_grad():
_, actions_tensor, _ = self.actor_critic.act(s_tensor, latent_code, deterministic=True)
action = actions_tensor[0].cpu().numpy()
s, r, done, infos = self.env.step(action)
vels.append(infos['forward_vel'])
            # fix for the slight offset of the dataset, which begins at 1.539 instead of exactly 1.5  # TODO: modify the dataset instead
rescale = lambda input, input_low, input_high, output_low, output_high: ((input - input_low) / (input_high - input_low)) * (output_high - output_low) + output_low
vels = rescale(np.array(vels), 1.539, 3., 1.5, 3)
vel_mean.append(np.mean(vels))
            vel_std.append(np.std(vels))
#MIT License
#
#Copyright (c) 2020 standupmaths
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
def xmaslight():
# This is the code from my
#NOTE THE LEDS ARE GRB COLOUR (NOT RGB)
# Here are the libraries I am currently using:
import time
import board
import neopixel
import re
import math
# FOR DEBUGGING PURPOSE
#import matplotlib.pyplot as plt
#import matplotlib.animation as animation
# You are welcome to add any of these:
# import random
import numpy
# import scipy
import sys
# If you want to have user changable values, they need to be entered from the command line
# so import sys sys and use sys.argv[0] etc
# some_value = int(sys.argv[0])
# IMPORT THE COORDINATES (please don't break this bit)
coordfilename = "Python/coords.txt"
# FOR DEBUGGING PURPOSE
#coordfilename = "xmastree2020/coords.txt"
fin = open(coordfilename,'r')
coords_raw = fin.readlines()
coords_bits = [i.split(",") for i in coords_raw]
coords = []
for slab in coords_bits:
new_coord = []
for i in slab:
new_coord.append(int(re.sub(r'[^-\d]','', i)))
coords.append(new_coord)
#set up the pixels (AKA 'LEDs')
PIXEL_COUNT = len(coords) # this should be 500
pixels = neopixel.NeoPixel(board.D18, PIXEL_COUNT, auto_write=False)
# FOR DEBUGGING PURPOSE
#pixels = [ 0 for i in range(PIXEL_COUNT) ]
# YOU CAN EDIT FROM HERE DOWN
# This program is intended to make a neuronal network out of the tree's LEDs.
#
# By neuronal network I mean:
# the light of each LED will be set according to a dynamic variable V
# that stands for a model of the electric potential in the membrane of a real neuron.
# And these 'neurons' (i.e., LEDs) will have connections between them
# that obey the dynamics of chemical synapses in the brain represented by the variable S.
# The network is built according to a 'cubic-like' lattice: i.e.,
# a given LED receives input from the closest LEDs in each of the 6 spatial directions.
# Thus, the 'synapse' is represented by a 'virtual' connection, and not a physical one (i.e., the LED wire)
# I implemented other 2 types of networks:
# a surface networks (only LEDs in the surface of the tree cone 'talk' to each other)
# a proximity network (only LEDs within a radius R of each other are connected)
#
# to visualize the network generated by this algorithm, please run
# python view_tree_network.py
# first we need to define (a lot of) functions
def memb_potential_to_01(V):
# V -> dynamic variable (defined in [-1,1])
# the formula below is just a smart way to map
# [-1,1] to [0,1], emphasizing bright colors (i.e., colors close to 1)
# [0,1] is then mapped on the color_arr below
if type(V) is numpy.ndarray:
return ((V[:,0]+1.0)*0.5)**4 # raising to 4 is just to emphasize bright colors
else:
return ((V+1.0)*0.5)**4 # raising to 4 is just to emphasize bright colors
def memb_potential_to_coloridx(V,n_colors):
# V -> dynamic variable
# n_colors -> total number of colors
return numpy.floor(n_colors*memb_potential_to_01(V)).astype(int)
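    # Worked example (illustrative): V = 0.0 maps to ((0.0 + 1.0) * 0.5)**4 = 0.0625, so
    # with n_colors = 256 the pixel is assigned colour index floor(256 * 0.0625) = 16.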
def create_input_lists(neigh):
# given a list of neighbors, where neigh[i] is a list of inputs to node i
# generate the list of inputs to be used in the simulation
presyn_neuron_list = [n for sublist in neigh for n in sublist]
cs = numpy.insert(numpy.cumsum([ n.size for n in neigh ]),0,0)
input_list = [ numpy.arange(a,b) for a,b in zip(cs[:-1],cs[1:]) ]
return input_list,presyn_neuron_list
def generate_list_of_neighbors(r,R=0.0,on_conic_surface_only=False):
# generates a network of "pixels"
# each pixel in position r[i,:] identifies its 6 closest neighbors and should receive a connection from it
# if R is given, includes all pixels within a radius R of r[i,:] as a neighbor
# the 6 neighbors are chosen such that each one is positioned to the left, right, top, bottom, front or back of each pixel (i.e., a simple attempt of a cubic lattice)
#
# r -> position vector (each line is the position of each pixel)
# R -> neighborhood ball around each pixel
# on_conic_surface_only -> if true, only links pixels that are on the conic shell of the tree
#
# returns:
# list of neighbors
# neigh[i] -> list of 6 "pixels" closest to i
def is_left_neigh(u,v):
# u and v are two vectors on the x,y plane
# u may be a list of vectors (one vector per row)
return numpy.dot(u,[-v[1],v[0]])>0.0 # # the vector [-v[1],v[0]] is the 90-deg CCW rotated version of v
def get_first_val_not_in_list(v,l): # auxiliary function
# returns first value in v that is not in l
if v.size == 0:
return None
n = len(v)
i = 0
while i < n:
if not (v[i] in l):
return v[i]
i+=1
if on_conic_surface_only:
# only adds 4 neighbors (top, bottom, left, right) that are outside of the cone defined by the estimated tree cone parameters
# cone equation (x**2 + y**2)/c**2 = (z-z0)**2
z0 = numpy.max(r[:,2]) # cone height above the z=0 plane
h = z0 + numpy.abs(numpy.min(r[:,2])) # cone total height
base_r = (numpy.max( (numpy.max(r[:,1]),numpy.max(r[:,0])) ) + numpy.abs(numpy.min( ( numpy.min(r[:,1]),numpy.min(r[:,0]) ) )))/2.0 # cone base radius
c = base_r / h # cone opening radius (defined by wolfram https://mathworld.wolfram.com/Cone.html )
#z_cone = lambda x,y,z0,c,s: z0+s*numpy.sqrt((x**2+y**2)/(c**2)) # s is the concavity of the cone: -1 turned down, +1 turned up
cone_r_sqr = lambda z,z0,c: (c*(z-z0))**2
outside_cone = (r[:,0]**2+r[:,1]**2) > cone_r_sqr(r[:,2],z0,c)
pixel_list = numpy.nonzero(outside_cone)[0]
r_out = r[outside_cone,:]
neigh = [ numpy.array([],dtype=int) for i in range(r.shape[0]) ]
for i,r0 in enumerate(r_out):
# a radius is not given, hence returns a crystalline-like cubic-like structure :P
pixel_list_sorted = numpy.argsort(numpy.linalg.norm(r_out-r0,axis=1)) # sorted by Euler distance to r0
rs = r_out[pixel_list_sorted,:] # list of positions from the closest to the farthest one to r0
local_neigh_list = [] # local neighbor list
x1_neigh = get_first_val_not_in_list(numpy.nonzero( is_left_neigh(rs[:,:2],r0[:2]) )[0],local_neigh_list) # gets first neighbor to the left that is not added yet
if x1_neigh:
local_neigh_list.append(x1_neigh)
x2_neigh = get_first_val_not_in_list(numpy.nonzero( numpy.logical_not(is_left_neigh(rs[:,:2],r0[:2])) )[0],local_neigh_list) # gets first neighbor to the right that is not added yet
if x2_neigh:
local_neigh_list.append(x2_neigh)
z1_neigh = get_first_val_not_in_list(numpy.nonzero(rs[:,2]<r0[2])[0],local_neigh_list) # gets first neighbor to the top that is not added yet
if z1_neigh:
local_neigh_list.append(z1_neigh)
z2_neigh = get_first_val_not_in_list(numpy.nonzero(rs[:,2]>r0[2])[0],local_neigh_list) # gets first neighbor to the bottom that is not added yet
if z2_neigh:
local_neigh_list.append(z2_neigh)
neigh[pixel_list[i]] = pixel_list[pixel_list_sorted[local_neigh_list]] # adds neighbors
return neigh
neigh = []
for r0 in r:
if (R>0.0): # a neighborhood radius is given
neigh.append(numpy.nonzero(numpy.linalg.norm(r-r0,axis=1)<R)[0])
else:
# a radius is not given, hence returns a crystalline-like cubic-like structure :P
pixel_list_sorted = numpy.argsort(numpy.linalg.norm(r-r0,axis=1)) # sorted by Euler distance to r0
rs = r[pixel_list_sorted,:] # list of positions from the closest to the farthest one to r0
local_neigh_list = [] # local neighbor list
x1_neigh = get_first_val_not_in_list(numpy.nonzero(rs[:,0]<r0[0])[0],local_neigh_list) # gets first neighbor to the left that is not added yet
if x1_neigh:
local_neigh_list.append(x1_neigh)
x2_neigh = get_first_val_not_in_list(numpy.nonzero(rs[:,0]>r0[0])[0],local_neigh_list) # gets first neighbor to the right that is not added yet
if x2_neigh:
local_neigh_list.append(x2_neigh)
y1_neigh = get_first_val_not_in_list(numpy.nonzero(rs[:,1]<r0[1])[0],local_neigh_list) # gets first neighbor to the back that is not added yet
if y1_neigh:
local_neigh_list.append(y1_neigh)
y2_neigh = get_first_val_not_in_list(numpy.nonzero(rs[:,1]>r0[1])[0],local_neigh_list) # gets first neighbor to the front that is not added yet
if y2_neigh:
local_neigh_list.append(y2_neigh)
z1_neigh = get_first_val_not_in_list(numpy.nonzero(rs[:,2]<r0[2])[0],local_neigh_list) # gets first neighbor to the top that is not added yet
if z1_neigh:
local_neigh_list.append(z1_neigh)
z2_neigh = get_first_val_not_in_list(numpy.nonzero(rs[:,2]>r0[2])[0],local_neigh_list) # gets first neighbor to the bottom that is not added yet
if z2_neigh:
local_neigh_list.append(z2_neigh)
neigh.append(pixel_list_sorted[local_neigh_list]) # adds neighbors
return neigh
def build_network(r_nodes,R=0.0,conic_surface_only=False):
# r_nodes vector of coordinates of each pixel
# R connection radius
# if R is zero, generates an attempt of a cubic-like lattice, otherwise connects all pixels within a radius R of each other
neigh = generate_list_of_neighbors(r_nodes,R,on_conic_surface_only=conic_surface_only)
# creates the interaction lists between dynamic variables
input_list,presyn_neuron_list = create_input_lists(neigh)
# creates dynamic variables
N = len(neigh) # number of neurons (or pixels)
Nsyn = len(presyn_neuron_list)
V = numpy.zeros((N,3)) # membrane potential (dynamic variables) of each neuron (pixel)
S = numpy.zeros((Nsyn,2)) # synaptic current input generated by each pixel towards each of its postsynaptic pixels
return V,S,input_list,presyn_neuron_list
def get_neuron_resting_state(neuron_map_iter,par,T=20000):
V = -0.9*numpy.ones((1,3))
t = 0
while t<T:
V = neuron_map_iter(0,V,par,[],0.0)
t+=1
return V
def set_initial_condition(V,neuron_map_iter,parNeuron,V0_type=None):
if type(V0_type) is type(None):
V0 = get_neuron_resting_state(neuron_map_iter,parNeuron)
else:
if type(V0_type) is str:
if V0_type == 'rest':
V0 = get_neuron_resting_state(neuron_map_iter,parNeuron)
elif V0_type == 'random':
V = 2.0*numpy.random.random_sample(V.shape)-1.0
return V
else:
raise ValueError('V0_type must be either an array or list with 3 elements or one of the following: rest, random')
elif type(V0_type) is list:
V0 = numpy.asarray(V0_type)
else:
if type(V0_type) is numpy.ndarray:
V0 = V0_type
else:
raise ValueError('V0_type must be either an array or list with 3 elements or one of the following: rest, random')
i = 0
while i < V.shape[0]:
V[i,:] = V0.copy()
i+=1
return V
def synapse_map(i,S,par,Vpre):
# par[0] -> J, par[1] -> noise amplitude, par[2] -> 1/tau_f, par[3] -> 1/tau_g
thetaJ = par[0] + (par[1] * numpy.random.random()) if Vpre > 0.0 else 0.0
S[i,0] = (1.0 - par[2]) * S[i,0] + S[i,1]
S[i,1] = (1.0 - par[3]) * S[i,1] + thetaJ
return S
def logistic_func(u):
return u / (1 + (u if u > 0.0 else -u)) # u/(1+|u|)
def neuron_map_log(i,V,par,S,Iext):
# par[0] -> K, par[1] -> 1/T, par[2] -> d, par[3] -> l, par[4] -> xR
Vprev = V[i,0]
V[i,0] = logistic_func((V[i,0] - par[0] * V[i,1] + V[i,2] + numpy.sum(S)+Iext)*par[1])
V[i,1] = Vprev
V[i,2] = (1.0 - par[2]) * V[i,2] - par[3] * (Vprev - par[4])
return V
def neuron_map_tanh(i,V,par,S,Iext):
# par[0] -> K, par[1] -> 1/T, par[2] -> d, par[3] -> l, par[4] -> xR
Vprev = V[i,0]
V[i,0] = numpy.tanh((V[i,0] - par[0] * V[i,1] + V[i,2] + numpy.sum(S)+Iext)*par[1])
V[i,1] = Vprev
V[i,2] = (1.0 - par[2]) * V[i,2] - par[3] * (Vprev - par[4])
return V
def network_time_step(neuron_map_iter,V,parNeuron,input_list,S,presyn_neuron_list,parSyn,P_poisson):
# neuron_map_iter -> function that iterates the neuron (either neuron_map_log or neuron_map_tanh)
# V -> numpy.ndarray with shape (N,3), where N is the number of neurons (pixels), containing the membrane potential of neurons
# parNeuron -> list of six parameters that is passed to the neuron_map_iter
# input_list -> list of neighbors of each pixel, such that element i: list of neighbors (rows of S) that send input to pixel i
# S -> numpy.ndarray with shape (Nsyn,2), where Nsyn is the total number of synapses (connections between pixels), containg the synaptic current of each connection
# presyn_neuron_list -> list of presynaptic neurons (i.e. rows of V) that generates each synapse, such that pixel given by presyn_neuron_list[i] generates synapse S[i,:]
# parSyn -> list of 4 parameters that is passed to synapse_map function
# P_poisson -> probability of generating a random activation of a random pixel
if numpy.random.random() < P_poisson:
        k = numpy.random.randint(V.shape[0])
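# Illustrative sketch: the row above cuts network_time_step off right after the Poisson pixel is
# drawn. One plausible continuation, consistent with the data layout documented in the comments
# above, is sketched below. The kick amplitude Iext_kick and the update order (all neurons first,
# then all synapses) are assumptions for illustration, not taken from the original source.
def network_time_step_sketch(neuron_map_iter,V,parNeuron,input_list,S,presyn_neuron_list,parSyn,P_poisson,Iext_kick=1.0):
    Iext = numpy.zeros(V.shape[0])
    if numpy.random.random() < P_poisson:
        k = numpy.random.randint(V.shape[0]) # randomly chosen pixel receives an external activation
        Iext[k] = Iext_kick
    for i in range(V.shape[0]): # update each neuron (pixel) with the synaptic input it receives
        V = neuron_map_iter(i,V,parNeuron,S[input_list[i],0],Iext[i])
    for j in range(S.shape[0]): # update each synapse from the potential of its presynaptic pixel
        S = synapse_map(j,S,parSyn,V[presyn_neuron_list[j],0])
    return V,S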
'''
Consumption-saving model with durable good
<NAME>
'''
import numpy as np
from HARK.ConsumptionSaving.ConsGenIncProcessModel import MargValueFunc2D
from HARK.ConsumptionSaving.ConsIndShockModel import IndShockConsumerType, ConsumerSolution
import HARK.distribution
from HARK.utilities import CRRAutility, CRRAutilityP, CRRAutilityP_inv, CRRAutility_inv
from HARK.interpolation import Curvilinear2DInterp
class BabyDurableConsumerType(IndShockConsumerType):
    time_inv_ = IndShockConsumerType.time_inv_ + ['DurCost0', 'DurCost1']
    def __init__(self, cycles=1, verbose=1, quiet=False, **kwds):
        IndShockConsumerType.__init__(self, cycles=cycles, verbose=verbose, quiet=quiet, **kwds)
self.solveOnePeriod = solveBabyDurable
def update(self):
IndShockConsumerType.update(self)
self.updateDeprFacDstn()
self.updateDNrmGrid()
self.updateShockDstn()
def updateDeprFacDstn(self):
bot = self.DeprFacMean - 0.5 * self.DeprFacSpread
top = self.DeprFacMean + 0.5 * self.DeprFacSpread
        N = self.DeprFacCount
Uni_DeprFacDstn = HARK.distribution.Uniform(bot, top)
self.DeprFacDstn = Uni_DeprFacDstn.approx(N)
self.addToTimeInv('DeprFacDstn')
def updateDNrmGrid(self):
        self.DNrmGrid = np.linspace(self.DNrmMin, self.DNrmMax, self.DNrmCount)
self.addToTimeInv('DNrmGrid')
def updateShockDstn(self):
        self.ShockDstn = list()
        for j in range(0, self.T_age):
            self.ShockDstn.append(HARK.distribution.combineIndepDstns(self.IncomeDstn[j], self.DeprFacDstn))
self.addToTimeVary('ShockDstn')
def solveBabyDurable(self, solution_next, ShockDstn, LivPrb, DiscFac, CRRA, Rfree, PermGroFac, aXtraGrid, DNrmGrid,
alpha, kappa0, kappa):
'''
u: utility
uPC marginal utility with respect to C
uPD marginal utility with respect to D
uinv_C inverse utility with respect to C
uinv_D inverse utility with respect to D
uPinv_C inverse marginal utility with respect to C
uPinv_D inverse marginal utility with respect to D
'''
u = lambda C, D: CRRAutility(C, CRRA) * CRRAutility((D / C) ** alpha, CRRA)
uPC = lambda C, D: CRRAutility(D ** alpha, CRRA) * (1 - alpha) * C ** (-alpha - CRRA + alpha * CRRA)
uPD = lambda C, D: CRRAutility(C ** (1 - alpha), CRRA) * alpha * D ** (alpha - alpha * CRRA - 1)
uinv_C = lambda u, D: (CRRAutility_inv(u, CRRA) * D ** (-alpha)) ** (1 / (1 - alpha))
uinv_D = lambda u, C: (CRRAutility_inv(u, CRRA) * C ** (alpha - 1)) ** (1 / alpha)
    uPinv_C = lambda uPC, D: ((1 - CRRA) * uPC / ((1 - alpha) * D ** (alpha - alpha * CRRA))) ** (-1 / (alpha + CRRA - alpha * CRRA))
    uPinv_D = lambda uPD, C: ((1 - CRRA) * uPD / (alpha * C ** ((1 - alpha) * (1 - CRRA)))) ** (1 / (alpha - alpha * CRRA - 1))
gfunc = lambda n: kappa0 * n ** kappa
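    # Quick illustrative check (commented out; the parameter and state values are arbitrary
    # assumptions): with e.g. CRRA = 2.0 and alpha = 0.3, the inverse marginal utilities above
    # should recover their arguments,
    #   abs(uPinv_C(uPC(1.5, 0.8), 0.8) - 1.5) < 1e-9
    #   abs(uPinv_D(uPD(1.5, 0.8), 1.5) - 0.8) < 1e-9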
'''
    2. unpack ShockDstn into its component arrays: probabilities, permanent shocks,
    transitory shocks and depreciation shocks
    ShockProbs: shock probabilities
    PermshockVals: permanent shocks
    TranshockVals: transitory shocks
    DeprshockVals: depreciation shocks
'''
ShockProbs = ShockDstn.pmf
PermshockVals = ShockDstn.X[0]
TranshockVals = ShockDstn.X[1]
DeprshockVals = ShockDstn.X[2]
'''
3. Make tiled versions of the probability and shock vectors
these should have shape (aXtraGrid.size, DNrmGrid.size,ShockProbs.size)
'''
aXtraGrid_size = aXtraGrid.size
DNrmGrid_size = DNrmGrid.size
ShockProbs_size = ShockProbs.size
PermShk = np.tile(PermshockVals, aXtraGrid_size).transpose()
TranShk = np.tile(TranshockVals, aXtraGrid_size).transpose()
DeprShk = np.tile(DeprshockVals, DNrmGrid_size).transpose()
ShkProbs = np.tile(ShockProbs, ShockProbs_size).transpose()
'''
4. Make tiled version of DNrmGrid that has the same shape as the tiled shock arrays.
Also make a tiled version of aXtraGrid with the same shape
aNrm: a_t
dNrm: D_t
'''
aNrmNow = np.asarray(aXtraGrid)
TranShkCount = TranShk.size
aNrm = np.tile(aNrmNow, (TranShkCount, 1))
dNrmNow = np.asarray(DNrmGrid)
DeprShkCount = DeprShk.size
dNrm = np.tile(dNrmNow, (DeprShkCount, 1))
'''
5.Using the intertemporal transition equations, make arrays named mNrmNextArray
and dNrmNextArray representing realizations of m_t+1 and d_t+1 from each end-of-period state
when each different shock combination arrives
'''
mNrmNextArray = Rfree / (PermGroFac * PermShk) * aNrm + TranShk
dNrmNextArray = (1 - DeprShk) * dNrm / (PermGroFac * PermShk)
'''
    6. Make arrays called dvdmNextArray and dvddNextArray by evaluating next period's marginal value functions
at the future state realizations arrays. These functions can be found in solution_next.dvdmFunc and
solution_next.dvddFunc
'''
dvdmNextArray = solution_next.dvdmFunc
dvddNextArray = solution_next.dvddFunc
'''
    7. Calculate the end-of-period expected marginal value arrays using the tiled arrays that have been
    constructed. You will probably want to multiply by the shock probabilities and sum along axis=2. These
    arrays should be called EndOfPrddvda and EndOfPrddvdD.
'''
EndOfPrddvda = Rfree * DiscFac * LivPrb * (PermGroFac * PermShk) ** (-CRRA) * np.sum(
dvdmNextArray(mNrmNextArray, dNrmNextArray) * ShkProbs, axis=2)
EndOfPrddvdD = Rfree * DiscFac * LivPrb * (1 - DeprShk) * (PermGroFac * PermShk) ** (-CRRA) * np.sum(
dvddNextArray(mNrmNextArray, dNrmNextArray) * ShkProbs, axis=2)
'''
    8. Algebraically solve FOC-c for c_t, and use EndOfPrddvda to calculate c_t for each end-of-period
state (a_t, D_t). Call the result cNrmArray
'''
    cNrmArray = (EndOfPrddvda * dNrm ** (alpha * CRRA - alpha) / (1 - alpha)) ** (1 / ((1 - alpha) * (1 - CRRA) - 1))
'''
    9. Algebraically solve FOC-n for n_t, and use EndOfPrddvda, EndOfPrddvdD, and cNrmArray to calculate
n_t for each end-of-period state (a_t, D_t). Call the result nNrmArray
'''
nNrmArray = ((alpha * cNrmArray ** ((1 - alpha) * (1 - CRRA)) * dNrm ** (alpha - CRRA * alpha - 1) +
EndOfPrddvdD) / (EndOfPrddvda * kappa * kappa0)) ** (1 / (kappa - 1))
'''
10. Use the inverted intra-period transition equations to calculate the associate endogenous
gridpoints (m_t, d_t), calling the resulting arrays mNrmArray and dNrmArray
'''
mNrmArray = aNrm + cNrmArray + gfunc(nNrmArray)
dNrmArray = dNrm - nNrmArray
'''
11. The envelope condition for m_t does not take its standard form here.
Instead, make an array called dvNvrsdmArray that equals the inverse marginal utility function
evaluated at EndOfPrddvda
Envelope condition: v'(m) = u'(c(m))
'''
dvNvrsdmArray = (EndOfPrddvda * dNrm ** (CRRA - 1) / (1 - alpha)) ** (1 / (-alpha - CRRA + alpha * CRRA)) + aNrm + gfunc(nNrmArray)
'''
12.The envelope condition for d_t: v_d(m_t, d_t) =u_d(c_t, D_t) + EndOfPrddvdD
Run the array through the inverse marginal utility function, calling dvdNvrsddArray
'''
dvNvrsddArray = uPD(dvNvrsdmArray, dNrm) + EndOfPrddvdD
'''
    13. Concatenate a column of zeros of length DNrmGrid.size onto the left side of mNrmArray,
    cNrmArray, and nNrmArray, and a column of DNrmGrid onto dNrmArray
'''
    mNrmArray = np.insert(mNrmArray, 0, 0.0, axis=1)
cNrmArray = | np.insert(cNrmArray, 0, 0.0, axis=1) | numpy.insert |
import numpy as np
import scipy.misc
import scipy.ndimage
import scipy.stats
import scipy.io
from vmaf.config import VmafConfig
from vmaf.tools.misc import index_and_value_of_min
__copyright__ = "Copyright 2016-2017, Netflix, Inc."
__license__ = "Apache, Version 2.0"
def _gauss_window(lw, sigma):
sd = float(sigma)
lw = int(lw)
weights = [0.0] * (2 * lw + 1)
weights[lw] = 1.0
sum = 1.0
sd *= sd
for ii in range(1, lw + 1):
tmp = np.exp(-0.5 * float(ii * ii) / sd)
weights[lw + ii] = tmp
weights[lw - ii] = tmp
sum += 2.0 * tmp
for ii in range(2 * lw + 1):
weights[ii] /= sum
return weights
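# Illustrative usage (commented out; not part of the original module): _gauss_window returns a
# symmetric Gaussian kernel of length 2*lw + 1 normalized to unit sum, comparable to the taps
# scipy.ndimage uses for the same sigma and truncation radius.
#
#   w = _gauss_window(9, 3.0)           # 19 taps for sigma = 3
#   assert abs(sum(w) - 1.0) < 1e-12    # normalized to unit sum
#   assert w == w[::-1]                 # symmetric about the centre tap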
def _hp_image(image):
extend_mode = 'reflect'
image = np.array(image).astype(np.float32)
w, h = image.shape
mu_image = | np.zeros((w, h)) | numpy.zeros |
"""
Reads either pickle or mat files and plots the results.
-- <EMAIL>
-- <EMAIL>
Usage:
python plotting.py --filelist <file containing list of pickle or mat file paths>
python plotting.py --file <pickle or mat file path>
"""
from __future__ import division
# pylint: disable=invalid-name
# pylint: disable=redefined-builtin
# pylint: disable=too-many-locals
import os
import pickle
import argparse
import warnings
import matplotlib.pyplot as plt
import matplotlib
from scipy.io import loadmat
import numpy as np
matplotlib.rcParams['mathtext.fontset'] = 'custom'
matplotlib.rcParams['mathtext.rm'] = 'Bitstream Vera Sans'
matplotlib.rcParams['mathtext.it'] = 'Bitstream Vera Sans:italic'
matplotlib.rcParams['mathtext.bf'] = 'Bitstream Vera Sans:bold'
matplotlib.rcParams['mathtext.fontset'] = 'stix'
matplotlib.rcParams['font.family'] = 'STIXGeneral'
def rgba(red, green, blue, a):
'''rgba: generates matplotlib compatible rgba values from html-style rgba values
'''
return (red / 255.0, green / 255.0, blue / 255.0, a)
def hex(hexstring):
'''hex: generates matplotlib-compatible rgba values from html-style hex colors
'''
if hexstring[0] == '#':
hexstring = hexstring[1:]
red = int(hexstring[:2], 16)
green = int(hexstring[2:4], 16)
blue = int(hexstring[4:], 16)
return rgba(red, green, blue, 1.0)
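# Example (illustrative values): both helpers return the same matplotlib-compatible RGBA tuple.
#
#   hex('#336699')          # -> (0.2, 0.4, 0.6, 1.0)
#   rgba(51, 102, 153, 1.0) # -> (0.2, 0.4, 0.6, 1.0)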
def transparent(red, green, blue, _, opacity=0.5):
'''transparent: converts a rgba color to a transparent opacity
'''
return (red, green, blue, opacity)
def read_results(file_path):
"""reads experiment result data from a '.m' file
:file_path: the path to the file
:returns: a dataframe object with all the various pieces of data
"""
if file_path.endswith('.mat'):
results = loadmat(file_path)
elif file_path.endswith('.p'):
with open(file_path, 'rb') as pickleF:
res = pickle.load(pickleF)
pickleF.close()
results = {}
for key in list(res.keys()):
if not hasattr(res[key], '__len__'):
results[key] = np.array(res[key])
elif isinstance(res[key], str):
results[key] = np.array(res[key])
elif isinstance(res[key], list):
results[key] = np.array(res[key])
elif isinstance(res[key], np.ndarray):
val = np.zeros(res[key].shape, dtype=res[key].dtype)
for idx, x in np.ndenumerate(res[key]):
if isinstance(x, list):
val[idx] = np.array(x)
else:
val[idx] = x
results[key] = val
else:
results[key] = res[key]
else:
raise ValueError('Wrong file format. It has to be either mat or pickle file')
return results
def get_plot_info(
meth_curr_opt_vals,
cum_costs,
meth_costs,
grid_pts,
outlier_frac,
init_opt_vals
):
"""generates means and standard deviation for the method's output
"""
num_experiments = len(meth_curr_opt_vals)
with warnings.catch_warnings():
warnings.simplefilter(action='ignore', category=FutureWarning)
idx = np.where(meth_curr_opt_vals == '-')
if idx[0].size != 0:
num_experiments = idx[0][0]
    outlier_low_idx = max(np.round(outlier_frac * num_experiments)
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import astropy.units as u
from p_winds import hydrogen, helium, tools, parker
# HD 209458 b
R_pl = (1.39 * u.jupiterRad).value
M_pl = (0.73 * u.jupiterMass).value
m_dot = (5E10 * u.g / u.s).value
T_0 = (9E3 * u.K).value
h_fraction = 0.90
he_fraction = 1 - h_fraction
he_h_fraction = he_fraction / h_fraction
average_f_ion = 0.0
average_mu = (1 + 4 * he_h_fraction) / (1 + he_h_fraction + average_f_ion)
# In the initial state, the fraction of singlet and triplet helium is 1E-6, and
# the optical depths are null
initial_state = np.array([1.0, 0.0])
r = np.logspace(0, np.log10(20), 100) # Radius in unit of planetary radii
data_test_url = 'https://raw.githubusercontent.com/ladsantos/p-winds/main/data/solar_spectrum_scaled_lambda.dat'
# Let's test if the code is producing reasonable outputs. The ``ion_fraction()``
# function for HD 209458 b should produce a profile with an ion fraction of
# approximately one near the planetary surface, and approximately 4E-4 in the
# outer layers.
def test_population_fraction_spectrum():
units = {'wavelength': u.angstrom, 'flux': u.erg / u.s / u.cm ** 2 /
u.angstrom}
spectrum = tools.make_spectrum_from_file(data_test_url, units)
# First calculate the hydrogen ion fraction
f_r, mu_bar = hydrogen.ion_fraction(r, R_pl, T_0, h_fraction, m_dot, M_pl,
average_mu, spectrum_at_planet=spectrum,
relax_solution=True, exact_phi=True,
return_mu=True)
# Calculate the structure
vs = parker.sound_speed(T_0, mu_bar) # Speed of sound (km/s, assumed to be
# constant)
rs = parker.radius_sonic_point(M_pl,
vs) # Radius at the sonic point (jupiterRad)
rhos = parker.density_sonic_point(m_dot, rs,
vs) # Density at the sonic point (g/cm^3)
# Some useful arrays for the modeling
r_array = r * R_pl / rs # Radius in unit of radius at
# sonic point
v_array, rho_array = parker.structure(r_array)
# Now calculate the population of helium
f_he_1_odeint, f_he_3_odeint = helium.population_fraction(
r, v_array, rho_array, f_r,
R_pl, T_0, h_fraction, vs, rs, rhos, spectrum_at_planet=spectrum,
initial_state=initial_state, relax_solution=True)
# Assert if all values of the fractions are between 0 and 1
n_neg = len(np.where(f_he_1_odeint < 0)[0]) + \
len( | np.where(f_he_3_odeint < 0) | numpy.where |
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 9 19:51:51 2020
@author: Philipe_Leal
"""
import pandas as pd
import numpy as np
import cartopy.crs as ccrs
import matplotlib.pyplot as plt
import os, sys
import matplotlib.ticker as mticker
pd.set_option('display.max_rows', 5000)
pd.set_option('display.max_columns', 5000)
import xarray as xr
def get_k_nearest_values_from_vector(arr, k):
idx = np.argsort(arr)
if k<0:
return arr, idx
if k>0:
return arr[idx[:k]], idx
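# Example (illustrative): the helper returns the k smallest values together with the full argsort
# order of the input vector.
#
#   get_k_nearest_values_from_vector(np.array([3.0, 1.0, 2.0]), 2)
#   # -> (array([1., 2.]), array([1, 2, 0]))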
def apply_kernel_over_distance_array(dd, p):
dd = dd**(-p) # dd = weight = 1.0 / (dd**power)
dd = dd/np.sum(dd) # normalized weights
return dd
def get_interpolatedValue(xd,yd,Vd, xpp,ypp, p,smoothing, k=4):
dx = xpp - xd; dy = ypp - yd
dd = np.sqrt(dx**p + dy**p + smoothing**p) # distance
dd = np.where(np.abs(dd) <10**(-3), 10**(-3), dd) # limit too small distances
dd_Nearest, idx = get_k_nearest_values_from_vector(dd, k) # getting k nearest
dd_Nearest = apply_kernel_over_distance_array(dd_Nearest, p)
Vd[np.isnan(Vd)] = 0 # Setting nans to zero
if k>0:
K_nearest_Values = Vd[idx[:k]]
else:
K_nearest_Values = Vd
print('dd_Nearest shape', dd_Nearest.shape)
print('K_nearest_Values shape', K_nearest_Values.shape)
Vi = np.dot(dd_Nearest,K_nearest_Values) # interpolated value = scalar product <weight, value>
return Vi
def interpolateDistInvers(xd,yd,Vd, xp,yp, power,smoothing, k):
nx = len(xp)
VI = np.zeros_like(xp) # initialize the output with zero
for i in range(nx): # run through the output grid
VI[i] = get_interpolatedValue(xd,yd,Vd, xp[i],yp[i], power, smoothing, k)
return VI.T
def run_InvDist_interpolation(output_grid,
input_grid,
input_grid_values,
power=2.0,
smoothing=0.1,
k=4):
xp, yp = output_grid.T
xs, ys = input_grid.T
#---- run through the grid and get the interpolated value at each point
Vp = interpolateDistInvers(xs,ys,input_grid_values, xp,yp, power,smoothing, k)
return Vp
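# Illustrative usage sketch (the sample points and values below are made up): interpolate three
# scattered samples onto two query points. Both grids are (N, 2) arrays of (x, y) pairs, the
# values are 1-D, and the result has one interpolated value per output point, bounded by the
# range of the input values.
#
#   input_grid = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])
#   input_vals = np.array([1.0, 2.0, 3.0])
#   output_grid = np.array([[0.5, 0.5], [0.1, 0.1]])
#   Vp = run_InvDist_interpolation(output_grid, input_grid, input_vals,
#                                  power=2.0, smoothing=0.1, k=3)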
def xr_to_2D_array(da, xcor='xc', ycor='yc'):
data = da.data.ravel()
xcor = da.coords[xcor].data.ravel()
ycor = da.coords[ycor].data.ravel()
print(xcor.shape, ycor.shape, data.shape)
input_grid = np.stack([xcor, ycor], axis=1)
return input_grid, data
def create_output_grid(xmin, xmax, xres, ymin, ymax, yres):
Xcoords = | np.arange(xmin,xmax, xres) | numpy.arange |
import numpy as np
import cv2
import warnings
warnings.filterwarnings('ignore')
import matplotlib
matplotlib.use('Qt5Agg')
import matplotlib.pyplot as plt
import os
import scipy
import imageio
from scipy.ndimage import gaussian_filter1d, gaussian_filter
from sklearn import linear_model
from sklearn.model_selection import train_test_split
from matplotlib.colors import ListedColormap
import statsmodels.api as sm
import pandas as pd
from statsmodels.stats.anova import AnovaRM
from helper_code.registration_funcs import model_arena, get_arena_details
from helper_code.processing_funcs import speed_colors
from helper_code.analysis_funcs import *
from important_code.shuffle_test import permutation_test, permutation_correlation
plt.rcParams.update({'font.size': 30})
def plot_traversals(self):
''' plot all traversals across the arena '''
# initialize parameters
sides = ['back', 'front']
# sides = ['back']
types = ['spontaneous'] #, 'evoked']
fast_color = np.array([.5, 1, .5])
slow_color = np.array([1, .9, .9])
edge_vector_color = np.array([1, .95, .85])
homing_vector_color = np.array([.725, .725, .725])
edge_vector_color = np.array([.98, .9, .6])**4
homing_vector_color = np.array([0, 0, 0])
non_escape_color = np.array([0,0,0])
condition_colors = [[.5,.5,.5], [.3,.5,.8], [0,.7,1]]
time_thresh = 15 #20 for ev comparison
speed_thresh = 2
p = 0
HV_cutoff = .681 # .5 for exploratory analysis
# initialize figures
fig, fig2, fig3, ax, ax2, ax3 = initialize_figures_traversals(self) #, types = len(types)+1)
# initialize lists for stats
all_data = []
all_conditions = []
edge_vector_time_all = np.array([])
# loop over spontaneous vs evoked
for t, type in enumerate(types):
# loop over experiments and conditions
for c, (experiment, condition) in enumerate(zip(self.experiments, self.conditions)):
strategies = [0, 0, 0]
# extract experiments from nested list
sub_experiments, sub_conditions = extract_experiments(experiment, condition)
# initialize the arena
arena, arena_color, scaling_factor, obstacle = initialize_arena(self, sub_experiments, sub_conditions)
path_ax, path_fig = get_arena_plot(obstacle, sub_conditions, sub_experiments)
# initialize edginess
all_traversals_edgy = {}
all_traversals_homy = {}
proportion_edgy = {}
for s in sides:
all_traversals_edgy[s] = []
all_traversals_homy[s] = []
proportion_edgy[s] = []
m = 0
# loop over each experiment and condition
for e, (experiment, condition) in enumerate(zip(sub_experiments, sub_conditions)):
# loop over each mouse in the experiment
for i, mouse in enumerate(self.analysis[experiment][condition]['back traversal']):
mouse_data = []
print(mouse)
# loop over back and front sides
for s, start in enumerate(sides):
if start == 'front' and type == 'evoked': continue
# find all the paths across the arena
traversal = self.analysis[experiment][condition][start + ' traversal'][mouse]
# get the duration of those paths
# duration = traversal[t*5+3]
if traversal:
if traversal[t*5]:
x_end_loc = np.array([x_loc[-1] * scaling_factor for x_loc in np.array(traversal[t * 5 + 0])[:, 0]])
if traversal[4] < 10: continue
number_of_edge_vectors = np.sum((np.array(traversal[t*5+3]) < speed_thresh) * \
(np.array(traversal[t*5+2]) > HV_cutoff) * \
# (abs(x_end_loc - 50) < 30) * \
(np.array(traversal[t*5+1]) < time_thresh*30*60) ) / min(traversal[4], time_thresh) * time_thresh
# print(traversal[4])
number_of_homing_vectors = np.sum((np.array(traversal[t*5+3]) < speed_thresh) * \
(np.array(traversal[t*5+2]) < HV_cutoff) * \
# (abs(x_end_loc - 50) < 30) * \
(np.array(traversal[t*5+1]) < time_thresh*30*60) )/ min(traversal[4], time_thresh) * time_thresh
all_traversals_edgy[start].append( number_of_edge_vectors )
all_traversals_homy[start].append(number_of_homing_vectors)
# print(number_of_edge_vectors)
mouse_data.append(number_of_edge_vectors)
# get the time of edge vectors
if condition == 'obstacle' and 'wall' in experiment:
edge_vector_idx = ( (np.array(traversal[t * 5 + 3]) < speed_thresh) * (np.array(traversal[t * 5 + 2]) > HV_cutoff) )
edge_vector_time = np.array(traversal[t*5+1])[edge_vector_idx] / 30 / 60
edge_vector_time_all = np.concatenate((edge_vector_time_all, edge_vector_time))
# prop_edgy = np.sum((np.array(traversal[t*5 + 3]) < speed_thresh) * \
# (np.array(traversal[t*5 + 2]) > HV_cutoff) * \
# (np.array(traversal[t * 5 + 1]) < time_thresh * 30 * 60)) / \
# np.sum((np.array(traversal[t * 5 + 3]) < speed_thresh) * \
# (np.array(traversal[t * 5 + 1]) < time_thresh * 30 * 60))
else:
all_traversals_edgy[start].append(0)
all_traversals_homy[start].append(0)
# if np.isnan(prop_edgy): prop_edgy = .5
# prop_edgy = prop_edgy / .35738
# proportion_edgy[start].append(prop_edgy)
traversal_coords = np.array(traversal[t*5+0])
pre_traversal = np.array(traversal[10])
else:
# all_traversals_edginess[start].append(0)
continue
m += .5
# loop over all paths
show = False
if show and traversal:
for trial in range(traversal_coords.shape[0]):
# make sure it qualifies
if traversal[t * 5 + 3][trial] > speed_thresh: continue
if traversal[t*5+1][trial] > time_thresh*30*60: continue
if not len(pre_traversal[0][0]): continue
# if abs(traversal_coords[trial][0][-1]*scaling_factor - 50) > 30: continue
# downsample to get even coverage
# if c == 2 and np.random.random() > (59 / 234): continue
# if c == 1 and np.random.random() > (59 / 94): continue
if traversal[t*5+2][trial]> HV_cutoff: plot_color = edge_vector_color
else: plot_color = homing_vector_color
display_traversal(scaling_factor, traversal_coords, pre_traversal, trial, path_ax, plot_color)
if mouse_data:
# all_data.append(mouse_data)
all_conditions.append(c)
# save image
path_fig.savefig(os.path.join(self.summary_plots_folder, self.labels[c] + ' traversals.eps'), format='eps', bbox_inches='tight', pad_inches=0)
# plot the data
if type == 'spontaneous' and len(sides) > 1:
plot_number_edgy = np.array(all_traversals_edgy['front']).astype(float) + np.array(all_traversals_edgy['back']).astype(float)
plot_number_homy = np.array(all_traversals_homy['front']).astype(float) + np.array(all_traversals_homy['back']).astype(float)
print(np.sum(plot_number_edgy + plot_number_homy))
# plot_proportion_edgy = (np.array(proportion_edgy['front']).astype(float) + np.array(proportion_edgy['back']).astype(float)) / 2
plot_proportion_edgy = plot_number_edgy / (plot_number_edgy + plot_number_homy)
all_data.append(plot_number_edgy)
else:
plot_number_edgy = np.array(all_traversals_edgy[sides[0]]).astype(float)
plot_number_homy = np.array(all_traversals_homy[sides[0]]).astype(float)
plot_proportion_edgy = plot_number_edgy / (plot_number_edgy + plot_number_homy)
# plot_proportion_edgy = np.array(proportion_edgy[sides[0]]).astype(float)
for i, (plot_data, ax0) in enumerate(zip([plot_number_edgy, plot_number_homy], [ax, ax3])): #, plot_proportion_edgy , ax2
print(plot_data)
print(np.sum(plot_data))
# plot each trial
# scatter_axis = scatter_the_axis( (p*4/3+.5/3), plot_data)
ax0.scatter(np.ones_like(plot_data)* (p*4/3+.5/3)* 3 - .2, plot_data, color=[0,0,0, .4], edgecolors='none', s=25, zorder=99)
# do kde
# if i==0: bw = .5
# else: bw = .02
bw = .5
kde = fit_kde(plot_data, bw=bw)
plot_kde(ax0, kde, plot_data, z=4 * p + .8, vertical=True, normto=.3, color=[.5, .5, .5], violin=False, clip=True)
ax0.plot([4 * p + -.2, 4 * p + -.2], [np.percentile(plot_data, 25), np.percentile(plot_data, 75)], color = [0,0,0])
ax0.plot([4 * p + -.4, 4 * p + -.0], [np.percentile(plot_data, 50), np.percentile(plot_data, 50)], color = [1,1,1], linewidth = 2)
# else:
# # kde = fit_kde(plot_data, bw=.03)
# # plot_kde(ax0, kde, plot_data, z=4 * p + .8, vertical=True, normto=1.2, color=[.5, .5, .5], violin=False, clip=True)
# bp = ax0.boxplot([plot_data, [0, 0]], positions=[4 * p + -.2, -10], showfliers=False, zorder=99)
# ax0.set_xlim([-1, 4 * len(self.experiments) - 1])
p+=1
# plot a stacked bar of strategies
# fig3 = plot_strategies(strategies, homing_vector_color, non_escape_color, edge_vector_color)
# fig3.savefig(os.path.join(self.summary_plots_folder, 'Traversal categories - ' + self.labels[c] + '.png'), format='png', bbox_inches = 'tight', pad_inches = 0)
# fig3.savefig(os.path.join(self.summary_plots_folder, 'Traversal categories - ' + self.labels[c] + '.eps'), format='eps', bbox_inches = 'tight', pad_inches = 0)
# make timing hist
plt.figure()
bins = np.arange(0,22.5,2.5)
plt.hist(edge_vector_time_all, bins = bins, color = [0,0,0], weights = np.ones_like(edge_vector_time_all) / 2.5 / m) #condition_colors[c])
plt.ylim([0,2.1])
plt.show()
# # save the plot
fig.savefig(os.path.join(self.summary_plots_folder, 'Traversal # EVS comparison.png'), format='png', bbox_inches='tight', pad_inches=0)
fig.savefig(os.path.join(self.summary_plots_folder, 'Traversal # EVS comparison.eps'), format='eps', bbox_inches='tight', pad_inches=0)
fig3.savefig(os.path.join(self.summary_plots_folder, 'Traversal # HVS comparison.png'), format='png', bbox_inches='tight', pad_inches=0)
fig3.savefig(os.path.join(self.summary_plots_folder, 'Traversal # HVS comparison.eps'), format='eps', bbox_inches='tight', pad_inches=0)
group_A = [[d] for d in all_data[0]]
group_B = [[d] for d in all_data[2]]
permutation_test(group_A, group_B, iterations = 100000, two_tailed = False)
group_A = [[d] for d in all_data[2]]
group_B = [[d] for d in all_data[1]]
permutation_test(group_A, group_B, iterations = 10000, two_tailed = True)
# fig2.savefig(os.path.join(self.summary_plots_folder, 'Traversal proportion edgy.png'), format='png', bbox_inches='tight', pad_inches=0)
# fig2.savefig(os.path.join(self.summary_plots_folder, 'Traversal proportion edgy.eps'), format='eps', bbox_inches='tight', pad_inches=0)
plt.show()
def plot_speed_traces(self, speed = 'absolute'):
''' plot the speed traces '''
max_speed = 60
# loop over experiments and conditions
for c, (experiment, condition) in enumerate(zip(self.experiments, self.conditions)):
# extract experiments from nested list
sub_experiments, sub_conditions = extract_experiments(experiment, condition)
# get the number of trials
number_of_trials = get_number_of_trials(sub_experiments, sub_conditions, self.analysis)
number_of_mice = get_number_of_mice(sub_experiments, sub_conditions, self.analysis)
RT, end_idx, scaling_factor, speed_traces, subgoal_speed_traces, time, time_axis, trial_num = \
initialize_variables(number_of_trials, self,sub_experiments)
# create custom colormap
colormap = speed_colormap(scaling_factor, max_speed, n_bins=256, v_min=0, v_max=max_speed)
# loop over each experiment and condition
for e, (experiment, condition) in enumerate(zip(sub_experiments, sub_conditions)):
# loop over each mouse
for i, mouse in enumerate(self.analysis[experiment][condition]['speed']):
# control analysis
if self.analysis_options['control'] and not mouse=='control': continue
if not self.analysis_options['control'] and mouse=='control': continue
# loop over each trial
for trial in range(len(self.analysis[experiment][condition]['speed'][mouse])):
if trial > 2: continue
trial_num = fill_in_trial_data(RT, condition, end_idx, experiment, mouse, scaling_factor, self,
speed_traces, subgoal_speed_traces, time, trial, trial_num)
# print some useful metrics
print_metrics(RT, end_idx, number_of_mice, number_of_trials)
# put the speed traces on the plot
fig = show_speed_traces(colormap, condition, end_idx, experiment, number_of_trials, speed, speed_traces, subgoal_speed_traces, time_axis, max_speed)
# save the plot
fig.savefig(os.path.join(self.summary_plots_folder,'Speed traces - ' + self.labels[c] + '.png'), format='png', bbox_inches = 'tight', pad_inches = 0)
fig.savefig(os.path.join(self.summary_plots_folder,'Speed traces - ' + self.labels[c] + '.eps'), format='eps', bbox_inches = 'tight', pad_inches = 0)
plt.show()
print('done')
def plot_escape_paths(self):
''' plot the escape paths '''
# initialize parameters
edge_vector_color = [np.array([1, .95, .85]), np.array([.98, .9, .6])**4]
homing_vector_color = [ np.array([.725, .725, .725]), np.array([0, 0, 0])]
non_escape_color = np.array([0,0,0])
fps = 30
escape_duration = 18 #6 #9 for food # 18 for U
min_distance_to_shelter = 30
HV_cutoff = 0.681 #.75 #.7
# initialize all data for stats
all_data = [[], [], [], []]
all_conditions = []
# loop over experiments and conditions
for c, (experiment, condition) in enumerate(zip(self.experiments, self.conditions)):
# extract experiments from nested list
sub_experiments, sub_conditions = extract_experiments(experiment, condition)
# initialize the arena
arena, arena_color, scaling_factor, obstacle = initialize_arena(self, sub_experiments, sub_conditions)
# more arena stuff for this analysis type
arena_reference = arena_color.copy()
arena_color[arena_reference == 245] = 255
get_arena_details(self, experiment=sub_experiments[0])
shelter_location = [s / scaling_factor / 10 for s in self.shelter_location]
# initialize strategy array
strategies = np.array([0,0,0])
path_ax, path_fig = get_arena_plot(obstacle, sub_conditions, sub_experiments)
# loop over each experiment and condition
for e, (experiment, condition) in enumerate(zip(sub_experiments, sub_conditions)):
if 'void' in experiment or 'dark' in experiment or ('off' in experiment and condition == 'no obstacle') or 'quick' in experiment:
escape_duration = 18
elif 'food' in experiment:
escape_duration = 9
else:
escape_duration = 12
# loop over each mouse
for i, mouse in enumerate(self.analysis[experiment][condition]['speed']):
print(mouse)
# control analysis
if self.analysis_options['control'] and not mouse=='control': continue
if not self.analysis_options['control'] and mouse=='control': continue
# color based on visual vs tactile obst avoidance
# if mouse == 'CA7190' or mouse == 'CA3210' or mouse == 'CA3155' or mouse == 'CA8100':
# edge_vector_color = [np.array([.6, .4, .99]),np.array([.6, .4, .99])]
# homing_vector_color = [np.array([.6, .4, .99]),np.array([.6, .4, .99])]
# else:
# edge_vector_color = [np.array([.8, .95, 0]),np.array([.8, .95, 0])]
# homing_vector_color = [np.array([.8, .95, 0]),np.array([.8, .95, 0])]
# show escape paths
show_escape_paths(HV_cutoff, arena, arena_color, arena_reference, c, condition, edge_vector_color, escape_duration, experiment, fps,
homing_vector_color, min_distance_to_shelter, mouse, non_escape_color, scaling_factor, self, shelter_location, strategies, path_ax,
determine_strategy = False) #('dark' in experiment and condition=='obstacle'))
# save image
# scipy.misc.imsave(os.path.join(self.summary_plots_folder, 'Escape paths - ' + self.labels[c] + '.png'), arena_color[:,:,::-1])
imageio.imwrite(os.path.join(self.summary_plots_folder, 'Escape paths - ' + self.labels[c] + '.png'), arena_color[:,:,::-1])
path_fig.savefig(os.path.join(self.summary_plots_folder, 'Escape plot - ' + self.labels[c] + '.png'), format='png', bbox_inches='tight', pad_inches=0)
path_fig.savefig(os.path.join(self.summary_plots_folder, 'Escape plot - ' + self.labels[c] + '.eps'), format='eps', bbox_inches='tight', pad_inches=0)
# plot a stacked bar of strategies
fig = plot_strategies(strategies, homing_vector_color, non_escape_color, edge_vector_color)
fig.savefig(os.path.join(self.summary_plots_folder, 'Escape categories - ' + self.labels[c] + '.png'), format='png', bbox_inches = 'tight', pad_inches = 0)
fig.savefig(os.path.join(self.summary_plots_folder, 'Escape categories - ' + self.labels[c] + '.eps'), format='eps', bbox_inches = 'tight', pad_inches = 0)
plt.show()
print('escape')
# strategies = np.array([4,5,0])
# fig = plot_strategies(strategies, homing_vector_color, non_escape_color, edge_vector_color)
# plt.show()
# fig.savefig(os.path.join(self.summary_plots_folder, 'Trajectory by previous edge-vectors 2.png'), format='png', bbox_inches='tight', pad_inches=0)
# fig.savefig(os.path.join(self.summary_plots_folder, 'Trajectory by previous edge-vectors 2.eps'), format='eps', bbox_inches='tight', pad_inches=0)
# group_A = [[0],[1],[0,0,0],[0,0],[0,1],[1,0],[0,0,0]]
# group_B = [[1,0,0],[0,0,0,0],[0,0,0],[1,0,0],[0,0,0]]
# permutation_test(group_B, group_A, iterations = 10000, two_tailed = False)
obstacle = [[0],[1],[0,0,0],[0,0],[0,1],[1],[0,0,0], [1]]
# obstacle_exp = [[0,1],[0,0,0,0,1],[0,1],[0]]
open_field = [[1,0,0,0,0],[0,0,0,0,0],[0,0,0,0],[1,0,0,0,0,0],[0,0,0,0,0,0],[0,0,0,0,0,0,0,0]]
# U_shaped = [[0,1],[1,1], [1,1], [0,0,1], [0,0,0], [0], [1], [0], [0,1], [0,1,0,0], [0,0,0]]
# permutation_test(open_field, obstacle, iterations = 10000, two_tailed = False)
# do same edgy homing then stop to both
obstacle = [[0],[1],[0,0,0],[0,0],[0,1],[1],[0,0,0], [1], [1], [0,0,0]]
open_field = [[1],[0,0,0],[0,0,0],[1,0,0],[0,0,0],[0,0,1]] #stop at 3 trials
# do same edgy homing then stop to both --> exclude non escapes
obstacle = [[0],[1],[0,0,0],[0],[0,1],[1],[0,0,0], [1], [1], [0,0,0]]
open_field = [[1],[0,0],[0,0,0],[1,0,0],[0,0,0],[0,1]] #stop at 3 trials
def plot_edginess(self):
# initialize parameters
fps = 30
escape_duration = 12 #9 #6
HV_cutoff = .681 #.681
ETD = 10 #10
traj_loc = 40
edge_vector_color = np.array([.98, .9, .6])**5
edge_vector_color = np.array([.99, .94, .6]) ** 3
# edge_vector_color = np.array([.99, .95, .6]) ** 5
homing_vector_color = np.array([0, 0, 0])
# homing_vector_color = np.array([.85, .65, .8])
# edge_vector_color = np.array([.65, .85, .7])
# colors for diff conditions
colors = [np.array([.7, 0, .3]), np.array([0, .8, .5])]
colors = [np.array([.3,.3,.3]), np.array([1, .2, 0]), np.array([0, .8, .4]), np.array([0, .7, .9])]
colors = [np.array([.3, .3, .3]), np.array([1, .2, 0]), np.array([.7, 0, .7]), np.array([0, .7, .9]), np.array([0,1,0])]
# colors = [np.array([0, 0, 0]), np.array([0, 0, 0]),np.array([0, 0, 0]), np.array([0, 0, 0])]
offset = [0,.2, .2, 0]
# initialize figures
fig, fig2, fig3, fig4, _, ax, ax2, ax3 = initialize_figures(self)
# initialize all data for stats
all_data = [[],[],[],[]]
all_conditions = []
mouse_ID = []; m = 1
dist_data_EV_other_all = []
delta_ICs, delta_x_end = [], []
time_to_shelter, was_escape = [], []
repetitions = 1
for rand_select in range(repetitions):
m = -1
# loop over experiments and conditions
for c, (experiment, condition) in enumerate(zip(self.experiments, self.conditions)):
num_trials_total = 0
num_trials_escape = 0
# extract experiments from nested list
sub_experiments, sub_conditions = extract_experiments(experiment, condition)
# get the number of trials
number_of_trials = get_number_of_trials(sub_experiments, sub_conditions, self.analysis)
number_of_mice = get_number_of_mice(sub_experiments, sub_conditions, self.analysis)
t_total = 0
# initialize array to fill in with each trial's data
edginess, end_idx, time_since_down, time_to_shelter, time_to_shelter_all, prev_edginess, scaling_factor, time_in_center, trial_num, _, _, dist_to_SH, dist_to_other_SH = \
initialize_variable_edginess(number_of_trials, self, sub_experiments)
mouse_ID_trial = edginess.copy()
# loop over each experiment and condition
for e, (experiment, condition) in enumerate(zip(sub_experiments, sub_conditions)):
if 'void' in experiment or 'dark' in experiment or ('off' in experiment and condition == 'no obstacle') or 'quick' in experiment:
escape_duration = 18
elif 'food' in experiment:
escape_duration = 12
else: escape_duration = 12
# elif 'up' in experiment and 'probe' in condition:
# escape_duration = 12
# loop over each mouse
for i, mouse in enumerate(self.analysis[experiment][condition]['start time']):
m+=1
# initialize mouse data for stats
mouse_data = [[],[],[],[]]
print(mouse)
skip_mouse = False
if self.analysis_options['control'] and not mouse=='control': continue
if not self.analysis_options['control'] and mouse=='control': continue
# loop over each trial
prev_homings = []
x_edges_used = []
t = 0
for trial in range(len(self.analysis[experiment][condition]['end time'][mouse])):
trial_num += 1
# impose conditions
if 'food' in experiment:
if t > 12: continue
if condition == 'no obstacle' and self.analysis[experiment][condition]['start time'][mouse][trial] < 20: continue
num_trials_total += 1
elif 'void' in experiment:
if t > 5: continue
else:
if t>2: continue
# if trial > 2: continue
num_trials_total += 1
# if trial!=2: continue
# if 'off' in experiment and trial: continue
# if trial < 3 and 'wall down' in experiment: continue
# if condition == 'obstacle' and not 'non' in experiment and \
# self.analysis[experiment][condition]['start time'][mouse][trial] < 20: continue
# if c == 0 and not (trial > 0): continue
# if c == 1 and not (trial): continue
# if c == 2 and not (trial == 0): continue
# if trial and ('lights on off' in experiment and not 'baseline' in experiment): continue
if 'Square' in experiment:
HV_cutoff = .56
HV_cutoff = 0
y_idx = self.analysis[experiment][condition]['path'][mouse][trial][1]
if y_idx[0] * scaling_factor > 50: continue
else:
# skip certain trials
y_start = self.analysis[experiment][condition]['path'][mouse][trial][1][0] * scaling_factor
x_start = self.analysis[experiment][condition]['path'][mouse][trial][0][0] * scaling_factor
# print(y_start)
# print(x_start)
if y_start > 25: continue
if abs(x_start-50) > 30: continue
end_idx[trial_num] = self.analysis[experiment][condition]['end time'][mouse][trial]
RT = self.analysis[experiment][condition]['RT'][mouse][trial]
if np.isnan(end_idx[trial_num]) or (end_idx[trial_num] > escape_duration * fps):
# if not ('up' in experiment and 'probe' in condition and not np.isnan(RT)):
# mouse_data[3].append(0)
continue
''' check for previous edgy homings '''
# if 'dark' in experiment or True:
# num_prev_edge_vectors, x_edge = get_num_edge_vectors(self, experiment, condition, mouse, trial)
# # print(num_prev_edge_vectors)
# if num_prev_edge_vectors and c: continue
# if not num_prev_edge_vectors and not c: continue
# if num_prev_edge_vectors < 3 and (c==0): continue
# if num_prev_edge_vectors > 0 and c < 4: continue
# if t>1 and c == 2: continue
# if num_prev_edge_vectors >= 2: print('prev edgy homing'); continue
# if x_edge in x_edges_used: print('prev edgy escape'); continue
#
# print('-----------' + mouse + '--------------')
#
# if self.analysis[experiment][condition]['edginess'][mouse][trial] <= HV_cutoff:
# print(' HV ')
# else:
# print(' EDGY ')
# # edgy trial has occurred
# print('EDGY TRIAL ' + str(trial))
# x_edges_used.append(x_edge)
#
# # select only *with* prev homings
# if not num_prev_edge_vectors:
# if not x_edge in x_edges_used:
# if self.analysis[experiment][condition]['edginess'][mouse][trial] > HV_cutoff:
# x_edges_used.append(x_edge)
# continue
# print(t)
num_trials_escape += 1
# add data
edginess[trial_num] = self.analysis[experiment][condition]['edginess'][mouse][trial]
time_since_down[trial_num] = np.sqrt((x_start - 50)**2 + (y_start - 50)**2 )# self.analysis[experiment][condition]['start angle'][mouse][trial]
print(edginess[trial_num])
if 'Square' in experiment:
if edginess[trial_num] <=-.3: # and False: #.15
edginess[trial_num] = np.nan
continue
# edginess to current edge as opposed to specific edge
if (('moves left' in experiment and condition == 'no obstacle') \
or ('moves right' in experiment and condition== 'obstacle')): # and False:
if edginess[trial_num] <= -0: # and False:
edginess[trial_num] = np.nan
continue
edginess[trial_num] = edginess[trial_num] - 1
# shelter edginess
if False:
y_pos = self.analysis[experiment][condition]['path'][mouse][trial][1][:int(end_idx[trial_num])] * scaling_factor
x_pos = self.analysis[experiment][condition]['path'][mouse][trial][0][:int(end_idx[trial_num])] * scaling_factor
# get the latter phase traj
y_pos_1 = 55
y_pos_2 = 65
x_pos_1 = x_pos[np.argmin(abs(y_pos - y_pos_1))]
x_pos_2 = x_pos[np.argmin(abs(y_pos - y_pos_2))]
#where does it end up
slope = (y_pos_2 - y_pos_1) / (x_pos_2 - x_pos_1)
intercept = y_pos_1 - x_pos_1 * slope
x_pos_proj = (80 - intercept) / slope
# compared to
x_pos_shelter_R = 40 #40.5 # defined as mean of null dist
# if 'long' in self.labels[c]:
# x_pos_shelter_R += 18
# compute the metric
shelter_edginess = (x_pos_proj - x_pos_shelter_R) / 18
edginess[trial_num] = -shelter_edginess
# if condition == 'obstacle' and 'left' in experiment:edginess[trial_num] = -edginess[trial_num] # for putting conditions together
# get previous edginess #TEMPORARY COMMENT
# if not t:
# SH_data = self.analysis[experiment][condition]['prev homings'][mouse][-1]
# time_to_shelter.append(np.array(SH_data[2]))
# was_escape.append(np.array(SH_data[4]))
if False: # or True:
time_to_shelter, SR = get_prev_edginess(ETD, condition, experiment, mouse, prev_edginess, dist_to_SH, dist_to_other_SH,
scaling_factor, self, traj_loc, trial, trial_num, edginess, delta_ICs, delta_x_end)
print(prev_edginess[trial_num])
print(trial + 1)
print('')
# get time in center
# time_in_center[trial_num] = self.analysis[experiment][condition]['time exploring obstacle'][mouse][trial]
# time_in_center[trial_num] = num_PORHVs
# if num_PORHVs <= 1:
# edginess[trial_num] = np.nan
# continue
# if (prev_edginess[trial_num] < HV_cutoff and not t) or skip_mouse:
# edginess[trial_num] = np.nan
# skip_mouse = True
# continue
''' qualify by prev homings '''
# if prev_edginess[trial_num] < .4: # and c:
# edginess[trial_num] = np.nan
# prev_edginess[trial_num] = np.nan
# continue
num_prev_edge_vectors, x_edge = get_num_edge_vectors(self, experiment, condition, mouse, trial, ETD = 10)
# print(str(num_prev_edge_vectors) + ' EVs')
#
# if not num_prev_edge_vectors >= 1 and c ==0:
# edginess[trial_num] = np.nan
# t+=1
# continue
# if not num_prev_edge_vectors < 1 and c ==1:
# edginess[trial_num] = np.nan
# t+=1
# continue
# print(num_prev_edge_vectors)
# if num_prev_edge_vectors !=0 and c==3:
# edginess[trial_num] = np.nan
# t+=1
# continue
# if num_prev_edge_vectors != 1 and c == 2:
# edginess[trial_num] = np.nan
# t += 1
# continue
# if num_prev_edge_vectors != 2 and num_prev_edge_vectors != 3 and c ==1:
# edginess[trial_num] = np.nan
# t += 1
# continue
#
# if num_prev_edge_vectors < 4 and c ==0:
# edginess[trial_num] = np.nan
# t += 1
# continue
#
# print(trial + 1)
# print(prev_edginess[trial_num])
# print(edginess[trial_num])
# print('')
# print(t)
# get time since obstacle removal?
# time_since_down[trial_num] = self.analysis[experiment][condition]['start time'][mouse][trial] - self.analysis[experiment]['probe']['start time'][mouse][0]
# add data for stats
mouse_data[0].append(int(edginess[trial_num] > HV_cutoff))
mouse_data[1].append(edginess[trial_num])
mouse_data[2].append(prev_edginess[trial_num])
mouse_data[3].append(self.analysis[experiment][condition]['start time'][mouse][trial] - self.analysis[experiment][condition]['start time'][mouse][0])
mouse_ID_trial[trial_num] = m
t += 1
t_total += 1
#append data for stats
if mouse_data[0]:
all_data[0].append(mouse_data[0])
all_data[1].append(mouse_data[1])
all_data[2].append(mouse_data[2])
all_data[3].append(mouse_data[3])
all_conditions.append(c)
mouse_ID.append(m); m+= 1
else:
print(mouse)
print('0 trials')
# get prev homings
time_to_shelter_all.append(time_to_shelter)
dist_data_EV_other_all = np.append(dist_data_EV_other_all, dist_to_other_SH[edginess > HV_cutoff])
# print(t_total)
''' plot edginess by condition '''
# get the data
# data = abs(edginess)
data = edginess
plot_data = data[~np.isnan(data)]
# print(np.percentile(plot_data, 25))
# print(np.percentile(plot_data, 50))
# print(np.percentile(plot_data, 75))
# print(np.mean(plot_data > HV_cutoff))
# plot each trial
scatter_axis = scatter_the_axis(c, plot_data)
ax.scatter(scatter_axis[plot_data>HV_cutoff], plot_data[plot_data>HV_cutoff], color=edge_vector_color[::-1], s=15, zorder = 99)
ax.scatter(scatter_axis[plot_data<=HV_cutoff], plot_data[plot_data<=HV_cutoff], color=homing_vector_color[::-1], s=15, zorder = 99)
bp = ax.boxplot([plot_data, [0,0]], positions = [3 * c - .2, -10], showfliers=False, zorder=99)
plt.setp(bp['boxes'], color=[.5,.5,.5], linewidth = 2)
plt.setp(bp['whiskers'], color=[.5,.5,.5], linewidth = 2)
plt.setp(bp['medians'], linewidth=2)
ax.set_xlim([-1, 3 * len(self.experiments) - 1])
# ax.set_ylim([-.1, 1.15])
ax.set_ylim([-.1, 1.3])
#do kde
try:
if 'Square' in experiment:
kde = fit_kde(plot_data, bw=.06)
plot_kde(ax, kde, plot_data, z=3*c + .3, vertical=True, normto=.8, color=[.5,.5,.5], violin=False, clip=False, cutoff = HV_cutoff+0.0000001, cutoff_colors = [homing_vector_color[::-1], edge_vector_color[::-1]])
ax.set_ylim([-1.5, 1.5])
else:
kde = fit_kde(plot_data, bw=.04)
plot_kde(ax, kde, plot_data, z=3*c + .3, vertical=True, normto=1.3, color=[.5,.5,.5], violin=False, clip=True, cutoff = HV_cutoff, cutoff_colors = [homing_vector_color[::-1], edge_vector_color[::-1]])
except: pass
# plot the polar plot or initial trajectories
# plt.figure(fig4.number)
fig4 = plt.figure(figsize=( 5, 5))
# ax4 = plt.subplot(1,len(self.experiments),len(self.experiments) - c, polar=True)
ax4 = plt.subplot(1, 1, 1, polar=True)
plt.axis('off')
ax.margins(0, 0)
ax.xaxis.set_major_locator(plt.NullLocator())
ax.yaxis.set_major_locator(plt.NullLocator())
ax4.set_xlim([-np.pi / 2 - .1, 0])
# ax4.set_xlim([-np.pi - .1, 0])
mean_value_color = max(0, min(1, np.mean(plot_data)))
mean_value_color = np.sum(plot_data > HV_cutoff) / len(plot_data)
mean_value = np.mean(plot_data)
value_color = mean_value_color * edge_vector_color[::-1] + (1 - mean_value_color) * homing_vector_color[::-1]
ax4.arrow(mean_value + 3 * np.pi / 2, 0, 0, 1.9, color=[abs(v)**1 for v in value_color], alpha=1, width = 0.05, linewidth=2)
ax4.plot([0, 0 + 3 * np.pi / 2], [0, 2.25], color=[.5,.5,.5], alpha=1, linewidth=1, linestyle = '--')
ax4.plot([0, 1 + 3 * np.pi / 2], [0, 2.25], color=[.5,.5,.5], alpha=1, linewidth=1, linestyle = '--')
# ax4.plot([0, -1 + 3 * np.pi / 2], [0, 2.25], color=[.5, .5, .5], alpha=1, linewidth=1, linestyle='--')
scatter_axis_EV = scatter_the_axis_polar(plot_data[plot_data > HV_cutoff], 2.25, 0) #0.05
scatter_axis_HV = scatter_the_axis_polar(plot_data[plot_data <= HV_cutoff], 2.25, 0)
ax4.scatter(plot_data[plot_data > HV_cutoff] + 3 * np.pi/2, scatter_axis_EV, s = 30, color=edge_vector_color[::-1], alpha = .8, edgecolors = None)
ax4.scatter(plot_data[plot_data <= HV_cutoff] + 3 * np.pi/2, scatter_axis_HV, s = 30, color=homing_vector_color[::-1], alpha=.8, edgecolors = None)
fig4.savefig(os.path.join(self.summary_plots_folder, 'Angle comparison - ' + self.labels[c] + '.png'), format='png', transparent=True, bbox_inches='tight', pad_inches=0)
fig4.savefig(os.path.join(self.summary_plots_folder, 'Angle comparison - ' + self.labels[c] + '.eps'), format='eps', transparent=True, bbox_inches='tight', pad_inches=0)
# print(len(plot_data))
if len(plot_data) > 1 and False: # or True:
''' plot the correlation '''
# do both prev homings and time in center # np.array(time_since_down) # 'Time since removal'
for plot_data_corr, fig_corr, ax_corr, data_label in zip([prev_edginess, time_in_center], [fig2, fig3], [ax2, ax3], ['Prior homings','Exploration']): #
plot_data_corr = plot_data_corr[~np.isnan(data)]
# plot data
ax_corr.scatter(plot_data_corr, plot_data, color=colors[c], s=60, alpha=1, edgecolors=colors[c]/2, linewidth=1) #color=[.5, .5, .5] #edgecolors=[.2, .2, .2]
# do correlation
r, p = scipy.stats.pearsonr(plot_data_corr, plot_data)
print(r, p)
# do linear regression
plot_data_corr, prediction = do_linear_regression(plot_data, plot_data_corr)
# plot linear regresssion
ax_corr.plot(plot_data_corr, prediction['Pred'].values, color=colors[c], linewidth=1, linestyle='--', alpha=.7) #color=[.0, .0, .0]
ax_corr.fill_between(plot_data_corr, prediction['lower'].values, prediction['upper'].values, color=colors[c], alpha=.075) #color=[.2, .2, .2]
fig_corr.savefig(os.path.join(self.summary_plots_folder, 'Edginess by ' + data_label + ' - ' + self.labels[c] + '.png'), format='png')
fig_corr.savefig(os.path.join(self.summary_plots_folder, 'Edginess by ' + data_label + ' - ' + self.labels[c] + '.eps'), format='eps')
# test correlation and stats thru permutation test
# data_x = list(np.array(all_data[2])[np.array(all_conditions) == c])
# data_y = list(np.array(all_data[1])[np.array(all_conditions) == c])
# permutation_correlation(data_x, data_y, iterations=10000, two_tailed=False, pool_all = True)
print(num_trials_escape)
print(num_trials_total)
print(num_trials_escape / num_trials_total)
# save the plot
fig.savefig(os.path.join(self.summary_plots_folder, 'Edginess comparison.png'), format='png', bbox_inches='tight', pad_inches=0)
fig.savefig(os.path.join(self.summary_plots_folder, 'Edginess comparison.eps'), format='eps', bbox_inches='tight', pad_inches=0)
# fig5.savefig(os.path.join(self.summary_plots_folder, 'Angle dist comparison.png'), format='png', bbox_inches='tight', pad_inches=0)
# fig5.savefig(os.path.join(self.summary_plots_folder, 'Angle dist comparison.eps'), format='eps', bbox_inches='tight', pad_inches=0)
plt.show()
time_to_shelter_all = np.concatenate(list(flatten(time_to_shelter_all))).astype(float)
np.percentile(time_to_shelter_all, 25)
np.percentile(time_to_shelter_all, 75)
group_A = list(np.array(all_data[0])[np.array(all_conditions) == 2])
group_B = list(np.array(all_data[0])[np.array(all_conditions) == 3])
permutation_test(group_A, group_B, iterations = 10000, two_tailed = False)
group_A = list(np.array(all_data[1])[(np.array(all_conditions) == 1) + (np.array(all_conditions) == 2)])
group_B = list(np.array(all_data[1])[np.array(all_conditions) == 3])
permutation_test(group_A, group_B, iterations = 10000, two_tailed = False)
import pandas
df = pandas.DataFrame(data={"mouse_id": mouse_ID, "condition": all_conditions, "x-data": all_data[2], "y-data": all_data[1]})
df.to_csv("./Foraging Path Types.csv", sep=',', index=False)
group_B = list(flatten(np.array(all_data[0])[np.array(all_conditions) == 1]))
np.sum(group_B) / len(group_B)
np.percentile(abs(time_since_down[edginess < HV_cutoff]), 50)
np.percentile(abs(time_since_down[edginess < HV_cutoff]), 25)
np.percentile(abs(time_since_down[edginess < HV_cutoff]), 75)
np.percentile(abs(time_since_down[edginess > HV_cutoff]), 50)
np.percentile(abs(time_since_down[edginess > HV_cutoff]), 25)
np.percentile(abs(time_since_down[edginess > HV_cutoff]), 75)
group_A = [[d] for d in abs(time_since_down[edginess > HV_cutoff])]
group_B = [[d] for d in abs(time_since_down[edginess < HV_cutoff])]
permutation_test(group_A, group_B, iterations=10000, two_tailed=True)
WE = np.concatenate(was_escape)
TTS_spont = np.concatenate(time_to_shelter)[~WE]
TTS_escape = np.concatenate(time_to_shelter)[WE]
trials = np.array(list(flatten(all_data[3])))
edgy = np.array(list(flatten(all_data[0])))
np.mean(edgy[trials == 0])
np.mean(edgy[trials == 1])
np.mean(edgy[trials == 2])
np.mean(edgy[trials == 3])
np.mean(edgy[trials == 4])
np.mean(edgy[trials == 5])
np.mean(edgy[trials == 6])
np.mean(edgy[trials == 7])
np.mean(edgy[trials == 8])
np.mean(edgy[trials == 9])
np.mean(edgy[trials == 10])
np.mean(edgy[trials == 11])
np.mean(edgy[trials == 12])
np.mean(edgy[trials == 13])
'''
TRADITIONAL METRICS
'''
def plot_metrics_by_strategy(self):
''' plot the escape paths '''
# initialize parameters
edge_vector_color = np.array([1, .95, .85])
homing_vector_color = np.array([.725, .725, .725])
non_escape_color = | np.array([0,0,0]) | numpy.array |
# -*- coding: utf-8 -*-
import ctypes as ct
import numpy as np
import numpy.ctypeslib as ctl
import time
import copy
from .estimator import Estimator
from .lib import NullableFloatArrayType
class EstimQueue_Results:
def __init__(self, param_names, sampleshape, estim, diag, crlb,
iterations, chisq, roipos, samples, ids):
self.estim = estim
self.sampleshape = sampleshape
self.diagnostics = diag
self.crlb = crlb
self.roipos = roipos
self.iterations = iterations
self.chisq = chisq
self.ids = ids
self.samples = samples
self.param_names = param_names
def CRLB(self):
return self.crlb
def SortByID(self, isUnique=False):
        if isUnique:
            # ids are assumed to form a permutation of 0..N-1; build the inverse permutation
            # (order[self.ids[i]] = i) so Filter(order) reorders the results into ascending id order
            order = np.arange(len(self.ids))
            order[self.ids] = order*1
else:
order = np.argsort(self.ids)
self.Filter(order)
return order
def Filter(self, indices):
if indices.dtype == bool:
indices = np.nonzero(indices)[0]
if len(indices) != len(self.ids):
print(f"Removing {len(self.ids)-len(indices)}/{len(self.ids)}")
self.estim = self.estim[indices]
self.diagnostics = self.diagnostics[indices]
self.crlb = self.crlb[indices]
self.roipos = self.roipos[indices]
self.iterations = self.iterations[indices]
self.chisq = self.chisq[indices]
self.ids = self.ids[indices]
if self.samples is not None:
self.samples = self.samples[indices]
return indices
def FilterXY(self, minX, minY, maxX, maxY):
return self.Filter(np.where(
np.logical_and(
np.logical_and(self.estim[:,0]>minX, self.estim[:,1]>minY),
np.logical_and(self.estim[:,0]<maxX, self.estim[:,1]<maxY)))[0])
def Clone(self):
return copy.deepcopy(self)
def ColIdx(self, *names):
return np.squeeze(np.array([self.param_names.index(n) for n in names],dtype=np.int))
EstimResultDType = np.dtype([
('id','<i4'),('chisq','<f4'),('iterations','<i4')
])
class EstimQueue:
def __init__(self, estim:Estimator, batchSize=256, maxQueueLenInBatches=5, numStreams=-1, keepSamples=False, ctx=None):
if ctx is None:
self.ctx = estim.ctx
else:
self.ctx = ctx
lib = self.ctx.smlm.lib
self.estim = estim
self.batchSize = batchSize
InstancePtrType = ct.c_void_p
# DLL_EXPORT LocalizationQueue* EstimQueue_CreateQueue(PSF* psf, int batchSize, int maxQueueLen, int numStreams);
self._EstimQueue_Create = lib.EstimQueue_Create
self._EstimQueue_Create.argtypes = [
InstancePtrType,
ct.c_int32,
ct.c_int32,
ct.c_bool,
ct.c_int32,
ct.c_void_p]
self._EstimQueue_Create.restype = InstancePtrType
# DLL_EXPORT void EstimQueue_Delete(LocalizationQueue* queue);
self._EstimQueue_Delete= lib.EstimQueue_Delete
self._EstimQueue_Delete.argtypes = [InstancePtrType]
# DLL_EXPORT void EstimQueue_Schedule(LocalizationQueue* q, int numspots, const int *ids, const float* h_samples,
# const float* h_constants, const int* h_roipos);
self._EstimQueue_Schedule = lib.EstimQueue_Schedule
self._EstimQueue_Schedule.argtypes = [
InstancePtrType,
ct.c_int32,
ctl.ndpointer(np.int32, flags="aligned, c_contiguous"), # ids
ctl.ndpointer(np.float32, flags="aligned, c_contiguous"), # samples
NullableFloatArrayType, #initial
NullableFloatArrayType, #const
ctl.ndpointer(np.int32, flags="aligned, c_contiguous"), # roipos
]
# DLL_EXPORT void EstimQueue_Flush(LocalizationQueue* q);
self._EstimQueue_Flush = lib.EstimQueue_Flush
self._EstimQueue_Flush.argtypes = [InstancePtrType]
# DLL_EXPORT bool EstimQueue_IsIdle(LocalizationQueue* q);
self._EstimQueue_IsIdle = lib.EstimQueue_IsIdle
self._EstimQueue_IsIdle.argtypes = [InstancePtrType]
self._EstimQueue_IsIdle.restype = ct.c_bool
# DLL_EXPORT int EstimQueue_GetResultCount(LocalizationQueue* q);
self._EstimQueue_GetResultCount = lib.EstimQueue_GetResultCount
self._EstimQueue_GetResultCount.argtypes = [InstancePtrType]
self._EstimQueue_GetResultCount.restype = ct.c_int32
self._EstimQueue_GetQueueLength = lib.EstimQueue_GetQueueLength
self._EstimQueue_GetQueueLength.argtypes = [InstancePtrType]
self._EstimQueue_GetQueueLength.restype = ct.c_int32
# // Returns the number of actual returned localizations.
# // Results are removed from the queue after copying to the provided memory
# DLL_EXPORT int EstimQueue_GetResults(LocalizationQueue* q, int maxresults, float* estim, float* diag, float *fi);
self._EstimQueue_GetResults = lib.EstimQueue_GetResults
self._EstimQueue_GetResults.argtypes = [
InstancePtrType,
ct.c_int32,
ctl.ndpointer(np.float32, flags="aligned, c_contiguous"), # estim
NullableFloatArrayType, # diag
NullableFloatArrayType, # crlb
ctl.ndpointer(np.int32, flags="aligned, c_contiguous"), # roipos
NullableFloatArrayType, # samples
ctl.ndpointer(EstimResultDType, flags="aligned, c_contiguous"), # results
]
self._EstimQueue_GetResults.restype = ct.c_int32
self.param_names = estim.param_names
self.inst = self._EstimQueue_Create(estim.inst,batchSize, maxQueueLenInBatches, keepSamples, numStreams,
self.ctx.inst if self.ctx else None)
if not self.inst:
raise RuntimeError("Unable to create PSF MLE Queue with given PSF")
def __enter__(self):
return self
def __exit__(self, *args):
self.Destroy()
def Destroy(self):
self._EstimQueue_Delete(self.inst)
def Flush(self):
self._EstimQueue_Flush(self.inst)
def WaitUntilDone(self):
self.Flush()
while not self.IsIdle():
time.sleep(0.05)
def IsIdle(self):
return self._EstimQueue_IsIdle(self.inst)
def Schedule(self, samples, roipos=None, ids=None, initial=None, constants=None):
samples = np.ascontiguousarray(samples,dtype=np.float32)
numspots = len(samples)
if roipos is None:
roipos = np.zeros((numspots,self.estim.indexdims),dtype=np.int32)
if constants is not None:
constants = np.ascontiguousarray(constants,dtype=np.float32)
if constants.size != numspots*self.estim.numconst:
raise ValueError(f'Estimator is expecting constants array with shape {(numspots,self.estim.numconst)}. Given: {constants.shape}')
else:
assert(self.estim.numconst==0)
if initial is not None:
initial = np.ascontiguousarray(initial, dtype=np.float32)
assert(np.array_equal(initial.shape, [numspots, self.estim.numparams]))
roipos = np.ascontiguousarray(roipos, dtype=np.int32)
if not np.array_equal(roipos.shape, [numspots, self.estim.indexdims]):
raise ValueError(f'Incorrect shape for ROI positions: {roipos.shape} given, expecting {[numspots, self.estim.indexdims]}')
assert self.estim.samplecount*numspots==samples.size
if ids is None:
ids = np.zeros(numspots,dtype=np.int32)
else:
assert len(ids) == len(samples)
ids = np.ascontiguousarray(ids,dtype=np.int32)
self._EstimQueue_Schedule(self.inst, numspots, ids, samples, initial, constants, roipos)
def GetQueueLength(self):
return self._EstimQueue_GetQueueLength(self.inst)
def GetResultCount(self):
return self._EstimQueue_GetResultCount(self.inst)
def GetResults(self,maxResults=None, getSampleData=False) -> EstimQueue_Results: #
count = self._EstimQueue_GetResultCount(self.inst)
if maxResults is not None and count>maxResults:
count=maxResults
K = self.estim.NumParams()
estim = np.zeros((count, K),dtype=np.float32)
diag = np.zeros((count, self.estim.NumDiag()), dtype=np.float32)
crlb = np.zeros((count, K), dtype=np.float32)
'''
@FileName : data_parser.py
@EditTime : 2021-11-29 13:59:47
@Author : <NAME>
@Email : <EMAIL>
@Description :
'''
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import sys
import os
import os.path as osp
import platform
import json
from collections import namedtuple
import cv2
import numpy as np
import torch
from torch.utils.data import Dataset
Keypoints = namedtuple('Keypoints',
['keypoints', 'gender_gt', 'gender_pd'])
Keypoints.__new__.__defaults__ = (None,) * len(Keypoints._fields)
def read_keypoints(keypoint_fn, num_people, num_joint):
if not os.path.exists(keypoint_fn):
keypoints = [np.zeros((num_joint, 3))] * num_people # keypoints may not exist
flags = np.zeros((num_people,))
valid = 0
return keypoints, flags, valid
with open(keypoint_fn) as keypoint_file:
data = json.load(keypoint_file)
valid = 1
keypoints = []
flags = np.zeros((len(data['people'])))
for idx, person_data in enumerate(data['people']):
if person_data is None:
body_keypoints = np.zeros((num_joint, 3),
dtype=np.float32)
else:
flags[idx] = 1
body_keypoints = np.array(person_data['pose_keypoints_2d'],
dtype=np.float32)
body_keypoints = body_keypoints.reshape([-1, 3])
keypoints.append(body_keypoints)
return keypoints[:num_people], flags[:num_people], valid
def read_joints(keypoint_fn, use_hands=True, use_face=True,
use_face_contour=False):
"""
load 3D annotation
"""
with open(keypoint_fn) as keypoint_file:
data = json.load(keypoint_file)
keypoints = []
gender_pd = []
gender_gt = []
for idx, person_data in enumerate(data['people']):
try:
body_keypoints = np.array(person_data['pose_keypoints_3d'],
dtype=np.float32)
body_keypoints = body_keypoints.reshape([-1, 4])
if use_hands:
left_hand_keyp = np.array(
person_data['hand_left_keypoints_3d'],
dtype=np.float32).reshape([-1, 4])
right_hand_keyp = np.array(
person_data['hand_right_keypoints_3d'],
dtype=np.float32).reshape([-1, 4])
body_keypoints = np.concatenate(
[body_keypoints, left_hand_keyp, right_hand_keyp], axis=0)
if use_face:
# TODO: Make parameters, 17 is the offset for the eye brows,
# etc. 51 is the total number of FLAME compatible landmarks
face_keypoints = np.array(
person_data['face_keypoints_3d'],
dtype=np.float32).reshape([-1, 4])[17: 17 + 51, :]
contour_keyps = np.array(
[], dtype=body_keypoints.dtype).reshape(0, 4)
if use_face_contour:
contour_keyps = np.array(
person_data['face_keypoints_3d'],
dtype=np.float32).reshape([-1, 4])[:17, :]
body_keypoints = np.concatenate(
[body_keypoints, face_keypoints, contour_keyps], axis=0)
keypoints.append(body_keypoints)
except:
keypoints = None
if 'gender_pd' in person_data:
gender_pd.append(person_data['gender_pd'])
if 'gender_gt' in person_data:
gender_gt.append(person_data['gender_gt'])
return Keypoints(keypoints=keypoints, gender_pd=gender_pd,
gender_gt=gender_gt)
def smpl_to_annotation(model_type='smpl', use_hands=False, use_face=False,
use_face_contour=False, pose_format='coco17'):
if pose_format == 'halpe':
if model_type == 'smplhalpe':
# Halpe to SMPL
return np.array([0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25],
dtype=np.int32)
else:
raise ValueError('Unknown model type: {}'.format(model_type))
class FittingData(Dataset):
NUM_BODY_JOINTS = 17
NUM_HAND_JOINTS = 20
def __init__(self, data_folder, img_folder='images',
keyp_folder='keypoints',
use_hands=False,
use_face=False,
dtype=torch.float32,
model_type='smplx',
joints_to_ign=None,
use_face_contour=False,
pose_format='coco17',
use_3d=False,
use_hip=True,
frames=1,
num_people=1,
**kwargs):
super(FittingData, self).__init__()
self.use_hands = use_hands
self.use_face = use_face
self.model_type = model_type
self.dtype = dtype
self.use_3d = use_3d
self.use_hip = use_hip
self.joints_to_ign = joints_to_ign
self.use_face_contour = use_face_contour
self.pose_format = pose_format
if self.pose_format == 'halpe':
self.NUM_BODY_JOINTS = 26
self.num_joints = (self.NUM_BODY_JOINTS +
2 * self.NUM_HAND_JOINTS * use_hands)
self.data_folder = data_folder
self.img_folder = osp.join(data_folder, img_folder)
self.keyp_folder = osp.join(data_folder, keyp_folder)
img_serials = sorted(os.listdir(self.img_folder))
self.img_paths = []
for i_s in img_serials:
i_s_dir = osp.join(self.img_folder, i_s)
img_cameras = sorted(os.listdir(i_s_dir))
this_serials = []
for i_cam in img_cameras:
i_c_dir = osp.join(i_s_dir, i_cam)
cam_imgs = [osp.join(i_s, i_cam, img_fn)
for img_fn in os.listdir(i_c_dir)
if (img_fn.endswith('.png') or
img_fn.endswith('.jpg')) and
not img_fn.startswith('.')]
cam_imgs = sorted(cam_imgs)
this_serials.append(cam_imgs)
self.img_paths.append(this_serials)
self.cnt = 0
self.serial_cnt = 0
self.max_frames = frames
self.min_frames = 13
self.num_people = num_people
# if len(cam_imgs) < frames:
# self.frames = len(cam_imgs)
# else:
self.frames = frames
def get_model2data(self):
# Map SMPL to Halpe
return np.array([0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25],
dtype=np.int32)
def get_left_shoulder(self):
return 2
def get_right_shoulder(self):
return 5
def get_joint_weights(self):
# The weights for the joint terms in the optimization
optim_weights = np.ones(self.num_joints + 2 * self.use_hands +
self.use_face * 51 +
17 * self.use_face_contour,
dtype=np.float32)
# Neck, Left and right hip
# These joints are ignored because SMPL has no neck joint and the
# annotation of the hips is ambiguous.
# if self.joints_to_ign is not None and -1 not in self.joints_to_ign:
# optim_weights[self.joints_to_ign] = 0.
# return torch.tensor(optim_weights, dtype=self.dtype)
if (self.pose_format != 'lsp14' and self.pose_format != 'halpe') or not self.use_hip:
optim_weights[11] = 0.
optim_weights[12] = 0.
return torch.tensor(optim_weights, dtype=self.dtype)
def __len__(self):
return len(self.img_paths)
def __getitem__(self, idx):
img_path = self.img_paths[idx]
return self.read_item(img_path)
def read_item(self, img_paths):
"""Load keypoints according to img name"""
keypoints = []
total_flags = []
count = 0
for imgs in img_paths:
cam_keps = []
cam_flag = []
for img in imgs:
if platform.system() == 'Windows':
seq_name, cam_name, f_name = img.split('\\')
else:
seq_name, cam_name, f_name = img.split('/')
index = f_name.split('.')[0]
keypoint_fn = osp.join(self.keyp_folder, seq_name, cam_name, '%s_keypoints.json' %index)
keypoints_, flags, valid = read_keypoints(keypoint_fn, self.num_people, self.NUM_BODY_JOINTS)
count += valid
cam_flag.append(flags)
cam_keps.append(keypoints_)
keypoints.append(cam_keps)
total_flags.append(cam_flag)
total_flags = np.array(total_flags, dtype=np.int32)
import sys
sys.path.append('..')
from neml import interpolate, solvers, models, elasticity, ri_flow, hardening, surfaces, visco_flow, general_flow, creep, damage
from common import *
import unittest
import numpy as np
import numpy.linalg as la
class CommonStandardDamageModel(object):
"""
Tests that apply to any standard damage model
"""
def effective(self, s):
sdev = make_dev(s)
return np.sqrt(3.0/2.0 * np.dot(sdev, sdev))
def test_damage(self):
d_model = self.model.damage(self.d_np1, self.d_n, self.e_np1, self.e_n,
self.s_np1, self.s_n, self.T_np1, self.T_n, self.t_np1, self.t_n)
S = self.elastic.S(self.T_np1)
dS = self.s_np1 - self.s_n
dee = np.dot(S, dS)
de = self.e_np1 - self.e_n
dp = np.sqrt(2.0/3.0 * (np.dot(de, de) + np.dot(dee, dee) -
2.0 * np.dot(dee, de)))
f = self.model.f(self.s_np1, self.d_np1, self.T_np1)
d_calcd = self.d_n + f * dp
self.assertTrue(np.isclose(d_model, d_calcd))
def test_function_derivative_s(self):
d_model = self.model.df_ds(self.stress, self.d_np1, self.T)
d_calcd = differentiate(lambda x: self.model.f(x, self.d_np1, self.T),
self.stress)
self.assertTrue(np.allclose(d_model, d_calcd))
def test_function_derivative_d(self):
d_model = self.model.df_dd(self.stress, self.d, self.T)
d_calcd = differentiate(lambda x: self.model.f(self.s_np1, x, self.T),
self.d)
self.assertTrue(np.isclose(d_model, d_calcd))
class CommonScalarDamageModel(object):
def test_ndamage(self):
self.assertEqual(self.model.ndamage, 1)
def test_init_damage(self):
self.assertTrue(np.allclose(self.model.init_damage(), np.zeros((1,))))
def test_ddamage_ddamage(self):
dd_model = self.model.ddamage_dd(self.d_np1, self.d_n, self.e_np1, self.e_n,
self.s_np1, self.s_n, self.T_np1, self.T_n, self.t_np1, self.t_n)
dfn = lambda d: self.model.damage(d, self.d_n, self.e_np1, self.e_n,
self.s_np1, self.s_n, self.T_np1, self.T_n, self.t_np1, self.t_n)
dd_calcd = differentiate(dfn, self.d_np1)
self.assertTrue(np.isclose(dd_model, dd_calcd))
def test_ddamage_dstrain(self):
dd_model = self.model.ddamage_de(self.d_np1, self.d_n, self.e_np1, self.e_n,
self.s_np1, self.s_n, self.T_np1, self.T_n, self.t_np1, self.t_n)
dfn = lambda e: self.model.damage(self.d_np1, self.d_n, e, self.e_n,
self.s_np1, self.s_n, self.T_np1, self.T_n, self.t_np1, self.t_n)
dd_calcd = differentiate(dfn, self.e_np1)[0]
self.assertTrue(np.allclose(dd_model, dd_calcd, rtol = 1.0e-3))
def test_ddamage_dstress(self):
dd_model = self.model.ddamage_ds(self.d_np1, self.d_n, self.e_np1, self.e_n,
self.s_np1, self.s_n, self.T_np1, self.T_n, self.t_np1, self.t_n)
dfn = lambda s: self.model.damage(self.d_np1, self.d_n, self.e_np1, self.e_n,
s, self.s_n, self.T_np1, self.T_n, self.t_np1, self.t_n)
dd_calcd = differentiate(dfn, self.s_np1)[0]
self.assertTrue(np.allclose(dd_model, dd_calcd, rtol = 1.0e-3))
def test_nparams(self):
self.assertEqual(self.model.nparams, 7)
def test_init_x(self):
trial_state = self.model.make_trial_state(
self.e_np1, self.e_n,
self.T_np1, self.T_n, self.t_np1, self.t_n,
self.s_n, self.hist_n, self.u_n, self.p_n)
me = np.array(list(self.s_n) + [self.d_n])
them = self.model.init_x(trial_state)
self.assertTrue(np.allclose(me, them))
def test_R(self):
trial_state = self.model.make_trial_state(
self.e_np1, self.e_n,
self.T_np1, self.T_n, self.t_np1, self.t_n,
self.s_n, self.hist_n, self.u_n, self.p_n)
R, J = self.model.RJ(self.x_trial, trial_state)
s_trial = self.x_trial[:6]
w_trial = self.x_trial[6]
R_calc = np.zeros((7,))
s_p_np1, h_p, A_p, u_p, p_p =self.bmodel.update_sd(self.e_np1, self.e_n,
self.T_np1, self.T_n,
self.t_np1, self.t_n, self.s_n / (1-self.d_n), self.hist_n[1:],
self.u_n, self.p_n)
R_calc[:6] = s_trial - (1-w_trial) * s_p_np1
d_np1 = self.model.damage(w_trial, self.d_n, self.e_np1, self.e_n,
s_trial / (1 - w_trial), self.s_n / (1-self.d_n), self.T_np1,
self.T_n, self.t_np1, self.t_n)
R_calc[6] = w_trial - d_np1
self.assertTrue(np.allclose(R_calc, R))
def test_jacobian(self):
trial_state = self.model.make_trial_state(
self.e_np1, self.e_n,
self.T_np1, self.T_n, self.t_np1, self.t_n,
self.s_n, self.hist_n, self.u_n, self.p_n)
R, J = self.model.RJ(self.x_trial, trial_state)
Jnum = differentiate(lambda x: self.model.RJ(x, trial_state)[0],
self.x_trial)
self.assertTrue(np.allclose(J, Jnum, rtol = 1.0e-3))
class CommonDamagedModel(object):
def test_nstore(self):
self.assertEqual(self.model.nstore, self.bmodel.nstore + self.model.ndamage)
def test_store(self):
base = self.bmodel.init_store()
damg = self.model.init_damage()
comp = list(damg) + list(base)
fromm = self.model.init_store()
self.assertTrue(np.allclose(fromm, comp))
def test_tangent_proportional_strain(self):
t_n = 0.0
e_n = np.zeros((6,))
s_n = np.zeros((6,))
hist_n = self.model.init_store()
u_n = 0.0
p_n = 0.0
for m in np.linspace(0,1,self.nsteps+1)[1:]:
t_np1 = m * self.ttarget
e_np1 = m * self.etarget
trial_state = self.model.make_trial_state(
e_np1, e_n,
self.T, self.T, t_np1, t_n,
s_n, hist_n, u_n, p_n)
s_np1, hist_np1, A_np1, u_np1, p_np1 = self.model.update_sd(
e_np1, e_n, self.T, self.T, t_np1, t_n, s_n, hist_n,
u_n, p_n)
A_num = differentiate(lambda e: self.model.update_sd(e, e_n,
self.T, self.T, t_np1, t_n, s_n, hist_n, u_n, p_n)[0], e_np1)
self.assertTrue(np.allclose(A_num, A_np1, rtol = 1.0e-3, atol = 1.0e-1))
e_n = np.copy(e_np1)
s_n = np.copy(s_np1)
hist_n = np.copy(hist_np1)
u_n = u_np1
p_n = p_np1
t_n = t_np1
class TestClassicalDamage(unittest.TestCase, CommonScalarDamageModel,
CommonDamagedModel):
def setUp(self):
self.E = 92000.0
self.nu = 0.3
self.s0 = 180.0
self.Kp = 1000.0
self.H = 1000.0
self.elastic = elasticity.IsotropicLinearElasticModel(self.E, "youngs",
self.nu, "poissons")
surface = surfaces.IsoKinJ2()
iso = hardening.LinearIsotropicHardeningRule(self.s0, self.Kp)
kin = hardening.LinearKinematicHardeningRule(self.H)
hrule = hardening.CombinedHardeningRule(iso, kin)
flow = ri_flow.RateIndependentAssociativeFlow(surface, hrule)
self.bmodel = models.SmallStrainRateIndependentPlasticity(self.elastic,
flow)
self.xi = 0.478
self.phi = 1.914
self.A = 10000000.0
self.model = damage.ClassicalCreepDamageModel_sd(
self.elastic,
self.A, self.xi, self.phi, self.bmodel)
self.stress = np.array([100,-50.0,300.0,-99,50.0,125.0])
self.T = 100.0
self.s_np1 = self.stress
self.s_n = np.array([-25,150,250,-25,-100,25])
self.d_np1 = 0.5
self.d_n = 0.4
self.e_np1 = np.array([0.1,-0.01,0.15,-0.05,-0.1,0.15])
self.e_n = np.array([-0.05,0.025,-0.1,0.2,0.11,0.13])
self.T_np1 = self.T
self.T_n = 90.0
self.t_np1 = 1.0
self.t_n = 0.0
self.u_n = 0.0
self.p_n = 0.0
# This is a rather boring baseline history state to probe, but I can't
# think of a better way to get a "generic" history from a generic model
self.hist_n = np.array([self.d_n] + list(self.bmodel.init_store()))
self.x_trial = np.array([50,-25,150,-150,190,100.0] + [0.41])
self.nsteps = 10
self.etarget = np.array([0.1,-0.025,0.02,0.015,-0.02,-0.05])
self.ttarget = 10.0
class TestPowerLawDamage(unittest.TestCase, CommonStandardDamageModel,
CommonScalarDamageModel, CommonDamagedModel):
def setUp(self):
self.E = 92000.0
self.nu = 0.3
self.s0 = 180.0
self.Kp = 1000.0
self.H = 1000.0
self.elastic = elasticity.IsotropicLinearElasticModel(self.E, "youngs",
self.nu, "poissons")
surface = surfaces.IsoKinJ2()
iso = hardening.LinearIsotropicHardeningRule(self.s0, self.Kp)
kin = hardening.LinearKinematicHardeningRule(self.H)
hrule = hardening.CombinedHardeningRule(iso, kin)
flow = ri_flow.RateIndependentAssociativeFlow(surface, hrule)
self.bmodel = models.SmallStrainRateIndependentPlasticity(self.elastic,
flow)
self.A = 8.0e-6
self.a = 2.2
self.model = damage.NEMLPowerLawDamagedModel_sd(self.elastic, self.A, self.a,
self.bmodel)
self.stress = np.array([100,-50.0,300.0,-99,50.0,125.0])
self.T = 100.0
self.d = 0.45
self.s_np1 = self.stress
self.s_n = np.array([-25,150,250,-25,-100,25])
self.d_np1 = 0.5
self.d_n = 0.4
self.e_np1 = np.array([0.1,-0.01,0.15,-0.05,-0.1,0.15])
self.e_n = np.array([-0.05,0.025,-0.1,0.2,0.11,0.13])
self.T_np1 = self.T
self.T_n = 90.0
self.t_np1 = 1.0
self.t_n = 0.0
self.u_n = 0.0
self.p_n = 0.0
# This is a rather boring baseline history state to probe, but I can't
# think of a better way to get a "generic" history from a generic model
self.hist_n = np.array([self.d_n] + list(self.bmodel.init_store()))
self.x_trial = np.array([50,-25,150,-150,190,100.0] + [0.41])
self.nsteps = 10
self.etarget = np.array([0.1,-0.025,0.02,0.015,-0.02,-0.05])
self.ttarget = 10.0
def test_function(self):
f_model = self.model.f(self.stress, self.d_np1, self.T)
f_calcd = self.A * self.effective(self.stress) ** self.a
self.assertTrue(np.isclose(f_model, f_calcd))
class TestExponentialDamage(unittest.TestCase, CommonStandardDamageModel,
CommonScalarDamageModel, CommonDamagedModel):
def setUp(self):
self.E = 92000.0
self.nu = 0.3
self.s0 = 180.0
self.Kp = 1000.0
self.H = 1000.0
self.elastic = elasticity.IsotropicLinearElasticModel(self.E,
"youngs", self.nu, "poissons")
surface = surfaces.IsoKinJ2()
iso = hardening.LinearIsotropicHardeningRule(self.s0, self.Kp)
kin = hardening.LinearKinematicHardeningRule(self.H)
hrule = hardening.CombinedHardeningRule(iso, kin)
flow = ri_flow.RateIndependentAssociativeFlow(surface, hrule)
self.bmodel = models.SmallStrainRateIndependentPlasticity(self.elastic,
flow)
self.W0 = 10.0
self.k0 = 0.0001
self.a = 2.0
self.model = damage.NEMLExponentialWorkDamagedModel_sd(
self.elastic, self.W0, self.k0,
self.a, self.bmodel)
self.stress = np.array([100,-50.0,300.0,-99,50.0,125.0])
self.T = 100.0
self.d = 0.45
self.s_np1 = self.stress
self.s_n = np.array([-25,150,250,-25,-100,25])
self.d_np1 = 0.5
self.d_n = 0.4
self.e_np1 = np.array([0.1,-0.01,0.15,-0.05,-0.1,0.15])
self.e_n = np.array([-0.05,0.025,-0.1,0.2,0.11,0.13])
self.T_np1 = self.T
self.T_n = 90.0
self.t_np1 = 1.0
self.t_n = 0.0
self.u_n = 0.0
self.p_n = 0.0
# This is a rather boring baseline history state to probe, but I can't
# think of a better way to get a "generic" history from a generic model
self.hist_n = np.array([self.d_n] + list(self.bmodel.init_store()))
self.x_trial = np.array([50,-25,150,-150,190,100.0] + [0.41])
self.nsteps = 10
self.etarget = np.array([0.1,-0.025,0.02,0.015,-0.02,-0.05])
self.ttarget = 10.0
def test_function(self):
f_model = self.model.f(self.stress, self.d_np1, self.T)
f_calcd = (self.d_np1 + self.k0) ** self.a * self.effective(self.stress) / self.W0
self.assertTrue(np.isclose(f_model, f_calcd))
""" Tests for coordinate transforms """
import pytest
import numpy as np
import spharpy.samplings as samplings
from spharpy.samplings import spherical_voronoi
def test_sph2cart():
rad, theta, phi = 1, np.pi/2, 0
x, y, z = samplings.sph2cart(rad, theta, phi)
assert (1, 0, 0) == pytest.approx((x, y, z), abs=2e-16, rel=2e-16)
def test_sph2cart_array():
rad = np.ones(6)
theta = np.array([np.pi/2, np.pi/2, np.pi/2, np.pi/2, 0, np.pi])
phi = np.array([0, np.pi, np.pi/2, np.pi*3/2, 0, 0])
x, y, z = samplings.sph2cart(rad, theta, phi)
xx = np.array([1, -1, 0, 0, 0, 0])
yy = np.array([0, 0, 1, -1, 0, 0])
zz = np.array([0, 0, 0, 0, 1, -1])
np.testing.assert_allclose(xx, x, atol=1e-15)
np.testing.assert_allclose(yy, y, atol=1e-15)
np.testing.assert_allclose(zz, z, atol=1e-15)
def test_cart2sph_array():
x = np.array([1, -1, 0, 0, 0, 0])
y = np.array([0, 0, 1, -1, 0, 0])
z = np.array([0, 0, 0, 0, 1, -1])
rr, tt, pp = samplings.cart2sph(x, y, z)
rad = np.ones(6)
theta = np.array([np.pi/2, np.pi/2, np.pi/2, np.pi/2, 0, np.pi])
phi = np.array([0, np.pi, np.pi/2, np.pi*3/2, 0, 0])
np.testing.assert_allclose(rad, rr, atol=1e-15)
np.testing.assert_allclose(phi, pp, atol=1e-15)
np.testing.assert_allclose(theta, tt, atol=1e-15)
def test_cart2latlon_array():
x = np.array([1, -1, 0, 0, 0, 0])
y = np.array([0, 0, 1, -1, 0, 0])
z = np.array([0, 0, 0, 0, 1, -1])
rr, tt, pp = samplings.cart2latlon(x, y, z)
rad = np.ones(6)
theta = np.array([0, 0, 0, 0, np.pi/2, -np.pi/2])
phi = np.array([0, np.pi, np.pi/2, -np.pi/2, 0, 0])
from scipy.integrate import *
import scipy.optimize
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
from functools import partial
import os, sys
periSampl = 1000
class Parameters:
mu0 = 4 * 3.1415927 * 1e-7
gamma = 2.2128e5
alpha = 0.01
Js = 1
K1 = -181476 #[A/m] # old:-Js**2/(2*mu0) # (185296)
K12 = 0#-159/10# # K1/1000#-7320.113
RAHE = 1#1#1
RPHE = 0.1
RAMR = 1
d = 2e-9 #(0.6+1.2+1.1) * 1e-9
frequency = 0.1e9
currentd = float(sys.argv[1]) * 1e10
hbar = 1.054571e-34
e = 1.602176634e-19
mu0 = 4 * 3.1415927 * 1e-7
easy_axis = np.array([0,0,1])
easy_axis2 = np.array([1,0,0])
p_axis = np.array([0,-1,0])
etadamp = 0.01
etafield = 0.05 # etafield/etadamp=eta
eta = etafield/etadamp
hext = np.array([1.0 * K1/Js,0,0])
area = (2e-6 * 6e-9)
result = []
tEvol = [] #Time evolution of: Time
mEvol = [] # Magnetization direction
mxhEvol = [] # Fieldlike term
mxmxhEvol = [] # Dampinglike term
HsotEvol = [] # Magnitude of DT & FT
DHEvol = [] # Current induced fields \Delta H
#-------------------FFT functions-------------------#
def lockin(sig, t, f, ph):
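# Second-harmonic lock-in detection: multiply the signal by a reference at 2f
# (phase ph given in degrees) and time-average; the factor of 2 recovers the
# amplitude of that Fourier component.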
ref = np.cos(2 * 2*np.pi*f*t + ph/180.0*np.pi)
#ref = np.sin(2*np.pi*f*t + ph/180.0*np.pi)
comp = np.multiply(sig,ref)
#print(t[-1]) #plot real part fft
return comp.mean()*2
def fft(sig, t, f):
sample_dt = np.mean(np.diff(t))
N = len(t)
yfft = np.fft.rfft(sig)
# Copyright (c) 2017 Sony Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file was forked from https://github.com/marvis/pytorch-yolo2 ,
# licensed under the MIT License (see LICENSE.external for more details).
import time
import math
import numpy as np
import nnabla
import argparse
import struct # get_image_size
import imghdr # get_image_size
def sigmoid(x):
return 1.0/(math.exp(-x)+1.)
def bbox_iou(box1, box2, x1y1x2y2=True):
if x1y1x2y2:
mx = min(box1[0], box2[0])
Mx = max(box1[2], box2[2])
my = min(box1[1], box2[1])
My = max(box1[3], box2[3])
w1 = box1[2] - box1[0]
h1 = box1[3] - box1[1]
w2 = box2[2] - box2[0]
h2 = box2[3] - box2[1]
else:
mx = min(box1[0]-box1[2]/2.0, box2[0]-box2[2]/2.0)
Mx = max(box1[0]+box1[2]/2.0, box2[0]+box2[2]/2.0)
my = min(box1[1]-box1[3]/2.0, box2[1]-box2[3]/2.0)
My = max(box1[1]+box1[3]/2.0, box2[1]+box2[3]/2.0)
w1 = box1[2]
h1 = box1[3]
w2 = box2[2]
h2 = box2[3]
uw = Mx - mx
uh = My - my
cw = w1 + w2 - uw
ch = h1 + h2 - uh
carea = 0
if cw <= 0 or ch <= 0:
return 0.0
area1 = w1 * h1
area2 = w2 * h2
carea = cw * ch
uarea = area1 + area2 - carea
return carea/uarea
def bbox_iou_numpy(box1, box2, x1y1x2y2=True):
if x1y1x2y2:
mx = np.min((box1[0], box2[0]))
Mx = np.max((box1[2], box2[2]))
my = np.min((box1[1], box2[1]))
My = np.max((box1[3], box2[3]))
w1 = box1[2] - box1[0]
h1 = box1[3] - box1[1]
w2 = box2[2] - box2[0]
h2 = box2[3] - box2[1]
else:
mx = np.min((box1[0]-box1[2]/2.0, box2[0]-box2[2]/2.0))
Mx = np.max((box1[0]+box1[2]/2.0, box2[0]+box2[2]/2.0))
my = np.min((box1[1]-box1[3]/2.0, box2[1]-box2[3]/2.0))
My = np.max((box1[1]+box1[3]/2.0, box2[1]+box2[3]/2.0))
w1 = box1[2]
h1 = box1[3]
w2 = box2[2]
h2 = box2[3]
uw = Mx - mx
uh = My - my
cw = w1 + w2 - uw
ch = h1 + h2 - uh
carea = 0
if cw <= 0 or ch <= 0:
return 0.0
area1 = w1 * h1
area2 = w2 * h2
carea = cw * ch
uarea = area1 + area2 - carea
return carea/uarea
def bbox_ious(boxes1, boxes2, x1y1x2y2=True):
if x1y1x2y2:
mx = np.minimum(boxes1[0], boxes2[0])
Mx = np.maximum(boxes1[2], boxes2[2])
my = np.minimum(boxes1[1], boxes2[1])
My = np.maximum(boxes1[3], boxes2[3])
w1 = boxes1[2] - boxes1[0]
h1 = boxes1[3] - boxes1[1]
w2 = boxes2[2] - boxes2[0]
h2 = boxes2[3] - boxes2[1]
else:
mx = np.minimum(boxes1[0]-boxes1[2]/2.0, boxes2[0]-boxes2[2]/2.0)
Mx = np.maximum(boxes1[0]+boxes1[2]/2.0, boxes2[0]+boxes2[2]/2.0)
my = np.minimum(boxes1[1]-boxes1[3]/2.0, boxes2[1]-boxes2[3]/2.0)
My = np.maximum(boxes1[1]+boxes1[3]/2.0, boxes2[1]+boxes2[3]/2.0)
w1 = boxes1[2]
h1 = boxes1[3]
w2 = boxes2[2]
h2 = boxes2[3]
uw = Mx - mx
uh = My - my
cw = w1 + w2 - uw
ch = h1 + h2 - uh
mask = ((cw <= 0) + (ch <= 0) > 0)
area1 = w1 * h1
area2 = w2 * h2
carea = cw * ch
carea[mask] = 0
uarea = area1 + area2 - carea
return carea/uarea
def bbox_ious_numpy(boxes1, boxes2, x1y1x2y2=True):
if x1y1x2y2:
mx = np.minimum(boxes1[0], boxes2[0])
Mx = np.maximum(boxes1[2], boxes2[2])
my = np.minimum(boxes1[1], boxes2[1])
My = np.maximum(boxes1[3], boxes2[3])
"""
Verify that the near fields, when evaluated far from the origin, approximate the expressions for the angular far fields
"""
import numpy as np
import miepy
import pytest
### parameters
nm = 1e-9
wav = 600*nm
k = 2*np.pi/wav
width = 100*nm
polarization = [1,1j]
### angular grid
radius = 150.3*wav
theta = np.linspace(0., np.pi, 4)
phi = np.linspace(0, 2*np.pi, 5)[:-1]
THETA, PHI = np.meshgrid(theta, phi, indexing='ij')
X, Y, Z = miepy.coordinates.sph_to_cart(radius, THETA, PHI)
@pytest.mark.parametrize("source,atol,rtol", [
(miepy.sources.gaussian_beam(width=width, polarization=polarization), 0, 2e-2),
(miepy.sources.hermite_gaussian_beam(2, 0, width=width, polarization=polarization), 0, 2e-2),
(miepy.sources.laguerre_gaussian_beam(1, 1, width=width, polarization=polarization), 1, 5e-2),
])
def test_source_electric_field_near_to_far(source, atol, rtol):
"""
Compare E-field of source in far field using near and far field expressions
Expressions are expected to converge in the limit r -> infinity
"""
E1 = source.E_field(X, Y, Z, k, sampling=300)
E1 = miepy.coordinates.vec_cart_to_sph(E1, THETA, PHI)[1:]
E2 = source.E_angular(THETA, PHI, k, radius=radius)
assert np.allclose(E1, E2, atol=atol, rtol=rtol)
@pytest.mark.parametrize("source,atol,rtol", [
(miepy.sources.gaussian_beam(width=width, polarization=polarization), 0, 2e-2),
(miepy.sources.hermite_gaussian_beam(2, 0, width=width, polarization=polarization), 0, 2e-2),
(miepy.sources.laguerre_gaussian_beam(1, 1, width=width, polarization=polarization), 1, 5e-2),
])
def test_source_magnetic_field_near_to_far(source, atol, rtol):
"""
Compare H-field of source in far field using near and far field expressions
Expressions are expected to converge in the limit r -> infinity
"""
H1 = source.H_field(X, Y, Z, k, sampling=300)
H1 = miepy.coordinates.vec_cart_to_sph(H1, THETA, PHI)[1:]
H2 = source.H_angular(THETA, PHI, k, radius=radius)
assert np.allclose(H1, H2, atol=atol, rtol=rtol)
def test_cluster_field_near_to_far():
"""
Compare scattered E/H-field of a cluster in far field using near and far field expressions
Expressions are expected to converge in the limit r -> infinity
"""
x = np.linspace(-600*nm, 600*nm, 3)
y = np.linspace(-600*nm, 600*nm, 3)
cluster = miepy.sphere_cluster(position=[[xv, yv, 0] for xv in x for yv in y],
radius=100*nm,
material=miepy.constant_material(index=2),
wavelength=wav,
source=miepy.sources.plane_wave([1,1]),
lmax=3)
theta = np.linspace(0, np.pi, 5)
phi = np.linspace(0, 2*np.pi, 5)
# -*- coding: utf-8 -*-
# file: data_utils.py
# author: albertopaz <<EMAIL>>
# Copyright (C) 2019. All Rights Reserved.
import re
import os
import pickle
import numpy as np
import spacy
import itertools
import pandas as pd
from tqdm import tqdm
from dan_parser import Parser
from torch.utils.data import Dataset
def make_dependency_aware(dataset, raw_data_path, boc):
dat_fname = re.findall(r'datasets/(.*?)\..',raw_data_path)[0].lower()
dan_data_path = os.path.join('data/dan/dan_{}.dat'.format( dat_fname))
if os.path.exists(dan_data_path):
print('loading dan inputs:', dat_fname)
awareness = pickle.load(open(dan_data_path, 'rb'))
else:
awareness = []
print('parsing...')
dp = Parser(boc = boc)
for i in tqdm(dataset):
x = dp.parse(i['text_string'], i['aspect_position'][0],
i['aspect_position'][1])
awareness.append(x)
pickle.dump(awareness, open(dan_data_path, 'wb'))
# merge regular inputs dictionary with the dan dictionary
dataset_v2 = [{**dataset[i], **awareness[i]} for i in range(len(dataset))]
return dataset_v2
# creates a file with the concepts found accorss all datasets
def build_boc(fnames, dat_fname):
boc_path = os.path.join('data/embeddings', dat_fname)
affective_space_path = 'data/embeddings/affectivespace/affectivespace.csv'
if os.path.exists(boc_path):
print('loading bag of concepts:', dat_fname)
boc = pickle.load(open(boc_path, 'rb'))
else:
dan = Parser()
concepts = []
for fname in fnames:
fin = open(fname, 'r', encoding='utf-8', newline='\n', errors='ignore')
lines = fin.readlines()
fin.close()
for i in range(0, len(lines), 3):
text_left, _, text_right = [s.lower().strip() for s in lines[i].partition("$T$")]
aspect = lines[i + 1].lower().strip()
text_raw = text_left + " " + aspect + " " + text_right
doc = dan.nlp(text_raw)
for tok in doc:
c = dan.extract_concepts(tok, in_bag = False)
if c != None: concepts.append(c)
concepts = list(itertools.chain(*concepts))
concepts = [i['text'] for i in concepts]
concepts = list(set(concepts))
print('total concepts found: ', len(concepts))
# keep only the ones that match with affective space
affective_space = pd.read_csv(affective_space_path, header = None)
bag_of_concepts = []
for key in list(affective_space[0]):
if key in concepts: bag_of_concepts.append(key)
print('total concepts keept: ', len(bag_of_concepts))
text = " ".join(bag_of_concepts)
boc = Tokenizer(max_seq_len = 5)
boc.fit_on_text(text)
boc.tokenize = None # remove spacy model
pickle.dump(boc, open(boc_path, 'wb'))
return boc
def build_tokenizer(fnames, max_seq_len, dat_fname):
tokenizer_path = os.path.join('data/embeddings', dat_fname)
if os.path.exists(tokenizer_path):
print('loading tokenizer:', dat_fname)
tokenizer = pickle.load(open(tokenizer_path, 'rb'))
else:
text = ''
for fname in fnames:
fin = open(fname, 'r', encoding='utf-8', newline='\n', errors='ignore')
lines = fin.readlines()
fin.close()
for i in range(0, len(lines), 3):
text_left, _, text_right = [s.lower().strip() for s in lines[i].partition("$T$")]
aspect = lines[i + 1].lower().strip()
text_raw = text_left + " " + aspect + " " + text_right
text += text_raw + " "
tokenizer = Tokenizer(max_seq_len)
tokenizer.fit_on_text(text)
tokenizer.tokenize = None
pickle.dump(tokenizer, open(tokenizer_path, 'wb'))
return tokenizer
def _load_word_vec(path, word2idx=None):
word_vec = {}
if path[-3:] == 'csv':
fin = pd.read_csv(path, header = None)
for row in range(len(fin)):
vec = fin.iloc[row].values
if word2idx is None or vec[0] in word2idx.keys():
word_vec[vec[0]] = np.asarray(vec[1:], dtype = 'float32')
else:
fin = open(path, 'r', encoding='utf-8', newline='\n', errors='ignore')
for line in fin:
tokens = line.rstrip().split()
if word2idx is None or tokens[0] in word2idx.keys():
word_vec[tokens[0]] = np.asarray(tokens[1:], dtype='float32')
return word_vec
def build_embedding_matrix(word2idx, embed_dim, dat_fname):
embed_path = os.path.join('data/embeddings', dat_fname)
if os.path.exists(embed_path):
print('loading embedding_matrix:', dat_fname)
embedding_matrix = pickle.load(open(embed_path, 'rb'))
else:
print('loading word vectors...')
if dat_fname.split('_')[0] == '100':
embedding_matrix = np.zeros((len(word2idx) + 2, 100)) # idx 0 and len(word2idx)+1 are all-zeros
fname = 'data/embeddings/affectivespace/affectivespace.csv'
else:
embedding_matrix = np.zeros((len(word2idx) + 2, embed_dim)) # idx 0 and len(word2idx)+1 are all-zeros
fname = 'data/embeddings/glove/glove.42B.300d.txt'
word_vec = _load_word_vec(fname, word2idx=word2idx)
print('building embedding_matrix:', dat_fname)
for word, i in word2idx.items():
vec = word_vec.get(word)
if vec is not None:
# words not found in embedding index will be all-zeros.
embedding_matrix[i] = vec
pickle.dump(embedding_matrix, open(embed_path, 'wb'))
return embedding_matrix
def pad_and_truncate(sequence, maxlen, dtype='int64', padding='post', truncating='post', value=0):
x = (np.ones(maxlen) * value).astype(dtype)
# neural network functions and classes
import numpy as np
import random
import json
import cma
from es import SimpleGA, CMAES, PEPG, OpenES
from env import make_env
def sigmoid(x):
return 1 / (1 + np.exp(-x))
#!/usr/bin/env python
"""
Evaluation of conformal predictors.
"""
# Authors: <NAME>
# TODO: cross_val_score/run_experiment should possibly allow multiple to be evaluated on identical folding
from __future__ import division
from cqr.nonconformist_base import RegressorMixin, ClassifierMixin
import sys
import numpy as np
import pandas as pd
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.model_selection import KFold
from sklearn.model_selection import train_test_split
from sklearn.base import clone, BaseEstimator
class BaseIcpCvHelper(BaseEstimator):
"""Base class for cross validation helpers.
"""
def __init__(self, icp, calibration_portion):
super(BaseIcpCvHelper, self).__init__()
self.icp = icp
self.calibration_portion = calibration_portion
def predict(self, x, significance=None):
return self.icp.predict(x, significance)
class ClassIcpCvHelper(BaseIcpCvHelper, ClassifierMixin):
"""Helper class for running the ``cross_val_score`` evaluation
method on IcpClassifiers.
See also
--------
IcpRegCrossValHelper
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.ensemble import RandomForestClassifier
>>> from cqr.nonconformist import IcpClassifier
>>> from cqr.nonconformist import ClassifierNc, MarginErrFunc
>>> from cqr.nonconformist import ClassIcpCvHelper
>>> from cqr.nonconformist import class_mean_errors
>>> from cqr.nonconformist import cross_val_score
>>> data = load_iris()
>>> nc = ClassifierNc(RandomForestClassifier(), MarginErrFunc())
>>> icp = IcpClassifier(nc)
>>> icp_cv = ClassIcpCvHelper(icp)
>>> cross_val_score(icp_cv,
... data.data,
... data.target,
... iterations=2,
... folds=2,
... scoring_funcs=[class_mean_errors],
... significance_levels=[0.1])
... # doctest: +SKIP
class_mean_errors fold iter significance
0 0.013333 0 0 0.1
1 0.080000 1 0 0.1
2 0.053333 0 1 0.1
3 0.080000 1 1 0.1
"""
def __init__(self, icp, calibration_portion=0.25):
super(ClassIcpCvHelper, self).__init__(icp, calibration_portion)
def fit(self, x, y):
split = StratifiedShuffleSplit(n_splits=1,
test_size=self.calibration_portion)
for train, cal in split.split(x, y):
self.icp.fit(x[train, :], y[train])
self.icp.calibrate(x[cal, :], y[cal])
class RegIcpCvHelper(BaseIcpCvHelper, RegressorMixin):
"""Helper class for running the ``cross_val_score`` evaluation
method on IcpRegressors.
See also
--------
IcpClassCrossValHelper
Examples
--------
>>> from sklearn.datasets import load_boston
>>> from sklearn.ensemble import RandomForestRegressor
>>> from cqr.nonconformist import IcpRegressor
>>> from cqr.nonconformist import RegressorNc, AbsErrorErrFunc
>>> from cqr.nonconformist import RegIcpCvHelper
>>> from cqr.nonconformist import reg_mean_errors
>>> from cqr.nonconformist import cross_val_score
>>> data = load_boston()
>>> nc = RegressorNc(RandomForestRegressor(), AbsErrorErrFunc())
>>> icp = IcpRegressor(nc)
>>> icp_cv = RegIcpCvHelper(icp)
>>> cross_val_score(icp_cv,
... data.data,
... data.target,
... iterations=2,
... folds=2,
... scoring_funcs=[reg_mean_errors],
... significance_levels=[0.1])
... # doctest: +SKIP
fold iter reg_mean_errors significance
0 0 0 0.185771 0.1
1 1 0 0.138340 0.1
2 0 1 0.071146 0.1
3 1 1 0.043478 0.1
"""
def __init__(self, icp, calibration_portion=0.25):
super(RegIcpCvHelper, self).__init__(icp, calibration_portion)
def fit(self, x, y):
split = train_test_split(x, y, test_size=self.calibration_portion)
x_tr, x_cal, y_tr, y_cal = split[0], split[1], split[2], split[3]
self.icp.fit(x_tr, y_tr)
self.icp.calibrate(x_cal, y_cal)
# -----------------------------------------------------------------------------
#
# -----------------------------------------------------------------------------
def cross_val_score(model,x, y, iterations=10, folds=10, fit_params=None,
scoring_funcs=None, significance_levels=None,
verbose=False):
"""Evaluates a conformal predictor using cross-validation.
Parameters
----------
model : object
Conformal predictor to evaluate.
x : numpy array of shape [n_samples, n_features]
Inputs of data to use for evaluation.
y : numpy array of shape [n_samples]
Outputs of data to use for evaluation.
iterations : int
Number of iterations to use for evaluation. The data set is randomly
shuffled before each iteration.
folds : int
Number of folds to use for evaluation.
fit_params : dictionary
Parameters to supply to the conformal prediction object on training.
scoring_funcs : iterable
List of evaluation functions to apply to the conformal predictor in each
fold. Each evaluation function should have a signature
``scorer(prediction, y, significance)``.
significance_levels : iterable
List of significance levels at which to evaluate the conformal
predictor.
verbose : boolean
Indicates whether to output progress information during evaluation.
Returns
-------
scores : pandas DataFrame
Tabulated results for each iteration, fold and evaluation function.
"""
fit_params = fit_params if fit_params else {}
significance_levels = (significance_levels if significance_levels
is not None else np.arange(0.01, 1.0, 0.01))
df = pd.DataFrame()
columns = ['iter',
'fold',
'significance',
] + [f.__name__ for f in scoring_funcs]
for i in range(iterations):
idx = np.random.permutation(y.size)
x, y = x[idx, :], y[idx]
cv = KFold(n_splits=folds)
for j, (train, test) in enumerate(cv.split(x)):
if verbose:
sys.stdout.write('\riter {}/{} fold {}/{}'.format(
i + 1,
iterations,
j + 1,
folds
))
m = clone(model)
m.fit(x[train, :], y[train], **fit_params)
prediction = m.predict(x[test, :], significance=None)
for k, s in enumerate(significance_levels):
scores = [scoring_func(prediction, y[test], s)
for scoring_func in scoring_funcs]
df_score = pd.DataFrame([[i, j, s] + scores],
columns=columns)
df = df.append(df_score, ignore_index=True)
return df
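# Minimal illustrative sketch of a custom scoring function: ``cross_val_score``
# expects scorers with the signature ``scorer(prediction, y, significance)``.
# The name ``reg_mean_interval_size`` below is a hypothetical example, not part
# of the library API; it mirrors the indexing convention of ``reg_n_correct``.
def reg_mean_interval_size(prediction, y, significance):
    """Average width of the regression prediction intervals (illustrative)."""
    if significance is not None:
        idx = int(significance * 100 - 1)
        prediction = prediction[:, :, idx]
    return np.mean(prediction[:, 1] - prediction[:, 0])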
def run_experiment(models, csv_files, iterations=10, folds=10, fit_params=None,
scoring_funcs=None, significance_levels=None,
normalize=False, verbose=False, header=0):
"""Performs a cross-validation evaluation of one or several conformal
predictors on a collection of data sets in csv format.
Parameters
----------
models : object or iterable
Conformal predictor(s) to evaluate.
csv_files : iterable
List of file names (with absolute paths) containing csv-data, used to
evaluate the conformal predictor.
iterations : int
Number of iterations to use for evaluation. The data set is randomly
shuffled before each iteration.
folds : int
Number of folds to use for evaluation.
fit_params : dictionary
Parameters to supply to the conformal prediction object on training.
scoring_funcs : iterable
List of evaluation functions to apply to the conformal predictor in each
fold. Each evaluation function should have a signature
``scorer(prediction, y, significance)``.
significance_levels : iterable
List of significance levels at which to evaluate the conformal
predictor.
verbose : boolean
Indicates whether to output progress information during evaluation.
Returns
-------
scores : pandas DataFrame
Tabulated results for each data set, iteration, fold and
evaluation function.
"""
df = pd.DataFrame()
if not hasattr(models, '__iter__'):
models = [models]
for model in models:
is_regression = model.get_problem_type() == 'regression'
n_data_sets = len(csv_files)
for i, csv_file in enumerate(csv_files):
if verbose:
print('\n{} ({} / {})'.format(csv_file, i + 1, n_data_sets))
data = pd.read_csv(csv_file, header=header)
x, y = data.values[:, :-1], data.values[:, -1]
x = np.array(x, dtype=np.float64)
if normalize:
if is_regression:
y = y - y.min() / (y.max() - y.min())
else:
for j, y_ in enumerate(np.unique(y)):
y[y == y_] = j
scores = cross_val_score(model, x, y, iterations, folds,
fit_params, scoring_funcs,
significance_levels, verbose)
ds_df = pd.DataFrame(scores)
ds_df['model'] = model.__class__.__name__
try:
ds_df['data_set'] = csv_file.split('/')[-1]
except:
ds_df['data_set'] = csv_file
df = df.append(ds_df)
return df
# -----------------------------------------------------------------------------
# Validity measures
# -----------------------------------------------------------------------------
def reg_n_correct(prediction, y, significance=None):
"""Calculates the number of correct predictions made by a conformal
regression model.
"""
if significance is not None:
idx = int(significance * 100 - 1)
prediction = prediction[:, :, idx]
low = y >= prediction[:, 0]
high = y <= prediction[:, 1]
correct = low * high
return y[correct].size
def reg_mean_errors(prediction, y, significance):
"""Calculates the average error rate of a conformal regression model.
"""
return 1 - reg_n_correct(prediction, y, significance) / y.size
def class_n_correct(prediction, y, significance):
"""Calculates the number of correct predictions made by a conformal
classification model.
"""
labels, y = np.unique(y, return_inverse=True)
prediction = prediction > significance
correct = np.zeros((y.size,), dtype=bool)
for i, y_ in enumerate(y):
correct[i] = prediction[i, int(y_)]
return np.sum(correct)
def class_mean_errors(prediction, y, significance=None):
"""Calculates the average error rate of a conformal classification model.
"""
return 1 - (class_n_correct(prediction, y, significance) / y.size)
def class_one_err(prediction, y, significance=None):
"""Calculates the error rate of conformal classifier predictions containing
only a single output label.
"""
labels, y = np.unique(y, return_inverse=True)
"""
Segment song motifs by finding maxima in spectrogram cross correlations.
"""
__date__ = "April 2019 - November 2020"
from affinewarp import ShiftWarping
import h5py
from itertools import repeat
from joblib import Parallel, delayed
import matplotlib.pyplot as plt
plt.switch_backend('agg')
try: # Numba >= 0.52
from numba.core.errors import NumbaPerformanceWarning
except ModuleNotFoundError:
try: # Numba <= 0.45
from numba.errors import NumbaPerformanceWarning
except (NameError, ModuleNotFoundError):
pass
import numpy as np
from scipy.io import wavfile
from scipy.io.wavfile import WavFileWarning
from scipy.signal import stft
from scipy.ndimage.filters import gaussian_filter
import os
import umap
import warnings
from ava.plotting.tooltip_plot import tooltip_plot
EPSILON = 1e-9
def get_template(feature_dir, p, smoothing_kernel=(0.5, 0.5), verbose=True):
"""
Create a linear feature template given exemplar spectrograms.
Parameters
----------
feature_dir : str
Directory containing multiple audio files to average together.
p : dict
Parameters. Must contain keys: ``'fs'``, ``'min_freq'``, ``'max_freq'``,
``'nperseg'``, ``'noverlap'``, ``'spec_min_val'``, ``'spec_max_val'``.
smoothing_kernel : tuple of floats, optional
Each spectrogram is blurred using a gaussian kernel with the following
bandwidths, in bins. Defaults to ``(0.5, 0.5)``.
verbose : bool, optional
Defaults to ``True``.
Returns
-------
template : np.ndarray
Spectrogram template.
"""
filenames = [os.path.join(feature_dir, i) for i in os.listdir(feature_dir) \
if _is_wav_file(i)]
specs = []
for i, filename in enumerate(filenames):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=WavFileWarning)
fs, audio = wavfile.read(filename)
assert fs == p['fs'], "Found samplerate="+str(fs)+\
", expected "+str(p['fs'])
spec, dt = _get_spec(fs, audio, p)
spec = gaussian_filter(spec, smoothing_kernel)
specs.append(spec)
min_time_bins = min(spec.shape[1] for spec in specs)
specs = np.array([i[:,:min_time_bins] for i in specs])
# Average over all the templates.
template = np.mean(specs, axis=0)
# Normalize to unit norm.
template -= np.mean(template)
template /= np.sum(np.power(template, 2)) + EPSILON
if verbose:
duration = min_time_bins * dt
print("Made template from", len(filenames), "files. Duration:", duration)
return template
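# Illustrative usage sketch; the parameter values below are placeholder
# assumptions, not recommendations from the original module:
#
#   p = {'fs': 44100, 'min_freq': 400, 'max_freq': 10e3, 'nperseg': 1024,
#        'noverlap': 512, 'spec_min_val': 2.0, 'spec_max_val': 6.0}
#   template = get_template('path/to/exemplar_wavs', p)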
def segment_files(audio_dirs, segment_dirs, template, p, num_mad=2.0, \
min_dt=0.05, n_jobs=1, verbose=True):
"""
Write segments to text files.
Parameters
----------
audio_dirs : list of str
Audio directories.
segment_dirs : list of str
Corresponding directories containing segmenting decisions.
template : numpy.ndarray
Spectrogram template.
p : dict
Parameters. Must contain keys: ``'fs'``, ``'min_freq'``, ``'max_freq'``,
``'nperseg'``, ``'noverlap'``, ``'spec_min_val'``, ``'spec_max_val'``.
num_mad : float, optional
Number of median absolute deviations for cross-correlation threshold.
Defaults to ``2.0``.
min_dt : float, optional
Minimum duration between cross correlation maxima. Defaults to ``0.05``.
n_jobs : int, optional
Number of jobs for parallelization. Defaults to ``1``.
verbose : bool, optional
Defaults to ``True``.
Returns
-------
result : dict
Maps audio filenames to segments (numpy.ndarrays).
"""
# Collect all the filenames we need to parallelize.
all_audio_fns = []
all_seg_dirs = []
for audio_dir, segment_dir in zip(audio_dirs, segment_dirs):
if not os.path.exists(segment_dir):
os.makedirs(segment_dir)
audio_fns = [os.path.join(audio_dir, i) for i in os.listdir(audio_dir) \
if _is_wav_file(i)]
all_audio_fns = all_audio_fns + audio_fns
all_seg_dirs = all_seg_dirs + [segment_dir]*len(audio_fns)
# Segment.
if verbose:
print("Segmenting files. n =",len(all_audio_fns))
gen = zip(all_seg_dirs, all_audio_fns, repeat(template), repeat(p), \
repeat(num_mad), repeat(min_dt))
res = Parallel(n_jobs=n_jobs)(delayed(_segment_file)(*args) for args in gen)
# Write results.
result = {}
num_segments = 0
for segment_dir, audio_fn, segments in res:
result[audio_fn] = segments
segment_fn = os.path.split(audio_fn)[-1][:-4] + '.txt'
segment_fn = os.path.join(segment_dir, segment_fn)
np.savetxt(segment_fn, segments, fmt='%.5f')
num_segments += len(segments)
if verbose:
print("\tFound", num_segments, "segments.")
print("\tDone.")
# Return a dictionary mapping audio filenames to segments.
return result
def read_segment_decisions(audio_dirs, segment_dirs, verbose=True):
"""
Returns the same data as ``segment_files``.
Parameters
----------
audio_dirs : list of str
Audio directories.
segment_dirs : list of str
Segment directories.
verbose : bool, optional
Defaults to ``True``.
Returns
-------
result : dict
Maps audio filenames to segments.
"""
if verbose:
print("Reading segments...")
result = {}
n_segs = 0
for audio_dir, segment_dir in zip(audio_dirs, segment_dirs):
audio_fns = [os.path.join(audio_dir, i) for i in os.listdir(audio_dir) \
if _is_wav_file(i)]
for audio_fn in audio_fns:
segment_fn = os.path.split(audio_fn)[-1][:-4] + '.txt'
segment_fn = os.path.join(segment_dir, segment_fn)
segments = np.loadtxt(segment_fn).reshape(-1,2)
result[audio_fn] = segments
n_segs += len(segments)
if verbose:
print("\tFound", n_segs, "segments.")
print("\tDone.")
return result
def _segment_file(segment_dir, filename, template, p, num_mad=2.0, min_dt=0.05,\
min_extra_time_bins=5):
"""
Match linear spetrogram features and extract times where features align.
Parameters
----------
segment_dir : str
Segment directory.
filename : str
Audio filename.
template : numpy.ndarray
Spectrogram template.
p : dict
Parameters. Must contain keys: ``'fs'``, ``'min_freq'``, ``'max_freq'``,
``'nperseg'``, ``'noverlap'``, ``'spec_min_val'``, ``'spec_max_val'``.
num_mad : float, optional
Number of median absolute deviations for cross-correlation threshold.
Defaults to ``2.0``.
min_dt : float, optional
...
min_extra_time_bins : int, optional
...
Returns
-------
segment_dir : str
Copied from input parameters.
filename : str
Copied from input parameters.
segments : numpy.ndarray
Onsets and offsets.
"""
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=WavFileWarning)
fs, audio = wavfile.read(filename)
assert fs == p['fs'], "Found samplerate="+str(fs)+", expected "+str(p['fs'])
if len(audio) < p['nperseg']:
warnings.warn(
"Found an audio file that is too short to make a spectrogram: "+\
filename + "\nSamples: "+str(len(audio))+"\np[\'nperseg\']: "+\
str(p['nperseg']),
UserWarning
)
return segment_dir, filename, np.zeros((0, 2))
big_spec, dt = _get_spec(fs, audio, p)
spec_len = template.shape[1]
template = template.flatten()
if big_spec.shape[1] - spec_len < min_extra_time_bins:
d1, d2 = dt*spec_len, dt*big_spec.shape[1]
warnings.warn(
"Found an audio file that is too short to extract segments from: "+\
filename + "\nTemplate duration: "+str(d1)+"\nFile duration: "+\
str(d2)+"\nConsider reducing the template duration.",
UserWarning
)
return segment_dir, filename, np.zeros((0, 2))
# Compute normalized cross-correlation.
result = np.zeros(big_spec.shape[1] - spec_len)
for i in range(len(result)):
temp = big_spec[:,i:i+spec_len].flatten()
temp -= np.mean(temp)
import math
import os, random
import cv2, argparse
import numpy as np
# Sunny
def change_brightness(image):
image_HLS = cv2.cvtColor(image, cv2.COLOR_RGB2HLS) # Conversion to HLS
image_HLS = np.array(image_HLS, dtype=np.float64)
random_brightness_coefficient = np.random.uniform() + 0.5 # generates value between 0.5 and 1.5
image_HLS[:, :, 1] = image_HLS[:, :, 1] * random_brightness_coefficient # scale pixel values up or down for channel 1(Lightness)
image_HLS[:, :, 1][image_HLS[:, :, 1] > 255] = 255 # Sets all values above 255 to 255
image_HLS = np.array(image_HLS, dtype=np.uint8)
image_RGB = cv2.cvtColor(image_HLS, cv2.COLOR_HLS2RGB) # Conversion to RGB
return image_RGB
def flare_source(image, point, radius, src_color):
overlay = image.copy()
output = image.copy()
num_times = radius // 10
alpha = np.linspace(0.0, 1, num=num_times)
rad = np.linspace(1, radius, num=num_times)
for i in range(num_times):
cv2.circle(overlay, point, int(rad[i]), src_color, -1)
alp = alpha[num_times - i - 1] * alpha[num_times - i - 1] * alpha[num_times - i - 1]
cv2.addWeighted(overlay, alp, output, 1 - alp, 0, output)
return output
def add_sun_flare_line(flare_center, angle, imshape):
x = []
y = []
for rand_x in range(0, imshape[1], 10):
rand_y = math.tan(angle) * (rand_x - flare_center[0]) + flare_center[1]
x.append(rand_x)
y.append(2 * flare_center[1] - rand_y)
return x, y
def add_sun_process(image, no_of_flare_circles, flare_center, src_radius, x, y, src_color):
overlay = image.copy()
output = image.copy()
imshape = image.shape
for i in range(no_of_flare_circles):
alpha = random.uniform(0.05, 0.2)
r = random.randint(0, len(x) - 1)
rad = random.randint(1, imshape[0] // 100 - 2)
cv2.circle(overlay, (int(x[r]), int(y[r])), rad * rad * rad, (
random.randint(max(src_color[0] - 50, 0), src_color[0]),
random.randint(max(src_color[1] - 50, 0), src_color[1]),
random.randint(max(src_color[2] - 50, 0), src_color[2])), -1)
cv2.addWeighted(overlay, alpha, output, 1 - alpha, 0, output)
output = flare_source(output, (int(flare_center[0]), int(flare_center[1])), src_radius, src_color)
return output
def sun_flare(image, flare_center=-1, no_of_flare_circles=8, src_radius=400, src_color=(255, 255, 255)):
angle = -1
angle = angle % (2 * math.pi)
if type(image) is list:
image_RGB = []
image_list = image
image_shape = image_list[0].shape
for img in image_list:
angle_t = random.uniform(0, 2 * math.pi)
if angle_t == math.pi / 2:
angle_t = 0
if flare_center == -1:
flare_center_t = (random.randint(0, image_shape[1]), random.randint(0, image_shape[0] // 2))
else:
flare_center_t = flare_center
x, y = add_sun_flare_line(flare_center_t, angle_t, image_shape)
output = add_sun_process(img, no_of_flare_circles, flare_center_t, src_radius, x, y, src_color)
image_RGB.append(output)
else:
image_shape = image.shape
if angle == -1:
angle_t = random.uniform(0, 2 * math.pi)
if angle_t == math.pi / 2:
angle_t = 0
else:
angle_t = angle
if flare_center == -1:
flare_center_t = (random.randint(0, image_shape[1]), random.randint(0, image_shape[0] // 2))
else:
flare_center_t = flare_center
x, y = add_sun_flare_line(flare_center_t, angle_t, image_shape)
output = add_sun_process(image, no_of_flare_circles, flare_center_t, src_radius, x, y, src_color)
image_RGB = output
return image_RGB
def generate_random_circles(imshape, slant, drop_length):
drops = []
size = int(imshape[0] * imshape[1] / 300)
for i in range(size): # If You want heavy rain, try increasing this
# if slant<0:
# x= np.random.randint(slant,imshape[1])
# else:
x = np.random.randint(0, imshape[1] - slant)
y = np.random.randint(0, imshape[0] - drop_length)
import matplotlib.pyplot as plt
import numpy as np
from latency_lists import *
flask_get = np.array(flask_get) * 1000
flask_post = np.array(flask_post) * 1000
flask_delete = np.array(flask_delete) * 1000
cherry_get = np.array(cherry_get) * 1000
cherry_post = np.array(cherry_post) * 1000
cherry_delete = np.array(cherry_delete) * 1000
'''
DESCRIPTION
----------
An assortment of code written for sanity checks on our 2017 TESS GI proposal
about difference imaging of clusters.
Most of this involving parsing Kharchenko et al (2013)'s table, hence the name
`parse_MWSC.py`.
The tools here do things like:
* Find how many open clusters we could observe
* Find how many member stars within those we could observe
* Compute TESS mags for everything (mostly via `ticgen`)
* Estimate blending effects, mainly through the dilution (computed just by
summing magnitudes appropriately)
* Using K+13's King profile fits, estimate the surface density of member stars.
It turns out that this radically underestimates the actual surface density
of stars (because of all the background blends). Moreover, for purposes of
motivating our difference imaging, "the number of stars in your aperture"
is more relevant than "a surface density", and even more relevant than both
of those is dilution.
So I settled on the dilution calculation.
The plotting scripts here also make the skymap figure of the proposal. (Where
are the clusters on the sky?)
USAGE
----------
From /src/, select desired functions from __main__ below. Then:
>>> python parse_MWSC.py > output.log
'''
import matplotlib.pyplot as plt, seaborn as sns
import pandas as pd, numpy as np
from astropy.table import Table
from astropy.io import ascii
from astropy.coordinates import SkyCoord
import astropy.units as u
from math import pi
import pickle, os
from scipy.interpolate import interp1d
global COLORS
COLORS = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b',
'#e377c2', '#7f7f7f', '#bcbd22', '#17becf']
# cite:
#
# <NAME>. & <NAME>. 2017, ticgen: A tool for calculating a TESS
# magnitude, and an expected noise level for stars to be observed by TESS.,
# v1.0.0, Zenodo, doi:10.5281/zenodo.888217
#
# and Stassun & friends (2017).
#import ticgen as ticgen
# # These two, from the website
# # http://dc.zah.uni-heidelberg.de/mwsc/q/clu/form
# # are actually outdated or something. They provided too few resuls..
# close_certain = pd.read_csv('../data/MWSC_search_lt_2000_pc_type_certain.csv')
# close_junk = pd.read_csv('../data/MWSC_search_lt_2000_pc_type_certain.csv')
def get_cluster_data():
# Downloaded the MWSC from
# http://cdsarc.u-strasbg.fr/viz-bin/Cat?cat=J%2FA%2BA%2F558%2FA53&target=http&
tab = Table.read('../data/Kharchenko_2013_MWSC.vot', format='votable')
df = tab.to_pandas()
for colname in ['Type', 'Name', 'n_Type', 'SType']:
df[colname] = [e.decode('utf-8') for e in list(df[colname])]
# From erratum:
# For the Sun-like star, a 4 Re planet produces a transit depth of 0.13%. The
# limiting magnitude for transits to be detectable is about I_C = 11.4 . This
# also corresponds to K_s ~= 10.6 and a maximum distance of 290 pc, assuming no
# extinction.
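# Sanity check of the quoted depth: a 4 R_earth planet across a Sun-like star
# gives (4*R_earth/R_sun)**2 = (4*6371/695700)**2 ~= 1.3e-3, i.e. ~0.13%.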
cinds = np.array(df['d']<500)
close = df[cinds]
finds = np.array(df['d']<1000)
far = df[finds]
N_c_r0 = int(np.sum(close['N1sr0']))
N_c_r1 = int(np.sum(close['N1sr1']))
N_c_r2 = int(np.sum(close['N1sr2']))
N_f_r0 = int(np.sum(far['N1sr0']))
N_f_r1 = int(np.sum(far['N1sr1']))
N_f_r2 = int(np.sum(far['N1sr2']))
type_d = {'a':'association', 'g':'globular cluster', 'm':'moving group',
'n':'nebulosity/presence of nebulosity', 'r':'remnant cluster',
's':'asterism', '': 'no label'}
ntype_d = {'o':'object','c':'candidate','':'no label'}
print('*'*50)
print('\nMilky Way Star Clusters (close := <500pc)'
'\nN_clusters: {:d}'.format(len(close))+\
'\nN_stars (in core): {:d}'.format(N_c_r0)+\
'\nN_stars (in central part): {:d}'.format(N_c_r1)+\
'\nN_stars (in cluster): {:d}'.format(N_c_r2))
print('\n'+'*'*50)
print('\nMilky Way Star Clusters (far := <1000pc)'
'\nN_clusters: {:d}'.format(len(far))+\
'\nN_stars (in core): {:d}'.format(N_f_r0)+\
'\nN_stars (in central part): {:d}'.format(N_f_r1)+\
'\nN_stars (in cluster): {:d}'.format(N_f_r2))
print('\n'+'*'*50)
####################
# Post-processing. #
####################
# Compute mean density
mean_N_star_per_sqdeg = df['N1sr2'] / (pi * df['r2']**2)
df['mean_N_star_per_sqdeg'] = mean_N_star_per_sqdeg
# Compute King profiles
king_profiles, theta_profiles = [], []
for rt, rc, k, d in zip(np.array(df['rt']),
np.array(df['rc']),
np.array(df['k']),
np.array(df['d'])):
sigma, theta = get_king_proj_density_profile(rt, rc, k, d)
king_profiles.append(sigma)
theta_profiles.append(theta)
df['king_profile'] = king_profiles
df['theta'] = theta_profiles
ra = np.array(df['RAJ2000'])
dec = np.array(df['DEJ2000'])
c = SkyCoord(ra=ra*u.degree, dec=dec*u.degree, frame='icrs')
galactic_long = np.array(c.galactic.l)
galactic_lat = np.array(c.galactic.b)
ecliptic_long = np.array(c.barycentrictrueecliptic.lon)
ecliptic_lat = np.array(c.barycentrictrueecliptic.lat)
df['galactic_long'] = galactic_long
df['galactic_lat'] = galactic_lat
df['ecliptic_long'] = ecliptic_long
df['ecliptic_lat'] = ecliptic_lat
cinds = np.array(df['d']<500)
close = df[cinds]
finds = np.array(df['d']<1000)
far = df[finds]
return close, far, df
def distance_histogram(df):
plt.close('all')
f,ax = plt.subplots(figsize=(4,4))
hist, bin_edges = np.histogram(
df['d'],
        bins=np.append(np.logspace(1, 6, 1000), 1e7),
        density=False)
ax.step(bin_edges[:-1], np.cumsum(hist), 'k-', where='post')
ax.set_xlabel('distance [pc]')
ax.set_ylabel('cumulative N clusters in MWSC')
ax.set_xlim([5e1,1e4])
ax.set_xscale('log')
ax.set_yscale('log')
f.tight_layout()
f.savefig('d_cumdistribn_MWSC.pdf', dpi=300, bbox_inches='tight')
def angular_scale_cumdist(close, far):
colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b',
'#e377c2', '#7f7f7f', '#bcbd22', '#17becf']
plt.close('all')
f,ax = plt.subplots(figsize=(4,4))
axt = ax.twiny()
scale_d = {'r0': 'angular radius of the core (0 if no core)',
'r1': '"central" radius',
'r2': 'cluster radius'}
ix = 0
for t, dat in [('$d<0.5$ kpc',close), ('$d<1$ kpc',far)]:
for k in ['r2']:
hist, bin_edges = np.histogram(
dat[k],
                bins=np.append(np.logspace(-2, 1, 1000), 1e7),
                density=False)
ax.step(bin_edges[:-1], np.cumsum(hist),
where='post', label=t+' '+scale_d[k])
ix += 1
def tick_function(angle_deg):
tess_px = 21*u.arcsec
vals = angle_deg/tess_px.to(u.deg).value
return ['%.1f' % z for z in vals]
ax.legend(loc='upper left', fontsize='xx-small')
ax.set_xlabel('ang scale [deg]')
ax.set_ylabel('cumulative N clusters in MWSC')
ax.set_xscale('log')
#ax.set_yscale('log')
axt.set_xscale('log')
axt.set_xlim(ax.get_xlim())
new_tick_locations = np.array([1e-2, 1e-1, 1e0, 1e1])
axt.set_xticks(new_tick_locations)
axt.set_xticklabels(tick_function(new_tick_locations))
axt.set_xlabel('angular scale [TESS pixels]')
f.tight_layout()
f.savefig('angscale_cumdistribn_MWSC.pdf', dpi=300, bbox_inches='tight')
def angular_scale_hist(close, far):
colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b',
'#e377c2', '#7f7f7f', '#bcbd22', '#17becf']
plt.close('all')
f,ax = plt.subplots(figsize=(4,4))
axt = ax.twiny()
scale_d = {'r0': 'angular radius of the core (0 if no core)',
'r1': '"central" radius',
'r2': 'cluster radius'}
ix = 0
for t, dat in [('$d<0.5$ kpc',close), ('$d<1$ kpc',far)]:
for k in ['r2']:
hist, bin_edges = np.histogram(
dat[k],
bins=np.append(np.logspace(-2,1,7), 1e7),
                density=False)
ax.step(bin_edges[:-1], hist, where='post', label=t+' '+scale_d[k],
alpha=0.7)
ix += 1
def tick_function(angle_deg):
tess_px = 21*u.arcsec
vals = angle_deg/tess_px.to(u.deg).value
return ['%.1f' % z for z in vals]
ax.legend(loc='best', fontsize='xx-small')
ax.set_xlabel('ang scale [deg]')
ax.set_ylabel('N clusters in MWSC')
ax.set_xscale('log')
#ax.set_yscale('log')
axt.set_xscale('log')
axt.set_xlim(ax.get_xlim())
new_tick_locations = np.array([1e-2, 1e-1, 1e0, 1e1])
axt.set_xticks(new_tick_locations)
axt.set_xticklabels(tick_function(new_tick_locations))
axt.set_xlabel('angular scale [TESS pixels]')
f.tight_layout()
f.savefig('angscale_distribn_MWSC.pdf', dpi=300, bbox_inches='tight')
def mean_density_hist(close, far):
colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b',
'#e377c2', '#7f7f7f', '#bcbd22', '#17becf']
plt.close('all')
f,ax = plt.subplots(figsize=(4,4))
axt = ax.twiny()
ix = 0
for t, dat in [('$d<0.5$ kpc',close), ('$d<1$ kpc',far)]:
hist, bin_edges = np.histogram(
dat['mean_N_star_per_sqdeg'],
bins=np.append(np.logspace(0,4,9), 1e7),
            density=False)
ax.step(bin_edges[:-1], hist, where='post', label=t,
alpha=0.7)
ix += 1
def tick_function(N_star_per_sqdeg):
tess_px = 21*u.arcsec
tess_px_area = tess_px**2
deg_per_tess_px = tess_px_area.to(u.deg**2).value
vals = N_star_per_sqdeg * deg_per_tess_px
outstrs = ['%.1E'%z for z in vals]
outstrs = ['$'+o[0] + r'\! \cdot \! 10^{\mathrm{-}' + o[-1] + r'}$' \
for o in outstrs]
return outstrs
ax.legend(loc='best', fontsize='xx-small')
ax.set_xlabel('mean areal density [stars/$\mathrm{deg}^{2}$]')
ax.set_ylabel('N clusters in MWSC')
ax.set_xscale('log')
#ax.set_yscale('log')
axt.set_xscale('log')
axt.set_xlim(ax.get_xlim())
new_tick_locations = np.logspace(0,4,5)
axt.set_xticks(new_tick_locations)
axt.set_xticklabels(tick_function(new_tick_locations))
axt.set_xlabel('mean areal density [stars/$\mathrm{(TESS\ px)}^{2}$]')
f.tight_layout()
f.savefig('mean_density_distribn_MWSC.pdf', dpi=300, bbox_inches='tight')
def plot_king_profiles(close, far):
colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b',
'#e377c2', '#7f7f7f', '#bcbd22', '#17becf']
plt.close('all')
f, axs = plt.subplots(figsize=(4,7), nrows=2, ncols=1, sharex=True)
for theta, profile in zip(close['theta'], close['king_profile']):
axs[0].plot(theta, profile, alpha=0.2, c=colors[0])
for theta, profile in zip(far['theta'], far['king_profile']):
axs[1].plot(theta, profile, alpha=0.1, c=colors[1])
# Add text in top right.
axs[0].text(0.95, 0.95, '$d < 500\ \mathrm{pc}$', verticalalignment='top',
horizontalalignment='right', transform=axs[0].transAxes,
fontsize='large')
axs[1].text(0.95, 0.95, '$d < 1\ \mathrm{kpc}$', verticalalignment='top',
horizontalalignment='right', transform=axs[1].transAxes,
fontsize='large')
xmin, xmax = 1, 1e3
for ax in axs:
ax.set_xscale('log')
ax.set_xlim([xmin, xmax])
if ax == axs[1]:
ax.xaxis.set_ticks_position('both')
ax.set_xlabel('angular distance [TESS px]')
ax.tick_params(which='both', direction='in', zorder=0)
ax.set_ylabel(r'$\Sigma(r)$ [stars/$\mathrm{(TESS\ px)}^{2}$]')
f.tight_layout(h_pad=0)
f.savefig('king_density_profiles_close_MWSC.pdf', dpi=300,
bbox_inches='tight')
def get_king_proj_density_profile(r_t, r_c, k, d):
'''
r_t: King's tidal radius [pc]
r_c: King's core radius [pc]
k: normalization [pc^{-2}]
d: distance [pc]
returns density profile in number per sq tess pixel
'''
# Eq 4 of Ernst et al, 2010 https://arxiv.org/pdf/1009.0710.pdf
# citing King (1962).
r = np.logspace(-2, 2.4, num=int(2e4))
X = 1 + (r/r_c)**2
C = 1 + (r_t/r_c)**2
vals = k * (X**(-1/2) - C**(-1/2))**2
#NOTE: this fails when r_t does not exist. This might be important...
vals[r>r_t] = 0
# vals currently in number per square parsec. want in number per TESS px.
# first convert to number per square arcsec
# N per sq arcsec. First term converts to 1/AU^2. Then the angular surface
# density scales as the square of the distance (same number of things,
# smaller angle)
sigma = vals * 206265**(-2) * d**2
tess_px = 21*u.arcsec
arcsec_per_px = 21
sigma_per_sq_px = sigma * arcsec_per_px**2 # N per px^2
# r is in pc. we want the profile vs angular distance.
AU_per_pc = 206265
r *= AU_per_pc # r now in AU
theta = r / d # angular distance in arcsec
tess_px = 21 # arcsec per px
theta *= (1/tess_px) # angular distance in px
return sigma_per_sq_px, theta
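# Usage sketch (illustrative parameter values, not taken from the catalog): a cluster with
# tidal radius 10 pc, core radius 1.5 pc, normalization 5 stars/pc^2, at 300 pc.
# >>> sigma, theta = get_king_proj_density_profile(10., 1.5, 5., 300.)
# >>> plt.loglog(theta, sigma)  # surface density [stars/(TESS px)^2] vs angular distance [TESS px]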
def make_wget_script(df):
'''
to download stellar data for each cluster, need to run a script of wgets.
this function makes the script.
'''
# get MWSC ids in "0012", "0007" format
mwsc = np.array(df['MWSC'])
mwsc_ids = np.array([str(int(f)).zfill(4) for f in mwsc])
names = np.array(df['Name'])
f = open('../data/MWSC_stellar_data/get_stellar_data.sh', 'w')
outstrs = []
for mwsc_id, name in zip(mwsc_ids, names):
startstr = 'wget '+\
'ftp://cdsarc.u-strasbg.fr/pub/cats/J/A%2BA/558/A53/stars/2m_'
middlestr = str(mwsc_id) + '_' + str(name)
endstr = '.dat.bz2 ;\n'
outstr = startstr + middlestr + endstr
outstrs.append(outstr)
f.writelines(outstrs)
f.close()
print('made wget script!')
def get_stellar_data_too(df, savstr, p_0=61):
'''
args:
savstr (str): gets the string used to ID the output pickle
p_0: probability for inclusion. See Eqs in Kharchenko+ 2012. p_0=61 (not
sure why not 68.27) is 1 sigma members by kinematic and photometric
membership probability, also accounting for spatial step function and
proximity within stated cluster radius.
call after `get_cluster_data`.
This function reads the Kharchenko+ 2013 "stars/*" tables for each cluster,
and selects the stars that are "most probably cluster members, that is,
stars with kinematic and photometric membership probabilities >61%".
(See Kharchenko+ 2012 for definitions of these probabilities)
It then computes T mags for all of the members.
For each cluster, it computes surface density vs angular distance from
cluster center.
%%%Method 1 (outdated):
%%%Interpolating these results over the King profiles, it associates a surface
%%% density with each star.
%%%(WARNING: how many clusters do not have King profiles?)
Method 2 (used):
Associate a surface density with each star by counting stars in annuli.
This is also not very useful.
It then returns "close", "far", and the entire dataframe
'''
names = np.array(df['Name'])
r2s = np.array(df['r2']) # cluster radius (deg)
# get MWSC ids in "0012", "0007" format
mwsc = np.array(df['MWSC'])
mwsc_ids = np.array([str(int(f)).zfill(4) for f in mwsc])
readme = '../data/stellar_data_README'
outd = {}
# loop over clusters
ix = 0
for mwsc_id, name, r2 in list(zip(mwsc_ids, names, r2s)):
print('\n'+50*'*')
print('{:d}. {:s}: {:s}'.format(ix, str(mwsc_id), str(name)))
outd[name] = {}
middlestr = str(mwsc_id) + '_' + str(name)
fpath = '../data/MWSC_stellar_data/2m_'+middlestr+'.dat'
if name != 'Melotte_20':
tab = ascii.read(fpath, readme=readme)
else:
continue
# Select 1-sigma cluster members by photometry & kinematics.
# From Kharchenko+ 2012, also require that:
# * the 2MASS flag Qflg is "A" (i.e., signal-to-noise ratio
# S/N > 10) in each photometric band for stars fainter than
# Ks = 7.0;
# * the mean errors of proper motions are smaller than 10 mas/yr
# for stars with δ ≥ −30deg , and smaller than 15 mas/yr for
# δ < −30deg.
inds = (tab['Ps'] == 1)
inds &= (tab['Pkin'] > p_0)
inds &= (tab['PJKs'] > p_0)
inds &= (tab['PJH'] > p_0)
inds &= (tab['Rcl'] < r2)
inds &= ( ((tab['Ksmag']>7) & (tab['Qflg']=='AAA')) | (tab['Ksmag']<7))
pm_inds = ((tab['e_pm'] < 10) & (tab['DEdeg']>-30)) | \
((tab['e_pm'] < 15) & (tab['DEdeg']<=-30))
inds &= pm_inds
members = tab[inds]
mdf = members.to_pandas()
# Compute T mag and 1-sigma, 1 hour integration noise using Mr Tommy
# B's ticgen utility. NB relevant citations are listed at top.
# NB I also modified his code to fix the needlessly complicated
# np.savetxt formatting.
mags = mdf[['Bmag', 'Vmag', 'Jmag', 'Hmag', 'Ksmag']]
mags.to_csv('temp.csv', index=False)
ticgen.ticgen_csv({'input_fn':'temp.csv'})
temp = pd.read_csv('temp.csv-ticgen.csv')
member_T_mags = np.array(temp['Tmag'])
noise = np.array(temp['noise_1sig'])
mdf['Tmag'] = member_T_mags
mdf['noise_1hr'] = noise
#########################################################################
## METHOD #1 to assign surface densities:
## The King profile for the cluster is already known. Assign each member
## star a surface density from the King profile evaluated at the member
## star's angular position.
#king_profile = np.array(df.loc[df['Name']==name, 'king_profile'])[0]
#king_theta = np.array(df.loc[df['Name']==name, 'theta'])[0]
## theta is saved in units of TESS px. Get each star's distance from the
## center in TESS pixels.
#arcsec_per_tesspx = 21
#Rcl = np.array(mdf['Rcl'])*u.deg
#dists_from_center = np.array(Rcl.to(u.arcsec).value/arcsec_per_tesspx)
## interpolate over the King profile
#func = interp1d(theta, king_profile, fill_value='extrapolate')
#try:
# density_per_sq_px = func(dists_from_center)
#except:
# print('SAVED OUTPUT TO ../data/Kharachenko_full.p')
# pickle.dump(outd, open('../data/Kharachenko_full.p', 'wb'))
# print('interpolation failed. check!')
# import IPython; IPython.embed()
#mdf['density_per_sq_px'] = density_per_sq_px
#########################################################################
#########################################################################
# METHOD #2 for surface densities (because Method #1 only counts
# member stars!).
# Just count stars in annuli.
king_profile = np.array(df.loc[df['Name']==name, 'king_profile'])[0]
king_theta = np.array(df.loc[df['Name']==name, 'theta'])[0]
inds = (tab['Rcl'] < r2)
stars_in_annulus = tab[inds]
sia = stars_in_annulus.to_pandas()
arcsec_per_tesspx = 21
Rcl = np.array(sia['Rcl'])*u.deg
dists_from_center = np.array(Rcl.to(u.arcsec).value/arcsec_per_tesspx)
maxdist = ((r2*u.deg).to(u.arcsec).value/arcsec_per_tesspx)
n_pts = np.min((50, int(len(sia)/2)))
angsep_grid = np.linspace(0, maxdist, num=n_pts)
# Attempt to compute Tmags for everything. Only count stars with
# T<limiting magnitude as "contaminants" (anything else is probably too
# faint to really matter!)
mags = sia[['Bmag', 'Vmag', 'Jmag', 'Hmag', 'Ksmag']]
mags.to_csv('temp.csv', index=False)
ticgen.ticgen_csv({'input_fn':'temp.csv'})
temp = pd.read_csv('temp.csv-ticgen.csv')
T_mags = np.array(temp['Tmag'])
all_dists = dists_from_center[(T_mags > 0) & (T_mags < 17) & \
(np.isfinite(T_mags))]
N_in_bin, edges = np.histogram(
all_dists,
bins=angsep_grid,
            density=False)
# compute empirical surface density, defined on the midpoints
outer, inner = angsep_grid[1:], angsep_grid[:-1]
sigma = N_in_bin / (pi * (outer**2 - inner**2))
midpoints = angsep_grid[:-1] + np.diff(angsep_grid)/2
# interpolate over the empirical surface density as a function of
# angular separation to assign surface densities to member stars.
func = interp1d(midpoints, sigma, fill_value='extrapolate')
member_Rcl = np.array(mdf['Rcl'])*u.deg
member_dists_from_center = np.array(member_Rcl.to(u.arcsec).value/\
arcsec_per_tesspx)
try:
member_density_per_sq_px = func(member_dists_from_center)
except:
            print('SAVED OUTPUT TO ../data/Kharchenko_full_{:s}.p'.format(savstr))
            pickle.dump(outd, open(
                '../data/Kharchenko_full_{:s}.p'.format(savstr), 'wb'))
print('interpolation failed. check!')
import IPython; IPython.embed()
mdf['density_per_sq_px'] = member_density_per_sq_px
#########################################################################
N_catalogd = int(df.loc[df['Name']==name, 'N1sr2'])
N_my_onesigma = int(len(mdf))
got_Tmag = (np.array(mdf['Tmag']) > 0)
N_with_Tmag = len(mdf[got_Tmag])
print('N catalogued as in cluster: {:d}'.format(N_catalogd))
print('N I got as in cluster: {:d}'.format(N_my_onesigma))
print('N of them with Tmag: {:d}'.format(N_with_Tmag))
diff = abs(N_catalogd - N_with_Tmag)
if diff > 5:
            print('\nWARNING: my cuts differ from Kharchenko+ 2013!!')
lens = np.array([len(member_T_mags),
len(noise),
len(member_dists_from_center),
len(member_density_per_sq_px)])
np.testing.assert_equal(lens, lens[0]*np.ones_like(lens))
# for members
outd[name]['Tmag'] = np.array(mdf['Tmag'])
outd[name]['noise_1hr'] = np.array(mdf['noise_1hr'])
outd[name]['Rcl'] = member_dists_from_center
outd[name]['density_per_sq_px'] = member_density_per_sq_px
        # Occasionally, do some output plots to compare profiles
if ix%50 == 0:
plt.close('all')
f, ax=plt.subplots()
ax.scatter(member_dists_from_center, member_density_per_sq_px)
ax.plot(king_theta, king_profile)
ax.set_ylim([0,np.max((np.max(member_density_per_sq_px),
np.max(king_profile) ) )])
ax.set_xlim([0, 1.02*np.max(member_dists_from_center)])
ax.set_xlabel('angular sep [TESS px]')
ax.set_ylabel('surface density (line: King model, dots: empirical'
' [per tess px area]', fontsize='xx-small')
f.savefig('king_v_empirical/{:s}_{:d}.pdf'.format(name, ix),
bbox_inches='tight')
del mdf
ix += 1
print(50*'*')
print('SAVED OUTPUT TO ../data/Kharchenko_full_{:s}.p'.format(savstr))
pickle.dump(outd, open(
'../data/Kharchenko_full_{:s}.p'.format(savstr), 'wb'))
print(50*'*')
close = df[df['d'] < 500]
far = df[df['d'] < 1000]
return close, far, df
def get_dilutions_and_distances(df, savstr, faintest_Tmag=16, p_0=61):
'''
args:
savstr (str): gets the string used to ID the output pickle
p_0: probability for inclusion. See Eqs in Kharchenko+ 2012. p_0=61 (not
sure why not 68.27) is 1 sigma members by kinematic and photometric
membership probability, also accounting for spatial step function and
proximity within stated cluster radius.
call after `get_cluster_data`.
This function reads the Kharchenko+ 2013 "stars/*" tables for each cluster,
and selects the stars that are "most probably cluster members, that is,
stars with kinematic and photometric membership probabilities >61%".
(See Kharchenko+ 2012 for definitions of these probabilities)
It then computes T mags for all of the members.
For each cluster member, it then finds all cataloged stars (not necessarily
cluster members) within 2, 3, 4, 5, 6 TESS pixels.
It sums the fluxes, and computes a dilution.
It saves (for each cluster member):
* number of stars in various apertures
* dilution for various apertures
* distance of cluster member
* Tmag of cluster member
* noise_1hr for cluster member
* ra,dec for cluster member
'''
names = np.array(df['Name'])
r2s = np.array(df['r2'])
# get MWSC ids in "0012", "0007" format
mwsc = np.array(df['MWSC'])
mwsc_ids = np.array([str(int(f)).zfill(4) for f in mwsc])
readme = '../data/stellar_data_README'
outd = {}
# loop over clusters
ix = 0
start, step = 3, 7
for mwsc_id, name, r2 in list(zip(mwsc_ids, names, r2s))[start::step]:
print('\n'+50*'*')
print('{:d}. {:s}: {:s}'.format(ix, str(mwsc_id), str(name)))
outd[name] = {}
outpath = '../data/MWSC_dilution_calc/{:s}.csv'.format(str(name))
if os.path.exists(outpath):
print('found {:s}, continue'.format(outpath))
continue
middlestr = str(mwsc_id) + '_' + str(name)
fpath = '../data/MWSC_stellar_data/2m_'+middlestr+'.dat'
if name not in ['Melotte_20', 'Sco_OB4']:
tab = ascii.read(fpath, readme=readme)
else:
continue
# Select 1-sigma cluster members by photometry & kinematics.
# From Kharchenko+ 2012, also require that:
# * the 2MASS flag Qflg is "A" (i.e., signal-to-noise ratio
# S/N > 10) in each photometric band for stars fainter than
# Ks = 7.0;
# * the mean errors of proper motions are smaller than 10 mas/yr
# for stars with δ ≥ −30deg , and smaller than 15 mas/yr for
# δ < −30deg.
inds = (tab['Ps'] == 1)
inds &= (tab['Pkin'] > p_0)
inds &= (tab['PJKs'] > p_0)
inds &= (tab['PJH'] > p_0)
inds &= (tab['Rcl'] < r2)
inds &= ( ((tab['Ksmag']>7) & (tab['Qflg']=='AAA')) | (tab['Ksmag']<7))
pm_inds = ((tab['e_pm'] < 10) & (tab['DEdeg']>-30)) | \
((tab['e_pm'] < 15) & (tab['DEdeg']<=-30))
inds &= pm_inds
members = tab[inds]
mdf = members.to_pandas()
# Compute T mag and 1-sigma, 1 hour integration noise using Mr Tommy
# B's ticgen utility. NB relevant citations are listed at top.
# NB I also modified his code to fix the needlessly complicated
# np.savetxt formatting.
mags = mdf[['Bmag', 'Vmag', 'Jmag', 'Hmag', 'Ksmag']]
mags.to_csv('temp{:s}.csv'.format(name), index=False)
ticgen.ticgen_csv({'input_fn':'temp{:s}.csv'.format(name)})
temp = pd.read_csv('temp{:s}.csv-ticgen.csv'.format(name))
member_T_mags = np.array(temp['Tmag'])
member_noise = np.array(temp['noise_1sig'])
mdf['Tmag'] = member_T_mags
mdf['noise_1hr'] = member_noise
desired_Tmag_inds = ((member_T_mags > 0) & (member_T_mags < faintest_Tmag) & \
            (np.isfinite(member_T_mags)))
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
import argparse
from scipy.stats import gamma
from scipy.optimize import minimize,fmin_l_bfgs_b
#import autograd.numpy as np
#from autograd import grad, jacobian, hessian
def measure_sensitivity(X):
N = len(X)
Ds = 1/N * (np.abs(np.max(X) - np.min(X)))
return(Ds)
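# Usage sketch (hypothetical data): per-record sensitivity of the sample mean, i.e.
# (max(X) - min(X)) / N for the observed sample.
# >>> X = np.random.poisson(4.0, size=500)
# >>> Ds = measure_sensitivity(X)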
def measure_sensitivity_private(distribution, N, theta_vector, cliplo, cliphi):
#computed on a surrogate sample different than the one being analyzed
if distribution == 'poisson':
theta = theta_vector[0]
if cliphi == np.inf:
Xprime = np.random.poisson(theta, size=1000)
Xmax, Xmin = np.max(Xprime), np.min(Xprime)
else:
Xmax, Xmin = cliphi, cliplo
Ds = 1/N * (np.abs(Xmax - Xmin))
if distribution == 'gaussian':
theta, sigma = theta_vector[0], theta_vector[1]
if cliphi == np.inf:
Xprime = np.random.normal(theta, sigma, size=1000)
Xmax, Xmin = np.max(Xprime), np.min(Xprime)
else:
Xmax, Xmin = cliphi, cliplo
Ds = 1/N * (np.abs(Xmax - Xmin))
if distribution == 'gaussian2':
theta, sigma = theta_vector[0], theta_vector[1]
if cliphi == np.inf:
Xprime = np.random.normal(theta, sigma, size=1000)
Xmax, Xmin = np.max(Xprime), np.min(Xprime)
else:
Xmax, Xmin = cliphi, cliplo
Ds1 = 1/N * (np.abs(Xmax - Xmin))
Ds2 = 2/N * (np.abs(Xmax - Xmin))
Ds = [Ds1, Ds2]
if distribution == 'gamma':
theta, theta2 = theta_vector[0], theta_vector[1]
if cliphi == np.inf:
Xprime = np.random.gamma(theta2, theta, size=1000)
Xmax, Xmin = np.max(Xprime), np.min(Xprime)
else:
Xmax, Xmin = cliphi, cliplo
Ds = 1/N * (np.abs(Xmax - Xmin))
if distribution == 'gaussianMV':
theta, theta2 = theta_vector[0], theta_vector[1]
K = len(theta)
Xprime = np.random.multivariate_normal(theta, theta2, size=1000)
Xmax, Xmin = np.max(Xprime, axis=0), np.min(Xprime,axis=0)
Ds = 1/N * (np.abs(Xmax.T-Xmin.T))
return(Ds, [Xmin, Xmax])
def A_SSP(X, Xdistribution, privately_computed_Ds, laplace_noise_scale, theta_vector, rho):
N = len(X)
if Xdistribution == 'poisson':
s = 1/N * np.sum(X)
z = np.random.laplace(loc=s, scale=privately_computed_Ds/laplace_noise_scale, size = 1)
theta_hat_given_s = s
theta_hat_given_z = z
return({'0priv': theta_hat_given_z, '0basic': theta_hat_given_s})
if Xdistribution == 'gaussian':
s = 1/N * np.sum(X)
z = np.random.laplace(loc=s, scale=privately_computed_Ds/laplace_noise_scale, size = 1)
theta_hat_given_s = s
theta_hat_given_z = z
return({'0priv': theta_hat_given_z, '0basic': theta_hat_given_s})
if Xdistribution == 'gaussian2':
s1 = 1/N * np.sum(X)
s2 = 1/N * np.sum(np.abs(X-s1)) # see du et al 2020
z1 = np.random.laplace(loc=s1, scale=privately_computed_Ds[0]/(laplace_noise_scale*rho), size = 1)
z2 = np.random.laplace(loc=s2, scale=privately_computed_Ds[1]/(laplace_noise_scale*(1-rho)), size = 1)
theta_hat_given_s = s1
theta2_hat_given_s = np.sqrt(np.pi/2) * max(0.00000001, s2)
theta_hat_given_z = z1
theta2_hat_given_z = np.sqrt(np.pi/2) * max(0.00000001, z2)
return({'0priv': theta_hat_given_z, '1priv': theta2_hat_given_z, '0basic': theta_hat_given_s, '1basic': theta2_hat_given_s})
if Xdistribution == 'gamma':
K = theta_vector[1]
s = 1/N * np.sum(X)
z = np.random.laplace(loc=s, scale=privately_computed_Ds/laplace_noise_scale, size = 1)
theta_hat_given_s = 1/K * s
theta_hat_given_z = 1/K * z
return({'1priv': theta_hat_given_z, '1basic': theta_hat_given_s})
if Xdistribution == 'gaussianMV':
mu = theta_vector[0]
Sigma = theta_vector[1]
s = 1/N * np.sum(X, axis=0)
z = np.random.laplace(loc=s, scale=privately_computed_Ds/laplace_noise_scale)
theta_hat_given_s = s
theta_hat_given_z = z
return({'1priv': theta_hat_given_z, '1basic': theta_hat_given_s})
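# Usage sketch (hypothetical parameters): one private release of a Poisson mean. The
# sensitivity is estimated on a surrogate sample, then A_SSP adds Laplace noise with
# scale Ds/laplace_noise_scale, so a larger `laplace_noise_scale` means less noise.
# >>> X = np.random.poisson(4.0, size=1000)
# >>> Ds, _ = measure_sensitivity_private('poisson', len(X), [4.0], 0, np.inf)
# >>> out = A_SSP(X, 'poisson', Ds, laplace_noise_scale=1.0, theta_vector=[4.0], rho=0.5)
# >>> out['0priv'], out['0basic']  # noised vs. non-private estimate of theta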
def A_SSP_autodiff(X, Xdistribution, privately_computed_Ds, laplace_noise_scale, theta_vector):
N = len(X)
theta_init = [1.9, 3.9]
if Xdistribution == 'poisson':
        s = 1/N * np.sum(X)
"""
Mestrado em Engenharia de Telecomunicacoes
<NAME>
02/03/2016
"""
from os.path import abspath, join, dirname
from scipy.signal import convolve2d
import matplotlib.pyplot as ppl
import time
import numpy as np
import skimage
import sys
import cv2
import math
sys.path.insert(0, abspath(join(dirname(__file__), '..')))
from data_information import dcm_information as di
from skimage.segmentation import clear_border
# from withinSkull import withinSkull
from mls_parzen import mls_parzen, conv2
# import FolClustering as fl
# from AreaLabel import AreaLabel
from cv2 import connectedComponentsWithStats, CV_8U
import Adriell_defs as ad
import pywt
resultTotalDensRLSParzen2 = []
tempTotalDensRLSParzen2 = []
debug = False
count_debug = 1
flag = 1
for r in range(1, 2):
print("Repeticao", r)
tempDensRLSParzen = []
time_total = []
for z in range(1, 101):
        # Load the original image
imgOrig = di.load_dcm("../datasets/OriginalCT/Imagem{}.dcm".format(z))
# img_GT = cv2.imread("../datasets/resultados_GT/Imagem{}.png".format(z))
img_original = np.copy(imgOrig)
img_original= cv2.resize(img_original, (256, 256))
# img_original,var2,var3,var4=ad.haar(img_original)
# img_original=np.uint8(img_original)
# img_GT = cv2.resize(img_GT, (256, 256))
# TODO - Calcular o inicio do tempo aqui
start = time.time()
img_res= (img_original-1024)
img_oss,img_oss_ant=ad.within_skull(img_res)
img_oss,img_oss_erode=ad.image_morfologic1(img_oss_ant,1,2)
# ==============================================================================================================
M = cv2.moments(img_oss)
cY = int(M["m10"] / M["m00"])
cX = int(M["m01"] / M["m00"])
img_oss_center=(cX,cY)
img_oss[cX,cY]=0
img_si,img_si_ant,img_si_ant_show= ad.mult(img_res,img_oss,img_oss_erode)
# =========================================================================================================
img_si_ant,img_si_ant_median= ad.thresolded_img(img_si_ant)
# =====================================================================================================================
img_si_ant,img_si_after_erode = ad.image_morfologic2(img_si_ant)
# =============================================================================================================
n, labels, stats, centroids = cv2.connectedComponentsWithStats(img_si_ant)
# =============================================================================================================
img_filtered= ad.centroid_operations1(centroids,img_oss_center,stats,n,labels)
# =============================================================================================================
M = cv2.moments(img_filtered)
cY = int(M["m10"] / M["m00"])
cX = int(M["m01"] / M["m00"])
img_filtered_center = (cX, cY)
# ===========================================================================================================================
n, labels, stats, centroids = cv2.connectedComponentsWithStats(img_si_after_erode)
img_final=ad.centroid_operations2(centroids,img_filtered_center,labels,n)
        X = np.asarray(img_si_ant_show, np.int16)
import pybullet as p
import pybullet_data
from PhysicalEngine.utils import macro_const
import numpy as np
import time
const = macro_const.Const()
def create_floor(position=(0,0,0), orientation=(0,0,0), urdf=None, color=None, texture=None, friction=0.1, client=0):
"""
Create floor
-------------
urdf[.urdf]: plane urdf
color[list]: RGBA, plane color.
texture[.jpg/.png]: texture image
friction[float]: lateral friction of the floor
Return:
--------
floorID: floor ID.
"""
p.setAdditionalSearchPath(pybullet_data.getDataPath())
if urdf is None:
urdf = 'plane.urdf'
planeID = p.loadURDF('plane.urdf', basePosition=position, baseOrientation=p.getQuaternionFromEuler(orientation))
if color is not None:
# Load texture
p.changeVisualShape(planeID, -1, rgbaColor=color, physicsClientId=client)
if texture is not None:
textureID = p.loadTexture(texture)
p.changeVisualShape(planeID, -1, textureUniqueId=textureID, physicsClientId=client)
# Change lateral friction
p.changeDynamics(planeID, -1, lateralFriction=friction)
return planeID
def create_cylinder(position, radius, height, orientation=(0,0,0), color=None, texture=None, mass=1, friction=0.1, client=0, isCollision=True):
"""
create cylinder in physical scene.
----------------------
position[3-element tuple]: Center position of the cylinder
orientation[3-element tuple]: Euler orientation, listed as (roll, yaw, pitch)
radius[float]: radius of the cylinder
height[float]: height of the cylinder
color[4-element tuple]: rgba color
texture[.jpg/.png]: texture image
mass[float]: mass of the cylinder
friction[float]: lateral friction
isCollision[Bool]: is collision or not.
Return:
-------
cylinderID: cylinder ID
"""
cylinderVisualShape = p.createVisualShape(shapeType=p.GEOM_CYLINDER,
radius=radius,
length=height,
physicsClientId=client)
if isCollision:
cylinderCollisionShape = p.createCollisionShape(
shapeType=p.GEOM_CYLINDER,
radius=radius,
height=height,
physicsClientId=client)
else:
cylinderCollisionShape = const.NULL_OBJ
cylinderID = p.createMultiBody(
baseMass=mass,
baseCollisionShapeIndex=cylinderCollisionShape,
baseVisualShapeIndex=cylinderVisualShape,
basePosition=position,
baseOrientation=p.getQuaternionFromEuler(orientation),
physicsClientId=client
)
if color is not None:
# change color
p.changeVisualShape(cylinderID, -1, rgbaColor=color, physicsClientId=client)
if texture is not None:
# change texture
textureID = p.loadTexture(texture)
p.changeVisualShape(cylinderID, -1, textureUniqueId=textureID, physicsClientId=client)
# Set lateral friction
p.changeDynamics(cylinderID, -1, lateralFriction=friction)
return cylinderID
def create_box(position, size, orientation=(0,0,0), color=None, texture=None, mass=1, friction=0.1, client=0, isCollision=True):
"""
create one box in physical scene
--------------------------
position[3-element tuple]: position (center) of the box
orientation[3-element tuple]: Euler orientation, listed as (roll, yaw, pitch)
size[3-element tuple]: size of the box
color[list]: RGBA color
texture[.jpg/.png]: texture image
mass[float]: mass of the box
friction[float]: lateral friction
client: physics client
isCollision[Bool]: Consider collision or not.
Return:
-------
boxID: box ID
"""
boxVisualShape = p.createVisualShape(shapeType=p.GEOM_BOX,
halfExtents=np.array(size)/2,
rgbaColor=color,
physicsClientId=client)
if isCollision:
boxCollisionShape = p.createCollisionShape(shapeType=p.GEOM_BOX,
halfExtents=np.array(size)/2,
physicsClientId=client)
else:
boxCollisionShape = const.NULL_OBJ
boxID = p.createMultiBody(baseVisualShapeIndex=boxVisualShape,
baseCollisionShapeIndex=boxCollisionShape,
basePosition=position,
baseOrientation=p.getQuaternionFromEuler(orientation),
baseMass=mass,
physicsClientId=client)
if color is not None:
# change color
p.changeVisualShape(boxID, -1, rgbaColor=color, physicsClientId=client)
if texture is not None:
# change texture
textureID = p.loadTexture(texture)
p.changeVisualShape(boxID, -1, textureUniqueId=textureID, physicsClientId=client)
# Set lateral friction
p.changeDynamics(boxID, -1, lateralFriction=friction)
return boxID
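# Usage sketch (assumes an active pybullet session, e.g. after p.connect(p.GUI)):
# >>> floorID = create_floor(friction=0.5)
# >>> boxID = create_box(position=(0, 0, 0.2), size=(0.4, 0.4, 0.4), color=(1, 0, 0, 1), mass=1.0)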
def check_box_in_ground(boxID, ground_height=0.0, tol=0.001):
"""
Check if box located in ground
-------------------------
boxID[int]: box ID
ground_height[float]: baseline of the ground, if height of ground is not 0, provide it here.
tol[float]: tolence that boxs located on ground.
Return:
-------
Bool: in/not on ground
"""
minPos, maxPos = p.getAABB(boxID)
if np.abs(minPos[-1]) < tol + ground_height:
return True
else:
return False
def object_overlap_correct(boxIDs, velocity_tol=0.005):
"""
Correct objects to prevent interobject penetrations
Using Simulation to re-organize objects' configuration
--------------------------------------------
boxIDs[list]: box IDs
velocity_tol[float]: velocity tolerance, default is 0.005
Return:
-------
box_pos_all[three-dimensional list]: corrected configuration, position.
box_ori_all[three-dimensional list]: corrected configuration, orientation.
"""
while 1:
p.setGravity(0,0,0)
velocity = 0
p.stepSimulation()
for boxID in boxIDs:
lin_vec_tmp, ang_vec_tmp = p.getBaseVelocity(boxID)
velocity_tmp = np.sum(np.array(lin_vec_tmp)**2+np.array(ang_vec_tmp)**2)
velocity += velocity_tmp
# print(' Velocity: {}'.format(velocity))
p.resetBaseVelocity(boxID, [0,0,0], [0,0,0])
if velocity < velocity_tol:
break
# Get position of each box
box_pos_all = []
box_ori_all = []
for boxID in boxIDs:
box_pos, box_ori = p.getBasePositionAndOrientation(boxID)
box_pos_all.append(box_pos)
box_ori_all.append(box_ori)
return box_pos_all, box_ori_all
def adjust_box_size(boxIDs, sigma):
"""
Adjust object size by adding shape noise to each object object.
----------------------
boxIDs[list]: All boxes in the configuration
sigma[float]: Shape noise was added as a horizontal gaussian noise.
The gaussian noise follows N~(0, sigma)
Return:
-------
box_size_all: new shape of each box
box_pos_all: new position of each box
box_ori_all: new orientation of each box
"""
box_size_all = []
box_pos_all = []
box_ori_all = []
box_color_all = []
box_mass_all = []
box_friction_all = []
for i, boxID in enumerate(boxIDs):
# Get box shape
box_size = np.array(p.getVisualShapeData(boxID)[0][3])
box_color = p.getVisualShapeData(boxID)[0][-1]
box_color_all.append(box_color)
# Get box dynamics
box_mass = p.getDynamicsInfo(boxID,-1)[0]
box_friction = p.getDynamicsInfo(boxID,-1)[1]
box_mass_all.append(box_mass)
box_friction_all.append(box_friction)
# Get box position
box_pos, box_ori = p.getBasePositionAndOrientation(boxID)
box_pos_all.append(box_pos)
box_ori_all.append(box_ori)
# Prepare Gaussian noise
size_nos = np.random.normal(0, sigma, 3)
size_nos[-1] = 0
# Change Shape
box_size_nos = box_size + size_nos
box_size_all.append(box_size_nos)
# Remove bodies
for i, boxID in enumerate(boxIDs):
p.removeBody(boxID)
        boxID = create_box(box_pos_all[i], box_size_all[i], color=box_color_all[i], mass=box_mass_all[i], friction=box_friction_all[i])
# Position correction, in case of interobject penetration
box_pos_all, box_ori_all = object_overlap_correct(boxIDs)
return box_size_all, box_pos_all, box_ori_all
def adjust_confg_position_gaussian(boxIDs, sigma):
"""
Adjust configuration by adding position noise to each box object.
-------------------------------
boxIDs[list]: All boxes in the configuration
sigma[float]: Position noise was added as a horizontal gaussian noise.
The gaussian noise follows N~(0, sigma)
Return:
-------
box_pos_all: new position of each box
box_ori_all: new orientation of each box
"""
for boxID in boxIDs:
# Get box position
box_pos, box_ori = p.getBasePositionAndOrientation(boxID)
# Prepare Gaussian Noise
pos_nos = np.random.normal(0, sigma, 3)
# No noise along Z axis
pos_nos[-1] = 0
# print('Noise is {}'.format(pos_nos))
# Add noise
box_pos_nos = box_pos + pos_nos
p.resetBasePositionAndOrientation(boxID, box_pos_nos, box_ori)
# Position correction, in case of interobject penetration
box_pos_all, box_ori_all = object_overlap_correct(boxIDs)
return box_pos_all, box_ori_all
def adjust_confg_position_fixdistance(boxIDs, magnitude):
"""
Adjust configuration with specific distance from each of original box object.
Moving distance was randomized sampled from a uniform distribution.
--------------------------------------
boxIDs[list]: All boxes in the configuration
magnitude[float]: Moving distance.
Return:
--------
box_pos_all: new position of each box
box_ori_all: new orientation of each box
"""
for boxID in boxIDs:
# Get box position
box_pos, box_ori = p.getBasePositionAndOrientation(boxID)
# Prepare angle sampled from uniform distribution
angle = np.random.uniform(0, 2*const.PI)
pos_nos = np.array([magnitude*np.cos(angle), magnitude*np.sin(angle), 0])
# print('Noise is {}'.format(pos_nos))
# Add noise
box_pos_nos = box_pos + pos_nos
p.resetBasePositionAndOrientation(boxID, box_pos_nos, box_ori)
# Position correction, in case of interobject penetration
box_pos_all, box_ori_all = object_overlap_correct(boxIDs)
return box_pos_all, box_ori_all
def prepare_force_noise_inground(boxIDs, f_mag, f_angle, ground_height=0.0):
"""
Prepare force noise.
Force noise was only added into the box which located in ground.
------------------
boxIDs[list]: All boxes in the configuration.
f_mag[float]: Force Magnitude.
f_angle[float]: Force Angle, need to transfer into radian measure.
ground_height[float]: ground height.
Return:
-------
targboxID[int]: the box ID needed to be forced.
forceObj[Three-dimension list]: Force vector to be applied.
PosObj[Three-dimension list]: Position vector to be applied.
"""
# Find Box located in the ground ----------
isground = []
for boxID in boxIDs:
isground.append(check_box_in_ground(boxID, ground_height=ground_height))
if sum(isground)>0:
targbox = np.random.choice(np.where(isground)[0])
else:
# Exception happened.
print('No box was found located in the ground.')
targbox = None
if targbox is not None:
# Force was interacted into this box.
targboxID = boxIDs[targbox]
# Get Position of this targbox
box_pos, box_ori = p.getBasePositionAndOrientation(targboxID)
# Prepare force -----------------------
# Force orientation: uniform over the range [0, 360]
forceObj = [f_mag*np.cos(f_angle), f_mag*np.sin(f_angle), 0]
posObj = box_pos
else:
targboxID = None
forceObj = None
posObj = None
return targboxID, forceObj, posObj
def prepare_force_allblocks(boxIDs, f_mag, f_angle):
"""
Prepare force noise
Force noise was added into all boxes of the configuration
----------------
boxIDs[list]: All boxes in the configurations.
f_mag[float]: Force Magnitude.
f_angle[float]: Force Angle, need to transfer into radian measure.
Return:
--------
forceObj[list]: Force vector to be appied. Each element is a three dimensional list indicated force in x, y and z axis. Noted that force=0 in z axis.
PosObj[list]: Position vector to be applied. Each element is a three dimensional list indicated position in x, y and z axis. The positionObj inherited from position of each box.
"""
forceObj = []
PosObj = []
for boxID in boxIDs:
box_pos, box_ori = p.getBasePositionAndOrientation(boxID)
# Prepare force
# Force orientation: uniform over the range [0, 360]
forceVector = [f_mag*np.cos(f_angle), f_mag*np.sin(f_angle), 0]
forceObj.append(forceVector)
PosObj.append(box_pos)
return forceObj, PosObj
def examine_stability(box_pos_ori, box_pos_fin, tol=0.01):
"""
Examine the stability of the configuration.
Stability was evaluated by checking position difference of the original configuration and the final configuration.
---------------------------
box_pos_ori[three-dim list]: original box positions
box_pos_fin[three-dim list]: final box positions
Return:
-------
isstable[bool list]: whether configuration is stable or not, each element represent one box in the configuration.
True for stable and False for unstable.
"""
assert len(box_pos_ori) == len(box_pos_fin), "Need to use the same configuration."
box_num = len(box_pos_ori)
isstable = []
for i in range(box_num):
# Consider its z axis shift
pos_diff = (box_pos_ori[i][-1] - box_pos_fin[i][-1])**2
if pos_diff > tol:
# print('Box {} moved'.format(i+1))
isstable.append(False)
else:
isstable.append(True)
return isstable
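# Usage sketch (assuming `box_pos_before` / `box_pos_after` are the per-box positions
# recorded before and after stepping the simulation): a False entry marks a box whose
# height changed by more than `tol`, i.e. it moved.
# >>> isstable = examine_stability(box_pos_before, box_pos_after)
# >>> config_fell = (False in isstable)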
def run_IPE(boxIDs, pos_sigma, force_magnitude, force_time=0.2, ground_height=0.0, n_iter=1000, shownotion=True):
"""
Run model of intuitive physical engine. Add position noise and force noise to the configuration and evaluate confidence under each parameter pair.
Note that for position noise, we adjust position of each box, for force noise, we add force to the box that located in the ground (randomly add forces to one of the boxes). Direction of force was uniformly sampled under range around [0, 2*PI]
-------------------
boxIDs[list]: box IDs.
pos_sigma[float]: Position noise was added as a horizontal gaussian noise. The gaussian noise follows N~(0, sigma).
force_magnitude[float]: force magnitude.
force_time[float]: add force within the first n seconds.
ground_height[float]: ground height.
n_iter[int]: IPE iterations.
    shownotion[bool]: Whether to print the stability outcome of each iteration. By default is True.
Return:
--------
confidence[bool list]: stability confidence
"""
print('IPE Simulation with parameters {}:{}'.format(pos_sigma, force_magnitude))
confidence = []
# Record initial configuration
box_pos_ini, box_ori_ini = [], []
for boxID in boxIDs:
box_pos_tmp, box_ori_tmp = p.getBasePositionAndOrientation(boxID)
box_pos_ini.append(box_pos_tmp)
box_ori_ini.append(box_ori_tmp)
# Start Simulation
for n in range(n_iter):
# First, adjust position of the configuration.
box_pos_adj, box_ori_adj = adjust_confg_position_gaussian(boxIDs, pos_sigma)
for i, boxID in enumerate(boxIDs):
p.resetBasePositionAndOrientation(boxID, box_pos_adj[i], box_ori_adj[i])
# Second, prepare force noise
# force angle generated uniformly
force_angle = np.random.uniform(0, 2*const.PI)
targboxID, force_arr, position_arr = prepare_force_noise_inground(boxIDs, force_magnitude, force_angle, ground_height=ground_height)
if targboxID is not None:
# targboxID is None indicated no box located in the ground.
# No need to do simulation for we have no idea on the force during simulation.
# Here, simulation could be done
# Get original position of each box
# For we examine position difference along z axis, here we do not record orientation of each box.
box_pos_ori = []
for boxID in boxIDs:
box_pos_tmp, _ = p.getBasePositionAndOrientation(boxID)
box_pos_ori.append(box_pos_tmp)
# Simulation
# Set gravity
p.setGravity(0,0,const.GRAVITY)
for i in range(400):
p.stepSimulation()
time.sleep(const.TIME_STEP)
if i<force_time/(const.TIME_STEP): # Add force within the first 200ms
# Add force to the target box
p.applyExternalForce(targboxID, -1, force_arr, position_arr, p.WORLD_FRAME)
# Evaluate stability
# Get base position of the configuration
box_pos_fin = []
for boxID in boxIDs:
box_pos_tmp, _ = p.getBasePositionAndOrientation(boxID)
box_pos_fin.append(box_pos_tmp)
# Examine stability
isstable = examine_stability(box_pos_ori, box_pos_fin)
if shownotion:
                if (False in isstable):
print(' {}:Unstable'.format(n+1))
else:
print(' {}:Stable'.format(n+1))
            confidence.append(False in isstable)
# Finally, initialize configuration
for i, boxID in enumerate(boxIDs):
p.resetBasePositionAndOrientation(boxID, box_pos_ini[i], box_ori_ini[i])
return confidence
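# Usage sketch (illustrative parameter values): sample one position-noise / force-noise
# setting; the mean over iterations approximates the model's probability that the tower falls.
# >>> fell = run_IPE(boxIDs, pos_sigma=0.02, force_magnitude=5.0, n_iter=100)
# >>> p_unstable = np.mean(fell)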
def place_boxes_on_space(box_num, box_size_all, pos_range_x = (-1, 1), pos_range_y = (-1, 1), overlap_thr_x=0.50, overlap_thr_y=0.50):
"""
Place boxes on space one by one.
This algorithm was set as follows:
We iteratively generate box with its horizontal position located within the pos_range. If it did not overlap with previous generated boxes, then it located in the ground. If it overlapped with some of previous boxes, we placed it to the highest position among all previous boxes.
Larger pos_range ensures lower height of this configuration, otherwise also works.
--------------------------
box_num[int]: the number of boxes.
box_size_all[list/tuple]: box size list/tuple. Each element in the list was a three-dimensional list. The actual size of the box was randomly selected from this list.
pos_range_x[two-element tuple]: the maximum horizontal position in x axis that the box could be placed. Tuple indicated (min_x, max_x) in the space.
pos_range_y[two-element tuple]: the maximum horizontal position in y axis that the box could be placed. Tuple indicated (min_y, max_y) in the space.
overlap_thr_x[float]: A parameter to control stability of the stimuli. It indicated the minimal overlap between two touched boxes when the stimuli was generated. The value means proportion of the length in x axis of the present box.
overlap_thr_y[float]: A parameter to control stability of the stimuli. It indicated the minimal overlap between two touched boxes when the stimuli was generated. The value means proportion of the length in y axis of the present box.
Returns:
box_pos[list]: all box positions in the configuration.
boxsize_idx_all[list]: indices which corresponding to box_size_all in order to save the potential size of each box.
"""
box_pos_all = []
boxsize_idx_all = []
for n in range(box_num):
present_box_num = len(box_pos_all)
# Decide size of the box
box_size_idx = np.random.choice(len(box_size_all))
box_size_now = box_size_all[box_size_idx]
assert len(box_size_now) == 3, "Size of the box is a three-dimensional list."
# Randomly generate a position for the box
x_pos = np.random.uniform(pos_range_x[0], pos_range_x[1])
y_pos = np.random.uniform(pos_range_y[0], pos_range_y[1])
# Check if this box was overlapped with previous boxes
if present_box_num == 0:
# If no previous box, place the new one in the ground.
box_pos_all.append([x_pos, y_pos, box_size_now[-1]/2])
else:
# If there are boxes, examine if the new one overlapped with previous configuration.
z_pos = 0 + box_size_now[-1]/2
for i, box_pos_prev in enumerate(box_pos_all):
# Get box size
box_size_prev = box_size_all[boxsize_idx_all[i]]
pos_x_diff = np.abs(x_pos-box_pos_prev[0])
pos_y_diff = np.abs(y_pos-box_pos_prev[1])
# Overlap in x/y axis
overlap_x = (box_size_now[0]+box_size_prev[0])/2 - pos_x_diff
overlap_y = (box_size_now[1]+box_size_prev[1])/2 - pos_y_diff
if (overlap_x>0) & (overlap_y>0):
# Exclude situations that two boxes with small overlapping.
# We correct the position of the present box.
if overlap_x < overlap_thr_x * box_size_now[0]:
# If overlap is too small, then correct it into a fix distance: overlap_thr_x*box_size_now
x_correct_dist = (box_size_now[0]+box_size_prev[0])/2 - overlap_thr_x * box_size_now[0]
if x_pos < box_pos_prev[0]:
x_pos = box_pos_prev[0] - x_correct_dist
else:
x_pos = box_pos_prev[0] + x_correct_dist
if overlap_y < overlap_thr_y * box_size_now[1]:
# Same judgment in y axis.
y_correct_dist = (box_size_now[1]+box_size_prev[1])/2 - overlap_thr_y * box_size_now[1]
if y_pos < box_pos_prev[1]:
y_pos = box_pos_prev[1] - y_correct_dist
else:
y_pos = box_pos_prev[1] + y_correct_dist
# Overlap, check if we need to update z axis of the new box.
z_obj_prev = box_pos_prev[-1] + box_size_prev[-1]/2 + box_size_now[-1]/2
if z_obj_prev > z_pos:
z_pos = 1.0*z_obj_prev
else:
# No overlap just pass this iteration.
pass
box_pos_all.append([x_pos, y_pos, z_pos])
boxsize_idx_all.append(box_size_idx)
return box_pos_all, boxsize_idx_all
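# Usage sketch (hypothetical box sizes, in meters): stack ten boxes drawn from three shapes,
# then instantiate them with create_box.
# >>> sizes = [(0.4, 0.2, 0.2), (0.2, 0.4, 0.2), (0.2, 0.2, 0.4)]
# >>> box_pos, size_idx = place_boxes_on_space(10, sizes, pos_range_x=(-0.5, 0.5), pos_range_y=(-0.5, 0.5))
# >>> boxIDs = [create_box(pos, sizes[i]) for pos, i in zip(box_pos, size_idx)]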
def overlap_between_twoboxes(boxID1, boxID2):
"""
Calculate overlap between two boxes in an axis.
----------------------------------
boxID1[int]: ID of the first box
boxID2[int]: ID of the second box
Return:
----------
overlap[three-dimensional array]: overlap in three axis. Note that the negative overlap indicated no overlap.
"""
# Get shape of the two boxes
boxshape1 = np.array(p.getVisualShapeData(boxID1)[0][3])
boxshape2 = np.array(p.getVisualShapeData(boxID2)[0][3])
# Get position of the two boxes
boxpos1 = np.array(p.getBasePositionAndOrientation(boxID1)[0])
boxpos2 = np.array(p.getBasePositionAndOrientation(boxID2)[0])
# Overlap = 0.5(shape1+shape2) - posdiff
    overlap = 0.5*(boxshape1+boxshape2) - np.abs(boxpos1-boxpos2)
    return overlap
#!/usr/bin/env python
import sys
sys.path.append('../neural_networks')
import numpy as np
import numpy.matlib
import pickle
import copy
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
import matplotlib.pyplot as plt
import os
import time
import copy
from gym_collision_avoidance.envs.policies.CADRL.scripts.neural_networks import neural_network_regr_multi as nn
from gym_collision_avoidance.envs.policies.CADRL.scripts.multi import pedData_processing_multi as pedData
from gym_collision_avoidance.envs.policies.CADRL.scripts.neural_networks.nn_training_param import NN_training_param
from gym_collision_avoidance.envs.policies.CADRL.scripts.neural_networks.multiagent_network_param import Multiagent_network_param
from gym_collision_avoidance.envs.policies.CADRL.scripts.multi import global_var as gb
# setting up global variables
COLLISION_COST = gb.COLLISION_COST
DIST_2_GOAL_THRES = gb.DIST_2_GOAL_THRES
GETTING_CLOSE_PENALTY = gb.GETTING_CLOSE_PENALTY
GETTING_CLOSE_RANGE = gb.GETTING_CLOSE_RANGE
EPS = gb.EPS
# terminal states
NON_TERMINAL = gb.NON_TERMINAL
COLLIDED = gb.COLLIDED
REACHED_GOAL = gb.REACHED_GOAL
# plotting colors
plt_colors = gb.plt_colors
GAMMA = gb.RL_gamma
DT_NORMAL = gb.RL_dt_normal
SMOOTH_COST = gb.SMOOTH_COST
# for 'rotate_constr'
TURNING_LIMIT = np.pi/6.0
# neural network
NN_ranges = gb.NN_ranges
# calculate the minimum distance between two line segments
# not counting the starting point
def find_dist_between_segs(x1, x2, y1, y2):
# x1.shape = (2,)
# x2.shape = (num_actions,2)
# y1.shape = (2,)
# y2.shape = (num_actions,2)
if_one_pt = False
if x2.shape == (2,):
x2 = x2.reshape((1,2))
y2 = y2.reshape((1,2))
if_one_pt = True
start_dist = np.linalg.norm(x1 - y1)
end_dist = np.linalg.norm(x2 - y2, axis=1)
critical_dist = end_dist.copy()
# start_dist * np.ones((num_pts,)) # initialize
# critical points (where d/dt = 0)
z_bar = (x2 - x1) - (y2 - y1) # shape = (num_actions, 2)
inds = np.where((np.linalg.norm(z_bar,axis=1)>0))[0]
t_bar = - np.sum((x1-y1) * z_bar[inds,:], axis=1) \
/ np.sum(z_bar[inds,:] * z_bar[inds,:], axis=1)
t_bar_rep = np.matlib.repmat(t_bar, 2, 1).transpose()
dist_bar = np.linalg.norm(x1 + (x2[inds,:]-x1) * t_bar_rep \
- y1 - (y2[inds,:]-y1) * t_bar_rep, axis=1)
inds_2 = np.where((t_bar > 0) & (t_bar < 1.0))
critical_dist[inds[inds_2]] = dist_bar[inds_2]
# end_dist = end_dist.clip(min=0, max=start_dist)
min_dist = np.amin(np.vstack((end_dist, critical_dist)), axis=0)
# print 'min_dist', min_dist
if if_one_pt:
return min_dist[0]
else:
return min_dist
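# Usage sketch: two agents whose straight-line paths cross mid-step; the minimum separation
# over the step is 0 even though the start and end separations are not.
# >>> x1, x2 = np.array([0., 0.]), np.array([1., 0.])
# >>> y1, y2 = np.array([0.5, 1.]), np.array([0.5, -1.])
# >>> find_dist_between_segs(x1, x2, y1, y2)  # -> 0.0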
''' calculate distance between point p3 and
line segment p1->p2'''
def distPointToSegment(p1, p2, p3):
#print p1
#print p2
#print p3
d = p2 - p1
#print 'd', d
#print '(p3-p1)', (p3-p1)
#print 'linalg.norm(d) ** 2', linalg.norm(d) ** 2.0
if np.linalg.norm(d) < EPS:
u = 0.0
else:
u = np.dot(d, (p3-p1)) / (np.linalg.norm(d) ** 2.0)
u = max(0.0, min(u, 1.0))
inter = p1 + u * d
dist = np.linalg.norm(p3 - inter)
return dist
def generate_rand_test_case_multi(num_agents, side_length, speed_bnds, radius_bnds, \
is_end_near_bnd = False, is_static = False):
# num_agents_sampled = np.random.randint(2, high=num_agents+1)
num_agents_sampled = num_agents
# num_agents_sampled = 2
random_case = np.random.rand()
if is_static == True:
test_case = generate_static_case(num_agents_sampled, side_length, \
speed_bnds, radius_bnds)
# else:
# if random_case < 0.5:
# test_case = generate_swap_case(num_agents_sampled, side_length, \
# speed_bnds, radius_bnds)
# elif random_case > 0.5:
# test_case = generate_circle_case(num_agents_sampled, side_length, \
# speed_bnds, radius_bnds)
else:
if random_case < 0.15:
test_case = generate_swap_case(num_agents_sampled, side_length, \
speed_bnds, radius_bnds)
elif random_case > 0.15 and random_case < 0.3:
test_case = generate_circle_case(num_agents_sampled, side_length, \
speed_bnds, radius_bnds)
else:
# is_static == False:
test_case = generate_rand_case(num_agents_sampled, side_length, speed_bnds, radius_bnds, \
is_end_near_bnd = is_end_near_bnd)
return test_case
def generate_rand_case(num_agents, side_length, speed_bnds, radius_bnds, \
is_end_near_bnd=False):
test_case = np.zeros((num_agents, 6))
# if_oppo = np.random.rand() > 0.8
for i in range(num_agents):
# radius
test_case[i,5] = (radius_bnds[1] - radius_bnds[0]) \
* np.random.rand() + radius_bnds[0]
counter = 0
s1 = (speed_bnds[1] - speed_bnds[0]) * np.random.rand() + speed_bnds[0]
s2 = (speed_bnds[1] - speed_bnds[0]) * np.random.rand() + speed_bnds[0]
test_case[i,4] = max(s1, s2)
while True:
# generate random starting/ending points
counter += 1
side_length *= 1.01
start = side_length * 2 * np.random.rand(2,) - side_length
end = side_length * 2 * np.random.rand(2,) - side_length
# make end point near the goal
if is_end_near_bnd == True:
# left, right, top, down
random_side = np.random.randint(4)
if random_side == 0:
end[0] = np.random.rand() * 0.1 * \
side_length - side_length
elif random_side == 1:
end[0] = np.random.rand() * 0.1 * \
side_length + 0.9 * side_length
elif random_side == 2:
end[1] = np.random.rand() * 0.1 * \
side_length - side_length
elif random_side == 3:
end[1] = np.random.rand() * 0.1 * \
side_length + 0.9 * side_length
else:
assert(0)
# agent 1 & 2 in opposite directions
# if i == 0 and if_oppo == True:
# start[0] = 0; start[1] = 0
# end[0] = (5-1) * np.random.rand() + 1; end[1] = 0
# elif i == 1 and if_oppo == True:
# start[0] = (1-0.5) * np.random.rand() + 1.0; start[1] = np.random.rand() * 0.5 - 0.25
# end[0] = (-1-(-5)) * np.random.rand() -5; end[1] = np.random.rand() * 0.5 - 0.25
# if colliding with previous test cases
if_collide = False
for j in range(i):
radius_start = test_case[j,5] + test_case[i,5] + GETTING_CLOSE_RANGE
radius_end = test_case[j,5] + test_case[i,5] + GETTING_CLOSE_RANGE
# start
if np.linalg.norm(start - test_case[j,0:2] ) < radius_start:
if_collide = True
break
# end
if np.linalg.norm(end - test_case[j,2:4]) < radius_end:
if_collide = True
break
if if_collide == True:
continue
# if straight line is permited
if i >=1:
if_straightLineSoln = True
for j in range(0,i):
x1 = test_case[j,0:2]; x2 = test_case[j,2:4];
y1 = start; y2 = end;
s1 = test_case[j,4]; s2 = test_case[i,4];
radius = test_case[j,5] + test_case[i,5] + GETTING_CLOSE_RANGE
if if_permitStraightLineSoln(x1, x2, s1, y1, y2, s2, radius) == False:
# print 'num_agents %d; i %d; j %d'% (num_agents, i, j)
if_straightLineSoln = False
break
if if_straightLineSoln == True:
continue
if np.linalg.norm(start-end) > side_length * 0.5:
break
# record test case
test_case[i,0:2] = start
test_case[i,2:4] = end
# test_case[i,4] = (speed_bnds[1] - speed_bnds[0]) \
# * np.random.rand() + speed_bnds[0]
return test_case
def generate_easy_rand_case(num_agents, side_length, speed_bnds, radius_bnds, agent_separation, \
is_end_near_bnd=False):
test_case = np.zeros((num_agents, 6))
# align agents so they just have to go approximately horizontal to their goal (above one another)
agent_pos = agent_separation*np.arange(num_agents)
np.random.shuffle(agent_pos)
for i in range(num_agents):
radius = np.random.uniform(radius_bnds[0], radius_bnds[1])
        speed = np.random.uniform(speed_bnds[0], speed_bnds[1])
#!/usr/bin/env python
u"""
read_cryosat_L1b.py
Written by <NAME> (02/2020)
Reads CryoSat Level-1b data products from baselines A, B and C
Reads CryoSat Level-1b netCDF4 data products from baseline D
Supported CryoSat Modes: LRM, SAR, SARin, FDM, SID, GDR
INPUTS:
full_filename: full path of CryoSat .DBL or .nc file
OUTPUTS:
Location: Time and Orbit Group
Data: Measurements Group
Geometry: External Corrections Group
Waveform_1Hz: Average Waveforms Group
Waveform_20Hz: Waveforms Group (with SAR/SARIN Beam Behavior Parameters)
METADATA: MPH, SPH and DSD Header data
PYTHON DEPENDENCIES:
numpy: Scientific Computing Tools For Python
https://numpy.org
https://numpy.org/doc/stable/user/numpy-for-matlab-users.html
netCDF4: Python interface to the netCDF C library
https://unidata.github.io/netcdf4-python/netCDF4/index.html
UPDATE HISTORY:
Updated 02/2020: tilde-expansion of cryosat-2 files before opening
add scale factors function for converting packed units in binary files
convert from hard to soft tabulation
Updated 11/2019: empty placeholder dictionary for baseline D DSD headers
Updated 09/2019: added netCDF4 read function for baseline D
will output with same variable names as the binary read functions
Updated 04/2019: USO correction signed 32 bit int
Updated 10/2018: updated header read functions for python3
Updated 05/2016: using __future__ print and division functions
Written 03/2016
"""
from __future__ import print_function
from __future__ import division
import os
import re
import netCDF4
import numpy as np
#-- PURPOSE: Initiate L1b MDS variables for CryoSat Baselines A and B
def cryosat_baseline_AB(fid, n_records, MODE):
n_SARIN_RW = 512
n_SAR_RW = 128
n_LRM_RW = 128
n_blocks = 20
n_BeamBehaviourParams = 50
#-- CryoSat-2 Time and Orbit Group
Location = {}
#-- Time: day part
Location['Day'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Time: second part
Location['Second'] = np.zeros((n_records,n_blocks),dtype=np.uint32)
#-- Time: microsecond part
Location['Micsec'] = np.zeros((n_records,n_blocks),dtype=np.uint32)
#-- USO correction factor
Location['USO_Corr'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Mode ID
Location['Mode_ID'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
#-- Source sequence counter
Location['SSC'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
#-- Instrument configuration
Location['Inst_config'] = np.zeros((n_records,n_blocks),dtype=np.uint32)
#-- Record Counter
Location['Rec_Count'] = np.zeros((n_records,n_blocks),dtype=np.uint32)
#-- Lat: packed units (0.1 micro-degree, 1e-7 degrees)
Location['Lat'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Lon: packed units (0.1 micro-degree, 1e-7 degrees)
Location['Lon'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Alt: packed units (mm, 1e-3 m)
#-- Altitude of COG above reference ellipsoid (interpolated value)
Location['Alt'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Instantaneous altitude rate derived from orbit: packed units (mm/s, 1e-3 m/s)
Location['Alt_rate'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Satellite velocity vector. In ITRF: packed units (mm/s, 1e-3 m/s)
#-- ITRF= International Terrestrial Reference Frame
Location['Sat_velocity'] = np.zeros((n_records,n_blocks,3),dtype=np.int32)
#-- Real beam direction vector. In CRF: packed units (micro-m, 1e-6 m)
#-- CRF= CryoSat Reference Frame.
Location['Real_beam'] = np.zeros((n_records,n_blocks,3),dtype=np.int32)
#-- Interferometric baseline vector. In CRF: packed units (micro-m, 1e-6 m)
Location['Baseline'] = np.zeros((n_records,n_blocks,3),dtype=np.int32)
#-- Measurement Confidence Data Flags
#-- Generally the MCD flags indicate problems when set
#-- If MCD is 0 then no problems or non-nominal conditions were detected
#-- Serious errors are indicated by setting bit 31
Location['MCD'] = np.zeros((n_records,n_blocks),dtype=np.uint32)
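#-- Sketch (illustration only): isolating the serious-error bit described above
#--     serious_error = (Location['MCD'] >> 31) & 1 #-- 1 where bit 31 is set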
#-- CryoSat-2 Measurement Group
#-- Derived from instrument measurement parameters
Data_20Hz = {}
#-- Window Delay reference (two-way) corrected for instrument delays
Data_20Hz['TD'] = np.zeros((n_records,n_blocks),dtype=np.int64)
#-- H0 Initial Height Word from telemetry
Data_20Hz['H_0'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- COR2 Height Rate: on-board tracker height rate over the radar cycle
Data_20Hz['COR2'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Coarse Range Word (LAI) derived from telemetry
Data_20Hz['LAI'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Fine Range Word (FAI) derived from telemetry
Data_20Hz['FAI'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Automatic Gain Control Channel 1: AGC gain applied on Rx channel 1.
#-- Gain calibration corrections are applied (Sum of AGC stages 1 and 2
#-- plus the corresponding corrections) (dB/100)
Data_20Hz['AGC_CH1'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Automatic Gain Control Channel 2: AGC gain applied on Rx channel 2.
#-- Gain calibration corrections are applied (dB/100)
Data_20Hz['AGC_CH2'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Total Fixed Gain On Channel 1: gain applied by the RF unit. (dB/100)
Data_20Hz['TR_gain_CH1'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Total Fixed Gain On Channel 2: gain applied by the RF unit. (dB/100)
Data_20Hz['TR_gain_CH2'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Transmit Power in microWatts
Data_20Hz['TX_Power'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Doppler range correction: Radial component (mm)
#-- computed for the component of satellite velocity in the nadir direction
Data_20Hz['Doppler_range'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Instrument Range Correction: transmit-receive antenna (mm)
#-- Calibration correction to range on channel 1 computed from CAL1.
Data_20Hz['TR_inst_range'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Instrument Range Correction: receive-only antenna (mm)
#-- Calibration correction to range on channel 2 computed from CAL1.
Data_20Hz['R_inst_range'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Instrument Gain Correction: transmit-receive antenna (dB/100)
#-- Calibration correction to gain on channel 1 computed from CAL1
Data_20Hz['TR_inst_gain'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Instrument Gain Correction: receive-only (dB/100)
#-- Calibration correction to gain on channel 2 computed from CAL1
Data_20Hz['R_inst_gain'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Internal Phase Correction (microradians)
Data_20Hz['Internal_phase'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- External Phase Correction (microradians)
Data_20Hz['External_phase'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Noise Power measurement (dB/100): converted from telemetry units to be
#-- the noise floor of FBR measurement echoes.
#-- Set to -9999.99 when the telemetry contains zero.
Data_20Hz['Noise_power'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Phase slope correction (microradians)
#-- Computed from the CAL-4 packets during the azimuth impulse response
#-- amplitude (SARIN only). Set from the latest available CAL-4 packet.
Data_20Hz['Phase_slope'] = np.zeros((n_records,n_blocks),dtype=np.int32)
Data_20Hz['Spares1'] = np.zeros((n_records,n_blocks,4),dtype=np.int8)
#-- CryoSat-2 External Corrections Group
Geometry = {}
#-- Dry Tropospheric Correction packed units (mm, 1e-3 m)
Geometry['dryTrop'] = np.zeros((n_records),dtype=np.int32)
#-- Wet Tropospheric Correction packed units (mm, 1e-3 m)
Geometry['wetTrop'] = np.zeros((n_records),dtype=np.int32)
#-- Inverse Barometric Correction packed units (mm, 1e-3 m)
Geometry['InvBar'] = np.zeros((n_records),dtype=np.int32)
#-- Delta Inverse Barometric Correction packed units (mm, 1e-3 m)
Geometry['DAC'] = np.zeros((n_records),dtype=np.int32)
#-- GIM Ionospheric Correction packed units (mm, 1e-3 m)
Geometry['Iono_GIM'] = np.zeros((n_records),dtype=np.int32)
#-- Model Ionospheric Correction packed units (mm, 1e-3 m)
Geometry['Iono_model'] = np.zeros((n_records),dtype=np.int32)
#-- Ocean tide Correction packed units (mm, 1e-3 m)
Geometry['ocTideElv'] = np.zeros((n_records),dtype=np.int32)
#-- Long period equilibrium ocean tide Correction packed units (mm, 1e-3 m)
Geometry['lpeTideElv'] = np.zeros((n_records),dtype=np.int32)
#-- Ocean loading tide Correction packed units (mm, 1e-3 m)
Geometry['olTideElv'] = np.zeros((n_records),dtype=np.int32)
#-- Solid Earth tide Correction packed units (mm, 1e-3 m)
Geometry['seTideElv'] = np.zeros((n_records),dtype=np.int32)
#-- Geocentric Polar tide Correction packed units (mm, 1e-3 m)
Geometry['gpTideElv'] = np.zeros((n_records),dtype=np.int32)
#-- Surface Type: enumerated key to classify surface at nadir
#-- 0 = Open Ocean
#-- 1 = Closed Sea
#-- 2 = Continental Ice
#-- 3 = Land
Geometry['Surf_type'] = np.zeros((n_records),dtype=np.uint32)
Geometry['Spare1'] = np.zeros((n_records,4),dtype=np.int8)
#-- Corrections Status Flag
Geometry['Corr_status'] = np.zeros((n_records),dtype=np.uint32)
#-- Correction Error Flag
Geometry['Corr_error'] = np.zeros((n_records),dtype=np.uint32)
Geometry['Spare2'] = np.zeros((n_records,4),dtype=np.int8)
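#-- Sketch (illustration only): the packed corrections and coordinates can be
#-- converted to SI units with the scale factors noted in the comments above:
#--     dry_trop_m = Geometry['dryTrop']*1e-3 #-- mm to m
#--     lat_deg = Location['Lat']*1e-7 #-- 0.1 micro-degrees to degrees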
#-- CryoSat-2 Average Waveforms Groups
Waveform_1Hz = {}
if (MODE == 'LRM'):
#-- Low-Resolution Mode
#-- Data Record Time (MDSR Time Stamp)
Waveform_1Hz['Day'] = np.zeros((n_records),dtype=np.int32)
Waveform_1Hz['Second'] = np.zeros((n_records),dtype=np.uint32)
Waveform_1Hz['Micsec'] = np.zeros((n_records),dtype=np.uint32)
#-- Lat: packed units (0.1 micro-degree, 1e-7 degrees)
Waveform_1Hz['Lat'] = np.zeros((n_records),dtype=np.int32)
#-- Lon: packed units (0.1 micro-degree, 1e-7 degrees)
Waveform_1Hz['Lon'] = np.zeros((n_records),dtype=np.int32)
#-- Alt: packed units (mm, 1e-3 m)
#-- Altitude of COG above reference ellipsoid (interpolated value)
Waveform_1Hz['Alt'] = np.zeros((n_records),dtype=np.int32)
#-- Window Delay (two-way) corrected for instrument delays
Waveform_1Hz['TD'] = np.zeros((n_records),dtype=np.int64)
#-- 1 Hz Averaged Power Echo Waveform
Waveform_1Hz['Waveform'] = np.zeros((n_records,n_LRM_RW),dtype=np.uint16)
#-- Echo Scale Factor (to scale echo to watts)
Waveform_1Hz['Linear_Wfm_Multiplier'] = np.zeros((n_records),dtype=np.int32)
#-- Echo Scale Power (a power of 2)
Waveform_1Hz['Power2_Wfm_Multiplier'] = np.zeros((n_records),dtype=np.int32)
#-- Number of echoes averaged
Waveform_1Hz['N_avg_echoes'] = np.zeros((n_records),dtype=np.uint16)
Waveform_1Hz['Flags'] = np.zeros((n_records),dtype=np.uint16)
elif (MODE == 'SAR'):
#-- SAR Mode
#-- Data Record Time (MDSR Time Stamp)
Waveform_1Hz['Day'] = np.zeros((n_records),dtype=np.int32)
Waveform_1Hz['Second'] = np.zeros((n_records),dtype=np.uint32)
Waveform_1Hz['Micsec'] = np.zeros((n_records),dtype=np.uint32)
#-- Lat: packed units (0.1 micro-degree, 1e-7 degrees)
Waveform_1Hz['Lat'] = np.zeros((n_records),dtype=np.int32)
#-- Lon: packed units (0.1 micro-degree, 1e-7 degrees)
Waveform_1Hz['Lon'] = np.zeros((n_records),dtype=np.int32)
#-- Alt: packed units (mm, 1e-3 m)
#-- Altitude of COG above reference ellipsoid (interpolated value)
Waveform_1Hz['Alt'] = np.zeros((n_records),dtype=np.int32)
#-- Window Delay (two-way) corrected for instrument delays
Waveform_1Hz['TD'] = np.zeros((n_records),dtype=np.int64)
#-- 1 Hz Averaged Power Echo Waveform
Waveform_1Hz['Waveform'] = np.zeros((n_records,n_SAR_RW),dtype=np.uint16)
#-- Echo Scale Factor (to scale echo to watts)
Waveform_1Hz['Linear_Wfm_Multiplier'] = np.zeros((n_records),dtype=np.int32)
#-- Echo Scale Power (a power of 2)
Waveform_1Hz['Power2_Wfm_Multiplier'] = np.zeros((n_records),dtype=np.int32)
#-- Number of echoes averaged
Waveform_1Hz['N_avg_echoes'] = np.zeros((n_records),dtype=np.uint16)
Waveform_1Hz['Flags'] = np.zeros((n_records),dtype=np.uint16)
elif (MODE == 'SIN'):
#-- SARIN Mode
#-- Same as the LRM/SAR groups but the waveform array is 512 bins instead of
#-- 128 and the number of echoes averaged is different.
#-- Data Record Time (MDSR Time Stamp)
Waveform_1Hz['Day'] = np.zeros((n_records),dtype=np.int32)
Waveform_1Hz['Second'] = np.zeros((n_records),dtype=np.uint32)
Waveform_1Hz['Micsec'] = np.zeros((n_records),dtype=np.uint32)
#-- Lat: packed units (0.1 micro-degree, 1e-7 degrees)
Waveform_1Hz['Lat'] = np.zeros((n_records),dtype=np.int32)
#-- Lon: packed units (0.1 micro-degree, 1e-7 degrees)
Waveform_1Hz['Lon'] = np.zeros((n_records),dtype=np.int32)
#-- Alt: packed units (mm, 1e-3 m)
#-- Altitude of COG above reference ellipsoid (interpolated value)
Waveform_1Hz['Alt'] = np.zeros((n_records),dtype=np.int32)
#-- Window Delay (two-way) corrected for instrument delays
Waveform_1Hz['TD'] = np.zeros((n_records),dtype=np.int64)
#-- 1 Hz Averaged Power Echo Waveform
Waveform_1Hz['Waveform'] = np.zeros((n_records,n_SARIN_RW),dtype=np.uint16)
#-- Echo Scale Factor (to scale echo to watts)
Waveform_1Hz['Linear_Wfm_Multiplier'] = np.zeros((n_records),dtype=np.int32)
#-- Echo Scale Power (a power of 2)
Waveform_1Hz['Power2_Wfm_Multiplier'] = np.zeros((n_records),dtype=np.int32)
#-- Number of echoes averaged
Waveform_1Hz['N_avg_echoes'] = np.zeros((n_records),dtype=np.uint16)
Waveform_1Hz['Flags'] = np.zeros((n_records),dtype=np.uint16)
#-- CryoSat-2 Waveforms Groups
#-- Beam Behavior Parameters
Beam_Behavior = {}
#-- Standard Deviation of Gaussian fit to range integrated stack power.
Beam_Behavior['SD'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
#-- Stack Center: Mean of Gaussian fit to range integrated stack power.
Beam_Behavior['Center'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
#-- Stack amplitude parameter scaled in dB/100.
Beam_Behavior['Amplitude'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
#-- 3rd moment: providing the degree of asymmetry of the range integrated
#-- stack power distribution.
Beam_Behavior['Skewness'] = np.zeros((n_records,n_blocks),dtype=np.int16)
#-- 4th moment: Measure of peakiness of range integrated stack power distribution.
Beam_Behavior['Kurtosis'] = np.zeros((n_records,n_blocks),dtype=np.int16)
Beam_Behavior['Spare'] = np.zeros((n_records,n_blocks,n_BeamBehaviourParams-5),dtype=np.int16)
#-- CryoSat-2 mode specific waveforms
Waveform_20Hz = {}
if (MODE == 'LRM'):
#-- Low-Resolution Mode
#-- Averaged Power Echo Waveform [128]
Waveform_20Hz['Waveform'] = np.zeros((n_records,n_blocks,n_LRM_RW),dtype=np.uint16)
#-- Echo Scale Factor (to scale echo to watts)
Waveform_20Hz['Linear_Wfm_Multiplier'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Echo Scale Power (a power of 2 to scale echo to Watts)
Waveform_20Hz['Power2_Wfm_Multiplier'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Number of echoes averaged
Waveform_20Hz['N_avg_echoes'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
Waveform_20Hz['Flags'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
elif (MODE == 'SAR'):
#-- SAR Mode
#-- Averaged Power Echo Waveform [128]
Waveform_20Hz['Waveform'] = np.zeros((n_records,n_blocks,n_SAR_RW),dtype=np.uint16)
#-- Echo Scale Factor (to scale echo to watts)
Waveform_20Hz['Linear_Wfm_Multiplier'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Echo Scale Power (a power of 2 to scale echo to Watts)
Waveform_20Hz['Power2_Wfm_Multiplier'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Number of echoes averaged
Waveform_20Hz['N_avg_echoes'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
Waveform_20Hz['Flags'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
#-- Beam behaviour parameters
Waveform_20Hz['Beam'] = Beam_Behavior
elif (MODE == 'SIN'):
#-- SARIN Mode
#-- Averaged Power Echo Waveform [512]
Waveform_20Hz['Waveform'] = np.zeros((n_records,n_blocks,n_SARIN_RW),dtype=np.uint16)
#-- Echo Scale Factor (to scale echo to watts)
Waveform_20Hz['Linear_Wfm_Multiplier'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Echo Scale Power (a power of 2 to scale echo to Watts)
Waveform_20Hz['Power2_Wfm_Multiplier'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Number of echoes averaged
Waveform_20Hz['N_avg_echoes'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
Waveform_20Hz['Flags'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
#-- Beam behaviour parameters
Waveform_20Hz['Beam'] = Beam_Behavior
#-- Coherence [512]: packed units (1/1000)
Waveform_20Hz['Coherence'] = np.zeros((n_records,n_blocks,n_SARIN_RW),dtype=np.int16)
#-- Phase Difference [512]: packed units (microradians)
Waveform_20Hz['Phase_diff'] = np.zeros((n_records,n_blocks,n_SARIN_RW),dtype=np.int32)
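#-- Sketch (illustration only): following the scale-factor comments above, the
#-- 20 Hz power echoes can be converted to Watts by multiplying each count by
#-- the linear echo scale factor and by 2 raised to the echo scale power:
#--     echo_watts = (Waveform_20Hz['Waveform'] *
#--         Waveform_20Hz['Linear_Wfm_Multiplier'][:,:,None] *
#--         2.0**Waveform_20Hz['Power2_Wfm_Multiplier'][:,:,None])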
#-- for each record in the CryoSat file
for r in range(n_records):
#-- CryoSat-2 Time and Orbit Group
for b in range(n_blocks):
Location['Day'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Location['Second'][r,b] = np.fromfile(fid,dtype='>u4',count=1)
Location['Micsec'][r,b] = np.fromfile(fid,dtype='>u4',count=1)
Location['USO_Corr'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Location['Mode_ID'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Location['SSC'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Location['Inst_config'][r,b] = np.fromfile(fid,dtype='>u4',count=1)
Location['Rec_Count'][r,b] = np.fromfile(fid,dtype='>u4',count=1)
Location['Lat'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Location['Lon'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Location['Alt'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Location['Alt_rate'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Location['Sat_velocity'][r,b,:] = np.fromfile(fid,dtype='>i4',count=3)
Location['Real_beam'][r,b,:] = np.fromfile(fid,dtype='>i4',count=3)
Location['Baseline'][r,b,:] = np.fromfile(fid,dtype='>i4',count=3)
Location['MCD'][r,b] = np.fromfile(fid,dtype='>u4',count=1)
#-- CryoSat-2 Measurement Group
#-- Derived from instrument measurement parameters
for b in range(n_blocks):
Data_20Hz['TD'][r,b] = np.fromfile(fid,dtype='>i8',count=1)
Data_20Hz['H_0'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['COR2'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['LAI'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['FAI'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['AGC_CH1'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['AGC_CH2'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['TR_gain_CH1'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['TR_gain_CH2'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['TX_Power'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['Doppler_range'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['TR_inst_range'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['R_inst_range'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['TR_inst_gain'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['R_inst_gain'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['Internal_phase'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['External_phase'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['Noise_power'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['Phase_slope'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['Spares1'][r,b,:] = np.fromfile(fid,dtype='>i1',count=4)
#-- CryoSat-2 External Corrections Group
Geometry['dryTrop'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['wetTrop'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['InvBar'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['DAC'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['Iono_GIM'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['Iono_model'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['ocTideElv'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['lpeTideElv'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['olTideElv'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['seTideElv'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['gpTideElv'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['Surf_type'][r] = np.fromfile(fid,dtype='>u4',count=1)
Geometry['Spare1'][r,:] = np.fromfile(fid,dtype='>i1',count=4)
Geometry['Corr_status'][r] = np.fromfile(fid,dtype='>u4',count=1)
Geometry['Corr_error'][r] = np.fromfile(fid,dtype='>u4',count=1)
Geometry['Spare2'][r,:] = np.fromfile(fid,dtype='>i1',count=4)
#-- CryoSat-2 Average Waveforms Groups
if (MODE == 'LRM'):
#-- Low-Resolution Mode
Waveform_1Hz['Day'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Second'][r] = np.fromfile(fid,dtype='>u4',count=1)
Waveform_1Hz['Micsec'][r] = np.fromfile(fid,dtype='>u4',count=1)
Waveform_1Hz['Lat'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Lon'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Alt'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['TD'][r] = np.fromfile(fid,dtype='>i8',count=1)
Waveform_1Hz['Waveform'][r,:] = np.fromfile(fid,dtype='>u2',count=n_LRM_RW)
Waveform_1Hz['Linear_Wfm_Multiplier'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Power2_Wfm_Multiplier'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['N_avg_echoes'][r] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_1Hz['Flags'][r] = np.fromfile(fid,dtype='>u2',count=1)
elif (MODE == 'SAR'):
#-- SAR Mode
Waveform_1Hz['Day'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Second'][r] = np.fromfile(fid,dtype='>u4',count=1)
Waveform_1Hz['Micsec'][r] = np.fromfile(fid,dtype='>u4',count=1)
Waveform_1Hz['Lat'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Lon'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Alt'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['TD'][r] = np.fromfile(fid,dtype='>i8',count=1)
Waveform_1Hz['Waveform'][r,:] = np.fromfile(fid,dtype='>u2',count=n_SAR_RW)
Waveform_1Hz['Linear_Wfm_Multiplier'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Power2_Wfm_Multiplier'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['N_avg_echoes'][r] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_1Hz['Flags'][r] = np.fromfile(fid,dtype='>u2',count=1)
elif (MODE == 'SIN'):
#-- SARIN Mode
Waveform_1Hz['Day'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Second'][r] = np.fromfile(fid,dtype='>u4',count=1)
Waveform_1Hz['Micsec'][r] = np.fromfile(fid,dtype='>u4',count=1)
Waveform_1Hz['Lat'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Lon'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Alt'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['TD'][r] = np.fromfile(fid,dtype='>i8',count=1)
Waveform_1Hz['Waveform'][r,:] = np.fromfile(fid,dtype='>u2',count=n_SARIN_RW)
Waveform_1Hz['Linear_Wfm_Multiplier'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Power2_Wfm_Multiplier'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['N_avg_echoes'][r] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_1Hz['Flags'][r] = np.fromfile(fid,dtype='>u2',count=1)
#-- CryoSat-2 Waveforms Groups
if (MODE == 'LRM'):
#-- Low-Resolution Mode
for b in range(n_blocks):
Waveform_20Hz['Waveform'][r,b,:] = np.fromfile(fid,dtype='>u2',count=n_LRM_RW)
Waveform_20Hz['Linear_Wfm_Multiplier'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_20Hz['Power2_Wfm_Multiplier'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_20Hz['N_avg_echoes'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_20Hz['Flags'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
elif (MODE == 'SAR'):
#-- SAR Mode
for b in range(n_blocks):
Waveform_20Hz['Waveform'][r,b,:] = np.fromfile(fid,dtype='>u2',count=n_SAR_RW)
Waveform_20Hz['Linear_Wfm_Multiplier'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_20Hz['Power2_Wfm_Multiplier'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_20Hz['N_avg_echoes'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_20Hz['Flags'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_20Hz['Beam']['SD'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_20Hz['Beam']['Center'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_20Hz['Beam']['Amplitude'][r,b] = np.fromfile(fid,dtype='>i2',count=1)
Waveform_20Hz['Beam']['Skewness'][r,b] = np.fromfile(fid,dtype='>i2',count=1)
Waveform_20Hz['Beam']['Kurtosis'][r,b] = np.fromfile(fid,dtype='>i2',count=1)
Waveform_20Hz['Beam']['Spare'][r,b,:] = np.fromfile(fid,dtype='>i2',count=(n_BeamBehaviourParams-5))
elif (MODE == 'SIN'):
#-- SARIN Mode
for b in range(n_blocks):
Waveform_20Hz['Waveform'][r,b,:] = np.fromfile(fid,dtype='>u2',count=n_SARIN_RW)
Waveform_20Hz['Linear_Wfm_Multiplier'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_20Hz['Power2_Wfm_Multiplier'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_20Hz['N_avg_echoes'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_20Hz['Flags'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_20Hz['Beam']['SD'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_20Hz['Beam']['Center'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_20Hz['Beam']['Amplitude'][r,b] = np.fromfile(fid,dtype='>i2',count=1)
Waveform_20Hz['Beam']['Skewness'][r,b] = np.fromfile(fid,dtype='>i2',count=1)
Waveform_20Hz['Beam']['Kurtosis'][r,b] = np.fromfile(fid,dtype='>i2',count=1)
Waveform_20Hz['Beam']['Spare'][r,b,:] = np.fromfile(fid,dtype='>i2',count=(n_BeamBehaviourParams-5))
Waveform_20Hz['Coherence'][r,b,:] = np.fromfile(fid,dtype='>i2',count=n_SARIN_RW)
Waveform_20Hz['Phase_diff'][r,b,:] = np.fromfile(fid,dtype='>i4',count=n_SARIN_RW)
#-- Bind all the bits of the l1b_mds together into a single dictionary
CS_l1b_mds = {}
CS_l1b_mds['Location'] = Location
CS_l1b_mds['Data'] = Data_20Hz
CS_l1b_mds['Geometry'] = Geometry
CS_l1b_mds['Waveform_1Hz'] = Waveform_1Hz
CS_l1b_mds['Waveform_20Hz'] = Waveform_20Hz
#-- return the output dictionary
return CS_l1b_mds
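#-- The returned dictionary mirrors the L1b product groups, e.g. (sketch):
#--     CS_l1b_mds['Location']['Lat'] #-- (n_records,n_blocks) packed latitudes
#--     CS_l1b_mds['Waveform_20Hz']['Waveform'] #-- 20 Hz power echo waveforms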
#-- PURPOSE: Initiate L1b MDS variables for CryoSat Baseline C
def cryosat_baseline_C(fid, n_records, MODE):
n_SARIN_BC_RW = 1024
n_SARIN_RW = 512
n_SAR_BC_RW = 256
n_SAR_RW = 128
n_LRM_RW = 128
n_blocks = 20
n_BeamBehaviourParams = 50
#-- CryoSat-2 Time and Orbit Group
Location = {}
#-- Time: day part
Location['Day'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Time: second part
Location['Second'] = np.zeros((n_records,n_blocks),dtype=np.uint32)
#-- Time: microsecond part
Location['Micsec'] = np.zeros((n_records,n_blocks),dtype=np.uint32)
#-- USO correction factor
Location['USO_Corr'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Mode ID
Location['Mode_ID'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
#-- Source sequence counter
Location['SSC'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
#-- Instrument configuration
Location['Inst_config'] = np.zeros((n_records,n_blocks),dtype=np.uint32)
#-- Record Counter
Location['Rec_Count'] = np.zeros((n_records,n_blocks),dtype=np.uint32)
#-- Lat: packed units (0.1 micro-degree, 1e-7 degrees)
Location['Lat'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Lon: packed units (0.1 micro-degree, 1e-7 degrees)
Location['Lon'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Alt: packed units (mm, 1e-3 m)
#-- Altitude of COG above reference ellipsoid (interpolated value)
Location['Alt'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Instantaneous altitude rate derived from orbit: packed units (mm/s, 1e-3 m/s)
Location['Alt_rate'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Satellite velocity vector. In ITRF: packed units (mm/s, 1e-3 m/s)
#-- ITRF= International Terrestrial Reference Frame
Location['Sat_velocity'] = np.zeros((n_records,n_blocks,3),dtype=np.int32)
#-- Real beam direction vector. In CRF: packed units (micro-m, 1e-6 m)
#-- CRF= CryoSat Reference Frame.
Location['Real_beam'] = np.zeros((n_records,n_blocks,3),dtype=np.int32)
#-- Interferometric baseline vector. In CRF: packed units (micro-m, 1e-6 m)
Location['Baseline'] = np.zeros((n_records,n_blocks,3),dtype=np.int32)
#-- Star Tracker ID
Location['ST_ID'] = np.zeros((n_records,n_blocks),dtype=np.int16)
#-- Antenna Bench Roll Angle (Derived from star trackers)
#-- packed units (0.1 micro-degree, 1e-7 degrees)
Location['Roll'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Antenna Bench Pitch Angle (Derived from star trackers)
#-- packed units (0.1 micro-degree, 1e-7 degrees)
Location['Pitch'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Antenna Bench Yaw Angle (Derived from star trackers)
#-- packed units (0.1 micro-degree, 1e-7 degrees)
Location['Yaw'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Measurement Confidence Data Flags
#-- Generally the MCD flags indicate problems when set
#-- If MCD is 0 then no problems or non-nominal conditions were detected
#-- Serious errors are indicated by setting bit 31
Location['MCD'] = np.zeros((n_records,n_blocks),dtype=np.uint32)
Location['Spares'] = np.zeros((n_records,n_blocks,2),dtype=np.int16)
#-- CryoSat-2 Measurement Group
#-- Derived from instrument measurement parameters
Data_20Hz = {}
#-- Window Delay reference (two-way) corrected for instrument delays
Data_20Hz['TD'] = np.zeros((n_records,n_blocks),dtype=np.int64)
#-- H0 Initial Height Word from telemetry
Data_20Hz['H_0'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- COR2 Height Rate: on-board tracker height rate over the radar cycle
Data_20Hz['COR2'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Coarse Range Word (LAI) derived from telemetry
Data_20Hz['LAI'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Fine Range Word (FAI) derived from telemetry
Data_20Hz['FAI'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Automatic Gain Control Channel 1: AGC gain applied on Rx channel 1.
#-- Gain calibration corrections are applied (Sum of AGC stages 1 and 2
#-- plus the corresponding corrections) (dB/100)
Data_20Hz['AGC_CH1'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Automatic Gain Control Channel 2: AGC gain applied on Rx channel 2.
#-- Gain calibration corrections are applied (dB/100)
Data_20Hz['AGC_CH2'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Total Fixed Gain On Channel 1: gain applied by the RF unit. (dB/100)
Data_20Hz['TR_gain_CH1'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Total Fixed Gain On Channel 2: gain applied by the RF unit. (dB/100)
Data_20Hz['TR_gain_CH2'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Transmit Power in microWatts
Data_20Hz['TX_Power'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Doppler range correction: Radial component (mm)
#-- computed for the component of satellite velocity in the nadir direction
Data_20Hz['Doppler_range'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Instrument Range Correction: transmit-receive antenna (mm)
#-- Calibration correction to range on channel 1 computed from CAL1.
Data_20Hz['TR_inst_range'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Instrument Range Correction: receive-only antenna (mm)
#-- Calibration correction to range on channel 2 computed from CAL1.
Data_20Hz['R_inst_range'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Instrument Gain Correction: transmit-receive antenna (dB/100)
#-- Calibration correction to gain on channel 1 computed from CAL1
Data_20Hz['TR_inst_gain'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Instrument Gain Correction: receive-only (dB/100)
#-- Calibration correction to gain on channel 2 computed from CAL1
Data_20Hz['R_inst_gain'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Internal Phase Correction (microradians)
Data_20Hz['Internal_phase'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- External Phase Correction (microradians)
Data_20Hz['External_phase'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Noise Power measurement (dB/100)
Data_20Hz['Noise_power'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Phase slope correction (microradians)
#-- Computed from the CAL-4 packets during the azimuth impulse response
#-- amplitude (SARIN only). Set from the latest available CAL-4 packet.
Data_20Hz['Phase_slope'] = np.zeros((n_records,n_blocks),dtype=np.int32)
Data_20Hz['Spares1'] = np.zeros((n_records,n_blocks,4),dtype=np.int8)
#-- CryoSat-2 External Corrections Group
Geometry = {}
#-- Dry Tropospheric Correction packed units (mm, 1e-3 m)
Geometry['dryTrop'] = np.zeros((n_records),dtype=np.int32)
#-- Wet Tropospheric Correction packed units (mm, 1e-3 m)
Geometry['wetTrop'] = np.zeros((n_records),dtype=np.int32)
#-- Inverse Barometric Correction packed units (mm, 1e-3 m)
Geometry['InvBar'] = np.zeros((n_records),dtype=np.int32)
#-- Delta Inverse Barometric Correction packed units (mm, 1e-3 m)
Geometry['DAC'] = np.zeros((n_records),dtype=np.int32)
#-- GIM Ionospheric Correction packed units (mm, 1e-3 m)
Geometry['Iono_GIM'] = np.zeros((n_records),dtype=np.int32)
#-- Model Ionospheric Correction packed units (mm, 1e-3 m)
Geometry['Iono_model'] = np.zeros((n_records),dtype=np.int32)
#-- Ocean tide Correction packed units (mm, 1e-3 m)
Geometry['ocTideElv'] = np.zeros((n_records),dtype=np.int32)
#-- Long period equilibrium ocean tide Correction packed units (mm, 1e-3 m)
Geometry['lpeTideElv'] = np.zeros((n_records),dtype=np.int32)
#-- Ocean loading tide Correction packed units (mm, 1e-3 m)
Geometry['olTideElv'] = np.zeros((n_records),dtype=np.int32)
#-- Solid Earth tide Correction packed units (mm, 1e-3 m)
Geometry['seTideElv'] = np.zeros((n_records),dtype=np.int32)
#-- Geocentric Polar tide Correction packed units (mm, 1e-3 m)
Geometry['gpTideElv'] = np.zeros((n_records),dtype=np.int32)
#-- Surface Type: enumerated key to classify surface at nadir
#-- 0 = Open Ocean
#-- 1 = Closed Sea
#-- 2 = Continental Ice
#-- 3 = Land
Geometry['Surf_type'] = np.zeros((n_records),dtype=np.uint32)
Geometry['Spare1'] = np.zeros((n_records,4),dtype=np.int8)
#-- Corrections Status Flag
Geometry['Corr_status'] = np.zeros((n_records),dtype=np.uint32)
#-- Correction Error Flag
Geometry['Corr_error'] = np.zeros((n_records),dtype=np.uint32)
Geometry['Spare2'] = np.zeros((n_records,4),dtype=np.int8)
#-- CryoSat-2 Average Waveforms Groups
Waveform_1Hz = {}
if (MODE == 'LRM'):
#-- Low-Resolution Mode
#-- Data Record Time (MDSR Time Stamp)
Waveform_1Hz['Day'] = np.zeros((n_records),dtype=np.int32)
Waveform_1Hz['Second'] = np.zeros((n_records),dtype=np.uint32)
Waveform_1Hz['Micsec'] = np.zeros((n_records),dtype=np.uint32)
#-- Lat: packed units (0.1 micro-degree, 1e-7 degrees)
Waveform_1Hz['Lat'] = np.zeros((n_records),dtype=np.int32)
#-- Lon: packed units (0.1 micro-degree, 1e-7 degrees)
Waveform_1Hz['Lon'] = np.zeros((n_records),dtype=np.int32)
#-- Alt: packed units (mm, 1e-3 m)
#-- Altitude of COG above reference ellipsoid (interpolated value)
Waveform_1Hz['Alt'] = np.zeros((n_records),dtype=np.int32)
#-- Window Delay (two-way) corrected for instrument delays
Waveform_1Hz['TD'] = np.zeros((n_records),dtype=np.int64)
#-- 1 Hz Averaged Power Echo Waveform
Waveform_1Hz['Waveform'] = np.zeros((n_records,n_LRM_RW),dtype=np.uint16)
#-- Echo Scale Factor (to scale echo to watts)
Waveform_1Hz['Linear_Wfm_Multiplier'] = np.zeros((n_records),dtype=np.int32)
#-- Echo Scale Power (a power of 2 to scale echo to Watts)
Waveform_1Hz['Power2_Wfm_Multiplier'] = np.zeros((n_records),dtype=np.int32)
#-- Number of echoes averaged
Waveform_1Hz['N_avg_echoes'] = np.zeros((n_records),dtype=np.uint16)
Waveform_1Hz['Flags'] = np.zeros((n_records),dtype=np.uint16)
elif (MODE == 'SAR'):
#-- SAR Mode
#-- Data Record Time (MDSR Time Stamp)
Waveform_1Hz['Day'] = np.zeros((n_records),dtype=np.int32)
Waveform_1Hz['Second'] = np.zeros((n_records),dtype=np.uint32)
Waveform_1Hz['Micsec'] = np.zeros((n_records),dtype=np.uint32)
#-- Lat: packed units (0.1 micro-degree, 1e-7 degrees)
Waveform_1Hz['Lat'] = np.zeros((n_records),dtype=np.int32)
#-- Lon: packed units (0.1 micro-degree, 1e-7 degrees)
Waveform_1Hz['Lon'] = np.zeros((n_records),dtype=np.int32)
#-- Alt: packed units (mm, 1e-3 m)
#-- Altitude of COG above reference ellipsoid (interpolated value)
Waveform_1Hz['Alt'] = np.zeros((n_records),dtype=np.int32)
#-- Window Delay (two-way) corrected for instrument delays
Waveform_1Hz['TD'] = np.zeros((n_records),dtype=np.int64)
#-- 1 Hz Averaged Power Echo Waveform
Waveform_1Hz['Waveform'] = np.zeros((n_records,n_SAR_RW),dtype=np.uint16)
#-- Echo Scale Factor (to scale echo to watts)
Waveform_1Hz['Linear_Wfm_Multiplier'] = np.zeros((n_records),dtype=np.int32)
#-- Echo Scale Power (a power of 2 to scale echo to Watts)
Waveform_1Hz['Power2_Wfm_Multiplier'] = np.zeros((n_records),dtype=np.int32)
#-- Number of echoes averaged
Waveform_1Hz['N_avg_echoes'] = np.zeros((n_records),dtype=np.uint16)
Waveform_1Hz['Flags'] = np.zeros((n_records),dtype=np.uint16)
elif (MODE == 'SIN'):
#-- SARIN Mode
#-- Same as the LRM/SAR groups but the waveform array is 512 bins instead of
#-- 128 and the number of echoes averaged is different.
#-- Data Record Time (MDSR Time Stamp)
Waveform_1Hz['Day'] = np.zeros((n_records),dtype=np.int32)
Waveform_1Hz['Second'] = np.zeros((n_records),dtype=np.uint32)
Waveform_1Hz['Micsec'] = np.zeros((n_records),dtype=np.uint32)
#-- Lat: packed units (0.1 micro-degree, 1e-7 degrees)
Waveform_1Hz['Lat'] = np.zeros((n_records),dtype=np.int32)
#-- Lon: packed units (0.1 micro-degree, 1e-7 degrees)
Waveform_1Hz['Lon'] = np.zeros((n_records),dtype=np.int32)
#-- Alt: packed units (mm, 1e-3 m)
#-- Altitude of COG above reference ellipsoid (interpolated value)
Waveform_1Hz['Alt'] = np.zeros((n_records),dtype=np.int32)
#-- Window Delay (two-way) corrected for instrument delays
Waveform_1Hz['TD'] = np.zeros((n_records),dtype=np.int64)
#-- 1 Hz Averaged Power Echo Waveform
Waveform_1Hz['Waveform'] = np.zeros((n_records,n_SARIN_RW),dtype=np.uint16)
#-- Echo Scale Factor (to scale echo to watts)
Waveform_1Hz['Linear_Wfm_Multiplier'] = np.zeros((n_records),dtype=np.int32)
#-- Echo Scale Power (a power of 2 to scale echo to Watts)
Waveform_1Hz['Power2_Wfm_Multiplier'] = np.zeros((n_records),dtype=np.int32)
#-- Number of echoes averaged
Waveform_1Hz['N_avg_echoes'] = np.zeros((n_records),dtype=np.uint16)
Waveform_1Hz['Flags'] = np.zeros((n_records),dtype=np.uint16)
#-- CryoSat-2 Waveforms Groups
#-- Beam Behavior Parameters
Beam_Behavior = {}
#-- Standard Deviation of Gaussian fit to range integrated stack power.
Beam_Behavior['SD'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
#-- Stack Center: Mean of Gaussian fit to range integrated stack power.
Beam_Behavior['Center'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
#-- Stack amplitude parameter scaled in dB/100.
Beam_Behavior['Amplitude'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
#-- 3rd moment: providing the degree of asymmetry of the range integrated
#-- stack power distribution.
Beam_Behavior['Skewness'] = np.zeros((n_records,n_blocks),dtype=np.int16)
#-- 4th moment: Measure of peakiness of range integrated stack power distribution.
Beam_Behavior['Kurtosis'] = np.zeros((n_records,n_blocks),dtype=np.int16)
#-- Standard deviation as a function of boresight angle (microradians)
Beam_Behavior['SD_boresight_angle'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
#-- Stack Center angle as a function of boresight angle (microradians)
Beam_Behavior['Center_boresight_angle'] = np.zeros((n_records,n_blocks),dtype=np.int16)
Beam_Behavior['Spare'] = np.zeros((n_records,n_blocks,n_BeamBehaviourParams-7),dtype=np.int16)
#-- CryoSat-2 mode specific waveform variables
Waveform_20Hz = {}
if (MODE == 'LRM'):
#-- Low-Resolution Mode
#-- Averaged Power Echo Waveform [128]
Waveform_20Hz['Waveform'] = np.zeros((n_records,n_blocks,n_LRM_RW),dtype=np.uint16)
#-- Echo Scale Factor (to scale echo to watts)
Waveform_20Hz['Linear_Wfm_Multiplier'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Echo Scale Power (a power of 2)
Waveform_20Hz['Power2_Wfm_Multiplier'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Number of echoes averaged
Waveform_20Hz['N_avg_echoes'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
Waveform_20Hz['Flags'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
elif (MODE == 'SAR'):
#-- SAR Mode
#-- Averaged Power Echo Waveform [256]
Waveform_20Hz['Waveform'] = np.zeros((n_records,n_blocks,n_SAR_BC_RW),dtype=np.uint16)
#-- Echo Scale Factor (to scale echo to watts)
Waveform_20Hz['Linear_Wfm_Multiplier'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Echo Scale Power (a power of 2)
Waveform_20Hz['Power2_Wfm_Multiplier'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Number of echoes averaged
Waveform_20Hz['N_avg_echoes'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
Waveform_20Hz['Flags'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
#-- Beam behaviour parameters
Waveform_20Hz['Beam'] = Beam_Behavior
elif (MODE == 'SIN'):
#-- SARIN Mode
#-- Averaged Power Echo Waveform [1024]
Waveform_20Hz['Waveform'] = np.zeros((n_records,n_blocks,n_SARIN_BC_RW),dtype=np.uint16)
#-- Echo Scale Factor (to scale echo to watts)
Waveform_20Hz['Linear_Wfm_Multiplier'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Echo Scale Power (a power of 2)
Waveform_20Hz['Power2_Wfm_Multiplier'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Number of echoes averaged
Waveform_20Hz['N_avg_echoes'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
Waveform_20Hz['Flags'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
#-- Beam behaviour parameters
Waveform_20Hz['Beam'] = Beam_Behavior
#-- Coherence [1024]: packed units (1/1000)
Waveform_20Hz['Coherence'] = np.zeros((n_records,n_blocks,n_SARIN_BC_RW),dtype=np.int16)
#-- Phase Difference [1024]: packed units (microradians)
Waveform_20Hz['Phase_diff'] = np.zeros((n_records,n_blocks,n_SARIN_BC_RW),dtype=np.int32)
#-- for each record in the CryoSat file
for r in range(n_records):
#-- CryoSat-2 Time and Orbit Group
for b in range(n_blocks):
Location['Day'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Location['Second'][r,b] = np.fromfile(fid,dtype='>u4',count=1)
Location['Micsec'][r,b] = np.fromfile(fid,dtype='>u4',count=1)
Location['USO_Corr'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Location['Mode_ID'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Location['SSC'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Location['Inst_config'][r,b] = np.fromfile(fid,dtype='>u4',count=1)
Location['Rec_Count'][r,b] = np.fromfile(fid,dtype='>u4',count=1)
Location['Lat'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Location['Lon'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Location['Alt'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Location['Alt_rate'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Location['Sat_velocity'][r,b,:] = np.fromfile(fid,dtype='>i4',count=3)
Location['Real_beam'][r,b,:] = np.fromfile(fid,dtype='>i4',count=3)
Location['Baseline'][r,b,:] = np.fromfile(fid,dtype='>i4',count=3)
Location['ST_ID'][r,b] = np.fromfile(fid,dtype='>i2',count=1)
Location['Roll'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Location['Pitch'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Location['Yaw'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Location['MCD'][r,b] = np.fromfile(fid,dtype='>u4',count=1)
Location['Spares'][r,b,:] = np.fromfile(fid,dtype='>i2',count=2)
#-- CryoSat-2 Measurement Group
#-- Derived from instrument measurement parameters
for b in range(n_blocks):
Data_20Hz['TD'][r,b] = np.fromfile(fid,dtype='>i8',count=1)
Data_20Hz['H_0'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['COR2'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['LAI'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['FAI'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['AGC_CH1'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['AGC_CH2'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['TR_gain_CH1'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['TR_gain_CH2'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['TX_Power'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['Doppler_range'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['TR_inst_range'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['R_inst_range'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['TR_inst_gain'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['R_inst_gain'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['Internal_phase'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['External_phase'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['Noise_power'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['Phase_slope'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['Spares1'][r,b,:] = np.fromfile(fid,dtype='>i1',count=4)
#-- CryoSat-2 External Corrections Group
Geometry['dryTrop'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['wetTrop'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['InvBar'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['DAC'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['Iono_GIM'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['Iono_model'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['ocTideElv'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['lpeTideElv'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['olTideElv'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['seTideElv'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['gpTideElv'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['Surf_type'][r] = np.fromfile(fid,dtype='>u4',count=1)
Geometry['Spare1'][r,:] = np.fromfile(fid,dtype='>i1',count=4)
Geometry['Corr_status'][r] = np.fromfile(fid,dtype='>u4',count=1)
Geometry['Corr_error'][r] = np.fromfile(fid,dtype='>u4',count=1)
Geometry['Spare2'][r,:] = np.fromfile(fid,dtype='>i1',count=4)
#-- CryoSat-2 Average Waveforms Groups
if (MODE == 'LRM'):
#-- Low-Resolution Mode
Waveform_1Hz['Day'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Second'][r] = np.fromfile(fid,dtype='>u4',count=1)
Waveform_1Hz['Micsec'][r] = np.fromfile(fid,dtype='>u4',count=1)
Waveform_1Hz['Lat'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Lon'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Alt'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['TD'][r] = np.fromfile(fid,dtype='>i8',count=1)
Waveform_1Hz['Waveform'][r,:] = np.fromfile(fid,dtype='>u2',count=n_LRM_RW)
Waveform_1Hz['Linear_Wfm_Multiplier'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Power2_Wfm_Multiplier'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['N_avg_echoes'][r] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_1Hz['Flags'][r] = np.fromfile(fid,dtype='>u2',count=1)
elif (MODE == 'SAR'):
#-- SAR Mode
Waveform_1Hz['Day'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Second'][r] = np.fromfile(fid,dtype='>u4',count=1)
Waveform_1Hz['Micsec'][r] = np.fromfile(fid,dtype='>u4',count=1)
Waveform_1Hz['Lat'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Lon'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Alt'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['TD'][r] = np.fromfile(fid,dtype='>i8',count=1)
Waveform_1Hz['Waveform'][r,:] = np.fromfile(fid,dtype='>u2',count=n_SAR_RW)
Waveform_1Hz['Linear_Wfm_Multiplier'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Power2_Wfm_Multiplier'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['N_avg_echoes'][r] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_1Hz['Flags'][r] = np.fromfile(fid,dtype='>u2',count=1)
elif (MODE == 'SIN'):
#-- SARIN Mode
Waveform_1Hz['Day'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Second'][r] = np.fromfile(fid,dtype='>u4',count=1)
Waveform_1Hz['Micsec'][r] = np.fromfile(fid,dtype='>u4',count=1)
Waveform_1Hz['Lat'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Lon'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Alt'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['TD'][r] = np.fromfile(fid,dtype='>i8',count=1)
Waveform_1Hz['Waveform'][r,:] = np.fromfile(fid,dtype='>u2',count=n_SARIN_RW)
Waveform_1Hz['Linear_Wfm_Multiplier'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Power2_Wfm_Multiplier'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['N_avg_echoes'][r] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_1Hz['Flags'][r] = np.fromfile(fid,dtype='>u2',count=1)
#-- CryoSat-2 Waveforms Groups
if (MODE == 'LRM'):
#-- Low-Resolution Mode
for b in range(n_blocks):
Waveform_20Hz['Waveform'][r,b,:] = np.fromfile(fid,dtype='>u2',count=n_LRM_RW)
Waveform_20Hz['Linear_Wfm_Multiplier'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_20Hz['Power2_Wfm_Multiplier'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_20Hz['N_avg_echoes'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_20Hz['Flags'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
elif (MODE == 'SAR'):
#-- SAR Mode
for b in range(n_blocks):
Waveform_20Hz['Waveform'][r,b,:] = np.fromfile(fid,dtype='>u2',count=n_SAR_BC_RW)
Waveform_20Hz['Linear_Wfm_Multiplier'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_20Hz['Power2_Wfm_Multiplier'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_20Hz['N_avg_echoes'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_20Hz['Flags'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_20Hz['Beam']['SD'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_20Hz['Beam']['Center'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_20Hz['Beam']['Amplitude'][r,b] = np.fromfile(fid,dtype='>i2',count=1)
Waveform_20Hz['Beam']['Skewness'][r,b] = np.fromfile(fid,dtype='>i2',count=1)
Waveform_20Hz['Beam']['Kurtosis'][r,b] = np.fromfile(fid,dtype='>i2',count=1)
Waveform_20Hz['Beam']['SD_boresight_angle'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_20Hz['Beam']['Center_boresight_angle'][r,b] = np.fromfile(fid,dtype='>i2',count=1)
Waveform_20Hz['Beam']['Spare'][r,b,:] = np.fromfile(fid,dtype='>i2',count=(n_BeamBehaviourParams-7))
elif (MODE == 'SIN'):
#-- SARIN Mode
for b in range(n_blocks):
Waveform_20Hz['Waveform'][r,b,:] = np.fromfile(fid,dtype='>u2',count=n_SARIN_BC_RW)
Waveform_20Hz['Linear_Wfm_Multiplier'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_20Hz['Power2_Wfm_Multiplier'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_20Hz['N_avg_echoes'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_20Hz['Flags'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_20Hz['Beam']['SD'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_20Hz['Beam']['Center'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_20Hz['Beam']['Amplitude'][r,b] = np.fromfile(fid,dtype='>i2',count=1)
Waveform_20Hz['Beam']['Skewness'][r,b] = np.fromfile(fid,dtype='>i2',count=1)
Waveform_20Hz['Beam']['Kurtosis'][r,b] = np.fromfile(fid,dtype='>i2',count=1)
Waveform_20Hz['Beam']['SD_boresight_angle'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_20Hz['Beam']['Center_boresight_angle'][r,b] = np.fromfile(fid,dtype='>i2',count=1)
Waveform_20Hz['Beam']['Spare'][r,b,:] = np.fromfile(fid,dtype='>i2',count=(n_BeamBehaviourParams-7))
Waveform_20Hz['Coherence'][r,b,:] = np.fromfile(fid,dtype='>i2',count=n_SARIN_BC_RW)
Waveform_20Hz['Phase_diff'][r,b,:] = np.fromfile(fid,dtype='>i4',count=n_SARIN_BC_RW)
#-- Bind all the bits of the l1b_mds together into a single dictionary
CS_l1b_mds = {}
CS_l1b_mds['Location'] = Location
CS_l1b_mds['Data'] = Data_20Hz
CS_l1b_mds['Geometry'] = Geometry
CS_l1b_mds['Waveform_1Hz'] = Waveform_1Hz
CS_l1b_mds['Waveform_20Hz'] = Waveform_20Hz
#-- return the output dictionary
return CS_l1b_mds
#-- PURPOSE: Initiate L1b MDS variables for CryoSat Baseline D (netCDF4)
def cryosat_baseline_D(full_filename, MODE, UNPACK=False):
#-- open netCDF4 file for reading
fid = netCDF4.Dataset(os.path.expanduser(full_filename),'r')
#-- use original unscaled units unless UNPACK=True
fid.set_auto_scale(UNPACK)
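#-- Note: with UNPACK=False the variables keep their packed integer units
#-- (e.g. latitudes in 1e-7 degrees); with UNPACK=True netCDF4 applies any
#-- scale_factor/add_offset attributes on read and returns unpacked values.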
#-- get dimensions
ind_first_meas_20hz_01 = fid.variables['ind_first_meas_20hz_01'][:].copy()
ind_meas_1hz_20_ku = fid.variables['ind_meas_1hz_20_ku'][:].copy()
n_records = len(ind_first_meas_20hz_01)
n_SARIN_D_RW = 1024
n_SARIN_RW = 512
n_SAR_D_RW = 256
n_SAR_RW = 128
n_LRM_RW = 128
n_blocks = 20
#-- CryoSat-2 Time and Orbit Group
Location = {}
#-- MDS Time
Location['Time'] = np.ma.zeros((n_records,n_blocks))
Location['Time'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
time_20_ku = fid.variables['time_20_ku'][:].copy()
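#-- Sketch (assumption, for illustration): the flat 20 Hz vectors such as
#-- time_20_ku can be mapped into the (n_records,n_blocks) arrays using the
#-- per-measurement 1 Hz index read above, masking any unfilled blocks:
#--     for r in range(n_records):
#--         i20, = np.nonzero(ind_meas_1hz_20_ku == r)
#--         Location['Time'][r,:len(i20)] = time_20_ku[i20]
#--         Location['Time'].mask[r,len(i20):] = True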
#-- Time: day part
Location['Day'] = np.ma.zeros((n_records,n_blocks))
Location['Day'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
#-- Time: second part
Location['Second'] = np.ma.zeros((n_records,n_blocks))
Location['Second'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
#-- Time: microsecond part
Location['Micsec'] = np.ma.zeros((n_records,n_blocks))
Location['Micsec'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
#-- USO correction factor
Location['USO_Corr'] = np.ma.zeros((n_records,n_blocks))
Location['USO_Corr'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
uso_cor_20_ku = fid.variables['uso_cor_20_ku'][:].copy()
#-- Mode ID
Location['Mode_ID'] = np.ma.zeros((n_records,n_blocks))
Location['Mode_ID'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
flag_instr_mode_op_20_ku = fid.variables['flag_instr_mode_op_20_ku'][:].copy()
#-- Mode Flags
Location['Mode_flags'] = np.ma.zeros((n_records,n_blocks))
Location['Mode_flags'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
flag_instr_mode_flags_20_ku = fid.variables['flag_instr_mode_flags_20_ku'][:].copy()
#-- Platform attitude control mode
Location['Att_control'] = np.ma.zeros((n_records,n_blocks))
Location['Att_control'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
flag_instr_mode_att_ctrl_20_ku = fid.variables['flag_instr_mode_att_ctrl_20_ku'][:].copy()
#-- Instrument configuration
Location['Inst_config'] = np.ma.zeros((n_records,n_blocks))
Location['Inst_config'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
flag_instr_conf_rx_flags_20_ku = fid.variables['flag_instr_conf_rx_flags_20_ku'][:].copy()
#-- acquisition band
Location['Inst_band'] = np.ma.zeros((n_records,n_blocks))
Location['Inst_band'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
flag_instr_conf_rx_bwdt_20_ku = fid.variables['flag_instr_conf_rx_bwdt_20_ku'][:].copy()
#-- instrument channel
Location['Inst_channel'] = np.ma.zeros((n_records,n_blocks))
Location['Inst_channel'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
flag_instr_conf_rx_in_use_20_ku = fid.variables['flag_instr_conf_rx_in_use_20_ku'][:].copy()
#-- tracking mode
Location['Tracking_mode'] = np.ma.zeros((n_records,n_blocks))
Location['Tracking_mode'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
flag_instr_conf_rx_trk_mode_20_ku = fid.variables['flag_instr_conf_rx_trk_mode_20_ku'][:].copy()
#-- Source sequence counter
Location['SSC'] = np.ma.zeros((n_records,n_blocks))
Location['SSC'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
seq_count_20_ku = fid.variables['seq_count_20_ku'][:].copy()
#-- Record Counter
Location['Rec_Count'] = np.ma.zeros((n_records,n_blocks))
Location['Rec_Count'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
rec_count_20_ku = fid.variables['rec_count_20_ku'][:].copy()
#-- Lat: packed units (0.1 micro-degree, 1e-7 degrees)
Location['Lat'] = np.ma.zeros((n_records,n_blocks))
Location['Lat'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
lat_20_ku = fid.variables['lat_20_ku'][:].copy()
#-- Lon: packed units (0.1 micro-degree, 1e-7 degrees)
Location['Lon'] = np.ma.zeros((n_records,n_blocks))
Location['Lon'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
lon_20_ku = fid.variables['lon_20_ku'][:].copy()
#-- Alt: packed units (mm, 1e-3 m)
#-- Altitude of COG above reference ellipsoid (interpolated value)
Location['Alt'] = np.ma.zeros((n_records,n_blocks))
Location['Alt'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
alt_20_ku = fid.variables['alt_20_ku'][:].copy()
#-- Instantaneous altitude rate derived from orbit: packed units (mm/s, 1e-3 m/s)
Location['Alt_rate'] = np.ma.zeros((n_records,n_blocks))
Location['Alt_rate'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
orb_alt_rate_20_ku = fid.variables['orb_alt_rate_20_ku'][:].copy()
#-- Satellite velocity vector. In ITRF: packed units (mm/s, 1e-3 m/s)
#-- ITRF= International Terrestrial Reference Frame
Location['Sat_velocity'] = np.ma.zeros((n_records,n_blocks,3))
Location['Sat_velocity'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
sat_vel_vec_20_ku = fid.variables['sat_vel_vec_20_ku'][:].copy()
#-- Real beam direction vector. In CRF: packed units (micro-m, 1e-6 m)
#-- CRF= CryoSat Reference Frame.
Location['Real_beam'] = np.ma.zeros((n_records,n_blocks,3))
Location['Real_beam'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
beam_dir_vec_20_ku = fid.variables['beam_dir_vec_20_ku'][:].copy()
#-- Interferometric baseline vector. In CRF: packed units (micro-m, 1e-6 m)
Location['Baseline'] = np.ma.zeros((n_records,n_blocks,3))
Location['Baseline'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
inter_base_vec_20_ku = fid.variables['inter_base_vec_20_ku'][:].copy()
#-- Star Tracker ID
Location['ST_ID'] = np.ma.zeros((n_records,n_blocks))
Location['ST_ID'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
flag_instr_conf_rx_str_in_use_20_ku = fid.variables['flag_instr_conf_rx_str_in_use_20_ku'][:].copy()
#-- Antenna Bench Roll Angle (Derived from star trackers)
#-- packed units (0.1 micro-degree, 1e-7 degrees)
Location['Roll'] = np.ma.zeros((n_records,n_blocks))
Location['Roll'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
off_nadir_roll_angle_str_20_ku = fid.variables['off_nadir_roll_angle_str_20_ku'][:].copy()
#-- Antenna Bench Pitch Angle (Derived from star trackers)
#-- packed units (0.1 micro-degree, 1e-7 degrees)
Location['Pitch'] = np.ma.zeros((n_records,n_blocks))
Location['Pitch'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
off_nadir_pitch_angle_str_20_ku = fid.variables['off_nadir_pitch_angle_str_20_ku'][:].copy()
#-- Antenna Bench Yaw Angle (Derived from star trackers)
#-- packed units (0.1 micro-degree, 1e-7 degrees)
Location['Yaw'] = np.ma.zeros((n_records,n_blocks))
Location['Yaw'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
off_nadir_yaw_angle_str_20_ku = fid.variables['off_nadir_yaw_angle_str_20_ku'][:].copy()
#-- Measurement Confidence Data Flags
#-- Generally the MCD flags indicate problems when set
#-- If MCD is 0 then no problems or non-nominal conditions were detected
#-- Serious errors are indicated by setting bit 31
Location['MCD'] = np.ma.zeros((n_records,n_blocks))
Location['MCD'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
flag_mcd_20_ku = fid.variables['flag_mcd_20_ku'][:].copy()
#-- CryoSat-2 Measurement Group
#-- Derived from instrument measurement parameters
Data_20Hz = {}
#-- Window Delay reference (two-way) corrected for instrument delays
Data_20Hz['TD'] = np.ma.zeros((n_records,n_blocks))
Data_20Hz['TD'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
window_del_20_ku = fid.variables['window_del_20_ku'][:].copy()
#-- H0 Initial Height Word from telemetry
Data_20Hz['H_0'] = np.ma.zeros((n_records,n_blocks))
Data_20Hz['H_0'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
h0_applied_20_ku = fid.variables['h0_applied_20_ku'][:].copy()
#-- COR2 Height Rate: on-board tracker height rate over the radar cycle
Data_20Hz['COR2'] = np.ma.zeros((n_records,n_blocks))
Data_20Hz['COR2'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
cor2_applied_20_ku = fid.variables['cor2_applied_20_ku'][:].copy()
#-- Coarse Range Word (LAI) derived from telemetry
Data_20Hz['LAI'] = np.ma.zeros((n_records,n_blocks))
Data_20Hz['LAI'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
h0_lai_word_20_ku = fid.variables['h0_lai_word_20_ku'][:].copy()
#-- Fine Range Word (FAI) derived from telemetry
Data_20Hz['FAI'] = np.ma.zeros((n_records,n_blocks))
Data_20Hz['FAI'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
h0_fai_word_20_ku = fid.variables['h0_fai_word_20_ku'][:].copy()
#-- Automatic Gain Control Channel 1: AGC gain applied on Rx channel 1.
#-- Gain calibration corrections are applied (Sum of AGC stages 1 and 2
#-- plus the corresponding corrections) (dB/100)
Data_20Hz['AGC_CH1'] = np.ma.zeros((n_records,n_blocks))
Data_20Hz['AGC_CH1'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
agc_ch1_20_ku = fid.variables['agc_ch1_20_ku'][:].copy()
#-- Automatic Gain Control Channel 2: AGC gain applied on Rx channel 2.
#-- Gain calibration corrections are applied (dB/100)
Data_20Hz['AGC_CH2'] = np.ma.zeros((n_records,n_blocks))
Data_20Hz['AGC_CH2'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
agc_ch2_20_ku = fid.variables['agc_ch2_20_ku'][:].copy()
#-- Total Fixed Gain On Channel 1: gain applied by the RF unit. (dB/100)
Data_20Hz['TR_gain_CH1'] = np.ma.zeros((n_records,n_blocks))
Data_20Hz['TR_gain_CH1'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
tot_gain_ch1_20_ku = fid.variables['tot_gain_ch1_20_ku'][:].copy()
#-- Total Fixed Gain On Channel 2: gain applied by the RF unit. (dB/100)
Data_20Hz['TR_gain_CH2'] = np.ma.zeros((n_records,n_blocks))
Data_20Hz['TR_gain_CH2'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
tot_gain_ch2_20_ku = fid.variables['tot_gain_ch2_20_ku'][:].copy()
#-- Transmit Power in microWatts
Data_20Hz['TX_Power'] = np.ma.zeros((n_records,n_blocks))
Data_20Hz['TX_Power'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
transmit_pwr_20_ku = fid.variables['transmit_pwr_20_ku'][:].copy()
#-- Doppler range correction: Radial component (mm)
#-- computed for the component of satellite velocity in the nadir direction
Data_20Hz['Doppler_range'] = np.ma.zeros((n_records,n_blocks))
Data_20Hz['Doppler_range'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
dop_cor_20_ku = fid.variables['dop_cor_20_ku'][:].copy()
#-- Value of Doppler Angle for the first single look echo (1e-7 radians)
Data_20Hz['Doppler_angle_start'] = np.ma.zeros((n_records,n_blocks))
Data_20Hz['Doppler_angle_start'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
dop_angle_start_20_ku = fid.variables['dop_angle_start_20_ku'][:].copy()
#-- Value of Doppler Angle for the last single look echo (1e-7 radians)
Data_20Hz['Doppler_angle_stop'] = np.ma.zeros((n_records,n_blocks))
Data_20Hz['Doppler_angle_stop'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
dop_angle_stop_20_ku = fid.variables['dop_angle_stop_20_ku'][:].copy()
#-- Instrument Range Correction: transmit-receive antenna (mm)
#-- Calibration correction to range on channel 1 computed from CAL1.
Data_20Hz['TR_inst_range'] = np.ma.zeros((n_records,n_blocks))
Data_20Hz['TR_inst_range'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
instr_cor_range_tx_rx_20_ku = fid.variables['instr_cor_range_tx_rx_20_ku'][:].copy()
#-- Instrument Range Correction: receive-only antenna (mm)
#-- Calibration correction to range on channel 2 computed from CAL1.
Data_20Hz['R_inst_range'] = np.ma.zeros((n_records,n_blocks))
Data_20Hz['R_inst_range'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
instr_cor_range_rx_20_ku = fid.variables['instr_cor_range_rx_20_ku'][:].copy()
#-- Instrument Gain Correction: transmit-receive antenna (dB/100)
#-- Calibration correction to gain on channel 1 computed from CAL1
Data_20Hz['TR_inst_gain'] = np.ma.zeros((n_records,n_blocks))
Data_20Hz['TR_inst_gain'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
#import sys
#sys.path.insert(0,'/usr/local/lib/python2.7/site-packages')
import matplotlib.pyplot as plt
#from mpl_toolkits.mplot3d import Axes3D
import numpy as np
from scipy.stats import norm
from scipy.special import gamma, logsumexp
import cv2  # used by img_preprocess below but not imported in the original
import pdb
from operator import itemgetter
from scipy.interpolate import spline
import copy
import time
COLORS = ['g','k','r','b','c','m','y','burlywood','chartreuse','0.8','0.6', '0.4', '0.2']
MARKER = ['-','--', '*-', '+-','1-','o-','x-','1','2','3']
T_chain = 5000
T_loop = 5000
T_grid5 = 15000
T_grid10 = 20000
T_minimaze = 20000
T_maze = 40000
EVAL_RUNS = 10
EVAL_NUM = 100
EVAL_STEPS = 50
EVAL_EPS = 0.0
REW_VAR = 0.00001
color2num = dict(
gray=30,
red=31,
green=32,
yellow=33,
blue=34,
magenta=35,
cyan=36,
white=37,
crimson=38
)
def colorize(string, color, bold=False, highlight = False):
attr = []
num = color2num[color]
if highlight: num += 10
attr.append(str(num))
if bold: attr.append('1')
return '\x1b[%sm%s\x1b[0m' % (';'.join(attr), string)
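# Illustrative usage (not part of the original file): wrap a status string in
# ANSI colour codes, e.g. to highlight the end of an evaluation run.
def _example_colorize():
    return colorize("evaluation finished", "green", bold=True)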
def discrete_phi(state, action, dim, anum):
phi = np.zeros(dim, dtype=float)
phi[state*anum+action] = 1.0
return phi
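# Illustrative sketch (not part of the original file): discrete_phi builds a
# one-hot feature over (state, action) pairs, so for a 3-state / 2-action
# problem the pair (state=2, action=1) activates index 2*2+1 = 5 of a
# length-6 vector.  The sizes below are made-up example values.
def _example_discrete_phi():
    phi = discrete_phi(state=2, action=1, dim=6, anum=2)
    assert phi[5] == 1.0 and phi.sum() == 1.0
    return phi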
def img_preprocess(org_img):
imgGray = cv2.cvtColor( org_img, cv2.COLOR_RGB2GRAY )
resizedImg = cv2.resize(np.reshape(imgGray, org_img.shape[:-1]), (84, 110))
cropped = resizedImg[18:102,:]
cropped = cropped.astype(np.float32)
cropped *= (1.0/255.0)
return cropped
def rbf(state, action, dim, const=1.0):
n = dim
c1 = np.reshape(np.array([-np.pi/4.0, 0.0, np.pi/4.0]),(3,1)) # For inverted pendulum
c2 = np.reshape(np.array([-1.0,0.0,1.0]), (1,3)) # For inverted pendulum
#basis = 1/np.sqrt(np.exp((c1-state[0])**2)*np.exp((c2-state[1])**2))
basis = np.exp(-0.5*(c1-state[0])**2)*np.exp(-0.5*(c2-state[1])**2)
basis = np.append(basis.flatten(), const)
phi = np.zeros(3*n, dtype=np.float32)
phi[action*n:(action+1)*n] = basis
return phi
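# Illustrative sketch (not part of the original file): with the 3x3 RBF grid
# plus the constant term, each action owns a block of 10 features, so dim is
# assumed to be 10 here.  Only the block of the chosen action is populated.
def _example_rbf():
    state = np.array([0.1, -0.5])  # e.g. (angle, angular velocity) of the pendulum
    phi = rbf(state, 1, 10)
    assert phi[:10].sum() == 0.0 and phi[20:].sum() == 0.0
    return phi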
def normalGamma(x1, x2, mu, l , a, b):
const = np.sqrt(l/2/np.pi, dtype=np.float32)*(b**a)/gamma(a)
exp_input = np.maximum(-10.0,-0.5*l*x2*(x1-mu)**2-b*x2)
output = const*x2**(a-0.5)*np.exp(exp_input, dtype=np.float32)
if(np.isnan(output)).any():
pdb.set_trace()
return output
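# Note added for clarity: normalGamma evaluates the Normal-Gamma density
#   NG(x1, x2 | mu, l, a, b) = sqrt(l/(2*pi)) * b**a / Gamma(a)
#                              * x2**(a - 1/2) * exp(-b*x2 - 0.5*l*x2*(x1 - mu)**2)
# where x1 plays the role of the mean and x2 of the precision; the exponent is
# clipped at -10 above to avoid numerical underflow.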
def mean_max_graph(mu,test_var):
d = np.arange(0.1,1.0,0.1)
var = np.array([0.1,1.0,10.0,20.0,30.0,40.0,50.0,80.0,100.0])
if not(test_var in var):
raise ValueError('the input variance value does not exist')
for (i,v) in enumerate(var):
if test_var == v:
idx = i
r = len(var)
c = len(d)
d,var = np.meshgrid(d,var)
mu_bar = d*(1+d)/(1+d**2)*mu
var_bar = d**2/(1+d**2)*var
v = np.sqrt(var*d**2 + var_bar)
mean = np.zeros(d.shape)
for i in range(r):
for j in range(c):
mean[i,j] = mu_bar[i,j] + var_bar[i,j]*norm.pdf(mu_bar[i,j],d[i,j]*mu, v[i,j])/norm.cdf(mu_bar[i,j],d[i,j]*mu,v[i,j])
fig = plt.figure(1)
ax = fig.gca(projection='3d')
surf = ax.plot_surface(d, var, mu*np.ones(d.shape), color='r', linewidth=0, antialiased=False)
ax.plot_wireframe(d, var, mean)#, rstride=10, cstride=10)
plt.figure(2)
plt.plot(d[idx,:],mean[idx,:])
plt.plot(d[idx,:],mu*np.ones((d[idx,:]).shape),'r')
plt.title("For Mean="+str(mu)+" and Variance="+str(test_var))
plt.xlabel("discount factor")
plt.ylabel("Mean Value")
plt.show()
def maxGaussian(means, sds):
"""
INPUT:
means: a numpy array of Gaussian mean values of (next state, action) pairs for all available actions.
sds: a numpy array of Gaussian SD values of (next state,action) pairs for all available actions.
OUTPUT:
mean and variance of the distribution of the maximum over the input Gaussians
"""
num_interval = 500
interval = 12.0*max(sds)/float(num_interval)
x = np.arange(min(means-6.0*sds),max(means+6.0*sds),interval)
eps = 1e-5*np.ones(x.shape) # 501X1
max_p = np.zeros(x.shape) # 501X1
cdfs = [np.maximum(eps,norm.cdf(x,means[i], sds[i])) for i in range(len(means))]
for i in range(len(means)):
max_p += norm.pdf(x,means[i],sds[i])/cdfs[i]
max_p*=np.prod(np.array(cdfs),0)
z = np.sum(max_p)*interval
max_p = max_p/z # Normalization
max_mean = np.inner(x,max_p)*interval
return max_mean,np.inner(x**2,max_p)*interval- max_mean**2
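# Illustrative sketch (not part of the original file): numerically integrate the
# distribution of max_i X_i for three independent Gaussians.  The returned mean
# should be at least as large as the largest individual mean.
def _example_maxGaussian():
    means = np.array([0.0, 1.0, 2.0])
    sds = np.array([1.0, 0.5, 0.3])
    max_mean, max_var = maxGaussian(means, sds)
    return max_mean, max_var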
def posterior_numeric(n_means, n_vars, c_mean, c_var, rew, dis, terminal, num_interval=2000,
width = 10.0, varTH = 1e-10, noise=0.0):
# ADFQ-Numeric
# Not for Batch
#c_var = c_var + 0.01
if terminal:
new_var = 1.0/(1.0/c_var + 1.0/REW_VAR)
new_sd = np.sqrt(new_var)
new_mean = new_var*(c_mean/c_var + rew/REW_VAR)
interval = width*new_sd/float(num_interval)
x = np.arange(new_mean-0.5*width*new_sd,new_mean+0.5*width*new_sd, interval)
return new_mean, new_var, (x, norm.pdf(x, new_mean, new_sd))
target_means = rew + dis*np.array(n_means, dtype=np.float64)
target_vars = dis*dis*(np.array(n_vars, dtype = np.float64) + noise)
anum = len(n_means)
bar_vars = 1.0/(1.0/c_var + 1.0/target_vars)
bar_means = bar_vars*(c_mean/c_var + target_means/target_vars)
add_vars = c_var+target_vars
sd_range = np.sqrt(np.append(target_vars, bar_vars))
mean_range = np.append(target_means, bar_means)
x_max = max(mean_range+0.5*width*sd_range)
x_min = min(mean_range-0.5*width*sd_range)
interval = (x_max-x_min)/float(num_interval)
x = np.arange(x_min,x_max, interval)
x = np.append(x, x[-1]+interval)
#mean_range = np.concatenate((target_means,[c_mean]))
#sd_range = np.sqrt(np.concatenate((dis*dis*n_vars, [c_var])))
#interval = (width*max(sd_range)+10.0)/float(num_interval)
#x = np.arange(min(mean_range-0.5*width*sd_range-5.0), max(mean_range+0.5*width*sd_range+5.0), interval)
cdfs = np.array([norm.cdf(x, target_means[i], np.sqrt(target_vars[i])) for i in range(anum)])
nonzero_ids = []
for a_cdf in cdfs:
if a_cdf[0]>0.0:
nonzero_ids.append(0)
else:
for (i,v) in enumerate(a_cdf):
if v > 0.0:
nonzero_ids.append(i)
break
if len(nonzero_ids) != anum:
print("CDF peak is outside of the range")
pdb.set_trace()
log_probs = []
min_id = len(x) # To find the maximum length non-zero probability vector over all actions.
log_max_prob = -10**100
for b in range(anum):
min_id = min(min_id, nonzero_ids[b])
idx = max([nonzero_ids[c] for c in range(anum) if c!=b]) # For the product of CDF part, valid id should consider all actions except b
tmp = -np.log(2*np.pi)-0.5*np.log(add_vars[b])-0.5*np.log(bar_vars[b])-0.5*(c_mean-target_means[b])**2/add_vars[b] \
- 0.5*(x[idx:]-bar_means[b])**2/bar_vars[b] \
+ np.sum([np.log(cdfs[c, idx:]) for c in range(anum) if c!=b], axis=0)
log_max_prob = max(log_max_prob, max(tmp))
log_probs.append(tmp)
probs = [np.exp(lp-log_max_prob) for lp in log_probs]
probs_l = []
for p in probs:
probs_l.append(np.concatenate((np.zeros(len(x) - min_id -len(p),), p)))
prob_tot = np.sum(np.array(probs_l),axis=0)
if np.sum(prob_tot) == 0.0:
pdb.set_trace()
prob_tot = prob_tot/np.sum(prob_tot, dtype=np.float32)/interval
x = x[min_id:]
new_mean = interval*np.inner(x, prob_tot)
new_var = interval*np.inner((x-new_mean)**2, prob_tot)
if np.isnan(new_var):
print("variance is NaN")
pdb.set_trace()
return new_mean, np.maximum(varTH, new_var), (x, prob_tot)
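# Illustrative sketch (not part of the original file): one ADFQ-Numeric update
# for a state-action pair with two next-state actions.  All numbers are made-up
# toy values.
def _example_posterior_numeric():
    n_means = np.array([1.0, 0.5])
    n_vars = np.array([0.2, 0.3])
    mean, var, (x, pdf) = posterior_numeric(
        n_means, n_vars, c_mean=0.8, c_var=0.5, rew=0.1, dis=0.9, terminal=0)
    return mean, var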
def posterior_approx(n_means, n_vars, c_mean, c_var, rew, dis, terminal, logEps = - 1e+20,
varTH = 1e-10, asymptotic=False, batch=False, noise=0.0):
# ADFQ-Approx
# TO DO : Convert To Batch-able
if batch:
batch_size = len(n_means)
c_mean = np.reshape(c_mean, (batch_size,1))
c_var = np.reshape(c_var, (batch_size,1))
rew = np.reshape(rew, (batch_size,1))
terminal = np.reshape(terminal, (batch_size,1))
else:
if terminal==1:
var_new = 1./(1./c_var + 1./REW_VAR)
mean_new = var_new*(c_mean/c_var + rew/REW_VAR)
return mean_new, var_new, (n_means, n_vars, np.ones(n_means.shape))
target_means = rew + dis*np.array(n_means, dtype=np.float64)
target_vars = dis*dis*(np.array(n_vars, dtype = np.float64) + noise)
bar_vars = 1.0/(1.0/c_var + 1.0/target_vars)
bar_means = bar_vars*(c_mean/c_var + target_means/target_vars)
add_vars = c_var + target_vars
sorted_idx = np.argsort(target_means, axis=int(batch))
if batch:
ids = range(0,batch_size)
bar_targets = target_means[ids,sorted_idx[:,-1], np.newaxis]*np.ones(target_means.shape)
bar_targets[ids, sorted_idx[:,-1]] = target_means[ids, sorted_idx[:,-2]]
else:
bar_targets = target_means[sorted_idx[-1]]*np.ones(target_means.shape)
bar_targets[sorted_idx[-1]] = target_means[sorted_idx[-2]]
thetas = np.heaviside(bar_targets-bar_means,0.0)
if asymptotic and (n_vars <= varTH).all() and (c_var <= varTH):
min_b = np.argmin((target_means-c_mean)**2-2*add_vars*logEps*thetas)
weights = np.zeros(np.shape(target_means))
weights[min_b] = 1.0
return bar_means[min_b], np.maximum(varTH, bar_vars[min_b]), (bar_means, bar_vars, weights)
log_weights = -0.5*(np.log(2*np.pi)+np.log(add_vars)+(c_mean-target_means)**2/add_vars) + logEps*thetas
log_weights = log_weights - np.max(log_weights, axis=int(batch), keepdims=batch)
log_weights = log_weights - logsumexp(log_weights, axis=int(batch), keepdims=batch) # normalized!
weights = np.exp(log_weights, dtype=np.float64)
mean_new = np.sum(np.multiply(weights, bar_means), axis=int(batch), keepdims=batch)
var_new = np.maximum(varTH, np.sum(np.multiply(weights,bar_means**2+bar_vars), axis=int(batch), keepdims=batch) - mean_new**2)
var_new = (1.-terminal)*var_new + terminal*1./(1./c_var + 1./REW_VAR)
mean_new = (1.-terminal)*mean_new + terminal*var_new*(c_mean/c_var + rew/REW_VAR)
if np.isnan(mean_new).any() or np.isnan(var_new).any():
pdb.set_trace()
return mean_new, var_new, (bar_means, bar_vars, weights)
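# Illustrative sketch (not part of the original file): the closed-form
# ADFQ-Approx update for the same toy numbers as _example_posterior_numeric,
# which makes it easy to compare the two approximations.
def _example_posterior_approx():
    n_means = np.array([1.0, 0.5])
    n_vars = np.array([0.2, 0.3])
    mean, var, (bar_means, bar_vars, weights) = posterior_approx(
        n_means, n_vars, c_mean=0.8, c_var=0.5, rew=0.1, dis=0.9, terminal=0)
    return mean, var, weights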
def posterior_approx_log(n_means, n_logvars, c_mean, c_logvar, rew, dis, terminal, eps = 0.01,
logvarTH = -100.0, batch=False):
# ADFQ-Approx using log variance
# TO DO : Convert To Batch-able
if batch:
batch_size = len(n_means)
c_mean = np.reshape(c_mean, (batch_size,1))
c_logvar = np.reshape(c_logvar, (batch_size,1))
rew = np.reshape(rew, (batch_size,1))
terminal = np.reshape(terminal, (batch_size,1))
target_means = rew + dis*np.array(n_means, dtype=np.float32)
target_logvars = 2*np.log(dis)+np.array(n_logvars, dtype = np.float32)
bar_logvars = -np.array([logsumexp([-c_logvar, -tv]) for tv in target_logvars])
bar_means = np.exp(bar_logvars-c_logvar)*c_mean + np.exp(bar_logvars-target_logvars)*target_means
if (n_logvars <= logvarTH).all() and (c_logvar <= logvarTH):
min_b = np.argmin(abs(target_means-c_mean))
weights = np.zeros(np.shape(target_means))
weights[min_b] = 1.0
return bar_means[min_b], np.maximum(logvarTH, bar_logvars[min_b]), (bar_means, bar_logvars, weights)
add_logvars = np.array([logsumexp([c_logvar, tv]) for tv in target_logvars])
sorted_idx = np.argsort(target_means, axis=int(batch))
bar_targets = target_means[sorted_idx[-1]]*np.ones(target_means.shape)
bar_targets[sorted_idx[-1]] = target_means[sorted_idx[-2]]
thetas = np.heaviside(bar_targets-bar_means,0.0)
if (add_logvars < logvarTH).any():
print(add_logvars)
log_c = np.maximum(-20.0,-0.5*(np.log(2*np.pi)+add_logvars+((c_mean-target_means)**2)/np.exp(add_logvars))) #(batch_size X anum)
#max_log_c = max(log_c)
c = np.exp(log_c) #np.exp(log_c-max_log_c)
logZ = np.log(np.dot(1. - (1.-eps)*thetas, c))
logmean_new = np.log(np.dot(bar_means*(1. - (1.-eps)*thetas), c)) - logZ
log_moment2 = np.log(np.dot((bar_means**2+np.exp(bar_logvars))*(1. - (1.-eps)*thetas), c)) - logZ
min_term = min(log_moment2, 2*logmean_new)
logvar_new = max(logvarTH, min_term + np.log(np.maximum(1e-10, np.exp(log_moment2-min_term) - np.exp(2*logmean_new-min_term)) ) )
logvar_new = (1.-terminal)*logvar_new - terminal*logsumexp([-c_logvar, -np.log(REW_VAR)])
mean_new = (1.-terminal)*np.exp(logmean_new)+ terminal*(c_mean*np.exp(logvar_new-c_logvar) + rew*np.exp(logvar_new-np.log(REW_VAR)))
if np.isnan(mean_new).any() or np.isnan(logvar_new).any() or np.isinf(-logvar_new).any():
pdb.set_trace()
weights = np.exp(log_c + np.log(1.-(1.-eps)*thetas) - logZ) #- max_log_c
return mean_new, logvar_new, (bar_means, bar_logvars, weights)
def posterior_approx_log_v2(n_means, n_logvars, c_mean, c_logvar, rew, dis, terminal,
logEps = -1e+20, logvarTH = -100.0, batch=False):
# ADFQ-Approx using log variance
# Version 2 - considering smaller epsilon
# TO DO : Convert To Batch-able
if batch:
batch_size = len(n_means)
c_mean = np.reshape(c_mean, (batch_size,1))
c_logvar = np.reshape(c_logvar, (batch_size,1))
rew = np.reshape(rew, (batch_size,1))
terminal = np.reshape(terminal, (batch_size,1))
target_means = rew + dis*np.array(n_means, dtype=np.float32)
target_logvars = 2*np.log(dis)+np.array(n_logvars, dtype = np.float32)
bar_logvars = -np.array([logsumexp([-c_logvar, -tv]) for tv in target_logvars])
bar_means = np.exp(bar_logvars-c_logvar)*c_mean + np.exp(bar_logvars-target_logvars)*target_means
if (n_logvars <= logvarTH).all() and (c_logvar <= logvarTH):
min_b = np.argmin(abs(target_means-c_mean))
weights = np.zeros(np.shape(target_means))
weights[min_b] = 1.0
return bar_means[min_b], np.maximum(logvarTH, bar_logvars[min_b]), (bar_means, bar_logvars, weights)
add_logvars = np.array([logsumexp([c_logvar, tv]) for tv in target_logvars])
sorted_idx = np.argsort(target_means, axis=int(batch))
bar_targets = target_means[sorted_idx[-1]]*np.ones(target_means.shape)
bar_targets[sorted_idx[-1]] = target_means[sorted_idx[-2]]
thetas = np.heaviside(bar_targets-bar_means,0.0)
if (add_logvars < logvarTH).any():
print(add_logvars)
log_weights = -0.5*(np.log(2*np.pi)+add_logvars+((c_mean-target_means)**2)/np.exp(add_logvars)) + logEps*thetas #(batch_size X anum)
log_weights = log_weights - max(log_weights)
log_weights = log_weights - logsumexp(log_weights) # normalized!
weights = np.exp(log_weights)
logmean_new = np.log(np.dot(weights,bar_means))
log_moment2 = np.log(np.dot(bar_means**2+np.exp(bar_logvars), weights))
min_term = min(log_moment2, 2*logmean_new)
if np.exp(log_moment2-min_term) - np.exp(2*logmean_new-min_term) <= 0.0:
logvar_new = logvarTH
else:
logvar_new = max(logvarTH, min_term + np.log(np.exp(log_moment2-min_term) - np.exp(2*logmean_new-min_term)))
logvar_new = (1.-terminal)*logvar_new - terminal*logsumexp([-c_logvar, -np.log(REW_VAR)])
mean_new = (1.-terminal)*np.exp(logmean_new)+ terminal*(c_mean*np.exp(logvar_new-c_logvar) + rew*np.exp(logvar_new-np.log(REW_VAR)))
if np.isnan(mean_new).any() or np.isnan(logvar_new).any() or np.isinf(-logvar_new).any():
pdb.set_trace()
return mean_new, logvar_new, (bar_means, bar_logvars, weights)
def posterior_soft_approx(n_means, n_vars, c_mean, c_var, rew, dis, terminal, varTH = 1e-10,
matching=True, ratio = False, scale = False, c_scale = 1.0, asymptotic=False, plot=False, noise=0.0, batch=False):
# ADFQ-SoftApprox
# Need New name
# Not batch yet
if terminal == 1:
var_new = 1./(1./c_var + c_scale/REW_VAR)
if not(scale):
var_new = np.maximum(varTH, var_new)
mean_new = var_new*(c_mean/c_var + rew/REW_VAR*c_scale)
return mean_new, var_new, (n_means, n_vars, np.ones(n_means.shape))
target_means = rew + dis*np.array(n_means, dtype=np.float64)
target_vars = dis*dis*(np.array(n_vars, dtype = np.float64) + noise/c_scale)
if matching:
if ratio or (asymptotic and (n_vars <= varTH).all() and (c_var <= varTH)):
stats = posterior_match_ratio_helper(target_means, target_vars, c_mean, c_var, dis)
b_rep = np.argmin(stats[:,2])
weights = np.zeros((len(stats),))
weights[b_rep] = 1.0
return stats[b_rep, 0], np.maximum(varTH, stats[b_rep, 1]), (stats[:,0], stats[:,1], weights)
elif scale :
stats = posterior_match_scale_helper(target_means, target_vars, c_mean, c_var, dis, c_scale = c_scale, plot=plot)
else:
stats = posterior_match_helper(target_means, target_vars, c_mean, c_var, dis, plot=plot)
else:
stats = posterior_soft_helper(target_means, target_vars, c_mean, c_var)
k = stats[:,2] - max(stats[:,2])
weights = np.exp(k - logsumexp(k), dtype=np.float64) # normalized.
mean_new = np.sum(weights*stats[:,0], axis = int(batch))
if scale:
#var_new = np.dot(weights,stats[:,0]**2/c_scale + stats[:,1]) - mean_new**2/c_scale
var_new = np.dot(weights, stats[:,1]) + (np.dot(weights,stats[:,0]**2) - mean_new**2)/c_scale
if var_new <= 0.0:
#print("variance equals or below 0")
pdb.set_trace()
var_new = varTH
else:
var_new = np.maximum(varTH, np.dot(weights,stats[:,0]**2 + stats[:,1]) - mean_new**2)
if np.isnan(mean_new).any() or np.isnan(var_new).any():
pdb.set_trace()
return mean_new, var_new, (stats[:,0], stats[:,1], weights)
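# Illustrative sketch (not part of the original file): the moment-matched
# (ADFQ-SoftApprox) update for the same toy numbers used above.
def _example_posterior_soft_approx():
    n_means = np.array([1.0, 0.5])
    n_vars = np.array([0.2, 0.3])
    mean, var, _ = posterior_soft_approx(
        n_means, n_vars, c_mean=0.8, c_var=0.5, rew=0.1, dis=0.9, terminal=0)
    return mean, var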
def posterior_match_helper(target_means, target_vars, c_mean, c_var, discount, plot=False):
# Matching a Gaussian distribution with the first and second derivatives.
dis2 = discount*discount
sorted_idx = np.flip(np.argsort(target_means),axis=0)
bar_vars = 1.0/(1.0/c_var + 1.0/target_vars)
bar_means = bar_vars*(c_mean/c_var + target_means/target_vars)
add_vars = c_var+target_vars
log_weights = -0.5*(np.log(2*np.pi) + np.log(add_vars) + (c_mean-target_means)**2/add_vars) #(batch_size X anum)
anum = len(sorted_idx)
stats = []
for b in sorted_idx:
b_primes = [c for c in sorted_idx if c!=b] # From large to small.
upper = 10000
for i in range(anum):
if i == (anum-1):
lower = -10000
else:
lower = target_means[b_primes[i]]
var_star = 1./(1/bar_vars[b]+sum(1/target_vars[b_primes[:i]]))
mu_star = (bar_means[b]/bar_vars[b]+ sum(target_means[b_primes[:i]]/target_vars[b_primes[:i]]))*var_star
if (np.float16(mu_star) >= np.float16(lower)) and (np.float16(mu_star) <= np.float16(upper)):
k = log_weights[b]+0.5*(np.log(var_star)-np.log(bar_vars[b])) \
-0.5*(mu_star-bar_means[b])**2/bar_vars[b] \
-0.5*sum([np.maximum(target_means[c]-mu_star, 0.0)**2/target_vars[c] for c in b_primes])
#-0.5*sum((target_means[b_primes[:i]]-mu_star)**2/target_vars[b_priems[:i]])
stats.append((mu_star, var_star, k))
break
upper = lower
if not(len(stats)== anum):
pdb.set_trace()
if plot:
x = np.arange(min(target_means)+2,max(target_means)+10,0.4)
f, ax_set = plt.subplots(anum, sharex=True, sharey=False)
for (i,b) in enumerate(sorted_idx):
b_primes = [c for c in sorted_idx if c!=b]
true = np.exp(log_weights[b])/np.sqrt(2*np.pi*bar_vars[b])*np.exp(-0.5*(x-bar_means[b])**2/bar_vars[b] \
- 0.5*np.sum([(np.maximum(target_means[c]-x,0.0))**2/target_vars[c] for c in b_primes],axis=0))
approx = np.exp(stats[i][2])/np.sqrt(2*np.pi*stats[i][1])*np.exp(-0.5*(x-stats[i][0])**2/stats[i][1])
ax_set[b].plot(x,true)
ax_set[b].plot(x, approx,'+', markersize=8)
plt.show()
return np.array([stats[sorted_idx[b]] for b in range(anum)], dtype=np.float64)
def posterior_match_scale_helper(target_means, target_vars, c_mean, c_var, discount, c_scale, plot=False):
# Matching a Gaussian distribution with the first and second derivatives.
# target vars and c_var are not the true variance but scaled variance.
dis2 = discount*discount
rho_vars = c_var/target_vars*dis2
sorted_idx = np.flip(np.argsort(target_means),axis=0)
bar_vars = 1.0/(1.0/c_var + 1.0/target_vars) # scaled
bar_means = bar_vars*(c_mean/c_var + target_means/target_vars)
add_vars = c_var+target_vars # scaled
anum = len(sorted_idx)
stats = []
log_weights = -0.5*(np.log(2*np.pi) + np.log(add_vars) + (c_mean-target_means)**2/add_vars)
for (j,b) in enumerate(sorted_idx):
b_primes = [c for c in sorted_idx if c!=b] # From large to small.
upper = 10000
tmp_vals = []
for i in range(anum):
lower = 1e-5 if i==(anum-1) else target_means[b_primes[i]]
mu_star = np.float64((bar_means[b]+sum(target_means[b_primes[:i]]*rho_vars[b_primes[:i]])/(dis2+rho_vars[b])) \
/(1.+sum(rho_vars[b_primes[:i]])/(dis2+rho_vars[b])))
tmp_vals.append((lower, mu_star, upper))
if (np.float32(mu_star) >= np.float32(lower)) and (np.float32(mu_star) <= np.float32(upper)):
var_star = 1./(1/bar_vars[b]+sum(1/target_vars[b_primes[:i]])) # scaled
k = 0.5*(np.log(var_star) - np.log(bar_vars[b]) - np.log(2*np.pi) - | np.log(add_vars[b]) | numpy.log |
import numpy
from coopihc.inference.GoalInferenceWithUserPolicyGiven import (
GoalInferenceWithUserPolicyGiven,
)
from coopihc.policy.ELLDiscretePolicy import ELLDiscretePolicy
from coopihc.space.State import State
from coopihc.space.StateElement import StateElement
from coopihc.space.Space import Space
# ----- needed before testing engine
action_state = State()
action_state["action"] = StateElement(
values=None,
spaces=Space([numpy.array([-3, -2, -1, 0, 1, 2, 3], dtype=numpy.int16)]),
)
import mmcv
import numpy as np
import os
from collections import OrderedDict
from nuscenes.nuscenes import NuScenes
from nuscenes.utils.geometry_utils import view_points
from os import path as osp
from pyquaternion import Quaternion
from shapely.geometry import MultiPoint, box
from typing import List, Tuple, Union
from mmdet3d.datasets import NuScenesDataset
nus_categories = ('car', 'truck', 'trailer', 'bus', 'construction_vehicle',
'bicycle', 'motorcycle', 'pedestrian', 'traffic_cone',
'barrier')
def create_nuscenes_infos(root_path,
info_prefix,
version='v1.0-trainval',
max_sweeps=10):
"""Create info file of nuscene dataset.
Given the raw data, generate its related info file in pkl format.
Args:
root_path (str): Path of the data root.
info_prefix (str): Prefix of the info file to be generated.
version (str): Version of the data.
Default: 'v1.0-trainval'
max_sweeps (int): Max number of sweeps.
Default: 10
"""
from nuscenes.nuscenes import NuScenes
nusc = NuScenes(version=version, dataroot=root_path, verbose=True)
from nuscenes.utils import splits
available_vers = ['v1.0-trainval', 'v1.0-test', 'v1.0-mini']
assert version in available_vers
if version == 'v1.0-trainval':
train_scenes = splits.train
val_scenes = splits.val
elif version == 'v1.0-test':
train_scenes = splits.test
val_scenes = []
elif version == 'v1.0-mini':
train_scenes = splits.mini_train
val_scenes = splits.mini_val
else:
raise ValueError('unknown')
# filter existing scenes.
available_scenes = get_available_scenes(nusc)
available_scene_names = [s['name'] for s in available_scenes]
train_scenes = list(
filter(lambda x: x in available_scene_names, train_scenes))
val_scenes = list(filter(lambda x: x in available_scene_names, val_scenes))
train_scenes = set([
available_scenes[available_scene_names.index(s)]['token']
for s in train_scenes
])
val_scenes = set([
available_scenes[available_scene_names.index(s)]['token']
for s in val_scenes
])
test = 'test' in version
if test:
print('test scene: {}'.format(len(train_scenes)))
else:
print('train scene: {}, val scene: {}'.format(
len(train_scenes), len(val_scenes)))
train_nusc_infos, val_nusc_infos = _fill_trainval_infos(
nusc, train_scenes, val_scenes, test, max_sweeps=max_sweeps)
metadata = dict(version=version)
if test:
print('test sample: {}'.format(len(train_nusc_infos)))
data = dict(infos=train_nusc_infos, metadata=metadata)
info_path = osp.join(root_path,
'{}_infos_test.pkl'.format(info_prefix))
mmcv.dump(data, info_path)
else:
print('train sample: {}, val sample: {}'.format(
len(train_nusc_infos), len(val_nusc_infos)))
data = dict(infos=train_nusc_infos, metadata=metadata)
info_path = osp.join(root_path,
'{}_infos_train.pkl'.format(info_prefix))
mmcv.dump(data, info_path)
data['infos'] = val_nusc_infos
info_val_path = osp.join(root_path,
'{}_infos_val.pkl'.format(info_prefix))
mmcv.dump(data, info_val_path)
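# Example invocation (illustrative; the data root below is an assumed local path):
#   create_nuscenes_infos('./data/nuscenes', 'nuscenes', version='v1.0-mini', max_sweeps=10)
# This writes nuscenes_infos_train.pkl / nuscenes_infos_val.pkl (or
# nuscenes_infos_test.pkl for the test split) under the data root.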
def get_available_scenes(nusc):
"""Get available scenes from the input nuscenes class.
Given the raw data, get the information of available scenes for
further info generation.
Args:
nusc (class): Dataset class in the nuScenes dataset.
Returns:
available_scenes (list[dict]): List of basic information for the
available scenes.
"""
available_scenes = []
print('total scene num: {}'.format(len(nusc.scene)))
for scene in nusc.scene:
scene_token = scene['token']
scene_rec = nusc.get('scene', scene_token)
sample_rec = nusc.get('sample', scene_rec['first_sample_token'])
sd_rec = nusc.get('sample_data', sample_rec['data']['LIDAR_TOP'])
has_more_frames = True
scene_not_exist = False
while has_more_frames:
lidar_path, boxes, _ = nusc.get_sample_data(sd_rec['token'])
lidar_path = str(lidar_path)
if os.getcwd() in lidar_path:
# path from lyftdataset is absolute path
lidar_path = lidar_path.split(f'{os.getcwd()}/')[-1]
# relative path
if not mmcv.is_filepath(lidar_path):
scene_not_exist = True
break
else:
break
if scene_not_exist:
continue
available_scenes.append(scene)
print('exist scene num: {}'.format(len(available_scenes)))
return available_scenes
def _fill_trainval_infos(nusc,
train_scenes,
val_scenes,
test=False,
max_sweeps=10,
trans_data_for_show=False,
root_save_path=None,
need_val_scenes=True):
"""Generate the train/val infos from the raw data.
Args:
nusc (:obj:`NuScenes`): Dataset class in the nuScenes dataset.
train_scenes (list[str]): Basic information of training scenes.
val_scenes (list[str]): Basic information of validation scenes.
test (bool): Whether use the test mode. In the test mode, no
annotations can be accessed. Default: False.
max_sweeps (int): Max number of sweeps. Default: 10.
Returns:
tuple[list[dict]]: Information of training set and validation set
that will be saved to the info file.
"""
train_nusc_infos = []
val_nusc_infos = []
if trans_data_for_show:
point_label_mapping = []
for name in NuScenesDataset.POINT_CLASS_GENERAL:
point_label_mapping.append(NuScenesDataset.POINT_CLASS_SEG.index(NuScenesDataset.PointClassMapping[name]))
point_label_mapping = np.array(point_label_mapping, dtype=np.uint8)
set_idx = 0
L_cam_path = []
R_cam_path = []
root_save_path = '/data/jk_save_dir/temp_dir'
assert root_save_path != None
from mmdet3d.core.bbox import box_np_ops as box_np_ops
for sample in mmcv.track_iter_progress(nusc.sample):
lidar_token = sample['data']['LIDAR_TOP']
sd_rec = nusc.get('sample_data', sample['data']['LIDAR_TOP'])
cs_record = nusc.get('calibrated_sensor',
sd_rec['calibrated_sensor_token'])
pose_record = nusc.get('ego_pose', sd_rec['ego_pose_token'])
lidar_path, boxes, _ = nusc.get_sample_data(lidar_token)
mmcv.check_file_exist(lidar_path)
if need_val_scenes and sample['scene_token'] in train_scenes:
continue
info = {
'lidar_path': lidar_path,
'token': sample['token'],
'sweeps': [],
'cams': dict(),
'lidar2ego_translation': cs_record['translation'],
'lidar2ego_rotation': cs_record['rotation'],
'ego2global_translation': pose_record['translation'],
'ego2global_rotation': pose_record['rotation'],
'timestamp': sample['timestamp'],
}
l2e_r = info['lidar2ego_rotation']
l2e_t = info['lidar2ego_translation']
e2g_r = info['ego2global_rotation']
e2g_t = info['ego2global_translation']
l2e_r_mat = Quaternion(l2e_r).rotation_matrix
e2g_r_mat = Quaternion(e2g_r).rotation_matrix
# obtain 6 image's information per frame
camera_types = [
'CAM_FRONT',
'CAM_FRONT_RIGHT',
'CAM_FRONT_LEFT',
'CAM_BACK',
'CAM_BACK_LEFT',
'CAM_BACK_RIGHT',
]
for cam in camera_types:
cam_token = sample['data'][cam]
cam_path, _, cam_intrinsic = nusc.get_sample_data(cam_token)
cam_info = obtain_sensor2top(nusc, cam_token, l2e_t, l2e_r_mat,
e2g_t, e2g_r_mat, cam)
cam_info.update(cam_intrinsic=cam_intrinsic)
info['cams'].update({cam: cam_info})
# obtain sweeps for a single key-frame
sd_rec = nusc.get('sample_data', sample['data']['LIDAR_TOP'])
sweeps = []
while len(sweeps) < max_sweeps:
if not sd_rec['prev'] == '':
sweep = obtain_sensor2top(nusc, sd_rec['prev'], l2e_t,
l2e_r_mat, e2g_t, e2g_r_mat, 'lidar')
sweeps.append(sweep)
sd_rec = nusc.get('sample_data', sd_rec['prev'])
else:
break
info['sweeps'] = sweeps
# obtain annotation
if not test:
annotations = [
nusc.get('sample_annotation', token)
for token in sample['anns']
]
locs = np.array([b.center for b in boxes]).reshape(-1, 3)
dims = np.array([b.wlh for b in boxes]).reshape(-1, 3)
rots = np.array([b.orientation.yaw_pitch_roll[0]
for b in boxes]).reshape(-1, 1)
velocity = np.array(
[nusc.box_velocity(token)[:2] for token in sample['anns']])
valid_flag = np.array(
[(anno['num_lidar_pts'] + anno['num_radar_pts']) > 0
for anno in annotations],
dtype=bool).reshape(-1)
# convert velo from global to lidar
for i in range(len(boxes)):
velo = np.array([*velocity[i], 0.0])
velo = velo @ np.linalg.inv(e2g_r_mat).T @ np.linalg.inv(
l2e_r_mat).T
velocity[i] = velo[:2]
names = [b.name for b in boxes]
for i in range(len(names)):
if names[i] in NuScenesDataset.NameMapping:
names[i] = NuScenesDataset.NameMapping[names[i]]
names = np.array(names)
# we need to convert rot to SECOND format.
gt_boxes = np.concatenate([locs, dims, -rots - np.pi / 2], axis=1)
assert len(gt_boxes) == len(
annotations), f'{len(gt_boxes)}, {len(annotations)}'
info['gt_boxes'] = gt_boxes
info['gt_names'] = names
info['gt_velocity'] = velocity.reshape(-1, 2)
info['num_lidar_pts'] = np.array(
[a['num_lidar_pts'] for a in annotations])
info['num_radar_pts'] = np.array(
[a['num_radar_pts'] for a in annotations])
info['valid_flag'] = valid_flag
# lidarseg labels
lidarseg_labels_filepath = os.path.join(nusc.dataroot,
nusc.get('lidarseg', lidar_token)['filename'])
info['lidar_pts_labels_filepath'] = lidarseg_labels_filepath
if trans_data_for_show:
need_raw_data, need_calib, need_pts_label, need_bbox = False, True, False, False
points = np.fromfile(lidar_path, dtype=np.float32).reshape(-1, 5)
file_name = str(set_idx).zfill(6)
if need_raw_data:
# point cloud
pts_save_path = os.path.join(root_save_path, 'point_cloud')
mmcv.mkdir_or_exist(pts_save_path)
postfix = '.bin'
points.tofile(pts_save_path+ '/' + file_name + postfix)
if need_pts_label:
lidarseg_labels_filepath = os.path.join(nusc.dataroot,
nusc.get('lidarseg', lidar_token)['filename'])
pts_semantic_mask = np.fromfile(lidarseg_labels_filepath, dtype=np.uint8).reshape([-1, 1])
pts_semantic_mask = point_label_mapping[pts_semantic_mask]
need_ins_label = True
if need_ins_label:
# for instance label
isinstance_label = np.zeros_like(pts_semantic_mask)
point_indices = box_np_ops.points_in_rbbox(points, gt_boxes, origin=(0.5, 0.5, 0.5))
point_indices = point_indices.transpose()
for idx, cls_name in enumerate(names):
#ins label from 1, not 0
isinstance_label[point_indices[idx]] = idx + 1
pts_semantic_mask = np.concatenate((pts_semantic_mask, isinstance_label), axis=-1)
sem_save_path = os.path.join(root_save_path, 'sem_ins_mask')
mmcv.mkdir_or_exist(sem_save_path)
postfix = '.bin'
# we use uint8 as the mask label file type
pts_semantic_mask = np.array(pts_semantic_mask, dtype=np.uint8)
pts_semantic_mask.tofile(sem_save_path + '/' + file_name + postfix)
if need_calib:
#cam_list = ['CAM_FRONT', 'CAM_BACK']
cam_list = ['CAM_FRONT_RIGHT', 'CAM_FRONT_LEFT']
for cam_name in cam_list:
calib_s2l_r = info['cams'][cam_name]['sensor2lidar_rotation']
calib_s2l_t = info['cams'][cam_name]['sensor2lidar_translation'].reshape(1, 3)
calib_s2l_i = info['cams'][cam_name]['cam_intrinsic']
calib = np.concatenate((calib_s2l_r, calib_s2l_t, calib_s2l_i), axis=0)
postfix = '.txt'
calib_save_path = os.path.join(root_save_path, cam_name)
mmcv.mkdir_or_exist(calib_save_path)
np.savetxt(calib_save_path+'/'+file_name+postfix, calib, fmt='%.16f')
#need name??
_, image_name = os.path.split(info['cams'][cam_name]['data_path'])
line = image_name + ' ' + file_name + '.jpg'
if cam_name == 'CAM_FRONT_LEFT':
L_cam_path.append(line)
elif cam_name == 'CAM_FRONT_RIGHT':
R_cam_path.append(line)
if need_bbox:
save_boxes = np.zeros((0, 10))
num_bbox = len(info['gt_names'])
if num_bbox > 0:
mask = [name in nus_categories for name in info['gt_names']]
mask = np.array(mask)
save_gt_names = info['gt_names'][mask]
save_gt_boxes = info['gt_boxes'][mask]
save_gt_boxes = save_gt_boxes.reshape(-1, 7)
save_label = [nus_categories.index(name) for name in save_gt_names]
save_label = np.array(save_label).reshape(-1, 1)
save_boxes = save_gt_boxes[:, [0, 1, 2, 5, 3, 4, 6]]
save_boxes = save_boxes.reshape(-1, 7)
save_conf = np.zeros_like(save_label)
save_id = np.zeros_like(save_conf)
# need velocity?
save_gt_velocity = info['gt_velocity'][mask]
save_gt_velocity = save_gt_velocity.reshape(-1, 2)
save_velocity_scale = np.linalg.norm(save_gt_velocity, axis=1, keepdims=True)
save_velocity_dir = np.arctan2(save_gt_velocity[:, 1], save_gt_velocity[:, 0])
save_velocity_dir =save_velocity_dir.reshape(-1, 1)
save_velocity_scale[np.isnan(save_velocity_scale)] = 0.
save_velocity_dir[np.isnan(save_velocity_dir)] = 0.
save_boxes = np.concatenate((save_label, save_boxes, save_conf, save_id, save_velocity_scale, save_velocity_dir), axis=-1)
postfix = '.txt'
bbox_save_path = os.path.join(root_save_path, 'bbox_with_velocity')
mmcv.mkdir_or_exist(bbox_save_path)
np.savetxt(bbox_save_path+'/'+file_name+postfix, save_boxes, fmt='%.3f')
set_idx = set_idx + 1
if sample['scene_token'] in train_scenes:
train_nusc_infos.append(info)
else:
val_nusc_infos.append(info)
L_cam_path = np.array(L_cam_path)
R_cam_path = np.array(R_cam_path)
np.savetxt(root_save_path + '/' + 'L_image_name.txt', L_cam_path, fmt='%s')
| np.savetxt(root_save_path + '/' + 'R_image_name.txt', R_cam_path, fmt='%s') | numpy.savetxt |
import numpy as np
from astropy.io import fits
def stitch_all_images(all_hdus,date):
stitched_hdu_dict = {}
hdu_opamp_dict = {}
for (camera, filenum, imtype, opamp),hdu in all_hdus.items():
if (camera, filenum, imtype) not in hdu_opamp_dict.keys():
hdu_opamp_dict[(camera, filenum, imtype)] = {}
hdu_opamp_dict[(camera, filenum, imtype)][opamp] = hdu
for (camera, filenum, imtype),opampdict in hdu_opamp_dict.items():
outhdu = stitch_these_camera_data(opampdict)
outhdu.header.add_history("Stitched 4 opamps by quickreduce on {}".format(date))
stitched_hdu_dict[(camera, filenum, imtype, None)] = outhdu
return stitched_hdu_dict
def stitch_these_camera_data(hdudict):
xorients = {-1: 'l', 1: 'r'}
yorients = {-1: 'b', 1: 'u'}
img = {}
for opamp,hdu in hdudict.items():
header = hdu.header
xsign = np.sign(header['CHOFFX'])
ysign = np.sign(header['CHOFFY'])
location = yorients[ysign] + xorients[xsign]
#print("Imtype: {} In filenum: {} Camera: {} Opamp: {} located at {}".format(imtype, filenum, camera, opamp,
# location))
img[location] = hdu.data
trans = {}
## Transform opamps to the correct directions
trans['bl'] = img['bl']
trans['br'] = np.fliplr(img['br'])
trans['ul'] = np.flipud(img['ul'])
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 17 17:09:03 2019
@author: duttar
"""
import numpy as np
import math
from scipy.integrate import quad
from scipy.optimize import leastsq
from scipy.sparse import lil_matrix
import sys
sys.path.append('additional_scripts/geompars/')
sys.path.append('additional_scripts/greens/')
from Gorkhamakemesh import *
from greenfunction import *
from collections import namedtuple
def calc_moment(trired, p, q, r, slipall):
'''
calculates the moment magnitude for the non-planar fault
'''
N = trired.shape[0]
moment = np.array([])
for i in range(N):
ind1 = trired[i,:]
ind = ind1.astype(int)
x = p[ind]
y = q[ind]
z = r[ind]
ons = np.array([1,1,1])
xymat = np.vstack((x,y,ons))
yzmat = np.vstack((y,z,ons))
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import itertools
import logging
import numpy as np
import scipy as sp
import torch
from ml.rl.evaluation.cpe import CpeEstimate
from ml.rl.evaluation.evaluation_data_page import EvaluationDataPage
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class WeightedSequentialDoublyRobustEstimator:
NUM_SUBSETS_FOR_CB_ESTIMATES = 25
CONFIDENCE_INTERVAL = 0.9
NUM_BOOTSTRAP_SAMPLES = 50
BOOTSTRAP_SAMPLE_PCT = 0.5
def __init__(self, gamma):
self.gamma = gamma
def estimate(
self,
edp: EvaluationDataPage,
num_j_steps,
whether_self_normalize_importance_weights,
) -> CpeEstimate:
# For details, visit https://arxiv.org/pdf/1604.00923.pdf Section 5, 7, 8
(
actions,
rewards,
logged_propensities,
target_propensities,
estimated_q_values,
) = WeightedSequentialDoublyRobustEstimator.transform_to_equal_length_trajectories(
edp.mdp_id,
edp.action_mask.cpu().numpy(),
edp.logged_rewards.cpu().numpy().flatten(),
edp.logged_propensities.cpu().numpy().flatten(),
edp.model_propensities.cpu().numpy(),
edp.model_values.cpu().numpy(),
)
num_trajectories = actions.shape[0]
trajectory_length = actions.shape[1]
j_steps = [float("inf")]
if num_j_steps > 1:
j_steps.append(-1)
if num_j_steps > 2:
interval = trajectory_length // (num_j_steps - 1)
j_steps.extend([i * interval for i in range(1, num_j_steps - 1)])
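# Each entry of j_steps is the number of importance-sampled real-reward
# steps used before switching to the model estimate: float("inf") gives
# the pure importance-sampling estimator, -1 the pure model-based
# (direct) estimator, and the remaining values blended j-step returns.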
target_propensity_for_logged_action = np.sum(
np.multiply(target_propensities, actions), axis=2
)
estimated_q_values_for_logged_action = np.sum(
np.multiply(estimated_q_values, actions), axis=2
)
estimated_state_values = np.sum(
np.multiply(target_propensities, estimated_q_values), axis=2
)
importance_weights = target_propensity_for_logged_action / logged_propensities
importance_weights = np.cumprod(importance_weights, axis=1)
importance_weights = WeightedSequentialDoublyRobustEstimator.normalize_importance_weights(
importance_weights, whether_self_normalize_importance_weights
)
importance_weights_one_earlier = (
np.ones([num_trajectories, 1]) * 1.0 / num_trajectories
)
importance_weights_one_earlier = np.hstack(
[importance_weights_one_earlier, importance_weights[:, :-1]]
)
discounts = np.logspace(
start=0, stop=trajectory_length - 1, num=trajectory_length, base=self.gamma
)
j_step_return_trajectories = []
for j_step in j_steps:
j_step_return_trajectories.append(
WeightedSequentialDoublyRobustEstimator.calculate_step_return(
rewards,
discounts,
importance_weights,
importance_weights_one_earlier,
estimated_state_values,
estimated_q_values_for_logged_action,
j_step,
)
)
j_step_return_trajectories = np.array(j_step_return_trajectories)
j_step_returns = np.sum(j_step_return_trajectories, axis=1)
if len(j_step_returns) == 1:
weighted_doubly_robust = j_step_returns[0]
weighted_doubly_robust_std_error = 0.0
else:
# break trajectories into several subsets to estimate confidence bounds
infinite_step_returns = []
num_subsets = int(
min(
num_trajectories / 2,
WeightedSequentialDoublyRobustEstimator.NUM_SUBSETS_FOR_CB_ESTIMATES,
)
)
interval = num_trajectories / num_subsets
for i in range(num_subsets):
trajectory_subset = np.arange(
int(i * interval), int((i + 1) * interval)
)
importance_weights = (
target_propensity_for_logged_action[trajectory_subset]
/ logged_propensities[trajectory_subset]
)
importance_weights = np.cumprod(importance_weights, axis=1)
importance_weights = WeightedSequentialDoublyRobustEstimator.normalize_importance_weights(
importance_weights, whether_self_normalize_importance_weights
)
importance_weights_one_earlier = (
np.ones([len(trajectory_subset), 1]) * 1.0 / len(trajectory_subset)
)
importance_weights_one_earlier = np.hstack(
[importance_weights_one_earlier, importance_weights[:, :-1]]
)
infinite_step_return = np.sum(
WeightedSequentialDoublyRobustEstimator.calculate_step_return(
rewards[trajectory_subset],
discounts,
importance_weights,
importance_weights_one_earlier,
estimated_state_values[trajectory_subset],
estimated_q_values_for_logged_action[trajectory_subset],
float("inf"),
)
)
infinite_step_returns.append(infinite_step_return)
# Compute weighted_doubly_robust mean point estimate using all data
weighted_doubly_robust = self.compute_weighted_doubly_robust_point_estimate(
j_steps,
num_j_steps,
j_step_returns,
infinite_step_returns,
j_step_return_trajectories,
)
# Use bootstrapping to compute weighted_doubly_robust standard error
bootstrapped_means = []
sample_size = int(
WeightedSequentialDoublyRobustEstimator.BOOTSTRAP_SAMPLE_PCT
* num_subsets
)
for _ in range(
WeightedSequentialDoublyRobustEstimator.NUM_BOOTSTRAP_SAMPLES
):
random_idxs = np.random.choice(num_j_steps, sample_size, replace=False)
random_idxs.sort()
wdr_estimate = self.compute_weighted_doubly_robust_point_estimate(
j_steps=[j_steps[i] for i in random_idxs],
num_j_steps=sample_size,
j_step_returns=j_step_returns[random_idxs],
infinite_step_returns=infinite_step_returns,
j_step_return_trajectories=j_step_return_trajectories[random_idxs],
)
bootstrapped_means.append(wdr_estimate)
weighted_doubly_robust_std_error = np.std(bootstrapped_means)
episode_values = np.sum(np.multiply(rewards, discounts), axis=1)
denominator = np.nanmean(episode_values)
if abs(denominator) < 1e-6:
return CpeEstimate(
raw=0.0, normalized=0.0, raw_std_error=0.0, normalized_std_error=0.0
)
return CpeEstimate(
raw=weighted_doubly_robust,
normalized=weighted_doubly_robust / denominator,
raw_std_error=weighted_doubly_robust_std_error,
normalized_std_error=weighted_doubly_robust_std_error / denominator,
)
def compute_weighted_doubly_robust_point_estimate(
self,
j_steps,
num_j_steps,
j_step_returns,
infinite_step_returns,
j_step_return_trajectories,
):
low_bound, high_bound = WeightedSequentialDoublyRobustEstimator.confidence_bounds(
infinite_step_returns,
WeightedSequentialDoublyRobustEstimator.CONFIDENCE_INTERVAL,
)
# decompose error into bias + variance
j_step_bias = np.zeros([num_j_steps])
where_lower = np.where(j_step_returns < low_bound)[0]
j_step_bias[where_lower] = low_bound - j_step_returns[where_lower]
where_higher = np.where(j_step_returns > high_bound)[0]
j_step_bias[where_higher] = j_step_returns[where_higher] - high_bound
covariance = np.cov(j_step_return_trajectories)
error = covariance + j_step_bias.T * j_step_bias
# minimize mse error
constraint = {"type": "eq", "fun": lambda x: np.sum(x) - 1.0}
x = np.zeros([len(j_steps)])
res = sp.optimize.minimize(
mse_loss,
x,
args=error,
constraints=constraint,
bounds=[(0, 1) for _ in range(x.shape[0])],
)
x = np.array(res.x)
return float(np.dot(x, j_step_returns))
import sys
sys.path.append("/ubc_primitives/primitives/regCCFS/src")
import scipy.io
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.tri as tri
plt.style.use('seaborn-white')
from predict_from_CCF import predictFromCCF
from utils.commonUtils import islogical
from utils.ccfUtils import mat_unique
def plotCCFDecisionSurface(name, CCF, x1Lims, x2Lims, XTrain, X, Y, nx1Res=250, nx2Res=250, n_contours_or_vals=[], plot_X=True):
xi = np.linspace(x1Lims[0], x1Lims[1], nx1Res)
yi = np.linspace(x2Lims[0], x2Lims[1], nx2Res)
x1, x2 = np.meshgrid(xi, yi)
x1i = np.expand_dims(x1.flatten(order='F'), axis=1)
x2i = np.expand_dims(x2.flatten(order='F'), axis=1)
XTest = np.concatenate((x1i, x2i), axis=1)
preds, _, _ = predictFromCCF(CCF, XTest)
uniquePreds, _, _ = mat_unique(preds)
nVals = uniquePreds.size
numericPreds = np.empty((nVals, 1))
numericPreds.fill(np.nan)
numericPreds = preds
numericPreds = np.reshape(numericPreds, (x1.shape), order='F')
if len(n_contours_or_vals) == 0:
if nVals >= (preds.shape[0])/2:
# Presumably regression
n_contours_or_vals = 50
else:
n_contours_or_vals = np.arange(1.5, (nVals-0.5)+1, 1)
colors = ['g', 'm', 'k', 'y']
markers = ['x', '+', 'o', '*']
# Plot Classes
if Y.shape[1] != 1:
Y = np.sum(np.multiply(Y, np.arange(1, Y.shape[1]+1)), axis=1)
elif islogical(Y):
Y = Y + 1
plt.contourf(x1, x2, numericPreds, n_contours_or_vals, cmap=plt.cm.get_cmap('viridis'))
plt.contour(x1, x2, numericPreds, n_contours_or_vals, colors='k') # negative contours will be dashed by default
if plot_X:
for k in range(1, int(np.max(Y)) + 1):
    # NOTE: the body of this loop was truncated in the source; the lines below
    # are a plausible reconstruction that scatters the class-k training points
    # using the colour/marker tables defined above.
    mask_k = np.asarray(Y == k).flatten()
    plt.plot(X[mask_k, 0], X[mask_k, 1], markers[(k - 1) % len(markers)],
             color=colors[(k - 1) % len(colors)])
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image, ImageDraw, ImageColor, ImageFont, ImageFile
from urllib.request import urlopen, Request
from io import BytesIO
import joblib
from open_images_starter import text, visual
from open_images_starter.region import Region
import os
ImageFile.LOAD_TRUNCATED_IMAGES = True
id2rot = joblib.load(os.path.join('data','id2rot.joblib'))
id2set = joblib.load(os.path.join('data','id2set.joblib'))
mid2label = joblib.load(os.path.join('data','mid2label.joblib'))
def id2url(index):
return 'https://s3.amazonaws.com/open-images-dataset/' + id2set[index] + '/' + index + '.jpg'
def get_image_from_s3(index, save_path=None, show=False):
image = get_image(id2url(index), rotate=id2rot[index])
if save_path:
os.makedirs(os.path.dirname(save_path), exist_ok=True)
image.save(save_path, format="JPEG", quality=90)
if show:
display_image(image)
return image
def get_image(url, rotate='auto'):
request = Request(url, headers={'User-Agent': "Magic Browser"})
response = urlopen(request, timeout=30)
image_data = response.read()
image_data = BytesIO(image_data)
pil_image = Image.open(image_data)
if rotate=='auto':
rotate = None
try:
exif = pil_image._getexif()
if exif[274] == 3:
rotate = 180
elif exif[274] == 6:
rotate = 270
elif exif[274] == 8:
rotate = 90
except:
pass
rotate = np.nan_to_num(rotate)
if rotate:
pil_image = pil_image.rotate(rotate, expand=True)
return pil_image.convert('RGBA').convert('RGB')
def display_image(image):
plt.figure(figsize=(20, 15))
plt.grid(False)
plt.imshow(image)
plt.draw()
while not plt.waitforbuttonpress(0): pass
plt.close()
def draw_bounding_box_on_image(image,
ymin,
xmin,
ymax,
xmax,
color,
font,
thickness=4,
display_str_list=()):
"""Adds a bounding box to an image."""
draw = ImageDraw.Draw(image)
im_width, im_height = image.size
(left, right, top, bottom) = (xmin * im_width, xmax * im_width,
ymin * im_height, ymax * im_height)
draw.line([(left, top), (left, bottom), (right, bottom), (right, top),
(left, top)],
width=thickness,
fill=color)
# If the total height of the display strings added to the top of the bounding
# box exceeds the top of the image, stack the strings below the bounding box
# instead of above.
display_str_heights = [font.getsize(ds)[1] for ds in display_str_list]
# Each display_str has a top and bottom margin of 0.05x.
total_display_str_height = (1 + 2 * 0.05) * sum(display_str_heights)
if top > total_display_str_height:
text_bottom = top
else:
text_bottom = bottom + total_display_str_height
# Reverse list and print from bottom to top.
for display_str in display_str_list[::-1]:
text_width, text_height = font.getsize(display_str)
margin = np.ceil(0.05 * text_height)
draw.rectangle([(left, text_bottom - text_height - 2 * margin),
(left + text_width, text_bottom)],
fill=color)
draw.text((left + margin, text_bottom - text_height - margin),
display_str,
fill="black",
font=font)
text_bottom -= text_height - 2 * margin
def get_image_boxes(objects, index):
result = {"detection_boxes": [], "detection_class_names": []}
for cat, obj in objects.items():
for box, id in zip(*obj): #box = ymin, xmin, ymax, xmax
if id == index:
result["detection_boxes"].append(box)
result["detection_class_names"].append(cat)
return result
def draw_boxes(image, boxes, class_names, scores=None, max_boxes=None, min_score=None, uid=None, style='new', save_path=None, show=False, show_size=False):
"""Overlay labeled boxes on an image with formatted scores and label names."""
used_classes = set()
used_uid = set()
inds = []
for i in range(len(boxes)):
if (min_score is None or scores is None or scores[i] >= min_score) and (uid is None or uid[i] not in used_uid):
inds.append(i)
used_classes.add(class_names[i])
if uid is not None:
used_uid.add(uid[i])
if max_boxes is not None and len(inds)>=max_boxes:
break
if style == 'new':
colors = visual.generate_colors(len(used_classes), saturation=0.8, hue_offset=0.35, hue_range=0.5)
color_map = dict(zip(used_classes, colors))
else:
colors = list(ImageColor.colormap.values())
font = ImageFont.load_default()
image = np.asarray(image).copy()
for i in inds:
ymin, xmin, ymax, xmax = tuple(boxes[i].tolist())
class_name = class_names[i]
display_str = mid2label[class_name]
if scores is not None:
display_str = "{}: {}%".format(display_str, int(100 * scores[i]))
if show_size:
display_str = "{} ({:.4f})".format(display_str, (xmax-xmin)*(ymax-ymin))
if style=='new':
im_height, im_width = image.shape[:2]
region = Region(xmin*im_width,xmax*im_width,ymin*im_height,ymax*im_height)
image = visual.draw_regions(image, [region], color=(0, 0, 0), thickness=10, strength=0.3)
image = visual.draw_regions(image, [region], color=color_map[class_name], thickness=4, overlay=True)
image = text.label_region(image, display_str, region, color=color_map[class_name],
bg_opacity=0.7, overlay=True, font_size=20, inside=region.top <= 30)
else:
color = colors[hash(class_name) % len(colors)]
image_pil = Image.fromarray(np.uint8(image))
#!/usr/bin/python
# -*- coding:utf-8 -*-
import csv
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
import sys
if __name__ == "__main__":
name = sys.argv[1]
path = "D:\\2018BK\\overfit\\"+name
# read the csv with pandas
data = pd.read_csv(path)  # columns '0'-'39' are the features, '40' is the target
print(data)
x = data[['0', '1', '2','3','4','5','6','7','8','9','10','11','12','13','14','15','16','17','18','19','20','21','22','23','24','25','26','27','28','29','30','31','32','33','34','35','36','37','38','39']]
# x = data[['TV', 'Radio']]
y = data[['40']]
print(x)
print(y)
x_train, x_test, y_train, y_test = train_test_split(x, y, random_state=1)
print('x_test',x_test)
# print x_train, y_train
linreg = LinearRegression()
model = linreg.fit(x_train, y_train)
print(model)
print("coefficients:", linreg.coef_)
print(linreg.intercept_)
y_hat = linreg.predict(np.array(x_test))
import numpy as np
import matplotlib.pyplot as plt
import sys
# from scipy.optimize import curve_fit
np.random.seed(42)
def nn_sum(x, i, j, k, L):
"""
Args:
x: Spin configuration
i, j, k: Indices describing the position of one spin
L: System side length
Returns:
Sum of the spins in x which are nearest neighbors of (i, j, k)
"""
L_m = L - 1 # save for cheap reuse
result = x[(i+1) if i < L_m else 0, j, k] + \
x[(i-1) if i > 0 else L_m, j, k]
result += x[i, (j+1) if j < L_m else 0, k] + \
x[i, (j-1) if j > 0 else L_m, k]
result += x[i, j, (k+1) if k < L_m else 0] + \
x[i, j, (k-1) if k > 0 else L_m]
return int(result)
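# Illustrative sketch (not part of the original script): on a fully aligned
# lattice every spin has six parallel neighbours, which gives a quick sanity
# check for the periodic nearest-neighbour sum.
def _example_nn_sum():
    L = 4
    x = np.ones((L, L, L), dtype=int)
    assert nn_sum(x, 0, 0, 0, L) == 6
    return nn_sum(x, 0, 0, 0, L)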
def move(x, E, J, L, n_moves=0, E_max=0, E_d=0, E_init=0):
"""
Args:
x: Spin configuration
E: Current energy of system x
J: Coupling constant
L: System side length
n_moves: Number of moves == flip attempts
E_max: Energy limit
E_d: Demon energy storage level
E_init: If given and non-zero: Target E_ens to reach during init
Returns:
x, E, demon energy distribution E_d_distr and calculated inverse
temperature beta after n_moves Monte Carlo steps
"""
two_J = 2 * J # for cheaper reuse
if E_init != 0: # init part: bring the energy to starting level
print('Starting from energy: {}, raising to: {}'.format(E, E_init))
while E < E_init:
i, j, k = np.random.randint(L, size=3)
x_old = x[i, j, k]
nn = nn_sum(x, i, j, k, L)
delta_E = two_J * x_old * nn
if delta_E > 0:
x[i, j, k] *= -1
E += delta_E
return x, E
else: # this is the main part: flip attempts using demon
ijk = np.random.randint(L, size=(n_moves, 3))
E_d_distr = np.zeros(n_moves)
import os
import sys
import numpy as np
import cv2
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(BASE_DIR)
from nuscenes2kitti_object import nuscenes2kitti_object
import ipdb
from PIL import Image
def pto_depth_map(velo_points,
H=32, W=256, C=5, dtheta=np.radians(1.33), dphi=np.radians(90. / 256.0)):
"""
Ref:https://github.com/Durant35/SqueezeSeg/blob/master/src/nodes/segment_node.py
Project velodyne points into front view depth map.
:param velo_points: velodyne points in shape [:,4]
:param H: the row num of depth map, could be 64(default), 32, 16
:param W: the col num of depth map
:param C: the channel size of depth map
3 cartesian coordinates (x; y; z),
an intensity measurement and
range r = sqrt(x^2 + y^2 + z^2)
:param dtheta: the delta theta of H, in radian
:param dphi: the delta phi of W, in radian
:return: `depth_map`: the projected depth map of shape[H,W,C]
"""
x, y, z, i = velo_points[:, 1], -velo_points[:, 0], velo_points[:, 2], velo_points[:, 3]
d = np.sqrt(x ** 2 + y ** 2 + z ** 2)
r = np.sqrt(x ** 2 + y ** 2)
d[d == 0] = 0.000001
r[r == 0] = 0.000001
phi = np.radians(45.) - np.arcsin(y / r)
phi_ = (phi / dphi).astype(int)
phi_[phi_ < 0] = 0
phi_[phi_ >= W] = W-1
# print(np.min(phi_))
# print(np.max(phi_))
#
# print z
# print np.radians(2.)
# print np.arcsin(z/d)
theta = np.radians(2.) - np.arcsin(z / d)
# print theta
theta_ = (theta / dtheta).astype(int)
# print theta_
theta_[theta_ < 0] = 0
theta_[theta_ >= H] = H-1
# print theta,phi,theta_.shape,phi_.shape
# print(np.min((phi/dphi)),np.max((phi/dphi)))
# np.savetxt('./dump/'+'phi'+"dump.txt",(phi_).astype(np.float32), fmt="%f")
# np.savetxt('./dump/'+'phi_'+"dump.txt",(phi/dphi).astype(np.float32), fmt="%f")
# print(np.min(theta_))
# print(np.max(theta_))
depth_map = np.zeros((H, W, C))
# 5 channels according to paper
if C == 5:
depth_map[theta_, phi_, 0] = x
depth_map[theta_, phi_, 1] = y
depth_map[theta_, phi_, 2] = z
depth_map[theta_, phi_, 3] = i
depth_map[theta_, phi_, 4] = d
else:
depth_map[theta_, phi_, 0] = i
return depth_map
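# Illustrative sketch (not part of the original file): project a random
# (x, y, z, intensity) cloud into the 32 x 256 x 5 front-view map; the ranges
# below are arbitrary example values.
def _example_pto_depth_map():
    pts = np.random.uniform(low=[-10.0, 0.0, -2.0, 0.0],
                            high=[10.0, 40.0, 2.0, 1.0], size=(1000, 4))
    fv = pto_depth_map(pts, H=32, W=256, C=5)
    assert fv.shape == (32, 256, 5)
    return fv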
def keep_32(velo_points,
H=64, W=512, C=5, dtheta=np.radians(1.33), dphi=np.radians(90. / 512.0), odd=False,scale=1):
x, y, z= velo_points[:, 0], velo_points[:, 1], velo_points[:, 2]
d = np.sqrt(x ** 2 + y ** 2 + z ** 2)
r = np.sqrt(x ** 2 + y ** 2)
d[d == 0] = 0.000001
r[r == 0] = 0.000001
phi = np.radians(45.) - np.arcsin(y / r)
phi_ = (phi / dphi).astype(int)
phi_[phi_ < 0] = 0
phi_[phi_ >= W] = W-1
theta = np.radians(2.) - np.arcsin(z / d)
theta_ = (theta / dtheta).astype(int)
theta_[theta_ < 0] = 0
theta_[theta_ >= H] = H-1
if odd:
keep_v = np.mod(theta_,2)
r"""Routines for running triangulations in parallel.
.. todo::
* parallelism through threading
"""
from cgal4py import PY_MAJOR_VERSION, _use_multiprocessing
from cgal4py.delaunay import Delaunay, tools, _get_Delaunay
from cgal4py import domain_decomp
from cgal4py.domain_decomp import GenericTree
import numpy as np
import os
import sys
import time
import copy
import struct
import pstats
if PY_MAJOR_VERSION == 2:
import cPickle as pickle
else:
import pickle
if _use_multiprocessing:
import multiprocessing as mp
from multiprocessing import Process as mp_Process
else:
mp = object
mp_Process = object
import warnings
from datetime import datetime
try:
from mpi4py import MPI
mpi_loaded = True
except:
mpi_loaded = False
warnings.warn("mpi4py could not be imported.")
import ctypes
def _get_mpi_type(np_type):
r"""Get the correpsonding MPI data type for a given numpy data type.
Args:
np_type (str, type): String identifying a numpy data type or a numpy
data type.
Returns:
int: MPI data type.
Raises:
ValueError: If `np_type` is not supported.
"""
if not mpi_loaded:
raise Exception("mpi4py could not be imported.")
if np_type in ('i', 'int32', np.int32):
mpi_type = MPI.INT
elif np_type in ('l', 'int64', np.int64):
mpi_type = MPI.LONG
elif np_type in ('f', 'float32', np.float32):
mpi_type = MPI.FLOAT
elif np_type in ('d', 'float64', np.float64):
mpi_type = MPI.DOUBLE
else:
raise ValueError("Unrecognized type: {}".format(np_type))
return mpi_type
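# Quick reference for _get_mpi_type (values taken from the mapping above):
# both numpy dtype objects and their string codes are accepted, e.g.
# _get_mpi_type('d') -> MPI.DOUBLE and _get_mpi_type(np.int32) -> MPI.INT,
# while an unsupported type such as np.float16 raises ValueError.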
def _generate_filename(name, unique_str=None, ext='.dat'):
fname = '{}{}'.format(name, ext)
if isinstance(unique_str, str):
fname = '{}_{}'.format(unique_str, fname)
return fname
def _prof_filename(unique_str=None):
return _generate_filename('prof', unique_str=unique_str, ext='.dat')
def _tess_filename(unique_str=None):
return _generate_filename('tess', unique_str=unique_str, ext='.dat')
def _vols_filename(unique_str=None):
return _generate_filename('vols', unique_str=unique_str, ext='.npy')
def _leaf_tess_filename(leaf_id, unique_str=None):
return _generate_filename('leaf{}'.format(leaf_id),
unique_str=unique_str, ext='.dat')
def _final_leaf_tess_filename(leaf_id, unique_str=None):
return _generate_filename('finaleleaf{}'.format(leaf_id),
unique_str=unique_str, ext='.dat')
def write_mpi_script(fname, read_func, taskname, unique_str=None,
use_double=False, use_python=False, use_buffer=False,
overwrite=False, profile=False, limit_mem=False,
suppress_final_output=False):
r"""Write an MPI script for calling MPI parallelized triangulation.
Args:
fname (str): Full path to file where MPI script will be saved.
read_func (func): Function for reading in points. The function should
return a dictionary with 'pts' key at a minimum corresponding to
the 2D array of points that should be triangulated. Additional
optional keys include:
* periodic (bool): True if the domain is periodic.
* left_edge (np.ndarray of float64): Left edges of the domain.
* right_edge (np.ndarray of float64): Right edges of the domain.
A list of lines resulting in the above mentioned dictionary is also
accepted.
taskname (str): Name of task to be passed to
:class:`cgal4py.parallel.DelaunayProcessMPI`.
unique_str (str, optional): Unique string identifying the domain
decomposition that is passed to `cgal4py.parallel.ParallelLeaf` for
file naming. Defaults to None.
use_double (bool, optional): If True, the triangulation is forced to
use 64bit integers reguardless of if there are too many points for
32bit. Otherwise 32bit integers are used so long as the number of
points is <=4294967295. Defaults to False.
use_python (bool, optional): If True, communications are done in python
using mpi4py. Otherwise, communications are done in C++ using MPI.
Defaults to False.
use_buffer (bool, optional): If True, communications are done by way of
buffers rather than pickling python objects. Defaults to False.
overwrite (bool): If True, any existing script with the same name is
overwritten. Defaults to False.
profile (bool, optional): If True, cProfile is used to profile the code
and output is printed to the screen. This can also be a string
specifying the full path to the file where the output should be
saved. Defaults to False.
limit_mem (bool, optional): If False, the triangulation results from
each process are moved to local memory using `multiprocessing`
pipes. If True, each process writes out tessellation info to
files which are then incrementally loaded as consolidation occurs.
Defaults to False.
suppress_final_output (bool, optional): If True, output of the result
to file is suppressed. This is mainly for testing purposes.
Defaults to False.
"""
if not mpi_loaded:
raise Exception("mpi4py could not be imported.")
if os.path.isfile(fname):
if overwrite:
os.remove(fname)
else:
return
readcmds = isinstance(read_func, list)
# Import lines
lines = [
"import numpy as np",
"from mpi4py import MPI"]
if profile:
lines += [
"import cProfile",
"import pstats"]
lines += ["from cgal4py import parallel"]
if not readcmds:
lines.append(
"from {} import {} as load_func".format(read_func.__module__,
read_func.__name__))
# Lines establishing variables
lines += [
"",
"comm = MPI.COMM_WORLD",
"size = comm.Get_size()",
"rank = comm.Get_rank()",
"",
"unique_str = '{}'".format(unique_str),
"use_double = {}".format(use_double),
"limit_mem = {}".format(limit_mem),
"use_python = {}".format(use_python),
"use_buffer = {}".format(use_buffer),
"suppress_final_output = {}".format(suppress_final_output),
""]
# Commands to read in data
lines += [
"if rank == 0:"]
if readcmds:
lines += [" "+l for l in read_func]
else:
lines.append(
" load_dict = load_func()")
lines += [
" pts = load_dict['pts']",
" left_edge = load_dict.get('left_edge', np.min(pts, axis=0))",
" right_edge = load_dict.get('right_edge', np.max(pts, axis=0))",
" periodic = load_dict.get('periodic', False)",
" tree = load_dict.get('tree', None)",
"else:",
" pts = None",
" left_edge = None",
" right_edge = None",
" periodic = None",
" tree = None"]
# Start profiler if desired
if profile:
lines += [
"if (rank == 0):",
" pr = cProfile.Profile()",
" pr.enable()",
""]
# Run
lines += [
"p = parallel.DelaunayProcessMPI('{}',".format(taskname),
" pts, tree, left_edge=left_edge, right_edge=right_edge,",
" periodic=periodic, use_double=use_double, unique_str=unique_str,",
" limit_mem=limit_mem, use_python=use_python,",
" use_buffer=use_buffer,",
" suppress_final_output=suppress_final_output)",
"p.run()"]
if profile:
lines += [
"",
"if (rank == 0):",
" pr.disable()"]
if isinstance(profile, str):
lines.append(
" pr.dump_stats('{}')".format(profile))
else:
lines.append(
" pstats.Stats(pr).sort_stats('time').print_stats(25)")
with open(fname, 'w') as f:
f.write("\n".join(lines))
def ParallelDelaunay(pts, tree, nproc, use_mpi=True, **kwargs):
r"""Return a triangulation that is constructed in parallel.
Args:
pts (np.ndarray of float64): (n,m) array of n m-dimensional
coordinates.
tree (object): Domain decomposition tree for splitting points among the
processes. Produced by :meth:`cgal4py.domain_decomp.tree`.
nproc (int): Number of processors that should be used.
use_mpi (bool, optional): If True, `mpi4py` is used for communications.
Otherwise `multiprocessing` is used. Defaults to True.
\*\*kwargs: Additional keywords arguments are passed to the correct
parallel implementation of the triangulation.
Returns:
:class:`cgal4py.delaunay.Delaunay2` or
:class:`cgal4py.delaunay.Delaunay3`: consolidated 2D or 3D
triangulation object.
"""
if use_mpi:
unique_str = datetime.today().strftime("%Y%j%H%M%S")
fpick = _generate_filename("dict", unique_str=unique_str)
out = dict(pts=pts, tree=GenericTree.from_tree(tree))
if PY_MAJOR_VERSION == 2:
with open(fpick, 'wb') as fd:
pickle.dump(out, fd, pickle.HIGHEST_PROTOCOL)
assert(os.path.isfile(fpick))
read_lines = ["import cPickle",
"with open('{}', 'rb') as fd:".format(fpick),
" load_dict = cPickle.load(fd)"]
else:
with open(fpick, 'wb') as fd:
pickle.dump(out, fd)
assert(os.path.isfile(fpick))
read_lines = ["import pickle",
"with open('{}', 'rb') as fd:".format(fpick),
" load_dict = pickle.load(fd)"]
ndim = tree.ndim
out = ParallelDelaunayMPI(read_lines, ndim, nproc, **kwargs)
os.remove(fpick)
else:
if _use_multiprocessing:
out = ParallelDelaunayMulti(pts, tree, nproc, **kwargs)
else:
raise RuntimeError("The multiprocessing version of parallelism " +
"is currently disabled. To enable it, set " +
"_use_multiprocessing to True in " +
"cgal4py/__init__.py.")
return out
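# Hedged usage sketch for ParallelDelaunay: the kd-tree call mirrors the one
# the MPI worker makes internally (domain_decomp.tree with 'kdtree'); names
# and leaf/process counts here are illustrative only.
def _example_parallel_delaunay(nproc=4):
    pts = np.random.rand(10000, 3)
    tree = domain_decomp.tree('kdtree', pts, pts.min(axis=0), pts.max(axis=0),
                              periodic=False, nleaves=nproc)
    return ParallelDelaunay(pts, tree, nproc, use_mpi=True)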
def ParallelVoronoiVolumes(pts, tree, nproc, use_mpi=True, **kwargs):
r"""Return a triangulation that is constructed in parallel.
Args:
pts (np.ndarray of float64): (n,m) array of n m-dimensional
coordinates.
tree (object): Domain decomposition tree for splitting points among the
processes. Produced by :meth:`cgal4py.domain_decomp.tree`.
nproc (int): Number of processors that should be used.
use_mpi (bool, optional): If True, `mpi4py` is used for communications.
Otherwise `multiprocessing` is used. Defaults to True.
\*\*kwargs: Additional keywords arguments are passed to the correct
parallel implementation of the triangulation.
Returns:
np.ndarray of float64: (n,) array of n voronoi volumes for the provided
points.
"""
if use_mpi:
unique_str = datetime.today().strftime("%Y%j%H%M%S")
fpick = _generate_filename("dict", unique_str=unique_str)
out = dict(pts=pts, tree=GenericTree.from_tree(tree))
if PY_MAJOR_VERSION == 2:
with open(fpick, 'wb') as fd:
pickle.dump(out, fd, pickle.HIGHEST_PROTOCOL)
assert(os.path.isfile(fpick))
read_lines = ["import cPickle",
"with open('{}', 'rb') as fd:".format(fpick),
" load_dict = cPickle.load(fd)"]
else:
with open(fpick, 'wb') as fd:
pickle.dump(out, fd)
assert(os.path.isfile(fpick))
read_lines = ["import pickle",
"with open('{}', 'rb') as fd:".format(fpick),
" load_dict = pickle.load(fd)"]
ndim = tree.ndim
out = ParallelVoronoiVolumesMPI(read_lines, ndim, nproc, **kwargs)
os.remove(fpick)
else:
if _use_multiprocessing:
out = ParallelVoronoiVolumesMulti(pts, tree, nproc, **kwargs)
else:
raise RuntimeError("The multiprocessing version of parallelism " +
"is currently disabled. To enable it, set " +
"_use_multiprocessing to True in " +
"cgal4py/__init__.py.")
return out
def ParallelDelaunayMPI(*args, **kwargs):
r"""Return a triangulation that is constructed in parallel using MPI.
See :func:`cgal4py.parallel.ParallelMPI` for information on arguments.
Returns:
A Delaunay triangulation class like :class:`cgal4py.delaunay.Delaunay2`
(but for the appropriate number of dimensions) will be returned.
"""
return ParallelMPI('triangulate', *args, **kwargs)
def ParallelVoronoiVolumesMPI(*args, **kwargs):
r"""Return the voronoi cell volumes after constructing triangulation in
parallel using MPI. See :func:`cgal4py.parallel.ParallelMPI` for
information on arguments.
Returns:
np.ndarray of float64: (n,) array of n voronoi volumes for the provided
points.
"""
return ParallelMPI('volumes', *args, **kwargs)
def ParallelMPI(task, read_func, ndim, nproc, use_double=False,
limit_mem=False, use_python=False, use_buffer=False,
profile=False, suppress_final_output=False):
r"""Return results form a triangulation that is constructed in parallel
using MPI.
Args:
task (str): Task for which results should be returned.
Values include:
* 'triangulate': Return the Delaunay triangulation class.
* 'volumes': Return the volumes of the Voronoi cells associated with
each point.
read_func (func): Function for reading in points. The function should
return a dictionary with 'pts' key at a minimum corresponding to
the 2D array of points that should be triangulated. Additional
optional keys include:
* periodic (bool): True if the domain is periodic.
* left_edge (np.ndarray of float64): Left edges of the domain.
* right_edge (np.ndarray of float64): Right edges of the domain.
A list of lines resulting in the above mentioned dictionary is also
accepted.
ndim (int): Number of dimension in the domain.
nproc (int): Number of processors that should be used.
use_double (bool, optional): If True, the triangulation is forced to
use 64bit integers reguardless of if there are too many points for
32bit. Otherwise 32bit integers are used so long as the number of
points is <=4294967295. Defaults to False.
limit_mem (bool, optional): If False, the triangulation results from
each process are moved to local memory using `multiprocessing`
pipes. If True, each process writes out tessellation info to
files which are then incrementally loaded as consolidation occurs.
Defaults to False.
use_python (bool, optional): If True, communications are done in python
using mpi4py. Otherwise, communications are done in C++ using MPI.
Defaults to False.
use_buffer (bool, optional): If True, communications are done by way of
buffers rather than pickling python objects. Defaults to False.
profile (bool, optional): If True, cProfile is used to profile the code
and output is printed to the screen. This can also be a string
specifying the full path to the file where the output should be
saved. Defaults to False.
suppress_final_output (bool, optional): If True, output of the result
to file is suppressed. This is mainly for testing purposes.
Defaults to False.
Returns:
Dependent on task. For 'triangulate', a Delaunay triangulation class
like :class:`cgal4py.delaunay.Delaunay2` (but for the appropriate
number of dimensions) will be returned. For 'volumes', a numpy
array of floating point volumes will be returned where values
less than zero indicate infinite volumes.
Raises:
ValueError: If the task is not one of the accepted values listed above.
RuntimeError: If the MPI script does not result in a file containing
the triangulation.
"""
if task not in ['triangulate', 'volumes']:
raise ValueError("Unsupported task: {}".format(task))
unique_str = datetime.today().strftime("%Y%j%H%M%S")
fscript = '{}_mpi.py'.format(unique_str)
write_mpi_script(fscript, read_func, task, limit_mem=limit_mem,
unique_str=unique_str, use_double=use_double,
use_python=use_python, use_buffer=use_buffer,
profile=profile,
suppress_final_output=suppress_final_output)
cmd = 'mpiexec -np {} python {}'.format(nproc, fscript)
os.system(cmd)
os.remove(fscript)
if suppress_final_output:
return
if task == 'triangulate':
fres = _tess_filename(unique_str=unique_str)
elif task == 'volumes':
fres = _vols_filename(unique_str=unique_str)
if os.path.isfile(fres):
with open(fres, 'rb') as fd:
if task == 'triangulate':
out = _get_Delaunay(ndim=ndim).from_serial_buffer(fd)
elif task == 'volumes':
out = np.frombuffer(bytearray(fd.read()), dtype='d')
os.remove(fres)
return out
else:
raise RuntimeError("The tessellation file does not exist. " +
"There must have been an error while running the " +
"parallel script.")
if _use_multiprocessing:
def ParallelDelaunayMulti(*args, **kwargs):
r"""Return a triangulation that is constructed in parallel using the
`multiprocessing` package. See :func:`cgal4py.parallel.ParallelMulti`
for information on arguments.
Returns:
A Delaunay triangulation class like :class:`cgal4py.delaunay.Delaunay2`
(but for the appropriate number of dimensions) will be returned.
"""
return ParallelMulti('triangulate', *args, **kwargs)
def ParallelVoronoiVolumesMulti(*args, **kwargs):
r"""Return the voronoi cell volumes after constructing triangulation in
parallel. See :func:`cgal4py.parallel.ParallelMulti` for information on
arguments.
Returns:
np.ndarray of float64: (n,) array of n voronoi volumes for the provided
points.
"""
return ParallelMulti('volumes', *args, **kwargs)
def ParallelMulti(task, pts, tree, nproc, use_double=False, limit_mem=False):
r"""Return results from a triangulation that is constructed in parallel
using the `multiprocessing` package.
Args:
task (str): Task for which results should be returned. Values
include:
* 'triangulate': Return the Delaunay triangulation class.
* 'volumes': Return the volumes of the Voronoi cells associated with
each point.
pts (np.ndarray of float64): (n,m) array of n m-dimensional
coordinates.
tree (object): Domain decomposition tree for splitting points among the
processes. Produced by :meth:`cgal4py.domain_decomp.tree`.
nproc (int): Number of processors that should be used.
use_double (bool, optional): If True, the triangulation is forced to
use 64bit integers reguardless of if there are too many points for
32bit. Otherwise 32bit integers are used so long as the number of
points is <=4294967295. Defaults to False.
limit_mem (bool, optional): If False, the triangulation results from
each process are moved to local memory using `multiprocessing`
pipes. If True, each process writes out tessellation info to
files which are then incrementally loaded as consolidation occurs.
Defaults to False.
Returns:
Dependent on task. For 'triangulate', a Delaunay triangulation class
like :class:`cgal4py.delaunay.Delaunay2` (but for the appropriate
number of dimensions) will be returned. For 'volumes', a numpy
array of floating point volumes will be returned where values
less than zero indicate infinite volumes.
"""
idxArray = mp.RawArray(ctypes.c_ulonglong, tree.idx.size)
ptsArray = mp.RawArray('d', pts.size)
memoryview(idxArray)[:] = tree.idx
memoryview(ptsArray)[:] = pts
# Split leaves
task2leaves = [[] for _ in range(nproc)]
for leaf in tree.leaves:
proc = leaf.id % nproc
task2leaves[proc].append(leaf)
left_edges = np.vstack([leaf.left_edge for leaf in tree.leaves])
right_edges = np.vstack([leaf.right_edge for leaf in tree.leaves])
# Create & execute processes
count = [mp.Value('i', 0), mp.Value('i', 0), mp.Value('i', 0)]
lock = mp.Condition()
queues = [mp.Queue() for _ in range(nproc+1)]
in_pipes = [None for _ in range(nproc)]
out_pipes = [None for _ in range(nproc)]
for i in range(nproc):
out_pipes[i], in_pipes[i] = mp.Pipe(True)
unique_str = datetime.today().strftime("%Y%j%H%M%S")
processes = [DelaunayProcessMulti(
task, _, task2leaves[_], ptsArray, idxArray,
left_edges, right_edges, queues, lock, count, in_pipes[_],
unique_str=unique_str, limit_mem=limit_mem) for _ in range(nproc)]
for p in processes:
p.start()
# Synchronize to ensure rapid receipt of output info from leaves
lock.acquire()
lock.wait()
lock.release()
# Set up methods for receiving leaf info
if task == 'triangulate':
serial = [None for _ in range(tree.num_leaves)]
def recv_leaf(p):
iid, s = processes[p].receive_result(out_pipes[p])
assert(tree.leaves[iid].id == iid)
serial[iid] = s
elif task == 'volumes':
vol = np.empty(pts.shape[0], pts.dtype)
def recv_leaf(p):
iid, ivol = processes[p].receive_result(out_pipes[p])
assert(tree.leaves[iid].id == iid)
vol[tree.idx[tree.leaves[iid].slice]] = ivol
# Receive output from processes
proc_list = range(nproc)
# Version that takes whatever is available
total_count = 0
max_total_count = tree.num_leaves
while total_count != max_total_count:
for i in proc_list:
while out_pipes[i].poll():
recv_leaf(i)
total_count += 1
# Version that does processors in order
# for i in proc_list:
# for _ in range(len(task2leaves[i])):
# recv_leaf(i)
# Consolidate tessellation
if task == 'triangulate':
out = consolidate_tess(tree, serial, pts, use_double=use_double,
unique_str=unique_str, limit_mem=limit_mem)
elif task == 'volumes':
out = vol
# Close queues and processes
for p in processes:
p.join()
return out
# @profile
def consolidate_tess(tree, leaf_output, pts, use_double=False,
unique_str=None, limit_mem=False):
r"""Creates a single triangulation from the triangulations of leaves.
Args:
tree (object): Domain decomposition tree for splitting points among the
processes. Produced by :meth:`cgal4py.domain_decomp.tree`.
leaf_output (object): Output from each parallel leaf.
pts (np.ndarray of float64): (n,m) Array of n mD points.
use_double (bool, optional): If True, the triangulation is forced to
use 64bit integers reguardless of if there are too many points for
32bit. Otherwise 32bit integers are used so long as the number of
points is <=4294967295. Defaults to False.
unique_str (str, optional): Unique identifier for files in a run. If
`limit_mem == True` those files will be loaded and used to create
the consolidated tessellation. Defaults to None. If None, there is
a risk that multiple runs could be sharing files of the same name.
limit_mem (bool, optional): If False, the triangulation is consolidated
from partial triangulations on each leaf that already exist in
memory. If True, partial triangulations are loaded from files for
each leaf. Defaults to `False`.
Returns:
:class:`cgal4py.delaunay.Delaunay2` or
:class:`cgal4py.delaunay.Delaunay3`: consolidated 2D or 3D
triangulation object.
"""
npts = pts.shape[0]
ndim = pts.shape[1]
uint32_max = np.iinfo('uint32').max
if npts >= uint32_max:
use_double = True
if use_double:
idx_inf = np.uint64(np.iinfo('uint64').max)
else:
idx_inf = np.uint32(uint32_max)
# Loop over leaves adding them
if not limit_mem:
ncells_tot = 0
for s in leaf_output:
ncells_tot += np.int64(s[5])
if use_double:
cons = tools.ConsolidatedLeaves64(ndim, idx_inf, ncells_tot)
else:
cons = tools.ConsolidatedLeaves32(ndim, idx_inf, ncells_tot)
for i, leaf in enumerate(tree.leaves):
leaf_dtype = leaf_output[i][0].dtype
if leaf_dtype == np.uint64:
sleaf = tools.SerializedLeaf64(
leaf.id, ndim, leaf_output[i][0].shape[0],
leaf_output[i][2], leaf_output[i][0], leaf_output[i][1],
leaf_output[i][3], leaf_output[i][4],
leaf.start_idx, leaf.stop_idx)
elif leaf_dtype == np.uint32:
sleaf = tools.SerializedLeaf32(
leaf.id, ndim, leaf_output[i][0].shape[0],
leaf_output[i][2], leaf_output[i][0], leaf_output[i][1],
leaf_output[i][3], leaf_output[i][4],
leaf.start_idx, leaf.stop_idx)
else:
raise TypeError("Unsupported leaf type: {}".format(leaf_dtype))
cons.add_leaf(sleaf)
else:
ncells_tot = sum(leaf_output)
if use_double:
cons = tools.ConsolidatedLeaves64(ndim, idx_inf, ncells_tot)
else:
cons = tools.ConsolidatedLeaves32(ndim, idx_inf, ncells_tot)
for i, leaf in enumerate(tree.leaves):
fname = _final_leaf_tess_filename(leaf.id, unique_str=unique_str)
cons.add_leaf_fromfile(fname)
os.remove(fname)
cons.finalize()
cells = cons.verts
neigh = cons.neigh
# if np.sum(neigh == idx_inf) != 0:
# for i in range(ncells):
# print(i, cells[i, :], neigh[i, :])
# assert(np.sum(neigh == idx_inf) == 0)
# Do tessellation
T = Delaunay(np.zeros([0, ndim]), use_double=use_double)
T.deserialize_with_info(pts, tree.idx.astype(cells.dtype),
cells, neigh, idx_inf)
return T
def DelaunayProcessMPI(taskname, pts, tree=None,
left_edge=None, right_edge=None,
periodic=False, unique_str=None, use_double=False,
use_python=False, use_buffer=False, limit_mem=False,
suppress_final_output=False):
r"""Get object for coordinating MPI operations.
Args:
See :class:`cgal4py.parallel.DelaunayProcessMPI_Python` and
:class:`cgal4py.parallel.DelaunayProcessMPI_C` for information on
arguments.
Raises:
ValueError: if `task` is not one of the accepted values listed above.
Returns:
:class:`cgal4py.parallel.DelaunayProcessMPI_Python` if
`use_python == True`, :class:`cgal4py.parallel.DelaunayProcessMPI_C`
otherwise.
"""
if use_python:
out = DelaunayProcessMPI_Python(
taskname, pts, tree=tree, left_edge=left_edge,
right_edge=right_edge, periodic=periodic, unique_str=unique_str,
use_double=use_double, use_buffer=use_buffer,
limit_mem=limit_mem, suppress_final_output=suppress_final_output)
else:
out = DelaunayProcessMPI_C(
taskname, pts, left_edge=left_edge,
right_edge=right_edge, periodic=periodic, unique_str=unique_str,
use_double=use_double, limit_mem=limit_mem,
suppress_final_output=suppress_final_output)
return out
class DelaunayProcessMPI_C(object):
r"""Class for coordinating MPI operations in C. This serves as a wrapper
for :class:`cgal4py.delaunay.ParallelDelaunayD` to function the same as
:class:`cgal4py.parallel.DelaunayProcessMPI_Python`.
Args:
taskname (str): Key for the task that should be parallelized.
Options:
* 'triangulate': Perform triangulation and put serialized info in
the output queue.
* 'volumes': Perform triangulation and put volumes in output queue.
pts (np.ndarray of float64): Array of coordinates to triangulate.
left_edge (np.ndarray of float64, optional): Array of domain mins in
each dimension. If not provided, they are determined from the
points. Defaults to None.
right_edge (np.ndarray of float64, optional): Array of domain maxes in
each dimension. If not provided, they are determined from the
points. Defaults to None.
periodic (bool, optional): If True, the domain is assumed to be
periodic at its left/right edges in each dimension. Defaults to
False.
unique_str (str, optional): Unique string identifying the domain
decomposition that is passed to `cgal4py.parallel.ParallelLeaf` for
file naming. Defaults to None.
use_double (bool, optional): If True, 64 bit integers will be used for
the triangulation. Defaults to False.
limit_mem (bool, optional): If True, additional leaves are used and
as each process cycles through its subset, leaves are written to/
read from a file. Otherwise, all leaves are kept in memory at
all times. Defaults to False.
suppress_final_output (bool, optional): If True, output of the result
to file is suppressed. This is mainly for testing purposes.
Defaults to False.
Raises:
ValueError: if `task` is not one of the accepted values listed above.
"""
def __init__(self, taskname, pts, left_edge=None, right_edge=None,
periodic=False, unique_str=None, use_double=False,
limit_mem=False, suppress_final_output=False):
if not mpi_loaded:
raise Exception("mpi4py could not be imported.")
task_list = ['triangulate', 'volumes']
if taskname not in task_list:
raise ValueError('{} is not a valid task.'.format(taskname))
comm = MPI.COMM_WORLD
size = comm.Get_size()
rank = comm.Get_rank()
ndim = None
if rank == 0:
ndim = pts.shape[1]
if left_edge is None:
left_edge = pts.min(axis=0)
if right_edge is None:
right_edge = pts.max(axis=0)
ndim = comm.bcast(ndim, root=0)
Delaunay = _get_Delaunay(ndim, parallel=True, bit64=use_double)
self.PT = Delaunay(left_edge, right_edge, periodic=periodic,
limit_mem=limit_mem)
self.size = size
self.rank = rank
self.comm = comm
self.pts = pts
self.taskname = taskname
self.unique_str = unique_str
self.suppress_final_output = suppress_final_output
def output_filename(self):
if self.taskname == 'triangulate':
fname = _tess_filename(unique_str=self.unique_str)
elif self.taskname == 'volumes':
fname = _vols_filename(unique_str=self.unique_str)
return fname
def run(self):
r"""Perform necessary steps to complete the supplied task."""
self.PT.insert(self.pts)
if self.taskname == 'triangulate':
T = self.PT.consolidate_tess()
if (self.rank == 0):
if not self.suppress_final_output:
ftess = self.output_filename()
with open(ftess, 'wb') as fd:
T.serialize_to_buffer(fd, self.pts)
elif self.taskname == 'volumes':
vols = self.PT.consolidate_vols()
if (self.rank == 0):
if not self.suppress_final_output:
fvols = self.output_filename()
with open(fvols, 'wb') as fd:
fd.write(vols.tobytes())
class DelaunayProcessMPI_Python(object):
r"""Class for coordinating MPI operations in Python.
Args:
taskname (str): Key for the task that should be parallelized.
Options:
* 'triangulate': Perform triangulation and put serialized info in
the output queue.
* 'volumes': Perform triangulation and put volumes in output queue.
pts (np.ndarray of float64): Array of coordinates to triangulate.
tree (Tree, optional): Domain decomposition tree. If not provided,
:func:`cgal4py.domain_decomp.tree` is used to construct one.
Defaults to None.
left_edge (np.ndarray of float64, optional): Array of domain mins in
each dimension. If not provided, they are determined from the
points. Defaults to None. This is not required if `tree` is
provided.
right_edge (np.ndarray of float64, optional): Array of domain maxes in
each dimension. If not provided, they are determined from the
points. Defaults to None. This is not required if `tree` is
provided.
periodic (bool, optional): If True, the domain is assumed to be
periodic at its left/right edges in each dimension. Defaults to
False. This is not required if `tree` is provided.
unique_str (str, optional): Unique string identifying the domain
decomposition that is passed to `cgal4py.parallel.ParallelLeaf` for
file naming. Defaults to None.
use_double (bool, optional): If True, 64 bit integers will be used for
the triangulation. Defaults to False.
use_buffer (bool, optional): If True, communications are done by way of
buffers rather than pickling python objects. Defaults to False.
limit_mem (bool, optional): If True, additional leaves are used and
as each process cycles through its subset, leaves are written to/
read from a file. Otherwise, all leaves are kept in memory at
all times. Defaults to False.
suppress_final_output (bool, optional): If True, output of the result
to file is suppressed. This is mainly for testing purposes.
Defaults to False.
Raises:
ValueError: if `task` is not one of the accepted values listed above.
"""
def __init__(self, taskname, pts, tree=None,
left_edge=None, right_edge=None,
periodic=False, unique_str=None, use_double=False,
use_buffer=False, limit_mem=False,
suppress_final_output=False):
if not mpi_loaded:
raise Exception("mpi4py could not be imported.")
task_list = ['triangulate', 'volumes']
if taskname not in task_list:
raise ValueError('{} is not a valid task.'.format(taskname))
comm = MPI.COMM_WORLD
size = comm.Get_size()
rank = comm.Get_rank()
# Domain decomp
task2leaves = None
left_edges = None
right_edges = None
if rank == 0:
if tree is None:
tree = domain_decomp.tree('kdtree', pts, left_edge, right_edge,
periodic=periodic, nleaves=size)
if not isinstance(tree, GenericTree):
tree = GenericTree.from_tree(tree)
task2leaves = [[] for _ in range(size)]
for leaf in tree.leaves:
leaf.pts = pts[tree.idx[leaf.start_idx:leaf.stop_idx]]
task = leaf.id % size
task2leaves[task].append(leaf)
left_edges = np.vstack([leaf.left_edge for leaf in tree.leaves])
right_edges = np.vstack([leaf.right_edge for leaf in tree.leaves])
# Communicate points
# TODO: Serialize & allow for use of buffer
leaves = comm.scatter(task2leaves, root=0)
pkg = (left_edges, right_edges, unique_str)
left_edges, right_edges, unique_str = comm.bcast(pkg, root=0)
nleaves = len(leaves)
# Set attributes
self._task = taskname
self._pts = pts
self._tree = tree
self._unique_str = unique_str
self._limit_mem = limit_mem
self._use_double = use_double
self._use_buffer = use_buffer
self._suppress_final_output = suppress_final_output
self._comm = comm
self._num_proc = size
self._proc_idx = rank
self._leaves = [ParallelLeaf(leaf, left_edges, right_edges,
unique_str=unique_str,
limit_mem=limit_mem) for leaf in leaves]
self._leafid2idx = {leaf.id:i for i,leaf in enumerate(leaves)}
ndim = left_edges.shape[1]
self._ndim = ndim
self._local_leaves = len(leaves)
self._total_leaves = 0
if self._local_leaves != 0:
self._total_leaves = leaves[0].num_leaves
self._done = False
self._task2leaf = {i:[] for i in range(size)}
for i in range(self._total_leaves):
task = i % size
self._task2leaf[task].append(i)
def output_filename(self):
if self._task == 'triangulate':
fname = _tess_filename(unique_str=self._unique_str)
elif self._task == 'volumes':
fname = _vols_filename(unique_str=self._unique_str)
return fname
def get_leaf(self, leafid):
r"""Return the leaf object associated wth a given leaf id.
Args:
leafid (int): Leaf ID.
"""
return self._leaves[self._leafid2idx[leafid]]
def tessellate_leaves(self):
r"""Performs the tessellation for each leaf on this process."""
for leaf in self._leaves:
leaf.tessellate()
def gather_leaf_arrays(self, local_arr, root=0):
r"""Gather arrays for all leaves to a single process.
Args:
local_arr (dict): Arrays to be gathered for each leaf ID.
root (int, optional): Process to which arrays should be gathered.
Defaults to 0.
Returns:
dict: Arrays for each leaf ID.
"""
total_arr = {}
if self._use_buffer: # pragma: no cover
leaf_ids = list(local_arr.keys())
np_dtype = local_arr[leaf_ids[0]].dtype
mpi_dtype = _get_mpi_type(np_dtype)
# Prepare things to send
scnt = np.array([len(leaf_ids)], 'int64')
sids = np.empty(2*len(leaf_ids), 'int64')
for i, k in enumerate(leaf_ids):
sids[2*i] = k
sids[2*i+1] = local_arr[k].size
sarr = np.concatenate([local_arr[k] for k in leaf_ids])
if sarr.dtype != np_dtype:
sarr = sarr.astype(np_dtype)
# Send the number of leaves on each processor
if self._proc_idx == root:
rcnt = np.empty(self._num_proc, scnt.dtype)
else:
rcnt = np.array([], scnt.dtype)
self._comm.Gather((scnt, _get_mpi_type(scnt.dtype)),
(rcnt, _get_mpi_type(rcnt.dtype)), root)
tot_nleaves = rcnt.sum()
# Send the ids and sizes of leaves
rids = np.empty(2*tot_nleaves, sids.dtype)
recv_buf = None
if self._proc_idx == root:
recv_buf = (rids, 2*rcnt, _get_mpi_type(rids.dtype))
self._comm.Gatherv((sids, _get_mpi_type(sids.dtype)),
recv_buf, root)
# Count number on each processor
arr_counts = np.zeros(self._num_proc, 'int')
if self._proc_idx == root:
j = 1
for i in range(self._num_proc):
for _ in range(rcnt[i]):
arr_counts[i] += rids[j]
j += 2
# Send the arrays for each leaf
rarr = np.empty(arr_counts.sum(), sarr.dtype)
recv_buf = None
if self._proc_idx == root:
recv_buf = (rarr, arr_counts, _get_mpi_type(rarr.dtype))
self._comm.Gatherv((sarr, _get_mpi_type(sarr.dtype)),
recv_buf, root)
# Parse out info for each leaf
if self._proc_idx == root:
curr = 0
for i in range(tot_nleaves):
j = 2*i
k = rids[j]
siz = rids[j + 1]
total_arr[k] = rarr[curr:(curr+siz)]
curr += siz
assert(curr == rarr.size)
else:
data = self._comm.gather(local_arr, root=root)
if self._proc_idx == root:
for x in data:
total_arr.update(x)
return total_arr
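# Shape sketch for gather_leaf_arrays above: a rank owning leaves 0 and 3
# passes e.g. {0: arr0, 3: arr3}; after the call the root rank holds the
# merged dict keyed by leaf id for every rank, and all other ranks get {}.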
def alltoall_leaf_arrays(self, local_arr, dtype=None, return_counts=False,
leaf_counts=None, array_counts=None):
r"""Exchange arrays between leaves.
Args:
local_arr (dict): Arrays to be exchanged with other leaves. Keys
are 2-element tuples. The first element is the leaf the array
came from, the second element is the leaf the array should be
send to.
Returns:
dict: Incoming arrays where the keys are 2-element tuples as
described above.
"""
total_arr = {}
if self._use_buffer: # pragma: no cover
local_leaf_ids = list(local_arr.keys())
leaf_ids = [[] for i in range(self._num_proc)]
local_array_count = 0
for k in local_leaf_ids:
task = k[1] % self._num_proc
leaf_ids[task].append(k)
local_array_count += local_arr[k].size
# Get data type
if len(local_leaf_ids) == 0:
if dtype is None:
raise Exception("Nothing being sent from this process. " +
"Cannot determine type to be recieved.")
np_dtype = dtype
else:
np_dtype = local_arr[local_leaf_ids[0]].dtype
mpi_dtype = _get_mpi_type(np_dtype)
# Compute things to send
scnt = np.array([len(x) for x in leaf_ids], 'int64')
nleaves_send = scnt.sum()
array_counts_send = np.zeros(self._num_proc, 'int64')
from numpy import (
logspace, linspace, geomspace, dtype, array, sctypes, arange, isnan,
ndarray, sqrt, nextafter, stack, errstate
)
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_array_equal, assert_allclose,
)
class PhysicalQuantity(float):
def __new__(cls, value):
return float.__new__(cls, value)
def __add__(self, x):
assert_(isinstance(x, PhysicalQuantity))
return PhysicalQuantity(float(x) + float(self))
__radd__ = __add__
def __sub__(self, x):
assert_(isinstance(x, PhysicalQuantity))
return PhysicalQuantity(float(self) - float(x))
def __rsub__(self, x):
assert_(isinstance(x, PhysicalQuantity))
return PhysicalQuantity(float(x) - float(self))
def __mul__(self, x):
return PhysicalQuantity(float(x) * float(self))
__rmul__ = __mul__
def __div__(self, x):
return PhysicalQuantity(float(self) / float(x))
def __rdiv__(self, x):
return PhysicalQuantity(float(x) / float(self))
class PhysicalQuantity2(ndarray):
__array_priority__ = 10
class TestLogspace:
def test_basic(self):
y = logspace(0, 6)
assert_(len(y) == 50)
y = logspace(0, 6, num=100)
assert_(y[-1] == 10 ** 6)
y = logspace(0, 6, endpoint=False)
assert_(y[-1] < 10 ** 6)
y = logspace(0, 6, num=7)
assert_array_equal(y, [1, 10, 100, 1e3, 1e4, 1e5, 1e6])
def test_start_stop_array(self):
start = array([0., 1.])
stop = array([6., 7.])
t1 = logspace(start, stop, 6)
t2 = stack([logspace(_start, _stop, 6)
for _start, _stop in zip(start, stop)], axis=1)
assert_equal(t1, t2)
t3 = logspace(start, stop[0], 6)
t4 = stack([logspace(_start, stop[0], 6)
for _start in start], axis=1)
assert_equal(t3, t4)
t5 = logspace(start, stop, 6, axis=-1)
assert_equal(t5, t2.T)
def test_dtype(self):
y = logspace(0, 6, dtype='float32')
assert_equal(y.dtype, dtype('float32'))
y = logspace(0, 6, dtype='float64')
assert_equal(y.dtype, dtype('float64'))
y = logspace(0, 6, dtype='int32')
assert_equal(y.dtype, dtype('int32'))
def test_physical_quantities(self):
a = PhysicalQuantity(1.0)
b = PhysicalQuantity(5.0)
assert_equal(logspace(a, b), logspace(1.0, 5.0))
def test_subclass(self):
a = array(1).view(PhysicalQuantity2)
b = array(7).view(PhysicalQuantity2)
ls = logspace(a, b)
assert type(ls) is PhysicalQuantity2
assert_equal(ls, logspace(1.0, 7.0))
ls = logspace(a, b, 1)
assert type(ls) is PhysicalQuantity2
assert_equal(ls, logspace(1.0, 7.0, 1))
class TestGeomspace:
def test_basic(self):
y = geomspace(1, 1e6)
assert_(len(y) == 50)
y = geomspace(1, 1e6, num=100)
assert_(y[-1] == 10 ** 6)
y = geomspace(1, 1e6, endpoint=False)
assert_(y[-1] < 10 ** 6)
y = geomspace(1, 1e6, num=7)
assert_array_equal(y, [1, 10, 100, 1e3, 1e4, 1e5, 1e6])
y = geomspace(8, 2, num=3)
assert_allclose(y, [8, 4, 2])
assert_array_equal(y.imag, 0)
y = geomspace(-1, -100, num=3)
assert_array_equal(y, [-1, -10, -100])
assert_array_equal(y.imag, 0)
y = geomspace(-100, -1, num=3)
assert_array_equal(y, [-100, -10, -1])
assert_array_equal(y.imag, 0)
def test_boundaries_match_start_and_stop_exactly(self):
# make sure that the boundaries of the returned array exactly
# equal 'start' and 'stop' - this isn't obvious because
# np.exp(np.log(x)) isn't necessarily exactly equal to x
start = 0.3
stop = 20.3
y = geomspace(start, stop, num=1)
assert_equal(y[0], start)
y = geomspace(start, stop, num=1, endpoint=False)
assert_equal(y[0], start)
y = geomspace(start, stop, num=3)
assert_equal(y[0], start)
assert_equal(y[-1], stop)
y = geomspace(start, stop, num=3, endpoint=False)
assert_equal(y[0], start)
def test_nan_interior(self):
with errstate(invalid='ignore'):
y = geomspace(-3, 3, num=4)
assert_equal(y[0], -3.0)
assert_(isnan(y[1:-1]).all())
assert_equal(y[3], 3.0)
with errstate(invalid='ignore'):
y = geomspace(-3, 3, num=4, endpoint=False)
assert_equal(y[0], -3.0)
assert_(isnan(y[1:]).all())
def test_complex(self):
# Purely imaginary
y = geomspace(1j, 16j, num=5)
assert_allclose(y, [1j, 2j, 4j, 8j, 16j])
assert_array_equal(y.real, 0)
y = geomspace(-4j, -324j, num=5)
assert_allclose(y, [-4j, -12j, -36j, -108j, -324j])
assert_array_equal(y.real, 0)
y = geomspace(1+1j, 1000+1000j, num=4)
assert_allclose(y, [1+1j, 10+10j, 100+100j, 1000+1000j])
y = geomspace(-1+1j, -1000+1000j, num=4)
assert_allclose(y, [-1+1j, -10+10j, -100+100j, -1000+1000j])
# Logarithmic spirals
y = geomspace(-1, 1, num=3, dtype=complex)
assert_allclose(y, [-1, 1j, +1])
y = geomspace(0+3j, -3+0j, 3)
assert_allclose(y, [0+3j, -3/sqrt(2)+3j/sqrt(2), -3+0j])
y = geomspace(0+3j, 3+0j, 3)
assert_allclose(y, [0+3j, 3/sqrt(2)+3j/sqrt(2), 3+0j])
y = geomspace(-3+0j, 0-3j, 3)
assert_allclose(y, [-3+0j, -3/sqrt(2)-3j/sqrt(2), 0-3j])
y = geomspace(0+3j, -3+0j, 3)
assert_allclose(y, [0+3j, -3/sqrt(2)+3j/sqrt(2), -3+0j])
y = geomspace(-2-3j, 5+7j, 7)
assert_allclose(y, [-2-3j, -0.29058977-4.15771027j,
2.08885354-4.34146838j, 4.58345529-3.16355218j,
6.41401745-0.55233457j, 6.75707386+3.11795092j,
5+7j])
# Type promotion should prevent the -5 from becoming a NaN
y = geomspace(3j, -5, 2)
assert_allclose(y, [3j, -5])
y = geomspace(-5, 3j, 2)
assert_allclose(y, [-5, 3j])
def test_dtype(self):
y = geomspace(1, 1e6, dtype='float32')
assert_equal(y.dtype, dtype('float32'))
y = geomspace(1, 1e6, dtype='float64')
assert_equal(y.dtype, dtype('float64'))
y = geomspace(1, 1e6, dtype='int32')
assert_equal(y.dtype, dtype('int32'))
# Native types
y = geomspace(1, 1e6, dtype=float)
assert_equal(y.dtype, dtype('float_'))
y = geomspace(1, 1e6, dtype=complex)
assert_equal(y.dtype, dtype('complex'))
def test_start_stop_array_scalar(self):
lim1 = array([120, 100], dtype="int8")
lim2 = array([-120, -100], dtype="int8")
lim3 = array([1200, 1000], dtype="uint16")
t1 = geomspace(lim1[0], lim1[1], 5)
t2 = geomspace(lim2[0], lim2[1], 5)
t3 = geomspace(lim3[0], lim3[1], 5)
t4 = geomspace(120.0, 100.0, 5)
t5 = geomspace(-120.0, -100.0, 5)
t6 = geomspace(1200.0, 1000.0, 5)
# t3 uses float32, t6 uses float64
assert_allclose(t1, t4, rtol=1e-2)
assert_allclose(t2, t5, rtol=1e-2)
assert_allclose(t3, t6, rtol=1e-5)
def test_start_stop_array(self):
# Try to use all special cases.
start = array([1.e0, 32., 1j, -4j, 1+1j, -1])
stop = array([1.e4, 2., 16j, -324j, 10000+10000j, 1])
t1 = geomspace(start, stop, 5)
t2 = stack([geomspace(_start, _stop, 5)
for _start, _stop in zip(start, stop)], axis=1)
assert_equal(t1, t2)
t3 = geomspace(start, stop[0], 5)
t4 = stack([geomspace(_start, stop[0], 5)
for _start in start], axis=1)
assert_equal(t3, t4)
t5 = geomspace(start, stop, 5, axis=-1)
assert_equal(t5, t2.T)
def test_physical_quantities(self):
a = PhysicalQuantity(1.0)
b = PhysicalQuantity(5.0)
assert_equal(geomspace(a, b), geomspace(1.0, 5.0))
def test_subclass(self):
a = array(1).view(PhysicalQuantity2)
b = array(7).view(PhysicalQuantity2)
gs = geomspace(a, b)
assert type(gs) is PhysicalQuantity2
assert_equal(gs, geomspace(1.0, 7.0))
gs = geomspace(a, b, 1)
assert type(gs) is PhysicalQuantity2
assert_equal(gs, geomspace(1.0, 7.0, 1))
def test_bounds(self):
assert_raises(ValueError, geomspace, 0, 10)
assert_raises(ValueError, geomspace, 10, 0)
assert_raises(ValueError, geomspace, 0, 0)
class TestLinspace:
def test_basic(self):
y = linspace(0, 10)
assert_(len(y) == 50)
y = linspace(2, 10, num=100)
assert_(y[-1] == 10)
y = linspace(2, 10, endpoint=False)
assert_(y[-1] < 10)
assert_raises(ValueError, linspace, 0, 10, num=-1)
import talib
import numpy as np
import jtrade.core.instrument.equity as Equity
# ========== TECH OVERLAP INDICATORS **START** ==========
def BBANDS(equity, start=None, end=None, timeperiod=5, nbdevup=2, nbdevdn=2, matype=0):
"""Bollinger Bands
:param timeperiod:
:param nbdevup:
:param nbdevdn:
:param matype:
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
upperband, middleband, lowerband = talib.BBANDS(close, timeperiod=timeperiod, nbdevup=nbdevup, nbdevdn=nbdevdn, matype=matype)
return upperband, middleband, lowerband
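# Hedged usage sketch ('my_equity' is a placeholder for any Equity-like
# object whose 'hp' DataFrame holds the OHLCV price history):
# upper, middle, lower = BBANDS(my_equity, start='2020-01-01', end='2020-12-31',
#                               timeperiod=20, nbdevup=2, nbdevdn=2)
# band_width = (upper - lower) / middle  # a simple volatility gauge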
def DEMA(equity, start=None, end=None, timeperiod=30):
"""Double Exponential Moving Average
:param timeperiod:
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.DEMA(close, timeperiod=timeperiod)
return real
def EMA(equity, start=None, end=None, timeperiod=30):
"""Exponential Moving Average
NOTE: The EMA function has an unstable period.
:param timeperiod:
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.EMA(close, timeperiod=timeperiod)
return real
def HT_TRENDLINE(equity, start=None, end=None):
"""Hilbert Transform - Instantaneous Trendline
NOTE: The HT_TRENDLINE function has an unstable period.
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.HT_TRENDLINE(close)
return real
def KAMA(equity, start=None, end=None, timeperiod=30):
"""Kaufman Adaptive Moving Average
NOTE: The KAMA function has an unstable period.
:param timeperiod:
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.KAMA(close, timeperiod=timeperiod)
return real
def MA(equity, start=None, end=None, timeperiod=30, matype=0):
"""Moving average
:param timeperiod:
:param matype:
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.MA(close, timeperiod=timeperiod, matype=matype)
return real
def MAMA(equity, start=None, end=None, fastlimit=0, slowlimit=0):
"""MESA Adaptive Moving Average
NOTE: The MAMA function has an unstable period.
:param fastlimit:
:param slowlimit:
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
mama, fama = talib.MAMA(close, fastlimit=fastlimit, slowlimit=slowlimit)
return mama, fama
def MAVP(equity, periods, start=None, end=None, minperiod=2, maxperiod=30, matype=0):
"""Moving average with variable period
:param periods:
:param minperiod:
:param maxperiod:
:param matype:
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.MAVP(close, periods, minperiod=minperiod, maxperiod=maxperiod, matype=matype)
return real
def MIDPOINT(equity, start=None, end=None, timeperiod=14):
"""MidPoint over period
:param timeperiod:
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.MIDPOINT(close, timeperiod=timeperiod)
return real
def MIDPRICE(equity, start=None, end=None, timeperiod=14):
"""Midpoint Price over period
:param timeperiod:
:return:
"""
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
real = talib.MIDPRICE(high, low, timeperiod=timeperiod)
return real
def SAR(equity, start=None, end=None, acceleration=0, maximum=0):
"""Parabolic SAR
:param acceleration:
:param maximum:
:return:
"""
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
real = talib.SAR(high, low, acceleration=acceleration, maximum=maximum)
return real
def SAREXT(equity, start=None, end=None, startvalue=0, offsetonreverse=0, accelerationinitlong=0, accelerationlong=0,
accelerationmaxlong=0, accelerationinitshort=0, accelerationshort=0, accelerationmaxshort=0):
"""Parabolic SAR - Extended
:param startvalue:
:param offsetonreverse:
:param accelerationinitlong:
:param accelerationlong:
:param accelerationmaxlong:
:param accelerationinitshort:
:param accelerationshort:
:param accelerationmaxshort:
:return:
"""
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
real = talib.SAREXT(high, low, startvalue=startvalue, offsetonreverse=offsetonreverse, accelerationinitlong=accelerationinitlong,
accelerationlong=accelerationlong, accelerationmaxlong=accelerationmaxlong, accelerationinitshort=accelerationinitshort,
accelerationshort=accelerationshort, accelerationmaxshort=accelerationmaxshort)
return real
def SMA(equity, start=None, end=None, timeperiod=30):
"""Simple Moving Average
:param timeperiod:
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.SMA(close, timeperiod=timeperiod)
return real
def T3(equity, start=None, end=None, timeperiod=5, vfactor=0):
"""Triple Exponential Moving Average (T3)
NOTE: The T3 function has an unstable period.
:param timeperiod:
:param vfactor:
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.T3(close, timeperiod=timeperiod, vfactor=vfactor)
return real
def TEMA(equity, start=None, end=None, timeperiod=30):
"""Triple Exponential Moving Average
:param timeperiod:
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.TEMA(close, timeperiod=timeperiod)
return real
def TRIMA(equity, start=None, end=None, timeperiod=30):
"""Triangular Moving Average
:param timeperiod:
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.TRIMA(close, timeperiod=timeperiod)
return real
def WMA(equity, start=None, end=None, timeperiod=30):
"""Weighted Moving Average
:param timeperiod:
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.WMA(close, timeperiod=timeperiod)
return real
# ========== TECH OVERLAP INDICATORS **END** ==========
# ========== TECH MOMENTUM INDICATORS **START** ==========
def ADX(equity, start=None, end=None, timeperiod=14):
"""Average Directional Movement Index
NOTE: The ADX function has an unstable period.
:param timeperiod:
:return:
"""
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.ADX(high, low, close, timeperiod=timeperiod)
return real
def ADXR(equity, start=None, end=None, timeperiod=14):
"""Average Directional Movement Index Rating
NOTE: The ADXR function has an unstable period.
:param timeperiod:
:return:
"""
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.ADXR(high, low, close, timeperiod=timeperiod)
return real
def APO(equity, start=None, end=None, fastperiod=12, slowperiod=26, matype=0):
"""Absolute Price Oscillator
:param fastperiod:
:param slowperiod:
:param matype:
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.APO(close, fastperiod=fastperiod, slowperiod=slowperiod, matype=matype)
return real
def AROON(equity, start=None, end=None, timeperiod=14):
"""Aroon
:param timeperiod:
:return:
"""
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
aroondown, aroonup = talib.AROON(high, low, timeperiod=timeperiod)
return aroondown, aroonup
def AROONOSC(equity, start=None, end=None, timeperiod=14):
"""Aroon Oscillator
:param timeperiod:
:return:
"""
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
real = talib.AROONOSC(high, low, timeperiod=timeperiod)
return real
def BOP(equity, start=None, end=None):
"""Balance Of Power
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.BOP(opn, high, low, close)
return real
def CCI(equity, start=None, end=None, timeperiod=14):
"""Commodity Channel Index
:param timeperiod:
:return:
"""
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.CCI(high, low, close, timeperiod=timeperiod)
return real
def CMO(equity, start=None, end=None, timeperiod=14):
"""Chande Momentum Oscillator
NOTE: The CMO function has an unstable period.
:param timeperiod:
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.CMO(close, timeperiod=timeperiod)
return real
def DX(equity, start=None, end=None, timeperiod=14):
"""Directional Movement Index
NOTE: The DX function has an unstable period.
:param timeperiod:
:return:
"""
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.DX(high, low, close, timeperiod=timeperiod)
return real
def MACD(equity, start=None, end=None, fastperiod=12, slowperiod=26, signalperiod=9):
"""Moving Average Convergence/Divergence
:param fastperiod:
:param slowperiod:
:param signalperiod:
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
macd, macdsignal, macdhist = talib.MACD(close, fastperiod=fastperiod, slowperiod=slowperiod, signalperiod=signalperiod)
return macd, macdsignal, macdhist
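# Hedged usage sketch (again assuming a hypothetical 'my_equity' with price
# history loaded):
# macd, signal, hist = MACD(my_equity, fastperiod=12, slowperiod=26, signalperiod=9)
# bullish_cross = macd[-2] < signal[-2] and macd[-1] > signal[-1]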
def MACDEXT(equity, start=None, end=None, fastperiod=12, fastmatype=0, slowperiod=26, slowmatype=0, signalperiod=9, signalmatype=0):
"""MACD with controllable MA type
:param fastperiod:
:param fastmatype:
:param slowperiod:
:param slowmatype:
:param signalperiod:
:param signalmatype:
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
macd, macdsignal, macdhist = talib.MACDEXT(close, fastperiod=fastperiod, fastmatype=fastmatype,
slowperiod=slowperiod, slowmatype=slowmatype,
signalperiod=signalperiod, signalmatype=signalmatype)
return macd, macdsignal, macdhist
def MACDFIX(equity, start=None, end=None, signalperiod=9):
"""Moving Average Convergence/Divergence Fix 12/26
:param signalperiod:
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
macd, macdsignal, macdhist = talib.MACDFIX(close, signalperiod=signalperiod)
return macd, macdsignal, macdhist
def MFI(equity, start=None, end=None, timeperiod=14):
"""Money Flow Index
NOTE: The MFI function has an unstable period.
:param timeperiod:
:return:
"""
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
volume = np.array(equity.hp.loc[start:end, 'volume'], dtype='f8')
real = talib.MFI(high, low, close, volume, timeperiod=timeperiod)
return real
def MINUS_DI(equity, start=None, end=None, timeperiod=14):
"""Minus Directional signal
NOTE: The MINUS_DI function has an unstable period.
:param timeperiod:
:return:
"""
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.MINUS_DI(high, low, close, timeperiod=timeperiod)
return real
def MINUS_DM(equity, start=None, end=None, timeperiod=14):
"""Minus Directional Movement
NOTE: The MINUS_DM function has an unstable period.
:param timeperiod:
:return:
"""
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
real = talib.MINUS_DM(high, low, timeperiod=timeperiod)
return real
def MOM(equity, start=None, end=None, timeperiod=10):
"""Momentum
:param timeperiod:
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.MOM(close, timeperiod=timeperiod)
return real
def PLUS_DI(equity, start=None, end=None, timeperiod=14):
"""Plus Directional signal
NOTE: The PLUS_DI function has an unstable period.
:param timeperiod:
:return:
"""
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.PLUS_DI(high, low, close, timeperiod=timeperiod)
return real
def PLUS_DM(equity, start=None, end=None, timeperiod=14):
"""Plus Directional Movement
NOTE: The PLUS_DM function has an unstable period.
:param timeperiod:
:return:
"""
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
real = talib.PLUS_DM(high, low, timeperiod=timeperiod)
return real
def PPO(equity, start=None, end=None, fastperiod=12, slowperiod=26, matype=0):
"""Percentage Price Oscillator
:param fastperiod:
:param slowperiod:
:param matype:
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.PPO(close, fastperiod=fastperiod, slowperiod=slowperiod, matype=matype)
return real
def ROC(equity, start=None, end=None, timeperiod=10):
"""Rate of change : ((price/prevPrice)-1)*100
:param timeperiod:
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.ROC(close, timeperiod=timeperiod)
return real
def ROCP(equity, start=None, end=None, timeperiod=10):
"""Rate of change Percentage: (price-prevPrice)/prevPrice
:param timeperiod:
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.ROCP(close, timeperiod=timeperiod)
return real
def ROCR(equity, start=None, end=None, timeperiod=10):
"""Rate of change ratio: (price/prevPrice)
:param timeperiod:
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.ROCR(close, timeperiod=timeperiod)
return real
def ROCR100(equity, start=None, end=None, timeperiod=10):
"""Rate of change ratio 100 scale: (price/prevPrice)*100
:param timeperiod:
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.ROCR100(close, timeperiod=timeperiod)
return real
def RSI(equity, start=None, end=None, timeperiod=14):
"""Relative Strength Index
NOTE: The RSI function has an unstable period.
:param timeperiod:
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.RSI(close, timeperiod=timeperiod)
return real
def STOCH(equity, start=None, end=None, fastk_period=5, slowk_period=3, slowk_matype=0, slowd_period=3, slowd_matype=0):
"""Stochastic
:param fastk_period:
:param slowk_period:
:param slowk_matype:
:param slowd_period:
:param slowd_matype:
:return:
"""
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
slowk, slowd = talib.STOCH(high, low, close, fastk_period=fastk_period, slowk_period=slowk_period,
slowk_matype=slowk_matype, slowd_period=slowd_period, slowd_matype=slowd_matype)
return slowk, slowd
def STOCHF(equity, start=None, end=None, fastk_period=5, fastd_period=3, fastd_matype=0):
"""Stochastic Fast
:param fastk_period:
:param fastd_period:
:param fastd_matype:
:return:
"""
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
fastk, fastd = talib.STOCHF(high, low, close, fastk_period=fastk_period, fastd_period=fastd_period,
fastd_matype=fastd_matype)
return fastk, fastd
def STOCHRSI(equity, start=None, end=None, timeperiod=14, fastk_period=5, fastd_period=3, fastd_matype=0):
"""Stochastic Relative Strength Index
NOTE: The STOCHRSI function has an unstable period.
:param timeperiod:
:param fastk_period:
:param fastd_period:
:param fastd_matype:
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
fastk, fastd = talib.STOCHRSI(close, timeperiod=timeperiod, fastk_period=fastk_period,
fastd_period=fastd_period, fastd_matype=fastd_matype)
return fastk, fastd
def TRIX(equity, start=None, end=None, timeperiod=30):
"""1-day Rate-Of-Change (ROC) of a Triple Smooth EMA
:param timeperiod:
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.TRIX(close, timeperiod=timeperiod)
return real
def ULTOSC(equity, start=None, end=None, timeperiod1=7, timeperiod2=14, timeperiod3=28):
"""Ultimate Oscillator
:param timeperiod1:
:param timeperiod2:
:param timeperiod3:
:return:
"""
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.ULTOSC(high, low, close, timeperiod1=timeperiod1, timeperiod2=timeperiod2, timeperiod3=timeperiod3)
return real
def WILLR(equity, start=None, end=None, timeperiod=14):
"""Williams' %R
:param timeperiod:
:return:
"""
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.WILLR(high, low, close, timeperiod=timeperiod)
return real
# ========== TECH MOMENTUM INDICATORS **END** ==========
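# Illustrative usage sketch (not part of the library): the wrappers above assume an
# `equity` object exposing a price-history DataFrame at `equity.hp` with
# 'open'/'high'/'low'/'close' columns indexed by date. The helper name, date strings
# and thresholds below are placeholders for this example only.
def _momentum_screen_example(equity):
    rsi = RSI(equity, start='2020-01-01', end='2020-12-31', timeperiod=14)
    slowk, slowd = STOCH(equity, start='2020-01-01', end='2020-12-31')
    oversold = (rsi < 30) & (slowk < 20)  # boolean mask over the selected window
    return oversold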
# ========== PRICE TRANSFORM FUNCTIONS **START** ==========
def AVGPRICE(equity, start=None, end=None):
"""Average Price
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.AVGPRICE(opn, high, low, close)
return real
def MEDPRICE(equity, start=None, end=None):
"""Median Price
:return:
"""
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
real = talib.MEDPRICE(high, low)
return real
def TYPPRICE(equity, start=None, end=None):
"""Typical Price
:return:
"""
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.TYPPRICE(high, low, close)
return real
def WCLPRICE(equity, start=None, end=None):
"""Weighted Close Price
:return:
"""
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.WCLPRICE(high, low, close)
return real
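# For reference, the TA-Lib price transforms wrapped above reduce to simple per-bar
# combinations of the OHLC prices:
#   AVGPRICE = (open + high + low + close) / 4
#   MEDPRICE = (high + low) / 2
#   TYPPRICE = (high + low + close) / 3
#   WCLPRICE = (high + low + 2 * close) / 4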
# ========== PRICE TRANSFORM FUNCTIONS **END** ==========
# ========== PATTERN RECOGNITION FUNCTIONS **START** ==========
def CDL2CROWS(equity, start=None, end=None):
"""Two Crows
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDL2CROWS(opn, high, low, close)
return integer
def CDL3BLACKCROWS(equity, start=None, end=None):
"""Three Black Crows
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDL3BLACKCROWS(opn, high, low, close)
return integer
def CDL3INSIDE(equity, start=None, end=None):
"""Three Inside Up/Down
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDL3INSIDE(opn, high, low, close)
return integer
def CDL3LINESTRIKE(equity, start=None, end=None):
"""Three-Line Strike
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDL3LINESTRIKE(opn, high, low, close)
return integer
def CDL3OUTSIDE(equity, start=None, end=None):
"""Three Outside Up/Down
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDL3OUTSIDE(opn, high, low, close)
return integer
def CDL3STARSINSOUTH(equity, start=None, end=None):
"""Three Stars In The South
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDL3STARSINSOUTH(opn, high, low, close)
return integer
def CDL3WHITESOLDIERS(equity, start=None, end=None):
"""Three Advancing White Soldiers
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDL3WHITESOLDIERS(opn, high, low, close)
return integer
def CDLABANDONEDBABY(equity, start=None, end=None, penetration=0):
"""Abandoned Baby
:param penetration:
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDLABANDONEDBABY(opn, high, low, close, penetration=penetration)
return integer
def CDLADVANCEBLOCK(equity, start=None, end=None):
"""Advance Block
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDLADVANCEBLOCK(opn, high, low, close)
return integer
def CDLBELTHOLD(equity, start=None, end=None):
"""Belt-hold
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDLBELTHOLD(opn, high, low, close)
return integer
def CDLBREAKAWAY(equity, start=None, end=None):
"""Breakaway
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDLBREAKAWAY(opn, high, low, close)
return integer
def CDLCLOSINGMARUBOZU(equity, start=None, end=None):
"""Closing Marubozu
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDLCLOSINGMARUBOZU(opn, high, low, close)
return integer
def CDLCONCEALBABYSWALL(equity, start=None, end=None):
"""Concealing Baby Swallow
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDLCONCEALBABYSWALL(opn, high, low, close)
return integer
def CDLCOUNTERATTACK(equity, start=None, end=None):
"""Counterattack
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDLCOUNTERATTACK(opn, high, low, close)
return integer
def CDLDARKCLOUDCOVER(equity, start=None, end=None, penetration=0):
"""Dark Cloud Cover
:param penetration:
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDLDARKCLOUDCOVER(opn, high, low, close, penetration=penetration)
return integer
def CDLDOJI(equity, start=None, end=None):
"""Doji
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDLDOJI(opn, high, low, close)
return integer
def CDLDOJISTAR(equity, start=None, end=None):
"""Doji Star
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDLDOJISTAR(opn, high, low, close)
return integer
def CDLDRAGONFLYDOJI(equity, start=None, end=None):
"""Dragonfly Doji
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDLDRAGONFLYDOJI(opn, high, low, close)
return integer
def CDLENGULFING(equity, start=None, end=None):
"""Engulfing Pattern
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDLENGULFING(opn, high, low, close)
return integer
def CDLEVENINGDOJISTAR(equity, start=None, end=None, penetration=0):
"""Evening Doji Star
:param penetration:
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDLEVENINGDOJISTAR(opn, high, low, close, penetration=penetration)
return integer
def CDLEVENINGSTAR(equity, start=None, end=None, penetration=0):
"""Evening Star
:param penetration:
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDLEVENINGSTAR(opn, high, low, close, penetration=penetration)
return integer
def CDLGAPSIDESIDEWHITE(equity, start=None, end=None):
"""Up/Down-gap side-by-side white lines
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDLGAPSIDESIDEWHITE(opn, high, low, close)
return integer
def CDLGRAVESTONEDOJI(equity, start=None, end=None):
"""Gravestone Doji
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDLGRAVESTONEDOJI(opn, high, low, close)
return integer
def CDLHAMMER(equity, start=None, end=None):
"""Hammer
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDLHAMMER(opn, high, low, close)
return integer
def CDLHANGINGMAN(equity, start=None, end=None):
"""Hanging Man
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDLHANGINGMAN(opn, high, low, close)
return integer
def CDLHARAMI(equity, start=None, end=None):
"""Harami Pattern
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDLHARAMI(opn, high, low, close)
return integer
def CDLHARAMICROSS(equity, start=None, end=None):
"""Harami Cross Pattern
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDLHARAMICROSS(opn, high, low, close)
return integer
def CDLHIGHWAVE(equity, start=None, end=None):
"""High-Wave Candle
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDLHIGHWAVE(opn, high, low, close)
return integer
def CDLHIKKAKE(equity, start=None, end=None):
"""Hikkake Pattern
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDLHIKKAKE(opn, high, low, close)
return integer
def CDLHIKKAKEMOD(equity, start=None, end=None):
"""Modified Hikkake Pattern
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDLHIKKAKEMOD(opn, high, low, close)
return integer
def CDLHOMINGPIGEON(equity, start=None, end=None):
"""Homing Pigeon
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDLHOMINGPIGEON(opn, high, low, close)
return integer
def CDLIDENTICAL3CROWS(equity, start=None, end=None):
"""Identical Three Crows
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDLIDENTICAL3CROWS(opn, high, low, close)
return integer
def CDLINNECK(equity, start=None, end=None):
"""In-Neck Pattern
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDLINNECK(opn, high, low, close)
return integer
def CDLINVERTEDHAMMER(equity, start=None, end=None):
"""Inverted Hammer
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDLINVERTEDHAMMER(opn, high, low, close)
return integer
def CDLKICKING(equity, start=None, end=None):
"""Kicking
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDLKICKING(opn, high, low, close)
return integer
def CDLKICKINGBYLENGTH(equity, start=None, end=None):
"""Kicking - bull/bear determined by the longer marubozu
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDLKICKINGBYLENGTH(opn, high, low, close)
return integer
def CDLLADDERBOTTOM(equity, start=None, end=None):
"""Ladder Bottom
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDLLADDERBOTTOM(opn, high, low, close)
return integer
def CDLLONGLEGGEDDOJI(equity, start=None, end=None):
"""Long Legged Doji
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDLLONGLEGGEDDOJI(opn, high, low, close)
return integer
def CDLLONGLINE(equity, start=None, end=None):
"""Long Line Candle
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDLLONGLINE(opn, high, low, close)
return integer
def CDLMARUBOZU(equity, start=None, end=None):
"""Marubozu
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDLMARUBOZU(opn, high, low, close)
return integer
def CDLMATCHINGLOW(equity, start=None, end=None):
"""Matching Low
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDLMATCHINGLOW(opn, high, low, close)
return integer
def CDLMATHOLD(equity, start=None, end=None, penetration=0):
"""Mat Hold
:param penetration:
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDLMATHOLD(opn, high, low, close, penetration=penetration)
return integer
def CDLMORNINGDOJISTAR(equity, start=None, end=None, penetration=0):
"""Morning Doji Star
:param penetration:
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDLMORNINGDOJISTAR(opn, high, low, close, penetration=penetration)
return integer
def CDLMORNINGSTAR(equity, start=None, end=None, penetration=0):
"""Morning Star
:param penetration:
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDLMORNINGSTAR(opn, high, low, close, penetration=penetration)
return integer
def CDLONNECK(equity, start=None, end=None):
"""On-Neck Pattern
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDLONNECK(opn, high, low, close)
return integer
def CDLPIERCING(equity, start=None, end=None):
"""Piercing Pattern
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDLPIERCING(opn, high, low, close)
return integer
def CDLRICKSHAWMAN(equity, start=None, end=None):
"""Rickshaw Man
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDLRICKSHAWMAN(opn, high, low, close)
return integer
def CDLRISEFALL3METHODS(equity, start=None, end=None):
"""Rising/Falling Three Methods
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDLRISEFALL3METHODS(opn, high, low, close)
return integer
from keras.preprocessing.image import ImageDataGenerator
from configuration import conf
import numpy as np
def rotate_270(data):
if conf.dataset_name == 'mnist':
img_shape = (data.shape[0], 28, 28, 1) if conf.is_conv else (data.shape[0], -1)
data = data.reshape(-1, 28, 28)
elif conf.dataset_name == 'timh':
img_shape = (data.shape[0], 28, 28, 1) if conf.is_conv else (data.shape[0], -1)
data = data.reshape(-1, 28, 28)
else:
img_shape = (data.shape[0], 32, 32, 3) if conf.is_conv else (data.shape[0], -1)
data = data.reshape(-1, 32, 32, 3)
return np.rot90(data, k=3, axes=(1, 2)).reshape(img_shape)
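# Minimal sketch of rotate_270 on a dummy batch; purely illustrative and not part of
# the original module. It assumes conf.dataset_name == 'mnist' and conf.is_conv == True,
# in which case four flattened 28x28 images come back rotated and channel-expanded.
def _rotate_270_example():
    dummy = np.zeros((4, 28 * 28), dtype='float32')
    rotated = rotate_270(dummy)
    assert rotated.shape == (4, 28, 28, 1)
    return rotated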
class Multi_view:
def __init__(self):
self.datagen = [
ImageDataGenerator(samplewise_center=True),
ImageDataGenerator(samplewise_std_normalization=True),
ImageDataGenerator(featurewise_center=True),
ImageDataGenerator(featurewise_std_normalization=True),
ImageDataGenerator(zca_whitening=True, zca_epsilon=0.1),
ImageDataGenerator(zca_whitening=True),
ImageDataGenerator(rotation_range=180),
ImageDataGenerator(width_shift_range=0.4),
ImageDataGenerator(height_shift_range=0.4),
ImageDataGenerator(horizontal_flip=True),
ImageDataGenerator(vertical_flip=True),
ImageDataGenerator(zoom_range=0.3),
ImageDataGenerator(shear_range=30), ]
def fit(self, x):
for gen in self.datagen:
gen.fit(x)
def flow(self, x, y):
augment_data = []
augment_label = []
for gen in self.datagen:
data, label = gen.flow(x, y, batch_size=conf.batch_size).next()
augment_data.append(data)
augment_label.append(label)
return np.concatenate(augment_data), np.concatenate(augment_label)
def augment(self, x, y=None, concat=False, num_runs=1):
augment_data = [x, rotate_270(x)]
augment_label = [y, y]
if y is None:
for _ in np.arange(num_runs):
for gen in self.datagen:
data = gen.flow(x, batch_size=x.shape[0]).next()
augment_data.append(data)
if concat:
return np.concatenate(augment_data)
return augment_data
for _ in np.arange(num_runs):
for gen in self.datagen:
data, label = gen.flow(x, y, batch_size=x.shape[0]).next()
augment_data.append(data)
augment_label.append(label)
if concat:
return np.concatenate(augment_data), np.concatenate(augment_label)
return augment_data, augment_label
def augment_test_data(self, x, num_runs=10):
augment_data = [x, rotate_270(x)]
y = np.arange(x.shape[0])
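# Illustrative usage sketch for Multi_view (not part of the original module). It assumes
# `x_train`/`y_train` are 4D image arrays and labels shaped for the configured dataset
# and that `conf.batch_size` is set; the statistics-based generators (featurewise
# centering, ZCA) need the fit() call before augmentation is used.
def _multi_view_example(x_train, y_train):
    mv = Multi_view()
    mv.fit(x_train)
    aug_x, aug_y = mv.augment(x_train, y_train, concat=True, num_runs=1)
    return aug_x, aug_y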
#!/usr/bin/env python3
"""
"""
import math
import numpy as np
import numpy.ma as ma
from astropy import units as u
from astropy.coordinates import SkyCoord, AltAz
from iminuit import Minuit
from scipy.optimize import minimize, least_squares
from scipy.stats import norm
from ctapipe.coordinates import (
NominalFrame,
TiltedGroundFrame,
GroundFrame,
project_to_ground,
)
from ctapipe.image import neg_log_likelihood, mean_poisson_likelihood_gaussian
from ctapipe.instrument import get_atmosphere_profile_functions
from ctapipe.containers import (
ReconstructedGeometryContainer,
ReconstructedEnergyContainer,
)
from ctapipe.reco.reco_algorithms import Reconstructor
from ctapipe.utils.template_network_interpolator import (
TemplateNetworkInterpolator,
TimeGradientInterpolator,
)
__all__ = ["ImPACTReconstructor", "energy_prior", "xmax_prior", "guess_shower_depth"]
def guess_shower_depth(energy):
"""
Simple estimation of depth of shower max based on the expected gamma-ray elongation
rate.
Parameters
----------
energy: float
Energy of the shower in TeV
Returns
-------
float: Expected depth of shower maximum
"""
x_max_exp = 300 + 93 * np.log10(energy)
return x_max_exp
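# Worked example of the parametrisation above (illustrative only): a 1 TeV shower gives
# guess_shower_depth(1.0) == 300.0 g/cm^2, and each decade in energy adds ~93 g/cm^2,
# e.g. guess_shower_depth(10.0) == 393.0 and guess_shower_depth(100.0) == 486.0.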
def energy_prior(energy, index=-1):
return -2 * np.log(energy ** index)
def xmax_prior(energy, xmax, width=100):
x_max_exp = guess_shower_depth(energy)
diff = xmax - x_max_exp
return -2 * np.log(norm.pdf(diff / width))
class ImPACTReconstructor(Reconstructor):
"""This class is an implementation if the impact_reco Monte Carlo
Template based image fitting method from parsons14. This method uses a
comparision of the predicted image from a library of image
templates to perform a maximum likelihood fit for the shower axis,
energy and height of maximum.
Because this application is computationally intensive the usual
advice to use astropy units for all quantities is ignored (as
these slow down some computations), instead units within the class
are fixed:
- Angular units in radians
- Distance units in metres
- Energy units in TeV
References
----------
.. [parsons14] <NAME>, Astroparticle Physics 56 (2014), pp. 26-34
"""
# For likelihood calculation we need the width of the
# pedestal distribution for each pixel
# currently this is not available from the calibration,
# so for now let's hard code it in a dict
ped_table = {
"LSTCam": 2.8,
"NectarCam": 2.3,
"FlashCam": 2.3,
"CHEC": 0.5,
"DUMMY": 0,
}
spe = 0.5 # Also hard code single p.e. distribution width
def __init__(
self,
root_dir=".",
minimiser="minuit",
prior="",
template_scale=1.0,
xmax_offset=0,
use_time_gradient=False,
):
"""
Create a new instance of ImPACTReconstructor
"""
# First we create a dictionary of image template interpolators
# for each telescope type
self.root_dir = root_dir
self.priors = prior
self.minimiser_name = minimiser
self.file_names = {
"CHEC": ["GCT_05deg_ada.template.gz", "GCT_05deg_time.template.gz"],
"LSTCam": ["LST_05deg.template.gz", "LST_05deg_time.template.gz"],
"NectarCam": ["MST_05deg.template.gz", "MST_05deg_time.template.gz"],
"FlashCam": ["MST_xm_full.fits"],
}
# We also need a conversion function from height above ground to
# depth of maximum To do this we need the conversion table from CORSIKA
(
self.thickness_profile,
self.altitude_profile,
) = get_atmosphere_profile_functions("paranal", with_units=False)
# Next we need the position, area and amplitude from each pixel in the event
# making this a class member makes passing them around much easier
self.pixel_x, self.pixel_y = None, None
self.image, self.time = None, None
self.tel_types, self.tel_id = None, None
# We also need telescope positions
self.tel_pos_x, self.tel_pos_y = None, None
# And the peak of the images
self.peak_x, self.peak_y, self.peak_amp = None, None, None
self.hillas_parameters, self.ped = None, None
self.prediction = dict()
self.time_prediction = dict()
self.array_direction = None
self.array_return = False
self.nominal_frame = None
# For now these factors are required to fix problems in templates
self.template_scale = template_scale
self.xmax_offset = xmax_offset
self.use_time_gradient = use_time_gradient
def initialise_templates(self, tel_type):
"""Check if templates for a given telescope type has been initialised
and if not do it and add to the dictionary
Parameters
----------
tel_type: dictionary
Dictionary of telescope types in event
Returns
-------
boolean: Confirm initialisation
"""
for t in tel_type:
if tel_type[t] in self.prediction.keys() or tel_type[t] == "DUMMY":
continue
self.prediction[tel_type[t]] = TemplateNetworkInterpolator(
self.root_dir + "/" + self.file_names[tel_type[t]][0]
)
if self.use_time_gradient:
self.time_prediction[tel_type[t]] = TimeGradientInterpolator(
self.root_dir + "/" + self.file_names[tel_type[t]][1]
)
return True
def get_hillas_mean(self):
"""This is a simple function to find the peak position of each image
in an event which will be used later in the Xmax calculation. Peak is
found by taking the average position of the n hottest pixels in the
image.
"""
peak_x = np.zeros([len(self.pixel_x)]) # Create blank arrays for peaks
# rather than a dict (faster)
peak_y = np.zeros(peak_x.shape)
peak_amp = np.zeros(peak_x.shape)
# Loop over all tels to take weighted average of pixel
# positions This loop could maybe be replaced by an array
# operation by a numpy wizard
# Maybe a vectorize?
tel_num = 0
for hillas in self.hillas_parameters:
peak_x[tel_num] = hillas.x.to(u.rad).value # Fill up array
peak_y[tel_num] = hillas.y.to(u.rad).value
peak_amp[tel_num] = hillas.intensity
tel_num += 1
self.peak_x = peak_x # * unit # Add to class member
self.peak_y = peak_y # * unit
self.peak_amp = peak_amp
# This function would be useful elsewhere so probably be implemented in a
# more general form
def get_shower_max(self, source_x, source_y, core_x, core_y, zen):
"""Function to calculate the depth of shower maximum geometrically
under the assumption that the shower maximum lies at the
brightest point of the camera image.
Parameters
----------
source_x: float
Event source position in nominal frame
source_y: float
Event source position in nominal frame
core_x: float
Event core position in telescope tilted frame
core_y: float
Event core position in telescope tilted frame
zen: float
Zenith angle of event
Returns
-------
float: Depth of maximum of air shower
"""
# Calculate displacement of image centroid from source position (in
# rad)
disp = np.sqrt((self.peak_x - source_x) ** 2 + (self.peak_y - source_y) ** 2)
# Calculate impact parameter of the shower
impact = np.sqrt(
(self.tel_pos_x - core_x) ** 2 + (self.tel_pos_y - core_y) ** 2
)
# Distance above telescope is ratio of these two (small angle)
height = impact / disp
weight = np.power(self.peak_amp, 0.0)  # weighted average; exponent 0.0 makes all weights 1
# weighting by sqrt of the amplitude may not be the best option...
# Take weighted mean of estimates
mean_height = np.sum(height * weight) / np.sum(weight)
# This value is height above telescope in the tilted system,
# we should convert to height above ground
mean_height *= np.cos(zen)
# Add on the height of the detector above sea level
mean_height += 2150
if mean_height > 100000 or np.isnan(mean_height):
mean_height = 100000
# Lookup this height in the depth tables, then convert Hmax to Xmax
x_max = self.thickness_profile(mean_height)
# Convert to slant depth
x_max /= np.cos(zen)
return x_max + self.xmax_offset
@staticmethod
def rotate_translate(pixel_pos_x, pixel_pos_y, x_trans, y_trans, phi):
"""
Function to perform rotation and translation of pixel lists
Parameters
----------
pixel_pos_x: ndarray
Array of pixel x positions
pixel_pos_y: ndarray
Array of pixel x positions
x_trans: float
Translation of position in x coordinates
y_trans: float
Translation of position in y coordinates
phi: float
Rotation angle of pixels
Returns
-------
ndarray,ndarray: Transformed pixel x and y coordinates
"""
cosine_angle = np.cos(phi[..., np.newaxis])
sin_angle = np.sin(phi[..., np.newaxis])
pixel_pos_trans_x = (x_trans - pixel_pos_x) * cosine_angle - (
y_trans - pixel_pos_y
) * sin_angle
pixel_pos_trans_y = (pixel_pos_x - x_trans) * sin_angle + (
pixel_pos_y - y_trans
) * cosine_angle
return pixel_pos_trans_x, pixel_pos_trans_y
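# Worked example of the transform above (illustrative only): with x_trans = y_trans = 0
# and phi = np.array([0.0]) rad, a pixel at (1, 2) maps to
#   x' = (0 - 1) * cos(0) - (0 - 2) * sin(0) = -1
#   y' = (1 - 0) * sin(0) + (2 - 0) * cos(0) = 2
# i.e. the x axis is mirrored while y is kept, before any rotation is applied.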
def image_prediction(self, tel_type, energy, impact, x_max, pix_x, pix_y):
"""Creates predicted image for the specified pixels, interpolated
from the template library.
Parameters
----------
tel_type: string
Telescope type specifier
energy: float
Event energy (TeV)
impact: float
Impact distance of shower (metres)
x_max: float
Depth of shower maximum (num bins from expectation)
pix_x: ndarray
X coordinate of pixels
pix_y: ndarray
Y coordinate of pixels
Returns
-------
ndarray: predicted amplitude for all pixels
"""
return self.prediction[tel_type](energy, impact, x_max, pix_x, pix_y)
def predict_time(self, tel_type, energy, impact, x_max):
"""Creates predicted image for the specified pixels, interpolated
from the template library.
Parameters
----------
tel_type: string
Telescope type specifier
energy: float
Event energy (TeV)
impact: float
Impact distance of shower (metres)
x_max: float
Depth of shower maximum (num bins from expectation)
Returns
-------
ndarray: predicted amplitude for all pixels
"""
return self.time_prediction[tel_type](energy, impact, x_max)
def get_likelihood(
self,
source_x,
source_y,
core_x,
core_y,
energy,
x_max_scale,
goodness_of_fit=False,
):
"""Get the likelihood that the image predicted at the given test
position matches the camera image.
Parameters
----------
source_x: float
Source position of shower in the nominal system (in deg)
source_y: float
Source position of shower in the nominal system (in deg)
core_x: float
Core position of shower in tilted telescope system (in m)
core_y: float
Core position of shower in tilted telescope system (in m)
energy: float
Shower energy (in TeV)
x_max_scale: float
Scaling factor applied to geometrically calculated Xmax
goodness_of_fit: boolean
Determines whether expected likelihood should be subtracted from result
Returns
-------
float: Likelihood the model represents the camera image at this position
"""
# First we add units back onto everything. Currently not
# handled very well, maybe in future we could just put
# everything in the correct units when loading in the class
# and ignore them from then on
zenith = (np.pi / 2) - self.array_direction.alt.to(u.rad).value
# Geometrically calculate the depth of maximum given this test position
x_max = self.get_shower_max(source_x, source_y, core_x, core_y, zenith)
x_max *= x_max_scale
# Calculate expected Xmax given this energy
x_max_exp = guess_shower_depth(energy) # / np.cos(20*u.deg)
# Convert to binning of Xmax
x_max_bin = x_max - x_max_exp
# Check for range
if x_max_bin > 200:
x_max_bin = 200
if x_max_bin < -100:
x_max_bin = -100
# Calculate impact distance for all telescopes
impact = np.sqrt(
(self.tel_pos_x - core_x) ** 2 + (self.tel_pos_y - core_y) ** 2
)
# And the expected rotation angle
phi = np.arctan2((self.tel_pos_x - core_x), (self.tel_pos_y - core_y)) * u.rad
# Rotate and translate all pixels such that they match the
# template orientation
pix_y_rot, pix_x_rot = self.rotate_translate(
self.pixel_x, self.pixel_y, source_x, source_y, phi
)
# In the interpolator class we can gain speed advantages by using masked arrays
# so we need to make sure here everything is masked
prediction = ma.zeros(self.image.shape)
prediction.mask = ma.getmask(self.image)
time_gradients = np.zeros((self.image.shape[0], 2))
# Loop over all telescope types and get prediction
for tel_type in np.unique(self.tel_types).tolist():
type_mask = self.tel_types == tel_type
prediction[type_mask] = self.image_prediction(
tel_type,
energy * np.ones_like(impact[type_mask]),
impact[type_mask],
x_max_bin * np.ones_like(impact[type_mask]),
-np.rad2deg(pix_x_rot[type_mask]),
np.rad2deg(pix_y_rot[type_mask]),
)
if self.use_time_gradient:
time_gradients[type_mask] = self.predict_time(
tel_type,
energy * np.ones_like(impact[type_mask]),
impact[type_mask],
x_max_bin * np.ones_like(impact[type_mask]),
)
if self.use_time_gradient:
time_mask = np.logical_and(np.invert(ma.getmask(self.image)), self.time > 0)
weight = np.sqrt(self.image) * time_mask
rv = norm()
sx = pix_x_rot * weight
sxx = pix_x_rot * pix_x_rot * weight
sy = self.time * weight
sxy = self.time * pix_x_rot * weight
d = weight.sum(axis=1) * sxx.sum(axis=1) - sx.sum(axis=1) * sx.sum(axis=1)
time_fit = (
weight.sum(axis=1) * sxy.sum(axis=1) - sx.sum(axis=1) * sy.sum(axis=1)
) / d
time_fit /= -1 * (180 / math.pi)
chi2 = -2 * np.log(
rv.pdf((time_fit - time_gradients.T[0]) / time_gradients.T[1])
)
# Likelihood function will break if we find a NaN or a 0
prediction[np.isnan(prediction)] = 1e-8
prediction[prediction < 1e-8] = 1e-8
prediction *= self.template_scale
# Get likelihood that the prediction matched the camera image
like = neg_log_likelihood(self.image, prediction, self.spe, self.ped)
like[np.isnan(like)] = 1e9
like *= np.invert(ma.getmask(self.image))
like = ma.MaskedArray(like, mask=ma.getmask(self.image))
array_like = like
if goodness_of_fit:
return np.sum(
like - mean_poisson_likelihood_gaussian(prediction, self.spe, self.ped)
)
prior_pen = 0
# Add prior penalities if we have them
array_like += 1e-8
if "energy" in self.priors:
prior_pen += energy_prior(energy, index=-1)
if "xmax" in self.priors:
prior_pen += xmax_prior(energy, x_max)
array_like += prior_pen / float(len(array_like))
if self.array_return:
array_like = array_like.ravel()
return array_like[np.invert(ma.getmask(array_like))]
final_sum = array_like.sum()
if self.use_time_gradient:
final_sum += chi2.sum() # * np.sum(ma.getmask(self.image))
return final_sum
def get_likelihood_min(self, x):
"""Wrapper class around likelihood function for use with scipy
minimisers
Parameters
----------
x: ndarray
Array of minimisation parameters
Returns
-------
float: Likelihood value of test position
"""
val = self.get_likelihood(x[0], x[1], x[2], x[3], x[4], x[5])
return val
def get_likelihood_nlopt(self, x, grad):
"""Wrapper class around likelihood function for use with scipy
minimisers
Parameters
----------
x: ndarray
Array of minimisation parameters
Returns
-------
float: Likelihood value of test position
"""
val = self.get_likelihood(x[0], x[1], x[2], x[3], x[4], x[5])
return val
def set_event_properties(
self,
image,
time,
pixel_x,
pixel_y,
type_tel,
tel_x,
tel_y,
array_direction,
hillas,
):
"""The setter class is used to set the event properties within this
class before minimisation can take place. This simply copies a
bunch of useful properties to class members, so that we can
use them later without passing all this information around.
Parameters
----------
image: dict
Amplitude of pixels in camera images
time: dict
Time information per each pixel in camera images
pixel_x: dict
X position of pixels in nominal system
pixel_y: dict
Y position of pixels in nominal system
type_tel: dict
Type of telescope
tel_x: dict
X position of telescope in TiltedGroundFrame
tel_y: dict
Y position of telescope in TiltedGroundFrame
array_direction: SkyCoord[AltAz]
Array pointing direction in the AltAz Frame
hillas: dict
dictionary with telescope IDs as key and
HillasParametersContainer instances as values
Returns
-------
None
"""
# First store these parameters in the class so we can use them
# in minimisation For most values this is simply copying
self.image = image
self.tel_pos_x = np.zeros(len(tel_x))
self.tel_pos_y = np.zeros(len(tel_x))
self.ped = np.zeros(len(tel_x))
self.tel_types, self.tel_id = list(), list()
max_pix_x = 0
px, py, pa, pt = list(), list(), list(), list()
self.hillas_parameters = list()
# So here we must loop over the telescopes
for x, i in zip(tel_x, range(len(tel_x))):
px.append(pixel_x[x].to(u.rad).value)
if len(px[i]) > max_pix_x:
max_pix_x = len(px[i])
py.append(pixel_y[x].to(u.rad).value)
pa.append(image[x])
pt.append(time[x])
self.tel_pos_x[i] = tel_x[x].to(u.m).value
self.tel_pos_y[i] = tel_y[x].to(u.m).value
self.ped[i] = self.ped_table[type_tel[x]]
self.tel_types.append(type_tel[x])
self.tel_id.append(x)
self.hillas_parameters.append(hillas[x])
# Most interesting stuff is now copied to the class, but to remove our requirement
# for loops we must copy the pixel positions to an array with the length of the
# largest image
# First allocate everything
shape = (len(tel_x), max_pix_x)
self.pixel_x, self.pixel_y = ma.zeros(shape), ma.zeros(shape)
self.image, self.time, self.ped = (
ma.zeros(shape),
ma.zeros(shape),
ma.zeros(shape),
)
self.tel_types = np.array(self.tel_types)
# Copy everything into our masked arrays
for i in range(len(tel_x)):
array_len = len(px[i])
self.pixel_x[i][:array_len] = px[i]
self.pixel_y[i][:array_len] = py[i]
self.image[i][:array_len] = pa[i]
self.time[i][:array_len] = pt[i]
self.ped[i][:array_len] = self.ped_table[self.tel_types[i]]
# Set the image mask
mask = self.image == 0.0
self.pixel_x[mask], self.pixel_y[mask] = ma.masked, ma.masked
self.image[mask] = ma.masked
self.time[mask] = ma.masked
self.array_direction = array_direction
self.nominal_frame = NominalFrame(origin=self.array_direction)
# Finally run some functions to get ready for the event
self.get_hillas_mean()
self.initialise_templates(type_tel)
def reset_interpolator(self):
"""
This function is needed in order to reset some variables in the interpolator
at each new event. Without this reset, a new event starts with information
from the previous event.
"""
list(self.prediction.values())[0].reset()
def predict(self, shower_seed, energy_seed):
"""Predict method for the ImPACT reconstructor.
Used to calculate the reconstructed ImPACT shower geometry and energy.
Parameters
----------
shower_seed: ReconstructedShowerContainer
Seed shower geometry to be used in the fit
energy_seed: ReconstructedEnergyContainer
Seed energy to be used in fit
Returns
-------
ReconstructedShowerContainer, ReconstructedEnergyContainer:
"""
self.reset_interpolator()
horizon_seed = SkyCoord(az=shower_seed.az, alt=shower_seed.alt, frame=AltAz())
nominal_seed = horizon_seed.transform_to(self.nominal_frame)
source_x = nominal_seed.fov_lon.to_value(u.rad)
source_y = nominal_seed.fov_lat.to_value(u.rad)
ground = GroundFrame(x=shower_seed.core_x, y=shower_seed.core_y, z=0 * u.m)
tilted = ground.transform_to(
TiltedGroundFrame(pointing_direction=self.array_direction)
)
tilt_x = tilted.x.to(u.m).value
tilt_y = tilted.y.to(u.m).value
zenith = 90 * u.deg - self.array_direction.alt
seeds = spread_line_seed(
self.hillas_parameters,
self.tel_pos_x,
self.tel_pos_y,
source_x,
source_y,
tilt_x,
tilt_y,
energy_seed.energy.value,
shift_frac=[1],
)[0]
# Perform maximum likelihood fit
fit_params, errors, like = self.minimise(
params=seeds[0],
step=seeds[1],
limits=seeds[2],
minimiser_name=self.minimiser_name,
)
# Create a container class for reconstructed shower
shower_result = ReconstructedGeometryContainer()
# Convert the best fits direction and core to Horizon and ground systems and
# copy to the shower container
nominal = SkyCoord(
fov_lon=fit_params[0] * u.rad,
fov_lat=fit_params[1] * u.rad,
frame=self.nominal_frame,
)
horizon = nominal.transform_to(AltAz())
shower_result.alt, shower_result.az = horizon.alt, horizon.az
tilted = TiltedGroundFrame(
x=fit_params[2] * u.m,
y=fit_params[3] * u.m,
pointing_direction=self.array_direction,
)
ground = project_to_ground(tilted)
shower_result.core_x = ground.x
shower_result.core_y = ground.y
shower_result.is_valid = True
# Errors are currently not available, so copy NaN
shower_result.alt_uncert = np.nan
shower_result.az_uncert = np.nan
shower_result.core_uncert = np.nan
# Copy reconstructed Xmax
shower_result.h_max = fit_params[5] * self.get_shower_max(
fit_params[0],
fit_params[1],
fit_params[2],
fit_params[3],
zenith.to(u.rad).value,
)
shower_result.h_max *= np.cos(zenith)
shower_result.h_max_uncert = errors[5] * shower_result.h_max
shower_result.goodness_of_fit = like
# Create a container class for reconstructed energy
energy_result = ReconstructedEnergyContainer()
# Fill with results
energy_result.energy = fit_params[4] * u.TeV
energy_result.energy_uncert = errors[4] * u.TeV
energy_result.is_valid = True
return shower_result, energy_result
def minimise(self, params, step, limits, minimiser_name="minuit", max_calls=0):
"""
Parameters
----------
params: ndarray
Seed parameters for fit
step: ndarray
Initial step size in the fit
limits: ndarray
Fit bounds
minimiser_name: str
Name of minimisation method
max_calls: int
Maximum number of calls to minimiser
Returns
-------
tuple: best fit parameters and errors
"""
limits = np.asarray(limits)
if minimiser_name == "minuit":
self.min = Minuit(
self.get_likelihood,
print_level=1,
source_x=params[0],
error_source_x=step[0],
limit_source_x=limits[0],
fix_source_x=False,
source_y=params[1],
error_source_y=step[1],
limit_source_y=limits[1],
fix_source_y=False,
core_x=params[2],
error_core_x=step[2],
limit_core_x=limits[2],
fix_core_x=False,
core_y=params[3],
error_core_y=step[3],
limit_core_y=limits[3],
fix_core_y=False,
energy=params[4],
error_energy=step[4],
limit_energy=limits[4],
fix_energy=False,
x_max_scale=params[5],
error_x_max_scale=step[5],
limit_x_max_scale=limits[5],
fix_x_max_scale=False,
goodness_of_fit=False,
fix_goodness_of_fit=True,
errordef=1,
)
self.min.tol *= 1000
self.min.set_strategy(1)
self.min.migrad()
fit_params = self.min.values
errors = self.min.errors
return (
(
fit_params["source_x"],
fit_params["source_y"],
fit_params["core_x"],
fit_params["core_y"],
fit_params["energy"],
fit_params["x_max_scale"],
),
(
errors["source_x"],
errors["source_y"],
errors["core_x"],
errors["core_x"],
errors["energy"],
errors["x_max_scale"],
),
self.min.fval,
)
elif "nlopt" in minimiser_name:
import nlopt
opt = nlopt.opt(nlopt.LN_BOBYQA, 6)
opt.set_min_objective(self.get_likelihood_nlopt)
opt.set_initial_step(step)
opt.set_lower_bounds(np.asarray(limits))
import math
import gym
from gym import spaces, logger
from gym.utils import seeding
import numpy as np
REWARD_SCHEME = 1
class CartPoleCustomEnv(gym.Env):
metadata = {
'render.modes': ['human', 'rgb_array'],
'video.frames_per_second': 50
}
def __init__(self):
self.gravity = 9.8
self.masscart = 1.0
self.masspole = 0.1
self.total_mass = self.masspole + self.masscart
self.length = 0.5 # actually half the pole's length
self.polemass_length = self.masspole * self.length
self.force_mag = 10.0
# self.tau = 0.02 # seconds between state updates
self.tau = 0.1
self.kinematics_integrator = 'euler'
self.x_threshold = 0.5
self.x_dot_threshold = 2
# Cap max angular velocity
self.theta_dot_threshold = 8
high = np.array([
self.x_threshold,
self.x_dot_threshold,
1,
1,
self.theta_dot_threshold])
self.action_space = spaces.Box(low=-1, high=1, shape=(1,), dtype=np.float32)
self.observation_space = spaces.Box(-high, high, dtype=np.float32)
self.seed()
self.viewer = None
self.state = None
self.sum_sq_dist = 0
self.t = 0
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def step(self, action):
assert self.action_space.contains(action), "%r (%s) invalid" % (action, type(action))
state = self.state
x, x_dot, theta, theta_dot = state
force = self.force_mag * action[0]
if x <= -self.x_threshold and force < 0:
force = 0
if x >= self.x_threshold and force > 0:
force = 0
costheta = math.cos(theta)
sintheta = math.sin(theta)
temp = (force + self.polemass_length * theta_dot * theta_dot * sintheta) / self.total_mass
thetaacc = (self.gravity * sintheta - costheta * temp) / (self.length * (4.0/3.0 - self.masspole * costheta * costheta / self.total_mass))
xacc = temp - self.polemass_length * thetaacc * costheta / self.total_mass
if self.kinematics_integrator == 'euler':
x = x + self.tau * x_dot
x_dot = x_dot + self.tau * xacc
theta = theta + self.tau * theta_dot
theta_dot = theta_dot + self.tau * thetaacc
else: # semi-implicit euler
x_dot = x_dot + self.tau * xacc
x = x + self.tau * x_dot
theta_dot = theta_dot + self.tau * thetaacc
theta = theta + self.tau * theta_dot
if x < -self.x_threshold:
x = -self.x_threshold
x_dot = 0
if x > self.x_threshold:
x = self.x_threshold
x_dot = 0
theta_dot = np.clip(theta_dot, -self.theta_dot_threshold, self.theta_dot_threshold)
x_dot = np.clip(x_dot, -self.x_dot_threshold, self.x_dot_threshold)
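# Wrap the pole angle into [-pi, pi) so it stays continuous across full rotations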
theta = (theta + np.pi) % (2 * np.pi) - np.pi
self.state = (x, x_dot, theta, theta_dot)
if REWARD_SCHEME == 0:
if np.abs(theta) < np.pi / 10:
reward = 1
else:
reward = 0
elif REWARD_SCHEME == 1:
reward = - theta ** 2
else:
reward = 0
done = False
self.sum_sq_dist += theta ** 2
self.t += 1
info = {
'avg_sq_dist': self.sum_sq_dist / self.t,
}
return self.get_obs(), reward, done, info
def get_obs(self):
x, x_dot, theta, theta_dot = self.state
return np.array([x, x_dot, np.cos(theta), np.sin(theta), theta_dot])
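# Illustrative rollout sketch (not part of the original module). The environment's
# reset()/render() methods are presumably defined in code omitted here, so this sketch
# seeds the state directly; the helper name and the constant action are placeholders.
def _cartpole_example(num_steps=10):
    env = CartPoleCustomEnv()
    env.state = (0.0, 0.0, 0.1, 0.0)  # x, x_dot, theta, theta_dot
    total_reward = 0.0
    for _ in range(num_steps):
        obs, reward, done, info = env.step(np.array([0.5], dtype=np.float32))
        total_reward += reward
    return obs, total_reward, info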
import unittest
import numpy as np
import tensorflow as tf
from flowdec import fft_utils_np, fft_utils_tf, test_utils
from numpy.testing import assert_array_equal, assert_almost_equal
class TestFFTUtils(unittest.TestCase):
def _test_padding(self, d, k, actual):
def tf_fn():
dt = tf.constant(d, dtype=tf.float32)
kt = tf.constant(k, dtype=tf.float32)
return fft_utils_tf.get_fft_pad_dims(dt, kt)
tf_res = test_utils.exec_tf(tf_fn)
np_res = fft_utils_np.get_fft_pad_dims(d, k)
self.assertTrue(type(tf_res) is np.ndarray)
self.assertTrue(type(np_res) is np.ndarray)
self.assertTrue(np.array_equal(tf_res, np_res))
self.assertTrue(np.array_equal(tf_res, actual))
def test_padding(self):
"""Verify padding operations implemented as TensorFlow ops"""
self._test_padding(np.ones(1), np.ones(1), np.array([1]))
self._test_padding(np.ones((10)), np.ones((5)), np.array([14]))
self._test_padding(np.ones((10, 5)), np.ones((5, 3)), np.array([14, 7]))
self._test_padding(np.ones((10, 5, 3)), np.ones((5, 3, 1)), np.array([14, 7, 3]))
def _test_optimize_padding(self, d, k, mode, actual):
def tf_fn():
dt = tf.constant(d, dtype=tf.float32)
kt = tf.constant(k, dtype=tf.float32)
pad_dims = fft_utils_tf.get_fft_pad_dims(dt, kt)
return fft_utils_tf.optimize_dims(pad_dims, mode)
tf_res = test_utils.exec_tf(tf_fn)
np_res = fft_utils_np.optimize_dims(fft_utils_np.get_fft_pad_dims(d, k), mode)
self.assertTrue(type(tf_res) is np.ndarray)
self.assertTrue(type(np_res) is np.ndarray)
assert_array_equal(tf_res, np_res)
assert_array_equal(tf_res, actual)
def test_optimize_padding(self):
"""Verify "round-up" of dimensions to those optimal for FFT"""
self._test_optimize_padding(np.ones((1)), np.ones((1)), 'log2', np.array([1]))
self._test_optimize_padding(np.ones((1)), np.ones((1)), '2357', np.array([2]))
self._test_optimize_padding(np.ones((10, 5)), np.ones((6, 3)), 'log2', np.array([16, 8]))
self._test_optimize_padding(np.ones((10, 5)), np.ones((7, 4)), 'log2', np.array([16, 8]))
self._test_optimize_padding(np.ones((10, 5)), np.ones((8, 5)), 'log2', np.array([32, 16]))
self._test_optimize_padding(np.ones((355, 11)), np.ones((1, 1)), '2357', np.array([360, 12]))
self._test_optimize_padding(np.ones((10, 5)), np.ones((1, 1)), '2357', np.array([10, 5]))
self._test_optimize_padding(np.ones((10, 6)), np.ones((8, 6)), '2357', np.array([18, 12]))
self._test_optimize_padding(np.ones((10, 5)), np.ones((6, 3)), 'none', np.array([15, 7]))
self._test_optimize_padding(np.ones((10, 5)), np.ones((7, 4)), 'none', np.array([16, 8]))
self._test_optimize_padding(np.ones((10, 5)), np.ones((8, 5)), 'none', np.array([17, 9]))
self._test_optimize_padding(np.ones((10, 5, 3)), np.ones((8, 5, 1)), 'log2', np.array([32, 16, 4]))
self._test_optimize_padding(np.ones((10, 5, 3)), np.ones((8, 5, 1)), '2357', np.array([18, 9, 3]))
# Test invalid padding mode
with self.assertRaises(ValueError):
self._test_optimize_padding(np.ones((1)), np.ones((1)), 'invalid_mode_name', np.array([1]))
def _test_shift(self, x, tf_shift_fn, np_shift_fn):
def tf_fn():
return tf_shift_fn(tf.constant(x))
x_shift_actual = test_utils.exec_tf(tf_fn)
x_shift_expect = np_shift_fn(x)
assert_array_equal(x_shift_actual, x_shift_expect)
def _test_all_shifts(self, tf_shift_fn, np_shift_fn):
# 1D Cases
self._test_shift(np.arange(99), tf_shift_fn, np_shift_fn)
self._test_shift(np.arange(100), tf_shift_fn, np_shift_fn)
# 2D Cases
x = np.reshape(np.arange(50), (25, 2))
self._test_shift(x, tf_shift_fn, np_shift_fn)
# 3D Cases
self._test_shift(np.reshape(np.arange(125), (5, 5, 5)), tf_shift_fn, np_shift_fn)
self._test_shift(np.reshape(np.arange(60), (3, 4, 5)), tf_shift_fn, np_shift_fn)
def test_fftshift(self):
self._test_all_shifts(fft_utils_tf.fftshift, np.fft.fftshift)
def test_ifftshift(self):
self._test_all_shifts(fft_utils_tf.ifftshift, np.fft.ifftshift)
def _test_convolution(self, d, k, mode, actual=None):
def tf_fn():
dt = tf.constant(d, dtype=tf.float32)
kt = tf.constant(k, dtype=tf.float32)
# Determine FFT dimensions and functions
pad_dims = fft_utils_tf.get_fft_pad_dims(dt, kt)
optim_dims = fft_utils_tf.optimize_dims(pad_dims, mode)
fft_fwd, fft_rev = fft_utils_tf.get_fft_tf_fns(dt.shape.ndims)
# Run convolution of data 'd' with kernel 'k'
dk_fft = fft_fwd(kt, fft_length=optim_dims)
dconv = fft_utils_tf.convolve(dt, dk_fft, optim_dims, fft_fwd, fft_rev)
# Extract patch from result matching dimensions of original data array
return fft_utils_tf.extract(dconv, tf.shape(dt), pad_dims)
tf_res = test_utils.exec_tf(tf_fn)
np_res = fft_utils_np.convolve(d, k)
assert_almost_equal(tf_res, np_res, decimal=3)
self.assertEquals(tf_res.shape, np_res.shape)
if actual is not None:
assert_array_equal(tf_res, actual)
def test_convolution(self):
#######################
# Verified Test Cases #
#######################
# * Validate that Numpy == TensorFlow == Manually Defined Expectation
for mode in fft_utils_tf.OPTIMAL_PAD_MODES:
# 1D Cases
actual = [1.]
self._test_convolution(np.ones((1)), np.ones((1)), mode, actual)
actual = [1., 2., 2.]
self._test_convolution(np.ones((3)), np.ones((2)), mode, actual)
# 2D Case
# FFT convolution should result in "lower-right" side
# sums of products of data with kernel values
# See [here](http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.472.2396&rep=rep1&type=pdf)
# for some decent visual explanations of this
actual = np.array([
[1., 2., 2.],
[2., 4., 4.],
[2., 4., 4.]
])
self._test_convolution(np.ones((3, 3)), np.ones((2, 2)), mode, actual)
###########################
# Corroborated Test Cases #
###########################
# * Validate that Numpy == TensorFlow results only
# Test 1-3D cases with larger, rectangular dimensions and unit length axes
for shape in [
((1), (1)),
((1, 1), (1, 1)),
((100, 100), (10, 10)),
((100, 5), (10, 15)),
((7, 9), (19, 3)),
((3, 1), (1, 3)),
((2, 1, 2), (2, 1, 2)),
((1, 1, 1), (1, 1, 1))
]:
self._test_convolution(np.ones(shape[0]), np.ones(shape[1]), mode)
###############
# Error Cases #
###############
# * Validate conditions resulting in errors
mode = fft_utils_tf.OPTIMAL_PAD_MODES[0]
with self.assertRaises(ValueError):
# >= 4D should fail
self._test_convolution(np.ones((1, 1, 1, 1)), np.ones((1, 1, 1, 1)), mode)
with self.assertRaises(ValueError):
# Dimension mismatch for data and kernel should fail
self._test_convolution(np.ones((1, 1)), np.ones((1, 1, 1)), mode)
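# Standalone numpy sketch of the FFT-convolution identity exercised by these tests
# (illustrative only, independent of flowdec/TensorFlow): zero-pad both arrays to the
# "full" convolution size, multiply their spectra, invert, and crop back to the data
# shape. The 3x3-ones / 2x2-ones case reproduces the `actual` matrix used above.
def _fft_convolve_example():
    d = np.ones((3, 3))
    k = np.ones((2, 2))
    full_shape = np.array(d.shape) + np.array(k.shape) - 1  # (4, 4)
    conv = np.real(np.fft.ifft2(np.fft.fft2(d, full_shape) * np.fft.fft2(k, full_shape)))
    return conv[:d.shape[0], :d.shape[1]]  # -> [[1, 2, 2], [2, 4, 4], [2, 4, 4]]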
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# License: BSD-3 (https://tldrlegal.com/license/bsd-3-clause-license-(revised))
# Copyright (c) 2016-2021, <NAME>; <NAME>
# All rights reserved.
# =============================================================================
# DOCS
# =============================================================================
"""Data abstraction layer.
This module defines the DecisionMatrix object, which internally encompasses
the alternative matrix, weights and objectives (MIN, MAX) of the criteria.
"""
# =============================================================================
# IMPORTS
# =============================================================================
import abc
import enum
import functools
import numpy as np
import pandas as pd
from pandas.io.formats import format as pd_fmt
import pyquery as pq
from .plot import DecisionMatrixPlotter
from ..utils import Bunch, doc_inherit
# =============================================================================
# CONSTANTS
# =============================================================================
class Objective(enum.Enum):
"""Representation of criteria objectives (Minimize, Maximize)."""
#: Internal representation of minimize criteria
MIN = -1
#: Internal representation of maximize criteria
MAX = 1
# INTERNALS ===============================================================
_MIN_STR = "\u25bc"
_MAX_STR = "\u25b2"
#: Another way to name the maximization criteria.
_MAX_ALIASES = frozenset(
[
MAX,
_MAX_STR,
max,
np.max,
np.nanmax,
np.amax,
"max",
"maximize",
"+",
">",
]
)
#: Other ways to name the minimization criteria.
_MIN_ALIASES = frozenset(
[
MIN,
_MIN_STR,
min,
np.min,
np.nanmin,
np.amin,
"min",
"minimize",
"<",
"-",
]
)
# CUSTOM CONSTRUCTOR ======================================================
@classmethod
def construct_from_alias(cls, alias):
"""Return the alias internal representation of the objective."""
if isinstance(alias, cls):
return alias
if isinstance(alias, str):
alias = alias.lower()
if alias in cls._MAX_ALIASES.value:
return cls.MAX
if alias in cls._MIN_ALIASES.value:
return cls.MIN
raise ValueError(f"Invalid criteria objective {alias}")
# METHODS =================================================================
def __str__(self):
"""Convert the objective to an string."""
return self.name
def to_string(self):
"""Return the printable representation of the objective."""
if self.value in Objective._MIN_ALIASES.value:
return Objective._MIN_STR.value
if self.value in Objective._MAX_ALIASES.value:
return Objective._MAX_STR.value
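# Example (sketch): any alias listed above resolves to the same member, e.g.
# Objective.construct_from_alias("max"), Objective.construct_from_alias(max)
# and Objective.construct_from_alias(np.max) all return Objective.MAX.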
# =============================================================================
# DATA CLASS
# =============================================================================
class DecisionMatrix:
"""Representation of all data needed in the MCDA analysis.
This object gathers everything necessary to represent a data set used
in MCDA:
- An alternative matrix where each row is an alternative and each
column is of a different criteria.
- An optimization objective (Minimize, Maximize) for each criterion.
- A weight for each criterion.
- An independent type of data for each criterion
DecisionMatrix has two main forms of construction:
1. Use the default constructor of the DecisionMatrix class, providing a
:py:class:`pandas.DataFrame` where the index is the alternatives
and the columns are the criteria; an iterable with the objectives with
the same number of elements as the dataframe has columns/criteria;
and an iterable with the weights, also with the same number of elements
as criteria.
.. code-block:: pycon
>>> import pandas as pd
>>> from skcriteria import DecisionMatrix, mkdm
>>> data_df = pd.DataFrame(
... [[1, 2, 3], [4, 5, 6]],
... index=["A0", "A1"],
... columns=["C0", "C1", "C2"]
... )
>>> objectives = [min, max, min]
>>> weights = [1, 1, 1]
>>> dm = DecisionMatrix(data_df, objectives, weights)
>>> dm
C0[▼ 1.0] C1[▲ 1.0] C2[▼ 1.0]
A0 1 2 3
A1 4 5 6
[2 Alternatives x 3 Criteria]
2. Use the classmethod `DecisionMatrix.from_mcda_data` which requests the
data in a more natural way for this type of analysis
(the weights, the criteria / alternative names, and the data types
are optional)
>>> DecisionMatrix.from_mcda_data(
... [[1, 2, 3], [4, 5, 6]],
... [min, max, min],
... [1, 1, 1])
C0[▼ 1.0] C1[▲ 1.0] C2[▼ 1.0]
A0 1 2 3
A1 4 5 6
[2 Alternatives x 3 Criteria]
For simplicity a function is offered at the module level analogous to
``from_mcda_data`` called ``mkdm`` (make decision matrix).
Parameters
----------
data_df: :py:class:`pandas.DataFrame`
Dataframe where the index is the alternatives and the columns
are the criteria.
objectives: :py:class:`numpy.ndarray`
An iterable with the optimization sense (objective) of every
criterion (you can use any alias defined in Objective);
it must have the same length as the columns/criteria of data_df.
weights: :py:class:`numpy.ndarray`
An iterable with the weights also with the same amount of elements
as criteria.
"""
def __init__(self, data_df, objectives, weights):
self._data_df = (
data_df.copy()
if isinstance(data_df, pd.DataFrame)
else pd.DataFrame(data_df)
)
self._objectives = np.asarray(objectives, dtype=object)
self._weights = np.asanyarray(weights, dtype=float)
if not (
len(self._data_df.columns)
== len(self._weights)
== len(self._objectives)
):
raise ValueError(
"The number of weights, and objectives must be equal to the "
"number of criteria (number of columns in data_df)"
)
# CUSTOM CONSTRUCTORS =====================================================
@classmethod
def from_mcda_data(
cls,
matrix,
objectives,
weights=None,
alternatives=None,
criteria=None,
dtypes=None,
):
"""Create a new DecisionMatrix object.
This method receives the parts into which the matrix of alternatives
is conceptually divided.
Parameters
----------
matrix: Iterable
The matrix of alternatives. Where every row is an alternative
and every column is a criteria.
objectives: Iterable
The array with the sense of optimality of every
criteria. You can use any alias provided by the objective class.
weights: Iterable or None (default ``None``)
Optional weights of the criteria. If it is ``None``, all the criteria
are weighted with 1.
alternatives: Iterable or None (default ``None``)
Optional names of the alternatives. If it is ``None``,
all the alternatives are named "A[n]", where n is the number of
the row of `matrix` starting at 0.
criteria: Iterable or None (default ``None``)
Optional names of the criteria. If it is ``None``,
all the criteria are named "C[m]", where m is the number of
the column of `matrix` starting at 0.
dtypes: Iterable or None (default ``None``)
Optional types of the criteria. If it is ``None``, the types are inferred
automatically by pandas.
Returns
-------
:py:class:`DecisionMatrix`
A new decision matrix.
Example
-------
>>> DecisionMatrix.from_mcda_data(
... [[1, 2, 3], [4, 5, 6]],
... [min, max, min],
... [1, 1, 1])
C0[▼ 1.0] C1[▲ 1.0] C2[▼ 1.0]
A0 1 2 3
A1 4 5 6
[2 Alternatives x 3 Criteria]
For simplicity a function is offered at the module level analogous to
``from_mcda_data`` called ``mkdm`` (make decision matrix).
Notes
-----
This functionality generates more sensible defaults than using the
constructor of the DecisionMatrix class, but is slower.
"""
# first we need the number of alternatives and criteria
try:
a_number, c_number = np.shape(matrix)
except ValueError:
matrix_ndim = np.ndim(matrix)
raise ValueError(
f"'matrix' must have 2 dimensions, found {matrix_ndim} instead"
)
alternatives = np.asarray(
[f"A{idx}" for idx in range(a_number)]
if alternatives is None
else alternatives
)
if len(alternatives) != a_number:
raise ValueError(f"'alternatives' must have {a_number} elements")
criteria = np.asarray(
[f"C{idx}" for idx in range(c_number)]
if criteria is None
else criteria
)
if len(criteria) != c_number:
raise ValueError(f"'criteria' must have {c_number} elements")
weights = np.asarray(np.ones(c_number) if weights is None else weights)
data_df = pd.DataFrame(matrix, index=alternatives, columns=criteria)
if dtypes is not None and len(dtypes) != c_number:
raise ValueError(f"'dtypes' must have {c_number} elements")
elif dtypes is not None:
dtypes = {c: dt for c, dt in zip(criteria, dtypes)}
data_df = data_df.astype(dtypes)
return cls(data_df=data_df, objectives=objectives, weights=weights)
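# Example (sketch): dtypes can be forced per criterion, e.g.
# DecisionMatrix.from_mcda_data([[1, 2], [4, 5]], [min, max], dtypes=[float, int])
# stores the first criterion as float and the second as int.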
# MCDA ====================================================================
# These properties are useful for accessing the underlying data
# interactively. Except for alternatives and criteria, all other
# properties expose the data as dataframes or series
@property
def alternatives(self):
"""Names of the alternatives."""
return self._data_df.index.to_numpy()
@property
def criteria(self):
"""Names of the criteria."""
return self._data_df.columns.to_numpy()
@property
def weights(self):
"""Weights of the criteria."""
return pd.Series(
self._weights,
dtype=float,
index=self._data_df.columns,
name="Weights",
)
@property
def objectives(self):
"""Objectives of the criteria as ``Objective`` instances."""
return pd.Series(
[Objective.construct_from_alias(a) for a in self._objectives],
index=self._data_df.columns,
name="Objectives",
)
# READ ONLY PROPERTIES ====================================================
@property
def iobjectives(self):
"""Objectives of the criteria as ``int``.
- Minimize = Objective.MIN.value
- Maximize = Objective.MAX.value
"""
return pd.Series(
[o.value for o in self.objectives],
dtype=np.int8,
index=self._data_df.columns,
)
@property
def matrix(self):
"""Alternatives matrix as pandas DataFrame.
The matrix excludes weights and objectives.
If you want to create a DataFrame with objectives and weights, use
``DecisionMatrix.to_dataframe()``
"""
return self._data_df.copy()
@property
def dtypes(self):
"""Dtypes of the criteria."""
return self._data_df.dtypes.copy()
@property
def plot(self):
"""Plot accessor."""
return DecisionMatrixPlotter(self)
# UTILITIES ===============================================================
def copy(self, **kwargs):
"""Return a deep copy of the current DecisionMatrix.
This method is also useful for manually modifying the values of the
DecisionMatrix object.
Parameters
----------
kwargs :
The same parameters supported by ``from_mcda_data()``. The values
provided replace the existing ones in the object to be copied.
Returns
-------
:py:class:`DecisionMatrix`
A new decision matrix.
"""
dmdict = self.to_dict()
dmdict.update(kwargs)
return self.from_mcda_data(**dmdict)
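# Example (sketch): keyword overrides produce a modified clone, e.g.
# dm.copy(weights=[2, 1, 1]) keeps the matrix and objectives but replaces
# the weights.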
def to_dataframe(self):
"""Convert the entire DecisionMatrix into a dataframe.
The objectives and weights are added as rows before the alternatives.
Returns
-------
:py:class:`pd.DataFrame`
A Decision matrix as pandas DataFrame.
Example
-------
.. code-block:: pycon
>>> dm = DecisionMatrix.from_mcda_data(
... [[1, 2, 3], [4, 5, 6]],
... [min, max, min],
... [1, 1, 1])
>>> dm
C0[▼ 1.0] C1[▲ 1.0] C2[▼ 1.0]
A0 1 2 3
A1 4 5 6
>>> dm.to_dataframe()
C0 C1 C2
objectives MIN MAX MIN
weights 1.0 1.0 1.0
A0 1 2 3
A1 4 5 6
"""
data = np.vstack((self.objectives, self.weights, self.matrix))
index = np.hstack((["objectives", "weights"], self.alternatives))
df = pd.DataFrame(data, index=index, columns=self.criteria, copy=True)
return df
def to_dict(self):
"""Return a dict representation of the data.
All the values are represented as numpy arrays.
"""
return {
"matrix": self.matrix.to_numpy(),
"objectives": self.iobjectives.to_numpy(),
"weights": self.weights.to_numpy(),
"dtypes": self.dtypes.to_numpy(),
"alternatives": self.alternatives,
"criteria": self.criteria,
}
def describe(self, **kwargs):
"""Generate descriptive statistics.
Descriptive statistics include those that summarize the central
tendency, dispersion and shape of a dataset's distribution,
excluding ``NaN`` values.
Parameters
----------
Same parameters as ``pandas.DataFrame.describe()``.
Returns
-------
``pandas.DataFrame``
Summary statistics of DecisionMatrix provided.
"""
return self._data_df.describe(**kwargs)
# CMP =====================================================================
@property
def shape(self):
"""Return a tuple with (number_of_alternatives, number_of_criteria).
dm.shape <==> np.shape(dm)
"""
return np.shape(self._data_df)
def __len__(self):
"""Return the number ot alternatives.
dm.__len__() <==> len(dm).
"""
return len(self._data_df)
def equals(self, other):
"""Return True if the decision matrix are equal.
This method calls `DecisionMatrix.aquals` whitout tolerance.
Parameters
----------
other : :py:class:`skcriteria.DecisionMatrix`
Other instance to compare.
Returns
-------
equals : :py:class:`bool`
Returns True if the two decision matrices are equal.
See Also
--------
aequals, :py:func:`numpy.isclose`, :py:func:`numpy.all`,
:py:func:`numpy.any`, :py:func:`numpy.equal`,
:py:func:`numpy.allclose`.
"""
return self.aequals(other, 0, 0, False)
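# Example (sketch): equals() is the strict comparison, while
# dm.aequals(other, rtol=1e-3, atol=1e-6) tolerates small numeric differences
# in the weights and in the alternative matrix.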
def aequals(self, other, rtol=1e-05, atol=1e-08, equal_nan=False):
"""Return True if the decision matrix are equal within a tolerance.
The tolerance values are positive, typically very small numbers. The
relative difference (`rtol` * abs(`b`)) and the absolute difference
`atol` are added together to compare against the absolute difference
between `a` and `b`.
NaNs are treated as equal if they are in the same place and if
``equal_nan=True``. Infs are treated as equal if they are in the same
place and of the same sign in both arrays.
The comparison proceeds as follows:
- If ``other`` is the same object return ``True``.
- If ``other`` is not an instance of 'DecisionMatrix', or has a different
shape, 'criteria', 'alternatives' or 'objectives', return ``False``.
- Next check the 'weights' and the matrix itself using the provided
tolerance.
Parameters
----------
other : :py:class:`skcriteria.DecisionMatrix`
Other instance to compare.
rtol : float
The relative tolerance parameter
(see Notes in :py:func:`numpy.allclose`).
atol : float
The absolute tolerance parameter
(see Notes in :py:func:`numpy.allclose`).
equal_nan : bool
Whether to compare NaN's as equal. If True, NaN's in dm will be
considered equal to NaN's in `other` in the output array.
Returns
-------
aequals : :py:class:`bool`
Returns True if the two decision matrices are equal within the given
tolerance; False otherwise.
See Also
--------
equals, :py:func:`numpy.isclose`, :py:func:`numpy.all`,
:py:func:`numpy.any`, :py:func:`numpy.equal`,
:py:func:`numpy.allclose`.
"""
return (self is other) or (
isinstance(other, DecisionMatrix)
and np.shape(self) == np.shape(other)
and np.array_equal(self.criteria, other.criteria)
and np.array_equal(self.alternatives, other.alternatives)
and np.array_equal(self.objectives, other.objectives)
and np.allclose(self.weights, other.weights, rtol=rtol, atol=atol, equal_nan=equal_nan)
and np.allclose(self.matrix, other.matrix, rtol=rtol, atol=atol, equal_nan=equal_nan)
)
import numpy as np
import matplotlib.pyplot as plt
def load_data():
# Load the housing price data
datafile = '/home/aistudio/data/housing.data'
data = np.fromfile(datafile, sep=' ')
# Reshape the raw data and split it into training and test sets
data = data.reshape([-1, 14])
offset = int(data.shape[0]*0.8)
train_data = data[:offset]
# Normalize each feature
maximums, minimums, avgs = train_data.max(axis=0), train_data.min(axis=0), train_data.sum(axis=0) / train_data.shape[0]
for i in range(14):
data[:, i] = (data[:, i] - avgs[i]) / (maximums[i] - minimums[i])
train_data = data[:offset]
test_data = data[offset:]
return train_data, test_data
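# Example usage (sketch): with the standard 506-row housing data file the
# 80/20 split above yields roughly 404 training rows and 102 test rows,
# each holding 13 normalized features plus the target value:
# train_data, test_data = load_data()
# x, y = train_data[:, :-1], train_data[:, -1:]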
class Network(object):
def __init__(self, num_of_weight):
np.random.seed(0)
# randn returns samples drawn from the standard normal distribution, with shape [num_of_weight, 1]
self.w = np.random.randn(num_of_weight, 1)
self.b = 0.
def forword(self, x): # forward pass
z = np.dot(x, self.w) + self.b
return z
def loss(self, z, y): # compute the loss
error = z - y
cost = error * error
cost = np.mean(cost)
return cost
def gradient(self, x, y): # compute the gradients
z = self.forword(x)
gradient_w = np.mean((z - y)*x, axis=0)
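# --- Hedged sketch of the usual remainder of this tutorial-style class ---
# (not part of the original snippet; follows the common linear-regression
#  example this code appears to be based on)
# gradient_w = gradient_w[:, np.newaxis]
# gradient_b = np.mean(z - y)
# return gradient_w, gradient_b
#
# def update(self, gradient_w, gradient_b, eta=0.01):
#     # plain gradient-descent step
#     self.w = self.w - eta * gradient_w
#     self.b = self.b - eta * gradient_b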
"""
author: <NAME>
"""
import numpy as np
import time
import copy
from numba import njit
from numba.typed import List
from gglasso.solver.ggl_helper import phiplus, prox_od_1norm, prox_2norm, prox_rank_norm
from gglasso.helper.ext_admm_helper import check_G
def ext_ADMM_MGL(S, lambda1, lambda2, reg , Omega_0, G,\
X0 = None, X1 = None, tol = 1e-5 , rtol = 1e-4, stopping_criterion = 'boyd',\
rho= 1., max_iter = 1000, verbose = False, measure = False, latent = False, mu1 = None):
"""
This is an ADMM algorithm for solving the Group Graphical Lasso problem
where not all instances have the same number of dimensions, i.e. some variables are present in some instances and not in others.
A group sparsity penalty is applied to all pairs of variables present in multiple instances.
IMPORTANT: As the arrays are non-conforming in dimensions here, we operate on dictionaries with keys 1,..,K (as int) and each value is an array of shape :math:`(p_k,p_k)`.
If ``latent=False``, this function solves
.. math::
\min_{\Omega,\Theta,\Lambda} \sum_{k=1}^K - \log \det(\Omega^{(k)}) + \mathrm{Tr}(S^{(k)}\Omega^{(k)}) + \sum_{k=1}^K \lambda_1 ||\Theta^{(k)}||_{1,od}
+ \sum_{l} \lambda_2 \\beta_l ||\Lambda_{[l]}||_2
s.t. \quad \Omega^{(k)} = \Theta^{(k)} \quad k=1,\dots,K
\quad \quad \Lambda^{(k)} = \Theta^{(k)} \quad k=1,\dots,K
where l indexes the groups of overlapping variables and :math:`\Lambda_{[l]}` is the array of all respective components.
To account for differing group sizes we multiply with :math:`\\beta_l`, the square root of the group size.
If ``latent=True``, this function solves
.. math::
\min_{\Omega,\Theta,\Lambda,L} \sum_{k=1}^K - \log \det(\Omega^{(k)}) + \mathrm{Tr}(S^{(k)}\Omega^{(k)}) + \sum_{k=1}^K \lambda_1 ||\Theta^{(k)}||_{1,od}
+ \sum_{l} \lambda_2 \\beta_l ||\Lambda_{[l]}||_2 +\sum_{k=1}^{K} \mu_{1,k} \|L^{(k)}\|_{\star}
s.t. \quad \Omega^{(k)} = \Theta^{(k)} - L^{(k)} \quad k=1,\dots,K
\quad \quad \Lambda^{(k)} = \Theta^{(k)} \quad k=1,\dots,K
Note:
* Typically, ``sol['Omega']`` is positive definite and ``sol['Theta']`` is sparse.
* We use scaled ADMM, i.e. X0 and X1 are the scaled (with 1/rho) dual variables for the equality constraints.
Parameters
----------
S : dict
empirical covariance matrices. S should have keys 1,..,K (as integers) and S[k] contains the :math:`(p_k,p_k)`-array of the empirical cov. matrix of the k-th instance.
Each S[k] needs to be symmetric and positive semidefinite.
lambda1 : float, positive
sparsity regularization parameter.
lambda2 : float, positive
group sparsity regularization parameter.
reg : str
so far only Group Graphical Lasso is available, hence choose 'GGL'.
Omega_0 : dict
starting point for the Omega variable. Should be of same form as S. If no better starting point is available, choose
Omega_0[k] = np.eye(p_k) for k=1,...,K
G : array
bookkeeping array which contains information about where the respective entries for each group can be found.
X0 : dict, optional
starting point for the X0 variable. If not specified, it is set to zeros.
X1 : dict, optional
starting point for the X1 variable. If not specified, it is set to zeros.
rho : float, positive, optional
step size paramater for the augmented Lagrangian in ADMM. The default is 1. Tune this parameter for optimal performance.
max_iter : int, optional
maximum number of iterations. The default is 1000.
tol : float, positive, optional
tolerance for the primal residual. See "Distributed Optimization and Statistical Learning via the Alternating Direction Method of Multipliers", Boyd et al. for details.
The default is 1e-5.
rtol : float, positive, optional
tolerance for the dual residual. The default is 1e-4.
stopping_criterion : str, optional
* 'boyd': Stopping criterion after Boyd et al.
* 'kkt': KKT residual is chosen as stopping criterion. This is computationally expensive to compute.
The default is 'boyd'.
verbose : boolean, optional
verbosity of the solver. The default is False.
measure : boolean, optional
turn on/off measurements of runtime per iteration. The default is False.
latent : boolean, optional
Solve the GGL problem with or without latent variables (see above for the exact formulations).
The default is False.
mu1 : float, positive, optional
low-rank regularization parameter, possibly different for each instance k=1,..,K. Only needs to be specified if latent=True.
Returns
-------
sol : dict
contains the solution, i.e. Omega, Theta, X0, X1 (and L if latent=True) after termination. All elements are dictionaries with keys 1,..,K and (p_k,p_k)-arrays as values.
info : dict
status and measurement information from the solver.
"""
K = len(S.keys())
p = np.zeros(K, dtype= int)
for k in np.arange(K):
p[k] = S[k].shape[0]
if type(lambda1) == np.float64 or type(lambda1) == float:
lambda1 = lambda1*np.ones(K)
if latent:
if type(mu1) == np.float64 or type(mu1) == float:
mu1 = mu1*np.ones(K)
assert mu1 is not None
assert np.all(mu1 > 0)
assert min(lambda1.min(), lambda2) > 0
assert reg in ['GGL']
check_G(G, p)
assert rho > 0, "ADMM penalization parameter must be positive."
# initialize
Omega_t = Omega_0.copy()
Theta_t = Omega_0.copy()
L_t = dict()
for k in np.arange(K):
L_t[k] = np.zeros((p[k],p[k]))
# helper and dual variables
Lambda_t = Omega_0.copy()
Z_t = dict()
if X0 is None:
X0_t = dict()
for k in np.arange(K):
X0_t[k] = np.zeros((p[k],p[k]))
else:
X0_t = X0.copy()
if X1 is None:
X1_t = dict()
for k in np.arange(K):
X1_t[k] = np.zeros((p[k],p[k]))
else:
X1_t = X1.copy()
runtime = np.zeros(max_iter)
residual = np.zeros(max_iter)
status = ''
if verbose:
print("------------ADMM Algorithm for Multiple Graphical Lasso----------------")
if stopping_criterion == 'boyd':
hdr_fmt = "%4s\t%10s\t%10s\t%10s\t%10s"
out_fmt = "%4d\t%10.4g\t%10.4g\t%10.4g\t%10.4g"
print(hdr_fmt % ("iter", "r_t", "s_t", "eps_pri", "eps_dual"))
elif stopping_criterion == 'kkt':
hdr_fmt = "%4s\t%10s"
out_fmt = "%4d\t%10.4g"
print(hdr_fmt % ("iter", "kkt residual"))
##################################################################
### MAIN LOOP STARTS
##################################################################
for iter_t in np.arange(max_iter):
if measure:
start = time.time()
# Omega Update
Omega_t_1 = Omega_t.copy()
for k in np.arange(K):
W_t = Theta_t[k] - L_t[k] - X0_t[k] - (1/rho) * S[k]
eigD, eigQ = np.linalg.eigh(W_t)
Omega_t[k] = phiplus(beta = 1/rho, D = eigD, Q = eigQ)
# Theta Update
for k in np.arange(K):
V_t = (Omega_t[k] + L_t[k] + X0_t[k] + Lambda_t[k] - X1_t[k]) * 0.5
Theta_t[k] = prox_od_1norm(V_t, lambda1[k]/(2*rho))
#L Update
if latent:
for k in np.arange(K):
C_t = Theta_t[k] - X0_t[k] - Omega_t[k]
C_t = (C_t.T + C_t)/2
eigD, eigQ = np.linalg.eigh(C_t)
L_t[k] = prox_rank_norm(C_t, mu1[k]/rho, D = eigD, Q = eigQ)
# Lambda Update
Lambda_t_1 = Lambda_t.copy()
for k in np.arange(K):
Z_t[k] = Theta_t[k] + X1_t[k]
Lambda_t = prox_2norm_G(Z_t, G, lambda2/rho)
# X Update
for k in np.arange(K):
X0_t[k] += Omega_t[k] - Theta_t[k] + L_t[k]
X1_t[k] += Theta_t[k] - Lambda_t[k]
if measure:
end = time.time()
runtime[iter_t] = end-start
# Stopping condition
if stopping_criterion == 'boyd':
r_t,s_t,e_pri,e_dual = ADMM_stopping_criterion(Omega_t, Omega_t_1, Theta_t, L_t, Lambda_t, Lambda_t_1, X0_t, X1_t,\
S, rho, p, tol, rtol, latent)
residual[iter_t] = max(r_t,s_t)
if verbose:
print(out_fmt % (iter_t,r_t,s_t,e_pri,e_dual))
if (r_t <= e_pri) and (s_t <= e_dual):
status = 'optimal'
break
elif stopping_criterion == 'kkt':
eta_A = kkt_stopping_criterion(Omega_t, Theta_t, L_t, Lambda_t, dict((k, rho*v) for k,v in X0_t.items()), dict((k, rho*v) for k,v in X1_t.items()),\
S , G, lambda1, lambda2, reg, latent, mu1)
residual[iter_t] = eta_A
if verbose:
print(out_fmt % (iter_t,eta_A))
if eta_A <= tol:
status = 'optimal'
break
##################################################################
### MAIN LOOP FINISHED
##################################################################
# retrieve status (partially optimal or max iter)
if status != 'optimal':
if stopping_criterion == 'boyd':
if (r_t <= e_pri):
status = 'primal optimal'
elif (s_t <= e_dual):
status = 'dual optimal'
else:
status = 'max iterations reached'
else:
status = 'max iterations reached'
print(f"ADMM terminated after {iter_t+1} iterations with status: {status}.")
for k in np.arange(K):
assert abs(Omega_t[k].T - Omega_t[k]).max() <= 1e-5, "Solution is not symmetric"
assert abs(Theta_t[k].T - Theta_t[k]).max() <= 1e-5, "Solution is not symmetric"
assert abs(L_t[k].T - L_t[k]).max() <= 1e-5, "Solution is not symmetric"
D = np.linalg.eigvalsh(Theta_t[k]-L_t[k])
if D.min() <= 1e-5:
print("WARNING: Theta (Theta-L resp.) may be not positive definite -- increase accuracy!")
if latent:
D = np.linalg.eigvalsh(L_t[k])
if D.min() <= -1e-5:
print("WARNING: L may be not positive semidefinite -- increase accuracy!")
sol = {'Omega': Omega_t, 'Theta': Theta_t, 'L': L_t, 'X0': X0_t, 'X1': X1_t}
if measure:
info = {'status': status , 'runtime': runtime[:iter_t+1], 'residual': residual[:iter_t+1]}
else:
info = {'status': status}
return sol, info
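# Example (sketch, with hypothetical inputs): solving a two-instance GGL problem
# starting from identity matrices; S and G must be prepared as described above.
# Omega_0 = {k: np.eye(S[k].shape[0]) for k in S.keys()}
# sol, info = ext_ADMM_MGL(S, lambda1=0.05, lambda2=0.01, reg='GGL',
#                          Omega_0=Omega_0, G=G, latent=False)
# Theta = sol['Theta']  # sparse precision matrix estimates, keyed by instance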
def ADMM_stopping_criterion(Omega, Omega_t_1, Theta, L, Lambda, Lambda_t_1, X0, X1, S, rho, p, eps_abs, eps_rel, latent=False):
# X0, X1 are inputed as scaled dual vars., this is accounted for by factor rho in e_dual
K = len(S.keys())
if not latent:
for k in np.arange(K):
assert np.all(L[k]==0)
dim = ((p ** 2 + p) / 2).sum() # total number of elements in the upper triangles (including diagonals)
D1 = np.sqrt(sum([np.linalg.norm(Omega[k])**2 + np.linalg.norm(Lambda[k])**2 for k in np.arange(K)]))
"""
Plotting routines for visualizing performance of regression and classification models
"""
import os
import matplotlib
import sys
import pandas as pd
import numpy as np
import seaborn as sns
import umap
import sklearn.metrics as metrics
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from mpl_toolkits.mplot3d import Axes3D
from atomsci.ddm.pipeline import perf_data as perf
#matplotlib.style.use('ggplot')
matplotlib.rc('xtick', labelsize=12)
matplotlib.rc('ytick', labelsize=12)
matplotlib.rc('axes', labelsize=12)
#------------------------------------------------------------------------------------------------------------------------
def plot_pred_vs_actual(MP, epoch_label='best', threshold=None, error_bars=False, pdf_dir=None):
"""
Plot predicted vs actual values from a trained regression model for each split subset (train,
valid, and test).
Args:
MP (`ModelPipeline`): Pipeline object for a model that was trained in the current Python session.
epoch_label (str): Label for training epoch to draw predicted values from. Currently 'best' is the only allowed value.
threshold (float): Threshold activity value to mark on plot with dashed lines.
error_bars (bool): If true and if uncertainty estimates are included in the model predictions, draw error bars
at +- 1 SD from the predicted y values.
pdf_dir (str): If given, output the plots to a PDF file in the given directory.
Returns:
None
"""
params = MP.params
# For now restrict this to regression models.
# TODO: Implement a version of plot_pred_vs_actual for classification models.
if params.prediction_type != 'regression':
MP.log.error("plot_pred_vs_actual is currently for regression models only.")
return
wrapper = MP.model_wrapper
if pdf_dir is not None:
pdf_path = os.path.join(pdf_dir, '%s_%s_%s_%s_pred_vs_actual.pdf' % (params.dataset_name, params.model_type,
params.featurizer, params.splitter))
pdf = PdfPages(pdf_path)
if MP.run_mode == 'training':
subsets = ['train', 'valid', 'test']
else:
subsets = ['full']
dataset_name = MP.data.dataset_name
splitter = MP.params.splitter
model_type = MP.params.model_type
featurizer = MP.params.featurizer
tasks = MP.params.response_cols
for subset in subsets:
perf_data = wrapper.get_perf_data(subset, epoch_label)
pred_results = perf_data.get_prediction_results()
y_actual = perf_data.get_real_values()
ids, y_pred, y_std = perf_data.get_pred_values()
r2 = pred_results['r2_score']
if perf_data.num_tasks > 1:
r2_scores = pred_results['task_r2_scores']
else:
r2_scores = [r2]
if MP.params.model_type != "hybrid":
for t in range(MP.params.num_model_tasks):
fig, ax = plt.subplots(figsize=(12.0,12.0))
title = '%s\n%s split %s model on %s features\n%s subset predicted vs actual %s, R^2 = %.3f' % (
dataset_name, splitter, model_type, featurizer, subset, tasks[t], r2_scores[t])
# Force axes to have same scale
ymin = min(min(y_actual[:,t]), min(y_pred[:,t]))
ymax = max(max(y_actual[:,t]), max(y_pred[:,t]))
ax.set_xlim(ymin, ymax)
ax.set_ylim(ymin, ymax)
if error_bars and y_std is not None:
# Draw error bars
ax.errorbar(y_actual[:,t], y_pred[:,t], y_std[:,t], c='blue', marker='o', alpha=0.4, linestyle='')
else:
plt.scatter(y_actual[:,t], y_pred[:,t], s=9, c='blue', marker='o', alpha=0.4)
ax.set_xlabel('Observed value')
ax.set_ylabel('Predicted value')
# Draw an identity line
ax.plot([ymin,ymax], [ymin,ymax], c='forestgreen', linestyle='--')
if threshold is not None:
plt.axvline(threshold, color='r', linestyle='--')
plt.axhline(threshold, color='r', linestyle='--')
ax.set_title(title, fontdict={'fontsize' : 10})
if pdf_dir is not None:
pdf.savefig(fig)
else:
fig, ax = plt.subplots(1,2, figsize=(20.0,10.0))
title = '%s\n%s split %s model on %s features\n%s subset predicted vs actual %s, R^2 = %.3f' % (
dataset_name, splitter, model_type, featurizer, subset, "Ki/XC50", r2_scores[0])
pos_ki = np.where(np.isnan(y_actual[:, 1]))[0]
pos_bind = np.where(~np.isnan(y_actual[:, 1]))[0]
y_pred_ki = y_pred[pos_ki, 0]
y_real_ki = y_actual[pos_ki, 0]
y_pred_bind = y_pred[pos_bind, 0]
y_real_bind = y_actual[pos_bind, 0]
ki_ymin = min(min(y_real_ki), min(y_pred_ki)) - 0.5
ki_ymax = max(max(y_real_ki), max(y_pred_ki)) + 0.5
ax[0].set_xlim(ki_ymin, ki_ymax)
ax[0].set_ylim(ki_ymin, ki_ymax)
ax[0].scatter(y_real_ki, y_pred_ki, s=9, c='blue', marker='o', alpha=0.4)
ax[0].set_xlabel('Observed value')
ax[0].set_ylabel('Predicted value')
ax[0].plot([ki_ymin,ki_ymax], [ki_ymin,ki_ymax], c='forestgreen', linestyle='--')
if threshold is not None:
ax[0].axvline(threshold, color='r', linestyle='--')
ax[0].axhline(threshold, color='r', linestyle='--')
ax[0].set_title(title, fontdict={'fontsize' : 10})
title = '%s\n%s split %s model on %s features\n%s subset predicted vs actual %s, R^2 = %.3f' % (
dataset_name, splitter, model_type, featurizer, subset, "Binding/Inhibition", r2_scores[1])
bind_ymin = min(min(y_real_bind), min(y_pred_bind)) - 0.1
bind_ymax = max(max(y_real_bind), max(y_pred_bind)) + 0.1
ax[1].set_xlim(bind_ymin, bind_ymax)
ax[1].set_ylim(bind_ymin, bind_ymax)
ax[1].scatter(y_real_bind, y_pred_bind, s=9, c='blue', marker='o', alpha=0.4)
ax[1].set_xlabel('Observed value')
ax[1].set_ylabel('Predicted value')
ax[1].plot([bind_ymin,bind_ymax], [bind_ymin,bind_ymax], c='forestgreen', linestyle='--')
if threshold is not None:
ax[1].axvline(threshold, color='r', linestyle='--')
ax[1].axhline(threshold, color='r', linestyle='--')
ax[1].set_title(title, fontdict={'fontsize' : 10})
if pdf_dir is not None:
pdf.close()
MP.log.info("Wrote plot to %s" % pdf_path)
#------------------------------------------------------------------------------------------------------------------------
def plot_perf_vs_epoch(MP, pdf_dir=None):
"""
Plot the current NN model's standard performance metric (r2_score or roc_auc_score) vs epoch number for the training,
validation and test subsets. If the model was trained with k-fold CV, plot shading for the validation set out to += 1 SD from the mean
score metric values, and plot the training and test set metrics from the final model retraining rather than the cross-validation
phase. Make a second plot showing the validation set model choice score used for ranking training epochs and other hyperparameters
against epoch number.
Args:
MP (`ModelPipeline`): Pipeline object for a model that was trained in the current Python session.
pdf_dir (str): If given, output the plots to a PDF file in the given directory.
Returns:
None
"""
wrapper = MP.model_wrapper
if 'train_epoch_perfs' not in wrapper.__dict__:
raise ValueError("plot_perf_vs_epoch() can only be called for NN models")
num_epochs = wrapper.num_epochs_trained
best_epoch = wrapper.best_epoch
num_folds = len(MP.data.train_valid_dsets)
if num_folds > 1:
subset_perf = dict(training = wrapper.train_epoch_perfs[:best_epoch+1], validation = wrapper.valid_epoch_perfs[:num_epochs],
test = wrapper.test_epoch_perfs[:best_epoch+1])
subset_std = dict(training = wrapper.train_epoch_perf_stds[:best_epoch+1], validation = wrapper.valid_epoch_perf_stds[:num_epochs],
test = wrapper.test_epoch_perf_stds[:best_epoch+1])
else:
subset_perf = dict(training = wrapper.train_epoch_perfs[:num_epochs], validation = wrapper.valid_epoch_perfs[:num_epochs],
test = wrapper.test_epoch_perfs[:num_epochs])
subset_std = dict(training = wrapper.train_epoch_perf_stds[:num_epochs], validation = wrapper.valid_epoch_perf_stds[:num_epochs],
test = wrapper.test_epoch_perf_stds[:num_epochs])
model_scores = wrapper.model_choice_scores[:num_epochs]
model_score_type = MP.params.model_choice_score_type
if MP.params.prediction_type == 'regression':
perf_label = 'R-squared'
else:
perf_label = 'ROC AUC'
if pdf_dir is not None:
pdf_path = os.path.join(pdf_dir, '%s_perf_vs_epoch.pdf' % os.path.basename(MP.params.output_dir))
pdf = PdfPages(pdf_path)
subset_colors = dict(training='blue', validation='forestgreen', test='red')
subset_shades = dict(training='deepskyblue', validation='lightgreen', test='hotpink')
fig, ax = plt.subplots(figsize=(10,10))
title = '%s dataset\n%s vs epoch for %s %s model on %s features with %s split\nBest validation set performance at epoch %d' % (
MP.params.dataset_name, perf_label, MP.params.model_type, MP.params.prediction_type,
MP.params.featurizer, MP.params.splitter, best_epoch)
for subset in ['training', 'validation', 'test']:
epoch = list(range(len(subset_perf[subset])))
ax.plot(epoch, subset_perf[subset], color=subset_colors[subset], label=subset)
# Add shading to show variance across folds during cross-validation
if (num_folds > 1) and (subset == 'validation'):
ax.fill_between(epoch, subset_perf[subset] + subset_std[subset], subset_perf[subset] - subset_std[subset],
alpha=0.3, facecolor=subset_shades[subset], linewidth=0)
plt.axvline(best_epoch, color='forestgreen', linestyle='--')
ax.set_xlabel('Epoch')
ax.set_ylabel(perf_label)
ax.set_title(title, fontdict={'fontsize' : 12})
legend = ax.legend(loc='lower right')
if pdf_dir is not None:
pdf.savefig(fig)
# Now plot the score used for choosing the best epoch and model params
fig, ax = plt.subplots(figsize=(10,10))
title = '%s dataset\n%s vs epoch for %s %s model on %s features with %s split\nBest validation set performance at epoch %d' % (
MP.params.dataset_name, model_score_type, MP.params.model_type, MP.params.prediction_type,
MP.params.featurizer, MP.params.splitter, best_epoch)
epoch = list(range(num_epochs))
ax.plot(epoch, model_scores, color=subset_colors['validation'])
plt.axvline(best_epoch, color='red', linestyle='--')
ax.set_xlabel('Epoch')
if model_score_type in perf.loss_funcs:
score_label = "negative %s" % model_score_type
else:
score_label = model_score_type
ax.set_ylabel(score_label)
ax.set_title(title, fontdict={'fontsize' : 12})
if pdf_dir is not None:
pdf.savefig(fig)
pdf.close()
MP.log.info("Wrote plot to %s" % pdf_path)
#------------------------------------------------------------------------------------------------------------------------
def _get_perf_curve_data(MP, epoch_label, curve_type='ROC'):
"""
Common code for ROC and precision-recall curves. Returns true classes and active class probabilities
for each training/test data subset.
Args:
MP (`ModelPipeline`): Pipeline object for a model that was trained in the current Python session.
epoch_label (str): Label for training epoch to draw predicted values from. Currently 'best' is the only allowed value.
threshold (float): Threshold activity value to mark on plot with dashed lines.
error_bars (bool): If true and if uncertainty estimates are included in the model predictions, draw error bars
at +- 1 SD from the predicted y values.
pdf_dir (str): If given, output the plots to a PDF file in the given directory.
Returns:
None
"""
if MP.params.prediction_type != 'classification':
MP.log.error("Can only plot %s curve for classification models" % curve_type)
return {}
if MP.run_mode == 'training':
subsets = ['train', 'valid', 'test']
else:
subsets = ['full']
wrapper = MP.model_wrapper
curve_data = {}
for subset in subsets:
perf_data = wrapper.get_perf_data(subset, epoch_label)
true_classes = perf_data.get_real_values()
ids, pred_classes, class_probs, prob_stds = perf_data.get_pred_values()
ntasks = class_probs.shape[1]
nclasses = class_probs.shape[-1]
if nclasses != 2:
MP.log.error("%s curve plot is only supported for binary classifiers" % curve_type)
return {}
prob_active = class_probs[:,:,1]
roc_aucs = [metrics.roc_auc_score(true_classes[:,i], prob_active[:,i], average='macro')
for i in range(ntasks)]
prc_aucs = [metrics.average_precision_score(true_classes[:,i], prob_active[:,i], average='macro')
for i in range(ntasks)]
curve_data[subset] = dict(true_classes=true_classes, prob_active=prob_active, roc_aucs=roc_aucs, prc_aucs=prc_aucs)
return curve_data
#------------------------------------------------------------------------------------------------------------------------
def plot_ROC_curve(MP, epoch_label='best', pdf_dir=None):
"""
Plot ROC curves for a classification model.
Args:
MP (`ModelPipeline`): Pipeline object for a model that was trained in the current Python session.
epoch_label (str): Label for training epoch to draw predicted values from. Currently 'best' is the only allowed value.
pdf_dir (str): If given, output the plots to a PDF file in the given directory.
Returns:
None
"""
params = MP.params
curve_data = _get_perf_curve_data(MP, epoch_label, 'ROC')
if len(curve_data) == 0:
return
if MP.run_mode == 'training':
# Draw overlapping ROC curves for train, valid and test sets
subsets = ['train', 'valid', 'test']
else:
subsets = ['full']
if pdf_dir is not None:
pdf_path = os.path.join(pdf_dir, '%s_%s_model_%s_features_%s_split_ROC_curves.pdf' % (
params.dataset_name, params.model_type, params.featurizer, params.splitter))
pdf = PdfPages(pdf_path)
subset_colors = dict(train='blue', valid='forestgreen', test='red', full='purple')
# For multitask, do a separate figure for each task
ntasks = curve_data[subsets[0]]['prob_active'].shape[1]
for i in range(ntasks):
fig, ax = plt.subplots(figsize=(10,10))
title = '%s dataset\nROC curve for %s %s classifier on %s features with %s split' % (
params.dataset_name, params.response_cols[i],
params.model_type, params.featurizer, params.splitter)
for subset in subsets:
fpr, tpr, thresholds = metrics.roc_curve(curve_data[subset]['true_classes'][:,i],
curve_data[subset]['prob_active'][:,i])
roc_auc = curve_data[subset]['roc_aucs'][i]
ax.step(fpr, tpr, color=subset_colors[subset], label="%s: AUC = %.3f" % (subset, roc_auc))
ax.set_xlabel('False positive rate')
ax.set_ylabel('True positive rate')
ax.set_title(title, fontdict={'fontsize' : 12})
legend = ax.legend(loc='lower right')
if pdf_dir is not None:
pdf.savefig(fig)
if pdf_dir is not None:
pdf.close()
MP.log.info("Wrote plot to %s" % pdf_path)
#------------------------------------------------------------------------------------------------------------------------
def plot_prec_recall_curve(MP, epoch_label='best', pdf_dir=None):
"""
Plot precision-recall curves for a classification model.
Args:
MP (`ModelPipeline`): Pipeline object for a model that was trained in the current Python session.
epoch_label (str): Label for training epoch to draw predicted values from. Currently 'best' is the only allowed value.
pdf_dir (str): If given, output the plots to a PDF file in the given directory.
Returns:
None
"""
params = MP.params
curve_data = _get_perf_curve_data(MP, epoch_label, 'precision-recall')
if len(curve_data) == 0:
return
if MP.run_mode == 'training':
# Draw overlapping PR curves for train, valid and test sets
subsets = ['train', 'valid', 'test']
else:
subsets = ['full']
if pdf_dir is not None:
pdf_path = os.path.join(pdf_dir, '%s_%s_model_%s_features_%s_split_PRC_curves.pdf' % (
params.dataset_name, params.model_type, params.featurizer, params.splitter))
pdf = PdfPages(pdf_path)
subset_colors = dict(train='blue', valid='forestgreen', test='red', full='purple')
# For multitask, do a separate figure for each task
ntasks = curve_data[subsets[0]]['prob_active'].shape[1]
for i in range(ntasks):
fig, ax = plt.subplots(figsize=(10,10))
title = '%s dataset\nPrecision-recall curve for %s %s classifier on %s features with %s split' % (
params.dataset_name, params.response_cols[i],
params.model_type, params.featurizer, params.splitter)
for subset in subsets:
precision, recall, thresholds = metrics.precision_recall_curve(curve_data[subset]['true_classes'][:,i],
curve_data[subset]['prob_active'][:,i])
prc_auc = curve_data[subset]['prc_aucs'][i]
ax.step(recall, precision, color=subset_colors[subset], label="%s: AUC = %.3f" % (subset, prc_auc))
ax.set_xlabel('Recall')
ax.set_ylabel('Precision')
ax.set_title(title, fontdict={'fontsize' : 12})
legend = ax.legend(loc='lower right')
if pdf_dir is not None:
pdf.savefig(fig)
if pdf_dir is not None:
pdf.close()
MP.log.info("Wrote plot to %s" % pdf_path)
#------------------------------------------------------------------------------------------------------------------------
def plot_umap_feature_projections(MP, ndim=2, num_neighbors=20, min_dist=0.1,
fit_to_train=True,
dist_metric='euclidean', dist_metric_kwds={},
target_weight=0, random_seed=17, pdf_dir=None):
"""
Projects features of a model's input dataset using UMAP to 2D or 3D coordinates and draws a scatterplot.
Shape-codes plot markers to indicate whether the associated compound was in the training, validation or
test set. For classification models, also uses the marker shape to indicate whether the compound's class was correctly
predicted, and uses color to indicate whether the true class was active or inactive. For regression models, uses
the marker color to indicate the discrepancy between the predicted and actual values.
Args:
MP (`ModelPipeline`): Pipeline object for a model that was trained in the current Python session.
ndim (int): Number of dimensions (2 or 3) to project features into.
num_neighbors (int): Number of nearest neighbors used by UMAP for manifold approximation.
Larger values give a more global view of the data, while smaller values preserve more local detail.
min_dist (float): Parameter used by UMAP to set minimum distance between projected points.
fit_to_train (bool): If true (the default), fit the UMAP projection to the training set feature vectors only.
Otherwise, fit it to the entire dataset.
dist_metric (str): Name of metric to use for initial distance matrix computation. Check UMAP documentation
for supported values. The metric should be appropriate for the type of features used in the model (fingerprints
or descriptors); note that `jaccard` is equivalent to Tanimoto distance for ECFP fingerprints.
dist_metric_kwds (dict): Additional key-value pairs used to parameterize dist_metric; see the UMAP documentation.
In particular, dist_metric_kwds['p'] specifies the power/exponent for the Minkowski metric.
target_weight (float): Weighting factor determining balance between activities and feature values in determining topology
of projected points. A weight of zero prioritizes the feature vectors; weight = 1 prioritizes the activity values,
so that compounds with the same activity tend to be clustered together.
random_seed (int): Seed for random number generator.
pdf_dir (str): If given, output the plot to a PDF file in the given directory.
Returns:
None
"""
if (ndim != 2) and (ndim != 3):
MP.log.error('Only 2D and 3D visualizations are supported by plot_umap_feature_projections()')
return
params = MP.params
if params.featurizer == 'graphconv':
MP.log.error('plot_umap_feature_projections() does not support GraphConv models.')
return
split_strategy = params.split_strategy
if pdf_dir is not None:
pdf_path = os.path.join(pdf_dir, '%s_%s_model_%s_split_umap_%s_%dd_projection.pdf' % (
params.dataset_name, params.model_type, params.splitter, params.featurizer, ndim))
pdf = PdfPages(pdf_path)
dataset = MP.data.dataset
ncmpds = dataset.y.shape[0]
ntasks = dataset.y.shape[1]
nfeat = dataset.X.shape[1]
cmpd_ids = {}
features = {}
# TODO: Need an option to pass in a training (or combined training & validation) dataset, in addition
# to a dataset on which we ran predictions with the same model, to display as training and test data.
if split_strategy == 'train_valid_test':
subsets = ['train', 'valid', 'test']
train_dset, valid_dset = MP.data.train_valid_dsets[0]
features['train'] = train_dset.X
cmpd_ids['train'] = train_dset.ids
else:
# For k-fold split, every compound in combined training & validation set is in the validation
# set for 1 fold and in the training set for the k-1 others.
subsets = ['valid', 'test']
features['train'] = np.empty((0,nfeat))
cmpd_ids['train'] = []
valid_dset = MP.data.combined_train_valid_data
features['valid'] = valid_dset.X
cmpd_ids['valid'] = valid_dset.ids
test_dset = MP.data.test_dset
features['test'] = test_dset.X
cmpd_ids['test'] = test_dset.ids
all_features = np.concatenate([features[subset] for subset in subsets], axis=0)
if fit_to_train:
if split_strategy == 'train_valid_test':
fit_features = features['train']
else:
fit_features = features['valid']
else:
fit_features = all_features
epoch_label = 'best'
pred_vals = {}
real_vals = {}
for subset in subsets:
perf_data = MP.model_wrapper.get_perf_data(subset, epoch_label)
y_actual = perf_data.get_real_values()
if MP.params.prediction_type == 'classification':
ids, y_pred, class_probs, y_std = perf_data.get_pred_values()
else:
ids, y_pred, y_std = perf_data.get_pred_values()
# Have to get predictions and real values in same order as in dataset subset
pred_dict = dict([(id, y_pred[i,:]) for i, id in enumerate(ids)])
real_dict = dict([(id, y_actual[i,:]) for i, id in enumerate(ids)])
pred_vals[subset] = np.concatenate([pred_dict[id] for id in cmpd_ids[subset]], axis=0)
real_vals[subset] = np.concatenate([real_dict[id] for id in cmpd_ids[subset]], axis=0)
all_actual = np.concatenate([real_vals[subset] for subset in subsets], axis=0)
import numpy as np
import cv2
from renderer import plot_sdf
SHAPE_PATH = '../shapes/shape/'
SHAPE_IMAGE_PATH = '../shapes/shape_images/'
TRAIN_DATA_PATH = '../datasets/train/'
VAL_DATA_PATH = '../datasets/val/'
SAMPLED_IMAGE_PATH = '../datasets/sampled_images/'
HEATMAP_PATH = '../results/true_heatmaps/'
CANVAS_SIZE = np.array([800, 800]) # Keep two dimensions the same
SHAPE_COLOR = (255, 255, 255)
POINT_COLOR = (127, 127, 127)
# The Shape and Circle classes are adapted from
# https://github.com/Oktosha/DeepSDF-explained/blob/master/deepSDF-explained.ipynb
class Shape:
def sdf(self, p):
pass
class Circle(Shape):
def __init__(self, c, r):
self.c = c
self.r = r
def set_c(self, c):
self.c = c
def set_r(self, r):
self.r = r
def sdf(self, p):
return np.linalg.norm(p - self.c) - self.r
# The CircleSampler class is adapted from
# https://github.com/mintpancake/2d-sdf-net
class CircleSampler(object):
def __init__(self, circle_name, circle_path, circle_image_path, sampled_image_path, train_data_path, val_data_path,
split_ratio=0.8, show_image=False):
self.circle_name = circle_name
self.circle_path = circle_path
self.circle_image_path = circle_image_path
self.sampled_image_path = sampled_image_path
self.train_data_path = train_data_path
self.val_data_path = val_data_path
self.circle = Circle([0, 0], 0)
self.sampled_data = np.array([])
self.train_data = np.array([])
self.val_data = np.array([])
self.split_ratio = split_ratio
self.show_image = show_image
def run(self, show_image):
self.load()
self.sample()
self.save(show_image)
# load the coordinate of center and the radius of the circle for sampling
def load(self):
f = open(f'{self.circle_path}{self.circle_name}.txt', 'r')
line = f.readline()
x, y, radius = map(lambda n: np.double(n), line.strip('\n').split(' '))
center = np.array([x, y])
f.close()
self.circle.set_c(center)
self.circle.set_r(radius)
def sample(self, m=5000, n=2000, var=(0.025, 0.0025)):
"""
:param m: number of points sampled on the boundary
each boundary point generates 2 samples
:param n: number of points sampled uniformly in the canvas
:param var: two Gaussian variances used to transform boundary points
"""
# Do uniform sampling
# Use polar coordinate
r = np.random.uniform(0, 0.5, size=(n, 1))
t = np.random.uniform(0, 2 * np.pi, size=(n, 1))
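# --- Hedged sketch of the remaining sampling steps (not part of the original snippet) ---
# The polar samples above would be converted to Cartesian points, e.g.
# points = np.concatenate((r * np.cos(t), r * np.sin(t)), axis=1) + self.circle.c
# Boundary samples are then drawn on the circle and perturbed with the two Gaussian
# variances in `var`; each sampled point is finally labelled with self.circle.sdf(point).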
#! python
""" sspals: python tools for analysing single-shot positron annihilation lifetime spectra
Copyright (c) 2015-2018, UNIVERSITY COLLEGE LONDON
@author: <NAME>
"""
from __future__ import division
from math import floor, ceil
from scipy import integrate
import numpy as np
import pandas as pd
from .chmx import chmx
from .cfd import cfd_1d
# ----------------
# delayed fraction
# ----------------
def integral(arr, dt, t0, lim_a, lim_b, **kwargs):
''' Simpson's rule integration of arr (1D) between t=lim_a and t=lim_b.
args:
arr # numpy.array(dims=1)
dt # float64
t0 # float64
lim_a # float64
lim_b # float64
kwargs:
corr = True # apply boundary corrections
debug = False # fail quietly, or not if True
return:
float64
'''
corr = kwargs.get('corr', True)
debug = kwargs.get('debug', False)
assert lim_b > lim_a, "lim_b must be greater than lim_a"
# fractional index
frac_a = (lim_a + t0) / dt
frac_b = (lim_b + t0) / dt
# nearest index
ix_a = round(frac_a)
ix_b = round(frac_b)
try:
int_ab = integrate.simps(arr[int(ix_a) : int(ix_b)], None, dt)
if corr:
# boundary corrections (trap rule)
corr_a = dt * (ix_a - frac_a) * (arr[int(floor(frac_a))] + arr[int(ceil(frac_a))]) / 2.0
corr_b = dt * (ix_b - frac_b) * (arr[int(floor(frac_b))] + arr[int(ceil(frac_b))]) / 2.0
int_ab = int_ab + corr_a - corr_b
except IndexError:
if not debug:
# fail quietly
int_ab = np.nan
else:
raise
except:
raise
return int_ab
def dfrac(arr, dt, t0, limits, **kwargs):
''' Calculate the delayed fraction (DF) (int B->C/ int A->C) for arr (1D).
args:
arr # numpy.array(dims=1)
dt # float64
t0 # float64
limits # (A, B, C)
kwargs:
corr = True # apply boundary corrections
debug = False # fail quietly, or not if True
return:
AC :: float64, BC :: float64, DF :: float64
'''
int_ac = integral(arr, dt, t0, limits[0], limits[2], **kwargs)
int_bc = integral(arr, dt, t0, limits[1], limits[2], **kwargs)
df = int_bc / int_ac
return int_ac, int_bc, df
def sspals_1d(arr, dt, limits, **kwargs):
''' Calculate the trigger time (cfd) and delayed fraction (BC / AC) for
arr (1D).
args:
arr # numpy.array(dims=1)
dt # float64
limits # (A, B, C)
kwargs:
cfd_scale=0.8
cfd_offset=1.4e-8
cfd_threshold=0.04
corr=True
debug=False
return:
(t0, AC, BC, DF)
'''
t0 = cfd_1d(arr, dt, **kwargs)
if not np.isnan(t0):
int_ac, int_bc, df = dfrac(arr, dt, t0, limits, **kwargs)
return (t0, int_ac, int_bc, df)
else:
return (np.nan, np.nan, np.nan, np.nan)
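# Example (sketch, with hypothetical window limits): analyse a single trace
# sampled at 1 ns per point.
# t0, AC, BC, DF = sspals_1d(trace, dt=1e-9, limits=(-1.0e-8, 3.5e-8, 6.0e-7))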
def sspals(arr, dt, limits, axis=1, **kwargs):
''' Apply sspals_1D to each row of arr (2D).
args:
arr # numpy.array(dims=2)
dt # float64
limits # (A, B, C)
axis=1 # int
kwargs:
cfd_scale=0.8
cfd_offset=1.4e-8
cfd_threshold=0.04
corr=True # apply boundary corrections
dropna=False # remove empty rows
debug=False # nans in output? try debug=True.
name=None # pd.DataFrame.index.name
return:
pandas.DataFrame(columns=[t0, AC, BC, DF])
'''
name = kwargs.get('name', None)
dropna = kwargs.get('dropna', False)
data = np.apply_along_axis(sspals_1d, axis, arr, dt, limits, **kwargs)
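# --- Hedged sketch of the remaining steps (not part of the original snippet) ---
# The per-row results would typically be wrapped into a labelled DataFrame:
# df = pd.DataFrame(data, columns=['t0', 'AC', 'BC', 'DF'])
# df.index.name = name
# if dropna:
#     df = df.dropna(how='all')
# return df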
import numpy as np
from xml.etree.ElementTree import Element
from napari.layers import Shapes
def test_empty_shapes():
shp = Shapes()
assert shp.dims.ndim == 2
def test_rectangles():
"""Test instantiating Shapes layer with a random 2D rectangles."""
# Test a single four corner rectangle
shape = (1, 4, 2)
np.random.seed(0)
data = 20 * np.random.random(shape)
layer = Shapes(data)
assert layer.nshapes == shape[0]
assert np.all(layer.data[0] == data[0])
assert layer.ndim == shape[2]
assert np.all([s == 'rectangle' for s in layer.shape_types])
# Test multiple four corner rectangles
shape = (10, 4, 2)
data = 20 * np.random.random(shape)
layer = Shapes(data)
assert layer.nshapes == shape[0]
assert np.all([np.all(ld == d) for ld, d in zip(layer.data, data)])
assert layer.ndim == shape[2]
assert np.all([s == 'rectangle' for s in layer.shape_types])
# Test a single two corner rectangle, which gets converted into four
# corner rectangle
shape = (1, 2, 2)
data = 20 * np.random.random(shape)
layer = Shapes(data)
assert layer.nshapes == 1
assert len(layer.data[0]) == 4
assert layer.ndim == shape[2]
assert np.all([s == 'rectangle' for s in layer.shape_types])
# Test multiple two corner rectangles
shape = (10, 2, 2)
data = 20 * np.random.random(shape)
layer = Shapes(data)
assert layer.nshapes == shape[0]
assert np.all([len(ld) == 4 for ld in layer.data])
assert layer.ndim == shape[2]
assert np.all([s == 'rectangle' for s in layer.shape_types])
def test_rectangles_roundtrip():
"""Test a full roundtrip with rectangles data."""
shape = (10, 4, 2)
np.random.seed(0)
data = 20 * np.random.random(shape)
layer = Shapes(data)
new_layer = Shapes(layer.data)
assert np.all([nd == d for nd, d in zip(new_layer.data, layer.data)])
def test_integer_rectangle():
"""Test instantiating rectangles with integer data."""
shape = (10, 2, 2)
np.random.seed(1)
data = np.random.randint(20, size=shape)
layer = Shapes(data)
assert layer.nshapes == shape[0]
assert np.all([len(ld) == 4 for ld in layer.data])
assert layer.ndim == shape[2]
assert np.all([s == 'rectangle' for s in layer.shape_types])
def test_negative_rectangle():
"""Test instantiating rectangles with negative data."""
shape = (10, 4, 2)
np.random.seed(0)
data = 20 * np.random.random(shape) - 10
layer = Shapes(data)
assert layer.nshapes == shape[0]
assert np.all([np.all(ld == d) for ld, d in zip(layer.data, data)])
assert layer.ndim == shape[2]
assert np.all([s == 'rectangle' for s in layer.shape_types])
def test_empty_rectangle():
"""Test instantiating rectangles with empty data."""
shape = (0, 0, 2)
np.random.seed(0)
data = 20 * np.random.random(shape)
layer = Shapes(data)
assert layer.nshapes == shape[0]
assert np.all([np.all(ld == d) for ld, d in zip(layer.data, data)])
assert layer.ndim == shape[2]
assert np.all([s == 'rectangle' for s in layer.shape_types])
def test_3D_rectangles():
"""Test instantiating Shapes layer with 3D planar rectangles."""
# Test a single four corner rectangle
np.random.seed(0)
planes = np.tile(np.arange(10).reshape((10, 1, 1)), (1, 4, 1))
corners = np.random.uniform(0, 10, size=(10, 4, 2))
data = np.concatenate((planes, corners), axis=2)
layer = Shapes(data)
assert layer.nshapes == len(data)
assert np.all([np.all(ld == d) for ld, d in zip(layer.data, data)])
assert layer.ndim == 3
assert np.all([s == 'rectangle' for s in layer.shape_types])
def test_ellipses():
"""Test instantiating Shapes layer with a random 2D ellipses."""
# Test a single four corner ellipses
shape = (1, 4, 2)
np.random.seed(0)
data = 20 * np.random.random(shape)
layer = Shapes(data, shape_type='ellipse')
assert layer.nshapes == shape[0]
assert np.all(layer.data[0] == data[0])
assert layer.ndim == shape[2]
assert np.all([s == 'ellipse' for s in layer.shape_types])
# Test multiple four corner ellipses
shape = (10, 4, 2)
np.random.seed(0)
data = 20 * np.random.random(shape)
layer = Shapes(data, shape_type='ellipse')
assert layer.nshapes == shape[0]
assert np.all([np.all(ld == d) for ld, d in zip(layer.data, data)])
assert layer.ndim == shape[2]
assert np.all([s == 'ellipse' for s in layer.shape_types])
# Test a single ellipse center radii, which gets converted into four
# corner ellipse
shape = (1, 2, 2)
np.random.seed(0)
data = 20 * np.random.random(shape)
layer = Shapes(data, shape_type='ellipse')
assert layer.nshapes == 1
assert len(layer.data[0]) == 4
assert layer.ndim == shape[2]
assert np.all([s == 'ellipse' for s in layer.shape_types])
# Test multiple center radii ellipses
shape = (10, 2, 2)
np.random.seed(0)
data = 20 * np.random.random(shape)
layer = Shapes(data, shape_type='ellipse')
assert layer.nshapes == shape[0]
assert np.all([len(ld) == 4 for ld in layer.data])
assert layer.ndim == shape[2]
assert np.all([s == 'ellipse' for s in layer.shape_types])
def test_ellipses_roundtrip():
"""Test a full roundtrip with ellipss data."""
shape = (10, 4, 2)
np.random.seed(0)
data = 20 * np.random.random(shape)
layer = Shapes(data, shape_type='ellipse')
new_layer = Shapes(layer.data, shape_type='ellipse')
assert np.all([nd == d for nd, d in zip(new_layer.data, layer.data)])
def test_lines():
"""Test instantiating Shapes layer with a random 2D lines."""
# Test a single two end point line
shape = (1, 2, 2)
np.random.seed(0)
data = 20 * np.random.random(shape)
layer = Shapes(data, shape_type='line')
assert layer.nshapes == shape[0]
assert np.all(layer.data[0] == data[0])
assert layer.ndim == shape[2]
assert np.all([s == 'line' for s in layer.shape_types])
# Test multiple lines
shape = (10, 2, 2)
np.random.seed(0)
data = 20 * np.random.random(shape)
layer = Shapes(data, shape_type='line')
assert layer.nshapes == shape[0]
assert np.all([np.all(ld == d) for ld, d in zip(layer.data, data)])
assert layer.ndim == shape[2]
assert np.all([s == 'line' for s in layer.shape_types])
def test_lines_roundtrip():
"""Test a full roundtrip with line data."""
shape = (10, 2, 2)
np.random.seed(0)
data = 20 * np.random.random(shape)
layer = Shapes(data, shape_type='line')
new_layer = Shapes(layer.data, shape_type='line')
assert np.all([nd == d for nd, d in zip(new_layer.data, layer.data)])
def test_paths():
"""Test instantiating Shapes layer with a random 2D paths."""
# Test a single path with 6 points
shape = (1, 6, 2)
np.random.seed(0)
data = 20 * np.random.random(shape)
layer = Shapes(data, shape_type='path')
assert layer.nshapes == shape[0]
assert np.all(layer.data[0] == data[0])
assert layer.ndim == shape[2]
assert np.all([s == 'path' for s in layer.shape_types])
# Test multiple paths with different numbers of points
data = [
20 * np.random.random((np.random.randint(2, 12), 2)) for i in range(10)
]
layer = Shapes(data, shape_type='path')
assert layer.nshapes == len(data)
assert np.all([np.all(ld == d) for ld, d in zip(layer.data, data)])
assert layer.ndim == 2
assert np.all([s == 'path' for s in layer.shape_types])
def test_paths_roundtrip():
"""Test a full roundtrip with path data."""
np.random.seed(0)
data = [
20 * np.random.random((np.random.randint(2, 12), 2)) for i in range(10)
]
layer = Shapes(data, shape_type='path')
new_layer = Shapes(layer.data, shape_type='path')
assert np.all(
[np.all(nd == d) for nd, d in zip(new_layer.data, layer.data)]
)
def test_polygons():
"""Test instantiating Shapes layer with a random 2D polygons."""
# Test a single polygon with 6 points
shape = (1, 6, 2)
np.random.seed(0)
data = 20 * np.random.random(shape)
layer = Shapes(data, shape_type='polygon')
assert layer.nshapes == shape[0]
assert np.all(layer.data[0] == data[0])
assert layer.ndim == shape[2]
assert np.all([s == 'polygon' for s in layer.shape_types])
# Test multiple polygons with different numbers of points
data = [
20 * np.random.random((np.random.randint(2, 12), 2)) for i in range(10)
]
layer = Shapes(data, shape_type='polygon')
assert layer.nshapes == len(data)
assert np.all([np.all(ld == d) for ld, d in zip(layer.data, data)])
assert layer.ndim == 2
assert np.all([s == 'polygon' for s in layer.shape_types])
def test_polygon_roundtrip():
"""Test a full roundtrip with polygon data."""
np.random.seed(0)
data = [
20 * np.random.random((np.random.randint(2, 12), 2)) for i in range(10)
]
layer = Shapes(data, shape_type='polygon')
new_layer = Shapes(layer.data, shape_type='polygon')
assert np.all(
[np.all(nd == d) for nd, d in zip(new_layer.data, layer.data)]
)
def test_mixed_shapes():
"""Test instantiating Shapes layer with a mix of random 2D shapes."""
# Test multiple polygons with different numbers of points
np.random.seed(0)
data = [
20 * np.random.random((np.random.randint(2, 12), 2)) for i in range(5)
] + list(np.random.random((5, 4, 2)))
shape_type = ['polygon'] * 5 + ['rectangle'] * 3 + ['ellipse'] * 2
layer = Shapes(data, shape_type=shape_type)
assert layer.nshapes == len(data)
assert np.all([np.all(ld == d) for ld, d in zip(layer.data, data)])
assert layer.ndim == 2
assert np.all([s == so for s, so in zip(layer.shape_types, shape_type)])
# Test roundtrip with mixed data
new_layer = Shapes(layer.data, shape_type=layer.shape_types)
assert np.all(
[np.all(nd == d) for nd, d in zip(new_layer.data, layer.data)]
)
assert np.all(
[ns == s for ns, s in zip(new_layer.shape_types, layer.shape_types)]
)
def test_changing_shapes():
"""Test changing Shapes data."""
shape_a = (10, 4, 2)
shape_b = (20, 4, 2)
np.random.seed(0)
data_a = 20 * np.random.random(shape_a)
data_b = 20 * np.random.random(shape_b)
layer = Shapes(data_a)
layer.data = data_b
assert layer.nshapes == shape_b[0]
assert np.all([np.all(ld == d) for ld, d in zip(layer.data, data_b)])
assert layer.ndim == shape_b[2]
assert np.all([s == 'rectangle' for s in layer.shape_types])
def test_adding_shapes():
"""Test adding shapes."""
# Start with polygons with different numbers of points
np.random.seed(0)
data = [
20 * np.random.random((np.random.randint(2, 12), 2)) for i in range(5)
]
# shape_type = ['polygon'] * 5 + ['rectangle'] * 3 + ['ellipse'] * 2
layer = Shapes(data, shape_type='polygon')
new_data = np.random.random((5, 4, 2))
new_shape_types = ['rectangle'] * 3 + ['ellipse'] * 2
layer.add(new_data, shape_type=new_shape_types)
all_data = data + list(new_data)
all_shape_types = ['polygon'] * 5 + new_shape_types
assert layer.nshapes == len(all_data)
assert np.all([np.all(ld == d) for ld, d in zip(layer.data, all_data)])
assert layer.ndim == 2
assert np.all(
[s == so for s, so in zip(layer.shape_types, all_shape_types)]
)
def test_adding_shapes_to_empty():
"""Test adding shapes to empty."""
data = np.empty((0, 0, 2))
np.random.seed(0)
layer = Shapes(np.empty((0, 0, 2)))
assert len(layer.data) == 0
data = [
20 * np.random.random((np.random.randint(2, 12), 2)) for i in range(5)
] + list(np.random.random((5, 4, 2)))
shape_type = ['path'] * 5 + ['rectangle'] * 3 + ['ellipse'] * 2
layer.add(data, shape_type=shape_type)
assert layer.nshapes == len(data)
assert np.all([np.all(ld == d) for ld, d in zip(layer.data, data)])
assert layer.ndim == 2
assert np.all([s == so for s, so in zip(layer.shape_types, shape_type)])
def test_selecting_shapes():
"""Test selecting shapes."""
data = 20 * np.random.random((10, 4, 2))
np.random.seed(0)
layer = Shapes(data)
layer.selected_data = [0, 1]
assert layer.selected_data == [0, 1]
layer.selected_data = [9]
assert layer.selected_data == [9]
layer.selected_data = []
assert layer.selected_data == []
def test_removing_selected_shapes():
"""Test removing selected shapes."""
np.random.seed(0)
data = [
20 * np.random.random((np.random.randint(2, 12), 2)) for i in range(5)
] + list(np.random.random((5, 4, 2)))
shape_type = ['polygon'] * 5 + ['rectangle'] * 3 + ['ellipse'] * 2
layer = Shapes(data, shape_type=shape_type)
    # With nothing selected no shapes should be removed
layer.remove_selected()
assert len(layer.data) == len(data)
# Select three shapes and remove them
layer.selected_data = [1, 7, 8]
layer.remove_selected()
keep = [0] + list(range(2, 7)) + [9]
data_keep = [data[i] for i in keep]
shape_type_keep = [shape_type[i] for i in keep]
assert len(layer.data) == len(data_keep)
assert len(layer.selected_data) == 0
assert np.all([np.all(ld == d) for ld, d in zip(layer.data, data_keep)])
assert layer.ndim == 2
assert np.all(
[s == so for s, so in zip(layer.shape_types, shape_type_keep)]
)
def test_changing_modes():
"""Test changing modes."""
np.random.seed(0)
data = 20 * np.random.random((10, 4, 2))
layer = Shapes(data)
assert layer.mode == 'pan_zoom'
assert layer.interactive == True
layer.mode = 'select'
assert layer.mode == 'select'
assert layer.interactive == False
layer.mode = 'direct'
assert layer.mode == 'direct'
assert layer.interactive == False
layer.mode = 'vertex_insert'
assert layer.mode == 'vertex_insert'
assert layer.interactive == False
layer.mode = 'vertex_remove'
assert layer.mode == 'vertex_remove'
assert layer.interactive == False
layer.mode = 'add_rectangle'
assert layer.mode == 'add_rectangle'
assert layer.interactive == False
layer.mode = 'add_ellipse'
assert layer.mode == 'add_ellipse'
assert layer.interactive == False
layer.mode = 'add_line'
assert layer.mode == 'add_line'
assert layer.interactive == False
layer.mode = 'add_path'
assert layer.mode == 'add_path'
assert layer.interactive == False
layer.mode = 'add_polygon'
assert layer.mode == 'add_polygon'
assert layer.interactive == False
layer.mode = 'pan_zoom'
assert layer.mode == 'pan_zoom'
assert layer.interactive == True
def test_name():
"""Test setting layer name."""
np.random.seed(0)
data = 20 * np.random.random((10, 4, 2))
layer = Shapes(data)
assert layer.name == 'Shapes'
layer = Shapes(data, name='random')
assert layer.name == 'random'
layer.name = 'shps'
assert layer.name == 'shps'
def test_visiblity():
"""Test setting layer visiblity."""
np.random.seed(0)
data = 20 * np.random.random((10, 4, 2))
layer = Shapes(data)
assert layer.visible == True
layer.visible = False
assert layer.visible == False
layer = Shapes(data, visible=False)
assert layer.visible == False
layer.visible = True
assert layer.visible == True
def test_opacity():
"""Test setting layer opacity."""
np.random.seed(0)
data = 20 * np.random.random((10, 4, 2))
layer = Shapes(data)
assert layer.opacity == 0.7
layer.opacity = 0.5
assert layer.opacity == 0.5
layer = Shapes(data, opacity=0.6)
assert layer.opacity == 0.6
layer.opacity = 0.3
assert layer.opacity == 0.3
def test_blending():
"""Test setting layer blending."""
np.random.seed(0)
data = 20 * np.random.random((10, 4, 2))
layer = Shapes(data)
assert layer.blending == 'translucent'
layer.blending = 'additive'
assert layer.blending == 'additive'
layer = Shapes(data, blending='additive')
assert layer.blending == 'additive'
layer.blending = 'opaque'
assert layer.blending == 'opaque'
def test_edge_color():
"""Test setting edge color."""
shape = (10, 4, 2)
np.random.seed(0)
data = 20 * np.random.random(shape)
layer = Shapes(data)
assert layer.edge_color == 'black'
assert len(layer.edge_colors) == shape[0]
assert layer.edge_colors == ['black'] * shape[0]
    # With no data selected changing edge color has no effect
layer.edge_color = 'blue'
assert layer.edge_color == 'blue'
assert layer.edge_colors == ['black'] * shape[0]
# Select data and change edge color of selection
layer.selected_data = [0, 1]
assert layer.edge_color == 'black'
layer.edge_color = 'green'
assert layer.edge_colors == ['green'] * 2 + ['black'] * (shape[0] - 2)
# Add new shape and test its color
new_shape = np.random.random((1, 4, 2))
layer.selected_data = []
layer.edge_color = 'blue'
layer.add(new_shape)
assert len(layer.edge_colors) == shape[0] + 1
assert layer.edge_colors == ['green'] * 2 + ['black'] * (shape[0] - 2) + [
'blue'
]
# Instantiate with custom edge color
layer = Shapes(data, edge_color='red')
assert layer.edge_color == 'red'
# Instantiate with custom edge color list
col_list = ['red', 'green'] * 5
layer = Shapes(data, edge_color=col_list)
assert layer.edge_color == 'black'
assert layer.edge_colors == col_list
    # Add new shape and test its color
layer.edge_color = 'blue'
layer.add(new_shape)
assert len(layer.edge_colors) == shape[0] + 1
assert layer.edge_colors == col_list + ['blue']
# Check removing data adjusts colors correctly
layer.selected_data = [0, 2]
layer.remove_selected()
assert len(layer.data) == shape[0] - 1
assert len(layer.edge_colors) == shape[0] - 1
assert layer.edge_colors == [col_list[1]] + col_list[3:] + ['blue']
def test_face_color():
"""Test setting face color."""
shape = (10, 4, 2)
np.random.seed(0)
data = 20 * np.random.random(shape)
layer = Shapes(data)
assert layer.face_color == 'white'
assert len(layer.face_colors) == shape[0]
assert layer.face_colors == ['white'] * shape[0]
    # With no data selected changing face color has no effect
layer.face_color = 'blue'
assert layer.face_color == 'blue'
assert layer.face_colors == ['white'] * shape[0]
# Select data and change face color of selection
layer.selected_data = [0, 1]
assert layer.face_color == 'white'
layer.face_color = 'green'
assert layer.face_colors == ['green'] * 2 + ['white'] * (shape[0] - 2)
# Add new shape and test its color
new_shape = np.random.random((1, 4, 2))
layer.selected_data = []
layer.face_color = 'blue'
layer.add(new_shape)
assert len(layer.face_colors) == shape[0] + 1
assert layer.face_colors == ['green'] * 2 + ['white'] * (shape[0] - 2) + [
'blue'
]
# Instantiate with custom face color
layer = Shapes(data, face_color='red')
assert layer.face_color == 'red'
# Instantiate with custom face color list
col_list = ['red', 'green'] * 5
layer = Shapes(data, face_color=col_list)
assert layer.face_color == 'white'
assert layer.face_colors == col_list
    # Add new shape and test its color
layer.face_color = 'blue'
layer.add(new_shape)
assert len(layer.face_colors) == shape[0] + 1
assert layer.face_colors == col_list + ['blue']
# Check removing data adjusts colors correctly
layer.selected_data = [0, 2]
layer.remove_selected()
assert len(layer.data) == shape[0] - 1
assert len(layer.face_colors) == shape[0] - 1
assert layer.face_colors == [col_list[1]] + col_list[3:] + ['blue']
def test_edge_width():
"""Test setting edge width."""
shape = (10, 4, 2)
np.random.seed(0)
data = 20 * np.random.random(shape)
layer = Shapes(data)
assert layer.edge_width == 1
assert len(layer.edge_widths) == shape[0]
assert layer.edge_widths == [1] * shape[0]
    # With no data selected changing edge width has no effect
layer.edge_width = 2
assert layer.edge_width == 2
assert layer.edge_widths == [1] * shape[0]
    # Select data and change edge width of selection
layer.selected_data = [0, 1]
assert layer.edge_width == 1
layer.edge_width = 3
assert layer.edge_widths == [3] * 2 + [1] * (shape[0] - 2)
# Add new shape and test its width
new_shape = np.random.random((1, 4, 2))
layer.selected_data = []
layer.edge_width = 4
layer.add(new_shape)
assert layer.edge_widths == [3] * 2 + [1] * (shape[0] - 2) + [4]
# Instantiate with custom edge width
layer = Shapes(data, edge_width=5)
assert layer.edge_width == 5
# Instantiate with custom edge width list
width_list = [2, 3] * 5
layer = Shapes(data, edge_width=width_list)
assert layer.edge_width == 1
assert layer.edge_widths == width_list
    # Add new shape and test its width
layer.edge_width = 4
layer.add(new_shape)
assert len(layer.edge_widths) == shape[0] + 1
assert layer.edge_widths == width_list + [4]
    # Check removing data adjusts widths correctly
layer.selected_data = [0, 2]
layer.remove_selected()
assert len(layer.data) == shape[0] - 1
assert len(layer.edge_widths) == shape[0] - 1
assert layer.edge_widths == [width_list[1]] + width_list[3:] + [4]
def test_opacities():
"""Test setting opacities."""
shape = (10, 4, 2)
np.random.seed(0)
data = 20 * np.random.random(shape)
layer = Shapes(data)
# Check default opacity value of 0.7
assert layer.opacity == 0.7
assert len(layer.opacities) == shape[0]
assert layer.opacities == [0.7] * shape[0]
    # With no data selected changing opacity has no effect
layer.opacity = 1
assert layer.opacity == 1
assert layer.opacities == [0.7] * shape[0]
# Select data and change opacity of selection
layer.selected_data = [0, 1]
assert layer.opacity == 0.7
layer.opacity = 0.5
assert layer.opacities == [0.5] * 2 + [0.7] * (shape[0] - 2)
    # Add new shape and test its opacity
new_shape = np.random.random((1, 4, 2))
layer.selected_data = []
layer.opacity = 0.3
layer.add(new_shape)
assert layer.opacities == [0.5] * 2 + [0.7] * (shape[0] - 2) + [0.3]
# Instantiate with custom opacity
layer = Shapes(data, opacity=0.2)
assert layer.opacity == 0.2
# Instantiate with custom opacity list
opacity_list = [0.1, 0.4] * 5
layer = Shapes(data, opacity=opacity_list)
assert layer.opacity == 0.7
assert layer.opacities == opacity_list
# Add new shape and test its opacity
layer.opacity = 0.6
layer.add(new_shape)
assert len(layer.opacities) == shape[0] + 1
assert layer.opacities == opacity_list + [0.6]
# Check removing data adjusts opacities correctly
layer.selected_data = [0, 2]
layer.remove_selected()
assert len(layer.data) == shape[0] - 1
assert len(layer.opacities) == shape[0] - 1
assert layer.opacities == [opacity_list[1]] + opacity_list[3:] + [0.6]
def test_z_index():
"""Test setting z-index during instantiation."""
shape = (10, 4, 2)
np.random.seed(0)
data = 20 * np.random.random(shape)
layer = Shapes(data)
assert layer.z_indices == [0] * shape[0]
# Instantiate with custom z-index
layer = Shapes(data, z_index=4)
assert layer.z_indices == [4] * shape[0]
# Instantiate with custom z-index list
z_index_list = [2, 3] * 5
layer = Shapes(data, z_index=z_index_list)
assert layer.z_indices == z_index_list
    # Add new shape and test its z-index
new_shape = np.random.random((1, 4, 2))
layer.add(new_shape)
assert len(layer.z_indices) == shape[0] + 1
assert layer.z_indices == z_index_list + [4]
    # Check removing data adjusts z-indices correctly
layer.selected_data = [0, 2]
layer.remove_selected()
assert len(layer.data) == shape[0] - 1
assert len(layer.z_indices) == shape[0] - 1
assert layer.z_indices == [z_index_list[1]] + z_index_list[3:] + [4]
def test_move_to_front():
"""Test moving shapes to front."""
shape = (10, 4, 2)
np.random.seed(0)
data = 20 * np.random.random(shape)
z_index_list = [2, 3] * 5
layer = Shapes(data, z_index=z_index_list)
assert layer.z_indices == z_index_list
# Move selected shapes to front
layer.selected_data = [0, 2]
layer.move_to_front()
assert layer.z_indices == [4] + [z_index_list[1]] + [4] + z_index_list[3:]
def test_move_to_back():
"""Test moving shapes to back."""
shape = (10, 4, 2)
np.random.seed(0)
data = 20 * np.random.random(shape)
z_index_list = [2, 3] * 5
layer = Shapes(data, z_index=z_index_list)
assert layer.z_indices == z_index_list
    # Move selected shapes to back
layer.selected_data = [0, 2]
layer.move_to_back()
assert layer.z_indices == [1] + [z_index_list[1]] + [1] + z_index_list[3:]
def test_interaction_box():
"""Test the creation of the interaction box."""
shape = (10, 4, 2)
np.random.seed(0)
    data = 20 * np.random.random(shape)
#!/usr/bin/env python
# coding: utf-8
import os
from pathlib import Path
import numpy as np
import itk
from itk import TubeTK as ttk
#################
#################
#################
#################
#################
def scv_convert_ctp_to_cta(filenames,
report_progress=print,
debug=False,
output_dir="."):
filenames.sort()
num_images = len(filenames)
base_im = itk.imread(filenames[num_images//2],itk.F)
base_spacing = base_im.GetSpacing()
progress_percent = 10
report_progress("Reading images",progress_percent)
Dimension = 3
PixelType = itk.ctype('float')
ImageType = itk.Image[PixelType,Dimension]
imdatamax = itk.GetArrayFromImage(base_im)
imdatamin = imdatamax
if output_dir!=None and not os.path.exists(output_dir):
os.mkdir(output_dir)
progress_percent = 20
progress_per_file = 70/num_images
for imNum in range(num_images):
imMoving = itk.imread(filenames[imNum],itk.F)
if imMoving.shape != base_im.shape:
resample = ttk.ResampleImage.New(Input=imMoving)
resample.SetMatchImage(base_im)
resample.Update()
imMovingIso = resample.GetOutput()
progress_label = "Resampling "+str(imNum)+" of "+str(num_images)
report_progress(progress_label,progress_percent)
else:
imMovingIso = imMoving
imdataTmp = itk.GetArrayFromImage(imMovingIso)
imdatamax = np.maximum(imdatamax,imdataTmp)
imdataTmp = np.where(imdataTmp==-1024,imdatamin,imdataTmp)
imdatamin = np.minimum(imdatamin,imdataTmp)
progress_percent += progress_per_file
progress_label = "Integrating "+str(imNum)+" of "+str(num_images)
report_progress(progress_label,progress_percent)
report_progress("Generating CT, CTA, and CTP",90)
ct = itk.GetImageFromArray(imdatamin)
ct.CopyInformation(base_im)
cta = itk.GetImageFromArray(imdatamax)
cta.CopyInformation(base_im)
diff = imdatamax-imdatamin
diff[:4,:,:] = 0
diff[-4:,:,:] = 0
diff[:,:4,:] = 0
diff[:,-4:,:] = 0
diff[:,:,:4] = 0
diff[:,:,-4:] = 0
dsa = itk.GetImageFromArray(diff)
dsa.CopyInformation(base_im)
report_progress("Done",100)
return ct,cta,dsa
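# Illustrative usage sketch (not part of the original script); the glob
# pattern and output paths below are assumptions:
# import glob
# filenames = glob.glob("CTP/*.mha")
# ct, cta, dsa = scv_convert_ctp_to_cta(filenames, output_dir="Results")
# itk.imwrite(cta, "Results/cta.mha", compression=True)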
#################
#################
#################
#################
#################
def scv_segment_brain_from_ct(cta_image,
report_progress=print,
debug=False):
ImageType = itk.Image[itk.F,3]
LabelMapType = itk.Image[itk.UC,3]
report_progress("Threshold",5)
thresh = ttk.ImageMath.New(Input=cta_image)
thresh.ReplaceValuesOutsideMaskRange(cta_image,1,6000,0)
thresh.ReplaceValuesOutsideMaskRange(cta_image,0,600,1)
cta_tmp = thresh.GetOutput()
thresh.ReplaceValuesOutsideMaskRange(cta_tmp,0,1,2)
cta_mask = thresh.GetOutputUChar()
report_progress("Initial Mask",10)
maskMath = ttk.ImageMath.New(Input=cta_mask)
maskMath.Threshold(0,1,0,1)
maskMath.Erode(15,1,0)
maskMath.Dilate(20,1,0)
maskMath.Dilate(12,0,1)
maskMath.Erode(12,1,0)
brainSeed = maskMath.GetOutputUChar()
maskMath.SetInput(cta_mask)
maskMath.Threshold(2,2,0,1)
maskMath.Erode(2,1,0)
maskMath.Dilate(10,1,0)
maskMath.Erode(7,1,0)
skullSeed = maskMath.GetOutputUChar()
maskMath.AddImages(brainSeed,1,2)
comboSeed = maskMath.GetOutputUChar()
report_progress("Connected Component",20)
segmenter = ttk.SegmentConnectedComponentsUsingParzenPDFs[ImageType,
LabelMapType].New()
segmenter.SetFeatureImage( cta_image )
segmenter.SetInputLabelMap( comboSeed )
segmenter.SetObjectId( 2 )
segmenter.AddObjectId( 1 )
segmenter.SetVoidId( 0 )
segmenter.SetErodeDilateRadius( 20 )
segmenter.SetHoleFillIterations( 40 )
segmenter.Update()
segmenter.ClassifyImages()
brainMaskRaw = segmenter.GetOutputLabelMap()
report_progress("Masking",60)
maskMath.SetInput(brainMaskRaw)
maskMath.Threshold(2,2,1,0)
maskMath.Erode(1,1,0)
brainMaskRaw2 = maskMath.GetOutputUChar()
connComp = ttk.SegmentConnectedComponents.New(Input=brainMaskRaw2)
connComp.SetKeepOnlyLargestComponent(True)
connComp.Update()
brainMask = connComp.GetOutput()
report_progress("Finishing",90)
cast = itk.CastImageFilter[LabelMapType,ImageType].New()
cast.SetInput(brainMask)
cast.Update()
brainMaskF = cast.GetOutput()
brainMath = ttk.ImageMath[ImageType].New(Input=cta_image)
brainMath.ReplaceValuesOutsideMaskRange( brainMaskF,1,1,0)
cta_brain_image = brainMath.GetOutput()
report_progress("Done",100)
return cta_brain_image
#################
#################
#################
#################
#################
def scv_enhance_vessels_in_cta(cta_image,
cta_roi_image,
report_progress=print,
debug=False ):
ImageType = itk.Image[itk.F,3]
LabelMapType = itk.Image[itk.UC,3]
report_progress("Masking",5)
imMath = ttk.ImageMath.New(Input=cta_roi_image)
imMath.Threshold( 0.00001,4000,1,0)
imMath.Erode(10,1,0)
imBrainMaskErode = imMath.GetOutput()
imMath.SetInput(cta_roi_image)
imMath.IntensityWindow(0,300,0,300)
imMath.ReplaceValuesOutsideMaskRange(imBrainMaskErode,0.5,1.5,0)
imBrainErode = imMath.GetOutput()
spacing = cta_image.GetSpacing()[0]
report_progress("Blurring",10)
imMath = ttk.ImageMath[ImageType].New()
imMath.SetInput(imBrainErode)
imMath.Blur(1.5*spacing)
imBlur = imMath.GetOutput()
imBlurArray = itk.GetArrayViewFromImage(imBlur)
report_progress("Generating Seeds",20)
numSeeds = 15
seedCoverage = 20
seedCoord = np.zeros([numSeeds,3])
for i in range(numSeeds):
seedCoord[i] = np.unravel_index(np.argmax(imBlurArray,
axis=None),imBlurArray.shape)
indx = [int(seedCoord[i][0]),int(seedCoord[i][1]),
int(seedCoord[i][2])]
minX = max(indx[0]-seedCoverage,0)
        maxX = min(indx[0]+seedCoverage,imBlurArray.shape[0])
        minY = max(indx[1]-seedCoverage,0)
        maxY = min(indx[1]+seedCoverage,imBlurArray.shape[1])
        minZ = max(indx[2]-seedCoverage,0)
        maxZ = min(indx[2]+seedCoverage,imBlurArray.shape[2])
imBlurArray[minX:maxX,minY:maxY,minZ:maxZ]=0
indx.reverse()
seedCoord[:][i] = cta_roi_image.TransformIndexToPhysicalPoint(indx)
report_progress("Segmenting Initial Vessels",30)
vSeg = ttk.SegmentTubes.New(Input=cta_roi_image)
vSeg.SetVerbose(debug)
vSeg.SetMinRoundness(0.4)
vSeg.SetMinCurvature(0.002)
vSeg.SetRadiusInObjectSpace( 1 )
for i in range(numSeeds):
progress_label = "Vessel "+str(i)+" of "+str(numSeeds)
progress_percent = i/numSeeds*20+30
report_progress(progress_label,progress_percent)
vSeg.ExtractTubeInObjectSpace( seedCoord[i],i )
tubeMaskImage = vSeg.GetTubeMaskImage()
imMath.SetInput(tubeMaskImage)
imMath.AddImages(cta_roi_image,200,1)
blendIm = imMath.GetOutput()
report_progress("Computing Training Mask",50)
trMask = ttk.ComputeTrainingMask[ImageType,LabelMapType].New()
trMask.SetInput( tubeMaskImage )
trMask.SetGap( 4 )
trMask.SetObjectWidth( 1 )
trMask.SetNotObjectWidth( 1 )
trMask.Update()
fgMask = trMask.GetOutput()
report_progress("Enhancing Image",70)
enhancer = ttk.EnhanceTubesUsingDiscriminantAnalysis[ImageType,
LabelMapType].New()
enhancer.AddInput( cta_image )
enhancer.SetLabelMap( fgMask )
enhancer.SetRidgeId( 255 )
enhancer.SetBackgroundId( 128 )
enhancer.SetUnknownId( 0 )
enhancer.SetTrainClassifier(True)
enhancer.SetUseIntensityOnly(True)
enhancer.SetScales([0.75*spacing,2*spacing,6*spacing])
enhancer.Update()
enhancer.ClassifyImages()
report_progress("Finalizing",90)
imMath = ttk.ImageMath[ImageType].New()
imMath.SetInput(enhancer.GetClassProbabilityImage(0))
imMath.Blur(0.5*spacing)
prob0 = imMath.GetOutput()
imMath.SetInput(enhancer.GetClassProbabilityImage(1))
imMath.Blur(0.5*spacing)
prob1 = imMath.GetOutput()
cta_vess = itk.SubtractImageFilter(Input1=prob0, Input2=prob1)
imMath.SetInput(cta_roi_image)
imMath.Threshold(0.0000001,2000,1,0)
imMath.Erode(2,1,0)
imBrainE = imMath.GetOutput()
imMath.SetInput(cta_vess)
imMath.ReplaceValuesOutsideMaskRange(imBrainE,1,1,-0.001)
cta_roi_vess = imMath.GetOutput()
report_progress("Done",100)
return cta_vess,cta_roi_vess
#################
#################
#################
#################
#################
def scv_extract_vessels_from_cta(cta_image,
cta_roi_vessels_image,
report_progress=print,
debug=False,
output_dir="."):
if output_dir!=None and not os.path.exists(output_dir):
os.mkdir(output_dir)
spacing = cta_image.GetSpacing()[0]
report_progress("Thresholding",5)
imMath = ttk.ImageMath.New(cta_roi_vessels_image)
imMath.MedianFilter(1)
imMath.Threshold(0.00000001,9999,1,0)
vess_mask_im = imMath.GetOutputShort()
if debug and output_dir!=None:
itk.imwrite(vess_mask_im,
output_dir+"/extract_vessels_mask.mha",
compression=True)
report_progress("Connecting",10)
ccSeg = ttk.SegmentConnectedComponents.New(vess_mask_im)
ccSeg.SetMinimumVolume(50)
ccSeg.Update()
vess_mask_cc_im = ccSeg.GetOutput()
if debug and output_dir!=None:
itk.imwrite(vess_mask_cc_im,
output_dir+"/extract_vessels_mask_cc.mha",
compression=True)
imMathSS = ttk.ImageMath.New(vess_mask_cc_im)
imMathSS.Threshold(0,0,1,0)
vess_mask_inv_im = imMathSS.GetOutputFloat()
report_progress("Filling in",20)
distFilter = itk.DanielssonDistanceMapImageFilter.New(vess_mask_inv_im)
distFilter.Update()
dist_map_im = distFilter.GetOutput()
report_progress("Generating seeds",30)
imMath.SetInput(dist_map_im)
imMath.Blur(0.5*spacing)
tmp = imMath.GetOutput()
# Distance map's distances are in index units, not spacing
imMath.ReplaceValuesOutsideMaskRange(tmp,0.333,10,0)
initial_radius_im = imMath.GetOutput()
if debug and output_dir!=None:
itk.imwrite(initial_radius_im,
output_dir+"/vessel_extraction_initial_radius.mha",
compression=True)
report_progress("Generating input",30)
imMath.SetInput(cta_image)
imMath.ReplaceValuesOutsideMaskRange(cta_roi_vessels_image,0,1000,0)
imMath.Blur(0.4*spacing)
imMath.NormalizeMeanStdDev()
imMath.IntensityWindow(-4,4,0,1000)
input_im = imMath.GetOutput()
if debug and output_dir!=None:
itk.imwrite(input_im,
output_dir+"/vessel_extraction_input.mha",
compression=True)
report_progress("Extracting vessels",40)
vSeg = ttk.SegmentTubes.New(Input=input_im)
vSeg.SetVerbose(debug)
vSeg.SetMinCurvature(0)#.0001)
vSeg.SetMinRoundness(0.02)
vSeg.SetMinRidgeness(0.5)
vSeg.SetMinLevelness(0.0)
vSeg.SetRadiusInObjectSpace( 0.8*spacing )
vSeg.SetBorderInIndexSpace(3)
vSeg.SetSeedMask( initial_radius_im )
#vSeg.SetSeedRadiusMask( initial_radius_im )
vSeg.SetOptimizeRadius(True)
vSeg.SetUseSeedMaskAsProbabilities(True)
# Performs large-to-small vessel extraction using radius as probability
vSeg.SetSeedExtractionMinimumProbability(0.99)
vSeg.ProcessSeeds()
report_progress("Finalizing",90)
tubeMaskImage = vSeg.GetTubeMaskImage()
if debug and output_dir!=None:
itk.imwrite(tubeMaskImage,
output_dir+"/vessel_extraction_output.mha",
compression=True)
report_progress("Done",100)
return tubeMaskImage,vSeg.GetTubeGroup()
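# Illustrative end-to-end sketch (not from the original script); variable
# names and the output directory are assumptions:
# ct, cta, dsa = scv_convert_ctp_to_cta(filenames, output_dir="Results")
# cta_brain = scv_segment_brain_from_ct(cta)
# cta_vess, cta_roi_vess = scv_enhance_vessels_in_cta(cta, cta_brain)
# vess_mask, vess_group = scv_extract_vessels_from_cta(cta, cta_roi_vess,
#                                                      output_dir="Results")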
def scv_register_ctp_images(fixed_image_file,
moving_image_files,
output_dir,
report_progress=print,
debug=False):
ImageType = itk.Image[itk.F,3]
num_images = len(moving_image_files)
progress_percent = 10
progress_per_file = 80/num_images
fixed_im = itk.imread(fixed_image_file,itk.F)
fixed_im_spacing = fixed_im.GetSpacing()
if fixed_im_spacing[0] != fixed_im_spacing[1] or \
fixed_im_spacing[1] != fixed_im_spacing[2]:
report_progress("Resampling",10)
resample = ttk.ResampleImage.New(Input=fixed_im)
resample.SetMakeIsotropic(True)
resample.Update()
fixed_im = resample.GetOutput()
if debug:
progress_label = "DEBUG: Resampling to "+str(
fixed_im.GetSpacing())
report_progress(progress_label,10)
imMath = ttk.ImageMath.New(fixed_im)
imMath.Threshold(150,800,1,0)
imMath.Dilate(10,1,0)
mask_im = imMath.GetOutputUChar()
mask_array = itk.GetArrayViewFromImage(mask_im)
mask_array[:4,:,:] = 0
mask_array[-4:,:,:] = 0
mask_obj = itk.ImageMaskSpatialObject[3].New()
mask_obj.SetImage(mask_im)
mask_obj.Update()
for imNum in range(num_images):
progress_percent += progress_per_file
progress_label = "Registering "+str(imNum)+" of "+str(num_images)
report_progress(progress_label,progress_percent)
if moving_image_files[imNum] != fixed_image_file:
moving_im = itk.imread(moving_image_files[imNum],itk.F)
imreg = ttk.RegisterImages[ImageType].New()
imreg.SetFixedImage(fixed_im)
imreg.SetMovingImage(moving_im)
imreg.SetRigidMaxIterations(100)
imreg.SetRegistration("RIGID")
imreg.SetExpectedOffsetMagnitude(5)
imreg.SetExpectedRotationMagnitude(0.05)
imreg.SetFixedImageMaskObject(mask_obj)
imreg.SetUseEvolutionaryOptimization(False)
if debug:
imreg.SetReportProgress(True)
imreg.Update()
tfm = imreg.GetCurrentMatrixTransform()
moving_reg_im = imreg.ResampleImage("SINC_INTERPOLATION",
moving_im,tfm,-1024)
if output_dir!=None:
pname,fname = os.path.split(moving_image_files[imNum])
                suffix = Path(fname).suffix
                new_fname = Path(fname).stem + "_reg" + suffix
itk.imwrite(moving_reg_im,
output_dir+"/"+new_fname,
compression=True)
elif output_dir!=None:
pname,fname = os.path.split(moving_image_files[imNum])
itk.imwrite(fixed_im,
output_dir+"/"+fname,
compression=True)
def scv_register_atlas_to_image(atlas_im, atlas_mask_im, in_im):
ImageType = itk.Image[itk.F,3]
regAtlasToIn = ttk.RegisterImages[ImageType].New(FixedImage=in_im,
MovingImage=atlas_im)
regAtlasToIn.SetReportProgress(True)
regAtlasToIn.SetRegistration("PIPELINE_AFFINE")
regAtlasToIn.SetMetric("MATTES_MI_METRIC")
regAtlasToIn.SetInitialMethodEnum("INIT_WITH_IMAGE_CENTERS")
regAtlasToIn.Update()
atlas_reg_im = regAtlasToIn.ResampleImage()
atlas_mask_reg_im = regAtlasToIn.ResampleImage("NEAREST_NEIGHBOR",
atlas_mask_im)
return atlas_reg_im,atlas_mask_reg_im
def scv_compute_atlas_region_stats(atlas_im,
time_im,
vess_im,
number_of_time_bins=100,
report_progress=print,
debug=False):
atlas_arr = itk.GetArrayFromImage(atlas_im)
time_arr = itk.GetArrayFromImage(time_im)
vess_arr = itk.GetArrayFromImage(vess_im)
num_regions = int(atlas_arr.max())
time_max = time_arr.max()
time_min = time_arr.min()
nbins = int(number_of_time_bins)
time_factor = (time_max-time_min)/(nbins+1)
bin_value = np.zeros([num_regions,nbins])
bin_count = np.zeros([num_regions,nbins])
for atlas_region in range(num_regions):
report_progress("Masking",(atlas_region+1)*(100/num_regions))
indx_arr = np.where(atlas_arr==atlas_region)
indx_list = list(zip(indx_arr[0],indx_arr[1],indx_arr[2]))
for indx in indx_list:
            time_bin = int((time_arr[indx]-time_min)/time_factor)
time_bin = min(max(0,time_bin),nbins-1)
if np.isnan(vess_arr[indx]) == False:
bin_count[atlas_region,time_bin] += 1
bin_value[atlas_region,time_bin] += vess_arr[indx]
    bin_label = np.arange(nbins) * time_factor + time_min
    bin_value = np.divide(bin_value,bin_count,where=bin_count!=0)
"""
This file is part of pyS5p
https://github.com/rmvanhees/pys5p.git
The classes L1Bio, L1BioIRR, L1BioRAD and L1BioENG provide read/write access to
offline level 1b products, resp. calibration, irradiance, radiance
and engineering.
Copyright (c) 2017-2021 SRON - Netherlands Institute for Space Research
All Rights Reserved
License: BSD-3-Clause
"""
from datetime import datetime, timedelta
from pathlib import Path, PurePosixPath
from setuptools_scm import get_version
import h5py
import numpy as np
from .biweight import biweight
from .swir_texp import swir_exp_time
# - global parameters ------------------------------
# - local functions --------------------------------
def pad_rows(arr1, arr2):
"""
    Pad the array with the fewest rows with NaN's so that both have equal shape
"""
if arr2.ndim == 1:
pass
elif arr2.ndim == 2:
if arr1.shape[0] < arr2.shape[0]:
buff = arr1.copy()
arr1 = np.full(arr2.shape, np.nan, dtype=arr2.dtype)
arr1[0:buff.shape[0], :] = buff
elif arr1.shape[0] > arr2.shape[0]:
buff = arr2.copy()
arr2 = np.full(arr1.shape, np.nan, dtype=arr2.dtype)
arr2[0:buff.shape[0], :] = buff
else:
if arr1.shape[1] < arr2.shape[1]:
buff = arr1.copy()
arr1 = np.full(arr2.shape, np.nan, dtype=arr2.dtype)
arr1[:, 0:buff.shape[1], :] = buff
elif arr1.shape[1] > arr2.shape[1]:
buff = arr2.copy()
arr2 = np.full(arr1.shape, np.nan, dtype=arr2.dtype)
arr2[:, 0:buff.shape[1], :] = buff
return (arr1, arr2)
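# Illustrative example (not part of the original module): padding a (2, 5)
# and a (4, 5) array yields two (4, 5) arrays, the smaller one extended with
# NaN rows.
# a, b = pad_rows(np.zeros((2, 5)), np.zeros((4, 5)))
# assert a.shape == b.shape == (4, 5)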
# - class definition -------------------------------
class L1Bio:
"""
class with methods to access Tropomi L1B calibration products
The L1b calibration products are available for UVN (band 1-6)
and SWIR (band 7-8).
Attributes
----------
fid : h5py.File
filename : string
bands : string
Methods
-------
close()
        Close resources.
get_attr(attr_name)
Obtain value of an HDF5 file attribute.
get_orbit()
Returns absolute orbit number.
get_processor_version()
Returns version of the L01b processor used to generate this product.
get_coverage_time()
Returns start and end of the measurement coverage time.
get_creation_time()
Returns datetime when the L1b product was created.
select(msm_type=None)
Select a calibration measurement as <processing class>_<ic_id>.
sequence(band=None)
Returns sequence number for each unique measurement based on ICID
and delta_time.
get_ref_time(band=None)
Returns reference start time of measurements.
get_delta_time(band=None)
Returns offset from the reference start time of measurement.
get_instrument_settings(band=None)
Returns instrument settings of measurement.
get_exposure_time(band=None)
Returns pixel exposure time of the measurements, which is calculated
from the parameters 'int_delay' and 'int_hold' for SWIR.
get_housekeeping_data(band=None)
Returns housekeeping data of measurements.
get_geo_data(geo_dset=None, band=None)
Returns data of selected datasets from the GEODATA group.
get_msm_attr(msm_dset, attr_name, band=None)
Returns value attribute of measurement dataset "msm_dset".
get_msm_data(msm_dset, band=None, fill_as_nan=False, msm_to_row=None)
Reads data from dataset "msm_dset".
set_msm_data(msm_dset, new_data)
Replace data of dataset "msm_dset" with new_data.
Notes
-----
Examples
--------
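    A minimal usage sketch (the product filename, measurement group and
    dataset name below are illustrative assumptions, not taken from an
    actual product):
    >>> with L1Bio('S5P_OFFL_L1B_CA_UVN_...nc') as l1b:
    ...     bands = l1b.select('BACKGROUND_MODE_0063')
    ...     signal = l1b.get_msm_data('signal', band=bands[0],
    ...                               fill_as_nan=True)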
"""
band_groups = ('/BAND%_CALIBRATION', '/BAND%_IRRADIANCE',
'/BAND%_RADIANCE')
geo_dset = 'satellite_latitude,satellite_longitude'
msm_type = None
def __init__(self, l1b_product, readwrite=False, verbose=False):
"""
Initialize access to a Tropomi offline L1b product
"""
# open L1b product as HDF5 file
if not Path(l1b_product).is_file():
raise FileNotFoundError(f'{l1b_product} does not exist')
# initialize private class-attributes
self.__rw = readwrite
self.__verbose = verbose
self.__msm_path = None
self.__patched_msm = []
self.filename = l1b_product
self.bands = ''
if readwrite:
self.fid = h5py.File(l1b_product, "r+")
else:
self.fid = h5py.File(l1b_product, "r")
def __repr__(self):
class_name = type(self).__name__
return f'{class_name}({self.filename!r}, readwrite={self.__rw!r})'
def __iter__(self):
for attr in sorted(self.__dict__):
if not attr.startswith("__"):
yield attr
def __enter__(self):
"""
method called to initiate the context manager
"""
return self
def __exit__(self, exc_type, exc_value, traceback):
"""
method called when exiting the context manager
"""
self.close()
return False # any exception is raised by the with statement.
def close(self):
"""
Close resources.
Notes
-----
        Before closing the product, we make sure that the output product
        describes what has been altered by the S/W, so that any change
        remains traceable.
In case the L1b product is altered, the attributes listed below are
added to the group: "/METADATA/SRON_METADATA":
- dateStamp ('now')
- Git-version of S/W
- list of patched datasets
- auxiliary datasets used by patch-routines
"""
if self.fid is None:
return
if self.__patched_msm:
# pylint: disable=no-member
sgrp = self.fid.require_group("/METADATA/SRON_METADATA")
sgrp.attrs['dateStamp'] = datetime.utcnow().isoformat()
sgrp.attrs['git_tag'] = get_version(root='..',
relative_to=__file__)
if 'patched_datasets' not in sgrp:
dtype = h5py.special_dtype(vlen=str)
dset = sgrp.create_dataset('patched_datasets',
(len(self.__patched_msm),),
maxshape=(None,), dtype=dtype)
dset[:] = np.asarray(self.__patched_msm)
else:
dset = sgrp['patched_datasets']
dset.resize(dset.shape[0] + len(self.__patched_msm), axis=0)
dset[dset.shape[0]-1:] = np.asarray(self.__patched_msm)
self.fid.close()
self.fid = None
# ---------- PUBLIC FUNCTIONS ----------
def get_attr(self, attr_name):
"""
Obtain value of an HDF5 file attribute
Parameters
----------
attr_name : string
Name of the attribute
"""
if attr_name not in self.fid.attrs.keys():
return None
attr = self.fid.attrs[attr_name]
if attr.shape is None:
return None
return attr
def get_orbit(self):
"""
Returns absolute orbit number
"""
res = self.get_attr('orbit')
if res is None:
return None
return int(res)
def get_processor_version(self):
"""
Returns version of the L01b processor
"""
attr = self.get_attr('processor_version')
if attr is None:
return None
# pylint: disable=no-member
return attr.decode('ascii')
def get_coverage_time(self):
"""
Returns start and end of the measurement coverage time
"""
attr_start = self.get_attr('time_coverage_start')
if attr_start is None:
return None
attr_end = self.get_attr('time_coverage_end')
if attr_end is None:
return None
# pylint: disable=no-member
return (attr_start.decode('ascii'),
attr_end.decode('ascii'))
def get_creation_time(self):
"""
Returns datetime when the L1b product was created
"""
grp = self.fid['/METADATA/ESA_METADATA/earth_explorer_header']
dset = grp['fixed_header/source']
if 'Creation_Date' in self.fid.attrs.keys():
attr = dset.attrs['Creation_Date']
if isinstance(attr, bytes):
return attr.decode('ascii')
return attr
return None
def select(self, msm_type=None):
"""
Select a calibration measurement as <processing class>_<ic_id>
Parameters
----------
msm_type : string
Name of calibration measurement group as <processing class>_<ic_id>
Returns
-------
out : string
String with spectral bands found in product
Updated object attributes:
- bands : available spectral bands
"""
if msm_type is None:
if self.msm_type is None:
raise ValueError('parameter msm_type is not defined')
msm_type = self.msm_type
self.bands = ''
for name in self.band_groups:
for ii in '12345678':
grp_path = PurePosixPath(name.replace('%', ii), msm_type)
if str(grp_path) in self.fid:
if self.__verbose:
print('*** INFO: found: ', grp_path)
self.bands += ii
if self.bands:
self.__msm_path = str(
PurePosixPath(name, msm_type))
break
return self.bands
def sequence(self, band=None):
"""
Returns sequence number for each unique measurement based on ICID
and delta_time
Parameters
----------
band : None or {'1', '2', '3', ..., '8'}
Select one of the band present in the product
Default is 'None' which returns the first available band
Returns
-------
out : array-like
Numpy rec-array with sequence number, ICID and delta-time
"""
if self.__msm_path is None:
return None
if band is None or len(band) > 1:
band = self.bands[0]
msm_path = self.__msm_path.replace('%', band)
grp = self.fid[str(PurePosixPath(msm_path, 'INSTRUMENT'))]
icid_list = np.squeeze(grp['instrument_configuration']['ic_id'])
master_cycle = grp['instrument_settings']['master_cycle_period_us'][0]
master_cycle /= 1000
grp = self.fid[str(PurePosixPath(msm_path, 'OBSERVATIONS'))]
delta_time = np.squeeze(grp['delta_time'])
# define result as numpy array
length = delta_time.size
res = np.empty((length,), dtype=[('sequence', 'u2'),
('icid', 'u2'),
('delta_time', 'u4'),
('index', 'u4')])
res['sequence'] = [0]
res['icid'] = icid_list
res['delta_time'] = delta_time
res['index'] = np.arange(length, dtype=np.uint32)
if length == 1:
return res
# determine sequence number
buff_icid = np.concatenate(([icid_list[0]-10], icid_list,
[icid_list[-1]+10]))
dt_thres = 10 * master_cycle
buff_time = np.concatenate(([delta_time[0] - 10 * dt_thres], delta_time,
[delta_time[-1] + 10 * dt_thres]))
indx = (((buff_time[1:] - buff_time[0:-1]) > dt_thres)
| ((buff_icid[1:] - buff_icid[0:-1]) != 0)).nonzero()[0]
for ii in range(len(indx)-1):
res['sequence'][indx[ii]:indx[ii+1]] = ii
return res
def get_ref_time(self, band=None):
"""
Returns reference start time of measurements
Parameters
----------
band : None or {'1', '2', '3', ..., '8'}
Select one of the band present in the product.
Default is 'None' which returns the first available band
"""
if self.__msm_path is None:
return None
if band is None:
band = self.bands[0]
msm_path = self.__msm_path.replace('%', band)
grp = self.fid[str(PurePosixPath(msm_path, 'OBSERVATIONS'))]
return datetime(2010, 1, 1, 0, 0, 0) \
+ timedelta(seconds=int(grp['time'][0]))
def get_delta_time(self, band=None):
"""
Returns offset from the reference start time of measurement
Parameters
----------
band : None or {'1', '2', '3', ..., '8'}
Select one of the band present in the product.
Default is 'None' which returns the first available band
"""
if self.__msm_path is None:
return None
if band is None:
band = self.bands[0]
msm_path = self.__msm_path.replace('%', band)
grp = self.fid[str(PurePosixPath(msm_path, 'OBSERVATIONS'))]
return grp['delta_time'][0, :].astype(int)
def get_instrument_settings(self, band=None):
"""
Returns instrument settings of measurement
Parameters
----------
band : None or {'1', '2', '3', ..., '8'}
Select one of the band present in the product.
Default is 'None' which returns the first available band
"""
if self.__msm_path is None:
return None
if band is None:
band = self.bands[0]
msm_path = self.__msm_path.replace('%', band)
#
# Due to a bug in python module h5py (v2.6.0), it fails to read
# the UVN instrument settings directy, with exception:
# KeyError: 'Unable to open object (Component not found)'.
# This is my workaround
#
grp = self.fid[str(PurePosixPath(msm_path, 'INSTRUMENT'))]
instr = np.empty(grp['instrument_settings'].shape,
dtype=grp['instrument_settings'].dtype)
grp['instrument_settings'].read_direct(instr)
# for name in grp['instrument_settings'].dtype.names:
# instr[name][:] = grp['instrument_settings'][name]
return instr
def get_exposure_time(self, band=None):
"""
Returns pixel exposure time of the measurements, which is calculated
from the parameters 'int_delay' and 'int_hold' for SWIR.
Parameters
----------
band : None or {'1', '2', '3', ..., '8'}
Select one of the band present in the product
Default is 'None' which returns the first available band
"""
if band is None:
band = self.bands[0]
instr_arr = self.get_instrument_settings(band)
# calculate exact exposure time
if int(band) < 7:
return [instr['exposure_time'] for instr in instr_arr]
return [swir_exp_time(instr['int_delay'], instr['int_hold'])
for instr in instr_arr]
def get_housekeeping_data(self, band=None):
"""
Returns housekeeping data of measurements
Parameters
----------
band : None or {'1', '2', '3', ..., '8'}
Select one of the band present in the product
Default is 'None' which returns the first available band
"""
if self.__msm_path is None:
return None
if band is None:
band = self.bands[0]
msm_path = self.__msm_path.replace('%', band)
grp = self.fid[str(PurePosixPath(msm_path, 'INSTRUMENT'))]
return np.squeeze(grp['housekeeping_data'])
def get_geo_data(self, geo_dset=None, band=None):
"""
Returns data of selected datasets from the GEODATA group
Parameters
----------
geo_dset : string
Name(s) of datasets in the GEODATA group, comma separated
Default is 'satellite_latitude,satellite_longitude'
band : None or {'1', '2', '3', ..., '8'}
Select one of the band present in the product
Default is 'None' which returns the first available band
Returns
-------
out : dict of numpy
Compound array with data of selected datasets from the GEODATA group
"""
if self.__msm_path is None:
return None
if geo_dset is None:
geo_dset = self.geo_dset
if band is None:
band = self.bands[0]
msm_path = self.__msm_path.replace('%', band)
grp = self.fid[str(PurePosixPath(msm_path, 'GEODATA'))]
res = {}
for name in geo_dset.split(','):
res[name] = grp[name][0, ...]
return res
def get_msm_attr(self, msm_dset, attr_name, band=None):
"""
Returns value attribute of measurement dataset "msm_dset"
Parameters
----------
attr_name : string
Name of the attribute
msm_dset : string
Name of measurement dataset
band : None or {'1', '2', '3', ..., '8'}
Select one of the band present in the product
Default is 'None' which returns the first available band
Returns
-------
out : scalar or numpy array
Value of attribute "attr_name"
"""
if self.__msm_path is None:
return None
if band is None:
band = self.bands[0]
msm_path = self.__msm_path.replace('%', band)
ds_path = str(PurePosixPath(msm_path, 'OBSERVATIONS', msm_dset))
if attr_name in self.fid[ds_path].attrs.keys():
attr = self.fid[ds_path].attrs[attr_name]
if isinstance(attr, bytes):
return attr.decode('ascii')
return attr
return None
def get_msm_data(self, msm_dset, band=None,
fill_as_nan=False, msm_to_row=None):
"""
Reads data from dataset "msm_dset"
Parameters
----------
msm_dset : string
Name of measurement dataset.
band : None or {'1', '2', '3', ..., '8'}
Select one of the band present in the product
Default is 'None' which returns
both bands (Calibration, Irradiance)
or one band (Radiance)
fill_as_nan : boolean
Set data values equal (KNMI) FillValue to NaN
msm_to_row : boolean
Combine two bands using padding if necessary
Returns
-------
out : values read from or written to dataset "msm_dset"
"""
fillvalue = float.fromhex('0x1.ep+122')
if self.__msm_path is None:
return None
if band is None:
band = self.bands
elif not isinstance(band, str):
raise TypeError('band must be a string')
elif band not in self.bands:
raise ValueError('band not found in product')
if len(band) == 2 and msm_to_row is None:
msm_to_row = 'padding'
data = ()
for ii in band:
msm_path = self.__msm_path.replace('%', ii)
ds_path = str(PurePosixPath(msm_path, 'OBSERVATIONS', msm_dset))
dset = self.fid[ds_path]
if fill_as_nan and dset.attrs['_FillValue'] == fillvalue:
buff = np.squeeze(dset)
buff[(buff == fillvalue)] = np.nan
data += (buff,)
else:
data += (np.squeeze(dset),)
if len(band) == 1:
return data[0]
if msm_to_row == 'padding':
data = pad_rows(data[0], data[1])
return np.concatenate(data, axis=data[0].ndim-1)
def set_msm_data(self, msm_dset, new_data):
"""
Replace data of dataset "msm_dset" with new_data
Parameters
----------
msm_dset : string
Name of measurement dataset.
new_data : array-like
Data to be written with same dimensions as dataset "msm_dset"
"""
if self.__msm_path is None:
return
# we will overwrite existing data, thus readwrite access is required
if not self.__rw:
raise PermissionError('read/write access required')
# overwrite the data
col = 0
for ii in self.bands:
msm_path = self.__msm_path.replace('%', ii)
ds_path = str(PurePosixPath(msm_path, 'OBSERVATIONS', msm_dset))
dset = self.fid[ds_path]
dims = dset.shape
dset[0, ...] = new_data[..., col:col+dims[-1]]
col += dims[-1]
# update patch logging
self.__patched_msm.append(ds_path)
# --------------------------------------------------
class L1BioIRR(L1Bio):
"""
class with methods to access Tropomi L1B irradiance products
"""
band_groups = ('/BAND%_IRRADIANCE',)
geo_dset = 'earth_sun_distance'
msm_type = 'STANDARD_MODE'
# --------------------------------------------------
class L1BioRAD(L1Bio):
"""
class with function to access Tropomi L1B radiance products
"""
band_groups = ('/BAND%_RADIANCE',)
geo_dset = 'latitude,longitude'
msm_type = 'STANDARD_MODE'
# --------------------------------------------------
class L1BioENG:
"""
class with methods to access Tropomi offline L1b engineering products
Attributes
----------
fid : HDF5 file object
filename : string
Methods
-------
close()
        Close resources.
get_attr(attr_name)
Obtain value of an HDF5 file attribute.
get_orbit()
Returns absolute orbit number.
get_processor_version()
Returns version of the L01b processor used to generate this product.
get_coverage_time()
Returns start and end of the measurement coverage time.
get_creation_time()
Returns datetime when the L1b product was created.
get_ref_time()
Returns reference start time of measurements.
get_delta_time()
Returns offset from the reference start time of measurement.
get_msmtset()
Returns L1B_ENG_DB/SATELLITE_INFO/satellite_pos.
get_msmtset_db()
Returns compressed msmtset from L1B_ENG_DB/MSMTSET/msmtset.
get_swir_hk_db(stats=None, fill_as_nan=False)
Returns the most important SWIR house keeping parameters.
Notes
-----
The L1b engineering products are available for UVN (band 1-6)
and SWIR (band 7-8).
Examples
--------
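    A minimal usage sketch (the product filename is an illustrative
    assumption):
    >>> with L1BioENG('S5P_OFFL_L1B_ENG_DB_...nc') as eng:
    ...     orbit = eng.get_orbit()
    ...     swir_hk = eng.get_swir_hk_db(stats='median', fill_as_nan=True)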
"""
def __init__(self, l1b_product):
"""
Initialize access to a Tropomi offline L1b product
"""
# open L1b product as HDF5 file
if not Path(l1b_product).is_file():
raise FileNotFoundError(f'{l1b_product} does not exist')
# initialize private class-attributes
self.filename = l1b_product
self.fid = h5py.File(l1b_product, "r")
def __repr__(self):
class_name = type(self).__name__
return f'{class_name}({self.filename!r})'
def __iter__(self):
for attr in sorted(self.__dict__):
if not attr.startswith("__"):
yield attr
def __enter__(self):
"""
method called to initiate the context manager
"""
return self
def __exit__(self, exc_type, exc_value, traceback):
"""
method called when exiting the context manager
"""
self.close()
return False # any exception is raised by the with statement.
def close(self):
"""
close access to product
"""
if self.fid is None:
return
self.fid.close()
self.fid = None
# ---------- PUBLIC FUNCTIONS ----------
def get_attr(self, attr_name):
"""
Obtain value of an HDF5 file attribute
Parameters
----------
attr_name : string
Name of the attribute
"""
if attr_name not in self.fid.attrs.keys():
return None
attr = self.fid.attrs[attr_name]
if attr.shape is None:
return None
return attr
def get_orbit(self):
"""
Returns absolute orbit number
"""
res = self.get_attr('orbit')
if res is None:
return None
return int(res)
def get_processor_version(self):
"""
Returns version of the L01b processor
"""
attr = self.get_attr('processor_version')
if attr is None:
return None
# pylint: disable=no-member
return attr.decode('ascii')
def get_coverage_time(self):
"""
Returns start and end of the measurement coverage time
"""
attr_start = self.get_attr('time_coverage_start')
if attr_start is None:
return None
attr_end = self.get_attr('time_coverage_end')
if attr_end is None:
return None
# pylint: disable=no-member
return (attr_start.decode('ascii'),
attr_end.decode('ascii'))
def get_creation_time(self):
"""
Returns datetime when the L1b product was created
"""
grp = self.fid['/METADATA/ESA_METADATA/earth_explorer_header']
dset = grp['fixed_header/source']
if 'Creation_Date' in self.fid.attrs.keys():
attr = dset.attrs['Creation_Date']
if isinstance(attr, bytes):
return attr.decode('ascii')
return attr
return None
def get_ref_time(self):
"""
Returns reference start time of measurements
"""
return self.fid['reference_time'][0].astype(int)
def get_delta_time(self):
"""
Returns offset from the reference start time of measurement
"""
return self.fid['/MSMTSET/msmtset']['delta_time'][:].astype(int)
def get_msmtset(self):
"""
Returns L1B_ENG_DB/SATELLITE_INFO/satellite_pos
"""
return self.fid['/SATELLITE_INFO/satellite_pos'][:]
def get_msmtset_db(self):
"""
Returns compressed msmtset from L1B_ENG_DB/MSMTSET/msmtset
Notes
-----
This function is used to fill the SQLite product databases
"""
dtype_msmt_db = np.dtype([('meta_id', np.int32),
('ic_id', np.uint16),
('ic_version', np.uint8),
('class', np.uint8),
('repeats', np.uint16),
('exp_per_mcp', np.uint16),
('exp_time_us', np.uint32),
('mcp_us', np.uint32),
('delta_time_start', np.int32),
('delta_time_end', np.int32)])
# read full msmtset
msmtset = self.fid['/MSMTSET/msmtset'][:]
# get indices to start and end of every measurement (based in ICID)
icid = msmtset['icid']
indx = (np.diff(icid) != 0).nonzero()[0] + 1
indx = np.insert(indx, 0, 0)
indx = np.append(indx, -1)
# compress data from msmtset
msmt = np.zeros(indx.size-1, dtype=dtype_msmt_db)
msmt['ic_id'][:] = msmtset['icid'][indx[0:-1]]
msmt['ic_version'][:] = msmtset['icv'][indx[0:-1]]
msmt['class'][:] = msmtset['class'][indx[0:-1]]
msmt['delta_time_start'][:] = msmtset['delta_time'][indx[0:-1]]
msmt['delta_time_end'][:] = msmtset['delta_time'][indx[1:]]
# add SWIR timing information
timing = self.fid['/DETECTOR4/timing'][:]
msmt['mcp_us'][:] = timing['mcp_us'][indx[1:]-1]
msmt['exp_time_us'][:] = timing['exp_time_us'][indx[1:]-1]
msmt['exp_per_mcp'][:] = timing['exp_per_mcp'][indx[1:]-1]
# duration per ICID execution in micro-seconds
duration = 1000 * (msmt['delta_time_end'] - msmt['delta_time_start'])
# duration can be zero
mask = msmt['mcp_us'] > 0
# divide duration by measurement period in micro-seconds
msmt['repeats'][mask] = (duration[mask]
/ (msmt['mcp_us'][mask])).astype(np.uint16)
return msmt
def get_swir_hk_db(self, stats=None, fill_as_nan=False):
"""
Returns the most important SWIR house keeping parameters
Parameters
----------
fill_as_nan : boolean
Replace (float) FillValues with Nan's, when True
Notes
-----
        This function is used to fill the SQLite product database and
HDF5 monitoring database
"""
dtype_hk_db = np.dtype([('detector_temp', np.float32),
('grating_temp', np.float32),
('imager_temp', np.float32),
('obm_temp', np.float32),
('calib_unit_temp', np.float32),
('fee_inner_temp', np.float32),
('fee_board_temp', np.float32),
('fee_ref_volt_temp', np.float32),
('fee_video_amp_temp', np.float32),
('fee_video_adc_temp', np.float32),
('detector_heater', np.float32),
('obm_heater_cycle', np.float32),
('fee_box_heater_cycle', np.float32),
('obm_heater', np.float32),
('fee_box_heater', np.float32)])
num_eng_pkts = self.fid['nr_of_engdat_pkts'].size
swir_hk = np.empty(num_eng_pkts, dtype=dtype_hk_db)
hk_tbl = self.fid['/DETECTOR4/DETECTOR_HK/temperature_info'][:]
swir_hk['detector_temp'] = hk_tbl['temp_det_ts2']
swir_hk['fee_inner_temp'] = hk_tbl['temp_d1_box']
swir_hk['fee_board_temp'] = hk_tbl['temp_d5_cold']
swir_hk['fee_ref_volt_temp'] = hk_tbl['temp_a3_vref']
swir_hk['fee_video_amp_temp'] = hk_tbl['temp_d6_vamp']
swir_hk['fee_video_adc_temp'] = hk_tbl['temp_d4_vadc']
hk_tbl = self.fid['/NOMINAL_HK/TEMPERATURES/hires_temperatures'][:]
swir_hk['grating_temp'] = hk_tbl['hires_temp_1']
hk_tbl = self.fid['/NOMINAL_HK/TEMPERATURES/instr_temperatures'][:]
swir_hk['imager_temp'] = hk_tbl['instr_temp_29']
swir_hk['obm_temp'] = hk_tbl['instr_temp_28']
swir_hk['calib_unit_temp'] = hk_tbl['instr_temp_25']
hk_tbl = self.fid['/DETECTOR4/DETECTOR_HK/heater_data'][:]
swir_hk['detector_heater'] = hk_tbl['det_htr_curr']
hk_tbl = self.fid['/NOMINAL_HK/HEATERS/heater_data'][:]
swir_hk['obm_heater'] = hk_tbl['meas_cur_val_htr12']
swir_hk['obm_heater_cycle'] = hk_tbl['last_pwm_val_htr12']
swir_hk['fee_box_heater'] = hk_tbl['meas_cur_val_htr13']
swir_hk['fee_box_heater_cycle'] = hk_tbl['last_pwm_val_htr13']
# CHECK: works only when all elements of swir_hk are floats
if fill_as_nan:
for key in dtype_hk_db.names:
swir_hk[key][swir_hk[key] == 999.] = np.nan
if stats is None:
return swir_hk
if stats == 'median':
hk_median = np.empty(1, dtype=dtype_hk_db)
for key in dtype_hk_db.names:
if np.all(np.isnan(swir_hk[key])):
hk_median[key][0] = np.nan
elif np.nanmin(swir_hk[key]) == np.nanmax(swir_hk[key]):
hk_median[key][0] = swir_hk[key][0]
else:
hk_median[key][0] = biweight(swir_hk[key])
return hk_median
if stats == 'range':
hk_min = np.empty(1, dtype=dtype_hk_db)
hk_max = np.empty(1, dtype=dtype_hk_db)
for key in dtype_hk_db.names:
if np.all(np.isnan(swir_hk[key])):
hk_min[key][0] = np.nan
hk_max[key][0] = np.nan
                elif np.nanmin(swir_hk[key]) == np.nanmax(swir_hk[key]):
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import numpy as np
from collections import defaultdict
import copy
np.random.seed(2021)
# In[2]:
def temp_scaled_softmax(data,temp=1.0):
prob=np.exp(data/temp)/np.sum(np.exp(data/temp),axis=-1)
return prob
def greedy_search(prob):
return np.argmax(prob)
def topk_sampling(prob,k=10,verbose=True):
# this function is only used to process one single example
if prob.ndim>1:
prob=prob[0]
topk_label=np.argsort(prob)[-k:]
topk_prob=prob[topk_label]/np.sum(prob[topk_label])
label=np.random.choice(topk_label,p=topk_prob)
if verbose:
print('orig_all_prob:{}'.format(prob))
print('**********************')
print('topk_sorted_label:{}'.format(topk_label))
print('topk_sorted_prob:{}'.format(prob[topk_label]))
print('**********************')
print('topk_new_prob:{}'.format(topk_prob))
print('finally sampled label:{}'.format(label))
return label
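# Illustrative check (the logits below are made up): a low temperature
# sharpens the softmax distribution before top-k sampling restricts the
# candidate set further.
# logits = np.array([2.0, 1.0, 0.5, 0.1])
# prob = temp_scaled_softmax(logits, temp=0.7)
# label = topk_sampling(prob, k=2, verbose=False)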
def topp_sampling(prob,p=0.9,verbose=True):
# this function is only used to process one single example
if prob.ndim>1:
prob=prob[0]
sorted_htol_label=np.argsort(prob)[::-1]
sorted_htol_prob=prob[sorted_htol_label]
for i in range(prob.shape[0]):
if np.sum(sorted_htol_prob[:i+1])>=p:
break
topp_htol_label=sorted_htol_label[:i+1]
    topp_htol_prob=prob[topp_htol_label]/np.sum(prob[topp_htol_label])
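    # The remainder of this function is missing from this excerpt; a minimal
    # completion, assumed to mirror topk_sampling above, is:
    label=np.random.choice(topp_htol_label,p=topp_htol_prob)
    if verbose:
        print('topp_kept_label:{}'.format(topp_htol_label))
        print('topp_new_prob:{}'.format(topp_htol_prob))
        print('finally sampled label:{}'.format(label))
    return label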
from __future__ import print_function, division, absolute_import
import functools
import sys
import warnings
# unittest only added self.subTest() in 3.4
if sys.version_info[0] < 3 or sys.version_info[1] < 4:
import unittest2 as unittest
else:
import unittest
# unittest.mock is not available in 2.7 (though unittest2 might contain it?)
try:
import unittest.mock as mock
except ImportError:
import mock
import matplotlib
matplotlib.use('Agg') # fix execution of tests involving matplotlib on travis
import numpy as np
import six.moves as sm
import imgaug as ia
from imgaug import augmenters as iaa
from imgaug import parameters as iap
from imgaug import dtypes as iadt
from imgaug.testutils import (array_equal_lists, keypoints_equal, reseed,
runtest_pickleable_uint8_img)
import imgaug.augmenters.arithmetic as arithmetic_lib
import imgaug.augmenters.contrast as contrast_lib
class TestAdd(unittest.TestCase):
def setUp(self):
reseed()
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_ = iaa.Add(value="test")
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
_ = iaa.Add(value=1, per_channel="test")
except Exception:
got_exception = True
assert got_exception
def test_add_zero(self):
        # no add, shouldn't change anything
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Add(value=0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
def test_add_one(self):
# add > 0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Add(value=1)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images + 1
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = [images_list[0] + 1]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images + 1
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [images_list[0] + 1]
assert array_equal_lists(observed, expected)
def test_minus_one(self):
# add < 0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Add(value=-1)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images - 1
assert np.array_equal(observed, expected)
observed = aug.augment_images(images_list)
expected = [images_list[0] - 1]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images - 1
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [images_list[0] - 1]
assert array_equal_lists(observed, expected)
def test_uint8_every_possible_value(self):
# uint8, every possible addition for base value 127
for value_type in [float, int]:
for per_channel in [False, True]:
for value in np.arange(-255, 255+1):
aug = iaa.Add(value=value_type(value), per_channel=per_channel)
expected = np.clip(127 + value_type(value), 0, 255)
img = np.full((1, 1), 127, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == expected
img = np.full((1, 1, 3), 127, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert np.all(img_aug == expected)
def test_add_floats(self):
# specific tests with floats
aug = iaa.Add(value=0.75)
img = np.full((1, 1), 1, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == 2
img = np.full((1, 1), 1, dtype=np.uint16)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == 2
aug = iaa.Add(value=0.45)
img = np.full((1, 1), 1, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == 1
img = np.full((1, 1), 1, dtype=np.uint16)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == 1
def test_stochastic_parameters_as_value(self):
# test other parameters
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.Add(value=iap.DiscreteUniform(1, 10))
observed = aug.augment_images(images)
assert 100 + 1 <= np.average(observed) <= 100 + 10
aug = iaa.Add(value=iap.Uniform(1, 10))
observed = aug.augment_images(images)
assert 100 + 1 <= np.average(observed) <= 100 + 10
aug = iaa.Add(value=iap.Clip(iap.Normal(1, 1), -3, 3))
observed = aug.augment_images(images)
assert 100 - 3 <= np.average(observed) <= 100 + 3
aug = iaa.Add(value=iap.Discretize(iap.Clip(iap.Normal(1, 1), -3, 3)))
observed = aug.augment_images(images)
assert 100 - 3 <= np.average(observed) <= 100 + 3
def test_keypoints_dont_change(self):
        # keypoints shouldn't be changed
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
aug = iaa.Add(value=1)
aug_det = iaa.Add(value=1).to_deterministic()
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
def test_tuple_as_value(self):
# varying values
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.Add(value=(0, 10))
aug_det = aug.to_deterministic()
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.7)
assert nb_changed_aug_det == 0
def test_per_channel(self):
# test channelwise
aug = iaa.Add(value=iap.Choice([0, 1]), per_channel=True)
observed = aug.augment_image(np.zeros((1, 1, 100), dtype=np.uint8))
uq = np.unique(observed)
assert observed.shape == (1, 1, 100)
assert 0 in uq
assert 1 in uq
assert len(uq) == 2
def test_per_channel_with_probability(self):
# test channelwise with probability
aug = iaa.Add(value=iap.Choice([0, 1]), per_channel=0.5)
seen = [0, 0]
for _ in sm.xrange(400):
observed = aug.augment_image(np.zeros((1, 1, 20), dtype=np.uint8))
assert observed.shape == (1, 1, 20)
uq = np.unique(observed)
per_channel = (len(uq) == 2)
if per_channel:
seen[0] += 1
else:
seen[1] += 1
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.Add(1)
image_aug = aug(image=image)
assert np.all(image_aug == 1)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_unusual_channel_numbers(self):
shapes = [
(1, 1, 4),
(1, 1, 5),
(1, 1, 512),
(1, 1, 513)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.Add(1)
image_aug = aug(image=image)
assert np.all(image_aug == 1)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_get_parameters(self):
# test get_parameters()
aug = iaa.Add(value=1, per_channel=False)
params = aug.get_parameters()
assert isinstance(params[0], iap.Deterministic)
assert isinstance(params[1], iap.Deterministic)
assert params[0].value == 1
assert params[1].value == 0
def test_heatmaps(self):
# test heatmaps (not affected by augmenter)
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
aug = iaa.Add(value=10)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_other_dtypes_bool(self):
image = np.zeros((3, 3), dtype=bool)
aug = iaa.Add(value=1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.Add(value=1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.Add(value=-1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.Add(value=-2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
def test_other_dtypes_uint_int(self):
for dtype in [np.uint8, np.uint16, np.int8, np.int16]:
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
image = np.full((3, 3), min_value, dtype=dtype)
aug = iaa.Add(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value + 21)
image = np.full((3, 3), max_value - 2, dtype=dtype)
aug = iaa.Add(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value - 1)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.Add(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.Add(2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(-9)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(-10)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(-11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value)
for _ in sm.xrange(10):
image = np.full((1, 1, 3), 20, dtype=dtype)
aug = iaa.Add(iap.Uniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) == 1
image = np.full((1, 1, 100), 20, dtype=dtype)
aug = iaa.Add(iap.Uniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
image = np.full((1, 1, 3), 20, dtype=dtype)
aug = iaa.Add(iap.DiscreteUniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) == 1
image = np.full((1, 1, 100), 20, dtype=dtype)
aug = iaa.Add(iap.DiscreteUniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
def test_other_dtypes_float(self):
# float
for dtype in [np.float16, np.float32]:
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
if dtype == np.float16:
atol = 1e-3 * max_value
else:
atol = 1e-9 * max_value
_allclose = functools.partial(np.allclose, atol=atol, rtol=0)
image = np.full((3, 3), min_value, dtype=dtype)
aug = iaa.Add(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value + 21)
image = np.full((3, 3), max_value - 2, dtype=dtype)
aug = iaa.Add(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, max_value - 1)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.Add(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, max_value)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.Add(2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, max_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(-9)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(-10)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(-11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value)
for _ in sm.xrange(10):
image = np.full((50, 1, 3), 0, dtype=dtype)
aug = iaa.Add(iap.Uniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 0, dtype=dtype)
aug = iaa.Add(iap.Uniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
image = np.full((50, 1, 3), 0, dtype=dtype)
aug = iaa.Add(iap.DiscreteUniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 0, dtype=dtype)
aug = iaa.Add(iap.DiscreteUniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
def test_pickleable(self):
aug = iaa.Add((0, 50), per_channel=True, random_state=1)
runtest_pickleable_uint8_img(aug, iterations=10)
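# Illustrative sketch (not part of the test suite; values are example choices):
# typical usage of iaa.Add as exercised by TestAdd above, sampling one offset
# per image and, with probability 0.5, one offset per channel.
def _example_add_usage():
    image = np.full((4, 4, 3), 128, dtype=np.uint8)
    aug = iaa.Add(value=(-20, 20), per_channel=0.5)
    return aug.augment_image(image)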
class TestAddElementwise(unittest.TestCase):
def setUp(self):
reseed()
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_aug = iaa.AddElementwise(value="test")
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
_aug = iaa.AddElementwise(value=1, per_channel="test")
except Exception:
got_exception = True
assert got_exception
def test_add_zero(self):
        # no add, shouldn't change anything
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.AddElementwise(value=0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
def test_add_one(self):
# add > 0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.AddElementwise(value=1)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images + 1
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = [images_list[0] + 1]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images + 1
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [images_list[0] + 1]
assert array_equal_lists(observed, expected)
def test_add_minus_one(self):
# add < 0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.AddElementwise(value=-1)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images - 1
assert np.array_equal(observed, expected)
observed = aug.augment_images(images_list)
expected = [images_list[0] - 1]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images - 1
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [images_list[0] - 1]
assert array_equal_lists(observed, expected)
def test_uint8_every_possible_value(self):
# uint8, every possible addition for base value 127
for value_type in [int]:
for per_channel in [False, True]:
for value in np.arange(-255, 255+1):
aug = iaa.AddElementwise(value=value_type(value), per_channel=per_channel)
expected = np.clip(127 + value_type(value), 0, 255)
img = np.full((1, 1), 127, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == expected
img = np.full((1, 1, 3), 127, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert np.all(img_aug == expected)
def test_stochastic_parameters_as_value(self):
# test other parameters
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.AddElementwise(value=iap.DiscreteUniform(1, 10))
observed = aug.augment_images(images)
assert np.min(observed) >= 100 + 1
assert np.max(observed) <= 100 + 10
aug = iaa.AddElementwise(value=iap.Uniform(1, 10))
observed = aug.augment_images(images)
assert np.min(observed) >= 100 + 1
assert np.max(observed) <= 100 + 10
aug = iaa.AddElementwise(value=iap.Clip(iap.Normal(1, 1), -3, 3))
observed = aug.augment_images(images)
assert np.min(observed) >= 100 - 3
assert np.max(observed) <= 100 + 3
aug = iaa.AddElementwise(value=iap.Discretize(iap.Clip(iap.Normal(1, 1), -3, 3)))
observed = aug.augment_images(images)
assert np.min(observed) >= 100 - 3
assert np.max(observed) <= 100 + 3
def test_keypoints_dont_change(self):
        # keypoints shouldn't be changed
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
aug = iaa.AddElementwise(value=1)
aug_det = iaa.AddElementwise(value=1).to_deterministic()
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
def test_tuple_as_value(self):
# varying values
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.AddElementwise(value=(0, 10))
aug_det = aug.to_deterministic()
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.7)
assert nb_changed_aug_det == 0
def test_samples_change_by_spatial_location(self):
# values should change between pixels
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.AddElementwise(value=(-50, 50))
nb_same = 0
nb_different = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_flat = observed_aug.flatten()
last = None
for j in sm.xrange(observed_aug_flat.size):
if last is not None:
v = observed_aug_flat[j]
if v - 0.0001 <= last <= v + 0.0001:
nb_same += 1
else:
nb_different += 1
last = observed_aug_flat[j]
assert nb_different > 0.9 * (nb_different + nb_same)
def test_per_channel(self):
# test channelwise
aug = iaa.AddElementwise(value=iap.Choice([0, 1]), per_channel=True)
observed = aug.augment_image(np.zeros((100, 100, 3), dtype=np.uint8))
sums = np.sum(observed, axis=2)
values = np.unique(sums)
assert all([(value in values) for value in [0, 1, 2, 3]])
def test_per_channel_with_probability(self):
# test channelwise with probability
aug = iaa.AddElementwise(value=iap.Choice([0, 1]), per_channel=0.5)
seen = [0, 0]
for _ in sm.xrange(400):
observed = aug.augment_image(np.zeros((20, 20, 3), dtype=np.uint8))
sums = np.sum(observed, axis=2)
values = np.unique(sums)
all_values_found = all([(value in values) for value in [0, 1, 2, 3]])
if all_values_found:
seen[0] += 1
else:
seen[1] += 1
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.AddElementwise(1)
image_aug = aug(image=image)
assert np.all(image_aug == 1)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_unusual_channel_numbers(self):
shapes = [
(1, 1, 4),
(1, 1, 5),
(1, 1, 512),
(1, 1, 513)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.AddElementwise(1)
image_aug = aug(image=image)
assert np.all(image_aug == 1)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_get_parameters(self):
# test get_parameters()
aug = iaa.AddElementwise(value=1, per_channel=False)
params = aug.get_parameters()
assert isinstance(params[0], iap.Deterministic)
assert isinstance(params[1], iap.Deterministic)
assert params[0].value == 1
assert params[1].value == 0
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.AddElementwise(value=10)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_other_dtypes_bool(self):
# bool
image = np.zeros((3, 3), dtype=bool)
aug = iaa.AddElementwise(value=1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.AddElementwise(value=1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.AddElementwise(value=-1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.AddElementwise(value=-2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
def test_other_dtypes_uint_int(self):
# uint, int
for dtype in [np.uint8, np.uint16, np.int8, np.int16]:
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
image = np.full((3, 3), min_value, dtype=dtype)
aug = iaa.AddElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value + 21)
image = np.full((3, 3), max_value - 2, dtype=dtype)
aug = iaa.AddElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value - 1)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.AddElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.AddElementwise(2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(-9)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(-10)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(-11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value)
for _ in sm.xrange(10):
image = np.full((5, 5, 3), 20, dtype=dtype)
aug = iaa.AddElementwise(iap.Uniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
assert np.all(image_aug[..., 0] == image_aug[..., 1])
image = np.full((1, 1, 100), 20, dtype=dtype)
aug = iaa.AddElementwise(iap.Uniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
image = np.full((5, 5, 3), 20, dtype=dtype)
aug = iaa.AddElementwise(iap.DiscreteUniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
assert np.all(image_aug[..., 0] == image_aug[..., 1])
image = np.full((1, 1, 100), 20, dtype=dtype)
aug = iaa.AddElementwise(iap.DiscreteUniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
def test_other_dtypes_float(self):
# float
for dtype in [np.float16, np.float32]:
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
if dtype == np.float16:
atol = 1e-3 * max_value
else:
atol = 1e-9 * max_value
_allclose = functools.partial(np.allclose, atol=atol, rtol=0)
image = np.full((3, 3), min_value, dtype=dtype)
aug = iaa.AddElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value + 21)
image = np.full((3, 3), max_value - 2, dtype=dtype)
aug = iaa.AddElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, max_value - 1)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.AddElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, max_value)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.AddElementwise(2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, max_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(-9)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(-10)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(-11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value)
for _ in sm.xrange(10):
image = np.full((50, 1, 3), 0, dtype=dtype)
aug = iaa.AddElementwise(iap.Uniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert not np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 0, dtype=dtype)
aug = iaa.AddElementwise(iap.Uniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
image = np.full((50, 1, 3), 0, dtype=dtype)
aug = iaa.AddElementwise(iap.DiscreteUniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert not np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 0, dtype=dtype)
aug = iaa.AddElementwise(iap.DiscreteUniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
def test_pickleable(self):
aug = iaa.AddElementwise((0, 50), per_channel=True, random_state=1)
runtest_pickleable_uint8_img(aug, iterations=2)
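# Illustrative sketch (not part of the test suite; values are example choices):
# iaa.AddElementwise samples a separate offset per pixel (and per channel when
# per_channel=True), unlike iaa.Add which uses one offset per image.
def _example_add_elementwise_usage():
    image = np.full((4, 4, 3), 128, dtype=np.uint8)
    aug = iaa.AddElementwise(value=(-10, 10), per_channel=True)
    return aug.augment_image(image)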
class TestAdditiveGaussianNoise(unittest.TestCase):
def setUp(self):
reseed()
def test_loc_zero_scale_zero(self):
        # no noise, shouldn't change anything
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
images = np.array([base_img])
aug = iaa.AdditiveGaussianNoise(loc=0, scale=0)
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
def test_loc_zero_scale_nonzero(self):
# zero-centered noise
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
images = np.array([base_img])
images_list = [base_img]
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
aug = iaa.AdditiveGaussianNoise(loc=0, scale=0.2 * 255)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
assert not np.array_equal(observed, images)
observed = aug_det.augment_images(images)
assert not np.array_equal(observed, images)
observed = aug.augment_images(images_list)
assert not array_equal_lists(observed, images_list)
observed = aug_det.augment_images(images_list)
assert not array_equal_lists(observed, images_list)
observed = aug.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints)
observed = aug_det.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints)
def test_std_dev_of_added_noise_matches_scale(self):
# std correct?
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=0, scale=0.2 * 255)
images = np.ones((1, 1, 1, 1), dtype=np.uint8) * 128
nb_iterations = 1000
values = []
for i in sm.xrange(nb_iterations):
images_aug = aug.augment_images(images)
values.append(images_aug[0, 0, 0, 0])
values = np.array(values)
assert np.min(values) == 0
assert 0.1 < np.std(values) / 255.0 < 0.4
def test_nonzero_loc(self):
# non-zero loc
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=0.25 * 255, scale=0.01 * 255)
images = np.ones((1, 1, 1, 1), dtype=np.uint8) * 128
nb_iterations = 1000
values = []
for i in sm.xrange(nb_iterations):
images_aug = aug.augment_images(images)
values.append(images_aug[0, 0, 0, 0] - 128)
values = np.array(values)
assert 54 < np.average(values) < 74 # loc=0.25 should be around 255*0.25=64 average
def test_tuple_as_loc(self):
# varying locs
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=(0, 0.5 * 255), scale=0.0001 * 255)
aug_det = aug.to_deterministic()
images = np.ones((1, 1, 1, 1), dtype=np.uint8) * 128
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.95)
assert nb_changed_aug_det == 0
def test_stochastic_parameter_as_loc(self):
# varying locs by stochastic param
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=iap.Choice([-20, 20]), scale=0.0001 * 255)
images = np.ones((1, 1, 1, 1), dtype=np.uint8) * 128
seen = [0, 0]
for i in sm.xrange(200):
observed = aug.augment_images(images)
mean = np.mean(observed)
diff_m20 = abs(mean - (128-20))
diff_p20 = abs(mean - (128+20))
if diff_m20 <= 1:
seen[0] += 1
elif diff_p20 <= 1:
seen[1] += 1
else:
assert False
assert 75 < seen[0] < 125
assert 75 < seen[1] < 125
def test_tuple_as_scale(self):
# varying stds
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=0, scale=(0.01 * 255, 0.2 * 255))
aug_det = aug.to_deterministic()
images = np.ones((1, 1, 1, 1), dtype=np.uint8) * 128
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.95)
assert nb_changed_aug_det == 0
def test_stochastic_parameter_as_scale(self):
# varying stds by stochastic param
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=0, scale=iap.Choice([1, 20]))
images = np.ones((1, 20, 20, 1), dtype=np.uint8) * 128
seen = [0, 0, 0]
for i in sm.xrange(200):
observed = aug.augment_images(images)
std = np.std(observed.astype(np.int32) - 128)
diff_1 = abs(std - 1)
diff_20 = abs(std - 20)
if diff_1 <= 2:
seen[0] += 1
elif diff_20 <= 5:
seen[1] += 1
else:
seen[2] += 1
assert seen[2] <= 5
assert 75 < seen[0] < 125
assert 75 < seen[1] < 125
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_ = iaa.AdditiveGaussianNoise(loc="test")
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
_ = iaa.AdditiveGaussianNoise(scale="test")
except Exception:
got_exception = True
assert got_exception
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=0.5, scale=10)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_pickleable(self):
aug = iaa.AdditiveGaussianNoise(scale=(0.1, 10), per_channel=True,
random_state=1)
runtest_pickleable_uint8_img(aug, iterations=2)
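# Illustrative sketch (not part of the test suite; values are example choices):
# iaa.AdditiveGaussianNoise with loc/scale given as fractions of the uint8
# range, mirroring the convention used in the tests above.
def _example_additive_gaussian_noise_usage():
    image = np.full((16, 16, 3), 128, dtype=np.uint8)
    aug = iaa.AdditiveGaussianNoise(loc=0, scale=(0.0, 0.1 * 255), per_channel=0.5)
    return aug.augment_image(image)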
class TestDropout(unittest.TestCase):
def setUp(self):
reseed()
def test_p_is_zero(self):
        # no dropout, shouldn't change anything
base_img = np.ones((512, 512, 1), dtype=np.uint8) * 255
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Dropout(p=0)
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
# 100% dropout, should drop everything
aug = iaa.Dropout(p=1.0)
observed = aug.augment_images(images)
expected = np.zeros((1, 512, 512, 1), dtype=np.uint8)
assert np.array_equal(observed, expected)
observed = aug.augment_images(images_list)
expected = [np.zeros((512, 512, 1), dtype=np.uint8)]
assert array_equal_lists(observed, expected)
def test_p_is_50_percent(self):
# 50% dropout
base_img = np.ones((512, 512, 1), dtype=np.uint8) * 255
images = np.array([base_img])
images_list = [base_img]
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
aug = iaa.Dropout(p=0.5)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
assert not np.array_equal(observed, images)
percent_nonzero = len(observed.flatten().nonzero()[0]) \
/ (base_img.shape[0] * base_img.shape[1] * base_img.shape[2])
assert 0.35 <= (1 - percent_nonzero) <= 0.65
observed = aug_det.augment_images(images)
assert not np.array_equal(observed, images)
percent_nonzero = len(observed.flatten().nonzero()[0]) \
/ (base_img.shape[0] * base_img.shape[1] * base_img.shape[2])
assert 0.35 <= (1 - percent_nonzero) <= 0.65
observed = aug.augment_images(images_list)
assert not array_equal_lists(observed, images_list)
percent_nonzero = len(observed[0].flatten().nonzero()[0]) \
/ (base_img.shape[0] * base_img.shape[1] * base_img.shape[2])
assert 0.35 <= (1 - percent_nonzero) <= 0.65
observed = aug_det.augment_images(images_list)
assert not array_equal_lists(observed, images_list)
percent_nonzero = len(observed[0].flatten().nonzero()[0]) \
/ (base_img.shape[0] * base_img.shape[1] * base_img.shape[2])
assert 0.35 <= (1 - percent_nonzero) <= 0.65
observed = aug.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints)
observed = aug_det.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints)
def test_tuple_as_p(self):
# varying p
aug = iaa.Dropout(p=(0.0, 1.0))
aug_det = aug.to_deterministic()
images = np.ones((1, 8, 8, 1), dtype=np.uint8) * 255
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.95)
assert nb_changed_aug_det == 0
def test_list_as_p(self):
aug = iaa.Dropout(p=[0.0, 0.5, 1.0])
images = np.ones((1, 20, 20, 1), dtype=np.uint8) * 255
nb_seen = [0, 0, 0, 0]
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
n_dropped = np.sum(observed_aug == 0)
p_observed = n_dropped / observed_aug.size
if 0 <= p_observed <= 0.01:
nb_seen[0] += 1
elif 0.5 - 0.05 <= p_observed <= 0.5 + 0.05:
nb_seen[1] += 1
elif 1.0-0.01 <= p_observed <= 1.0:
nb_seen[2] += 1
else:
nb_seen[3] += 1
assert np.allclose(nb_seen[0:3], nb_iterations*0.33, rtol=0, atol=75)
assert nb_seen[3] < 30
def test_stochastic_parameter_as_p(self):
# varying p by stochastic parameter
aug = iaa.Dropout(p=iap.Binomial(1-iap.Choice([0.0, 0.5])))
images = np.ones((1, 20, 20, 1), dtype=np.uint8) * 255
seen = [0, 0, 0]
for i in sm.xrange(400):
observed = aug.augment_images(images)
p = np.mean(observed == 0)
if 0.4 < p < 0.6:
seen[0] += 1
elif p < 0.1:
seen[1] += 1
else:
seen[2] += 1
assert seen[2] <= 10
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
def test___init___bad_datatypes(self):
# test exception for wrong parameter datatype
got_exception = False
try:
_aug = iaa.Dropout(p="test")
except Exception:
got_exception = True
assert got_exception
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.Dropout(p=1.0)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_pickleable(self):
aug = iaa.Dropout(p=0.5, per_channel=True, random_state=1)
runtest_pickleable_uint8_img(aug, iterations=3)
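# Illustrative sketch (not part of the test suite; values are example choices):
# iaa.Dropout zeroes a randomly chosen fraction p of all pixels, optionally
# sampling the dropout mask per channel.
def _example_dropout_usage():
    image = np.full((16, 16, 3), 255, dtype=np.uint8)
    aug = iaa.Dropout(p=(0.0, 0.2), per_channel=0.5)
    return aug.augment_image(image)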
class TestCoarseDropout(unittest.TestCase):
def setUp(self):
reseed()
def test_p_is_zero(self):
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 100
aug = iaa.CoarseDropout(p=0, size_px=4, size_percent=None, per_channel=False, min_size=4)
observed = aug.augment_image(base_img)
expected = base_img
assert np.array_equal(observed, expected)
def test_p_is_one(self):
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 100
aug = iaa.CoarseDropout(p=1.0, size_px=4, size_percent=None, per_channel=False, min_size=4)
observed = aug.augment_image(base_img)
expected = np.zeros_like(base_img)
assert np.array_equal(observed, expected)
def test_p_is_50_percent(self):
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 100
aug = iaa.CoarseDropout(p=0.5, size_px=1, size_percent=None, per_channel=False, min_size=1)
averages = []
for _ in sm.xrange(50):
observed = aug.augment_image(base_img)
averages.append(np.average(observed))
assert all([v in [0, 100] for v in averages])
assert 50 - 20 < np.average(averages) < 50 + 20
def test_size_percent(self):
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 100
aug = iaa.CoarseDropout(p=0.5, size_px=None, size_percent=0.001, per_channel=False, min_size=1)
averages = []
for _ in sm.xrange(50):
observed = aug.augment_image(base_img)
averages.append(np.average(observed))
assert all([v in [0, 100] for v in averages])
assert 50 - 20 < np.average(averages) < 50 + 20
def test_per_channel(self):
aug = iaa.CoarseDropout(p=0.5, size_px=1, size_percent=None, per_channel=True, min_size=1)
base_img = np.ones((4, 4, 3), dtype=np.uint8) * 100
found = False
for _ in sm.xrange(100):
observed = aug.augment_image(base_img)
avgs = np.average(observed, axis=(0, 1))
if len(set(avgs)) >= 2:
found = True
break
assert found
def test_stochastic_parameter_as_p(self):
# varying p by stochastic parameter
aug = iaa.CoarseDropout(p=iap.Binomial(1-iap.Choice([0.0, 0.5])), size_px=50)
images = np.ones((1, 100, 100, 1), dtype=np.uint8) * 255
seen = [0, 0, 0]
for i in sm.xrange(400):
observed = aug.augment_images(images)
p = np.mean(observed == 0)
if 0.4 < p < 0.6:
seen[0] += 1
elif p < 0.1:
seen[1] += 1
else:
seen[2] += 1
assert seen[2] <= 10
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
def test___init___bad_datatypes(self):
# test exception for bad parameters
got_exception = False
try:
_ = iaa.CoarseDropout(p="test")
except Exception:
got_exception = True
assert got_exception
def test___init___size_px_and_size_percent_both_none(self):
got_exception = False
try:
_ = iaa.CoarseDropout(p=0.5, size_px=None, size_percent=None)
except Exception:
got_exception = True
assert got_exception
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.CoarseDropout(p=1.0, size_px=2)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_pickleable(self):
aug = iaa.CoarseDropout(p=0.5, size_px=10, per_channel=True,
random_state=1)
runtest_pickleable_uint8_img(aug, iterations=10, shape=(40, 40, 3))
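# Illustrative sketch (not part of the test suite; values are example choices):
# iaa.CoarseDropout drops rectangular regions rather than single pixels;
# size_percent controls how coarse the low-resolution dropout mask is.
def _example_coarse_dropout_usage():
    image = np.full((64, 64, 3), 255, dtype=np.uint8)
    aug = iaa.CoarseDropout(p=0.1, size_percent=0.05, per_channel=True)
    return aug.augment_image(image)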
class TestDropout2d(unittest.TestCase):
def setUp(self):
reseed()
def test___init___defaults(self):
aug = iaa.Dropout2d(p=0)
assert isinstance(aug.p, iap.Binomial)
assert np.isclose(aug.p.p.value, 1.0)
assert aug.nb_keep_channels == 1
def test___init___p_is_float(self):
aug = iaa.Dropout2d(p=0.7)
assert isinstance(aug.p, iap.Binomial)
assert np.isclose(aug.p.p.value, 0.3)
assert aug.nb_keep_channels == 1
def test___init___nb_keep_channels_is_int(self):
aug = iaa.Dropout2d(p=0, nb_keep_channels=2)
assert isinstance(aug.p, iap.Binomial)
assert np.isclose(aug.p.p.value, 1.0)
assert aug.nb_keep_channels == 2
def test_no_images_in_batch(self):
aug = iaa.Dropout2d(p=0.0, nb_keep_channels=0)
heatmaps = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
heatmaps = ia.HeatmapsOnImage(heatmaps, shape=(2, 2, 3))
heatmaps_aug = aug(heatmaps=heatmaps)
assert np.allclose(heatmaps_aug.arr_0to1, heatmaps.arr_0to1)
def test_p_is_1(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=0)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == image.dtype.name
assert np.sum(image_aug) == 0
def test_p_is_1_heatmaps(self):
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=0)
arr = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
hm = ia.HeatmapsOnImage(arr, shape=(2, 2, 3))
heatmaps_aug = aug(heatmaps=hm)
assert np.allclose(heatmaps_aug.arr_0to1, 0.0)
def test_p_is_1_segmentation_maps(self):
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=0)
arr = np.int32([
[0, 1],
[0, 1]
])
segmaps = ia.SegmentationMapsOnImage(arr, shape=(2, 2, 3))
segmaps_aug = aug(segmentation_maps=segmaps)
assert np.allclose(segmaps_aug.arr, 0.0)
def test_p_is_1_cbaois(self):
cbaois = [
ia.KeypointsOnImage([ia.Keypoint(x=0, y=1)], shape=(2, 2, 3)),
ia.BoundingBoxesOnImage([ia.BoundingBox(x1=0, y1=1, x2=2, y2=3)],
shape=(2, 2, 3)),
ia.PolygonsOnImage([ia.Polygon([(0, 0), (1, 0), (1, 1)])],
shape=(2, 2, 3)),
ia.LineStringsOnImage([ia.LineString([(0, 0), (1, 0)])],
shape=(2, 2, 3))
]
cbaoi_names = ["keypoints", "bounding_boxes", "polygons",
"line_strings"]
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=0)
for name, cbaoi in zip(cbaoi_names, cbaois):
with self.subTest(datatype=name):
cbaoi_aug = aug(**{name: cbaoi})
assert cbaoi_aug.shape == (2, 2, 3)
assert cbaoi_aug.items == []
def test_p_is_1_heatmaps__keep_one_channel(self):
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=1)
arr = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
hm = ia.HeatmapsOnImage(arr, shape=(2, 2, 3))
heatmaps_aug = aug(heatmaps=hm)
assert np.allclose(heatmaps_aug.arr_0to1, hm.arr_0to1)
def test_p_is_1_segmentation_maps__keep_one_channel(self):
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=1)
arr = np.int32([
[0, 1],
[0, 1]
])
segmaps = ia.SegmentationMapsOnImage(arr, shape=(2, 2, 3))
segmaps_aug = aug(segmentation_maps=segmaps)
assert np.allclose(segmaps_aug.arr, segmaps.arr)
def test_p_is_1_cbaois__keep_one_channel(self):
cbaois = [
ia.KeypointsOnImage([ia.Keypoint(x=0, y=1)], shape=(2, 2, 3)),
ia.BoundingBoxesOnImage([ia.BoundingBox(x1=0, y1=1, x2=2, y2=3)],
shape=(2, 2, 3)),
ia.PolygonsOnImage([ia.Polygon([(0, 0), (1, 0), (1, 1)])],
shape=(2, 2, 3)),
ia.LineStringsOnImage([ia.LineString([(0, 0), (1, 0)])],
shape=(2, 2, 3))
]
cbaoi_names = ["keypoints", "bounding_boxes", "polygons",
"line_strings"]
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=1)
for name, cbaoi in zip(cbaoi_names, cbaois):
with self.subTest(datatype=name):
cbaoi_aug = aug(**{name: cbaoi})
assert cbaoi_aug.shape == (2, 2, 3)
assert np.allclose(
cbaoi_aug.items[0].coords,
cbaoi.items[0].coords
)
def test_p_is_0(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
aug = iaa.Dropout2d(p=0.0, nb_keep_channels=0)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == image.dtype.name
assert np.array_equal(image_aug, image)
def test_p_is_0_heatmaps(self):
aug = iaa.Dropout2d(p=0.0, nb_keep_channels=0)
arr = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
hm = ia.HeatmapsOnImage(arr, shape=(2, 2, 3))
heatmaps_aug = aug(heatmaps=hm)
assert np.allclose(heatmaps_aug.arr_0to1, hm.arr_0to1)
def test_p_is_0_segmentation_maps(self):
aug = iaa.Dropout2d(p=0.0, nb_keep_channels=0)
arr = np.int32([
[0, 1],
[0, 1]
])
segmaps = ia.SegmentationMapsOnImage(arr, shape=(2, 2, 3))
segmaps_aug = aug(segmentation_maps=segmaps)
assert np.allclose(segmaps_aug.arr, segmaps.arr)
def test_p_is_0_cbaois(self):
cbaois = [
ia.KeypointsOnImage([ia.Keypoint(x=0, y=1)], shape=(2, 2, 3)),
ia.BoundingBoxesOnImage([ia.BoundingBox(x1=0, y1=1, x2=2, y2=3)],
shape=(2, 2, 3)),
ia.PolygonsOnImage([ia.Polygon([(0, 0), (1, 0), (1, 1)])],
shape=(2, 2, 3)),
ia.LineStringsOnImage([ia.LineString([(0, 0), (1, 0)])],
shape=(2, 2, 3))
]
cbaoi_names = ["keypoints", "bounding_boxes", "polygons",
"line_strings"]
aug = iaa.Dropout2d(p=0.0, nb_keep_channels=0)
for name, cbaoi in zip(cbaoi_names, cbaois):
with self.subTest(datatype=name):
cbaoi_aug = aug(**{name: cbaoi})
assert cbaoi_aug.shape == (2, 2, 3)
assert np.allclose(
cbaoi_aug.items[0].coords,
cbaoi.items[0].coords
)
def test_p_is_075(self):
image = np.full((1, 1, 3000), 255, dtype=np.uint8)
aug = iaa.Dropout2d(p=0.75, nb_keep_channels=0)
image_aug = aug(image=image)
nb_kept = np.sum(image_aug == 255)
nb_dropped = image.shape[2] - nb_kept
assert image_aug.shape == image.shape
assert image_aug.dtype.name == image.dtype.name
assert np.isclose(nb_dropped, image.shape[2]*0.75, atol=75)
def test_force_nb_keep_channels(self):
image = np.full((1, 1, 3), 255, dtype=np.uint8)
images = np.array([image] * 1000)
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=1)
images_aug = aug(images=images)
ids_kept = [np.nonzero(image[0, 0, :]) for image in images_aug]
ids_kept_uq = np.unique(ids_kept)
nb_kept = np.sum(images_aug == 255)
nb_dropped = (len(images) * images.shape[3]) - nb_kept
assert images_aug.shape == images.shape
assert images_aug.dtype.name == images.dtype.name
# on average, keep 1 of 3 channels
# due to p=1.0 we expect to get exactly 2/3 dropped
assert np.isclose(nb_dropped,
(len(images)*images.shape[3])*(2/3), atol=1)
# every channel dropped at least once, i.e. which one is kept is random
assert sorted(ids_kept_uq.tolist()) == [0, 1, 2]
def test_some_images_below_nb_keep_channels(self):
image_2c = np.full((1, 1, 2), 255, dtype=np.uint8)
image_3c = np.full((1, 1, 3), 255, dtype=np.uint8)
images = [image_2c if i % 2 == 0 else image_3c
for i in sm.xrange(100)]
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=2)
images_aug = aug(images=images)
for i, image_aug in enumerate(images_aug):
assert np.sum(image_aug == 255) == 2
if i % 2 == 0:
assert np.sum(image_aug == 0) == 0
else:
assert np.sum(image_aug == 0) == 1
def test_all_images_below_nb_keep_channels(self):
image = np.full((1, 1, 2), 255, dtype=np.uint8)
images = np.array([image] * 100)
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=3)
images_aug = aug(images=images)
nb_kept = np.sum(images_aug == 255)
nb_dropped = (len(images) * images.shape[3]) - nb_kept
assert nb_dropped == 0
def test_get_parameters(self):
aug = iaa.Dropout2d(p=0.7, nb_keep_channels=2)
params = aug.get_parameters()
assert isinstance(params[0], iap.Binomial)
assert np.isclose(params[0].p.value, 0.3)
assert params[1] == 2
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.full(shape, 255, dtype=np.uint8)
aug = iaa.Dropout2d(1.0, nb_keep_channels=0)
image_aug = aug(image=image)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_other_dtypes_bool(self):
image = np.full((1, 1, 10), 1, dtype=bool)
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=3)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == "bool"
assert np.sum(image_aug == 1) == 3
assert np.sum(image_aug == 0) == 7
def test_other_dtypes_uint_int(self):
dts = ["uint8", "uint16", "uint32", "uint64",
"int8", "int16", "int32", "int64"]
for dt in dts:
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
values = [min_value, int(center_value), max_value]
for value in values:
with self.subTest(dtype=dt, value=value):
image = np.full((1, 1, 10), value, dtype=dt)
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=3)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == dt
if value == 0:
assert np.sum(image_aug == value) == 10
else:
assert np.sum(image_aug == value) == 3
assert np.sum(image_aug == 0) == 7
def test_other_dtypes_float(self):
dts = ["float16", "float32", "float64", "float128"]
for dt in dts:
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
values = [min_value, -10.0, center_value, 10.0, max_value]
atol = 1e-3*max_value if dt == "float16" else 1e-9 * max_value
_isclose = functools.partial(np.isclose, atol=atol, rtol=0)
for value in values:
with self.subTest(dtype=dt, value=value):
image = np.full((1, 1, 10), value, dtype=dt)
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=3)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == dt
if _isclose(value, 0.0):
assert np.sum(_isclose(image_aug, value)) == 10
else:
assert (
np.sum(_isclose(image_aug, np.float128(value)))
== 3)
assert np.sum(image_aug == 0) == 7
def test_pickleable(self):
aug = iaa.Dropout2d(p=0.5, random_state=1)
runtest_pickleable_uint8_img(aug, iterations=3, shape=(1, 1, 50))
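# Illustrative sketch (not part of the test suite; values are example choices):
# iaa.Dropout2d zeroes entire channels while always keeping at least
# nb_keep_channels of them, as verified by TestDropout2d above.
def _example_dropout2d_usage():
    image = np.full((8, 8, 3), 255, dtype=np.uint8)
    aug = iaa.Dropout2d(p=0.3, nb_keep_channels=1)
    return aug(image=image)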
class TestTotalDropout(unittest.TestCase):
def setUp(self):
reseed()
def test___init___p(self):
aug = iaa.TotalDropout(p=0)
assert isinstance(aug.p, iap.Binomial)
assert np.isclose(aug.p.p.value, 1.0)
def test_p_is_1(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
aug = iaa.TotalDropout(p=1.0)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == image.dtype.name
assert np.sum(image_aug) == 0
def test_p_is_1_multiple_images_list(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
images = [image, image, image]
aug = iaa.TotalDropout(p=1.0)
images_aug = aug(images=images)
for image_aug, image_ in zip(images_aug, images):
assert image_aug.shape == image_.shape
assert image_aug.dtype.name == image_.dtype.name
assert np.sum(image_aug) == 0
def test_p_is_1_multiple_images_array(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
images = np.array([image, image, image], dtype=np.uint8)
aug = iaa.TotalDropout(p=1.0)
images_aug = aug(images=images)
assert images_aug.shape == images.shape
assert images_aug.dtype.name == images.dtype.name
assert np.sum(images_aug) == 0
def test_p_is_1_heatmaps(self):
aug = iaa.TotalDropout(p=1.0)
arr = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
hm = ia.HeatmapsOnImage(arr, shape=(2, 2, 3))
heatmaps_aug = aug(heatmaps=hm)
assert np.allclose(heatmaps_aug.arr_0to1, 0.0)
def test_p_is_1_segmentation_maps(self):
aug = iaa.TotalDropout(p=1.0)
arr = np.int32([
[0, 1],
[0, 1]
])
segmaps = ia.SegmentationMapsOnImage(arr, shape=(2, 2, 3))
segmaps_aug = aug(segmentation_maps=segmaps)
assert np.allclose(segmaps_aug.arr, 0.0)
def test_p_is_1_cbaois(self):
cbaois = [
ia.KeypointsOnImage([ia.Keypoint(x=0, y=1)], shape=(2, 2, 3)),
ia.BoundingBoxesOnImage([ia.BoundingBox(x1=0, y1=1, x2=2, y2=3)],
shape=(2, 2, 3)),
ia.PolygonsOnImage([ia.Polygon([(0, 0), (1, 0), (1, 1)])],
shape=(2, 2, 3)),
ia.LineStringsOnImage([ia.LineString([(0, 0), (1, 0)])],
shape=(2, 2, 3))
]
cbaoi_names = ["keypoints", "bounding_boxes", "polygons",
"line_strings"]
aug = iaa.TotalDropout(p=1.0)
for name, cbaoi in zip(cbaoi_names, cbaois):
with self.subTest(datatype=name):
cbaoi_aug = aug(**{name: cbaoi})
assert cbaoi_aug.shape == (2, 2, 3)
assert cbaoi_aug.items == []
def test_p_is_0(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
aug = iaa.TotalDropout(p=0.0)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == image.dtype.name
assert np.array_equal(image_aug, image)
def test_p_is_0_multiple_images_list(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
images = [image, image, image]
aug = iaa.TotalDropout(p=0.0)
images_aug = aug(images=images)
for image_aug, image_ in zip(images_aug, images):
assert image_aug.shape == image_.shape
assert image_aug.dtype.name == image_.dtype.name
assert np.array_equal(image_aug, image_)
def test_p_is_0_multiple_images_array(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
images = np.array([image, image, image], dtype=np.uint8)
aug = iaa.TotalDropout(p=0.0)
images_aug = aug(images=images)
for image_aug, image_ in zip(images_aug, images):
assert image_aug.shape == image_.shape
assert image_aug.dtype.name == image_.dtype.name
assert np.array_equal(image_aug, image_)
def test_p_is_0_heatmaps(self):
aug = iaa.TotalDropout(p=0.0)
arr = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
hm = ia.HeatmapsOnImage(arr, shape=(2, 2, 3))
heatmaps_aug = aug(heatmaps=hm)
assert np.allclose(heatmaps_aug.arr_0to1, hm.arr_0to1)
def test_p_is_0_segmentation_maps(self):
aug = iaa.TotalDropout(p=0.0)
arr = np.int32([
[0, 1],
[0, 1]
])
segmaps = ia.SegmentationMapsOnImage(arr, shape=(2, 2, 3))
segmaps_aug = aug(segmentation_maps=segmaps)
assert np.allclose(segmaps_aug.arr, segmaps.arr)
def test_p_is_0_cbaois(self):
cbaois = [
ia.KeypointsOnImage([ia.Keypoint(x=0, y=1)], shape=(2, 2, 3)),
ia.BoundingBoxesOnImage([ia.BoundingBox(x1=0, y1=1, x2=2, y2=3)],
shape=(2, 2, 3)),
ia.PolygonsOnImage([ia.Polygon([(0, 0), (1, 0), (1, 1)])],
shape=(2, 2, 3)),
ia.LineStringsOnImage([ia.LineString([(0, 0), (1, 0)])],
shape=(2, 2, 3))
]
cbaoi_names = ["keypoints", "bounding_boxes", "polygons",
"line_strings"]
aug = iaa.TotalDropout(p=0.0)
for name, cbaoi in zip(cbaoi_names, cbaois):
with self.subTest(datatype=name):
cbaoi_aug = aug(**{name: cbaoi})
assert cbaoi_aug.shape == (2, 2, 3)
assert np.allclose(
cbaoi_aug.items[0].coords,
cbaoi.items[0].coords
)
def test_p_is_075_multiple_images_list(self):
images = [np.full((1, 1, 1), 255, dtype=np.uint8)] * 3000
aug = iaa.TotalDropout(p=0.75)
images_aug = aug(images=images)
nb_kept = np.sum([np.sum(image_aug == 255) for image_aug in images_aug])
nb_dropped = len(images) - nb_kept
for image_aug in images_aug:
assert image_aug.shape == images[0].shape
assert image_aug.dtype.name == images[0].dtype.name
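# With p=0.75 over 3000 independent single-pixel images, the number of
# dropped images is binomial with mean 2250 and a standard deviation of
# roughly 24, so atol=75 leaves about three standard deviations of slack.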
assert np.isclose(nb_dropped, len(images)*0.75, atol=75)
def test_p_is_075_multiple_images_array(self):
images = np.full((3000, 1, 1, 1), 255, dtype=np.uint8)
aug = iaa.TotalDropout(p=0.75)
images_aug = aug(images=images)
nb_kept = np.sum(images_aug == 255)
nb_dropped = len(images) - nb_kept
assert images_aug.shape == images.shape
assert images_aug.dtype.name == images.dtype.name
assert np.isclose(nb_dropped, len(images)*0.75, atol=75)
def test_get_parameters(self):
aug = iaa.TotalDropout(p=0.0)
params = aug.get_parameters()
assert params[0] is aug.p
def test_unusual_channel_numbers(self):
shapes = [
(5, 1, 1, 4),
(5, 1, 1, 5),
(5, 1, 1, 512),
(5, 1, 1, 513)
]
for shape in shapes:
with self.subTest(shape=shape):
images = np.zeros(shape, dtype=np.uint8)
aug = iaa.TotalDropout(1.0)
images_aug = aug(images=images)
assert np.all(images_aug == 0)
assert images_aug.dtype.name == "uint8"
assert images_aug.shape == shape
def test_zero_sized_axes(self):
shapes = [
(5, 0, 0),
(5, 0, 1),
(5, 1, 0),
(5, 0, 1, 0),
(5, 1, 0, 0),
(5, 0, 1, 1),
(5, 1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
images = np.full(shape, 255, dtype=np.uint8)
aug = iaa.TotalDropout(1.0)
images_aug = aug(images=images)
assert images_aug.dtype.name == "uint8"
assert images_aug.shape == images.shape
def test_other_dtypes_bool(self):
image = np.full((1, 1, 10), 1, dtype=bool)
aug = iaa.TotalDropout(p=1.0)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == "bool"
assert np.sum(image_aug == 1) == 0
def test_other_dtypes_uint_int(self):
dts = ["uint8", "uint16", "uint32", "uint64",
"int8", "int16", "int32", "int64"]
for dt in dts:
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
values = [min_value, int(center_value), max_value]
for value in values:
for p in [1.0, 0.0]:
with self.subTest(dtype=dt, value=value, p=p):
images = np.full((5, 1, 1, 3), value, dtype=dt)
aug = iaa.TotalDropout(p=p)
images_aug = aug(images=images)
assert images_aug.shape == images.shape
assert images_aug.dtype.name == dt
if np.isclose(p, 1.0) or value == 0:
assert np.sum(images_aug == 0) == 5*3
else:
assert np.sum(images_aug == value) == 5*3
def test_other_dtypes_float(self):
dts = ["float16", "float32", "float64", "float128"]
for dt in dts:
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
values = [min_value, -10.0, center_value, 10.0, max_value]
atol = 1e-3*max_value if dt == "float16" else 1e-9 * max_value
_isclose = functools.partial(np.isclose, atol=atol, rtol=0)
for value in values:
for p in [1.0, 0.0]:
with self.subTest(dtype=dt, value=value, p=p):
images = np.full((5, 1, 1, 3), value, dtype=dt)
aug = iaa.TotalDropout(p=p)
images_aug = aug(images=images)
assert images_aug.shape == images.shape
assert images_aug.dtype.name == dt
if np.isclose(p, 1.0):
assert np.sum(_isclose(images_aug, 0.0)) == 5*3
else:
assert (
np.sum(_isclose(images_aug, np.float128(value)))
== 5*3)
def test_pickleable(self):
aug = iaa.TotalDropout(p=0.5, random_state=1)
runtest_pickleable_uint8_img(aug, iterations=30, shape=(4, 4, 2))
class TestMultiply(unittest.TestCase):
def setUp(self):
reseed()
def test_mul_is_one(self):
# no multiply, shouldn't change anything
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Multiply(mul=1.0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
def test_mul_is_above_one(self):
# multiply >1.0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Multiply(mul=1.2)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 120
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 120]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 120
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 120]
assert array_equal_lists(observed, expected)
def test_mul_is_below_one(self):
# multiply <1.0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Multiply(mul=0.8)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 80
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 80]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 80
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 80]
assert array_equal_lists(observed, expected)
def test_keypoints_dont_change(self):
# keypoints shouldn't be changed
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
aug = iaa.Multiply(mul=1.2)
aug_det = iaa.Multiply(mul=1.2).to_deterministic()
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
def test_tuple_as_mul(self):
# varying multiply factors
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.Multiply(mul=(0, 2.0))
aug_det = aug.to_deterministic()
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
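# mul is re-sampled from the continuous range (0, 2.0) on every
# non-deterministic call, so consecutive outputs should almost always
# differ, while the deterministic copy must keep returning the same image.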
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.95)
assert nb_changed_aug_det == 0
def test_per_channel(self):
aug = iaa.Multiply(mul=iap.Choice([0, 2]), per_channel=True)
observed = aug.augment_image(np.ones((1, 1, 100), dtype=np.uint8))
uq = np.unique(observed)
assert observed.shape == (1, 1, 100)
assert 0 in uq
assert 2 in uq
assert len(uq) == 2
def test_per_channel_with_probability(self):
# test channelwise with probability
aug = iaa.Multiply(mul=iap.Choice([0, 2]), per_channel=0.5)
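# per_channel=0.5 means roughly half of the 400 calls sample the factor
# channelwise (which virtually always yields both 0 and 2 among the 20
# channels), while the other half apply a single factor; both counters
# should therefore land near 200.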
seen = [0, 0]
for _ in sm.xrange(400):
observed = aug.augment_image(np.ones((1, 1, 20), dtype=np.uint8))
assert observed.shape == (1, 1, 20)
uq = np.unique(observed)
per_channel = (len(uq) == 2)
if per_channel:
seen[0] += 1
else:
seen[1] += 1
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_ = iaa.Multiply(mul="test")
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
_ = iaa.Multiply(mul=1, per_channel="test")
except Exception:
got_exception = True
assert got_exception
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.ones(shape, dtype=np.uint8)
aug = iaa.Multiply(2)
image_aug = aug(image=image)
assert np.all(image_aug == 2)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_unusual_channel_numbers(self):
shapes = [
(1, 1, 4),
(1, 1, 5),
(1, 1, 512),
(1, 1, 513)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.ones(shape, dtype=np.uint8)
aug = iaa.Multiply(2)
image_aug = aug(image=image)
assert np.all(image_aug == 2)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_get_parameters(self):
# test get_parameters()
aug = iaa.Multiply(mul=1, per_channel=False)
params = aug.get_parameters()
assert isinstance(params[0], iap.Deterministic)
assert isinstance(params[1], iap.Deterministic)
assert params[0].value == 1
assert params[1].value == 0
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.Multiply(mul=2)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_other_dtypes_bool(self):
# bool
image = np.zeros((3, 3), dtype=bool)
aug = iaa.Multiply(1.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.Multiply(1.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.Multiply(2.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.Multiply(0.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.Multiply(-1.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
def test_other_dtypes_uint_int(self):
# uint, int
for dtype in [np.uint8, np.uint16, np.int8, np.int16]:
dtype = np.dtype(dtype)
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.Multiply(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 10)
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.Multiply(10)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 100)
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.Multiply(0.5)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 5)
image = np.full((3, 3), 0, dtype=dtype)
aug = iaa.Multiply(0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 0)
if np.dtype(dtype).kind == "u":
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.Multiply(-1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 0)
else:
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.Multiply(-1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == -10)
image = np.full((3, 3), int(center_value), dtype=dtype)
aug = iaa.Multiply(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == int(center_value))
image = np.full((3, 3), int(center_value), dtype=dtype)
aug = iaa.Multiply(1.2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == int(1.2 * int(center_value)))
if np.dtype(dtype).kind == "u":
image = np.full((3, 3), int(center_value), dtype=dtype)
aug = iaa.Multiply(100)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.Multiply(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
# non-uint8 currently don't increase the itemsize
if dtype.name == "uint8":
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.Multiply(10)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.Multiply(0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 0)
# non-uint8 currently don't increase the itemsize
if dtype.name == "uint8":
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.Multiply(-2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value)
# non-uint8 currently don't increase the itemsize
if dtype.name == "uint8":
for _ in sm.xrange(10):
image = np.full((1, 1, 3), 10, dtype=dtype)
aug = iaa.Multiply(iap.Uniform(0.5, 1.5))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(5 <= image_aug, image_aug <= 15))
assert len(np.unique(image_aug)) == 1
image = np.full((1, 1, 100), 10, dtype=dtype)
aug = iaa.Multiply(iap.Uniform(0.5, 1.5), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(5 <= image_aug, image_aug <= 15))
assert len(np.unique(image_aug)) > 1
image = np.full((1, 1, 3), 10, dtype=dtype)
aug = iaa.Multiply(iap.DiscreteUniform(1, 3))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) == 1
image = np.full((1, 1, 100), 10, dtype=dtype)
aug = iaa.Multiply(iap.DiscreteUniform(1, 3), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
def test_other_dtypes_float(self):
# float
for dtype in [np.float16, np.float32]:
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
if dtype == np.float16:
atol = 1e-3 * max_value
else:
atol = 1e-9 * max_value
_allclose = functools.partial(np.allclose, atol=atol, rtol=0)
image = np.full((3, 3), 10.0, dtype=dtype)
aug = iaa.Multiply(1.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, 10.0)
image = np.full((3, 3), 10.0, dtype=dtype)
aug = iaa.Multiply(2.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, 20.0)
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), max_value, dtype=dtype)
# aug = iaa.Multiply(-10)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert _allclose(image_aug, min_value)
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.Multiply(0.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, 0.0)
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.Multiply(0.5)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, 0.5*max_value)
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), min_value, dtype=dtype)
# aug = iaa.Multiply(-2.0)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert _allclose(image_aug, max_value)
image = np.full((3, 3), min_value, dtype=dtype)
aug = iaa.Multiply(0.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, 0.0)
# using tolerances of -100 - 1e-2 and 100 + 1e-2 is not enough for float16, had to be increased to -/+ 1e-1
# deactivated, because itemsize increase was deactivated
"""
for _ in sm.xrange(10):
image = np.full((1, 1, 3), 10.0, dtype=dtype)
aug = iaa.Multiply(iap.Uniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-100 - 1e-1 < image_aug, image_aug < 100 + 1e-1))
assert np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 10.0, dtype=dtype)
aug = iaa.Multiply(iap.Uniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-100 - 1e-1 < image_aug, image_aug < 100 + 1e-1))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
image = np.full((1, 1, 3), 10.0, dtype=dtype)
aug = iaa.Multiply(iap.DiscreteUniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-100 - 1e-1 < image_aug, image_aug < 100 + 1e-1))
assert np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 10.0, dtype=dtype)
aug = iaa.Multiply(iap.DiscreteUniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-100 - 1e-1 < image_aug, image_aug < 100 + 1e-1))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
"""
def test_pickleable(self):
aug = iaa.Multiply((0.5, 1.5), per_channel=True, random_state=1)
runtest_pickleable_uint8_img(aug, iterations=20)
class TestMultiplyElementwise(unittest.TestCase):
def setUp(self):
reseed()
def test_mul_is_one(self):
# no multiply, shouldn't change anything
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.MultiplyElementwise(mul=1.0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
def test_mul_is_above_one(self):
# multiply >1.0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.MultiplyElementwise(mul=1.2)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 120
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 120]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 120
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 120]
assert array_equal_lists(observed, expected)
def test_mul_is_below_one(self):
# multiply <1.0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.MultiplyElementwise(mul=0.8)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 80
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 80]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 80
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 80]
assert array_equal_lists(observed, expected)
def test_keypoints_dont_change(self):
# keypoints shouldn't be changed
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
aug = iaa.MultiplyElementwise(mul=1.2)
aug_det = iaa.MultiplyElementwise(mul=1.2).to_deterministic()
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
def test_tuple_as_mul(self):
# varying multiply factors
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.MultiplyElementwise(mul=(0, 2.0))
aug_det = aug.to_deterministic()
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.95)
assert nb_changed_aug_det == 0
def test_samples_change_by_spatial_location(self):
# values should change between pixels
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.MultiplyElementwise(mul=(0.5, 1.5))
nb_same = 0
nb_different = 0
nb_iterations = 1000
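# MultiplyElementwise samples one factor per pixel, so neighbouring values
# in the flattened output should differ nearly everywhere.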
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_flat = observed_aug.flatten()
last = None
for j in sm.xrange(observed_aug_flat.size):
if last is not None:
v = observed_aug_flat[j]
if v - 0.0001 <= last <= v + 0.0001:
nb_same += 1
else:
nb_different += 1
last = observed_aug_flat[j]
assert nb_different > 0.95 * (nb_different + nb_same)
def test_per_channel(self):
# test channelwise
aug = iaa.MultiplyElementwise(mul=iap.Choice([0, 1]), per_channel=True)
observed = aug.augment_image(np.ones((100, 100, 3), dtype=np.uint8))
sums = np.sum(observed, axis=2)
values = np.unique(sums)
assert all([(value in values) for value in [0, 1, 2, 3]])
assert observed.shape == (100, 100, 3)
def test_per_channel_with_probability(self):
# test channelwise with probability
aug = iaa.MultiplyElementwise(mul=iap.Choice([0, 1]), per_channel=0.5)
seen = [0, 0]
for _ in sm.xrange(400):
observed = aug.augment_image(np.ones((20, 20, 3), dtype=np.uint8))
assert observed.shape == (20, 20, 3)
sums = np.sum(observed, axis=2)
values = np.unique(sums)
all_values_found = all([(value in values) for value in [0, 1, 2, 3]])
if all_values_found:
seen[0] += 1
else:
seen[1] += 1
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_aug = iaa.MultiplyElementwise(mul="test")
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
_aug = iaa.MultiplyElementwise(mul=1, per_channel="test")
except Exception:
got_exception = True
assert got_exception
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.ones(shape, dtype=np.uint8)
aug = iaa.MultiplyElementwise(2)
image_aug = aug(image=image)
assert np.all(image_aug == 2)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_unusual_channel_numbers(self):
shapes = [
(1, 1, 4),
(1, 1, 5),
(1, 1, 512),
(1, 1, 513)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.ones(shape, dtype=np.uint8)
aug = iaa.MultiplyElementwise(2)
image_aug = aug(image=image)
assert np.all(image_aug == 2)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_get_parameters(self):
# test get_parameters()
aug = iaa.MultiplyElementwise(mul=1, per_channel=False)
params = aug.get_parameters()
assert isinstance(params[0], iap.Deterministic)
assert isinstance(params[1], iap.Deterministic)
assert params[0].value == 1
assert params[1].value == 0
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.MultiplyElementwise(mul=2)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_other_dtypes_bool(self):
# bool
image = np.zeros((3, 3), dtype=bool)
aug = iaa.MultiplyElementwise(1.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.MultiplyElementwise(1.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.MultiplyElementwise(2.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.MultiplyElementwise(0.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.MultiplyElementwise(-1.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
def test_other_dtypes_uint_int(self):
# uint, int
for dtype in [np.uint8, np.uint16, np.int8, np.int16]:
dtype = np.dtype(dtype)
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 10)
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), 10, dtype=dtype)
# aug = iaa.MultiplyElementwise(10)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert np.all(image_aug == 100)
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(0.5)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 5)
image = np.full((3, 3), 0, dtype=dtype)
aug = iaa.MultiplyElementwise(0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 0)
# partially deactivated, because itemsize increase was deactivated
if dtype.name == "uint8":
if dtype.kind == "u":
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(-1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 0)
else:
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(-1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == -10)
image = np.full((3, 3), int(center_value), dtype=dtype)
aug = iaa.MultiplyElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == int(center_value))
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), int(center_value), dtype=dtype)
# aug = iaa.MultiplyElementwise(1.2)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert np.all(image_aug == int(1.2 * int(center_value)))
# partially deactivated, because itemsize increase was deactivated
if dtype.name == "uint8":
if dtype.kind == "u":
image = np.full((3, 3), int(center_value), dtype=dtype)
aug = iaa.MultiplyElementwise(100)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.MultiplyElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), max_value, dtype=dtype)
# aug = iaa.MultiplyElementwise(10)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert np.all(image_aug == max_value)
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.MultiplyElementwise(0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 0)
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), max_value, dtype=dtype)
# aug = iaa.MultiplyElementwise(-2)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert np.all(image_aug == min_value)
# partially deactivated, because itemsize increase was deactivated
if dtype.name == "uint8":
for _ in sm.xrange(10):
image = np.full((5, 5, 3), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(iap.Uniform(0.5, 1.5))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(5 <= image_aug, image_aug <= 15))
assert len(np.unique(image_aug)) > 1
assert np.all(image_aug[..., 0] == image_aug[..., 1])
image = np.full((1, 1, 100), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(iap.Uniform(0.5, 1.5), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(5 <= image_aug, image_aug <= 15))
assert len(np.unique(image_aug)) > 1
image = np.full((5, 5, 3), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(iap.DiscreteUniform(1, 3))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
assert np.all(image_aug[..., 0] == image_aug[..., 1])
image = np.full((1, 1, 100), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(iap.DiscreteUniform(1, 3), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
def test_other_dtypes_float(self):
# float
for dtype in [np.float16, np.float32]:
dtype = np.dtype(dtype)
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
if dtype == np.float16:
atol = 1e-3 * max_value
else:
atol = 1e-9 * max_value
_allclose = functools.partial(np.allclose, atol=atol, rtol=0)
image = np.full((3, 3), 10.0, dtype=dtype)
aug = iaa.MultiplyElementwise(1.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, 10.0)
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), 10.0, dtype=dtype)
# aug = iaa.MultiplyElementwise(2.0)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert _allclose(image_aug, 20.0)
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), max_value, dtype=dtype)
# aug = iaa.MultiplyElementwise(-10)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert _allclose(image_aug, min_value)
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.MultiplyElementwise(0.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, 0.0)
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.MultiplyElementwise(0.5)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, 0.5*max_value)
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), min_value, dtype=dtype)
# aug = iaa.MultiplyElementwise(-2.0)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert _allclose(image_aug, max_value)
image = np.full((3, 3), min_value, dtype=dtype)
aug = iaa.MultiplyElementwise(0.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, 0.0)
# using tolerances of -100 - 1e-2 and 100 + 1e-2 is not enough for float16, had to be increased to -/+ 1e-1
# deactivated, because itemsize increase was deactivated
"""
for _ in sm.xrange(10):
image = np.full((50, 1, 3), 10.0, dtype=dtype)
aug = iaa.MultiplyElementwise(iap.Uniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-100 - 1e-1 < image_aug, image_aug < 100 + 1e-1))
assert not np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 10.0, dtype=dtype)
aug = iaa.MultiplyElementwise(iap.Uniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-100 - 1e-1 < image_aug, image_aug < 100 + 1e-1))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
image = np.full((50, 1, 3), 10.0, dtype=dtype)
aug = iaa.MultiplyElementwise(iap.DiscreteUniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-100 - 1e-1 < image_aug, image_aug < 100 + 1e-1))
assert not np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(iap.DiscreteUniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-100 - 1e-1 < image_aug, image_aug < 100 + 1e-1))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
"""
def test_pickleable(self):
aug = iaa.MultiplyElementwise((0.5, 1.5), per_channel=True,
random_state=1)
runtest_pickleable_uint8_img(aug, iterations=3)
class TestReplaceElementwise(unittest.TestCase):
def setUp(self):
reseed()
def test_mask_is_always_zero(self):
# no replace, shouldn't change anything
base_img = np.ones((3, 3, 1), dtype=np.uint8) + 99
images = np.array([base_img])
images_list = [base_img]
aug = iaa.ReplaceElementwise(mask=0, replacement=0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
def test_mask_is_always_one(self):
# replace at 100 percent prob., should change everything
base_img = np.ones((3, 3, 1), dtype=np.uint8) + 99
images = np.array([base_img])
images_list = [base_img]
aug = iaa.ReplaceElementwise(mask=1, replacement=0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = np.zeros((1, 3, 3, 1), dtype=np.uint8)
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = [np.zeros((3, 3, 1), dtype=np.uint8)]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = np.zeros((1, 3, 3, 1), dtype=np.uint8)
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [np.zeros((3, 3, 1), dtype=np.uint8)]
assert array_equal_lists(observed, expected)
def test_mask_is_stochastic_parameter(self):
# replace half
aug = iaa.ReplaceElementwise(mask=iap.Binomial(p=0.5), replacement=0)
img = np.ones((100, 100, 1), dtype=np.uint8)
nb_iterations = 100
nb_diff_all = 0
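# With a Binomial(p=0.5) mask each pixel is replaced independently with
# probability 0.5, so the fraction of changed pixels, averaged over 100
# runs, should end up very close to 0.5.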
for i in sm.xrange(nb_iterations):
observed = aug.augment_image(img)
nb_diff = np.sum(img != observed)
nb_diff_all += nb_diff
p = nb_diff_all / (nb_iterations * 100 * 100)
assert 0.45 <= p <= 0.55
def test_mask_is_list(self):
# mask is list
aug = iaa.ReplaceElementwise(mask=[0.2, 0.7], replacement=1)
img = np.zeros((20, 20, 1), dtype=np.uint8)
seen = [0, 0, 0]
for i in sm.xrange(400):
observed = aug.augment_image(img)
p = np.mean(observed)
if 0.1 < p < 0.3:
seen[0] += 1
elif 0.6 < p < 0.8:
seen[1] += 1
else:
seen[2] += 1
assert seen[2] <= 10
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
def test_keypoints_dont_change(self):
# keypoints shouldn't be changed
base_img = np.ones((3, 3, 1), dtype=np.uint8) + 99
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
aug = iaa.ReplaceElementwise(mask=iap.Binomial(p=0.5), replacement=0)
aug_det = iaa.ReplaceElementwise(mask=iap.Binomial(p=0.5), replacement=0).to_deterministic()
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
def test_replacement_is_stochastic_parameter(self):
# different replacements
aug = iaa.ReplaceElementwise(mask=1, replacement=iap.Choice([100, 200]))
img = np.zeros((1000, 1000, 1), dtype=np.uint8)
img100 = img + 100
img200 = img + 200
observed = aug.augment_image(img)
nb_diff_100 = np.sum(img100 != observed)
nb_diff_200 = np.sum(img200 != observed)
p100 = nb_diff_100 / (1000 * 1000)
p200 = nb_diff_200 / (1000 * 1000)
assert 0.45 <= p100 <= 0.55
assert 0.45 <= p200 <= 0.55
def test_per_channel(self):
# test channelwise
aug = iaa.ReplaceElementwise(mask=iap.Choice([0, 1]), replacement=1, per_channel=True)
observed = aug.augment_image(np.zeros((100, 100, 3), dtype=np.uint8))
sums = np.sum(observed, axis=2)
values = np.unique(sums)
assert all([(value in values) for value in [0, 1, 2, 3]])
assert observed.shape == (100, 100, 3)
def test_per_channel_with_probability(self):
# test channelwise with probability
aug = iaa.ReplaceElementwise(mask=iap.Choice([0, 1]), replacement=1, per_channel=0.5)
seen = [0, 0]
for _ in sm.xrange(400):
observed = aug.augment_image(np.zeros((20, 20, 3), dtype=np.uint8))
assert observed.shape == (20, 20, 3)
sums = np.sum(observed, axis=2)
values = np.unique(sums)
all_values_found = all([(value in values) for value in [0, 1, 2, 3]])
if all_values_found:
seen[0] += 1
else:
seen[1] += 1
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_aug = iaa.ReplaceElementwise(mask="test", replacement=1)
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
_aug = iaa.ReplaceElementwise(mask=1, replacement=1, per_channel="test")
except Exception:
got_exception = True
assert got_exception
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.ReplaceElementwise(1.0, 1)
image_aug = aug(image=image)
assert np.all(image_aug == 1)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_unusual_channel_numbers(self):
shapes = [
(1, 1, 4),
(1, 1, 5),
(1, 1, 512),
(1, 1, 513)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.ReplaceElementwise(1.0, 1)
image_aug = aug(image=image)
assert np.all(image_aug == 1)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_get_parameters(self):
# test get_parameters()
aug = iaa.ReplaceElementwise(mask=0.5, replacement=2, per_channel=False)
params = aug.get_parameters()
assert isinstance(params[0], iap.Binomial)
assert isinstance(params[0].p, iap.Deterministic)
assert isinstance(params[1], iap.Deterministic)
assert isinstance(params[2], iap.Deterministic)
assert 0.5 - 1e-6 < params[0].p.value < 0.5 + 1e-6
assert params[1].value == 2
assert params[2].value == 0
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.ReplaceElementwise(mask=1, replacement=0.5)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_other_dtypes_bool(self):
# bool
aug = iaa.ReplaceElementwise(mask=1, replacement=0)
image = np.full((3, 3), False, dtype=bool)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
aug = iaa.ReplaceElementwise(mask=1, replacement=1)
image = np.full((3, 3), False, dtype=bool)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
aug = iaa.ReplaceElementwise(mask=1, replacement=0)
image = np.full((3, 3), True, dtype=bool)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
aug = iaa.ReplaceElementwise(mask=1, replacement=1)
image = np.full((3, 3), True, dtype=bool)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
aug = iaa.ReplaceElementwise(mask=1, replacement=0.7)
image = np.full((3, 3), False, dtype=bool)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
aug = iaa.ReplaceElementwise(mask=1, replacement=0.2)
image = np.full((3, 3), False, dtype=bool)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
def test_other_dtypes_uint_int(self):
# uint, int
for dtype in [np.uint8, np.uint16, np.uint32, np.int8, np.int16, np.int32]:
dtype = np.dtype(dtype)
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
aug = iaa.ReplaceElementwise(mask=1, replacement=1)
image = np.full((3, 3), 0, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 1)
aug = iaa.ReplaceElementwise(mask=1, replacement=2)
image = np.full((3, 3), 1, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 2)
# deterministic stochastic parameters are by default int32 for
# any integer value and hence cannot cover the full uint32 value
# range
if dtype.name != "uint32":
aug = iaa.ReplaceElementwise(mask=1, replacement=max_value)
image = np.full((3, 3), min_value, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
aug = iaa.ReplaceElementwise(mask=1, replacement=min_value)
image = np.full((3, 3), max_value, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value)
aug = iaa.ReplaceElementwise(mask=1, replacement=iap.Uniform(1.0, 10.0))
image = np.full((100, 1), 0, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(1 <= image_aug, image_aug <= 10))
assert len(np.unique(image_aug)) > 1
aug = iaa.ReplaceElementwise(mask=1, replacement=iap.DiscreteUniform(1, 10))
image = np.full((100, 1), 0, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(1 <= image_aug, image_aug <= 10))
assert len(np.unique(image_aug)) > 1
aug = iaa.ReplaceElementwise(mask=0.5, replacement=iap.DiscreteUniform(1, 10), per_channel=True)
image = np.full((1, 1, 100), 0, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(0 <= image_aug, image_aug <= 10))
assert len(np.unique(image_aug)) > 2
def test_other_dtypes_float(self):
# float
for dtype in [np.float16, np.float32, np.float64]:
dtype = np.dtype(dtype)
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
atol = 1e-3*max_value if dtype == np.float16 else 1e-9 * max_value
_allclose = functools.partial(np.allclose, atol=atol, rtol=0)
aug = iaa.ReplaceElementwise(mask=1, replacement=1.0)
image = np.full((3, 3), 0.0, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.allclose(image_aug, 1.0)
aug = iaa.ReplaceElementwise(mask=1, replacement=2.0)
image = np.full((3, 3), 1.0, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.allclose(image_aug, 2.0)
# deterministic stochastic parameters are by default float32 for
# any float value and hence cannot cover the full float64 value
# range
if dtype.name != "float64":
aug = iaa.ReplaceElementwise(mask=1, replacement=max_value)
image = np.full((3, 3), min_value, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, max_value)
aug = iaa.ReplaceElementwise(mask=1, replacement=min_value)
image = np.full((3, 3), max_value, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value)
aug = iaa.ReplaceElementwise(mask=1, replacement=iap.Uniform(1.0, 10.0))
image = np.full((100, 1), 0, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(1 <= image_aug, image_aug <= 10))
assert not np.allclose(image_aug[1:, :], image_aug[:-1, :], atol=0.01)
aug = iaa.ReplaceElementwise(mask=1, replacement=iap.DiscreteUniform(1, 10))
image = np.full((100, 1), 0, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(1 <= image_aug, image_aug <= 10))
assert not np.allclose(image_aug[1:, :], image_aug[:-1, :], atol=0.01)
aug = iaa.ReplaceElementwise(mask=0.5, replacement=iap.DiscreteUniform(1, 10), per_channel=True)
image = np.full((1, 1, 100), 0, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(0 <= image_aug, image_aug <= 10))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1], atol=0.01)
def test_pickleable(self):
aug = iaa.ReplaceElementwise(mask=0.5, replacement=(0, 255),
per_channel=True, random_state=1)
runtest_pickleable_uint8_img(aug, iterations=3)
# No further tests are necessary here, as SaltAndPepper is just a thin
# wrapper around ReplaceElementwise
class TestSaltAndPepper(unittest.TestCase):
def setUp(self):
reseed()
def test_p_is_fifty_percent(self):
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
aug = iaa.SaltAndPepper(p=0.5)
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
assert 0.4 < p < 0.6
def test_p_is_one(self):
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
aug = iaa.SaltAndPepper(p=1.0)
observed = aug.augment_image(base_img)
nb_pepper = np.sum(observed < 40)
nb_salt = np.sum(observed > 255 - 40)
assert nb_pepper > 200
assert nb_salt > 200
def test_pickleable(self):
aug = iaa.SaltAndPepper(p=0.5, per_channel=True, random_state=1)
runtest_pickleable_uint8_img(aug, iterations=3)
class TestCoarseSaltAndPepper(unittest.TestCase):
def setUp(self):
reseed()
def test_p_is_fifty_percent(self):
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
aug = iaa.CoarseSaltAndPepper(p=0.5, size_px=100)
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
assert 0.4 < p < 0.6
def test_size_px(self):
aug1 = iaa.CoarseSaltAndPepper(p=0.5, size_px=100)
aug2 = iaa.CoarseSaltAndPepper(p=0.5, size_px=10)
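# size_px=100 keeps the mask at (almost) per-pixel resolution, while
# size_px=10 samples a coarse 10x10 grid that is upscaled to the image;
# fewer independent mask cells mean a clearly larger spread of the
# per-image replaced fraction, hence the std comparison below.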
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
ps1 = []
ps2 = []
for _ in sm.xrange(100):
observed1 = aug1.augment_image(base_img)
observed2 = aug2.augment_image(base_img)
p1 = np.mean(observed1 != 128)
p2 = np.mean(observed2 != 128)
ps1.append(p1)
ps2.append(p2)
assert 0.4 < np.mean(ps2) < 0.6
assert np.std(ps1)*1.5 < np.std(ps2)
def test_p_is_list(self):
aug = iaa.CoarseSaltAndPepper(p=[0.2, 0.5], size_px=100)
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
seen = [0, 0, 0]
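# p is picked per image from [0.2, 0.5] with equal probability, so about
# half of the 200 images should show a replaced fraction near 0.2 and the
# other half a fraction near 0.5.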
for _ in sm.xrange(200):
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
diff_020 = abs(0.2 - p)
diff_050 = abs(0.5 - p)
if diff_020 < 0.025:
seen[0] += 1
elif diff_050 < 0.025:
seen[1] += 1
else:
seen[2] += 1
assert seen[2] < 10
assert 75 < seen[0] < 125
assert 75 < seen[1] < 125
def test_p_is_tuple(self):
aug = iaa.CoarseSaltAndPepper(p=(0.0, 1.0), size_px=50)
base_img = np.zeros((50, 50, 1), dtype=np.uint8) + 128
ps = []
for _ in sm.xrange(200):
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
ps.append(p)
nb_bins = 5
hist, _ = np.histogram(ps, bins=nb_bins, range=(0.0, 1.0), density=False)
tolerance = 0.05
# The replaced fraction follows p, which is drawn uniformly from
# (0.0, 1.0), so each of the nb_bins bins should hold roughly
# 1.0/nb_bins = 0.2 of the samples.
for nb_seen in hist:
density = nb_seen / len(ps)
assert 1.0/nb_bins - tolerance < density < 1.0/nb_bins + tolerance
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_ = iaa.CoarseSaltAndPepper(p="test", size_px=100)
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
_ = iaa.CoarseSaltAndPepper(p=0.5, size_px=None, size_percent=None)
except Exception:
got_exception = True
assert got_exception
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.CoarseSaltAndPepper(p=1.0, size_px=2)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_pickleable(self):
aug = iaa.CoarseSaltAndPepper(p=0.5, size_px=(4, 15),
per_channel=True, random_state=1)
runtest_pickleable_uint8_img(aug, iterations=20)
# No further tests are necessary here, as Salt is just a thin wrapper
# around ReplaceElementwise
class TestSalt(unittest.TestCase):
def setUp(self):
reseed()
def test_p_is_fifty_percent(self):
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
aug = iaa.Salt(p=0.5)
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
assert 0.4 < p < 0.6
# Salt() occasionally replaces values with 127, which is arguably the
# correct center point here anyway
assert np.all(observed >= 127)
def test_p_is_one(self):
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
aug = iaa.Salt(p=1.0)
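# Salt only replaces pixels with bright values, so with p=1.0 there must
# be no dark (pepper-like) pixels, while a substantial number of pixels
# should end up close to 255.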
observed = aug.augment_image(base_img)
nb_pepper = np.sum(observed < 40)
nb_salt = np.sum(observed > 255 - 40)
assert nb_pepper == 0
assert nb_salt > 200
def test_pickleable(self):
aug = iaa.Salt(p=0.5, per_channel=True, random_state=1)
runtest_pickleable_uint8_img(aug, iterations=3)
class TestCoarseSalt(unittest.TestCase):
def setUp(self):
reseed()
def test_p_is_fifty_percent(self):
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
aug = iaa.CoarseSalt(p=0.5, size_px=100)
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
assert 0.4 < p < 0.6
def test_size_px(self):
aug1 = iaa.CoarseSalt(p=0.5, size_px=100)
aug2 = iaa.CoarseSalt(p=0.5, size_px=10)
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
ps1 = []
ps2 = []
for _ in sm.xrange(100):
observed1 = aug1.augment_image(base_img)
observed2 = aug2.augment_image(base_img)
p1 = np.mean(observed1 != 128)
p2 = np.mean(observed2 != 128)
ps1.append(p1)
ps2.append(p2)
assert 0.4 < np.mean(ps2) < 0.6
assert np.std(ps1)*1.5 < np.std(ps2)
def test_p_is_list(self):
aug = iaa.CoarseSalt(p=[0.2, 0.5], size_px=100)
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
seen = [0, 0, 0]
for _ in sm.xrange(200):
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
diff_020 = abs(0.2 - p)
diff_050 = abs(0.5 - p)
if diff_020 < 0.025:
seen[0] += 1
elif diff_050 < 0.025:
seen[1] += 1
else:
seen[2] += 1
assert seen[2] < 10
assert 75 < seen[0] < 125
assert 75 < seen[1] < 125
def test_p_is_tuple(self):
aug = iaa.CoarseSalt(p=(0.0, 1.0), size_px=50)
base_img = np.zeros((50, 50, 1), dtype=np.uint8) + 128
ps = []
for _ in sm.xrange(200):
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
ps.append(p)
nb_bins = 5
hist, _ = np.histogram(ps, bins=nb_bins, range=(0.0, 1.0), density=False)
tolerance = 0.05
for nb_seen in hist:
density = nb_seen / len(ps)
assert 1.0/nb_bins - tolerance < density < 1.0/nb_bins + tolerance
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_ = iaa.CoarseSalt(p="test", size_px=100)
except Exception:
got_exception = True
assert got_exception
def test_size_px_or_size_percent_not_none(self):
got_exception = False
try:
_ = iaa.CoarseSalt(p=0.5, size_px=None, size_percent=None)
except Exception:
got_exception = True
assert got_exception
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.CoarseSalt(p=1.0, size_px=2)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_pickleable(self):
aug = iaa.CoarseSalt(p=0.5, size_px=(4, 15),
per_channel=True, random_state=1)
runtest_pickleable_uint8_img(aug, iterations=20)
# No further tests are necessary here, as Pepper is just a thin wrapper
# around ReplaceElementwise
class TestPepper(unittest.TestCase):
def setUp(self):
reseed()
def test_probability_is_fifty_percent(self):
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
aug = iaa.Pepper(p=0.5)
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
assert 0.4 < p < 0.6
assert np.all(observed <= 128)
def test_probability_is_one(self):
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
aug = iaa.Pepper(p=1.0)
observed = aug.augment_image(base_img)
nb_pepper = np.sum(observed < 40)
nb_salt = np.sum(observed > 255 - 40)
assert nb_pepper > 200
assert nb_salt == 0
def test_pickleable(self):
aug = iaa.Pepper(p=0.5, per_channel=True, random_state=1)
runtest_pickleable_uint8_img(aug, iterations=3)
class TestCoarsePepper(unittest.TestCase):
def setUp(self):
reseed()
def test_p_is_fifty_percent(self):
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
aug = iaa.CoarsePepper(p=0.5, size_px=100)
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
assert 0.4 < p < 0.6
def test_size_px(self):
aug1 = iaa.CoarsePepper(p=0.5, size_px=100)
aug2 = iaa.CoarsePepper(p=0.5, size_px=10)
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
ps1 = []
ps2 = []
for _ in sm.xrange(100):
observed1 = aug1.augment_image(base_img)
observed2 = aug2.augment_image(base_img)
p1 = np.mean(observed1 != 128)
p2 = np.mean(observed2 != 128)
ps1.append(p1)
ps2.append(p2)
assert 0.4 < np.mean(ps2) < 0.6
assert np.std(ps1)*1.5 < np.std(ps2)
def test_p_is_list(self):
aug = iaa.CoarsePepper(p=[0.2, 0.5], size_px=100)
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
seen = [0, 0, 0]
for _ in sm.xrange(200):
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
diff_020 = abs(0.2 - p)
diff_050 = abs(0.5 - p)
if diff_020 < 0.025:
seen[0] += 1
elif diff_050 < 0.025:
seen[1] += 1
else:
seen[2] += 1
assert seen[2] < 10
assert 75 < seen[0] < 125
assert 75 < seen[1] < 125
def test_p_is_tuple(self):
aug = iaa.CoarsePepper(p=(0.0, 1.0), size_px=50)
base_img = np.zeros((50, 50, 1), dtype=np.uint8) + 128
ps = []
for _ in sm.xrange(200):
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
ps.append(p)
nb_bins = 5
hist, _ = np.histogram(ps, bins=nb_bins, range=(0.0, 1.0), density=False)
tolerance = 0.05
for nb_seen in hist:
density = nb_seen / len(ps)
assert 1.0/nb_bins - tolerance < density < 1.0/nb_bins + tolerance
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_ = iaa.CoarsePepper(p="test", size_px=100)
except Exception:
got_exception = True
assert got_exception
def test_size_px_or_size_percent_not_none(self):
got_exception = False
try:
_ = iaa.CoarsePepper(p=0.5, size_px=None, size_percent=None)
except Exception:
got_exception = True
assert got_exception
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.CoarsePepper(p=1.0, size_px=2)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_pickleable(self):
aug = iaa.CoarsePepper(p=0.5, size_px=(4, 15),
per_channel=True, random_state=1)
runtest_pickleable_uint8_img(aug, iterations=20)
class Test_invert(unittest.TestCase):
@mock.patch("imgaug.augmenters.arithmetic.invert_")
def test_mocked_defaults(self, mock_invert):
mock_invert.return_value = "foo"
arr = np.zeros((1,), dtype=np.uint8)
observed = iaa.invert(arr)
assert observed == "foo"
args = mock_invert.call_args_list[0]
assert np.array_equal(mock_invert.call_args_list[0][0][0], arr)
assert args[1]["min_value"] is None
assert args[1]["max_value"] is None
assert args[1]["threshold"] is None
assert args[1]["invert_above_threshold"] is True
@mock.patch("imgaug.augmenters.arithmetic.invert_")
def test_mocked(self, mock_invert):
mock_invert.return_value = "foo"
arr = np.zeros((1,), dtype=np.uint8)
observed = iaa.invert(arr, min_value=1, max_value=10, threshold=5,
invert_above_threshold=False)
assert observed == "foo"
args = mock_invert.call_args_list[0]
assert np.array_equal(mock_invert.call_args_list[0][0][0], arr)
assert args[1]["min_value"] == 1
assert args[1]["max_value"] == 10
assert args[1]["threshold"] == 5
assert args[1]["invert_above_threshold"] is False
def test_uint8(self):
values = np.array([0, 20, 45, 60, 128, 255], dtype=np.uint8)
expected = np.array([
255,
255-20,
255-45,
255-60,
255-128,
255-255
], dtype=np.uint8)
observed = iaa.invert(values)
assert np.array_equal(observed, expected)
assert observed is not values
# most parts of this function are tested via Invert
class Test_invert_(unittest.TestCase):
def test_arr_is_noncontiguous_uint8(self):
zeros = np.zeros((4, 4, 3), dtype=np.uint8)
max_vr_flipped = np.fliplr(np.copy(zeros + 255))
observed = iaa.invert_(max_vr_flipped)
expected = zeros
assert observed.dtype.name == "uint8"
assert np.array_equal(observed, expected)
def test_arr_is_view_uint8(self):
zeros = np.zeros((4, 4, 3), dtype=np.uint8)
max_vr_view = np.copy(zeros + 255)[:, :, [0, 2]]
observed = iaa.invert_(max_vr_view)
expected = zeros[:, :, [0, 2]]
assert observed.dtype.name == "uint8"
assert np.array_equal(observed, expected)
def test_uint(self):
dtypes = ["uint8", "uint16", "uint32", "uint64"]
for dt in dtypes:
with self.subTest(dtype=dt):
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
center_value = int(center_value)
values = np.array([0, 20, 45, 60, center_value, max_value],
dtype=dt)
expected = np.array([
max_value - 0,
max_value - 20,
max_value - 45,
max_value - 60,
max_value - center_value,
min_value
], dtype=dt)
observed = iaa.invert_(np.copy(values))
assert np.array_equal(observed, expected)
def test_uint_with_threshold_50_inv_above(self):
threshold = 50
dtypes = ["uint8", "uint16", "uint32", "uint64"]
for dt in dtypes:
with self.subTest(dtype=dt):
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
center_value = int(center_value)
values = np.array([0, 20, 45, 60, center_value, max_value],
dtype=dt)
expected = np.array([
0,
20,
45,
max_value - 60,
max_value - center_value,
min_value
], dtype=dt)
observed = iaa.invert_(np.copy(values),
threshold=threshold,
invert_above_threshold=True)
assert np.array_equal(observed, expected)
def test_uint_with_threshold_0_inv_above(self):
threshold = 0
dtypes = ["uint8", "uint16", "uint32", "uint64"]
for dt in dtypes:
with self.subTest(dtype=dt):
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
center_value = int(center_value)
values = np.array([0, 20, 45, 60, center_value, max_value],
dtype=dt)
expected = np.array([
max_value - 0,
max_value - 20,
max_value - 45,
max_value - 60,
max_value - center_value,
min_value
], dtype=dt)
observed = iaa.invert_(np.copy(values),
threshold=threshold,
invert_above_threshold=True)
assert np.array_equal(observed, expected)
def test_uint8_with_threshold_255_inv_above(self):
threshold = 255
dtypes = ["uint8"]
for dt in dtypes:
with self.subTest(dtype=dt):
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
center_value = int(center_value)
values = np.array([0, 20, 45, 60, center_value, max_value],
dtype=dt)
expected = np.array([
0,
20,
45,
60,
center_value,
min_value
], dtype=dt)
observed = iaa.invert_(np.copy(values),
threshold=threshold,
invert_above_threshold=True)
assert np.array_equal(observed, expected)
def test_uint8_with_threshold_256_inv_above(self):
threshold = 256
dtypes = ["uint8"]
for dt in dtypes:
with self.subTest(dtype=dt):
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
center_value = int(center_value)
values = np.array([0, 20, 45, 60, center_value, max_value],
dtype=dt)
expected = np.array([
0,
20,
45,
60,
center_value,
max_value
], dtype=dt)
observed = iaa.invert_(np.copy(values),
threshold=threshold,
invert_above_threshold=True)
assert np.array_equal(observed, expected)
def test_uint_with_threshold_50_inv_below(self):
threshold = 50
dtypes = ["uint8", "uint16", "uint32", "uint64"]
for dt in dtypes:
with self.subTest(dtype=dt):
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
center_value = int(center_value)
values = np.array([0, 20, 45, 60, center_value, max_value],
dtype=dt)
expected = np.array([
max_value - 0,
max_value - 20,
max_value - 45,
60,
center_value,
max_value
], dtype=dt)
observed = iaa.invert_(np.copy(values),
threshold=threshold,
invert_above_threshold=False)
assert np.array_equal(observed, expected)
def test_uint_with_threshold_50_inv_above_with_min_max(self):
threshold = 50
        # uint64 does not support custom min/max values, hence it is omitted here
dtypes = ["uint8", "uint16", "uint32"]
for dt in dtypes:
with self.subTest(dtype=dt):
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
center_value = int(center_value)
values = np.array([0, 20, 45, 60, center_value, max_value],
dtype=dt)
expected = np.array([
0, # not clipped to 10 as only >thresh affected
20,
45,
100 - 50,
100 - 90,
100 - 90
], dtype=dt)
observed = iaa.invert_(np.copy(values),
min_value=10,
max_value=100,
threshold=threshold,
invert_above_threshold=True)
assert np.array_equal(observed, expected)
def test_int_with_threshold_50_inv_above(self):
threshold = 50
dtypes = ["int8", "int16", "int32", "int64"]
for dt in dtypes:
with self.subTest(dtype=dt):
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
center_value = int(center_value)
values = np.array([-45, -20, center_value, 20, 45, max_value],
dtype=dt)
expected = np.array([
-45,
-20,
center_value,
20,
45,
min_value
], dtype=dt)
observed = iaa.invert_(np.copy(values),
threshold=threshold,
invert_above_threshold=True)
assert np.array_equal(observed, expected)
def test_int_with_threshold_50_inv_below(self):
threshold = 50
dtypes = ["int8", "int16", "int32", "int64"]
for dt in dtypes:
with self.subTest(dtype=dt):
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
center_value = int(center_value)
values = np.array([-45, -20, center_value, 20, 45, max_value],
dtype=dt)
expected = np.array([
(-1) * (-45) - 1,
(-1) * (-20) - 1,
(-1) * center_value - 1,
(-1) * 20 - 1,
(-1) * 45 - 1,
max_value
], dtype=dt)
observed = iaa.invert_(np.copy(values),
threshold=threshold,
invert_above_threshold=False)
assert np.array_equal(observed, expected)
def test_float_with_threshold_50_inv_above(self):
threshold = 50
dtypes = ["float16", "float32", "float64", "float128"]
for dt in dtypes:
with self.subTest(dtype=dt):
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
center_value = center_value
values = np.array([-45.5, -20.5, center_value, 20.5, 45.5,
max_value],
dtype=dt)
expected = np.array([
-45.5,
-20.5,
center_value,
20.5,
45.5,
min_value
], dtype=dt)
observed = iaa.invert_(np.copy(values),
threshold=threshold,
invert_above_threshold=True)
assert np.allclose(observed, expected, rtol=0, atol=1e-4)
def test_float_with_threshold_50_inv_below(self):
threshold = 50
dtypes = ["float16", "float32", "float64", "float128"]
for dt in dtypes:
with self.subTest(dtype=dt):
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
center_value = center_value
values = np.array([-45.5, -20.5, center_value, 20.5, 45.5,
max_value],
dtype=dt)
expected = np.array([
(-1) * (-45.5),
(-1) * (-20.5),
(-1) * center_value,
(-1) * 20.5,
(-1) * 45.5,
max_value
], dtype=dt)
observed = iaa.invert_(np.copy(values),
threshold=threshold,
invert_above_threshold=False)
assert np.allclose(observed, expected, rtol=0, atol=1e-4)
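# Editor's note (a sketch inferred from the expected arrays in the tests
# above, not from the imgaug documentation): without custom min/max values,
# ``invert_`` effectively maps
#     uint dtypes:  v -> max_value - v
#     int dtypes:   v -> -1 - v
#     float dtypes: v -> -v
# and ``threshold``/``invert_above_threshold`` only select which values are
# inverted at all; the remaining values are passed through unchanged.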
class Test_solarize(unittest.TestCase):
@mock.patch("imgaug.augmenters.arithmetic.solarize_")
def test_mocked_defaults(self, mock_sol):
arr = np.zeros((1,), dtype=np.uint8)
mock_sol.return_value = "foo"
observed = iaa.solarize(arr)
args = mock_sol.call_args_list[0][0]
kwargs = mock_sol.call_args_list[0][1]
assert args[0] is not arr
assert np.array_equal(args[0], arr)
assert kwargs["threshold"] == 128
assert observed == "foo"
@mock.patch("imgaug.augmenters.arithmetic.solarize_")
def test_mocked(self, mock_sol):
arr = np.zeros((1,), dtype=np.uint8)
mock_sol.return_value = "foo"
observed = iaa.solarize(arr, threshold=5)
args = mock_sol.call_args_list[0][0]
kwargs = mock_sol.call_args_list[0][1]
assert args[0] is not arr
assert np.array_equal(args[0], arr)
assert kwargs["threshold"] == 5
assert observed == "foo"
def test_uint8(self):
arr = np.array([0, 10, 50, 150, 200, 255], dtype=np.uint8)
arr = arr.reshape((2, 3, 1))
observed = iaa.solarize(arr)
expected = np.array([0, 10, 50, 255-150, 255-200, 255-255],
dtype=np.uint8).reshape((2, 3, 1))
assert observed.dtype.name == "uint8"
assert np.array_equal(observed, expected)
def test_compare_with_pil(self):
import PIL.Image
import PIL.ImageOps
def _solarize_pil(image, threshold):
img = PIL.Image.fromarray(image)
return np.asarray(PIL.ImageOps.solarize(img, threshold))
image = np.mod(np.arange(20*20*3), 255).astype(np.uint8)\
.reshape((20, 20, 3))
for threshold in np.arange(256):
image_pil = _solarize_pil(image, threshold)
image_iaa = iaa.solarize(image, threshold)
assert np.array_equal(image_pil, image_iaa)
class Test_solarize_(unittest.TestCase):
@mock.patch("imgaug.augmenters.arithmetic.invert_")
def test_mocked_defaults(self, mock_sol):
arr = np.zeros((1,), dtype=np.uint8)
mock_sol.return_value = "foo"
observed = iaa.solarize_(arr)
args = mock_sol.call_args_list[0][0]
kwargs = mock_sol.call_args_list[0][1]
assert args[0] is arr
assert kwargs["threshold"] == 128
assert observed == "foo"
@mock.patch("imgaug.augmenters.arithmetic.invert_")
def test_mocked(self, mock_sol):
arr = np.zeros((1,), dtype=np.uint8)
mock_sol.return_value = "foo"
observed = iaa.solarize_(arr, threshold=5)
args = mock_sol.call_args_list[0][0]
kwargs = mock_sol.call_args_list[0][1]
assert args[0] is arr
assert kwargs["threshold"] == 5
assert observed == "foo"
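# Editor's note (a sketch based on the two mocked tests above): ``solarize_``
# simply delegates to ``invert_`` with a default ``threshold`` of 128, i.e.
# roughly
#     iaa.solarize_(arr)               # ~ iaa.invert_(arr, threshold=128)
#     iaa.solarize_(arr, threshold=5)  # ~ iaa.invert_(arr, threshold=5)
# which is consistent with the PIL.ImageOps.solarize comparison in
# Test_solarize above.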
class TestInvert(unittest.TestCase):
def setUp(self):
reseed()
def test_p_is_one(self):
zeros = np.zeros((4, 4, 3), dtype=np.uint8)
observed = iaa.Invert(p=1.0).augment_image(zeros + 255)
expected = zeros
assert observed.dtype.name == "uint8"
assert np.array_equal(observed, expected)
def test_p_is_zero(self):
zeros = np.zeros((4, 4, 3), dtype=np.uint8)
observed = iaa.Invert(p=0.0).augment_image(zeros + 255)
expected = zeros + 255
assert observed.dtype.name == "uint8"
assert np.array_equal(observed, expected)
def test_max_value_set(self):
zeros = np.zeros((4, 4, 3), dtype=np.uint8)
observed = iaa.Invert(p=1.0, max_value=200).augment_image(zeros + 200)
expected = zeros
assert observed.dtype.name == "uint8"
assert np.array_equal(observed, expected)
def test_min_value_and_max_value_set(self):
zeros = np.zeros((4, 4, 3), dtype=np.uint8)
observed = iaa.Invert(p=1.0, max_value=200, min_value=100).augment_image(zeros + 200)
expected = zeros + 100
assert observed.dtype.name == "uint8"
assert np.array_equal(observed, expected)
observed = iaa.Invert(p=1.0, max_value=200, min_value=100).augment_image(zeros + 100)
expected = zeros + 200
assert observed.dtype.name == "uint8"
assert np.array_equal(observed, expected)
def test_min_value_and_max_value_set_with_float_image(self):
# with min/max and float inputs
zeros = np.zeros((4, 4, 3), dtype=np.uint8)
zeros_f32 = zeros.astype(np.float32)
observed = iaa.Invert(p=1.0, max_value=200, min_value=100).augment_image(zeros_f32 + 200)
expected = zeros_f32 + 100
assert observed.dtype.name == "float32"
assert np.array_equal(observed, expected)
observed = iaa.Invert(p=1.0, max_value=200, min_value=100).augment_image(zeros_f32 + 100)
expected = zeros_f32 + 200
assert observed.dtype.name == "float32"
assert np.array_equal(observed, expected)
def test_p_is_80_percent(self):
nb_iterations = 1000
nb_inverted = 0
aug = iaa.Invert(p=0.8)
img = np.zeros((1, 1, 1), dtype=np.uint8) + 255
expected = np.zeros((1, 1, 1), dtype=np.uint8)
for i in sm.xrange(nb_iterations):
observed = aug.augment_image(img)
if np.array_equal(observed, expected):
nb_inverted += 1
pinv = nb_inverted / nb_iterations
assert 0.75 <= pinv <= 0.85
nb_iterations = 1000
nb_inverted = 0
aug = iaa.Invert(p=iap.Binomial(0.8))
img = np.zeros((1, 1, 1), dtype=np.uint8) + 255
expected = np.zeros((1, 1, 1), dtype=np.uint8)
for i in sm.xrange(nb_iterations):
observed = aug.augment_image(img)
if np.array_equal(observed, expected):
nb_inverted += 1
pinv = nb_inverted / nb_iterations
assert 0.75 <= pinv <= 0.85
def test_per_channel(self):
aug = iaa.Invert(p=0.5, per_channel=True)
img = np.zeros((1, 1, 100), dtype=np.uint8) + 255
observed = aug.augment_image(img)
assert len(np.unique(observed)) == 2
# TODO split into two tests
def test_p_is_stochastic_parameter_per_channel_is_probability(self):
nb_iterations = 1000
aug = iaa.Invert(p=iap.Binomial(0.8), per_channel=0.7)
img = np.zeros((1, 1, 20), dtype=np.uint8) + 255
seen = [0, 0]
for i in sm.xrange(nb_iterations):
observed = aug.augment_image(img)
uq = np.unique(observed)
if len(uq) == 1:
seen[0] += 1
elif len(uq) == 2:
seen[1] += 1
else:
assert False
assert 300 - 75 < seen[0] < 300 + 75
assert 700 - 75 < seen[1] < 700 + 75
def test_threshold(self):
arr = np.array([0, 10, 50, 150, 200, 255], dtype=np.uint8)
arr = arr.reshape((2, 3, 1))
aug = iaa.Invert(p=1.0, threshold=128, invert_above_threshold=True)
observed = aug.augment_image(arr)
expected = np.array([0, 10, 50, 255-150, 255-200, 255-255],
dtype=np.uint8).reshape((2, 3, 1))
assert observed.dtype.name == "uint8"
assert np.array_equal(observed, expected)
def test_threshold_inv_below(self):
arr = np.array([0, 10, 50, 150, 200, 255], dtype=np.uint8)
arr = arr.reshape((2, 3, 1))
aug = iaa.Invert(p=1.0, threshold=128, invert_above_threshold=False)
observed = aug.augment_image(arr)
expected = np.array([255-0, 255-10, 255-50, 150, 200, 255],
dtype=np.uint8).reshape((2, 3, 1))
assert observed.dtype.name == "uint8"
assert np.array_equal(observed, expected)
def test_keypoints_dont_change(self):
        # keypoints shouldn't be changed
zeros = np.zeros((4, 4, 3), dtype=np.uint8)
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=zeros.shape)]
aug = iaa.Invert(p=1.0)
aug_det = iaa.Invert(p=1.0).to_deterministic()
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_ = iaa.Invert(p="test")
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
_ = iaa.Invert(p=0.5, per_channel="test")
except Exception:
got_exception = True
assert got_exception
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.Invert(1.0)
image_aug = aug(image=image)
assert np.all(image_aug == 255)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_unusual_channel_numbers(self):
shapes = [
(1, 1, 4),
(1, 1, 5),
(1, 1, 512),
(1, 1, 513)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.Invert(1.0)
image_aug = aug(image=image)
assert np.all(image_aug == 255)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_get_parameters(self):
# test get_parameters()
aug = iaa.Invert(p=0.5, per_channel=False, min_value=10, max_value=20)
params = aug.get_parameters()
assert params[0] is aug.p
assert params[1] is aug.per_channel
assert params[2] == 10
assert params[3] == 20
assert params[4] is aug.threshold
assert params[5] is aug.invert_above_threshold
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.Invert(p=1.0)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_other_dtypes_p_is_zero(self):
# with p=0.0
aug = iaa.Invert(p=0.0)
dtypes = [bool,
np.uint8, np.uint16, np.uint32, np.uint64,
np.int8, np.int16, np.int32, np.int64,
np.float16, np.float32, np.float64, np.float128]
for dtype in dtypes:
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
kind = np.dtype(dtype).kind
image_min = np.full((3, 3), min_value, dtype=dtype)
if dtype is not bool:
image_center = np.full((3, 3), center_value if kind == "f" else int(center_value), dtype=dtype)
image_max = np.full((3, 3), max_value, dtype=dtype)
image_min_aug = aug.augment_image(image_min)
image_center_aug = None
if dtype is not bool:
image_center_aug = aug.augment_image(image_center)
image_max_aug = aug.augment_image(image_max)
assert image_min_aug.dtype == np.dtype(dtype)
if image_center_aug is not None:
assert image_center_aug.dtype == np.dtype(dtype)
assert image_max_aug.dtype == np.dtype(dtype)
if dtype is bool:
assert np.all(image_min_aug == image_min)
assert np.all(image_max_aug == image_max)
elif np.dtype(dtype).kind in ["i", "u"]:
assert np.array_equal(image_min_aug, image_min)
assert np.array_equal(image_center_aug, image_center)
assert np.array_equal(image_max_aug, image_max)
else:
assert np.allclose(image_min_aug, image_min)
assert np.allclose(image_center_aug, image_center)
assert np.allclose(image_max_aug, image_max)
def test_other_dtypes_p_is_one(self):
# with p=1.0
aug = iaa.Invert(p=1.0)
dtypes = [np.uint8, np.uint16, np.uint32, np.uint64,
np.int8, np.int16, np.int32, np.int64,
np.float16, np.float32, np.float64, np.float128]
for dtype in dtypes:
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
kind = np.dtype(dtype).kind
image_min = np.full((3, 3), min_value, dtype=dtype)
if dtype is not bool:
image_center = np.full((3, 3), center_value if kind == "f" else int(center_value), dtype=dtype)
image_max = np.full((3, 3), max_value, dtype=dtype)
image_min_aug = aug.augment_image(image_min)
image_center_aug = None
if dtype is not bool:
image_center_aug = aug.augment_image(image_center)
image_max_aug = aug.augment_image(image_max)
assert image_min_aug.dtype == np.dtype(dtype)
if image_center_aug is not None:
assert image_center_aug.dtype == np.dtype(dtype)
assert image_max_aug.dtype == np.dtype(dtype)
if dtype is bool:
assert np.all(image_min_aug == image_max)
assert np.all(image_max_aug == image_min)
elif np.dtype(dtype).kind in ["i", "u"]:
assert np.array_equal(image_min_aug, image_max)
assert np.allclose(image_center_aug, image_center, atol=1.0+1e-4, rtol=0)
assert np.array_equal(image_max_aug, image_min)
else:
assert np.allclose(image_min_aug, image_max)
assert np.allclose(image_center_aug, image_center)
assert np.allclose(image_max_aug, image_min)
def test_other_dtypes_p_is_one_with_min_value(self):
# with p=1.0 and min_value
aug = iaa.Invert(p=1.0, min_value=1)
dtypes = [np.uint8, np.uint16, np.uint32,
np.int8, np.int16, np.int32,
np.float16, np.float32]
for dtype in dtypes:
_min_value, _center_value, max_value = iadt.get_value_range_of_dtype(dtype)
min_value = 1
kind = np.dtype(dtype).kind
center_value = min_value + 0.5 * (max_value - min_value)
image_min = np.full((3, 3), min_value, dtype=dtype)
if dtype is not bool:
image_center = np.full((3, 3), center_value if kind == "f" else int(center_value), dtype=dtype)
image_max = np.full((3, 3), max_value, dtype=dtype)
image_min_aug = aug.augment_image(image_min)
image_center_aug = None
if dtype is not bool:
image_center_aug = aug.augment_image(image_center)
image_max_aug = aug.augment_image(image_max)
assert image_min_aug.dtype == np.dtype(dtype)
if image_center_aug is not None:
assert image_center_aug.dtype == np.dtype(dtype)
assert image_max_aug.dtype == np.dtype(dtype)
if dtype is bool:
assert np.all(image_min_aug == 1)
assert np.all(image_max_aug == 1)
elif np.dtype(dtype).kind in ["i", "u"]:
assert np.array_equal(image_min_aug, image_max)
assert np.allclose(image_center_aug, image_center, atol=1.0+1e-4, rtol=0)
assert np.array_equal(image_max_aug, image_min)
else:
assert np.allclose(image_min_aug, image_max)
assert np.allclose(image_center_aug, image_center)
assert np.allclose(image_max_aug, image_min)
def test_other_dtypes_p_is_one_with_max_value(self):
# with p=1.0 and max_value
aug = iaa.Invert(p=1.0, max_value=16)
dtypes = [np.uint8, np.uint16, np.uint32,
np.int8, np.int16, np.int32,
np.float16, np.float32]
for dtype in dtypes:
min_value, _center_value, _max_value = iadt.get_value_range_of_dtype(dtype)
max_value = 16
kind = np.dtype(dtype).kind
center_value = min_value + 0.5 * (max_value - min_value)
image_min = np.full((3, 3), min_value, dtype=dtype)
if dtype is not bool:
image_center = np.full((3, 3), center_value if kind == "f" else int(center_value), dtype=dtype)
image_max = np.full((3, 3), max_value, dtype=dtype)
image_min_aug = aug.augment_image(image_min)
image_center_aug = None
if dtype is not bool:
image_center_aug = aug.augment_image(image_center)
image_max_aug = aug.augment_image(image_max)
assert image_min_aug.dtype == np.dtype(dtype)
if image_center_aug is not None:
assert image_center_aug.dtype == np.dtype(dtype)
assert image_max_aug.dtype == np.dtype(dtype)
if dtype is bool:
assert not np.any(image_min_aug == 1)
assert not np.any(image_max_aug == 1)
elif np.dtype(dtype).kind in ["i", "u"]:
assert np.array_equal(image_min_aug, image_max)
assert np.allclose(image_center_aug, image_center, atol=1.0+1e-4, rtol=0)
assert np.array_equal(image_max_aug, image_min)
else:
assert np.allclose(image_min_aug, image_max)
if dtype is np.float16:
# for float16, this is off by about 10
assert np.allclose(image_center_aug, image_center, atol=0.001*np.finfo(dtype).max)
else:
assert | np.allclose(image_center_aug, image_center) | numpy.allclose |
__author__ = 'jan'
from matplotlib.testing.decorators import image_comparison
import prettyplotlib as ppl
import numpy as np
import os
import string
import six
if six.PY3:
UPPERCASE_CHARS = string.ascii_uppercase
else:
UPPERCASE_CHARS = string.uppercase
@image_comparison(baseline_images=['barh'], extensions=['png'])
def test_barh():
np.random.seed(14)
ppl.barh(np.arange(10), np.abs(np.random.randn(10)))
# fig.savefig('%s/baseline_images/test_barh/bar.png' %
# os.path.dirname(os.path.abspath(__file__)))
@image_comparison(baseline_images=['barh_grid'], extensions=['png'])
def test_barh_grid():
np.random.seed(14)
ppl.barh(np.arange(10), np.abs(np.random.randn(10)), grid='x')
# fig.savefig('%s/baseline_images/test_barh/bar_grid.png' %
# os.path.dirname(os.path.abspath(__file__)))
@image_comparison(baseline_images=['barh_annotate'], extensions=['png'])
def test_barh_annotate():
| np.random.seed(14) | numpy.random.seed |
def vibrations(inp):
'''Calculate the vibrational frequencies.'''
import constants
import numpy as np
    import scipy as sp
    import scipy.linalg  # make sp.linalg available below
from pyscf.future import hessian
from .scf import do_scf
from .read_input import pstr
na = inp.mol.natm
n3 = na * 3
# get atom coords and sqrt of mass
q = inp.mol.atom_coords().reshape(n3)
m = np.zeros((na,3))
for i in range(na):
sym = constants.elem[inp.mol.atom_charge(i)]
m[i] = constants.atomic_mass(sym)
m = m.reshape(n3)
m = np.sqrt(m)
# get mass-weighted coordinates
x = q * m
inp.timer.start('scf')
inp = do_scf(inp)
inp.timer.end('scf')
# get hessian
inp.timer.start('hessian')
if hasattr(inp, 'hessian'):
h = inp.hessian
else:
if inp.scf.method in ('uhf', 'rhf', 'hf'):
h = hessian.RHF(inp.mf).kernel()
else:
h = hessian.RKS(inp.mf).kernel()
h = h.transpose(0,2,1,3).reshape(n3,n3)
inp.timer.end('hessian')
# mass-weighted hessian
M = np.outer(m,m)
    h /= M    # H_ij / sqrt(m_i*m_j); dividing by the vector m alone would weight only one axis
    print(h.reshape(na,3,na,3))
# diagonalize
w, X = sp.linalg.eig(h)
imf = | np.where( w < 0.) | numpy.where |
#!/usr/bin/env python3
import numpy as np
import re
from pkg_resources import resource_filename
from ..num.num_input import Num_input
from directdm.run import rge
#-----------------------#
# Conventions and Basis #
#-----------------------#
# The basis of operators in the DM-SM sector below the weak scale (5-flavor EFT) is given by
# dim.5 (2 operators)
#
# 'C51', 'C52',
# dim.6 (32 operators)
#
# 'C61u', 'C61d', 'C61s', 'C61c', 'C61b', 'C61e', 'C61mu', 'C61tau',
# 'C62u', 'C62d', 'C62s', 'C62c', 'C62b', 'C62e', 'C62mu', 'C62tau',
# 'C63u', 'C63d', 'C63s', 'C63c', 'C63b', 'C63e', 'C63mu', 'C63tau',
# 'C64u', 'C64d', 'C64s', 'C64c', 'C64b', 'C64e', 'C64mu', 'C64tau',
# dim.7 (129 operators)
#
# 'C71', 'C72', 'C73', 'C74',
# 'C75u', 'C75d', 'C75s', 'C75c', 'C75b', 'C75e', 'C75mu', 'C75tau',
# 'C76u', 'C76d', 'C76s', 'C76c', 'C76b', 'C76e', 'C76mu', 'C76tau',
# 'C77u', 'C77d', 'C77s', 'C77c', 'C77b', 'C77e', 'C77mu', 'C77tau',
# 'C78u', 'C78d', 'C78s', 'C78c', 'C78b', 'C78e', 'C78mu', 'C78tau',
# 'C79u', 'C79d', 'C79s', 'C79c', 'C79b', 'C79e', 'C79mu', 'C79tau',
# 'C710u', 'C710d', 'C710s', 'C710c', 'C710b', 'C710e', 'C710mu', 'C710tau',
# 'C711', 'C712', 'C713', 'C714',
# 'C715u', 'C715d', 'C715s', 'C715c', 'C715b', 'C715e', 'C715mu', 'C715tau',
# 'C716u', 'C716d', 'C716s', 'C716c', 'C716b', 'C716e', 'C716mu', 'C716tau',
# 'C717u', 'C717d', 'C717s', 'C717c', 'C717b', 'C717e', 'C717mu', 'C717tau',
# 'C718u', 'C718d', 'C718s', 'C718c', 'C718b', 'C718e', 'C718mu', 'C718tau',
# 'C719u', 'C719d', 'C719s', 'C719c', 'C719b', 'C719e', 'C719mu', 'C719tau',
# 'C720u', 'C720d', 'C720s', 'C720c', 'C720b', 'C720e', 'C720mu', 'C720tau',
# 'C721u', 'C721d', 'C721s', 'C721c', 'C721b', 'C721e', 'C721mu', 'C721tau',
# 'C722u', 'C722d', 'C722s', 'C722c', 'C722b', 'C722e', 'C722mu', 'C722tau',
# 'C723u', 'C723d', 'C723s', 'C723c', 'C723b', 'C723e', 'C723mu', 'C723tau',
# 'C725',
# dim.8 (12 operators)
#
# 'C81u', 'C81d', 'C81s', 'C82u', 'C82d', 'C82s'
# 'C83u', 'C83d', 'C83s', 'C84u', 'C84d', 'C84s'
# In total, we have 2+32+129+12=175 operators.
# In total, we have 2+32+129=163 operators w/o dim.8.
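# Editor's note: a quick, self-contained consistency check of the counts
# quoted above (illustrative only; the numbers are copied from the comments).
_N_DIM5, _N_DIM6, _N_DIM7, _N_DIM8 = 2, 32, 129, 12
assert _N_DIM5 + _N_DIM6 + _N_DIM7 + _N_DIM8 == 175
assert _N_DIM5 + _N_DIM6 + _N_DIM7 == 163  # basis size used below (w/o dim.8)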
#-----------------------------#
# The QED anomalous dimension #
#-----------------------------#
def ADM_QED(nf):
""" Return the QED anomalous dimension in the DM-SM sector for nf flavor EFT """
Qu = 2/3
Qd = -1/3
Qe = -1
nc = 3
gamma_QED = np.array([[8/3*Qu*Qu*nc, 8/3*Qu*Qd*nc, 8/3*Qu*Qd*nc, 8/3*Qu*Qu*nc,\
8/3*Qu*Qd*nc, 8/3*Qu*Qe*nc, 8/3*Qu*Qe*nc, 8/3*Qu*Qe*nc],
[8/3*Qd*Qu*nc, 8/3*Qd*Qd*nc, 8/3*Qd*Qd*nc, 8/3*Qd*Qu*nc,\
8/3*Qd*Qd*nc, 8/3*Qd*Qe*nc, 8/3*Qd*Qe*nc, 8/3*Qd*Qe*nc],
[8/3*Qd*Qu*nc, 8/3*Qd*Qd*nc, 8/3*Qd*Qd*nc, 8/3*Qd*Qu*nc,\
8/3*Qd*Qd*nc, 8/3*Qd*Qe*nc, 8/3*Qd*Qe*nc, 8/3*Qd*Qe*nc],
[8/3*Qu*Qu*nc, 8/3*Qu*Qd*nc, 8/3*Qu*Qd*nc, 8/3*Qu*Qu*nc,\
8/3*Qu*Qd*nc, 8/3*Qu*Qe*nc, 8/3*Qu*Qe*nc, 8/3*Qu*Qe*nc],
[8/3*Qd*Qu*nc, 8/3*Qd*Qd*nc, 8/3*Qd*Qd*nc, 8/3*Qd*Qu*nc,\
8/3*Qd*Qd*nc, 8/3*Qd*Qe*nc, 8/3*Qd*Qe*nc, 8/3*Qd*Qe*nc],
[8/3*Qe*Qu, 8/3*Qe*Qd, 8/3*Qe*Qd, 8/3*Qe*Qu,\
8/3*Qe*Qd, 8/3*Qe*Qe, 8/3*Qe*Qe, 8/3*Qe*Qe],
[8/3*Qe*Qu, 8/3*Qe*Qd, 8/3*Qe*Qd, 8/3*Qe*Qu,\
8/3*Qe*Qd, 8/3*Qe*Qe, 8/3*Qe*Qe, 8/3*Qe*Qe],
[8/3*Qe*Qu, 8/3*Qe*Qd, 8/3*Qe*Qd, 8/3*Qe*Qu,\
8/3*Qe*Qd, 8/3*Qe*Qe, 8/3*Qe*Qe, 8/3*Qe*Qe]])
gamma_QED_1 = np.zeros((2,163))
gamma_QED_2 = np.hstack((np.zeros((8,2)),gamma_QED,np.zeros((8,153))))
gamma_QED_3 = np.hstack((np.zeros((8,10)),gamma_QED,np.zeros((8,145))))
gamma_QED_4 = np.zeros((145,163))
gamma_QED = np.vstack((gamma_QED_1, gamma_QED_2, gamma_QED_3, gamma_QED_4))
if nf == 5:
return gamma_QED
elif nf == 4:
return np.delete(np.delete(gamma_QED, [6, 14, 22, 30, 42, 50, 58, 66, 74, 82, 94,\
102, 110, 118, 126, 134, 142, 150, 158], 0)\
, [6, 14, 22, 30, 42, 50, 58, 66, 74, 82, 94,\
102, 110, 118, 126, 134, 142, 150, 158], 1)
elif nf == 3:
return np.delete(np.delete(gamma_QED, [5,6, 13,14, 21,22, 29,30, 41,42,\
49,50, 57,58, 65,66, 73,74, 81,82,\
93,94, 101,102, 109,110, 117,118,\
125,126, 133,134, 141,142, 149,150, 158,159], 0)\
, [5,6, 13,14, 21,22, 29,30, 41,42,\
49,50, 57,58, 65,66, 73,74, 81,82,\
93,94, 101,102, 109,110, 117,118,\
125,126, 133,134, 141,142, 149,150, 158,159], 1)
else:
raise Exception("nf has to be 3, 4 or 5")
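# Editor's sketch (not part of the original module): the returned matrix is
# square in the nf-dependent operator basis -- 163 operators for nf=5, and
# smaller once the b-quark (nf=4) or b- and c-quark (nf=3) operators are
# deleted above.
def _demo_adm_qed_shapes():
    """Illustrative only: check the basis size for each supported nf."""
    for nf, n_ops in [(5, 163), (4, 144), (3, 125)]:
        assert ADM_QED(nf).shape == (n_ops, n_ops)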
def ADM_QED2(nf):
""" Return the QED anomalous dimension in the DM-SM sector for nf flavor EFT at alpha^2 """
# Mixing of Q_{11}^(7) into Q_{5,f}^(7) and Q_{12}^(7) into Q_{6,f}^(7), adapted from Hill et al. [1409.8290].
gamma_gf = -8
gamma_QED2_gf = np.array([5*[gamma_gf]])
gamma_QED2_1 = np.zeros((86,163))
gamma_QED2_2 = np.hstack((np.zeros((1,38)),gamma_QED2_gf,np.zeros((1,120))))
gamma_QED2_3 = np.hstack((np.zeros((1,46)),gamma_QED2_gf,np.zeros((1,112))))
gamma_QED2_4 = np.zeros((75,163))
gamma_QED2 = np.vstack((gamma_QED2_1, gamma_QED2_2, gamma_QED2_3, gamma_QED2_4))
if nf == 5:
return gamma_QED2
elif nf == 4:
return np.delete(np.delete(gamma_QED2, [6, 14, 22, 30, 42, 50, 58, 66, 74, 82, 94,\
102, 110, 118, 126, 134, 142, 150, 158], 0)\
, [6, 14, 22, 30, 42, 50, 58, 66, 74, 82, 94,\
102, 110, 118, 126, 134, 142, 150, 158], 1)
elif nf == 3:
return np.delete(np.delete(gamma_QED2, [5,6, 13,14, 21,22, 29,30, 41,42,\
49,50, 57,58, 65,66, 73,74, 81,82,\
93,94, 101,102, 109,110, 117,118,\
125,126, 133,134, 141,142, 149,150, 158,159], 0)\
, [5,6, 13,14, 21,22, 29,30, 41,42,\
49,50, 57,58, 65,66, 73,74, 81,82,\
93,94, 101,102, 109,110, 117,118,\
125,126, 133,134, 141,142, 149,150, 158,159], 1)
else:
raise Exception("nf has to be 3, 4 or 5")
#------------------------------#
# The QCD anomalous dimensions #
#------------------------------#
def ADM_QCD(nf):
""" Return the QCD anomalous dimension in the DM-SM sector for nf flavor EFT, when ADM starts at O(alphas) """
gamma_QCD_T = 32/3 * np.eye(5)
gt2qq = 64/9
gt2qg = -4/3
gt2gq = -64/9
gt2gg = 4/3*nf
gamma_twist2 = np.array([[gt2qq, 0, 0, 0, 0, 0, 0, 0, gt2qg],
[0, gt2qq, 0, 0, 0, 0, 0, 0, gt2qg],
[0, 0, gt2qq, 0, 0, 0, 0, 0, gt2qg],
[0, 0, 0, gt2qq, 0, 0, 0, 0, gt2qg],
[0, 0, 0, 0, gt2qq, 0, 0, 0, gt2qg],
[0, 0, 0, 0, 0, 0, 0, 0, 0 ],
[0, 0, 0, 0, 0, 0, 0, 0, 0 ],
[0, 0, 0, 0, 0, 0, 0, 0, 0 ],
[gt2gq, gt2gq, gt2gq, gt2gq, gt2gq, 0, 0, 0, gt2gg]])
gamma_QCD_1 = np.zeros((70,163))
gamma_QCD_2 = np.hstack((np.zeros((5,70)), gamma_QCD_T, np.zeros((5,88))))
gamma_QCD_3 = np.zeros((3,163))
gamma_QCD_4 = np.hstack((np.zeros((5,78)), gamma_QCD_T, np.zeros((5,80))))
gamma_QCD_5 = np.zeros((71,163))
gamma_QCD_6 = np.hstack((np.zeros((9,154)), gamma_twist2))
gamma_QCD = [np.vstack((gamma_QCD_1, gamma_QCD_2, gamma_QCD_3,\
gamma_QCD_4, gamma_QCD_5, gamma_QCD_6))]
if nf == 5:
return gamma_QCD
elif nf == 4:
return np.delete(np.delete(gamma_QCD, [6, 14, 22, 30, 42, 50, 58, 66, 74, 82, 94,\
102, 110, 118, 126, 134, 142, 150, 158], 1)\
, [6, 14, 22, 30, 42, 50, 58, 66, 74, 82, 94,\
102, 110, 118, 126, 134, 142, 150, 158], 2)
elif nf == 3:
return np.delete(np.delete(gamma_QCD, [5,6, 13,14, 21,22, 29,30, 41,42,\
49,50, 57,58, 65,66, 73,74, 81,82,\
93,94, 101,102, 109,110, 117,118,\
125,126, 133,134, 141,142, 149,150, 158,159], 1)\
, [5,6, 13,14, 21,22, 29,30, 41,42,\
49,50, 57,58, 65,66, 73,74, 81,82,\
93,94, 101,102, 109,110, 117,118,\
125,126, 133,134, 141,142, 149,150, 158,159], 2)
else:
raise Exception("nf has to be 3, 4 or 5")
def ADM_QCD2(nf):
# CHECK ADM #
""" Return the QCD anomalous dimension in the DM-SM sector for nf flavor EFT, when ADM starts at O(alphas^2) """
# Mixing of Q_1^(7) into Q_{5,q}^(7) and Q_2^(7) into Q_{6,q}^(7), from Hill et al. [1409.8290].
# Note that we have different prefactors and signs.
cf = 4/3
gamma_gq = 8*cf # changed 2019-08-29, double check with RG solution
# Mixing of Q_3^(7) into Q_{7,q}^(7) and Q_4^(7) into Q_{8,q}^(7), from Hill et al. [1409.8290].
# Note that we have different prefactors and signs.
gamma_5gq = -8 # changed 2019-08-29, double check with RG solution
gamma_QCD2_gq = np.array([5*[gamma_gq]])
gamma_QCD2_5gq = np.array([5*[gamma_5gq]])
gamma_QCD2_1 = np.zeros((34,163))
gamma_QCD2_2 = np.hstack((np.zeros((1,38)),gamma_QCD2_gq,np.zeros((1,120))))
gamma_QCD2_3 = np.hstack((np.zeros((1,46)),gamma_QCD2_gq,np.zeros((1,112))))
gamma_QCD2_4 = np.hstack((np.zeros((1,54)),gamma_QCD2_5gq,np.zeros((1,104))))
gamma_QCD2_5 = np.hstack((np.zeros((1,62)),gamma_QCD2_5gq,np.zeros((1,96))))
gamma_QCD2_6 = np.zeros((125,163))
gamma_QCD2 = [np.vstack((gamma_QCD2_1, gamma_QCD2_2, gamma_QCD2_3,\
gamma_QCD2_4, gamma_QCD2_5, gamma_QCD2_6))]
if nf == 5:
return gamma_QCD2
elif nf == 4:
return np.delete(np.delete(gamma_QCD2, [6, 14, 22, 30, 42, 50, 58, 66, 74, 82, 94,\
102, 110, 118, 126, 134, 142, 150, 158], 1)\
, [6, 14, 22, 30, 42, 50, 58, 66, 74, 82, 94,\
102, 110, 118, 126, 134, 142, 150, 158], 2)
elif nf == 3:
return np.delete(np.delete(gamma_QCD2, [5,6, 13,14, 21,22, 29,30, 41,42,\
49,50, 57,58, 65,66, 73,74, 81,82,\
93,94, 101,102, 109,110, 117,118,\
125,126, 133,134, 141,142, 149,150, 158,159], 1)\
, [5,6, 13,14, 21,22, 29,30, 41,42,\
49,50, 57,58, 65,66, 73,74, 81,82,\
93,94, 101,102, 109,110, 117,118,\
125,126, 133,134, 141,142, 149,150, 158,159], 2)
else:
raise Exception("nf has to be 3, 4 or 5")
def ADM5(Ychi, dchi):
""" The dimension-five anomalous dimension
    Return a numpy array with the anomalous dimension matrices for g1, g2, g3, yc, ytau, yb, and yt
The Higgs self coupling lambda is currently ignored.
Variables
---------
Ychi: The DM hypercharge, defined via the Gell-Mann - Nishijima relation Q = I_W^3 + Ychi/2.
dchi: The dimension of the electroweak SU(2) representation furnished by the DM multiplet.
"""
jj1 = (dchi**2-1)/4
# The beta functions for one multiplet
b1 = - 41/6 - Ychi**2 * dchi/3
b2 = 19/6 - 4*jj1*dchi/9
adm5_g1 = np.array([[5/2*Ychi**2-2*b1, 0, -6*Ychi, 0, 0, 0, 0, 0],
[-4*Ychi*jj1, Ychi**2/2, 0, 12*Ychi, 0, 0, 0, 0],
[0, 0, -3/2*(1+Ychi**2), 0, 0, 0, 0, 0],
[0, 0, 0, -3/2*(1+Ychi**2), 0, 0, 0, 0],
[0, 0, 0, 0, 5/2*Ychi**2-2*b1, 0, -6*Ychi, 0],
[0, 0, 0, 0, -4*Ychi*jj1, Ychi**2/2, 0, 12*Ychi],
[0, 0, 0, 0, 0, 0, -3/2*(1+Ychi**2), 0],
[0, 0, 0, 0, 0, 0, 0, -3/2*(1+Ychi**2)]])
adm5_g2 = np.array([[2*jj1, -4*Ychi, 0, -24, 0, 0, 0, 0],
[0, (10*jj1-8)-2*b2, 12*jj1, 0, 0, 0, 0, 0],
[0, 0, (-9/2-6*jj1), 0, 0, 0, 0, 0],
[0, 0, 0, (3/2-6*jj1), 0, 0, 0, 0],
[0, 0, 0, 0, 2*jj1, -4*Ychi, 0, -24],
[0, 0, 0, 0, 0, (10*jj1-8)-2*b2, 12*jj1, 0],
[0, 0, 0, 0, 0, 0, (-9/2-6*jj1), 0],
[0, 0, 0, 0, 0, 0, 0, (3/2-6*jj1)]])
adm5_g3 = np.zeros((8,8))
adm5_yc = np.diag([0,0,6,6,0,0,6,6])
adm5_ytau = np.diag([0,0,2,2,0,0,2,2])
adm5_yb = np.diag([0,0,6,6,0,0,6,6])
adm5_yt = np.diag([0,0,6,6,0,0,6,6])
adm5_lam = np.diag([0,0,3,1,0,0,3,1])
full_adm = np.array([adm5_g1, adm5_g2, adm5_g3, adm5_yc, adm5_ytau, adm5_yb, adm5_yt, adm5_lam])
if dchi == 1:
return np.delete(np.delete(full_adm, [1,3,5,7], 1), [1,3,5,7], 2)
else:
return full_adm
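# Editor's sketch (not part of the original module): ADM5 returns one 8x8
# (or 4x4 for an SU(2) singlet) matrix per coupling, in the order
# (g1, g2, g3, yc, ytau, yb, yt, lambda) used for full_adm above.
def _demo_adm5_shapes():
    """Illustrative only: the Ychi/dchi values here are arbitrary examples."""
    assert ADM5(Ychi=1, dchi=3).shape == (8, 8, 8)
    assert ADM5(Ychi=0, dchi=1).shape == (8, 4, 4)  # singlet: doublet rows/columns removed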
def ADM6(Ychi, dchi):
""" The dimension-five anomalous dimension
    Return a numpy array with the anomalous dimension matrices for g1, g2, g3, yc, ytau, yb, and yt
The running due to the Higgs self coupling lambda is currently ignored.
The operator basis is Q1-Q14 1st, 2nd, 3rd gen.; S1-S17 (mixing of gen: 1-1, 2-2, 3-3, 1-2, 1-3, 2-3),
S18-S24 1st, 2nd, 3rd gen., S25; D1-D4.
The explicit ordering of the operators, including flavor indices, is contained in the file
"directdm/run/operator_ordering.txt"
Variables
---------
Ychi: The DM hypercharge, defined via the Gell-Mann - Nishijima relation Q = I_W^3 + Ychi/2.
dchi: The dimension of the electroweak SU(2) representation furnished by the DM multiplet.
"""
scope = locals()
def load_adm(admfile):
with open(admfile, "r") as f:
adm = []
for line in f:
line = re.sub("\n", "", line)
line = line.split(",")
adm.append(list(map(lambda x: eval(x, scope), line)))
return adm
admg1 = load_adm(resource_filename("directdm", "run/full_adm_g1.py"))
admg2 = load_adm(resource_filename("directdm", "run/full_adm_g2.py"))
admg3 = np.zeros((207,207))
admyc = load_adm(resource_filename("directdm", "run/full_adm_yc.py"))
admytau = load_adm(resource_filename("directdm", "run/full_adm_ytau.py"))
admyb = load_adm(resource_filename("directdm", "run/full_adm_yb.py"))
admyt = load_adm(resource_filename("directdm", "run/full_adm_yt.py"))
admlam = np.zeros((207,207))
full_adm = np.array([np.array(admg1), np.array(admg2), admg3,\
np.array(admyc), | np.array(admytau) | numpy.array |
# Author: <NAME>
# Date: 26 November 2016
# Python version: 3.5
# Updated June 2018 by <NAME> (KTH dESA)
# Modified grid algorithm and population calibration to improve computational speed
import logging
import pandas as pd
from math import pi, exp, log, sqrt, ceil
# from pyproj import Proj
import numpy as np
from collections import defaultdict
import os
logging.basicConfig(format='%(asctime)s\t\t%(message)s', level=logging.DEBUG)
# general
LHV_DIESEL = 9.9445485 # (kWh/l) lower heating value
HOURS_PER_YEAR = 8760
# Columns in settlements file must match these exactly
SET_COUNTRY = 'Country' # This cannot be changed, lots of code will break
SET_X = 'X' # Coordinate in metres/kilometres
SET_Y = 'Y' # Coordinate in metres/kilometres
SET_X_DEG = 'X_deg' # Coordinates in degrees
SET_Y_DEG = 'Y_deg'
SET_POP = 'Pop' # Population in people per point (equally, people per km2)
SET_POP_CALIB = 'PopStartCalibrated' # Calibrated population to reference year, same units
SET_POP_FUTURE = 'PopFuture' # Project future population, same units
SET_GRID_DIST_CURRENT = 'GridDistCurrent' # Distance in km from current grid
SET_GRID_DIST_PLANNED = 'GridDistPlan' # Distance in km from current and future grid
SET_ROAD_DIST = 'RoadDist' # Distance in km from road network
SET_NIGHT_LIGHTS = 'VIIRS' # Intensity of night time lights (from NASA), range 0 - 63
SET_TRAVEL_HOURS = 'TravelHours' # Travel time to large city in hours
SET_GHI = 'GHI' # Global horizontal irradiance in kWh/m2/day
SET_WINDVEL = 'WindVel' # Wind velocity in m/s
SET_WINDCF = 'WindCF' # Wind capacity factor as percentage (range 0 - 1)
SET_HYDRO = 'Hydropower' # Hydropower potential in kW
SET_HYDRO_DIST = 'HydropowerDist' # Distance to hydropower site in km
SET_HYDRO_FID = 'HydropowerFID'  # the unique tag for each hydropower site, so it is not over-utilised
SET_SUBSTATION_DIST = 'SubstationDist'
SET_ELEVATION = 'Elevation' # in metres
SET_SLOPE = 'Slope' # in degrees
SET_LAND_COVER = 'LandCover'
SET_SOLAR_RESTRICTION = 'SolarRestriction'
SET_ROAD_DIST_CLASSIFIED = 'RoadDistClassified'
SET_SUBSTATION_DIST_CLASSIFIED = 'SubstationDistClassified'
SET_ELEVATION_CLASSIFIED = 'ElevationClassified'
SET_SLOPE_CLASSIFIED = 'SlopeClassified'
SET_LAND_COVER_CLASSIFIED = 'LandCoverClassified'
SET_COMBINED_CLASSIFICATION = 'GridClassification'
SET_GRID_PENALTY = 'GridPenalty'
SET_URBAN = 'IsUrban' # Whether the site is urban (0 or 1)
SET_ENERGY_PER_HH = 'EnergyPerHH'
SET_NUM_PEOPLE_PER_HH = 'NumPeoplePerHH'
SET_ELEC_CURRENT = 'ElecStart' # If the site is currently electrified (0 or 1)
SET_ELEC_FUTURE = 'ElecFuture' # If the site has the potential to be 'easily' electrified in future
SET_NEW_CONNECTIONS = 'NewConnections' # Number of new people with electricity connections
SET_NEW_CONNECTIONS_PROD = 'New_Connections_Prod'  # Number of new people with electricity connections, plus the corresponding productive uses
SET_MIN_GRID_DIST = 'MinGridDist'
SET_LCOE_GRID = 'Grid' # All lcoes in USD/kWh
SET_LCOE_SA_PV = 'SA_PV'
SET_LCOE_SA_DIESEL = 'SA_Diesel'
SET_LCOE_MG_WIND = 'MG_Wind'
SET_LCOE_MG_DIESEL = 'MG_Diesel'
SET_LCOE_MG_PV = 'MG_PV'
SET_LCOE_MG_HYDRO = 'MG_Hydro'
SET_LCOE_MG_HYBRID = 'MG_Hybrid'
SET_MIN_OFFGRID = 'MinimumOffgrid' # The technology with lowest lcoe (excluding grid)
SET_MIN_OVERALL = 'MinimumOverall' # Same as above, but including grid
SET_MIN_OFFGRID_LCOE = 'MinimumTechLCOE' # The lcoe value for minimum tech
SET_MIN_OVERALL_LCOE = 'MinimumOverallLCOE' # The lcoe value for overall minimum
SET_MIN_OVERALL_CODE = 'MinimumOverallCode' # And a code from 1 - 7 to represent that option
SET_MIN_CATEGORY = 'MinimumCategory' # The category with minimum lcoe (grid, minigrid or standalone)
SET_NEW_CAPACITY = 'NewCapacity' # Capacity in kW
SET_INVESTMENT_COST = 'InvestmentCost' # The investment cost in USD
# Columns in the specs file must match these exactly
SPE_COUNTRY = 'Country'
SPE_POP = 'Pop2016' # The actual population in the base year
SPE_URBAN = 'UrbanRatio2016' # The ratio of urban population (range 0 - 1) in base year
SPE_POP_FUTURE = 'Pop2030'
SPE_URBAN_FUTURE = 'UrbanRatio2030'
SPE_URBAN_MODELLED = 'UrbanRatioModelled' # The urban ratio in the model after calibration (for comparison)
SPE_URBAN_CUTOFF = 'UrbanCutOff'  # The urban cutoff population calibrated by the model, in people per km2
SPE_URBAN_GROWTH = 'UrbanGrowth'  # The urban growth rate as a simple multiplier (urban pop future / urban pop present)
SPE_RURAL_GROWTH = 'RuralGrowth' # Same as for urban
SPE_NUM_PEOPLE_PER_HH_RURAL = 'NumPeoplePerHHRural'
SPE_NUM_PEOPLE_PER_HH_URBAN = 'NumPeoplePerHHUrban'
SPE_DIESEL_PRICE_LOW = 'DieselPriceLow' # Diesel price in USD/litre
SPE_DIESEL_PRICE_HIGH = 'DieselPriceHigh'  # Same, for the high forecast variant
SPE_GRID_PRICE = 'GridPrice' # Grid price of electricity in USD/kWh
SPE_GRID_CAPACITY_INVESTMENT = 'GridCapacityInvestmentCost' # grid capacity investments costs from TEMBA USD/kW
SPE_GRID_LOSSES = 'GridLosses' # As a ratio (0 - 1)
SPE_BASE_TO_PEAK = 'BaseToPeak' # As a ratio (0 - 1)
SPE_EXISTING_GRID_COST_RATIO = 'ExistingGridCostRatio'
SPE_MAX_GRID_DIST = 'MaxGridDist'
SPE_ELEC = 'ElecActual' # Actual current percentage electrified population (0 - 1)
SPE_ELEC_MODELLED = 'ElecModelled' # The modelled version after calibration (for comparison)
SPE_MIN_NIGHT_LIGHTS = 'MinNightLights'
SPE_MAX_GRID_EXTENSION_DIST = 'MaxGridExtensionDist'
SPE_MAX_ROAD_DIST = 'MaxRoadDist'
SPE_POP_CUTOFF1 = 'PopCutOffRoundOne'
SPE_POP_CUTOFF2 = 'PopCutOffRoundTwo'
class Technology:
"""
Used to define the parameters for each electricity access technology, and to calculate the LCOE depending on
input parameters.
"""
start_year = 2016
end_year = 2030
discount_rate = 0.08
grid_cell_area = 1 # in km2, normally 1km2
mv_line_cost = 9000 # USD/km
lv_line_cost = 5000 # USD/km
mv_line_capacity = 50 # kW/line
lv_line_capacity = 10 # kW/line
lv_line_max_length = 30 # km
hv_line_cost = 53000 # USD/km
mv_line_max_length = 50 # km
hv_lv_transformer_cost = 5000 # USD/unit
mv_increase_rate = 0.1 # percentage
def __init__(self,
tech_life, # in years
base_to_peak_load_ratio,
distribution_losses=0, # percentage
connection_cost_per_hh=0, # USD/hh
om_costs=0.0, # OM costs as percentage of capital costs
capital_cost=0, # USD/kW
capacity_factor=1.0, # percentage
efficiency=1.0, # percentage
diesel_price=0.0, # USD/litre
grid_price=0.0, # USD/kWh for grid electricity
standalone=False,
mg_pv=False,
mg_wind=False,
mg_diesel=False,
mg_hydro=False,
grid_capacity_investment=0.0, # USD/kW for on-grid capacity investments (excluding grid itself)
diesel_truck_consumption=0, # litres/hour
diesel_truck_volume=0, # litres
om_of_td_lines=0): # percentage
self.distribution_losses = distribution_losses
self.connection_cost_per_hh = connection_cost_per_hh
self.base_to_peak_load_ratio = base_to_peak_load_ratio
self.tech_life = tech_life
self.om_costs = om_costs
self.capital_cost = capital_cost
self.capacity_factor = capacity_factor
self.efficiency = efficiency
self.diesel_price = diesel_price
self.grid_price = grid_price
self.standalone = standalone
self.mg_pv = mg_pv
self.mg_wind = mg_wind
self.mg_diesel = mg_diesel
self.mg_hydro = mg_hydro
self.grid_capacity_investment = grid_capacity_investment
self.diesel_truck_consumption = diesel_truck_consumption
self.diesel_truck_volume = diesel_truck_volume
self.om_of_td_lines = om_of_td_lines
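    # Editor's sketch (illustrative values only, not calibrated data): a
    # mini-grid PV technology could be instantiated along these lines --
    #
    #   mg_pv = Technology(tech_life=20,
    #                      base_to_peak_load_ratio=0.9,
    #                      distribution_losses=0.05,
    #                      connection_cost_per_hh=100,
    #                      om_costs=0.02,
    #                      capital_cost=2950,
    #                      capacity_factor=0.25,
    #                      mg_pv=True,
    #                      om_of_td_lines=0.03)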
def pv_diesel_hybrid(self,
energy_per_hh, # kWh/household/year as defined
max_ghi, # highest annual GHI value encountered in the GIS data
max_travel_hours, # highest value for travel hours encountered in the GIS data
diesel_no=1, # 50, # number of diesel generators simulated
pv_no=1, #70, # number of PV panel sizes simulated
n_chg=0.92, # charge efficiency of battery
n_dis=0.92, # discharge efficiency of battery
lpsp=0.05, # maximum loss of load allowed over the year, in share of kWh
battery_cost=150, # battery capital capital cost, USD/kWh of storage capacity
pv_cost=2490, # PV panel capital cost, USD/kW peak power
diesel_cost=550, # diesel generator capital cost, USD/kW rated power
pv_life=20, # PV panel expected lifetime, years
diesel_life=15, # diesel generator expected lifetime, years
pv_om=0.015, # annual OM cost of PV panels
diesel_om=0.1, # annual OM cost of diesel generator
k_t=0.005): # temperature factor of PV panels
        ghi = pd.read_csv('Supplementary_files/GHI_hourly.csv', usecols=[4], sep=';', skiprows=21).values
        # hourly GHI values downloaded from SoDa for one location in the country
        temp = pd.read_csv('Supplementary_files/Temperature_hourly.csv', usecols=[4], sep=';', skiprows=21).values
# hourly temperature values downloaded from SoDa for one location in the country
hour_numbers = (0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23) * 365
LHV_DIESEL = 9.9445485
dod_max = 0.8 # maximum depth of discharge of battery
# the values below define the load curve for the five tiers. The values reflect the share of the daily demand
# expected in each hour of the day (sum of all values for one tier = 1)
tier5_load_curve = np.array([0.021008403, 0.021008403, 0.021008403, 0.021008403, 0.027310924, 0.037815126,
0.042016807, 0.042016807, 0.042016807, 0.042016807, 0.042016807, 0.042016807,
0.042016807, 0.042016807, 0.042016807, 0.042016807, 0.046218487, 0.050420168,
0.067226891, 0.084033613, 0.073529412, 0.052521008, 0.033613445, 0.023109244])
tier4_load_curve = np.array([0.017167382, 0.017167382, 0.017167382, 0.017167382, 0.025751073, 0.038626609,
0.042918455, 0.042918455, 0.042918455, 0.042918455, 0.042918455, 0.042918455,
0.042918455, 0.042918455, 0.042918455, 0.042918455, 0.0472103, 0.051502146,
0.068669528, 0.08583691, 0.075107296, 0.053648069, 0.034334764, 0.021459227])
tier3_load_curve = np.array([0.013297872, 0.013297872, 0.013297872, 0.013297872, 0.019060284, 0.034574468,
0.044326241, 0.044326241, 0.044326241, 0.044326241, 0.044326241, 0.044326241,
0.044326241, 0.044326241, 0.044326241, 0.044326241, 0.048758865, 0.053191489,
0.070921986, 0.088652482, 0.077570922, 0.055407801, 0.035460993, 0.019946809])
tier2_load_curve = np.array([0.010224949, 0.010224949, 0.010224949, 0.010224949, 0.019427403, 0.034764826,
0.040899796, 0.040899796, 0.040899796, 0.040899796, 0.040899796, 0.040899796,
0.040899796, 0.040899796, 0.040899796, 0.040899796, 0.04601227, 0.056237219,
0.081799591, 0.102249489, 0.089468303, 0.06390593, 0.038343558, 0.017893661])
tier1_load_curve = np.array([0, 0, 0, 0, 0.012578616, 0.031446541, 0.037735849, 0.037735849, 0.037735849,
0.037735849, 0.037735849, 0.037735849, 0.037735849, 0.037735849, 0.037735849,
0.037735849, 0.044025157, 0.062893082, 0.100628931, 0.125786164, 0.110062893,
0.078616352, 0.044025157, 0.012578616])
if energy_per_hh < 75:
load_curve = tier1_load_curve * energy_per_hh / 365
elif energy_per_hh < 365:
load_curve = tier2_load_curve * energy_per_hh / 365
elif energy_per_hh < 1241:
load_curve = tier3_load_curve * energy_per_hh / 365
elif energy_per_hh < 2993:
load_curve = tier4_load_curve * energy_per_hh / 365
else:
load_curve = tier5_load_curve * energy_per_hh / 365
def pv_diesel_capacities(pv_capacity, battery_size, diesel_capacity, initital_condition=False):
condition = 1
ren_limit = 0
break_hour = 17
while condition > lpsp:
dod = np.zeros(24)
battery_use = np.zeros(24) # Stores the amount of battery discharge during the day
fuel_result = 0
battery_life = 0
soc = 0.5
unmet_demand = 0
annual_diesel_gen = 0
for i in range(8760):
diesel_gen = 0
battery_use[hour_numbers[i]] = 0.0002 * soc # Battery self-discharge
soc *= 0.9998
t_cell = temp[i] + 0.0256 * ghi[i] # PV cell temperature
pv_gen = pv_capacity * 0.9 * ghi[i] / 1000 * (
1 - k_t * (t_cell - 298.15)) # PV generation in the hour
net_load = load_curve[hour_numbers[i]] - pv_gen # remaining load not met by PV panels
if net_load <= 0: # If pv generation is greater than load the excess energy is stored in battery
if battery_size > 0:
soc -= n_chg * net_load / battery_size
net_load = 0
max_diesel = min(diesel_capacity, net_load + (1 - soc) * battery_size / n_chg)
                    # Maximum amount of diesel needed to supply load and charge battery, limited by rated diesel capacity
                    # Below is the dispatch strategy for the diesel generator, as described in the accompanying Word document
if break_hour + 1 > hour_numbers[i] > 4 and net_load > soc * battery_size * n_dis:
diesel_gen = min(diesel_capacity, max(0.4 * diesel_capacity, net_load))
elif 23 > hour_numbers[i] > break_hour and max_diesel > 0.40 * diesel_capacity:
diesel_gen = max_diesel
elif n_dis * soc * battery_size < net_load:
diesel_gen = max(0.4 * diesel_capacity, max_diesel)
if diesel_gen > 0: # Fuel consumption is stored
fuel_result += diesel_capacity * 0.08145 + diesel_gen * 0.246
annual_diesel_gen += diesel_gen
if (net_load - diesel_gen) > 0: # If diesel generator cannot meet load the battery is also used
if battery_size > 0:
soc -= (net_load - diesel_gen) / n_dis / battery_size
battery_use[hour_numbers[i]] += (net_load - diesel_gen) / n_dis / battery_size
if soc < 0: # If battery and diesel generator cannot supply load there is unmet demand
unmet_demand -= soc * n_dis * battery_size
battery_use[hour_numbers[i]] += soc
soc = 0
else: # If diesel generation is larger than load the excess energy is stored in battery
if battery_size > 0:
soc += (diesel_gen - net_load) * n_chg / battery_size
if battery_size == 0: # If no battery and diesel generation < net load there is unmet demand
unmet_demand += net_load - diesel_gen
soc = min(soc, 1) # Battery state of charge cannot be >1
                    dod[hour_numbers[i]] = 1 - soc  # The depth of discharge in every hour of the day is stored
if hour_numbers[i] == 23 and max(dod) > 0: # The battery wear during the last day is calculated
battery_life += sum(battery_use) / (531.52764 * max(0.1, (max(dod) * dod_max)) ** -1.12297)
condition = unmet_demand / energy_per_hh # lpsp is calculated
if initital_condition: # During the first calculation the minimum PV size with no diesel generator is calculated
if condition > lpsp:
pv_capacity *= (1 + unmet_demand / energy_per_hh / 4)
elif condition > lpsp or (annual_diesel_gen > (1 - ren_limit) * energy_per_hh): # For the remaining configurations the solution is considered unusable if lpsp criteria is not met
diesel_capacity = 99
condition = 0
battery_life = 1
elif condition < lpsp: # If lpsp criteria is met the expected battery life is stored
battery_life = | np.round(1 / battery_life) | numpy.round |
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from scipy.sparse import csr_matrix, identity, kron
from scipy.sparse.linalg import eigs, eigsh
import itertools
from scipy.linalg import block_diag, eig, expm, eigh
from scipy.sparse import save_npz, load_npz, csr_matrix, csc_matrix
import scipy.sparse as sp
from scipy.special import binom
import yaml
import copy
import warnings
import os
import time
from .Hamiltonians import DisplacedAnharmonicOscillator, PolymerVibrations, Polymer, DiagonalizeHamiltonian, LadderOperators
from .general_Liouvillian_classes import LiouvillianConstructor
class OpenPolymer(Polymer,LiouvillianConstructor):
def __init__(self,site_energies,site_couplings,dipoles):
"""Extends Polymer object to an open systems framework,
using the Lindblad formalism to describe bath coupling
"""
super().__init__(site_energies,site_couplings,dipoles)
# Values that need to be set
self.optical_dephasing_gamma = 0
self.optical_relaxation_gamma = 0
self.site_to_site_dephasing_gamma = 0
self.site_to_site_relaxation_gamma = 0
self.exciton_relaxation_gamma = 0
self.exciton_exciton_dephasing_gamma = 0
self.kT = 0
def optical_dephasing_operator(self):
total_deph = self.occupied_list[0].copy()
for i in range(1,len(self.occupied_list)):
total_deph += self.occupied_list[i]
return total_deph
def optical_dephasing_instructions(self):
O = self.optical_dephasing_operator()
gamma = self.optical_dephasing_gamma
return self.make_Lindblad_instructions(gamma,O)
def optical_dephasing_Liouvillian(self):
instructions = self.optical_dephasing_instructions()
return self.make_Liouvillian(instructions)
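    # Editor's sketch (illustrative only; assumes the Lindblad helpers
    # inherited from LiouvillianConstructor behave as their names suggest):
    #
    #   polymer = OpenPolymer(site_energies, site_couplings, dipoles)
    #   polymer.optical_dephasing_gamma = 0.1
    #   L_deph = polymer.optical_dephasing_Liouvillian()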
def boltzmann_factors(self,E1,E2):
if E1 == E2:
return 0.5,0.5
if E1 < E2:
return self.boltzmann_factors_ordered_inputs(E1,E2)
else:
E1_to_E2, E2_to_E1 = self.boltzmann_factors_ordered_inputs(E2,E1)
return E2_to_E1, E1_to_E2
def boltzmann_factors_ordered_inputs(self,E1,E2):
"""E1 must be less than E2"""
if self.kT == 0:
return 1, 0
Z = | np.exp(-E1/self.kT) | numpy.exp |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pickle
from utils import *
def get_results(dataset='adni', feats='dma', sep='random'):
dict = {}
for i in np.concatenate([[1.0], | np.arange(10, 101, 10) | numpy.arange |
import numpy as np
from scipy.linalg import eig
from pylsa.utils import *
from pylsa.transforms import *
from pylsa.dmsuite import *
from pylsa.decorators import *
import matplotlib.pyplot as plt
#-------------------------------------------------------------------
@io_decorator
def solve_rbc1d(Ny=100,Ra=1708,Pr=1,alpha=3.14,plot=True ):
#----------------------- Parameters ---------------------------
nu = Pr
kappa = 1
beta = Pr*Ra
    #----------------- discrete diff matrices -------------------
_,D1y = chebdif(Ny-1,1) # chebyshev in y-direction
y,D2y = chebdif(Ny-1,2)
#Transform to y=[0,1]
y,D1y,D2y = chebder_transform(y,D1y,D2y, zerotoone_transform)
N, I= Ny, np.eye(Ny)
#----------------------- mean flow -----------------------------
# RBC FLOW
U = U_y = 0.0*y
T = -1.0*y+1 ; T_y = D1y@T;
# Derivatives
UU, UU_y = np.diag(U), np.diag(U_y)
_ , TT_y = np.diag(T), np.diag(T_y)
#-------------------- construct matrix ------------------------
L2d = UU*1.j*alpha + nu*(alpha**2*I - D2y)
K2d = UU*1.j*alpha + kappa*(alpha**2*I - D2y)
#lhs
L11 = 1*L2d ; L12 = 0*UU_y ; L13 = 1.j*alpha*I; L14 = 0*I
L21 = 0*I ; L22 = 1*L2d ; L23 = 1*D1y ; L24 = -1*I*beta
L31 = 1.j*alpha*I ; L32 = 1*D1y ; L33 = 0*I ; L34 = 0*I
L41 = 0*I ; L42 = 1*TT_y ; L43 = 0*I ; L44 = 1*K2d
#rhs
M11 = 1*I ; M12 = 0*I ; M13 = 0*I ; M14 = 0*I
M21 = 0*I ; M22 = 1*I ; M23 = 0*I ; M24 = 0*I
M31 = 0*I ; M32 = 0*I ; M33 = 0*I ; M34 = 0*I
M41 = 0*I ; M42 = 0*I ; M43 = 0*I ; M44 = 1*I
#-------------------- boundary conditions ----------------------
L1 = np.block([ [L11,L12,L13,L14] ]); M1 = np.block([ [M11,M12,M13,M14] ]) #u
L2 = np.block([ [L21,L22,L23,L24] ]); M2 = np.block([ [M21,M22,M23,M24] ]) #v
L3 = np.block([ [L31,L32,L33,L34] ]); M3 = np.block([ [M31,M32,M33,M34] ]) #p
L4 = np.block([ [L41,L42,L43,L44] ]); M4 = np.block([ [M41,M42,M43,M44] ]) #T
yi = np.array( [*range(Ny)] ); yi = yi.flatten()
# u
bcA = np.argwhere( (np.abs(yi)==0) | (np.abs(yi)==Ny-1) ).flatten() # pos
L1[bcA,:] = np.block([ [1.*I, 0.*I, 0.*I, 0.*I ] ])[bcA,:] # dirichlet
M1[bcA,:] = np.block([ [0.*I, 0.*I, 0.*I, 0.*I ] ])[bcA,:]
# v
bcA = np.argwhere( (np.abs(yi)==0) | (np.abs(yi)==Ny-1) ).flatten() # pos
L2[bcA,:] = np.block([ [0.*I, 1.*I, 0.*I, 0.*I ] ])[bcA,:] # dirichlet
M2[bcA,:] = np.block([ [0.*I, 0.*I, 0.*I, 0.*I ] ])[bcA,:]
#L2[bcB,:] = np.block([ [0.*I,1.*D1y, 0.*I, 0.*I ] ])[bcA,:] # neumann
#M2[bcB,:] = np.block([ [0.*I, 0.*I, 0.*I, 0.*I ] ])[bcA,:]
# p
bcA = np.argwhere( (np.abs(yi)==0) | (np.abs(yi)==Ny-1) ).flatten()
L3[bcA,:] = np.block([ [0.*I, 0.*I,1.*D1y, 0.*I ] ])[bcA,:] # neumann
M3[bcA,:] = np.block([ [0.*I, 0.*I, 0.*I , 0.*I ] ])[bcA,:]
# T
bcA = np.argwhere( (np.abs(yi)==0) | (np.abs(yi)==Ny-1) ).flatten() # pos
L4[bcA,:] = np.block([ [0.*I, 0.*I, 0.*I, 1.*I ] ])[bcA,:] # dirichlet
M4[bcA,:] = np.block([ [0.*I, 0.*I, 0.*I, 0.*I ] ])[bcA,:]
#----------------------- solve EVP -----------------------------
L = np.block([ [L1], [L2], [L3], [L4]])
M = np.block([ [M1], [M2], [M3], [M4]])
evals,evecs = eig(L,1.j*M)
# Post Process egenvalues
evals, evecs = remove_evals(evals,evecs,higher=400)
evals, evecs = sort_evals(evals,evecs,which="I")
#--------------------- post-processing -------------------------
if plot:
blue = (0/255, 137/255, 204/255)
red = (196/255, 0, 96/255)
yel = (230/255,159/255,0)
fig,(ax0,ax1,ax2) = plt.subplots(ncols=3, figsize=(8,3))
ax0.set_title("Eigenvalues")
ax0.set_xlim(-1,1); ax0.grid(True)
ax0.scatter(np.real(evals[:]),np.imag(evals[:]), marker="o", edgecolors="k", s=60, facecolors='none');
ax1.set_ylabel("y"); ax1.set_title("Largest Eigenvector")
ax1.plot(np.abs(evecs[0*N:1*N,-1:]),y, marker="", color=blue, label=r"$|u|$")
ax1.plot(np.abs(evecs[1*N:2*N,-1:]),y, marker="", color=red, label=r"$|v|$")
#ax2.plot(np.abs(evecs[2*N:3*N,-1:]),y, marker="", color="k" , label=r"$|p|$")
ax1.legend(loc="lower right")
ax2.set_ylabel("y"); ax2.set_title("Largest Eigenvector")
ax2.plot(np.abs(evecs[3*N:4*N,-1:]),y, marker="", color=yel , label=r"$|T|$")
ax2.legend()
plt.tight_layout();
figname="rbc1d.png"
print("Figure saved to {:}".format(figname))
fig.savefig(figname)
#plt.show()
return evals,evecs
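# Editor's sketch (illustrative call only; assumes io_decorator passes the
# keyword arguments straight through to the function above):
#
#   evals, evecs = solve_rbc1d(Ny=50, Ra=1708, Pr=1, alpha=3.117, plot=False)
#   # the plotting code above treats evals[-1] as the leading mode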
#-------------------------------------------------------------------
@io_decorator
def solve_rbc1d_neutral(Ny=100,Pr=1,alpha=3.14,plot=True ):
#----------------------- Parameters ---------------------------
nu = Pr
kappa = 1
beta = Pr
    #----------------- discrete diff matrices -------------------
_,D1y = chebdif(Ny-1,1) # chebyshev in y-direction
y,D2y = chebdif(Ny-1,2)
#Transform to y=[0,1]
y,D1y,D2y = chebder_transform(y,D1y,D2y, zerotoone_transform)
N, I= Ny, np.eye(Ny)
#----------------------- mean flow -----------------------------
# RBC FLOW
U = U_y = 0.0*y
T = -1.0*y+1 ; T_y = D1y@T;
# Derivatives
UU, UU_y = np.diag(U), np.diag(U_y)
_ , TT_y = | np.diag(T) | numpy.diag |
'''
Creating a toy example to test the kernelised dynamical system code.
We have a system that evolves deterministically through four states:
state 1 -> state 2/state 3 -> state 4
state 4 is an absorbing state
A time-series belongs to one of two types, A or B. Both A and B have rare and common variants. There are three actions available at each stage, a, b, and c. The rewards are as follows:
state 1, a = -10 , b = 5 , c = 0
state 2, a = 5 , b = -10 , c = 0
state 3, a = 5 if A, -10 if B; b = 5 if B, -10 if A; c = 0
state 3 persists for one step, so everyone follows 0,1/2,3
We observe the rewards (as observations, just made it easy for me)
as well as an observation that depends only on the time-series' type (the obs mean which determines whether we have A or B):
obs dim #1: either 0, 1, or 3 if type A; 2, 4, or 5 if type B. Designed
so that in the training set we see mostly 0, 1 and 4, 5; if a kernel
maps each observation to its nearest training value, it will fail on the
rare observations (3 and 2) because they will be mapped incorrectly.
The output is the sequence of observations, actions, and rewards
stored in the fencepost way
'''
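# Illustrative sketch only (not used by the generator below): a direct rendering
# of the reward rules listed in the docstring, assuming 1-indexed states and
# actions 'a'/'b'/'c'. The helper name `toy_reward` and the 0 reward in the
# absorbing state 4 are assumptions made for illustration.
def toy_reward(state, action, seq_type):
    if action == 'c' or state == 4:    # action c always gives 0; state 4 absorbs
        return 0
    if state == 1:
        return -10 if action == 'a' else 5
    if state == 2:
        return 5 if action == 'a' else -10
    if state == 3:                      # reward depends on the sequence type
        good = 'a' if seq_type == 'A' else 'b'
        return 5 if action == good else -10
    return 0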
import numpy as np
import numpy.random as npr
import cPickle as pkl
import os
# create the data set
def create_toy_data(train_data, test_data):
sequence_count = 250
sequence_length = 4 # M = sequence_count * sequence_length
# where to store things
state_set = np.zeros((0, 1))
obs_set = np.zeros((0, 1))
action_set = np.zeros((0, 1))
reward_set = np.zeros((0, 1))
dataindex_set = np.zeros((0, 1))
optimal_set = np.zeros((0, 1))
# pre-decide the sequence types
rare_count = 20
# initialize all sequences to the same type, e.g. A
sequence_type = np.ones(sequence_count)
# make the second half of the sequences the other type, B
sequence_type[sequence_count // 2:] = -1.0
if test_data is True:
skip = 5
else:
skip = 1
my_start = 0
for rare_index in range(rare_count):
my_end = my_start + skip
sequence_type[int(my_start):int(my_end)] = rare_index + 2
sequence_type[(-1 * int(my_end + 1)):(-1 * int(my_start + 1))] = -1 * int(rare_index + 2)
my_start = my_end
min_obs = np.min(sequence_type)
#
#
# 0======================0
# | Mesh utilities |
# 0======================0
#
#
# ----------------------------------------------------------------------------------------------------------------------
#
# functions related to meshes
#
# ----------------------------------------------------------------------------------------------------------------------
#
# <NAME> - 10/02/2017
#
# ----------------------------------------------------------------------------------------------------------------------
#
# Imports and global variables
# \**********************************/
#
# Basic libs
import os
os.environ.update(OMP_NUM_THREADS='1',
OPENBLAS_NUM_THREADS='1',
NUMEXPR_NUM_THREADS='1',
MKL_NUM_THREADS='1',)
import numpy as np
import time
# ----------------------------------------------------------------------------------------------------------------------
#
# Functions
# \***************/
#
def rasterize_mesh(vertices, faces, dl, verbose=False):
"""
Creation of point cloud from mesh via rasterization. All models are rescaled to fit in a 1 meter radius sphere
:param vertices: array of vertices
:param faces: array of faces
:param dl: parameter controlling density. Distance between each point
:param verbose: display parameter
:return: point cloud
"""
######################################
# Eliminate useless faces and vertices
######################################
# 3D coordinates of faces
faces3D = vertices[faces, :]
sides = np.stack([faces3D[:, i, :] - faces3D[:, i - 1, :] for i in [2, 0, 1]], axis=1)
# Indices of big enough faces
keep_bool = np.min(np.linalg.norm(sides, axis=-1), axis=-1) > 1e-9
faces = faces[keep_bool]
##################################
# Place random points on each face
##################################
# 3D coordinates of faces
faces3D = vertices[faces, :]
# Area of each face
opposite_sides = np.stack([faces3D[:, i, :] - faces3D[:, i - 1, :] for i in [2, 0, 1]], axis=1)
lengths = np.linalg.norm(opposite_sides, axis=-1)
# Points for each face
all_points = []
all_vert_inds = []
for face_verts, face, l, sides in zip(faces, faces3D, lengths, opposite_sides):
# All points generated for this face
face_points = []
# Safe check for null faces
if np.min(l) < 1e-9:
continue
# Smallest faces, only place one point in the center
if np.max(l) < dl:
face_points.append(np.mean(face, axis=0))
continue
# Chose indices so that A is the largest angle
A_idx = np.argmax(l)
"""
Code to plot results of a pair of tracker experiments hwere particles were
released near the bottom of Admiralty Inlet North (job=aiN) at the start of ebb
during Spring or neap. We used a high-resolution nested model.
"""
import matplotlib.pyplot as plt
import xarray as xr
import numpy as np
from lo_tools import Lfun
from lo_tools import plotting_functions as pfun
Ldir = Lfun.Lstart()
# Choose an experiment and release to plot.
in_dir0 = Ldir['LOo'] / 'tracks'
# get grid info
fng = in_dir0 / 'aiN_3d_sh7_Spring' / 'grid.nc'
dsg = xr.open_dataset(fng)
lonp, latp = pfun.get_plon_plat(dsg.lon_rho.values, dsg.lat_rho.values)
hh = dsg.h.values
maskr = dsg.mask_rho.values
# get tracker output
fnS = in_dir0 / 'aiN_3d_sh7_Spring' / 'release_2018.01.02.nc'
fnN = in_dir0 / 'aiN_3d_sh12_Neap' / 'release_2018.01.09.nc'
# We know that these had 300 particles each, and ran for 3 days minus the number of
# hours in sh#, so we will just plot the first 50 hours. We also know they were saved at
# 12 saves per hour, so we will just look at the first 600 time points.
for fnr in [fnS, fnN]:
V = dict()
vn_list = ['lon', 'lat', 'z', 'salt', 'temp']
# extract variables and clip
dsr = xr.open_dataset(fnr)
nn = 600
for vn in vn_list:
V[vn] = dsr[vn][:nn,:].values
# make a mask that is False from the time a particle first leaves the domain
# and onwards
AA = [dsg.lon_rho.values[0,2], dsg.lon_rho.values[0,-3],
dsg.lat_rho.values[2,0], dsg.lat_rho.values[-3,0]]
ib_mask = np.ones(V['lon'].shape, dtype=bool)
ib_mask[V['lon'] < AA[0]] = False
ib_mask[V['lon'] > AA[1]] = False
ib_mask[V['lat'] < AA[2]] = False
ib_mask[V['lat'] > AA[3]] = False
NT, NP = V['lon'].shape
for pp in range(NP):
tt = np.argwhere(ib_mask[:,pp]==False)
import os
import mrcfile
import numpy as np
import pandas as pd
import networkx as nx
from igraph import Graph
from scipy import ndimage as ndi
from skimage import transform, measure
import tkinter as tk
from tkinter import ttk
import tkinter.filedialog
import matplotlib
from matplotlib import cm
import matplotlib.pyplot as plt
from matplotlib.figure import Figure
import matplotlib.gridspec as gridspec
from mpl_toolkits.axes_grid1 import ImageGrid
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk
import warnings
warnings.filterwarnings("ignore", category=UserWarning)
matplotlib.use('TkAgg')
class SLICEM_GUI(tk.Tk):
def __init__(self, *args, **kwargs):
tk.Tk.__init__(self, *args, **kwargs)
tk.Tk.wm_title(self, "SLICEM_GUI")
tabControl = ttk.Notebook(self)
input_tab = ttk.Frame(tabControl)
network_tab = ttk.Frame(tabControl)
projection_tab = ttk.Frame(tabControl)
output_tab = ttk.Frame(tabControl)
tabControl.add(input_tab, text='Inputs')
tabControl.add(network_tab, text='Network Plot')
tabControl.add(projection_tab, text='Projection Plot')
tabControl.add(output_tab, text='Outputs')
tabControl.pack(expand=1, fill="both")
self.cwd = os.getcwd()
######################### INPUT TAB ##############################
mrc_label = ttk.Label(input_tab, text="path to 2D class averages (mrcs): ")
mrc_label.grid(row=0, column=0, sticky=tk.E, pady=10)
self.mrc_entry = ttk.Entry(input_tab, width=20)
self.mrc_entry.grid(row=0, column=1, sticky=tk.W, pady=10)
self.mrc_button = ttk.Button(
input_tab,
text="Browse",
command=lambda: self.set_text(
text=self.askfile(),
entry=self.mrc_entry
)
)
self.mrc_button.grid(row=0, column=2, sticky=tk.W, pady=2)
scores_label = ttk.Label(input_tab, text="path to SLICEM scores: ")
scores_label.grid(row=1, column=0, sticky=tk.E, pady=10)
self.score_entry = ttk.Entry(input_tab, width=20)
self.score_entry.grid(row=1, column=1, sticky=tk.W, pady=10)
self.score_button = ttk.Button(
input_tab,
text="Browse",
command=lambda: self.set_text(
text=self.askfile(),
entry=self.score_entry
)
)
self.score_button.grid(row=1, column=2, sticky=tk.W, pady=2)
scale_label = ttk.Label(input_tab, text="scale factor (if used): ")
scale_label.grid(row=2, column=0, sticky=tk.E, pady=10)
self.scale_entry = ttk.Entry(input_tab, width=5)
self.scale_entry.grid(row=2, column=1, sticky=tk.W, pady=10)
self.load_button = ttk.Button(
input_tab,
text='Load Inputs',
command=lambda: self.load_inputs(
self.mrc_entry.get(),
self.score_entry.get(),
self.scale_entry.get()
)
)
self.load_button.grid(row=3, column=1, pady=20)
############################################################################
######################### NETWORK TAB ##############################
network_tab.grid_rowconfigure(0, weight=1)
network_tab.grid_columnconfigure(0, weight=1)
#TOP FRAME
nettopFrame = tk.Frame(network_tab, bg='lightgrey', width=600, height=400)
nettopFrame.grid(row=0, column=0, sticky='nsew')
self.netcanvas = None
self.nettoolbar = None
#BOTTOM FRAME
netbottomFrame = ttk.Frame(network_tab, width=600, height=100)
netbottomFrame.grid(row=1, column=0, sticky='nsew')
netbottomFrame.grid_propagate(0)
self.detection = tk.StringVar(network_tab)
self.detection.set('walktrap')
comm_label = ttk.Label(netbottomFrame, text='community detection:')
comm_label.grid(row=0, column=0, sticky=tk.E)
self.community_wt = ttk.Radiobutton(
netbottomFrame,
text='walktrap',
variable=self.detection,
value='walktrap'
)
self.community_wt.grid(row=0, column=1, padx=5, sticky=tk.W)
n_clusters_label = ttk.Label(netbottomFrame, text='# of clusters (optional):')
n_clusters_label.grid(row=0, column=2, sticky=tk.E)
self.n_clust = ttk.Entry(netbottomFrame, width=6)
self.n_clust.grid(row=0, column=3, padx=5, sticky=tk.W)
self.wt_steps = ttk.Entry(netbottomFrame, width=6)
self.wt_steps.insert(0, 4)
# self.wt_steps.grid(row=0, column=2, padx=50, sticky=tk.W)
#EV: Errors w/ betweenness iGraph version, temporarily remove
#self.community_eb = ttk.Radiobutton(
# netbottomFrame,
# text='betweenness',
# variable=self.detection,
# value='betweenness'
#)
#self.community_eb.grid(row=0, column=2, padx=3, sticky=tk.W)
self.network = tk.StringVar(network_tab)
self.network.set('knn')
net_label = ttk.Label(netbottomFrame, text='construct network from:')
net_label.grid(row=1, column=0, sticky=tk.E)
self.net1 = ttk.Radiobutton(
netbottomFrame,
text='nearest neighbors',
variable=self.network,
value='knn'
)
self.net1.grid(row=1, column=1, padx=5, sticky=tk.W)
self.net2 = ttk.Radiobutton(
netbottomFrame,
text='top n scores',
variable=self.network,
value='top_n'
)
self.net2.grid(row=2, column=1, padx=5, sticky=tk.W)
knn_label = ttk.Label(netbottomFrame, text='# of k:')
knn_label.grid(row=1, column=2, sticky=tk.E)
self.knn_entry = ttk.Entry(netbottomFrame, width=6)
self.knn_entry.insert(0, 0)
self.knn_entry.grid(row=1, column=3, padx=5, sticky=tk.W)
topn_label = ttk.Label(netbottomFrame, text='# of n:')
topn_label.grid(row=2, column=2, sticky=tk.E)
self.topn_entry = ttk.Entry(netbottomFrame, width=6)
self.topn_entry.insert(0, 0)
self.topn_entry.grid(row=2, column=3, padx=5, sticky=tk.W)
self.cluster = ttk.Button(
netbottomFrame,
width=12,
text='cluster',
command=lambda: self.slicem_cluster(
self.detection.get(),
self.network.get(),
int(self.wt_steps.get()),
self.n_clust.get(),
int(self.knn_entry.get()),
int(self.topn_entry.get()),
self.drop_nodes.get()
)
)
self.cluster.grid(row=0, column=4, sticky=tk.W, padx=5, pady=2)
self.net_plot = ttk.Button(
netbottomFrame,
width=12,
text='plot network',
command=lambda: self.plot_slicem_network(
self.network.get(),
nettopFrame)
)
self.net_plot.grid(row=1, column=4, sticky=tk.W, padx=5, pady=2)
self.tiles = ttk.Button(
netbottomFrame,
width=12,
text='plot 2D classes',
command=lambda: self.plot_tiles()
)
self.tiles.grid(row=2, column=4, sticky=tk.W, padx=5, pady=2)
drop_label = ttk.Label(netbottomFrame, text='remove nodes')
drop_label.grid(row=0, column=5)
self.drop_nodes = ttk.Entry(netbottomFrame, width=15)
self.drop_nodes.grid(row=1, column=5, sticky=tk.W, padx=10)
############################################################################
######################### PROJECTION TAB ##########################
projection_tab.grid_rowconfigure(0, weight=1)
projection_tab.grid_columnconfigure(0, weight=1)
#TOP FRAME
projtopFrame = tk.Frame(projection_tab, bg='lightgrey', width=600, height=400)
projtopFrame.grid(row=0, column=0, sticky='nsew')
projtopFrame.grid_rowconfigure(0, weight=1)
projtopFrame.grid_columnconfigure(0, weight=1)
self.projcanvas = None
self.projtoolbar = None
#BOTTOM FRAME
projbottomFrame = ttk.Frame(projection_tab, width=600, height=50)
projbottomFrame.grid(row=1, column=0, sticky='nsew')
projbottomFrame.grid_propagate(0)
avg1_label = ttk.Label(projbottomFrame, text='class average 1: ')
avg1_label.grid(row=0, column=0, sticky=tk.E, padx=2)
self.avg1 = ttk.Entry(projbottomFrame, width=5)
self.avg1.grid(row=0, column=1, padx=2)
avg2_label = ttk.Label(projbottomFrame, text='class average 2: ')
avg2_label.grid(row=0, column=2, sticky=tk.E, padx=2)
self.avg2 = ttk.Entry(projbottomFrame, width=5)
self.avg2.grid(row=0, column=3, padx=2)
self.proj_button = ttk.Button(
projbottomFrame,
text='plot projections',
command=lambda: self.plot_projections(
int(self.avg1.get()),
int(self.avg2.get()),
projtopFrame
)
)
self.proj_button.grid(row=0, column=4, padx=20)
self.overlay_button = ttk.Button(
projbottomFrame,
text='plot overlap',
command=lambda: self.overlay_lines(
int(self.avg1.get()),
int(self.avg2.get()),
self.ft_check_var.get(),
projtopFrame
)
)
self.overlay_button.grid(row=0, column=5, padx=12)
self.ft_check_var = tk.BooleanVar()
self.ft_check_var.set(0)
self.ft_check = ttk.Checkbutton(projbottomFrame, text='FT plot', variable=self.ft_check_var)
self.ft_check.grid(row=0, column=6, padx=12)
################################################################################
########################### OUTPUT TAB #################################
star_label = ttk.Label(output_tab, text='path to corresponding star file (star): ')
star_label.grid(row=0, column=0, sticky=tk.E, pady=10)
self.star_entry = ttk.Entry(output_tab, width=20)
self.star_entry.grid(row=0, column=1, stick=tk.W, pady=10)
self.star_button = ttk.Button(
output_tab,
text="Browse",
command=lambda: self.set_text(
text=self.askfile(),
entry=self.star_entry
)
)
self.star_button.grid(row=0, column=2, sticky=tk.W, pady=2)
outdir_label = ttk.Label(output_tab, text='directory to save files in: ')
outdir_label.grid(row=1, column=0, sticky=tk.E, pady=10)
self.out_entry = ttk.Entry(output_tab, width=20)
self.out_entry.grid(row=1, column=1, sticky=tk.W, pady=10)
self.out_button = ttk.Button(
output_tab,
text="Browse",
command=lambda: self.set_text(
text=self.askpath(),
entry=self.out_entry
)
)
self.out_button.grid(row=1, column=2, sticky=tk.W, pady=2)
self.write_button = ttk.Button(
output_tab,
text='Write Star Files',
command=lambda: self.write_star_files(
self.star_entry.get(),
self.out_entry.get()
)
)
self.write_button.grid(row=2, column=1, pady=20)
self.write_edges = ttk.Button(
output_tab,
text='Write Edge List',
command=lambda: self.write_edge_list(
self.network.get(),
self.out_entry.get()
)
)
self.write_edges.grid(row=3, column=1, pady=10)
################################################################################
############################### GUI METHODS ################################
def load_scores(self, score_file):
complete_scores = {}
with open(score_file, 'r') as f:
next(f)
for line in f:
l = line.rstrip('\n').split('\t')
complete_scores[(int(l[0]), int(l[2]))] = (int(l[1]), int(l[3]), float(l[4]))
return complete_scores
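# File-format note: given the parsing above, the scores file is assumed to be
# tab-separated with one header row and the columns
#     projection_1   angle_1   projection_2   angle_2   score
# so that complete_scores[(p1, p2)] = (angle_1, angle_2, score).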
def load_class_avg(self, mrcs, factor):
"""read, scale and extract class averages"""
global shape
projection_2D = {}
extract_2D = {}
if len(factor) == 0: # Empty entry, set factor 1
factor = 1
with mrcfile.open(mrcs) as mrc:
for i, data in enumerate(mrc.data):
projection_2D[i] = data
mrc.close()
shape = transform.rotate(projection_2D[0].copy(), 45, resize=True).shape[0]
for k, avg in projection_2D.items():
if factor == 1:
extract_2D[k] = extract_class_avg(avg)
else:
scaled_img = transform.rescale(
avg,
scale=(1/float(factor)),
anti_aliasing=True,
multichannel=False, # Add to suppress warning
mode='constant' # Add to suppress warning
)
extract_2D[k] = extract_class_avg(scaled_img)
return projection_2D, extract_2D
def load_inputs(self, mrc_entry, score_entry, scale_entry):
global projection_2D, extract_2D, num_class_avg, complete_scores
projection_2D, extract_2D = self.load_class_avg(mrc_entry, scale_entry)
num_class_avg = len(projection_2D)
complete_scores = self.load_scores(score_entry)
print('Inputs Loaded!')
def askfile(self):
file = tk.filedialog.askopenfilename(initialdir=self.cwd)
return file
def askpath(self):
path = tk.filedialog.askdirectory(initialdir=self.cwd)
return path
def set_text(self, text, entry):
entry.delete(0, tk.END)
entry.insert(0, text)
def show_dif_class_msg(self):
tk.messagebox.showwarning(None, 'Select different class averages')
def show_cluster_fail(self):
tk.messagebox.showwarning(None, 'Clustering failed.\nTry adjusting # of clusters\n or # of edges')
def show_drop_list_msg(self):
tk.messagebox.showwarning(None, 'use comma separated list\nfor nodes to drop \ne.g. 1, 2, 3')
def slicem_cluster(self, community_detection, network_from, wt_steps, n_clust, neighbors, top, drop_nodes):
"""construct graph and get colors for plotting"""
#TODO: change to prevent cluster on exception
global scores_update, drop, flat, clusters, G, colors
if len(n_clust) == 0:
n_clust = None # Cluster at optimum modularity
else:
n_clust = int(n_clust)
if len(drop_nodes) > 0:
try:
drop = [int(n) for n in drop_nodes.split(',')]
print('dropping nodes:', drop)
scores_update = {}
for pair, score in complete_scores.items():
if pair[0] in drop or pair[1] in drop:
continue  # skip pairs that touch a dropped node
else:
scores_update[pair] = score
except:
self.show_drop_list_msg()
else:
drop = []
scores_update = complete_scores
flat, clusters, G = self.create_network(
community_detection=community_detection,
wt_steps=wt_steps,
n_clust=n_clust,
network_from=network_from,
neighbors=neighbors,
top=top
)
colors = get_plot_colors(clusters, G)
print('clusters computed!')
def create_network(self, community_detection, wt_steps, n_clust, network_from, neighbors, top):
"""get new clusters depending on input options"""
if network_from == 'top_n':
sort_by_scores = []
for pair, score in scores_update.items():
sort_by_scores.append([pair[0], pair[1], score[2]])
top_n = sorted(sort_by_scores, reverse=False, key=lambda x: x[2])[:top]
# Convert from distance to similarity for edge
for score in top_n:
c = 1/(1 + score[2])
score[2] = c
flat = [tuple(pair) for pair in top_n]
elif network_from == 'knn':
flat = []
projection_knn = nearest_neighbors(neighbors=neighbors)
for projection, knn in projection_knn.items():
for n in knn:
flat.append((projection, n[0], abs(n[3]))) # p1, p2, score
clusters = {}
g = Graph.TupleList(flat, weights=True)
if community_detection == 'walktrap':
try:
wt = Graph.community_walktrap(g, weights='weight', steps=wt_steps)
cluster_dendrogram = wt.as_clustering(n_clust)
except:
self.show_cluster_fail()
elif community_detection == 'betweenness':
try:
ebs = Graph.community_edge_betweenness(g, weights='weight', directed=True)
cluster_dendrogram = ebs.as_clustering(n_clust)
except:
self.show_cluster_fail()
for community, projection in enumerate(cluster_dendrogram.subgraphs()):
clusters[community] = projection.vs['name']
#convert node IDs back to ints
for cluster, nodes in clusters.items():
clusters[cluster] = sorted([int(node) for node in nodes])
remove_outliers(clusters)
clustered = []
for cluster, nodes in clusters.items():
for n in nodes:
clustered.append(n)
clusters['singles'] = [] # Add singles to clusters if not in top n scores
clusters['removed'] = []
for node in projection_2D:
if node not in clustered and node not in drop:
clusters['singles'].append(node)
elif node in drop:
clusters['removed'].append(node)
G = nx.Graph()
for pair in flat:
G.add_edge(int(pair[0]), int(pair[1]), weight=pair[2])
#if you want to see directionality in the networkx plot
#G = nx.MultiDiGraph(G)
#adds singles if not in top n scores
for node_key in projection_2D:
if node_key not in G.nodes:
G.add_node(node_key)
return flat, clusters, G
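# Returned objects: `flat` is the (node, node, weight) edge list used to build
# the graph, `clusters` maps community id -> class averages (plus the special
# keys 'singles', 'removed' and, after remove_outliers, 'outliers'), and `G` is
# the corresponding networkx Graph with singleton nodes added.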
def plot_slicem_network(self, network_from, frame):
#TODO: adjust k, scale for clearer visualization
G_subset = G.copy()
color_dict = {i: color for i, color in enumerate(colors)}
node_dict = {node: i for i, node in enumerate(G.nodes)}
for d in drop:
G_subset.remove_node(d)
color_dict.pop(node_dict[d])
color_subset = [color for k, color in color_dict.items()]
if network_from == 'knn':
positions = nx.spring_layout(G_subset, weight='weight', k=0.3, scale=3.5)
else:
positions = nx.spring_layout(G_subset, weight='weight', k=0.18, scale=1.5)
f = Figure(figsize=(8,5))
a = f.add_subplot(111)
a.axis('off')
nx.draw_networkx_nodes(G_subset, positions, ax=a, edgecolors='black', linewidths=2,
node_size=300, alpha=0.65, node_color=color_subset)
nx.draw_networkx_edges(G_subset, positions, ax=a, width=1, edge_color='grey')
nx.draw_networkx_labels(G_subset, positions, ax=a, font_weight='bold', font_size=10)
if self.netcanvas:
self.netcanvas.get_tk_widget().destroy()
self.nettoolbar.destroy()
self.netcanvas = FigureCanvasTkAgg(f, frame)
self.netcanvas.draw()
self.netcanvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=True)
self.netcanvas._tkcanvas.pack(side=tk.TOP, fill=tk.BOTH, expand=True)
self.nettoolbar = NavigationToolbar2Tk(self.netcanvas, frame)
self.nettoolbar.update()
def plot_tiles(self):
"""plot 2D class avgs sorted and colored by cluster"""
#TODO: adjust plot, border and text_box sizes
ordered_projections = []
flat_clusters = []
colors_2D = []
for cluster, nodes in clusters.items():
for n in nodes:
ordered_projections.append(projection_2D[n])
for n in nodes:
flat_clusters.append(n)
for i, n in enumerate(G.nodes):
if n in nodes:
colors_2D.append(colors[i])
grid_cols = int(np.ceil(np.sqrt(len(ordered_projections))))
if len(ordered_projections) <= (grid_cols**2 - grid_cols):
grid_rows = grid_cols - 1
else:
grid_rows = grid_cols
#assuming images are same size, get shape
l, w = ordered_projections[0].shape
#add blank images to pack in grid
while len(ordered_projections) < grid_rows*grid_cols:
ordered_projections.append(np.zeros((l, w)))
colors_2D.append((0., 0., 0.))
flat_clusters.append('')
f = Figure()
grid = ImageGrid(f, 111, #similar to subplot(111)
nrows_ncols=(grid_rows, grid_cols), #creates grid of axes
axes_pad=0.05) #pad between axes in inch
lw = 1.75
text_box_size = 5
props = dict(boxstyle='round', facecolor='white')
for i, (ax, im) in enumerate(zip(grid, ordered_projections)):
ax.imshow(im, cmap='gray')
for side, spine in ax.spines.items():
spine.set_color(colors_2D[i])
spine.set_linewidth(lw)
ax.get_yaxis().set_ticks([])
ax.get_xaxis().set_ticks([])
text = str(flat_clusters[i])
ax.text(1, 1, text, va='top', ha='left', bbox=props, size=text_box_size)
newWindow = tk.Toplevel()
newWindow.grid_rowconfigure(0, weight=1)
newWindow.grid_columnconfigure(0, weight=1)
#PLOT FRAME
plotFrame = tk.Frame(newWindow, bg='lightgrey', width=600, height=400)
plotFrame.grid(row=0, column=0, sticky='nsew')
canvas = FigureCanvasTkAgg(f, plotFrame)
canvas.draw()
canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=True)
canvas._tkcanvas.pack(side=tk.TOP, fill=tk.BOTH, expand=True)
canvas.figure.tight_layout()
#TOOLBAR FRAME
toolbarFrame = ttk.Frame(newWindow, width=600, height=100)
toolbarFrame.grid(row=1, column=0, sticky='nsew')
toolbarFrame.grid_propagate(0)
toolbar = NavigationToolbar2Tk(canvas, toolbarFrame)
toolbar.update()
def plot_projections(self, p1, p2, frame):
if p1 == p2:
self.show_dif_class_msg()
else:
projection1 = extract_2D[p1]
projection2 = extract_2D[p2]
angle1 = complete_scores[p1, p2][0]
angle2 = complete_scores[p1, p2][1]
ref = transform.rotate(projection1, angle1, resize=True)
comp = transform.rotate(projection2, angle2, resize=True)
ref_square, comp_square = make_equal_square_images(ref, comp)
ref_intensity = ref_square.sum(axis=0)
comp_intensity = comp_square.sum(axis=0)
y_axis_max = max(np.amax(ref_intensity), np.amax(comp_intensity))
y_axis_min = min(np.amin(ref_intensity), np.amin(comp_intensity))
f = Figure(figsize=(4,4))
spec = gridspec.GridSpec(ncols=2, nrows=2, figure=f)
tl = f.add_subplot(spec[0, 0])
tr = f.add_subplot(spec[0, 1])
bl = f.add_subplot(spec[1, 0])
br = f.add_subplot(spec[1, 1])
# PROJECTION_1
#2D projection image
tl.imshow(ref_square, cmap=plt.get_cmap('gray'), aspect='equal')
tl.axis('off')
#1D line projection
bl.plot(ref_intensity, color='black')
bl.xaxis.set_visible(False)
bl.yaxis.set_visible(False)
bl.set_ylim([y_axis_min, (y_axis_max + 0.025*y_axis_max)])
bl.fill_between(range(len(ref_intensity)), ref_intensity, alpha=0.5, color='deepskyblue')
# PROJECTION_2
#2D projection image
tr.imshow(comp_square, cmap=plt.get_cmap('gray'), aspect='equal')
tr.axis('off')
#lD line projection
br.plot(comp_intensity, color='black')
br.xaxis.set_visible(False)
br.yaxis.set_visible(False)
br.set_ylim([y_axis_min, (y_axis_max + 0.025*y_axis_max)])
br.fill_between(range(len(comp_intensity)), comp_intensity, alpha=0.5, color='yellow')
asp = np.diff(bl.get_xlim())[0] / np.diff(bl.get_ylim())[0]
bl.set_aspect(asp)
asp1 = np.diff(br.get_xlim())[0] / np.diff(br.get_ylim())[0]
br.set_aspect(asp1)
f.tight_layout()
if self.projcanvas:
self.projcanvas.get_tk_widget().destroy()
self.projtoolbar.destroy()
self.projcanvas = FigureCanvasTkAgg(f, frame)
self.projcanvas.draw()
self.projcanvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=True)
self.projcanvas._tkcanvas.pack(side=tk.TOP, fill=tk.BOTH, expand=True)
self.projtoolbar = NavigationToolbar2Tk(self.projcanvas, frame)
self.projtoolbar.update()
def overlay_lines(self, p1, p2, FT, frame):
"""overlays line projections at optimum angle between two class averages"""
if p1 == p2:
self.show_dif_class_msg()
else:
a1 = complete_scores[p1, p2][0]
a2 = complete_scores[p1, p2][1]
projection1 = make_1D(extract_2D[p1], a1)
projection2 = make_1D(extract_2D[p2], a2)
if FT:
pad_p1 = np.pad(projection1.vector, pad_width=(0, shape-projection1.size()))
pad_p2 = np.pad(projection2.vector, pad_width=(0, shape-projection2.size()))
A = abs(np.fft.rfft(pad_p1))
B = abs(np.fft.rfft(pad_p2))
f = Figure(figsize=(8,4))
ax = f.add_subplot(111)
ax.bar(range(len(A)), A, alpha=0.35, color='deepskyblue', ec='k', linewidth=1)
ax.bar(range(len(B)), B, alpha=0.35, color='yellow', ec='k', linewidth=1)
ax.get_xaxis().set_ticks([])
ax.set_xlabel('frequency component')
ax.set_ylabel('Amplitude')
else:
a2_flip = complete_scores[p1, p2][1] + 180
projection2_flip = make_1D(extract_2D[p2], a2_flip)
score_default, r, c = slide_score(projection1, projection2) # Score and location of optimum
score_flip, r_flip, c_flip = slide_score(projection1, projection2_flip) # Score of phase flipped
if score_default <= score_flip:
ref_intensity, comp_intensity = r, c
else:
ref_intensity, comp_intensity = r_flip, c_flip
f = Figure(figsize=(8,4))
ax = f.add_subplot(111)
x_axis_max = len(ref_intensity)
y_axis_max = max(np.amax(ref_intensity), np.amax(comp_intensity))
y_axis_min = min(np.amin(ref_intensity), np.amin(comp_intensity))
ax.plot(ref_intensity, color='black')
ax.plot(comp_intensity, color='black')
ax.fill_between(range(len(ref_intensity)), ref_intensity, alpha=0.35, color='deepskyblue')
ax.fill_between(range(len(comp_intensity)), comp_intensity, alpha=0.35, color='yellow')
ax.set_ylabel('Intensity')
ax.set_ylim([y_axis_min, (y_axis_max + 0.025*y_axis_max)])
ax.xaxis.set_visible(False)
f.tight_layout()
if self.projcanvas:
self.projcanvas.get_tk_widget().destroy()
self.projtoolbar.destroy()
self.projcanvas = FigureCanvasTkAgg(f, frame)
self.projcanvas.draw()
self.projcanvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=True)
self.projcanvas._tkcanvas.pack(side=tk.TOP, fill=tk.BOTH, expand=True)
self.projtoolbar = NavigationToolbar2Tk(self.projcanvas, frame)
self.projtoolbar.update()
def write_star_files(self, star_input, outpath):
"""split star file into new star files based on clusters"""
with open(star_input, 'r') as f:
table = parse_star(f)
cluster_star = {}
for cluster, nodes in clusters.items():
if nodes:
#convert to str to match df
#add 1 to match RELION indexing
avgs = [str(node+1) for node in nodes]
subset = table[table['ClassNumber'].isin(avgs)]
cluster_star[cluster] = subset
for cluster, table in cluster_star.items():
with open(outpath+'/slicem_cluster_{0}.star'.format(cluster), 'w') as f:
#write the star file
print('data_', file=f)
print('loop_', file=f)
for i, name in enumerate(table.columns):
print('_rln' + name + ' #' + str(i+1), file=f)
table.to_csv(f, sep='\t', index=False, header=False)
with open(outpath+'/slicem_clusters.txt', 'w') as f:
for cluster, averages in clusters.items():
f.write(str(cluster) + '\t' + str(averages) + '\n')
print('star files written!')
def write_edge_list(self, network, outpath):
with open(outpath+'/slicem_edge_list.txt', 'w') as f:
f.write('projection_1'+'\t'+'projection_2'+'\t'+'score'+'\n')
for t in flat:
f.write(str(t[0])+'\t'+str(t[1])+'\t'+str(t[2])+'\n')
if network == 'top_n':
if clusters['singles']:
for single in clusters['singles']:
f.write(str(single)+'\n')
print('edge list written!')
#Utility functions from main script to make GUI standalone
def extract_class_avg(avg):
"""fit in minimal bounding box"""
image = avg.copy()
image[image < 0] = 0
struct = np.ones((2, 2), dtype=bool)
dilate = ndi.binary_dilation(image, struct)
labeled = measure.label(dilate, connectivity=2)
rprops = measure.regionprops(labeled, image, cache=False)
if len(rprops) == 1:
select_region = 0
else:
img_y, img_x = image.shape
if labeled[int(img_y/2), int(img_x/2)] != 0: # Check for central region
select_region = labeled[int(img_y/2), int(img_x/2)] - 1 # For index
else:
distances = [
(i, np.linalg.norm(np.array((img_y/2, img_x/2)) - np.array(r.weighted_centroid)))
for i, r in enumerate(rprops)
]
select_region = min(distances, key=lambda x: x[1])[0] # Pick first closest region
y_min, x_min, y_max, x_max = [p for p in rprops[select_region].bbox]
return image[y_min:y_max, x_min:x_max]
def nearest_neighbors(neighbors):
"""group k best scores for each class average to construct graph"""
projection_knn = {}
order_scores = {avg: [] for avg in range(num_class_avg)}
for d in drop:
order_scores.pop(d, None)
#projection_knn[projection_1] = [projection_2, angle_1, angle_2, score]
for pair, values in scores_update.items():
p1, p2 = [p for p in pair]
a1, a2, s = [v for v in values]
c = [p2, a1, a2, s]
order_scores[p1].append(c)
# Zscore per class avg for edge
for projection, scores in order_scores.items():
all_scores = [v[3] for v in scores]
u = np.mean(all_scores)
s = np.std(all_scores)
for v in scores:
zscore = (v[3] - u)/s
v[3] = zscore
for avg, scores in order_scores.items():
sort = sorted(scores, reverse=False, key=lambda x: x[3])[:neighbors]
projection_knn[avg] = sort
return projection_knn
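# The raw SLICEM scores behave as distances (see the distance-to-similarity
# conversion in create_network), so after z-scoring per class average the most
# negative z-scores mark the closest neighbours; hence the ascending sort above
# keeps the k best matches.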
def remove_outliers(clusters):
"""
Use median absolute deviation to remove outliers
<NAME> and <NAME> (1993)
"""
pixel_sums = {}
outliers = []
for cluster, nodes in clusters.items():
if len(nodes) > 1:
pixel_sums[cluster] = []
for node in nodes:
pixel_sums[cluster].append(sum(sum(extract_2D[node])))
for cluster, psums in pixel_sums.items():
med = np.median(psums)
m_psums = [abs(x - med) for x in psums]
mad = np.median(m_psums)
if mad == 0:
continue  # MAD of zero: all pixel sums identical, nothing to flag
else:
for i, proj in enumerate(psums):
z = 0.6745*(proj - med)/mad
if abs(z) > 3.5:
outliers.append((cluster, clusters[cluster][i]))
clusters["outliers"] = [o[1] for o in outliers]
for outlier in outliers:
cluster, node = outlier[0], outlier[1]
clusters[cluster].remove(node)
print('class_avg node {0} was removed from cluster {1} as an outlier'.format(node, cluster))
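# Standalone illustration of the modified z-score rule used above (assumed
# example values, not part of the pipeline): points with
# |0.6745 * (x - median) / MAD| > 3.5 are flagged as outliers.
def _mad_outlier_example():
    values = np.array([10.0, 11.0, 9.5, 10.5, 40.0])  # 40.0 is the obvious outlier
    med = np.median(values)
    mad = np.median(np.abs(values - med))
    modified_z = 0.6745 * (values - med) / mad
    return values[np.abs(modified_z) > 3.5]            # -> array([40.])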
def random_color():
return tuple(np.random.rand(1,3)[0])
def get_plot_colors(clusters, graph):
color_list = []
preset_colors = [color for colors in [cm.Set3.colors] for color in colors]
for i in range(len(clusters)):
if i < len(preset_colors):
color_list.append(preset_colors[i])
else:
color_list.append(random_color())
colors = []
for i, node in enumerate(graph.nodes):
for cluster, projections in clusters.items():
if cluster == 'singles':
if node in projections:
colors.append((0.85, 0.85, 0.85))
elif cluster == 'outliers':
if node in projections:
colors.append((0.35, 0.35, 0.35))
elif cluster == 'removed':
if node in projections:
colors.append((0.9, 0, 0))
elif node in projections:
colors.append((color_list[cluster]))
return colors
def make_equal_square_images(ref, comp):
ry, rx = np.shape(ref)
cy, cx = np.shape(comp)
max_dim = max(rx, ry, cx, cy) # Max dimension
ref = adjust_image_size(ref, max_dim)
comp = adjust_image_size(comp, max_dim)
return ref, comp
def adjust_image_size(img, max_dim):
y, x = np.shape(img)
y_pad = int((max_dim-y)/2)
if (max_dim - y) % 2 == 0:  # pad evenly only when the size difference is even
img = np.pad(img, pad_width=((y_pad,y_pad), (0,0)), mode='constant')
else:
img = np.pad(img, pad_width=((y_pad+1,y_pad), (0,0)), mode='constant')
x_pad = int((max_dim-x)/2)
if (max_dim - x) % 2 == 0:
img = np.pad(img, pad_width=((0,0), (x_pad,x_pad)), mode='constant')
else:
img = np.pad(img, pad_width=((0,0), (x_pad+1,x_pad)), mode='constant')
return img
class Projection:
"""for 1D projection vectors"""
def __init__(self,
class_avg,
angle,
vector):
self.class_avg = class_avg
self.angle = angle
self.vector = vector
def size(self):
return len(self.vector)
def make_1D(projection, angle):
proj_1D = transform.rotate(projection, angle, resize=True).sum(axis=0)
trim_1D = np.trim_zeros(proj_1D, trim='fb')