prompt (stringlengths 19–879k) | completion (stringlengths 3–53.8k) | api (stringlengths 8–59)
---|---|---|
import sys
import time
import numpy
import os
import datetime
from textwrap import wrap
from operator import add
import matplotlib.pyplot as plt
PWD = os.path.dirname(os.path.abspath(__file__))
#-------------------------------------------------------------------------------
def gen_graph():
enmd_avg_md_prep_times = []
enmd_avg_md_times = []
enmd_avg_exchange_prep_times = []
enmd_avg_exchange_times = []
enmd_avg_post_processing_times = []
repex_avg_md_prep_times = []
repex_avg_md_times = []
repex_avg_exchange_prep_times = []
repex_avg_exchange_times = []
repex_avg_post_processing_times = []
############################################################################################
# 64/128
md_prep_times = [11.15554, 12.643986, 16.038453]
enmd_avg_md_prep_times.append( numpy.average(md_prep_times) )
md_times = [119.813373, 139.695717, 156.084988]
enmd_avg_md_times.append( numpy.average(md_times) )
exchange_prep_times = [9.891328, 13.688966, 16.503259]
enmd_avg_exchange_prep_times.append( numpy.average(exchange_prep_times) )
exchange_times = [102.411035, 121.539652, 128.82382]
enmd_avg_exchange_times.append( numpy.average(exchange_times) )
post_processing_times = [0.671823, 0.798953, 1.097424]
enmd_avg_post_processing_times.append( numpy.average(post_processing_times) )
############################################################################################
# OK
md_prep_times = [1.33276, 2.948043, 3.450892]
repex_avg_md_prep_times.append( numpy.average(md_prep_times) )
md_times = [137.878725, 156.24525, 174.764014]
repex_avg_md_times.append( numpy.average(md_times) )
exchange_prep_times = [2.171397, 2.699995, 2.915867]
repex_avg_exchange_prep_times.append( numpy.average(exchange_prep_times) )
exchange_times = [125.228446, 142.720571, 153.899415]
repex_avg_exchange_times.append( numpy.average(exchange_times) )
post_processing_times = [0.488691, 0.72613, 0.774859]
repex_avg_post_processing_times.append( numpy.average(post_processing_times) )
############################################################################################
############################################################################################
# 128/256
md_prep_times = [22.541008, 35.884892]
enmd_avg_md_prep_times.append( numpy.average(md_prep_times) )
md_times = [271.408067, 365.774492]
enmd_avg_md_times.append( numpy.average(md_times) )
exchange_prep_times = [26.389173, 41.275375]
enmd_avg_exchange_prep_times.append( numpy.average(exchange_prep_times) )
exchange_times = [252.021597, 305.552244]
enmd_avg_exchange_times.append( numpy.average(exchange_times) )
post_processing_times = [2.053707, 2.707205]
enmd_avg_post_processing_times.append( numpy.average(post_processing_times) )
############################################################################################
# ok
md_prep_times = [3.442241, 7.023973]
repex_avg_md_prep_times.append( numpy.average(md_prep_times) )
md_times = [303.468326, 387.972735]
repex_avg_md_times.append( numpy.average(md_times) )
exchange_prep_times = [4.880221, 6.347701]
repex_avg_exchange_prep_times.append( numpy.average(exchange_prep_times) )
exchange_times = [283.058266, 334.289674]
repex_avg_exchange_times.append( numpy.average(exchange_times) )
post_processing_times = [1.738836, 2.537882]
repex_avg_post_processing_times.append( numpy.average(post_processing_times) )
############################################################################################
############################################################################################
# 256/512
md_prep_times = [43.535514, 125.557439]
enmd_avg_md_prep_times.append( numpy.average(md_prep_times) )
md_times = [649.177261, 1249.577718]
enmd_avg_md_times.append( numpy.average(md_times) )
exchange_prep_times = [78.872782, 153.390076]
enmd_avg_exchange_prep_times.append( numpy.average(exchange_prep_times) )
exchange_times = [820.879971, 1127.394562]
enmd_avg_exchange_times.append( numpy.average(exchange_times) )
post_processing_times = [10.086089, 11.759318]
enmd_avg_post_processing_times.append( numpy.average(post_processing_times) )
############################################################################################
md_prep_times = [3.918126, 17.080726]
repex_avg_md_prep_times.append( numpy.average(md_prep_times) )
md_times = [765.13554, 1280.874559]
repex_avg_md_times.append( numpy.average(md_times) )
exchange_prep_times = [10.708313, 16.369491]
repex_avg_exchange_prep_times.append( numpy.average(exchange_prep_times) )
exchange_times = [912.782984, 1386.196337]
repex_avg_exchange_times.append( numpy.average(exchange_times) )
post_processing_times = [9.147824, 10.985776]
repex_avg_post_processing_times.append( numpy.average(post_processing_times) )
############################################################################################
############################################################################################
# 384/768
md_prep_times = [98.983471, 370.094089]
enmd_avg_md_prep_times.append( numpy.average(md_prep_times) )
md_times = [1314.953057, 2386.687527]
enmd_avg_md_times.append( numpy.average(md_times) )
exchange_prep_times = [222.776623, 460.798372]
enmd_avg_exchange_prep_times.append( numpy.average(exchange_prep_times) )
exchange_times = [1870.395685, 2527.059035]
enmd_avg_exchange_times.append( numpy.average(exchange_times) )
post_processing_times = [24.110429, 22.896567]
enmd_avg_post_processing_times.append( numpy.average(post_processing_times) )
############################################################################################
md_prep_times = [6.488597, 33.02816]
repex_avg_md_prep_times.append( numpy.average(md_prep_times) )
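# The plotting half of gen_graph() is not shown above; the sketch below is one
# possible way to turn the collected enmd_*/repex_* stage averages into grouped,
# stacked bars. The configuration labels and the output filename are assumptions,
# not taken from the original script.
def plot_stage_averages(enmd_stages, repex_stages,
                        labels=('64/128', '128/256', '256/512', '384/768')):
    # enmd_stages / repex_stages: ordered dicts mapping a stage name to a list of
    # per-configuration averages (one value per entry in `labels`)
    idx = numpy.arange(len(labels))
    width = 0.35
    fig, ax = plt.subplots()
    for offset, name, stages in ((0.0, 'EnMD', enmd_stages), (width, 'RepEx', repex_stages)):
        bottom = numpy.zeros(len(labels))
        for stage, values in stages.items():
            vals = numpy.asarray(values, dtype=float)
            ax.bar(idx + offset, vals, width, bottom=bottom, label='{} {}'.format(name, stage))
            bottom += vals
    ax.set_xticks(idx + width / 2)
    ax.set_xticklabels(labels)
    ax.set_ylabel('Average time per simulation cycle (s)')
    ax.legend(fontsize='small')
    fig.savefig(os.path.join(PWD, 'stage_averages.png'))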
import numpy as np
import pytest
from scipy.interpolate import RegularGridInterpolator
from RAiDER.interpolate import interpolate, interpolate_along_axis
from RAiDER.interpolator import RegularGridInterpolator as Interpolator, interpolateDEM
from RAiDER.interpolator import fillna3D, interp_along_axis, interpVector
@pytest.fixture
def nanArr():
array = np.random.randn(2, 2, 3)
array[0, 0, 0] = np.nan
array[0, 0, 1] = np.nan
array[0, 0, 2] = np.nan
array[1, 0, 0] = np.nan
array[0, 1, 1] = np.nan
array[1, 1, 2] = np.nan
true_array = array.copy()
true_array[0, 0, 0] = np.nan
true_array[0, 0, 1] = np.nan
true_array[0, 0, 2] = np.nan
true_array[1, 0, 0] = true_array[1, 0, 1]
true_array[0, 1, 1] = (true_array[0, 1, 0] + true_array[0, 1, 2]) / 2
true_array[1, 1, 2] = true_array[1, 1, 1]
return array, true_array
def test_interpVector():
assert np.allclose(
interpVector(
np.array([
0, 1, 2, 3, 4, 5,
0, 0.84147098, 0.90929743, 0.14112001, -0.7568025, -0.95892427,
0.5, 1.5, 2.5, 3.5, 4.5
]),
6
),
np.array([0.42073549, 0.87538421, 0.52520872, -0.30784124, -0.85786338])
)
def test_fillna3D(nanArr):
arr, tarr = nanArr
assert np.allclose(fillna3D(arr), tarr, equal_nan=True)
def test_interp_along_axis():
z2 = np.tile(np.arange(100)[..., np.newaxis], (5, 1, 5)).swapaxes(1, 2)
zvals = 0.3 * z2 - 12.75
newz = np.tile(
np.array([1.5, 9.9, 15, 23.278, 39.99, 50.1])[..., np.newaxis],
(5, 1, 5)
).swapaxes(1, 2)
corz = 0.3 * newz - 12.75
assert np.allclose(interp_along_axis(z2, newz, zvals, axis=2), corz)
def shuffle_along_axis(a, axis):
idx = np.random.rand(*a.shape).argsort(axis=axis)
return np.take_along_axis(a, idx, axis=axis)
def test_interpolate_along_axis():
# Rejects scalar values
with pytest.raises(TypeError):
interpolate_along_axis(np.array(0), np.array(0), np.array(0))
# Rejects mismatched number of dimensions
with pytest.raises(TypeError):
interpolate_along_axis(np.zeros(1), np.zeros(1), np.zeros((1, 1)))
with pytest.raises(TypeError):
interpolate_along_axis(np.zeros(1), np.zeros((1, 1)), np.zeros(1))
with pytest.raises(TypeError):
interpolate_along_axis(np.zeros((1, 1)), np.zeros(1), np.zeros(1))
with pytest.raises(TypeError):
interpolate_along_axis(np.zeros(1), np.zeros((1, 1)), np.zeros((1, 1)))
with pytest.raises(TypeError):
interpolate_along_axis(np.zeros((1, 1)), np.zeros((1, 1)), np.zeros(1))
with pytest.raises(TypeError):
interpolate_along_axis(np.zeros((1, 1)), np.zeros(1), np.zeros((1, 1)))
# Rejects mismatched shape for points and values
with pytest.raises(TypeError):
interpolate_along_axis(np.zeros(1), np.zeros(2), np.zeros(1))
with pytest.raises(TypeError):
interpolate_along_axis(np.zeros((9, 2)), np.zeros((9, 3)), np.zeros(1))
# Rejects bad axis
with pytest.raises(TypeError):
interpolate_along_axis(np.zeros(1), np.zeros(1), np.zeros(1), axis=1)
with pytest.raises(TypeError):
interpolate_along_axis(np.zeros(1), np.zeros(1), np.zeros(1), axis=-2)
# Rejects bad interp_points shape
with pytest.raises(TypeError):
interpolate_along_axis(
np.zeros((2, 2)), np.zeros((2, 2)), np.zeros((3, 2))
)
with pytest.raises(TypeError):
interpolate_along_axis(
np.zeros((2, 2)), np.zeros((2, 2)), np.zeros((2, 3)),
axis=0, max_threads=1
)
def test_interp_along_axis_1d():
def f(x):
return 2 * x
xs = np.array([1, 2, 3, 4])
ys = f(xs)
points = np.array([1.5, 3.1])
assert np.allclose(
interp_along_axis(xs, points, ys, axis=0),
2 * points
)
assert np.allclose(
interpolate_along_axis(xs, ys, points, axis=0, max_threads=1),
2 * points
)
def test_interp_along_axis_1d_out_of_bounds():
def f(x):
return 2 * x
xs = np.array([1, 2, 3, 4])
ys = f(xs)
points = np.array([0, 5])
assert np.allclose(
interp_along_axis(xs, points, ys, axis=0),
np.array([np.nan, np.nan]),
equal_nan=True
)
assert np.allclose(
interpolate_along_axis(xs, ys, points, axis=0,
max_threads=1, fill_value=np.nan),
np.array([np.nan, np.nan]),
equal_nan=True
)
def test_interp_along_axis_2d():
def f(x):
return 2 * x
xs = np.array([
[1, 2, 3, 4],
[3, 4, 5, 6]
])
ys = f(xs)
points = np.array([
[1.5, 3.1, 3.6],
[3.5, 5.1, 5.2]
])
assert np.allclose(
interp_along_axis(xs, points, ys, axis=1),
2 * points
)
assert np.allclose(
interpolate_along_axis(xs, ys, points, axis=1),
2 * points
)
def test_interp_along_axis_2d_threads_edge_case():
def f(x):
return 2 * x
# Max of 4 threads but 5 rows to interpolate over. Each thread will get 2
# rows which means only 3 threads will be used
max_threads = 4
xs = np.array([
[1, 2, 3, 4],
[3, 4, 5, 6],
[7, 8, 9, 10],
[11, 12, 13, 14],
[15, 16, 17, 18]
])
ys = f(xs)
points = np.array([
[1.5, 3.1, 3.6],
[3.5, 5.1, 5.2],
[7.5, 9.1, 9.9],
[11.1, 12.2, 13.3],
[15.1, 16.2, 17.3]
])
assert np.allclose(
interp_along_axis(xs, points, ys, axis=1),
2 * points
)
assert np.allclose(
interpolate_along_axis(xs, ys, points, axis=1, max_threads=max_threads),
2 * points
)
def test_interp_along_axis_3d():
def f(x):
return 2 * x
xs = np.array([
[[1, 2, 3, 4],
[3, 4, 5, 6]],
[[10, 11, 12, 13],
[21, 22, 23, 24]]
])
ys = f(xs)
points = np.array([
[[1.5, 3.1],
[3.5, 5.1]],
[[10.3, 12.9],
[22.6, 22.1]]
])
assert np.allclose(
interp_along_axis(xs, points, ys, axis=2),
2 * points
)
assert np.allclose(
interpolate_along_axis(xs, ys, points, axis=2),
2 * points
)
def test_interp_along_axis_3d_axis1():
def f(x):
return 2 * x
xs = np.array([
[[1, 2],
[3, 4]],
[[10, 11],
[21, 22]]
])
ys = f(xs)
points = np.array([
[[1.5, 3.1],
[2.5, 2.1]],
[[10.3, 12.9],
[15, 17]]
])
assert np.allclose(
interp_along_axis(xs, points, ys, axis=1),
2 * points
)
assert np.allclose(
interpolate_along_axis(xs, ys, points, axis=1),
2 * points
)
@pytest.mark.parametrize("num_points", (7, 200, 500))
def test_interp_along_axis_3d_large(num_points):
def f(x):
return 2 * x
# To scale values along axis 0 of a 3 dimensional array
scale = np.arange(1, 101).reshape((100, 1, 1))
axis1 = np.arange(100)
axis2 = np.repeat(np.array([axis1]), 100, axis=0)
xs = np.repeat(np.array([axis2]), 100, axis=0) * scale
ys = f(xs)
points = np.array([np.linspace(0, 99, num=num_points)]).repeat(100, axis=0)
points = np.repeat(np.array([points]), 100, axis=0) * scale
ans = 2 * points
assert np.allclose(interp_along_axis(xs, points, ys, axis=2), ans)
assert np.allclose(interpolate_along_axis(xs, ys, points, axis=2), ans)
assert np.allclose(
interpolate_along_axis(xs, ys, points, axis=2, assume_sorted=True), ans
)
def test_interp_along_axis_3d_large_unsorted():
def f(x):
return 2 * x
# To scale values along axis 0 of a 3 dimensional array
scale = np.arange(1, 101).reshape((100, 1, 1))
axis1 = np.arange(100)
axis2 = np.repeat(np.array([axis1]), 100, axis=0)
xs = np.repeat(np.array([axis2]), 100, axis=0) * scale
ys = f(xs)
points = np.array([np.linspace(0, 99, num=300)]).repeat(100, axis=0)
points = np.repeat(np.array([points]), 100, axis=0) * scale
points = shuffle_along_axis(points, 2)
ans = 2 * points
assert np.allclose(interp_along_axis(xs, points, ys, axis=2), ans)
assert np.allclose(interpolate_along_axis(xs, ys, points, axis=2), ans)
def test_grid_dim_mismatch():
with pytest.raises(TypeError):
interpolate(
points=(np.zeros((10,)), np.zeros((5,))),
values=np.zeros((1,)),
interp_points=np.zeros((1,))
)
def test_basic():
ans = interpolate(
points=(np.array([0, 1]),),
values=np.array([0, 1]),
interp_points=np.array([[0.5]]),
max_threads=1,
assume_sorted=True
)
assert ans == np.array([0.5])
def test_1d_out_of_bounds():
ans = interpolate(
points=(np.array([0, 1]),),
values=np.array([0, 1]),
interp_points=np.array([[100]]),
max_threads=1,
assume_sorted=True
)
# Output is extrapolated
assert ans == np.array([100])
def test_1d_fill_value():
ans = interpolate(
points=(np.array([0, 1]),),
values=np.array([0, 1]),
interp_points=np.array([[100]]),
max_threads=1,
fill_value=np.nan,
assume_sorted=True
)
assert np.all(np.isnan(ans))
def test_small():
ans = interpolate(
points=(np.array([1, 2, 3, 4, 5, 6]),),
values=np.array([10, 9, 30, 10, 6, 1]),
interp_points=np.array([1.25, 2.9, 3.01, 5.7]).reshape(-1, 1)
)
assert ans.shape == (4,)
assert np.allclose(ans, np.array([9.75, 27.9, 29.8, 2.5]), atol=1e-15)
def test_small_not_sorted():
ans = interpolate(
points=(np.array([1, 2, 3, 4, 5, 6]),),
values=np.array([10, 9, 30, 10, 6, 1]),
interp_points=np.array([2.9, 1.25, 5.7, 3.01]).reshape(-1, 1),
)
assert ans.shape == (4,)
assert np.allclose(ans, np.array([27.9, 9.75, 2.5, 29.8]), atol=1e-15)
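# The hard-coded expectations in test_small and test_small_not_sorted can be
# re-derived with plain NumPy piecewise-linear interpolation; this cross-check is
# illustrative only and was not part of the original RAiDER test suite.
def test_small_reference_values():
    xs = np.array([1, 2, 3, 4, 5, 6])
    ys = np.array([10, 9, 30, 10, 6, 1])
    pts = np.array([1.25, 2.9, 3.01, 5.7])
    # np.interp gives the same result as interpolate() for sorted 1-D input,
    # which is where the expected values above come from
    assert np.allclose(np.interp(pts, xs, ys), [9.75, 27.9, 29.8, 2.5], atol=1e-12)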
def test_exact_points():
ans = interpolate(
points=(np.array([1, 2, 3, 4, 5, 6]),),
values=np.array([10, 9, 30, 10, 6, 1]),
interp_points=np.array([1, 2, 3, 4, 5, 6]).reshape(-1, 1)
)
assert ans.shape == (6,)
assert np.allclose(ans, np.array([10, 9, 30, 10, 6, 1]), atol=1e-15)
def test_2d_basic():
xs = np.array([0, 1])
ys = np.array([0, 1])
values = (lambda x, y: x + y)(
*np.meshgrid(xs, ys, indexing="ij", sparse=True)
)
ans = interpolate(
points=(xs, ys),
values=values,
interp_points=np.array([[0.5, 0.5]])
)
assert ans == np.array([1])
def test_2d_out_of_bounds():
xs = np.array([0, 1])
ys = np.array([0, 1])
values = (lambda x, y: x + y)(
*np.meshgrid(xs, ys, indexing="ij", sparse=True)
)
ans = interpolate(
points=(xs, ys),
values=values,
interp_points=np.array([[100, 100]])
)
# Output is extrapolated
assert ans == np.array([200])
def test_2d_fill_value():
xs = np.array([0, 1])
ys = np.array([0, 1])
values = (lambda x, y: x + y)(
*np.meshgrid(xs, ys, indexing="ij", sparse=True)
)
ans = interpolate(
points=(xs, ys),
values=values,
interp_points=np.array([[100, 100]]),
fill_value=np.nan
)
assert np.all(np.isnan(ans))
def test_2d_square_small():
def f(x, y):
return x ** 2 + 3 * y
xs = np.linspace(0, 1000, 100)
ys = np.linspace(0, 1000, 100)
values = f(*np.meshgrid(xs, ys, indexing="ij", sparse=True))
points = np.stack((
np.linspace(10, 990, 5),
np.linspace(10, 890, 5)
), axis=-1)
ans = interpolate(
points=(xs, ys),
values=values,
interp_points=points,
assume_sorted=True
)
rgi = RegularGridInterpolator((xs, ys), values)
ans_scipy = rgi(points)
assert np.allclose(ans, ans_scipy, atol=1e-15)
def test_2d_rectangle_small():
def f(x, y):
return x ** 2 + 3 * y
xs = np.linspace(0, 2000, 200)
ys = np.linspace(0, 1000, 100)
values = f(*np.meshgrid(xs, ys, indexing="ij", sparse=True))
points = np.stack((
np.linspace(10, 990, 5),
np.linspace(10, 890, 5)
), axis=-1)
ans = interpolate(
points=(xs, ys),
values=values,
interp_points=points,
assume_sorted=True
)
rgi = RegularGridInterpolator((xs, ys), values)
ans_scipy = rgi(points)
assert np.allclose(ans, ans_scipy, atol=1e-15)
def test_2d_rectangle_small_2():
def f(x, y):
return x ** 2 + 3 * y
xs = np.linspace(0, 1000, 100)
ys = np.linspace(0, 2000, 200)
values = f(*np.meshgrid(xs, ys, indexing="ij", sparse=True))
points = np.stack((
np.linspace(10, 990, 5),
np.linspace(10, 890, 5)
), axis=-1)
ans = interpolate(
points=(xs, ys),
values=values,
interp_points=points,
assume_sorted=True
)
rgi = RegularGridInterpolator((xs, ys), values)
ans_scipy = rgi(points)
assert np.allclose(ans, ans_scipy, atol=1e-15)
def test_2d_square_large():
def f(x, y):
return x ** 2 + 3 * y
xs = np.linspace(-10_000, 10_000, num=1_000)
ys = np.linspace(0, 20_000, num=1_000)
values = f(*np.meshgrid(xs, ys, indexing="ij", sparse=True))
num_points = 2_000_000
points = np.stack((
np.linspace(10, 990, num_points),
np.linspace(10, 890, num_points)
), axis=-1)
ans = interpolate(
points=(xs, ys),
values=values,
interp_points=points,
assume_sorted=True
)
rgi = RegularGridInterpolator((xs, ys), values)
ans_scipy = rgi(points)
assert np.allclose(ans, ans_scipy, atol=1e-15)
def test_3d_basic():
xs = np.array([0, 1])
ys = np.array([0, 1])
zs = np.array([0, 1])
values = (lambda x, y, z: x + y + z)(
*np.meshgrid(xs, ys, zs, indexing="ij", sparse=True)
)
ans = interpolate(
points=(xs, ys, zs),
values=values,
interp_points=np.array([[0.5, 0.5, 0.5]]),
assume_sorted=True
)
assert ans == np.array([1.5])
def test_3d_out_of_bounds():
xs = np.array([0, 1])
ys = np.array([0, 1])
zs = np.array([0, 1])
values = (lambda x, y, z: x + y + z)(
*np.meshgrid(xs, ys, zs, indexing="ij", sparse=True)
)
import os
import time
from collections import Counter
from functools import partial
import numpy as np
import scipy.optimize as so
from matplotlib.backends.backend_pdf import PdfPages
from matplotlib.figure import Figure
from matplotlib.animation import FuncAnimation
from matplotlib.collections import LineCollection
from traits.api import Instance, Button, HasTraits, Float, Bool, Enum, on_trait_change, Int, File
from traitsui.api import View, VGroup, HGroup, Item, UItem, HSplit, VSplit, Label, RangeEditor, FileEditor
from tqdm import tqdm
from nitime.utils import autocov
from ecogdata.channel_map import ChannelMap
from ecogdata.filt.blocks import BlockedSignal
import ecoglib.estimation.spatial_variance as sv
from ecoglib.signal_testing import spatial_autocovariance
from ecoglib.estimation import cxx_to_pairs, matern_semivariogram, matern_spectrum
from ecoglib.vis.traitsui_bridge import MPLFigureEditor, PingPongStartup
from ecoglib.vis.gui_tools import ArrayMap, SavesFigure
from ecoglib.vis.plot_util import subplots, subplot2grid
import seaborn as sns
from .base import PlotsInterval, MultiframeSavesFigure, colormaps, FigureCanvas
from ..helpers import PersistentWindow
__all__ = ['ArrayVarianceTool', 'SpatialVariance']
class ArrayVarianceTool(PersistentWindow):
"""Stand-alone UI to image covariance seeded at chosen sites"""
array_plot = Instance(ArrayMap)
selected_site = Int(-1)
min_max_norm = Bool(True)
_c_lo = Float
_c_hi = Float
cmap = Enum('gray', colormaps)
save_file = File(os.getcwd())
save_all = Button('Save PDF')
def __init__(self, cov, chan_map, **traits):
self.cov = cov
self._cv_mn = cov.min()
self._cv_mx = cov.max()
array_plot = ArrayMap(chan_map, vec=self.cov.mean(1))
HasTraits.__init__(
self, array_plot=array_plot,
_c_lo=self._cv_mn, _c_hi=self._cv_mx
)
self.sync_trait('selected_site', self.array_plot, mutual=True)
@on_trait_change('selected_site, cmap, min_max_norm, _c_lo, _c_hi')
def _image(self):
if not hasattr(self, 'array_plot'):
return
if self.array_plot is None:
return
kw = dict(cmap=self.cmap)
if self.min_max_norm:
kw['clim'] = (self._cv_mn, self._cv_mx)
else:
kw['clim'] = self._c_lo, self._c_hi
site = self.selected_site
if site >= 0:
self.array_plot.ax.set_title('Seeded covariance map')
self.array_plot.update_map(self.cov[site], **kw)
else:
self.array_plot.ax.set_title('Mean covariance weight')
self.array_plot.update_map(self.cov.mean(1), **kw)
def _save_all_fired(self):
# cycle through sites, update the image, and use
# PdfPages.savefig(self.array_plot.fig)
chan_map = self.array_plot.chan_map
ij = list(zip(*chan_map.to_mat()))
ij = sorted(ij)
pdf_file = self.save_file
if not pdf_file.endswith('.pdf'):
pdf_file = pdf_file + '.pdf'
save_site = self.selected_site
with PdfPages(pdf_file) as pdf:
for i_, j_ in tqdm(ij, desc='Saving PDF pages', leave=True):
s = chan_map.lookup(i_, j_)
self.selected_site = s
f = self.array_plot.fig
ax = self.array_plot.ax
ttl = ax.get_title()
ax.set_title(ttl + ' site ({0}, {1})'.format(i_, j_))
pdf.savefig(f)
self.selected_site = save_site
def _post_canvas_hook(self):
self.array_plot.fig.canvas.mpl_connect(
'button_press_event', self.array_plot.click_listen
)
self._image()
def default_traits_view(self):
v = View(
VSplit(
UItem('array_plot', editor=MPLFigureEditor(),
width=500, height=400, resizable=True),
HGroup(
Item('selected_site', style='readonly'),
Item('min_max_norm', label='Min-Max Normalize'),
Item('cmap', label='Color map'),
),
HGroup(
Item('_c_lo', label='Low color'),
Item('_c_hi', label='High color'),
enabled_when='min_max_norm==False'
),
HGroup(
Item('save_file', label='pdf file',
editor=FileEditor(dialog_style='save')),
UItem('save_all')
)
),
handler=PingPongStartup,
resizable=True,
title='Covariance Visualization'
)
return v
def make_matern_label(**params):
"""Helper function for plot labels."""
label = ''
if 'nu' in params:
label = label + '\u03BD {nu:.1f} '
if 'theta' in params:
label = label + '\u03B8 {theta:.1f} '
if 'nugget' in params:
label = label + '\u03C3 {nugget:.2f} '
if 'sill' in params:
label = label + '\u03C2 {sill:.2f} '
return label.format(**params)
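# A quick illustration of the label format (arbitrary parameter values, shown only
# as an example of the output string):
#     make_matern_label(nu=1.5, theta=2.0, nugget=0.05, sill=1.2)
#     -> 'ν 1.5 θ 2.0 σ 0.05 ς 1.20 '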
class STSemivar(PersistentWindow):
sv_fig = Instance(Figure)
array_frame = Instance(Figure)
cnx = Instance(Figure)
tsfig = Instance(Figure)
_lo = Int(0)
_hi = Int
slider = Int(0)
fitting = Bool(False)
def __init__(self, x, t, st_semivar, timeseries, chan_map, **traits):
self._hi = len(t) - 1
self._x = x
self._t = t
self._timeseries = timeseries
self._st_semivar = st_semivar
self._chan_map = chan_map
combs = chan_map.site_combinations
self._binned = (len(x) != len(np.unique(combs.dist)))
if self._binned:
diffs = np.abs(combs.dist - x[:, None])
self._bin_map = diffs.argmin(0)
# set up semivariogram figure
self.sv_fig = Figure(figsize=(5, 4))
ax = self.sv_fig.add_subplot(111)
self._sv_line = ax.plot(x, st_semivar[:, 0], marker='o', ls='--')[0]
ax.set_ylim(0, st_semivar.max())
sns.despine(ax=ax)
self._marked_point = None
self._fit_line = None
self._info_text = None
# set up raw potential map
self.array_frame = Figure(figsize=(5, 4))
ax = self.array_frame.add_subplot(111)
frame = chan_map.embed(
self._timeseries[:, int(self._t[self.slider])]
)
clim = np.percentile(self._timeseries, [2, 98])
self._lfp_img = ax.imshow(frame, clim=clim, origin='upper')
# set up raw timeseries plot
self.tsfig = Figure(figsize=(5, 4))
ax = self.tsfig.add_subplot(111)
lines = [list(enumerate(ts)) for ts in timeseries]
lc = LineCollection(lines, colors='k', alpha=0.5, linewidths=0.5)
ax.add_collection(lc)
ax.autoscale_view(True, True, True)
ax.set_xlabel('Samples')
ax.set_ylabel('Voltage')
self._time_marker = ax.axvline(t[self.slider], color='r', lw=1)
# set up site-site connections plot
self.cnx = Figure(figsize=(5, 5))
ax = self.cnx.add_subplot(111)
i, j = chan_map.to_mat()
ax.scatter(j, i, s=10)
ax.set_ylim(i.max() + 0.5, i.min() - 0.5)
ax.axis('image')
self._cnx_lines = None
super(STSemivar, self).__init__(**traits)
@staticmethod
def from_array_and_lag(x, n, lag, chan_map, normed=False, **kwargs):
"""
Create new STSemivar object from array and short-time specs.
Parameters
----------
x : ndarray
n_site x n_samples
n : int
short-time length in samples
lag : int
lag (step) length in samples
chan_map : ChannelMap
site to array lookup
normed : bool
Normalize (unit) variance (default False)
Other keyword arguments are passed through to the
semivariogram estimator.
"""
t = np.arange(x.shape[-1])
xb = BlockedSignal(x.copy(), n, overlap=n - lag)
tb = BlockedSignal(t, n, overlap=n - lag)
st_svar = []
block_time = []
combs = chan_map.site_combinations
for i in range(len(xb)):
xb_ = xb.block(i)
if normed:
xb_ = xb_ / xb_.std(1)[:, None]
sx, sy = sv.semivariogram(xb_, combs, **kwargs)
st_svar.append(sy)
block_time.append(tb.block(i).mean())
st_svar = np.array(st_svar).T
block_time = np.array(block_time)
return STSemivar(sx, block_time, st_svar, x, chan_map)
@on_trait_change('slider')
def _slider_changed(self):
# 1) semivar plot (and marked point) (and fit line)
self._sv_line.set_data(self._x, self._st_semivar[:, self.slider])
if self._marked_point is not None:
mx, my = self._marked_point.get_data()
xbin = np.argmin(np.abs(self._x - mx[0]))
x = self._x[xbin]
y = self._st_semivar[xbin, self.slider]
self._marked_point.set_data([x], [y])
if self.fitting:
self._draw_fit()
# 2) heatmap
f = self._timeseries[:, int(self._t[self.slider])]
m = self._chan_map.embed(f)
self._lfp_img.set_array(m)
# 3) time marker on timeseries
self._time_marker.set_data(
[self._t[self.slider], self._t[self.slider]], [0, 1]
)
for f in (self.sv_fig, self.array_frame, self.tsfig):
f.canvas.draw_idle()
@on_trait_change('fitting')
def _toggle_fit(self):
if self._fit_line is None:
self._draw_fit()
else:
self._fit_line.remove()
self._info_text.remove()
self._fit_line = self._info_text = None
def fit_semivar(self, xf=None):
x, y = self._sv_line.get_data()
yl = self._sv_line.axes.get_ylim()[1]
bounds = {
'theta': (0.5, x.max()),
'nu': (0.25, 5),
'nugget': (0, y.min()),
'sill': (y.mean(), 1.5 * yl)
}
prm = matern_semivariogram(
x, y=y, free=('theta', 'nu', 'sill', 'nugget'), bounds=bounds
)
if xf is None:
xf = x
y_est = matern_semivariogram(xf, **prm)
return prm, y_est
def _get_info_text(self, prm):
matern_label = make_matern_label(**prm)
theta = prm['theta']
nu = prm['nu']
s0 = matern_spectrum(0, theta=theta, nu=nu)
def fn(x, t, n):
return matern_spectrum(x, t, n) - s0 / 100
kc = so.brentq(fn, 0, 3, args=(theta, nu))
bw_label = 'Crit dens: {0:.2f} mm'.format((2 * kc) ** -1)
label = '\n'.join([matern_label, bw_label])
return label
def _draw_fit(self):
ax = self.sv_fig.axes[0]
xl = ax.get_xlim()
xf = np.linspace(xl[0], xl[1], 100)
prm, y = self.fit_semivar(xf=xf)
label = self._get_info_text(prm)
if self._fit_line is None:
self._fit_line = ax.plot(xf, y, color='r', ls='-')[0]
self._info_text = ax.text(
0.1, 0.75, label, fontsize=10, transform=ax.transAxes
)
else:
self._fit_line.set_data(xf, y)
self._info_text.set_text(label)
if self.sv_fig.canvas:
self.sv_fig.canvas.draw_idle()
def _setup_picker(self):
xscale = self.sv_fig.axes[0].transData.get_matrix()[0, 0]
self._sv_line.set_picker(True)
self._sv_line.set_pickradius(xscale * np.diff(self._x).min())
self.sv_fig.canvas.mpl_connect('pick_event', self._pick_pairs)
def _pick_pairs(self, event):
m_event = event.mouseevent
px = m_event.xdata
xbin = np.argmin(np.abs(self._x - px))
x = self._x[xbin]
y = self._st_semivar[xbin, self.slider]
# unmark previous marked point (if any)
if self._marked_point is not None:
mx, my = self._marked_point.get_data()
self._marked_point.remove()
self._cnx_lc.remove()
pc = self.cnx.axes[0].collections[0]
pc.set_sizes(np.ones(len(self._chan_map)) * 10)
self._marked_point = self._cnx_lc = None
# if the picked point was the old point, then return
if np.abs(mx[0] - x) == 0:
return
import os
import errno
import numpy as np
import itertools
import six
from scipy import sparse
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.patches import Wedge
from sklearn.metrics import confusion_matrix
from textwrap import wrap
def savefig_and_close(fig, figname, path='', bbox_extra_artists=None):
filename = os.path.join(path, figname)
fig.savefig(filename, bbox_extra_artists=bbox_extra_artists,
bbox_inches='tight')
fig.clear()
plt.close(fig)
def newfig(name, **kwargs):
fig = plt.figure(name, **kwargs)
fig.clf()
return fig
def savefig(fig, path='figures', prefix='weak_labels_', extension='svg'):
fig.tight_layout()
name = fig.get_label()
filename = "{}{}.{}".format(prefix, name, extension)
try:
os.makedirs(path)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
fig.savefig(os.path.join(path, filename))
def plot_data(x, y, loc='best', save=True, title='data', cmap='Paired'):
if sparse.issparse(x):
x = x.toarray()
fig = newfig('data', figsize=(5, 3))
ax = fig.add_subplot(111)
classes = np.unique(y)
n_c = float(len(classes))
cmap = cm.get_cmap(cmap)
for i, y_i in enumerate(classes):
ax.scatter(x[(y == y_i).flatten(), 0], x[(y == y_i).flatten(), 1],
c=cmap(i/n_c), s=30, edgecolors=None, alpha=.8, lw=0.1,
label='{}'.format(y_i))
ax.set_xlabel('$x_0$')
ax.set_ylabel('$x_1$')
ax.set_title(title)
ax.axis('equal')
ax.legend(loc=loc)
ax.grid(True)
if save:
savefig(fig)
return fig
class MyFloat(float):
def _remove_leading_zero(self, value, string):
if 1 > value > -1:
string = string.replace('0', '', 1)
return string
def __str__(self):
string = super(MyFloat, self).__str__()
return self._remove_leading_zero(self, string)
def __format__(self, format_string):
string = super(MyFloat, self).__format__(format_string)
return self._remove_leading_zero(self, string)
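# Illustrative check of MyFloat's formatting (not in the original module): only
# values strictly between -1 and 1 have their leading zero dropped.
def test_myfloat_leading_zero():
    assert '{:.2f}'.format(MyFloat(0.25)) == '.25'
    assert '{:.2f}'.format(MyFloat(-0.25)) == '-.25'
    assert '{:.2f}'.format(MyFloat(1.25)) == '1.25'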
# TODO use this or other heatmap to visualize confusion matrix
def plot_df_heatmap(df, normalize=None, title='Heat-map',
cmap=plt.cm.Blues, colorbar=False, ylabel=None,
xlabel=None):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize` to 'rows' or 'cols'.
normalize : 'rows', 'cols' (default=None)
"""
rows = df.index.values
columns = df.columns.values
M = df.values
if normalize == 'rows':
M = M.astype('float') / M.sum(axis=1)[:, np.newaxis]
if normalize == 'cols':
M = M.astype('float') / M.sum(axis=0)[np.newaxis, :]
xlabel = df.columns.name
ylabel = df.index.name
return plot_heatmap(M, columns=columns, rows=rows, cmap=cmap,
colorbar=colorbar, title=title, ylabel=ylabel,
xlabel=xlabel)
def plot_confusion_matrix(M, columns=None, rows=None, cmap=plt.cm.Blues,
colorbar=False, fig=None, title='Heat-map',
ylabel='True label', xlabel='Predicted label',
**kwargs):
return plot_heatmap(M=M, columns=columns, rows=rows, cmap=cmap,
colorbar=colorbar, fig=fig, title=title, ylabel=ylabel,
xlabel=xlabel, **kwargs)
def plot_heatmap(M, columns=None, rows=None, cmap=plt.cm.Blues, colorbar=False,
fig=None, ax=None, title='Heat-map', ylabel=None, xlabel=None):
if columns is None:
columns = [str(i) for i in range(M.shape[1])]
if rows is None:
rows = [str(i) for i in range(M.shape[0])]
h_size = 5 # len(columns)*.5 + 2
v_size = 4 # len(rows)*.5 + 2
if fig is None:
fig = plt.figure(figsize=(h_size, v_size))
if ax is None:
ax = fig.add_subplot(111)
im = ax.imshow(M, interpolation='nearest', cmap=cmap)
if colorbar:
fig.colorbar(im)
if h_size < 4:
title = "\n".join(wrap(title, 30))
ax.set_title(title)
column_tick_marks = np.arange(len(columns))
ax.set_xticks(column_tick_marks)
ax.set_xticklabels(columns, rotation=45, ha='right')
row_tick_marks = np.arange(len(rows))
ax.set_yticks(row_tick_marks)
ax.set_yticklabels(rows)
if xlabel is not None:
ax.set_xlabel(xlabel)
if ylabel is not None:
ax.set_ylabel(ylabel)
thresh = np.nanmin(M) + ((np.nanmax(M)-np.nanmin(M)) / 2.)
are_ints = M.dtype in ['int', 'int32', 'int64']
for i, j in itertools.product(range(M.shape[0]), range(M.shape[1])):
# fontsize is adjusted for different number of digits
if are_ints:
num_text = str(M[i, j])
else:
num_text = '{:0.2f}'.format(MyFloat(M[i, j]))
if np.isfinite(M[i, j]):
ax.text(j, i, num_text, horizontalalignment="center",
verticalalignment="center", color="white" if M[i, j] >
thresh else "black", fontsize=16-len(num_text))
fig.tight_layout()
return fig
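# Minimal usage sketch for plot_heatmap (not from the original module): the labels
# and counts below are made up purely for illustration.
def example_plot_heatmap():
    M = np.array([[5, 1], [2, 7]])
    fig = plot_heatmap(M, columns=['pred 0', 'pred 1'], rows=['true 0', 'true 1'],
                       title='Toy confusion matrix', xlabel='Predicted label',
                       ylabel='True label', colorbar=True)
    savefig_and_close(fig, 'toy_confusion_matrix.svg')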
def dual_half_circle(center, radius, angle=0, ax=None, colors=('w','k'),
**kwargs):
"""
Add two half circles to the axes *ax* (or the current axes) with the
specified facecolors *colors* rotated at *angle* (in degrees).
"""
if ax is None:
ax = plt.gca()
theta1, theta2 = angle, angle + 180
w1 = Wedge(center, radius, theta1, theta2, fc=colors[0], **kwargs)
w2 = Wedge(center, radius, theta2, theta1, fc=colors[1], **kwargs)
for wedge in [w1, w2]:
#ax.add_artist(wedge)
ax.add_patch(wedge)
return [w1, w2]
def test_dual_half_circle_main():
fig, ax = plt.subplots()
dual_half_circle((0.5, 0.5), radius=0.3, angle=90, ax=ax)
ax.axis('equal')
plt.show()
def plot_multilabel_scatter(X, Y, cmap=cm.get_cmap('tab20'), edgecolor='k',
linewidth=0.4, title=None, fig=None, ax=None,
radius_scaler=20.0, **kwargs):
X_std = X.std(axis=0)
if X.shape[1] > 2:
biggest_variance = np.argsort(X_std)[-2:]
X_std = X_std[biggest_variance]
X = X[:,biggest_variance]
X_min = X.min(axis=0)
X_max = X.max(axis=0)
n_classes = Y.shape[1]
radius = ((X_max - X_min)/radius_scaler)[:2].min()
#radius = (X.max() - X.min())/radius_scaler
if fig is None:
fig = plt.figure(figsize=(4, 3))
if ax is None:
ax = fig.add_subplot(111)
for x, y in zip(X, Y):
theta2s = np.cumsum(np.true_divide(y, y.sum())*360.0)
theta1 = 0
if np.isfinite(theta2s[0]):
for i, theta2 in enumerate(theta2s):
if theta1 != theta2:
w = Wedge(x[:2], radius, theta1, theta2, ec=edgecolor, lw=linewidth,
fc=cmap(i), **kwargs)
ax.add_patch(w)
theta1 = theta2
else:
# Sample does not belong to any class
print('Sample does not belong to any class')
w = Wedge(x[:2], radius, 0, 360, ec='black', lw=linewidth,
fc='white', **kwargs)
ax.add_patch(w)
ax.set_xlim([X_min[0]-X_std[0], X_max[0]+X_std[0]])
ax.set_ylim([X_min[1]-X_std[1], X_max[1]+X_std[1]])
ax.axis('equal')
if title is not None:
ax.set_title(title)
return fig
def test_multilabel_plot():
X = np.array([[0,0], [0,1], [1,0], [1,1]])
Y = np.array([[0, 0, 1],
[0, 1, 1],
[1, 0, 0],
[1, 1, 1]])
plot_multilabel_scatter(X, Y)
plt.show()
def plot_errorbar(data, x=None, fmt='--o', title='errorbar', elinewidth=1.0,
perrorevery=0.2, legend=None, fig=None, **kwargs):
"""
Parameters
----------
data: np.array or list of np.array
If it is a list, each np.array is considered as an errorbar line
perrorevery: float
Percentage of errorbars with respect to the number of samples
"""
return_fig = fig is None
if fig is None:
fig = newfig(title, figsize=(5, 3))
ax = fig.add_subplot(111)
ax.set_title(title)
if type(data) is np.ndarray:
data = (data,)
for i, matrix in enumerate(data):
errorevery = int(1 / perrorevery)
if errorevery < 1:
errorevery = 1
if x is None:
x = range(matrix.shape[1])
means = matrix.mean(axis=0)
stds = matrix.std(axis=0)
ax.errorbar(x=x, y=means, yerr=stds, elinewidth=(len(data)-i)*elinewidth,
errorevery=errorevery, capthick=(len(data)-i)*elinewidth,
capsize=4, **kwargs)
if legend is not None:
ax.legend(legend)
if return_fig:
return fig
return ax
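# Illustrative call for plot_errorbar (not from the original module): two random
# 10x50 "learning-curve" matrices, each drawn as one errorbar line.
def example_plot_errorbar():
    curves_a = np.random.randn(10, 50).cumsum(axis=1)
    curves_b = np.random.randn(10, 50).cumsum(axis=1) + 2
    fig = plot_errorbar([curves_a, curves_b], title='toy_curves',
                        legend=['method A', 'method B'])
    savefig(fig)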
def render_mpl_table(data, col_width=3.0, row_height=0.625, font_size=14,
header_color='#40466e', row_colors=['#f1f1f2', 'w'],
edge_color='w', bbox=[0, 0, 1, 1], header_columns=0,
ax=None, **kwargs):
"""
source: https://stackoverflow.com/questions/19726663/how-to-save-the-pandas-dataframe-series-data-as-a-figure
"""
if ax is None:
size = (np.array(data.shape[::-1]) + np.array([0, 1])) * np.array([col_width, row_height])
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 26 20:38:40 2019
@author: gpang
"""
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
class Continuous_time_GP_forward:
def __init__(self, dataset):
self.xu_train = dataset['xu_train']
self.yu_train = dataset['yu_train']
self.xU_train = dataset['xU_train']
self.yU_train = dataset['yU_train']
self.x_test = dataset['x_test']
self.y_test = dataset['y_test']
self.diffusivity = dataset['diffusivity']
def ku(self, x, xp, n_x, sig, lx, lt, diag = False):
# xp : x'; hyp: hyper-parameters
# n_x: number of x's
# sig: signal standard deviation
# lx: characteristic length in x direction
# lt: characteristic length in t direction
# 'diag = False' : output a matrix of size n_x \times n_xp
# 'diag = True' : output a column vector of size n_x \times 1 (n_x=n_xp)
x1, t1 = x[:,0:1], x[:,1:2]
x2, t2 = xp[:,0:1], xp[:,1:2]
x2 = tf.reshape(x2, (1,-1))
t2 = tf.reshape(t2, (1,-1))
if diag == False:
k = sig**2 * tf.exp(-(x1-x2)**2/2/lx**2 - (t1-t2)**2/2/lt**2)
else:
k = sig**2 * tf.ones((n_x,1),dtype=tf.float64)
return k
def kU(self, x, xp, n_x, sig, lx, lt, c, diag = False):
# xp : x'; hyp: hyper-parameters
# n_x: number of x's
# sig: signal standard deviation
# lx: characteristic length in x direction
# lt: characteristic length in t direction
# c: diffusivity
# 'diag = False' : output a matrix of size n_x \times n_xp
# 'diag = True' : output a column vector of size n_x \times 1 (n_x=n_xp)
x1, t1 = x[:,0:1], x[:,1:2]
x2, t2 = xp[:,0:1], xp[:,1:2]
x2 = tf.reshape(x2, (1,-1))
t2 = tf.reshape(t2, (1,-1))
# Use Maple to do symbol manipulation
if diag == False:
k = 3*tf.exp((-(x1 - x2)**2*lt**2 - (t1 - t2)**2*lx**2)/(2*lx**2*lt**2))*(c**2*(lx**4 - 2*(x1 - x2)**2*lx**2 + (x1 - x2)**4/3)*lt**4 + lx**8*lt**2/3 - lx**8*(t1 - t2)**2/3)/(lt**4*lx**8)
k = sig**2 * k
else:
k = 3*(c**2*lt**4*lx**4 + 1/3*lx**8*lt**2)/(lt**4*lx**8)
k = sig**2 * k
return k
def kuU(self, x, xp, n_x, sig, lx, lt, c):
# xp : x'; hyp: hyper-parameters
# n_x: number of x's
# sig: signal standard deviation
# lx: characteristic length in x direction
# lt: characteristic legnth in t direction
# c: diffusivity
x1, t1 = x[:,0:1], x[:,1:2]
x2, t2 = xp[:,0:1], xp[:,1:2]
x2 = tf.reshape(x2, (1,-1))
t2 = tf.reshape(t2, (1,-1))
# Use Maple to do symbol manipulation
k = tf.exp((-(x1 - x2)**2*lt**2 - (t1 - t2)**2*lx**2)/(2*lx**2*lt**2))*(c*(lx + x1 - x2)*(lx - x1 + x2)*lt**2 + (t1 - t2)*lx**4)/(lt**2*lx**4)
return k * sig**2
def kUu(self, x, xp, n_x, sig, lx, lt, c):
# xp : x'; hyp: hyper-parameters
# n_x: number of x's
# sig: signal standard deviation
# lx: characteristic length in x direction
# lt: characteristic legnth in t direction
# c: diffusivity
x1, t1 = x[:,0:1], x[:,1:2]
x2, t2 = xp[:,0:1], xp[:,1:2]
x2 = tf.reshape(x2, (1,-1))
t2 = tf.reshape(t2, (1,-1))
# Use Maple to do symbol manipulation
k = tf.exp((-(x1 - x2)**2*lt**2 - (t1 - t2)**2*lx**2)/(2*lx**2*lt**2))*(c*(lx + x1 - x2)*(lx - x1 + x2)*lt**2 - (t1 - t2)*lx**4)/(lt**2*lx**4)
return k * sig**2
def K_train(self, xu, xU, n_u, n_U, sig, lx, lt, c): ## assemble the convariance matrix for training
KU = self.kU(xU, xU, n_U, sig, lx, lt, c)
Ku = self.ku(xu, xu, n_u, sig, lx, lt)
KuU = self.kuU(xu, xU, n_u, sig, lx, lt, c)
KUu = self.kUu(xU, xu, n_U, sig, lx, lt, c)
K1 = tf.concat((KU, KUu),axis=1)
K2 = tf.concat((KuU, Ku),axis=1)
K = tf.concat((K1,K2),axis=0)
return K
def K_test(self, xt, xu, xU, n_t, sig, lx, lt, c): ## assemble the covariance matrix for testing or predicting
Ku = self.ku(xt, xu, n_t, sig, lx, lt)
KuU = self.kuU(xt, xU, n_t, sig, lx, lt, c)
K = tf.concat((KuU, Ku),axis=1)
K_diag = self.ku(xt, xu, n_t, sig, lx, lt, diag = True)
return K, K_diag
def nlml(self, xu, yu, n_u, xU, yU, n_U, sig, lx, lt, sig_n, c): ## negative log-marginal likeliood
N = n_u + n_U
self.Kn = self.K_train(xu, xU, n_u, n_U, sig, lx, lt, c)+ (sig_n**2+1.0e-10) * tf.eye(N, dtype=tf.float64)
self.L = tf.cholesky(self.Kn)
r = tf.concat((yU,yu),axis=0)
self.alpha = tf.cholesky_solve(self.L, r)
temp = tf.matmul(r, self.alpha, transpose_a=True)
return temp /2.0 +tf.reduce_sum(tf.log(tf.diag_part(self.L))) \
+ 0.5 * N * np.log(2.0*np.pi)
def training(self, num_iter=10001, learning_rate = 5.0e-4):
tf.reset_default_graph()
## initialize hyperparameters of GP;
## 'tf.exp' preserves the positivity of hyperparameters
sig = tf.exp(tf.Variable(0.0,dtype=np.float64)) # signal standard deviation
lx = tf.exp(tf.Variable(0.0,dtype=np.float64)) # charactersitic length in space
lt = tf.exp(tf.Variable(0.0,dtype=np.float64)) # characteristic length in time
sig_n = tf.exp(tf.Variable(0.0,dtype=np.float64)) # noise standard deviation
c = self.diffusivity
n_u = self.xu_train.shape[0]
n_U = self.xU_train.shape[0]
n_t = self.x_test.shape[0]
xu = tf.placeholder(tf.float64, shape=(None,2))
yu = tf.placeholder(tf.float64, shape=(None,1))
xU = tf.placeholder(tf.float64, shape=(None,2))
yU = tf.placeholder(tf.float64, shape=(None,1))
xt = tf.placeholder(tf.float64, shape=(None,2))
nlml_tf = self.nlml(xu, yu, n_u, xU, yU, n_U, sig, lx, lt, sig_n, c)
k_test, k_diag = self.K_test(xt, xu, xU, n_t, sig, lx, lt, c)
mean_u = tf.matmul(k_test,self.alpha) ## posterior mean
V = tf.linalg.triangular_solve(self.L,tf.transpose(k_test))
var_u = k_diag - tf.reshape(tf.reduce_sum(V*V,axis=0),(-1,1)) + sig_n**2 ## posterior variance
std_u = tf.sqrt(tf.maximum(var_u, tf.zeros((n_t,1),dtype=tf.float64))) ## keep the variance non-negative
optimizer_Adam = tf.train.AdamOptimizer(learning_rate) ### Employ Adam stochastic gradient descent
train_op_Adam = optimizer_Adam.minimize(nlml_tf) ## try to minimize the 'nlml'
nlml_min = 1.0e16
feed_dict = {xu: self.xu_train, yu: self.yu_train, \
xU: self.xU_train, yU: self.yU_train, \
xt: self.x_test}
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for i in range(num_iter):
sess.run(train_op_Adam, feed_dict=feed_dict) # training for one iteration
if i % 10000 == 0: # print results every 10000 iterations or epochs
nlml_temp = sess.run(nlml_tf, feed_dict = feed_dict)
if nlml_temp < nlml_min:
nlml_min = nlml_temp # keep the results corresponding to lowest loss
self.mean, self.std, sig0, lx0, lt0, sig_n0 = \
sess.run([mean_u, std_u, sig, lx, lt, sig_n], feed_dict=feed_dict)
print ('*****************Iter: ',i, ' *********** \n')
print ('nlml: ', nlml_min)
print ('signal std: ', sig0)
print ('noise std: ',sig_n0)
print ('lx: ', lx0)
print ('lt: ', lt0)
print ('L2_error: ', np.linalg.norm(self.mean-self.y_test,2)/np.linalg.norm(self.y_test,2))
print ('\n')
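# Sketch of how Continuous_time_GP_forward might be driven (not from the original
# script). The heat-equation data below are synthetic placeholders satisfying
# u_t = c*u_xx, so the forcing observations yU are zero; only the dictionary keys
# are taken from the constructor above.
def run_gp_forward_demo():
    x = np.linspace(0.0, 1.0, 21)
    t = np.linspace(0.0, 1.0, 21)
    X, T = np.meshgrid(x, t)
    grid = np.hstack((X.reshape(-1, 1), T.reshape(-1, 1)))
    c = 0.1
    u_exact = np.sin(np.pi * grid[:, 0:1]) * np.exp(-c * np.pi**2 * grid[:, 1:2])
    idx = np.random.choice(grid.shape[0], 50, replace=False)
    dataset = {
        'xu_train': grid[idx], 'yu_train': u_exact[idx],       # u observations
        'xU_train': grid[idx], 'yU_train': np.zeros((50, 1)),  # U = u_t - c*u_xx observations
        'x_test': grid, 'y_test': u_exact,
        'diffusivity': c,
    }
    model = Continuous_time_GP_forward(dataset)
    model.training(num_iter=10001, learning_rate=5.0e-4)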
class Continuous_time_NN_forward:
def __init__(self, dataset):
self.xu_train = dataset['xu_train']
self.yu_train = dataset['yu_train']
self.xU_train = dataset['xU_train']
self.yU_train = dataset['yU_train']
self.x_test = dataset['x_test']
self.y_test = dataset['y_test']
self.diffusivity = dataset['diffusivity']
self.noise = dataset['noise']
def xavier_init(self,size): # Initialize the NN weight matrices; the random seed is fixed for reproducibility
in_dim = size[0]
out_dim = size[1]
xavier_stddev = np.sqrt(2.0/(in_dim + out_dim))
return tf.Variable(tf.truncated_normal([in_dim, out_dim], stddev=xavier_stddev,dtype=tf.float64,seed=1234), dtype=tf.float64)
def DNN(self, X, layers,weights,biases): # forward propagation of a fully-connected NN
L = len(layers)
H = X
for l in range(0,L-2):
W = weights[l]
b = biases[l]
H = tf.nn.tanh(tf.add(tf.matmul(H, W), b))
W = weights[-1]
b = biases[-1]
Y = tf.add(tf.matmul(H, W), b)
return Y
def training(self, num_iter=10001, learning_rate = 5.0e-4):
tf.reset_default_graph()
layers = [2]+[20]*3 +[1] #DNN layers: two neurons in input layer, three hidden layers with 20 neurons in each, and one neuron in output layer
L = len(layers)
weights = [self.xavier_init([layers[l], layers[l+1]]) for l in range(0, L-1)]
biases = [tf.Variable( tf.zeros((1, layers[l+1]),dtype=tf.float64)) for l in range(0, L-1)]
x_u = tf.placeholder(tf.float64, shape=(None,1))
t_u = tf.placeholder(tf.float64, shape=(None,1))
x_U = tf.placeholder(tf.float64, shape=(None,1))
t_U = tf.placeholder(tf.float64, shape=(None,1))
u_u = self.DNN(tf.concat((x_u,t_u),axis=1), layers, weights, biases) ## NN for the soltion u, with inputs (x_u,t_u)
u_U = self.DNN(tf.concat((x_U,t_U),axis=1), layers, weights, biases) ## NN for u, but with different inputs (x_U, t_U)
u_U_x = tf.gradients(u_U,x_U)[0] ## automatic differentiation to compute the gradients of output with respect to input
u_U_xx = tf.gradients(u_U_x,x_U)[0] ## second derivative with respect to spatial coordinate
u_U_t = tf.gradients(u_U, t_U)[0] ## first derivative in time
c = self.diffusivity
u_obs = self.yu_train ### observations for y_2 and y_3 (see the book chapter for notations y_2 and y_3)
U_obs = self.yU_train ### observation for y_1
U_U = u_U_t - c * u_U_xx
loss_u = tf.reduce_mean(tf.square(u_u-u_obs))/tf.reduce_mean(tf.square(u_obs)) ## mean squared error in the sense of relative error
loss_U = tf.reduce_mean(tf.square(U_U-U_obs))/tf.reduce_mean(tf.square(U_obs))
if self.noise == 0.0:
strength = 0.0
else:
strength = 1.0e-4 ### strength of regularization
reg = 0.0
for i in range(len(weights)):
reg = reg + strength * tf.nn.l2_loss(weights[i]) ### add l_2 regularization to reduce overfitting when the data are noisy.
loss = loss_U + loss_u + reg
feed_dict = {x_u: self.xu_train[:,0:1], t_u: self.xu_train[:,1:2], \
x_U: self.xU_train[:,0:1], t_U: self.xU_train[:,1:2]
}
feed_dict_test = {x_u: self.x_test[:,0:1], t_u: self.x_test[:,1:2], \
x_U: self.x_test[:,0:1], t_U: self.x_test[:,1:2]
}
optimizer_Adam = tf.train.AdamOptimizer(learning_rate)
train_op_Adam = optimizer_Adam.minimize(loss)
x_index = []
loss_max = 1.0e16
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for i in range(num_iter+1):
sess.run(train_op_Adam, feed_dict = feed_dict)
if i % 10000 == 0:
loss_val, loss_u_val, loss_U_val = sess.run([loss,loss_u, loss_U], feed_dict=feed_dict)
self.u, self.f = sess.run([u_u, U_U], feed_dict=feed_dict_test)
if loss_val < loss_max:
loss_max = loss_val
error= np.linalg.norm(self.u-self.y_test)/np.linalg.norm(self.y_test)
x_index.append(i)
print ('***************Iteration: ', i, '************\n')
print ('Loss: ' , loss_max, 'Loss_u: ', loss_u_val, 'Loss_U: ', loss_U_val)
print ('L2_error: ', error)
class Discrete_time_GP_inverse:
def __init__(self, dataset):
self.xu_train = dataset['xu_train']
self.yu_train = dataset['yu_train']
self.xU_train = dataset['xU_train']
self.yU_train = dataset['yU_train']
self.noise = dataset['noise']
def ku(self, x, xp, n_x, sig, lx, lt, diag = False):
# xp : x'; hyp: hyper-parameters
# n_x: number of x's
# sig: signal standard deviation
# lx: characteristic length in x direction
# lt: characteristic length in t direction
# 'diag = False' : output a matrix of size n_x \times n_xp
# 'diag = True' : output a column vector of size n_x \times 1 (n_x=n_xp)
x1 = x[:,0:1]
x2 = xp[:,0:1]
x2 = tf.reshape(x2, (1,-1))
if diag == False:
k = sig**2 * tf.exp(-(x1-x2)**2/2/lx**2 )
else:
k = sig**2 * tf.ones((n_x,1),dtype=tf.float64)
return k
def kU(self, x, xp, n_x, sig, lx, lt, c, dt, diag = False):
# xp : x'; hyp: hyper-parameters
# n_x: number of x's
# sig: signal standard deviation
# lx: characteristic length in x direction
# lt: characteristic length in t direction
# c: diffusivity
# 'diag = False' : output a matrix of size n_x \times n_xp
# 'diag = True' : output a column vector of size n_x \times 1 (n_x=n_xp)
x1 = x[:,0:1]
x2 = xp[:,0:1]
x2 = tf.reshape(x2, (1,-1))
# Use Maple to do symbol manipulation
if diag == False:
k = 3*tf.exp(-(x1 - x2)**2/(2*lx**2))*(c**2*(lx**4 - 2*(x1 - x2)**2*lx**2 + (x1 - x2)**4/3)*dt**2 + (2*c*lx**4*(lx + x1 - x2)*(lx - x1 + x2)*dt)/3 + lx**8/3)/lx**8
k = sig**2 * k
else:
k = 3*(c**2*dt**2*lx**4 + 2/3*c*dt*lx**6 + 1/3*lx**8)/lx**8
k = sig**2 * k
return k
def kuU(self, x, xp, n_x, sig, lx, lt, c, dt):
# xp : x'; hyp: hyper-parameters
# n_x: number of x's
# sig: signal standard deviation
# lx: characteristic length in x direction
# lt: characteristic legnth in t direction
# c: diffusivity
x1 = x[:,0:1]
x2 = xp[:,0:1]
x2 = tf.reshape(x2, (1,-1))
# Use Maple to do symbol manipulation
k = (c*(lx + x1 - x2)*(lx - x1 + x2)*dt + lx**4)*tf.exp(-(x1 - x2)**2/(2*lx**2))/lx**4
return k * sig**2
def kUu(self, x, xp, n_x, sig, lx, lt, c,dt):
# xp : x'; hyp: hyper-parameters
# n_x: number of x's
# sig: signal standard deviation
# lx: characteristic length in x direction
# lt: characteristic length in t direction
# c: diffusivity
x1 = x[:,0:1]
x2 = xp[:,0:1]
x2 = tf.reshape(x2, (1,-1))
# Use Maple to do symbol manipulation
k = (c*(lx + x1 - x2)*(lx - x1 + x2)*dt + lx**4)*tf.exp(-(x1 - x2)**2/(2*lx**2))/lx**4
return k * sig**2
def K_train(self, xu, xU, n_u, n_U, sig, lx, lt, c, dt):
KU = self.kU(xU, xU, n_U, sig, lx, lt, c, dt)
Ku = self.ku(xu, xu, n_u, sig, lx, lt)
KuU = self.kuU(xu, xU, n_u, sig, lx, lt, c, dt)
KUu = self.kUu(xU, xu, n_U, sig, lx, lt, c, dt)
K1 = tf.concat((KU, KUu),axis=1)
K2 = tf.concat((KuU, Ku),axis=1)
K = tf.concat((K1,K2),axis=0)
return K
def K_test(self, xt, xu, xU, n_t, sig, lx, lt, c, dt):
Ku = self.ku(xt, xu, n_t, sig, lx, lt)
KuU = self.kuU(xt, xU, n_t, sig, lx, lt, c, dt)
K = tf.concat((KuU, Ku),axis=1)
K_diag = self.ku(xt, xu, n_t, sig, lx, lt, diag = True)
return K, K_diag
def nlml(self, xu, yu, n_u, xU, yU, n_U, sig, lx, lt, sig_n, c, dt):
N = n_u + n_U
self.Kn = self.K_train(xu, xU, n_u, n_U, sig, lx, lt, c, dt)+ (sig_n**2+1.0e-10) * tf.eye(N, dtype=tf.float64)
self.L = tf.cholesky(self.Kn)
r = tf.concat((yU,yu),axis=0)
self.alpha = tf.cholesky_solve(self.L, r)
temp = tf.matmul(r, self.alpha, transpose_a=True)
return temp /2.0 +tf.reduce_sum(tf.log(tf.diag_part(self.L))) \
+ 0.5 * N * np.log(2.0*np.pi)
def training(self, num_iter=10001, learning_rate = 5.0e-4):
tf.reset_default_graph()
sig = tf.exp(tf.Variable(0.0,dtype=np.float64))
lx = tf.exp(tf.Variable(0.0,dtype=np.float64))
lt = tf.exp(tf.Variable(0.0,dtype=np.float64))
sig_n = tf.exp(tf.Variable(0.0,dtype=np.float64))
c = tf.exp(tf.Variable(0.0,dtype=np.float64, trainable=True))
dt = 0.01
n_u = self.xu_train.shape[0]
n_U = self.xU_train.shape[0]
xu = tf.placeholder(tf.float64, shape=(None,1))
yu = tf.placeholder(tf.float64, shape=(None,1))
xU = tf.placeholder(tf.float64, shape=(None,1))
yU = tf.placeholder(tf.float64, shape=(None,1))
nlml_tf = self.nlml(xu, yu, n_u, xU, yU, n_U, sig, lx, lt, sig_n, c, dt)
optimizer_Adam = tf.train.AdamOptimizer(learning_rate)
train_op_Adam = optimizer_Adam.minimize(nlml_tf)
nlml_min = 1.0e16
feed_dict = {xu: self.xu_train, yu: self.yu_train, \
xU: self.xU_train, yU: self.yU_train
}
index = []
c_record = []
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for i in range(num_iter):
sess.run(train_op_Adam, feed_dict=feed_dict)
if i % 100 == 0:
nlml_temp = sess.run(nlml_tf, feed_dict = feed_dict)
if nlml_temp < nlml_min:
index.append(i)
nlml_min = nlml_temp
sig0, lx0, lt0, sig_n0, self.c0 = \
sess.run([sig, lx, lt, sig_n, c], feed_dict=feed_dict)
c_record.append(self.c0)
fig=plt.figure()
plt.plot(np.stack(index), np.stack(c_record),'r.-',label='Optimization history')
plt.plot(np.stack(index), 0.1*np.ones((len(index),1)),'b-.',label='True parameter')
plt.legend()
plt.xlabel('Iteration')
plt.ylabel('Estimated c')
plt.title('D-GP-estimated c: '+str(self.c0)+' (True c: 0.1)')
plt.savefig('D-GP_noise_'+str(self.noise*100)+'.png',dpi=300)
plt.close(fig)
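# Sketch of a two-snapshot dataset for the discrete-time inverse problem (not from
# the original script). With the backward-Euler reading U = u - dt*c*u_xx used in
# the kernels above, yU_train plays the role of the earlier snapshot u(x, t_n) and
# yu_train the later one u(x, t_n + dt); the exact solution is a synthetic placeholder.
def build_discrete_inverse_dataset(noise=0.0, n_points=30, dt=0.01, c=0.1, t_n=0.2):
    x = np.linspace(0.0, 1.0, n_points)[:, None]
    def u(t):
        return np.sin(np.pi * x) * np.exp(-c * np.pi**2 * t)
    yU = u(t_n) + noise * np.random.randn(n_points, 1)
    yu = u(t_n + dt) + noise * np.random.randn(n_points, 1)
    return {'xu_train': x, 'yu_train': yu,
            'xU_train': x, 'yU_train': yU,
            'noise': noise}
# e.g. Discrete_time_GP_inverse(build_discrete_inverse_dataset()).training()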
class Discrete_time_NN_inverse:
def __init__(self, dataset):
self.xu_train = dataset['xu_train']
self.yu_train = dataset['yu_train']
self.xU_train = dataset['xU_train']
self.yU_train = dataset['yU_train']
self.noise = dataset['noise']
def xavier_init(self,size): # Initializing the weight matrices of NN
in_dim = size[0]
out_dim = size[1]
xavier_stddev = np.sqrt(2.0/(in_dim + out_dim))
return tf.Variable(tf.truncated_normal([in_dim, out_dim], stddev=xavier_stddev,dtype=tf.float64,seed=1234), dtype=tf.float64)
def DNN(self, X, layers,weights,biases):
L = len(layers)
H = X
for l in range(0,L-2):
W = weights[l]
b = biases[l]
H = tf.nn.tanh(tf.add(tf.matmul(H, W), b))
W = weights[-1]
b = biases[-1]
Y = tf.add(tf.matmul(H, W), b)
return Y
def training(self, num_iter=10001, learning_rate = 5.0e-4):
tf.reset_default_graph()
layers = [1]+[20]*3 +[1] #DNN layers
L = len(layers)
weights = [self.xavier_init([layers[l], layers[l+1]]) for l in range(0, L-1)]
biases = [tf.Variable( tf.zeros((1, layers[l+1]),dtype=tf.float64)) for l in range(0, L-1)]
dt = 0.01
x_u = tf.placeholder(tf.float64, shape=(None,1))
x_U = tf.placeholder(tf.float64, shape=(None,1))
u_u = self.DNN(x_u, layers, weights, biases) # NN approximation of u at the observation points x_u
u_U = self.DNN(x_U, layers, weights, biases)
u_U_x = tf.gradients(u_U,x_U)[0]
u_U_xx = tf.gradients(u_U_x,x_U)[0]
c = tf.exp(tf.Variable(0.0,dtype=np.float64,trainable=True))
u_obs = self.yu_train
U_obs = self.yU_train
U_U = u_U - dt * c * u_U_xx
loss_u = tf.reduce_mean(tf.square(u_u-u_obs))/tf.reduce_mean(tf.square(u_obs))
loss_U = tf.reduce_mean(tf.square(U_U-U_obs))/tf.reduce_mean(tf.square(U_obs))
reg = 0.0
if self.noise == 0.0:
strength=0.0
else:
strength = 1.0e-4
for i in range(len(weights)):
reg = reg + tf.nn.l2_loss(weights[i])
loss = loss_U + loss_u + strength * reg
feed_dict = {x_u: self.xu_train[:,0:1], \
x_U: self.xU_train[:,0:1]
}
optimizer_Adam = tf.train.AdamOptimizer(learning_rate)
train_op_Adam = optimizer_Adam.minimize(loss)
c_record = []
index = []
loss_max = 1.0e16
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for i in range(num_iter+1):
sess.run(train_op_Adam, feed_dict = feed_dict)
if i % 100 == 0:
loss_val, loss_u_val, loss_U_val, self.c_val = sess.run([loss,loss_u, loss_U, c], feed_dict=feed_dict)
if loss_val < loss_max:
loss_max = loss_val
c_record.append(self.c_val)
index.append(i)
fig=plt.figure()
plt.plot(np.stack(index), np.stack(c_record),'r.-',label='Optimization history')
# License: BSD 3 clause
import itertools
import sys
import warnings
import numpy as np
from numpy.polynomial.legendre import leggauss
from scipy.linalg import solve
from tick.base import Base, ThreadPool
from tick.hawkes.inference.build.hawkes_inference import (PointProcessCondLaw)
# noinspection PyPep8Naming
class HawkesConditionalLaw(Base):
"""This class is used for performing non parametric estimation of
multi-dimensional marked Hawkes processes based on conditional laws.
Marked Hawkes processes are point processes defined by the intensity:
.. math::
\\forall i \\in [1 \\dots D], \\quad
\\lambda_i = \\mu_i + \\sum_{j=1}^D \\int \\phi_{ij} * f_{ij}(v_j) dN_j
where
* :math:`D` is the number of nodes
* :math:`\mu_i` are the baseline intensities
* :math:`\phi_{ij}` are the kernels
* :math:`v_j` are the marks (considered iid) of the process :math:`N_j`
* :math:`f_{ij}` the mark functions supposed to be piece-wise constant
on intervals :math:`I^j(l)`
The estimation is made from empirical computations of
.. math::
\\lim_{\\epsilon \\rightarrow 0}
E [ (N_i[t + lag + \\delta + \\epsilon] -
\Lambda[t + lag + \\epsilon]) | N_j[t]=1
\quad \& \quad
v_j(t) \in I^j(l) ]
For all the possible values of :math:`i`, :math:`j` and :math:`l`.
The :math:`lag` is sampled on a uniform grid defined by
:math:`\\delta`: :math:`lag = n * \\delta`.
Estimation can be performed using several realizations.
Parameters
----------
claw_method : {'lin', 'log'}, default='lin'
Specifies the way the conditional laws are sampled. It can be either:
* 'lin' : sampling is linear on [0, max_lag] using sampling period
delta_lag
* 'log' : sampling is semi-log. It uses linear sampling on [0, min_lag]
with sampling period delta_lag and log sampling on [min_lag, max_lag]
using :math:`\\exp(\\delta)` sampling period.
delta_lag : `float`, default=0.1
See claw_methods
min_lag : `float`, default=1e-4
See claw_methods
max_lag : `float`, default=40
See claw_methods
quad_method : {'gauss', 'lin', 'log'}, default=gauss
Sampling used for quadrature
* 'gauss' for gaussian quadrature
* 'lin' for linear quadrature
* 'log' for log quadrature
min_support : `float`, default=1e-4
Start value of kernel estimation. It is used for 'log' quadrature
method only, otherwise it is set to 0.
max_support : `float`, default=40
End value of kernel estimation
n_quad : `int` : default=50
The number of quadrature points between [min_support, max_support]
used for solving the system.
Be aware that the complexity increases as the square of this number.
n_threads : `int`, default=1
Number of threads used for parallel computation.
* if `int <= 0`: the number of physical cores available on the CPU
* otherwise the desired number of threads
Other Parameters
----------------
delayed_component : list of `int`, shape=(n_nodes, ), default=None
list of node indices corresponding to node that should be delayed
(to avoid simultaneous jumps of different components which can be a
problem in the estimation)
delay : `float`
The delayed used for `delayed_component`. Selected components are
all delayed with the same value
marked_components : `dict`
A dictionary that indicates which component is considered as marked
and what are the corresponding intervals ``I_j(l)``
Attributes
----------
n_nodes : `int`
Number of nodes of the estimated Hawkes process
n_realizations : `int`
Number of given realizations
baseline : np.ndarray, shape=(n_nodes,)
Estimation of the baseline
kernels_norms : np.ndarray, shape=(n_nodes, n_nodes)
L1 norm matrix of the kernel norms
kernels : list of list
Kernel's estimation on the quadrature points
mean_intensity : list of `float`
The estimated mean intensity
symmetries1d : list of 2-tuple
List of component index pairs for imposing symmetries on the mean
intensity (e.g., ``[(0,1),(2,3)]`` means that the mean intensity of
the components 0 and 1 must be the same and the mean intensity of the
components 2 and 3 also)
Can be set using the `set_model` method.
symmetries2d : list of 2-tuple of 2-tuple
List of kernel coordinates pairs to impose symmetries on the kernel
matrix (e.g., ``[[(0,0),(1,1)],[(1,0),(0,1)]]`` for a bidiagonal
kernel in dimension 2)
        Can be set using the `set_model` method.
mark_functions : list of 2-tuple
The mark functions as a list (lexical order on i,j and l, see below)
References
----------
<NAME>., & <NAME>. (2014).
Second order statistics characterization of Hawkes processes and
non-parametric estimation. `arXiv preprint arXiv:1401.0903`_.
.. _arXiv preprint arXiv:1401.0903: https://arxiv.org/pdf/1401.0903.pdf
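    Examples
    --------
    A minimal usage sketch (the two realizations and their timestamps are
    illustrative only)::

        import numpy as np
        from tick.hawkes import HawkesConditionalLaw

        learner = HawkesConditionalLaw(claw_method='lin', delta_lag=0.1,
                                       max_lag=10)
        learner.incremental_fit([np.array([2.1, 3., 4.]),
                                 np.array([2., 2.01, 8.])], compute=False)
        # after adding enough realizations, learner.compute() solves the
        # Fredholm system and fills baseline, kernels and kernels_norms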
"""
_attrinfos = {
'_hawkes_object': {},
'_lags': {},
'_lock': {
'writable': False
},
'_phi_ijl': {},
'_norm_ijl': {},
'_ijl2index': {},
'_index2ijl': {},
'_n_index': {},
'_mark_probabilities': {},
'_mark_probabilities_N': {},
'_mark_min': {},
'_mark_max': {},
'_lam_N': {},
'_lam_T': {},
'_claw': {},
'_claw1': {},
'_claw_X': {},
'_n_events': {},
'_int_claw': {},
'_IG': {},
'_IG2': {},
'_quad_x': {},
'_quad_w': {}
}
def __init__(self, delta_lag=.1, min_lag=1e-4, max_lag=40, n_quad=50,
max_support=40, min_support=1e-4, quad_method='gauss',
marked_components=None, delayed_component=None, delay=0.00001,
model=None, n_threads=1, claw_method='lin'):
Base.__init__(self)
# Init the claw sampling parameters
self.delta_lag = delta_lag
self.max_lag = max_lag
self.min_lag = min_lag
self.claw_method = claw_method
# Init quadrature method
self.quad_method = quad_method
self.n_quad = n_quad
self.min_support = min_support
self.max_support = max_support
# Init marked components
if marked_components is None:
marked_components = dict()
self.marked_components = marked_components
# Init attributes
self.n_realizations = 0
self._lags = None
self._compute_lags()
self.symmetries1d = []
self.symmetries2d = []
self.delayed_component = np.array(delayed_component)
self.delay = delay
# _claw : list of 2-tuple
# Represents the conditional laws written above (lexical order on i,
# j and l, see below). Each conditional law is represented by a
# pair (x, c) where x are the abscissa
self._claw = None
# _claw1 : list of list
# Represents the conditional laws written above without conditioning by
# the mark (so a i,j list)
self._claw1 = None
self._lock = None
# quad_x : `np.ndarray`, shape=(n_quad, )
# The abscissa of the quadrature points used for the Fredholm system
self._quad_x = None
# quad_w : `np.ndarray`, shape=(n_quad, )
# The weights the quadrature points used for the Fredholm system
self._quad_w = None
self._phi_ijl, self._norm_ijl = None, None
self.kernels, self.kernels_norms, self.baseline = None, None, None
self.mark_functions = None
if n_threads == -1:
import multiprocessing
n_threads = multiprocessing.cpu_count()
self.n_threads = n_threads
if model:
self.set_model(model)
def fit(self, events: list, T=None):
"""Fit the model according to the given training data.
Parameters
----------
events : `list` of `list` of `np.ndarray`
List of Hawkes processes realizations.
            Each realization of the Hawkes process is a list of length
            n_nodes, one entry per component. Namely `events[i][j]` contains a
one-dimensional `numpy.array` of the events' timestamps of
component j of realization i.
If only one realization is given, it will be wrapped into a list
T : `double`, default=None
The duration (in physical time) of the realization. If it is None then
T is considered to be the time of the last event (of any component).
Returns
-------
output : `HawkesConditionalLaw`
The current instance of the Learner
"""
if not isinstance(events[0][0], np.ndarray):
events = [events]
for timestamps in events:
self.incremental_fit(timestamps, compute=False, T=T)
self.compute()
return self
def set_model(self, symmetries1d=list(), symmetries2d=list(),
delayed_component=None):
"""Set the model to be used.
Parameters
----------
symmetries1d : list of 2-tuple
List of component index pairs for imposing symmetries on the mean
intensity (e.g, ``[(0,1),(2,3)]`` means that the mean intensity of
the components 0 and 1 must be the same and the mean intensity of
            the components 2 and 3 also).
            Can be set using the `set_model` method.
symmetries2d : list of 2-tuple of 2-tuple
List of kernel coordinates pairs to impose symmetries on the kernel
matrix (e.g., ``[[(0,0),(1,1)],[(1,0),(0,1)]]`` for a bidiagonal
kernel in dimension 2)
            Can be set using the `set_model` method.
delayed_component : list of `int`, shape=(N, ), default=`None`
            list of node indices corresponding to the nodes that should be delayed
(to avoid simultaneous jumps of different components which can be a
problem in the estimation)
If no model is specified then default values for these fields are used
Notes
-----
We set the symmetries, the kernel names and delayed components for
first realization only
"""
self.symmetries1d = symmetries1d
self.symmetries2d = symmetries2d
self.delayed_component = np.array(delayed_component)
def _init_basics(self, realization):
"""Init the dimension
"""
self.n_nodes = len(realization)
return realization
def _init_marked_components(self):
"""Init marked components
This builds the field self.marked_components so that it is set to
[component1_mark_intervals, ..., componentN_mark_intervals]
where each componentj_mark_intervals is of the form
[[min1, max1], [min2, max2], ..., [mink, maxk]]
It describes the intervals the function f^ij are constants on.
"""
marked_components = self.marked_components
self.marked_components = []
for i in range(0, self.n_nodes):
self.marked_components.append([])
if i in marked_components:
self.marked_components[i].append(
[-sys.float_info.max, marked_components[i][0]])
for j in range(0, len(marked_components[i]) - 1):
self.marked_components[i].append(
marked_components[i][j:j + 2])
self.marked_components[i].append(
[marked_components[i][-1], sys.float_info.max])
else:
self.marked_components[i].append(
[-sys.float_info.max, sys.float_info.max])
def _init_index(self):
"""Init for indexing
Given i,j,l --> index and vice versa (i and j are components of the
Hawkes and l is the marked interval index of the component j)
"""
self._ijl2index = []
self._index2ijl = []
index = 0
for i in range(0, self.n_nodes):
self._ijl2index.append([])
for j in range(0, self.n_nodes):
self._ijl2index[i].append([])
for l in range(0, len(self.marked_components[j])):
self._ijl2index[i][j].append(index)
self._index2ijl.append((i, j, l))
index += 1
self._n_index = len(self._index2ijl)
def _init_mark_stats(self):
"""We initialize the mark probabilities and min-max of the marks
"""
# Proba for the mark
self._mark_probabilities = []
# In order to compute the probability we need to store the number of
# events
self._mark_probabilities_N = []
self._mark_min = [sys.float_info.max] * self.n_nodes
self._mark_max = [sys.float_info.min] * self.n_nodes
for i in range(0, self.n_nodes):
self._mark_probabilities_N.append(
[0] * len(self.marked_components[i]))
self._mark_probabilities.append(
[0] * len(self.marked_components[i]))
def _init_lambdas(self):
"""Init the lambda's
"""
self.mean_intensity = [0] * self.n_nodes
self._lam_N = [0] * self.n_nodes
self._lam_T = [0] * self.n_nodes
# Used to store the number of events of each component that
# have been used to perform estimation on all the lags
# versus the number of events that could not be used for all the lags
# Warning : we don't take care of marks for this computation
        # normally we should do this computation independently for each mark
self._n_events = np.zeros((2, self.n_nodes))
def _init_claws(self):
"""Init the claw storage
"""
self._claw = [0] * len(self._index2ijl)
def _index_to_lexical(self, index):
"""Convert index to lexical order (i,j,l)
Parameters
----------
index : `int`
Returns
-------
i : `int`
First node of the Hawkes
j : `int`
Second node of the Hawkes
l : `int`
Marked interval index of the component j
Examples
--------
>>> from tick.hawkes import HawkesConditionalLaw
>>> import numpy as np
>>> learner = HawkesConditionalLaw()
>>> learner.incremental_fit([np.array([2.1, 3, 4]),
... np.array([2., 2.01, 8])],
... compute=False)
>>> learner._index_to_lexical(2)
(1, 0, 0)
"""
return self._index2ijl[index]
def _lexical_to_index(self, i, j, l):
"""Convert lexical order (i,j,l) to index
Parameters
----------
i : `int`
First node of the Hawkes
j : `int`
Second node of the Hawkes
l : `int`
Marked interval index of the component j
Returns
-------
index : `int`
Examples
--------
>>> from tick.hawkes import HawkesConditionalLaw
>>> import numpy as np
>>> learner = HawkesConditionalLaw()
>>> learner.incremental_fit([np.array([2.1, 3, 4]),
... np.array([2., 2.01, 8])],
... compute=False)
>>> learner._lexical_to_index(1, 0, 0)
2
"""
return self._ijl2index[i][j][l]
def incremental_fit(self, realization, T=None, compute=True):
"""Allows to add some more realizations before estimation is performed.
It updates the conditional laws (stored in `self._claw` and
        `self._claw1`) and of the mean intensity (in `self.mean_intensity`).
Parameters
----------
        realization : list of `np.ndarray` or list of 2-tuple of `np.ndarray`
            * list of `np.ndarray`, shape=(N,), representing the arrival times
              of each component
            * list of pairs (t, m) of `np.ndarray` representing the arrival
              times of each component (t) and the cumulative marks signal (m)
T : `double`, default=None
            The duration (in physical time) of the realization. If it is None then
T is considered to be the time of the last event (of any component).
        compute : `bool`, default=`True`
Computes kernel estimation. If set to `False`, you will have to
manually call `compute` method afterwards.
This is useful to add multiple realizations and compute only once
all conditional laws have been updated.
"""
# If first realization we perform some init
if self.n_realizations == 0:
realization = self._init_basics(realization)
self._init_marked_components()
self._init_index()
self._init_mark_stats()
self._init_lambdas()
self._init_claws()
else:
if compute and self._has_been_computed_once():
warnings.warn(("compute() method was already called, "
"computed kernels will be updated."))
# We perform some checks
if self.n_nodes != len(realization):
msg = 'Bad dimension for realization, should be %d instead of %d' \
% (self.n_nodes, len(realization))
raise ValueError(msg)
# Realization normalization
if not isinstance(realization[0], (list, tuple)):
realization = [(r, np.arange(len(r), dtype=np.double) + 1)
for r in realization]
# Do we need to delay the realization ?
if self.delayed_component:
old_realization = realization
realization = []
for i in range(0, self.n_nodes):
if any(self.delayed_component == i):
if len(old_realization[i][0]) == 0:
realization.append(old_realization[i])
else:
realization.append((old_realization[i][0] + self.delay,
old_realization[i][1]))
else:
realization.append(old_realization[i])
# We compute last event time
last_event_time = -1
for i in range(0, self.n_nodes):
if len(realization[i][0]) > 0:
last_event_time = max(realization[i][0][-1], last_event_time)
# If realization empty --> return
if last_event_time < 0:
warnings.warn(
"An empty realization was passed. No computation was performed."
)
return
# We set T if needed
if T is None:
T = last_event_time
elif T < last_event_time:
raise ValueError("Argument T (%g) specified is too small, "
"you should use default value or a value "
"greater or equal to %g." % (T, last_event_time))
# We update the mark probabilities and min-max
for i in range(0, self.n_nodes):
if len(realization[i][0]) == 0:
continue
# We have to take into account the first mark
der = np.hstack([realization[i][1][0], np.diff(realization[i][1])])
total = 0
self._mark_min[i] = min(self._mark_min[i], np.min(der))
self._mark_max[i] = max(self._mark_max[i], np.max(der))
for l, interval in enumerate(self.marked_components[i]):
self._mark_probabilities_N[i][l] += \
np.sum((der >= interval[0]) & (der < interval[1]))
total += self._mark_probabilities_N[i][l]
for l, interval in enumerate(self.marked_components[i]):
self._mark_probabilities[i][l] = \
self._mark_probabilities_N[i][l] / total
der[:] = 1
# We update the Lambda
for i in range(0, self.n_nodes):
if len(realization[i][0]) <= 0:
continue
self._lam_N[i] += len(realization[i][0])
self._lam_T[i] += T
self.mean_intensity[i] = self._lam_N[i] / self._lam_T[i]
# We update the _n_events of component i
# Warning : we don't take care of marks for this computation
        # normally we should do this computation independently for each mark
for i in range(0, self.n_nodes):
good = np.sum(realization[i][0] <= T - self._lags[-1])
bad = len(realization[i][0]) - good
self._n_events[0, i] += good
self._n_events[1, i] += bad
# We might want to use threads, since this is the time consuming part
with_multi_processing = self.n_threads > 1
if with_multi_processing:
pool = ThreadPool(with_lock=True, max_threads=self.n_threads)
self._set('_lock', pool.lock)
for index, (i, j, l) in enumerate(self._index2ijl):
if with_multi_processing:
pool.add_work(self._PointProcessCondLaw, realization, index, i,
j, l, T)
else:
self._PointProcessCondLaw(realization, index, i, j, l, T)
if with_multi_processing:
pool.start()
# Here we compute the G^ij (not conditioned to l)
# It is recomputed each time
self._claw1 = []
for i in range(0, self.n_nodes):
self._claw1.append([])
for j in range(0, self.n_nodes):
index = self._ijl2index[i][j][0]
self._claw1[i].append(np.copy(self._claw[index]))
self._claw1[i][j] *= self._mark_probabilities[j][0]
for l in range(1, len(self._ijl2index[i][j])):
index = self._ijl2index[i][j][l]
self._claw1[i][j] += self._claw[index] * \
self._mark_probabilities[j][l]
self.n_realizations += 1
# Deal with symmetrization
for (i, j) in self.symmetries1d:
t = (self.mean_intensity[i] + self.mean_intensity[j]) / 2
self.mean_intensity[i] = t
self.mean_intensity[j] = t
t = (self._mark_min[i] + self._mark_min[j]) / 2
self._mark_min[i] = t
self._mark_min[j] = t
t = (self._mark_max[i] + self._mark_max[j]) / 2
self._mark_max[i] = t
self._mark_max[j] = t
if self.marked_components[i] != self.marked_components[j]:
continue
for l in range(0, len(self.marked_components[i])):
t = (self._mark_probabilities_N[i][l] +
self._mark_probabilities_N[j][l]) / 2
self._mark_probabilities_N[i][l] = t
self._mark_probabilities_N[j][l] = t
t = (self._mark_probabilities[i][l] +
self._mark_probabilities[j][l]) / 2
self._mark_probabilities[i][l] = t
self._mark_probabilities[j][l] = t
for ((i1, j1), (i2, j2)) in self.symmetries2d:
t = (self._claw1[i1][j1] + self._claw1[i2][j2]) / 2
self._claw1[i1][j1] = t
self._claw1[i2][j2] = t
if self.marked_components[j1] != self.marked_components[j2]:
continue
for l in range(0, len(self.marked_components[j1])):
index1 = self._ijl2index[i1][j1][l]
index2 = self._ijl2index[i2][j2][l]
t = (self._claw[index1] + self._claw[index2]) / 2
self._claw[index1] = t
self._claw[index2] = t
# We can remove the thread lock (lock disallows pickling)
self._set('_lock', None)
if compute:
self.compute()
def _PointProcessCondLaw(self, realization, index, i, j, l, T):
claw_X = np.zeros(len(self._lags) - 1)
claw_Y = np.zeros(len(self._lags) - 1)
lambda_i = len(realization[i][0]) / T
PointProcessCondLaw(
realization[i][0], realization[j][0], realization[j][1],
self._lags, self.marked_components[j][l][0],
self.marked_components[j][l][1], T, lambda_i, claw_X, claw_Y)
self._claw_X = claw_X
# TODO: this lock acquire is very expensive here
if self.n_threads > 1:
self._lock.acquire()
# Update claw
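        # (running average over realizations: claw <- (n * claw + claw_Y) / (n + 1))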
if self.n_realizations == 0:
self._claw[index] = claw_Y
else:
self._claw[index] *= self.n_realizations
self._claw[index] += claw_Y
self._claw[index] /= self.n_realizations + 1
# Unlock
if self.n_threads > 1:
self._lock.release()
def _compute_lags(self):
"""Computes the lags at which the claw will be computed
"""
claw_method = self.claw_method
# computes the claw either on a uniform grid (lin) or a semi log
# uniform grid (log)
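        # e.g. with delta_lag=0.1, min_lag=1e-4, max_lag=40, the 'log' grid is
        # [0, 1e-5, ..., 9e-5] followed by 1e-4 * exp(0.1 * k) up to (excluding) 40,
        # while the 'lin' grid is simply [0, 0.1, 0.2, ..., 39.9]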
if claw_method == "log":
y1 = np.arange(0., self.min_lag, self.min_lag * self.delta_lag)
y2 = np.exp(
np.arange(
np.log(self.min_lag), np.log(self.max_lag),
self.delta_lag))
self._lags = np.append(y1, y2)
if claw_method == "lin":
self._lags = np.arange(0., self.max_lag, self.delta_lag)
def _compute_ints_claw(self):
"""Computes the claw and its integrals at the difference of
quadrature points using a linear interpolation
"""
self._int_claw = [0] * self._n_index
# Builds a linear interpolation of the claws at the difference of
# quadrature (only positive abscissa are kept)
for index in range(self._n_index):
xe = self._claw_X
ye = self._claw[index]
xs2 = np.array(
[(a - b)
for (a, b) in itertools.product(self._quad_x, repeat=2)])
xs2 = np.append(xe, xs2)
xs2 = np.append(self._quad_x, xs2)
xs2 = np.array(np.lib.arraysetops.unique(xs2))
xs2 = np.array(np.core.fromnumeric.sort(xs2))
xs2 = xs2[xs2 >= 0.]
ys2 = np.zeros(len(xs2))
j = 0
for i in range(1, len(xe)):
while j < len(xs2) and xs2[j] < xe[i]:
ys2[j] = (ye[i - 1]) + ((ye[i]) - (ye[i - 1])) * (
xs2[j] - xe[i - 1]) / (xe[i] - xe[i - 1])
j += 1
sc = (xs2, ys2)
self._int_claw[index] = sc
# Computes the integrals of the claws (IG) and the integrals of x
# times the claws from 0 to the abscissa we have just computed
self._IG = []
self._IG2 = []
for i in range(self._n_index):
xc = self._int_claw[i][0]
yc = self._int_claw[i][1]
iyc_IG = np.append(
np.array(0.), np.cumsum(np.diff(xc) * (yc[:-1] + yc[1:]) / 2.))
self._IG += [(xc, iyc_IG)]
iyc_IG2 = np.append(
np.array(0.),
np.cumsum((yc[:-1] + yc[1:]) / 2. * np.diff(xc) * xc[:-1] +
np.diff(xc) * np.diff(xc) / 3. * np.diff(yc) +
np.diff(xc) * np.diff(xc) / 2. * yc[:-1]))
self._IG2 += [(xc, iyc_IG2)]
@staticmethod
def _lin0(sig, t):
"""Find closest value of a signal, zero value border
"""
x, y = sig
if t >= x[-1]:
return 0
index = np.searchsorted(x, t)
if index == len(y) - 1:
return y[index]
elif np.abs(x[index] - t) < np.abs(x[index + 1] - t):
return y[index]
else:
return y[index + 1]
@staticmethod
def _linc(sig, t):
"""Find closest value of a signal, continuous border
"""
x, y = sig
if t >= x[-1]:
return y[-1]
index = np.searchsorted(x, t)
if np.abs(x[index] - t) < np.abs(x[index + 1] - t):
return y[index]
else:
return y[index + 1]
def _G(self, i, j, l, t):
"""Returns the value of a claw at a point
Used to fill V and M with 'gauss' method
"""
if t < 0:
warnings.warn("G(): should not be called for t < 0")
index = self._ijl2index[i][j][l]
return HawkesConditionalLaw._lin0(self._int_claw[index], t)
def _DIG(self, i, j, l, t1, t2):
"""Returns the integral of a claw between t1 and t2
"""
if t1 >= t2:
warnings.warn("t2>t1 wrong in DIG")
index = self._ijl2index[i][j][l]
return HawkesConditionalLaw._linc(self._IG[index], t2) - \
HawkesConditionalLaw._linc(self._IG[index], t1)
def _DIG2(self, i, j, l, t1, t2):
"""Returns the integral of x times a claw between t1 and t2
"""
if t1 >= t2:
warnings.warn("t2>t1 wrong in DIG2")
index = self._ijl2index[i][j][l]
return HawkesConditionalLaw._linc(self._IG2[index], t2) - \
HawkesConditionalLaw._linc(self._IG2[index], t1)
def compute(self):
"""Computes kernel estimation by solving a Fredholm system.
"""
# We raise an exception if a claw component had no input to be computed
if any(self._n_events[0, :] == 0):
k = np.where(self._n_events[0, :] == 0)[0]
msg = "Cannot run estimation : not enough events for components {}" \
.format(k)
raise ValueError(msg)
# Here we compute the quadrature points and the corresponding weights
# self.quad_x and self.quad_w
if self.quad_method in {'gauss', 'gauss-'}:
self._quad_x, self._quad_w = leggauss(self.n_quad)
self._quad_x = self.max_support * (self._quad_x + 1) / 2
self._quad_w *= self.max_support / 2
elif self.quad_method == 'log':
logstep = (np.log(self.max_support) - np.log(
self.min_support) + 1.) / \
self.n_quad
x1 = np.arange(0., self.min_support, self.min_support * logstep)
x2 = np.exp(
np.arange(
                    np.log(self.min_support), np.log(self.max_support),
                    logstep))
import numpy as np
from functools import lru_cache
class Weights(object):
def __init__(self, mean, std, sample_size, zeros=0, is_count=True):
super(Weights, self).__init__()
self.mean = mean
self.std = std
self.sample_size = sample_size
# Count metrics require positive integers
self.is_count = is_count
# The probability of a metric requires that some of the values be 0
# All remaining values (for count metrics) must be positive integers
self.zeros = zeros
# We can focus on reaching specific totals for mean and standard deviation based on x
# Removing sample_size from the equation lets us focus on reducing those totals to 0
m_total = mean * sample_size # sum(x) = mean * sample_size
s_total = std**2. * (sample_size-1) # sum((x-m)^2) = std**2 * (sample_size-1)
s_total -= zeros*(mean**2) # As zeros are pre-set, we can remove them now (mean total is unaffected by zeros)
        self.totals_pair = np.array((m_total, s_total))
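# Minimal usage sketch of the constructor above (the rest of the Weights class,
# e.g. its solver methods, is not shown in this excerpt). With mean=2.0, std=1.0,
# sample_size=5 and zeros=1 we expect m_total = 2.0 * 5 = 10.0 and
# s_total = 1.0**2 * (5 - 1) - 1 * 2.0**2 = 0.0.
if __name__ == "__main__":
    _w = Weights(mean=2.0, std=1.0, sample_size=5, zeros=1)
    print(_w.totals_pair)  # -> [10.  0.]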
"""Tests for band unfolding calculations."""
import os
import numpy as np
from phonopy import Phonopy
from phonopy.unfolding.core import Unfolding
data_dir = os.path.dirname(os.path.abspath(__file__))
def test_Unfolding_NaCl(ph_nacl: Phonopy):
"""Test to reproduce proper band structure of primitive cell.
Results are written to "bin-unfolding-test.dat".
This data can be plotted by
% plot_band.py bin-unfolding-test.dat
Increase nd to get better plot.
The test is done with nd=10.
"""
# ph = _get_phonon(ph_nacl)
ph = ph_nacl
nd = 10
qpoints = (
np.array(
[
[
x,
]
* 3
for x in range(nd)
]
)
/ float(nd)
- 0.5
)
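    # qpoints: nd points (x, x, x) along the body diagonal, with x running
    # from -0.5 up to (nd - 1) / nd - 0.5 in steps of 1 / nd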
unfolding_supercell_matrix = [[-2, 2, 2], [2, -2, 2], [2, 2, -2]]
mapping = np.arange(len(ph.supercell), dtype=int)
unfolding = Unfolding(
ph, unfolding_supercell_matrix, ph.supercell.scaled_positions, mapping, qpoints
)
unfolding.run()
weights = _get_weights(unfolding, qpoints)
# _write_weights(weights, "unfolding.dat")
# filename_out = os.path.join(data_dir, "bin-unfolding-test.dat")
_compare(weights, os.path.join(data_dir, "bin-unfolding.dat"), filename_out=None)
def test_Unfolding_SC(ph_nacl: Phonopy):
"""Test to reproduce unfoled band structure.
Atomic positions are considered as the lattice ponts.
Results are written to "bin-unfolding_to_atoms-test.dat".
This data can be plotted by
% plot_band.py bin-unfolding_to_atoms-test.dat
Increase nd to get better plot.
The test is done with nd=10.
"""
# ph = _get_phonon(ph_nacl)
ph = ph_nacl
nd = 10
qpoints = (
np.array(
[
[
x,
]
* 3
for x in range(nd)
]
)
/ float(nd)
- 0.5
)
unfolding_supercell_matrix = np.diag([4, 4, 4])
mapping = np.arange(len(ph.supercell), dtype=int)
unfolding = Unfolding(
ph, unfolding_supercell_matrix, ph.supercell.scaled_positions, mapping, qpoints
)
unfolding.run()
weights = _get_weights(unfolding, qpoints)
# _write_weights(weights, "unfolding_to_atoms.dat")
# filename_out = os.path.join(data_dir, "bin-unfolding_to_atoms-test.dat")
_compare(
weights, os.path.join(data_dir, "bin-unfolding_to_atoms.dat"), filename_out=None
)
def _compare(weights, filename, filename_out=None):
bin_data = _binning(weights)
if filename_out:
_write_bin_data(bin_data, filename_out)
with open(filename) as f:
bin_data_in_file = np.loadtxt(f)
np.testing.assert_allclose(bin_data, bin_data_in_file, atol=1e-2)
def _get_weights(unfolding, qpoints):
weights = unfolding.unfolding_weights
freqs = unfolding.frequencies
out_vals = []
for i, q in enumerate(qpoints):
for f, w in zip(freqs[i], weights[i]):
out_vals.append([q[0], q[1], q[2], f, w])
return out_vals
def _write_weights(weights, filename):
with open(filename, "w") as w:
lines = ["%10.7f %10.7f %10.7f %12.7f %10.7f" % tuple(x) for x in weights]
w.write("\n".join(lines))
def _write_bin_data(bin_data, filename):
with open(filename, "w") as w:
lines = ["%8.5f %8.5f %8.5f" % tuple(v) for v in bin_data]
w.write("\n".join(lines))
def _binning(data):
x = []
y = []
w = []
for vals in data:
if vals[4] > 1e-3:
x.append(vals[0])
y.append(vals[3])
w.append(vals[4])
x = np.around(x, decimals=5)
    y = np.around(y, decimals=5)
from __future__ import print_function
import urllib
import bz2
import os
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import pickle
import matplotlib.image as mpimg
import cv2
import plotly.graph_objs as go
from utils.utils_IO import ordered_arr_3d_to_dict, refill_nan_array, arr_2d_to_list_of_dicts, read_image, make_image_array, revert_ordered_arr_2d_to_dict, save_object, write_video
from utils.utils_plotting import plot_image_labels, plot_3d_points, vector_plot, draw_circles, slope, drawLine, skew, plot_cams_and_points
from utils.utils_BA import fun, bundle_adjustment_sparsity, project
from anipose_BA import CameraGroup, Camera
from scipy.spatial.transform import Rotation as R
import alf.io
from one.api import ONE
from pathlib import Path
import mpl_toolkits.mplot3d.axes3d as p3
import matplotlib.animation as animation
import matplotlib.patches as mpatches
'''
adapted from <NAME>, Paninski Lab, 51N84D/3D-Animal-Pose;
this includes bundle adjustment taken from
https://github.com/lambdaloop/aniposelib/blob/master/aniposelib/cameras.py
'''
'''
P_{X,Y}_{TOP,BOT}: (int) - {width / 2, height / 2} for camera {1,2}
-->For setting the offset terms in the camera matrix to the center of the image plane
pts_array_2d: (np.array) - Array of shape (num_cameras, num_points, 2) containing set of 2d points for each camera. This should be after cleaning NaNs i.e. removing rows with NaNs
info_dict: Dictionary with keys {'num_frames', 'num_analyzed_body_parts', 'num_cameras', 'num_points_all', 'clean_point_indices'}
--> 'num_frames' is the number of frames in the video
--> 'num_analyzed_body_parts' is the number of body parts / joints being modeled (i.e. one per keypoint)
--> 'num_cameras' is the number of cameras. In our case, it is 2
--> 'num_points_all' is the original number of points (including NaNs)
--> 'clean_point_indices' is a list of indices (with length = num_points in pts_array_2d) pointing to the clean (non-NaN) entries in the original data
path_images: (list) - List of sublists. Each sublist (one per camera / view) contains absolute paths to image frames.
'''
'''
Get IBL 2D points for a given trial
'''
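# Illustrative sketch of the containers described above (dummy sizes only, not
# real data): pts_array_2d is (num_cameras, num_clean_points, 2) and info_dict
# keeps the bookkeeping needed to map cleaned points back to frames/body parts.
_EXAMPLE_INFO_DICT = {
    'num_frames': 4,
    'num_analyzed_body_parts': 3,
    'num_cameras': 2,
    'num_points_all': 12,                    # 4 frames x 3 body parts, NaNs included
    'clean_point_indices': list(range(10)),  # 10 of the 12 points survived NaN removal
}
_EXAMPLE_PTS_ARRAY_2D = np.zeros((2, 10, 2))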
def find_nearest(array, value):
array = np.asarray(array)
idx = (np.abs(array - value)).argmin()
return idx
def GetXYs(eid, video_type, trial_range):
'''
eid: session id, e.g. '3663d82b-f197-4e8b-b299-7b803a155b84'
video_type: one of 'left', 'right', 'body'
trial_range: first and last trial number of range to be shown, e.g. [5,7]
'''
one = ONE()
dataset_types = ['camera.times',
'trials.intervals',
'camera.dlc']
a = one.list(eid, 'dataset-types')
assert all([i in a for i in dataset_types]
), 'For this eid, not all data available'
datasets = one.type2datasets(eid, dataset_types)
D = one.load_datasets(eid, datasets=datasets)
alf_path = Path(D.local_path[0]).parent.parent / 'alf'
video_data = alf_path.parent / 'raw_video_data'
video_path = list(video_data.rglob('_iblrig_%sCamera.raw.*' % video_type))[0]
print(video_path)
# that gives cam time stamps and DLC output (change to alf_path eventually)
cam1 = alf.io.load_object(video_path.parent, '_ibl_%sCamera' % video_type)
try:
cam0 = alf.io.load_object(alf_path, '_ibl_%sCamera' % video_type)
except:
cam0 = {}
cam = {**cam0,**cam1}
    # just to read in times for newer data (which has DLC results in pqt format)
#cam = alf.io.load_object(alf_path, '_ibl_%sCamera' % video_type)
# pick trial range for which to display stuff
trials = alf.io.load_object(alf_path, '_ibl_trials')
num_trials = len(trials['intervals'])
if trial_range[-1] > num_trials - 1:
print('There are only %s trials' % num_trials)
frame_start = find_nearest(cam['times'],
[trials['intervals'][trial_range[0]][0]])
frame_stop = find_nearest(cam['times'],
[trials['intervals'][trial_range[-1]][1]])
'''
DLC related stuff
'''
Times = cam['times'][frame_start:frame_stop]
del cam['times']
# dlc_name = '_ibl_%sCamera.dlc.pqt' % video_type
# dlc_path = alf_path / dlc_name
# cam=pd.read_parquet(dlc_path)
points = np.unique(['_'.join(x.split('_')[:-1]) for x in cam.keys()])
if video_type != 'body':
d = list(points)
d.remove('tube_top')
d.remove('tube_bottom')
points = np.array(d)
    # Set values to nan if likelihood is too low # for pqt: .to_numpy()
XYs = {}
for point in points:
x = np.ma.masked_where(
cam[point + '_likelihood'] < 0.9, cam[point + '_x'])
x = x.filled(np.nan)
y = np.ma.masked_where(
cam[point + '_likelihood'] < 0.9, cam[point + '_y'])
y = y.filled(np.nan)
XYs[point] = np.array(
[x[frame_start:frame_stop], y[frame_start:frame_stop]])
    res_folder = '/home/mic/3D-Animal-Pose-master/IBL_example/%s_%s_trials_%s_%s' % (
        eid, video_type, trial_range[0], trial_range[1])
    Path(res_folder).mkdir(parents=True, exist_ok=True)
    np.save('%s/XYs_%s.npy' % (res_folder, video_type), XYs)
    np.save('%s/times_%s.npy' % (res_folder, video_type), Times)
#return XYs, Times
def get_3d_points_for_IBL_example():
#bring IBL data in format for bundle_adjust
# starting with one paw only, the one called left in video left
XYs_left = np.load('/home/mic/3D-Animal-Pose-master/IBL_example/XYs_left.npy', allow_pickle=True).flatten()[0]
XYs_right = np.load('/home/mic/3D-Animal-Pose-master/IBL_example/XYs_right.npy', allow_pickle=True).flatten()[0]
times_left = np.load('/home/mic/3D-Animal-Pose-master/IBL_example/times_left.npy')
times_right = np.load('/home/mic/3D-Animal-Pose-master/IBL_example/times_right.npy')
# get closest stamps or right cam (150 Hz) for each stamp of left (60 Hz)
idx_aligned = []
for t in times_left:
idx_aligned.append(find_nearest(times_right, t))
# paw_l in video left = paw_r in video right
# Divide left coordinates by 2 to get them in half resolution like right cam;
# reduce temporal resolution of right cam to that of left cam
num_analyzed_body_parts = 3 # both paws and nose
cam_right_paw1 = np.array([XYs_right['paw_r'][0][idx_aligned], XYs_right['paw_r'][1][idx_aligned]])
cam_left_paw1 = np.array([XYs_left['paw_l'][0]/2,XYs_left['paw_l'][1]/2])
cam_right_paw2 = np.array([XYs_right['paw_l'][0][idx_aligned], XYs_right['paw_l'][1][idx_aligned]])
    cam_left_paw2 = np.array([XYs_left['paw_r'][0]/2, XYs_left['paw_r'][1]/2])
import numpy as np
import CCDutils
#This module contains the functions necessary for doing CCSD in the spin-orbital basis
##Spin-orbital-based utilities
def CCSDdoubles(F,Eri,T2,T1,nocc,nbas,variant):
    #Get the right hand side of the spinorbital CCSD doubles equations. p. 307-308 of Bartlett and Shavitt
niter = 1
#p.307
#Get CCD contribution
G = CCDutils.GHFCCD(F,Eri,T2,nocc,nbas,niter,variant)
# return G
G += np.einsum('cjab,ic->ijab',Eri[nocc:,:nocc,nocc:,nocc:],T1)
G -= np.einsum('ciab,jc->ijab',Eri[nocc:,:nocc,nocc:,nocc:],T1)
G -= np.einsum('ijkb,ka->ijab',Eri[:nocc,:nocc,:nocc,nocc:],T1)
G += np.einsum('ijka,kb->ijab',Eri[:nocc,:nocc,:nocc,nocc:],T1)
G -= np.einsum('ck,ic,kjab->ijab',F[nocc:,:nocc],T1,T2)
G += np.einsum('ck,jc,kiab->ijab',F[nocc:,:nocc],T1,T2)
G -= np.einsum('ck,ka,ijcb->ijab',F[nocc:,:nocc],T1,T2)
G += np.einsum('ck,kb,ijca->ijab',F[nocc:,:nocc],T1,T2)
G += np.einsum('cdak,ic,kjdb->ijab',Eri[nocc:,nocc:,nocc:,:nocc],T1,T2)
G -= np.einsum('cdbk,ic,kjda->ijab',Eri[nocc:,nocc:,nocc:,:nocc],T1,T2)
G -= np.einsum('cdak,jc,kidb->ijab',Eri[nocc:,nocc:,nocc:,:nocc],T1,T2)
G += np.einsum('cdbk,jc,kida->ijab',Eri[nocc:,nocc:,nocc:,:nocc],T1,T2)
G -= np.einsum('ickl,ka,ljcb->ijab',Eri[:nocc,nocc:,:nocc,:nocc],T1,T2)
G += np.einsum('jckl,ka,licb->ijab',Eri[:nocc,nocc:,:nocc,:nocc],T1,T2)
G += np.einsum('ickl,kb,ljca->ijab',Eri[:nocc,nocc:,:nocc,:nocc],T1,T2)
G -= np.einsum('jckl,kb,lica->ijab',Eri[:nocc,nocc:,:nocc,:nocc],T1,T2)
G -= 0.5e0*np.einsum('cdkb,ka,ijcd->ijab',Eri[nocc:,nocc:,:nocc,nocc:],T1,T2)
G += 0.5e0*np.einsum('cdka,kb,ijcd->ijab',Eri[nocc:,nocc:,:nocc,nocc:],T1,T2)
#p. 308
G += 0.5e0*np.einsum('cjkl,ic,klab->ijab',Eri[nocc:,:nocc,:nocc,:nocc],T1,T2)
G -= 0.5e0*np.einsum('cikl,jc,klab->ijab',Eri[nocc:,:nocc,:nocc,:nocc],T1,T2)
G += np.einsum('cdka,kc,ijdb->ijab',Eri[nocc:,nocc:,:nocc,nocc:],T1,T2)
G -= np.einsum('cdkb,kc,ijda->ijab',Eri[nocc:,nocc:,:nocc,nocc:],T1,T2)
G -= np.einsum('cikl,kc,ljab->ijab',Eri[nocc:,:nocc,:nocc,:nocc],T1,T2)
G += np.einsum('cjkl,kc,liab->ijab',Eri[nocc:,:nocc,:nocc,:nocc],T1,T2)
G += np.einsum('cdab,ic,jd->ijab',Eri[nocc:,nocc:,nocc:,nocc:],T1,T1)
G += np.einsum('ijkl,ka,lb->ijab',Eri[:nocc,:nocc,:nocc,:nocc],T1,T1)
G -= np.einsum('cjkb,ic,ka->ijab',Eri[nocc:,:nocc,:nocc,nocc:],T1,T1)
G += np.einsum('cikb,jc,ka->ijab',Eri[nocc:,:nocc,:nocc,nocc:],T1,T1)
G += np.einsum('cjka,ic,kb->ijab',Eri[nocc:,:nocc,:nocc,nocc:],T1,T1)
G -= np.einsum('cika,jc,kb->ijab',Eri[nocc:,:nocc,:nocc,nocc:],T1,T1)
G += 0.5e0*np.einsum('cdkl,ic,jd,klab->ijab',Eri[nocc:,nocc:,:nocc,:nocc],T1,T1,T2)
G += 0.5e0*np.einsum('cdkl,ka,lb,ijcd->ijab',Eri[nocc:,nocc:,:nocc,:nocc],T1,T1,T2)
G -= np.einsum('cdkl,ic,ka,ljdb->ijab',Eri[nocc:,nocc:,:nocc,:nocc],T1,T1,T2)
G += np.einsum('cdkl,jc,ka,lidb->ijab',Eri[nocc:,nocc:,:nocc,:nocc],T1,T1,T2)
G += np.einsum('cdkl,ic,kb,ljda->ijab',Eri[nocc:,nocc:,:nocc,:nocc],T1,T1,T2)
G -= np.einsum('cdkl,jc,kb,lida->ijab',Eri[nocc:,nocc:,:nocc,:nocc],T1,T1,T2)
G -= np.einsum('cdkl,kc,id,ljab->ijab',Eri[nocc:,nocc:,:nocc,:nocc],T1,T1,T2)
G += np.einsum('cdkl,kc,jd,liab->ijab',Eri[nocc:,nocc:,:nocc,:nocc],T1,T1,T2)
G -= np.einsum('cdkl,kc,la,ijdb->ijab',Eri[nocc:,nocc:,:nocc,:nocc],T1,T1,T2)
G += np.einsum('cdkl,kc,lb,ijda->ijab',Eri[nocc:,nocc:,:nocc,:nocc],T1,T1,T2)
# -------------------------------
# G += np.einsum('cdkb,ic,ka,jd->ijab',Eri[nocc:,nocc:,:nocc,nocc:],T1,T1,T1)
# G -= np.einsum('cdka,ic,kb,jd->ijab',Eri[nocc:,nocc:,:nocc,nocc:],T1,T1,T1) wrong sign in diagram D8a, p. 306
# of Shavitt and Bartlett. Compare to
G -= np.einsum('cdkb,ic,ka,jd->ijab',Eri[nocc:,nocc:,:nocc,nocc:],T1,T1,T1)
G += np.einsum('cdka,ic,kb,jd->ijab',Eri[nocc:,nocc:,:nocc,nocc:],T1,T1,T1)
#from Crawford and Schaefer, An Introduction to Coupled Cluster Theory, Wiley ...
#-----------------------------------------------------------------------------
G += np.einsum('cjkl,ic,ka,lb->ijab',Eri[nocc:,:nocc,:nocc,:nocc],T1,T1,T1)
G -=np.einsum('cikl,jc,ka,lb->ijab',Eri[nocc:,:nocc,:nocc,:nocc],T1,T1,T1)
G += np.einsum('cdkl,ic,jd,ka,lb->ijab',Eri[nocc:,nocc:,:nocc,:nocc],T1,T1,T1,T1)
return G
def CCSDsingles(F,Eri,T2,T1,nocc,nbas):
#Get the right hand side of the spinorbital CCSD singles equations. p. 304 of Bartlett and Shavitt
#Driver
G = np.copy(F[:nocc,nocc:])
#Terms involving only doubles
G += np.einsum('kc,ikac->ia',F[:nocc,nocc:],T2)
G += 0.5e0*np.einsum('cdak,ikcd->ia',Eri[nocc:,nocc:,nocc:,:nocc],T2)
G -= 0.5e0*np.einsum('ickl,klac->ia',Eri[:nocc,nocc:,:nocc,:nocc],T2)
#Linear term involving only singles
G += np.einsum('icak,kc->ia',Eri[:nocc,nocc:,nocc:,:nocc],T1)
#Mixed Terms
G -= 0.5e0*np.einsum('cdkl,ic,klad->ia',Eri[nocc:,nocc:,:nocc,:nocc],T1,T2)
G -= 0.5e0*np.einsum('cdkl,ka,ilcd->ia',Eri[nocc:,nocc:,:nocc,:nocc],T1,T2)
G += np.einsum('cdkl,kc,lida->ia',Eri[nocc:,nocc:,:nocc,:nocc],T1,T2)
#higher-order terms involving only singles
G -= np.einsum('ck,ic,ka->ia',F[nocc:,:nocc],T1,T1)
G += np.einsum('cdak,ic,kd->ia',Eri[nocc:,nocc:,nocc:,:nocc],T1,T1)
G -= np.einsum('ickl,ka,lc->ia',Eri[:nocc,nocc:,:nocc,:nocc],T1,T1)
G -= np.einsum('cdkl,ic,ka,ld->ia',Eri[nocc:,nocc:,:nocc,:nocc],T1,T1,T1)
#Don't forget other non-canonical terms
tol = 1.0e-07
F_offdiag = F - np.diag(np.diag(F))
if np.amax(abs(F_offdiag) > tol):
G += np.einsum('ca,ic->ia',F_offdiag[nocc:,nocc:],T1)
G -= np.einsum('ik,ka->ia',F_offdiag[:nocc,:nocc],T1)
return G
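def _example_singles_update(F, G1, nocc):
    # Hedged illustration, not part of the original module: a conventional way to
    # turn the singles residual G1 (shape (nocc, nvirt), as returned above) into an
    # amplitude update is to divide by the orbital-energy denominator
    # D_ia = f_ii - f_aa built from the diagonal of the Fock matrix.
    eps = np.diag(F)
    D = eps[:nocc, None] - eps[None, nocc:]
    return G1 / D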
def CCSDsingles_fact(F,Eri,T2,T1,nocc,nbas):
#build intermediates according to Stanton et al. JCP 94(6) 1991
F_diag = np.diag(np.diag(F))
Tau_tilde = T2 + 0.50e0*(np.einsum('ia,jb->ijab',T1,T1)-np.einsum('ib,ja->ijab',T1,T1))
Fae = F[nocc:,nocc:] - F_diag[nocc:,nocc:]
Fae -= 0.5e0*(np.einsum('em,ma->ea',F[nocc:,:nocc],T1))
Fae += np.einsum('mf,fema->ea',T1,Eri[nocc:,nocc:,:nocc,nocc:])
Fae -= 0.5e0*np.einsum('mnaf,efmn->ea',Tau_tilde,Eri[nocc:,nocc:,:nocc,:nocc])
Fmi = F[:nocc,:nocc] - F_diag[:nocc,:nocc]
Fmi += 0.5e0*(np.einsum('em,ie->im',F[nocc:,:nocc],T1))
Fmi += np.einsum('ne,iemn->im',T1,Eri[:nocc,nocc:,:nocc,:nocc])
Fmi += 0.5e0*np.einsum('inef,efmn->im',Tau_tilde,Eri[nocc:,nocc:,:nocc,:nocc])
Fme = F[nocc:,:nocc] + np.einsum('nf,efmn->em',T1,Eri[nocc:,nocc:,:nocc,:nocc])
#contract T with intermediates to get RHS of singles equation. (eq 1 in Stanton reference)
G = F[:nocc,nocc:] + np.einsum('ie,ea->ia',T1,Fae)
G -= np.einsum('ma,im->ia',T1,Fmi)
G += np.einsum('imae,em->ia',T2,Fme)
G -= np.einsum('nf,ifna->ia',T1,Eri[:nocc,nocc:,:nocc,nocc:])
G -= 0.5e0*np.einsum('imef,efma->ia',T2,Eri[nocc:,nocc:,:nocc,nocc:])
G -= 0.5e0*np.einsum('mnae,einm->ia',T2,Eri[nocc:,:nocc,:nocc,:nocc])
return G
def CCSDdoubles_fact(F,Eri,T2,T1,nocc,nbas):
#build intermediates according to Stanton et al. JCP 94(6) 1991
F_diag = np.diag(np.diag(F))
Tau_tilde = T2 + 0.50e0*(np.einsum('ia,jb->ijab',T1,T1)-np.einsum('ib,ja->ijab',T1,T1))
Tau = T2 + np.einsum('ia,jb->ijab',T1,T1) - np.einsum('ib,ja->ijab',T1,T1)
#2-index intermediates
Fae = F[nocc:,nocc:] - F_diag[nocc:,nocc:]
Fae -= 0.5e0*(np.einsum('em,ma->ea',F[nocc:,:nocc],T1))
Fae += np.einsum('mf,fema->ea',T1,Eri[nocc:,nocc:,:nocc,nocc:])
Fae -= 0.5e0*np.einsum('mnaf,efmn->ea',Tau_tilde,Eri[nocc:,nocc:,:nocc,:nocc])
Fmi = F[:nocc,:nocc] - F_diag[:nocc,:nocc]
Fmi += 0.5e0*(np.einsum('em,ie->im',F[nocc:,:nocc],T1))
Fmi += np.einsum('ne,iemn->im',T1,Eri[:nocc,nocc:,:nocc,:nocc])
Fmi += 0.5e0*np.einsum('inef,efmn->im',Tau_tilde,Eri[nocc:,nocc:,:nocc,:nocc])
Fme = F[nocc:,:nocc] + np.einsum('nf,efmn->em',T1,Eri[nocc:,nocc:,:nocc,:nocc])
#4-index intermediates
Wijmn = Eri[:nocc,:nocc,:nocc,:nocc] + np.einsum('je,iemn->ijmn',T1,Eri[:nocc,nocc:,:nocc,:nocc])
Wijmn -= np.einsum('ie,jemn->ijmn',T1,Eri[:nocc,nocc:,:nocc,:nocc])
    Wijmn += 0.25e0*np.einsum('ijef,efmn->ijmn',Tau,Eri[nocc:,nocc:,:nocc,:nocc])
import json
import hashlib
import numpy as np
from os import path
import seaborn as sns
from tqdm import tqdm
from scipy.stats import zscore
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
from sklearn.decomposition import PCA
from multiprocessing import Process, Manager, Pool
from Code import sampling
from Code.file_io import load_spontaneous,load_orientations
__author__ = '<NAME>'
"""
Calculates bootstrapped variance explained based on sampling of neurons
"""
def demo_variance_explained_curve(use_multiprocessing=False):
"""
    Loads example data and creates a plot of dimensionality as a function of sample size
"""
# Load data and calculate (assumes .npy file is in the same directory)
# neurons = np.load('stringer_spontaneous.npy', allow_pickle=True).item()['sresp']
neurons = load_spontaneous()['sresp']
cell_sample_nums = np.arange(10,20)
cum_var_cutoff = 0.8
dmeans, dlower, dupper = get_variance_explained_curve(neurons, cell_sample_nums, cum_var_cutoff, use_multiprocessing=use_multiprocessing)
# Plot dimensionality means and confidence intervals
ax = plt.subplots(1,1,figsize=(10,10))[1]
ax.plot(cell_sample_nums, dmeans)
ax.fill_between(cell_sample_nums, (dlower), (dupper), color='b', alpha=.1, label='95%-Confidence Interval')
plt.plot(cell_sample_nums, dmeans, color='b', label=f'Mean Dimensionality')
plt.xlabel('Number of Cells Sampled')
    plt.ylabel(f'Dimensionality (Cumulative Var > {int(100*cum_var_cutoff)}%)')
plt.title('Dimensionality of Spontaneous V1 Activity')
plt.legend()
plt.show()
plt.close()
return
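def _example_pca_dimensionality(data, cum_var_cutoff=0.8):
    """Hedged sketch of the dimensionality measure used in this module. The real
    get_pca_dimensionality (not shown in this excerpt) also takes a guess for the
    number of components; this simplified version just fits a full PCA and counts
    the components needed to reach the cumulative explained-variance cutoff."""
    pca = PCA()
    pca.fit(data)
    cum_var = np.cumsum(pca.explained_variance_ratio_)
    return int(np.searchsorted(cum_var, cum_var_cutoff) + 1)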
def get_variance_explained_curve(neurons, cell_sample_nums, cum_var_cutoff=0.8, pca_repetitions=10,
z_transform_data=True, sampling_method='sample_uniform',
use_multiprocessing=False, return_dict=False, depth_range=None,
neuron_locs=None, **kwargs):
""" Return a curve of variance explained. Extra arguments are passed to the sampling function.
Warnings: 1) Returned data will be sorted from lowest to highest cell_sample_nums.
:param neurons: 2D array. Raw data. MUST be in the shape Timepoints x Neurons.
:param cell_sample_nums: 1D Int array. Contains sample numbers to use.
:param cum_var_cutoff: Float. Between 0 and 1. Cutoff for cumulative variance explained.
:param pca_repetitions: Int. Number of PCA repeats for each sample_num
:param z_transform_data: Bool. Set to True to z-score your array before processing
:param sampling_method: Str. Unused at this time.
:param use_multiprocessing: Bool. Set to False if multiprocessing functions throw errors.
Returns three lists: dimensionality means, lower confidence intervals, and upper confidence intervals
"""
sampling_func_lookup = {'sample_uniform': sampling.sample_uniform,
'sample_around_point': sampling.sample_around_point,
'sample_depths_uniform': sampling.sample_depth_range,
'sample_depths_point': sampling.sample_around_point}
sample_func = sampling_func_lookup[sampling_method]
if np.any(np.array(cell_sample_nums) > neurons.shape[1]):
raise Exception('Warning: More samples than neurons available requested!')
# This is shuffled to better estimate runtime in TQDM
shuff_cell_sample_nums = np.copy(cell_sample_nums)
np.random.shuffle(shuff_cell_sample_nums)
# Create empty arrays to store values
dimensionality_means = np.zeros_like(shuff_cell_sample_nums, dtype='float')
dimensionality_lower_ci = np.zeros_like(shuff_cell_sample_nums) # 5th percentile of bootstrapped dimensionality
dimensionality_upper_ci = np.zeros_like(shuff_cell_sample_nums) # 95th percentile of bootstrapped dimensionality
# Transform data to z-score to center it as the units are not the same for all neurons
Z = neurons
if z_transform_data:
Z = zscore(Z, axis=0)
Z = np.nan_to_num(Z)
# Filter dataset to only include depth range if sample_depths_point used
if sampling_method == 'sample_depths_point':
upper,lower = (np.max(depth_range), np.min(depth_range))
mask = np.where(np.logical_and(neuron_locs[2,:] <= upper, neuron_locs[2,:] >= lower))[0]
Z = Z[:, mask]
neuron_locs = np.array(neuron_locs)[:,mask]
# Determine curve for dimensionality guess
dim_sample_nums = [1000, 2000, 3000]
dim_sample_results = []
for dim_sample_num in dim_sample_nums:
sample_neurons = sampling_func_lookup['sample_uniform'](neurons=Z, n=dim_sample_num, depth_range=depth_range, **kwargs)
guess_dimensionality = int(np.min(sample_neurons.shape)*0.75)
dim_sample_results.append(get_pca_dimensionality(sample_neurons, cum_var_cutoff, guess_dimensionality))
dim_curve_params, _ = curve_fit(_dim_curve, dim_sample_nums, dim_sample_results, p0=(1, 1, 4000), maxfev=10000)
full_data_dict = {}
full_data_dict['neuron_nums'] = {}
for i,cell_sample_num in tqdm(enumerate(shuff_cell_sample_nums), total=len(shuff_cell_sample_nums)):
# Create list of smaller arrays to pass to multiprocessing function
array_subsets = []
for rep in range(pca_repetitions):
temp_array = sample_func(Z, n=cell_sample_num, neuron_locs=neuron_locs, depth_range=depth_range, **kwargs)
array_subsets.append(temp_array)
# Calculate dimensionality for all random samples
dimensionality_guess = int(np.min((_dim_curve(cell_sample_num, *dim_curve_params)+300, *array_subsets[0].shape)))
dimensionality_bootstrap = []
if use_multiprocessing:
cutoff_array = np.ones(pca_repetitions)*cum_var_cutoff
            dimensionality_guess_array = (np.ones(pca_repetitions) * dimensionality_guess).astype(int)
import autoarray as aa
import os
import shutil
import numpy as np
import pytest
test_data_path = "{}/../test_files/array/".format(
os.path.dirname(os.path.realpath(__file__))
)
@pytest.fixture(name="memoizer")
def make_memoizer():
return aa.util.array.Memoizer()
class TestMemoizer:
def test_storing(self, memoizer):
@memoizer
def func(arg):
return "result for {}".format(arg)
func(1)
func(2)
func(1)
assert memoizer.results == {
"('arg', 1)": "result for 1",
"('arg', 2)": "result for 2",
}
assert memoizer.calls == 2
def test_multiple_arguments(self, memoizer):
@memoizer
def func(arg1, arg2):
return arg1 * arg2
func(1, 2)
func(2, 1)
func(1, 2)
assert memoizer.results == {
"('arg1', 1), ('arg2', 2)": 2,
"('arg1', 2), ('arg2', 1)": 2,
}
assert memoizer.calls == 2
def test_key_word_arguments(self, memoizer):
@memoizer
def func(arg1=0, arg2=0):
return arg1 * arg2
func(arg1=1)
func(arg2=1)
func(arg1=1)
func(arg1=1, arg2=1)
assert memoizer.results == {
"('arg1', 1)": 0,
"('arg2', 1)": 0,
"('arg1', 1), ('arg2', 1)": 1,
}
assert memoizer.calls == 3
def test_key_word_for_positional(self, memoizer):
@memoizer
def func(arg):
return "result for {}".format(arg)
func(1)
func(arg=2)
func(arg=1)
assert memoizer.calls == 2
def test_methods(self, memoizer):
class Class:
def __init__(self, value):
self.value = value
@memoizer
def method(self):
return self.value
one = Class(1)
two = Class(2)
assert one.method() == 1
assert two.method() == 2
class TestResize:
def test__trim__from_7x7_to_3x3(self):
array = np.ones((7, 7))
array[3, 3] = 2.0
modified = aa.util.array.resized_array_2d_from_array_2d(
array_2d=array, resized_shape=(3, 3)
)
assert (
modified == np.array([[1.0, 1.0, 1.0], [1.0, 2.0, 1.0], [1.0, 1.0, 1.0]])
).all()
def test__trim__from_7x7_to_4x4(self):
array = np.ones((7, 7))
array[3, 3] = 2.0
modified = aa.util.array.resized_array_2d_from_array_2d(
array_2d=array, resized_shape=(4, 4)
)
assert (
modified
== np.array(
[
[1.0, 1.0, 1.0, 1.0],
[1.0, 1.0, 1.0, 1.0],
[1.0, 1.0, 2.0, 1.0],
[1.0, 1.0, 1.0, 1.0],
]
)
).all()
def test__trim__from_6x6_to_4x4(self):
array = np.ones((6, 6))
array[2:4, 2:4] = 2.0
modified = aa.util.array.resized_array_2d_from_array_2d(
array_2d=array, resized_shape=(4, 4)
)
assert (
modified
== np.array(
[
[1.0, 1.0, 1.0, 1.0],
[1.0, 2.0, 2.0, 1.0],
[1.0, 2.0, 2.0, 1.0],
[1.0, 1.0, 1.0, 1.0],
]
)
).all()
def test__trim__from_6x6_to_3x3(self):
array = np.ones((6, 6))
array[2:4, 2:4] = 2.0
modified = aa.util.array.resized_array_2d_from_array_2d(
array_2d=array, resized_shape=(3, 3)
)
assert (
modified == np.array([[2.0, 2.0, 1.0], [2.0, 2.0, 1.0], [1.0, 1.0, 1.0]])
).all()
def test__trim__from_5x4_to_3x2(self):
array = np.ones((5, 4))
array[2, 1:3] = 2.0
modified = aa.util.array.resized_array_2d_from_array_2d(
array_2d=array, resized_shape=(3, 2)
)
assert (modified == np.array([[1.0, 1.0], [2.0, 2.0], [1.0, 1.0]])).all()
def test__trim__from_4x5_to_2x3(self):
array = np.ones((4, 5))
array[1:3, 2] = 2.0
modified = aa.util.array.resized_array_2d_from_array_2d(
array_2d=array, resized_shape=(2, 3)
)
assert (modified == np.array([[1.0, 2.0, 1.0], [1.0, 2.0, 1.0]])).all()
def test__trim_with_new_origin_as_input(self):
array = np.ones((7, 7))
array[4, 4] = 2.0
modified = aa.util.array.resized_array_2d_from_array_2d(
array_2d=array, resized_shape=(3, 3), origin=(4, 4)
)
assert (
modified == np.array([[1.0, 1.0, 1.0], [1.0, 2.0, 1.0], [1.0, 1.0, 1.0]])
).all()
array = np.ones((6, 6))
array[3, 4] = 2.0
modified = aa.util.array.resized_array_2d_from_array_2d(
array_2d=array, resized_shape=(3, 3), origin=(3, 4)
)
assert (
modified == np.array([[1.0, 1.0, 1.0], [1.0, 2.0, 1.0], [1.0, 1.0, 1.0]])
).all()
array = np.ones((9, 8))
array[4, 3] = 2.0
modified = aa.util.array.resized_array_2d_from_array_2d(
array_2d=array, resized_shape=(3, 3), origin=(4, 3)
)
assert (
modified == np.array([[1.0, 1.0, 1.0], [1.0, 2.0, 1.0], [1.0, 1.0, 1.0]])
).all()
array = np.ones((8, 9))
array[3, 5] = 2.0
modified = aa.util.array.resized_array_2d_from_array_2d(
array_2d=array, resized_shape=(3, 3), origin=(3, 5)
)
assert (
modified == np.array([[1.0, 1.0, 1.0], [1.0, 2.0, 1.0], [1.0, 1.0, 1.0]])
).all()
def test__pad__from_3x3_to_5x5(self):
array = np.ones((3, 3))
array[1, 1] = 2.0
modified = aa.util.array.resized_array_2d_from_array_2d(
array_2d=array, resized_shape=(5, 5)
)
assert (
modified
== np.array(
[
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 1.0, 1.0, 0.0],
[0.0, 1.0, 2.0, 1.0, 0.0],
[0.0, 1.0, 1.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
]
)
).all()
def test__pad__from_3x3_to_4x4(self):
array = np.ones((3, 3))
array[1, 1] = 2.0
modified = aa.util.array.resized_array_2d_from_array_2d(
array_2d=array, resized_shape=(4, 4)
)
assert (
modified
== np.array(
[
[0.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 1.0, 1.0],
[0.0, 1.0, 2.0, 1.0],
[0.0, 1.0, 1.0, 1.0],
]
)
).all()
def test__pad__from_4x4_to_6x6(self):
array = np.ones((4, 4))
array[1:3, 1:3] = 2.0
modified = aa.util.array.resized_array_2d_from_array_2d(
array_2d=array, resized_shape=(6, 6)
)
assert (
modified
== np.array(
[
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 1.0, 1.0, 1.0, 0.0],
[0.0, 1.0, 2.0, 2.0, 1.0, 0.0],
[0.0, 1.0, 2.0, 2.0, 1.0, 0.0],
[0.0, 1.0, 1.0, 1.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
]
)
).all()
def test__pad__from_4x4_to_5x5(self):
array = np.ones((4, 4))
array[1:3, 1:3] = 2.0
modified = aa.util.array.resized_array_2d_from_array_2d(
array_2d=array, resized_shape=(5, 5)
)
assert (
modified
== np.array(
[
[1.0, 1.0, 1.0, 1.0, 0.0],
[1.0, 2.0, 2.0, 1.0, 0.0],
[1.0, 2.0, 2.0, 1.0, 0.0],
[1.0, 1.0, 1.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
]
)
).all()
def test__pad__from_3x2_to_5x4(self):
array = np.ones((3, 2))
array[1, 0:2] = 2.0
modified = aa.util.array.resized_array_2d_from_array_2d(
array_2d=array, resized_shape=(5, 4)
)
assert (
modified
== np.array(
[
[0.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 1.0, 0.0],
[0.0, 2.0, 2.0, 0.0],
[0.0, 1.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 0.0],
]
)
).all()
def test__pad__from_2x3_to_4x5(self):
array = np.ones((2, 3))
array[0:2, 1] = 2.0
modified = aa.util.array.resized_array_2d_from_array_2d(
array_2d=array, resized_shape=(4, 5)
)
assert (
modified
== np.array(
[
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 2.0, 1.0, 0.0],
[0.0, 1.0, 2.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
]
)
).all()
def test__pad__with_input_new_origin(self):
array = np.ones((3, 3))
array[2, 2] = 2.0
modified = aa.util.array.resized_array_2d_from_array_2d(
array_2d=array, resized_shape=(5, 5), origin=(2, 2)
)
assert (
modified
== np.array(
[
[1.0, 1.0, 1.0, 0.0, 0.0],
[1.0, 1.0, 1.0, 0.0, 0.0],
[1.0, 1.0, 2.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
]
)
).all()
array = np.ones((2, 3))
array[0, 0] = 2.0
modified = aa.util.array.resized_array_2d_from_array_2d(
array_2d=array, resized_shape=(4, 5), origin=(0, 1)
)
assert (
modified
== np.array(
[
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 2.0, 1.0, 1.0, 0.0],
[0.0, 1.0, 1.0, 1.0, 0.0],
]
)
).all()
class TestFits:
def test__numpy_array_1d_from_fits(self):
arr = aa.util.array.numpy_array_1d_from_fits(
file_path=test_data_path + "3_ones.fits", hdu=0
)
assert (arr == np.ones((3))).all()
def test__numpy_array_1d_to_fits__output_and_load(self):
if os.path.exists(test_data_path + "test_autoarray.fits"):
os.remove(test_data_path + "test_autoarray.fits")
arr = np.array([10.0, 30.0, 40.0, 92.0, 19.0, 20.0])
aa.util.array.numpy_array_1d_to_fits(
arr, file_path=test_data_path + "test_autoarray.fits"
)
array_load = aa.util.array.numpy_array_1d_from_fits(
file_path=test_data_path + "test_autoarray.fits", hdu=0
)
assert (arr == array_load).all()
def test__numpy_array_2d_from_fits(self):
arr = aa.util.array.numpy_array_2d_from_fits(
file_path=test_data_path + "3x3_ones.fits", hdu=0
)
assert (arr == np.ones((3, 3))).all()
arr = aa.util.array.numpy_array_2d_from_fits(
file_path=test_data_path + "4x3_ones.fits", hdu=0
)
assert (arr == np.ones((4, 3))).all()
def test__numpy_array_2d_to_fits__output_and_load(self):
if os.path.exists(test_data_path + "test_autoarray.fits"):
os.remove(test_data_path + "test_autoarray.fits")
arr = np.array([[10.0, 30.0, 40.0], [92.0, 19.0, 20.0]])
aa.util.array.numpy_array_2d_to_fits(
arr, file_path=test_data_path + "test_autoarray.fits"
)
array_load = aa.util.array.numpy_array_2d_from_fits(
file_path=test_data_path + "test_autoarray.fits", hdu=0
)
assert (arr == array_load).all()
class TestReplaceNegativeNoise:
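    # Convention exercised below: where the image is negative and |image| / noise
    # exceeds target_signal_to_noise, the noise is raised to |image| / target so
    # that the absolute signal-to-noise equals the target; other pixels are untouched.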
def test__2x2_array__no_negative_values__no_change(self):
image_2d = np.ones(shape=(2, 2))
noise_map_2d = np.array([[1.0, 2.0], [3.0, 4.0]])
noise_map_2d = aa.util.array.replace_noise_map_2d_values_where_image_2d_values_are_negative(
image_2d=image_2d, noise_map_2d=noise_map_2d, target_signal_to_noise=1.0
)
assert (noise_map_2d == noise_map_2d).all()
def test__2x2_array__negative_values__do_not_produce_absolute_signal_to_noise_values_above_target__no_change(
self
):
image_2d = -1.0 * np.ones(shape=(2, 2))
noise_map_2d = np.array([[1.0, 0.5], [0.25, 0.125]])
noise_map_2d = aa.util.array.replace_noise_map_2d_values_where_image_2d_values_are_negative(
image_2d=image_2d, noise_map_2d=noise_map_2d, target_signal_to_noise=10.0
)
assert (noise_map_2d == noise_map_2d).all()
def test__2x2_array__negative_values__values_give_absolute_signal_to_noise_below_target__replaces_their_noise(
self
):
image_2d = -1.0 * np.ones(shape=(2, 2))
noise_map_2d = np.array([[1.0, 0.5], [0.25, 0.125]])
noise_map_2d = aa.util.array.replace_noise_map_2d_values_where_image_2d_values_are_negative(
image_2d=image_2d, noise_map_2d=noise_map_2d, target_signal_to_noise=4.0
)
assert (noise_map_2d == np.array([[1.0, 0.5], [0.25, 0.25]])).all()
noise_map_2d = np.array([[1.0, 0.5], [0.25, 0.125]])
noise_map_2d = aa.util.array.replace_noise_map_2d_values_where_image_2d_values_are_negative(
image_2d=image_2d, noise_map_2d=noise_map_2d, target_signal_to_noise=2.0
)
assert (noise_map_2d == np.array([[1.0, 0.5], [0.5, 0.5]])).all()
noise_map_2d = np.array([[1.0, 0.5], [0.25, 0.125]])
noise_map_2d = aa.util.array.replace_noise_map_2d_values_where_image_2d_values_are_negative(
image_2d=image_2d, noise_map_2d=noise_map_2d, target_signal_to_noise=1.0
)
assert (noise_map_2d == np.array([[1.0, 1.0], [1.0, 1.0]])).all()
noise_map_2d = np.array([[1.0, 0.5], [0.25, 0.125]])
noise_map_2d = aa.util.array.replace_noise_map_2d_values_where_image_2d_values_are_negative(
image_2d=image_2d, noise_map_2d=noise_map_2d, target_signal_to_noise=0.5
)
assert (noise_map_2d == np.array([[2.0, 2.0], [2.0, 2.0]])).all()
def test__same_as_above__image_not_all_negative_ones(self):
image_2d = np.array([[1.0, -2.0], [5.0, -4.0]])
noise_map_2d = np.array([[3.0, 1.0], [4.0, 8.0]])
noise_map_2d = aa.util.array.replace_noise_map_2d_values_where_image_2d_values_are_negative(
image_2d=image_2d, noise_map_2d=noise_map_2d, target_signal_to_noise=1.0
)
assert (noise_map_2d == np.array([[3.0, 2.0], [4.0, 8.0]])).all()
image_2d = np.array([[-10.0, -20.0], [100.0, -30.0]])
noise_map_2d = np.array([[1.0, 2.0], [40.0, 3.0]])
noise_map_2d = aa.util.array.replace_noise_map_2d_values_where_image_2d_values_are_negative(
image_2d=image_2d, noise_map_2d=noise_map_2d, target_signal_to_noise=5.0
)
assert (noise_map_2d == np.array([[2.0, 4.0], [40.0, 6.0]])).all()
def test__rectangular_2x3_and_3x2_arrays(self):
image_2d = -1.0 * np.ones(shape=(2, 3))
noise_map_2d = np.array([[1.0, 0.5, 0.25], [0.25, 0.125, 2.0]])
noise_map_2d = aa.util.array.replace_noise_map_2d_values_where_image_2d_values_are_negative(
image_2d=image_2d, noise_map_2d=noise_map_2d, target_signal_to_noise=2.0
)
assert (noise_map_2d == np.array([[1.0, 0.5, 0.5], [0.5, 0.5, 2.0]])).all()
image_2d = -1.0 * np.ones(shape=(3, 2))
noise_map_2d = np.array([[1.0, 0.5], [0.25, 0.125], [0.25, 2.0]])
noise_map_2d = aa.util.array.replace_noise_map_2d_values_where_image_2d_values_are_negative(
image_2d=image_2d, noise_map_2d=noise_map_2d, target_signal_to_noise=2.0
)
        assert (noise_map_2d == np.array([[1.0, 0.5], [0.5, 0.5], [0.5, 2.0]])).all()
"""
pyrad.proc.process_traj
=============================
Trajectory functions. Functions to pass trajectory dataset data to
the product generation functions.
.. autosummary::
:toctree: generated/
process_trajectory
process_traj_trt
process_traj_trt_contour
process_traj_lightning
process_traj_atplane
process_traj_antenna_pattern
_get_ts_values_antenna_pattern
_get_contour_trt
_get_gates
_get_gates_trt
_get_gates_antenna_pattern
_get_closest_bin
_sample_out_of_sector
TargetRadar
"""
from warnings import warn
import gc
from copy import deepcopy
import numpy as np
from netCDF4 import num2date, date2num
import pyart
from pyart.config import get_metadata
from pyart.core import Radar
from ..io.io_aux import get_datatype_fields, get_fieldname_pyart
from ..io.io_aux import get_field_unit, get_field_name
from ..io.timeseries import TimeSeries
from ..io.read_data_other import read_antenna_pattern
from ..util.stat_utils import quantiles_weighted
from ..util.radar_utils import belongs_roi_indices, find_nearest_gate
def process_trajectory(procstatus, dscfg, radar_list=None, trajectory=None):
"""
Return trajectory
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration. Accepted Configuration Keywords::
datatype : list of string. Dataset keyword
The input data types
radar_list : list of Radar objects
Optional. list of radar objects
trajectory : Trajectory object
containing trajectory samples
Returns
-------
    new_dataset : Trajectory object
        The trajectory object updated with the radar data
    ind_rad : None
        Always None for this dataset type
"""
if procstatus != 1:
return None, None
if not dscfg['initialized']:
if trajectory is None:
raise Exception("ERROR: Undefined trajectory for dataset '%s'"
% dscfg['dsname'])
if radar_list is not None:
for radar in radar_list:
rad = trajectory.add_radar(radar)
trajectory.calculate_velocities(rad)
else:
warn('ERROR: No valid radar found')
return None, None
dscfg['initialized'] = True
return trajectory, None
return None, None
def process_traj_trt(procstatus, dscfg, radar_list=None, trajectory=None):
"""
Processes data according to TRT trajectory
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration. Accepted Configuration Keywords::
datatype : list of string. Dataset keyword
The input data types
time_tol : float. Dataset keyword
tolerance between reference time of the radar volume and that of
the TRT cell [s]. Default 100.
alt_min, alt_max : float. Dataset keyword
Minimum and maximum altitude of the data inside the TRT cell to
retrieve [m MSL]. Default None
cell_center : Bool. Dataset keyword
If True only the range gate closest to the center of the cell is
extracted. Default False
latlon_tol : Float. Dataset keyword
Tolerance in lat/lon when extracting data only from the center of
the TRT cell. Default 0.01
radar_list : list of Radar objects
Optional. list of radar objects
trajectory : Trajectory object
containing trajectory samples
Returns
-------
new_dataset : dictionary
Dictionary containing radar_out, a radar object containing only data
from inside the TRT cell
ind_rad : int
radar index
"""
if procstatus != 1:
return None, None
# Process
field_names = []
datatypes = []
for datatypedescr in dscfg['datatype']:
radarnr, _, datatype, _, _ = get_datatype_fields(datatypedescr)
field_names.append(get_fieldname_pyart(datatype))
datatypes.append(datatype)
ind_rad = int(radarnr[5:8])-1
if ((radar_list is None) or (radar_list[ind_rad] is None)):
warn('ERROR: No valid radar found')
return None, None
# keep locally only field of interest in radar object
radar = deepcopy(radar_list[ind_rad])
radar.fields = dict()
nfields_available = 0
for field_name in field_names:
if field_name not in radar_list[ind_rad].fields:
warn("Datatype '%s' not available in radar data" % field_name)
continue
radar.add_field(field_name, radar_list[ind_rad].fields[field_name])
nfields_available += 1
if nfields_available == 0:
warn("Fields not available in radar data")
return None, None
# get TRT cell corresponding to current radar volume
time_tol = dscfg.get('TimeTol', 100.)
alt_min = dscfg.get('alt_min', None)
alt_max = dscfg.get('alt_max', None)
cell_center = dscfg.get('cell_center', False)
latlon_tol = dscfg.get('latlon_tol', 0.01) # aprox. 1 km
inds_ray, inds_rng, lat, lon, alt = _get_gates_trt(
radar, trajectory, dscfg['timeinfo'], time_tol=time_tol,
alt_min=alt_min, alt_max=alt_max, cell_center=cell_center,
latlon_tol=latlon_tol)
if inds_ray is None:
return None, None
# prepare new radar object output
radar_roi = deepcopy(radar)
radar_roi.range['data'] = radar.range['data'][inds_rng]
radar_roi.ngates = inds_rng.size
radar_roi.time['data'] = np.asarray([radar_roi.time['data'][0]])
radar_roi.scan_type = 'roi'
radar_roi.sweep_mode['data'] = np.array(['roi'])
radar_roi.sweep_start_ray_index['data'] = np.array([0], dtype='int32')
radar_roi.fixed_angle['data'] = np.array([], dtype='float64')
radar_roi.sweep_number['data'] = np.array([0], dtype='int32')
radar_roi.nsweeps = 1
if radar.rays_are_indexed is not None:
radar_roi.rays_are_indexed['data'] = np.array(
[radar.rays_are_indexed['data'][0]])
if radar.ray_angle_res is not None:
radar_roi.ray_angle_res['data'] = np.array(
[radar.ray_angle_res['data'][0]])
radar_roi.sweep_end_ray_index['data'] = np.array([1], dtype='int32')
radar_roi.rays_per_sweep = np.array([1], dtype='int32')
radar_roi.azimuth['data'] = np.array([], dtype='float64')
radar_roi.elevation['data'] = np.array([], dtype='float64')
radar_roi.nrays = 1
radar_roi.gate_longitude['data'] = np.empty(
(radar_roi.nrays, radar_roi.ngates), dtype=float)
radar_roi.gate_latitude['data'] = np.empty(
(radar_roi.nrays, radar_roi.ngates), dtype=float)
radar_roi.gate_altitude['data'] = np.empty(
(radar_roi.nrays, radar_roi.ngates), dtype=float)
radar_roi.gate_x['data'] = np.empty(
(radar_roi.nrays, radar_roi.ngates), dtype=float)
radar_roi.gate_y['data'] = np.empty(
(radar_roi.nrays, radar_roi.ngates), dtype=float)
radar_roi.gate_z['data'] = np.empty(
(radar_roi.nrays, radar_roi.ngates), dtype=float)
radar_roi.gate_longitude['data'][0, :] = lon
radar_roi.gate_latitude['data'][0, :] = lat
radar_roi.gate_altitude['data'][0, :] = alt
radar_roi.gate_x['data'][0, :] = radar.gate_x['data'][inds_ray, inds_rng]
radar_roi.gate_y['data'][0, :] = radar.gate_y['data'][inds_ray, inds_rng]
radar_roi.gate_z['data'][0, :] = radar.gate_z['data'][inds_ray, inds_rng]
radar_roi.fields = dict()
for field_name in field_names:
if field_name not in radar.fields:
warn("Datatype '%s' not available in radar data" % field_name)
continue
field_dict = deepcopy(radar.fields[field_name])
field_dict['data'] = np.ma.empty(
(radar_roi.nrays, radar_roi.ngates), dtype=float)
field_dict['data'][0, :] = radar.fields[field_name]['data'][
inds_ray, inds_rng]
radar_roi.add_field(field_name, field_dict)
new_dataset = {'radar_out': radar_roi}
return new_dataset, ind_rad
def process_traj_trt_contour(procstatus, dscfg, radar_list=None,
trajectory=None):
"""
Gets the TRT cell contour corresponding to each radar volume
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration. Accepted Configuration Keywords::
datatype : list of string. Dataset keyword
The input data types
time_tol : float. Dataset keyword
tolerance between reference time of the radar volume and that of
the TRT cell [s]. Default 100.
radar_list : list of Radar objects
Optional. list of radar objects
trajectory : Trajectory object
containing trajectory samples
Returns
-------
new_dataset : dict
Dictionary containing radar_out and roi_dict. Radar out is the current
radar object. roi_dict contains the positions defining the TRT cell
contour
ind_rad : int
radar index
"""
if procstatus != 1:
return None, None
# Process
field_names = []
datatypes = []
for datatypedescr in dscfg['datatype']:
radarnr, _, datatype, _, _ = get_datatype_fields(datatypedescr)
field_names.append(get_fieldname_pyart(datatype))
datatypes.append(datatype)
ind_rad = int(radarnr[5:8])-1
if ((radar_list is None) or (radar_list[ind_rad] is None)):
warn('ERROR: No valid radar found')
return None, None
# keep locally only field of interest in radar object
radar = deepcopy(radar_list[ind_rad])
radar.fields = dict()
nfields_available = 0
for field_name in field_names:
if field_name not in radar_list[ind_rad].fields:
warn("Datatype '%s' not available in radar data" % field_name)
continue
radar.add_field(field_name, radar_list[ind_rad].fields[field_name])
nfields_available += 1
if nfields_available == 0:
warn("Fields not available in radar data")
return None, None
# get TRT cell corresponding to current radar volume
time_tol = dscfg.get('TimeTol', 100.)
roi_dict = _get_contour_trt(
radar, trajectory, dscfg['timeinfo'], time_tol=time_tol)
if roi_dict is None:
return None, None
new_dataset = {
'radar_out': radar,
'roi_dict': roi_dict}
return new_dataset, ind_rad
def process_traj_lightning(procstatus, dscfg, radar_list=None,
trajectory=None):
"""
Return time series according to lightning trajectory
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration. Accepted Configuration Keywords::
datatype : list of string. Dataset keyword
The input data types
data_is_log : dict. Dataset keyword
Dictionary specifying for each field if it is in log (True) or
linear units (False). Default False
ang_tol : float. Dataset keyword
Factor that multiplies the angle resolution. Used when determining
the neighbouring rays. Default 1.2
radar_list : list of Radar objects
Optional. list of radar objects
trajectory : Trajectory object
containing trajectory samples
Returns
-------
trajectory : Trajectory object
Object holding time series
ind_rad : int
radar index
"""
if procstatus == 0:
# first call: nothing to do
return None, None
if procstatus == 2:
# last call: do the products
if not dscfg['initialized']:
warn('ERROR: No trajectory dataset available!')
return None, None
trajdict = dscfg['traj_atplane_dict']
dataset = {
'ts': trajdict['ts'],
'final': 1
}
return dataset, trajdict['ind_rad']
# Process
field_names = []
datatypes = []
for datatypedescr in dscfg['datatype']:
radarnr, _, datatype, _, _ = get_datatype_fields(datatypedescr)
field_names.append(get_fieldname_pyart(datatype))
datatypes.append(datatype)
ind_rad = int(radarnr[5:8])-1
if ((radar_list is None) or (radar_list[ind_rad] is None)):
warn('ERROR: No valid radar found')
return None, None
# keep locally only field of interest in radar object
radar = deepcopy(radar_list[ind_rad])
radar.fields = dict()
nfields_available = 0
for field_name in field_names:
if field_name not in radar_list[ind_rad].fields:
warn("Datatype '%s' not available in radar data" % field_name)
continue
radar.add_field(field_name, radar_list[ind_rad].fields[field_name])
nfields_available += 1
if nfields_available == 0:
warn("Fields not available in radar data")
return None, None
ttask_start = radar.time['data'].min()
dt_task_start = num2date(ttask_start, radar.time['units'],
radar.time['calendar'])
if not dscfg['initialized']:
# init
if trajectory is None:
raise Exception("ERROR: Undefined trajectory for dataset '%s'"
% dscfg['dsname'])
rad_traj = trajectory.add_radar(radar)
description = [
"Description:",
"Time series of a weather radar data type at the location",
"of the lightning flash.",
"The time samples where the flash was out of the weather radar",
"sector are NOT included in this file.",
"NaN (Not a number): No data detected at the flash location."
]
ts_dict = dict()
data_is_log = dict()
for datatype, field_name in zip(datatypes, field_names):
ts = TimeSeries(
description, maxlength=trajectory.time_vector.size,
datatype=datatype)
unit = get_field_unit(datatype)
name = get_field_name(datatype)
# Append empty series: Note: sequence matters!
ts.add_dataseries("#Flash", "", "", plot=False)
ts.add_dataseries("Power", "", "dBm", plot=False)
ts.add_dataseries("at_flash", name, unit, color='b')
ts.add_dataseries("Mean", name, unit, color='r')
ts.add_dataseries("Min", name, unit, color='k', linestyle=':')
ts.add_dataseries("Max", name, unit, color='k', linestyle=':')
ts.add_dataseries("#Valid", "", "", plot=False)
ts_dict.update({field_name: ts})
data_is_log.update({field_name: False})
if 'data_is_log' in dscfg:
if datatype in dscfg['data_is_log']:
                    data_is_log[field_name] = (
                        dscfg['data_is_log'][datatype] != 0)
else:
warn('Units type for data type '+datatype +
' not specified. Assumed linear')
trajdict = dict({
'radar_traj': rad_traj,
'radar_old': None,
'radar_old2': None,
'last_task_start_dt': None,
'ind_rad': ind_rad,
'ts_dict': ts_dict,
'data_is_log': data_is_log})
traj_ind = trajectory.get_samples_in_period(end=dt_task_start)
dscfg['traj_atplane_dict'] = trajdict
dscfg['initialized'] = True
else:
# init already done
trajdict = dscfg['traj_atplane_dict']
rad_traj = trajdict['radar_traj']
traj_ind = trajectory.get_samples_in_period(
start=trajdict['last_task_start_dt'], end=dt_task_start)
ts_dict = trajdict['ts_dict']
data_is_log = trajdict['data_is_log']
traj_time_vec = date2num(trajectory.time_vector, radar.time['units'],
radar.time['calendar'])
if np.size(traj_ind) == 0:
warn('No trajectory samples within current period')
trajdict['radar_old2'] = trajdict['radar_old']
trajdict['radar_old'] = radar
return None, None
# User defined parameter
ang_tol = dscfg.get('ang_tol', 1.2)
az_list = []
el_list = []
rr_list = []
tt_list = []
for tind in np.nditer(traj_ind):
az = rad_traj.azimuth_vec[tind]
el = rad_traj.elevation_vec[tind]
rr = rad_traj.range_vec[tind]
tt = traj_time_vec[tind]
dBm = trajectory.dBm[tind]
flashnr = trajectory.flashnr_vec[tind]
(radar_sel, traj_ray_ind, traj_rng_ind, cell_ray_inds,
cell_rng_ind_min, cell_rng_ind_max) = _get_gates(
radar, az, el, rr, tt, trajdict, ang_tol=ang_tol)
if radar_sel is None:
continue
# Get data samples and compute statistics
for field_name in field_names:
if field_name not in radar_sel.fields:
warn("Datatype '%s' not available in radar data" % field_name)
continue
rdata = radar_sel.fields[field_name]['data']
val = rdata[traj_ray_ind, traj_rng_ind]
cell_vals = rdata[
cell_ray_inds, cell_rng_ind_min:cell_rng_ind_max+1]
# Compute statistics and get number of valid data
if data_is_log[field_name]:
val_mean = np.ma.masked
cell_vals_valid = cell_vals.compressed()
if cell_vals_valid.size > 0:
vals_lin = np.ma.power(10., cell_vals_valid/10.)
val_mean = np.ma.mean(vals_lin)
val_mean = 10. * np.ma.log10(val_mean)
else:
val_mean = np.ma.mean(cell_vals)
val_min = np.ma.min(cell_vals)
val_max = np.ma.max(cell_vals)
nvals_valid = np.count_nonzero(
np.logical_not(np.ma.getmaskarray(cell_vals)))
# Add to time series dict
ts_dict[field_name].add_timesample(
trajectory.time_vector[tind],
(flashnr, dBm, val, val_mean, val_min, val_max, nvals_valid))
az_list.append(az)
el_list.append(el)
rr_list.append(rr)
tt_list.append(trajectory.time_vector[tind])
# end loop over traj samples within period
# output radar volume and flash coordinates respect to radar
radar_sel = radar
if trajdict['radar_old'] is not None:
radar_sel = trajdict['radar_old']
dataset = {
'azi_traj': np.asarray(az_list),
'ele_traj': np.asarray(el_list),
'rng_traj': np.asarray(rr_list),
'time_traj': np.asarray(tt_list),
'radar': radar_sel,
'final': 0
}
# update trajectory dictionary
trajdict['last_task_start_dt'] = dt_task_start
trajdict['radar_old2'] = trajdict['radar_old']
trajdict['radar_old'] = radar
return dataset, ind_rad
def process_traj_atplane(procstatus, dscfg, radar_list=None, trajectory=None):
"""
Return time series according to trajectory
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration. Accepted Configuration Keywords::
datatype : list of string. Dataset keyword
The input data types
data_is_log : dict. Dataset keyword
Dictionary specifying for each field if it is in log (True) or
linear units (False). Default False
ang_tol : float. Dataset keyword
Factor that multiplies the angle resolution. Used when determining
the neighbouring rays. Default 1.2
radar_list : list of Radar objects
Optional. list of radar objects
trajectory : Trajectory object
containing trajectory samples
Returns
-------
trajectory : Trajectory object
Object holding time series
ind_rad : int
radar index
"""
if procstatus == 0:
# first call: nothing to do
return None, None
if procstatus == 2:
# last call: do the products
if not dscfg['initialized']:
warn('ERROR: No trajectory dataset available!')
return None, None
trajdict = dscfg['traj_atplane_dict']
dataset = {
'ts_dict': trajdict['ts_dict'],
'final': 1
}
return dataset, trajdict['ind_rad']
# Process
field_names = []
datatypes = []
for datatypedescr in dscfg['datatype']:
radarnr, _, datatype, _, _ = get_datatype_fields(datatypedescr)
field_names.append(get_fieldname_pyart(datatype))
datatypes.append(datatype)
ind_rad = int(radarnr[5:8])-1
if ((radar_list is None) or (radar_list[ind_rad] is None)):
warn('ERROR: No valid radar found')
return None, None
# keep locally only field of interest in radar object
radar = deepcopy(radar_list[ind_rad])
radar.fields = dict()
nfields_available = 0
for field_name in field_names:
if field_name not in radar_list[ind_rad].fields:
warn("Datatype '%s' not available in radar data" % field_name)
continue
radar.add_field(field_name, radar_list[ind_rad].fields[field_name])
nfields_available += 1
if nfields_available == 0:
warn("Fields not available in radar data")
return None, None
ttask_start = radar.time['data'].min()
dt_task_start = num2date(ttask_start, radar.time['units'],
radar.time['calendar'])
if not dscfg['initialized']:
# init
if trajectory is None:
raise Exception("ERROR: Undefined trajectory for dataset '%s'"
% dscfg['dsname'])
rad_traj = trajectory.add_radar(radar)
description = [
"Description:",
"Time series of a weather radar data type at the location",
"of the plane.",
"The time samples where the plane was out of the weather radar",
"sector are NOT included in this file.",
"NaN (Not a number): No rain detected at the plane location."
]
ts_dict = dict()
data_is_log = dict()
for datatype, field_name in zip(datatypes, field_names):
ts = TimeSeries(
description, maxlength=trajectory.time_vector.size,
datatype=datatype)
unit = get_field_unit(datatype)
name = get_field_name(datatype)
# Append empty series: Note: sequence matters!
ts.add_dataseries("at plane", name, unit, color='b')
ts.add_dataseries("Mean", name, unit, color='r')
ts.add_dataseries("Min", name, unit, color='k', linestyle=':')
ts.add_dataseries("Max", name, unit, color='k', linestyle=':')
ts.add_dataseries("#Valid", "", "", plot=False)
ts_dict.update({field_name: ts})
data_is_log.update({field_name: False})
if 'data_is_log' in dscfg:
if datatype in dscfg['data_is_log']:
                    data_is_log[field_name] = (
                        dscfg['data_is_log'][datatype] != 0)
else:
warn('Units type for data type '+datatype +
' not specified. Assumed linear')
trajdict = dict({
'radar_traj': rad_traj,
'radar_old': None,
'radar_old2': None,
'last_task_start_dt': None,
'ind_rad': ind_rad,
'ts_dict': ts_dict,
'data_is_log': data_is_log})
traj_ind = trajectory.get_samples_in_period(end=dt_task_start)
dscfg['traj_atplane_dict'] = trajdict
dscfg['initialized'] = True
else:
# init already done
trajdict = dscfg['traj_atplane_dict']
rad_traj = trajdict['radar_traj']
traj_ind = trajectory.get_samples_in_period(
start=trajdict['last_task_start_dt'], end=dt_task_start)
ts_dict = trajdict['ts_dict']
data_is_log = trajdict['data_is_log']
traj_time_vec = date2num(trajectory.time_vector, radar.time['units'],
radar.time['calendar'])
if np.size(traj_ind) == 0:
warn('No trajectory samples within current period')
trajdict['radar_old2'] = trajdict['radar_old']
trajdict['radar_old'] = radar
return None, None
# User defined parameter
ang_tol = dscfg.get('ang_tol', 1.2)
az_list = []
el_list = []
rr_list = []
tt_list = []
for tind in np.nditer(traj_ind):
az = rad_traj.azimuth_vec[tind]
el = rad_traj.elevation_vec[tind]
rr = rad_traj.range_vec[tind]
tt = traj_time_vec[tind]
(radar_sel, traj_ray_ind, traj_rng_ind, cell_ray_inds,
cell_rng_ind_min, cell_rng_ind_max) = _get_gates(
radar, az, el, rr, tt, trajdict, ang_tol=ang_tol)
if radar_sel is None:
continue
# Get data samples and compute statistics
for field_name in field_names:
if field_name not in radar_sel.fields:
warn("Datatype '%s' not available in radar data" % field_name)
continue
rdata = radar_sel.fields[field_name]['data']
val = rdata[traj_ray_ind, traj_rng_ind]
cell_vals = rdata[
cell_ray_inds, cell_rng_ind_min:cell_rng_ind_max+1]
# Compute statistics and get number of valid data
if data_is_log[field_name]:
val_mean = np.ma.masked
cell_vals_valid = cell_vals.compressed()
if cell_vals_valid.size > 0:
vals_lin = np.ma.power(10., cell_vals_valid/10.)
val_mean = np.ma.mean(vals_lin)
val_mean = 10. * np.ma.log10(val_mean)
else:
val_mean = np.ma.mean(cell_vals)
val_min = np.ma.min(cell_vals)
val_max = np.ma.max(cell_vals)
nvals_valid = np.count_nonzero(
np.logical_not(np.ma.getmaskarray(cell_vals)))
# Add to time series dict
ts_dict[field_name].add_timesample(
trajectory.time_vector[tind],
(val, val_mean, val_min, val_max, nvals_valid))
az_list.append(az)
el_list.append(el)
rr_list.append(rr)
tt_list.append(trajectory.time_vector[tind])
# end loop over traj samples within period
# output radar volume and antenna coordinates respect to radar
radar_sel = radar
if trajdict['radar_old'] is not None:
radar_sel = trajdict['radar_old']
dataset = {
'azi_traj': np.asarray(az_list),
'ele_traj': np.asarray(el_list),
'rng_traj': np.asarray(rr_list),
'time_traj': np.asarray(tt_list),
'radar': radar_sel,
'final': 0
}
# update trajectory dictionary
trajdict['last_task_start_dt'] = dt_task_start
trajdict['radar_old2'] = trajdict['radar_old']
trajdict['radar_old'] = radar
return dataset, ind_rad
def process_traj_antenna_pattern(procstatus, dscfg, radar_list=None,
trajectory=None):
"""
Process a new array of data volumes considering a plane
    trajectory. As a result, a time series with the values
transposed for a given antenna pattern is created.
The result is created when the LAST flag is set.
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
datatype : list of string. Dataset keyword
The input data types
antennaType : str. Dataset keyword
Type of antenna of the radar we want to get the view from. Can
be AZIMUTH, ELEVATION, LOWBEAM, HIGHBEAM
        par_azimuth_antenna : dict. Global keyword
Dictionary containing the parameters of the PAR azimuth antenna,
i.e. name of the file with the antenna elevation pattern and fixed
antenna angle
par_elevation_antenna : dict. Global keyword
Dictionary containing the parameters of the PAR elevation antenna,
i.e. name of the file with the antenna azimuth pattern and fixed
antenna angle
asr_lowbeam_antenna : dict. Global keyword
Dictionary containing the parameters of the ASR low beam antenna,
i.e. name of the file with the antenna elevation pattern and fixed
antenna angle
asr_highbeam_antenna : dict. Global keyword
Dictionary containing the parameters of the ASR high beam antenna,
i.e. name of the file with the antenna elevation pattern and fixed
antenna angle
target_radar_pos : dict. Global keyword
Dictionary containing the latitude, longitude and altitude of
            the radar we want to get the view from. If not specified the
            radars are assumed to be co-located
range_all : Bool. Dataset keyword
If the real radar and the synthetic radar are co-located and this
parameter is true the statistics are going to be computed using
all the data from range 0 to the position of the plane. Default
False
        rhi_resolution : float. Dataset keyword
Resolution of the synthetic RHI used to compute the data as viewed
from the synthetic radar [deg]. Default 0.5
max_altitude : float. Dataset keyword
Max altitude of the data to use when computing the view from the
synthetic radar [m MSL]. Default 12000.
latlon_tol : float. Dataset keyword
The tolerance in latitude and longitude to determine which
synthetic radar gates are co-located with real radar gates [deg].
Default 0.04
        alt_tol : float. Dataset keyword
The tolerance in altitude to determine which synthetic
radar gates are co-located with real radar gates [m]. Default 1000.
distance_upper_bound : float. Dataset keyword
The maximum distance where to look for a neighbour when
determining which synthetic radar gates are co-located with real
radar gates [m]. Default 1000.
use_cKDTree : Bool. Dataset keyword
Which function to use to find co-located real radar gates with the
synthetic radar. If True a function using cKDTree from
scipy.spatial is used. This function uses parameter
distance_upper_bound. If False a native implementation is used
that takes as parameters latlon_tol and alt_tol. Default True.
pattern_thres : float. Dataset keyword
The minimum of the sum of the weights given to each value in order
to consider the weighted quantile valid. It is related to the
number of valid data points
data_is_log : dict. Dataset keyword
Dictionary specifying for each field if it is in log (True) or
linear units (False). Default False
use_nans : dict. Dataset keyword
            Dictionary specifying whether the NaNs have to be used in the
computation of the statistics for each field. Default False
nan_value : dict. Dataset keyword
Dictionary with the value to use to substitute the NaN values when
computing the statistics of each field. Default 0
radar_list : list of Radar objects
Optional. list of radar objects
trajectory : Trajectory object
containing trajectory samples
Returns
-------
trajectory : Trajectory object
Object holding time series
ind_rad : int
radar index
"""
if procstatus == 0:
# first call: nothing to do
return None, None
if procstatus == 2:
# last call: do the products
if not dscfg['initialized']:
warn('ERROR: No trajectory dataset available!')
return None, None
tadict = dscfg['traj_antenna_dict']
dataset = {
'ts_dict': tadict['ts_dict'],
'final': 1
}
return dataset, tadict['ind_rad']
# Process
field_names = []
datatypes = []
for datatypedescr in dscfg['datatype']:
radarnr, _, datatype, _, _ = get_datatype_fields(datatypedescr)
field_names.append(get_fieldname_pyart(datatype))
datatypes.append(datatype)
ind_rad = int(radarnr[5:8])-1
if ((radar_list is None) or (radar_list[ind_rad] is None)):
warn('ERROR: No valid radar found')
return None, None
# keep locally only field of interest in radar object
radar = deepcopy(radar_list[ind_rad])
radar.fields = dict()
nfields_available = 0
for field_name in field_names:
if field_name not in radar_list[ind_rad].fields:
warn("Datatype '%s' not available in radar data" % field_name)
continue
radar.add_field(field_name, radar_list[ind_rad].fields[field_name])
nfields_available += 1
if nfields_available == 0:
warn("Fields not available in radar data")
return None, None
ttask_start = radar.time['data'].min()
dt_task_start = num2date(ttask_start, radar.time['units'],
radar.time['calendar'])
if not dscfg['initialized']:
# === init ============================================================
if trajectory is None:
raise Exception("ERROR: Undefined trajectory for dataset '%s'"
% dscfg['dsname'])
# Check config
if 'antennaType' not in dscfg:
raise Exception("ERROR: Undefined 'antennaType' for dataset '%s'"
% dscfg['dsname'])
if 'configpath' not in dscfg:
raise Exception("ERROR: Undefined 'configpath' for dataset '%s'"
% dscfg['dsname'])
if 'target_radar_pos' not in dscfg:
radar_antenna_atsameplace = True
target_radar = None
warn('No target radar position specified. ' +
'The radars are assumed colocated')
rad_traj = trajectory.add_radar(radar)
else:
radar_antenna_atsameplace = False
# create dummy radar object with target radar specs
latitude = get_metadata('latitude')
longitude = get_metadata('longitude')
altitude = get_metadata('altitude')
latitude['data'] = np.array(
[dscfg['target_radar_pos']['latitude']], dtype='float64')
longitude['data'] = np.array(
[dscfg['target_radar_pos']['longitude']], dtype='float64')
altitude['data'] = np.array(
[dscfg['target_radar_pos']['altitude']], dtype='float64')
target_radar = TargetRadar(latitude, longitude, altitude)
rad_traj = trajectory.add_radar(target_radar)
if dscfg['antennaType'] == 'AZIMUTH':
is_azimuth_antenna = True
info = 'parAzAnt'
description = [
"Antenna: PAR Azimuth antenna",
"Description:",
"Time series of a weather radar data type at the location",
"of the plane weighted by the antenna pattern of the PAR",
"antenna.",
"The time samples where the plane was out of the weather",
"radar sector are NOT included in this file.",
"NaN (Not a number): No rain detected at the plane location."
]
if 'par_azimuth_antenna' not in dscfg:
raise Exception("ERROR: Undefined 'par_azimuth_antenna' for"
" dataset '%s'" % dscfg['dsname'])
patternfile = dscfg['configpath'] + 'antenna/' \
+ dscfg['par_azimuth_antenna']['elPatternFile']
fixed_angle = dscfg['par_azimuth_antenna']['fixed_angle']
elif dscfg['antennaType'] == 'ELEVATION':
is_azimuth_antenna = False
info = 'parElAnt'
description = [
"Antenna: PAR Elevation antenna",
"Description:",
"Time series of a weather radar data type at the location",
"of the plane weighted by the antenna pattern of the PAR",
"antenna.",
"The time samples where the plane was out of the weather ",
"radar sector are NOT included in this file.",
"NaN (Not a number): No rain detected at the plane location."
]
if 'par_elevation_antenna' not in dscfg:
raise Exception("ERROR: Undefined 'par_elevation_antenna' for"
" dataset '%s'" % dscfg['dsname'])
patternfile = dscfg['configpath'] + 'antenna/' \
+ dscfg['par_elevation_antenna']['azPatternFile']
fixed_angle = dscfg['par_elevation_antenna']['fixed_angle']
elif dscfg['antennaType'] == 'LOWBEAM':
is_azimuth_antenna = True
info = 'asrLowBeamAnt'
description = [
"Antenna: ASR low beam antenna",
"Description:",
"Time series of a weather radar data type at the location",
"of the plane weighted by the antenna pattern of the ASR",
"antenna.",
"The time samples where the plane was out of the weather",
"radar sector are NOT included in this file.",
"NaN (Not a number): No rain detected at the plane location."
]
if 'asr_lowbeam_antenna' not in dscfg:
raise Exception("ERROR: Undefined 'asr_lowbeam_antenna' for"
" dataset '%s'" % dscfg['dsname'])
patternfile = dscfg['configpath'] + 'antenna/' \
+ dscfg['asr_lowbeam_antenna']['elPatternFile']
fixed_angle = dscfg['asr_lowbeam_antenna']['fixed_angle']
elif dscfg['antennaType'] == 'HIGHBEAM':
is_azimuth_antenna = True
info = 'asrHighBeamAnt'
description = [
"Antenna: ASR high beam antenna",
"Description:",
"Time series of a weather radar data type at the location",
"of the plane weighted by the antenna pattern of the ASR",
"antenna.",
"The time samples where the plane was out of the weather",
"radar sector are NOT included in this file.",
"NaN (Not a number): No rain detected at the plane location."
]
if 'asr_highbeam_antenna' not in dscfg:
raise Exception("ERROR: Undefined 'asr_highbeam_antenna' for"
" dataset '%s'" % dscfg['dsname'])
patternfile = dscfg['configpath'] + 'antenna/' \
+ dscfg['asr_highbeam_antenna']['elPatternFile']
patternfile_low = dscfg['configpath'] + 'antenna/' \
+ dscfg['asr_lowbeam_antenna']['elPatternFile']
fixed_angle = dscfg['asr_highbeam_antenna']['fixed_angle']
else:
raise Exception("ERROR: Unexpected antenna type '%s' for dataset"
" '%s'" % (dscfg['antennaType'], dscfg['dsname']))
# Read dataset config parameters:
weight_threshold = dscfg.get('pattern_thres', 0.)
do_all_ranges = False
if 'range_all' in dscfg:
if dscfg['range_all'] != 0:
do_all_ranges = True
# Config parameters for processing when the weather radar and the
# antenna are not at the same place:
rhi_resolution = dscfg.get('rhi_resolution', 0.5) # [deg]
max_altitude = dscfg.get('max_altitude', 12000.) # [m]
latlon_tol = dscfg.get('latlon_tol', 0.04) # [deg]
alt_tol = dscfg.get('alt_tol', 1000.) # [m]
distance_upper_bound = dscfg.get('distance_upper_bound', 1000.)
use_cKDTree = dscfg.get('use_cKDTree', True)
# Get antenna pattern and make weight vector
try:
if info == 'asrHighBeamAnt':
antpattern = read_antenna_pattern(
patternfile, linear=True, twoway=False)
antpattern_low = read_antenna_pattern(
patternfile_low, linear=True, twoway=False)
antpattern['attenuation'] *= antpattern_low['attenuation']
else:
antpattern = read_antenna_pattern(patternfile, linear=True,
twoway=True)
except Exception as ee:
warn(str(ee))
raise
pattern_angles = antpattern['angle'] + fixed_angle
if not is_azimuth_antenna:
pattern_angles[pattern_angles < 0] += 360.
pattern_angles[pattern_angles >= 360.] -= 360.
if radar_antenna_atsameplace:
if is_azimuth_antenna:
scan_angles = np.sort(np.unique(
radar.elevation['data'].round(decimals=1)))
else:
scan_angles = np.sort(np.unique(
radar.azimuth['data'].round(decimals=1)))
else:
scan_angles = np.arange(0, 90, rhi_resolution, dtype=float)
weightvec = np.empty(scan_angles.size, dtype=float)
for kk in range(scan_angles.size):
ind = np.argmin(np.abs(pattern_angles - scan_angles[kk]))
weightvec[kk] = antpattern['attenuation'][ind]
ts_dict = dict()
data_is_log = dict()
use_nans = dict()
nan_value = dict()
for datatype, field_name in zip(datatypes, field_names):
ts = TimeSeries(
description, maxlength=trajectory.time_vector.size,
datatype=datatype)
unit = get_field_unit(datatype)
name = get_field_name(datatype)
# Quantiles of interest
quantiles = [
{"val": 0.1, "plot": False, "color": None, "ltype": None},
{"val": 0.2, "plot": True, "color": 'k', "ltype": ':'},
{"val": 0.3, "plot": False, "color": None, "ltype": None},
{"val": 0.4, "plot": False, "color": None, "ltype": None},
{"val": 0.5, "plot": True, "color": 'r', "ltype": None},
{"val": 0.6, "plot": False, "color": None, "ltype": None},
{"val": 0.7, "plot": False, "color": None, "ltype": None},
{"val": 0.8, "plot": True, "color": 'k', "ltype": ':'},
{"val": 0.9, "plot": False, "color": None, "ltype": None},
{"val": 0.95, "plot": False, "color": None, "ltype": None}]
ts.add_dataseries("Weighted average", name, unit, color='b')
for qq in quantiles:
label = "Quantile_%4.2f" % qq["val"]
ts.add_dataseries(label, name, unit, plot=qq["plot"],
color=qq["color"], linestyle=qq["ltype"])
ts.add_dataseries("#Valid", "", "", plot=False)
ts_dict.update({field_name: ts})
data_is_log.update({field_name: False})
if 'data_is_log' in dscfg:
if datatype in dscfg['data_is_log']:
data_is_log[field_name] = (
dscfg['data_is_log'][datatype] != 0)
else:
warn('Units type for data type '+datatype +
' not specified. Assumed linear')
use_nans.update({field_name: False})
if 'use_nans' in dscfg:
if datatype in dscfg['use_nans']:
use_nans[field_name] = (
dscfg['use_nans'][datatype] != 0)
else:
                    warn('Use of NaNs for data type '+datatype +
                         ' not specified. Assumed not used')
nan_value.update({field_name: 0.})
if 'nan_value' in dscfg:
if datatype in dscfg['nan_value']:
nan_value[field_name] = dscfg['nan_value'][datatype]
else:
                    warn('NaN value for data type '+datatype +
                         ' not specified. Assumed 0')
quants = np.array([ee['val'] for ee in quantiles])
# Persistent data structure
tadict = dict({
'radar_traj': rad_traj,
'radar_old': None,
'radar_old2': None,
'target_radar': target_radar,
'last_task_start_dt': None,
'ind_rad': ind_rad,
'is_azimuth_antenna': is_azimuth_antenna,
'info': info,
'scan_angles': scan_angles,
'radar_antenna_atsameplace': radar_antenna_atsameplace,
'weightvec': weightvec,
'quantiles': quants,
'use_nans': use_nans,
'nan_value': nan_value,
'weight_threshold': weight_threshold,
'do_all_ranges': do_all_ranges,
'max_altitude': max_altitude,
'latlon_tol': latlon_tol,
'alt_tol': alt_tol,
'distance_upper_bound': distance_upper_bound,
'use_cKDTree': use_cKDTree,
'data_is_log': data_is_log,
'ts_dict': ts_dict})
traj_ind = trajectory.get_samples_in_period(end=dt_task_start)
dscfg['traj_antenna_dict'] = tadict
dscfg['initialized'] = True
# end init
else:
# init already done
tadict = dscfg['traj_antenna_dict']
traj_ind = trajectory.get_samples_in_period(
start=tadict['last_task_start_dt'], end=dt_task_start)
if not _get_ts_values_antenna_pattern(
radar, trajectory, tadict, traj_ind, field_names):
return None, None
tadict['last_task_start_dt'] = dt_task_start
tadict['radar_old2'] = tadict['radar_old']
tadict['radar_old'] = radar
# Collect garbage
gc.collect()
return None, None
def _get_ts_values_antenna_pattern(radar, trajectory, tadict, traj_ind,
field_names):
"""
Get the time series values of a trajectory using a synthetic antenna
pattern
Parameters
----------
radar : radar object
The radar volume with the data
trajectory : trajectory object
The plane trajectory
tadict : dict
A dictionary containing parameters useful for trajectory computation
traj_ind : array
The indices of trajectory data within the current radar volume time
field_names : list of str
list of names of the radar field
Returns
-------
result : Bool
A flag signaling whether radar data matching the trajectory was found
"""
rad_traj = tadict['radar_traj']
is_azimuth_antenna = tadict['is_azimuth_antenna']
scan_angles = tadict['scan_angles']
radar_antenna_atsameplace = tadict['radar_antenna_atsameplace']
nan_value = tadict['nan_value']
use_nans = tadict['use_nans']
weight_threshold = tadict['weight_threshold']
do_all_ranges = tadict['do_all_ranges']
ts_dict = tadict['ts_dict']
target_radar = tadict['target_radar']
max_altitude = tadict['max_altitude']
latlon_tol = tadict['latlon_tol']
alt_tol = tadict['alt_tol']
distance_upper_bound = tadict['distance_upper_bound']
use_cKDTree = tadict['use_cKDTree']
data_is_log = tadict['data_is_log']
traj_time_vec = date2num(trajectory.time_vector, radar.time['units'],
radar.time['calendar'])
if np.size(traj_ind) == 0:
warn('No trajectory samples within current period')
return False
for tind in np.nditer(traj_ind):
az = rad_traj.azimuth_vec[tind]
el = rad_traj.elevation_vec[tind]
rr = rad_traj.range_vec[tind]
tt = traj_time_vec[tind]
# Select radar object, find closest azimuth and elevation ray
(radar_sel, ray_sel, rr_ind, el_vec_rnd, az_vec_rnd) = \
_get_closest_bin(az, el, rr, tt, radar, tadict)
# Check if traj sample is within scan sector
if (_sample_out_of_sector(az, el, rr, radar_sel, ray_sel,
rr_ind, el_vec_rnd, az_vec_rnd)):
continue
if radar_antenna_atsameplace:
# ==============================================================
# Radar and scanning antenna are at the SAME place
# ==============================================================
# ==============================================================
# Get sample at bin
if is_azimuth_antenna:
angles = radar_sel.azimuth['data']
angles_scan = radar_sel.elevation['data']
ray_angle = radar_sel.azimuth['data'][ray_sel]
else:
angles = radar_sel.elevation['data']
angles_scan = radar_sel.azimuth['data']
ray_angle = radar_sel.elevation['data'][ray_sel]
d_angle = np.abs(angles - ray_angle)
ray_inds = np.where(d_angle < 0.09)[0]
angles_sortind = np.argsort(angles_scan[ray_inds])
ray_inds = ray_inds[angles_sortind]
angles_sorted = angles_scan[ray_inds]
# Set default values
avg = None
qvals = np.array([None] * tadict['quantiles'].size)
nvals_valid = None
if ((scan_angles.size != angles_sorted.size) or
(np.max(np.abs(scan_angles - angles_sorted)) > 0.1)):
warn("Scan angle mismatch!")
for field_name in field_names:
ts_dict[field_name].add_timesample(
trajectory.time_vector[tind],
np.concatenate([[avg], qvals, [nvals_valid]]))
continue
if do_all_ranges:
rr_ind_min = 0
else:
rr_ind_min = rr_ind
w_vec = tadict['weightvec']
for field_name in field_names:
if field_name not in radar_sel.fields:
warn("Datatype '%s' not available in radar data" %
field_name)
continue
rdata = radar_sel.fields[field_name]['data']
values = rdata[ray_inds, rr_ind_min:rr_ind+1]
if use_nans[field_name]:
values_ma = np.ma.getmaskarray(values)
values[values_ma] = nan_value[field_name]
try:
(avg, qvals, nvals_valid) = quantiles_weighted(
values,
weight_vector=w_vec,
quantiles=tadict['quantiles'],
weight_threshold=weight_threshold,
data_is_log=data_is_log[field_name])
except Exception as ee:
warn(str(ee))
continue
ts_dict[field_name].add_timesample(
trajectory.time_vector[tind],
np.concatenate([[avg], qvals, [nvals_valid]]))
else:
# ================================================================
# Radar and scanning antenna are NOT at the same place
# ================================================================
ray_inds, rng_inds, w_inds = _get_gates_antenna_pattern(
radar_sel, target_radar, az, rr, tt, scan_angles,
alt_tol=alt_tol, latlon_tol=latlon_tol,
max_altitude=max_altitude,
distance_upper_bound=distance_upper_bound,
use_cKDTree=use_cKDTree)
w_vec = tadict['weightvec'][w_inds]
for field_name in field_names:
if field_name not in radar_sel.fields:
warn("Datatype '%s' not available in radar data" %
field_name)
continue
rdata = radar_sel.fields[field_name]['data']
values = rdata[ray_inds, rng_inds]
if use_nans[field_name]:
values_ma = np.ma.getmaskarray(values)
values[values_ma] = nan_value[field_name]
try:
(avg, qvals, nvals_valid) = quantiles_weighted(
values,
weight_vector=w_vec,
quantiles=tadict['quantiles'],
weight_threshold=weight_threshold,
data_is_log=data_is_log[field_name])
except Exception as ee:
warn(str(ee))
continue
ts_dict[field_name].add_timesample(
trajectory.time_vector[tind],
                    np.concatenate([[avg], qvals, [nvals_valid]]))
"""
Plot comparisons to understand contributions of SIC dependent on the
thickness initial conditions for the regional experiments.
Notes
-----
Author : <NAME>
Date : 9 October 2018
"""
### Import modules
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap, addcyclic, shiftgrid
import nclcmaps as ncm
import datetime
import read_MonthlyOutput as MO
import calc_Utilities as UT
import cmocean
### Define directories
directorydata = '/surtsey/zlabe/simu/'
directoryfigure = '/home/zlabe/Desktop/'
#directoryfigure = '/home/zlabe/Documents/Research/SITperturb/Figures/'
### Define time
now = datetime.datetime.now()
currentmn = str(now.month)
currentdy = str(now.day)
currentyr = str(now.year)
currenttime = currentmn + '_' + currentdy + '_' + currentyr
titletime = currentmn + '/' + currentdy + '/' + currentyr
print('\n' '----Plotting SIC contributions - %s----' % titletime)
### Alott time series
year1 = 1900
year2 = 2000
years = np.arange(year1,year2+1,1)
### Call arguments
varnames = ['Z500','Z30','SLP','T2M','U10','U300','SWE','THICK','P','EGR',
'RNET']
varnames = ['Z500']
runnames = [r'CIT',r'FPOL',r'FSUB']
experiments = [r'\textbf{$\Delta$POLAR}',r'\textbf{$\Delta$SUBPOLAR}',r'\textbf{difference}']
period = 'DJF'
for v in range(len(varnames)):
    ### Call function to read data for the variable from each run
lat,lon,time,lev,tascit = MO.readExperi(directorydata,
'%s' % varnames[v],'CIT','surface')
lat,lon,time,lev,tasfic = MO.readExperi(directorydata,
'%s' % varnames[v],'FPOL','surface')
lat,lon,time,lev,tasfict = MO.readExperi(directorydata,
'%s' % varnames[v],'FSUB','surface')
### Create 2d array of latitude and longitude
lon2,lat2 = np.meshgrid(lon,lat)
### Concatonate runs
runs = [tascit,tasfic,tasfict]
### Separate per periods (ON,DJ,FM)
if period == 'ON':
tas_mo = np.empty((4,tascit.shape[0],tascit.shape[2],tascit.shape[3]))
for i in range(len(runs)):
tas_mo[i] = np.nanmean(runs[i][:,9:11,:,:],axis=1)
elif period == 'DJ':
tas_mo = np.empty((4,tascit.shape[0]-1,tascit.shape[2],tascit.shape[3]))
for i in range(len(runs)):
tas_mo[i],tas_mo[i] = UT.calcDecJan(runs[i],runs[i],lat,
lon,'surface',1)
elif period == 'FM':
tas_mo= np.empty((4,tascit.shape[0],tascit.shape[2],tascit.shape[3]))
for i in range(len(runs)):
tas_mo[i] = np.nanmean(runs[i][:,1:3,:,:],axis=1)
elif period == 'DJF':
tas_mo= np.empty((4,tascit.shape[0]-1,tascit.shape[2],tascit.shape[3]))
for i in range(len(runs)):
tas_mo[i],tas_mo[i] = UT.calcDecJanFeb(runs[i],runs[i],lat,
lon,'surface',1)
elif period == 'M':
tas_mo= np.empty((4,tascit.shape[0],tascit.shape[2],tascit.shape[3]))
for i in range(len(runs)):
tas_mo[i] = runs[i][:,2,:,:]
else:
        raise ValueError('Wrong period selected! (ON,DJ,FM,DJF,M)')
    ### Slice out each experiment
tas_mocit = tas_mo[0][:,:,:]
tas_mofic = tas_mo[1][:,:,:]
tas_mofict = tas_mo[2][:,:,:]
    ### Compute comparisons between experiments - take the ensemble average
ficcit = np.nanmean(tas_mofic - tas_mocit,axis=0)
fictfit = np.nanmean(tas_mofict - tas_mocit,axis=0)
difference = ficcit - fictfit
diffruns_mo = [ficcit,fictfit,difference]
    ### Calculate statistical significance
stat_FICCIT,pvalue_FICCIT = UT.calc_indttest(tas_mofic,tas_mocit)
stat_FICTFIT,pvalue_FICTFIT = UT.calc_indttest(tas_mofict,tas_mocit)
stat_difference,pvalue_difference = UT.calc_indttest(tas_mofic - tas_mocit,
tas_mofict - tas_mocit)
pruns_mo = [pvalue_FICCIT,pvalue_FICTFIT,pvalue_difference]
###########################################################################
###########################################################################
###########################################################################
#### Plot T2M
plt.rc('text',usetex=True)
plt.rc('font',**{'family':'sans-serif','sans-serif':['Avant Garde']})
fig = plt.figure()
for i in range(len(experiments)):
var = diffruns_mo[i]
pvar = pruns_mo[i]
### Set limits for contours and colorbars
if varnames[v] == 'T2M':
limit = np.arange(-10,10.1,0.5)
barlim = np.arange(-10,11,5)
elif varnames[v] == 'Z500':
limit = np.arange(-60,60.1,1)
barlim = np.arange(-60,61,30)
elif varnames[v] == 'Z30':
limit = np.arange(-100,100.1,5)
barlim = np.arange(-100,101,50)
elif varnames[v] == 'SLP':
limit = np.arange(-6,6.1,0.5)
barlim = np.arange(-6,7,3)
elif varnames[v] == 'U10' or varnames[v] == 'U300':
limit = np.arange(-10,10.1,1)
barlim = np.arange(-10,11,5)
elif varnames[v] == 'SWE':
limit = np.arange(-25,25.1,1)
barlim = np.arange(-25,26,25)
elif varnames[v] == 'P':
limit = np.arange(-2,2.1,0.05)
barlim = np.arange(-2,3,1)
elif varnames[v] == 'THICK':
limit = np.arange(-60,60.1,3)
            barlim = np.arange(-60,61,30)
"""This module provides convenience functions to calculate curved-sky responses and reconstruction noise curve for lensing or other estimators
In plancklens a QE is often described by a short string.
For example 'ptt' stands for lensing (or lensing gradient mode) from temperature x temperature.
Anisotropy source keys are a one-letter string including
'p' (lensing gradient)
'x' (lensing curl)
's' (point sources)
'f' (modulation field)
'a' (polarization rotation)
Typical keys include then:
'ptt', 'xtt', 'stt', 'ftt' for the corresponding QEs from temperature only
'p_p', 'x_p', 'f_p', 'a_p' for the corresponding QEs from polarization only (combining EE EB and BB if relevant)
        'p', 'x', 'f', 'a', ... for the MV (or GMV) combination
'p_eb', ... for the EB estimator (this is the symmetrized version ('peb' + 'pbe') / 2 so that E and B appear each once on the gradient and inverse-variance filtered leg)
Bias-hardening can be included by inserting '_bh_'.
E.g. 'ptt_bh_s' is the lensing TT QE bias-hardened against point source contamination using the 'stt' estimator
Responses method takes as input the QE weights (typically the lensed CMB spectra) and the filtering cls ('fals')
which describes the filtering applied to the maps (the :math:`(C + N)^{-1}` operation)
*get_N0_iter* calculates an estimate of the N0s for iterative lensing estimator beyond the QE
"""
import os
import healpy as hp
import numpy as np
import plancklens
from plancklens import utils, qresp, nhl
from copy import deepcopy
def get_N0(beam_fwhm=1.4, nlev_t:float or np.ndarray=5., nlev_p=None, lmax_CMB: dict or int =3000, lmin_CMB=100, lmax_out=None,
cls_len:dict or None =None, cls_weight:dict or None=None,
joint_TP=True, ksource='p'):
r"""Example function to calculates reconstruction noise levels for a bunch of quadratic estimators
Args:
beam_fwhm: beam fwhm in arcmin
nlev_t: T white noise level in uK-arcmin (an array of size lmax_CMB can be passed for scale-dependent noise level)
nlev_p: P white noise level in uK-arcmin (defaults to root(2) nlevt) (can also be an array)
lmax_CMB: max. CMB multipole used in the QE (use a dict with 't' 'e' 'b' keys instead of int to set different CMB lmaxes)
lmin_CMB: min. CMB multipole used in the QE
lmax_out: max lensing 'L' multipole calculated
cls_len: CMB spectra entering the sky response to the anisotropy (defaults to FFP10 lensed CMB spectra)
cls_weight: CMB spectra entering the QE weights (defaults to FFP10 lensed CMB spectra)
joint_TP: if True include calculation of the N0s for the GMV estimator (incl. joint T and P filtering)
ksource: anisotropy source to consider (defaults to 'p', lensing)
Returns:
            N0s array for the lensing gradient and curl modes for the T-only, P-only and (G)MV estimators
Prompted by AL
"""
if nlev_p is None:
nlev_p = nlev_t * np.sqrt(2)
if not isinstance(lmax_CMB, dict):
lmaxs_CMB = {s: lmax_CMB for s in ['t', 'e', 'b']}
else:
lmaxs_CMB = lmax_CMB
print("Seeing lmax's:")
for s in lmaxs_CMB.keys():
print(s + ': ' + str(lmaxs_CMB[s]))
lmax_ivf = np.max(list(lmaxs_CMB.values()))
lmin_ivf = lmin_CMB
lmax_qlm = lmax_out or lmax_ivf
cls_path = os.path.join(os.path.dirname(os.path.abspath(plancklens.__file__)), 'data', 'cls')
cls_len = cls_len or utils.camb_clfile(os.path.join(cls_path, 'FFP10_wdipole_lensedCls.dat'))
cls_weight = cls_weight or utils.camb_clfile(os.path.join(cls_path, 'FFP10_wdipole_lensedCls.dat'))
# We consider here TT, Pol-only and the GMV comb if joint_TP is set
qe_keys = [ksource + 'tt', ksource + '_p']
if not joint_TP:
qe_keys.append(ksource)
# Simple white noise model. Can feed here something more fancy if desired
transf = hp.gauss_beam(beam_fwhm / 60. / 180. * np.pi, lmax=lmax_ivf)
Noise_L_T = (nlev_t / 60. / 180. * np.pi) ** 2 / transf ** 2
Noise_L_P = (nlev_p / 60. / 180. * np.pi) ** 2 / transf ** 2
# Data power spectra
cls_dat = {
'tt': (cls_len['tt'][:lmax_ivf + 1] + Noise_L_T),
'ee': (cls_len['ee'][:lmax_ivf + 1] + Noise_L_P),
'bb': (cls_len['bb'][:lmax_ivf + 1] + Noise_L_P),
'te': np.copy(cls_len['te'][:lmax_ivf + 1])}
for s in cls_dat.keys():
cls_dat[s][min(lmaxs_CMB[s[0]], lmaxs_CMB[s[1]]) + 1:] *= 0.
# (C+N)^{-1} filter spectra
# For independent T and P filtering, this is really just 1/ (C+ N), diagonal in T, E, B space
fal_sepTP = {spec: utils.cli(cls_dat[spec]) for spec in ['tt', 'ee', 'bb']}
# Spectra of the inverse-variance filtered maps
# In general cls_ivfs = fal * dat_cls * fal^t, with a matrix product in T, E, B space
cls_ivfs_sepTP = utils.cls_dot([fal_sepTP, cls_dat, fal_sepTP], ret_dict=True)
# For joint TP filtering, fals is matrix inverse
fal_jtTP = utils.cl_inverse(cls_dat)
# since cls_dat = fals, cls_ivfs = fals. If the data spectra do not match the filter, this must be changed:
cls_ivfs_jtTP = utils.cls_dot([fal_jtTP, cls_dat, fal_jtTP], ret_dict=True)
for cls in [fal_sepTP, fal_jtTP, cls_ivfs_sepTP, cls_ivfs_jtTP]:
for cl in cls.values():
cl[:max(1, lmin_ivf)] *= 0.
N0s = {}
N0_curls = {}
for qe_key in qe_keys:
# This calculates the unormalized QE gradient (G), curl (C) variances and covariances:
# (GC and CG is zero for most estimators)
NG, NC, NGC, NCG = nhl.get_nhl(qe_key, qe_key, cls_weight, cls_ivfs_sepTP, lmax_ivf, lmax_ivf,
lmax_out=lmax_qlm)
# Calculation of the G to G, C to C, G to C and C to G QE responses (again, cross-terms are typically zero)
RG, RC, RGC, RCG = qresp.get_response(qe_key, lmax_ivf, ksource, cls_weight, cls_len, fal_sepTP,
lmax_qlm=lmax_qlm)
# Gradient and curl noise terms
N0s[qe_key] = utils.cli(RG ** 2) * NG
N0_curls[qe_key] = utils.cli(RC ** 2) * NC
if joint_TP:
NG, NC, NGC, NCG = nhl.get_nhl(ksource, ksource, cls_weight, cls_ivfs_jtTP, lmax_ivf, lmax_ivf,
lmax_out=lmax_qlm)
RG, RC, RGC, RCG = qresp.get_response(ksource, lmax_ivf, ksource, cls_weight, cls_len, fal_jtTP,
lmax_qlm=lmax_qlm)
N0s[ksource] = utils.cli(RG ** 2) * NG
N0_curls[ksource] = utils.cli(RC ** 2) * NC
return N0s, N0_curls
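# A minimal usage sketch of get_N0, assuming the FFP10 default spectra shipped
# with plancklens are available and the keyword defaults above are unchanged:
#   N0s, N0_curls = get_N0(beam_fwhm=1.4, nlev_t=5., lmax_CMB=3000,
#                          lmin_CMB=100, lmax_out=2048, joint_TP=True,
#                          ksource='p')
#   With joint_TP=True the returned dicts hold the keys 'ptt' (T-only),
#   'p_p' (P-only) and 'p' (joint GMV); N0s are the gradient-mode noise
#   curves and N0_curls the corresponding curl-mode curves.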
def cls2dls(cls):
"""Turns cls dict. into camb cl array format"""
keys = ['tt', 'ee', 'bb', 'te']
lmax = np.max([len(cl) for cl in cls.values()]) - 1
dls = np.zeros((lmax + 1, 4), dtype=float)
    refac = np.arange(lmax + 1)
import numpy as np
from scipy.optimize import minimize
from intvalpy.MyClass import Interval
from intvalpy.intoper import zeros
def Uni(A, b, x=None, maxQ=False, x0=None, tol=1e-12, maxiter=1e3):
"""
    Compute the recognizing functional Uni.
    If maxQ=True, the maximum of the functional is found.
    Parameters:
                A: Interval
                    The matrix of the interval linear system.
                b: Interval
                    The right-hand side vector of the interval linear system.
    Optional Parameters:
                x: float, array_like
                    The point at which the recognizing functional is evaluated.
                    By default x is an array of zeros.
                maxQ: bool
                    If this parameter is True, the functional is maximized.
                x0: float, array_like
                    Initial guess.
                tol: float
                    Tolerance for terminating the optimization process.
                maxiter: int
                    Maximum number of iterations.
    Returns:
                out: float, tuple
                    The value of the recognizing functional at the point x.
                    If maxQ=True, a tuple is returned in which
                    the first element indicates whether the optimization
                    terminated successfully, the second element is the optimum
                    point, and the third element is the functional value there.
"""
__uni = lambda x: min(b.rad - (b.mid - A @ x).mig)
__minus_uni = lambda x: -__uni(x)
if maxQ==False:
if x is None:
x = np.zeros(A.shape[1])
return __uni(x)
else:
from scipy.optimize import minimize
if x0 is None:
x0 = np.zeros(A.shape[1])+1
maximize = minimize(__minus_uni, x0, method='Nelder-Mead', tol=tol,
options={'maxiter': maxiter})
return maximize.success, maximize.x, -maximize.fun
def Tol(A, b, x=None, maxQ=False, x0=None, tol=1e-12, maxiter=1e3):
"""
    Compute the recognizing functional Tol.
    If maxQ=True, the maximum of the functional is found.
    Parameters:
                A: Interval
                    The matrix of the interval linear system.
                b: Interval
                    The right-hand side vector of the interval linear system.
    Optional Parameters:
                x: float, array_like
                    The point at which the recognizing functional is evaluated.
                    By default x is an array of zeros.
                maxQ: bool
                    If this parameter is True, the functional is maximized.
                x0: float, array_like
                    Initial guess.
                tol: float
                    Tolerance for terminating the optimization process.
                maxiter: int
                    Maximum number of iterations.
    Returns:
                out: float, tuple
                    The value of the recognizing functional at the point x.
                    If maxQ=True, a tuple is returned in which
                    the first element indicates whether the optimization
                    terminated successfully, the second element is the optimum
                    point, and the third element is the functional value there.
"""
__tol = lambda x: min(b.rad - abs(b.mid - A @ x))
__minus_tol = lambda x: -__tol(x)
if maxQ==False:
if x is None:
x = np.zeros(A.shape[1])
return __tol(x)
else:
from scipy.optimize import minimize
if x0 is None:
x0 = np.zeros(A.shape[1])+1
maximize = minimize(__minus_tol, x0, method='Nelder-Mead', tol=tol,
options={'maxiter': maxiter})
return maximize.success, maximize.x, -maximize.fun
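# A minimal usage sketch, assuming Interval(inf, sup) (imported above from
# intvalpy.MyClass) builds interval objects from arrays of lower and upper
# endpoints; the system below is purely illustrative:
#   A = Interval([[2, -2], [-1, 2]], [[4, 1], [2, 4]])
#   b = Interval([-2, -2], [2, 2])
#   success, x_opt, tol_max = Tol(A, b, maxQ=True)
#   A non-negative tol_max indicates that the tolerable solution set of the
#   interval system A x = b is non-empty, and x_opt is a point from it.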
def ive(A, b, N=40):
"""
    Compute the variability measure (IVE) of the parameter estimate.
    Parameters:
                A: Interval
                    The matrix of the interval linear system.
                b: Interval
                    The right-hand side vector of the interval linear system.
    Optional Parameters:
                N: int
                    The number of endpoint (corner) matrices for which the
                    condition number is computed.
    Returns:
                out: float
                    The IVE variability measure.
"""
success, _arg_max, _max = Tol(A, b, maxQ=True)
if not success:
        print('Optimization of the Tol functional did not terminate correctly!')
_inf = A.a
_sup = A.b
cond = float('inf')
    angle_A = np.zeros(A.shape, dtype='float64')
import numpy
from matplotlib import pyplot
import advection
import weno_coefficients
from scipy.integrate import ode
def weno(order, q):
"""
Do WENO reconstruction
Parameters
----------
order : int
The stencil width
q : numpy array
Scalar data to reconstruct
Returns
-------
qL : numpy array
Reconstructed data - boundary points are zero
"""
C = weno_coefficients.C_all[order]
a = weno_coefficients.a_all[order]
sigma = weno_coefficients.sigma_all[order]
qL = numpy.zeros_like(q)
beta = numpy.zeros((order, len(q)))
w = numpy.zeros_like(beta)
np = len(q) - 2 * order
epsilon = 1e-16
for i in range(order, np+order):
q_stencils = numpy.zeros(order)
alpha = numpy.zeros(order)
for k in range(order):
for l in range(order):
for m in range(l+1):
beta[k, i] += sigma[k, l, m] * q[i+k-l] * q[i+k-m]
alpha[k] = C[k] / (epsilon + beta[k, i]**2)
for l in range(order):
q_stencils[k] += a[k, l] * q[i+k-l]
w[:, i] = alpha / numpy.sum(alpha)
qL[i] = numpy.dot(w[:, i], q_stencils)
return qL
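# A minimal usage sketch of weno(), assuming weno_coefficients provides the
# order=3 (five-point) coefficient tables used above; the first and last
# `order` entries of q act as ghost cells and the matching entries of qL stay
# zero by construction:
#   x = numpy.linspace(0.0, 1.0, 64)
#   q = numpy.sin(2.0 * numpy.pi * x)
#   qL = weno(3, q)   # WENO-reconstructed values at the interior points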
def weno_M(order, q):
"""
Do WENOM reconstruction following Gerolymos equation (18)
Parameters
----------
order : int
The stencil width
q : numpy array
Scalar data to reconstruct
Returns
-------
qL : numpy array
Reconstructed data - boundary points are zero
"""
C = weno_coefficients.C_all[order]
a = weno_coefficients.a_all[order]
sigma = weno_coefficients.sigma_all[order]
    qL = numpy.zeros_like(q)
import itertools
import logging
import os.path as osp
import tempfile
import numpy as np
from mmcv.utils import print_log
from terminaltables import AsciiTable
from .builder import DATASETS
from .coco import CocoDataset
# DATASETS.register_module(name='LVISDataset', module=LVISDataset)
# LVISDataset = LVISV05Dataset
# DATASETS.register_module(name='LVISDataset', module=LVISDataset)
@DATASETS.register_module()
class LVISV1Dataset(CocoDataset):
CLASSES = (
'aerosol_can', 'air_conditioner', 'airplane', 'alarm_clock', 'alcohol',
'alligator', 'almond', 'ambulance', 'amplifier', 'anklet', 'antenna',
'apple', 'applesauce', 'apricot', 'apron', 'aquarium',
'arctic_(type_of_shoe)', 'armband', 'armchair', 'armoire', 'armor',
'artichoke', 'trash_can', 'ashtray', 'asparagus', 'atomizer',
'avocado', 'award', 'awning', 'ax', 'baboon', 'baby_buggy',
'basketball_backboard', 'backpack', 'handbag', 'suitcase', 'bagel',
'bagpipe', 'baguet', 'bait', 'ball', 'ballet_skirt', 'balloon',
'bamboo', 'banana', 'Band_Aid', 'bandage', 'bandanna', 'banjo',
'banner', 'barbell', 'barge', 'barrel', 'barrette', 'barrow',
'baseball_base', 'baseball', 'baseball_bat', 'baseball_cap',
'baseball_glove', 'basket', 'basketball', 'bass_horn', 'bat_(animal)',
'bath_mat', 'bath_towel', 'bathrobe', 'bathtub', 'batter_(food)',
'battery', 'beachball', 'bead', 'bean_curd', 'beanbag', 'beanie',
'bear', 'bed', 'bedpan', 'bedspread', 'cow', 'beef_(food)', 'beeper',
'beer_bottle', 'beer_can', 'beetle', 'bell', 'bell_pepper', 'belt',
'belt_buckle', 'bench', 'beret', 'bib', 'Bible', 'bicycle', 'visor',
'billboard', 'binder', 'binoculars', 'bird', 'birdfeeder', 'birdbath',
'birdcage', 'birdhouse', 'birthday_cake', 'birthday_card',
'pirate_flag', 'black_sheep', 'blackberry', 'blackboard', 'blanket',
'blazer', 'blender', 'blimp', 'blinker', 'blouse', 'blueberry',
'gameboard', 'boat', 'bob', 'bobbin', 'bobby_pin', 'boiled_egg',
'bolo_tie', 'deadbolt', 'bolt', 'bonnet', 'book', 'bookcase',
'booklet', 'bookmark', 'boom_microphone', 'boot', 'bottle',
'bottle_opener', 'bouquet', 'bow_(weapon)', 'bow_(decorative_ribbons)',
'bow-tie', 'bowl', 'pipe_bowl', 'bowler_hat', 'bowling_ball', 'box',
'boxing_glove', 'suspenders', 'bracelet', 'brass_plaque', 'brassiere',
'bread-bin', 'bread', 'breechcloth', 'bridal_gown', 'briefcase',
'broccoli', 'broach', 'broom', 'brownie', 'brussels_sprouts',
'bubble_gum', 'bucket', 'horse_buggy', 'bull', 'bulldog', 'bulldozer',
'bullet_train', 'bulletin_board', 'bulletproof_vest', 'bullhorn',
'bun', 'bunk_bed', 'buoy', 'burrito', 'bus_(vehicle)', 'business_card',
'butter', 'butterfly', 'button', 'cab_(taxi)', 'cabana', 'cabin_car',
'cabinet', 'locker', 'cake', 'calculator', 'calendar', 'calf',
'camcorder', 'camel', 'camera', 'camera_lens', 'camper_(vehicle)',
'can', 'can_opener', 'candle', 'candle_holder', 'candy_bar',
'candy_cane', 'walking_cane', 'canister', 'canoe', 'cantaloup',
'canteen', 'cap_(headwear)', 'bottle_cap', 'cape', 'cappuccino',
'car_(automobile)', 'railcar_(part_of_a_train)', 'elevator_car',
'car_battery', 'identity_card', 'card', 'cardigan', 'cargo_ship',
'carnation', 'horse_carriage', 'carrot', 'tote_bag', 'cart', 'carton',
'cash_register', 'casserole', 'cassette', 'cast', 'cat', 'cauliflower',
'cayenne_(spice)', 'CD_player', 'celery', 'cellular_telephone',
'chain_mail', 'chair', 'chaise_longue', 'chalice', 'chandelier',
'chap', 'checkbook', 'checkerboard', 'cherry', 'chessboard',
'chicken_(animal)', 'chickpea', 'chili_(vegetable)', 'chime',
'chinaware', 'crisp_(potato_chip)', 'poker_chip', 'chocolate_bar',
'chocolate_cake', 'chocolate_milk', 'chocolate_mousse', 'choker',
'chopping_board', 'chopstick', 'Christmas_tree', 'slide', 'cider',
'cigar_box', 'cigarette', 'cigarette_case', 'cistern', 'clarinet',
'clasp', 'cleansing_agent', 'cleat_(for_securing_rope)', 'clementine',
'clip', 'clipboard', 'clippers_(for_plants)', 'cloak', 'clock',
'clock_tower', 'clothes_hamper', 'clothespin', 'clutch_bag', 'coaster',
'coat', 'coat_hanger', 'coatrack', 'cock', 'cockroach',
'cocoa_(beverage)', 'coconut', 'coffee_maker', 'coffee_table',
'coffeepot', 'coil', 'coin', 'colander', 'coleslaw',
'coloring_material', 'combination_lock', 'pacifier', 'comic_book',
'compass', 'computer_keyboard', 'condiment', 'cone', 'control',
'convertible_(automobile)', 'sofa_bed', 'cooker', 'cookie',
'cooking_utensil', 'cooler_(for_food)', 'cork_(bottle_plug)',
'corkboard', 'corkscrew', 'edible_corn', 'cornbread', 'cornet',
'cornice', 'cornmeal', 'corset', 'costume', 'cougar', 'coverall',
'cowbell', 'cowboy_hat', 'crab_(animal)', 'crabmeat', 'cracker',
'crape', 'crate', 'crayon', 'cream_pitcher', 'crescent_roll', 'crib',
'crock_pot', 'crossbar', 'crouton', 'crow', 'crowbar', 'crown',
'crucifix', 'cruise_ship', 'police_cruiser', 'crumb', 'crutch',
'cub_(animal)', 'cube', 'cucumber', 'cufflink', 'cup', 'trophy_cup',
'cupboard', 'cupcake', 'hair_curler', 'curling_iron', 'curtain',
'cushion', 'cylinder', 'cymbal', 'dagger', 'dalmatian', 'dartboard',
'date_(fruit)', 'deck_chair', 'deer', 'dental_floss', 'desk',
'detergent', 'diaper', 'diary', 'die', 'dinghy', 'dining_table', 'tux',
'dish', 'dish_antenna', 'dishrag', 'dishtowel', 'dishwasher',
'dishwasher_detergent', 'dispenser', 'diving_board', 'Dixie_cup',
'dog', 'dog_collar', 'doll', 'dollar', 'dollhouse', 'dolphin',
'domestic_ass', 'doorknob', 'doormat', 'doughnut', 'dove', 'dragonfly',
'drawer', 'underdrawers', 'dress', 'dress_hat', 'dress_suit',
'dresser', 'drill', 'drone', 'dropper', 'drum_(musical_instrument)',
'drumstick', 'duck', 'duckling', 'duct_tape', 'duffel_bag', 'dumbbell',
'dumpster', 'dustpan', 'eagle', 'earphone', 'earplug', 'earring',
'easel', 'eclair', 'eel', 'egg', 'egg_roll', 'egg_yolk', 'eggbeater',
'eggplant', 'electric_chair', 'refrigerator', 'elephant', 'elk',
'envelope', 'eraser', 'escargot', 'eyepatch', 'falcon', 'fan',
'faucet', 'fedora', 'ferret', 'Ferris_wheel', 'ferry', 'fig_(fruit)',
'fighter_jet', 'figurine', 'file_cabinet', 'file_(tool)', 'fire_alarm',
'fire_engine', 'fire_extinguisher', 'fire_hose', 'fireplace',
'fireplug', 'first-aid_kit', 'fish', 'fish_(food)', 'fishbowl',
'fishing_rod', 'flag', 'flagpole', 'flamingo', 'flannel', 'flap',
'flash', 'flashlight', 'fleece', 'flip-flop_(sandal)',
'flipper_(footwear)', 'flower_arrangement', 'flute_glass', 'foal',
'folding_chair', 'food_processor', 'football_(American)',
'football_helmet', 'footstool', 'fork', 'forklift', 'freight_car',
'French_toast', 'freshener', 'frisbee', 'frog', 'fruit_juice',
'frying_pan', 'fudge', 'funnel', 'futon', 'gag', 'garbage',
'garbage_truck', 'garden_hose', 'gargle', 'gargoyle', 'garlic',
'gasmask', 'gazelle', 'gelatin', 'gemstone', 'generator',
'giant_panda', 'gift_wrap', 'ginger', 'giraffe', 'cincture',
'glass_(drink_container)', 'globe', 'glove', 'goat', 'goggles',
'goldfish', 'golf_club', 'golfcart', 'gondola_(boat)', 'goose',
'gorilla', 'gourd', 'grape', 'grater', 'gravestone', 'gravy_boat',
'green_bean', 'green_onion', 'griddle', 'grill', 'grits', 'grizzly',
'grocery_bag', 'guitar', 'gull', 'gun', 'hairbrush', 'hairnet',
'hairpin', 'halter_top', 'ham', 'hamburger', 'hammer', 'hammock',
'hamper', 'hamster', 'hair_dryer', 'hand_glass', 'hand_towel',
'handcart', 'handcuff', 'handkerchief', 'handle', 'handsaw',
'hardback_book', 'harmonium', 'hat', 'hatbox', 'veil', 'headband',
'headboard', 'headlight', 'headscarf', 'headset',
'headstall_(for_horses)', 'heart', 'heater', 'helicopter', 'helmet',
'heron', 'highchair', 'hinge', 'hippopotamus', 'hockey_stick', 'hog',
'home_plate_(baseball)', 'honey', 'fume_hood', 'hook', 'hookah',
'hornet', 'horse', 'hose', 'hot-air_balloon', 'hotplate', 'hot_sauce',
'hourglass', 'houseboat', 'hummingbird', 'hummus', 'polar_bear',
'icecream', 'popsicle', 'ice_maker', 'ice_pack', 'ice_skate',
'igniter', 'inhaler', 'iPod', 'iron_(for_clothing)', 'ironing_board',
'jacket', 'jam', 'jar', 'jean', 'jeep', 'jelly_bean', 'jersey',
'jet_plane', 'jewel', 'jewelry', 'joystick', 'jumpsuit', 'kayak',
'keg', 'kennel', 'kettle', 'key', 'keycard', 'kilt', 'kimono',
'kitchen_sink', 'kitchen_table', 'kite', 'kitten', 'kiwi_fruit',
'knee_pad', 'knife', 'knitting_needle', 'knob', 'knocker_(on_a_door)',
'koala', 'lab_coat', 'ladder', 'ladle', 'ladybug', 'lamb_(animal)',
'lamb-chop', 'lamp', 'lamppost', 'lampshade', 'lantern', 'lanyard',
'laptop_computer', 'lasagna', 'latch', 'lawn_mower', 'leather',
'legging_(clothing)', 'Lego', 'legume', 'lemon', 'lemonade', 'lettuce',
'license_plate', 'life_buoy', 'life_jacket', 'lightbulb',
'lightning_rod', 'lime', 'limousine', 'lion', 'lip_balm', 'liquor',
'lizard', 'log', 'lollipop', 'speaker_(stero_equipment)', 'loveseat',
'machine_gun', 'magazine', 'magnet', 'mail_slot', 'mailbox_(at_home)',
'mallard', 'mallet', 'mammoth', 'manatee', 'mandarin_orange', 'manger',
'manhole', 'map', 'marker', 'martini', 'mascot', 'mashed_potato',
'masher', 'mask', 'mast', 'mat_(gym_equipment)', 'matchbox',
'mattress', 'measuring_cup', 'measuring_stick', 'meatball', 'medicine',
'melon', 'microphone', 'microscope', 'microwave_oven', 'milestone',
'milk', 'milk_can', 'milkshake', 'minivan', 'mint_candy', 'mirror',
'mitten', 'mixer_(kitchen_tool)', 'money',
'monitor_(computer_equipment) computer_monitor', 'monkey', 'motor',
'motor_scooter', 'motor_vehicle', 'motorcycle', 'mound_(baseball)',
'mouse_(computer_equipment)', 'mousepad', 'muffin', 'mug', 'mushroom',
'music_stool', 'musical_instrument', 'nailfile', 'napkin',
'neckerchief', 'necklace', 'necktie', 'needle', 'nest', 'newspaper',
'newsstand', 'nightshirt', 'nosebag_(for_animals)',
'noseband_(for_animals)', 'notebook', 'notepad', 'nut', 'nutcracker',
'oar', 'octopus_(food)', 'octopus_(animal)', 'oil_lamp', 'olive_oil',
'omelet', 'onion', 'orange_(fruit)', 'orange_juice', 'ostrich',
'ottoman', 'oven', 'overalls_(clothing)', 'owl', 'packet', 'inkpad',
'pad', 'paddle', 'padlock', 'paintbrush', 'painting', 'pajamas',
'palette', 'pan_(for_cooking)', 'pan_(metal_container)', 'pancake',
'pantyhose', 'papaya', 'paper_plate', 'paper_towel', 'paperback_book',
'paperweight', 'parachute', 'parakeet', 'parasail_(sports)', 'parasol',
'parchment', 'parka', 'parking_meter', 'parrot',
'passenger_car_(part_of_a_train)', 'passenger_ship', 'passport',
'pastry', 'patty_(food)', 'pea_(food)', 'peach', 'peanut_butter',
'pear', 'peeler_(tool_for_fruit_and_vegetables)', 'wooden_leg',
'pegboard', 'pelican', 'pen', 'pencil', 'pencil_box',
'pencil_sharpener', 'pendulum', 'penguin', 'pennant', 'penny_(coin)',
'pepper', 'pepper_mill', 'perfume', 'persimmon', 'person', 'pet',
'pew_(church_bench)', 'phonebook', 'phonograph_record', 'piano',
'pickle', 'pickup_truck', 'pie', 'pigeon', 'piggy_bank', 'pillow',
'pin_(non_jewelry)', 'pineapple', 'pinecone', 'ping-pong_ball',
'pinwheel', 'tobacco_pipe', 'pipe', 'pistol', 'pita_(bread)',
'pitcher_(vessel_for_liquid)', 'pitchfork', 'pizza', 'place_mat',
'plate', 'platter', 'playpen', 'pliers', 'plow_(farm_equipment)',
'plume', 'pocket_watch', 'pocketknife', 'poker_(fire_stirring_tool)',
'pole', 'polo_shirt', 'poncho', 'pony', 'pool_table', 'pop_(soda)',
'postbox_(public)', 'postcard', 'poster', 'pot', 'flowerpot', 'potato',
'potholder', 'pottery', 'pouch', 'power_shovel', 'prawn', 'pretzel',
'printer', 'projectile_(weapon)', 'projector', 'propeller', 'prune',
'pudding', 'puffer_(fish)', 'puffin', 'pug-dog', 'pumpkin', 'puncher',
'puppet', 'puppy', 'quesadilla', 'quiche', 'quilt', 'rabbit',
'race_car', 'racket', 'radar', 'radiator', 'radio_receiver', 'radish',
'raft', 'rag_doll', 'raincoat', 'ram_(animal)', 'raspberry', 'rat',
'razorblade', 'reamer_(juicer)', 'rearview_mirror', 'receipt',
'recliner', 'record_player', 'reflector', 'remote_control',
'rhinoceros', 'rib_(food)', 'rifle', 'ring', 'river_boat', 'road_map',
'robe', 'rocking_chair', 'rodent', 'roller_skate', 'Rollerblade',
'rolling_pin', 'root_beer', 'router_(computer_equipment)',
'rubber_band', 'runner_(carpet)', 'plastic_bag',
'saddle_(on_an_animal)', 'saddle_blanket', 'saddlebag', 'safety_pin',
'sail', 'salad', 'salad_plate', 'salami', 'salmon_(fish)',
'salmon_(food)', 'salsa', 'saltshaker', 'sandal_(type_of_shoe)',
'sandwich', 'satchel', 'saucepan', 'saucer', 'sausage', 'sawhorse',
'saxophone', 'scale_(measuring_instrument)', 'scarecrow', 'scarf',
'school_bus', 'scissors', 'scoreboard', 'scraper', 'screwdriver',
'scrubbing_brush', 'sculpture', 'seabird', 'seahorse', 'seaplane',
'seashell', 'sewing_machine', 'shaker', 'shampoo', 'shark',
'sharpener', 'Sharpie', 'shaver_(electric)', 'shaving_cream', 'shawl',
'shears', 'sheep', 'shepherd_dog', 'sherbert', 'shield', 'shirt',
'shoe', 'shopping_bag', 'shopping_cart', 'short_pants', 'shot_glass',
'shoulder_bag', 'shovel', 'shower_head', 'shower_cap',
'shower_curtain', 'shredder_(for_paper)', 'signboard', 'silo', 'sink',
'skateboard', 'skewer', 'ski', 'ski_boot', 'ski_parka', 'ski_pole',
'skirt', 'skullcap', 'sled', 'sleeping_bag', 'sling_(bandage)',
'slipper_(footwear)', 'smoothie', 'snake', 'snowboard', 'snowman',
'snowmobile', 'soap', 'soccer_ball', 'sock', 'sofa', 'softball',
'solar_array', 'sombrero', 'soup', 'soup_bowl', 'soupspoon',
'sour_cream', 'soya_milk', 'space_shuttle', 'sparkler_(fireworks)',
'spatula', 'spear', 'spectacles', 'spice_rack', 'spider', 'crawfish',
'sponge', 'spoon', 'sportswear', 'spotlight', 'squid_(food)',
'squirrel', 'stagecoach', 'stapler_(stapling_machine)', 'starfish',
'statue_(sculpture)', 'steak_(food)', 'steak_knife', 'steering_wheel',
'stepladder', 'step_stool', 'stereo_(sound_system)', 'stew', 'stirrer',
'stirrup', 'stool', 'stop_sign', 'brake_light', 'stove', 'strainer',
'strap', 'straw_(for_drinking)', 'strawberry', 'street_sign',
'streetlight', 'string_cheese', 'stylus', 'subwoofer', 'sugar_bowl',
'sugarcane_(plant)', 'suit_(clothing)', 'sunflower', 'sunglasses',
'sunhat', 'surfboard', 'sushi', 'mop', 'sweat_pants', 'sweatband',
'sweater', 'sweatshirt', 'sweet_potato', 'swimsuit', 'sword',
'syringe', 'Tabasco_sauce', 'table-tennis_table', 'table',
'table_lamp', 'tablecloth', 'tachometer', 'taco', 'tag', 'taillight',
'tambourine', 'army_tank', 'tank_(storage_vessel)',
'tank_top_(clothing)', 'tape_(sticky_cloth_or_paper)', 'tape_measure',
'tapestry', 'tarp', 'tartan', 'tassel', 'tea_bag', 'teacup',
'teakettle', 'teapot', 'teddy_bear', 'telephone', 'telephone_booth',
'telephone_pole', 'telephoto_lens', 'television_camera',
'television_set', 'tennis_ball', 'tennis_racket', 'tequila',
'thermometer', 'thermos_bottle', 'thermostat', 'thimble', 'thread',
'thumbtack', 'tiara', 'tiger', 'tights_(clothing)', 'timer', 'tinfoil',
'tinsel', 'tissue_paper', 'toast_(food)', 'toaster', 'toaster_oven',
'toilet', 'toilet_tissue', 'tomato', 'tongs', 'toolbox', 'toothbrush',
'toothpaste', 'toothpick', 'cover', 'tortilla', 'tow_truck', 'towel',
'towel_rack', 'toy', 'tractor_(farm_equipment)', 'traffic_light',
'dirt_bike', 'trailer_truck', 'train_(railroad_vehicle)', 'trampoline',
'tray', 'trench_coat', 'triangle_(musical_instrument)', 'tricycle',
'tripod', 'trousers', 'truck', 'truffle_(chocolate)', 'trunk', 'vat',
'turban', 'turkey_(food)', 'turnip', 'turtle', 'turtleneck_(clothing)',
'typewriter', 'umbrella', 'underwear', 'unicycle', 'urinal', 'urn',
'vacuum_cleaner', 'vase', 'vending_machine', 'vent', 'vest',
'videotape', 'vinegar', 'violin', 'vodka', 'volleyball', 'vulture',
'waffle', 'waffle_iron', 'wagon', 'wagon_wheel', 'walking_stick',
'wall_clock', 'wall_socket', 'wallet', 'walrus', 'wardrobe',
'washbasin', 'automatic_washer', 'watch', 'water_bottle',
'water_cooler', 'water_faucet', 'water_heater', 'water_jug',
'water_gun', 'water_scooter', 'water_ski', 'water_tower',
'watering_can', 'watermelon', 'weathervane', 'webcam', 'wedding_cake',
'wedding_ring', 'wet_suit', 'wheel', 'wheelchair', 'whipped_cream',
'whistle', 'wig', 'wind_chime', 'windmill', 'window_box_(for_plants)',
'windshield_wiper', 'windsock', 'wine_bottle', 'wine_bucket',
'wineglass', 'blinder_(for_horses)', 'wok', 'wolf', 'wooden_spoon',
'wreath', 'wrench', 'wristband', 'wristlet', 'yacht', 'yogurt',
'yoke_(animal_equipment)', 'zebra', 'zucchini')
# def load_annotations(self, ann_file):
# try:
# import lvis
# assert lvis.__version__ >= '10.5.3'
# from lvis import LVIS
# except AssertionError:
# raise AssertionError('Incompatible version of lvis is installed. '
# 'Run pip uninstall lvis first. Then run pip '
# 'install mmlvis to install open-mmlab forked '
# 'lvis. ')
# except ImportError:
# raise ImportError('Package lvis is not installed. Please run pip '
# 'install mmlvis to install open-mmlab forked '
# 'lvis.')
# self.coco = LVIS(ann_file)
# # assert not self.custom_classes, 'LVIS custom classes is not supported'
# self.cat_ids = self.coco.get_cat_ids()
# self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}
# self.img_ids = self.coco.get_img_ids()
# data_infos = []
# for i in self.img_ids:
# info = self.coco.load_imgs([i])[0]
# # coco_url is used in LVISv1 instead of file_name
# # e.g. http://images.cocodataset.org/train2017/000000391895.jpg
# # train/val split in specified in url
# info['filename'] = info['coco_url'].replace(
# 'http://images.cocodataset.org/', '')
# data_infos.append(info)
# return data_infos
def load_annotations(self, ann_file):
try:
import lvis
assert lvis.__version__ >= '10.5.3'
from lvis import LVIS
except AssertionError:
raise AssertionError('Incompatible version of lvis is installed. '
'Run pip uninstall lvis first. Then run pip '
'install mmlvis to install open-mmlab forked '
'lvis. ')
except ImportError:
raise ImportError('Package lvis is not installed. Please run pip '
'install mmlvis to install open-mmlab forked '
'lvis.')
self.lvis = LVIS(ann_file)
self.full_cat_ids = self.lvis.get_cat_ids()
self.full_cat2label = {
cat_id: i + 1
for i, cat_id in enumerate(self.full_cat_ids)
}
self.CLASSES = tuple([item['name'] for item in self.lvis.dataset['categories']])
self.cat_ids = self.lvis.get_cat_ids()
self.cat2label = {
cat_id: i + 1
for i, cat_id in enumerate(self.cat_ids)
}
self.img_ids = self.lvis.get_img_ids()
self.img_infos = []
for i in self.img_ids:
info = self.lvis.load_imgs([i])[0]
info['filename'] = info['coco_url'].replace(
'http://images.cocodataset.org/', '')
self.img_infos.append(info)
return self.img_infos
def get_ann_info(self, idx):
img_id = self.img_infos[idx]['id']
ann_ids = self.lvis.get_ann_ids(img_ids=[img_id])
ann_info = self.lvis.load_anns(ann_ids)
return self._parse_ann_info(self.img_infos[idx], ann_info)
def get_ann_info_withoutparse(self, idx):
img_id = self.img_infos[idx]['id']
ann_ids = self.lvis.get_ann_ids(img_ids=[img_id])
ann_info = self.lvis.load_anns(ann_ids)
return ann_info
def _filter_imgs(self, min_size=32):
"""Filter images too small or without ground truths."""
valid_inds = []
ids_with_ann = set(_['image_id'] for _ in self.lvis.anns.values())
for i, img_info in enumerate(self.img_infos):
if self.img_ids[i] not in ids_with_ann:
continue
if min(img_info['width'], img_info['height']) >= min_size:
valid_inds.append(i)
return valid_inds
def _parse_ann_info(self, img_info, ann_info):
"""Parse bbox and mask annotation.
Args:
img_info (dict): Meta information of the image (width, height, etc.).
ann_info (list[dict]): Annotation info of an image.
Returns:
dict: A dict containing the following keys: bboxes, bboxes_ignore,
labels, masks, mask_polys, poly_lens.
"""
gt_bboxes = []
gt_labels = []
gt_bboxes_ignore = []
# Two formats are provided.
# 1. mask: a binary map of the same size of the image.
# 2. polys: each mask consists of one or several polys, each poly is a
# list of float.
gt_masks = []
for i, ann in enumerate(ann_info):
if ann.get('ignore', False):
continue
x1, y1, w, h = ann['bbox']
if ann['area'] <= 0 or w < 1 or h < 1:
continue
bbox = [x1, y1, x1 + w - 1, y1 + h - 1]
if 'iscrowd' in ann.keys():
if ann['iscrowd']:
gt_bboxes_ignore.append(bbox)
else:
gt_bboxes.append(bbox)
gt_labels.append(self.cat2label[ann['category_id']])
gt_masks.append(self.lvis.ann_to_mask(ann))
if gt_bboxes:
gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
gt_labels = np.array(gt_labels, dtype=np.int64)
import pytest
import sys
import os
from math import trunc, ceil, floor
import numpy as np
sys.path.insert(0, os.getcwd())
from uncvalue import Value, val, unc, set_unc # noqa: E402
ϵ = 1e-8
a = Value(3.1415, 0.0012)
b = Value(-1.618, 0.235)
c = Value(3.1264e2, 1.268)
A = np.array([[a, a], [b, b], [c, c]])
B = Value([a.x] * 5, a.ux)
C = Value([b.x] * 5, [b.ux] * 5)
@pytest.mark.parametrize('v, x', [
(a, a.x),
(A, np.array([[a.x, a.x], [b.x, b.x], [c.x, c.x]])),
(B, a.x),
(a.x, a.x)],
ids=['Single', 'Array of values', 'Value array', 'Number'])
def test_val(v, x):
assert np.all(val(v) == x)
@pytest.mark.parametrize('v, x', [
(a, a.ux),
(A, np.array([[a.ux, a.ux], [b.ux, b.ux], [c.ux, c.ux]])),
(B, a.ux),
(a.x, 0)],
ids=['Single', 'Array of values', 'Value array', 'Number'])
def test_unc(v, x):
assert np.all(unc(v) == x)
def test_set_unc():
v = set_unc(0.234, 0.0052)
assert isinstance(v, Value)
assert v.x == 0.234
assert v.ux == 0.0052
v = set_unc(a, 0.0052)
assert isinstance(v, Value)
assert v.x == a.x
assert v.ux == 0.0052
v = set_unc([0.234] * 8, 0.0052)
assert isinstance(v, np.ndarray)
assert v.shape == (8, )
assert np.mean(unc(v)) == 0.0052
v = set_unc([0.234] * 8, [0.0052] * 8)
assert isinstance(v, np.ndarray)
assert v.shape == (8, )
assert np.mean(unc(v)) == 0.0052
with pytest.raises(ValueError):
set_unc(np.random.random((3, 2, 1)), np.random.random((4, 2, 1)))
def test_constructor():
v = Value(3.1415, 0.0012)
assert v.x == 3.1415 == v.val
assert v.ux == 0.0012 == v.unc
with pytest.raises(ValueError):
Value(3.14, -0.28)
V = Value([3.1415] * 8, 0.0012)
assert V.x.shape == (8, )
assert V.ux.shape == (8, )
assert np.mean(V.ux) == 0.0012
V = Value([3.1415] * 8, [0.0012] * 8)
assert V.x.shape == (8, )
assert V.ux.shape == (8, )
assert np.mean(V.ux) == 0.0012
with pytest.raises(ValueError):
Value(np.random.random((3, 2, 1)), np.random.random((4, 2, 1)))
with pytest.raises(ValueError):
Value(1j, 0)
Value(1, 2j)
@pytest.mark.parametrize('x, y, r', [
(a.x, a, False),
(a, a.x, False),
(a, Value(a.x, a.ux * 5), False),
(b, a, True),
(a, a - 0.0001, False),
(A, A, False),
(B, C, False)],
ids=['Right', 'Left', 'Both', 'Different', 'Within unc', 'Array eq', 'Array dif'])
def test_smaller(x, y, r):
assert np.all((x < y) == r)
@pytest.mark.parametrize('x, y, r', [
(1, a, Value(a.x + 1, a.ux)),
(a, 1, Value(a.x + 1, a.ux)),
(a, b, Value(a.x + b.x, np.hypot(a.ux, b.ux))),
(1, A, np.array([[a+1, a+1], [b+1, b+1], [c+1, c+1]])),
(a, A, np.array([[a+a, a+a], [b+a, b+a], [c+a, c+a]])),
(1, B, Value(1 + B.x, B.ux)),
(a, B, Value(a.x + B.x, np.hypot(a.ux, B.ux))),
(A, A, np.array([[a+a, a+a], [b+b, b+b], [c+c, c+c]])),
(B, C, Value(B.x + C.x, np.hypot(B.ux, C.ux))),
],
ids=['Right', 'Left', 'Both', 'Number + Array', 'Value + Array', 'Array of values',
'Number + Valued array', 'Value + Valued array', 'Valued array'])
def test_add(x, y, r):
z = x + y
assert np.all(val(z) == val(r))
assert np.all(unc(z) == unc(r))
@pytest.mark.parametrize('x, y, r', [
(1, a, Value(a.x + 1, a.ux)),
(a.copy(), 1, Value(a.x + 1, a.ux)),
(a.copy(), b, Value(a.x + b.x, np.hypot(a.ux, b.ux))),
(B.copy(), C, Value(B.x + C.x, np.hypot(B.ux, C.ux))),
],
ids=['Right', 'Left', 'Both', 'Array'])
def test_iadd(x, y, r):
x += y
assert isinstance(x, Value)
assert np.all(x.x == r.x)
assert np.all(x.ux == r.ux)
@pytest.mark.parametrize('x, y, r', [
(1, a, Value(1 - a.x, a.ux)),
(a, 1, Value(a.x - 1, a.ux)),
(a, b, Value(a.x - b.x, np.hypot(a.ux, b.ux))),
(A, A, np.array([[a-a, a-a], [b-b, b-b], [c-c, c-c]])),
(B, C, Value(B.x - C.x, np.hypot(B.ux, C.ux))),
],
ids=['Right', 'Left', 'Both', 'Array of values', 'Valued array'])
def test_sub(x, y, r):
z = x - y
assert np.all(val(z) == val(r))
assert np.all(unc(z) == unc(r))
@pytest.mark.parametrize('x, y, r', [
(1, a, Value(1 - a.x, a.ux)),
(a.copy(), 1, Value(a.x - 1, a.ux)),
(a.copy(), b, Value(a.x - b.x, np.hypot(a.ux, b.ux))),
(B.copy(), C, Value(B.x - C.x, np.hypot(B.ux, C.ux))),
],
ids=['Right', 'Left', 'Both', 'Array'])
def test_isub(x, y, r):
x -= y
assert isinstance(x, Value)
assert np.all(x.x == r.x)
assert np.all(x.ux == r.ux)
@pytest.mark.parametrize('x, y, r', [
(2, a, Value(2 * a.x, 2 * a.ux)),
(a, 2, Value(a.x * 2, 2 * a.ux)),
(a, b, Value(a.x * b.x, np.hypot(a.ux * b.x, a.x * b.ux))),
(A, A, np.array([[a*a, a*a], [b*b, b*b], [c*c, c*c]])),
(B, C, Value(B.x * C.x, np.hypot(B.ux * C.x, B.x * C.ux))),
],
ids=['Right', 'Left', 'Both', 'Array of values', 'Valued array'])
def test_mul(x, y, r):
z = x * y
assert np.all(val(z) == val(r))
assert np.all(unc(z) == unc(r))
@pytest.mark.parametrize('x, y, r', [
(2, a, Value(2 * a.x, 2 * a.ux)),
(a.copy(), 2, Value(a.x * 2, 2 * a.ux)),
(a.copy(), b, Value(a.x * b.x, np.hypot(a.ux * b.x, a.x * b.ux))),
(B.copy(), C, Value(B.x * C.x, np.hypot(B.ux * C.x, B.x * C.ux))),
],
ids=['Right', 'Left', 'Both', 'Array'])
def test_imul(x, y, r):
x *= y
assert isinstance(x, Value)
assert np.all(x.x == r.x)
assert np.all(x.ux == r.ux)
@pytest.mark.parametrize('x, y, r', [
(2, a, Value(2 / a.x, 2 * a.ux / a.x**2)),
(a, 2, Value(a.x / 2, a.ux / 2)),
(a, b, Value(a.x / b.x, np.hypot(a.ux / b.x, a.x * b.ux / b.x**2))),
(B, C, Value(B.x / C.x, np.hypot(B.ux / C.x, B.x * C.ux / C.x**2))),
],
ids=['Right', 'Left', 'Both', 'Array'])
def test_div(x, y, r):
z = x / y
assert isinstance(z, Value)
assert np.all(z.x == r.x)
assert np.all(z.ux == r.ux)
@pytest.mark.parametrize('x, y, r', [
(2, a, Value(2 // a.x, 2 * a.ux // a.x**2)),
(a, 2, Value(a.x // 2, a.ux // 2)),
(a, b, Value(a.x // b.x, np.hypot(a.ux // b.x, a.x * b.ux // b.x**2))),
(B, C, Value(B.x // C.x, np.hypot(B.ux // C.x, B.x * C.ux // C.x**2))),
],
ids=['Right', 'Left', 'Both', 'Array'])
def test_floordiv(x, y, r):
z = x // y
assert isinstance(z, Value)
assert np.all(z.x == r.x)
import json
import os
import os.path as osp
from glob import glob
from PIL import Image
import numpy as np
from tqdm import tqdm
from torch.utils.data import DataLoader, ConcatDataset, Dataset
SRC_DATASET_DIR = '/data/datasets/ICDAR17_MLT' # FIXME
DST_DATASET_DIR = '/data/datasets/ICDAR17_Korean' # FIXME
NUM_WORKERS = 32 # FIXME
IMAGE_EXTENSIONS = {'.gif', '.jpg', '.png'}
LANGUAGE_MAP = {
'Korean': 'ko',
'Latin': 'en',
'Symbols': None
}
def get_language_token(x):
return LANGUAGE_MAP.get(x, 'others')
def maybe_mkdir(x):
if not osp.exists(x):
os.makedirs(x)
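# Hedged usage sketch (illustration only): the token map above collapses any
# script outside the table to 'others', which is what the Korean/English-only
# filter in the dataset class below relies on.
def _example_language_token():
    assert get_language_token('Korean') == 'ko'
    assert get_language_token('Symbols') is None
    assert get_language_token('Arabic') == 'others'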
class MLT17Dataset(Dataset):
def __init__(self, image_dir, label_dir, copy_images_to=None):
image_paths = {x for x in glob(osp.join(image_dir, '*')) if osp.splitext(x)[1] in
IMAGE_EXTENSIONS}
label_paths = set(glob(osp.join(label_dir, '*.txt')))
assert len(image_paths) == len(label_paths)
sample_ids, samples_info = list(), dict()
for image_path in image_paths:
sample_id = osp.splitext(osp.basename(image_path))[0]
label_path = osp.join(label_dir, 'gt_{}.txt'.format(sample_id))
assert label_path in label_paths
words_info, extra_info = self.parse_label_file(label_path)
if 'ko' not in extra_info['languages'] or extra_info['languages'].difference({'ko', 'en'}):
continue
sample_ids.append(sample_id)
samples_info[sample_id] = dict(image_path=image_path, label_path=label_path,
words_info=words_info)
self.sample_ids, self.samples_info = sample_ids, samples_info
self.copy_images_to = copy_images_to
def __len__(self):
return len(self.sample_ids)
def __getitem__(self, idx):
sample_info = self.samples_info[self.sample_ids[idx]]
image_fname = osp.basename(sample_info['image_path'])
image = Image.open(sample_info['image_path'])
img_w, img_h = image.size
if self.copy_images_to:
maybe_mkdir(self.copy_images_to)
image.save(osp.join(self.copy_images_to, osp.basename(sample_info['image_path'])))
license_tag = dict(usability=True, public=True, commercial=True, type='CC-BY-SA',
holder=None)
sample_info_ufo = dict(img_h=img_h, img_w=img_w, words=sample_info['words_info'], tags=None,
license_tag=license_tag)
return image_fname, sample_info_ufo
def parse_label_file(self, label_path):
def rearrange_points(points):
start_idx = np.argmin([np.linalg.norm(p, ord=1) for p in points])
if start_idx != 0:
points = np.roll(points, -start_idx, axis=0)
## MINE_binary_search.py -- attack a network and update constant c with binary search
##
## author: <NAME>
import sys
import tensorflow as tf
import numpy as np
BINARY_SEARCH_STEPS = 9 # number of times to adjust the constant with binary search
MAX_ITERATIONS = 10000 # number of iterations to perform gradient descent
LEARNING_RATE = 1e-2 # larger values converge faster to less accurate results
TARGETED = False # should we target one specific class? or just be wrong?
CONFIDENCE = 0 # how strong the adversarial example should be
INITIAL_CONST = 1e-3 # the initial constant c to pick as a first guess
def standardized(img):
mean, var = tf.nn.moments(tf.convert_to_tensor(img),[1])
mean = tf.tile(tf.reshape(mean,[-1,1]),[1,tf.shape(img)[1]])
var = tf.tile(tf.reshape(tf.sqrt(var),[-1,1]),[1, tf.shape(img)[1]])
img = (img-mean)/var
return img
def MiNetwork(x_in, y_in,mine_batch_='conv'):
H=10
seed = np.random.randint(0,1000,1)
y_shuffle = tf.gather(y_in, tf.random_shuffle(tf.range(tf.shape(y_in)[0]),seed=seed))
x_conc = tf.concat([x_in, x_in], axis=0)
y_conc = tf.concat([y_in, y_shuffle], axis=0)
# propagate the forward pass
if mine_batch_ == 'conv':
layerx = tf.layers.conv2d(x_conc, 16, 2, (1,1),use_bias=True,name='M_0')
layerx = tf.layers.flatten(layerx,name='M_1')
layerx = tf.layers.dense(layerx, 512,name='M_2',use_bias=True)
layerx = tf.layers.dense(layerx, H,name='M_3',use_bias=True)
#========================================
layery = tf.layers.conv2d(y_conc, 16, 2, (1,1),name='M_4',use_bias=True)
layery = tf.layers.flatten(layery,name='M_5')
layery = tf.layers.dense(layery, 512,name='M_6',use_bias=True)
layery = tf.layers.dense(layery, H, name='M_7',use_bias=True)
else:
layerx = tf.layers.dense(x_conc, 512,name='M_2',use_bias=True)
layerx = tf.layers.dense(layerx, H,name='M_3',use_bias=True)
layery = tf.layers.dense(y_conc, 512,name='M_6',use_bias=True)
layery = tf.layers.dense(layery, H, name='M_7',use_bias=True)
layer2 = tf.nn.relu(layerx + layery,name='M_8')
output = tf.layers.dense(layer2, 1,name='M_9',use_bias=False)
# split in T_xy and T_x_y predictions
N_samples = tf.shape(x_in)[0]
T_xy = output[:N_samples]
T_x_y = output[N_samples:]
return T_xy, T_x_y
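# Hedged sketch (illustration only, not part of the original attack code): the
# critic outputs above feed MINE's Donsker-Varadhan lower bound on mutual
# information, MI >= mean(T_xy) - log(mean(exp(T_x_y))), which is what the
# TensorFlow graph below builds with tf.reduce_mean and tf.log. A NumPy version:
def _example_dv_bound(t_xy, t_x_y):
    t_xy = np.asarray(t_xy, dtype=np.float64)
    t_x_y = np.asarray(t_x_y, dtype=np.float64)
    return t_xy.mean() - np.log(np.exp(t_x_y).mean())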
class MINE_supervised_binary_search:
def __init__(self, sess, model,batch_size=1, confidence = CONFIDENCE,
targeted = False, learning_rate = LEARNING_RATE,
binary_search_steps = BINARY_SEARCH_STEPS, max_iterations = MAX_ITERATIONS,
initial_const = INITIAL_CONST,
boxmin = 0, boxmax = 1,epsilon=0.3,mine_batch='conv'):
"""
Returns adversarial examples for the supplied model.
confidence: Confidence of adversarial examples: higher produces examples
that are farther away, but more strongly classified as adversarial.
batch_size: Number of attacks to run simultaneously.
targeted: True if we should perform a targetted attack, False otherwise.
learning_rate: The learning rate for the attack algorithm. Smaller values
produce better results but are slower to converge.
binary_search_steps: The number of times we perform binary search to
find the optimal tradeoff-constant between distance and confidence.
max_iterations: The maximum number of iterations. Larger values are more
accurate; setting too small will require a large learning rate and will
produce poor results.
initial_const: The initial tradeoff-constant to use to tune the relative
importance of distance and confidence.
boxmin: Minimum pixel value (default 0).
boxmax: Maximum pixel value (default 1).
epsilon: Maximum pixel value can be changed for attack.
mine_batch: generate batch sample for MINE ('conv', 'random_sampling').
"""
image_size, num_channels, num_labels = model.image_size, model.num_channels, model.num_labels
self.sess = sess
self.TARGETED = targeted
self.LEARNING_RATE = learning_rate
self.MAX_ITERATIONS = max_iterations
self.BINARY_SEARCH_STEPS = binary_search_steps
self.CONFIDENCE = confidence
self.initial_const = initial_const
self.batch_size = batch_size
self.mine_batch = mine_batch
shape = (batch_size,image_size,image_size,num_channels)
# these are variables to be more efficient in sending data to tf
self.timg = tf.Variable(np.zeros(shape), dtype=tf.float32)
self.tlab = tf.Variable(np.zeros((batch_size,num_labels)), dtype=tf.float32)
self.const = tf.Variable(np.zeros(batch_size), dtype=tf.float32)
# and here's what we use to assign them
self.assign_timg = tf.placeholder(tf.float32, shape)
self.assign_tlab = tf.placeholder(tf.float32, (batch_size,num_labels))
self.assign_const = tf.placeholder(tf.float32, [batch_size])
# the variable we're going to optimize over
modifier = tf.Variable(np.random.uniform(-epsilon,epsilon,shape).astype('float32'),name='modifier')
self.modifier = tf.get_variable('modifier',shape,trainable=True, constraint=lambda x: tf.clip_by_value(x, -epsilon, epsilon))
# the resulting image, tanh'd to keep bounded from boxmin to boxmax
self.newimg = tf.clip_by_value(self.modifier + self.timg, boxmin, boxmax)
if self.mine_batch == 'random_sampling':
matrix = tf.random_normal([500,image_size*image_size*num_channels,128],0.,1.0/tf.sqrt(128.))
self.x_batch = standardized( tf.keras.backend.dot(tf.reshape(self.timg,[image_size*image_size*num_channels]),matrix))
self.y_batch = standardized( tf.keras.backend.dot(tf.reshape(self.newimg,[image_size*image_size*num_channels]),matrix))
else:
self.x_batch = tf.transpose(model.conv1(self.timg),perm=[3,1,2,0])
self.y_batch = tf.transpose(model.conv1(self.newimg),perm=[3,1,2,0])
T_xy , T_x_y = MiNetwork(self.x_batch, self.y_batch, mine_batch_=self.mine_batch)
self.MI = tf.reduce_mean(T_xy, axis=0) - tf.log(tf.reduce_mean(tf.exp(T_x_y)))
# prediction BEFORE-SOFTMAX of the model
self.output = model.predict(self.newimg)
# compute the probability of the label class versus the maximum other
real = tf.reduce_sum((self.tlab)*self.output,1)
other = tf.reduce_max((1-self.tlab)*self.output - (self.tlab*10000),1)
if self.TARGETED:
# if targetted, optimize for making the other class most likely
loss1 = tf.maximum(0.0, other-real+self.CONFIDENCE)
else:
# if untargeted, optimize for making this class least likely.
loss1 = tf.maximum(0.0, real-other+self.CONFIDENCE)
# sum up the losses
self.loss1_1 = tf.maximum(0.0, real-other+self.CONFIDENCE)
self.loss1 = tf.reduce_sum(self.const*loss1)
self.loss = self.loss1 - self.MI
# Setup the adam optimizer and keep track of variables we're creating
start_vars = set(x.name for x in tf.global_variables())
m_var = [var for var in tf.global_variables() if 'M_' in var.name]
optimizer = tf.train.GradientDescentOptimizer(self.LEARNING_RATE)
self.mi_train = tf.train.AdamOptimizer(0.00002).minimize(-self.MI, var_list=m_var+[self.modifier])
self.train = optimizer.minimize(self.loss, var_list=[self.modifier])
end_vars = tf.global_variables()
new_vars = [x for x in end_vars if x.name not in start_vars]
# these are the variables to initialize when we run
self.setup = []
self.setup.append(self.timg.assign(self.assign_timg))
self.setup.append(self.tlab.assign(self.assign_tlab))
self.setup.append(self.const.assign(self.assign_const))
self.init = tf.variables_initializer(var_list=[self.modifier]+new_vars)
self.mi_init = tf.variables_initializer(var_list=m_var)
def attack(self, imgs, targets):
"""
Perform the MINE-based attack on the given images for the given targets.
If self.targeted is true, then the targets represents the target labels.
If self.targeted is false, then targets are the original class labels.
"""
r = []
print('go up to',len(imgs))
for i in range(0,len(imgs),self.batch_size):
print('tick',i)
result, m = self.attack_batch(imgs[i:i+self.batch_size], targets[i:i+self.batch_size])
r.extend(result)
return np.array(r), m
def attack_batch(self, imgs, labs):
"""
Run the attack on a batch of images and labels.
"""
def compare(x,y):
if not isinstance(x, (float, int, np.int64)):
x = np.copy(x)
if self.TARGETED:
x[y] -= self.CONFIDENCE
else:
x[y] += self.CONFIDENCE
x = np.argmax(x)
if self.TARGETED:
return x == y
else:
return x != y
batch_size = self.batch_size
# convert to tanh-space
#imgs = np.arctanh((imgs - self.boxplus) / self.boxmul * 0.999999)
# set the lower and upper bounds accordingly
lower_bound = np.zeros(batch_size)
CONST = np.ones(batch_size)
import importlib.resources
import numpy as np
from hexrd import constants
from hexrd import symmetry, symbols
from hexrd.spacegroup import Allowed_HKLs
from hexrd.ipfcolor import sphere_sector, colorspace
from hexrd.valunits import valWUnit
import hexrd.resources
import warnings
import h5py
from pathlib import Path
from scipy.interpolate import interp1d
import time
eps = constants.sqrt_epsf
class unitcell:
'''
>> @AUTHOR: <NAME>, Lawrence Livermore National Lab, <EMAIL>
>> @DATE: 10/09/2018 SS 1.0 original
@DATE: 10/15/2018 SS 1.1 added space group handling
>> @DETAILS: this is the unitcell class
'''
# initialize the unitcell class
# need lattice parameters and space group data from HDF5 file
def __init__(self, lp, sgnum,
atomtypes, charge,
atominfo,
U, dmin, beamenergy,
sgsetting=0):
self._tstart = time.time()
self.pref = 0.4178214
self.atom_type = atomtypes
self.chargestates = charge
self.atom_pos = atominfo
self._dmin = dmin
self.lparms = lp
self.U = U
'''
initialize interpolation from table for anomalous scattering
'''
self.InitializeInterpTable()
'''
sets x-ray energy
calculate wavelength
also calculates anomalous form factors for xray scattering
'''
self.voltage = beamenergy * 1000.0
'''
calculate symmetry
'''
self.sgsetting = sgsetting
self.sgnum = sgnum
self._tstop = time.time()
self.tinit = self._tstop - self._tstart
def GetPgLg(self):
'''
simple subroutine to get point and laue groups
to maintain consistency for planedata initialization
in the materials class
'''
for k in list(_pgDict.keys()):
if self.sgnum in k:
pglg = _pgDict[k]
self._pointGroup = pglg[0]
self._laueGroup = pglg[1]
self._supergroup = pglg[2]
self._supergroup_laue = pglg[3]
def CalcWavelength(self):
# wavelength in nm
self.wavelength = constants.cPlanck * \
constants.cLight / \
constants.cCharge / \
self.voltage
self.wavelength *= 1e9
self.CalcAnomalous()
def calcBetaij(self):
self.betaij = np.zeros([self.atom_ntype, 3, 3])
for i in range(self.U.shape[0]):
U = self.U[i, :]
self.betaij[i, :, :] = np.array([[U[0], U[3], U[4]],
[U[3], U[1], U[5]],
[U[4], U[5], U[2]]])
self.betaij[i, :, :] *= 2. * np.pi**2 * self._aij
def calcmatrices(self):
a = self.a
b = self.b
c = self.c
alpha = np.radians(self.alpha)
beta = np.radians(self.beta)
gamma = np.radians(self.gamma)
ca = np.cos(alpha)
cb = np.cos(beta)
cg = np.cos(gamma)
sa = np.sin(alpha)
sb = np.sin(beta)
sg = np.sin(gamma)
tg = np.tan(gamma)
'''
direct metric tensor
'''
self._dmt = np.array([[a**2, a*b*cg, a*c*cb],
[a*b*cg, b**2, b*c*ca],
[a*c*cb, b*c*ca, c**2]])
self._vol = np.sqrt(np.linalg.det(self.dmt))
if(self.vol < 1e-5):
warnings.warn('unitcell volume is suspiciously small')
'''
reciprocal metric tensor
'''
self._rmt = np.linalg.inv(self.dmt)
'''
direct structure matrix
'''
self._dsm = np.array([[a, b*cg, c*cb],
[0., b*sg, -c*(cb*cg - ca)/sg],
[0., 0., self.vol/(a*b*sg)]])
self._dsm[np.abs(self._dsm) < eps] = 0.
'''
reciprocal structure matrix
'''
self._rsm = np.array([[1./a, 0., 0.],
[-1./(a*tg), 1./(b*sg), 0.],
[b*c*(cg*ca - cb)/(self.vol*sg),
a*c*(cb*cg - ca)/(self.vol*sg),
a*b*sg/self.vol]])
self._rsm[np.abs(self._rsm) < eps] = 0.
ast = self.CalcLength([1, 0, 0], 'r')
bst = self.CalcLength([0, 1, 0], 'r')
cst = self.CalcLength([0, 0, 1], 'r')
self._aij = np.array([[ast**2, ast*bst, ast*cst],
[bst*ast, bst**2, bst*cst],
[cst*ast, cst*bst, cst**2]])
''' transform between any crystal space to any other space.
choices are 'd' (direct), 'r' (reciprocal) and 'c' (cartesian)'''
def TransSpace(self, v_in, inspace, outspace):
if(inspace == 'd'):
if(outspace == 'r'):
v_out = np.dot(v_in, self.dmt)
elif(outspace == 'c'):
v_out = np.dot(self.dsm, v_in)
else:
raise ValueError(
'inspace in ''d'' but outspace can''t be identified')
elif(inspace == 'r'):
if(outspace == 'd'):
v_out = np.dot(v_in, self.rmt)
elif(outspace == 'c'):
v_out = np.dot(self.rsm, v_in)
else:
raise ValueError(
'inspace in ''r'' but outspace can''t be identified')
elif(inspace == 'c'):
if(outspace == 'r'):
v_out = np.dot(v_in, self.rsm)
elif(outspace == 'd'):
v_out = np.dot(v_in, self.dsm)
else:
raise ValueError(
'inspace in ''c'' but outspace can''t be identified')
else:
raise ValueError('incorrect inspace argument')
return v_out
''' calculate dot product of two vectors in any space 'd' 'r' or 'c' '''
def CalcDot(self, u, v, space):
if(space == 'd'):
dot = np.dot(u, np.dot(self.dmt, v))
elif(space == 'r'):
dot = np.dot(u, np.dot(self.rmt, v))
elif(space == 'c'):
dot = np.dot(u, v)
else:
raise ValueError('space is unidentified')
return dot
''' calculate dot product of two vectors in any space 'd' 'r' or 'c' '''
def CalcLength(self, u, space):
if(space == 'd'):
vlen = np.sqrt(np.dot(u, np.dot(self.dmt, u)))
elif(space == 'r'):
vlen = np.sqrt(np.dot(u, np.dot(self.rmt, u)))
elif(space == 'c'):
vlen = np.linalg.norm(u)
else:
raise ValueError('incorrect space argument')
return vlen
''' normalize vector in any space 'd' 'r' or 'c' '''
def NormVec(self, u, space):
ulen = self.CalcLength(u, space)
return u/ulen
''' calculate angle between two vectors in any space'''
def CalcAngle(self, u, v, space):
ulen = self.CalcLength(u, space)
vlen = self.CalcLength(v, space)
dot = self.CalcDot(u, v, space)/ulen/vlen
angle = np.arccos(dot)
return angle
''' calculate cross product between two vectors in any space.
cross product of two vectors in direct space is a vector in
reciprocal space
cross product of two vectors in reciprocal space is a vector in
direct space
the outspace specifies if a conversion needs to be made
@NOTE: vol_divide is the switch (True/False) which turns division
by the volume of the unit cell on or off.'''
def CalcCross(self, p, q, inspace, outspace, vol_divide=False):
iv = 0
if(vol_divide):
vol = self.vol
else:
vol = 1.0
pxq = np.array([p[1]*q[2]-p[2]*q[1],
p[2]*q[0]-p[0]*q[2],
p[0]*q[1]-p[1]*q[0]])
if(inspace == 'd'):
'''
cross product vector is in reciprocal space
and can be converted to direct or cartesian space
'''
pxq *= vol
if(outspace == 'r'):
pass
elif(outspace == 'd'):
pxq = self.TransSpace(pxq, 'r', 'd')
elif(outspace == 'c'):
pxq = self.TransSpace(pxq, 'r', 'c')
else:
raise ValueError(
'inspace is ''d'' but outspace is unidentified')
elif(inspace == 'r'):
'''
cross product vector is in direct space and
can be converted to any other space
'''
pxq /= vol
if(outspace == 'r'):
pxq = self.TransSpace(pxq, 'd', 'r')
elif(outspace == 'd'):
pass
elif(outspace == 'c'):
pxq = self.TransSpace(pxq, 'd', 'c')
else:
raise ValueError(
'inspace is ''r'' but outspace is unidentified')
elif(inspace == 'c'):
'''
cross product is already in cartesian space so no
volume factor is involved. can be converted to any
other space too
'''
if(outspace == 'r'):
pxq = self.TransSpace(pxq, 'c', 'r')
elif(outspace == 'd'):
pxq = self.TransSpace(pxq, 'c', 'd')
elif(outspace == 'c'):
pass
else:
raise ValueError(
'inspace is ''c'' but outspace is unidentified')
else:
raise ValueError('inspace is unidentified')
return pxq
def GenerateRecipPGSym(self):
self.SYM_PG_r = self.SYM_PG_d[0, :, :]
self.SYM_PG_r = np.broadcast_to(self.SYM_PG_r, [1, 3, 3])
self.SYM_PG_r_laue = self.SYM_PG_d[0, :, :]
self.SYM_PG_r_laue = np.broadcast_to(self.SYM_PG_r_laue, [1, 3, 3])
for i in range(1, self.npgsym):
g = self.SYM_PG_d[i, :, :]
g = np.dot(self.dmt, np.dot(g, self.rmt))
g = np.round(np.broadcast_to(g, [1, 3, 3]))
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Implements naive Q learning with tabular Q function
"""
import logging
import numpy as np
from collections import defaultdict
from collections import namedtuple
from rainman2.utils import exceptions
from rainman2.lib.algorithm.Qlearning.agents import agent_template
__author__ = '<NAME> (<EMAIL>)'
__date__ = 'Wednesday, February 21st 2018, 12:42:05 pm'
CELLULAR_AGENT_ACTION = namedtuple(
'CELLULAR_AGENT_ACTION', ('action', 'ap_id'))
class QNaiveAgent(agent_template.Base):
def __init__(self, alg_config, agent_config):
# Make sure actions are provided by the environment
assert self.n_actions
# setup logging
self.logger = logging.getLogger(self.__class__.__name__)
# log params
self.logger.info("Configuration used for the Agent:")
self.logger.info("episodes: {}".format(self.episodes))
self.logger.info("alpha: {}".format(self.alpha))
self.logger.info("gamma: {}".format(self.gamma))
self.logger.info("epsilon: {}".format(self.epsilon))
self.logger.info("epsilon_decay: {}".format(self.epsilon_decay))
self.logger.info("epsilon_min: {}".format(self.epsilon_min))
# Build tabular Q(s, a) model
self.model = self._build_model()
def _build_model(self):
"""
Implements Q(s, a)
"""
# Initialize Q(s, a) arbitrarily. Here every state is initialized
# to 0
return defaultdict(lambda: np.zeros(self.n_actions))
def _take_action(self, state):
"""
Implements how to take actions when provided with a state
This follows epsilon-greedy policy (behavior policy)
Args
----
state: (tuple)
Returns
-------
action: (float)
"""
# explore if random number between [0, 1] is less than epsilon,
# that is, this agent explores 10% of the time and exploits the rest
if np.random.rand() < self.epsilon:
return np.random.choice(list(range(self.n_actions)))
return np.argmax(self.model[state])
def _learn(self, state, action, reward, next_state):
"""
Implements how the agent learns
Args
----
state: (tuple)
Current state of the environment.
action: (float)
Current action taken by the agent.
reward: (float)
Reward produced by the environment.
next_state: (tuple)
Next state of the environment.
"""
# update epsilon to reduce exploration with increase in episodes
if self.epsilon > self.epsilon_min:
self.epsilon *= self.epsilon_decay
target = reward + self.gamma * max(self.model[next_state])
error = target - self.model[state][action]
# update
self.model[state][action] += self.alpha * error
@property
def Q(self):
"""
Public method that keeps Q(s, a) values
"""
if not self.model:
raise exceptions.AgentMethodNotImplemented(
"_model is not implemented for this agent!")
return self.model
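# Hedged illustrative sketch (not from the original module): the epsilon-greedy
# action selection and TD(0) update implemented by _take_action/_learn above,
# written as a tiny standalone loop over a toy 2-state, 2-action problem.
# All names and the toy transition/reward below are hypothetical.
def _example_tabular_q_update(alpha=0.1, gamma=0.9, epsilon=0.1, steps=100):
    rng = np.random.default_rng(0)
    Q = defaultdict(lambda: np.zeros(2))          # Q[state] -> action-value array
    state = 0
    for _ in range(steps):
        # behaviour policy: explore with probability epsilon, otherwise greedy
        action = int(rng.integers(2)) if rng.random() < epsilon else int(np.argmax(Q[state]))
        next_state = 1 - state                    # toy deterministic transition
        reward = 1.0 if (state == 0 and action == 1) else 0.0
        # TD target and tabular update, mirroring _learn
        target = reward + gamma * np.max(Q[next_state])
        Q[state][action] += alpha * (target - Q[state][action])
        state = next_state
    return dict(Q)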
class QCellularAgent(agent_template.Base):
def __init__(self, alg_config, agent_config):
# Make sure actions are provided by the environment
assert self.n_actions
# setup logging
self.logger = logging.getLogger(self.__class__.__name__)
# log params
self.logger.info("Configuration used for the QCellular Agent:")
self.logger.info("episodes: {}".format(self.episodes))
self.logger.info("alpha: {}".format(self.alpha))
self.logger.info("gamma: {}".format(self.gamma))
self.logger.info("epsilon: {}".format(self.epsilon))
self.logger.info("epsilon_decay: {}".format(self.epsilon_decay))
self.logger.info("epsilon_min: {}".format(self.epsilon_min))
# Build tabular Q(s, a) model
self.model = self._build_model()
self.ap_model = self._build_ap_model()
def _build_model(self):
"""
Implements Q(s, a)
"""
# Initialize Q(s, a) arbitrarily. Here every state is initialized
# to 0
return defaultdict(lambda: np.zeros(self.n_actions))
# Copyright 2020-present NAVER Corp. Under BSD 3-clause license
from typing import Optional
import numpy as np
import quaternion
import math
import kapture_localization.utils.path_to_kapture # noqa: F401
import kapture
def get_position_diff(imgs_query, imgs_map):
positions_query = np.empty((len(imgs_query), 3, 1), dtype=np.float32)
for i, (_, pose) in enumerate(imgs_query):
positions_query[i, :, 0] = pose.t_raw
positions_query_tile = np.tile(positions_query, (1, 1, len(imgs_map)))
positions_map = np.empty((1, 3, len(imgs_map)), dtype=np.float32)
for i, (_, pose) in enumerate(imgs_map):
positions_map[0, :, i] = pose.t_raw
positions_map_tile = np.tile(positions_map, (len(imgs_query), 1, 1))
return np.sqrt(np.sum(np.square(positions_query_tile - positions_map_tile), axis=1))
def get_rotations_diff(imgs_query, imgs_map):
rad_to_deg = 180.0 / math.pi
rotations_query = np.empty((len(imgs_query), 1), dtype=np.quaternion)
for i, (_, pose) in enumerate(imgs_query):
rotations_query[i, 0] = pose.r
rotations_query_tile = np.tile(rotations_query, (1, len(imgs_map)))
rotations_map = np.empty((1, len(imgs_map)), dtype=np.quaternion)
for i, (_, pose) in enumerate(imgs_map):
rotations_map[0, i] = pose.r
rotations_map_tile = np.tile(rotations_map, (len(imgs_query), 1))
return quaternion.rotation_intrinsic_distance(rotations_query_tile, rotations_map_tile) * rad_to_deg
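# Hedged sketch (illustration only, not part of kapture-localization): the two
# score matrices above can be thresholded into candidate (query, map) pairs.
# The distance window and angle limit below are made-up example values.
def _example_threshold_pairs(positions_scores, rotation_scores,
                             min_distance=0.0, max_distance=5.0, max_angle=30.0):
    keep = ((positions_scores >= min_distance)
            & (positions_scores <= max_distance)
            & (rotation_scores <= max_angle))
    query_idx, map_idx = np.where(keep)
    return list(zip(query_idx.tolist(), map_idx.tolist()))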
def get_pairs_distance(kdata: kapture.Kapture,
kdata_query: kapture.Kapture,
topk: Optional[int],
min_distance: float,
max_distance: float,
max_angle: float,
keep_rejected: bool):
"""
get pairs as list from distance
"""
if kdata.rigs is None:
map_trajectories = kdata.trajectories
else:
map_trajectories = kapture.rigs_remove(kdata.trajectories, kdata.rigs)
imgs_map = [(img, map_trajectories[ts, sensor_id].inverse())
for ts, sensor_id, img in kapture.flatten(kdata.records_camera)
if (ts, sensor_id) in map_trajectories]
if kdata_query.rigs is None:
query_trajectories = kdata_query.trajectories
else:
query_trajectories = kapture.rigs_remove(kdata_query.trajectories, kdata_query.rigs)
imgs_query = [(img, query_trajectories[ts, sensor_id].inverse())
for ts, sensor_id, img in kapture.flatten(kdata_query.records_camera)
if (ts, sensor_id) in query_trajectories]
positions_scores = get_position_diff(imgs_query, imgs_map)
rotation_scores = get_rotations_diff(imgs_query, imgs_map)
# is_rejected = (distance < min_distance or distance > max_distance or rotation_distance > max_angle)
ones = np.ones(positions_scores.shape)
import numpy as np
def calculate_variance(prediction_probs):
mean_for_every_classifier = []
variance_for_every_classifier=[]
for collumn in range(len(prediction_probs[1])):#collumns
mean_for_this_classifier=0.0
for row in range(len(prediction_probs)):#rows
mean_for_this_classifier=mean_for_this_classifier+prediction_probs[row][collumn]
mean_for_this_classifier=mean_for_this_classifier/len(prediction_probs)
mean_for_every_classifier.append(mean_for_this_classifier)
#print(mean_for_every_classifier)
preds_sub_mean=np.subtract(prediction_probs, mean_for_every_classifier)
#print(preds_sub_mean)
preds_sub_mean_squared=np.square(preds_sub_mean)
#print(preds_sub_mean_squared)
for collumn in range(len(prediction_probs[1])):
variance = 0
for row in range(len(prediction_probs)):
variance=variance+preds_sub_mean_squared[row][collumn]
variance=variance/len(prediction_probs)
variance_for_every_classifier.append(variance)
#print(variance_for_every_classifier)
return variance_for_every_classifier
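# Hedged usage sketch (added for illustration): rows are ensemble members,
# columns are classifiers; the probabilities below are arbitrary numbers.
def _example_variance_usage():
    prediction_probs = np.array([[0.9, 0.2],
                                 [0.7, 0.4],
                                 [0.8, 0.3]])
    return calculate_variance(prediction_probs)   # one variance per classifier column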
def calculate_deviation(variance_for_every_classifier):
deviation_for_every_classifier = np.sqrt(variance_for_every_classifier)
# xyz Dec 2017
from __future__ import print_function
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
#from plyfile import (PlyData, PlyElement, make2d, PlyParseError, PlyProperty)
import numpy as np
import h5py
import glob
import time
import multiprocessing as mp
import itertools
from block_data_prep_util import Normed_H5f
ROOT_DIR = os.path.dirname(BASE_DIR)
DATA_DIR = os.path.join(ROOT_DIR,'data')
DATASET_DIR={}
DATASET_DIR['scannet'] = os.path.join(DATA_DIR,'scannet_data')
DATASET_DIR['stanford_indoor3d'] = os.path.join(DATA_DIR,'stanford_indoor3d')
matterport3D_h5f_dir = os.path.join(DATA_DIR,'Matterport3D_H5F/all_merged_nf5')
DATASET_DIR['matterport3d'] = matterport3D_h5f_dir
#-------------------------------------------------------------------------------
# provider for training and testing
#------------------------------------------------------------------------------
class Net_Provider():
'''
(1) provide data for training
(2) load file list to list of Norm_H5f[]
dataset_name: 'stanford_indoor3d' 'scannet'
all_filename_glob: stride_1_step_2_test_small_4096_normed/*.nh5
eval_fnglob_or_rate: file name str glob or file number rate. 'scan1*.nh5' 0.2
num_point_block: if the block point number is not this, do randomly sample
feed_data_elements: sub list of ['xyz_1norm','xyz_midnorm','nxnynz','color_1norm','intensity_1norm']
feed_label_elements: sub list of ['label_category','label_instance','label_material']
'''
# input normalized h5f files
# normed_h5f['data']: [blocks*block_num_point*num_channel],like [1000*4096*9]
# one batch would contain sevel(batch_size) blocks,this will be set out side
# provider with train_start_idx and test_start_idx
def __init__(self,dataset_name,all_filename_glob,eval_fnglob_or_rate,\
only_evaluate,num_point_block=None,feed_data_elements=['xyz_midnorm'],feed_label_elements=['label_category'],\
train_num_block_rate=1,eval_num_block_rate=1 ):
self.dataset_name = dataset_name
self.feed_data_elements = feed_data_elements
self.feed_label_elements = feed_label_elements
self.num_point_block = num_point_block
all_file_list = self.get_all_file_name_list(dataset_name,all_filename_glob)
train_file_list,eval_file_list = self.split_train_eval_file_list\
(all_file_list,eval_fnglob_or_rate)
if only_evaluate:
open_type = 'a' # need to write pred labels
else:
open_type = 'r'
self.train_file_N = train_file_N = len(train_file_list)
eval_file_N = len(eval_file_list)
self.g_file_N = train_file_N + eval_file_N
self.normed_h5f_file_list = normed_h5f_file_list = train_file_list + eval_file_list
#-----------------------------------------------------------------------
# open each file as a Normed_H5f class instance
self.norm_h5f_L = []
# self.g_block_idxs: within the whole train/test dataset (several files)
# record the start/end row idx of each file to help search data from all files
# [ [start_global_row_idxs,end_global__idxs] ]
# [[ 0, 38], [ 38, 90],[ 90, 150],...[259, 303],[303, 361],[361, 387]]
# self.train_num_blocks: 303
# self.eval_num_blocks: 84
# self.eval_global_start_idx: 303
self.g_block_idxs = np.zeros((self.g_file_N,2),np.int32)
self.eval_global_start_idx = None
for i,fn in enumerate(normed_h5f_file_list):
assert(os.path.exists(fn))
h5f = h5py.File(fn,open_type)
norm_h5f = Normed_H5f(h5f,fn)
self.norm_h5f_L.append( norm_h5f )
self.g_block_idxs[i,1] = self.g_block_idxs[i,0] + norm_h5f.data_set.shape[0]
if i<self.g_file_N-1:
self.g_block_idxs[i+1,0] = self.g_block_idxs[i,1]
self.eval_global_start_idx = self.g_block_idxs[train_file_N,0]
if train_file_N > 0:
self.train_num_blocks = self.g_block_idxs[train_file_N-1,1] # = self.eval_global_start_idx
else: self.train_num_blocks = 0
self.eval_num_blocks = self.g_block_idxs[-1,1] - self.train_num_blocks
self.num_classes = self.norm_h5f_L[0].num_classes
self.label_ele_idxs = self.norm_h5f_L[0].label_ele_idxs
self.label_eles = self.norm_h5f_L[0].label_set_elements
self.update_sample_loss_weight()
self.update_train_eval_shuffled_idx()
#-----------------------------------------------------------------------
# use only part of the data to test code:
if train_num_block_rate!=1 or eval_num_block_rate!=1:
self.get_data_label_shape()
print('whole train data shape: %s'%(str(self.train_data_shape)))
print('whole eval data shape: %s'%(str(self.eval_data_shape)))
# train: use the front part
self.train_num_blocks = int( self.train_num_blocks * train_num_block_rate )
if not only_evaluate:
self.train_num_blocks = max(self.train_num_blocks,2)
new_eval_num_blocks = int( max(2,self.eval_num_blocks * eval_num_block_rate) )
# eval:use the back part, so train_file_list and eval_file_list can be
# the same
self.eval_global_start_idx += self.eval_num_blocks - new_eval_num_blocks
self.eval_num_blocks = new_eval_num_blocks
self.get_data_label_shape()
self.update_data_summary()
#self.test_tmp()
def update_data_summary(self):
self.data_summary_str = '%s \nfeed_data_elements:%s \nfeed_label_elements:%s \n'%(self.dataset_name,self.feed_data_elements,self.feed_label_elements)
self.data_summary_str += 'train data shape: %s \ntest data shape: %s \n'%(
str(self.train_data_shape),str(self.eval_data_shape))
# self.data_summary_str += 'train labels histogram: %s \n'%( np.array_str(np.transpose(self.train_labels_hist_1norm) ))
# self.data_summary_str += 'test labels histogram: %s \n'%( np.array_str(np.transpose(self.test_labels_hist_1norm) ))
self.data_summary_str += 'labels histogram: %s \n'%( np.array_str(np.transpose(self.labels_hist_1norm[:,0]) ))
#print(self.data_summary_str)
def get_all_file_name_list(self,dataset_name,all_filename_globs):
all_file_list = []
fn_globs = []
for all_filename_glob in all_filename_globs:
fn_glob = os.path.join(DATASET_DIR[dataset_name],all_filename_glob+'*.nh5')
all_file_list += glob.glob( fn_glob )
fn_globs.append(fn_glob)
if len(all_file_list)== 0:
print('no file in:')
print(fn_globs)
return all_file_list
def split_train_eval_file_list(self,all_file_list,eval_fnglob_or_rate=None):
if eval_fnglob_or_rate == None:
if self.dataset_name=='stanford_indoor3d':
eval_fnglob_or_rate = 'Area_6'
if self.dataset_name=='scannet':
eval_fnglob_or_rate = 0.2
if type(eval_fnglob_or_rate)==str:
# split by name
train_file_list = []
eval_file_list = []
for fn in all_file_list:
if fn.find(eval_fnglob_or_rate) > 0:
eval_file_list.append(fn)
else:
train_file_list.append(fn)
elif type(eval_fnglob_or_rate) == float:
# split by number
n = len(all_file_list)
m = int(n*(1-eval_fnglob_or_rate))
train_file_list = all_file_list[0:m]
eval_file_list = all_file_list[m:n]
log_str = '\ntrain file list (n=%d) = \n%s\n\n'%(len(train_file_list),train_file_list[-2:])
log_str += 'eval file list (n=%d) = \n%s\n\n'%(len(eval_file_list),eval_file_list[-2:])
print( log_str )
return train_file_list,eval_file_list
def get_data_label_shape(self):
data_batches,label_batches,_ = self.get_train_batch(0,1)
self.train_data_shape = list(data_batches.shape)
self.train_data_shape[0] = self.train_num_blocks
self.num_channels = self.train_data_shape[2]
self.eval_data_shape = list(data_batches.shape)
self.eval_data_shape[0] = self.eval_num_blocks
self.num_label_eles = label_batches.shape[2]
def test_tmp(self):
s = 0
e = 1
train_data,train_label = self.get_train_batch(s,e)
eval_data,eval_label = self.get_eval_batch(s,e)
print('train:\n',train_data[0,0,:])
print('eval:\n',eval_data[0,0,:])
print('err=\n',train_data[0,0,:]-eval_data[0,0,:])
def __exit__(self):
print('exit Net_Provider')
for norm_h5f in self.norm_h5f:
norm_h5f.h5f.close()
def global_idx_to_local(self,g_start_idx,g_end_idx):
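        # Map a global block range [g_start_idx, g_end_idx] onto the per-file
        # ranges stored in self.g_block_idxs: returns the first/last file indices
        # and the corresponding local block offsets inside those files.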
assert(g_start_idx>=0 and g_start_idx<=self.g_block_idxs[-1,1])
assert(g_end_idx>=0 and g_end_idx<=self.g_block_idxs[-1,1])
for i in range(self.g_file_N):
if g_start_idx >= self.g_block_idxs[i,0] and g_start_idx < self.g_block_idxs[i,1]:
start_file_idx = i
local_start_idx = g_start_idx - self.g_block_idxs[i,0]
for j in range(i,self.g_file_N):
if g_end_idx > self.g_block_idxs[j,0] and g_end_idx <= self.g_block_idxs[j,1]:
end_file_idx = j
local_end_idx = g_end_idx - self.g_block_idxs[j,0]
return start_file_idx,end_file_idx,local_start_idx,local_end_idx
def set_pred_label_batch(self,pred_label,g_start_idx,g_end_idx):
start_file_idx,end_file_idx,local_start_idx,local_end_idx = \
self.global_idx_to_local(g_start_idx,g_end_idx)
pred_start_idx = 0
for f_idx in range(start_file_idx,end_file_idx+1):
if f_idx == start_file_idx:
start = local_start_idx
else:
start = 0
if f_idx == end_file_idx:
end = local_end_idx
else:
end = self.norm_h5f_L[f_idx].label_set.shape[0]
n = end-start
self.norm_h5f_L[f_idx].set_dset_value('pred_label',\
pred_label[pred_start_idx:pred_start_idx+n,:],start,end)
pred_start_idx += n
self.norm_h5f_L[f_idx].h5f.flush()
def get_global_batch(self,g_start_idx,g_end_idx):
start_file_idx,end_file_idx,local_start_idx,local_end_idx = \
self.global_idx_to_local(g_start_idx,g_end_idx)
#t0 = time.time()
data_ls = []
label_ls = []
center_mask = []
for f_idx in range(start_file_idx,end_file_idx+1):
if f_idx == start_file_idx:
start = local_start_idx
else:
start = 0
if f_idx == end_file_idx:
end = local_end_idx
else:
end = self.norm_h5f_L[f_idx].labels_set.shape[0]
data_i,feed_data_elements_idxs = self.norm_h5f_L[f_idx].get_normed_data(start,end,self.feed_data_elements)
label_i = self.norm_h5f_L[f_idx].get_label_eles(start,end,self.feed_label_elements)
data_ls.append(data_i)
label_ls.append(label_i)
if 'xyz_midnorm' in self.feed_data_elements:
xyz_midnorm_i = data_i[:,:,feed_data_elements_idxs['xyz_midnorm']]
else:
xyz_midnorm_i,_ = self.norm_h5f_L[f_idx].get_normed_data(start,end,['xyz_midnorm'])
center_mask_i = self.get_center_mask(xyz_midnorm_i)
center_mask.append(center_mask_i)
data_batches = np.concatenate(data_ls,0)
label_batches = np.concatenate(label_ls,0)
center_mask = np.concatenate(center_mask,0)
data_batches,label_batches = self.sample(data_batches,label_batches,self.num_point_block)
num_label_eles = self.labels_weights.shape[1]
center_mask = np.expand_dims(center_mask,axis=-1)
center_mask = np.tile(center_mask,(1,1,num_label_eles))
sample_weights = []
for k in range(num_label_eles):
sample_weights_k = np.take(self.labels_weights[:,k],label_batches[:,:,k])
            sample_weights.append( np.expand_dims(sample_weights_k,axis=-1) )
"""
(c) RIKEN 2015. All rights reserved.
Author: <NAME>
This software is released under the new BSD License; see LICENSE.
"""
import numpy
vectors_angle = lambda x, y: abs(numpy.arccos(numpy.dot(x,y)/numpy.linalg.norm(x)/numpy.linalg.norm(y)))
def kabsch_superpose(P, Q): # P,Q: vstack'ed matrix
"""
Usage:
P = numpy.vstack([a2, b2, c2])
Q = numpy.vstack([a1, b1, c1])
m = kabsch_superpose(P, Q)
newP = numpy.dot(m, P)
"""
A = numpy.dot(numpy.transpose(P), Q)
    U, s, V = numpy.linalg.svd(A)
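    # --- Hedged completion: the source is truncated after the SVD call. The lines
    # below sketch the remaining steps of the standard Kabsch algorithm and are an
    # assumption, not necessarily the author's original code. Note that numpy's svd
    # returns the third factor already transposed (A = U * diag(s) * V).
    d = numpy.sign(numpy.linalg.det(numpy.dot(numpy.transpose(V), numpy.transpose(U))))
    m = numpy.dot(numpy.dot(numpy.transpose(V), numpy.diag([1., 1., d])), numpy.transpose(U))
    return m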
import mne
import pickle
import numpy as np
from sta import sta_matrix, sdtw_matrix
from sklearn.manifold import TSNE
# change this if you have GPUs
# in our platform, this experiment ran on 4 GPUs in around 20 minutes
n_gpu_devices = 0
def generate_samples(n_samples, n_times, time_point, space_points, M,
smoothing_time=1., smoothing_space=0.01,
seed=None):
"""Simulate brain signals at a time_point and in a random vertex among
`space_points`."""
rng = np.random.RandomState(seed)
n_features = len(M)
time_points = (np.ones(n_samples) * time_point).astype(int)
space_points = rng.choice(space_points, size=n_samples)
signals = np.zeros((n_samples, n_times, n_features)).astype(float)
values = rng.rand(n_samples) * 2 + 1
    signals[np.arange(n_samples), time_points, space_points] = values  # indexing assumed from context; the source is truncated here
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 24 22:54:15 2015
@author: sansomk
"""
import numpy as np
#import matplotlib.pyplot as plt
#from mpl_toolkits.mplot3d import Axes3D
#import scipy.special as special #jn, jn_zeros
#from matplotlib import cm
import os
#from scipy import interpolate
import glob
import re
from pydicom import dicomio
#from matplotlib.widgets import Slider, Button, RadioButtons
import fnmatch
import pickle
import hashlib
def build_dcm_dict(dcmpath, fn_dict, image_dict_pkl="image_dict.pkl"):
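    # Walks dcmpath and builds a nested lookup keyed as
    #   image_dict[TriggerTime][SliceLocation][series_key][SOPInstanceUID] = [file path]
    # where series_key comes from fn_dict (filename glob patterns). The result is
    # cached, together with a hash of dcmpath, in image_dict_pkl.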
slice_location = []
trigger_time = []
image_dict = {}
hash_value = hashlib.sha1(dcmpath).hexdigest()
#count = 0
for dirname, subdirlist, filelist in os.walk(dcmpath):
for filen in filelist:
filePath = os.path.join(dirname,filen)
#print(filePath)
#print(dirname, subdirlist, filelist)
#print(filePath)
#print(f.SliceLocation)
try:
f = dicomio.read_file(filePath, stop_before_pixels=True)
except Exception as e:
print(str(e))
print("error: {0}".format(filen))
continue
#dictionary of images
if (f.TriggerTime not in image_dict.keys()):
image_dict[f.TriggerTime] = {}
if (f.SliceLocation not in image_dict[f.TriggerTime].keys()):
image_dict[f.TriggerTime][f.SliceLocation] = {}
for fn_key in fn_dict.keys():
if( fn_key not in image_dict[f.TriggerTime][f.SliceLocation].keys()):
image_dict[f.TriggerTime][f.SliceLocation][fn_key] = {}
#print(fn_key, filen, fn_dict[fn_key])
if (fnmatch.fnmatch(filen, fn_dict[fn_key])):
#print('did i get here')
if (f.SOPInstanceUID not in image_dict[f.TriggerTime][f.SliceLocation][fn_key].keys()):
image_dict[f.TriggerTime][f.SliceLocation][fn_key][f.SOPInstanceUID] = [filePath]
#print(image_dict[fn_key])
if (f.TriggerTime not in trigger_time):
trigger_time.append(f.TriggerTime)
if (f.SliceLocation not in slice_location):
slice_location.append(f.SliceLocation)
print("writing {0} to current working directory".format(image_dict_pkl))
with open(os.path.join(os.getcwd(), image_dict_pkl), "wb") as pkl_f:
pickle.dump(hash_value, pkl_f, -1)
pickle.dump(image_dict, pkl_f, -1)
return image_dict
def load_dcm_dict(dcmpath, fn_dict, image_dict_pkl="image_dict.pkl"):
try:
with open(os.path.join(os.getcwd(), image_dict_pkl), "rb") as pkl_f:
hash_value = pickle.load(pkl_f)
image_dict = pickle.load(pkl_f)
if (hash_value != hashlib.sha1(dcmpath).hexdigest()):
raise Exception("pickle file doesn't match, rebuilding")
except Exception as e:
print(str(e))
print("no image dictionary pickle file, building one")
# create the dictionary
#dcm_files = []
#count = 0
#dict_test = {}
# tags
# TriggerTime = time of the image
# SliceLocation = spatial location of slice.
# SliceThickness = the thickness of the image
# need to figure our how to convert images to axial ones
        image_dict = build_dcm_dict(dcmpath, fn_dict, image_dict_pkl=image_dict_pkl)
else:
print('Read the pickle file from the current directory')
trigger_time = image_dict.keys()
slice_location = image_dict[trigger_time[0]].keys()
return image_dict, sorted(slice_location), sorted(trigger_time)
def create_image_volume(image_dict, mri_2_cfd_map, image_type, return_coord=True):
trigger_t = mri_2_cfd_map[0]
slice_location = image_dict[trigger_t].keys()
dcm_files = []
for loc in slice_location:
for image_id in image_dict[trigger_t][loc][image_type].keys():
dcm_files.append(image_dict[trigger_t][loc][image_type][image_id][0])
path_loc = zip(dcm_files, slice_location)
path_loc.sort(key=lambda x: x[1])
dcm_files, slice_location = zip(*path_loc)
#print(slice_location)
# get reference image
#print(len(dcm_files), dcm_files)
ref_image = dicomio.read_file(dcm_files[0])
# load dimensions based on the number of rows columns and slices
const_pixel_dims = (int(ref_image.Rows), int(ref_image.Columns), len(dcm_files))
#check it whether image has been interpolated
if (hasattr(ref_image, 'SpacingBetweenSlices')):
if(ref_image.SpacingBetweenSlices < ref_image.SliceThickness):
z_spacing = float(ref_image.SpacingBetweenSlices)
else:
z_spacing = float(ref_image.SliceThickness)
else:
z_spacing = float(ref_image.SliceThickness)
# the array is sized based on 'const_pixel_dims
array_dicom = np.zeros(const_pixel_dims, dtype=np.float64) #ref_image.pixel_array.dtype)
print(array_dicom.shape)
#loop through all the DICOM FILES
for filenamedcm in dcm_files:
#read the file
ds = dicomio.read_file(filenamedcm)
#store the raw image data
array_dicom[:, :, dcm_files.index(filenamedcm)] = (
np.asarray(ds.pixel_array, dtype=np.float64) * (
            np.float64(ds.RescaleSlope)) + np.float64(ds.RescaleIntercept))
# HaloFeedback
import time
import warnings
from abc import ABC, abstractmethod
from time import time as timeit
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
from scipy.integrate import quad, simps
from scipy.interpolate import interp1d
from scipy.special import ellipeinc, ellipkinc, ellipe, ellipk
from scipy.special import gamma as Gamma
from scipy.special import beta as Beta
# ------------------
G_N = 4.3021937e-3 # (km/s)^2 pc/M_sun
c = 2.9979e5 # km/s
# Conversion factors
pc_to_km = 3.085677581e13
# Numerical parameters
N_GRID = 10000 # Number of grid points in the specific energy
#N_GRID = 2000
N_KICK = 50 # Number of points to use for integration over Delta-epsilon
float_2eps = 2.0 * np.finfo(float).eps
# ------------------
# Alternative elliptic function which is valid for m > 1
def ellipeinc_alt(phi, m):
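    # Reciprocal-modulus transformation: evaluates the incomplete elliptic
    # integral of the second kind for parameter m > 1, where scipy's
    # ellipeinc is only defined for 0 <= m <= 1.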
beta = np.arcsin(np.clip(np.sqrt(m) * np.sin(phi), 0, 1))
return np.sqrt(m) * ellipeinc(beta, 1 / m) + ((1 - m) / np.sqrt(m)) * ellipkinc(
beta, 1 / m
)
class DistributionFunction(ABC):
"""
Base class for phase space distribution of a DM spike surrounding a black
hole with an orbiting body. Child classes must implement the following:
Methods
- rho_init(): initial density function
- f_init() initial phase-space distribution function
Attributes
- r_sp: DM halo extent [pc]. Used for making grids for the calculation.
- IDstr_model: ID string used for file names.
"""
def __init__(self, M_BH=1e3, M_NS=1.0, Lambda=-1):
self.M_BH = M_BH # Solar mass
self.M_NS = M_NS # Solar mass
if Lambda <= 0:
self.Lambda = np.sqrt(M_BH / M_NS)
else:
self.Lambda = Lambda
self.r_isco = 6.0 * G_N * M_BH / c ** 2
# Initialise grid of r, eps and f(eps)
self.r_grid = np.geomspace(self.r_isco, 1e5 * self.r_isco, N_GRID - 1000)
self.r_grid = np.append(
self.r_grid, np.geomspace(1.01 * self.r_grid[-1], 1e3 * self.r_sp, 1000)
)
self.eps_grid = self.psi(self.r_grid)
self.f_eps = self.f_init(self.eps_grid)
# Density of states
self.DoS = (
np.sqrt(2) * (np.pi * G_N * self.M_BH) ** 3 * self.eps_grid ** (-5 / 2.0)
)
# Define a string which specifies the model parameters
# and numerical parameters (for use in file names etc.)
        self.IDstr_num = "lnLambda=%.1f" % (np.log(self.Lambda))
# coding: utf-8
""" Tests for infer_potential.py """
from __future__ import division, print_function
__author__ = "adrn <<EMAIL>>"
# Standard library
import os, sys
import logging
import shutil
import time
# Third-party
import astropy.units as u
from astropy.utils import isiterable
import h5py
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import triangle
# Project
from streams.coordinates.frame import galactocentric, heliocentric
import streams.io as io
import streams.inference as si
from streams.inference.back_integrate import back_integration_likelihood
import streams.potential as sp
from streams.util import project_root
matplotlib.rc('lines', marker=None, linestyle='-')
# Create logger
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.DEBUG)
minimum_config = """
name: test
data_file: data/observed_particles/2.5e8.hdf5
nparticles: 4
particle_idx: [3, 3667, 710, 1576]
potential:
class_name: LawMajewski2010
{potential_params}
particles:
{particles_params}
satellite:
{satellite_params}
"""
pot_params = """
parameters: [q1, qz, phi, v_halo]
"""
ptc_params = """
parameters: [d]
"""
# parameters: [d, mul, mub, vr]
sat_params = """
parameters: [alpha]
"""
# parameters: [logmass, logmdot, d, mul, mub, vr]
# _config = minimum_config.format(potential_params=pot_params,
# particles_params=ptc_params,
# satellite_params=sat_params)
_config = minimum_config.format(potential_params=pot_params,
particles_params="",
satellite_params=sat_params)
# particles_params=ptc_params,
# satellite_params=sat_params
Ncoarse = 21
Nfine = 71
output_path = os.path.join(project_root, 'plots', 'tests', 'infer_potential')
if not os.path.exists(output_path):
os.mkdir(output_path)
def make_plot(model, idx, vals1, vals2):
fig,axes = plt.subplots(2,2,figsize=(12,12),sharex='col')
p = model.truths.copy()
Ls = []
for val in vals1:
p[idx] = val
Ls.append(model(p))
axes[0,0].plot(vals1, Ls)
axes[0,0].set_ylabel("$\ln\mathcal{L}$")
axes[1,0].plot(vals1, np.exp(Ls-np.max(Ls)))
axes[1,0].set_ylabel("$\mathcal{L}$")
p = model.truths.copy()
Ls = []
for val in vals2:
p[idx] = val
Ls.append(model(p))
#logger.debug("{} vs truth {}".format(vals[np.argmax(Ls)], truth))
#logger.debug("{:.2f}% off".format(abs(vals[np.argmax(Ls)] - truth)/truth*100.))
axes[0,1].set_title("zoomed")
axes[0,1].plot(vals2, Ls)
axes[1,1].plot(vals2, np.exp(Ls-np.max(Ls)))
for ax in axes.flat:
ax.axvline(model.truths[idx])
return fig
class TestStreamModel(object):
def setup(self):
config = io.read_config(_config)
self.model = si.StreamModel.from_config(config)
self.model.sample_priors()
def test_simple(self):
# make sure true posterior value is higher than any randomly sampled value
logger.debug("Checking posterior values...")
true_ln_p = self.model.ln_posterior(self.model.truths, *self.model.lnpargs)
true_ln_p2 = self.model(self.model.truths)
logger.debug("\t\t At truth: {}".format(true_ln_p))
p0 = self.model.sample_priors()
ln_p = self.model.ln_posterior(p0, *self.model.lnpargs)
ln_p2 = self.model(p0)
logger.debug("\t\t At random sample: {}".format(ln_p))
assert true_ln_p > ln_p
assert true_ln_p == true_ln_p2
assert ln_p == ln_p2
def test_model(self):
""" Simple test of posterior """
model = self.model
test_path = os.path.join(output_path, "model")
if not os.path.exists(test_path):
os.mkdir(test_path)
truth_dict = model._decompose_vector(model.truths)
model.sample_priors()
idx = 0
for group_name,group in truth_dict.items():
for param_name,truths in group.items():
print(group_name, param_name)
param = model.parameters[group_name][param_name]
if group_name == "potential":
vals1 = np.linspace(param._prior.a,
param._prior.b,
Ncoarse)
vals2 = np.linspace(0.9,1.1,Nfine)*truths
fig = make_plot(model, idx, vals1, vals2)
fig.savefig(os.path.join(test_path, "{}_{}.png".format(idx,param_name)))
plt.close('all')
idx += 1
if group_name == "particles":
if param_name in heliocentric.coord_names:
for jj in range(param.value.shape[0]):
prior = model._prior_cache[("particles",param_name)]
truth = truths[jj]
mu,sigma = truth,prior.sigma[jj]
vals1 = np.linspace(mu-10*sigma,
mu+10*sigma,
Ncoarse)
vals2 = np.linspace(mu-3*sigma,
mu+3*sigma,
Nfine)
fig = make_plot(model, idx, vals1, vals2)
fig.savefig(os.path.join(test_path,
"ptcl{}_{}.png".format(idx,param_name)))
plt.close('all')
idx += 1
elif param_name == 'p_shocked':
for jj in range(param.value.shape[0]):
vals1 = np.linspace(param._prior.a[jj],
param._prior.b[jj],
Ncoarse)
vals2 = np.linspace(param._prior.a[jj],
param._prior.b[jj],
Nfine)
fig = make_plot(model, idx, vals1, vals2)
fig.savefig(os.path.join(test_path,
"ptcl{}_{}.png".format(idx,param_name)))
plt.close('all')
idx += 1
elif param_name == 'beta':
for jj in range(param.value.shape[0]):
vals1 = np.linspace(param._prior.a[jj],
param._prior.b[jj],
Ncoarse)
vals2 = np.linspace(param._prior.a[jj],
param._prior.b[jj],
Nfine)
fig = make_plot(model, idx, vals1, vals2)
fig.savefig(os.path.join(test_path,
"ptcl{}_{}.png".format(idx,param_name)))
plt.close('all')
idx += 1
elif param_name == 'tub':
for jj in range(param.value.shape[0]):
vals1 = np.linspace(param._prior.a[jj],
param._prior.b[jj],
Ncoarse)
vals2 = np.linspace(0.9,1.1,Nfine)*truths[jj]
fig = make_plot(model, idx, vals1, vals2)
fig.savefig(os.path.join(test_path,
"ptcl{}_{}.png".format(idx,param_name)))
plt.close('all')
idx += 1
if group_name == "satellite":
if param_name in heliocentric.coord_names:
for jj in range(param.value.shape[0]):
prior = model._prior_cache[("satellite",param_name)]
truth = truths
mu,sigma = truth,prior.sigma
vals1 = np.linspace(mu-10*sigma,
mu+10*sigma,
Ncoarse)
vals2 = np.linspace(mu-3*sigma,
mu+3*sigma,
Nfine)
fig = make_plot(model, idx, vals1, vals2)
fig.savefig(os.path.join(test_path,
"sat{}_{}.png".format(idx,param_name)))
plt.close('all')
idx += 1
elif param_name == "logmass":
vals1 = np.linspace(param._prior.a,
param._prior.b,
Ncoarse)
vals2 = np.linspace(0.9,1.1,Nfine)*truths
fig = make_plot(model, idx, vals1, vals2)
fig.savefig(os.path.join(test_path, "sat{}_{}.png".format(idx,param_name)))
plt.close('all')
idx += 1
elif param_name == "logmdot":
vals1 = np.linspace(param._prior.a,
param._prior.b,
Ncoarse)
vals2 = np.linspace(0.9,1.1,Nfine)*truths
fig = make_plot(model, idx, vals1, vals2)
fig.savefig(os.path.join(test_path, "sat{}_{}.png".format(idx,param_name)))
plt.close('all')
idx += 1
elif param_name == "alpha":
vals1 = np.linspace(0.5, 3.5, Ncoarse)
vals2 = np.linspace(0.9, 1.1, Nfine)*truths
fig = make_plot(model, idx, vals1, vals2)
fig.savefig(os.path.join(test_path, "sat{}_{}.png".format(idx,param_name)))
plt.close('all')
idx += 1
def test_sample_priors(self):
test_path = os.path.join(output_path, "model", "priors")
if not os.path.exists(test_path):
os.mkdir(test_path)
p = self.model.sample_priors(size=100).T
#ptruths = self.model.start_truths(size=100).T
ptruths = self.model.sample_priors(size=100, start_truth=True).T
plt.figure(figsize=(5,5))
for ii,(vals,truevals) in enumerate(zip(p,ptruths)):
n,bins,pat = plt.hist(vals, bins=25, alpha=0.5)
plt.hist(truevals, bins=25, alpha=0.5)
plt.savefig(os.path.join(test_path, "{}.png".format(ii)))
plt.clf()
return
def test_per_particle(self):
_c = minimum_config.format(potential_params=pot_params,
particles_params="",
satellite_params=sat_params)
config = io.read_config(_c)
model = si.StreamModel.from_config(config)
model.sample_priors()
test_path = os.path.join(output_path, "model")
if not os.path.exists(test_path):
os.mkdir(test_path)
# likelihood args
t1, t2, dt = model.lnpargs
p_gc = model.true_particles.to_frame(galactocentric)._X
s_gc = model.true_satellite.to_frame(galactocentric)._X
logmass = model.satellite.logmass.truth
logmdot = model.satellite.logmdot.truth
#true_alpha = model.satellite.alpha.truth
true_alpha = 1.4
beta = model.particles.beta.truth
tub = model.particles.tub.truth
truth_dict = model._decompose_vector(model.truths)
group = truth_dict['potential']
for param_name,truths in group.items():
print(param_name)
param = model.parameters['potential'][param_name]
            vals = np.linspace(0.9,1.1,Nfine)
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 4 10:27:55 2021
@author: Raj
"""
import numpy as np
from .mechanical_drive import MechanicalDrive
from .utils.load import params_from_experiment as load_parm
from .utils.load import simulation_configuration as load_sim_config
from ffta.pixel_utils.load import configuration
from scipy.interpolate import InterpolatedUnivariateSpline as ius
from scipy.signal import medfilt
from matplotlib import pyplot as plt
import pandas as pd
def cal_curve(can_path, param_cfg, taus_range=[], plot=True, **kwargs):
'''
Generates a calibration curve for a given cantilever given some particular
parameters.
Ideally you would have a tip parameters file as well.
Usage:
------
>>> param_cfg = 'path'
>>> can_params = 'path'
>>> taus, tfp, spl = cal_curve(param_cfg, can_params)
>>> from matplotlib import pyplot as plt
>>> plt.plot(tfp, taus, 'bX-')
If you want to change the fit parameters per tau
taus, tfp, spl = cal_curve(param_cfg, can_params, roi=0.001, n_taps=199)
:param can_path:
:type can_path: str
:param params_cfg: Path to parameters.cfg file (from FFtrEFM experiment, in the data folder)
:type params_cfg: string
:param taus_range: taus_range to set a range for the simulations, taken as [low, high]
:type taus_range: ndarray (2-index array), optional
:param plot: Plots the last taus vs tfps for verification
:type plot: bool, optional
:param kwargs:
:type kwargs:
:returns: tuple (taus, tfps, spl)
WHERE
ndarray taus is the single exponential taus that were simulated
ndarray tfps is the measured time to first peaks
UnivariateSpline spl is spline object of the calibration curve. To scale an image, type spl(x)
'''
if isinstance(can_path, str):
can_params, force_params, sim_params, _, parms = load_parm(can_path, param_cfg)
elif isinstance(can_path, tuple):
can_params, force_params, sim_params = load_sim_config(can_path)
_, parms = configuration(param_cfg)
can_params['drive_freq'] = parms['drive_freq']
can_params['res_freq'] = parms['drive_freq']
sim_params['trigger'] = parms['trigger']
sim_params['total_time'] = parms['total_time']
sim_params['sampling_rate'] = parms['sampling_rate']
_rlo = -7
_rhi = -3
    if len(taus_range) == 2:
        if taus_range[1] <= taus_range[0]:
            raise ValueError('Range must be ascending and 2-items')
        _rlo = np.log10(taus_range[0])
        _rhi = np.log10(taus_range[1])
    elif len(taus_range) != 0:
        raise ValueError('Range must be ascending and 2-items')
# _rlo = np.floor(np.log10(taus_range[0]))
# _rhi = np.ceil(np.log10(taus_range[1]))
    taus = np.logspace(_rlo, _rhi, 50)
# SPDX-License-Identifier: Apache-2.0
"""Rewrites operator einsum into simple ONNX operators.
"""
import math
from itertools import permutations
import numpy as np
from onnx import helper, numpy_helper, TensorProto, AttributeProto
from .. import utils
from ..constants import OPSET_TO_IR_VERSION, PREFERRED_OPSET
from .optimizer_base import GraphOptimizerBase
class OnnxMicroRuntime:
"""
Implements a micro runtime for ONNX graphs.
    It does not implement all the operator types.
This runtime is used to infer shape. `shape_inference`
from `onnx` does not return all shapes when the onnx graph
includes an operator *Reshape*.
:param model_onnx: ONNX model
"""
def __init__(self, model_onnx):
if not hasattr(model_onnx, 'graph'):
raise TypeError(
"model_onnx is not an ONNX graph but %r." % type(model_onnx))
self.model_onnx = model_onnx
def run(self, inputs):
"""
Computes the outputs of the graph.
:param inputs: dictionary
:return: all intermediates results and output as a dictionary
"""
def _get_dtype(onnx_type):
if onnx_type == 1:
return np.float32
if onnx_type == 7:
return np.int64
raise ValueError("Unable to guess dtype from ONNX type %r." % onnx_type)
def _extract_numpy_array(v):
return np.frombuffer(v.raw_data, dtype=_get_dtype(v.data_type))
if not isinstance(inputs, dict):
raise TypeError(
"inputs must be a dictionary not %r." % type(inputs))
results = inputs.copy()
for init in self.model_onnx.graph.initializer:
name = init.name
mat = _extract_numpy_array(init)
results[name] = mat
for node in self.model_onnx.graph.node:
op_type = node.op_type
inp = [results[n] for n in node.input]
meth_name = "_op_%s" % op_type.lower()
if not hasattr(self, meth_name):
raise NotImplementedError(
"OnnxMicroRuntime does not implement operator %r." % op_type)
kwargs = {}
for at in node.attribute:
kwargs[at.name] = at
out = getattr(self, meth_name)(*inp, **kwargs)
for n, o in zip(node.output, out):
results[n] = o
return results
def _op_add(self, x, y):
"Runtime for operator."
return (x + y,)
def _op_concat(self, *args, axis=None):
"Runtime for operator."
if axis is not None:
axis = axis.i
def _preprocess(a, axis):
if axis >= len(a.shape):
new_shape = a.shape + (1,) * (axis + 1 - len(a.shape))
return a.reshape(new_shape)
return a
targs = tuple(_preprocess(a, axis) for a in args)
return (np.concatenate(targs, axis),)
def _op_gemm(self, a, b, c=None, alpha=None, beta=None, # pylint: disable=C0103
transA=None, transB=None): # pylint: disable=C0103
"Runtime for operator."
if alpha is not None:
alpha = alpha.f
if beta is not None:
beta = beta.f
if transA is None:
transA = False
else:
transA = transA.i
if transB is None:
transB = False
else:
transB = transB.i
def _gemm00(a, b, c, alpha, beta):
o = np.dot(a, b) * alpha
if beta != 0:
o += c * beta
return o
def _gemm01(a, b, c, alpha, beta):
o = np.dot(a, b.T) * alpha
if beta != 0:
o += c * beta
return o
def _gemm10(a, b, c, alpha, beta):
o = np.dot(a.T, b) * alpha
if beta != 0:
o += c * beta
return o
def _gemm11(a, b, c, alpha, beta):
o = np.dot(a.T, b.T) * alpha
if beta != 0:
o += c * beta
return o
if transA:
fct = _gemm11 if transB else _gemm10
else:
fct = _gemm01 if transB else _gemm00
return (fct(a, b, c, alpha=alpha, beta=beta),)
def _op_gather(self, x, indices, axis=None):
"Runtime for operator."
if not x.flags['C_CONTIGUOUS']:
x = np.ascontiguousarray(x)
if not indices.flags['C_CONTIGUOUS']:
            indices = np.ascontiguousarray(indices)
if axis is not None:
axis = axis.i
return (np.take(x, indices, axis=axis),)
def _op_identity(self, x):
"Runtime for operator."
return (x,)
def _op_matmul(self, x, y):
"Runtime for operator."
return (np.matmul(x, y),)
def _op_max(self, *x):
"Runtime for operator."
return (np.maximum(*x),) #pylint: disable=E1120
def _op_mul(self, x, y):
"Runtime for operator."
return (x * y,)
def _op_reduceprod(self, data, axes=None, keepdims=None):
"Runtime for operator :epkg:`Op:ReduceProd`."
if keepdims is not None:
keepdims = keepdims.i
if axes is not None and not isinstance(axes, int):
if isinstance(axes, np.ndarray) and len(axes.shape) == 0:
axes = int(axes)
else:
axes = tuple(axes) if len(axes) > 0 else None
return (np.prod(data, axis=axes, keepdims=keepdims, dtype=data.dtype),)
def _op_reducesum(self, data, axes, keepdims=None, noop_with_empty_axes=None):
"Runtime for operator."
if keepdims is not None:
keepdims = keepdims.i
if noop_with_empty_axes is not None:
noop_with_empty_axes = noop_with_empty_axes.i
        if axes is None and noop_with_empty_axes:
return (data,)
if axes is not None and not isinstance(axes, int):
if isinstance(axes, np.ndarray) and len(axes.shape) == 0:
axes = int(axes)
else:
axes = tuple(axes) if len(axes) > 0 else None
return (np.sum(data, axis=axes, keepdims=keepdims, dtype=data.dtype),)
def _op_reshape(self, x, shape):
"Runtime for operator."
return (x.reshape(shape),)
def _op_shape(self, x):
"Runtime for operator."
return (np.array(list(x.shape), dtype=np.int64),)
def _op_squeeze(self, x, axes=None):
"Runtime for operator."
if axes is None:
return (x,)
if isinstance(axes, AttributeProto):
axes = list(axes.ints)
if hasattr(axes, '__iter__'):
return (np.squeeze(x, axis=tuple(axes)),)
return (np.squeeze(x, axis=axes),)
def _op_transpose(self, x, perm=None):
"Runtime for operator."
if perm is not None:
perm = tuple(perm.ints)
return (np.transpose(x, perm),)
def _op_unsqueeze(self, x, axes=None):
"Runtime for operator."
if axes is None:
return (x,)
if isinstance(axes, AttributeProto):
axes = list(axes.ints)
if hasattr(axes, '__iter__'):
return (np.expand_dims(x, axis=tuple(axes)),)
return (np.expand_dims(x, axis=axes),)
def single_axes(axes):
"""
    A positive value in *axes* is the position of that axis in the
    original matrix; -1 means the axis is an added single dimension
    used to align all the dimensions based on the einsum equation.
:param axes: axes described above
:return: list of integer in set `{1, 2}`, 1 for
a single axis, 2 otherwise
"""
if axes is None:
return axes
return [(1 if a == -1 else 2) for a in axes]
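# Illustrative example: single_axes([-1, 0, -1, 2]) == [1, 2, 1, 2]
# (inserted single axes are marked 1, plain axes are marked 2).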
class EinsumSubOp:
"""
Defines a sub operation used in Einsum decomposition.
:param name: name (reshape, transpose, reduce_sum, matmul, id,
squeeze, diagonal, mul, batch_dot)
:param inputs: inputs
:param kwargs: arguments
Operator suffixed by `_mm` (*transpose_mm*, *reduce_sum_mm*)
are equivalent to the same operator without the suffix
but takes two inputs and only changes the first one.
    Attribute `_info` summarizes the known information
    about dimensions. Many of them are empty because they were inserted.
    Value `1` means the axis is an inserted single dimension,
    `2` means it is a plain dimension.
"""
_allowed = {'expand_dims', 'transpose', 'reduce_sum', 'matmul', 'id',
'squeeze', 'diagonal', 'mul', 'batch_dot',
'transpose_mm', 'reduce_sum_mm'}
def __init__(self, full_dim, name, *inputs, **kwargs):
self.full_dim = full_dim
self.name = name
self.inputs = inputs
self.kwargs = kwargs
self._info = {}
if name not in EinsumSubOp._allowed:
raise ValueError(
"Unexpected name %r. It should be in %r."
"" % (name, EinsumSubOp._allowed))
if len(inputs) not in (1, 2):
raise RuntimeError(
"Inputs must contains 1 or 2 inputs not %d." % len(inputs))
if name == 'matmul' and len(inputs) != 2:
raise RuntimeError(
"Inputs must contains 2 inputs not %d for operator 'matmul'."
"" % len(inputs))
for i, inp in enumerate(inputs):
if not isinstance(inp, (int, EinsumSubOp)):
raise TypeError(
"Input %d has type %r, int or EinsumSubOp is expected."
"" % (i, type(inp)))
self._check_()
def _check_(self):
"Checks for wrong values."
if self.name == 'transpose':
self._check_arg_('perm', tuple)
perm = self.kwargs['perm']
if len(perm) != len(set(perm)):
raise RuntimeError(
"perm has duplicated values %r (name=%r)."
"" % (perm, self.name))
if list(perm) == list(range(len(perm))):
raise ValueError(
"Transpose = identity perm=%r. It must be removed."
"" % perm)
elif self.name == 'matmul':
self._check_arg_('axes', tuple)
self._check_arg_('left', tuple)
self._check_arg_('right', tuple)
axes = self.kwargs['axes']
left = self.kwargs['left']
right = self.kwargs['right']
for a in axes:
if a in left and a in right:
raise RuntimeError(
"One axis belongs to every set (axes, left, right). "
"axes=%r, left=%r, right=%r." % (axes, left, right))
def __repr__(self):
inps = ", ".join(map(str, self.inputs))
kw = ", ".join("%s=%r" % (k, w) for k, w in self.kwargs.items())
m = "%s(%r, %s, %s)" % (
self.__class__.__name__, self.name, inps, kw)
return m
def _check_arg_(self, name, typ, empty=False):
if name not in self.kwargs:
raise RuntimeError(
"Parameter %r not found for operator %r." % (name, self.name))
if empty and self.kwargs[name] is None:
return
if not isinstance(self.kwargs[name], typ):
raise TypeError(
"Unexpected type %r for parameter %r and parameter %r."
"" % (type(self.kwargs[name]), name, self.name))
def _check_row_(self, row, inp=False):
"""
Checks input or output is valid.
"""
def _compute_output_row_id(self, row, row2=None, ab=False):
"compute shape after operator id"
if ab:
raise RuntimeError("ab option not allowed.")
self._check_row_(row, True)
row[:] = row2[:]
self._check_row_(row)
def _compute_output_row_transpose(self, row, row2=None, ab=False):
"compute shape after operator transpose"
if ab:
self._compute_output_row_transpose(row2)
return
self._check_row_(row, True)
self._check_arg_('perm', tuple)
if len(self.kwargs['perm']) != len(row):
raise RuntimeError(
"Unexpected permutation %r (row=%r)."
"" % (self.kwargs['perm'], row))
perm = self.kwargs['perm']
cpy = row.copy()
for i, p in enumerate(perm):
row[i] = cpy[p]
self._check_row_(row)
def _compute_output_row_transpose_mm(self, row, row2=None, ab=False):
"compute shape after operator transpose"
if not ab:
raise RuntimeError("ab must be True.")
self._check_row_(row, True)
if row2 is None:
raise RuntimeError("transpose_mm expects a second input.")
self._compute_output_row_transpose(row, row2=None)
def _compute_output_row_expand_dims(self, row, row2=None, ab=False):
"compute shape after operator expand_dims"
if ab:
raise RuntimeError("ab option not allowed.")
self._check_row_(row, True)
self._check_arg_('axes', tuple)
axes = self.kwargs['axes']
for axis in axes:
if not isinstance(axis, tuple):
raise TypeError(
"Parameter axes of expand_dims should be a tuple of "
"tuple, axes=%r." % axes)
if row[axis[1]] != -1:
raise RuntimeError(
"Dimension should be -1 in row %r axis=%r." % (
                        row, axis))
self._check_row_(row)
def _compute_output_row_reduce_sum(self, row, row2=None, ab=False):
"compute shape after operator reduce_sum"
if ab:
raise RuntimeError("ab option not allowed.")
self._check_row_(row, True)
self._check_arg_('axes', tuple)
for a in self.kwargs['axes']:
row[a] = -1
self._check_row_(row)
def _compute_output_row_reduce_sum_mm(self, row, row2=None, ab=False):
"compute shape after operator reduce_sum"
if not ab:
raise RuntimeError("ab must be true.")
self._check_row_(row2, True)
if row2 is None:
raise RuntimeError("reduce_sum_mm expects a second input.")
self._compute_output_row_reduce_sum(row, row2=None)
def _compute_output_row_squeeze(self, row, row2=None, ab=False):
"compute shape after operator squeeze"
if ab:
raise RuntimeError("ab option not allowed.")
self._check_row_(row, True)
self._check_arg_('axes', tuple)
for a in self.kwargs['axes']:
row[a] = -1
self._check_row_(row)
def _compute_output_row_diagonal(self, row, row2=None, ab=False):
"compute shape after operator diagonal"
if ab:
raise RuntimeError("ab option not allowed.")
self._check_row_(row, True)
self._check_arg_('diag', list)
to_remove = []
for choice, choices in self.kwargs['diag']:
for ch in choices:
if ch != choice:
to_remove.append(ch)
for i in range(len(row)): # pylint: disable=C0200
if row[i] in choices:
if row[i] != choice:
row[i] = choice
to_remove.sort()
for r in to_remove:
for i in range(len(row)): # pylint: disable=C0200
if row[i] == r:
raise RuntimeError(
"Unexpected result r=%r row=%r to_remove=%r "
"diag=%r." % (
r, row, to_remove, self.kwargs['diag']))
if row[i] > r:
row[i] -= 1
self._check_row_(row)
def _compute_output_row_matmul(self, row, row2=None, ab=False):
"compute shape after operator matmul"
if not ab:
raise RuntimeError("ab must be True.")
self._check_row_(row, True)
self._check_row_(row2, True)
self._check_arg_('axes', tuple)
self._check_arg_('left', tuple)
self._check_arg_('right', tuple)
self._check_arg_('ndim', int)
if row2 is None:
raise RuntimeError("matmul expects two inputs.")
row2[:] = np.maximum(row, row2)
for a in self.kwargs['axes']:
if a not in self.kwargs['right']:
row2[a] = -1
self._check_row_(row2)
def _compute_output_row_batch_dot(self, row, row2=None, ab=False):
"compute shape after operator batch_dot"
if not ab:
raise RuntimeError("ab must be True.")
self._check_row_(row, True)
self._check_row_(row2, True)
self._check_arg_('batch_axes', tuple)
self._check_arg_('keep_axes', tuple, empty=True)
self._check_arg_('sum_axes', tuple)
self._check_arg_('left', tuple)
self._check_arg_('right', tuple)
self._check_arg_('ndim', int)
if row2 is None:
raise RuntimeError("batch_dot expects two inputs.")
row2[:] = np.maximum(row, row2)
for a in self.kwargs['sum_axes']:
if a not in self.kwargs['right']:
row2[a] = -1
self._check_row_(row2)
def _compute_output_row_mul(self, row, row2=None, ab=False):
"compute shape after operator mul"
if not ab:
raise RuntimeError("ab must be True.")
self._check_row_(row, True)
self._check_row_(row2, True)
if row2 is None:
raise RuntimeError("mul expects two inputs.")
row2[:] = np.maximum(row, row2)
self._check_row_(row2)
def compute_output_row(self, row, row2=None, ab=False):
"""
Updates *row* based on the operator.
"""
method_name = "_compute_output_row_%s" % self.name
meth = getattr(self, method_name, None)
if meth is None:
raise NotImplementedError(
"compute_output_row not implemented for %r." % self.name)
self.add_info(i_row=single_axes(row), i_row2=single_axes(row2))
meth(row, row2=row2, ab=ab)
self.add_info(o_row=single_axes(row), o_row2=single_axes(row2))
def add_info(self, **kwargs):
"""
Adds information to the node.
:param kwargs: dictionary
"""
for k, v in kwargs.items():
if k in self._info:
raise KeyError(
"Key %r already added (operator %r)." % (k, self.name))
self._info[k] = v
def _check_inputs_(self, n_expected, check_dim=False):
if len(self.inputs) != n_expected:
raise RuntimeError(
"Number of inputs must be %d not %d for operator %r."
"" % (n_expected, len(self.inputs), self.name))
def _check_shape_(self, m):
if len(m.shape) != self.full_dim:
raise RuntimeError(
"Number of dimensions %r is different from expected value "
"%d." % (m.shape, self.full_dim))
def _get_data(self, data, key, as_str=False):
"Returns data[key] or raises an exception if not here."
if isinstance(key, int):
if key not in data:
raise RuntimeError(
"Unable to find key %d in %r." % (
key, list(sorted(data))))
value = data[key]
elif isinstance(key, EinsumSubOp):
if id(key) not in data:
raise RuntimeError(
"Unable to find key %d in %r." % (
id(key), list(sorted(data))))
value = data[id(key)]
else:
raise TypeError(
"Unexpected input type %r." % type(key))
if as_str:
if isinstance(value, str):
return value
if hasattr(value, 'output') and len(value.output) == 1:
return value.output[0]
            if isinstance(value, list) and len(value) == 1:
return value[0]
raise RuntimeError(
"Unable to guess what to return in that case %r - %r"
"." % (type(value), value))
return value
def _onnx_name(self):
return 'einsum%d_%s' % (id(self), self.name[:2])
def _check_onnx_opset_(self, opset, limit):
if opset is not None and opset < limit:
raise RuntimeError(
"Opset (%r) must be >= %r for operator %r."
"" % (opset, limit, self.name))
def _to_onnx_id(self, names, opset): # pylint: disable=W0613
self._check_inputs_(1)
inp = self.inputs[0]
name = self._get_data(names, inp)
yield helper.make_node('Identity', [name], [self._onnx_name()])
def _to_onnx_expand_dims(self, names, opset):
"insert node unsqueeze"
self._check_inputs_(1)
self._check_onnx_opset_(opset, 11)
inp = self.inputs[0]
name = self._get_data(names, inp)
axes = self.kwargs['axes']
name_axes = name + '_axes'
if opset >= 13:
yield numpy_helper.from_array(
np.array([a[1] for a in axes], dtype=np.int64), name=name_axes)
yield helper.make_node(
'Unsqueeze', [name, name_axes], [self._onnx_name()])
else:
yield helper.make_node(
'Unsqueeze', [name], [self._onnx_name()], axes=[a[1] for a in axes])
def _to_onnx_squeeze(self, names, opset):
"insert node squeeze"
self._check_inputs_(1)
self._check_onnx_opset_(opset, 11)
inp = self.inputs[0]
name = self._get_data(names, inp)
axes = self.kwargs['axes']
name_axes = name + '_axes'
if opset >= 13:
yield numpy_helper.from_array(
                np.array(axes, dtype=np.int64), name=name_axes)
import numpy as np
import matplotlib.pyplot as plt
## softmax: 0.1 600
## perceptron: 0.05 550
def readData(csvname):
data = np.loadtxt(csvname, delimiter=',')
x = data[:-1, :]
y = data[-1:, :]
return x, y
def softmaxCostFunc(x, y, w):
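    # Softmax/logistic cost: g(w) = (1/P) * sum_p log(1 + exp(-y_p * x_p^T w))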
cost = np.sum(np.log(1 + np.exp(-y*np.transpose(np.dot(np.transpose(x), w)))))
return cost / float(np.size(y))
def gradientDescentOneStepForSoftmax(x, y, w, alpha=0.1):
total = np.zeros([9,1])
for i in range(np.size(y)):
power = np.exp(-y[:,i] * np.dot(x[:,i], w))
term = power / (1 + power)
total += term * y[:,i] * x[:,[i]]
w = w + alpha * (1/np.size(y)) * total
return w
def perceptronCostFunc(x, y, w):
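    # Perceptron cost: g(w) = (1/P) * sum_p max(0, -y_p * x_p^T w)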
cost = 0
a = (-y*np.transpose(np.dot(np.transpose(x), w)))[0]
for i in range(len(a)):
cost += a[i] if (a[i] > 0) else 0
return cost / float(np.size(y))
def gradientDescentOneStepForPerceptron(x, y, w, alpha=0.05):
total = np.zeros([9,1])
for i in range(np.size(y)):
term = -y[:,i] * np.dot(x[:,[i]].T, w)
total += 0 if term <= 0 else -y[:,i] * x[:,[i]]
    w = w - alpha * (1/np.size(y)) * total
    return w
# -*- coding: utf-8 -*-
#
# Copyright (c) 2018 Leland Stanford Junior University
# Copyright (c) 2018 The Regents of the University of California
#
# This file is part of pelicun.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# You should have received a copy of the BSD 3-Clause License along with
# pelicun. If not, see <http://www.opensource.org/licenses/>.
#
# Contributors:
# <NAME>
"""
This subpackage performs system tests on the control module of pelicun.
"""
import pytest
import numpy as np
from numpy.testing import assert_allclose
from scipy.stats import truncnorm as tnorm
from copy import deepcopy
import os, sys, inspect
current_dir = os.path.dirname(
os.path.abspath(inspect.getfile(inspect.currentframe())))
parent_dir = os.path.dirname(current_dir)
sys.path.insert(0,os.path.dirname(parent_dir))
from pelicun.control import *
from pelicun.uq import mvn_orthotope_density as mvn_od
from pelicun.tests.test_pelicun import prob_allclose, prob_approx
# -----------------------------------------------------------------------------
# FEMA_P58_Assessment
# -----------------------------------------------------------------------------
def test_FEMA_P58_Assessment_central_tendencies():
"""
Perform a loss assessment with customized inputs that reduce the
dispersion of calculation parameters to negligible levels. This allows us
to test the results against pre-defined reference values in spite of the
randomness involved in the calculations.
"""
base_input_path = 'resources/'
DL_input = base_input_path + 'input data/' + "DL_input_test.json"
EDP_input = base_input_path + 'EDP data/' + "EDP_table_test.out"
A = FEMA_P58_Assessment()
A.read_inputs(DL_input, EDP_input, verbose=False)
A.define_random_variables()
# -------------------------------------------------- check random variables
# EDP
RV_EDP = list(A._EDP_dict.values())[0]
assert RV_EDP.theta[0] == pytest.approx(0.5 * g)
assert RV_EDP.theta[1] == pytest.approx(0.5 * g * 1e-6, abs=1e-7)
assert RV_EDP._distribution == 'lognormal'
# QNT
assert A._QNT_dict is None
#RV_QNT = A._RV_dict['QNT']
#assert RV_QNT is None
# FRG
RV_FRG = list(A._FF_dict.values())
thetas, betas = np.array([rv.theta for rv in RV_FRG]).T
assert_allclose(thetas, np.array([0.444, 0.6, 0.984]) * g, rtol=0.01)
assert_allclose(betas, np.array([0.3, 0.4, 0.5]), rtol=0.01)
rho = RV_FRG[0].RV_set.Rho()
assert_allclose(rho, np.ones((3, 3)), rtol=0.01)
assert np.all([rv.distribution == 'lognormal' for rv in RV_FRG])
# RED
RV_RED = list(A._DV_RED_dict.values())
mus, sigmas = np.array([rv.theta for rv in RV_RED]).T
assert_allclose(mus, np.ones(2), rtol=0.01)
assert_allclose(sigmas, np.array([1e-4, 1e-4]), rtol=0.01)
rho = RV_RED[0].RV_set.Rho()
assert_allclose(rho, np.array([[1, 0], [0, 1]]), rtol=0.01)
assert np.all([rv.distribution == 'normal' for rv in RV_RED])
assert_allclose (RV_RED[0].truncation_limits, [0., 2.], rtol=0.01)
assert_allclose (RV_RED[1].truncation_limits, [0., 4.], rtol=0.01)
# INJ
RV_INJ = list(A._DV_INJ_dict.values())
mus, sigmas = np.array([rv.theta for rv in RV_INJ]).T
assert_allclose(mus, np.ones(4), rtol=0.01)
assert_allclose(sigmas, np.ones(4) * 1e-4, rtol=0.01)
rho = RV_INJ[0].RV_set.Rho()
rho_target = np.zeros((4, 4))
np.fill_diagonal(rho_target, 1.)
assert_allclose(rho, rho_target, rtol=0.01)
assert np.all([rv.distribution == 'normal' for rv in RV_INJ])
assert_allclose(RV_INJ[0].truncation_limits, [0., 10./3.], rtol=0.01)
assert_allclose(RV_INJ[1].truncation_limits, [0., 10./3.], rtol=0.01)
assert_allclose(RV_INJ[2].truncation_limits, [0., 10.], rtol=0.01)
assert_allclose(RV_INJ[3].truncation_limits, [0., 10.], rtol=0.01)
# REP
RV_REP = list(A._DV_REP_dict.values())
thetas, betas = np.array([rv.theta for rv in RV_REP]).T
assert_allclose(thetas, np.ones(6), rtol=0.01)
assert_allclose(betas, np.ones(6) * 1e-4, rtol=0.01)
rho = RV_REP[0].RV_set.Rho()
rho_target = np.zeros((6, 6))
np.fill_diagonal(rho_target, 1.)
assert_allclose(rho, rho_target, rtol=0.01)
assert np.all([rv.distribution == 'lognormal' for rv in RV_REP])
# ------------------------------------------------------------------------
A.define_loss_model()
# QNT (deterministic)
QNT = A._FG_dict['T0001.001']._performance_groups[0]._quantity
assert QNT == pytest.approx(50., rel=0.01)
A.calculate_damage()
# ------------------------------------------------ check damage calculation
# TIME
T_check = A._TIME.describe().T.loc[['hour','month','weekday?'],:]
assert_allclose(T_check['mean'], np.array([11.5, 5.5, 5. / 7.]), rtol=0.05)
assert_allclose(T_check['min'], np.array([0., 0., 0.]), rtol=0.01)
assert_allclose(T_check['max'], np.array([23., 11., 1.]), rtol=0.01)
assert_allclose(T_check['50%'], np.array([12., 5., 1.]), atol=1.0)
assert_allclose(T_check['count'], np.array([10000., 10000., 10000.]),
rtol=0.01)
# POP
P_CDF = A._POP.describe(np.arange(1, 27) / 27.).iloc[:, 0].values[4:]
vals, counts = np.unique(P_CDF, return_counts=True)
assert_allclose(vals, np.array([0., 2.5, 5., 10.]), rtol=0.01)
assert_allclose(counts, np.array([14, 2, 7, 5]), atol=1)
# COL
COL_check = A._COL.describe().T
assert COL_check['mean'].values[0] == pytest.approx(0.5, rel=0.05)
assert len(A._ID_dict['non-collapse']) == pytest.approx(5000, rel=0.05)
assert len(A._ID_dict['collapse']) == pytest.approx(5000, rel=0.05)
# DMG
DMG_check = A._DMG.describe().T
assert_allclose(DMG_check['mean'], np.array([17.074, 17.074, 7.9361]),
rtol=0.1, atol=1.0)
assert_allclose(DMG_check['min'], np.zeros(3), rtol=0.01)
assert_allclose(DMG_check['max'], np.ones(3) * 50.0157, rtol=0.05)
# ------------------------------------------------------------------------
A.calculate_losses()
# -------------------------------------------------- check loss calculation
# RED
DV_RED = A._DV_dict['red_tag'].describe().T
assert_allclose(DV_RED['mean'], np.array([0.341344, 0.1586555]), rtol=0.1)
# INJ - collapse
DV_INJ_C = deepcopy(A._COL[['INJ-0', 'INJ-1']])
DV_INJ_C.dropna(inplace=True)
NC_count = DV_INJ_C.describe().T['count'][0]
assert_allclose(NC_count, np.ones(2) * 5000, rtol=0.05)
# lvl 1
vals, counts = np.unique(DV_INJ_C.iloc[:, 0].values, return_counts=True)
assert_allclose(vals, np.array([0., 2.5, 5., 10.]) * 0.1, rtol=0.01)
assert_allclose(counts / NC_count, np.array([14, 2, 7, 5]) / 28., atol=0.01, rtol=0.1)
# lvl 2
vals, counts = np.unique(DV_INJ_C.iloc[:, 1].values, return_counts=True)
assert_allclose(vals, np.array([0., 2.5, 5., 10.]) * 0.9, rtol=0.01)
assert_allclose(counts / NC_count, np.array([14, 2, 7, 5]) / 28., atol=0.01, rtol=0.1)
# INJ - non-collapse
DV_INJ_NC = deepcopy(A._DV_dict['injuries'])
DV_INJ_NC[0].dropna(inplace=True)
assert_allclose(DV_INJ_NC[0].describe().T['count'], np.ones(2) * 5000,
rtol=0.05)
# lvl 1 DS2
I_CDF = DV_INJ_NC[0].iloc[:, 0]
I_CDF = np.around(I_CDF, decimals=3)
vals, counts = np.unique(I_CDF, return_counts=True)
assert_allclose(vals, np.array([0., 0.075, 0.15, 0.3]), rtol=0.01)
target_prob = np.array(
[0.6586555, 0., 0., 0.] + 0.3413445 * np.array([14, 2, 7, 5]) / 28.)
assert_allclose(counts / NC_count, target_prob, atol=0.01, rtol=0.1)
# lvl 1 DS3
I_CDF = DV_INJ_NC[0].iloc[:, 1]
I_CDF = np.around(I_CDF, decimals=3)
vals, counts = np.unique(I_CDF, return_counts=True)
assert_allclose(vals, np.array([0., 0.075, 0.15, 0.3]), rtol=0.01)
target_prob = np.array(
[0.8413445, 0., 0., 0.] + 0.1586555 * np.array([14, 2, 7, 5]) / 28.)
assert_allclose(counts / NC_count, target_prob, atol=0.01, rtol=0.1)
# lvl 2 DS2
I_CDF = DV_INJ_NC[1].iloc[:, 0]
I_CDF = np.around(I_CDF, decimals=3)
vals, counts = np.unique(I_CDF, return_counts=True)
assert_allclose(vals, np.array([0., 0.025, 0.05, 0.1]), rtol=0.01)
target_prob = np.array(
        [0.6586555, 0., 0., 0.] + 0.3413445 * np.array([14, 2, 7, 5]) / 28.)
# python3
# -*- coding: utf-8 -*-
# @Author : lina
# @Time : 2018/12/5 21:32
"""
Purpose: generate lyrics using the model and parameters saved by train_seq2seq_word_based.py.
"""
from keras.models import load_model, Model
from keras.layers import Input
import numpy as np
import json
import warnings
warnings.filterwarnings("ignore")
LATENT_DIM = 1000  # dimension of the encoder's latent (context) vector; must match the configuration in the training script
def load_param(model_file, word2index_input_file, index2word_input_file, word2index_target_file, index2word_target_file):
"""
    Load the trained model and the vocabulary mappings.
    :param model_file: path to the saved model and weights
    :param word2index_input_file: word2index mapping for the input (encoder) side
    :param index2word_input_file: index2word mapping for the input (encoder) side
    :param word2index_target_file: word2index mapping for the target (decoder) side
    :param index2word_target_file: index2word mapping for the target (decoder) side
    :return:
        model: the trained model with its parameters
        word2index_input, index2word_input_new: word2index and index2word for the input side
        word2index_target, index2word_target_new: word2index and index2word for the target side
"""
# get model.
model = load_model(model_file)
# get the word2index and index2word data.
with open(word2index_input_file, 'r', encoding='utf8') as f:
json_obj = f.read()
word2index_input = json.loads(json_obj)
f.close()
with open(index2word_input_file, 'r', encoding='utf8') as f:
json_obj = f.read()
index2word_input = json.loads(json_obj)
f.close()
index2word_input_new = {}
for key, value in index2word_input.items():
index2word_input_new[int(key)] = value
with open(word2index_target_file, 'r', encoding='utf8') as f:
json_obj = f.read()
word2index_target = json.loads(json_obj)
f.close()
with open(index2word_target_file, 'r', encoding='utf8') as f:
json_obj = f.read()
index2word_target = json.loads(json_obj)
f.close()
index2word_target_new = {}
for key, value in index2word_target.items():
index2word_target_new[int(key)] = value
print("word2index is::", index2word_input_new)
print("word2index is::", word2index_input)
print("index2word is::", index2word_target_new)
return model, word2index_input, index2word_input_new, word2index_target, index2word_target_new
def get_model(model):
"""
    Extract the encoder and decoder sub-models from the loaded model.
    :param model: the loaded model with its trained parameters
    :return: encoder_model, decoder_model: the encoder encodes the previous line,
        the decoder generates the next line
"""
encoder_inputs = model.get_layer(name="encoder_inputs").input
encoder_outputs, state_h_enc, state_c_enc = model.get_layer(name="encoder_outputs").output
encoder_states = [state_h_enc, state_c_enc]
encoder_model = Model(encoder_inputs, encoder_states)
decoder_inputs = model.get_layer(name="decoder_inputs").input
decoder_state_input_h = Input(shape=(LATENT_DIM,), name='input_3')
decoder_state_input_c = Input(shape=(LATENT_DIM,), name='input_4')
decoder_states_inputs = [decoder_state_input_h, decoder_state_input_c]
decoder_outputs, state_h_dec, state_c_dec = model.get_layer(name="decoder_LSTM")(decoder_inputs, initial_state=decoder_states_inputs)
decoder_states = [state_h_dec, state_c_dec]
decoder_outputs = model.get_layer(name="Dense_1")(decoder_outputs)
decoder_model = Model([decoder_inputs] + decoder_states_inputs,
[decoder_outputs] +decoder_states)
return encoder_model, decoder_model
def sample(preds, diversity = 1.0):
"""
    Get the index of the phrase with the highest probability.
:param preds:
:param diversity:
:return:
"""
    preds = np.asarray(preds)
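    # --- Hedged completion: the source is truncated after the asarray call. The
    # lines below sketch the usual Keras-style temperature sampling and are an
    # assumption, not necessarily the author's original code. ---
    preds = np.log(preds.astype('float64') + 1e-10) / diversity
    exp_preds = np.exp(preds)
    preds = exp_preds / np.sum(exp_preds)
    probas = np.random.multinomial(1, preds, 1)
    return np.argmax(probas)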
# encoding: utf-8
"""
Following the notation in [1]_, the Kalman filter framework consists of
a *dynamic model* (state transition model)
.. math::
x_k = A x_{k-1} + q_{k-1}, \\qquad q_{k-1} \\sim N(0, Q)
and a *measurement model* (observation model)
.. math::
y_k = H x_k + r_k, \\qquad r_k \\sim N(0, R),
where the vector :math:`x` is the (hidden) state of the system and
:math:`y` is an observation. `A` and `H` are matrices of suitable shape
and :math:`Q`, :math:`R` are positive-definite noise covariance matrices.
.. [1] <NAME> (2013).
Bayesian Filtering and Smoothing. Cambridge University Press.
https://users.aalto.fi/~ssarkka/pub/cup_book_online_20131111.pdf
Usage example
---------------
.. testsetup ::
import numpy.random
numpy.random.seed(0)
Define model
.. testcode ::
import simdkalman
import numpy as np
kf = simdkalman.KalmanFilter(
state_transition = [[1,1],[0,1]], # matrix A
process_noise = np.diag([0.1, 0.01]), # Q
observation_model = np.array([[1,0]]), # H
observation_noise = 1.0) # R
Generate some fake data
.. testcode ::
import numpy.random as random
# 100 independent time series
data = random.normal(size=(100, 200))
# with 10% of NaNs denoting missing values
data[random.uniform(size=data.shape) < 0.1] = np.nan
Smooth all data
.. testcode ::
smoothed = kf.smooth(data,
initial_value = [1,0],
initial_covariance = np.eye(2) * 0.5)
# second timeseries, third time step, hidden state x
print('mean')
print(smoothed.states.mean[1,2,:])
print('covariance')
print(smoothed.states.cov[1,2,:,:])
.. testoutput ::
mean
[ 0.29311384 -0.06948961]
covariance
[[ 0.19959416 -0.00777587]
[-0.00777587 0.02528967]]
Predict new data for a single series (1d case)
.. testcode ::
predicted = kf.predict(data[1,:], 123)
# predicted observation y, third new time step
pred_mean = predicted.observations.mean[2]
pred_stdev = np.sqrt(predicted.observations.cov[2])
print('%g +- %g' % (pred_mean, pred_stdev))
.. testoutput ::
1.71543 +- 1.65322
"""
import numpy as np
# pylint: disable=W0401,W0614
from simdkalman.primitives import *
class Gaussian:
def __init__(self, mean, cov):
self.mean = mean
if cov is not None:
self.cov = cov
@staticmethod
def empty(n_states, n_vars, n_measurements, cov=True):
mean = np.empty((n_vars, n_measurements, n_states))
if cov:
cov = np.empty((n_vars, n_measurements, n_states, n_states))
else:
cov = None
return Gaussian(mean, cov)
def unvectorize_state(self):
n_states = self.mean.shape[-1]
assert(n_states == 1)
mean = self.mean
cov = self.cov
mean = mean[...,0]
if cov is not None:
cov = cov[...,0,0]
return Gaussian(mean, cov)
def unvectorize_vars(self):
n_vars = self.mean.shape[0]
assert(n_vars == 1)
mean = self.mean
cov = self.cov
mean = mean[0,...]
if cov is not None:
cov = cov[0,...]
return Gaussian(mean, cov)
def __str__(self):
s = "mean:\n %s" % str(self.mean).replace("\n", "\n ")
if self.cov is not None:
s += "\ncov:\n %s" % str(self.cov).replace("\n", "\n ")
return s
class KalmanFilter(object):
"""
The main Kalman filter class providing convenient interfaces to
vectorized smoothing and filtering operations on multiple independent
time series.
As long as the shapes of the given parameters match reasonably according
to the rules of matrix multiplication, this class is flexible in their
exact nature accepting
* scalars: ``process_noise = 0.1``
* (2d) numpy matrices: ``process_noise = numpy.eye(2)``
* 2d arrays: ``observation_model = [[1,2]]``
* 3d arrays and matrices for vectorized computations. Unlike the other
options, this locks the shape of the inputs that can be processed
by the smoothing and prediction methods.
:param state_transition:
State transition matrix :math:`A`
:param process_noise:
Process noise (state transition covariance) matrix :math:`Q`
:param observation_model:
Observation model (measurement model) matrix :math:`H`
:param observation_noise:
Observation noise (measurement noise covariance) matrix :math:`R`
"""
# pylint: disable=W0232
class Result:
def __str__(self):
s = ""
for k,v in self.__dict__.items():
if len(s) > 0:
s += "\n"
s += "%s:\n" % k
s += " " + str(v).replace("\n", "\n ")
return s
def __init__(self,
state_transition,
process_noise,
observation_model,
observation_noise):
state_transition = ensure_matrix(state_transition)
n_states = state_transition.shape[0]
process_noise = ensure_matrix(process_noise, n_states)
observation_model = ensure_matrix(observation_model)
n_obs = observation_model.shape[-2]
observation_noise = ensure_matrix(observation_noise, n_obs)
assert(state_transition.shape[-2:] == (n_states, n_states))
assert(process_noise.shape[-2:] == (n_states, n_states))
assert(observation_model.shape[-2:] == (n_obs, n_states))
assert(observation_noise.shape[-2:] == (n_obs, n_obs))
self.state_transition = state_transition
self.process_noise = process_noise
self.observation_model = observation_model
self.observation_noise = observation_noise
def predict_next(self, m, P):
"""
Single prediction step
:param m: :math:`{\\mathbb E}[x_{j-1}]`, the previous mean
:param P: :math:`{\\rm Cov}[x_{j-1}]`, the previous covariance
:rtype: ``(prior_mean, prior_cov)`` predicted mean and covariance
:math:`{\\mathbb E}[x_j]`, :math:`{\\rm Cov}[x_j]`
"""
return predict(m, P, self.state_transition, self.process_noise)
def update(self, m, P, y, log_likelihood=False):
"""
Single update step with NaN check.
:param m: :math:`{\\mathbb E}[x_j|y_1,\\ldots,y_{j-1}]`,
the prior mean of :math:`x_j`
:param P: :math:`{\\rm Cov}[x_j|y_1,\\ldots,y_{j-1}]`,
the prior covariance of :math:`x_j`
:param y: observation :math:`y_j`
:param log_likelihood: compute log-likelihood?
:type log_likelihood: boolean
:rtype: ``(posterior_mean, posterior_covariance, log_likelihood)``
posterior mean :math:`{\\mathbb E}[x_j|y_1,\\ldots,y_j]`
& covariance :math:`{\\rm Cov}[x_j|y_1,\\ldots,y_j]`
and, if requested, log-likelihood. If :math:`y_j` is NaN, returns
the prior mean and covariance instead
"""
return priv_update_with_nan_check(m, P,
self.observation_model, self.observation_noise, y,
log_likelihood=log_likelihood)
def predict_observation(self, m, P):
"""
Probability distribution of observation :math:`y` for a given
distribution of :math:`x`
:param m: :math:`{\\mathbb E}[x]`
:param P: :math:`{\\rm Cov}[x]`
:rtype: mean :math:`{\\mathbb E}[y]` and
covariance :math:`{\\rm Cov}[y]`
"""
return predict_observation(m, P,
self.observation_model, self.observation_noise)
def smooth_current(self, m, P, ms, Ps):
"""
Single Kalman smoother backwards step
:param m: :math:`{\\mathbb E}[x_j|y_1,\\ldots,y_j]`,
the filtered mean of :math:`x_j`
:param P: :math:`{\\rm Cov}[x_j|y_1,\\ldots,y_j]`,
the filtered covariance of :math:`x_j`
:param ms:
:math:`{\\mathbb E}[x_{j+1}|y_1,\\ldots,y_T]`
:param Ps:
:math:`{\\rm Cov}[x_{j+1}|y_1,\\ldots,y_T]`
:rtype: ``(smooth_mean, smooth_covariance, smoothing_gain)``
smoothed mean :math:`{\\mathbb E}[x_j|y_1,\\ldots,y_T]`,
and covariance :math:`{\\rm Cov}[x_j|y_1,\\ldots,y_T]`
& smoothing gain :math:`C`
"""
return priv_smooth(m, P,
self.state_transition, self.process_noise, ms, Ps)
def predict(self,
data,
n_test,
initial_value = None,
initial_covariance = None,
states = True,
observations = True,
covariances = True,
verbose = False):
"""
Filter past data and predict a given number of future values.
The data can be given as either of
* 1d array, like ``[1,2,3,4]``. In this case, one Kalman filter is
used and the return value structure will contain a 1d array of
``observations`` (both ``.mean`` and ``.cov`` will be 1d).
* 2d matrix, where each row is interpreted as an independent time
series, all of which are filtered independently. The returned
``observations`` members will be 2-dimensional in this case.
* 3d matrix, whose last dimension can be used for multi-dimensional
observations, i.e., ``data[1,2,:]`` defines the components of the
third observation of the second series. In the multi-dimensional
case the returned ``observations.mean`` will be 3-dimensional and
``observations.cov`` 4-dimensional.
Initial values and covariances can be given as scalars or 2d matrices
in which case the same initial states will be used for all rows or
3d arrays for different initial values.
:param data: Past data
:param n_test: number of future steps to predict.
:type n_test: integer
:param initial_value: Initial value :math:`{\\mathbb E}[x_0]`
:param initial_covariance: Initial uncertainty :math:`{\\rm Cov}[x_0]`
:param states: predict states :math:`x`?
:type states: boolean
:param observations: predict observations :math:`y`?
:type observations: boolean
:param covariances: include covariances in predictions?
:type covariances: boolean
:rtype: Result object with fields
``states`` and ``observations``, if the respective parameter flags
are set to True. Both are ``Gaussian`` result objects with fields
``mean`` and ``cov`` (if the *covariances* flag is True)
"""
return self.compute(
data,
n_test,
initial_value,
initial_covariance,
smoothed = False,
states = states,
covariances = covariances,
observations = observations,
verbose = verbose).predicted
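# Example (sketch): predict 5 future values for every row of a 2d input,
# assuming `kf` was constructed as in the sketch near the top of this class.
#
#   pred = kf.predict(data, 5)
#   pred.observations.mean  # shape (n_series, 5) for scalar observations
#   pred.observations.cov   # same shape when covariances=True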
def smooth(self,
data,
initial_value = None,
initial_covariance = None,
observations = True,
states = True,
covariances = True,
verbose = False):
"""
Smooth given data, which can be either
* 1d array, like ``[1,2,3,4]``. In this case, one Kalman filter is
used and the return value structure will contain a 1d array of
``observations`` (both ``.mean`` and ``.cov`` will be 1d).
* 2d matrix, where each row is interpreted as an independent time
series, all of which are smoothed independently. The returned
``observations`` members will be 2-dimensional in this case.
* 3d matrix, whose last dimension can be used for multi-dimensional
observations, i.e., ``data[1,2,:]`` defines the components of the
third observation of the second series. In the multi-dimensional
case the returned ``observations.mean`` will be 3-dimensional and
``observations.cov`` 4-dimensional.
Initial values and covariances can be given as scalars or 2d matrices
in which case the same initial states will be used for all rows or
3d arrays for different initial values.
:param data: 1d or 2d data, see above
:param initial_value: Initial value :math:`{\\mathbb E}[x_0]`
:param initial_covariance: Initial uncertainty :math:`{\\rm Cov}[x_0]`
:param states: return smoothed states :math:`x`?
:type states: boolean
:param observations: return smoothed observations :math:`y`?
:type observations: boolean
:param covariances: include covariances in the results?
:type covariances: boolean
:rtype: Result object with fields
``states`` and ``observations``, if the respective parameter flags
are set to True. Both are ``Gaussian`` result objects with fields
``mean`` and ``cov`` (if the *covariances* flag is True)
"""
return self.compute(
data,
0,
initial_value,
initial_covariance,
smoothed = True,
states = states,
covariances = covariances,
observations = observations,
verbose = verbose).smoothed
def compute(self,
data,
n_test,
initial_value = None,
initial_covariance = None,
smoothed = True,
filtered = False,
states = True,
covariances = True,
observations = True,
likelihoods = False,
gains = False,
log_likelihood = False,
verbose = False):
"""
Smoothing, filtering and prediction at the same time. Used internally
by other methods, but can also be used directly if, e.g., both smoothed
and predicted data is wanted.
See **smooth** and **predict** for explanation of the common parameters.
With this method, there also exist the following flags.
:param smoothed: compute Kalman smoother (used by **smooth**)
:type smoothed: boolean
:param filtered: return (one-way) filtered data
:type filtered: boolean
:param likelihoods: return likelihoods of each step
:type likelihoods: boolean
:param gains: return Kalman gains and pairwise covariances (used by
the EM algorithm)
:type gains: boolean
:param log_likelihood: return the log-likelihood(s) for the entire
series. If matrix data is given, this will be a vector where each
element is the log-likelihood of a single row.
:type log_likelihood: boolean
:rtype: result object whose fields depend on which of the above parameter
flags are True. The possible values are:
``smoothed`` (the return value of **smooth**),
``filtered`` (like *smoothed*),
``predicted`` (the return value of **predict** if ``n_test > 0``)
``gains``, ``pairwise_covariances``, ``likelihoods`` and
``log_likelihood``.
"""
# pylint: disable=W0201
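# Example (sketch): obtain both smoothed and 10-step-ahead predicted
# observations in a single pass, assuming `kf` as in the class-level sketch.
#
#   r = kf.compute(data, 10)
#   r.smoothed.observations.mean
#   r.predicted.observations.mean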
result = KalmanFilter.Result()
data = ensure_matrix(data)
single_sequence = len(data.shape) == 1
if single_sequence:
data = data[np.newaxis,:]
n_vars = data.shape[0]
n_measurements = data.shape[1]
n_states = self.state_transition.shape[0]
n_obs = self.observation_model.shape[-2]
def empty_gaussian(
n_states=n_states,
n_measurements=n_measurements,
cov=covariances):
return Gaussian.empty(n_states, n_vars, n_measurements, cov)
def auto_flat_observations(obs_gaussian):
r = obs_gaussian
if n_obs == 1:
r = r.unvectorize_state()
if single_sequence:
r = r.unvectorize_vars()
return r
def auto_flat_states(obs_gaussian):
if single_sequence:
return obs_gaussian.unvectorize_vars()
return obs_gaussian
if initial_value is None:
initial_value = np.zeros((n_states, 1))
initial_value = ensure_matrix(initial_value)
if len(initial_value.shape) == 1:
initial_value = initial_value.reshape((n_states, 1))
if initial_covariance is None:
initial_covariance = ensure_matrix(
np.trace(ensure_matrix(self.observation_model))*(5**2), n_states)
initial_covariance = ensure_matrix(initial_covariance, n_states)
initial_value = ensure_matrix(initial_value)
assert(initial_value.shape[-2:] == (n_states, 1))
assert(initial_covariance.shape[-2:] == (n_states, n_states))
if len(initial_value.shape) == 2:
initial_value = np.vstack([initial_value[np.newaxis,...]]*n_vars)
if len(initial_covariance.shape) == 2:
initial_covariance = np.vstack([initial_covariance[np.newaxis,...]]*n_vars)
m = initial_value
P = initial_covariance
keep_filtered = filtered or smoothed
if filtered or gains:
result.filtered = KalmanFilter.Result()
if log_likelihood:
result.log_likelihood = np.zeros((n_vars,))
if likelihoods:
result.log_likelihoods = np.empty((n_vars, n_measurements))
if keep_filtered:
if observations:
filtered_observations = empty_gaussian(n_states=n_obs)
filtered_states = empty_gaussian(cov=True)
if gains:
result.filtered.gains = np.empty((n_vars, n_measurements, n_states, n_states))
for j in range(n_measurements):
if verbose:
print('filtering %d/%d' % (j+1, n_measurements))
y = data[:,j,...].reshape((n_vars, n_obs, 1))
tup = self.update(m, P, y, log_likelihood)
m, P, K = tup[:3]
if log_likelihood:
l = tup[-1]
result.log_likelihood += l
if likelihoods:
result.log_likelihoods[:,j] = l
if keep_filtered:
if observations:
obs_mean, obs_cov = self.predict_observation(m, P)
filtered_observations.mean[:,j,:] = obs_mean[...,0]
if covariances:
filtered_observations.cov[:,j,:,:] = obs_cov
filtered_states.mean[:,j,:] = m[...,0]
filtered_states.cov[:,j,:,:] = P
if gains:
result.filtered.gains[:,j,:,:] = K
m, P = self.predict_next(m, P)
if smoothed:
result.smoothed = KalmanFilter.Result()
if states:
result.smoothed.states = empty_gaussian()
# lazy trick to keep last filtered = last smoothed
result.smoothed.states.mean = 1*filtered_states.mean
if covariances:
result.smoothed.states.cov = 1*filtered_states.cov
if observations:
result.smoothed.observations = empty_gaussian(n_states=n_obs)
result.smoothed.observations.mean = 1*filtered_observations.mean
if covariances:
result.smoothed.observations.cov = 1*filtered_observations.cov
if gains:
result.smoothed.gains = np.zeros((n_vars, n_measurements, n_states, n_states))
result.pairwise_covariances = np.zeros((n_vars, n_measurements, n_states, n_states))
ms = filtered_states.mean[:,-1,:][...,np.newaxis]
Ps = filtered_states.cov[:,-1,:,:]
for j in range(n_measurements)[-2::-1]:
if verbose:
print('smoothing %d/%d' % (j+1, n_measurements))
m0 = filtered_states.mean[:,j,:][...,np.newaxis]
P0 = filtered_states.cov[:,j,:,:]
PsNext = Ps
ms, Ps, Cs = self.smooth_current(m0, P0, ms, Ps)
if states:
result.smoothed.states.mean[:,j,:] = ms[...,0]
if covariances:
result.smoothed.states.cov[:,j,:,:] = Ps
if observations:
obs_mean, obs_cov = self.predict_observation(ms, Ps)
result.smoothed.observations.mean[:,j,:] = obs_mean[...,0]
if covariances:
result.smoothed.observations.cov[:,j,:,:] = obs_cov
if gains:
result.smoothed.gains[:,j,:,:] = Cs
result.pairwise_covariances[:,j,:,:] = ddot_t_right(PsNext, Cs)
if filtered:
if states:
result.filtered.states = Gaussian(filtered_states.mean, None)
if covariances:
result.filtered.states.cov = filtered_states.cov
result.filtered.states = auto_flat_states(result.filtered.states)
if observations:
result.filtered.observations = auto_flat_observations(
filtered_observations)
if smoothed:
if observations:
result.smoothed.observations = auto_flat_observations(
result.smoothed.observations)
if states:
result.smoothed.states = auto_flat_states(
result.smoothed.states)
if n_test > 0:
result.predicted = KalmanFilter.Result()
if observations:
result.predicted.observations = empty_gaussian(
n_measurements=n_test,
n_states=n_obs)
if states:
result.predicted.states = empty_gaussian(n_measurements=n_test)
for j in range(n_test):
if verbose:
print('predicting %d/%d' % (j+1, n_test))
if states:
result.predicted.states.mean[:,j,:] = m[...,0]
if covariances:
result.predicted.states.cov[:,j,:,:] = P
if observations:
obs_mean, obs_cov = self.predict_observation(m, P)
result.predicted.observations.mean[:,j,:] = obs_mean[...,0]
if covariances:
result.predicted.observations.cov[:,j,:,:] = obs_cov
m, P = self.predict_next(m, P)
if observations:
result.predicted.observations = auto_flat_observations(
result.predicted.observations)
if states:
result.predicted.states = auto_flat_states(result.predicted.states)
return result
def em_process_noise(self, result, verbose=False):
n_vars, n_measurements, n_states = result.smoothed.states.mean.shape
res = np.zeros((n_vars, n_states, n_states))
import numpy as np
import pytest
from numpy import ndarray
from numpy.testing import assert_array_equal
from pytest import approx
from meshkernel import (
DeleteMeshOption,
GeometryList,
InputError,
Mesh2d,
MeshKernel,
MeshKernelError,
MeshRefinementParameters,
RefinementType,
)
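# Note: many tests below use a `meshkernel_with_mesh2d` fixture that is not
# defined in this file (it would normally live in conftest.py). A minimal
# sketch of such a fixture, assuming a hypothetical helper that builds a
# regular (rows x columns)-face Mesh2d with unit spacing:
#
#   @pytest.fixture
#   def meshkernel_with_mesh2d():
#       def _factory(rows: int, columns: int) -> MeshKernel:
#           mk = MeshKernel()
#           mk.mesh2d_set(build_regular_mesh2d(rows, columns))  # hypothetical helper
#           return mk
#       return _factory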
cases_is_geometric_constructor = [(True), (False)]
@pytest.mark.parametrize("is_geometric", cases_is_geometric_constructor)
def test_constructor(is_geometric: bool):
"""Test if the constructor works"""
MeshKernel(is_geometric)
def test_different_instances_have_different_ids():
"""Test if the meshkernelid of two instances differs"""
mk_1 = MeshKernel()
mk_2 = MeshKernel()
assert mk_1._meshkernelid != mk_2._meshkernelid
def test_mesh2d_set_and_mesh2d_get():
"""Test to set a simple mesh and then get it again with new parameters
3---2
| |
0---1
"""
mk = MeshKernel()
edge_nodes = np.array([0, 1, 1, 2, 2, 3, 3, 0], dtype=np.int32)
node_x = np.array([0.0, 1.0, 1.0, 0.0], dtype=np.double)
node_y = np.array([0.0, 0.0, 1.0, 1.0], dtype=np.double)
input_mesh2d = Mesh2d(node_x, node_y, edge_nodes)
mk.mesh2d_set(input_mesh2d)
output_mesh2d = mk.mesh2d_get()
# Test if the input and output differs
assert_array_equal(output_mesh2d.edge_nodes, input_mesh2d.edge_nodes)
assert_array_equal(output_mesh2d.node_x, input_mesh2d.node_x)
assert_array_equal(output_mesh2d.node_y, input_mesh2d.node_y)
# Test if faces are correctly calculated
assert_array_equal(output_mesh2d.face_nodes, np.array([0, 1, 2, 3]))
assert_array_equal(output_mesh2d.nodes_per_face, np.array([4]))
assert_array_equal(output_mesh2d.face_x, np.array([0.5]))
assert_array_equal(output_mesh2d.face_y, np.array([0.5]))
# Test if edges are correctly calculated
assert_array_equal(output_mesh2d.edge_x, np.array([0.5, 1.0, 0.5, 0.0]))
assert_array_equal(output_mesh2d.edge_y, np.array([0.0, 0.5, 1.0, 0.5]))
def test_mesh2d_insert_edge(meshkernel_with_mesh2d: MeshKernel):
"""Test `mesh2d_insert_edge` by inserting one edge within a 1x1 Mesh2d.
2---3
| |
0---1
"""
mk = meshkernel_with_mesh2d(1, 1)
edge_index = mk.mesh2d_insert_edge(0, 3)
mesh2d = mk.mesh2d_get()
assert edge_index == 4
assert mesh2d.node_x.size == 4
assert mesh2d.edge_x.size == 5
assert mesh2d.face_x.size == 2
def test_mesh2d_insert_node(meshkernel_with_mesh2d: MeshKernel):
"""Test `mesh2d_insert_node` with a 1x1 Mesh2d.
2---3
| |
0---1
"""
mk = meshkernel_with_mesh2d(1, 1)
node_index = mk.mesh2d_insert_node(1.5, 0.5)
edge_index = mk.mesh2d_insert_edge(3, node_index)
mesh2d = mk.mesh2d_get()
assert node_index == 4
assert mesh2d.node_x.size == 5
assert edge_index == 4
assert mesh2d.edge_x.size == 5
cases_mesh2d_delete_node = [
(0, 0.0, 0.0),
(1, 1.0, 0.0),
(2, 2.0, 0.0),
(3, 0.0, 1.0),
(4, 1.0, 1.0),
(5, 2.0, 1.0),
(6, 0.0, 2.0),
(7, 1.0, 2.0),
(8, 2.0, 2.0),
]
@pytest.mark.parametrize("node_index, deleted_x, deleted_y", cases_mesh2d_delete_node)
def test_mesh2d_delete_node(
meshkernel_with_mesh2d: MeshKernel,
node_index: int,
deleted_x: float,
deleted_y: float,
):
"""Test `mesh2d_delete_node` by deleting a node from a 2x2 Mesh2d.
6---7---8
| | |
3---4---5
| | |
0---1---2
"""
mk = meshkernel_with_mesh2d(2, 2)
mk.mesh2d_delete_node(node_index)
mesh2d = mk.mesh2d_get()
assert mesh2d.node_x.size == 8
for x, y in zip(mesh2d.node_x, mesh2d.node_y):
assert x != deleted_x or y != deleted_y
def test_mesh2d_delete_node_invalid_node_index(meshkernel_with_mesh2d: MeshKernel):
"""Test `mesh2d_delete_node` by passing a negative `node_index`."""
mk = meshkernel_with_mesh2d(1, 1)
with pytest.raises(InputError):
mk.mesh2d_delete_node(-1)
cases_mesh2d_move_node = [
(0, 0.0, 0.0),
(1, 1.0, 0.0),
(2, 2.0, 0.0),
(3, 0.0, 1.0),
(4, 1.0, 1.0),
(5, 2.0, 1.0),
(6, 0.0, 2.0),
(7, 1.0, 2.0),
(8, 2.0, 2.0),
]
@pytest.mark.parametrize("node_index, moved_x, moved_y", cases_mesh2d_move_node)
def test_mesh2d_move_node(
meshkernel_with_mesh2d: MeshKernel, node_index: int, moved_x: float, moved_y: float
):
"""Test to move a node in a simple Mesh2d to a new location.
6---7---8
| | |
3---4---5
| | |
0---1---2
"""
mk = meshkernel_with_mesh2d(2, 2)
mk.mesh2d_move_node(5.0, 7.0, node_index)
mesh2d = mk.mesh2d_get()
assert mesh2d.node_x[node_index] == 5.0
assert mesh2d.node_y[node_index] == 7.0
for x, y in zip(mesh2d.node_x, mesh2d.node_y):
assert x != moved_x or y != moved_y
def test_mesh2d_move_node_invalid_node_index(meshkernel_with_mesh2d: MeshKernel):
"""Test `mesh2d_move_node` by passing a negative `node_index`."""
mk = meshkernel_with_mesh2d(1, 1)
with pytest.raises(InputError):
mk.mesh2d_move_node(5.0, 7.0, -1)
cases_mesh2d_delete_edge = [
(0.5, 0.0),
(1.5, 0.0),
(0.0, 0.5),
(1.0, 0.5),
(2.0, 0.5),
(0.5, 1.0),
(1.5, 1.0),
(0.0, 1.5),
(1.0, 1.5),
(2.0, 1.5),
(0.5, 2.0),
(1.5, 2.0),
]
@pytest.mark.parametrize("delete_x, delete_y", cases_mesh2d_delete_edge)
def test_mesh2d_delete_edge(
meshkernel_with_mesh2d: MeshKernel, delete_x: float, delete_y: float
):
"""Test `mesh2d_delete_edge` by deleting an edge from a 2x2 Mesh2d.
6---7---8
| | |
3---4---5
| | |
0---1---2
"""
mk = meshkernel_with_mesh2d(2, 2)
mk.mesh2d_delete_edge(delete_x, delete_y)
mesh2d = mk.mesh2d_get()
assert mesh2d.node_x.size == 9
assert mesh2d.edge_x.size == 11
assert mesh2d.face_x.size == 3
for x, y in zip(mesh2d.edge_x, mesh2d.edge_y):
assert x != delete_x or y != delete_y
cases_mesh2d_get_edge = [
(0.5, 0.0, 2),
(1.0, 0.5, 1),
(0.5, 1.0, 3),
(0.0, 0.5, 0),
]
@pytest.mark.parametrize("x, y, exp_index", cases_mesh2d_get_edge)
def test_mesh2d_get_edge(
meshkernel_with_mesh2d: MeshKernel, x: float, y: float, exp_index: int
):
"""Test `mesh2d_get_edge` on a 1x1 Mesh2d.
(3)
2---3
(0)| |(1)
0---1
(2)
"""
mk = meshkernel_with_mesh2d(1, 1)
edge_index = mk.mesh2d_get_edge(x, y)
assert edge_index == exp_index
cases_mesh2d_get_node_index = [
(0.0, 0.0, 0),
(0.4, 0.0, 0),
(0.0, 0.4, 0),
(1.0, 0.0, 1),
(0.6, 0.0, 1),
(1.0, 0.4, 1),
(0.0, 1.0, 2),
(0.4, 1.0, 2),
(0.0, 0.6, 2),
(1.0, 1.0, 3),
(0.6, 1.0, 3),
(1.0, 0.6, 3),
]
@pytest.mark.parametrize("x, y, exp_index", cases_mesh2d_get_node_index)
def test_mesh2d_get_node_index(
meshkernel_with_mesh2d: MeshKernel, x: float, y: float, exp_index: int
):
"""Test `mesh2d_get_node_index` on a 1x1 Mesh2d.
2---3
| |
0---1
"""
mk = meshkernel_with_mesh2d(1, 1)
node_index = mk.mesh2d_get_node_index(x, y, 0.5)
assert node_index == exp_index
def test_mesh2d_get_node_index_no_node_in_search_radius(
meshkernel_with_mesh2d: MeshKernel,
):
"""Test `get_node_index` when there is no node within the search radius."""
mk = meshkernel_with_mesh2d(1, 1)
with pytest.raises(MeshKernelError):
mk.mesh2d_get_node_index(0.5, 0.5, 0.4)
cases_mesh2d_delete_small_polygon = [
(True, DeleteMeshOption.ALL_NODES, 4, 4, 1),
(True, DeleteMeshOption.ALL_FACE_CIRCUMCENTERS, 16, 24, 9),
(True, DeleteMeshOption.ALL_COMPLETE_FACES, 4, 4, 1),
(False, DeleteMeshOption.ALL_NODES, 32, 48, 16),
(False, DeleteMeshOption.ALL_FACE_CIRCUMCENTERS, 32, 48, 16),
(False, DeleteMeshOption.ALL_COMPLETE_FACES, 36, 60, 25),
]
@pytest.mark.parametrize(
"invert_deletion, delete_option, exp_nodes, exp_edges, exp_faces",
cases_mesh2d_delete_small_polygon,
)
def test_mesh2d_delete_small_polygon(
meshkernel_with_mesh2d: MeshKernel,
invert_deletion: bool,
delete_option: DeleteMeshOption,
exp_nodes: int,
exp_edges: int,
exp_faces: int,
):
"""Test `mesh2d_delete` by deleting a polygon from a 5x5 mesh2d.
30--31--32--33--34--35
| | | | | |
24--25--26--27--28--29
| | * | | * | |
18--19--20--21--22--23
| | | | | |
12--13--14--15--16--17
| | * | | * | |
6---7---8---9---10--11
| | | | | |
0---1---2---3---4---5
"""
mk = meshkernel_with_mesh2d(5, 5)
# Polygon around nodes 14, 15, 21 & 20 (through the face circum centers)
x_coordinates = np.array([1.5, 3.5, 3.5, 1.5, 1.5], dtype=np.double)
y_coordinates = np.array([1.5, 1.5, 3.5, 3.5, 1.5], dtype=np.double)
geometry_list = GeometryList(x_coordinates, y_coordinates)
mk.mesh2d_delete(geometry_list, delete_option, invert_deletion)
mesh2d = mk.mesh2d_get()
assert mesh2d.node_x.size == exp_nodes
assert mesh2d.edge_x.size == exp_edges
assert mesh2d.face_x.size == exp_faces
cases_mesh2d_delete_empty_polygon = [(False, 0, 0, 0), (True, 25, 40, 16)]
@pytest.mark.parametrize(
"invert_deletion, exp_nodes, exp_edges, exp_faces",
cases_mesh2d_delete_empty_polygon,
)
def test_mesh2d_delete_empty_polygon(
meshkernel_with_mesh2d: MeshKernel,
invert_deletion: bool,
exp_nodes: int,
exp_edges: int,
exp_faces: int,
):
"""Test `mesh2d_delete` by deleting an empty polygon from a 4x4 mesh2d.
20--21--22--23--24
| | | | |
15--16--17--18--19
| | | | |
10--11--12--13--14
| | | | |
5---6---7---8---9
| | | | |
0---1---2---3---4
"""
mk = meshkernel_with_mesh2d(4, 4)
x_coordinates = np.empty(0, dtype=np.double)
y_coordinates = np.empty(0, dtype=np.double)
geometry_list = GeometryList(x_coordinates, y_coordinates)
delete_option = DeleteMeshOption.ALL_NODES
mk.mesh2d_delete(geometry_list, delete_option, invert_deletion)
mesh2d = mk.mesh2d_get()
assert mesh2d.node_x.size == exp_nodes
assert mesh2d.edge_x.size == exp_edges
assert mesh2d.face_x.size == exp_faces
cases_mesh2d_get_hanging_edges = [
(
np.array([0.0, 1.0, 1.0, 0.0], dtype=np.double), # node_x
np.array([0.0, 0.0, 1.0, 1.0], dtype=np.double), # node_y
np.array([0, 1, 1, 3, 2, 3, 2, 0], dtype=np.int32), # edge_nodes
np.array([], dtype=np.int32), # expected
),
(
np.array([0.0, 1.0, 1.0, 0.0, 0.0], dtype=np.double), # node_x
np.array([0.0, 0.0, 1.0, 1.0, 2.0], dtype=np.double), # node_y
np.array([0, 1, 1, 3, 2, 3, 2, 0, 3, 4], dtype=np.int32), # edge_nodes
np.array([4], dtype=np.int32), # expected
),
(
np.array([0.0, 1.0, 1.0, 0.0, 0.0, 2.0], dtype=np.double), # node_x
np.array([0.0, 0.0, 1.0, 1.0, 2.0, 1.0], dtype=np.double), # node_y
np.array([0, 1, 1, 3, 2, 3, 2, 0, 3, 4, 2, 5], dtype=np.int32), # edge_nodes
np.array([4, 5], dtype=np.int32), # expected
),
]
@pytest.mark.parametrize(
"node_x, node_y, edge_nodes, expected", cases_mesh2d_get_hanging_edges
)
def test_mesh2d_get_hanging_edges(
node_x: np.ndarray, node_y: np.ndarray, edge_nodes: np.ndarray, expected: int
):
"""Tests `mesh2d_get_hanging_edges` by comparing the returned hanging edges with the expected ones
4*
|
3---2---5*
| |
0---1
"""
mk = MeshKernel()
mesh2d = Mesh2d(node_x, node_y, edge_nodes)
mk.mesh2d_set(mesh2d)
result = mk.mesh2d_get_hanging_edges()
assert_array_equal(result, expected)
def test_mesh2d_delete_hanging_edges():
"""Tests `mesh2d_delete_hanging_edges` by deleting 2 hanging edges in a simple Mesh2d
4*
|
3---2---5*
| |
0---1
"""
mk = MeshKernel()
node_x = np.array([0.0, 1.0, 1.0, 0.0, 0.0, 2.0], dtype=np.double)
node_y = np.array([0.0, 0.0, 1.0, 1.0, 2.0, 1.0], dtype=np.double)
edge_nodes = np.array([0, 1, 1, 2, 2, 3, 3, 0, 3, 4, 2, 5], dtype=np.int32)
mesh2d = Mesh2d(node_x, node_y, edge_nodes)
mk.mesh2d_set(mesh2d)
mk.mesh2d_delete_hanging_edges()
mesh2d = mk.mesh2d_get()
assert mesh2d.node_x.size == 4
assert mesh2d.edge_x.size == 4
assert mesh2d.face_x.size == 1
def test_mesh2d_make_mesh_from_polygon():
"""Tests `mesh2d_make_mesh_from_polygon` by creating a mesh2d from a simple hexagon."""
mk = MeshKernel()
# 5__4
# / \
# 0 3
# \1__2/
x_coordinates = np.array([0.0, 0.5, 1.5, 2.0, 1.5, 0.5, 0.0], dtype=np.double)
y_coordinates = np.array([1.0, 0.0, 0.0, 1.0, 2.0, 2.0, 1.0], dtype=np.double)
polygon = GeometryList(x_coordinates, y_coordinates)
mk.mesh2d_make_mesh_from_polygon(polygon)
mesh2d = mk.mesh2d_get()
assert mesh2d.node_x.size == 7
assert mesh2d.edge_x.size == 12
assert mesh2d.face_x.size == 6
def test_mesh2d_make_mesh_from_samples():
"""Tests `mesh2d_make_mesh_from_samples` by creating a mesh2d from six sample points."""
mk = MeshKernel()
# 5 4
# 0 3
# 1 2
x_coordinates = np.array([0.0, 0.5, 1.5, 2.0, 1.5, 0.5, 0.0], dtype=np.double)
y_coordinates = np.array([1.0, 0.0, 0.0, 1.0, 2.0, 2.0, 1.0], dtype=np.double)
polygon = GeometryList(x_coordinates, y_coordinates)
mk.mesh2d_make_mesh_from_samples(polygon)
mesh2d = mk.mesh2d_get()
assert mesh2d.node_x.size == 6
assert mesh2d.edge_x.size == 9
assert mesh2d.face_x.size == 4
cases_polygon_refine = [
(0, 0, 30.0, 9),
(0, 1, 30.0, 6),
(0, 2, 30.0, 7),
(0, 3, 30.0, 8),
(0, 4, 30.0, 9),
(0, 0, 20.0, 13),
(0, 1, 20.0, 7),
(0, 2, 20.0, 9),
(0, 3, 20.0, 11),
(0, 4, 20.0, 13),
]
@pytest.mark.parametrize("start, end, length, exp_nodes", cases_polygon_refine)
def test_polygon_refine(start: int, end: int, length: float, exp_nodes: int):
"""Tests `polygon_refine` by refining a simple polygon."""
mk = MeshKernel()
# 3---2
# | |
# 0---1
x_coordinates = np.array([0.0, 60.0, 60.0, 0.0, 0.0], dtype=np.double)
y_coordinates = np.array([0.0, 0.0, 60.0, 60.0, 0.0], dtype=np.double)
polygon = GeometryList(x_coordinates, y_coordinates)
geom = mk.polygon_refine(polygon, start, end, length)
assert geom.x_coordinates.size == exp_nodes
cases_mesh2d_refine_based_on_samples = [
(0.5, 0, 9, 12, 4),
(0.5, 1, 25, 40, 16),
# (0.5, 2, 81, 144, 64),
]
@pytest.mark.parametrize(
"min_face_size, sample_value, exp_nodes, exp_edges, exp_faces",
cases_mesh2d_refine_based_on_samples,
)
def test_mesh2d_refine_based_on_samples(
meshkernel_with_mesh2d: MeshKernel,
min_face_size: float,
sample_value: float,
exp_nodes: int,
exp_edges: int,
exp_faces: int,
):
"""Tests `mesh2d_refine_based_on_samples` with a simple 2x2 mesh.
6---7---8
| | |
3---4---5
| | |
0---1---2
"""
mk = meshkernel_with_mesh2d(2, 2)
x_coordinates = np.array([0.5, 0.5, 1.5, 1.5], dtype=np.double)
y_coordinates = np.array([0.5, 1.5, 1.5, 0.5], dtype=np.double)
values = np.array(
[sample_value, sample_value, sample_value, sample_value], dtype=np.double
)
samples = GeometryList(x_coordinates, y_coordinates, values)
refinement_params = MeshRefinementParameters(
False, False, min_face_size, RefinementType.REFINEMENT_LEVELS, False, False, 1
)
mk.mesh2d_refine_based_on_samples(samples, 1.0, 1, refinement_params)
mesdh2d = mk.mesh2d_get()
assert mesdh2d.node_x.size == exp_nodes
assert mesdh2d.edge_x.size == exp_edges
assert mesdh2d.face_x.size == exp_faces
cases_mesh2d_refine_based_on_polygon = [
(1, 25, 40, 16),
(2, 81, 144, 64),
(3, 289, 544, 256),
]
@pytest.mark.parametrize(
"max_iterations, exp_nodes, exp_edges, exp_faces",
cases_mesh2d_refine_based_on_polygon,
)
def test_mesh2d_refine_based_on_polygon(
meshkernel_with_mesh2d: MeshKernel,
max_iterations: int,
exp_nodes: int,
exp_edges: int,
exp_faces: int,
):
"""Tests `mesh2d_refine_based_on_polygon` with a simple 2x2 mesh.
6---7---8
| | |
3---4---5
| | |
0---1---2
"""
mk = meshkernel_with_mesh2d(2, 2)
x_coordinates = np.array([0.0, 0.0, 2.0, 2.0, 0.0], dtype=np.double)
y_coordinates = np.array([0.0, 2.0, 2.0, 0.0, 0.0], dtype=np.double)
polygon = GeometryList(x_coordinates, y_coordinates)
refinement_params = MeshRefinementParameters(
True, False, 0.5, 1, False, False, max_iterations
)
mk.mesh2d_refine_based_on_polygon(polygon, refinement_params)
mesdh2d = mk.mesh2d_get()
assert mesdh2d.node_x.size == exp_nodes
assert mesdh2d.edge_x.size == exp_edges
assert mesdh2d.face_x.size == exp_faces
def test_mesh2d_get_mesh_boundaries_as_polygons(meshkernel_with_mesh2d: MeshKernel):
"""Tests `mesh2d_get_mesh_boundaries_as_polygons` by checking if the resulted boundary is as expected"""
mk = meshkernel_with_mesh2d(2, 2)
mesh_boundary = mk.mesh2d_get_mesh_boundaries_as_polygons()
assert_array_equal(
mesh_boundary.x_coordinates,
np.array([0.0, 0.0, 0.0, 1.0, 2.0, 2.0, 2.0, 1.0, 0.0], dtype=np.double),
)
assert_array_equal(
mesh_boundary.y_coordinates,
np.array([0.0, 1.0, 2.0, 2.0, 2.0, 1.0, 0.0, 0.0, 0.0], dtype=np.double),
)
cases_mesh2d_merge_nodes = [(1e-2, 4), (1e-4, 5)]
@pytest.mark.parametrize("merging_distance, number_of_nodes", cases_mesh2d_merge_nodes)
def test_mesh2d_merge_nodes(merging_distance: float, number_of_nodes: int):
"""Test if `mesh2d_merge_nodes` reduces the number of close nodes
4---3
| |
01--2
"""
mk = MeshKernel()
# Set up mesh
edge_nodes = np.array([0, 1, 1, 2, 2, 3, 3, 4, 4, 0], dtype=np.int32)
node_x = np.array([0.0, 1e-3, 1.0, 1.0, 0.0], dtype=np.double)
node_y = np.array([0.0, 0.0, 0.0, 1.0, 1.0], dtype=np.double)
input_mesh2d = Mesh2d(node_x, node_y, edge_nodes)
mk.mesh2d_set(input_mesh2d)
# Define polygon where we want to merge
x_coordinates = np.array([-1.0, 2.0, 2.0, -1.0, -1.0], dtype=np.double)
y_coordinates = np.array([-1.0, -1.0, 2.0, 2.0, -1.0], dtype=np.double)
geometry_list = GeometryList(x_coordinates, y_coordinates)
mk.mesh2d_merge_nodes(geometry_list, merging_distance)
output_mesh2d = mk.mesh2d_get()
assert output_mesh2d.node_x.size == number_of_nodes
cases_mesh2d_merge_two_nodes = [(0, 1, 4), (4, 5, 4), (0, 4, 3)]
@pytest.mark.parametrize(
"first_node, second_node, num_faces", cases_mesh2d_merge_two_nodes
)
def test_mesh2d_merge_two_nodes(
meshkernel_with_mesh2d: MeshKernel,
first_node: int,
second_node: int,
num_faces: int,
):
"""Tests `mesh2d_merge_two_nodes` by checking if two selected nodes are properly merged
6---7---8
| | |
3---4---5
| | |
0---1---2
"""
mk = meshkernel_with_mesh2d(2, 2)
mk.mesh2d_merge_two_nodes(first_node, second_node)
output_mesh2d = mk.mesh2d_get()
assert output_mesh2d.node_x.size == 8
assert output_mesh2d.face_x.size == num_faces
cases_polygon_get_included_points = [
(
# Select all
np.array([0.0, 3.0, 3.0, 0.0, 0.0]),
np.array([0.0, 0.0, 3.0, 3.0, 0.0]),
np.array([1.0, 1.0, 1.0, 1.0, 1.0]),
),
(
# Select right half
np.array([1.5, 3.0, 3.0, 1.5, 1.5]),
np.array([0.0, 0.0, 3.0, 3.0, 0.0]),
np.array([0.0, 1.0, 1.0, 0.0, 0.0]),
),
(
# Select bottom-right
np.array([1.5, 3.0, 3.0, 1.5, 1.5]),
np.array([0.0, 0.0, 1.5, 1.5, 0.0]),
np.array([0.0, 1.0, 0.0, 0.0, 0.0]),
),
(
# Select top half
np.array([0.0, 3.0, 3.0, 0.0, 0.0]),
np.array([1.5, 1.5, 3.0, 3.0, 1.5]),
np.array([0.0, 0.0, 1.0, 1.0, 0.0]),
),
(
# Select top-left
np.array([0.0, 1.5, 1.5, 0.0, 0.0]),
np.array([1.5, 1.5, 3.0, 3.0, 1.5]),
np.array([0.0, 0.0, 0.0, 1.0, 0.0]),
),
]
@pytest.mark.parametrize(
"selecting_x, selecting_y, exp_values",
cases_polygon_get_included_points,
)
def test_polygon_get_included_points(
selecting_x: np.array, selecting_y: np.array, exp_values: np.array
):
"""Tests `polygon_get_included_points` with a simple polygon and various selecting polygons."""
selecting_polygon = GeometryList(selecting_x, selecting_y)
x_coordinates = np.array([1.0, 2.0, 2.0, 1.0, 1.0], dtype=np.double)
y_coordinates = np.array([1.0, 1.0, 2.0, 2.0, 1.0], dtype=np.double)
selected_polygon = GeometryList(x_coordinates, y_coordinates)
mk = MeshKernel()
selection = mk.polygon_get_included_points(selecting_polygon, selected_polygon)
assert_array_equal(selection.values, exp_values)
@pytest.mark.parametrize("triangulate", [True, False])
def test_mesh2d_flip_edges(triangulate: bool):
"""Tests `mesh2d_flip_edges` with a simple triangular mesh (heptagon)."""
mk = MeshKernel()
node_x = np.array([0, -8, -10, -4, 4, 10, 8, 0], dtype=np.double)
node_y = np.array([10, 6, -2, -9, -9, -2, 6, -5], dtype=np.double)
edge_nodes = np.array(
[
0,
1,
1,
2,
2,
3,
3,
4,
4,
5,
5,
6,
6,
0,
0,
7,
1,
7,
2,
7,
3,
7,
4,
7,
5,
7,
6,
7,
],
dtype=np.int32,
)
mk.mesh2d_set(Mesh2d(node_x, node_y, edge_nodes))
polygon_x = np.array([-11, 11, 11, -11, -11], dtype=np.double)
polygon_y = np.array([-11, -11, 11, 11, -11], dtype=np.double)
polygon = GeometryList(polygon_x, polygon_y)
land_boundaries_x = np.array([-10, -4, 4, 10], dtype=np.double)
land_boundaries_y = np.array([-2, -9, -9, -2], dtype=np.double)
land_boundaries = GeometryList(land_boundaries_x, land_boundaries_y)
mk.mesh2d_flip_edges(triangulate, False, polygon, land_boundaries)
mesh2d = mk.mesh2d_get()
assert mesh2d.node_x.size == 8
assert mesh2d.edge_x.size == 14
assert mesh2d.face_x.size == 7
def test_mesh2d_flip_edges2_triangulate(meshkernel_with_mesh2d: MeshKernel):
"""Tests `mesh2d_flip_edges` with a simple 2x2 mesh.
6---7---8 6---7---8
| | | | / | / |
3---4---5 --> 3---4---5
| | | | / | / |
0---1---2 0---1---2
"""
mk = meshkernel_with_mesh2d(2, 2)
mk.mesh2d_flip_edges(
True,
True,
GeometryList(np.empty(0, dtype=np.double), np.empty(0, dtype=np.double)),
GeometryList(np.empty(0, dtype=np.double), np.empty(0, dtype=np.double)),
)
mesh2d = mk.mesh2d_get()
assert mesh2d.node_x.size == 9
assert mesh2d.edge_x.size == 16
assert mesh2d.face_x.size == 8
assert np.all(mesh2d.nodes_per_face == 3)
def test_mesh2d_count_obtuse_triangles():
r"""Tests `_mesh2d_count_obtuse_triangles` on a 3x3 mesh with two obtuse triangles.
6---7---8
| / \ |
3---4---5
| \ / |
0---1---2
"""
mk = MeshKernel()
# Mesh with obtuse triangles (4, 5, 7 and 1, 5, 4)
node_x = np.array([0.0, 1.0, 2.0, 0.0, 1.5, 2.0, 0.0, 1.0, 2.0], dtype=np.double)
node_y = np.array([0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 2.0, 2.0, 2.0], dtype=np.double)
edge_nodes = np.array(
[
0,
1,
1,
2,
3,
4,
4,
5,
6,
7,
7,
8,
0,
3,
1,
4,
2,
5,
3,
6,
4,
7,
5,
8,
1,
3,
1,
5,
3,
7,
5,
7,
],
dtype=np.int32,
)
mk.mesh2d_set(Mesh2d(node_x, node_y, edge_nodes))
n_obtuse_triangles = mk._mesh2d_count_obtuse_triangles()
assert n_obtuse_triangles == 2
def test_mesh2d_get_obtuse_triangles_mass_centers():
r"""Tests `mesh2d_get_obtuse_triangles_mass_centers` on a 3x3 mesh with two obtuse triangles.
6---7---8
| / \ |
3---4---5
| \ / |
0---1---2
"""
mk = MeshKernel()
# Mesh with obtuse triangles (4, 5, 7 and 1, 5, 4)
node_x = np.array([0.0, 1.0, 2.0, 0.0, 1.5, 2.0, 0.0, 1.0, 2.0], dtype=np.double)
node_y = np.array([0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 2.0, 2.0, 2.0], dtype=np.double)
edge_nodes = np.array(
[
0,
1,
1,
2,
3,
4,
4,
5,
6,
7,
7,
8,
0,
3,
1,
4,
2,
5,
3,
6,
4,
7,
5,
8,
1,
3,
1,
5,
3,
7,
5,
7,
],
dtype=np.int32,
)
mk.mesh2d_set(Mesh2d(node_x, node_y, edge_nodes))
obtuse_triangles = mk.mesh2d_get_obtuse_triangles_mass_centers()
assert obtuse_triangles.x_coordinates.size == 2
assert obtuse_triangles.x_coordinates[0] == 1.5
assert obtuse_triangles.y_coordinates[0] == approx(0.666, 0.01)
assert obtuse_triangles.x_coordinates[1] == 1.5
assert obtuse_triangles.y_coordinates[1] == approx(1.333, 0.01)
cases_mesh2d_count_small_flow_edge_centers = [(0.9, 0), (1.0, 0), (1.1, 4)]
@pytest.mark.parametrize(
"threshold, exp_int", cases_mesh2d_count_small_flow_edge_centers
)
def test_mesh2d_count_small_flow_edge_centers(threshold: float, exp_int: int):
"""Tests `_mesh2d_count_small_flow_edge_centers` with a simple 3x3 mesh with 4 small flow edges.
6---7---8
| 11|-12|
3-|-4-|-5
| 9-|-10|
0---1---2
"""
mk = MeshKernel()
node_x = np.array(
[0.0, 1.0, 2.0, 0.0, 1.0, 2.0, 0.0, 1.0, 2.0, 0.5, 1.5, 0.5, 1.5],
dtype=np.double,
)
node_y = np.array(
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 2.0, 2.0, 2.0, 0.5, 0.5, 1.5, 1.5],
dtype=np.double,
)
edge_nodes = np.array(
[
0,
1,
1,
2,
3,
4,
4,
5,
6,
7,
7,
8,
0,
3,
1,
4,
2,
5,
3,
6,
4,
7,
5,
8,
9,
10,
11,
12,
9,
11,
10,
12,
],
dtype=np.int32,
)
mk.mesh2d_set(Mesh2d(node_x, node_y, edge_nodes))
n_small_flow_edges = mk._mesh2d_count_small_flow_edge_centers(threshold)
assert n_small_flow_edges == exp_int
def test_mesh2d_get_small_flow_edge_centers():
"""Tests `mesh2d_get_small_flow_edge_centers` with a simple 3x3 mesh with 4 small flow edges.
6---7---8
| 11|-12|
3-|-4-|-5
| 9-|-10|
0---1---2
"""
mk = MeshKernel()
node_x = np.array(
[0.0, 1.0, 2.0, 0.0, 1.0, 2.0, 0.0, 1.0, 2.0, 0.5, 1.5, 0.5, 1.5],
dtype=np.double,
)
node_y = np.array(
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 2.0, 2.0, 2.0, 0.5, 0.5, 1.5, 1.5],
dtype=np.double,
)
edge_nodes = np.array(
[
0,
1,
1,
2,
3,
4,
4,
5,
6,
7,
7,
8,
0,
3,
1,
4,
2,
5,
3,
6,
4,
7,
5,
8,
9,
10,
11,
12,
9,
11,
10,
12,
],
dtype=np.int32,
)
mk.mesh2d_set(Mesh2d(node_x, node_y, edge_nodes))
small_flow_edge_centers = mk.mesh2d_get_small_flow_edge_centers(1.1)
assert small_flow_edge_centers.x_coordinates.size == 4
assert small_flow_edge_centers.x_coordinates[0] == 0.5
assert small_flow_edge_centers.y_coordinates[0] == 1.0
assert small_flow_edge_centers.x_coordinates[1] == 1.5
assert small_flow_edge_centers.y_coordinates[1] == 1.0
assert small_flow_edge_centers.x_coordinates[2] == 1.0
assert small_flow_edge_centers.y_coordinates[2] == 0.5
assert small_flow_edge_centers.x_coordinates[3] == 1.0
assert small_flow_edge_centers.y_coordinates[3] == 1.5
def test_mesh2d_delete_small_flow_edges_and_small_triangles_delete_small_flow_edges():
r"""Tests `mesh2d_delete_small_flow_edges_and_small_triangles` with a simple mesh with one small flow edge.
3---4---5
| 6-|-7 |
0---1---2
"""
mk = MeshKernel()
node_x = np.array(
[0.0, 1.0, 2.0, 0.0, 1.0, 2.0, 0.5, 1.5],
dtype=np.double,
)
node_y = np.array(
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.5, 0.5],
dtype=np.double,
)
edge_nodes = np.array(
[0, 1, 1, 2, 3, 4, 4, 5, 0, 3, 1, 4, 2, 5, 6, 7],
dtype=np.int32,
)
mk.mesh2d_set(Mesh2d(node_x, node_y, edge_nodes))
mk.mesh2d_delete_small_flow_edges_and_small_triangles(1.1, 0.01)
mesh2d = mk.mesh2d_get()
assert mesh2d.node_x.size == 8
assert mesh2d.edge_x.size == 7
assert mesh2d.face_x.size == 1
def test_mesh2d_delete_small_flow_edges_and_small_triangles_delete_small_triangles():
r"""Tests `mesh2d_delete_small_flow_edges_and_small_triangles` with a simple mesh with one small triangle.
3---4---5\
| | | 6
0---1---2/
"""
mk = MeshKernel()
node_x = np.array(
[0.0, 1.0, 2.0, 0.0, 1.0, 2.0, 2.1],
dtype=np.double,
)
node_y = np.array(
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.5],
dtype=np.double,
)
edge_nodes = np.array(
[0, 1, 1, 2, 3, 4, 4, 5, 0, 3, 1, 4, 2, 5, 5, 6, 6, 2],
dtype=np.int32,
)
mk.mesh2d_set(Mesh2d(node_x, node_y, edge_nodes))
mk.mesh2d_delete_small_flow_edges_and_small_triangles(1.0, 0.01)
mesh2d = mk.mesh2d_get()
assert mesh2d.node_x.size == 7
assert mesh2d.edge_x.size == 8
assert mesh2d.face_x.size == 2
cases_nodes_in_polygons_mesh2d = [
(np.array([1.5, 2.5, 2.5, 1.5, 1.5]), np.array([1.5, 1.5, 2.5, 2.5, 1.5]), True, 1),
(
np.array([1.5, 2.5, 2.5, 1.5, 1.5]),
np.array([1.5, 1.5, 2.5, 2.5, 1.5]),
False,
8,
),
(
np.array([]),
np.array([]),
import os
import time
import datetime
import json
import pandas as pd
import numpy as np
from pathlib import Path
from tqdm import tqdm
from typing import List, Optional
class PrepareData:
"""
Clean a CSV table and extract its time series
+ Ensure continuous dates, filling unrecorded dates with 0
+ Split the time series according to the chosen identifier (e.g. product_id, product_id + chain)
+ Save every generated time series under the name of its identifier in numpy format. Additionally, save
a json file with the list of timesteps and the feature names of each time series
"""
def __init__(self, path_data: str, colname_datetime: str, colname_features: List[str], colname_id_time_series: Optional[List[str]] = None):
"""
+ The data is loaded from 'path_data'.
+ 'colname_datetime' is the column that contains the dates.
+ One time series is created for each distinct value combination in the 'colname_id_time_series' columns. If it is None,
the data is assumed to correspond to a single time series.
"""
self.path_data = path_data
self.colname_datetime = colname_datetime
self.colname_features = colname_features
self.colname_id_time_series = colname_id_time_series
self.time_series = {} # Dictionary storing each generated time series by its id
def __call__(self,):
"Load the data and generate the time series"
self.load_data() # Load data
self.get_id_time_series() # Get the id of each time series
self.get_timesteps() # Get the date range
self.get_minmax() # Get per-feature minimum and maximum
self.get_mean_std() # Get per-feature mean and standard deviation
print("Generating time series")
time.sleep(1)
for id_time_serie in tqdm(self.id_time_series):
self.get_time_serie(id_time_serie)
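# Example usage (illustrative; the file name and column names below are
# assumptions, not taken from any real dataset):
#
#   prep = PrepareData(
#       path_data="sales.csv",
#       colname_datetime="date",
#       colname_features=["units", "revenue"],
#       colname_id_time_series=["product_id", "store_id"],
#   )
#   prep()        # build one time series per id over the full date range
#   prep.save()   # write one .npy per series plus a json config file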
def load_data(self,):
"Load the data"
ALLOWED_FILES = [".csv"] # In case more loading formats are added later, e.g. xlsx, pickle.
# Extension of the provided file
extension = os.path.splitext(self.path_data)[-1]
# Check that the extension is one of the file types we can load
assert extension in set(ALLOWED_FILES), "File must be one of {}. Yours is '{}'".format(ALLOWED_FILES, extension)
# Load the file
if self._file_exists(filename = self.path_data):
self.data = pd.read_csv(self.path_data)
print("File loaded from {}".format(self.path_data))
def get_id_time_series(self,):
"Define the identifier of each time series to generate"
self.colname_id = "ID_ts"
self.data[self.colname_id] = self.data[self.colname_id_time_series].apply(lambda row:
"_".join([ str(c) + "-" + str(r)
for c,r in
zip(self.colname_id_time_series,row) ]), axis=1)
# Total number of time series to be extracted
self.id_time_series = list(set(self.data[self.colname_id].tolist()))
total_id = len(self.id_time_series)
print("Se encontraron {} series de tiempo con id {}.".format(total_id, self.colname_id))
def get_time_serie(self, id_time_serie):
"""Get the time series for one id, over the full date range.
Store the generated time series in the .time_series attribute
"""
# Extract the data of the requested time series
cols = [self.colname_datetime]
cols.extend(self.colname_features)
time_serie = self.data.query("`ID_ts` == '{}'".format(id_time_serie))[cols].copy()
time_serie_by_date = {d.get(self.colname_datetime): [d.get(feature) for feature in self.colname_features] for d in time_serie.to_dict("records")}
# Extract the dates
dates_time_serie = list(time_serie_by_date.keys())
# Build the time series over the full date range
rows = []
for date in self.timesteps:
str_date = self.date_to_str(date)
if str_date in dates_time_serie:
date_values = time_serie_by_date.get(str_date)
#info_date = time_serie_by_date.get(str_date)
#date_values = info_date#[info_date for feature in self.colname_features]
else:
date_values = [0 for _ in self.colname_features]
rows.append(date_values)
self.time_series[id_time_serie] = np.array(rows)
def get_timesteps(self,):
"Get the date range"
# Get the column with all the dates
dates = self.data[self.colname_datetime].tolist()
# Convert to datetime
dates = [self.str_to_date(date) for date in dates]
# Compute the minimum and maximum dates
self.min_date = min(dates)
self.max_date = max(dates)
# Get the list of timesteps
n_days = (self.max_date-self.min_date).days + 1 # all days, including the first and the last
self.timesteps = [ self.add_days(self.min_date, days) for days in range(n_days)]
print(f"Datos desde {self.date_to_str(self.min_date)} hasta {self.date_to_str(self.max_date)}, ({n_days} dias) ")
def get_minmax(self,):
self.list_min = self.data[self.colname_features].min(axis=0).tolist()
self.list_max = self.data[self.colname_features].max(axis=0).tolist()
def get_mean_std(self,):
self.list_mean = self.data[self.colname_features].mean(axis=0).tolist()
self.list_std = self.data[self.colname_features].std(axis=0).tolist()
def save(self,):
"""Save the generated time series as numpy arrays and a configuration
file with the timesteps, features and paths to the numpy files"""
folder = Path("time_series")
folder.mkdir(exist_ok=True)
folder.joinpath("numpy").mkdir(exist_ok=True)
print("Guardando series de tiempo")
time.sleep(1)
for name_ts, ts_array in tqdm(self.time_series.items()):
path_save = str(folder.joinpath("numpy/{}.npy".format(name_ts)))
np.save(path_save, ts_array)
import numpy as np
from scipy.interpolate import interp2d, NearestNDInterpolator
from nbodykit.utils import DistributedArray
from nbodykit.lab import BigFileCatalog, MultipleSpeciesCatalog
from nbodykit.cosmology.cosmology import Cosmology
from pmesh.pm import ParticleMesh
from sfr import logSFR_Behroozi
gb_k_B = 1.38064852e-16 # Boltzmann constant in unit of erg K^-1
gb_L_sun = 3.83e33 # in unit of erg/s
gb_c = 2.99792458e5 # in units of km/s
gb_len_conv = 3.086e19 # conversion factor from Mpc to km
gb_h = 0.677
cosmodef = {'omegam':0.309167, 'h':0.677, 'omegab':0.048}
cosmo = Cosmology.from_dict(cosmodef)
def H(z):
return 100.* cosmo.h * cosmo.efunc(z)
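# Sanity check (illustrative): H(z) is in km/s/Mpc, so H(0) == 100*cosmo.h*cosmo.efunc(0) ~ 67.7,
# and it grows with z roughly as sqrt(omegam*(1+z)**3 + 1 - omegam) for this flat-LCDM cosmology.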
# ########################################### ###########################################
# ########################################### MODELS ###########################################
# ########################################### ###########################################
class ModelHI_A():
def __init__(self, aa):
self.aa = aa
self.zz = 1/aa-1
self.alp = (1+2*self.zz)/(2+2*self.zz)
#self.mcut = 1e9*( 1.8 + 15*(3*self.aa)**8)
self.mcut = 3e9*( 1 + 10*(3*self.aa)**8)
###self.normhalo = 3e5*(1+(3.5/self.zz)**6)
###self.normhalo = 3e7 *(4+(3.5/self.zz)**6)
self.normhalo = 8e5*(1+(3.5/self.zz)**6)
self.normsat = self.normhalo*(1.75 + 0.25*self.zz)
self.normsat *= 0.5 # This is to avoid negative masses
#z=1
if np.abs(self.zz-1.0)<0.1:
self.alp = 0.76
self.mcut = 2.6e10
self.normhalo = 4.6e8
self.normsat = self.normsat/5
#z=0.5
if np.abs(self.zz-0.5)<0.1:
self.alp = 0.63
self.mcut = 3.7e10
self.normhalo = 9.8e8
self.normsat = self.normsat/100
#z=0
if np.abs(self.zz - 0.0)<0.1:
#print('Modify')
self.alp = 0.49
self.mcut = 5.2e10
self.normhalo = 2.1e9
self.normsat = self.normsat/100
def assignline(self, halocat, cencat, satcat):
mHIhalo = self.assignhalo(halocat['Mass'].compute())
mHIsat = self.assignsat(satcat['Mass'].compute())
mHIcen = self.assigncen(mHIhalo, mHIsat, satcat['GlobalID'].compute(),
cencat.csize, cencat.comm)
return mHIhalo, mHIcen, mHIsat
def assignhalo(self, mhalo):
xx = mhalo/self.mcut+1e-10
mHI = xx**self.alp * np.exp(-1/xx)
mHI*= self.normhalo
return mHI
def assignsat(self, msat):
xx = msat/self.mcut+1e-10
mHI = xx**self.alp * np.exp(-1/xx)
mHI*= self.normsat
return mHI
def getinsat(self, mHIsat, satid, totalsize, localsize, comm):
#print(comm.rank, np.all(np.diff(satid) >=0))
#diff = np.diff(satid)
#if comm.rank == 260:
# print(satid[:-1][diff <0], satid[1:][diff < 0])
da = DistributedArray(satid, comm)
mHI = da.bincount(mHIsat, shared_edges=False)
zerosize = totalsize - mHI.cshape[0]
zeros = DistributedArray.cempty(cshape=(zerosize, ), dtype=mHI.local.dtype, comm=comm)
zeros.local[...] = 0
mHItotal = DistributedArray.concat(mHI, zeros, localsize=localsize)
return mHItotal
def assigncen(self, mHIhalo, mHIsat, satid, censize, comm):
#Assumes every halo has a central...which it does...usually
mHItotal = self.getinsat(mHIsat, satid, censize, mHIhalo.size, comm)
return mHIhalo - mHItotal.local
def assignrsd(self, rsdfac, halocat, cencat, satcat, los=[0,0,1]):
hrsdpos = halocat['Position']+halocat['Velocity']*los * rsdfac
crsdpos = cencat['Position']+cencat['Velocity']*los * rsdfac
srsdpos = satcat['Position']+satcat['Velocity']*los * rsdfac
return hrsdpos, crsdpos, srsdpos
def createmesh(self, bs, nc, positions, weights):
'''use this to create mesh of HI
'''
pm = ParticleMesh(BoxSize=bs, Nmesh=[nc,nc,nc])
mesh = pm.create(mode='real', value=0)
comm = pm.comm
rankweight = sum([wt.sum() for wt in weights])
totweight = comm.allreduce(rankweight)
for wt in weights: wt /= totweight/float(nc)**3
for i in range(len(positions)):
lay = pm.decompose(positions[i])
mesh.paint(positions[i], mass=weights[i], layout=lay, hold=True)
return mesh
def createmesh_catalog(self, bs, nc, halocat, cencat, satcat, mode='galaxies', position='RSDpos', weight='HImass', tofield=False):
'''use this to create mesh of HI
'''
comm = halocat.comm
if mode == 'halos': catalogs = [halocat]
elif mode == 'galaxies': catalogs = [cencat, satcat]
elif mode == 'all': catalogs = [halocat, cencat, satcat]
else: print('Mode not recognized')
rankweight = sum([cat[weight].sum().compute() for cat in catalogs])
totweight = comm.allreduce(rankweight)
for cat in catalogs: cat[weight] /= totweight/float(nc)**3
allcat = MultipleSpeciesCatalog(['%d'%i for i in range(len(catalogs))], *catalogs)
mesh = allcat.to_mesh(BoxSize=bs,Nmesh=[nc,nc,nc],\
position=position,weight=weight)
if tofield: mesh = mesh.to_field()
return mesh
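# Minimal usage sketch for ModelHI_A (illustrative; the catalogs, box size and
# mesh resolution are assumptions, not values from this file):
#
#   model = ModelHI_A(aa=1./3)  # z = 2
#   mHIhalo, mHIcen, mHIsat = model.assignline(halocat, cencat, satcat)
#   hpos, cpos, spos = model.assignrsd(rsdfac, halocat, cencat, satcat)
#   mesh = model.createmesh(bs=256, nc=256,
#                           positions=[cpos.compute(), spos.compute()],
#                           weights=[mHIcen, mHIsat])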
# ###########################################
class ModelHI_A2(ModelHI_A):
'''Same as model A with a different RSD for satellites
'''
def __init__(self, aa):
super().__init__(aa)
def assignrsd(self, rsdfac, halocat, cencat, satcat, los=[0,0,1]):
hrsdpos = halocat['Position']+halocat['Velocity']*los * rsdfac
crsdpos = cencat['Position']+cencat['Velocity']*los * rsdfac
srsdpos = satcat['Position']+satcat['Velocity_HI']*los * rsdfac
return hrsdpos, crsdpos, srsdpos
# ###########################################
class ModelHI_B():
def __init__(self, aa, h=0.6776):
self.aa = aa
self.zz = 1/aa-1
self.h = h
#self.mcut = 1e9*( 1.8 + 15*(3*self.aa)**8 )
self.mcut = 3e9*( 1 + 10*(3*self.aa)**8)
self.normhalo = 1
#self.slope, self.intercept = np.polyfit([8.1, 11], [0.2, -1.], deg=1)
def assignline(self, halocat, cencat, satcat):
mHIsat = self.assignsat(satcat['Mass'].compute())
mHIcen = self.assigncen(cencat['Mass'].compute())
mHIhalo = self.assignhalo(mHIcen, mHIsat, satcat['GlobalID'].compute(),
halocat.csize, halocat.comm)
return mHIhalo, mHIcen, mHIsat
def assignhalo(self, mHIcen, mHIsat, satid, hsize, comm):
#Assumes every halo has a central...which it does...usually
mHItotal = self.getinsat(mHIsat, satid, hsize, mHIcen.size, comm)
return mHIcen + mHItotal.local
def getinsat(self, mHIsat, satid, totalsize, localsize, comm):
da = DistributedArray(satid, comm)
mHI = da.bincount(mHIsat, shared_edges=False)
zerosize = totalsize - mHI.cshape[0]
zeros = DistributedArray.cempty(cshape=(zerosize, ), dtype=mHI.local.dtype, comm=comm)
zeros.local[...] = 0
mHItotal = DistributedArray.concat(mHI, zeros, localsize=localsize)
return mHItotal
def _assign(self, mstellar):
'''Takes in M_stellar and gives M_HI in M_solar
'''
mm = 3e8 #5e7
f = 0.18 #0.35
alpha = 0.4 #0.35
mfrac = f*(mm/(mstellar + mm))**alpha
mh1 = mstellar * mfrac
return mh1
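# For scale (worked example of the relation above, values rounded): at
# mstellar = 3e8 the fraction is f*(0.5)**alpha = 0.18*0.5**0.4 ~ 0.136,
# i.e. M_HI ~ 4.1e7 Msun, and the fraction keeps falling as mstellar grows.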
def assignsat(self, msat, scatter=None):
mstellar = self.moster(msat, scatter=scatter)/self.h
mh1 = self._assign(mstellar)
mh1 = mh1*self.h #* np.exp(-self.mcut/msat)
return mh1
def assigncen(self, mcen, scatter=None):
mstellar = self.moster(mcen, scatter=scatter)/self.h
mh1 = self._assign(mstellar)
mh1 = mh1*self.h #* np.exp(-self.mcut/mcen)
return mh1
def moster(self, Mhalo, scatter=None):
"""
moster(Minf,z):
Returns the stellar mass (M*/h) given Minf and z from Table 1 and
Eq. (2,11-14) of Moster++13 [1205.5807].
This version works in terms of Msun/h units,
converting to Msun units inside the function.
To get "true" stellar mass, add 0.15 dex of lognormal scatter.
To get "observed" stellar mass, add between 0.1-0.45 dex extra scatter.
"""
z = self.zz
Minf = Mhalo/self.h
zzp1 = z/(1+z)
M1 = 10.0**(11.590+1.195*zzp1)
mM = 0.0351 - 0.0247*zzp1
beta = 1.376 - 0.826*zzp1
gamma = 0.608 + 0.329*zzp1
Mstar = 2*mM/( (Minf/M1)**(-beta) + (Minf/M1)**gamma )
Mstar*= Minf
if scatter is not None:
Mstar = 10**(np.log10(Mstar) + np.random.normal(0, scatter, Mstar.size))
return Mstar*self.h
#
def assignrsd(self, rsdfac, halocat, cencat, satcat, los=[0,0,1]):
hrsdpos = halocat['Position']+halocat['Velocity']*los * rsdfac
crsdpos = cencat['Position']+cencat['Velocity']*los * rsdfac
srsdpos = satcat['Position']+satcat['Velocity']*los * rsdfac
return hrsdpos, crsdpos, srsdpos
def createmesh(self, bs, nc, positions, weights):
'''use this to create mesh of HI
'''
pm = ParticleMesh(BoxSize=bs,Nmesh=[nc,nc,nc])
mesh = pm.create(mode='real', value=0)
comm = pm.comm
rankweight = sum([wt.sum() for wt in weights])
totweight = comm.allreduce(rankweight)
for wt in weights: wt /= totweight/float(nc)**3
for i in range(len(positions)):
lay = pm.decompose(positions[i])
mesh.paint(positions[i], mass=weights[i], layout=lay, hold=True)
return mesh
def createmesh_catalog(self, bs, nc, halocat, cencat, satcat, mode='galaxies', position='RSDpos', weight='HImass', tofield=False):
'''use this to create mesh of HI
'''
comm = halocat.comm
if mode == 'halos': catalogs = [halocat]
elif mode == 'galaxies': catalogs = [cencat, satcat]
elif mode == 'all': catalogs = [halocat, cencat, satcat]
else: print('Mode not recognized')
rankweight = sum([cat[weight].sum().compute() for cat in catalogs])
totweight = comm.allreduce(rankweight)
for cat in catalogs: cat[weight] /= totweight/float(nc)**3
allcat = MultipleSpeciesCatalog(['%d'%i for i in range(len(catalogs))], *catalogs)
mesh = allcat.to_mesh(BoxSize=bs,Nmesh=[nc,nc,nc],\
position=position,weight=weight)
if tofield: mesh = mesh.to_field()
return mesh
# ###########################################
class ModelHI_C(ModelHI_A):
'''Vanilla model with no centrals and satellites, only halo
Halos have the COM velocity but do not have any dispersion over it
'''
def __init__(self, aa):
super().__init__(aa)
self.normsat = 0
#self.alp = 1.0
#self.mcut = 1e9
#self.normhalo = 2e5*(1+3/self.zz**2)
#self.normhalo = 1.1e5*(1+4/self.zz)
self.alp = 0.9
self.mcut = 1e10
self.normhalo = 3.5e6*(1+1/self.zz)
def derivate(self, param, delta):
if param == 'alpha':
self.alp = (1+delta)*self.alp
elif param == 'mcut':
self.mcut = 10**( (1+delta)*np.log10(self.mcut))
elif param == 'norm':
self.mcut = 10**( (1+delta)*np.log10(self.normhalo))
else:
print('Parameter to vary not recongnized. Should be "alpha", "mcut" or "norm"')
def assignline(self, halocat, cencat, satcat):
mHIhalo = self.assignhalo(halocat['Mass'].compute())
mHIsat = self.assignsat(satcat['Mass'].compute())
mHIcen = self.assigncen(cencat['Mass'].compute())
return mHIhalo, mHIcen, mHIsat
def assignsat(self, msat):
return msat*0
def assigncen(self, mcen):
return mcen*0
def createmesh(self, bs, nc, positions, weights):
'''use this to create mesh of HI
'''
pm = ParticleMesh(BoxSize=bs,Nmesh=[nc,nc,nc])
mesh = pm.create(mode='real', value=0)
comm = pm.comm
rankweight = sum([wt.sum() for wt in weights])
totweight = comm.allreduce(rankweight)
for wt in weights: wt /= totweight/float(nc)**3
for i in range(len(positions)):
lay = pm.decompose(positions[i])
mesh.paint(positions[i], mass=weights[i], layout=lay, hold=True)
return mesh
def createmesh_catalog(self, bs, nc, halocat, cencat, satcat, mode='halos', position='RSDpos', weight='HImass', tofield=False):
'''use this to create mesh of HI
'''
comm = halocat.comm
if mode == 'halos': catalogs = [halocat]
else: print('Mode not recognized')
rankweight = sum([cat[weight].sum().compute() for cat in catalogs])
totweight = comm.allreduce(rankweight)
for cat in catalogs: cat[weight] /= totweight/float(nc)**3
allcat = MultipleSpeciesCatalog(['%d'%i for i in range(len(catalogs))], *catalogs)
mesh = allcat.to_mesh(BoxSize=bs,Nmesh=[nc,nc,nc],\
position=position,weight=weight)
if tofield: mesh = mesh.to_field()
return mesh
# ###########################################
class ModelHI_C2(ModelHI_C):
'''Vanilla model with no centrals and satellites, only halo
Halos have the COM velocity and a dispersion from VN18 added over it
'''
def __init__(self, aa):
super().__init__(aa)
self.vdisp = self._setupvdisp()
def _setupvdisp(self):
vzdisp0 = np.array([31, 34, 39, 44, 51, 54])
vzdispal = np.array([0.35, 0.37, 0.38, 0.39, 0.39, 0.40])
vdispz = np.arange(0, 6)
vdisp0fit = np.polyfit(vdispz, vzdisp0, 1)
vdispalfit = np.polyfit(vdispz, vzdispal, 1)
vdisp0 = self.zz * vdisp0fit[0] + vdisp0fit[1]
vdispal = self.zz * vdispalfit[0] + vdispalfit[1]
return lambda M: vdisp0*(M/1e10)**vdispal
def assignrsd(self, rsdfac, halocat, cencat, satcat, los=[0,0,1]):
dispersion = np.random.normal(0, self.vdisp(halocat['Mass'].compute())).reshape(-1, 1)
hvel = halocat['Velocity']*los + dispersion*los
hrsdpos = halocat['Position']+ hvel*rsdfac
crsdpos = cencat['Position']+cencat['Velocity']*los * rsdfac
srsdpos = satcat['Position']+satcat['Velocity']*los * rsdfac
return hrsdpos, crsdpos, srsdpos
# ###########################################
class ModelHI_D():
'''Model without explicit central/satellite catalogs: HI is assigned to halos,
a mass-dependent satellite fraction is split across pseudo-satellites, and a
velocity dispersion is added over the halo COM velocity for those satellites
'''
def __init__(self, aa):
self.aa = aa
self.zz = 1/aa-1
self.alp = (1.+2*self.zz)/(2.+2*self.zz)
self.mcut = 6e10*np.exp(-0.75*self.zz) + 1
self.normhalo = 1.7e9/(1+self.zz)**(5./3.)
self.nsatfhalo = 0.1
def fsat_h1(self, mhalo):
logmass = np.log10(mhalo)
mminf = 9.5 #10 - 0.2*self.zz
mhalf = 12.8 #13 - 0.1*self.zz
fsat = 0.5/(mhalf-mminf)**2 * (logmass-mminf)**2
fsat[logmass < mminf] = 0
fsat[fsat > 0.8] = 0.8
return fsat
def nsat_h1(self, mhalo):
return ((1 + self.nsatfhalo*mhalo / self.mcut)**0.5).astype(int) #2 #int(mhalo*0 + 2)
def vdisp(self, mhalo):
h = cosmo.efunc(self.zz)
return 1100. * (h * mhalo / 1e15) ** 0.33333
def assignline(self, halocat, cencat, satcat):
if halocat.comm.rank == 0:
if cencat is not None: print("\nCencat not used")
if satcat is not None: print("\nSatcat not used")
mHIhalo = self.assignhalo(halocat['Mass'].compute())
mHIsat = self.assignsat(halocat['Mass'].compute(), mHIhalo)
mHIcen = self.assigncen(mHIhalo, mHIsat)
#now repeat satellite catalog and reduce mass
nsat = self.nsat_h1(halocat['Mass'].compute())
mHIsat = np.repeat(mHIsat/nsat, nsat, axis=0)
return mHIhalo, mHIcen, mHIsat
def assignhalo(self, mhalo):
xx = (mhalo + 1e-30)/self.mcut+1e-10
mHI = xx**self.alp * np.exp(-1.0/xx)
mHI*= self.normhalo
return mHI
def assignsat(self, mhalo, mh1halo):
frac = self.fsat_h1(mhalo)
mHI = mh1halo*frac
return mHI
def assigncen(self, mHIhalo, mHIsat):
#Assumes every halo has a central...which it does...usually
return mHIhalo - mHIsat
def assignrsd(self, rsdfac, halocat, cencat, satcat, los=[0,0,1]):
hrsdpos = halocat['Position']+halocat['Velocity']*los * rsdfac
crsdpos = hrsdpos #cencat['Position']+cencat['Velocity']*los * rsdfac
#now satellites
nsat = self.nsat_h1(halocat['Mass'].compute())
dispersion = np.random.normal(0, np.repeat(self.vdisp(halocat['Mass'].compute()), nsat)).reshape(-1, 1)*los
hvel = np.repeat(halocat['Velocity'].compute(), nsat, axis=0)*los
hveldisp = hvel + dispersion
srsdpos = np.repeat(halocat['Position'].compute(), nsat, axis=0) + hveldisp*rsdfac
return hrsdpos, crsdpos, srsdpos
def createmesh(self, bs, nc, positions, weights):
'''use this to create mesh of HI
'''
pm = ParticleMesh(BoxSize=bs,Nmesh=[nc,nc,nc])
mesh = pm.create(mode='real', value=0)
comm = pm.comm
rankweight = sum([wt.sum() for wt in weights])
totweight = comm.allreduce(rankweight)
for wt in weights: wt /= totweight/float(nc)**3
for i in range(len(positions)):
lay = pm.decompose(positions[i])
mesh.paint(positions[i], mass=weights[i], layout=lay, hold=True)
return mesh
# ###########################################
class ModelHI_D2():
'''Vanilla model with no centrals and satellites, only halo
Halos have the COM velocity and a dispersion from VN18 added over it
'''
def __init__(self, aa):
self.aa = aa
self.zz = 1/aa-1
self.alp = (1.+2*self.zz)/(2.+2*self.zz)
self.mcut = 6e10*np.exp(-0.75*self.zz)
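# --- Illustrative sketch (not part of the original source above) ---
# ModelHI_D.assignhalo maps halo mass M to HI mass via
# M_HI = normhalo * x**alp * exp(-1/x), with x = M/mcut. The standalone numpy
# snippet below only evaluates that relation at a = 1/3 (z = 2) for a few
# hypothetical halo masses, reusing the parameter definitions from __init__.
import numpy as np

def _sketch_hi_mass(aa=1.0/3.0, mhalo=np.array([1e10, 1e11, 1e12, 1e13])):
    zz = 1.0/aa - 1.0
    alp = (1.0 + 2*zz) / (2.0 + 2*zz)
    mcut = 6e10*np.exp(-0.75*zz) + 1
    normhalo = 1.7e9 / (1 + zz)**(5.0/3.0)
    xx = (mhalo + 1e-30)/mcut + 1e-10
    return normhalo * xx**alp * np.exp(-1.0/xx)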
from collections import defaultdict
import numpy as np
import torch
from tqdm import tqdm
from env.envs import make
from learners.policies_algs import REINFORCE
from runners.runners import EnvRunnerNoController
from train_nas import make_rl_learner, make_nas_env, make_controller
from utils.additional import get_device
from utils.arguments import args
from utils.trajectory_transforms import get_nas_transforms
def train(device, args, env_id='FreewayNoFrameskip-v0', logdir=' '):
args = args['atari']
env = make(env_id=env_id, nenvs=args['nenvs'])
learner = make_rl_learner(env, args, device)
nasenv = make_nas_env(learner, args, device)
controller = make_controller(learner.model.space, device)
nasrunner = EnvRunnerNoController(nasenv, args['num_nas_runner_steps'],
asarray=False,
transforms=get_nas_transforms(),
step_var=nasenv.summarizer.step_var)
optimizer = torch.optim.Adam(controller.model.parameters(), args['nas_lr'], eps=args['optimizer_epsilon'])
lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, args['num_train_steps'])
nasalgo = REINFORCE(controller, optimizer, lr_scheduler=lr_scheduler,
entropy_coef=args['nas_entropy_coef'],
baseline_momentum=args['nas_baseline_momentum'],
step_var=nasenv.summarizer.step_var)
with tqdm(total=args['num_train_steps']) as pbar:
while int(learner.runner.step_var) < args['num_train_steps']:
pbar.update(int(learner.runner.step_var) - pbar.n)
trajectory = defaultdict(list, {"actions": []})
observations = []
rewards = []
resets = []
if controller.is_recurrent():
nasrunner.state["policy_state"] = controller.get_state()
nasrunner.state["env_steps"] = nasrunner.nsteps
for i in range(nasrunner.nsteps):
act = controller.act(nasrunner.state["latest_observation"])
done = nasrunner.get_next(act, observations, rewards, resets, trajectory)
# Only reset if the env is not batched. Batched envs should auto-reset.
if not nasrunner.nenvs and np.all(done):
nasrunner.state["env_steps"] = i + 1
nasrunner.state["latest_observation"] = nasrunner.env.reset()
if nasrunner.cutoff or (nasrunner.cutoff is None and controller.is_recurrent()):
pass
trajectory.update(observations=observations, rewards=rewards, resets=resets)
if nasrunner.asarray:
for key, val in trajectory.items():
try:
trajectory[key] = np.asarray(val)
"""Implement the GeometricProgram class"""
import sys
import warnings as pywarnings
from time import time
from collections import defaultdict
import numpy as np
from ..repr_conventions import lineagestr
from ..small_classes import CootMatrix, SolverLog, Numbers, FixedScalar
from ..small_scripts import appendsolwarning, initsolwarning
from ..keydict import KeyDict
from ..solution_array import SolutionArray
from .set import ConstraintSet
from ..exceptions import (InvalidPosynomial, Infeasible, UnknownInfeasible,
PrimalInfeasible, DualInfeasible, UnboundedGP,
InvalidLicense)
DEFAULT_SOLVER_KWARGS = {"cvxopt": {"kktsolver": "ldl"}}
SOLUTION_TOL = {"cvxopt": 1e-3, "mosek_cli": 1e-4, "mosek_conif": 1e-3}
class MonoEqualityIndexes:
"Class to hold MonoEqualityIndexes"
def __init__(self):
self.all = set()
self.first_half = set()
def _get_solver(solver, kwargs):
"""Get the solverfn and solvername associated with solver"""
if solver is None:
from .. import settings
try:
solver = settings["default_solver"]
except KeyError:
raise ValueError("No default solver was set during build, so"
" solvers must be manually specified.")
if solver == "cvxopt":
from ..solvers.cvxopt import optimize
elif solver == "mosek_cli":
from ..solvers.mosek_cli import optimize_generator
optimize = optimize_generator(**kwargs)
elif solver == "mosek_conif":
from ..solvers.mosek_conif import optimize
elif hasattr(solver, "__call__"):
solver, optimize = solver.__name__, solver
else:
raise ValueError("Unknown solver '%s'." % solver)
return solver, optimize
class GeometricProgram:
# pylint: disable=too-many-instance-attributes
"""Standard mathematical representation of a GP.
Attributes with side effects
----------------------------
`solver_out` and `solve_log` are set during a solve
`result` is set at the end of a solve if solution status is optimal
Examples
--------
>>> gp = gpkit.constraints.gp.GeometricProgram(
# minimize
x,
[ # subject to
x >= 1,
], {})
>>> gp.solve()
"""
_result = solve_log = solver_out = model = v_ss = nu_by_posy = None
choicevaridxs = integersolve = None
def __init__(self, cost, constraints, substitutions,
*, checkbounds=True, **_):
self.cost, self.substitutions = cost, substitutions
for key, sub in self.substitutions.items():
if isinstance(sub, FixedScalar):
sub = sub.value
if hasattr(sub, "units"):
sub = sub.to(key.units or "dimensionless").magnitude
self.substitutions[key] = sub
if not isinstance(sub, (Numbers, np.ndarray)):
raise TypeError("substitution {%s: %s} has invalid value type"
" %s." % (key, sub, type(sub)))
cost_hmap = cost.hmap.sub(self.substitutions, cost.vks)
if any(c <= 0 for c in cost_hmap.values()):
raise InvalidPosynomial("a GP's cost must be Posynomial")
hmapgen = ConstraintSet.as_hmapslt1(constraints, self.substitutions)
self.hmaps = [cost_hmap] + list(hmapgen)
self.gen() # Generate various maps into the posy- and monomials
if checkbounds:
self.check_bounds(err_on_missing_bounds=True)
def check_bounds(self, *, err_on_missing_bounds=False):
"Checks if any variables are unbounded, through equality constraints."
missingbounds = {}
for var, locs in self.varlocs.items():
upperbound, lowerbound = False, False
for i in locs:
if i not in self.meq_idxs.all:
if self.exps[i][var] > 0: # pylint:disable=simplifiable-if-statement
upperbound = True
else:
lowerbound = True
if upperbound and lowerbound:
break
if not upperbound:
missingbounds[(var, "upper")] = "."
if not lowerbound:
missingbounds[(var, "lower")] = "."
if not missingbounds:
return {} # all bounds found in inequalities
meq_bounds = gen_meq_bounds(missingbounds, self.exps, self.meq_idxs)
fulfill_meq_bounds(missingbounds, meq_bounds)
if missingbounds and err_on_missing_bounds:
raise UnboundedGP(
"\n\n".join("%s has no %s bound%s" % (v, b, x)
for (v, b), x in missingbounds.items()))
return missingbounds
def gen(self):
"""Generates nomial and solve data (A, p_idxs) from posynomials.
k [posys]: number of monomials (rows of A) present in each constraint
m_idxs [mons]: monomial indices of each posynomial
p_idxs [mons]: posynomial index of each monomial
cs, exps [mons]: coefficient and exponents of each monomial
varlocs: {vk: monomial indices of each variables' location}
meq_idxs: {all indices of equality mons} and {the first index of each}
varidxs: {vk: which column corresponds to it in A}
A [mons, vks]: sparse array of each monomials' variables' exponents
"""
self.k = [len(hmap) for hmap in self.hmaps]
self.m_idxs, self.p_idxs, self.cs, self.exps = [], [], [], []
self.varkeys = self.varlocs = defaultdict(list)
self.meq_idxs = MonoEqualityIndexes()
m_idx = 0
row, col, data = [], [], []
for p_idx, (N_mons, hmap) in enumerate(zip(self.k, self.hmaps)):
self.p_idxs.extend([p_idx]*N_mons)
self.m_idxs.append(slice(m_idx, m_idx+N_mons))
if getattr(self.hmaps[p_idx], "from_meq", False):
self.meq_idxs.all.add(m_idx)
if len(self.meq_idxs.all) > 2*len(self.meq_idxs.first_half):
self.meq_idxs.first_half.add(m_idx)
self.exps.extend(hmap)
self.cs.extend(hmap.values())
for exp in hmap:
if not exp: # space out A matrix with constants for mosek
row.append(m_idx)
col.append(0)
data.append(0)
for var in exp:
self.varlocs[var].append(m_idx)
m_idx += 1
self.p_idxs = np.array(self.p_idxs, "int32") # to use array equalities
self.varidxs = {vk: i for i, vk in enumerate(self.varlocs)}
self.choicevaridxs = {vk: i for i, vk in enumerate(self.varlocs)
if vk.choices}
for j, (var, locs) in enumerate(self.varlocs.items()):
row.extend(locs)
col.extend([j]*len(locs))
data.extend(self.exps[i][var] for i in locs)
self.A = CootMatrix(row, col, data)
# pylint: disable=too-many-statements, too-many-locals
def solve(self, solver=None, *, verbosity=1, gen_result=True, **kwargs):
"""Solves a GeometricProgram and returns the solution.
Arguments
---------
solver : str or function (optional)
By default uses a solver found during installation.
If "mosek_conif", "mosek_cli", or "cvxopt", uses that solver.
If a function, passes that function cs, A, p_idxs, and k.
verbosity : int (default 1)
If greater than 0, prints solver name and solve time.
gen_result : bool (default True)
If True, makes a human-readable SolutionArray from solver output.
**kwargs :
Passed to solver constructor and solver function.
Returns
-------
SolutionArray (or dict if gen_result is False)
"""
solvername, solverfn = _get_solver(solver, kwargs)
if verbosity > 0:
print("Using solver '%s'" % solvername)
print(" for %i free variables" % len(self.varlocs))
print(" in %i posynomial inequalities." % len(self.k))
solverargs = DEFAULT_SOLVER_KWARGS.get(solvername, {})
solverargs.update(kwargs)
if self.choicevaridxs and solvername == "mosek_conif":
solverargs["choicevaridxs"] = self.choicevaridxs
self.integersolve = True
starttime = time()
solver_out, infeasibility, original_stdout = {}, None, sys.stdout
try:
sys.stdout = SolverLog(original_stdout, verbosity=verbosity-2)
solver_out = solverfn(c=self.cs, A=self.A, meq_idxs=self.meq_idxs,
k=self.k, p_idxs=self.p_idxs, **solverargs)
except Infeasible as e:
infeasibility = e
except InvalidLicense as e:
raise InvalidLicense("license for solver \"%s\" is invalid."
% solvername) from e
except Exception as e:
raise UnknownInfeasible("Something unexpected went wrong.") from e
finally:
self.solve_log = "\n".join(sys.stdout)
sys.stdout = original_stdout
self.solver_out = solver_out
solver_out["solver"] = solvername
solver_out["soltime"] = time() - starttime
if verbosity > 0:
print("Solving took %.3g seconds." % solver_out["soltime"])
if infeasibility:
if isinstance(infeasibility, PrimalInfeasible):
msg = ("The model had no feasible points; relaxing some"
" constraints or constants will probably fix this.")
elif isinstance(infeasibility, DualInfeasible):
msg = ("The model ran to an infinitely low cost"
" (or was otherwise dual infeasible); bounding"
" the right variables will probably fix this.")
elif isinstance(infeasibility, UnknownInfeasible):
msg = ("Solver failed for an unknown reason. Relaxing"
" constraints/constants, bounding variables, or"
" using a different solver might fix it.")
if verbosity > 0 and solver_out["soltime"] < 1 and self.model:
print(msg + "\nSince the model solved in less than a second,"
" let's run `.debug()` to analyze what happened.\n")
return self.model.debug(solver=solver)
# else, raise a clarifying error
msg += (" Running `.debug()` or increasing verbosity may pinpoint"
" the trouble.")
raise infeasibility.__class__(msg) from infeasibility
if not gen_result:
return solver_out
# else, generate a human-readable SolutionArray
self._result = self.generate_result(solver_out, verbosity=verbosity-2)
return self.result
@property
def result(self):
"Creates and caches a result from the raw solver_out"
if not self._result:
self._result = self.generate_result(self.solver_out)
return self._result
def generate_result(self, solver_out, *, verbosity=0, dual_check=True):
"Generates a full SolutionArray and checks it."
if verbosity > 0:
soltime = solver_out["soltime"]
tic = time()
# result packing #
result = self._compile_result(solver_out) # NOTE: SIDE EFFECTS
if verbosity > 0:
print("Result packing took %.2g%% of solve time." %
((time() - tic) / soltime * 100))
tic = time()
# solution checking #
initsolwarning(result, "Solution Inconsistency")
try:
tol = SOLUTION_TOL.get(solver_out["solver"], 1e-5)
self.check_solution(result["cost"], solver_out['primal'],
solver_out["nu"], solver_out["la"], tol)
except Infeasible as chkerror:
msg = str(chkerror)
if not ("Dual" in msg and not dual_check):
appendsolwarning(msg, None, result, "Solution Inconsistency")
if verbosity > -4:
print("Solution check warning: %s" % msg)
if verbosity > 0:
print("Solution checking took %.2g%% of solve time." %
((time() - tic) / soltime * 100))
return result
def _generate_nula(self, solver_out):
if "nu" in solver_out:
# solver gave us monomial sensitivities, generate posynomial ones
solver_out["nu"] = nu = np.ravel(solver_out["nu"])
nu_by_posy = [nu[mi] for mi in self.m_idxs]
solver_out["la"] = la = np.array([sum(nup) for nup in nu_by_posy])
elif "la" in solver_out:
la = np.ravel(solver_out["la"])
if len(la) == len(self.hmaps) - 1:
# assume solver dropped the cost's sensitivity (always 1.0)
la = np.hstack(([1.0], la))
# solver gave us posynomial sensitivities, generate monomial ones
solver_out["la"] = la
z = np.log(self.cs) + self.A.dot(solver_out["primal"])
m_iss = [self.p_idxs == i for i in range(len(la))]
nu_by_posy = [la[p_i]*np.exp(z[m_is])/sum(np.exp(z[m_is]))
for p_i, m_is in enumerate(m_iss)]
solver_out["nu"] = | np.hstack(nu_by_posy) | numpy.hstack |
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Quantum SVM algorithm."""
import logging
import sys
import numpy as np
from qiskit import ClassicalRegister, QuantumCircuit, QuantumRegister
from qiskit.tools import parallel_map
from qiskit.tools.events import TextProgressBar
from qiskit.circuit import ParameterVector
from qiskit.aqua import aqua_globals
from qiskit.aqua.algorithms import QuantumAlgorithm
from qiskit.aqua import AquaError
from qiskit.aqua.algorithms.many_sample.qsvm._qsvm_binary import _QSVM_Binary
from qiskit.aqua.algorithms.many_sample.qsvm._qsvm_multiclass import _QSVM_Multiclass
from qiskit.aqua.utils.dataset_helper import get_num_classes
from qiskit.aqua.utils import split_dataset_to_data_and_labels
logger = logging.getLogger(__name__)
# pylint: disable=invalid-name
class QSVM(QuantumAlgorithm):
"""
Quantum SVM algorithm.
Internally, it will run the binary classification or multiclass classification
based on how many classes the data has.
"""
CONFIGURATION = {
'name': 'QSVM',
'description': 'QSVM Algorithm',
'input_schema': {
'$schema': 'http://json-schema.org/draft-07/schema#',
'id': 'QSVM_schema',
'type': 'object',
'properties': {
},
'additionalProperties': False
},
'problems': ['classification'],
'depends': [
{'pluggable_type': 'multiclass_extension'},
{'pluggable_type': 'feature_map',
'default': {
'name': 'SecondOrderExpansion',
'depth': 2
}
},
],
}
BATCH_SIZE = 1000
def __init__(self, feature_map, training_dataset=None, test_dataset=None, datapoints=None,
multiclass_extension=None):
"""
Args:
feature_map (FeatureMap): feature map module, used to transform data
training_dataset (dict, optional): training dataset.
test_dataset (dict, optional): testing dataset.
datapoints (numpy.ndarray, optional): prediction dataset.
multiclass_extension (MultiExtension, optional): if number of classes > 2 then
a multiclass scheme is needed.
Raises:
AquaError: use binary classifier for classes > 3
"""
super().__init__()
# check the validity of provided arguments if possible
if training_dataset is not None:
is_multiclass = get_num_classes(training_dataset) > 2
if is_multiclass:
if multiclass_extension is None:
raise AquaError('Dataset has more than two classes. '
'A multiclass extension must be provided.')
else:
if multiclass_extension is not None:
logger.warning("Dataset has just two classes. "
"Supplied multiclass extension will be ignored")
self.training_dataset = None
self.test_dataset = None
self.datapoints = None
self.class_to_label = None
self.label_to_class = None
self.num_classes = None
self.setup_training_data(training_dataset)
self.setup_test_data(test_dataset)
self.setup_datapoint(datapoints)
self.feature_map = feature_map
self.num_qubits = self.feature_map.num_qubits
if multiclass_extension is None:
qsvm_instance = _QSVM_Binary(self)
else:
qsvm_instance = _QSVM_Multiclass(self, multiclass_extension)
self.instance = qsvm_instance
@staticmethod
def _construct_circuit(x, feature_map, measurement, is_statevector_sim=False):
"""
If `is_statevector_sim` is True, we only build the circuits for Psi(x1)|0> rather than
Psi(x2)^dagger Psi(x1)|0>.
"""
x1, x2 = x
if len(x1) != len(x2):
raise ValueError("x1 and x2 must be the same dimension.")
q = QuantumRegister(feature_map.num_qubits, 'q')
c = ClassicalRegister(feature_map.num_qubits, 'c')
qc = QuantumCircuit(q, c)
# write input state from sample distribution
qc += feature_map.construct_circuit(x1, q)
if not is_statevector_sim:
qc += feature_map.construct_circuit(x2, q).inverse()
if measurement:
qc.barrier(q)
qc.measure(q, c)
return qc
@staticmethod
def _compute_overlap(idx, results, is_statevector_sim, measurement_basis):
if is_statevector_sim:
i, j = idx
# TODO: qiskit-terra did not support np.int64 to lookup result
v_a = results.get_statevector(int(i))
v_b = results.get_statevector(int(j))
# |<0|Psi^daggar(y) x Psi(x)|0>|^2, take the amplitude
tmp = np.vdot(v_a, v_b)
kernel_value = np.vdot(tmp, tmp).real # pylint: disable=no-member
else:
result = results.get_counts(idx)
kernel_value = result.get(measurement_basis, 0) / sum(result.values())
return kernel_value
def construct_circuit(self, x1, x2, measurement=False):
"""
Generate inner product of x1 and x2 with the given feature map.
The dimension of x1 and x2 must be the same.
Args:
x1 (numpy.ndarray): data points, 1-D array, dimension is D
x2 (numpy.ndarray): data points, 1-D array, dimension is D
measurement (bool): add measurement gates at the end
Returns:
QuantumCircuit: constructed circuit
"""
return QSVM._construct_circuit((x1, x2), self.feature_map, measurement)
@staticmethod
def get_kernel_matrix(quantum_instance, feature_map, x1_vec, x2_vec=None):
"""
Construct kernel matrix, if x2_vec is None, self-innerproduct is conducted.
Notes:
When using `statevector_simulator`,
we only build the circuits for Psi(x1)|0> rather than
Psi(x2)^dagger Psi(x1)|0>, and then we perform the inner product classically.
That is, for `statevector_simulator`,
the total number of circuits will be O(N) rather than
O(N^2) for `qasm_simulator`.
Args:
quantum_instance (QuantumInstance): quantum backend with all settings
feature_map (FeatureMap): a feature map that maps data to feature space
x1_vec (numpy.ndarray): data points, 2-D array, N1xD, where N1 is the number of data,
D is the feature dimension
x2_vec (numpy.ndarray): data points, 2-D array, N2xD, where N2 is the number of data,
D is the feature dimension
Returns:
numpy.ndarray: 2-D matrix, N1xN2
"""
use_parameterized_circuits = feature_map.support_parameterized_circuit
if x2_vec is None:
is_symmetric = True
x2_vec = x1_vec
else:
is_symmetric = False
is_statevector_sim = quantum_instance.is_statevector
measurement = not is_statevector_sim
measurement_basis = '0' * feature_map.num_qubits
mat = np.ones((x1_vec.shape[0], x2_vec.shape[0]))
# get all indices
if is_symmetric:
mus, nus = np.triu_indices(x1_vec.shape[0], k=1) # remove diagonal term
else:
mus, nus = np.indices((x1_vec.shape[0], x2_vec.shape[0]))
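# --- Illustrative sketch (not part of the original Qiskit source above) ---
# get_kernel_matrix evaluates only the upper triangle when x2_vec is None
# (the kernel is symmetric with ones on the diagonal) and every (i, j) pair
# otherwise. The numpy-only example below shows the two index patterns and how
# a symmetric matrix is filled back from the triangular results; the random
# values are stand-ins for actual kernel entries.
import numpy as np

def _sketch_kernel_indices(n1=4, n2=3, symmetric=True):
    if symmetric:
        mus, nus = np.triu_indices(n1, k=1)      # skip the diagonal
        vals = np.random.rand(mus.size)
        mat = np.ones((n1, n1))
        mat[mus, nus] = vals
        mat[nus, mus] = vals                     # mirror into the lower triangle
    else:
        mus, nus = np.indices((n1, n2))          # every (i, j) pair
        mat = np.zeros((n1, n2))
        mat[mus, nus] = np.random.rand(n1, n2)   # stand-in kernel evaluations
    return mat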
from __future__ import print_function, division, absolute_import
import unittest
import imp
import numpy as np
from openmdao.api import Problem, Group, IndepVarComp
from openconcept.utilities.dvlabel import DVLabel
from openmdao.utils.assert_utils import assert_near_equal, assert_check_partials
class TestBasic(unittest.TestCase):
def setUp(self):
self.nn = 3
self.p = Problem(model=Group())
ivc = IndepVarComp()
ivc.add_output(name='a_to_be_renamed', shape=(self.nn,))
ivc.add_output(name='b_to_be_renamed', shape=(self.nn,))
dvlabel = DVLabel([['a_to_be_renamed','a',np.ones(self.nn),None],
['b_to_be_renamed','b',np.ones(self.nn),None]])
self.p.model.add_subsystem(name='ivc',
subsys=ivc,
promotes_outputs=['*'])
self.p.model.add_subsystem(name='dvlabel',
subsys=dvlabel,
promotes_inputs=['*'],
promotes_outputs=['*'])
self.p.setup()
self.p['a_to_be_renamed'] = np.random.rand(self.nn,)
self.p['b_to_be_renamed'] = np.random.rand(self.nn,)
self.p.run_model()
def test_results(self):
a_in = self.p['a_to_be_renamed']
b_in = self.p['b_to_be_renamed']
a_out = self.p['a']
b_out = self.p['b']
assert_near_equal(a_in, a_out,1e-16)
assert_near_equal(b_in, b_out,1e-16)
def test_partials(self):
partials = self.p.check_partials(method='fd', out_stream=None)
assert_check_partials(partials)
class TestUnits(unittest.TestCase):
def setUp(self):
self.nn = 3
self.p = Problem(model=Group())
ivc = IndepVarComp()
ivc.add_output(name='a_to_be_renamed', shape=(self.nn,), units='m/s')
ivc.add_output(name='b_to_be_renamed', shape=(self.nn,), units='kg')
dvlabel = DVLabel([['a_to_be_renamed','a',np.ones(self.nn),'m/s'],
['b_to_be_renamed','b',np.ones(self.nn),'kg']])
import os
import math
import cv2 as cv
import scipy
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from scipy.stats import describe, linregress
from scipy.signal import detrend
from matplotlib.animation import FuncAnimation
#~~~~~~~~~~~~~~~HELPER FUNCTIONS FOR IDENTIFYING SURFACE LINE~~~~~~~~~~~~~~~~~~
# these functions help identify the surface line in PLIF images
def _get_frame(cap: cv.VideoCapture, N: int) -> np.ndarray :
"""
Get the Nth frame from the video capture in grayscale
Return the nth frame from an opencv video capture object as greyscale or
None if it fails.
Raises TypeError for some inputs. Raises IndexError if N is out of bounds.
Raises AssertionError if the video capture is not open.
"""
if not isinstance(cap,cv.VideoCapture):
raise TypeError("cap must be an opencv video capture object")
elif not cap.isOpened():
raise AssertionError("cap must be open")
elif not isinstance(N,int):
raise TypeError("N must be an int")
frame_count = cap.get(cv.CAP_PROP_FRAME_COUNT)
# Apparently, frameCount == -2147483648 or -1 for single image sequence
if frame_count < 0:
frame_count = 1
if not 0<=N<frame_count:
raise IndexError("N must be positive and <= frame count of cap")
# cap.set is expensive, only use if needed
if cap.get(cv.CAP_PROP_POS_FRAMES) != N:
cap.set(cv.CAP_PROP_POS_FRAMES, N)
ret_frame, frame = cap.read()
if ret_frame:
if len(frame.shape) == 2:
pass # already greyscale
elif frame.shape[2] == 3:
frame = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
elif frame.shape[2] == 4:
frame = cv.cvtColor(frame, cv.COLOR_BGRA2GRAY)
else:
raise TypeError("video source not supported")
return frame
else:
return None
def _get_grad_phase(src: np.ndarray) -> "tuple of np.ndarray" :
"""
Return the gradient and phase of the grayscale image
Return the gradient and phase of a grayscale image or None if it fails.
Uses Scharr gradient estimation. Normalizes quantites to use the entire
dynamic range of the src image data type.
Raises TypeError for some inputs.
"""
if not isinstance(src,np.ndarray):
raise TypeError("src must be a numpy array")
if not (src.dtype == np.uint8 or src.dtype == np.uint16):
raise TypeError("src must have type np.uint8 or np.uint16")
gradx = cv.Scharr(src, cv.CV_32F, 1, 0, 3)
grady = cv.Scharr(src, cv.CV_32F, 0, 1, 3)
grad = cv.magnitude(gradx, grady)
phase = cv.phase(gradx, grady)
if src.dtype == np.uint8:
kwargs = {'alpha':0,'beta':255,'norm_type':cv.NORM_MINMAX,
'dtype':cv.CV_8UC1}
else: # otherwise np.uint16
kwargs = {'alpha':0,'beta':65535,'norm_type':cv.NORM_MINMAX,
'dtype':cv.CV_16UC1}
grad = cv.normalize(grad , grad , **kwargs)
phase = cv.normalize(phase, phase, **kwargs)
return grad, phase
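# --- Illustrative sketch (not part of the original processing pipeline) ---
# Minimal demonstration of the Scharr gradient/phase step on a synthetic uint8
# image with one bright horizontal band; it reuses the cv/np imports at the top
# of this module and the array contents are arbitrary.
def _sketch_grad_phase():
    img = np.zeros((64, 64), dtype=np.uint8)
    img[30:34, :] = 255                  # bright band -> strong vertical gradient
    gx = cv.Scharr(img, cv.CV_32F, 1, 0)
    gy = cv.Scharr(img, cv.CV_32F, 0, 1)
    grad = cv.magnitude(gx, gy)
    phase = cv.phase(gx, gy)
    grad = cv.normalize(grad, None, 0, 255, cv.NORM_MINMAX, cv.CV_8UC1)
    phase = cv.normalize(phase, None, 0, 255, cv.NORM_MINMAX, cv.CV_8UC1)
    return grad, phase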
def _get_mask_from_gradient(src: np.ndarray, k: int) -> np.ndarray :
"""
Identifies large values of an image gradient with a binary mask.
Return a binary mask isolating the values of src that are sufficiently
large. Sufficiently large is determined by clustering the image into k
parts, then defining the background as the cluster with the largest number
of elements. All other clusters are considered sufficiently large and their
locations in the image are marked 1 in the binary mask. The background
is marked 0 in the binary mask.
Raises TypeError for some inputs.
"""
if not isinstance(src,np.ndarray):
raise TypeError("src must be a numpy array")
if not (src.dtype == np.uint8 or src.dtype == np.uint16):
raise TypeError("src must have type np.uint8 or np.uint16")
# Prepare the src for clustering
clusterable = np.array(src.ravel(), dtype=np.float32)
# kmeans requires some initial guess to iteratively improve
# Using this inital label seems to be more reliable than using PP or random
labels = np.zeros(clusterable.shape, dtype=np.int32)
labels[ np.argwhere(clusterable == clusterable.max()) ] = k-1
# generate and shape label array
criteria = (cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 20, 1.0)
_, labels, centers = cv.kmeans(clusterable, k, labels, criteria, 1,
cv.KMEANS_USE_INITIAL_LABELS)
labels = labels.reshape(-1, src.shape[0])
# exclude the background label from a binary mask where the background label
# has the smallest gradient value among the cluster centers, all other labels
# are included. The background label can be identified by noting that the
# center values are organized like: center[label] = gradient_value
dst = np.ones(src.shape, dtype=src.dtype)
dst[ labels == np.argmin(centers) ] = 0
return dst
def _get_mask_from_phase(src: np.ndarray, mask: np.ndarray,
direction: "'low' or 'high'") -> np.ndarray :
"""
Identifies the low or high phase of an image gradient with a binary mask.
Return a binary mask identifying a low valued cluster or the high valued
cluster as indicated by the direction input. The background cluster is
assumed to be the cluster with the largest count and is ignored.
Raises a TypeError or a ValueError for some inputs.
"""
if not isinstance(src,np.ndarray):
raise TypeError("src must be a numpy array")
elif not isinstance(mask,np.ndarray):
raise TypeError("mask must be a numpy array")
elif not (src.dtype == np.uint8 or src.dtype == np.uint16):
raise TypeError("src must have type np.uint8 or np.uint16")
elif not (mask.dtype == np.uint8 or mask.dtype == np.uint16):
raise TypeError("mask must have type np.uint8 or np.uint16")
elif not len(src.shape) == len(mask.shape) == 2:
raise ValueError("src and mask must have two dimensions (grayscale)")
elif not (direction == 'low' or direction == 'high'):
raise ValueError("direction must be 'low' or 'high'")
# make them the same dtype but preserve the dynamic range of src
if src.dtype != mask.dtype:
mask = np.array(mask,dtype=src.dtype)
# identify the foreground cluster with the correct directionality
clusterable = np.array(np.multiply(src,mask).ravel(), dtype=np.float32)
labels = np.zeros(clusterable.shape,dtype=np.int32)
criteria = (cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 20, 1.0)
# phase is normalized to take up the entire dynamic range, so choose to
# split the mask down the middle into an 'low' and 'high' phase
mid = 255//2 if (src.dtype == np.uint8) else 65535//2
# low phase is in the lower half and nonzero
labels[ np.argwhere(np.logical_and(clusterable > 0, clusterable < mid)) ] = 1
# high phase is in the upper half
labels[ np.argwhere(clusterable > mid) ] = 2
# TODO: determine if this clustering actually improves results
# compared to a simple binary threshold
_, labels, centers = cv.kmeans(clusterable, 3, labels, criteria, 1,
cv.KMEANS_USE_INITIAL_LABELS )
labels = np.array(labels.reshape(-1, src.shape[0]), dtype=src.dtype)
# To identify the low and high labels, must also identify the background
# label which is assumed to be the largest group by count
# recall phase data is clustered like: centers[label] = phase_val
label_by_count = np.argsort(np.bincount(labels.ravel()))
label_by_phase = np.argsort(centers.ravel())
background_label = label_by_count[-1]
label_by_phase_excluding_background = np.delete(
label_by_phase, np.where(label_by_phase == background_label))
low_label = label_by_phase_excluding_background[ 0]
high_label = label_by_phase_excluding_background[-1]
choose_label = int(low_label) if direction=='low' else int(high_label)
return cv.compare(labels,(choose_label,0,0,0),cv.CMP_EQ)
def _get_widest_connected_group(mask: np.ndarray) -> np.ndarray:
'''
Identifies the widest group (uppermost in case of ties) in the binary image.
Find the widest connected group in the binary mask. If there are multiple,
choose the uppermost among them. Requires an uint8 type image but assumes
that the input image is a binary mask (no check).
Raises a TypeError for some inputs.
'''
if not isinstance(mask,np.ndarray):
raise TypeError("mask must be a numpy array")
elif not (mask.dtype == np.uint8):
raise TypeError("mask must have type np.uint8")
num_groups, labels, stats, centroids = \
cv.connectedComponentsWithStats(mask,connectivity=8)
# identify candidates of connected components by area
idx_candidates = np.argsort(stats[:,cv.CC_STAT_AREA])[:-1]
# among the valid candidates, sort by width of connected components
stats_width = stats[idx_candidates,cv.CC_STAT_WIDTH]
widest_groups = np.argwhere(stats_width == np.amax(stats_width))
# among the widest groups, choose the one closest to the top of the image
# recall that the y axis for images is flipped
top_group = np.argmin(stats[idx_candidates,cv.CC_STAT_TOP][widest_groups])
# create a new mask from the label of the widest & highest cluster
mask_new = np.zeros(labels.shape, dtype=bool)
label = idx_candidates[widest_groups[top_group]]
mask_new[labels == label] = 1
return np.multiply(mask,mask_new)
def _get_mask_maxima(grad: np.ndarray, mask: np.ndarray) -> np.ndarray:
"""
Finds the local maxima of an image gradient where the mask is 1.
Returns a binary mask where the values are local maxima or a plateau
edge of grad. Applies the input mask before finding the local maxima.
Assumes (no check) that the mask is binary.
Raises a TypeError for some inputs.
"""
if not isinstance(grad,np.ndarray):
raise TypeError("grad must be a numpy array")
elif not isinstance(mask,np.ndarray):
raise TypeError("mask must be a numpy array")
elif not (mask.dtype == np.uint8 or mask.dtype == np.uint16):
raise TypeError("mask must have type np.uint8 or np.uint16")
se = np.array([1,0,1],dtype=np.uint8).reshape(-1,1)
grad_masked = np.multiply(grad,mask)
local_max = cv.dilate(grad_masked, se)
local_max = cv.compare(grad_masked,local_max,cv.CMP_GE)
return np.multiply(local_max,mask)
def _get_surfaceline(mask: np.ndarray, side: "'lower' or 'upper'") \
-> np.ndarray:
"""
Identifies the surface line from a binary mask.
Returns a 1 dimensional numpy array with the pixel values of the uppermost
or lowermost values in mask.
Raises a TypeError or ValueError for some inputs.
"""
if not isinstance(mask,np.ndarray):
raise TypeError("mask must be a numpy array")
elif not (mask.dtype == np.uint8 or mask.dtype == np.uint16):
raise TypeError("mask must have type np.uint8 or np.uint16")
elif not (side=='upper' or side=='lower'):
raise ValueError("direction must be 'low' or 'high'")
# TODO: why convert uint8 or uint16 into binary mask?
# just require a binary array in the first place?
# accept any non-zero value of the mask, mask must be converted to binary
mask = mask>0
n,m = mask.shape
if side=='upper':
args = (0,n,n)
else: # side=='lower'
args = (n,0,n)
weight_y = np.linspace(*args,dtype=int).reshape(-1,1).repeat(m,axis=1)
line = np.argmax(weight_y*mask,axis=0)
# TODO: replace this with numpy functions
# when columns are all 0, line returns an invalid point, replace with -1
for i, j in enumerate(line):
if mask[j,i]==0:
line[i] = -1
return line.ravel()
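# --- Illustrative sketch (not part of the original processing pipeline) ---
# _get_surfaceline picks, per column, one nonzero row of the binary mask by
# weighting rows linearly and taking argmax; empty columns become -1. The tiny
# numpy example below (hypothetical 4x3 mask, weights favouring small row
# indices) shows the same trick.
def _sketch_surfaceline_idea():
    mask = np.array([[0, 0, 0],
                     [1, 0, 0],
                     [1, 1, 0],
                     [0, 1, 0]], dtype=bool)
    n, m = mask.shape
    weights = np.linspace(n, 0, n).reshape(-1, 1)     # row 0 gets the largest weight
    line = np.argmax(weights * mask, axis=0)
    line[~mask[line, np.arange(m)]] = -1              # mark empty columns
    return line                                       # -> array([ 1,  2, -1])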
def _get_supersample(line: np.ndarray, grad: np.ndarray) -> np.ndarray:
"""
Identifies the supersample interpolation along the surface line of grad.
Returns a tuple of 1 dimensional numpy arrays. The first returns line
with values replaced to be negative if the supersample is invalid. The second
returns the supersample of the gradient or 0 if the supersample is invalid.
Negative values in the first array correspond to the following meanings:
-1 : no identified maxima in column
-2 : identified maxima is not a local maxima (all equal)
-3 : identified maxima is not a local maxima (on a line)
Raises a TypeError or ValueError for some inputs.
"""
if not isinstance(line,np.ndarray):
raise TypeError("line must be a numpy array")
elif not isinstance(grad,np.ndarray):
raise TypeError("grad must be a numpy array")
elif not len(line.shape) == 1:
raise ValueError("line must have one dimension")
elif not len(grad.shape) == 2:
raise ValueError("grad must have two dimensions")
supersample = np.zeros(line.shape)
# TODO: replace loop with array operations
for i,j in enumerate(line):
try:
upper = int(grad[j-1,i])
center = int(grad[j ,i])
lower = int(grad[j+1,i])
except IndexError:
line[i] = -1
continue
numerator = upper - lower
denominator = 2*upper + 2*lower - 4*center
if j == -1:
pass
elif upper==center and lower==center and upper==lower:
line[i] = -2
elif numerator!=0 and denominator==0:
line[i] = -3
else:
supersample[i] = numerator/denominator
# useful for debugging
#if not np.isfinite(supersample).all():
# print(f"non-finite value at {i}, {j}")
# print(f"numerator: {numerator}")
# print(f"denominator: {denominator}")
# raise ValueError
return line, supersample
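# --- Illustrative sketch (not part of the original processing pipeline) ---
# The supersampling above fits a parabola through the gradient values just
# above, at, and below the detected maximum; the vertex offset
# (upper - lower) / (2*upper + 2*lower - 4*center) is the sub-pixel correction
# added to the integer line. The self-contained check below samples a known
# parabola and recovers its true peak location.
def _sketch_subpixel_peak(true_peak=10.3):
    y = np.arange(20, dtype=float)
    g = -(y - true_peak)**2                 # maximum lies at y = true_peak
    j = int(np.argmax(g))
    upper, center, lower = g[j-1], g[j], g[j+1]
    offset = (upper - lower) / (2*upper + 2*lower - 4*center)
    return j + offset                       # ~= 10.3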
# The following functions each handle different combinations of the input
# values to lif(), this is explicit but perhaps too verbose.
def _loop_phase_mask_connected(cap: cv.VideoCapture, num_frames: int, k: int,
direction: "'low' or 'high'",
side: "'lower' or 'upper'") -> np.ndarray:
'''
Performs LIF for a specific case in the lif function.
Assumes valid input.
Considers the case:
use_column_max = False
use_phase_mask = True
connected = True
calibration_params = None
'''
width = int(cap.get(cv.CAP_PROP_FRAME_WIDTH))
surface = np.empty((num_frames,width))
for num in range(num_frames):
frame = _get_frame(cap,num)
grad, phase = _get_grad_phase(frame)
mask = _get_mask_from_gradient(grad, k)
mask_phase = _get_mask_from_phase(phase,mask,direction)
mask_connected = _get_widest_connected_group(mask_phase)
mask_maxima = _get_mask_maxima(grad,mask)*mask_connected
line = _get_surfaceline(mask_maxima,side)
line , supersample = _get_supersample(line, grad)
surface[num,:] = line + supersample
return surface
def _loop_phase_mask_connected_calibrate(cap: cv.VideoCapture,
num_frames: int, k: int,
direction: "'low' or 'high'",
side: "'lower' or 'upper'",
calibration_params: tuple,) \
-> np.ndarray:
'''
Performs LIF for a specific case in the lif function.
Assumes valid input.
Considers the case:
use_column_max = False
use_phase_mask = True
connected = True
calibration_params = Tuple
'''
width = int(cap.get(cv.CAP_PROP_FRAME_WIDTH))
surface = np.empty((num_frames,width))
for num in range(num_frames):
frame = _get_frame(cap,num)
frame = cv.undistort(frame, calibration_params[0], calibration_params[1])
grad, phase = _get_grad_phase(frame)
mask = _get_mask_from_gradient(grad, k)
mask_phase = _get_mask_from_phase(phase,mask,direction)
mask_connected = _get_widest_connected_group(mask_phase)
mask_maxima = _get_mask_maxima(grad,mask)*mask_connected
line = _get_surfaceline(mask_maxima,side)
line , supersample = _get_supersample(line, grad)
surface[num,:] = line + supersample
return surface
def _loop_phase_mask(cap: cv.VideoCapture, num_frames: int, k: int,
direction: "'low' or 'high'",
side: "'lower' or 'upper'") -> np.ndarray:
'''
Performs LIF for a specific case in the lif function.
Assumes valid input.
Considers the case:
use_column_max = False
use_phase_mask = True
connected = False
calibration_params = None
'''
width = int(cap.get(cv.CAP_PROP_FRAME_WIDTH))
surface = np.empty((num_frames,width))
for num in range(num_frames):
frame = _get_frame(cap,num)
grad, phase = _get_grad_phase(frame)
mask = _get_mask_from_gradient(grad, k)
mask_phase = _get_mask_from_phase(phase,mask,direction)
mask_maxima = _get_mask_maxima(grad,mask)*mask_phase
line = _get_surfaceline(mask_maxima,side)
line , supersample = _get_supersample(line, grad)
surface[num,:] = line + supersample
return surface
def _loop_phase_mask_calibrate(cap: cv.VideoCapture, num_frames: int, k: int,
direction: "'low' or 'high'",
side: "'lower' or 'upper'",
calibration_params: tuple) -> np.ndarray:
'''
Performs LIF for a specific case in the lif function.
Assumes valid input.
Considers the case:
use_column_max = False
use_phase_mask = True
connected = False
calibration_params = tuple
'''
width = int(cap.get(cv.CAP_PROP_FRAME_WIDTH))
surface = np.empty((num_frames,width))
for num in range(num_frames):
frame = _get_frame(cap,num)
frame = cv.undistort(frame, calibration_params[0], calibration_params[1])
grad, phase = _get_grad_phase(frame)
mask = _get_mask_from_gradient(grad, k)
mask_phase = _get_mask_from_phase(phase,mask,direction)
mask_maxima = _get_mask_maxima(grad,mask)*mask_phase
line = _get_surfaceline(mask_maxima,side)
line , supersample = _get_supersample(line, grad)
surface[num,:] = line + supersample
return surface
def _loop_local_maxima(cap: cv.VideoCapture, num_frames: int, k: int,
direction: "'low' or 'high'",
side: "'lower' or 'upper'") -> np.ndarray:
'''
Performs LIF for a specific case in the lif function.
Assumes valid input.
Considers the case:
use_column_max = False
use_phase_mask = False
connected = False
calibration_params = None
'''
width = int(cap.get(cv.CAP_PROP_FRAME_WIDTH))
surface = np.empty((num_frames,width))
for num in range(num_frames):
frame = _get_frame(cap,num)
grad, phase = _get_grad_phase(frame)
mask = _get_mask_from_gradient(grad, k)
mask_maxima = _get_mask_maxima(grad,mask)
line = _get_surfaceline(mask_maxima,side)
line , supersample = _get_supersample(line, grad)
surface[num,:] = line + supersample
return surface
def _loop_local_maxima_calibrate(cap: cv.VideoCapture, num_frames: int,
k: int, direction: "'low' or 'high'",
side: "'lower' or 'upper'",
calibration_params: tuple) -> np.ndarray:
'''
Performs LIF for a specific case in the lif function.
Assumes valid input.
Considers the case:
use_column_max = False
use_phase_mask = False
connected = False
calibration_params = tuple
'''
width = int(cap.get(cv.CAP_PROP_FRAME_WIDTH))
surface = np.empty((num_frames,width))
for num in range(num_frames):
frame = _get_frame(cap,num)
frame = cv.undistort(frame, calibration_params[0], calibration_params[1])
grad, phase = _get_grad_phase(frame)
mask = _get_mask_from_gradient(grad, k)
mask_maxima = _get_mask_maxima(grad,mask)
line = _get_surfaceline(mask_maxima,side)
line , supersample = _get_supersample(line, grad)
surface[num,:] = line + supersample
return surface
def _loop_maxima(cap: cv.VideoCapture, num_frames: int, k: int,
direction: "'low' or 'high'",
side: "'lower' or 'upper'") -> np.ndarray:
'''
Performs LIF for a specific case in the lif function.
Assumes valid input.
Considers the case:
use_column_max = True
use_phase_mask = False
connected = False
calibration_params = None
'''
width = int(cap.get(cv.CAP_PROP_FRAME_WIDTH))
surface = np.empty((num_frames,width))
for num in range(num_frames):
frame = _get_frame(cap,num)
grad, _ = _get_grad_phase(frame)
mask_maxima = np.zeros(grad.shape, dtype=np.uint8)
mask_maxima[np.argmax(grad,axis=0),np.arange(width)] = 1
line = _get_surfaceline(mask_maxima,side)
line , supersample = _get_supersample(line, grad)
surface[num,:] = line + supersample
return surface
def _loop_maxima_calibrate(cap: cv.VideoCapture, num_frames: int, k: int,
direction: "'low' or 'high'",
side: "'lower' or 'upper'",
calibration_params: tuple) -> np.ndarray:
'''
Performs LIF for a specific case in the lif function.
Assumes valid input.
Considers the case:
use_column_max = True
use_phase_mask = False
connected = False
calibration_params = tuple
'''
width = int(cap.get(cv.CAP_PROP_FRAME_WIDTH))
surface = np.empty((num_frames,width))
'''
Computer vision assignment 2 by <NAME>
A2_2d_transformation.py
Implementation of various 2-d transformations.
* Status: Implements all basic specifications; does not include 'reduce artifacts (extra credit)'.
* GitHub Link: https://github.com/pseudowasabi/computer-vision-exercises/tree/master/CV_A2_
'''
import cv2
import numpy as np
import math
def get_transformed_image(img, M):
# initialize plane
plane = np.empty((801, 801))
for i in range(801):
for j in range(801):
plane[i][j] = 255
'''
0 0 0 0 0
0 0 0 0 0
0 0 0 0 0
(3, 5) shape image
center is (400, 400)
left top is (399, 398), corresponding to (center.x - shape[0] // 2, center.y - shape[1] // 2)
right bottom is (401, 402), corresponding to (center.x + shape[0] // 2, center.y + shape[1] // 2)
- assumes every input image has an odd size in both height and width.
[solution]
1. make homogeneous coordinates first for the original image.
2. calculate the transformed coordinates and set the intensity at each transformed position.
'''
origin_y_coord = np.zeros((img.shape[0], img.shape[1]))
origin_x_coord = np.zeros((img.shape[0], img.shape[1]))
# do not use zeros_like (data type would be uint8, however desired data type is at least uint16)
y_range = range(400 - img.shape[0] // 2, 400 + img.shape[0] // 2 + 1)
x_range = range(400 - img.shape[1] // 2, 400 + img.shape[1] // 2 + 1)
# (i, 0 -> y), (j, 1 -> x)
i = 0
for y in y_range:
j = 0
for x in x_range:
if (i in range(img.shape[0])) and (j in range(img.shape[1])):
origin_y_coord[i][j] = y
origin_x_coord[i][j] = x
j += 1
i += 1
#print(origin_y_coord)
#print(origin_x_coord)
for i in range(img.shape[0]): # y range
for j in range(img.shape[1]): # x range
x_prime = M[0][0] * origin_x_coord[i][j] + M[0][1] * origin_y_coord[i][j] + M[0][2] * 1
y_prime = M[1][0] * origin_x_coord[i][j] + M[1][1] * origin_y_coord[i][j] + M[1][2] * 1
if (int(y_prime) in range(801)) and (int(x_prime) in range(801)):
plane[int(y_prime)][int(x_prime)] = img[i][j]
else:
print("out of range", i, j, origin_y_coord[i][j], origin_x_coord[i][j], y_prime, x_prime)
# plus - reducing artifact (not implemented yet)
# check x-dir and y-dir respectively, then fill the blanks along each directions
cv2.arrowedLine(plane, (400, 800), (400, 0), (0, 0, 0), thickness=2, tipLength=0.01)
cv2.arrowedLine(plane, (0, 400), (800, 400), (0, 0, 0), thickness=2, tipLength=0.01)
return plane
img_smile = cv2.imread('./smile.png', cv2.IMREAD_GRAYSCALE)
current_M = np.identity(3)
plane = get_transformed_image(img_smile, current_M)
_M = {} # save all transformation matrix into hash map
degree_5 = math.pi / 36.
cos_5 = math.cos(degree_5)
sin_5 = math.sin(degree_5)
_M[ord('a')] = np.array([[1., 0., -5.], [0., 1., 0.], [0., 0., 1.]]) # move left by 5 pixels
_M[ord('d')] = np.array([[1., 0., +5.], [0., 1., 0.], [0., 0., 1.]]) # move right by 5 pixels
_M[ord('w')] = np.array([[1., 0., 0.], [0., 1., -5.], [0., 0., 1.]]) # move upward by 5 pixels
_M[ord('s')] = np.array([[1., 0., 0.], [0., 1., +5.], [0., 0., 1.]]) # move downward by 5 pixels
_M[ord('R')] = np.array([[cos_5, -sin_5, 0.], [sin_5, cos_5, 0.], [0., 0., 1.]]) # rotate CW by 5 degrees
_M[ord('r')] = np.array([[cos_5, sin_5, 0.], [-sin_5, cos_5, 0.], [0., 0., 1.]])
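# --- Illustrative sketch (not part of the original assignment code above) ---
# The entries of _M are 3x3 homogeneous transforms, so transformations compose
# by matrix multiplication (the rightmost factor is applied first). The short
# example below, using the cos_5/sin_5 values defined above and a hypothetical
# point, rotates and then translates in one combined matrix.
def _sketch_compose_transforms():
    rotate = np.array([[cos_5, -sin_5, 0.], [sin_5, cos_5, 0.], [0., 0., 1.]])
    move_left = np.array([[1., 0., -5.], [0., 1., 0.], [0., 0., 1.]])
    M = move_left @ rotate                 # rotate first, then shift left by 5 px
    p = np.array([10., 0., 1.])            # homogeneous point (x, y, 1)
    return M @ p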
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Utilities for dealing with HEALPix projections and mappings."""
import copy
import numpy as np
from astropy import units as u
from astropy.coordinates import SkyCoord
from astropy.io import fits
from astropy.units import Quantity
from gammapy.utils.array import is_power2
from ..axes import MapAxes
from ..coord import MapCoord, skycoord_to_lonlat
from ..geom import Geom, pix_tuple_to_idx
from ..utils import INVALID_INDEX, coordsys_to_frame, frame_to_coordsys
from .io import HPX_FITS_CONVENTIONS, HpxConv
from .utils import (
coords_to_vec,
get_nside_from_pix_size,
get_pix_size_from_nside,
get_subpixels,
get_superpixels,
match_hpx_pix,
nside_to_order,
parse_hpxregion,
ravel_hpx_index,
unravel_hpx_index,
)
# Not sure if we should expose this in the docs or not:
# HPX_FITS_CONVENTIONS, HpxConv
__all__ = ["HpxGeom"]
class HpxGeom(Geom):
"""Geometry class for HEALPIX maps.
This class performs mapping between partial-sky indices (pixel
number within a HEALPIX region) and all-sky indices (pixel number
within an all-sky HEALPIX map). Multi-band HEALPIX geometries use
a global indexing scheme that assigns a unique pixel number based
on the all-sky index and band index. In the single-band case the
global index is the same as the HEALPIX index.
By default the constructor will return an all-sky map.
Partial-sky maps can be defined with the ``region`` argument.
Parameters
----------
nside : `~numpy.ndarray`
HEALPIX nside parameter, the total number of pixels is
12*nside*nside. For multi-dimensional maps one can pass
either a single nside value or a vector of nside values
defining the pixel size for each image plane. If nside is not
a scalar then its dimensionality should match that of the
non-spatial axes.
nest : bool
True -> 'NESTED', False -> 'RING' indexing scheme
frame : str
Coordinate system, "icrs" | "galactic"
region : str or tuple
Spatial geometry for partial-sky maps. If none the map will
encompass the whole sky. String input will be parsed
according to HPX_REG header keyword conventions. Tuple
input can be used to define an explicit list of pixels
encompassed by the geometry.
axes : list
Axes for non-spatial dimensions.
"""
is_hpx = True
is_region = False
def __init__(self, nside, nest=True, frame="icrs", region=None, axes=None):
# FIXME: Require NSIDE to be power of two when nest=True
self._nside = np.array(nside, ndmin=1)
self._axes = MapAxes.from_default(axes, n_spatial_axes=1)
if self.nside.size > 1 and self.nside.shape != self.shape_axes:
raise ValueError(
"Wrong dimensionality for nside. nside must "
"be a scalar or have a dimensionality consistent "
"with the axes argument."
)
self._nest = nest
self._frame = frame
self._ipix = None
self._region = region
self._create_lookup(region)
self._npix = self._npix * np.ones(self.shape_axes, dtype=int)
def _create_lookup(self, region):
"""Create local-to-global pixel lookup table."""
if isinstance(region, str):
ipix = [
self.get_index_list(nside, self._nest, region)
for nside in self._nside.flat
]
self._ipix = [
ravel_hpx_index((p, i * np.ones_like(p)), np.ravel(self.npix_max))
for i, p in enumerate(ipix)
]
self._region = region
self._indxschm = "EXPLICIT"
self._npix = np.array([len(t) for t in self._ipix])
if self.nside.ndim > 1:
self._npix = self._npix.reshape(self.nside.shape)
self._ipix = np.concatenate(self._ipix)
elif isinstance(region, tuple):
region = [np.asarray(t) for t in region]
m = np.any(np.stack([t >= 0 for t in region]), axis=0)
region = [t[m] for t in region]
self._ipix = ravel_hpx_index(region, self.npix_max)
self._ipix = np.unique(self._ipix)
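# --- Illustrative sketch (not part of the original gammapy source above) ---
# A HEALPix map with parameter nside has 12 * nside**2 pixels, and the partial-
# sky geometry above keeps a lookup from local pixels to a global index that
# also encodes the non-spatial band. The numpy-only example below shows one
# possible raveling convention for hypothetical pixel/band values; the real
# bookkeeping is done by ravel_hpx_index/unravel_hpx_index.
import numpy as np

def _sketch_hpx_global_index(nside=8, n_bands=2):
    npix = 12 * nside**2                     # all-sky pixel count for this nside
    local_pix = np.array([3, 17, 42])        # hypothetical partial-sky pixels
    band = np.array([0, 1, 1])               # band index of each pixel
    return np.ravel_multi_index((band, local_pix), (n_bands, npix))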
import numpy as np
from ..core.structures import Tensor
from ..utils import sliceT, genToeplitzMatrix
import itertools
class basicTensor():
""" Generates a dense or sparse tensor of any dimension and fills it accordingly
Parameters
----------
dim : int
specifies the dimensions of the tensor
distr (optional): string
Specifies the random generation using a class of the numpy.random module
distr_type (optional) : int
Number of indices to not fix. 0 will be applied globally, 1 will apply to fibers, 2 to slices, etc.
Returns
-------
tensor: Tensor
Generated tensor according to the parameters specified
"""
def __init__(self, dim, distr='uniform', distr_type=0):
self.dim = dim
self.distr_type = distr_type
self.distr = distr
def _predefined_distr(self, dim):
distrlist = {'uniform':np.random.uniform(size=dim),
'normal':np.random.normal(size=dim),
'triangular': np.random.triangular(-1, 0, 1, size=dim),
'standard-t': np.random.standard_t(10, size=dim),
'ones': np.ones(dim),
'zeros': np.zeros(dim)}
if self.distr not in distrlist:
raise NameError("The distribution {} is not an available one.\
Please refer to the list of implementations: {}".format(self.distr, distrlist.keys()))
return distrlist[self.distr]
def dense(self, fxdind=None):
""" Defines a dense Tensor
Returns
-------
tensor : Tensor
"""
# fxdind: fixed indices
if self.distr_type == 0:
tensor = self._predefined_distr(self.dim)
else:
tensor = np.random.uniform(size=self.dim)
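# --- Illustrative sketch (not part of the original module above) ---
# _predefined_distr maps a distribution name to a numpy.random call evaluated
# at the requested shape. The standalone example below builds a small dense
# array the same way for a few of the supported names.
import numpy as np

def _sketch_dense_tensor(dim=(3, 4, 2), distr='normal'):
    generators = {
        'uniform': lambda d: np.random.uniform(size=d),
        'normal': lambda d: np.random.normal(size=d),
        'ones': np.ones,
        'zeros': np.zeros,
    }
    return generators[distr](dim)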
"""
Simple computation graph implementation. Doesn't handle
cycles.
Used to do back propagation for a batch normalization node.
"""
from collections import deque
import numpy as np
class CompNode(object):
def __init__(self):
self.name = "unamed"
self.output = None
def cprint(self, *args):
#print(args)
pass
class PlusNode(CompNode):
def __init__(self):
CompNode.__init__(self)
self.name = "plus"
def execute(self, inputs):
self.cprint("Executing:", self.name, inputs)
self.output = inputs[0] + inputs[1]
def reverse(self, dout, output_map):
return [dout, dout]
class SubNode(CompNode):
def __init__(self):
CompNode.__init__(self)
self.name = "minus"
def execute(self, inputs):
self.cprint("Executing:", self.name, inputs)
self.output = inputs[0] - inputs[1]
def reverse(self, dout, output_map):
return [dout, -dout]
class SqrtNode(CompNode):
def __init__(self):
CompNode.__init__(self)
self.name = "sqrt"
def execute(self, inputs):
self.cprint("Executing:", self.name, inputs)
self.output = np.sqrt(inputs[0])
self.cprint("Sqrt out", self.output)
def reverse(self, dout, output_map):
out = dout / (2 *self.output)
return [out]
class DivNoode(CompNode):
def __init__(self):
self.name = "divide"
def execute(self, inputs):
self.cprint("Executing:", self.name, inputs)
self.numerator = inputs[0]
self.denominator = inputs[1]
self.output = inputs[0] / inputs[1]
self.cprint("divout", self.output)
def reverse(self, dout, output_map):
out0 = dout / self.denominator
out1 = -dout * self.numerator / (self.denominator * self.denominator)
return [out0, out1]
class InputNode(CompNode):
def __init__(self):
self.name = "input"
pass
def execute(self, inputs):
self.cprint("Executing:", self.name, inputs)
self.output = inputs[0]
def reverse(self, dout, output_map):
if 'dx' in output_map:
#print("dx - adding")
#print("existing: ", output_map['dx'])
#print("incoming:", dout)
output_map['dx'] += dout
else:
output_map['dx'] = dout
class ConstantNode(CompNode):
def __init__(self, value):
self.name = "constant"
self.value = value
pass
def execute(self, inputs):
self.cprint("Executing:", self.name, inputs)
self.output = self.value
def reverse(self, dout, output_map):
return [np.zeros_like(dout)]
class MuNode(CompNode):
def __init__(self):
self.name = "average"
pass
def execute(self, inputs):
self.cprint("Executing:", self.name, inputs)
self.output = np.mean(inputs[0], axis = 0)
self.shape = inputs[0].shape
self.cprint("mu", self.output)
def reverse(self, dout, output_map):
vsum = np.sum(dout, axis = 0)
# by convention, we want an N x D output. Adding vsum
# to an array of 0's will replicate the rows.
out = (vsum + np.zeros(self.shape)) /self.shape[0]
return [out]
class SigmaNode(CompNode):
def __init__(self):
self.name = "sigma"
pass
def execute(self, inputs):
self.cprint("Executing:", self.name, inputs)
self.x = inputs[0]
out = np.var(self.x, axis = 0)
out = np.zeros_like(self.x)
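# --- Illustrative sketch (not part of the original module above) ---
# MuNode.reverse spreads the upstream gradient of a per-column mean evenly over
# the N rows (d mean_j / d x_ij = 1/N). The small numpy check below compares
# that analytic rule against a finite-difference estimate for one entry of a
# random input.
import numpy as np

def _sketch_check_mean_grad(eps=1e-6):
    x = np.random.rand(5, 3)
    dout = np.random.rand(3)                      # upstream gradient w.r.t. the mean
    analytic = (dout + np.zeros_like(x)) / x.shape[0]
    x_pert = x.copy()
    x_pert[2, 1] += eps
    numeric = (np.mean(x_pert, axis=0) - np.mean(x, axis=0)).dot(dout) / eps
    return analytic[2, 1], numeric                # the two values should agree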
import glob
import numpy as np
import os.path as osp
from PIL import Image
import random
import struct
from torch.utils.data import Dataset
import scipy.ndimage as ndimage
import cv2
from skimage.measure import block_reduce
import h5py
import scipy.ndimage as ndimage
class BatchLoader(Dataset):
def __init__(self, dataRoot, dirs = ['main_xml', 'main_xml1',
'mainDiffLight_xml', 'mainDiffLight_xml1',
'mainDiffMat_xml', 'mainDiffMat_xml1'],
imHeight = 240, imWidth = 320,
phase='TRAIN', rseed = None, cascadeLevel = 0,
isLight = False, isAllLight = False,
envHeight = 8, envWidth = 16, envRow = 120, envCol = 160,
SGNum = 12 ):
if phase.upper() == 'TRAIN':
self.sceneFile = osp.join(dataRoot, 'train.txt')
elif phase.upper() == 'TEST':
self.sceneFile = osp.join(dataRoot, 'test.txt')
else:
print('Unrecognized phase for data loader')
assert(False )
with open(self.sceneFile, 'r') as fIn:
sceneList = fIn.readlines()
sceneList = [x.strip() for x in sceneList]
self.imHeight = imHeight
self.imWidth = imWidth
self.phase = phase.upper()
self.cascadeLevel = cascadeLevel
self.isLight = isLight
self.isAllLight = isAllLight
self.envWidth = envWidth
self.envHeight = envHeight
self.envRow = envRow
self.envCol = envCol
self.envWidth = envWidth
self.envHeight = envHeight
self.SGNum = SGNum
shapeList = []
for d in dirs:
shapeList = shapeList + [osp.join(dataRoot, d, x) for x in sceneList ]
shapeList = sorted(shapeList)
print('Shape Num: %d' % len(shapeList ) )
self.imList = []
for shape in shapeList:
imNames = sorted(glob.glob(osp.join(shape, 'im_*.hdr') ) )
self.imList = self.imList + imNames
if isAllLight:
self.imList = [x for x in self.imList if
osp.isfile(x.replace('im_', 'imenv_') ) ]
if cascadeLevel > 0:
self.imList = [x for x in self.imList if
osp.isfile(x.replace('im_',
'imenv_').replace('.hdr', '_%d.h5' %
(self.cascadeLevel - 1 ) ) ) ]
print('Image Num: %d' % len(self.imList ) )
# BRDF parameter
self.albedoList = [x.replace('im_', 'imbaseColor_').replace('hdr', 'png') for x in self.imList ]
self.normalList = [x.replace('im_', 'imnormal_').replace('hdr', 'png') for x in self.imList ]
self.normalList = [x.replace('DiffLight', '') for x in self.normalList ]
self.roughList = [x.replace('im_', 'imroughness_').replace('hdr', 'png') for x in self.imList ]
self.depthList = [x.replace('im_', 'imdepth_').replace('hdr', 'dat') for x in self.imList ]
self.depthList = [x.replace('DiffLight', '') for x in self.depthList ]
self.depthList = [x.replace('DiffMat', '') for x in self.depthList ]
self.segList = [x.replace('im_', 'immask_').replace('hdr', 'png') for x in self.imList ]
self.segList = [x.replace('DiffMat', '') for x in self.segList ]
if self.cascadeLevel == 0:
if self.isLight:
self.envList = [x.replace('im_', 'imenv_') for x in self.imList ]
else:
if self.isLight:
self.envList = [x.replace('im_', 'imenv_') for x in self.imList ]
self.envPreList = [x.replace('im_', 'imenv_').replace('.hdr', '_%d.h5' % (self.cascadeLevel -1) ) for x in self.imList ]
self.albedoPreList = [x.replace('im_', 'imbaseColor_').replace('.hdr', '_%d.h5' % (self.cascadeLevel - 1) ) for x in self.imList ]
self.normalPreList = [x.replace('im_', 'imnormal_').replace('.hdr', '_%d.h5' % (self.cascadeLevel-1) ) for x in self.imList ]
self.roughPreList = [x.replace('im_', 'imroughness_').replace('.hdr', '_%d.h5' % (self.cascadeLevel-1) ) for x in self.imList ]
self.depthPreList = [x.replace('im_', 'imdepth_').replace('.hdr', '_%d.h5' % (self.cascadeLevel-1) ) for x in self.imList ]
self.diffusePreList = [x.replace('im_', 'imdiffuse_').replace('.hdr', '_%d.h5' % (self.cascadeLevel - 1) ) for x in self.imList ]
self.specularPreList = [x.replace('im_', 'imspecular_').replace('.hdr', '_%d.h5' % (self.cascadeLevel - 1) ) for x in self.imList ]
# Permute the image list
self.count = len(self.albedoList )
self.perm = list(range(self.count ) )
if rseed is not None:
random.seed(0)
random.shuffle(self.perm )
def __len__(self):
return len(self.perm )
def __getitem__(self, ind):
# Read segmentation
seg = 0.5 * (self.loadImage(self.segList[self.perm[ind] ] ) + 1)[0:1, :, :]
segArea = np.logical_and(seg > 0.49, seg < 0.51 ).astype(np.float32 )
segEnv = (seg < 0.1).astype(np.float32 )
segObj = (seg > 0.9)
if self.isLight:
segObj = segObj.squeeze()
segObj = ndimage.binary_erosion(segObj, structure=np.ones((7, 7) ),
border_value=1)
segObj = segObj[np.newaxis, :, :]
segObj = segObj.astype(np.float32 )
# Read Image
im = self.loadHdr(self.imList[self.perm[ind] ] )
# Random scale the image
im, scale = self.scaleHdr(im, seg)
# Read albedo
albedo = self.loadImage(self.albedoList[self.perm[ind] ], isGama = False)
albedo = (0.5 * (albedo + 1) ) ** 2.2
# normalize the normal vector so that it will be unit length
normal = self.loadImage(self.normalList[self.perm[ind] ] )
normal = normal / np.sqrt(np.maximum(np.sum(normal * normal, axis=0), 1e-5) )[np.newaxis, :]
# Read roughness
rough = self.loadImage(self.roughList[self.perm[ind] ] )[0:1, :, :]
# Read depth
depth = self.loadBinary(self.depthList[self.perm[ind] ])
if self.isLight == True:
envmaps, envmapsInd = self.loadEnvmap(self.envList[self.perm[ind] ] )
envmaps = envmaps * scale
if self.cascadeLevel > 0:
envmapsPre = self.loadH5(self.envPreList[self.perm[ind] ] )
if envmapsPre is None:
print("Wrong envmap pred")
envmapsInd = envmapsInd * 0
envmapsPre = np.zeros((84, 120, 160), dtype=np.float32 )
if self.cascadeLevel > 0:
# Read albedo
albedoPre = self.loadH5(self.albedoPreList[self.perm[ind] ] )
albedoPre = albedoPre / np.maximum(np.mean(albedoPre ), 1e-10) / 3
# normalize the normal vector so that it will be unit length
normalPre = self.loadH5(self.normalPreList[self.perm[ind] ] )
normalPre = normalPre / np.sqrt(np.maximum(np.sum(normalPre * normalPre, axis=0), 1e-5) )[np.newaxis, :]
normalPre = 0.5 * (normalPre + 1)
# Read roughness
roughPre = self.loadH5(self.roughPreList[self.perm[ind] ] )[0:1, :, :]
roughPre = 0.5 * (roughPre + 1)
# Read depth
depthPre = self.loadH5(self.depthPreList[self.perm[ind] ] )
depthPre = depthPre / np.maximum(np.mean(depthPre), 1e-10) / 3
diffusePre = self.loadH5(self.diffusePreList[self.perm[ind] ] )
diffusePre = diffusePre / max(diffusePre.max(), 1e-10)
specularPre = self.loadH5(self.specularPreList[self.perm[ind] ] )
specularPre = specularPre / max(specularPre.max(), 1e-10)
batchDict = {'albedo': albedo,
'normal': normal,
'rough': rough,
'depth': depth,
'segArea': segArea,
'segEnv': segEnv,
'segObj': segObj,
'im': im,
'name': self.imList[self.perm[ind] ]
}
if self.isLight:
batchDict['envmaps'] = envmaps
batchDict['envmapsInd'] = envmapsInd
if self.cascadeLevel > 0:
batchDict['envmapsPre'] = envmapsPre
if self.cascadeLevel > 0:
batchDict['albedoPre'] = albedoPre
batchDict['normalPre'] = normalPre
batchDict['roughPre'] = roughPre
batchDict['depthPre'] = depthPre
batchDict['diffusePre'] = diffusePre
batchDict['specularPre'] = specularPre
return batchDict
def loadImage(self, imName, isGama = False):
if not(osp.isfile(imName ) ):
print(imName )
assert(False )
im = Image.open(imName)
im = im.resize([self.imWidth, self.imHeight], Image.ANTIALIAS )
im = np.asarray(im, dtype=np.float32)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""A graph generator based on the PC algorithm [Kalisch2007].
[Kalisch2007] <NAME> and <NAME>. Estimating
high-dimensional directed acyclic graphs with the pc-algorithm. In The
Journal of Machine Learning Research, Vol. 8, pp. 613-636, 2007.
"""
from __future__ import print_function
import logging
import math
from itertools import combinations, permutations
import pandas as pd
import networkx as nx
import numpy as np
from gsq.ci_tests import ci_test_bin, ci_test_dis
from gsq.gsq_testdata import bin_data, dis_data
# from networkx.drawing.tests.test_pylab import plt
from scipy.stats import norm
import matplotlib.pyplot as plt
from utils.pageRank import PRIterator
_logger = logging.getLogger(__name__)
# Conditional independence test (Gaussian, partial-correlation based)
def gaussCItest(suffstat, x, y, S):
S = list(S)
C = pd.DataFrame(suffstat).astype(float).corr().values
n = pd.DataFrame(suffstat).values.shape[0]
cut_at = 0.9999999
# Partial correlation coefficient
# Case 1: S is empty
if len(S) == 0:
r = C[x, y]
# Case 2: S contains a single node -> first-order partial correlation
elif len(S) == 1:
a = (C[x, y] - C[x, S] * C[y, S])
try:
b = math.sqrt((1 - math.pow(C[y, S], 2)) * (1 - math.pow(C[x, S], 2)))
r = a / b
except:
r = C[x, y]
# Case 3: higher-order partial correlation via the pseudo-inverse, following the R pcalg package
else:
m = C[np.ix_([x] + [y] + S, [x] + [y] + S)]
PM = np.linalg.pinv(m)
r = -1 * PM[0, 1] / math.sqrt(abs(PM[0, 0] * PM[1, 1]))
r = min(cut_at, max(-1 * cut_at, r))
# Fisher’s z-transform
res = math.sqrt(n - len(S) - 3) * .5 * math.log1p((2 * r) / (1 - r))
# Two-sided p-value: 2 * (1 - Φ(|z|))
return 2 * (1 - norm.cdf(abs(res)))
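# --- Added illustrative sketch (not part of the original module) ---
# Quick sanity check of gaussCItest on synthetic Gaussian data: x and y are driven by
# the common causes z1 and z2, so they are marginally dependent but conditionally
# independent given {z1, z2}. The helper name and data below are hypothetical.
def _demo_gaussCItest():
    rng = np.random.RandomState(0)
    z1 = rng.normal(size=2000)
    z2 = rng.normal(size=2000)
    x = z1 + z2 + 0.1 * rng.normal(size=2000)
    y = z1 + z2 + 0.1 * rng.normal(size=2000)
    data = np.column_stack([x, y, z1, z2])
    # marginal test: x and y are strongly dependent, so the p-value is ~0
    p_marginal = gaussCItest(data, 0, 1, set())
    # conditioning on {z1, z2}: independence holds, so the p-value is usually non-significant
    p_conditional = gaussCItest(data, 0, 1, {2, 3})
    print('p(x,y) = %.4f, p(x,y | z1,z2) = %.4f' % (p_marginal, p_conditional))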
def _create_complete_graph(node_ids):
"""
Create a complete (fully connected) undirected graph from the list of node ids.
Args:
node_ids: a list of node ids
Returns:
An undirected graph (as a networkx.Graph)
"""
g = nx.Graph()
g.add_nodes_from(node_ids)
for (i, j) in combinations(node_ids, 2):
g.add_edge(i, j)
return g
def estimate_skeleton(indep_test_func, data_matrix, alpha, **kwargs):
"""
Estimate a skeleton graph from the statistical information:
1. Start from a complete undirected graph built from the raw data.
2. For every remaining edge, run a conditional independence test and delete the
edge whenever the test's p-value exceeds alpha.
Args:
indep_test_func: the function used for the conditional independence test.
data_matrix: data (as a numpy array).
alpha: the significance level.
kwargs:
'max_reach': maximum value of l (see the code). The
value depends on the underlying distribution.
'method': if 'stable' given, use stable-PC algorithm
(see [Colombo2014]).
'init_graph': initial structure of skeleton graph
(as a networkx.Graph). If not specified,
a complete graph is used.
other parameters may be passed depending on the
indep_test_func()s.
Returns:
g: a skeleton graph (as a networkx.Graph).
sep_set: a separation set (as an 2D-array of set()).
[Colombo2014] <NAME> and <NAME>. Order-independent
constraint-based causal structure learning. In The Journal of Machine
Learning Research, Vol. 15, pp. 3741-3782, 2014.
"""
def method_stable(kwargs):
return ('method' in kwargs) and kwargs['method'] == "stable"
node_ids = range(data_matrix.shape[1])
node_size = data_matrix.shape[1]
sep_set = [[set() for i in range(node_size)] for j in range(node_size)]
if 'init_graph' in kwargs:
g = kwargs['init_graph']
if not isinstance(g, nx.Graph):
raise ValueError
elif not g.number_of_nodes() == len(node_ids):
raise ValueError('init_graph not matching data_matrix shape')
for (i, j) in combinations(node_ids, 2):
if not g.has_edge(i, j):
sep_set[i][j] = None
sep_set[j][i] = None
else:
# Build the complete undirected graph
g = _create_complete_graph(node_ids)
l = 0
while True:
cont = False
remove_edges = []
# Iterate over ordered pairs (permutations) of node_ids so that both directions of every potential edge are visited
for (i, j) in permutations(node_ids, 2):
# Neighbors of i in the current graph
adj_i = list(g.neighbors(i))
# Skip if j is not adjacent to i; otherwise remove j from the candidate conditioning set
if j not in adj_i:
continue
else:
adj_i.remove(j)
# The process stops if all neighborhoods in the current graph are smaller than the size of the conditional set.
if len(adj_i) >= l:
_logger.debug('testing %s and %s' % (i, j))
_logger.debug('neighbors of %s are %s' % (i, str(adj_i)))
if len(adj_i) < l:
continue
# If some size-l conditioning set k drawn from adj_i makes i and j conditionally independent, the edge i-j must be removed
for k in combinations(adj_i, l):
_logger.debug('indep prob of %s and %s with subset %s'
% (i, j, str(k)))
# Compute the p-value of the conditional independence test
# p_val = indep_test_func(data_matrix, i, j, set(k), **kwargs)
p_val = gaussCItest(data_matrix, i, j, set(k))
_logger.debug('independence test p-value: %s' % str(p_val))
# If the p-value exceeds the significance level alpha, remove the edge between i and j
if p_val > alpha:
if g.has_edge(i, j):
_logger.debug('p: remove edge (%s, %s)' % (i, j))
if method_stable(kwargs):
remove_edges.append((i, j))
else:
g.remove_edge(i, j)
# Union the conditioning set k into sep_set; direction is ignored at this stage, so both sep_set[i][j] and sep_set[j][i] are updated
sep_set[i][j] |= set(k)
sep_set[j][i] |= set(k)
break
cont = True
l += 1
if method_stable(kwargs):
g.remove_edges_from(remove_edges)
if cont is False:
break
if ('max_reach' in kwargs) and (l > kwargs['max_reach']):
break
return (g, sep_set)
def estimate_cpdag(skel_graph, sep_set):
"""
Estimate a CPDAG from the skeleton graph and separation sets
returned by the estimate_skeleton() function.
Args:
skel_graph: A skeleton graph (an undirected networkx.Graph).
sep_set: An 2D-array of separation set.
The contents look something like the following.
sep_set[i][j] = set([k, l, m])
Returns:
An estimated DAG.
"""
# Turn the skeleton into a directed graph (each undirected edge becomes two opposing arcs)
dag = skel_graph.to_directed()
node_ids = skel_graph.nodes()
# Iterate over all unordered pairs (i, j)
for (i, j) in combinations(node_ids, 2):
# Look for collider structures i -> k <- j
adj_i = set(dag.successors(i))
if j in adj_i:
continue
adj_j = set(dag.successors(j))
if i in adj_j:
continue
# Stability check; has no practical effect here
if sep_set[i][j] is None:
continue
# Found the candidate set of common neighbors k
common_k = adj_i & adj_j
for k in common_k:
# k must not appear in the separation set; sep_set is symmetric here, so checking one direction is enough
if k not in sep_set[i][j]:
# Remove k -> i so that the edge is oriented as i -> k
if dag.has_edge(k, i):
_logger.debug('S: remove edge (%s, %s)' % (k, i))
dag.remove_edge(k, i)
# Likewise for the edge k -> j
if dag.has_edge(k, j):
_logger.debug('S: remove edge (%s, %s)' % (k, j))
dag.remove_edge(k, j)
def _has_both_edges(dag, i, j):
return dag.has_edge(i, j) and dag.has_edge(j, i)
def _has_any_edge(dag, i, j):
return dag.has_edge(i, j) or dag.has_edge(j, i)
def _has_one_edge(dag, i, j):
return ((dag.has_edge(i, j) and (not dag.has_edge(j, i))) or
(not dag.has_edge(i, j)) and dag.has_edge(j, i))
def _has_no_edge(dag, i, j):
return (not dag.has_edge(i, j)) and (not dag.has_edge(j, i))
# For all the combination of nodes i and j, apply the following
# rules.
# Now apply the three orientation rules
old_dag = dag.copy()
while True:
# Iterate over all unordered pairs (i, j)
for (i, j) in combinations(node_ids, 2):
# Rule 1: Orient i-j into i->j whenever there is an arrow k->i
# such that k and j are nonadjacent.
#
# Check if i-j.
# Check whether the edge i-j is still undirected
if _has_both_edges(dag, i, j):
# Look all the predecessors of i.
for k in dag.predecessors(i):
# Skip if there is an arrow i->k.
if dag.has_edge(i, k):
continue
# Skip if k and j are adjacent.
if _has_any_edge(dag, k, j):
continue
# Make i-j into i->j
_logger.debug('R1: remove edge (%s, %s)' % (j, i))
dag.remove_edge(j, i)
break
# Rule 2: Orient i-j into i->j whenever there is a chain
# i->k->j.
#
# Check if i-j.
if _has_both_edges(dag, i, j):
# Find nodes k where k is i->k.
succs_i = set()
for k in dag.successors(i):
if not dag.has_edge(k, i):
succs_i.add(k)
# Find nodes j where j is k->j.
preds_j = set()
for k in dag.predecessors(j):
if not dag.has_edge(j, k):
preds_j.add(k)
# Check if there is any node k where i->k->j.
if len(succs_i & preds_j) > 0:
# Make i-j into i->j
_logger.debug('R2: remove edge (%s, %s)' % (j, i))
dag.remove_edge(j, i)
# Rule 3: Orient i-j into i->j whenever there are two chains
# i-k->j and i-l->j such that k and l are nonadjacent.
#
# Check if i-j.
if _has_both_edges(dag, i, j):
# Find nodes k where i-k.
adj_i = set()
for k in dag.successors(i):
if dag.has_edge(k, i):
adj_i.add(k)
# For all the pairs of nodes in adj_i,
for (k, l) in combinations(adj_i, 2):
# Skip if k and l are adjacent.
if _has_any_edge(dag, k, l):
continue
# Skip if not k->j.
if dag.has_edge(j, k) or (not dag.has_edge(k, j)):
continue
# Skip if not l->j.
if dag.has_edge(j, l) or (not dag.has_edge(l, j)):
continue
# Make i-j into i->j.
_logger.debug('R3: remove edge (%s, %s)' % (j, i))
dag.remove_edge(j, i)
break
# Rule 4: Orient i-j into i->j whenever there are two chains
# i-k->l and k->l->j such that k and j are nonadjacent.
#
# However, this rule is not necessary when the PC-algorithm
# is used to estimate a DAG.
if nx.is_isomorphic(dag, old_dag):
break
old_dag = dag.copy()
return dag
def construct_service_dependency_diagram(b):
data = np.array(b.iloc[:, :])[:, :]
columns = list(b.columns)[:]
columns_mapping = {i: str(column) for i, column in enumerate(columns)}
(g, sep_set) = estimate_skeleton(indep_test_func=ci_test_dis,
data_matrix=data,
alpha=0.05)
g = estimate_cpdag(skel_graph=g, sep_set=sep_set)
return g, columns_mapping
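# --- Added illustrative sketch (not part of the original module) ---
# Hypothetical usage of construct_service_dependency_diagram: `metrics_df` stands in
# for a pandas DataFrame of discretised (small-integer) service metrics, one column
# per service, since ci_test_dis is a discrete test. On independent random data the
# resulting CPDAG is typically close to empty.
def _demo_construct_service_dependency_diagram():
    rng = np.random.RandomState(0)
    metrics_df = pd.DataFrame(rng.randint(0, 3, size=(500, 4)),
                              columns=['frontend', 'api', 'cache', 'db'])
    g, columns_mapping = construct_service_dependency_diagram(metrics_df)
    print('nodes:', columns_mapping)
    print('edges:', list(g.edges()))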
def get_root_cause(g):
"""
Derive the list of root-cause candidates from the dependency graph.
Args:
g: the directed dependency graph
Returns: the list of root-cause nodes
"""
result = list()
node_ids = g.nodes()
# Find the node with the largest number of direct causes (predecessors)
max_pre_node, max_pre_size = None, 0
for node_id in node_ids:
if len(list(g.predecessors(node_id))) > max_pre_size:
max_pre_node = node_id
max_pre_size = len(list(g.predecessors(node_id)))
# Breadth-first traversal towards upstream nodes
node_filter, node_queue = {max_pre_node}, list([max_pre_node])
while node_queue:
node_now = node_queue.pop(0)
if not list(g.predecessors(node_now)):
result.append(node_now)
continue
is_pre_not_filter = False
for k in g.predecessors(node_now):
if k not in node_filter:
is_pre_not_filter = True
node_filter.add(k)
node_queue.append(k)
# If every upstream node has already been visited, add the current node to the result so that the result is never empty
if not is_pre_not_filter:
result.append(node_now)
return result
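# --- Added illustrative sketch (not part of the original module) ---
# Hypothetical service graph: 'db' and 'cache' feed 'api', which feeds 'frontend'.
# get_root_cause() walks upstream from the node with the most direct causes, so the
# upstream-most nodes ('db' and 'cache' here) are reported as root-cause candidates.
def _demo_get_root_cause():
    g = nx.DiGraph()
    g.add_edges_from([('db', 'api'), ('cache', 'api'), ('api', 'frontend')])
    print(get_root_cause(g))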
if __name__ == '__main__':
# Configure logging; do not comment this out
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
_logger.setLevel(logging.DEBUG)
_logger.addHandler(ch)
# Mock raw binary data
dm = np.array(bin_data).reshape((5000, 5))
(g, sep_set) = estimate_skeleton(indep_test_func=ci_test_bin,
data_matrix=dm,
alpha=0.01)
#
g = estimate_cpdag(skel_graph=g, sep_set=sep_set)
g_answer = nx.DiGraph()
g_answer.add_nodes_from([0, 1, 2, 3, 4])
g_answer.add_edges_from([(0, 1), (2, 3), (3, 2), (3, 1),
(2, 4), (4, 2), (4, 1)])
print('Edges are:', g.edges(), end='')
if nx.is_isomorphic(g, g_answer):
print(' => GOOD')
else:
print(' => WRONG')
print('True edges should be:', g_answer.edges())
# Mock a larger discrete dataset for further testing
dm = np.array(dis_data).reshape((10000, 5))
import matplotlib.pyplot as plt
import numpy as np
def ResultPlotter(Results,statsToPlot,calibrationMethods,numRuns,numHops):
'''
Plots of the results from the multi-hop calibration
Errorplots of different metrics (Y-axis; specified in config.json: "statsToPlot": ["deltaB","rmse","corr","nrmse"]) over hop id (X-axis)
Inputs:
Results: See MultihopCalibration.py
statsToPlot: List containing strings of the statistics/metrics which should be plotted (any of ["deltaB","rmse","corr","nrmse"])
calibrationMethods: List containing strings of the calibration methods used (any of ["scan","mls"])
numRuns: see DataCreator.py
numHops: see DataCreator.py
'''
numStats = len(statsToPlot)
fig, axs = plt.subplots(numStats)
for m in calibrationMethods:
for s in range(numStats):
stat = statsToPlot[s]
SummarizedResult = np.zeros((numRuns,numHops)) # results: runs x hops
for run in range(numRuns):
curR = [d[stat] for d in Results[m + '_run_'+str(run)] if stat in d]
SummarizedResult[run,:] = curR
meanSummarizedResult = np.mean(SummarizedResult,axis=0)
stdSummarizedResult = np.std(SummarizedResult,axis=0)
if numStats > 1:
axs[s].errorbar(np.arange(numHops), meanSummarizedResult,yerr=stdSummarizedResult, label=m)
axs[s].set(ylabel=stat)
else:
axs.errorbar(np.arange(numHops), meanSummarizedResult, yerr=stdSummarizedResult, label=m)
axs.set(ylabel=stat)
from paramz.caching import Cache_this
from paramz.transformations import Logexp
from GPy.kern.src.kern import CombinationKernel
from GPy.core.parameterization import Param
import GPy.kern as _Gk
import numpy as np
from GPy_ABCD.Kernels.sigmoidalKernels import SigmoidalKernel, SigmoidalIndicatorKernel
class ChangeWindowShiftedSidesBase(CombinationKernel):
"""
Abstract class for changewindow kernels with the two sides being allowed a vertical shift difference
"""
def __init__(self, first, second, sigmoidal, sigmoidal_indicator, location: float = 0., slope: float = 0.5, width = 1.,
name = 'change_window_shifted_sides_base', fixed_slope = False):
_newkerns = [kern.copy() for kern in (first, second)]
super(ChangeWindowShiftedSidesBase, self).__init__(_newkerns, name)
self.first = first
self.second = second
self._fixed_slope = fixed_slope # Note: here to be used by subclasses, and changing it from the outside does not link the parameter
if self._fixed_slope: self.slope = slope
else:
self.slope = Param('slope', np.array(slope), Logexp())
""" This script contains a number of functions used for interpolation of kinetic profiles and D,V profiles in STRAHL.
Refer to the STRAHL manual for details.
"""
# MIT License
#
# Copyright (c) 2021 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from scipy.interpolate import interp1d
import numpy as np
def funct(params, rLCFS, r):
""" Function 'funct' in STRAHL manual
The "params" input is broken down into 6 arguments:
y0 is core offset
y1 is edge offset
y2 (>y0, >y1) sets the gaussian amplification
p0 sets the width of the inner gaussian
p1 sets the width of the outer gaussian
p2 sets the location of the inner and outer peaks
"""
params = np.reshape(params, (-1, 6))
out = []
for param in params:
y0, y1, y2, p0, p1, p2 = param
r1 = p2 * rLCFS
rin = r[r <= r1]
rout = r[r > r1]
yin = y0 + (y2 - y0) * np.exp(np.maximum(-((rin - r1) ** 2) / p0 ** 2, -50))
yout = y1 + (y2 - y1) * np.exp(np.maximum(-((rout - r1) ** 2) / p1 ** 2, -50))
out.append(np.concatenate((yin, yout)))
return np.array(out)
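# --- Added illustrative sketch (not part of the original module) ---
# Evaluate a single 6-parameter profile on a radial grid; the parameter values are
# hypothetical. The returned array has one row per 6-tuple of parameters.
def _demo_funct():
    r = np.linspace(0.0, 1.2, 100)
    rLCFS = 1.0
    params = [1.0, 0.1, 2.0, 0.3, 0.1, 0.9]   # y0, y1, y2, p0, p1, p2
    profile = funct(params, rLCFS, r)[0]
    assert profile.shape == r.shape
    return profile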
def funct2(params, rLCFS, r):
"""Function 'funct2' in STRAHL manual.
"""
params_1, params_2 = np.swapaxes(np.reshape(params, (-1, 2, 6)), 0, 1)
funct_1 = funct(params_1, rLCFS, r)
funct_2 = funct(params_2, rLCFS, r)
return funct_1 + funct_2
def exppol0(params, d, rLCFS, r):
rho = r[:, None] / rLCFS
d = np.array(d) / rLCFS
params = np.array(params).T
idx = np.searchsorted(r, rLCFS)
#!/usr/bin/env python
from __future__ import print_function
from __future__ import division
from builtins import zip
from builtins import input
from builtins import map
from builtins import next
from builtins import str
from builtins import range
from past.utils import old_div
from builtins import object
import sys
import json
import pprint
import os
import threading
from collections import defaultdict, Counter
import multiprocessing
from itertools import islice, chain, count
import argparse
import numpy as np
from sklearn.feature_extraction import FeatureHasher
import scipy.sparse as sp
from fastxml import Inferencer, Trainer, metric_cluster
from fastxml.weights import uniform, nnllog, propensity, logexp
from fastxml.metrics import ndcg, precision, pSndcg
def build_arg_parser():
parser = argparse.ArgumentParser(description='FastXML trainer and tester',
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument("model",
help="Model to use for dataset file")
parser.add_argument("input_file",
help="Input file to use")
group = parser.add_mutually_exclusive_group()
group.add_argument("--standard-dataset", dest="standardDataset", action="store_true",
help="Input is standard dataset sparse format")
group.add_argument("--pre-gen", dest="preGen", type=int,
help="Input is is pregenerated sparse format")
parser.add_argument("--verbose", action="store_true",
help="Verbose"
)
subparsers = parser.add_subparsers(dest="command")
trainer = subparsers.add_parser('train', help="Set up for trainer")
build_train_parser(trainer)
inference = subparsers.add_parser('inference', help="Runs a model against a dataset")
build_repl_parser(inference)
build_inference_parser(inference)
cluster = subparsers.add_parser('cluster', help="Clusters labels into NDCG classes")
build_cluster_parser(cluster)
repl = subparsers.add_parser('repl', help="Interactive mode for a model")
build_repl_parser(repl)
return parser
def build_cluster_parser(parser):
parser.add_argument("--trees", dest="trees", type=int, default=1,
help="Number of random trees to cluster on"
)
parser.add_argument("--label-weight", dest="label_weight",
choices=('uniform', 'nnllog', 'propensity', 'logexp'), default='propensity',
help="Metric for computing label weighting."
)
parser.add_argument("--max_leaf_size", dest="max_leaf_size", type=int,
default=10,
help="Maximum number of examples allowed per leaf"
)
parser.add_argument("--label-weight-hp", dest="label_weight_hp",
metavar="P", nargs=2, type=float, default = (None, None),
help="Hyper parameters for label weight tuning"
)
def build_repl_parser(parser):
parser.add_argument("--max-predict", dest="max_predict", type=int,
default=10,
help="Maximum number of classes to predict"
)
parser.add_argument("--gamma", type=float,
help="Overrides default gamma value for leaf classifiers"
)
parser.add_argument("--blend_factor", type=float,
help="Overrides default blend factor"
)
parser.add_argument("--leaf-probs", dest="leafProbs", type=lambda x: x.lower() == "true",
help="Overrides whether to show log vs P(Y|X)"
)
def build_inference_parser(parser):
parser.add_argument("--dict", dest="dict", action="store_true",
help="Store predict as dict"
)
parser.add_argument("--score", action="store_true",
help="Scores results according to ndcg and precision"
)
parser.add_argument("--score-only", dest="scoreOnly", action="store_true",
help="Scores the dataset and returns the average NDCG scores"
)
def build_train_parser(parser):
parser.add_argument("--engine", dest="engine", default="auto",
choices=('auto', 'sgd', 'liblinear'),
help="Which engine to use."
)
parser.add_argument("--auto-weight", dest="auto_weight", default=32, type=int,
help="When engine is 'auto', number of classes * max_leaf_size remaining to revert to SGD"
)
parser.add_argument("--no-remap-labels", dest="noRemap", action="store_true",
help="Whether to remap labels to an internal format. Needed for string labels"
)
parser.add_argument("--trees", dest="trees", type=int,
default=50,
help="Number of trees to use"
)
parser.add_argument("--max_leaf_size", dest="max_leaf_size", type=int,
default=10,
help="Maximum number of examples allowed per leaf"
)
parser.add_argument("--max_labels_per_leaf", dest="max_labels_per_leaf", type=int,
default=50,
help="Maximum number of classes to retain for the probability distribution per leaf"
)
parser.add_argument("--re_split", dest="re_split", type=int,
default=1,
help="After fitting a classifier, re-splits the data according to the fitted "\
"classifier. If greater than 1, it will re-fit and re-train a classifier on "\
"the data if, after splitting, it all ends up in a leaf. Will retry N times."
)
parser.add_argument("--alpha", dest="alpha", type=float,
default=1e-3,
help="L1 coefficient. Too high and it won't learn a split, too low and "\
"it won't be sparse (larger file size, slower inference)."
)
parser.add_argument("--C", dest="C", type=float,
default=1,
help="C value for when using auto, penalizing accuracy over fit"
)
parser.add_argument("--iters", dest="iters",
type=lambda x: int(x) if x != 'auto' else x,
default=2,
help="Number of iterations to run over the dataset when fitting classifier"
)
parser.add_argument("--n_updates", dest="n_updates",
type=int,
default=100,
help="If iters is 'auto', makes it use iters = n_update / N"
)
parser.add_argument("--no_bias", dest="bias", action="store_false",
help="Fits a bias for the classifier. Not needed if data has E[X] = 0"
)
parser.add_argument("--subsample", dest="subsample", type=float,
default=1.0,
help="Subsample data per tree. If less than 1, interpreted as a "\
"percentage. If greater than one, taken as number of data " \
"points per tree."
)
parser.add_argument("--loss", dest="loss", choices=('log', 'hinge'),
default='log',
help="Loss to minimize."
)
parser.add_argument("--threads", dest="threads", type=int,
default=multiprocessing.cpu_count(),
help="Number of threads to use. Will use min(threads, trees)"
)
parser.add_argument("--label-weight", dest="label_weight",
choices=('uniform', 'nnllog', 'propensity', 'logexp'), default='propensity',
help="Metric for computing label weighting."
)
parser.add_argument("--label-weight-hp", dest="label_weight_hp",
metavar="P", nargs=2, type=float, default = (None, None),
help="Hyper parameters for label weight tuning"
)
parser.add_argument("--optimization", dest="optimization",
choices=('fastxml', 'dsimec'), default='fastxml',
help="optimization strategy to use for linear classifier"
)
parser.add_argument("--eps", dest="eps", type=float,
help="Sparsity epsilon. Weights lower than eps will suppress to zero"
)
parser.add_argument("--leaf-classifiers", dest="leaf_class",
action="store_true",
help="Whether to use and compute leaf classifiers"
)
parser.add_argument("--gamma", type=int, default=30,
help="Gamma coefficient for hyper-sphere weighting"
)
parser.add_argument("--blend-factor", dest="blend_factor",
type=float, default=0.5,
help="blend * tree-probs + (1 - blend) * tail-classifiers"
)
parser.add_argument("--min-label-count", dest="mlc",
type=int, default=5,
help="Filter out labels with count < min-label-count"
)
parser.add_argument("--leaf-probs", dest="leafProbs",
action="store_true",
help="Computes probability: TP(X) * LP(X)"
)
return parser
def sliding(it, window):
x = list(islice(it, window))
try:
if len(x) == window:
while True:
yield x
x2 = x[1:]
x2.append(next(it))
x = x2
except StopIteration:
pass
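# --- Added illustrative note (not part of the original module) ---
# sliding() yields overlapping windows of fixed length, e.g.
#   list(sliding(iter('abcde'), 2)) -> [['a','b'], ['b','c'], ['c','d'], ['d','e']]
# It is used below to build bigram and trigram features from token streams.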
class Quantizer(object):
def stream(self, fn):
raise NotImplementedError()
class JsonQuantizer(Quantizer):
def __init__(self, verbose, min_label_count=1, inference=False):
self.fh = FeatureHasher(dtype='float32')
self.verbose = verbose
self.inference = inference
self.min_label_count = min_label_count
def quantize(self, text):
text = text.lower().replace(',', '')
unigrams = text.split()
bigrams = (' '.join(xs) for xs in sliding(iter(unigrams), 2))
trigrams = (' '.join(xs) for xs in sliding(iter(unigrams), 3))
d = {f: 1.0 for f in chain(unigrams, bigrams, trigrams)}
return self.fh.transform([d])
def yieldJson(self, fname):
with open(fname, 'rt') as f:
for i, line in enumerate(f):
if self.verbose and i % 10000 == 0:
print("%s docs encoded" % i)
yield json.loads(line)
def count_labels(self, fname):
c = Counter()
for data in self.yieldJson(fname):
c.update(data['tags'])
return (lambda t: c[t] >= self.min_label_count)
def stream(self, fname, no_features=False):
if self.min_label_count > 1:
f = self.count_labels(fname)
else:
f = lambda x: True
for data in self.yieldJson(fname):
y = [yi for yi in set(data.get('tags', [])) if f(yi)]
if no_features:
yield data, y
else:
X = self.quantize(data['title'])
yield data, X, y
class PregenQuantizer(JsonQuantizer):
def __init__(self, verbose, min_label_count, dims, inference=False):
super(PregenQuantizer, self).__init__(verbose, min_label_count, inference)
self.dims = dims
def quantize(self, text):
data = []
row_ind = []
col_ind = []
for p in text.split():
rIndex, rValue = p.split(':')
row_ind.append(0)
col_ind.append(int(rIndex))
data.append(float(rValue))
return sp.csr_matrix((data, (row_ind, col_ind)), (1, self.dims)).astype('float32')
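# --- Added illustrative note (not part of the original module) ---
# PregenQuantizer.quantize parses one pre-generated sparse feature line of the form
# "index:value index:value ...", e.g. quantize("3:0.5 17:1.0") returns a 1 x dims
# CSR row with 0.5 at column 3 and 1.0 at column 17.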
class StandardDatasetQuantizer(Quantizer):
def __init__(self, verbose):
self.verbose = verbose
def quantize(self, line, no_features):
if " " not in line:
classes, sparse = line.strip(), ""
elif line.startswith(' '):
classes, sparse = '', line.strip()
else:
classes, sparse = line.strip().split(None, 1)
if classes:
y = list(map(int, classes.split(',')))
else:
y = []
if no_features:
return y
c, d = [], []
for v in sparse.split():
loc, v = v.split(":")
c.append(int(loc))
d.append(float(v))
return (c, d), y
def stream(self, fn, no_features=False):
with open(fn, 'rt') as f:
n_samples, n_feats, n_classes = list(map(int, f.readline().split()))
for i, line in enumerate(f):
if i == 0:
continue
if self.verbose and i % 10000 == 0:
print("%s docs encoded" % i)
res = self.quantize(line, no_features)
if no_features:
yield {"labels": res}, res
else:
(c, d), y = res
yield {"labels": y}, sp.csr_matrix((d, ([0] * len(d), c)),
shape=(1, n_feats), dtype='float32'), y
class Dataset(object):
def __init__(self, dataset):
self.dataset = dataset
@property
def model(self):
return os.path.join(self.dataset, 'model')
@property
def classes(self):
return os.path.join(self.dataset, 'counts')
@property
def weights(self):
return os.path.join(self.dataset, 'weights')
class ClusterDataset(object):
def __init__(self, dataset):
self.dataset = dataset
def probs(self, i):
return os.path.join(self.dataset, 'probs.%s' % i)
@property
def clusters(self):
return os.path.join(self.dataset, 'cluster')
def quantize(args, quantizer, classes):
cnt = count()
for _, X, ys in quantizer.stream(args.input_file):
nys = []
for y in ys:
if y not in classes:
classes[y] = y if getattr(args, 'noRemap', False) else next(cnt)
nys.append(classes[y])
yield X, nys
def quantize_y(args, quantizer, classes):
cnt = count()
for _, ys in quantizer.stream(args.input_file, no_features=True):
nys = []
for y in ys:
if y not in classes:
classes[y] = y if getattr(args, 'noRemap', False) else next(cnt)
nys.append(classes[y])
yield nys
def train(args, quantizer):
cnt = count()
classes, X_train, y_train = {}, [], []
for i, (X, y) in enumerate(quantize(args, quantizer, classes)):
if y:
X_train.append(X)
y_train.append(y)
elif args.verbose:
print("Skipping example %s since it has no classes matching threshold" % i)
# Save the mapping
dataset = Dataset(args.model)
if not os.path.isdir(args.model):
os.makedirs(args.model)
with open(dataset.classes, 'wt') as out:
json.dump(list(classes.items()), out)
weights = compute_weights(y_train, args.label_weight, args.label_weight_hp)
with open(dataset.weights, 'wt') as out:
for i, w in enumerate(weights):
out.write("%s,%s\n" % (i, w))
# Train
clf = Trainer(
n_trees=args.trees,
max_leaf_size=args.max_leaf_size,
max_labels_per_leaf=args.max_labels_per_leaf,
re_split=args.re_split,
alpha=args.alpha,
n_epochs=args.iters,
n_updates=args.n_updates,
bias=args.bias,
subsample=args.subsample,
loss=args.loss,
leaf_classifiers=args.leaf_class,
n_jobs=args.threads,
optimization=args.optimization,
eps=args.eps,
C=args.C,
engine=args.engine,
auto_weight=args.auto_weight,
verbose=args.verbose
)
clf.fit(X_train, y_train, weights=weights)
clf.save(dataset.model)
sys.exit(0)
def compute_weights(y_train, label_weight, hps):
args = (y_train,)
if hps[0] is not None:
args += tuple(hps)
if label_weight == 'nnllog':
return nnllog(*args)
elif label_weight == 'uniform':
return uniform(y_train)
elif label_weight == 'propensity':
return propensity(*args)
elif label_weight == 'logexp':
return logexp(*args)
else:
raise NotImplementedError(label_weight)
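# --- Added illustrative sketch (not part of the original module) ---
# compute_weights() dispatches to the label-weighting schemes imported from
# fastxml.weights; the toy label lists below are hypothetical. Each scheme returns
# per-label weights, and the default 'propensity' scheme typically assigns larger
# weights to rarer labels.
def _demo_compute_weights():
    y_train = [[0], [0, 1], [0], [1], [0, 2]]
    for scheme in ('uniform', 'propensity'):
        print(scheme, compute_weights(y_train, scheme, (None, None)))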
def print_metrics(ndcgs, precs, pndcgs, toStderr):
fout = sys.stderr if toStderr else sys.stdout
ndcgT = list(zip(*ndcgs))
precsT = list(zip(*precs))
pndcgT = list(zip(*pndcgs))
for i in range(3):
print('P@{}: {}'.format(2 * i + 1, np.mean(precsT[i])), file=fout)
for i in range(3):
print('NDCG@{}: {}'.format(2 * i + 1, np.mean(ndcgT[i])), file=fout)
import sys, os
import numpy as np
from keras.preprocessing.image import transform_matrix_offset_center, apply_transform, Iterator,random_channel_shift, flip_axis
from scipy.ndimage.interpolation import map_coordinates
from scipy.ndimage.filters import gaussian_filter
import cv2
import random
import pdb
from skimage.io import imsave, imread
from skimage.transform import rotate
from skimage import transform
from skimage.transform import resize
from params import *
import json
import math
#import matplotlib.pyplot as plt
def clip(img, dtype, maxval):
return np.clip(img, 0, maxval).astype(dtype)
def RandomLight(img,img_right):
lights = random.choice(["Rfilter","Rbright","Rcontr", "RSat","RhueSat"])
#print(lights)
if lights=="Rfilter":
alpha = 0.5 * random.uniform(0, 1)
kernel = np.ones((3, 3), np.float32)/9 * 0.2
colored = img[..., :3]
colored = alpha * cv2.filter2D(colored, -1, kernel) + (1-alpha) * colored
maxval = np.max(img[..., :3])
dtype = img.dtype
img[..., :3] = clip(colored, dtype, maxval)
#right image
colored = img_right[..., :3]
colored = alpha * cv2.filter2D(colored, -1, kernel) + (1-alpha) * colored
maxval = np.max(img_right[..., :3])
dtype = img_right.dtype
img_right[..., :3] = clip(colored, dtype, maxval)
if lights=="Rbright":
alpha = 1.0 + 0.1*random.uniform(-1, 1)
maxval = np.max(img[..., :3])
dtype = img.dtype
img[..., :3] = clip(alpha * img[...,:3], dtype, maxval)
#right image
maxval = np.max(img_right[..., :3])
dtype = img_right.dtype
img_right[..., :3] = clip(alpha * img_right[...,:3], dtype, maxval)
if lights=="Rcontr":
alpha = 1.0 + 0.1*random.uniform(-1, 1)
gray = cv2.cvtColor(img[:, :, :3], cv2.COLOR_BGR2GRAY)
gray = (3.0 * (1.0 - alpha) / gray.size) * np.sum(gray)
maxval = np.max(img[..., :3])
dtype = img.dtype
img[:, :, :3] = clip(alpha * img[:, :, :3] + gray, dtype, maxval)
#right image
gray = cv2.cvtColor(img_right[:, :, :3], cv2.COLOR_BGR2GRAY)
gray = (3.0 * (1.0 - alpha) / gray.size) * np.sum(gray)
maxval = np.max(img_right[..., :3])
dtype = img.dtype
img_right[:, :, :3] = clip(alpha * img_right[:, :, :3] + gray, dtype, maxval)
if lights=="RSat":
maxval = np.max(img[..., :3])
dtype = img.dtype
alpha = 1.0 + random.uniform(-0.1, 0.1)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray = cv2.cvtColor(gray, cv2.COLOR_GRAY2BGR)
img[..., :3] = alpha * img[..., :3] + (1.0 - alpha) * gray
img[..., :3] = clip(img[..., :3], dtype, maxval)
#right image
maxval = np.max(img_right[..., :3])
dtype = img_right.dtype
alpha = 1.0 + random.uniform(-0.1, 0.1)
gray = cv2.cvtColor(img_right, cv2.COLOR_BGR2GRAY)
gray = cv2.cvtColor(gray, cv2.COLOR_GRAY2BGR)
img_right[..., :3] = alpha * img_right[..., :3] + (1.0 - alpha) * gray
img_right[..., :3] = clip(img_right[..., :3], dtype, maxval)
if lights=="RhueSat":
img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
h, s, v = cv2.split(img)
hue_shift = np.random.uniform(-25,25)
h = cv2.add(h, hue_shift)
sat_shift = np.random.uniform(-25,25)
s = cv2.add(s, sat_shift)
val_shift = np.random.uniform(-25, 25)
v = cv2.add(v, val_shift)
img = cv2.merge((h, s, v))
img = cv2.cvtColor(img, cv2.COLOR_HSV2BGR)
#right image
img_right = cv2.cvtColor(img_right, cv2.COLOR_BGR2HSV)
h, s, v = cv2.split(img_right)
h = cv2.add(h, hue_shift)
s = cv2.add(s, sat_shift)
v = cv2.add(v, val_shift)
img_right = cv2.merge((h, s, v))
img_right = cv2.cvtColor(img_right, cv2.COLOR_HSV2BGR)
return img,img_right
def perspectivedist(img,img_right,img_mask, flag='all'):
if flag=='all':
magnitude=3
# pdb.set_trace()
rw=img.shape[0]
cl=img.shape[1]
#x = random.randrange(50, 200)
#nonzeromask=(img_mask>0).nonzero()
#nonzeroy = np.array(nonzeromask[0])
#nonzerox = np.array(nonzeromask[1])
#bbox = (( np.maximum(np.min(nonzerox)-x,0), np.maximum(np.min(nonzeroy)-x,0)), (np.minimum(np.max(nonzerox)+x,cl), np.minimum(np.max(nonzeroy)+x,rw)))
#pdb.set_trace()
# img=img[bbox[0][1]:(bbox[1][1]),bbox[0][0]:(bbox[1][0])]
# img_mask=img_mask[bbox[0][1]:(bbox[1][1]),bbox[0][0]:(bbox[1][0])]
skew = random.choice(["TILT", "TILT_LEFT_RIGHT", "TILT_TOP_BOTTOM", "CORNER"])
w, h,_ = img.shape
x1 = 0
x2 = h
y1 = 0
y2 = w
original_plane = np.array([[(y1, x1), (y2, x1), (y2, x2), (y1, x2)]], dtype=np.float32)
max_skew_amount = max(w, h)
max_skew_amount = int(math.ceil(max_skew_amount *magnitude))
skew_amount = random.randint(1, max_skew_amount)
if skew == "TILT" or skew == "TILT_LEFT_RIGHT" or skew == "TILT_TOP_BOTTOM":
if skew == "TILT":
skew_direction = random.randint(0, 3)
elif skew == "TILT_LEFT_RIGHT":
skew_direction = random.randint(0, 1)
elif skew == "TILT_TOP_BOTTOM":
skew_direction = random.randint(2, 3)
if skew_direction == 0:
# Left Tilt
new_plane = np.array([(y1, x1 - skew_amount), # Top Left
(y2, x1), # Top Right
(y2, x2), # Bottom Right
(y1, x2 + skew_amount)], dtype=np.float32) # Bottom Left
elif skew_direction == 1:
# Right Tilt
new_plane = np.array([(y1, x1), # Top Left
(y2, x1 - skew_amount), # Top Right
(y2, x2 + skew_amount), # Bottom Right
(y1, x2)],dtype=np.float32) # Bottom Left
elif skew_direction == 2:
# Forward Tilt
new_plane = np.array([(y1 - skew_amount, x1), # Top Left
(y2 + skew_amount, x1), # Top Right
(y2, x2), # Bottom Right
(y1, x2)], dtype=np.float32) # Bottom Left
elif skew_direction == 3:
# Backward Tilt
new_plane = np.array([(y1, x1), # Top Left
(y2, x1), # Top Right
(y2 + skew_amount, x2), # Bottom Right
(y1 - skew_amount, x2)], dtype=np.float32) # Bottom Left
if skew == "CORNER":
skew_direction = random.randint(0, 7)
if skew_direction == 0:
# Skew possibility 0
new_plane = np.array([(y1 - skew_amount, x1), (y2, x1), (y2, x2), (y1, x2)], dtype=np.float32)
elif skew_direction == 1:
# Skew possibility 1
new_plane = np.array([(y1, x1 - skew_amount), (y2, x1), (y2, x2), (y1, x2)], dtype=np.float32)
elif skew_direction == 2:
# Skew possibility 2
new_plane = np.array([(y1, x1), (y2 + skew_amount, x1), (y2, x2), (y1, x2)],dtype=np.float32)
elif skew_direction == 3:
# Skew possibility 3
new_plane = np.array([(y1, x1), (y2, x1 - skew_amount), (y2, x2), (y1, x2)], dtype=np.float32)
elif skew_direction == 4:
# Skew possibility 4
new_plane = np.array([(y1, x1), (y2, x1), (y2 + skew_amount, x2), (y1, x2)], dtype=np.float32)
elif skew_direction == 5:
# Skew possibility 5
new_plane = np.array([(y1, x1), (y2, x1), (y2, x2 + skew_amount), (y1, x2)], dtype=np.float32)
elif skew_direction == 6:
# Skew possibility 6
new_plane = np.array([(y1, x1), (y2, x1), (y2, x2), (y1 - skew_amount, x2)],dtype=np.float32)
elif skew_direction == 7:
# Skew possibility 7
new_plane =np.array([(y1, x1), (y2, x1), (y2, x2), (y1, x2 + skew_amount)], dtype=np.float32)
# pdb.set_trace()
perspective_matrix = cv2.getPerspectiveTransform(original_plane, new_plane)
img = cv2.warpPerspective(img, perspective_matrix,
(img.shape[1], img.shape[0]),
flags = cv2.INTER_LINEAR)
img_right = cv2.warpPerspective(img_right, perspective_matrix,
(img.shape[1], img.shape[0]),
flags = cv2.INTER_LINEAR)
img_mask = cv2.warpPerspective(img_mask, perspective_matrix,
(img.shape[1], img.shape[0]),
flags = cv2.INTER_LINEAR)
return img, img_right, img_mask
def apply_clahe(img):
lab= cv2.cvtColor(img, cv2.COLOR_BGR2LAB)
l, a, b = cv2.split(lab)
clahe = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(8,8))
cl = clahe.apply(l)
limg = cv2.merge((cl,a,b))
img = cv2.cvtColor(limg, cv2.COLOR_LAB2BGR)
return img
def add_gaussian_noise(X_imgs):
#pdb.set_trace()
row, col,_= X_imgs.shape
#X_imgs=X_imgs/255
X_imgs = X_imgs.astype(np.float32)
# Gaussian distribution parameters
mean = 0
var = 0.1
sigma = var ** 0.5
gaussian = np.random.random((row, col, 1)).astype(np.float32)
gaussian = np.concatenate((gaussian, gaussian, gaussian), axis = 2)
gaussian_img = cv2.addWeighted(X_imgs, 0.75, 0.25 * gaussian, 0.25, 0)
gaussian_img = np.array(gaussian_img, dtype = np.uint8)
return gaussian_img
def random_affine(img,img_right,img_mask):
flat_sum_mask=sum(img_mask.flatten())
(row,col,_)=img_mask.shape
angle=shear_deg=0
zoom=1
center_shift = np.array((1000, 1000)) / 2. - 0.5
tform_center = transform.SimilarityTransform(translation=-center_shift)
tform_uncenter = transform.SimilarityTransform(translation=center_shift)
big_img=np.zeros((1000,1000,3), dtype=np.uint8)
big_img_right=np.zeros((1000,1000,3), dtype=np.uint8)
big_mask=np.zeros((1000,1000,3), dtype=np.uint8)
big_img[190:(190+row),144:(144+col)]=img
big_img_right[190:(190+row),144:(144+col)]=img_right
big_mask[190:(190+row),144:(144+col)]=img_mask
affine = random.choice(["rotate", "zoom", "shear"])
if affine == "rotate":
angle= random.uniform(-90, 90)
if affine == "zoom":
zoom = random.uniform(0.5, 1.5)
if affine=="shear":
shear_deg = random.uniform(-5, 5)
# pdb.set_trace()
tform_aug = transform.AffineTransform(rotation = np.deg2rad(angle),
scale =(1/zoom, 1/zoom),
shear = np.deg2rad(shear_deg),
translation = (0, 0))
tform = tform_center + tform_aug + tform_uncenter
# pdb.set_trace()
img_tr=transform.warp((big_img), tform)
img_tr_right=transform.warp((big_img_right), tform)
mask_tr=transform.warp((big_mask), tform)
# pdb.set_trace()
masktemp = cv2.cvtColor((img_tr*255).astype(np.uint8), cv2.COLOR_BGR2GRAY)>20
img_tr=img_tr[np.ix_(masktemp.any(1),masktemp.any(0))]
mask_tr = mask_tr[np.ix_(masktemp.any(1),masktemp.any(0))]
img_tr_right = img_tr_right[np.ix_(masktemp.any(1),masktemp.any(0))]
return (img_tr*255).astype(np.uint8),(img_tr_right*255).astype(np.uint8),(mask_tr*255).astype(np.uint8)
class CustomNumpyArrayIterator(Iterator):
def __init__(self, X, y, image_data_generator,
batch_size=32, shuffle=False, seed=None,
dim_ordering='th'):
self.X = X
self.y = y
self.image_data_generator = image_data_generator
self.dim_ordering = dim_ordering
self.training=image_data_generator.training
self.img_rows=image_data_generator.netparams.img_rows
self.img_cols=image_data_generator.netparams.img_cols
with open('labels_2017.json') as json_file:
self.Data = json.load(json_file)
#pdb.set_trace()
super(CustomNumpyArrayIterator, self).__init__(X.shape[0], batch_size, shuffle, seed)
def _get_batches_of_transformed_samples(self, index_array):
# pdb.set_trace()
batch_x_right = np.zeros((len(index_array),self.img_rows,self.img_cols,3), dtype=np.float32)
batch_x_left = np.zeros((len(index_array),self.img_rows,self.img_cols,3), dtype=np.float32)
if self.training:
if self.image_data_generator.netparams.task=='all':
ch_num=11
elif self.image_data_generator.netparams.task=='binary':
ch_num=1
elif self.image_data_generator.netparams.task=='parts':
ch_num=3
elif self.image_data_generator.netparams.task=='instrument':
ch_num=7
else:
ch_num=3
batch_y=np.zeros((len(index_array), self.img_rows,self.img_cols,ch_num), dtype=np.float32)
infos=[]
for i, j in enumerate(index_array):
#pdb.set_trace()
x_left = imread(self.X[j][0])
x_right =imread(self.X[j][1])
y1 =imread(self.y[j])
y1 = y1[...,[1,2,0]]
#print(j)
#pdb.set_trace()
infos.append((self.X[j][0], x_left.shape))
_x_left, _x_right, _y1 = self.image_data_generator.random_transform(x_left.astype(np.uint8), x_right.astype(np.uint8),y1.astype(np.uint8),self.Data)
batch_x_left[i]=_x_left
batch_x_right[i]=_x_right
batch_y[i]=_y1
#inf_temp=[]
#inf_temp.append()
# inf_temp.append()
# infos.append(
# pdb.set_trace()
batch_y=np.reshape(batch_y,(-1,self.img_rows,self.img_cols,ch_num))
return batch_x_left,batch_x_right,batch_y,infos
def next(self):
with self.lock:
index_array = next(self.index_generator)
#print(index_array)
return self._get_batches_of_transformed_samples(index_array)
def convert_gray(data,im, tasktype):
#pdb.set_trace()
#np.shape(self.Data['instrument'])
if tasktype.task=='all':
out = (np.zeros((im.shape[0],im.shape[1],11)) ).astype(np.uint8)
#pdb.set_trace()
image=np.squeeze(im[:,:,0])
indexc=0
for label_info,index in zip(data['instrument'],range(0,np.shape(data['instrument'])[0]+1)):
rgb=label_info['color'][0]
if rgb==0:
continue
temp_out = (np.zeros(im.shape[:2]) ).astype(np.uint8)
gray_val=255
#pdb.set_trace()
match_pxls = np.where(image == rgb)
temp_out[match_pxls] = gray_val
out[:,:,index-1]=temp_out
#print(index-1)
#print(rgb)
image=np.squeeze(im[:,:,1])
for label_info,index in zip(data['parts'],range(np.shape(data['instrument'])[0],np.shape(data['instrument'])[0]+np.shape(data['parts'])[0])):
rgb=label_info['color'][1]
#pdb.set_trace()
if rgb==0:
continue
temp_out = (np.zeros(im.shape[:2]) ).astype(np.uint8)
gray_val=255
match_pxls = np.where(image == rgb)
temp_out[match_pxls] = gray_val
out[:,:,index-1]=temp_out
#print(index-1)
#print(rgb)
#pdb.set_trace()
out[:,:,index]=np.squeeze(im[:,:,2])
#print(index)
#pdb.set_trace()
if tasktype.task=='binary':
out = (np.zeros((im.shape[0],im.shape[1])) ).astype(np.uint8)
out[:,:]=np.squeeze(im[:,:,2])
if tasktype.task=='instrument':
out = (np.zeros((im.shape[0],im.shape[1],np.shape(data['instrument'])[0]-1))).astype(np.uint8)
#pdb.set_trace()
image=np.squeeze(im[:,:,0])
indexc=0
for label_info,index in zip(data['instrument'],range(0,np.shape(data['instrument'])[0]+1)):
rgb=label_info['color'][0]
#pdb.set_trace()
if rgb==0:
continue
temp_out = (np.zeros(im.shape[:2]) ).astype(np.uint8)
gray_val=255
match_pxls = np.where((image == rgb))
temp_out[match_pxls] = gray_val
out[:,:,index-1]=temp_out
if tasktype.task=='parts':
out = (np.zeros((im.shape[0],im.shape[1],np.shape(data['parts'])[0])) ).astype(np.uint8)
#pdb.set_trace()
image=np.squeeze(im[:,:,1])
indexc=0
for label_info,index in zip(data['parts'],range(0,np.shape(data['parts'])[0])):
rgb=label_info['color'][1]
#pdb.set_trace()
if rgb==0:
continue
temp_out = (np.zeros(im.shape[:2]) ).astype(np.uint8)
gray_val=255
match_pxls = np.where(image == rgb)
temp_out[match_pxls] = gray_val
out[:,:,index]=temp_out
return out.astype(np.uint8)
def convert_color(data,im, tasktype):
# pdb.set_trace()
im=np.squeeze(im)
if tasktype.task=='all':
out1 = (np.zeros((im.shape[0],im.shape[1])) ).astype(np.uint8)
out2 = (np.zeros((im.shape[0],im.shape[1])) ).astype(np.uint8)
out3 = (np.zeros((im.shape[0],im.shape[1])) ).astype(np.uint8)
for label_info,index in zip(data['instrument'],range(0,np.shape(data['instrument'])[0]+1)):
rgb=label_info['color'][0]
if np.sum(rgb)==0:
continue
temp=im[:,:,index-1]
temp=temp.astype(np.float)
#temp =cv2.resize(temp,(224,224),interpolation=cv2.INTER_CUBIC)
match_pxls = np.where(temp > 0.2)
out1[match_pxls] = rgb
for label_info,index in zip(data['parts'],range(np.shape(data['instrument'])[0],np.shape(data['instrument'])[0]+np.shape(data['parts'])[0])):
rgb=label_info['color'][1]
#pdb.set_trace()
if np.sum(rgb)==0:
continue
temp=im[:,:,index-1]
#print(index-1)
temp=temp.astype(np.float)
#temp =cv2.resize(temp,(224,224),interpolation=cv2.INTER_CUBIC)
match_pxls = np.where(temp > 0.2)
out2[match_pxls] = rgb
out3=(im[:,:,index]>0.2)*255
out=np.dstack((out1,out2,out3))
#pdb.set_trace()
if tasktype.task=='binary':
out = (np.zeros((im.shape[0],im.shape[1])) ).astype(np.uint8)
out=(im>0.2)*255
if tasktype.task=='parts':
out = (np.zeros((im.shape[0],im.shape[1])) ).astype(np.uint8)
for label_info,index in zip(data['parts'],range(0,np.shape(data['parts'])[0])):
rgb=label_info['color'][1]
if np.sum(rgb)==0:
continue
temp=im[:,:,index]
temp=temp.astype(np.float)
temp =cv2.resize(temp,(224,224),interpolation=cv2.INTER_CUBIC)
match_pxls = np.where(temp > 0.2)
out[match_pxls] = rgb
if tasktype.task=='instrument':
out = (np.zeros((im.shape[0],im.shape[1])) ).astype(np.uint8)
"""
stratified-primary-cylindrical.py
A regression test for globAccDisk-src-primary_grav.hpp (source terms for gravity with a source at the coordinate center).
Current test:
-- stratified gravity
-- cylindrical coordinate version
-- a Keplerian disk inclined wrt the coordinate system is initialized, the test ensures that it remains stationary
"""
# Modules
import numpy as np # standard Python module for numerics
import sys # standard Python module to change path
import scripts.utils.athena as athena # utilities for running Athena++
import scripts.utils.comparison as comparison # more utilities explicitly for testing
sys.path.insert(0, '../../vis/python') # insert path to Python read scripts
import athena_read # utilities for reading Athena++ data # noqa
import socket # recognizing host machine
def prepare(**kwargs):
# check which machine we're running on and configure accordingly
if socket.gethostname() == 'ast1506-astro':
athena.configure('hdf5', 'mpi',
prob='globAccDisk-test-binary_gravity',
flux='roe',
eos='adiabatic',
coord='cylindrical',
hdf5_path='/usr/local/Cellar/hdf5/1.10.4',
**kwargs)
else:
athena.configure('hdf5', 'mpi',
prob='globAccDisk-test-binary_gravity',
flux='roe',
eos='adiabatic',
coord='cylindrical',
**kwargs)
athena.make()
def run(**kwargs):
arguments = ['job/problem_id=stratified-primary-cylindrical',
'output1/file_type=hdf5',
'output1/variable=prim',
'output1/dt=%.20f' % (2.5),
'time/cfl_number=0.3',
'time/tlim=%.20f' % (2.5),
'mesh/nx1=32',
'mesh/x1min=0.5',
'mesh/x1max=1.0',
'mesh/ix1_bc=reflecting',
'mesh/ox1_bc=reflecting',
'mesh/nx2=64',
'mesh/x2min=0.0',
'mesh/x2max=%.20f' % (2.*np.pi),
'mesh/ix2_bc=periodic',
'mesh/ox2_bc=periodic',
'mesh/nx3=16',
'mesh/x3min=-0.5',
'mesh/x3max=0.5',
'mesh/ix3_bc=reflecting',
'mesh/ox3_bc=reflecting',
'meshblock/nx1=16',
'meshblock/nx2=16',
'meshblock/nx3=16',
'hydro/gamma=1.1',
'hydro/dfloor=0.0001',
'hydro/pfloor=0.0001',
'problem/stratified=1',
'problem/binary_component=0',
'problem/GM1=1.0',
'problem/rho=1.0',
'problem/rho0=0.01',
'problem/press=0.01',
'problem/inclination=0.3',
'problem/position_angle=1.0',
'problem/ang_dist_max=0.2']
athena.run('hydro/athinput.binary_gravity', arguments)
def analyze():
initial_state = athena_read.athdf('bin/stratified-primary-cylindrical.out1.00000.athdf')
r0, phi0, z0, rho = [initial_state[key] for key in ['x1v', 'x2v', 'x3v', 'rho']]
z, phi, r = np.meshgrid(z0, phi0, r0, indexing='ij')
#calculate the center of mass of each vertical stencil to be compared with the final state
com_expected = np.sum(rho*z, axis=0) / np.sum(rho, axis=0)
# check if the frames are identical
for i in range(1,2):
test_results = athena_read.athdf('bin/stratified-primary-cylindrical.out1.0000%i.athdf' % i)
rho = test_results['rho']
com = np.sum(rho*z, axis=0) / np.sum(rho, axis=0)
error_rel_com = np.sum(np.abs(com_expected - com)) / (np.product(com.shape))
import matplotlib.pyplot as plt
import numpy as np
from scipy import ndimage
from scipy.ndimage import binary_fill_holes as fillholes
from skimage import img_as_ubyte
from skimage.util import img_as_float
from skimage.exposure import adjust_sigmoid
from skimage.filters import threshold_otsu, threshold_triangle, rank, laplace, sobel
from skimage.segmentation import clear_border
from skimage.measure import label
from skimage.morphology import closing, square, disk, remove_small_objects, opening, dilation, watershed, erosion
from skimage.color import label2rgb, rgb2gray
from skimage.transform import rescale
import os
from os.path import join
from scipy import ndimage as ndi
from warnings import warn
def frequency_filter(im, mu, sigma, passtype='low'):
'''
This function applies a lowpass or highpass filter to an image.
Paramters
---------
im : (N, M) ndarray
Grayscale input image.
mu : float, optional
Average for input in low pass filter. Default value is 500.
sigma : float, optional
Standard deviation for input in low pass filter. Default value is 70.
passtype: string
Applies a 'high' or 'low' pass filter. Default value is 'low'.
Returns
-------
out : ndarray
Low or high pass filtered output image.
Examples
--------
>>> image = plt.imread('..\C3-NTG-CFbs_NTG5ECM_1mMRGD_20x_003.tif')
>>> lowpass = frequency_filter(image, 500, 70, passtype='low')
'''
# define x and y based on image shape
y_length, x_length = np.shape(im)
xi = np.linspace(0, x_length-1, x_length)
yi = np.linspace(0, y_length-1, y_length)
x, y = np.meshgrid(xi, yi)
# define lowpass or highpass filter
if passtype == 'low':
gfilt = np.exp(-((x-mu)**2 + (y-mu)**2)/(2*sigma**2))
if passtype == 'high':
gfilt = 1 - np.exp(-((x-mu)**2 + (y-mu)**2)/(2*sigma**2))
fim = np.fft.fft2(im) # moving to spacial domain
fim_c = np.fft.fftshift(fim) # centering
fim_filt = np.multiply(fim_c, gfilt) # apply the filter
fim_uc = np.fft.ifftshift(fim_filt) # uncenter
im_pass = np.real(np.fft.ifft2(fim_uc)) # perform inverse transform
return im_pass
def _check_dtype_supported(ar):
'''
Used in remove_large_objects function and taken from
skimage.morphology package.
'''
# Should use `issubdtype` for bool below, but there's a bug in numpy 1.7
if not (ar.dtype == bool or np.issubdtype(ar.dtype, np.integer)):
raise TypeError("Only bool or integer image types are supported. "
"Got %s." % ar.dtype)
def remove_large_objects(ar, max_size=10000, connectivity=1, in_place=False):
'''
Remove connected components larger than the specified size. (Modified from
skimage.morphology.remove_small_objects)
Parameters
----------
ar : ndarray (arbitrary shape, int or bool type)
The array containing the connected components of interest. If the array
type is int, it is assumed that it contains already-labeled objects.
The ints must be non-negative.
max_size : int, optional (default: 10000)
The largest allowable connected component size.
connectivity : int, {1, 2, ..., ar.ndim}, optional (default: 1)
The connectivity defining the neighborhood of a pixel.
in_place : bool, optional (default: False)
If `True`, remove the connected components in the input array itself.
Otherwise, make a copy.
Raises
------
TypeError
If the input array is of an invalid type, such as float or string.
ValueError
If the input array contains negative values.
Returns
-------
out : ndarray, same shape and type as input `ar`
The input array with small connected components removed.
Examples
--------
>>> from skimage import morphology
>>> a = np.array([[0, 0, 0, 1, 0],
... [1, 1, 1, 0, 0],
... [1, 1, 1, 0, 1]], bool)
>>> b = remove_large_objects(a, 5)
>>> b
array([[False, False, False,  True, False],
[False, False, False, False, False],
[False, False, False, False,  True]], dtype=bool)
>>> c = morphology.remove_small_objects(a, 7, connectivity=2)
>>> c
array([[False, False, False, True, False],
[ True, True, True, False, False],
[ True, True, True, False, False]], dtype=bool)
>>> d = remove_large_objects(a, 6, in_place=True)
>>> d is a
True
'''
# Raising type error if not int or bool
_check_dtype_supported(ar)
if in_place:
out = ar
else:
out = ar.copy()
if max_size == 0: # shortcut for efficiency
return out
if out.dtype == bool:
selem = ndi.generate_binary_structure(ar.ndim, connectivity)
ccs = np.zeros_like(ar, dtype=np.int32)
ndi.label(ar, selem, output=ccs)
else:
ccs = out
try:
component_sizes = np.bincount(ccs.ravel())
except ValueError:
raise ValueError("Negative value labels are not supported. Try "
"relabeling the input with `scipy.ndimage.label` or "
"`skimage.morphology.label`.")
if len(component_sizes) == 2:
warn("Only one label was provided to `remove_large_objects`. "
"Did you mean to use a boolean array?")
too_large = component_sizes > max_size
too_large_mask = too_large[ccs]
out[too_large_mask] = 0
return out
def phalloidin_labeled(im, selem=disk(3), mu=500, sigma=70, cutoff=0, gain=100,
min_size=250, max_size=10000, connectivity=1):
"""
Signature: phalloidin_labeled(*args)
Docstring: Segment and label image
Extended Summary
----------------
The colorize function applies preprocessing filters (contrast and high
pass) then defines the threshold value for the desired image. Thresholding
is calculated by the otsu function creates a binarized image by setting
pixel intensities above that thresh value to white, and the ones below to
black (background). Next, it cleans up the image by filling in random noise
within the cell outlines and removes small background objects. It then
labels adjacent pixels with the same value and defines them as a region.
It returns an RGB image with color-coded labels.
Paramters
---------
im : (N, M) ndarray
Grayscale input image.
selem : numpy.ndarray, optional
Area used for separating cells. Default value is
skimage.morphology.disk(3).
cutoff : float, optional
Cutoff of the sigmoid function that shifts the characteristic curve
in horizontal direction. Default value is 0.
gain : float, optional
The constant multiplier in exponential's power of sigmoid function.
Default value is 100.
mu : float, optional
Average for input in low pass filter. Default value is 500.
sigma : float, optional
Standard deviation for input in low pass filter. Default value is 70.
min_size : int, optional
The smallest allowable object size. Default value is 250.
max_size : int, optional
The largest allowable object size. Default value is 10000.
connectivity : int, optional
The connectvitivy defining the neighborhood of a pixel. Default value
is 1.
Returns
-------
out : label_image (ndarray) segmented and object labeled for analysis
Examples
--------
image = plt.imread('C3-NTG-CFbs_NTG5ECM_1mMRGD_20x_003.tif')
label_image = phalloidin_labeled(image, mu=500, sigma=70,
cutoff=0, gain=100)
"""
# contrast adjustment
im_con = adjust_sigmoid(im, cutoff=cutoff, gain=gain, inv=False)
# contrast + low pass filter
im_lo = frequency_filter(im_con, mu, sigma, passtype='low')
# contrast + low pass + binary
thresh = threshold_otsu(im_lo, nbins=256)
im_bin = im_lo > thresh
# fill holes, separate cells, and remove small/large objects
im_fill = ndimage.binary_fill_holes(im_bin)
im_open = opening(im_fill, selem)
im_clean_i = remove_small_objects(im_open, min_size=min_size,
connectivity=connectivity, in_place=False)
im_clean = remove_large_objects(im_clean_i, max_size=max_size,
connectivity=connectivity, in_place=False)
# labelling regions that are cells
label_image = label(im_clean)
# coloring labels over cells
image_label_overlay = label2rgb(label_image, image=im, bg_label=0)
print(image_label_overlay.shape)
# plot overlay image
fig, ax = plt.subplots(figsize=(10, 6))
ax.imshow(image_label_overlay)
ax.set_axis_off()
plt.tight_layout()
plt.show()
return (label_image)
def SMA_segment(im, mu=500, sigma=70, cutoff=0, gain=100,
min_size=100, connectivity=1):
"""
    This function binarizes a smooth muscle actin (SMA) fluorescence microscopy channel
    using contrast adjustment, a low pass filter, Otsu thresholding, and removal
    of small objects.
    Parameters
    ----------
im : (N, M) ndarray
Grayscale input image.
cutoff : float, optional
Cutoff of the sigmoid function that shifts the characteristic curve
in horizontal direction. Default value is 0.
gain : float, optional
The constant multiplier in exponential's power of sigmoid function.
Default value is 100.
mu : float, optional
Average for input in low pass filter. Default value is 500.
sigma : float, optional
Standard deviation for input in low pass filter. Default value is 70.
min_size : int, optional
The smallest allowable object size. Default value is 100.
connectivity : int, optional
        The connectivity defining the neighborhood of a pixel. Default value
        is 1.
Returns
-------
    label_image : ndarray
        Segmented and object-labeled image for analysis.
    image_label_overlay : ndarray
        RGB overlay of the labels on the input image.
Examples
--------
    >>> image = plt.imread('../C4-NTG-CFbs_NTG5ECM_1mMRGD_20x_003.tif')
>>> label, overlay = SMA_segment(image, mu=500, sigma=70,
cutoff=0, gain=100)
"""
# contrast adjustment
im_con = adjust_sigmoid(im, cutoff=cutoff, gain=gain, inv=False)
# contrast + low pass filter
im_lo = frequency_filter(im_con, mu, sigma, passtype='low')
# contrast + low pass + binary
thresh = threshold_otsu(im_lo, nbins=256)
im_bin = im_lo > thresh
# remove small objects
im_bin_clean = remove_small_objects(im_bin, min_size=min_size,
connectivity=connectivity,
in_place=False)
# labelling regions that are cells
label_image = label(im_bin_clean)
# coloring labels over cells
image_label_overlay = label2rgb(label_image, image=im, bg_label=0)
return label_image, image_label_overlay
def colorize(image, i, x):
"""
Signature: colorize(*args)
Docstring: segment and label image
Extended Summary:
----------------
The colorize function defines the threshold value for the desired image by
the triangle function and then creates a binarized image by setting pixel
intensities above that thresh value to white, and the ones below to black
(background). Next, it closes up the image by filling in random noise
within the cell outlines and smooths/clears out the border. It then labels
adjacent pixels with the same value and defines them as a region. It
returns an RGB image with color-coded labels.
Parameters:
----------
image : 2D array
greyscale image
i : int
dimension of square to be used for binarization
x : float
dimension of image in microns according to imageJ
Returns:
--------
    label_image : 2D ndarray of int
        Labeled image; an RGB overlay of the labels on the input image is also displayed.
"""
# resizing image
image = rescale(image, x/1024, anti_aliasing=False)
# applying threshold to image
thresh = threshold_triangle(image)
binary = closing(image > thresh, square(i))
binary = ndimage.binary_fill_holes(binary)
# cleaning up boundaries of cells
cleared = clear_border(binary)
# labelling regions that are cells
label_image = label(cleared)
# coloring labels over cells
image_label_overlay = label2rgb(label_image, image=image, bg_label=0)
print(image_label_overlay.shape)
fig, ax = plt.subplots(figsize=(10, 6))
ax.imshow(image_label_overlay)
ax.set_axis_off()
plt.tight_layout()
plt.show()
return (label_image)
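# Illustrative usage sketch (not part of the original module): calling colorize
# on a synthetic grayscale frame. The frame contents, the square size (i=4) and
# the 1024-micron field of view are assumptions made only for this example.
def _example_colorize():
    rng = np.random.default_rng(0)
    frame = rng.random((1024, 1024))
    frame[200:400, 200:400] += 2.0   # bright blob standing in for a cell
    labels = colorize(frame, i=4, x=1024)
    return labels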
def sharpen_nuclei(image, selem=square(8), ksize=10, alpha=0.2, sigma=40,
imshow=True):
"""
Highlight nucleis in the image.
Make a sharp contrast between nucleis and background to highlight nucleis
in the input image, achieved by mean blurring, laplace sharpening, and
Gaussian high-pass filter. Selem, ksize, alpha, sigma parameters have
default values while could be customize by user.
Parameters
----------
image : numpy.ndarray
grayscale image which needs to enhance the nucleis.
selem : numpy.ndarray
area used for scanning in blurring, default to be square(8).
ksize : int
ksize used for laplace transform, default to be 10.
alpha : float
coefficient used in laplace sharpening, default to be 0.2.
sigma : int
power coefficient in Gussian filter, default to be 40.
imshow : bool, str
users choose whether to show the processed images, default to be True.
Returns
----------
Return to 2 processed grayscale images with sharpened nucleis(2 dimension arrays)
in the image using two different sharpening styles.
"""
image = img_as_ubyte(image)
def custom(image):
imin = np.min(image)
imax = np.max(image)
full = imax - imin
new = (image - imin)/full
return new
im = custom(image)
print(im.shape)
threshold2 = np.mean(im) + 3*np.std(im)
print(threshold2)
im1 = im > threshold2
im2 = rank.mean(im1, selem)
im21 = custom(im2)
threshold3 = np.mean(im21) + np.std(im21)
print(threshold3)
im3 = im > threshold3
im5 = laplace(im2, ksize=ksize)
im4 = im2 + alpha*im5
threshold4 = np.mean(im4) + np.std(im4)
im4 = im4 > threshold4
xi = np.linspace(0, (im.shape[1]-1), im.shape[1])
yi = np.linspace(0, (im.shape[0]-1), im.shape[0])
x, y = np.meshgrid(xi, yi)
sigma = sigma
mi = im.shape[1]/2
ni = im.shape[0]/2
gfilt = np.exp(-((x-mi)**2+(y-ni)**2)/(2*sigma**2))
    fim = np.fft.fft2(im1)
from collections import defaultdict
import librosa
import os
import numpy as np
import h5py
class Audio:
filename = None
project = None
bucket = None
tempo = None
beats = None
features = None
tokens = None
loaded_from_cache = False
has_changed = False
def __init__(self, filename, project):
self.filename = filename
self.project = project
self.features = defaultdict()
self.tokens = defaultdict()
self.signal_has_changed = False
self.feature_has_changed = False
self.token_has_changed = False
self.y = None
self.sr = None
def load(self):
if self.project.cache_features:
self.__load_features_from_cache()
self.__load_tokens_from_cache()
def add_feature(self, feature_name, feature):
self.features[feature_name] = feature
self.feature_has_changed = True
def add_tokens(self, tokens_key, tokens):
self.tokens[tokens_key] = tokens
self.token_has_changed = True
def persist(self):
if self.project.cache_features and self.feature_has_changed:
self.persist_features()
if self.project.cache_tokens and self.token_has_changed:
self.persist_tokens()
if self.project.cache_signal and self.signal_has_changed:
self.persist_signal()
def signal(self):
if self.y is None:
self.y, self.sr = self.__load_signal()
return (self.y, self.sr)
def cleanup(self):
self.y = None
self.sr = None
def persist_features(self):
self.__create_cache_folder()
print('dumping features', self.filename)
with h5py.File(self.cache_filename('features'), "w") as f:
for key in self.features.keys():
f.create_dataset(key, data=self.features[key])
self.feature_has_changed = False
def persist_tokens(self):
print('dumping tokens', self.filename)
with h5py.File(self.cache_filename('tokens'), "w") as f:
for key in self.tokens.keys():
f.attrs[key] = self.tokens[key]
self.token_has_changed = False
def persist_signal(self):
self.__create_cache_folder()
print('dumping audio', self.filename)
with h5py.File(self.cache_filename('audio'), "w") as f:
f.create_dataset('y', data=self.y)
f.attrs["sr"] = self.sr
self.signal_has_changed = False
def clean_cache(self, file_type_str):
if self.cache_filename_exists():
os.remove(self.cache_filename(file_type_str))
def __load_signal(self):
return self.__load_signal_from_cache() or self.__load_signal_from_file()
def __load_signal_from_file(self):
print('loading signal from file - %s' % self.filename)
self.y, self.sr = librosa.load(self.filename)
self.signal_has_changed = True
return (self.y, self.sr)
def __load_signal_from_cache(self):
if not self.cache_filename_exists('audio'):
return None
print('loading signal from cache - %s' % self.filename)
with h5py.File(self.cache_filename('audio'), 'r') as f:
self.y = np.array(f['y'])
self.sr = f.attrs["sr"]
return (self.y, self.sr)
def __load_features_from_cache(self):
if not self.cache_filename_exists('features'):
return
with h5py.File(self.cache_filename('features'), 'r') as f:
for k in f.keys():
                self.features[k] = np.array(f[k])
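# Illustrative usage sketch (not part of the original module), assuming the rest
# of the Audio class (cache filename helpers) as referenced above. The project
# stand-in and the wav path are placeholders invented for this example.
def _example_audio_usage():
    from types import SimpleNamespace
    project = SimpleNamespace(cache_features=False, cache_tokens=False, cache_signal=False)
    audio = Audio('example.wav', project)
    y, sr = audio.signal()                                  # lazily loads via librosa
    audio.add_feature('mfcc', librosa.feature.mfcc(y=y, sr=sr))
    audio.persist()                                         # no-op: caching disabled above
    audio.cleanup()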
import os
import torch
from torch.utils.data import Dataset
import random
import numpy as np
from torchvision.transforms import transforms
import pickle
from scipy import ndimage
from config import opts
import monai.transforms as mt
config = opts()
def pkload(fname):
with open(fname, 'rb') as f:
return pickle.load(f)
# def MaxMinNormalization(x):
# Max = np.max(x)
# Min = np.min(x)
# x = (x - Min) / (Max - Min)
# return x
class MaxMinNormalization(object):
def __call__(self, sample):
image = sample['image']
label = sample['label']
Max = np.max(image)
Min = np.min(image)
image = (image - Min) / (Max - Min)
return {'image': image, 'label': label}
class Random_Flip(object):
def __call__(self, sample):
image = sample['image']
label = sample['label']
if random.random() < 0.5:
image = np.flip(image, 0)
label = np.flip(label, 0)
if random.random() < 0.5:
image = np.flip(image, 1)
label = np.flip(label, 1)
if random.random() < 0.5:
image = np.flip(image, 2)
label = np.flip(label, 2)
return {'image': image, 'label': label}
class Random_Crop(object):
def __call__(self, sample):
image = sample['image']
label = sample['label']
H = random.randint(0, config.input_H - config.crop_H)
W = random.randint(0, config.input_W - config.crop_W)
D = random.randint(0, config.input_D - config.crop_D)
image = image[H: H + config.crop_H, W: W + config.crop_W, D: D + config.crop_D, ...]
label = label[..., H: H + config.crop_H, W: W + config.crop_W, D: D + config.crop_D]
return {'image': image, 'label': label}
class Random_intencity_shift(object):
def __call__(self, sample, factor=0.1):
image = sample['image']
label = sample['label']
scale_factor = np.random.uniform(1.0-factor, 1.0+factor, size=[1, image.shape[1], 1, image.shape[-1]])
        shift_factor = np.random.uniform(-factor, factor, size=[1, image.shape[1], 1, image.shape[-1]])
        # Hedged completion (the original continuation was truncated): apply the
        # per-channel scale and shift to the image and return the sample.
        image = image * scale_factor + shift_factor
        return {'image': image, 'label': label}
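# Illustrative usage sketch (not part of the original module): composing the
# transforms above into one augmentation pipeline. It assumes `sample` is a dict
# with 'image' and 'label' arrays whose spatial size matches the crop settings
# held in `config`.
def _example_augmentation_pipeline(sample):
    pipeline = transforms.Compose([
        MaxMinNormalization(),
        Random_Flip(),
        Random_Crop(),
        Random_intencity_shift(),
    ])
    return pipeline(sample)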
# -*- coding:utf-8 -*-
import argparse
import codecs
import os
import random
import sys
import numpy as np
import tensorflow as tf
from tensorflow.python.keras.callbacks import ModelCheckpoint, TensorBoard
from modeling_bert import BertForSequenceClassification
from tensorflow.python.keras.models import load_model
from sklearn.metrics import classification_report, confusion_matrix
from finetune.tokenization_bert import BertTokenizer
from finetune.dataset import ChnSentiCorpDataset
import time
from datetime import datetime
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
def set_random():
# seed
random.seed(42)
    np.random.seed(42)
from argparse import Namespace
import os
from copy import copy
import warnings
import glob
import io_mp
import numpy as np
import pandas as pd
import sampler
from nnest import NestedSampler, MCMCSampler
"""
Neural network sampling
python montepython/MontePython.py run -o chains/nn_nested -p input/base2015_ns.param -m NN --NN_sampler nested --NN_n_live_points 100
python montepython/MontePython.py run -o chains/nn_mcmc -p input/base2015.param -m NN --NN_sampler mcmc
--NN_bootstrap_fileroot chains/file
"""
NN_subfolder = 'NN'
NN_prefix = 'NN_'
name_paramnames = '.paramnames'
name_arguments = '.arguments'
str2bool = lambda s: True if s.lower() == 'true' else False
NN_user_arguments = {
# General sampling options
'sampler':
{'help': 'Type of sampler',
'type': str,
'default': 'nested'},
'n_live_points':
{'help': 'Number of live samples',
'type': int,
'default': 100},
'switch':
{'help': 'Switch from rejection sampling to MCMC',
'type': float,
'default': -1},
'train_iters':
{'help': 'Number of training iterations',
'type': int,
'default': 2000},
'mcmc_steps':
{'help': 'Nest sampling MCMC steps',
'type': int,
'default':-1},
'fastslow':
{'help': 'True or False',
'type': str2bool,
'default': True},
# NN
'hidden_dim':
{'help': 'Hidden dimension',
'type': int,
'default': 128},
'hidden_layers':
{'help': 'Number of hidden layers',
'type': int,
'default': 1},
'num_blocks':
{'help': 'Number of flow blocks',
'type': int,
'default': 5},
# Ending conditions
'evidence_tolerance':
{'help': 'Evidence tolerance',
'type': float,
'default': 0.5},
# MCMC sampler options
'bootstrap_fileroot':
{'help': 'Bootstrap chain fileroot',
'type': str}
}
def initialise(cosmo, data, command_line):
"""
Main call to prepare the information for the NeuralNest run.
"""
# Convenience variables
varying_param_names = data.get_mcmc_parameters(['varying'])
derived_param_names = data.get_mcmc_parameters(['derived'])
if getattr(command_line, NN_prefix+'sampler', '').lower() == 'nested':
# Check that all the priors are flat and that all the parameters are bound
is_flat, is_bound = sampler.check_flat_bound_priors(
data.mcmc_parameters, varying_param_names)
if not is_flat:
raise io_mp.ConfigurationError(
'Nested Sampling with NeuralNest is only possible with flat ' +
'priors. Sorry!')
if not is_bound:
raise io_mp.ConfigurationError(
'Nested Sampling with NeuralNest is only possible for bound ' +
'parameters. Set reasonable bounds for them in the ".param"' +
'file.')
# If absent, create the sub-folder NS
NN_folder = os.path.join(command_line.folder, NN_subfolder)
if not os.path.exists(NN_folder):
os.makedirs(NN_folder)
run_num = sum(os.path.isdir(os.path.join(NN_folder,i)) for i in os.listdir(NN_folder)) + 1
# -- Automatic arguments
data.NN_arguments['x_dim'] = len(varying_param_names)
data.NN_arguments['num_derived'] = len(derived_param_names)
data.NN_arguments['verbose'] = True
data.NN_arguments['log_dir'] = os.path.join(NN_folder, str(run_num))
data.NN_arguments['use_gpu'] = False
data.NN_arguments['flow'] = 'nvp'
data.NN_arguments['load_model'] = ''
data.NN_arguments['batch_size'] = 100
if getattr(command_line, NN_prefix+'fastslow'):
data.NN_arguments['num_slow'] = data.block_parameters[0]
else:
data.NN_arguments['num_slow'] = 0
# -- User-defined arguments
for arg in NN_user_arguments:
value = getattr(command_line, NN_prefix+arg)
data.NN_arguments[arg] = value
if arg == 'switch':
if value >= 0:
data.NN_arguments['switch'] = value
elif data.NN_arguments['num_slow'] > 0:
data.NN_arguments['switch'] = 1.0 / (5 * data.NN_arguments['num_slow'])
if getattr(command_line, NN_prefix + 'sampler', '').lower() == 'mcmc':
data.NN_arguments['mcmc_steps'] = getattr(command_line, 'N')
data.NN_param_names = varying_param_names
base_name = os.path.join(NN_folder, 'base')
if run_num == 1:
# Write the NeuralNest arguments and parameter ordering
with open(base_name+name_arguments, 'w') as afile:
for arg in data.NN_arguments:
afile.write(' = '.join(
[str(arg), str(data.NN_arguments[arg])]))
afile.write('\n')
with open(base_name+name_paramnames, 'w') as pfile:
pfile.write('\n'.join(data.NN_param_names+derived_param_names))
def run(cosmo, data, command_line):
derived_param_names = data.get_mcmc_parameters(['derived'])
NN_param_names = data.NN_param_names
nDims = len(data.NN_param_names)
nDerived = len(derived_param_names)
if data.NN_arguments['sampler'].lower() == 'nested':
def prior(cube):
# NN uses cube -1 to 1 so convert to 0 to 1
cube = cube / 2 + 0.5
if len(cube.shape) == 1:
theta = [0.0] * nDims
for i, name in enumerate(data.NN_param_names):
theta[i] = data.mcmc_parameters[name]['prior'] \
.map_from_unit_interval(cube[i])
return np.array([theta])
else:
thetas = []
for c in cube:
theta = [0.0] * nDims
for i, name in enumerate(data.NN_param_names):
theta[i] = data.mcmc_parameters[name]['prior'] \
.map_from_unit_interval(c[i])
thetas.append(theta)
return np.array(thetas)
def loglike(thetas):
logls = []
for theta in thetas:
try:
data.check_for_slow_step(theta)
except KeyError:
pass
for i, name in enumerate(data.NN_param_names):
data.mcmc_parameters[name]['current'] = theta[i]
data.update_cosmo_arguments()
# Compute likelihood
logl = sampler.compute_lkl(cosmo, data)
                if not np.isfinite(logl):
                    # Hedged reconstruction (the original continuation was truncated):
                    # guard against non-finite likelihood values.
                    logl = -1e100
#!/usr/bin/env python
# coding=utf-8
from __future__ import division, print_function, unicode_literals
import os
import os.path
import numpy as np
import h5py
from fuel.converters.base import fill_hdf5_file
np.random.seed(104174)
# update to local for easy debugging
data_dir = './'
square = np.array(
[[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 0, 0, 0, 1, 1],
[1, 1, 0, 0, 0, 1, 1],
[1, 1, 0, 0, 0, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1]])
triangle = np.array(
[[0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0],
[0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0],
[0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
shapes = [square, triangle, triangle[::-1, :].copy()]
def generate_shapes_image(width, height, nr_shapes=3):
img = np.zeros((height, width))
grp = np.zeros_like(img)
k = 1
for i in range(nr_shapes):
shape = shapes[np.random.randint(0, len(shapes))]
sy, sx = shape.shape
x = np.random.randint(0, width-sx+1)
y = np.random.randint(0, height-sy+1)
region = (slice(y,y+sy), slice(x,x+sx))
img[region][shape != 0] += 1
grp[region][shape != 0] = k
k += 1
grp[img > 1] = 0
img = img != 0
return img, grp
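# Illustrative sketch (not part of the original script): render one generated
# image and its group map to eyeball the generator output. matplotlib is an
# extra dependency assumed only for this example.
def _example_show_shapes():
    import matplotlib.pyplot as plt
    img, grp = generate_shapes_image(20, 20, nr_shapes=3)
    fig, axes = plt.subplots(1, 2)
    axes[0].imshow(img, cmap='gray')
    axes[0].set_title('binary image')
    axes[1].imshow(grp, cmap='viridis')
    axes[1].set_title('group labels')
    plt.show()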
# Definition of the number of instances in training set.
np.random.seed(265076)
nr_train_examples = 50000 # 50000
nr_valid_examples = 10000 # 10000
nr_test_examples = 10000 # 10000
nr_single_examples = 10000 # 10000
width = 20
height = 20
nr_shapes = 3
data = np.zeros((nr_train_examples, height, width), dtype=np.float32)
grps = np.zeros_like(data, dtype=np.uint8)
for i in range(nr_train_examples):
data[i], grps[i] = generate_shapes_image(width, height, nr_shapes)
data_valid = np.zeros((nr_valid_examples, height, width), dtype=np.float32)
grps_valid = np.zeros_like(data_valid, dtype=np.uint8)
for i in range(nr_valid_examples):
data_valid[i], grps_valid[i] = generate_shapes_image(width, height, nr_shapes)
data_test = np.zeros((nr_test_examples, height, width), dtype=np.float32)
grps_test = np.zeros_like(data_test, dtype=np.uint8)
for i in range(nr_test_examples):
data_test[i], grps_test[i] = generate_shapes_image(width, height, nr_shapes)
data_single = np.zeros((nr_single_examples, height, width), dtype=np.float32)
grps_single = np.zeros_like(data_single, dtype=np.uint8)
for i in range(nr_single_examples):
data_single[i], grps_single[i] = generate_shapes_image(width, height, 1)
targets = np.zeros((nr_train_examples, 1), dtype=np.uint8)
targets_valid = np.zeros((nr_valid_examples, 1), dtype=np.uint8)
# <NAME> 27 July 2018
# This module contains several functions for converting and constraining Gemini observing time constraints.
# get_timing_windows is the main method.
# import time as t
import astropy.units as u
from astropy.time import Time
from multiprocessing import cpu_count
from joblib import Parallel, delayed
import numpy as np
import re
from dt import deltat
from target_table import target_table
def time_window_indices(utc, time_wins, dt, verbose = False):
"""
Convert the times in time_wins to indices in utc.
Parameters
----------
    utc : 'astropy.time.core.Time' np.array
        UTC time grid for scheduling period (i.e. night)
    dt : 'astropy.units'
        size of time grid spacing
    time_wins : 'astropy.time.core.Time' pair(s)
        observation time windows during scheduling period.
    Example
    -------
    An observation with 4 time windows within the current night...
time_wins = [
[<Time object: scale='utc' format='unix' value=1522655300.0>,
<Time object: scale='utc' format='unix' value=1522655388.0>],
[<Time object: scale='utc' format='unix' value=1522657440.0>,
<Time object: scale='utc' format='unix' value=1522657548.0>],
[<Time object: scale='utc' format='unix' value=1522659600.0>,
<Time object: scale='utc' format='unix' value=1522659708.0>],
[<Time object: scale='utc' format='unix' value=1522661760.0>,
<Time object: scale='utc' format='unix' value=1522661868.0>]
]
"""
if verbose:
print('dt', dt)
print('utc range', utc[0].iso, (utc[-1] + dt).iso)
print(time_wins)
nt = len(utc)
i_time_wins = []
if len(time_wins) == 0:
return i_time_wins
else:
for win in time_wins:
if verbose:
print('obs window', win[0].iso, win[1].iso)
# Get index of start of window
win[0].format = 'jd'
if win[0] <= utc[0]:
i_start = 0
i = 0
else:
for i in range(nt):
# print(utc[i].iso, win[0].iso, (utc[i] + dt).iso)
# print(type(utc[i]), type(win[0]), type((utc[i] + dt)))
# print(utc[i].scale, win[0].scale, (utc[i] + dt).scale)
# print(utc[i].format, win[0].format, (utc[i] + dt).format)
# print(utc[i].value, win[0].value, (utc[i] + dt).value)
# print(utc[i].value <= win[0].value, win[0] < utc[i] + dt)
                    # Note: there is an astropy.time.Time comparison error that is
                    # due to rounding error (issue: 'Float comparison issues with time
                    # and quantity #6970'). It appears that as Time objects are manipulated
                    # they are converted to TAI then back to UTC.
                    # As a result, equal times were occasionally considered
                    # neither equal nor unequal, raising an error during this algorithm.
                    # As a workaround, Times are now compared using their JD float values.
if utc[i].value <= win[0].value < (utc[i] + dt).value:
i_start = i
break
# estimate the index of the end of the window.
# round down to closest integer
ntwin = int((win[1] - win[0]).to('hour')/dt)
i = i + ntwin
# Get index of end of window
win[1].format = 'jd'
if i >= nt:
i_end = nt - 1
else:
for j in range(i, nt):
if utc[j].value <= win[1].value < (utc[j] + dt).value:
i_end = j
break
if verbose:
print('index window boundaries', i_start, i_end)
print('corresponding tot_time grid times', utc[i_start].iso, utc[i_end].iso)
i_time_wins.append([i_start, i_end])
if verbose:
print('i_time_wins:')
[print(i_win) for i_win in i_time_wins]
return i_time_wins
def i_time(times, timegrid):
"""
    Return, for a list of times, the indices at which they appear in a time grid.
    Note: time strings must be in formats accepted by '~astropy.time.Time'. Preferably ISO format.
Parameters
----------
times : list or array of str
        times to check in formats accepted by '~astropy.time.Time'
    timegrid : list or array of str
        time grid of observing window in formats accepted by '~astropy.time.Time'
Returns
-------
bool
"""
if len(times) == 0:
return []
else:
timegrid = Time(timegrid)
times = Time(times)
i_times = np.zeros(len(times), dtype=int)
for i in range(0, len(times)):
for j in range(0, len(timegrid) - 1):
if timegrid[j] < times[i] < timegrid[j + 1]:
i_times[i] = j
break
return i_times
def checkwindow(times, timegrid):
"""
    Check which times are within the boundaries of a time grid. Return an array of booleans.
    Note: time strings must be in formats accepted by '~astropy.time.Time'. Preferably ISO format.
Parameters
----------
times : list or array of str
times to check
timegrid : list or array of str
        times in grid (in formats accepted by '~astropy.time.Time')
Returns
-------
np.array of booleans
"""
start = Time(timegrid[0])
end = Time(timegrid[-1])
bools = np.full(len(times), False)
for i in range(0, len(times)):
time = Time(times[i])
if start < time < end:
bools[i] = True
return bools
def convconstraint(time_const, start, end, current_time=None, verbose = False):
"""
    Convert and compute time windows within the scheduling period from the time constraints 'time_const' for an
    observation in the ObsTable structure.
    Parameters
    ----------
    time_const : str
        Time constraint for Gemini Observation formatted as in the catalog browser ascii dump.
        Format
        ------
        time_const = '[{start, duration, repeats, period}, {start, duration, repeats, period}, ...]'
        start : unix time in milliseconds (-1 = current)
        duration : window length in milliseconds (-1 = infinite)
        repeats : number of repeats (-1 = infinite)
        period : milliseconds between window start times
    start : '~astropy.time.core.Time'
        Scheduling period start time.
    end : '~astropy.time.core.Time'
        Scheduling period end time.
    current_time : '~astropy.time.core.Time' or None
        Current time in simulation (for triggering ToO time constraints).
    Returns
    -------
    time_win : list of '~astropy.time.core.Time' arrays, or None
        Array of time pairs of time windows overlapping with the scheduling period. Returns None if no time windows
        overlap with the scheduling window.
Example
-------
>>> from timing_windows import convconstraint
>>> start = '2018-01-01 00:00:00'
>>> end = '2019-01-01 00:00:00'
>>> time_const = '[{start, duration, repeats, period}, {start, duration, repeats, period}, ...]'
>>> time_wins = convconstraint(time_const, start, end)
"""
if verbose:
print('\ntime_const', time_const)
print('start', start)
print('end', end)
infinity = 3. * 365. * 24. * u.h # infinite tot_time duration
# Split individual tot_time constraint strings into list
string = re.sub('[\[{}\]]', '', time_const).split(',') # remove brackets
string = [tc.strip() for tc in string] # remove whitespace
if verbose:
print('Constraint strings: ', string)
if string[0] == '': # if no tot_time constraints
return [[start, end]]
else: # if observation has tot_time constraints
obs_win = [] # observation tot_time windows
tc = [re.findall(r'[+-]?\d+(?:\.\d+)?', val) for val in string] # split numbers into lists
tc.sort() # sort constraints into chronological order
if verbose:
print('Ordered constraints: ', tc)
for const in tc: # cycle through constraints
# tot_time window start tot_time t0 (unix tot_time format milliseconds).
# for ToOs, the timing constraint must begin at the tot_time of arrival to the queue.
# To do this, set the ToO program start to the tot_time of arrival, and give the
# ToO observation tot_time constraint a t0 value of -1. In this case, the tot_time constraint
# will begin from the new program start tot_time.
t0 = float(const[0])
if t0 == -1: # -1 = current tot_time in simulation
t0 = current_time
else:
t0 = Time((float(const[0]) * u.ms).to_value('s'), format='unix', scale='utc')
duration = float(const[1]) # duration (milliseconds)
            if duration == -1.0: # infinite
                duration = infinity
else:
duration = duration / 3600000. * u.h
repeats = int(const[2]) # number of repetitions
if repeats == -1: # infinite
repeats = 1000
period = float(const[3]) / 3600000. * u.h # period between windows (milliseconds)
if verbose:
print('t0.iso, duration, repeats, period: ', t0.iso, duration, repeats, period)
n_win = repeats + 1 # number of tot_time windows in constraint including repeats
win_start = t0
for j in range(n_win): # cycle through tot_time window repeats
win_start = win_start + period # start of current tot_time window
win_end = win_start + duration # start of current tot_time window
if verbose:
print('j, window: ', j, [win_start.iso, win_end.iso])
# save tot_time window if there is overlap with schedule period
if win_start < end and start < win_end:
obs_win.append([win_start, win_end])
if verbose:
print('\nadded window')
elif win_end < start: # go to next next window if current window precedes schedule period
pass
else: # stop if current window is past schedule period
break
if not obs_win:
return None
else:
return obs_win
def twilights(twilight_evening, twilight_morning, obs_windows, verbose = False):
"""
Confine observation timing constraints within nautical twilights.
Parameters
----------
twilight_evening : '~astropy.tot_time.core.Time' array
Evening twilight tot_time for scheduling period (UTC)
twilight_morning : '~astropy.tot_time.core.Time' array
Morning twilight tot_time for scheduling period (UTC)
obs_windows : list of '~astropy.tot_time.core.Time' pairs, or None
Observation timing window tot_time-pairs in UTC.
Each observation can have any number of tot_time windows.
Returns
-------
new_windows : list of lists of '~astropy.tot_time.core.Time' pairs or None
New list of tot_time windows constrained within twilights.
"""
new_windows = []
if obs_windows is not None and len(obs_windows) != 0:
for i in range(len(twilight_evening)): # cycle through nights
if verbose:
print('\ntwilights: ', twilight_evening[i].iso, twilight_morning[i].iso)
for j in range(len(obs_windows)): # cycle through tot_time windows
if verbose:
print('time_const[' + str(j) + ']:', obs_windows[j][0].iso, obs_windows[j][1].iso)
# save tot_time window if there is overlap with schedule period
if obs_windows[j][0] < twilight_morning[i] and twilight_evening[i] < obs_windows[j][1]:
# Add window with either twilight times or window edges as boundaries (whichever are innermost).
new_windows.append([max([twilight_evening[i], obs_windows[j][0]]),
min([twilight_morning[i], obs_windows[j][1]])])
if verbose:
print('\tadded:', max([twilight_evening[i], obs_windows[j][0]]).iso,
min([twilight_morning[i], obs_windows[j][1]]).iso)
if verbose:
print('new_windows:')
[print('\t', new_window[0].iso, new_window[1].iso) for new_window in new_windows]
if len(new_windows) == 0:
return None
else:
return new_windows
else:
return None
def instrument(i_obs, obs_inst, obs_disp, obs_fpu, obs_mos, insts, gmos_disp, gmos_fpu, gmos_mos, f2_fpu, f2_mos,
verbose = False):
"""
Constrain observation timing constraints in accordance with the installed instruments
    and component configuration on the current night.
Output indices of observations matching the nightly instruments and components.
Parameters
----------
i_obs : integer array
indices in obs_inst, obs_disp, and obs_fpu to check for current night.
obs_inst : list of strings
Observation instruments
obs_disp : list of strings
Observation dispersers
obs_fpu : list of string
Observation focal plane units
obs_mos : list of string
Observation custom mask name
insts : list of strings
Instruments installed on current night
gmos_disp : list of strings
GMOS disperser installed on current night
gmos_fpu : list of strings
GMOS focal plane units (not MOS) installed on current night
gmos_mos : list of strings
GMOS MOS masks installed on current night
f2_fpu : list of strings
Flamingos-2 focal plane units (not MOS) installed on current night
f2_mos : list of strings
Flamingos-2 MOS masks installed on current night
Returns
-------
in_obs : integer array
List indices for observations matching tonight's instrument configuration.
"""
if verbose:
print('Installed instruments')
print('insts', insts)
print('gmos_disp', gmos_disp)
print('gmos_fpu', gmos_fpu)
print('gmos_mos', gmos_mos)
print('f2_fpu', f2_fpu)
print('f2_mos', f2_mos)
print('i_obs', i_obs)
if len(i_obs) == 0:
return []
else:
in_obs = []
# Select i_obs from observation lists
        obs_inst = obs_inst[i_obs]
        obs_disp = obs_disp[i_obs]
        obs_fpu = obs_fpu[i_obs]
        obs_mos = obs_mos[i_obs]  # keep obs_mos aligned with the other per-observation arrays
for i in range(len(obs_inst)):
if verbose:
print('obs_inst[i], obs_disp[i], obs_fpu[i], obs_mos[i]', obs_inst[i], obs_disp[i], obs_fpu[i], obs_mos[i])
if obs_inst[i] in insts or insts == 'all':
if 'GMOS' in obs_inst[i]:
if ((obs_disp[i] in gmos_disp) or ('all' in gmos_disp))\
and (((obs_fpu[i] in gmos_fpu) or ('all' in gmos_fpu))\
or ((obs_mos[i] in gmos_mos) or (('all' in gmos_mos) and ('Custom Mask' == obs_fpu[i])))):
in_obs.append(i)
if verbose:
print('Added i =', i)
elif 'Flamingos' in obs_inst[i]:
if ((obs_fpu[i] in f2_fpu) or ('all' in f2_fpu))\
or ((obs_mos[i] in f2_mos) or (('all' in f2_mos) and ('Custom Mask' == obs_fpu[i]))):
in_obs.append(i)
if verbose:
print('Added i =', i)
else:
in_obs.append(i)
if verbose:
print('Added i =', i)
return in_obs
def nightly_calendar(twilight_evening, twilight_morning, time_windows, verbose = False):
"""
    Sort observation time windows by nightly observing window.
    Parameters
    ----------
    twilight_evening : '~astropy.time.core.Time'
        Evening twilight time for scheduling period (UTC)
    twilight_morning : '~astropy.time.core.Time'
        Morning twilight time for scheduling period (UTC)
    time_windows : list of lists of '~astropy.time.core.Time' pairs
        Array of time windows for all observations.
    Returns
    -------
    i_obs : int array
        Indices of observations with a time_window during the night of the provided date.
    obs_windows : array of '~astropy.time.core.Time' pair(s)
        Observation time windows for current night corresponding to 'i_obs'.
"""
# define start of current day as local noon
night_start = twilight_evening
night_end = twilight_morning
if verbose:
print('\nDate window (start,end): ', night_start.iso, night_end.iso)
i_obs = [] # list of current night's observations
obs_windows = [] # tot_time windows corresponding to i_obs
for i in range(len(time_windows)): # cycle through observations
if verbose:
print('\tobs i:', i)
if time_windows[i] is not None:
obs_wins = []
for j in range(len(time_windows[i])): # cycle through tot_time windows
if verbose:
print('\t\ttime_window[' + str(i) + '][' + str(j) + ']:',
time_windows[i][j][0].iso, time_windows[i][j][1].iso)
# save index if there is overlap with schedule period
if time_windows[i][j][1] >= night_start and night_end >= time_windows[i][j][0]:
obs_wins.append(time_windows[i][j])
if verbose:
print('\t\t\tadded window')
# else:
# print('\t\tnot added')
# if tot_time window(s) overlapped with night, save obs index and window(s)
if len(obs_wins) != 0:
i_obs.append(i)
obs_windows.append(obs_wins)
if verbose:
print('\t\tadded obs index'
' to list')
else:
if verbose:
print('\t\t\ttime_window[' + str(i) + ']:', time_windows[i])
pass
# if verbose:
# print('i_obs', i_obs)
# print('obs_windows', obs_windows)
return i_obs, obs_windows
def elevation_const(targets, i_wins, elev_const):
"""
    Restrict time windows for elevation constraints.
    If all time windows for a given observation are removed, the corresponding entry is set to None.
Parameters
----------
targets : '~astropy.table.Table'
Target table for current night (Columns: 'i', 'id', 'ZD', 'HA', 'AZ', 'AM', 'mdist').
i_wins : list of lists
        Observation time windows as time grid indices. Each observation may have one or more time windows.
Example
-------
i_wins = [
[[0,2], [4, 10]],
[[0,20]],
[[0,10], [15,20],
...]
elev_const : list of dictionaries
Elevation constraints of observations in observation table
(dictionary keys: {'type':str, 'min': float or '~astropy.units', 'max': float or '~astropy.units'}).
Returns
-------
targets : '~astropy.table.Table' target table
Target table for current night with tot_time windows constrained to meet elevation constraints.
If an observation has no remaining tot_time windows, the table cell is given NoneType.
"""
verbose = False
if len(targets) != 0:
for i in range(len(targets)): # cycle through rows in table
# set new window boundaries to -1 to start
i_start = -1
i_end = -1
if verbose:
print()
print(targets['i'].data[i], elev_const[i])
j = targets['i'].data[i] # elevation constraint index of target
# Get tot_time grid window indices for elevation constraint
if elev_const[j]['type'] == 'Hour Angle':
if verbose:
print('\nHour Angle!')
print(targets['HA'].quantity[i])
print(elev_const[i]['min'])
print(elev_const[i]['max'])
# print(targets['HA'].quantity[i] >= elev_const[i]['min'])
# print(targets['HA'].quantity[i] <= elev_const[i]['max'])
# get indices of hour angles within constraint limits
ii = np.where(np.logical_and(
targets['HA'].quantity[i] >= elev_const[j]['min'],
targets['HA'].quantity[i] <= elev_const[j]['max'])
)[0][:]
if verbose:
print('ii', ii)
# save boundaries of indices within constraint
if len(ii) != 0:
i_start = ii[0]
i_end = ii[-1]
elif elev_const[j]['type'] == 'Airmass':
if verbose:
print('\nAirmass!')
print(targets['AM'][i])
# get indices of airmass within constraint limits
ii = np.where(np.logical_and(targets['AM'][i] >= elev_const[j]['min'],
targets['AM'][i] <= elev_const[j]['max'])
)[0][:]
if verbose:
print('ii', ii)
# save boundaries of indices within constraint
if len(ii) != 0:
i_start = ii[0]
i_end = ii[-1]
else: # skip to next observation if current one has no elevation constraints
if verbose:
print('No elevation constraint!')
continue
# Set new tot_time windows boundaries if observation had elevation constraint
if i_start != -1 and i_end != -1:
if verbose:
print('i_start, i_end: ', i_start, i_end)
# Cycle through observation tot_time windows for current night.
# Adjust each window to satisfy elevation constraint.
j = 0
while True:
if verbose:
print('initial tot_time window:',
i_wins[i][j][0],
i_wins[i][j][1])
# If current tot_time window overlaps with elevation constraint window, set new window.
if i_wins[i][j][0] <= i_end and i_start <= i_wins[i][j][1]:
# Change window to portion of overlap.
i_wins[i][j] = ([max([i_start, i_wins[i][j][0]]),
min([i_end, i_wins[i][j][1]])])
j = j + 1
if verbose:
print('\toverlap of windows:',
i_wins[i][j-1][0],
i_wins[i][j-1][1])
else: # Delete window if there is no overlap
if verbose:
print('i_wins[i],j', i_wins[i], j, type(i_wins[i]), type(i_wins[i][j]))
del i_wins[i][j]
if verbose:
print('\tdelete window')
if j == len(i_wins[i]):
break
if len(i_wins[i]) == 0:
i_wins[i] = None
if verbose:
print('new observation tot_time windows for tonight:')
if i_wins[i] is not None:
for j in range(len(i_wins[i])):
print([i_wins[i][j][0], i_wins[i][j][1]])
else:
print(i_wins[i])
return i_wins
def get_timing_windows(site, timetable, moon, obs, progs, instcal, current_time=None,
verbose_progress=True, verbose=False, debug=False):
"""
Main timing windows algorithm. This is the main method that generates timing windows and the
target data tables.
It performs the following sequence of steps using functions from timing_windows.py and target_table.py.
1. Convert timing window constraints
2. Constrain within plan boundaries and program activation dates
3. Constrain within twilights
    4. Organize time windows by date
5. Constrain within instrument calendar
6. Generate a list of nightly target data tables (from target_table.py)
7. Constrain windows within elevation constraints
Return list of target data tables.
Parameters
----------
site : 'astroplan.Observer'
Observatory site object
timetable : 'astropy.table.Table'
Time data table generated by time_table.py
moon : 'astropy.table.Table'
Moon data table generated by moon_table.py
obs : 'astropy.table.Table'
Observation data table generated by observation_table.py
progs : 'astropy.table.Table'
Program status data table generated by program_table.py
instcal : 'astropy.table.Table'
instrument calendar table generated by instrument_table.py
    current_time : 'astropy.time.core.Time' [DEFAULT = None]
        Current time in simulation (used for setting start time of ToO time constraint)
Returns
-------
targetcal : list of 'astropy.table.Table'
List of target data tables generated by target_table.py.
"""
# verbose_progress = verbose # print progress
# verbose = verbose # basic outputs
verbose2 = debug # detailed outputs
# ====== Convert timing constraints to tot_time windows ======
if verbose_progress:
print('...timing windows (convert tot_time constraints)')
# Compute all tot_time windows of observations within scheduling period boundaries or program activation/deactivation
# times. Whichever are constraining.
# print(obs['i_prog'].data[0])
# print(obs['obs_id'].data[0])
# print(progs['gemprgid'].data[obs['i_prog'].data[0]])
# print(progs['prog_start'].data[obs['i_prog'].data[0]].iso)
# print(progs['prog_end'].data[obs['i_prog'].data[0]].iso)
# print(max(timetable['twilight_evening'].data[0], progs['prog_start'].data[obs['i_prog'].data[0]]))
# print(min(timetable['twilight_morning'].data[-1], progs['prog_end'].data[obs['i_prog'].data[0]]))
ncpu = cpu_count()
time_windows = Parallel(n_jobs=ncpu)(
delayed(convconstraint)(
time_const=obs['time_const'][i],
start=max(timetable['twilight_evening'].data[0], progs['prog_start'].data[obs['i_prog'].data[i]]),
end=min(timetable['twilight_morning'].data[-1], progs['prog_end'].data[obs['i_prog'].data[i]]),
current_time=current_time)
for i in range(len(obs)))
# # Use standard for loop for troubleshooting
# time_windows = [timing_windows.convconstraint(time_const=obs['time_const'][i],
# start=timetable['twilight_evening'][0],
# end=timetable['twilight_morning'][-1])
# for i in range(len(obs))]
# ====== Timing windows (twilights) ======
if verbose_progress:
print('...timing windows (twilights)')
# Constrain tot_time windows to within nautical twilights
time_windows = Parallel(n_jobs=ncpu)(delayed(twilights)(twilight_evening=timetable['twilight_evening'].data,
twilight_morning=timetable['twilight_morning'].data,
obs_windows=time_windows[i])
for i in range(len(obs)))
# ====== Sort tot_time windows and observation indices by day ======
if verbose_progress:
print('...timing windows (organize into nights)')
# By this point, timing windows are sorted by observation.
# Reorganize tot_time windows such that they are sorted by night.
# For each night, make an array of indices corresponding to the
# available observation tot_time windows on that night.
# Make a second array containing the corresponding timing window(s).
i_obs_nightly = [] # Lists of observation indices (one per night).
time_windows_nightly = [] # Lists of corresponding tot_time windows.
for i in range(len(timetable['date'])):
i_obs_tonight, time_windows_tonight = \
nightly_calendar(twilight_evening=timetable['twilight_evening'][i],
twilight_morning=timetable['twilight_morning'][i],
time_windows=time_windows)
i_obs_nightly.append(np.array(i_obs_tonight))
time_windows_nightly.append(time_windows_tonight)
# # Use for loop for easier troubleshooting
# time_windows = [timing_windows.twilights(twilight_evening=timetable['twilight_evening'].data,
# twilight_morning=timetable['twilight_morning'].data,
# obs_windows=time_windows[i])
# for i in range(len(obs))]
# for i in range(len(time_windows_nightly)):
# for j in range(len(time_windows_nightly[i])):
# print(i_obs_nightly[i][j], time_windows_nightly[i][j])
# ====== Timing windows (instrument calendar) ======
if verbose_progress:
print('...timing windows (instrument calendar)')
# Constrain tot_time windows according to the installed instruments and
# component configuration on each night
i_obs_insts = Parallel(n_jobs=ncpu)(delayed(instrument)(i_obs=i_obs_nightly[i],
obs_inst=obs['inst'].data,
obs_disp=obs['disperser'].data,
obs_fpu=obs['fpu'].data,
obs_mos=obs['custom_mask_mdf'].data,
insts=instcal['insts'].data[i],
gmos_disp=instcal['gmos_disp'].data[i],
gmos_fpu=instcal['gmos_fpu'].data[i],
gmos_mos=instcal['gmos_mos'].data[i],
f2_fpu=instcal['f2_fpu'].data[i],
f2_mos = instcal['f2_fpu'].data[i],
verbose=verbose)
for i in range(len(timetable['date'])))
# # Use for loop for easier troubleshooting
# i_obs_insts = [instrument(i_obs=i_obs_nightly[i],
# obs_inst=obs['inst'].data,
# obs_disp=obs['disperser'].data,
# obs_fpu=obs['fpu'].data,
# insts=instcal['insts'].data[i],
# gmos_disp=instcal['gmos_disp'].data[i],
# gmos_fpu=instcal['gmos_fpu'].data[i],
# f2_fpu=instcal['f2_fpu'].data[i])
# for i in range(len(timetable['date']))]
# Get nightly observation indices and tot_time windows from results of the instrument calendar
for i in range(len(timetable['date'])):
i_obs_nightly[i] = [i_obs_nightly[i][j] for j in i_obs_insts[i]]
time_windows_nightly[i] = [time_windows_nightly[i][j] for j in i_obs_insts[i]]
# print observation indices and corresponding tot_time windows on each night
if verbose2:
for i in range(len(time_windows_nightly)):
for j in range(len(time_windows_nightly[i])):
print('obs index: ', i_obs_nightly[i][j])
if time_windows_nightly[i][j] is None:
print('\t', None)
else:
for window in time_windows_nightly[i][j]:
print('\t', window[0].iso, window[1].iso)
# ====== Convert tot_time windows to tot_time grid indices ======
if verbose_progress:
print('...tot_time window indices')
dt = deltat(time_strings=timetable['utc'][0][0:2]) # tot_time grid spacing
i_wins_nightly = []
for i in range(len(time_windows_nightly)):
# i_wins_tonight = Parallel(n_jobs=10)(delayed(time_window_indices)(utc=timetable['utc'].data[i],
# time_wins=time_windows_nightly[i][j],
# dt=dt)
# for j in range(len(time_windows_nightly[i])))
i_wins_tonight = [time_window_indices(utc=timetable['utc'].data[i],
time_wins=time_windows_nightly[i][j],
dt=dt)
for j in range(len(time_windows_nightly[i]))]
i_wins_nightly.append(i_wins_tonight)
# for i in range(len(i_wins_nightly)):
# for j in range(len(i_wins_nightly[i])):
# print(i_obs_nightly[i][j], i_wins_nightly[i][j])
# ====== Target Calendar ======
if verbose_progress:
print('...target data')
# Create list of 'astropy.table.Table' objects (one table per night).
# Each table stores the positional data of each available target throughout
# the night.
# targetcal = Parallel(n_jobs=10)(delayed(target_table)(i_obs=i_obs_nightly[i],
# latitude=site.location.lat,
# lst=timetable['lst'].data[i] * u.hourangle,
# utc=timetable['utc'].data[i],
# obs_id=obs['obs_id'].data,
# obs_ra=obs['ra'].quantity,
# obs_dec=obs['dec'].quantity,
# moon_ra=moon['ra'].data[i] * u.deg,
# moon_dec=moon['dec'].data[i] * u.deg)
# for i in range(len(timetable['date'])))
targetcal = [target_table(i_obs=i_obs_nightly[i],
latitude=site.location.lat,
lst=timetable['lst'].data[i] * u.hourangle,
utc=timetable['utc'].data[i],
obs_id=obs['obs_id'].data,
obs_ra=obs['ra'].quantity,
obs_dec=obs['dec'].quantity,
moon_ra=moon['ra'].data[i] * u.deg,
moon_dec=moon['dec'].data[i] * u.deg)
for i in range(len(timetable['date']))]
# ====== Timing windows (elevation constraint) ======
if verbose_progress:
print('...timing windows (elevation constraint)')
# Constrain timing windows to satisfy elevation constraints.
# (can be either airmass or hour angle limits).
# targetcal = Parallel(n_jobs=10)(delayed(elevation_const)(targets=targetcal[i],
# elev_const=obs['elev_const'])
# for i in range(len(timetable['date'])))
# Use for loop for troubleshooting
i_wins_nightly = [elevation_const(targets=targetcal[i],
i_wins=i_wins_nightly[i],
elev_const=obs['elev_const'].data)
for i in range(len(timetable['date']))]
# ====== Add tot_time window column to target tables ======
for i in range(len(targetcal)):
targetcal[i]['i_wins'] = i_wins_nightly[i]
# ====== Clean up target tables ======
# Remove observations from target calender if the elevation constraint
# process removed all timing windows for a given night.
# Remove corresponding rows from tables in targetcal.
for targets in targetcal:
if len(targets) != 0:
if verbose2:
# print observation indices and remaining timing windows
# one current night.
for j in range(len(targets)):
print('\t', targets['i'][j])
if targets['i_wins'][j] is not None:
for k in range(len(targets['i_wins'][j])):
print('\t\t', targets['i_wins'][j][k][0], targets['i_wins'][j][k][1])
else:
print('\t\t', targets['i_wins'][j])
# get indices for current night of observations with no remaining timing windows
# ii_del = np.where([len(targets['time_wins'].data[j]) == 0 for j in range(len(targets))])[0][:]
ii_del = np.where(targets['i_wins'].data == None)[0][:]
if verbose2:
print('ii_del', ii_del)
# delete these rows from the corresponding targetcal target_tables
if len(ii_del) != 0:
for j in sorted(ii_del, reverse=True): # delete higher indices first
targets.remove_row(j)
# print target tables
if verbose:
[print(targets) for targets in targetcal]
if verbose2:
# print nightly observations and tot_time windows
for i in range(len(targetcal)):
if len(targetcal[i]) != 0:
print('\ntargetcal[i][\'i\']:\n', targetcal[i]['i'].data)
print('\nNight (start,end):', timetable['utc'][i][0].iso, timetable['utc'][i][-1].iso)
print('\nTwilights:', timetable['twilight_evening'][i].iso, timetable['twilight_morning'][i].iso)
print('Date:', timetable['date'][i])
print('time_windows:')
for j in range(len(targetcal[i]['i_wins'])):
print('\ti:', targetcal[i]['i'][j])
if targetcal[i]['i_wins'][j] is not None:
for k in range(len(targetcal[i]['i_wins'].data[j])):
print('\t\t', targetcal[i]['i_wins'].data[j][k][0],
targetcal[i]['i_wins'].data[j][k][1])
else:
print('\t\t', 'None')
else:
print('\ntargetcal[i][\'i\']:\n', targetcal[i])
return targetcal
def test_checkwindow():
print('\ntest_checkwindow()...')
times = ['2018-07-02 03:20:00', '2018-07-02 06:45:00', '2018-07-03 02:30:00', '2018-07-03 04:45:00']
utc = ['2018-07-01 22:49:57.001', '2018-07-02 10:37:57.001']
print('times to check', times)
print('window', utc)
print(checkwindow(times, utc))
assert checkwindow(times, utc).all() == np.array([True, True, False, False]).all()
print('Test successful!')
return
def test_i_time():
print('\ntest_i_time()...')
times = ['2018-07-02 03:20:00', '2018-07-02 06:45:00']
utc = ['2018-07-01 22:49:57.001', '2018-07-01 23:49:57.001', '2018-07-02 00:49:57.001', '2018-07-02 01:49:57.001',
'2018-07-02 02:49:57.001', '2018-07-02 03:49:57.001', '2018-07-02 04:49:57.001', '2018-07-02 05:49:57.001',
'2018-07-02 06:49:57.001', '2018-07-02 07:49:57.001', '2018-07-02 08:49:57.001', '2018-07-02 09:49:57.001']
print('times to get indices', times)
print('tot_time array', utc)
print(i_time(times, utc))
assert i_time(times, utc).all() == np.array([4, 7]).all()
print('Test successful!')
return
def test_constraint():
print('\ntest_constraint()...')
# time_const = '[{1524614400000 -1 0 0}]'
time_const = '[{1488592145000 3600000 -1 140740000}]'
# time_const = '[{1522713600000 3600000 -1 108000000}]'
start = Time('2018-04-10 00:00:00')
end = Time('2018-04-12 00:00:00')
print('time_const', time_const)
print('start, end of schedule window: ', start, end)
const = convconstraint(time_const, start, end)
print('Timing window (start,end):')
[[print(c.iso) for c in con] for con in const]
assert const[0][0].iso == Time('2018-04-10 10:08:45.000').iso
assert const[0][1].iso == Time('2018-04-10 11:08:45.000').iso
print('Test successful!')
return
def test_instrument():
from astropy.table import Table
print('\ntest_instrument()...')
inst = np.array(['GMOS'])
disperser = np.array(['B'])
fpu = np.array(['A'])
    i_obs = [np.array([0], dtype=int)]
##%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
## Process Fault Detection via SVDD in metal etch dataset
## %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#%% read data
import numpy as np
X_train = np.loadtxt('Metal_etch_2DPCA_trainingData.csv', delimiter=',')
#%% bandwidth via modified mean criteria
import scipy.spatial
N = X_train.shape[0]
phi = 1/np.log(N-1)
delta = -0.14818008*np.power(phi,4) + 0.2846623624*np.power(phi,3) - 0.252853808*np.power(phi,2) + 0.159059498*phi - 0.001381145
D2 = np.sum(scipy.spatial.distance.pdist(X_train, 'sqeuclidean'))/(N*(N-1)/2) # pdist computes pairwise distances between observations
sigma = np.sqrt(D2/np.log((N-1)/(delta*delta)))
gamma = 1/(2*sigma*sigma)
#%% SVM fit
from sklearn.svm import OneClassSVM
model = OneClassSVM(nu=0.01, gamma=0.025).fit(X_train) # nu corresponds to f
#%% predict for test data
X_test = np.loadtxt('Metal_etch_2DPCA_testData.csv', delimiter=',')
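#%% illustrative sketch (not part of the original script): consume the predictions.
# OneClassSVM.predict returns +1 for inliers and -1 for outliers, so the count of
# -1 labels on the test set is the number of samples flagged as faulty.
y_pred_test = model.predict(X_test)
n_alarms = np.sum(y_pred_test == -1)
print('Test samples flagged as faulty: %d / %d' % (n_alarms, X_test.shape[0]))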
"""
Unit-aware replacements for numpy functions.
"""
from __future__ import absolute_import
from functools import wraps
import pkg_resources
import numpy as np
from .fundamentalunits import (Quantity, wrap_function_dimensionless,
wrap_function_remove_dimensions,
fail_for_dimension_mismatch, is_dimensionless,
DIMENSIONLESS)
__all__ = [
'log', 'log10', 'exp',
'sin', 'cos', 'tan',
'arcsin', 'arccos', 'arctan',
'sinh', 'cosh', 'tanh',
'arcsinh', 'arccosh', 'arctanh',
'diagonal', 'ravel', 'trace', 'dot',
'where',
'ones_like', 'zeros_like',
'arange', 'linspace'
]
def where(condition, *args, **kwds): # pylint: disable=C0111
if len(args) == 0:
# nothing to do
return np.where(condition, *args, **kwds)
elif len(args) == 2:
# check that x and y have the same dimensions
fail_for_dimension_mismatch(args[0], args[1],
'x and y need to have the same dimensions')
if is_dimensionless(args[0]):
return np.where(condition, *args, **kwds)
else:
# as both arguments have the same unit, just use the first one's
            dimensionless_args = [np.asarray(arg) for arg in args]
import numpy as np
from hypothesis import given, strategies as st
import pytest
import pyGPA.phase_unwrap as pu
# This test code was written by the `hypothesis.extra.ghostwriter` module
# and is provided under the Creative Commons Zero public domain dedication.
@pytest.mark.filterwarnings("ignore:invalid value encountered in true_divide")
@given(kmax=st.integers(1, 30))
def test_equivalent_phase_unwrap_ref_phase_unwrap(kmax):
N = 256
xx, yy = np.meshgrid(np.arange(N), np.arange(N), indexing='ij')
psi0 = (yy+xx) / (4*np.sqrt(2))
psi = pu._wrapToPi(psi0)
    weight = np.ones_like(psi)
# -*- coding: utf-8 -*-
"""
Created on Sat May 5 23:57:12 2018
@author: Arthur
"""
import numpy as np
import matplotlib.pyplot as plt
from AdamANN import AdamANN_clf
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import accuracy_score
N = 100 # number of points per class
D = 2 # dimensionality
K = 3 # number of classes
X = np.zeros((N*K,D)) # data matrix (each row = single example)
y = np.zeros(N*K, dtype='uint8') # class labels
for j in range(K):
ix = range(N*j,N*(j+1))
r = np.linspace(0.0,1,N) # radius
t = np.linspace(j*4,(j+1)*4,N) + np.random.randn(N)*0.2 # theta
X[ix] = np.c_[r*np.sin(t), r*np.cos(t)]
y[ix] = j
# lets visualize the data:
#plt.scatter(X[:, 0], X[:, 1], c=y, s=40, cmap=plt.cm.Spectral)
#plt.show()
normalizer = StandardScaler()
X = normalizer.fit_transform(X)
# set hyperparameters
hidden_units = [50,50]
hidden_func = 'relu'
alpha = 0
p_dropout = 0.1
epoch = 100
learning_rate = 0.01
learn_decay = 10
batch_size = 100
NN_clf = AdamANN_clf(hidden_units, hidden_func, alpha, p_dropout, epoch, learning_rate,
learn_decay, batch_size, verbose=True)
NN_clf.fit(X, y)
y_pred = NN_clf.predict(X)
train_accuracy = accuracy_score(y, y_pred)
print('Train accuracy :', train_accuracy)
def plot_decision_boundary(model, X, y):
# Set min and max values and give it some padding
x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1
x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1
h = 0.01
# Generate a grid of points with distance h between them
    xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, h), np.arange(x2_min, x2_max, h))
    # Hedged completion (the original continuation was truncated): predict over the
    # grid and plot the decision regions together with the training points.
    Z = model.predict(np.c_[xx1.ravel(), xx2.ravel()])
    Z = Z.reshape(xx1.shape)
    plt.contourf(xx1, xx2, Z, cmap=plt.cm.Spectral, alpha=0.8)
    plt.scatter(X[:, 0], X[:, 1], c=y, s=40, cmap=plt.cm.Spectral)
    plt.show()
import numpy as np
import jesse.indicators as ta
from jesse.factories import fake_range_candle_from_range_prices
from .data.test_candles_indicators import *
def test_acosc():
candles = np.array(mama_candles)
single = ta.acosc(candles)
seq = ta.acosc(candles, sequential=True)
assert type(single).__name__ == 'AC'
assert round(single.osc, 2) == -21.97
assert round(single.change, 2) == -9.22
assert seq.osc[-1] == single.osc
assert len(seq.osc) == len(candles)
def test_ad():
# use the same candles as mama_candles
candles = np.array(mama_candles)
single = ta.ad(candles)
seq = ta.ad(candles, sequential=True)
assert round(single, 0) == 6346031
assert len(seq) == len(candles)
assert seq[-1] == single
def test_adosc():
# use the same candles as mama_candles
candles = np.array(mama_candles)
single = ta.adosc(candles, fast_period=3, slow_period=10)
seq = ta.adosc(candles, fast_period=3, slow_period=10, sequential=True)
assert round(single / 1000000, 3) == -1.122
assert len(seq) == len(candles)
assert seq[-1] == single
def test_adx():
candles = np.array(adx_candles)
single = ta.adx(candles)
seq = ta.adx(candles, sequential=True)
assert round(single) == 26
assert len(seq) == len(candles)
assert seq[-1] == single
def test_adxr():
# use the same candles as mama_candles
candles = np.array(mama_candles)
single = ta.adxr(candles, period=14)
seq = ta.adxr(candles, period=14, sequential=True)
assert round(single, 0) == 36
assert len(seq) == len(candles)
assert seq[-1] == single
def test_alligator():
candles = np.array(mama_candles)
single = ta.alligator(candles)
seq = ta.alligator(candles, sequential=True)
assert type(single).__name__ == 'AG'
assert round(single.teeth, 0) == 236
assert round(single.jaw, 0) == 233
assert round(single.lips, 0) == 222
assert seq.teeth[-1] == single.teeth
assert len(seq.teeth) == len(candles)
def test_ao():
candles = np.array(mama_candles)
single = ta.ao(candles)
seq = ta.ao(candles, sequential=True)
assert round(single.osc, 0) == -46
assert len(seq[-1]) == len(candles)
assert seq.osc[-1] == single.osc
def test_apo():
# use the same candles as mama_candles
candles = np.array(mama_candles)
single = ta.apo(candles, fast_period=12, slow_period=26, matype=1)
seq = ta.apo(candles, fast_period=12, slow_period=26, matype=1, sequential=True)
assert round(single, 2) == -15.32
assert len(seq) == len(candles)
assert seq[-1] == single
def test_aroon():
candles = np.array(mama_candles)
aroon = ta.aroon(candles, period=14)
assert type(aroon).__name__ == 'AROON'
assert round(aroon.down, 2) == 100
assert round(aroon.up, 2) == 64.29
seq_aroon = ta.aroon(candles, period=14, sequential=True)
assert seq_aroon.down[-1] == aroon.down
assert len(seq_aroon.down) == len(candles)
assert len(seq_aroon.up) == len(candles)
def test_aroon_osc():
# use the same candles as mama_candles
candles = np.array(mama_candles)
single = ta.aroonosc(candles, period=14)
seq = ta.aroonosc(candles, period=14, sequential=True)
assert round(single, 2) == -35.71
assert len(seq) == len(candles)
assert seq[-1] == single
def test_atr():
candles = np.array(atr_candles)
single = ta.atr(candles)
seq = ta.atr(candles, sequential=True)
assert round(single, 1) == 2.8
assert len(seq) == len(candles)
assert seq[-1] == single
def test_avgprice():
candles = np.array(mama_candles)
single = ta.avgprice(candles)
seq = ta.avgprice(candles, sequential=True)
assert round(single, 1) == 149.8
assert len(seq) == len(candles)
assert seq[-1] == single
def test_beta():
# use the same candles as mama_candles
candles = np.array(mama_candles)
single = ta.beta(candles)
seq = ta.beta(candles, sequential=True)
assert round(single, 2) == -0.31
assert len(seq) == len(candles)
assert seq[-1] == single
def test_bandpass():
candles = np.array(mama_candles)
bp = ta.bandpass(candles)
assert type(bp).__name__ == 'BandPass'
assert round(bp.bp, 2) == -7.56
assert round(bp.bp_normalized, 2) == -0.29
assert bp.signal == 1
assert round(bp.trigger, 2) == -0.27
seq_bp = ta.bandpass(candles, sequential=True)
assert seq_bp.bp[-1] == bp.bp
assert len(seq_bp.bp) == len(candles)
assert len(seq_bp.bp_normalized) == len(candles)
assert len(seq_bp.signal) == len(candles)
assert len(seq_bp.trigger) == len(candles)
def test_bollinger_bands():
candles = np.array(bollinger_bands_candles)
bb = ta.bollinger_bands(candles)
u, m, l = bb
assert type(bb).__name__ == 'BollingerBands'
assert round(u, 1) == 145.8
assert round(m, 1) == 141.2
assert round(l, 1) == 136.7
seq_bb = ta.bollinger_bands(candles, sequential=True)
assert seq_bb.upperband[-1] == u
assert len(seq_bb.upperband) == len(candles)
assert len(seq_bb.middleband) == len(candles)
assert len(seq_bb.lowerband) == len(candles)
def test_bollinger_bands_width():
candles = np.array(bollinger_bands_width_candles)
single = ta.bollinger_bands_width(candles)
seq = ta.bollinger_bands_width(candles, sequential=True)
assert round(single, 4) == 0.0771
assert len(seq) == len(candles)
assert seq[-1] == single
def test_bop():
# use the same candles as mama_candles
candles = np.array(mama_candles)
single = ta.bop(candles)
seq = ta.bop(candles, sequential=True)
assert round(single, 2) == -0.92
assert len(seq) == len(candles)
assert seq[-1] == single
def test_cc():
# use the same candles as mama_candles
candles = np.array(mama_candles)
single = ta.cc(candles)
seq = ta.cc(candles, sequential=True)
assert round(single, 0) == -41
assert len(seq) == len(candles)
assert seq[-1] == single
def test_cci():
# use the same candles as mama_candles
candles = np.array(mama_candles)
single = ta.cci(candles, period=14)
seq = ta.cci(candles, period=14, sequential=True)
assert round(single, 2) == -285.29
assert len(seq) == len(candles)
assert seq[-1] == single
def test_cfo():
# use the same candles as mama_candles
candles = np.array(mama_candles)
single = ta.cfo(candles)
seq = ta.cfo(candles, sequential=True)
assert round(single, 2) == -66.53
assert len(seq) == len(candles)
assert seq[-1] == single
def test_cg():
candles = np.array(mama_candles)
single = ta.cg(candles)
seq = ta.cg(candles, sequential=True)
assert round(single, 2) == -5.37
assert len(seq) == len(candles)
assert seq[-1] == single
def test_chande():
# use the same candles as mama_candles
candles = np.array(mama_candles)
single_long = ta.chande(candles)
seq_long = ta.chande(candles, sequential=True)
single_short = ta.chande(candles, direction="short")
seq_short = ta.chande(candles, direction="short", sequential=True)
assert round(single_long, 0) == 213
assert round(single_short, 0) == 165
assert len(seq_short) == len(candles)
assert len(seq_long) == len(candles)
assert seq_long[-1] == single_long
assert seq_short[-1] == single_short
def test_chop():
# use the same candles as mama_candles
candles = np.array(mama_candles)
single = ta.chop(candles)
seq = ta.chop(candles, sequential=True)
assert round(single, 2) == 28.82
assert len(seq) == len(candles)
assert seq[-1] == single
def test_cksp():
candles = np.array(mama_candles)
single = ta.cksp(candles)
assert type(single).__name__ == 'CKSP'
assert round(single.long, 2) == 247.62
assert round(single.short, 2) == 127.89
seq = ta.cksp(candles, sequential=True)
assert seq.long[-1] == single.long
assert seq.short[-1] == single.short
assert len(seq.long) == len(candles)
assert len(seq.short) == len(candles)
def test_cmo():
# use the same candles as mama_candles
candles = np.array(mama_candles)
single = ta.cmo(candles, period=9)
seq = ta.cmo(candles, period=9, sequential=True)
assert round(single, 0) == -70
assert len(seq) == len(candles)
assert seq[-1] == single
def test_correl():
candles = np.array(mama_candles)
single = ta.correl(candles)
seq = ta.correl(candles, sequential=True)
assert round(single, 2) == 0.58
assert len(seq) == len(candles)
assert seq[-1] == single
def test_correlation_cycle():
candles = np.array(mama_candles)
single = ta.correlation_cycle(candles)
assert type(single).__name__ == 'CC'
assert round(single.real, 2) == 0.23
assert round(single.imag, 2) == 0.38
assert round(single.angle, 2) == -55.87
assert round(single.state, 2) == -1
seq = ta.correlation_cycle(candles, sequential=True)
assert seq.real[-1] == single.real
assert seq.imag[-1] == single.imag
assert seq.angle[-1] == single.angle
assert seq.state[-1] == single.state
assert len(seq.real) == len(candles)
assert len(seq.imag) == len(candles)
assert len(seq.angle) == len(candles)
assert len(seq.state) == len(candles)
def test_cvi():
candles = np.array(mama_candles)
single = ta.cvi(candles)
seq = ta.cvi(candles, sequential=True)
assert round(single, 2) == 196.8
assert len(seq) == len(candles)
assert seq[-1] == single
def test_damiani_volatmeter():
candles = np.array(mama_candles)
single = ta.damiani_volatmeter(candles)
assert type(single).__name__ == 'DamianiVolatmeter'
assert round(single.vol, 2) == 1.39
assert round(single.anti, 2) == 0.93
seq = ta.damiani_volatmeter(candles, sequential=True)
assert seq.vol[-1] == single.vol
assert seq.anti[-1] == single.anti
assert len(seq.vol) == len(candles)
assert len(seq.anti) == len(candles)
def test_dec_osc():
candles = np.array(mama_candles)
single = ta.dec_osc(candles)
seq = ta.dec_osc(candles, sequential=True)
assert round(single, 0) == -20
assert len(seq) == len(candles)
assert seq[-1] == single
def test_decycler():
candles = np.array(mama_candles)
single = ta.decycler(candles)
seq = ta.decycler(candles, sequential=True)
assert round(single, 0) == 233
assert len(seq) == len(candles)
assert seq[-1] == single
def test_dema():
candles = np.array(dema_candles)
single = ta.dema(candles, 9)
seq = ta.dema(candles, 9, sequential=True)
assert round(single, 0) == 165
assert len(seq) == len(candles)
assert seq[-1] == single
def test_devstop():
candles = np.array(mama_candles)
single = ta.devstop(candles)
seq = ta.devstop(candles, sequential=True)
assert round(single, 0) == 248.0
assert len(seq) == len(candles)
assert seq[-1] == single
def test_di():
candles = np.array(mama_candles)
single = ta.di(candles, period=14)
assert type(single).__name__ == 'DI'
assert round(single.plus, 2) == 10.80
assert round(single.minus, 1) == 45.3
seq = ta.di(candles, period=14, sequential=True)
assert seq.plus[-1] == single.plus
assert seq.minus[-1] == single.minus
assert len(seq.plus) == len(candles)
assert len(seq.minus) == len(candles)
def test_dm():
candles = np.array(mama_candles)
single = ta.dm(candles, period=14)
assert type(single).__name__ == 'DM'
assert round(single.plus, 2) == 36.78
assert round(single.minus, 1) == 154.1
seq = ta.dm(candles, period=14, sequential=True)
assert seq.plus[-1] == single.plus
assert seq.minus[-1] == single.minus
assert len(seq.plus) == len(candles)
assert len(seq.minus) == len(candles)
def test_donchian():
candles = np.array(mama_candles)
single = ta.donchian(candles, period=20)
seq = ta.donchian(candles, period=20, sequential=True)
assert type(single).__name__ == 'DonchianChannel'
assert round(single.upperband, 2) == 277.20
assert round(single.middleband, 2) == 189.20
assert round(single.lowerband, 2) == 101.20
assert seq.middleband[-1] == single.middleband
assert len(seq.upperband) == len(candles)
assert len(seq.middleband) == len(candles)
assert len(seq.lowerband) == len(candles)
def test_dpo():
candles = np.array(dema_candles)
single = ta.dpo(candles)
seq = ta.dpo(candles, sequential=True)
assert round(single, 0) == 22
assert len(seq) == len(candles)
assert seq[-1] == single
def test_dti():
candles = np.array(mama_candles)
single = ta.dti(candles)
seq = ta.dti(candles, sequential=True)
assert round(single, 2) == -32.6
assert len(seq) == len(candles)
assert seq[-1] == single
def test_dx():
candles = np.array(dema_candles)
single = ta.dx(candles)
seq = ta.dx(candles, sequential=True)
assert round(single, 0) == 67
assert len(seq) == len(candles)
assert seq[-1] == single
def test_efi():
candles = np.array(mama_candles)
single = ta.efi(candles)
seq = ta.efi(candles, sequential=True)
assert round(single, 0) == -51628073
assert len(seq) == len(candles)
assert seq[-1] == single
def test_ema():
close_prices = [
204.23, 205.01, 196.9, 197.33, 198.7, 199.86, 202.23, 200.3, 212.3, 210.82603059, 220.84, 218.99,
212.71, 211.01, 213.19, 212.99724894,
212.67760477, 209.85, 187.2, 184.15, 176.99, 175.9, 178.99, 150.96, 133.85, 138.18, 126.32, 125.23,
114.79,
118.73, 110.74409879, 111.72, 124.04, 118.52, 113.64, 119.65, 117.11129288, 109.23, 110.77, 102.65,
91.99
]
candles = fake_range_candle_from_range_prices(close_prices)
single = ta.ema(candles, 8)
seq = ta.ema(candles, 8, sequential=True)
assert round(single, 3) == 108.546
assert len(seq) == len(candles)
assert seq[-1] == single
assert np.isnan(ta.ema(candles, 400))
def test_emd():
candles = np.array(mama_candles)
single = ta.emd(candles)
seq = ta.emd(candles, sequential=True)
assert type(single).__name__ == 'EMD'
assert round(single.middleband, 2) == 3.12
assert round(single.upperband, 2) == 1.21
assert round(single.lowerband, 2) == -0.28
assert seq.middleband[-1] == single.middleband
assert seq.upperband[-1] == single.upperband
assert seq.lowerband[-1] == single.lowerband
assert len(seq.middleband) == len(candles)
assert len(seq.upperband) == len(candles)
assert len(seq.lowerband) == len(candles)
def test_emv():
candles = np.array(mama_candles)
single = ta.emv(candles)
seq = ta.emv(candles, sequential=True)
assert round(single, 0) == -11
assert len(seq) == len(candles)
assert seq[-1] == single
def test_er():
candles = np.array(mama_candles)
single = ta.er(candles)
seq = ta.er(candles, sequential=True)
assert round(single, 2) == 0.02
assert len(seq) == len(candles)
assert round(seq[-1], 2) == round(single, 2)
def test_eri():
candles = np.array(mama_candles)
single = ta.eri(candles)
seq = ta.eri(candles, sequential=True)
assert type(single).__name__ == 'ERI'
assert round(single.bull, 2) == -7.14
assert round(single.bear, 2) == -101.49
assert seq.bull[-1] == single.bull
assert len(seq.bull) == len(candles)
assert len(seq.bear) == len(candles)
def test_fisher():
candles = np.array(mama_candles)
single = ta.fisher(candles, period=9)
seq = ta.fisher(candles, period=9, sequential=True)
assert type(single).__name__ == 'FisherTransform'
assert round(single.fisher, 2) == -1.77
assert round(single.signal, 2) == -1.31
assert seq.fisher[-1] == single.fisher
assert len(seq.fisher) == len(candles)
assert len(seq.signal) == len(candles)
def test_fosc():
candles = np.array(mama_candles)
single = ta.fosc(candles)
seq = ta.fosc(candles, sequential=True)
assert round(single, 0) == -69
assert len(seq) == len(candles)
assert seq[-1] == single
def test_frama():
# use the same candles as mama_candles
candles = np.array(mama_candles)
    single = ta.frama(candles, window=10, SC=200, FC=10)
    seq = ta.frama(candles, window=10, SC=200, FC=10, sequential=True)
assert round(single, 0) == 219
assert len(seq) == len(candles)
assert seq[-1] == single
def test_fwma():
# use the same candles as mama_candles
candles = np.array(mama_candles)
single = ta.fwma(candles)
seq = ta.fwma(candles, sequential=True)
assert round(single, 0) == 161
assert len(seq) == len(candles)
assert seq[-1] == single
def test_gator():
candles = np.array(mama_candles)
single = ta.gatorosc(candles)
seq = ta.gatorosc(candles, sequential=True)
assert type(single).__name__ == 'GATOR'
assert round(single.upper, 2) == 2.39
assert round(single.upper_change, 2) == 0.98
assert round(single.lower, 2) == -13.44
assert round(single.lower_change, 2) == 5.06
assert seq.upper[-1] == single.upper
assert len(seq.upper) == len(candles)
def test_gauss():
candles = np.array(mama_candles)
single = ta.gauss(candles)
seq = ta.gauss(candles, sequential=True)
assert round(single, 0) == 190
assert len(seq) == len(candles)
assert seq[-1] == single
def test_high_pass():
candles = np.array(mama_candles)
single = ta.high_pass(candles)
seq = ta.high_pass(candles, sequential=True)
assert round(single, 0) == -106
assert len(seq) == len(candles)
assert seq[-1] == single
def test_high_pass_2_pole():
candles = np.array(mama_candles)
single = ta.high_pass_2_pole(candles)
seq = ta.high_pass_2_pole(candles, sequential=True)
assert round(single, 0) == -101
assert len(seq) == len(candles)
assert seq[-1] == single
def test_hma():
candles = np.array(mama_candles)
single = ta.hma(candles)
seq = ta.hma(candles, sequential=True)
assert round(single, 0) == 134
assert len(seq) == len(candles)
assert seq[-1] == single
def test_ht_dcperiod():
candles = np.array(mama_candles)
single = ta.ht_dcperiod(candles)
seq = ta.ht_dcperiod(candles, sequential=True)
assert round(single, 0) == 24
assert len(seq) == len(candles)
assert seq[-1] == single
def test_ht_dcphase():
candles = np.array(mama_candles)
single = ta.ht_dcphase(candles)
seq = ta.ht_dcphase(candles, sequential=True)
assert round(single, 0) == 10
assert len(seq) == len(candles)
assert seq[-1] == single
def test_ht_phasor():
candles = np.array(mama_candles)
single = ta.ht_phasor(candles)
seq = ta.ht_phasor(candles, sequential=True)
assert type(single).__name__ == 'IQ'
assert round(single.inphase, 0) == 11
assert round(single.quadrature, 0) == -52
assert seq.inphase[-1] == single.inphase
assert seq.quadrature[-1] == single.quadrature
assert len(seq.inphase) == len(candles)
assert len(seq.quadrature) == len(candles)
def test_ht_sine():
candles = np.array(mama_candles)
single = ta.ht_sine(candles)
seq = ta.ht_sine(candles, sequential=True)
assert type(single).__name__ == 'SINEWAVE'
assert round(single.sine, 2) == 0.18
assert round(single.lead, 2) == 0.82
assert seq.sine[-1] == single.sine
assert seq.lead[-1] == single.lead
assert len(seq.sine) == len(candles)
assert len(seq.lead) == len(candles)
def test_ht_trendline():
candles = np.array(mama_candles)
single = ta.ht_trendline(candles)
seq = ta.ht_trendline(candles, sequential=True)
assert round(single, 0) == 236
assert len(seq) == len(candles)
assert seq[-1] == single
def test_ht_trendmode():
candles = np.array(mama_candles)
single = ta.ht_trendmode(candles)
seq = ta.ht_trendmode(candles, sequential=True)
assert single == 1
assert len(seq) == len(candles)
assert seq[-1] == single
def test_ichimoku_cloud():
candles = np.array(ichimoku_candles)
ic = ta.ichimoku_cloud(candles)
current_conversion_line, current_base_line, span_a, span_b = ic
assert type(ic).__name__ == 'IchimokuCloud'
assert (current_conversion_line, current_base_line, span_a, span_b) == (8861.59, 8861.59, 8466.385, 8217.45)
def test_ichimoku_cloud_seq():
candles = np.array(ichimoku_candles)
conversion_line, base_line, span_a, span_b, lagging_line, future_span_a, future_span_b = ta.ichimoku_cloud_seq(
candles)
seq = ta.ichimoku_cloud_seq(candles, sequential=True)
assert type(seq).__name__ == 'IchimokuCloud'
assert (conversion_line, base_line, span_a, span_b, lagging_line, future_span_a, future_span_b) == (
seq.conversion_line[-1], seq.base_line[-1], seq.span_a[-1], seq.span_b[-1], seq.lagging_line[-1],
seq.future_span_a[-1], seq.future_span_b[-1])
assert (conversion_line, base_line, span_a, span_b, lagging_line, future_span_a, future_span_b) == (
8861.59, 8861.59, 8465.25, 8204.715, 8730.0, 8861.59, 8579.49)
assert len(seq.conversion_line) == len(candles)
def test_ift_rsi():
    # use the same candles as mama_candles
candles = np.array(mama_candles)
single = ta.ift_rsi(candles)
seq = ta.ift_rsi(candles, sequential=True)
assert round(single, 2) == 0.89
assert len(seq) == len(candles)
assert seq[-1] == single
def test_itrend():
candles = np.array(mama_candles)
single = ta.itrend(candles)
seq = ta.itrend(candles, sequential=True)
assert type(single).__name__ == 'ITREND'
assert round(single.it, 0) == 223
assert round(single.trigger, 0) == 182
assert single.signal == -1
assert seq.it[-1] == single.it
assert seq.signal[-1] == single.signal
assert seq.trigger[-1] == single.trigger
assert len(seq.it) == len(candles)
def test_jma():
    # use the same candles as mama_candles
candles = np.array(mama_candles)
single = ta.jma(candles)
seq = ta.jma(candles, sequential=True)
assert round(single, 2) == 156.72
assert len(seq) == len(candles)
assert seq[-1] == single
def test_kama():
# use the same candles as dema_candles
candles = np.array(dema_candles)
single = ta.kama(candles, 10)
seq = ta.kama(candles, 10, sequential=True)
assert round(single, 0) == 202
assert len(seq) == len(candles)
assert seq[-1] == single
def test_kaufmanstop():
# use the same candles as dema_candles
candles = np.array(dema_candles)
single = ta.kaufmanstop(candles)
seq = ta.kaufmanstop(candles, sequential=True)
assert round(single, 0) == -57
assert len(seq) == len(candles)
assert seq[-1] == single
def test_kdj():
candles = np.array(mama_candles)
kd = ta.kdj(candles)
k, d, j = kd
assert type(kd).__name__ == 'KDJ'
assert round(k, 1) == 13.3
assert round(d, 1) == 15.7
assert round(j, 1) == 8.6
seq_kd = ta.kdj(candles, sequential=True)
assert seq_kd.k[-1] == k
assert len(seq_kd.k) == len(candles)
assert len(seq_kd.d) == len(candles)
assert len(seq_kd.j) == len(candles)
def test_keltner_channels():
candles = np.array(keltner_channel_candles)
kc = ta.keltner(candles)
u, m, l = kc
assert type(kc).__name__ == 'KeltnerChannel'
assert round(u, 1) == 145.0
assert round(m, 1) == 139.7
assert round(l, 1) == 134.4
seq_kc = ta.keltner(candles, sequential=True)
assert seq_kc.upperband[-1] == u
assert len(seq_kc.upperband) == len(candles)
assert len(seq_kc.middleband) == len(candles)
assert len(seq_kc.lowerband) == len(candles)
def test_kst():
candles = np.array(mama_candles)
single = ta.kst(candles)
seq = ta.kst(candles, sequential=True)
assert type(single).__name__ == 'KST'
assert round(single.line, 2) == -93.38
assert round(single.signal, 2) == 31.1
assert seq.line[-1] == single.line
assert seq.signal[-1] == single.signal
assert len(seq.line) == len(candles)
assert len(seq.signal) == len(candles)
def test_kurtosis():
candles = np.array(mama_candles)
single = ta.kurtosis(candles)
seq = ta.kurtosis(candles, sequential=True)
assert round(single, 2) == -0.22
assert len(seq) == len(candles)
assert seq[-1] == single
def test_kvo():
# use the same candles as mama_candles
candles = np.array(mama_candles)
single = ta.kvo(candles)
seq = ta.kvo(candles, sequential=True)
assert round(single / 10000000, 2) == -5.52
assert len(seq) == len(candles)
assert seq[-1] == single
def test_linearreg():
# use the same candles as mama_candles
candles = np.array(mama_candles)
single = ta.linearreg(candles)
seq = ta.linearreg(candles, sequential=True)
assert round(single, 2) == 179.56
assert len(seq) == len(candles)
assert seq[-1] == single
def test_linearreg_angle():
# use the same candles as mama_candles
candles = np.array(mama_candles)
single = ta.linearreg_angle(candles)
seq = ta.linearreg_angle(candles, sequential=True)
assert round(single, 2) == -78.42
assert len(seq) == len(candles)
assert seq[-1] == single
def test_linearreg_intercept():
# use the same candles as mama_candles
candles = np.array(mama_candles)
single = ta.linearreg_intercept(candles)
seq = ta.linearreg_intercept(candles, sequential=True)
assert round(single, 2) == 242.98
assert len(seq) == len(candles)
assert seq[-1] == single
def test_linearreg_slope():
# use the same candles as mama_candles
candles = np.array(mama_candles)
single = ta.linearreg_slope(candles)
seq = ta.linearreg_slope(candles, sequential=True)
assert round(single, 2) == -4.88
assert len(seq) == len(candles)
assert seq[-1] == single
def test_lrsi():
# use the same candles as mama_candles
candles = np.array(mama_candles)
single = ta.lrsi(candles)
seq = ta.lrsi(candles, sequential=True)
assert round(single, 2) == 0.1
assert round(seq[-2], 2) == 0.04
assert len(seq) == len(candles)
assert seq[-1] == single
def test_ma():
# use the same candles as mama_candles
candles = np.array(mama_candles)
single = ta.ma(candles, matype=9)
seq = ta.ma(candles, matype=9, sequential=True)
seq_average = ta.ma(seq, matype=9, sequential=True)
assert round(single, 2) == 166.99
assert round(seq[-2], 2) == 203.56
assert round(seq_average[-2], 2) == 212.12
assert len(seq) == len(candles)
assert len(seq_average) == len(candles)
assert seq[-1] == single
def test_macd():
candles = np.array(mama_candles)
single = ta.macd(candles, fast_period=12, slow_period=26, signal_period=9)
seq = ta.macd(candles, fast_period=12, slow_period=26, signal_period=9, sequential=True)
assert type(single).__name__ == 'MACD'
assert round(single.macd, 2) == -15.32
assert round(single.signal, 2) == -4.10
assert round(single.hist, 2) == -11.22
assert seq.macd[-1] == single.macd
assert len(seq.macd) == len(candles)
assert len(seq.signal) == len(candles)
assert len(seq.hist) == len(candles)
def test_macdext():
candles = np.array(mama_candles)
single = ta.macdext(candles, fast_period=12, fast_matype=0, slow_period=26, slow_matype=0, signal_period=9,
signal_matype=0)
seq = ta.macdext(candles, fast_period=12, fast_matype=0, slow_period=26, slow_matype=0, signal_period=9,
signal_matype=0,
sequential=True)
assert type(single).__name__ == 'MACDEXT'
assert round(single.macd, 2) == -23.12
assert round(single.signal, 2) == -18.51
assert round(single.hist, 2) == -4.61
assert seq.macd[-1] == single.macd
assert len(seq.macd) == len(candles)
assert len(seq.signal) == len(candles)
assert len(seq.hist) == len(candles)
def test_median_ad():
candles = np.array(mama_candles)
single = ta.median_ad(candles)
seq = ta.median_ad(candles, sequential=True)
assert round(single, 2) == 6.86
assert len(seq) == len(candles)
assert seq[-1] == single
def test_mean_ad():
candles = np.array(mama_candles)
single = ta.mean_ad(candles)
seq = ta.mean_ad(candles, sequential=True)
assert round(single, 2) == 23.82
assert len(seq) == len(candles)
assert seq[-1] == single
def test_mama():
candles = np.array(mama_candles)
mama = ta.mama(candles, 0.5, 0.05)
assert type(mama).__name__ == 'MAMA'
assert round(mama.mama, 2) == 206.78
assert round(mama.fama, 2) == 230.26
seq_mama = ta.mama(candles, 0.5, 0.05, sequential=True)
assert seq_mama.mama[-1] == mama.mama
assert len(seq_mama.mama) == len(candles)
assert len(seq_mama.fama) == len(candles)
def test_marketfi():
# use the same candles as mama_candles
candles = np.array(mama_candles)
single = ta.marketfi(candles)
seq = ta.marketfi(candles, sequential=True)
assert round(single * 100000, 2) == 2.47
assert len(seq) == len(candles)
assert seq[-1] == single
def test_mass():
# use the same candles as mama_candles
candles = np.array(mama_candles)
single = ta.mass(candles)
seq = ta.mass(candles, sequential=True)
assert round(single, 2) == 5.76
assert len(seq) == len(candles)
assert seq[-1] == single
def test_mcginley_dynamic():
candles = np.array(mama_candles)
single = ta.mcginley_dynamic(candles)
seq = ta.mcginley_dynamic(candles, sequential=True)
assert round(single, 2) == 107.82
assert len(seq) == len(candles)
assert seq[-1] == single
def test_medprice():
# use the same candles as mama_candles
candles = np.array(mama_candles)
single = ta.medprice(candles)
seq = ta.medprice(candles, sequential=True)
assert round(single, 1) == 148.4
assert len(seq) == len(candles)
assert seq[-1] == single
def test_mfi():
# use the same candles as mama_candles
candles = np.array(mama_candles)
single = ta.mfi(candles, period=9)
seq = ta.mfi(candles, period=9, sequential=True)
assert round(single, 1) == 31.2
assert len(seq) == len(candles)
assert seq[-1] == single
def test_midpoint():
# use the same candles as mama_candles
candles = np.array(mama_candles)
single = ta.midpoint(candles)
seq = ta.midpoint(candles, sequential=True)
assert round(single, 1) == 176.4
assert len(seq) == len(candles)
assert seq[-1] == single
def test_midprice():
# use the same candles as mama_candles
candles = np.array(mama_candles)
single = ta.midprice(candles)
seq = ta.midprice(candles, sequential=True)
assert round(single, 1) == 176.6
assert len(seq) == len(candles)
assert seq[-1] == single
def test_minmax():
candles = np.array(mama_candles)
single = ta.minmax(candles)
seq = ta.minmax(candles, sequential=True)
assert type(single).__name__ == 'EXTREMA'
assert round(seq.is_max[-6], 2) == 251.93
assert round(seq.is_min[-15], 2) == 210
assert round(single.last_max, 2) == 251.93
assert round(single.last_min, 2) == 210
assert seq.last_max[-1] == single.last_max
assert seq.last_min[-1] == single.last_min
assert len(seq.is_min) == len(candles)
def test_mom():
# use the same candles as mama_candles
candles = np.array(mama_candles)
single = ta.mom(candles, period=9)
seq = ta.mom(candles, period=9, sequential=True)
assert round(single, 2) == -116.09
assert len(seq) == len(candles)
assert seq[-1] == single
def test_msw():
candles = np.array(mama_candles)
single = ta.msw(candles)
seq = ta.msw(candles, sequential=True)
assert type(single).__name__ == 'MSW'
assert round(single.lead, 2) == -0.66
assert round(single.sine, 2) == -1.0
assert seq.lead[-1] == single.lead
assert seq.sine[-1] == single.sine
assert len(seq.sine) == len(candles)
def test_natr():
# use the same candles as mama_candles
candles = np.array(mama_candles)
single = ta.natr(candles, period=14)
seq = ta.natr(candles, period=14, sequential=True)
assert round(single, 2) == 22.55
assert len(seq) == len(candles)
assert seq[-1] == single
def test_nvi():
# use the same candles as mama_candles
candles = np.array(mama_candles)
single = ta.nvi(candles)
seq = ta.nvi(candles, sequential=True)
assert round(single, 2) == 722.58
assert len(seq) == len(candles)
assert seq[-1] == single
def test_obv():
# use the same candles as mama_candles
candles = np.array(mama_candles)
single = ta.obv(candles)
seq = ta.obv(candles, sequential=True)
assert round(single / 1000000, 0) == -6
assert len(seq) == len(candles)
assert seq[-1] == single
def test_pattern_recognition():
candles = np.array(inverted_hammer_candles)
res = ta.pattern_recognition(candles, pattern_type="CDLINVERTEDHAMMER")
seq = ta.pattern_recognition(candles, pattern_type="CDLINVERTEDHAMMER", sequential=True)
assert len(seq) == len(candles)
assert res == 0
candles = np.array(bullish_engulfing_candles)
res = ta.pattern_recognition(candles, pattern_type="CDLENGULFING")
assert res == 0
candles = np.array(bearish_engulfing_candles)
res = ta.pattern_recognition(candles, pattern_type="CDLENGULFING")
assert res == 0
candles = np.array(hammer_candles)
res = ta.pattern_recognition(candles, pattern_type="CDLHAMMER")
assert res == 0
candles = np.array(doji_candles)
res = ta.pattern_recognition(candles, pattern_type="CDLDOJI")
assert res == 1
def test_pfe():
# use the same candles as mama_candles
candles = np.array(mama_candles)
single = ta.pfe(candles)
seq = ta.pfe(candles, sequential=True)
assert round(single, 2) == -211.85
assert len(seq) == len(candles)
assert seq[-1] == single
def test_pivot():
candles = np.array(mama_candles)
single = ta.pivot(candles, mode=0)
seq = ta.pivot(candles, mode=0, sequential=True)
assert type(single).__name__ == 'PIVOT'
assert seq.r1[-1] == single.r1
assert len(seq.r1) == len(candles)
assert len(seq.r2) == len(candles)
assert len(seq.r3) == len(candles)
assert len(seq.r4) == len(candles)
assert len(seq.pp) == len(candles)
assert len(seq.s1) == len(candles)
assert len(seq.s2) == len(candles)
assert len(seq.s3) == len(candles)
assert len(seq.s4) == len(candles)
def test_pivot1():
candles = np.array(mama_candles)
single = ta.pivot(candles, mode=1)
seq = ta.pivot(candles, mode=1, sequential=True)
assert type(single).__name__ == 'PIVOT'
assert seq.r1[-1] == single.r1
assert len(seq.r1) == len(candles)
assert len(seq.r2) == len(candles)
assert len(seq.r3) == len(candles)
assert len(seq.r4) == len(candles)
assert len(seq.pp) == len(candles)
assert len(seq.s1) == len(candles)
assert len(seq.s2) == len(candles)
assert len(seq.s3) == len(candles)
assert len(seq.s4) == len(candles)
def test_pivot2():
    candles = np.array(mama_candles)
"""LinearSolver that uses PetSC KSP to solve for a system's derivatives."""
from __future__ import division, print_function
import numpy as np
try:
import petsc4py
from petsc4py import PETSc
except ImportError:
PETSc = None
from openmdao.solvers.solver import LinearSolver
from openmdao.utils.general_utils import warn_deprecation
KSP_TYPES = [
"richardson",
"chebyshev",
"cg",
"groppcg",
"pipecg",
"pipecgrr",
"cgne",
"nash",
"stcg",
"gltr",
"fcg",
"pipefcg",
"gmres",
"pipefgmres",
"fgmres",
"lgmres",
"dgmres",
"pgmres",
"tcqmr",
"bcgs",
"ibcgs",
"fbcgs",
"fbcgsr",
"bcgsl",
"cgs",
"tfqmr",
"cr",
"pipecr",
"lsqr",
"preonly",
"qcg",
"bicg",
"minres",
"symmlq",
"lcd",
"python",
"gcr",
"pipegcr",
"tsirm",
"cgls"
]
def _get_petsc_vec_array_new(vec):
"""
Get the array of values for the given PETSc vector.
Helper function to handle a petsc backwards incompatibility.
Parameters
----------
vec : petsc vector
Vector whose data is being requested.
Returns
-------
ndarray
A readonly copy of the array of values from vec.
"""
return vec.getArray(readonly=True)
def _get_petsc_vec_array_old(vec):
"""
Get the array of values for the given PETSc vector.
Helper function to handle a petsc backwards incompatibility.
Parameters
----------
vec : petsc vector
Vector whose data is being requested.
Returns
-------
ndarray
An array of values from vec.
"""
return vec.getArray()
if PETSc:
try:
petsc_version = petsc4py.__version__
except AttributeError: # hack to fix doc-tests
petsc_version = "3.5"
if PETSc and int((petsc_version).split('.')[1]) >= 6:
_get_petsc_vec_array = _get_petsc_vec_array_new
else:
_get_petsc_vec_array = _get_petsc_vec_array_old
class Monitor(object):
"""
Prints output from PETSc's KSP solvers.
Callable object given to KSP as a callback for printing the residual.
Attributes
----------
_solver : _solver
the openmdao solver.
_norm : float
the current norm.
_norm0 : float
the norm for the first iteration.
"""
def __init__(self, solver):
"""
Store pointer to the openmdao solver and initialize norms.
Parameters
----------
solver : object
the openmdao solver.
"""
self._solver = solver
self._norm = 1.0
self._norm0 = 1.0
def __call__(self, ksp, counter, norm):
"""
Store norm if first iteration, and print norm.
Parameters
----------
ksp : object
the KSP solver.
counter : int
the counter.
norm : float
the norm.
"""
if counter == 0 and norm != 0.0:
self._norm0 = norm
self._norm = norm
self._solver._mpi_print(counter, norm, norm / self._norm0)
self._solver._iter_count += 1
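# Illustrative sketch (an assumption, since the KSP construction code is not part of
# this excerpt): a Monitor instance is typically registered on the petsc4py KSP object
# so that each Krylov iteration reports its residual norm, e.g.
#
#     ksp = PETSc.KSP().create(comm=system.comm)
#     ksp.setMonitor(Monitor(self))
#
# after which ksp.solve() invokes Monitor.__call__ with the iteration counter and norm,
# feeding the solver's _mpi_print machinery.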
class PETScKrylov(LinearSolver):
"""
LinearSolver that uses PetSC KSP to solve for a system's derivatives.
Attributes
----------
precon : Solver
Preconditioner for linear solve. Default is None for no preconditioner.
    _ksp : dict
dictionary of KSP instances (keyed on vector name).
"""
SOLVER = 'LN: PETScKrylov'
def __init__(self, **kwargs):
"""
Declare the solver options.
Parameters
----------
**kwargs : dict
dictionary of options set by the instantiating class/script.
"""
if PETSc is None:
raise RuntimeError("PETSc is not available.")
super(PETScKrylov, self).__init__(**kwargs)
# initialize dictionary of KSP instances (keyed on vector name)
self._ksp = {}
# initialize preconditioner to None
self.precon = None
def _declare_options(self):
"""
Declare options before kwargs are processed in the init method.
"""
super(PETScKrylov, self)._declare_options()
self.options.declare('ksp_type', default='fgmres', values=KSP_TYPES,
desc="KSP algorithm to use. Default is 'fgmres'.")
self.options.declare('restart', default=1000, types=int,
desc='Number of iterations between restarts. Larger values increase '
'iteration cost, but may be necessary for convergence')
self.options.declare('precon_side', default='right', values=['left', 'right'],
desc='Preconditioner side, default is right.')
# changing the default maxiter from the base class
self.options['maxiter'] = 100
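    # Usage sketch (hypothetical script-level code, not part of this module): the
    # options declared above are normally set when attaching the solver to a Group,
    # for example
    #
    #     group.linear_solver = PETScKrylov(ksp_type='gmres', maxiter=200, restart=50)
    #     group.linear_solver.precon = LinearBlockGS()
    #
    # where LinearBlockGS is another OpenMDAO linear solver used as a preconditioner
    # through the `precon` attribute described in the class docstring.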
def _assembled_jac_solver_iter(self):
"""
Return a generator of linear solvers using assembled jacs.
"""
if self.options['assemble_jac']:
yield self
if self.precon is not None:
for s in self.precon._assembled_jac_solver_iter():
yield s
def _setup_solvers(self, system, depth):
"""
Assign system instance, set depth, and optionally perform setup.
Parameters
----------
system : <System>
pointer to the owning system.
depth : int
depth of the current system (already incremented).
"""
super(PETScKrylov, self)._setup_solvers(system, depth)
if self.precon is not None:
self.precon._setup_solvers(self._system, self._depth + 1)
def _set_solver_print(self, level=2, type_='all'):
"""
Control printing for solvers and subsolvers in the model.
Parameters
----------
level : int
iprint level. Set to 2 to print residuals each iteration; set to 1
to print just the iteration totals; set to 0 to disable all printing
except for failures, and set to -1 to disable all printing including failures.
type_ : str
Type of solver to set: 'LN' for linear, 'NL' for nonlinear, or 'all' for all.
"""
super(PETScKrylov, self)._set_solver_print(level=level, type_=type_)
if self.precon is not None and type_ != 'NL':
self.precon._set_solver_print(level=level, type_=type_)
def mult(self, mat, in_vec, result):
"""
Apply Jacobian matrix (KSP Callback).
The following attributes must be defined when solve is called to
provide information used in this callback:
_system : System
pointer to the owning system.
_vec_name : str
the right-hand-side (RHS) vector name.
_mode : str
'fwd' or 'rev'.
Parameters
----------
mat : PETSc.Mat
PETSc matrix object.
in_vec : PetSC Vector
Incoming vector.
result : PetSC Vector
Empty array into which we place the matrix-vector product.
"""
# assign x and b vectors based on mode
system = self._system
vec_name = self._vec_name
if self._mode == 'fwd':
x_vec = system._vectors['output'][vec_name]
b_vec = system._vectors['residual'][vec_name]
else: # rev
x_vec = system._vectors['residual'][vec_name]
b_vec = system._vectors['output'][vec_name]
# set value of x vector to KSP provided value
x_vec._data[:] = _get_petsc_vec_array(in_vec)
# apply linear
scope_out, scope_in = system._get_scope()
system._apply_linear(self._assembled_jac, [vec_name], self._rel_systems, self._mode,
scope_out, scope_in)
# stuff resulting value of b vector into result for KSP
result.array[:] = b_vec._data
def _linearize_children(self):
"""
Return a flag that is True when we need to call linearize on our subsystems' solvers.
Returns
-------
boolean
            Flag for indicating child linearization.
"""
precon = self.precon
return (precon is not None) and (precon._linearize_children())
def _linearize(self):
"""
Perform any required linearization operations such as matrix factorization.
"""
if self.precon is not None:
self.precon._linearize()
def solve(self, vec_names, mode, rel_systems=None):
"""
Solve the linear system for the problem in self._system.
The full solution vector is returned.
Parameters
----------
vec_names : list
list of vector names.
mode : string
Derivative mode, can be 'fwd' or 'rev'.
rel_systems : set of str
Names of systems relevant to the current solve.
"""
self._vec_names = vec_names
self._rel_systems = rel_systems
self._mode = mode
system = self._system
options = self.options
if self._system.under_complex_step:
msg = 'PETScKrylov solver is not supported under complex step.'
raise RuntimeError(msg)
maxiter = options['maxiter']
atol = options['atol']
rtol = options['rtol']
for vec_name in vec_names:
self._vec_name = vec_name
# assign x and b vectors based on mode
if self._mode == 'fwd':
x_vec = system._vectors['output'][vec_name]
b_vec = system._vectors['residual'][vec_name]
else: # rev
x_vec = system._vectors['residual'][vec_name]
b_vec = system._vectors['output'][vec_name]
# create numpy arrays to interface with PETSc
sol_array = x_vec._data.copy()
rhs_array = b_vec._data.copy()
# create PETSc vectors from numpy arrays
sol_petsc_vec = PETSc.Vec().createWithArray(sol_array, comm=system.comm)
rhs_petsc_vec = PETSc.Vec().createWithArray(rhs_array, comm=system.comm)
# run PETSc solver
self._iter_count = 0
ksp = self._get_ksp_solver(system, vec_name)
ksp.setTolerances(max_it=maxiter, atol=atol, rtol=rtol)
ksp.solve(rhs_petsc_vec, sol_petsc_vec)
# stuff the result into the x vector
x_vec._data[:] = sol_array
sol_petsc_vec = rhs_petsc_vec = None
def apply(self, mat, in_vec, result):
"""
Apply preconditioner.
Parameters
----------
mat : PETSc.Mat
PETSc matrix object.
in_vec : PETSc.Vector
Incoming vector
result : PETSc.Vector
Empty vector in which the preconditioned in_vec is stored.
"""
if self.precon:
system = self._system
vec_name = self._vec_name
mode = self._mode
# Need to clear out any junk from the inputs.
system._vectors['input'][vec_name].set_const(0.0)
# assign x and b vectors based on mode
if mode == 'fwd':
x_vec = system._vectors['output'][vec_name]
b_vec = system._vectors['residual'][vec_name]
else: # rev
x_vec = system._vectors['residual'][vec_name]
b_vec = system._vectors['output'][vec_name]
# set value of b vector to KSP provided value
b_vec._data[:] = _get_petsc_vec_array(in_vec)
# call the preconditioner
self._solver_info.append_precon()
self.precon.solve([vec_name], mode)
self._solver_info.pop()
# stuff resulting value of x vector into result for KSP
result.array[:] = x_vec._data
else:
# no preconditioner, just pass back the incoming vector
result.array[:] = _get_petsc_vec_array(in_vec)
def _get_ksp_solver(self, system, vec_name):
"""
Get an instance of the KSP solver for `vec_name` in `system`.
Instances will be created on first request and cached for future use.
Parameters
----------
system : `System`
Parent `System` object.
vec_name : string
name of vector.
Returns
-------
KSP
the KSP solver instance.
"""
# use cached instance if available
if vec_name in self._ksp:
return self._ksp[vec_name]
iproc = system.comm.rank
lsize = np.sum(system._var_sizes[vec_name]['output'][iproc, :])
        size = np.sum(system._var_sizes[vec_name]['output'])
import sys
from numpy.testing import *
import numpy.core.umath as ncu
import numpy as np
class _FilterInvalids(object):
def setUp(self):
self.olderr = np.seterr(invalid='ignore')
def tearDown(self):
np.seterr(**self.olderr)
class TestDivision(TestCase):
def test_division_int(self):
# int division should follow Python
x = np.array([5, 10, 90, 100, -5, -10, -90, -100, -120])
if 5 / 10 == 0.5:
assert_equal(x / 100, [0.05, 0.1, 0.9, 1,
-0.05, -0.1, -0.9, -1, -1.2])
else:
assert_equal(x / 100, [0, 0, 0, 1, -1, -1, -1, -1, -2])
assert_equal(x // 100, [0, 0, 0, 1, -1, -1, -1, -1, -2])
assert_equal(x % 100, [5, 10, 90, 0, 95, 90, 10, 0, 80])
def test_division_complex(self):
# check that implementation is correct
msg = "Complex division implementation check"
x = np.array([1. + 1.*1j, 1. + .5*1j, 1. + 2.*1j], dtype=np.complex128)
assert_almost_equal(x**2/x, x, err_msg=msg)
# check overflow, underflow
msg = "Complex division overflow/underflow check"
x = np.array([1.e+110, 1.e-110], dtype=np.complex128)
y = x**2/x
assert_almost_equal(y/x, [1, 1], err_msg=msg)
def test_zero_division_complex(self):
err = np.seterr(invalid="ignore", divide="ignore")
try:
x = np.array([0.0], dtype=np.complex128)
y = 1.0/x
assert_(np.isinf(y)[0])
y = complex(np.inf, np.nan)/x
assert_(np.isinf(y)[0])
y = complex(np.nan, np.inf)/x
assert_(np.isinf(y)[0])
y = complex(np.inf, np.inf)/x
assert_(np.isinf(y)[0])
y = 0.0/x
assert_(np.isnan(y)[0])
finally:
np.seterr(**err)
def test_floor_division_complex(self):
# check that implementation is correct
msg = "Complex floor division implementation check"
x = np.array([.9 + 1j, -.1 + 1j, .9 + .5*1j, .9 + 2.*1j], dtype=np.complex128)
y = np.array([0., -1., 0., 0.], dtype=np.complex128)
assert_equal(np.floor_divide(x**2,x), y, err_msg=msg)
# check overflow, underflow
msg = "Complex floor division overflow/underflow check"
x = np.array([1.e+110, 1.e-110], dtype=np.complex128)
y = np.floor_divide(x**2, x)
assert_equal(y, [1.e+110, 0], err_msg=msg)
class TestPower(TestCase):
def test_power_float(self):
x = np.array([1., 2., 3.])
assert_equal(x**0, [1., 1., 1.])
assert_equal(x**1, x)
assert_equal(x**2, [1., 4., 9.])
y = x.copy()
y **= 2
assert_equal(y, [1., 4., 9.])
assert_almost_equal(x**(-1), [1., 0.5, 1./3])
assert_almost_equal(x**(0.5), [1., ncu.sqrt(2), ncu.sqrt(3)])
def test_power_complex(self):
x = np.array([1+2j, 2+3j, 3+4j])
assert_equal(x**0, [1., 1., 1.])
assert_equal(x**1, x)
assert_almost_equal(x**2, [-3+4j, -5+12j, -7+24j])
assert_almost_equal(x**3, [(1+2j)**3, (2+3j)**3, (3+4j)**3])
assert_almost_equal(x**4, [(1+2j)**4, (2+3j)**4, (3+4j)**4])
assert_almost_equal(x**(-1), [1/(1+2j), 1/(2+3j), 1/(3+4j)])
assert_almost_equal(x**(-2), [1/(1+2j)**2, 1/(2+3j)**2, 1/(3+4j)**2])
assert_almost_equal(x**(-3), [(-11+2j)/125, (-46-9j)/2197,
(-117-44j)/15625])
assert_almost_equal(x**(0.5), [ncu.sqrt(1+2j), ncu.sqrt(2+3j),
ncu.sqrt(3+4j)])
norm = 1./((x**14)[0])
assert_almost_equal(x**14 * norm,
[i * norm for i in [-76443+16124j, 23161315+58317492j,
5583548873 + 2465133864j]])
# Ticket #836
def assert_complex_equal(x, y):
assert_array_equal(x.real, y.real)
assert_array_equal(x.imag, y.imag)
for z in [complex(0, np.inf), complex(1, np.inf)]:
err = np.seterr(invalid="ignore")
z = np.array([z], dtype=np.complex_)
try:
assert_complex_equal(z**1, z)
assert_complex_equal(z**2, z*z)
assert_complex_equal(z**3, z*z*z)
finally:
np.seterr(**err)
def test_power_zero(self):
# ticket #1271
zero = np.array([0j])
one = np.array([1+0j])
cinf = np.array([complex(np.inf, 0)])
cnan = np.array([complex(np.nan, np.nan)])
def assert_complex_equal(x, y):
x, y = np.asarray(x), np.asarray(y)
assert_array_equal(x.real, y.real)
assert_array_equal(x.imag, y.imag)
# positive powers
for p in [0.33, 0.5, 1, 1.5, 2, 3, 4, 5, 6.6]:
assert_complex_equal(np.power(zero, p), zero)
# zero power
assert_complex_equal(np.power(zero, 0), one)
assert_complex_equal(np.power(zero, 0+1j), cnan)
# negative power
for p in [0.33, 0.5, 1, 1.5, 2, 3, 4, 5, 6.6]:
assert_complex_equal(np.power(zero, -p), cnan)
assert_complex_equal(np.power(zero, -1+0.2j), cnan)
def test_fast_power(self):
x=np.array([1,2,3], np.int16)
assert (x**2.00001).dtype is (x**2.0).dtype
class TestLog2(TestCase):
def test_log2_values(self) :
x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
for dt in ['f','d','g'] :
xf = np.array(x, dtype=dt)
yf = np.array(y, dtype=dt)
assert_almost_equal(np.log2(xf), yf)
class TestExp2(TestCase):
def test_exp2_values(self) :
x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
for dt in ['f','d','g'] :
xf = np.array(x, dtype=dt)
yf = np.array(y, dtype=dt)
assert_almost_equal(np.exp2(yf), xf)
class TestLogAddExp2(_FilterInvalids):
# Need test for intermediate precisions
def test_logaddexp2_values(self) :
x = [1, 2, 3, 4, 5]
y = [5, 4, 3, 2, 1]
z = [6, 6, 6, 6, 6]
for dt, dec in zip(['f','d','g'],[6, 15, 15]) :
xf = np.log2(np.array(x, dtype=dt))
yf = np.log2(np.array(y, dtype=dt))
zf = np.log2(np.array(z, dtype=dt))
assert_almost_equal(np.logaddexp2(xf, yf), zf, decimal=dec)
def test_logaddexp2_range(self) :
x = [1000000, -1000000, 1000200, -1000200]
y = [1000200, -1000200, 1000000, -1000000]
z = [1000200, -1000000, 1000200, -1000000]
for dt in ['f','d','g'] :
logxf = np.array(x, dtype=dt)
logyf = np.array(y, dtype=dt)
logzf = np.array(z, dtype=dt)
assert_almost_equal(np.logaddexp2(logxf, logyf), logzf)
def test_inf(self) :
err = np.seterr(invalid='ignore')
inf = np.inf
x = [inf, -inf, inf, -inf, inf, 1, -inf, 1]
y = [inf, inf, -inf, -inf, 1, inf, 1, -inf]
z = [inf, inf, inf, -inf, inf, inf, 1, 1]
try:
for dt in ['f','d','g'] :
logxf = np.array(x, dtype=dt)
logyf = np.array(y, dtype=dt)
logzf = np.array(z, dtype=dt)
assert_equal(np.logaddexp2(logxf, logyf), logzf)
finally:
np.seterr(**err)
def test_nan(self):
assert_(np.isnan(np.logaddexp2(np.nan, np.inf)))
assert_(np.isnan(np.logaddexp2(np.inf, np.nan)))
assert_(np.isnan(np.logaddexp2(np.nan, 0)))
assert_(np.isnan(np.logaddexp2(0, np.nan)))
assert_(np.isnan(np.logaddexp2(np.nan, np.nan)))
class TestLog(TestCase):
def test_log_values(self) :
x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
for dt in ['f','d','g'] :
log2_ = 0.69314718055994530943
xf = np.array(x, dtype=dt)
yf = np.array(y, dtype=dt)*log2_
assert_almost_equal(np.log(xf), yf)
class TestExp(TestCase):
def test_exp_values(self) :
x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
for dt in ['f','d','g'] :
log2_ = 0.69314718055994530943
xf = np.array(x, dtype=dt)
yf = np.array(y, dtype=dt)*log2_
assert_almost_equal(np.exp(yf), xf)
class TestLogAddExp(_FilterInvalids):
def test_logaddexp_values(self) :
x = [1, 2, 3, 4, 5]
y = [5, 4, 3, 2, 1]
z = [6, 6, 6, 6, 6]
for dt, dec in zip(['f','d','g'],[6, 15, 15]) :
xf = np.log(np.array(x, dtype=dt))
yf = np.log(np.array(y, dtype=dt))
zf = np.log(np.array(z, dtype=dt))
assert_almost_equal(np.logaddexp(xf, yf), zf, decimal=dec)
def test_logaddexp_range(self) :
x = [1000000, -1000000, 1000200, -1000200]
y = [1000200, -1000200, 1000000, -1000000]
z = [1000200, -1000000, 1000200, -1000000]
for dt in ['f','d','g'] :
logxf = np.array(x, dtype=dt)
logyf = np.array(y, dtype=dt)
logzf = np.array(z, dtype=dt)
assert_almost_equal(np.logaddexp(logxf, logyf), logzf)
def test_inf(self) :
err = np.seterr(invalid='ignore')
inf = np.inf
x = [inf, -inf, inf, -inf, inf, 1, -inf, 1]
y = [inf, inf, -inf, -inf, 1, inf, 1, -inf]
z = [inf, inf, inf, -inf, inf, inf, 1, 1]
try:
for dt in ['f','d','g'] :
logxf = np.array(x, dtype=dt)
logyf = np.array(y, dtype=dt)
logzf = np.array(z, dtype=dt)
assert_equal(np.logaddexp(logxf, logyf), logzf)
finally:
np.seterr(**err)
def test_nan(self):
assert_(np.isnan(np.logaddexp(np.nan, np.inf)))
assert_(np.isnan(np.logaddexp(np.inf, np.nan)))
assert_(np.isnan(np.logaddexp(np.nan, 0)))
assert_(np.isnan(np.logaddexp(0, np.nan)))
assert_(np.isnan(np.logaddexp(np.nan, np.nan)))
class TestLog1p(TestCase):
def test_log1p(self):
assert_almost_equal(ncu.log1p(0.2), ncu.log(1.2))
assert_almost_equal(ncu.log1p(1e-6), ncu.log(1+1e-6))
class TestExpm1(TestCase):
def test_expm1(self):
assert_almost_equal(ncu.expm1(0.2), ncu.exp(0.2)-1)
assert_almost_equal(ncu.expm1(1e-6), ncu.exp(1e-6)-1)
class TestHypot(TestCase, object):
def test_simple(self):
assert_almost_equal(ncu.hypot(1, 1), ncu.sqrt(2))
assert_almost_equal(ncu.hypot(0, 0), 0)
def assert_hypot_isnan(x, y):
err = np.seterr(invalid='ignore')
try:
assert_(np.isnan(ncu.hypot(x, y)), "hypot(%s, %s) is %s, not nan" % (x, y, ncu.hypot(x, y)))
finally:
np.seterr(**err)
def assert_hypot_isinf(x, y):
err = np.seterr(invalid='ignore')
try:
assert_(np.isinf(ncu.hypot(x, y)), "hypot(%s, %s) is %s, not inf" % (x, y, ncu.hypot(x, y)))
finally:
np.seterr(**err)
class TestHypotSpecialValues(TestCase):
def test_nan_outputs(self):
assert_hypot_isnan(np.nan, np.nan)
assert_hypot_isnan(np.nan, 1)
    def test_nan_outputs2(self):
assert_hypot_isinf(np.nan, np.inf)
assert_hypot_isinf(np.inf, np.nan)
assert_hypot_isinf(np.inf, 0)
assert_hypot_isinf(0, np.inf)
def assert_arctan2_isnan(x, y):
assert_(np.isnan(ncu.arctan2(x, y)), "arctan(%s, %s) is %s, not nan" % (x, y, ncu.arctan2(x, y)))
def assert_arctan2_ispinf(x, y):
assert_((np.isinf(ncu.arctan2(x, y)) and ncu.arctan2(x, y) > 0), "arctan(%s, %s) is %s, not +inf" % (x, y, ncu.arctan2(x, y)))
def assert_arctan2_isninf(x, y):
assert_((np.isinf(ncu.arctan2(x, y)) and ncu.arctan2(x, y) < 0), "arctan(%s, %s) is %s, not -inf" % (x, y, ncu.arctan2(x, y)))
def assert_arctan2_ispzero(x, y):
assert_((ncu.arctan2(x, y) == 0 and not np.signbit(ncu.arctan2(x, y))), "arctan(%s, %s) is %s, not +0" % (x, y, ncu.arctan2(x, y)))
def assert_arctan2_isnzero(x, y):
assert_((ncu.arctan2(x, y) == 0 and np.signbit(ncu.arctan2(x, y))), "arctan(%s, %s) is %s, not -0" % (x, y, ncu.arctan2(x, y)))
class TestArctan2SpecialValues(TestCase):
def test_one_one(self):
# atan2(1, 1) returns pi/4.
assert_almost_equal(ncu.arctan2(1, 1), 0.25 * np.pi)
assert_almost_equal(ncu.arctan2(-1, 1), -0.25 * np.pi)
assert_almost_equal(ncu.arctan2(1, -1), 0.75 * np.pi)
def test_zero_nzero(self):
# atan2(+-0, -0) returns +-pi.
assert_almost_equal(ncu.arctan2(np.PZERO, np.NZERO), np.pi)
assert_almost_equal(ncu.arctan2(np.NZERO, np.NZERO), -np.pi)
def test_zero_pzero(self):
# atan2(+-0, +0) returns +-0.
assert_arctan2_ispzero(np.PZERO, np.PZERO)
assert_arctan2_isnzero(np.NZERO, np.PZERO)
def test_zero_negative(self):
# atan2(+-0, x) returns +-pi for x < 0.
assert_almost_equal(ncu.arctan2(np.PZERO, -1), np.pi)
assert_almost_equal(ncu.arctan2(np.NZERO, -1), -np.pi)
def test_zero_positive(self):
# atan2(+-0, x) returns +-0 for x > 0.
assert_arctan2_ispzero(np.PZERO, 1)
assert_arctan2_isnzero(np.NZERO, 1)
def test_positive_zero(self):
# atan2(y, +-0) returns +pi/2 for y > 0.
assert_almost_equal(ncu.arctan2(1, np.PZERO), 0.5 * np.pi)
assert_almost_equal(ncu.arctan2(1, np.NZERO), 0.5 * np.pi)
def test_negative_zero(self):
# atan2(y, +-0) returns -pi/2 for y < 0.
assert_almost_equal(ncu.arctan2(-1, np.PZERO), -0.5 * np.pi)
assert_almost_equal(ncu.arctan2(-1, np.NZERO), -0.5 * np.pi)
def test_any_ninf(self):
# atan2(+-y, -infinity) returns +-pi for finite y > 0.
assert_almost_equal(ncu.arctan2(1, np.NINF), np.pi)
assert_almost_equal(ncu.arctan2(-1, np.NINF), -np.pi)
def test_any_pinf(self):
# atan2(+-y, +infinity) returns +-0 for finite y > 0.
assert_arctan2_ispzero(1, np.inf)
assert_arctan2_isnzero(-1, np.inf)
def test_inf_any(self):
# atan2(+-infinity, x) returns +-pi/2 for finite x.
assert_almost_equal(ncu.arctan2( np.inf, 1), 0.5 * np.pi)
assert_almost_equal(ncu.arctan2(-np.inf, 1), -0.5 * np.pi)
def test_inf_ninf(self):
# atan2(+-infinity, -infinity) returns +-3*pi/4.
assert_almost_equal(ncu.arctan2( np.inf, -np.inf), 0.75 * np.pi)
assert_almost_equal(ncu.arctan2(-np.inf, -np.inf), -0.75 * np.pi)
def test_inf_pinf(self):
# atan2(+-infinity, +infinity) returns +-pi/4.
assert_almost_equal(ncu.arctan2( np.inf, np.inf), 0.25 * np.pi)
assert_almost_equal(ncu.arctan2(-np.inf, np.inf), -0.25 * np.pi)
def test_nan_any(self):
# atan2(nan, x) returns nan for any x, including inf
assert_arctan2_isnan(np.nan, np.inf)
assert_arctan2_isnan(np.inf, np.nan)
assert_arctan2_isnan(np.nan, np.nan)
class TestLdexp(TestCase):
def _check_ldexp(self, tp):
assert_almost_equal(ncu.ldexp(np.array(2., np.float32),
np.array(3, tp)), 16.)
assert_almost_equal(ncu.ldexp(np.array(2., np.float64),
np.array(3, tp)), 16.)
assert_almost_equal(ncu.ldexp(np.array(2., np.longdouble),
np.array(3, tp)), 16.)
def test_ldexp(self):
# The default Python int type should work
assert_almost_equal(ncu.ldexp(2., 3), 16.)
# The following int types should all be accepted
self._check_ldexp(np.int8)
self._check_ldexp(np.int16)
self._check_ldexp(np.int32)
self._check_ldexp('i')
self._check_ldexp('l')
@dec.knownfailureif(sys.platform == 'win32' and sys.version_info < (2, 6),
"python.org < 2.6 binaries have broken ldexp in the "
"C runtime")
def test_ldexp_overflow(self):
# silence warning emitted on overflow
err = np.seterr(over="ignore")
try:
imax = np.iinfo(np.dtype('l')).max
imin = np.iinfo(np.dtype('l')).min
assert_equal(ncu.ldexp(2., imax), np.inf)
assert_equal(ncu.ldexp(2., imin), 0)
finally:
np.seterr(**err)
class TestMaximum(_FilterInvalids):
def test_reduce(self):
dflt = np.typecodes['AllFloat']
dint = np.typecodes['AllInteger']
seq1 = np.arange(11)
seq2 = seq1[::-1]
func = np.maximum.reduce
for dt in dint:
tmp1 = seq1.astype(dt)
tmp2 = seq2.astype(dt)
assert_equal(func(tmp1), 10)
assert_equal(func(tmp2), 10)
for dt in dflt:
tmp1 = seq1.astype(dt)
tmp2 = seq2.astype(dt)
assert_equal(func(tmp1), 10)
assert_equal(func(tmp2), 10)
tmp1[::2] = np.nan
tmp2[::2] = np.nan
assert_equal(func(tmp1), np.nan)
assert_equal(func(tmp2), np.nan)
def test_reduce_complex(self):
assert_equal(np.maximum.reduce([1,2j]),1)
assert_equal(np.maximum.reduce([1+3j,2j]),1+3j)
def test_float_nans(self):
nan = np.nan
arg1 = np.array([0, nan, nan])
arg2 = np.array([nan, 0, nan])
out = np.array([nan, nan, nan])
assert_equal(np.maximum(arg1, arg2), out)
def test_complex_nans(self):
nan = np.nan
for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)] :
arg1 = np.array([0, cnan, cnan], dtype=np.complex)
arg2 = np.array([cnan, 0, cnan], dtype=np.complex)
out = np.array([nan, nan, nan], dtype=np.complex)
assert_equal(np.maximum(arg1, arg2), out)
def test_object_array(self):
arg1 = np.arange(5, dtype=np.object)
arg2 = arg1 + 1
assert_equal(np.maximum(arg1, arg2), arg2)
class TestMinimum(_FilterInvalids):
def test_reduce(self):
dflt = np.typecodes['AllFloat']
dint = np.typecodes['AllInteger']
seq1 = np.arange(11)
seq2 = seq1[::-1]
func = np.minimum.reduce
for dt in dint:
tmp1 = seq1.astype(dt)
tmp2 = seq2.astype(dt)
assert_equal(func(tmp1), 0)
assert_equal(func(tmp2), 0)
for dt in dflt:
tmp1 = seq1.astype(dt)
tmp2 = seq2.astype(dt)
assert_equal(func(tmp1), 0)
assert_equal(func(tmp2), 0)
tmp1[::2] = np.nan
tmp2[::2] = np.nan
assert_equal(func(tmp1), np.nan)
assert_equal(func(tmp2), np.nan)
def test_reduce_complex(self):
assert_equal(np.minimum.reduce([1,2j]),2j)
assert_equal(np.minimum.reduce([1+3j,2j]),2j)
def test_float_nans(self):
nan = np.nan
arg1 = np.array([0, nan, nan])
arg2 = np.array([nan, 0, nan])
out = np.array([nan, nan, nan])
assert_equal(np.minimum(arg1, arg2), out)
def test_complex_nans(self):
nan = np.nan
for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)] :
arg1 = np.array([0, cnan, cnan], dtype=np.complex)
arg2 = np.array([cnan, 0, cnan], dtype=np.complex)
out = np.array([nan, nan, nan], dtype=np.complex)
assert_equal(np.minimum(arg1, arg2), out)
def test_object_array(self):
arg1 = np.arange(5, dtype=np.object)
arg2 = arg1 + 1
assert_equal(np.minimum(arg1, arg2), arg1)
class TestFmax(_FilterInvalids):
def test_reduce(self):
dflt = np.typecodes['AllFloat']
dint = np.typecodes['AllInteger']
seq1 = np.arange(11)
seq2 = seq1[::-1]
func = np.fmax.reduce
for dt in dint:
tmp1 = seq1.astype(dt)
tmp2 = seq2.astype(dt)
assert_equal(func(tmp1), 10)
assert_equal(func(tmp2), 10)
for dt in dflt:
tmp1 = seq1.astype(dt)
tmp2 = seq2.astype(dt)
assert_equal(func(tmp1), 10)
assert_equal(func(tmp2), 10)
tmp1[::2] = np.nan
tmp2[::2] = np.nan
assert_equal(func(tmp1), 9)
assert_equal(func(tmp2), 9)
def test_reduce_complex(self):
assert_equal(np.fmax.reduce([1,2j]),1)
assert_equal(np.fmax.reduce([1+3j,2j]),1+3j)
def test_float_nans(self):
nan = np.nan
arg1 = np.array([0, nan, nan])
arg2 = np.array([nan, 0, nan])
out = np.array([0, 0, nan])
assert_equal(np.fmax(arg1, arg2), out)
def test_complex_nans(self):
nan = np.nan
for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)] :
arg1 = np.array([0, cnan, cnan], dtype=np.complex)
arg2 = np.array([cnan, 0, cnan], dtype=np.complex)
out = np.array([0, 0, nan], dtype=np.complex)
assert_equal(np.fmax(arg1, arg2), out)
class TestFmin(_FilterInvalids):
def test_reduce(self):
dflt = np.typecodes['AllFloat']
dint = np.typecodes['AllInteger']
seq1 = np.arange(11)
seq2 = seq1[::-1]
func = np.fmin.reduce
for dt in dint:
tmp1 = seq1.astype(dt)
tmp2 = seq2.astype(dt)
assert_equal(func(tmp1), 0)
assert_equal(func(tmp2), 0)
for dt in dflt:
tmp1 = seq1.astype(dt)
tmp2 = seq2.astype(dt)
assert_equal(func(tmp1), 0)
assert_equal(func(tmp2), 0)
tmp1[::2] = np.nan
tmp2[::2] = np.nan
assert_equal(func(tmp1), 1)
assert_equal(func(tmp2), 1)
def test_reduce_complex(self):
assert_equal(np.fmin.reduce([1,2j]),2j)
assert_equal(np.fmin.reduce([1+3j,2j]),2j)
def test_float_nans(self):
nan = np.nan
arg1 = np.array([0, nan, nan])
arg2 = np.array([nan, 0, nan])
out = np.array([0, 0, nan])
assert_equal(np.fmin(arg1, arg2), out)
def test_complex_nans(self):
nan = np.nan
for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)] :
arg1 = np.array([0, cnan, cnan], dtype=np.complex)
arg2 = np.array([cnan, 0, cnan], dtype=np.complex)
out = np.array([0, 0, nan], dtype=np.complex)
assert_equal(np.fmin(arg1, arg2), out)
class TestFloatingPoint(TestCase):
def test_floating_point(self):
assert_equal(ncu.FLOATING_POINT_SUPPORT, 1)
class TestDegrees(TestCase):
def test_degrees(self):
assert_almost_equal(ncu.degrees(np.pi), 180.0)
assert_almost_equal(ncu.degrees(-0.5*np.pi), -90.0)
class TestRadians(TestCase):
def test_radians(self):
assert_almost_equal(ncu.radians(180.0), np.pi)
assert_almost_equal(ncu.radians(-90.0), -0.5*np.pi)
class TestSign(TestCase):
def test_sign(self):
a = np.array([np.inf, -np.inf, np.nan, 0.0, 3.0, -3.0])
out = np.zeros(a.shape)
tgt = np.array([1., -1., np.nan, 0.0, 1.0, -1.0])
olderr = np.seterr(invalid='ignore')
try:
res = ncu.sign(a)
assert_equal(res, tgt)
res = ncu.sign(a, out)
assert_equal(res, tgt)
assert_equal(out, tgt)
finally:
np.seterr(**olderr)
class TestSpecialMethods(TestCase):
def test_wrap(self):
class with_wrap(object):
def __array__(self):
return np.zeros(1)
def __array_wrap__(self, arr, context):
r = with_wrap()
r.arr = arr
r.context = context
return r
a = with_wrap()
x = ncu.minimum(a, a)
assert_equal(x.arr, np.zeros(1))
func, args, i = x.context
self.assertTrue(func is ncu.minimum)
self.assertEqual(len(args), 2)
assert_equal(args[0], a)
assert_equal(args[1], a)
self.assertEqual(i, 0)
def test_wrap_with_iterable(self):
# test fix for bug #1026:
class with_wrap(np.ndarray):
__array_priority__ = 10
def __new__(cls):
return np.asarray(1).view(cls).copy()
def __array_wrap__(self, arr, context):
return arr.view(type(self))
a = with_wrap()
x = ncu.multiply(a, (1, 2, 3))
self.assertTrue(isinstance(x, with_wrap))
assert_array_equal(x, np.array((1, 2, 3)))
def test_priority_with_scalar(self):
# test fix for bug #826:
class A(np.ndarray):
__array_priority__ = 10
def __new__(cls):
return np.asarray(1.0, 'float64').view(cls).copy()
a = A()
x = np.float64(1)*a
self.assertTrue(isinstance(x, A))
assert_array_equal(x, np.array(1))
def test_old_wrap(self):
class with_wrap(object):
def __array__(self):
return np.zeros(1)
def __array_wrap__(self, arr):
r = with_wrap()
r.arr = arr
return r
a = with_wrap()
x = ncu.minimum(a, a)
assert_equal(x.arr, np.zeros(1))
def test_priority(self):
class A(object):
def __array__(self):
return np.zeros(1)
def __array_wrap__(self, arr, context):
r = type(self)()
r.arr = arr
r.context = context
return r
class B(A):
__array_priority__ = 20.
class C(A):
__array_priority__ = 40.
x = np.zeros(1)
a = A()
b = B()
c = C()
f = ncu.minimum
self.assertTrue(type(f(x,x)) is np.ndarray)
self.assertTrue(type(f(x,a)) is A)
self.assertTrue(type(f(x,b)) is B)
self.assertTrue(type(f(x,c)) is C)
self.assertTrue(type(f(a,x)) is A)
self.assertTrue(type(f(b,x)) is B)
self.assertTrue(type(f(c,x)) is C)
self.assertTrue(type(f(a,a)) is A)
self.assertTrue(type(f(a,b)) is B)
self.assertTrue(type(f(b,a)) is B)
self.assertTrue(type(f(b,b)) is B)
self.assertTrue(type(f(b,c)) is C)
self.assertTrue(type(f(c,b)) is C)
self.assertTrue(type(f(c,c)) is C)
self.assertTrue(type(ncu.exp(a)) is A)
self.assertTrue(type(ncu.exp(b)) is B)
self.assertTrue(type(ncu.exp(c)) is C)
def test_failing_wrap(self):
class A(object):
def __array__(self):
return np.zeros(1)
def __array_wrap__(self, arr, context):
raise RuntimeError
a = A()
self.assertRaises(RuntimeError, ncu.maximum, a, a)
def test_default_prepare(self):
class with_wrap(object):
__array_priority__ = 10
def __array__(self):
return np.zeros(1)
def __array_wrap__(self, arr, context):
return arr
a = with_wrap()
x = ncu.minimum(a, a)
assert_equal(x, np.zeros(1))
assert_equal(type(x), np.ndarray)
def test_prepare(self):
class with_prepare(np.ndarray):
__array_priority__ = 10
def __array_prepare__(self, arr, context):
# make sure we can return a new array
return np.array(arr).view(type=with_prepare)
a = np.array(1).view(type=with_prepare)
x = np.add(a, a)
assert_equal(x, np.array(2))
assert_equal(type(x), with_prepare)
def test_failing_prepare(self):
class A(object):
def __array__(self):
return np.zeros(1)
def __array_prepare__(self, arr, context=None):
raise RuntimeError
a = A()
self.assertRaises(RuntimeError, ncu.maximum, a, a)
def test_array_with_context(self):
class A(object):
def __array__(self, dtype=None, context=None):
func, args, i = context
self.func = func
self.args = args
self.i = i
return np.zeros(1)
class B(object):
def __array__(self, dtype=None):
return np.zeros(1, dtype)
class C(object):
def __array__(self):
return np.zeros(1)
a = A()
ncu.maximum(np.zeros(1), a)
self.assertTrue(a.func is ncu.maximum)
assert_equal(a.args[0], 0)
self.assertTrue(a.args[1] is a)
self.assertTrue(a.i == 1)
assert_equal(ncu.maximum(a, B()), 0)
assert_equal(ncu.maximum(a, C()), 0)
class TestChoose(TestCase):
def test_mixed(self):
c = np.array([True,True])
a = np.array([True,True])
assert_equal(np.choose(c, (a, 1)), np.array([1,1]))
def is_longdouble_finfo_bogus():
info = np.finfo(np.longcomplex)
return not np.isfinite(np.log10(info.tiny/info.eps))
class TestComplexFunctions(object):
funcs = [np.arcsin, np.arccos, np.arctan, np.arcsinh, np.arccosh,
np.arctanh, np.sin, np.cos, np.tan, np.exp,
np.exp2, np.log, np.sqrt, np.log10, np.log2,
np.log1p]
def test_it(self):
for f in self.funcs:
if f is np.arccosh :
x = 1.5
else :
x = .5
fr = f(x)
fz = f(np.complex(x))
assert_almost_equal(fz.real, fr, err_msg='real part %s'%f)
assert_almost_equal(fz.imag, 0., err_msg='imag part %s'%f)
def test_precisions_consistent(self) :
z = 1 + 1j
for f in self.funcs :
fcf = f(np.csingle(z))
fcd = f(np.cdouble(z))
fcl = f(np.clongdouble(z))
assert_almost_equal(fcf, fcd, decimal=6, err_msg='fch-fcd %s'%f)
assert_almost_equal(fcl, fcd, decimal=15, err_msg='fch-fcl %s'%f)
def test_branch_cuts(self):
# check branch cuts and continuity on them
yield _check_branch_cut, np.log, -0.5, 1j, 1, -1
yield _check_branch_cut, np.log2, -0.5, 1j, 1, -1
yield _check_branch_cut, np.log10, -0.5, 1j, 1, -1
yield _check_branch_cut, np.log1p, -1.5, 1j, 1, -1
yield _check_branch_cut, np.sqrt, -0.5, 1j, 1, -1
yield _check_branch_cut, np.arcsin, [ -2, 2], [1j, -1j], 1, -1
yield _check_branch_cut, np.arccos, [ -2, 2], [1j, -1j], 1, -1
yield _check_branch_cut, np.arctan, [-2j, 2j], [1, -1 ], -1, 1
yield _check_branch_cut, np.arcsinh, [-2j, 2j], [-1, 1], -1, 1
yield _check_branch_cut, np.arccosh, [ -1, 0.5], [1j, 1j], 1, -1
yield _check_branch_cut, np.arctanh, [ -2, 2], [1j, -1j], 1, -1
# check against bogus branch cuts: assert continuity between quadrants
yield _check_branch_cut, np.arcsin, [-2j, 2j], [ 1, 1], 1, 1
yield _check_branch_cut, np.arccos, [-2j, 2j], [ 1, 1], 1, 1
yield _check_branch_cut, np.arctan, [ -2, 2], [1j, 1j], 1, 1
yield _check_branch_cut, np.arcsinh, [ -2, 2, 0], [1j, 1j, 1 ], 1, 1
yield _check_branch_cut, np.arccosh, [-2j, 2j, 2], [1, 1, 1j], 1, 1
yield _check_branch_cut, np.arctanh, [-2j, 2j, 0], [1, 1, 1j], 1, 1
@dec.knownfailureif(True, "These branch cuts are known to fail")
def test_branch_cuts_failing(self):
# XXX: signed zero not OK with ICC on 64-bit platform for log, see
# http://permalink.gmane.org/gmane.comp.python.numeric.general/25335
yield _check_branch_cut, np.log, -0.5, 1j, 1, -1, True
yield _check_branch_cut, np.log2, -0.5, 1j, 1, -1, True
yield _check_branch_cut, np.log10, -0.5, 1j, 1, -1, True
yield _check_branch_cut, np.log1p, -1.5, 1j, 1, -1, True
# XXX: signed zeros are not OK for sqrt or for the arc* functions
yield _check_branch_cut, np.sqrt, -0.5, 1j, 1, -1, True
yield _check_branch_cut, np.arcsin, [ -2, 2], [1j, -1j], 1, -1, True
yield _check_branch_cut, np.arccos, [ -2, 2], [1j, -1j], 1, -1, True
yield _check_branch_cut, np.arctan, [-2j, 2j], [1, -1 ], -1, 1, True
yield _check_branch_cut, np.arcsinh, [-2j, 2j], [-1, 1], -1, 1, True
yield _check_branch_cut, np.arccosh, [ -1, 0.5], [1j, 1j], 1, -1, True
yield _check_branch_cut, np.arctanh, [ -2, 2], [1j, -1j], 1, -1, True
def test_against_cmath(self):
import cmath, sys
# cmath.asinh is broken in some versions of Python, see
# http://bugs.python.org/issue1381
broken_cmath_asinh = False
if sys.version_info < (2,6):
broken_cmath_asinh = True
points = [-1-1j, -1+1j, +1-1j, +1+1j]
name_map = {'arcsin': 'asin', 'arccos': 'acos', 'arctan': 'atan',
'arcsinh': 'asinh', 'arccosh': 'acosh', 'arctanh': 'atanh'}
atol = 4*np.finfo(np.complex).eps
for func in self.funcs:
fname = func.__name__.split('.')[-1]
cname = name_map.get(fname, fname)
try:
cfunc = getattr(cmath, cname)
except AttributeError:
continue
for p in points:
a = complex(func(np.complex_(p)))
b = cfunc(p)
if cname == 'asinh' and broken_cmath_asinh:
continue
assert_(abs(a - b) < atol, "%s %s: %s; cmath: %s"%(fname,p,a,b))
def check_loss_of_precision(self, dtype):
"""Check loss of precision in complex arc* functions"""
# Check against known-good functions
info = np.finfo(dtype)
real_dtype = dtype(0.).real.dtype
eps = info.eps
def check(x, rtol):
x = x.astype(real_dtype)
z = x.astype(dtype)
d = np.absolute(np.arcsinh(x)/np.arcsinh(z).real - 1)
assert_(np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(),
'arcsinh'))
z = (1j*x).astype(dtype)
d = np.absolute(np.arcsinh(x)/np.arcsin(z).imag - 1)
assert_(np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(), 'arcsin'))
import numpy as np
from numpy.testing import (run_module_suite, assert_equal, assert_almost_equal,
assert_)
from dipy.reconst.odf import (OdfFit, OdfModel, minmax_normalize, gfa)
from dipy.core.subdivide_octahedron import create_unit_hemisphere
from dipy.sims.voxel import multi_tensor, multi_tensor_odf
from dipy.data import get_sphere
from dipy.core.gradients import gradient_table, GradientTable
_sphere = create_unit_hemisphere(4)
_odf = (_sphere.vertices * [1, 2, 3]).sum(-1)
_gtab = GradientTable(np.ones((64, 3)))
class SimpleOdfModel(OdfModel):
sphere = _sphere
def fit(self, data):
fit = SimpleOdfFit(self, data)
return fit
class SimpleOdfFit(OdfFit):
def odf(self, sphere=None):
if sphere is None:
sphere = self.model.sphere
# Use ascontiguousarray to work around a bug in NumPy
return np.ascontiguousarray((sphere.vertices * [1, 2, 3]).sum(-1))
def test_OdfFit():
m = SimpleOdfModel(_gtab)
f = m.fit(None)
odf = f.odf(_sphere)
assert_equal(len(odf), len(_sphere.theta))
def test_minmax_normalize():
bvalue = 3000
S0 = 1
SNR = 100
sphere = get_sphere('symmetric362')
bvecs = np.concatenate(([[0, 0, 0]], sphere.vertices))
bvals = np.zeros(len(bvecs)) + bvalue
bvals[0] = 0
gtab = gradient_table(bvals, bvecs)
evals = np.array(([0.0017, 0.0003, 0.0003], [0.0017, 0.0003, 0.0003]))
multi_tensor(gtab, evals, S0, angles=[(0, 0), (90, 0)],
fractions=[50, 50], snr=SNR)
odf = multi_tensor_odf(sphere.vertices, evals, angles=[(0, 0), (90, 0)],
fractions=[50, 50])
odf2 = minmax_normalize(odf)
assert_equal(odf2.max(), 1)
assert_equal(odf2.min(), 0)
odf3 = np.empty(odf.shape)
odf3 = minmax_normalize(odf, odf3)
assert_equal(odf3.max(), 1)
assert_equal(odf3.min(), 0)
def test_gfa():
g = gfa(np.ones(100))
assert_equal(g, 0)
g = gfa(np.ones((2, 100)))
assert_equal(g, np.array([0, 0]))
# The following series follows the rule gfa = sqrt((n-1)/(n-1)^2) = 1/sqrt(n-1)
g = gfa(np.hstack([np.ones((9)), [0]]))
assert_almost_equal(g, np.sqrt(9./81))
g = gfa(np.hstack([np.ones((99)), [0]]))
assert_almost_equal(g, np.sqrt(99./(99.**2)))
# All-zeros returns a nan with no warning:
g = gfa(np.zeros(10))
assert_(np.isnan(g))
'''
Script integrating detumble with orbit/magnetic field knowledge
'''
# from detumble.py_funcs import detumble_B_cross,detumble_B_dot,get_B_dot, detumble_B_dot_bang_bang
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import math
import numpy as np
import scipy.integrate as integrate
from orbit_propagation import get_orbit_pos, get_B_field_at_point
# from GNC.cmake_build_debug import SGP4_cpp as SGP4
# from util_funcs.py_funcs.frame_conversions import eci2ecef
import time_functions_cpp as tfcpp
import frame_conversions_cpp as fccpp
import detumble_cpp as dcpp
import time
import euler_cpp as ecpp
# clear figures
plt.close('all')
pi = math.pi
#--------------------Values from FSW sim----------------------------------------
# # Seed Initial Position/Velocity with TLE - BEESAT-1
# # (optional) - can instead replace this with r_i, v_i as np.array(3)
# line1 = ('1 35933U 09051C 19315.45643387 .00000096 00000-0 32767-4 0 9991')
# line2 = ('2 35933 98.6009 127.6424 0006914 92.0098 268.1890 14.56411486538102')
#
# # Simulation Parameters
# tstart = datetime(2019, 12, 30, 00, 00, 00)
# tstep = .1 # [sec] - 1 Hz
#
# # Initial Spacecraft Attitude
# q_i = np.array([1, 0, 0, 0]) # quaternion
# w_i = np.array([.01, .05, -.03]) # radians/sec
#
# # Spacecraft Properties
# I = np.array([[17,0,0],[0,18,0],[0,0,22]])
# mass = 1.0 # kg
#--------------------End Values from FSW sim---------------------------------------
# inertia properties (add real later)
Ixx = 0.34375
Iyy = 0.34375
Izz = 0.34375
I = np.array([[Ixx, 0.0, 0.0],[0.0, Iyy, 0.0], [0.0, 0.0, Izz]])
max_dipoles = np.array([[8.8e-3], [1.373e-2], [8.2e-3]])
# initial attitude conditions, radians & rad/s
q_0 = np.array([[1.0],[0.0],[0.0],[0.0]]) # initial quaternion, scalar last
w_0 = np.array([[.01],[.05],[-.03]]) # initial rotation rate, rad/s
# initial state: quaternion, rotation rate
x_0 = np.squeeze(np.concatenate((q_0,w_0)))
# initial orbit state conditions, TLE+epoch
epoch = '2019-12-30T00:00:00.00'
line1 = ('1 35933U 09051C 19315.45643387 .00000096 00000-0 32767-4 0 9991')
line2 = ('2 35933 98.6009 127.6424 0006914 92.0098 268.1890 14.56411486538102')
TLE = {'line1': line1, 'line2': line2}
# initial orbit time (fair warning, this is the time for PyCubed, not Orsted)
MJD = 58847.0
GMST_0 = tfcpp.MJD2GMST(MJD)
mean_motion = 14.46/(24*3600)*2*math.pi # mean motion, radians/second
period = 2*pi/mean_motion # Period, seconds
# feed in a vector of times and plot orbit
t0 = 0.0
tf = 600
tstep = .1
times = np.arange(t0,tf,tstep)
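# ---------------------------------------------------------------------------
# Hedged sketch (not part of the original script): the rest of the run
# presumably feeds `times`, the TLE and the attitude state into the project's
# own helpers (get_orbit_pos, get_B_field_at_point, dcpp.*), whose exact
# signatures are not shown here. The standalone function below only
# illustrates the generic B-dot detumble idea with plain NumPy/SciPy:
# commanded dipole m = -k*dB/dt, torque tau = m x B, I*w_dot = tau - w x (I*w).
# The gain, the toy field model and all names are illustrative assumptions.
def example_b_dot_detumble(I_body, w0, B_body_func, k=1.0e4, t_span=(0.0, 600.0)):
    I_inv = np.linalg.inv(I_body)
    def rhs(t, w):
        B = B_body_func(t)                        # body-frame field [T]
        dB = (B_body_func(t + 0.1) - B) / 0.1     # crude finite-difference B-dot
        m = -k * dB                               # commanded magnetic dipole
        tau = np.cross(m, B)                      # magnetic control torque
        return I_inv @ (tau - np.cross(w, I_body @ w))
    return integrate.solve_ivp(rhs, t_span, w0, max_step=1.0)

# Illustrative call with a toy rotating field (uncomment to run):
# sol = example_b_dot_detumble(I, np.array([.01, .05, -.03]),
#                              lambda t: 3.0e-5*np.array([math.cos(1e-3*t),
#                                                         math.sin(1e-3*t), 0.3]))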
import unittest
from unittest.mock import patch
import numpy as np
from deepen import propagation as prop
class DeepenPropagationInitializeParamsTest(unittest.TestCase):
def setUp(self):
self.layer_dims = [2, 3, 1]
self.num_dims = len(self.layer_dims)
def test_returns_correct_number_of_params(self):
number_of_params = 2 * (self.num_dims - 1)
params = prop.initialize_params(self.layer_dims)
self.assertEqual(len(params), number_of_params)
def test_correct_shape_for_weights(self):
params = prop.initialize_params(self.layer_dims)
for l in range(1, self.num_dims):
with self.subTest(l = l):
self.assertEqual(
params['W' + str(l)].shape,
(self.layer_dims[l], self.layer_dims[l-1])
)
def test_correct_shape_for_biases(self):
params = prop.initialize_params(self.layer_dims)
for l in range(1, self.num_dims):
with self.subTest(l = l):
self.assertEqual(
params['b' + str(l)].shape,
(self.layer_dims[l], 1)
)
def test_weights_are_not_zero(self):
params = prop.initialize_params(self.layer_dims)
for l in range(1, self.num_dims):
with self.subTest(l = l):
self.assertTrue(params['W' + str(l)].all())
def test_biases_are_zero(self):
params = prop.initialize_params(self.layer_dims)
for l in range(1, self.num_dims):
with self.subTest(l = l):
self.assertFalse(params['b' + str(l)].any())
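# Hedged sketch (not part of the deepen package): a minimal initialize_params
# consistent with the contract the tests above check -- one weight matrix of
# shape (layer_dims[l], layer_dims[l-1]) with nonzero random entries and one
# zero bias vector of shape (layer_dims[l], 1) per layer. The 0.01 scaling and
# the RNG are assumptions; prop.initialize_params itself may differ.
def _reference_initialize_params(layer_dims, seed=0):
    rng = np.random.default_rng(seed)
    params = {}
    for l in range(1, len(layer_dims)):
        # small non-zero weights, zero biases
        params['W' + str(l)] = 0.01 * rng.standard_normal((layer_dims[l], layer_dims[l - 1]))
        params['b' + str(l)] = np.zeros((layer_dims[l], 1))
    return params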
class DeepenPropagationLinearForwardTest(unittest.TestCase):
def setUp(self):
self.A = np.array([[1], [2]])
self.W = np.array([[1, 2], [3, 4], [5, 6]])
self.b = np.array([[1], [2], [3]])
self.params = (self.A, self.W, self.b)
# Z = W·A + b
self.Z_expected = np.array([[6], [13], [20]])
def test_Z_has_the_correct_shape(self):
Z, _ = prop.linear_forward(*self.params)
self.assertEqual(Z.shape, self.Z_expected.shape)
def test_Z_is_linear_combination_of_the_inputs(self):
Z, _ = prop.linear_forward(*self.params)
self.assertTrue(np.array_equal(Z, self.Z_expected))
def test_cache_contains_the_inputs(self):
_, cache = prop.linear_forward(*self.params)
subtests = zip(cache, self.params, ('A', 'W', 'b'))
for cached, param, description in subtests:
with self.subTest(parameter = description):
self.assertTrue(np.array_equal(cached, param))
class DeepenPropagationLayerForwardTest(unittest.TestCase):
def setUp(self):
self.A = np.array([[1], [2]])
self.W = np.array([[1, 2], [3, 4], [5, 6]])
self.b = np.array([[1], [2], [3]])
self.params = (self.A, self.W, self.b)
# Z = W·A + b
self.Z = np.array([[6], [13], [20]])
def test_A_has_the_correct_shape(self):
A, _ = prop.layer_forward(*self.params, 'relu')
self.assertTrue(A.shape == self.Z.shape)
def test_linear_cache_contains_the_inputs(self):
_, (linear_cache, _) = prop.layer_forward(*self.params, 'relu')
subtests = zip(linear_cache, self.params, ('A', 'W', 'b'))
for cached, param, description in subtests:
with self.subTest(parameter = description):
self.assertTrue(np.array_equal(cached, param))
def test_activation_cache_has_the_correct_shape(self):
_, (_, activation_cache) = prop.layer_forward(*self.params, 'relu')
self.assertTrue(activation_cache.shape == self.Z.shape)
def test_calls_relu_activation(self):
with unittest.mock.patch(
'deepen.propagation.relu',
wraps = prop.relu
) as relu_spy:
prop.layer_forward(*self.params, 'relu')
relu_spy.assert_called_once()
def test_calls_sigmoid(self):
with unittest.mock.patch(
'deepen.propagation.sigmoid',
wraps = prop.sigmoid
) as sigmoid_spy:
prop.layer_forward(*self.params, 'sigmoid')
sigmoid_spy.assert_called_once()
class DeepenPropagationModelForwardTest(unittest.TestCase):
def setUp(self):
self.X = np.ones((2, 1))
import math
import torch
import numpy as np
from sklearn.metrics import average_precision_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
from sklearn.metrics import roc_auc_score
def eval_one_epoch(hint, tgan, sampler, src, dst, ts, label, val_e_idx_l=None):
val_acc, val_ap, val_f1, val_auc, val_pr, val_rec = [], [], [], [], [], []
with torch.no_grad():
tgan = tgan.eval()
TEST_BATCH_SIZE = 30
num_test_instance = len(src)
num_test_batch = math.ceil(num_test_instance / TEST_BATCH_SIZE)
for k in range(num_test_batch):
# percent = 100 * k / num_test_batch
# if k % int(0.2 * num_test_batch) == 0:
# logger.info('{0} progress: {1:10.4f}'.format(hint, percent))
s_idx = k * TEST_BATCH_SIZE
e_idx = min(num_test_instance - 1, s_idx + TEST_BATCH_SIZE)
if s_idx == e_idx:
continue
src_l_cut = src[s_idx:e_idx]
dst_l_cut = dst[s_idx:e_idx]
ts_l_cut = ts[s_idx:e_idx]
e_l_cut = val_e_idx_l[s_idx:e_idx] if (val_e_idx_l is not None) else None
# label_l_cut = label[s_idx:e_idx]
size = len(src_l_cut)
src_l_fake, dst_l_fake = sampler.sample(size)
pos_prob, neg_prob = tgan.contrast(src_l_cut, dst_l_cut, dst_l_fake, ts_l_cut, e_l_cut, test=True)
pred_score = np.concatenate([(pos_prob).cpu().numpy(), (neg_prob).cpu().numpy()])
pred_label = pred_score > 0.5
true_label = np.concatenate([np.ones(size), np.zeros(size)])
val_acc.append((pred_label == true_label).mean())
val_ap.append(average_precision_score(true_label, pred_score))
val_f1.append(f1_score(true_label, pred_label))
val_auc.append(roc_auc_score(true_label, pred_score))
val_pr.append(precision_score(true_label, pred_label, zero_division=1))
val_rec.append(recall_score(true_label, pred_label))
return np.mean(val_acc), np.mean(val_ap), np.mean(val_f1), np.mean(val_auc), np.mean(val_pr), np.mean(val_rec)
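# Illustrative, standalone check (not part of the original evaluation code):
# the metric calls used above behave as in this tiny example with fabricated
# scores -- four positives, four negatives, threshold 0.5. Values are toy data.
if __name__ == '__main__':
    toy_true = np.concatenate([np.ones(4), np.zeros(4)])
    toy_score = np.array([0.9, 0.8, 0.4, 0.7, 0.3, 0.2, 0.6, 0.1])
    toy_pred = toy_score > 0.5
    print('acc', (toy_pred == toy_true).mean())
    print('ap ', average_precision_score(toy_true, toy_score))
    print('f1 ', f1_score(toy_true, toy_pred))
    print('auc', roc_auc_score(toy_true, toy_score))
    print('pr ', precision_score(toy_true, toy_pred, zero_division=1))
    print('rec', recall_score(toy_true, toy_pred))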
import numpy as np
from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier
from sklearn.metrics import mean_absolute_error, r2_score
from sklearn.model_selection import GridSearchCV, PredefinedSplit
from xgboost import XGBRegressor, XGBClassifier
from sparta.tomer.alpha_go.consts import MODE
import pandas as pd
import pdb
class ModelConstructor(object):
def __init__(self, year, X_train, y_train, X_test, y_test, train_data, model, validation_size, workers):
self.year = year
self._train_data = train_data.copy(deep=True)
self.X_train = X_train
self.y_train = np.array(y_train).reshape(-1, )
self.X_test = X_test
self.y_test = np.array(y_test).reshape(-1, )
self.model = model
self.predefined_split_sample_size = 12
self.feature_names = self._train_data.columns.to_list()[:-1]
self.best_params = None
self.validation_size = validation_size
self.workers = workers
self.features_display = X_test.columns.to_list()
@staticmethod
def rf_params():
rf_n_estimators = [int(x) for x in np.linspace(200, 1000, 5)]
rf_n_estimators.append(1500)
rf_n_estimators.append(2000)
# rf_n_estimators = [int(x) for x in np.linspace(200, 1000, 3)]
# Maximum number of levels in tree
rf_max_depth = [int(x) for x in np.linspace(5, 55, 11)]
# Add the default as a possible value
rf_max_depth.append(None)
# rf_max_depth = [5, 10]
# Number of features to consider at every split
rf_max_features = ['auto', 'sqrt', 'log2']
# rf_max_features = ['log2']
# Criterion to split on
rf_criterion = ['mse', 'mae']
# rf_criterion = ['mae']
# Minimum number of samples required to split a node
rf_min_samples_split = [int(x) for x in np.linspace(2, 10, 9)]
# rf_min_samples_split = [2, 5, 7]
# Minimum decrease in impurity required for split to happen
rf_min_impurity_decrease = [0.0, 0.05, 0.1]
# rf_min_impurity_decrease = [0.05, 0.1]
# Method of selecting samples for training each tree
rf_bootstrap = [True, False]
# rf_bootstrap = [True]
return {'n_estimators': rf_n_estimators,
'max_depth': rf_max_depth,
'max_features': rf_max_features,
'criterion': rf_criterion,
'min_samples_split': rf_min_samples_split,
'min_impurity_decrease': rf_min_impurity_decrease,
'bootstrap': rf_bootstrap,
'random_state': [42]
}
@staticmethod
def get_rf_best_params():
return {'n_estimators': 300,
'max_depth': 6,
'max_features': 5,
'criterion': 'mae',
'bootstrap': True,
'random_state': 42
}
@staticmethod
def xgboost_params():
xgb_n_estimators = [int(x) for x in np.linspace(200, 2000, 20)]
# xgb_n_estimators = [200, 2000]
# Maximum number of levels in tree
xgb_max_depth = [int(x) for x in np.linspace(2, 20, 10)]
# xgb_max_depth = [2, 20, 10]
# Minimum number of instaces needed in each node
xgb_min_child_weight = [int(x) for x in np.linspace(1, 10, 10)]
# xgb_min_child_weight = [1, 10, 10]
# Learning rate
xgb_eta = [x for x in np.linspace(0.1, 0.6, 6)]
# xgb_eta = [0.1, 0.6]
# Minimum loss reduction required to make further partition
xgb_gamma = [int(x) for x in np.linspace(0, 0.5, 6)]
# xgb_gamma = [0.1, 0.5]
return {'n_estimators': xgb_n_estimators,
'max_depth': xgb_max_depth,
'min_child_weight': xgb_min_child_weight,
'learning_rate': xgb_eta,
'gamma': xgb_gamma,
'random_state': [42]
}
@staticmethod
def get_xgboost_best_params():
return {'n_estimators': 1000,
'max_depth': 2,
'learning_rate': 0.1,
'random_state': 42,
}
# def grid_and_train_model(self, grid_params):
# self.train_data.reset_index(inplace=True)
# self.train_data.sort_values('date', inplace=True)
# self.date_index = pd.to_datetime(self.train_data['date'].unique()).sort_values()
#
# min_training_periods = 12 * 2
# self.training_dates = self.date_index[min_training_periods::6]
#
# for i in range(min_training_periods, len(self.date_index)):
# self.test_date = self.date_index[i]
# self.train_dates = self.date_index[i - min_training_periods:i]
#
# self.valid_dates = self.train_dates[-self.predefined_split_sample_size:]
# self.train_dates = self.train_dates[:-self.predefined_split_sample_size]
# self.X_train = np.array(
# self.train_data.loc[self.train_data['date'].isin(self.train_dates), self.feature_names])
# self.y_train = np.array(
# self.train_data.loc[self.train_data['date'].isin(self.train_dates), 'pct_change'])
# self.X_valid = np.array(
# self.train_data.loc[self.train_data['date'].isin(self.valid_dates), self.feature_names])
# self.y_valid = np.array(
# self.train_data.loc[self.train_data['date'].isin(self.valid_dates), 'pct_change'])
# self.test_fold = list(np.ones(len(self.y_train) - len(self.y_valid))) + list(np.zeros(len(self.y_valid)))
#
# self.X_test = np.array(self.train_data.loc[self.train_data['date'] == self.test_date, self.feature_names])
# self.y_test = np.array(self.train_data.loc[self.train_data['date'] == self.test_date, 'pct_change'])
#
# cv = [[c for c in PredefinedSplit(self.test_fold).split()][0]]
#
# grid_search = GridSearchCV(estimator=self.model, param_grid=grid_params, refit=False,
# scoring='neg_mean_squared_error', cv=cv, n_jobs=-1, verbose=3).fit(self.X_train,
# self.y_train)
#
# self.model = grid_search.best_estimator_
# print(f'Model: {self.model} best params are: {grid_search.best_estimator_}')
#
# self.model.fit(self.X_train, self.y_train)
#
# pickle.dump(self.model, open(Path(LOCAL_PATH) / f'model_{self.test_date: %Y-%M-%d}.pkl', 'wb'))
# def grid_search(self, grid_params, validation_size):
#
# self.X_train.append(self.y_train)
# val_dates = train_dates[-validation_size:]
#
# n_test_obs = processed_data['date'].isin(train_dates).sum()
# n_valid_obs = processed_data['date'].isin(valid_dates).sum()
#
# test_fold_encoding = list(np.concatenate([np.ones(n_test_obs - n_valid_obs), np.zeros(n_valid_obs)]))
#
# cv = [[c for c in PredefinedSplit(test_fold=test_fold_encoding).split()][0]]
#
# grid_search = GridSearchCV(estimator=self.model, param_grid=grid_params, refit=False,
# scoring='neg_mean_squared_error', cv=cv, n_jobs=1).fit(self.X_train,
# self.y_train)
#
# print(f'Model: {self.model} best params are: {grid_search.best_estimator_}')
# return grid_search.best_estimator_
def _grid_and_train_model(self, grid_params):
self._train_data.reset_index(inplace=True)
self._train_data.sort_values('date', inplace=True)
self.train_dates = pd.to_datetime(self._train_data['date'].unique()).sort_values()
val_dates = self.train_dates[-self.validation_size:]
n_test_obs = self._train_data['date'].isin(self.train_dates).sum()
n_valid_obs = self._train_data['date'].isin(val_dates).sum()
test_fold_encoding = list(np.concatenate([np.ones(n_test_obs - n_valid_obs), np.zeros(n_valid_obs)]))
import numpy as np
import copy
import scipy
from scipy.stats import norm
from scipy import io,signal
import matplotlib.cm as cm
import matplotlib.pyplot as plt
from weighted_median import *
def check_str_bool(s):
return s in ['True' ,'true', '1', 't', 'y','YES' ,'Yes','yes', 'yeah','Yeah', 'yup', 'certainly', 'uh-huh']
class vec_properties:
def __init__(self,source,ws,time_unit,time_scale_to_seconds,length_unit,length_scale_to_meter):
self.source = source
self.ws = ws
self.time_unit = time_unit
self.time_scale_to_seconds = time_scale_to_seconds
self.length_unit = length_unit
self.length_scale_to_meter = length_scale_to_meter
self.velocity_units = length_unit+'/'+time_unit
def show(self):
print(
'source: ',self.source,'\n',
'window size: ',self.ws,'\n',
'dt: ',self.time_scale_to_seconds,'\n',
'pixel to meter: ',self.length_scale_to_meter,'\n',
'velocity units: ',self.velocity_units,'\n')
class field_properties:
def __init__(self,frame,time,images_path,source,time_unit,time_scale_to_seconds,length_unit,length_scale_to_meter):
self.frame = frame
self.time = time
self.images_path = images_path
self.source = source
self.history = ''
self.time_unit = time_unit
self.time_scale_to_seconds = time_scale_to_seconds
self.length_unit = length_unit
self.length_scale_to_meter = length_scale_to_meter
self.velocity_units = length_unit+'/'+time_unit
def show(self):
print(
'frame: ',self.frame,'\n',
'absolute time: ',self.time,'\n',
'images_path: ',self.images_path,'\n',
'source: ',self.source,'\n',
'dt: ',self.time_scale_to_seconds,'\n',
'pixel to meter: ',self.length_scale_to_meter,'\n',
'length units: ',self.length_unit,'\n',
'velocity units: ',self.velocity_units)
class run_properties:
pass
class vector:
def __init__(self,X,Y,U,V,S2N,properties):
self.X = X
self.Y = Y
self.U = U
self.V = V
self.S2N = S2N
self.properties = properties
def convert_units(self,output_length_unit,output_time_unit):
LS = {'mm':0.001, 'cm':0.01, 'm':1.0,'meter':1.0,'meters':1.0, 'km':1000.}
TS = {'ms':0.001, 's':1.0,'second':1.0,'seconds':1.0, 'min':60.,'mins':60.,'h':3600.,'hour':3600.,'hours':3600.}
LS[self.properties.length_unit]=float(self.properties.length_scale_to_meter)
TS[self.properties.time_unit]=float(self.properties.time_scale_to_seconds)
self.X = self.X*(LS[self.properties.length_unit]/LS[output_length_unit])
self.Y = self.Y*(LS[self.properties.length_unit]/LS[output_length_unit])
self.U = self.U*(LS[self.properties.length_unit]/LS[output_length_unit])*(TS[output_time_unit]/TS[self.properties.time_unit])
self.V = self.V*(LS[self.properties.length_unit]/LS[output_length_unit])*(TS[output_time_unit]/TS[self.properties.time_unit])
self.properties.length_unit = output_length_unit
self.properties.length_scale_to_meter = LS[output_length_unit]
self.properties.time_unit = output_time_unit
self.properties.time_scale_to_seconds = TS[output_time_unit]
self.properties.velocity_units = output_length_unit+'/'+output_time_unit
class field:
def __init__(self,field_properties):
self.data = {}
self.filtered = {}
self.properties = field_properties
def __add__(self,other):
check_list = []
check_list.append(self.properties.length_unit == other.properties.length_unit)
check_list.append(self.properties.length_scale_to_meter == other.properties.length_scale_to_meter)
check_list.append(self.properties.time_unit == other.properties.time_unit)
check_list.append(self.properties.time_scale_to_seconds == other.properties.time_scale_to_seconds)
check_list.append(self.properties.velocity_units == other.properties.velocity_units)
if all(check_list):
sum_properties = self.properties
sum_properties.source = 'Sum'
sum_properties.frame = self.properties.frame + ' & ' + other.properties.frame
sum_properties.time = self.properties.time + ' & ' + other.properties.time
sum_properties.images_path = self.properties.images_path + ' & ' + other.properties.images_path
sum_field = field(sum_properties)
for xy in list(self.data.keys()):
sum_field.add_vec(self.data[xy])
for xy in list(other.data.keys()):
sum_field.add_vec(other.data[xy])
return sum_field
else:
print( 'Field properties do not match')
def add_vec(self, vector):
self.data[vector.X,vector.Y] = vector
def check_if_grid_point_exists(self,x,y):
xy = list(self.data.keys())
return (x,y) in xy
def move_to_filtered(self,vector):
self.filtered[vector.X,vector.Y] = copy.deepcopy(vector)
vector.U = np.nan
vector.V = np.nan
vector.properties.source = 'filtered'
def transfer(self,other):
for xy in list(other.data.keys()):
self.add_vec(other.data[xy])
def convert_field_units(self,output_length_unit,output_time_unit):
XY = list(self.data.keys())
if self.properties.length_unit == None or self.properties.length_unit == '':
self.properties.length_unit = str(input('field length units'))
if self.properties.length_scale_to_meter== None or self.properties.length_scale_to_meter == '':
self.properties.length_scale_to_meter = str(input('field length units to meters'))
if self.properties.time_unit == None or self.properties.time_unit == '':
self.properties.time_unit = str(input('field time units'))
if self.properties.time_scale_to_seconds== None or self.properties.time_scale_to_seconds == '':
self.properties.time_scale_to_seconds = str(input('field time units to seconds'))
for xy in XY:
self.data[xy].properties.length_unit = self.properties.length_unit
self.data[xy].properties.length_scale_to_meter = self.properties.length_scale_to_meter
self.data[xy].properties.time_unit = self.properties.time_unit
self.data[xy].properties.time_scale_to_seconds = self.properties.time_scale_to_seconds
self.data[xy].convert_units(output_length_unit,output_time_unit)
self.add_vec(self.data[xy])
self.remove_vec(xy[0],xy[1])
XY0 = list(self.data.keys())[0]
self.properties.length_unit = self.data[XY0].properties.length_unit
self.properties.length_scale_to_meter = self.data[XY0].properties.length_scale_to_meter
self.properties.time_unit = self.data[XY0].properties.time_unit
self.properties.time_scale_to_seconds = self.data[XY0].properties.time_scale_to_seconds
self.properties.velocity_units = self.data[XY0].properties.velocity_units
def remove_vec(self,X,Y,vector=None):
if vector is not None:
del self.data[vector.X,vector.Y]
else:
del self.data[X,Y]
def return_vel(self,x,y):
u = self.data[x,y].U
v = self.data[x,y].V
return u,v
def return_n_closest_neighbors(self,x,y,n=4):
X,Y = self.return_grid()
dist = np.sqrt((X-x)**2+(Y-y)**2)
n_closest_neighbors = [ [(X[ind],Y[ind]),dist[ind]] for ind in dist.argsort()[:n]]
return n_closest_neighbors
def return_closest_neighbors_radius(self,x,y,radius):
X,Y = self.return_grid()
dist = np.sqrt((X-x)**2+(Y-y)**2)
indecies = np.where(dist<radius)
closest_neighbors = [[(X[indecies[0][i]],Y[indecies[0][i]]),dist[indecies[0][i]]] for i in range(len(indecies[0]))]
return closest_neighbors
def return_grid(self):
XY = list(self.data.keys())
X,Y = zip(*XY)
X = np.array(X)
Y = np.array(Y)
return X,Y
def return_all_velocities(self):
XY = list(self.data.keys())
U = np.array([self.data[xy[0],xy[1]].U for xy in XY])
V = np.array([self.data[xy[0],xy[1]].V for xy in XY])
return U,V
def sub_average(self):
XY = list(self.data.keys())
umean,ustd,vmean,vstd = self.mean_velocity()
for i in range(len(XY)):
self.data[XY[i]].U = self.data[XY[i]].U - umean
self.data[XY[i]].V = self.data[XY[i]].V - vmean
def create_mesh_grid(self):
X,Y = self.return_grid()
U,V = self.return_all_velocities()
X_mesh_grid = sorted(list(set(X)))
Y_mesh_grid = sorted(list(set(Y)))
X_mesh_grid,Y_mesh_grid = np.meshgrid(X_mesh_grid,Y_mesh_grid)
U_mesh_grid = np.empty(X_mesh_grid.shape)
U_mesh_grid.fill(np.nan)
V_mesh_grid = np.empty(X_mesh_grid.shape)
V_mesh_grid.fill(np.nan)
for vec_ind in range(len(X)):
x = X[vec_ind]
y = Y[vec_ind]
col = np.array(np.where(X_mesh_grid[0,:]==x))[0,0]
row = np.array(np.where(Y_mesh_grid[:,0]==y))[0,0]
U_mesh_grid[row,col] = U[vec_ind]
V_mesh_grid[row,col] = V[vec_ind]
return X_mesh_grid,Y_mesh_grid[::-1],U_mesh_grid[::-1],V_mesh_grid[::-1]
def s2n_filter(self,threshold):
XY = list(self.data.keys())
for xy in XY:
if self.data[xy].S2N < threshold:
self.move_to_filtered(self.data[xy])
def hist_filter(self,percentage):
def TrueXor(*args):
return sum(args) == 1
hist_u,hist_v,hist2d = self.velocity_histogram()
# stretch boundary edges
hist_u[1][0] = hist_u[1][0]-1
hist_u[1][-1] = hist_u[1][-1]+1
hist_v[1][0] = hist_v[1][0]-1
hist_v[1][-1] = hist_v[1][-1]+1
hist2d[1][0] = hist2d[1][0]-1
hist2d[1][-1] = hist2d[1][-1]+1
hist2d[2][0] = hist2d[2][0]-1
hist2d[2][-1] = hist2d[2][-1]+1
XY = list(self.data.keys())
number_of_vectors = len(XY)
for xy in XY:
u = self.data[xy].U
v = self.data[xy].V
if np.isfinite(u) and not np.isfinite(v):
if hist_u[0][np.digitize(u,hist_u[1])-1] / number_of_vectors > percentage/100:
u_iter,v_iter = self.inverse_distance_interpolation(xy[0],xy[1])
if np.isfinite(v_iter):
self.data[xy].V = v_iter
v = v_iter
else:
self.move_to_filtered(self.data[xy])
if np.isfinite(v) and not np.isfinite(u):
if hist_v[0][np.digitize(v,hist_v[1])-1] / number_of_vectors > percentage/100:
u_iter,v_iter = self.inverse_distance_interpolation(xy[0],xy[1])
if np.isfinite(u_iter):
self.data[xy].U = u_iter
u = u_iter
else:
self.move_to_filtered(self.data[xy])
if np.isfinite(v) and np.isfinite(u):
U_histpos = np.digitize(u,hist2d[1])-1
V_histpos = np.digitize(v,hist2d[2])-1
if hist2d[0][U_histpos,V_histpos] / number_of_vectors < percentage/100:
self.move_to_filtered(self.data[xy])
def Z_filter(self,threshold,neighbors=4,power=1):
XY = list(self.data.keys())
for xy in XY:
u = self.data[xy].U
v = self.data[xy].V
closest_neighbors = self.return_n_closest_neighbors(self.data[xy].X,self.data[xy].Y,neighbors+1)[1:]
neighbor_pos , dis = zip(*closest_neighbors)
weights = [(1/d)**power for d in dis]
U,V = zip(*[self.return_vel(pos[0],pos[1]) for pos in neighbor_pos])
median_U = weighted_median(U,weights)
median_V = weighted_median(V,weights)
median_absolute_deviation_U = weighted_median([np.abs(u_neighbor - median_U) for u_neighbor in U],weights)
median_absolute_deviation_V = weighted_median([np.abs(v_neighbor - median_V) for v_neighbor in V],weights)
if 0.6745*(u - median_U) / max(median_absolute_deviation_U,0.01) > threshold:
self.move_to_filtered(self.data[xy])
continue
if 0.6745*(v - median_V) / max(median_absolute_deviation_V,0.01) > threshold:
self.move_to_filtered(self.data[xy])
continue
def max_arg_filter(self,U_bound,V_bound):
XY = list(self.data.keys())
for xy in XY:
U_check = True
V_check = True
if self.data[xy].U > U_bound[1] or self.data[xy].U < U_bound[0]:
U_check=False
if self.data[xy].V > V_bound[1] or self.data[xy].V < V_bound[0]:
V_check=False
if U_check and not V_check:
u_itr,v_itr = self.inverse_distance_interpolation(xy[0],xy[1])
self.data[xy].V = v_itr
elif V_check and not U_check:
u_itr,v_itr = self.inverse_distance_interpolation(xy[0],xy[1])
self.data[xy].U = u_itr
elif not V_check and not U_check:
self.move_to_filtered(self.data[xy])
def mean_velocity(self):
U,V = self.return_all_velocities()
return np.nanmean(U),np.nanstd(U),np.nanmean(V),np.nanstd(V)
def velocity_histogram(self,bins=10):
def remove_nans(u,v):
u = list(u)
v = list(v)
nan_index=[]
for i in range(len(u)):
if not np.isfinite(u[i]) or not np.isfinite(v[i]):
nan_index.append(i)
for index in sorted(nan_index, reverse=True):
del u[index]
del v[index]
return np.array(u),np.array(v)
U,V = self.return_all_velocities()
hist_U = np.histogram(U[np.isfinite(U)],bins)
hist_V = np.histogram(V[np.isfinite(V)],bins)
U,V = remove_nans(U,V)
hist2d = np.histogram2d(U, V, bins)
return hist_U,hist_V,hist2d
def extract_area(self,x_boundry,y_boundry):
area = field(self.properties)
X,Y = self.return_grid()
for i in range(len(X)):
if x_boundry[0]<=X[i]<=x_boundry[1] and y_boundry[0]<=Y[i]<=y_boundry[1]:
area.add_vec(self.data[X[i],Y[i]])
return area
def vel_gradients(self):
X,Y,U,V = self.create_mesh_grid()
Udx,Udy = np.gradient(U)
Vdx,Vdy = np.gradient(V)
return X,Y,Udx,Udy,Vdx,Vdy
def vel_differntial(self):
def least_square_diff(field,grid,axis=0):
if axis==0:
shape = field.shape
dif = np.zeros(shape)
for row in range(shape[0]):
for col in range(2,shape[1]-2):
rs = 2*field[row,col+2]+field[row,col+1]
ls = -field[row,col-1]-2*field[row,col-2]
dis = 10*(grid[row,col+1]-grid[row,col])
dif[row,col] = (rs+ls)/dis
#dif[row,col] = (2*field[row,col+2]+field[row,col+1]-field[row,col-1]-2*field[row,col-2])/10*(grid[row,col+1]-grid[row,col])
return dif
elif axis==1:
shape = field.shape
dif = np.zeros(shape)
for row in range(2,shape[0]-2):
for col in range(shape[1]):
us = 2*field[row-2,col]+field[row-1,col]
ds = -field[row+1,col]-2*field[row+2,col]
dis = 10*(grid[row-1,col]-grid[row,col])
dif[row,col] = (us+ds)/dis
#dif[row,col] = (2*field[row-2,col]+field[row-1,col]-field[row+1,col]-2*field[row+2,col])/10*(grid[row-1,col]-grid[row,col])
return dif
X,Y,U,V = self.create_mesh_grid()
dU_x = least_square_diff(U,X)
dU_y = least_square_diff(U,Y,axis=1)
dV_x = least_square_diff(V,X)
dV_y = least_square_diff(V,Y,axis=1)
return dU_x,dU_y,dV_x,dV_y
def profile(self,axis='y'):
X,Y,U,V = self.create_mesh_grid()
if axis=='y' or axis=='Y':
U_profile = np.nanmean(U,axis=1)[::-1]
V_profile = np.nanmean(V,axis=1)[::-1]
Y_profile = Y[:,0]
return U_profile,V_profile,Y_profile
else:
U_profile = np.nanmean(U,axis=0)[::-1]
V_profile = np.nanmean(V,axis=0)[::-1]
X_profile = X[0,:]
return U_profile,V_profile,X_profile
def vorticity_field(self):
dU_x,dU_y,dV_x,dV_y = self.vel_differntial()
vort = dV_x-dU_y
return vort[2:-2,2:-2]
def inverse_distance_interpolation(self,x,y,number_of_neighbors=5,radius=None,inverse_power=2):
def weigted_velocity(neighbors_vels,weights):
weight_sum=0
weigted_vels=[]
for i in range(len(neighbors_vels)):
if not np.isnan(neighbors_vels[i]):
weight_sum += weights[i]
weigted_vels.append(weights[i]*neighbors_vels[i])
return np.nansum(weigted_vels)/weight_sum
if self.check_if_grid_point_exists(x,y):
if radius is not None:
indecies,distances = zip(*self.return_closest_neighbors_radius(x,y,radius))
else:
indecies,distances = zip(*self.return_n_closest_neighbors(x,y,n=number_of_neighbors+1))
weights = list(np.array(distances[1:])**-float(inverse_power))
neighbors_vel = [self.return_vel(ind[0],ind[1]) for ind in indecies[1:]]
u_vels,v_vels = zip(*neighbors_vel)
inter_u = weigted_velocity(u_vels,weights)
inter_v = weigted_velocity(v_vels,weights)
return inter_u,inter_v
else:
if radius is not None:
indecies,distances = zip(*self.return_closest_neighbors_radius(x,y,radius))
else:
indecies,distances = zip(*self.return_n_closest_neighbors(x,y,n=number_of_neighbors))
weights = list(np.array(distances)**-float(inverse_power))
neighbors_vel = [self.return_vel(ind[0],ind[1]) for ind in indecies]
u_vels,v_vels = zip(*neighbors_vel)
inter_u = weigted_velocity(u_vels,weights)
inter_v = weigted_velocity(v_vels,weights)
return inter_u,inter_v
def interpf(self):
X,Y = self.return_grid()
for ind in range(X.shape[0]):
pos = (X[ind],Y[ind])
u_cur,v_cur = self.return_vel(pos[0],pos[1])
if np.isnan(u_cur) and np.isnan(v_cur):
u_iter,v_iter = self.inverse_distance_interpolation(pos[0],pos[1])
vec = self.data[pos]
vec.U = u_iter
vec.V = v_iter
vec.properties.source = 'Interpolation'
elif np.isnan(u_cur):
u_iter,v_iter = self.inverse_distance_interpolation(pos[0],pos[1])
vec = self.data[pos]
vec.U = u_iter
vec.properties.source = 'Interpolation'
elif np.isnan(v_cur):
u_iter,v_iter = self.inverse_distance_interpolation(pos[0],pos[1])
vec = self.data[pos]
vec.V = v_iter
vec.properties.source = 'Interpolation'
def remap(self,X,Y,shape_of_new_grid=None):
new_feild = field(self.properties)
Xold,Yold = self.return_grid()
if shape_of_new_grid==None:
X = X.flatten()
Y = Y.flatten()
else:
X,Y = np.meshgrid(np.linspace(Xold.min(),Xold.max(),shape_of_new_grid[1]),np.linspace(Yold.min(),Yold.max(),shape_of_new_grid[0]))
X = X.flatten()
Y = Y.flatten()
vec_properties = self.data[Xold[0],Yold[0]].properties
vec_properties.source = 'Interpolation'
for ind in range(len(X)):
u,v = self.inverse_distance_interpolation(X[ind],Y[ind])
vec = vector(X[ind],Y[ind],u,v,0,vec_properties)
new_feild.add_vec(vec)
self.filtered = self.data
self.data = {}
self.transfer(new_feild)
def auto_spatial_correlation(self):
X,Y,U,V = self.create_mesh_grid()
Uc = scipy.signal.convolve2d(U,U[::-1])
Vc = scipy.signal.convolve2d(V,V[::-1])
Uc = Uc - Uc.min()
Vc = Vc - Vc.min()
s_cor = np.sqrt(Uc**2+Vc**2)
dX = X - np.mean(X[0,:])
dY = Y - np.mean(Y[:,0])
return dX,dY,s_cor
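# Hedged usage sketch (not part of the original module): how a small field is
# typically assembled from vector objects and queried. All numbers, units and
# file names below are made up purely for illustration.
def _example_field_usage():
    vec_props = vec_properties(source='demo', ws=32,
                               time_unit='s', time_scale_to_seconds='1',
                               length_unit='m', length_scale_to_meter='1')
    f_props = field_properties(frame=0, time=0.0, images_path='demo.tif',
                               source='demo',
                               time_unit='s', time_scale_to_seconds='1',
                               length_unit='m', length_scale_to_meter='1')
    demo = field(f_props)
    for x, y, u, v in [(0, 0, 1.0, 0.1), (1, 0, 0.9, 0.2), (0, 1, 1.1, 0.0)]:
        demo.add_vec(vector(x, y, u, v, S2N=5.0, properties=vec_props))
    print(demo.mean_velocity())                      # (u_mean, u_std, v_mean, v_std)
    print(demo.inverse_distance_interpolation(0.5, 0.5))
    return demo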
class run:
def __init__(self):
self.fields = {}
def add_field(self,field):
self.fields[field.properties.frame] = field
def frames(self):
return list(self.fields.keys())
def remove_field(self,frame,field=None):
if field is not None:
del self.fields[field.properties.frame]
else:
del self.fields[frame]
def remap_run(self,X,Y,shape_of_new_grid=None):
frames = self.frames()
for frame in frames:
self.fields[frame].remap(X,Y,shape_of_new_grid)
def convert_run_units(self,output_length_unit,output_time_unit,run_length_unit=None,run_length_scale_to_meter=None,run_time_unit=None,run_time_scale_to_seconds=None):
same_prop = check_str_bool(input('Do all frames in run have the same properties?'))
if same_prop:
''' After correcting the properties of run use this:
if self.properties.length_unit == None or self.properties.length_unit == '':
self.properties.length_unit = str(input('run length units: '))
if self.properties.length_scale_to_meter== None or self.properties.length_scale_to_meter == '':
self.properties.length_scale_to_meter = str(input('run length units to meters: '))
if self.properties.time_unit == None or self.properties.time_unit == '':
self.properties.time_unit = str(input('run time units: '))
if self.properties.time_scale_to_seconds== None or self.properties.time_scale_to_seconds == '':
self.properties.time_scale_to_seconds = str(input('run time units to seconds: '))
'''
if run_length_unit is None:
self.length_unit = str(input('run length units: '))
else:
self.length_unit = run_length_unit
if run_length_scale_to_meter is None:
self.length_scale_to_meter = str(input('run length units to meters: '))
else:
self.length_scale_to_meter = run_length_scale_to_meter
if run_time_unit is None:
self.time_unit = str(input('run time units: '))
else:
self.time_unit = run_time_unit
if run_time_scale_to_seconds is None:
self.time_scale_to_seconds = str(input('run time units to seconds: '))
else:
self.time_scale_to_seconds = run_time_scale_to_seconds
frames = self.frames()
for frame in frames:
if same_prop:
self.fields[frame].properties.length_unit = self.length_unit
self.fields[frame].properties.length_scale_to_meter = self.length_scale_to_meter
self.fields[frame].properties.time_unit = self.time_unit
self.fields[frame].properties.time_scale_to_seconds = self.time_scale_to_seconds
self.fields[frame].convert_field_units(output_length_unit,output_time_unit)
def check_same_grid_run(self):
frames = self.frames()
base_frame = frames[0]
for frame in frames:
X_base,Y_base = self.fields[base_frame].return_grid()
X_check,Y_check = self.fields[frame].return_grid()
if all(X_base == X_check) and all(Y_base == Y_check):
base_frame = frame
else:
return False
return True
def gp_exists_all_frames(self,x,y,show_missing_frames=False):
frames = self.frames()
gp_exists = [self.fields[f].check_if_grid_point_exists(x,y) for f in frames]
if all(gp_exists):
return True
else:
no_gp_frames = [x for x, y in zip(frames, gp_exists) if y == False]
frames_with_gp = [x for x, y in zip(frames, gp_exists) if y == True]
# allows checking of missing grid point frames
if show_missing_frames:
print('Frames without the requested grid point ','(',x,',',y,')',': ',no_gp_frames)
return frames_with_gp
def run_grid(self):
frames = self.frames()
Y_agp = []
X_agp =[]
for frame in frames:
X,Y = self.fields[frame].return_grid()
Y_agp += Y.tolist()
Y_agp = sorted(list(set(Y_agp)))
X_agp += X.tolist()
X_agp = sorted(list(set(X_agp)))
return np.meshgrid(np.array(X_agp),np.array(Y_agp))
def grid_point_velocity(self,x,y,frames=None):
if frames==None:
frames = self.frames()
if self.gp_exists_all_frames(x,y):
U = []
V = []
for f in frames:
u,v = self.fields[f].return_vel(x,y)
U.append(u)
V.append(v)
U = np.array(U)
V = np.array(V)
return U,V
else:
U = []
V = []
for f in frames:
u,v = self.fields[f].return_vel(x,y)
U.append(u)
V.append(v)
U = np.array(U)
V = np.array(V)
return U,V
def return_field(self,number_of_field,name_of_frame=None):
if name_of_frame is not None:
return self.fields[name_of_frame]
else:
return self.fields[self.frames()[number_of_field]]
def mean_gp_velocity(self,x,y):
for_all_frames = self.gp_exists_all_frames(x,y)
if for_all_frames==True:
U,V = self.grid_point_velocity(x,y)
U_rms = U - np.nanmean(U)
V_rms = V - np.nanmean(V)
return np.nanmean(U),U_rms,np.nanmean(V),V_rms
else:
U,V = self.grid_point_velocity(x,y,for_all_frames)
U_rms = U - np.nanmean(U)
V_rms = V - np.nanmean(V)
return np.nanmean(U),U_rms,np.nanmean(V),V_rms
def mean_velocity_properties(self):
frames = self.frames()
U_mean = []
V_mean = []
for f in frames:
u_mean,u_std,v_mean,v_std = self.fields[f].mean_velocity()
U_mean.append(u_mean)
V_mean.append(v_mean)
Um = np.mean(U_mean)
Vm = np.mean(V_mean)
U_rms = [(np.sqrt((u-Um)**2)) for u in U_mean]
V_rms = [(np.sqrt((v-Vm)**2)) for v in V_mean]
print('Max in mean U velocity occurs in frame: ',frames[U_mean.index(max(U_mean))])
print('Max in mean V velocity occurs in frame: ',frames[V_mean.index(max(V_mean))])
U_mean = np.array(U_mean)
V_mean = np.array(V_mean)
U_rms = np.array(U_rms)
V_rms = np.array(V_rms)
return U_mean,U_rms,V_mean,V_rms
def run_mean_velocities(self):
if self.check_same_grid_run():
X,Y = self.run_grid()
frames = self.frames()
shape = (X.shape[0],X.shape[1],len(frames))
U_mean = np.zeros(shape)
V_mean = np.zeros(shape)
for ind in range(len(frames)):
x,y,u,v = self.fields[frames[ind]].create_mesh_grid()
U_mean[:,:,ind] = u[::-1]
V_mean[:,:,ind] = v[::-1]
return np.nanmean(U_mean,axis=2),np.nanmean(V_mean,axis=2)
else:
X,Y = self.run_grid()
U_mean = np.zeros(X.shape)
V_mean = np.zeros(Y.shape)
for row in range(X.shape[0]):
for col in range(X.shape[1]):
u,urms,v,vrms = self.mean_gp_velocity(X[row,col],Y[row,col])
U_mean[row,col] = u
V_mean[row,col] = v
return U_mean,V_mean
def run_reynolds_stress(self,direction='xy'):
X,Y = self.run_grid()
rstress = np.zeros(X.shape)
for row in range(X.shape[0]):
for col in range(X.shape[1]):
#check equation - do you need to multiply by density??
u,urms,v,vrms = self.mean_gp_velocity(X[row,col],Y[row,col])
if direction=='xy' or direction=='yx':
rstress[row,col] = np.nanmean(np.multiply(urms,vrms))
elif direction=='xx':
rstress[row,col] = np.nanmean(np.multiply(urms,urms))
elif direction=='yy':
rstress[row,col] = np.nanmean(np.multiply(vrms,vrms))
return rstress
def frame_reynolds_stress(self,frame,direction='xy'):
X,Y,U,V = self.fields[frame].create_mesh_grid()
rstress = np.zeros(X.shape)
for row in range(X.shape[0]):
for col in range(X.shape[1]):
#check equation - do you need to multiply by density??
u,urms,v,vrms = self.mean_gp_velocity(X[row,col],Y[row,col])
if direction=='xy' or direction=='yx':
rstress[row,col] = (U[row,col] - u)*(V[row,col] - v)
elif direction=='xx':
rstress[row,col] = (U[row,col] - u)**2
elif direction=='yy':
rstress[row,col] = (V[row,col] - v)**2
return rstress
def mean_profile(self,axis='y'):
frames = self.frames()
if axis=='y' or axis=='Y':
Y_agp = []
for frame in frames:
X,Y = self.fields[frame].return_grid()
Y_agp += Y.tolist()
Y_agp = sorted(list(set(Y_agp)))
U_profiles = np.empty((len(Y_agp),len(frames)))
U_profiles.fill(np.nan)
V_profiles = np.empty((len(Y_agp),len(frames)))
V_profiles.fill(np.nan)
Y_agp = np.array(Y_agp)
for col_ind,frame in list(enumerate(frames)):
U_cur_prof,V_cur_prof,Y_cur_prof = self.fields[frame].profile(axis=axis)
for i in range(len(Y_cur_prof)):
row_ind = np.where(Y_agp==Y_cur_prof[i])[0][0]
U_profiles[row_ind,col_ind] = U_cur_prof[i]
V_profiles[row_ind,col_ind] = V_cur_prof[i]
U_mean_profile = np.nanmean(U_profiles,axis=1)[::-1]
U_number_of_vectors = np.sum(np.invert(np.isnan(U_profiles)),1)
V_mean_profile = np.nanmean(V_profiles,axis=1)[::-1]
V_number_of_vectors = np.sum(np.invert(np.isnan(V_profiles)),1)
# -*- coding: utf-8 -*-
import numpy as np
import pytest
from hypothesis import assume
from hypothesis import given
from hypothesis.extra.numpy import arrays
from hypothesis.strategies import integers
from hypothesis.strategies import sampled_from
from hypothesis.strategies import text
from nata.axes import Axis
from nata.axes import GridAxis
from nata.types import AxisType
from nata.types import GridAxisType
from .strategies import anyarray
from .strategies import bounded_intergers
from .strategies import number
def test_Axis_type_check():
assert isinstance(Axis, AxisType)
@given(anyarray())
def test_Axis_default_init(arr):
axis = Axis(arr)
assert axis.name == "unnamed"
assert axis.label == ""
assert axis.unit == ""
assert axis.shape == (
arr.shape[1:] if arr.ndim != 0 and len(arr) == 1 else arr.shape
)
if arr.ndim != 0 and len(arr) == 1:
np.testing.assert_array_equal(axis, arr[0, ...])
else:
np.testing.assert_array_equal(axis, arr)
@given(base=anyarray(), new=anyarray())
def test_Axis_data_change_broadcastable(base, new):
try:
expected = np.broadcast_to(new, base.shape)
# -*- coding: utf-8 -*-
from __future__ import print_function
from distutils.version import LooseVersion
from numpy import nan, random
import numpy as np
from pandas.compat import lrange
from pandas import (DataFrame, Series, Timestamp,
date_range)
import pandas as pd
from pandas.util.testing import (assert_series_equal,
assert_frame_equal,
assertRaisesRegexp)
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData, _check_mixed_float
def _skip_if_no_pchip():
try:
from scipy.interpolate import pchip_interpolate # noqa
except ImportError:
import nose
raise nose.SkipTest('scipy.interpolate.pchip missing')
class TestDataFrameMissingData(tm.TestCase, TestData):
_multiprocess_can_split_ = True
def test_dropEmptyRows(self):
N = len(self.frame.index)
mat = random.randn(N)
mat[:5] = nan
frame = DataFrame({'foo': mat}, index=self.frame.index)
original = Series(mat, index=self.frame.index, name='foo')
expected = original.dropna()
inplace_frame1, inplace_frame2 = frame.copy(), frame.copy()
smaller_frame = frame.dropna(how='all')
# check that original was preserved
assert_series_equal(frame['foo'], original)
inplace_frame1.dropna(how='all', inplace=True)
assert_series_equal(smaller_frame['foo'], expected)
assert_series_equal(inplace_frame1['foo'], expected)
smaller_frame = frame.dropna(how='all', subset=['foo'])
inplace_frame2.dropna(how='all', subset=['foo'], inplace=True)
assert_series_equal(smaller_frame['foo'], expected)
assert_series_equal(inplace_frame2['foo'], expected)
def test_dropIncompleteRows(self):
N = len(self.frame.index)
mat = random.randn(N)
from utils import load_data
import torch
import argparse
import numpy as np
import torch.optim as optim
import random
import pickle
import torch.nn as nn
import sklearn
from modularity import greedy_modularity_communities, partition, baseline_spectral, make_modularity_matrix
from utils import make_normalized_adj, edge_dropout, negative_sample
from models import GCNLink, GCNClusterNet, GCNDeep, GCNDeepSigmoid, GCN, GCNClusterGAT,GINClusterNet
from loss_functions import loss_kcenter, loss_modularity
import copy
from kcenter import CenterObjective, make_all_dists, gonzalez_kcenter, greedy_kcenter, make_dists_igraph, rounding
parser = argparse.ArgumentParser()
parser.add_argument('--no-cuda', action='store_true', default=True,
help='Disables CUDA training.')
parser.add_argument('--seed', type=int, default=24, help='Random seed.')
parser.add_argument('--lr', type=float, default=0.01,
help='Initial learning rate.')
parser.add_argument('--weight_decay', type=float, default=5e-4,
help='Weight decay (L2 loss on parameters).')
parser.add_argument('--hidden', type=int, default=50,
help='Number of hidden units.')
parser.add_argument('--dropout', type=float, default=0.5,
help='Dropout rate (1 - keep probability).')
parser.add_argument('--embed_dim', type=int, default=50,
help='Dimensionality of node embeddings')
parser.add_argument('--K', type=int, default=5,
help='How many partitions')
parser.add_argument('--negsamplerate', type=int, default=1,
help='How many negative examples to include per positive in link prediction training')
parser.add_argument('--edge_dropout', type=float, default=0.2,
help='Rate at which to remove edges in link prediction training')
parser.add_argument('--objective', type=str, default='kcenter',
help='What objective to optimize (currently partitioning or modularity')
parser.add_argument('--dataset', type=str, default='synthetic_spa',
help='which network to load')
parser.add_argument('--clustertemp', type=float, default=20,
help='how hard to make the softmax for the cluster assignments')
parser.add_argument('--kcentertemp', type=float, default=100,
help='how hard to make seed selection softmax assignment')
parser.add_argument('--kcentermintemp', type=float, default=0,
help='how hard to make the min over nodes in kcenter training objective')
parser.add_argument('--use_igraph', action='store_true', default=True, help='use igraph to compute shortest paths in twostage kcenter')
parser.add_argument('--train_iters', type=int, default=1000,
help='number of training iterations')
parser.add_argument('--num_cluster_iter', type=int, default=1,
help='number of iterations for clustering')
parser.add_argument('--singletrain', action='store_true', default=False, help='only train on a single instance')
parser.add_argument('--pure_opt', action='store_true', default=False, help='do only optimization, no link prediction needed')
parser.add_argument('--graph_embedding', type=str, default='GCN',
help='embedding layer GCN,GAT or GIN')
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.cuda:
torch.cuda.manual_seed(args.seed)
if 'synthetic_spa' not in args.dataset:
directory = args.dataset
else:
directory = 'synthetic_spa'
# Load data
reload_data = True
pure_optimization = args.pure_opt
train_pct = 0.4
if 'synthetic' in args.dataset:
num_graphs = 60
numtest = 30
else: #pubmed dataset
num_graphs = 20
numtest = 8
if reload_data:
bin_adj_all = []
adj_all = []
#features = []
adj_train = []
bin_adj_train = []
features_train = []
features_all = []
dist_all = []
dist_train = []
for i in range(num_graphs):
adj_i, features_i, labels_i, idx_train, idx_val, idx_test = load_data('data/{}/'.format(directory), '{}_{}'.format(args.dataset, i))
bin_adj_i = (adj_i.to_dense() > 0).float()
bin_adj_all.append(bin_adj_i)
adj_all.append(adj_i.coalesce())
features_all.append(features_i)
adj_train_i, features_train_i, labels_train_i, idx_train, idx_val, idx_test = load_data('data/{}/'.format(directory), '{0}_{1}_train_{2:.2f}'.format(args.dataset, i, train_pct))
bin_adj_train_i = (adj_train_i.to_dense() > 0).float()
bin_adj_train.append(bin_adj_train_i)
adj_train.append(adj_train_i.coalesce())
features_train.append(features_train_i)
vals = {}
algs = ['ClusterNet', 'ClusterNet-ft', 'ClusterNet-ft-only', 'GCN-e2e', 'GCN-e2e-ft', 'GCN-e2e-ft-only']
if args.objective == 'modularity':
ts_algos = ['agglomerative', 'recursive', 'spectral']
elif args.objective == 'kcenter':
ts_algos = ['gonzalez', 'greedy']
for algo in ts_algos:
algs.append('train-' + algo)
algs.append('ts-' + algo)
algs.append('ts-ft-' + algo)
algs.append('ts-ft-only-' + algo)
for algo in algs:
vals[algo] = np.zeros(numtest)
aucs_algs = ['ts', 'ts-ft', 'ts-ft-only']
aucs = {}
for algo in aucs_algs:
aucs[algo] = np.zeros(numtest)
if args.objective == 'modularity':
mods_test = [make_modularity_matrix(A) for A in bin_adj_all]
mods_train = [make_modularity_matrix(A) for A in bin_adj_train]
test_object = mods_test
train_object = mods_train
loss_fn = loss_modularity
elif args.objective == 'kcenter':
for i in range(num_graphs):
try:
dist_all.append(torch.load('{}_{}_test_dist.pt'.format(args.dataset, i)))
dist_train.append(torch.load('{}_{}_{:.2f}_train_dist.pt'.format(args.dataset, i, train_pct)))
diameter = dist_all[-1].max()
except:
dist_all_i = make_all_dists(bin_adj_all[i], 100)
diameter = dist_all_i[dist_all_i < 100].max()
dist_all_i[dist_all_i == 100] = diameter
torch.save(dist_all_i, '{}_{}_test_dist.pt'.format(args.dataset, i))
dist_all.append(dist_all_i)
dist_train_i = make_all_dists(bin_adj_train[i], 100)
dist_train_i[dist_train_i == 100] = diameter
torch.save(dist_train_i, '{}_{}_{:.2f}_train_dist.pt'.format(args.dataset, i, train_pct))
dist_train.append(dist_train_i)
obj_train = [CenterObjective(d, diameter, args.kcentermintemp) for d in dist_train]
obj_train_hardmax = [CenterObjective(d, diameter, args.kcentermintemp, hardmax=True) for d in dist_train]
obj_test = [CenterObjective(d, diameter, args.kcentertemp, hardmax=True) for d in dist_all]
obj_test_softmax = [CenterObjective(d, diameter, args.kcentermintemp) for d in dist_all]
test_object = obj_test
train_object = obj_train
loss_fn = loss_kcenter
if pure_optimization:
train_object = test_object
adj_train = adj_all
bin_adj_train = bin_adj_all
dist_train = dist_all
for test_idx in range(1):
if 'pubmed' in args.dataset:
valid_instances = list(range(10, 12))
test_instances= list(range(12, 20))
if 'synthetic' in args.dataset:
test_instances = list(range(20, 50))
valid_instances = list(range(50, 60))
if not args.singletrain:
train_instances = [x for x in range(num_graphs) if x not in test_instances and x not in valid_instances]
else:
train_instances = [0]
nfeat = features_all[0].shape[1]
K = args.K
# Model and optimizer
model_ts = GCNLink(nfeat=nfeat,
nhid=args.hidden,
nout=args.embed_dim,
dropout=args.dropout)
if args.graph_embedding=='GAT':
model_cluster = GCNClusterGAT(nfeat=nfeat,
nhid=args.hidden,
nout=args.embed_dim,
dropout=args.dropout,
K = args.K,
cluster_temp = args.clustertemp)
if args.graph_embedding == 'GCN':
model_cluster = GCNClusterNet(nfeat=nfeat,
nhid=args.hidden,
nout=args.embed_dim,
dropout=args.dropout,
K = args.K,
cluster_temp = args.clustertemp)
if args.graph_embedding == 'GIN':
model_cluster = GINClusterNet(nfeat=nfeat,
nhid=args.hidden,
nout=args.embed_dim,
dropout=args.dropout,
K = args.K,
cluster_temp = args.clustertemp)
#keep a couple of initializations here so that the random seeding lines up
#with results reported in the paper -- removing these is essentially equivalent to
#changing the seed
_ = GCN(nfeat, args.hidden, args.embed_dim, args.dropout)
_ = nn.Parameter(torch.rand(K, args.embed_dim))
#
if args.objective == 'modularity':
model_gcn = GCNDeep(nfeat=nfeat,
nhid=args.hidden,
nout=args.K,
dropout=args.dropout,
nlayers=2)
elif args.objective == 'kcenter':
model_gcn = GCNDeepSigmoid(nfeat=nfeat,
nhid=args.hidden,
nout=1,
dropout=args.dropout,
nlayers=2)
optimizer = optim.Adam(model_cluster.parameters(),
lr=args.lr, weight_decay=args.weight_decay)
losses = []
losses_test = []
num_cluster_iter = args.num_cluster_iter
def get_average_loss(model, adj, bin_adj, bin_adj_for_loss, objectives, instances, features, num_reps = 10, hardmax = False, update = False, algoname = None):
if hardmax:
model.eval()
loss = 0
for _ in range(num_reps):
for idx, i in enumerate(instances):
mu, r, embeds, dist = model(features[i], adj[i], num_cluster_iter)
if hardmax:
r = torch.softmax(100*r, dim=1)
this_loss = loss_fn(mu, r, embeds, dist, bin_adj_for_loss[i], objectives[i], args)
loss += this_loss
if update:
vals[algoname][test_instances.index(i)] = this_loss.item()
if hardmax:
model.train()
return loss/(len(instances)*num_reps)
def get_kcenter_test_loss(model, adj, bin_adj, train_objectives, test_objectives, instances, features, num_reps = 10, hardmax = False, update = False, algoname = None):
loss = 0
for idx, i in enumerate(instances):
best_loss = 100
x_best = None
for _ in range(num_reps):
mu, r, embeds, dist = model(features[i], adj[i], num_cluster_iter)
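                # convert the soft center-assignment distances into a fractional per-node
                # selection vector x in [0, 1], then rescale so its total mass stays within the budget K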
x = torch.softmax(dist*args.kcentertemp, 0).sum(dim=1)
x = 2*(torch.sigmoid(4*x) - 0.5)
if x.sum() > args.K:
x = args.K*x/x.sum()
train_loss = loss_fn(mu, r, embeds, dist, bin_adj[i], train_objectives[i], args)
if train_loss.item() < best_loss:
best_loss = train_loss.item()
x_best = x
testvals = []; trainvals = []
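            # round the fractional solution several times and keep the rounding that scores best
            # under the *training* objective; its value under the test objective is what gets reported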
for _ in range(50):
y = rounding(x_best)
testvals.append(test_objectives[i](y).item())
trainvals.append(train_objectives[i](y).item())
loss += testvals[np.argmin(trainvals)]
if update:
vals[algoname][test_instances.index(i)] = testvals[np.argmin(trainvals)]
return loss/(len(instances))
#Decision-focused training
if True:
for t in range(args.train_iters):
i = np.random.choice(train_instances)
mu, r, embeds, dist = model_cluster(features_train[i], adj_train[i], num_cluster_iter)
if args.objective == 'modularity':
loss = loss_fn(mu, r, embeds, dist, bin_adj_all[i], test_object[i], args)
else:
loss = loss_fn(mu, r, embeds, dist, bin_adj_all[i], obj_test_softmax[i], args)
if args.objective != 'kcenter':
loss = -loss
optimizer.zero_grad()
loss.backward()
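            # after a warm-up of 100 iterations, use more soft k-means iterations per forward pass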
if t % 100 == 0 and t != 0:
num_cluster_iter = 5
if t % 10 == 0:
if args.objective == 'modularity':
r = torch.softmax(100*r, dim=1)
loss_train = get_average_loss(model_cluster, adj_train, bin_adj_train, bin_adj_all, test_object, train_instances, features_train, hardmax=True)
loss_test = get_average_loss(model_cluster, adj_train, bin_adj_train, bin_adj_all, test_object, test_instances, features_train, hardmax=True)
loss_valid = get_average_loss(model_cluster, adj_train, bin_adj_train, bin_adj_all, test_object, valid_instances, features_train, hardmax=True)
losses_test.append(loss_test.item())
print(t, loss_train.item(), loss_test.item(), loss_valid.item())
losses.append(loss.item())
optimizer.step()
if args.objective == 'kcenter':
loss_round = get_kcenter_test_loss(model_cluster, adj_train, bin_adj_train, train_object, test_object, test_instances, features_train, update = True, algoname = 'ClusterNet')
elif args.objective == 'modularity':
loss_test = get_average_loss(model_cluster, adj_train, bin_adj_train, bin_adj_all, test_object, test_instances, features_train, hardmax=True, update = True, algoname = 'ClusterNet')
print('after training', np.mean(vals['ClusterNet'][:numtest]), np.std(vals['ClusterNet']))
if args.singletrain:
pickle.dump((vals, aucs), open('results_distributional_singletrain_{}_{}_{}.pickle'.format(args.dataset, args.objective, args.K), 'wb'))
break
def fine_tune(model, features, adj, bin_adj, objective, num_training_iters = 1000):
optimizer = optim.Adam(model.parameters(),
lr=args.lr, weight_decay=args.weight_decay)
for t in range(num_training_iters):
mu, r, embeds, dist = model(features, adj, num_cluster_iter)
loss = loss_fn(mu, r, embeds, dist, bin_adj, objective, args)
if args.objective != 'kcenter':
loss = -loss
optimizer.zero_grad()
loss.backward()
optimizer.step()
num_cluster_iter = 1
loss_finetune = 0
loss_round = 0
for i in test_instances:
model_i = copy.deepcopy(model_cluster)
fine_tune(model_i, features_train[i], adj_train[i], bin_adj_train[i], train_object[i], num_training_iters = 50)
loss_finetune += get_average_loss(model_i, adj_train, bin_adj_train, bin_adj_all, test_object, [i], features_train, hardmax=True, update = True, algoname = 'ClusterNet-ft').item()
if args.objective == 'kcenter':
loss_round += get_kcenter_test_loss(model_i, adj_train, bin_adj_train, train_object, test_object, [i], features_train, update = True, algoname = 'ClusterNet-ft')
print('finetune', np.mean(vals['ClusterNet-ft']), np.std(vals['ClusterNet-ft']))
loss_finetune = 0
loss_round = 0
for i in test_instances:
model_i = GCNClusterNet(nfeat=nfeat,
nhid=args.hidden,
nout=args.embed_dim,
dropout=args.dropout,
K = args.K,
cluster_temp = args.clustertemp)
fine_tune(model_i, features_train[i], adj_train[i], bin_adj_train[i], train_object[i], num_training_iters = 500)
loss_finetune += get_average_loss(model_i, adj_train, bin_adj_train, bin_adj_all, test_object, [i], features_train, hardmax=True, update = True, algoname = 'ClusterNet-ft-only').item()
if args.objective == 'kcenter':
loss_round += get_kcenter_test_loss(model_i, adj_train, bin_adj_train, train_object, test_object, [i], features_train, update = True, algoname = 'ClusterNet-ft-only')
print('finetune only', np.mean(vals['ClusterNet-ft-only']), np.std(vals['ClusterNet-ft-only']))
#Train a two-stage model for link prediction with cross-entropy loss and
#negative sampling
def train_twostage(model_ts, train_instances, test_instances, features, algoname):
optimizer_ts = optim.Adam(model_ts.parameters(),
lr=args.lr, weight_decay=args.weight_decay)
edges = {}
edges_eval = {}
labels_eval = {}
for i in train_instances + test_instances:
edges[i] = adj_train[i].indices().t()
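            # held-out evaluation set: each observed edge of the full graph is paired with one
            # sampled non-edge (negative sampling ratio 1)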
edges_eval_i, labels_eval_i = negative_sample(adj_all[i].indices().t(), 1, bin_adj_all[i])
edges_eval[i] = edges_eval_i
labels_eval[i] = labels_eval_i
def get_evaluation(instances):
test_ce = 0
test_auc = 0
for i in instances:
preds_test_eval = model_ts(features[i], adj_train[i], edges_eval[i])
test_ce += torch.nn.BCEWithLogitsLoss()(preds_test_eval, labels_eval[i])
test_auc_i = sklearn.metrics.roc_auc_score(labels_eval[i].long().detach().numpy(), nn.Sigmoid()(preds_test_eval).detach().numpy())
                aucs[algoname][test_instances.index(i)] = test_auc_i
test_auc += test_auc_i
return test_ce/len(instances), test_auc/len(instances)
for t in range(150):
i = np.random.choice(train_instances)
adj_input = make_normalized_adj(edge_dropout(edges[i], args.edge_dropout), bin_adj_train[i].shape[0])
edges_eval_i, labels_i = negative_sample(edges[i], args.negsamplerate, bin_adj_train[i])
preds = model_ts(features[i], adj_input, edges_eval_i)
loss = torch.nn.BCEWithLogitsLoss()(preds, labels_i)
optimizer_ts.zero_grad()
loss.backward()
if t % 10 == 0:
test_ce, test_auc = get_evaluation(test_instances)
print(t, loss.item(), test_ce.item(), test_auc)
optimizer_ts.step()
def test_twostage(model_ts, test_instances_eval, algoname):
for test_i in test_instances_eval:
#predict probability that all unobserved edges exist
n = adj_train[test_i].shape[0]
indices = torch.tensor(np.arange(n))
to_pred = torch.zeros(n**2, 2)
to_pred[:, 1] = indices.repeat(n)
for i in range(n):
to_pred[i*n:(i+1)*n, 0] = i
to_pred = to_pred.long()
preds = model_ts(features_train[test_i], adj_train[test_i], to_pred)
preds = nn.Sigmoid()(preds).view(n, n)
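            # keep observed training edges at probability 1 and fill in unobserved pairs with the
            # predicted link probabilities before running the downstream optimization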
preds = bin_adj_train[test_i] + (1 - bin_adj_train[test_i])*preds
if args.objective == 'modularity':
r = greedy_modularity_communities(preds, K)
loss = loss_fn(None, r, None, None, bin_adj_all[test_i], test_object[test_i], args).item()
vals[algoname + '-agglomerative'][test_instances.index(test_i)] = loss
r = partition(preds, K)
loss = loss_fn(None, r, None, None, bin_adj_all[test_i], test_object[test_i], args).item()
vals[algoname + '-recursive'][test_instances.index(test_i)] = loss
degrees = preds.sum(dim=1)
preds = torch.diag(1./degrees)@preds
mod_pred = make_modularity_matrix(preds)
r = baseline_spectral(mod_pred, K)
loss = loss_fn(None, r, None, None, bin_adj_all[test_i], test_object[test_i], args).item()
vals[algoname + '-spectral'][test_instances.index(test_i)] = loss
elif args.objective == 'kcenter':
print('making dists')
if args.use_igraph:
dist_ts = make_dists_igraph(preds)
else:
dist_ts = make_all_dists(preds, 100)
diameter = dist_ts[dist_ts < 100].max()
dist_ts[dist_ts == 100] = diameter
print(test_i)
dist_ts = dist_ts.float()
diameter = dist_ts.max()
x = gonzalez_kcenter(dist_ts, K)
loss = obj_test[test_i](x)
vals[algoname + '-gonzalez'][test_instances.index(test_i)] = loss.item()
x = greedy_kcenter(dist_ts, diameter, K)
loss = obj_test[test_i](x)
vals[algoname + '-greedy'][test_instances.index(test_i)] = loss.item()
if True:
print('two stage')
#do pretrained two stage
train_twostage(model_ts, train_instances, test_instances, features_train, 'ts')
test_twostage(model_ts, test_instances, 'ts')
for algo in algs:
if 'ts' in algo and 'ft' not in algo:
print(algo, np.mean(vals[algo]), np.std(vals[algo]))
# do finetuning
loss_agglom_ft = 0; loss_recursive_ft = 0; loss_spectral_ft = 0
loss_greedy_ft = 0; loss_gonzalez_ft = 0
for i in test_instances:
model_i = copy.deepcopy(model_ts)
train_twostage(model_i, [i], [i], features_train, 'ts-ft')
            test_twostage(model_i, [i], 'ts-ft')
for algo in algs:
if 'ts-ft' in algo and 'only' not in algo:
print(algo, np.mean(vals[algo]), np.std(vals[algo]))
#do only finetuning
loss_agglom_ft_only = 0; loss_recursive_ft_only = 0; loss_spectral_ft_only = 0
loss_greedy_ft_only = 0; loss_gonzalez_ft_only = 0
for i in test_instances:
model_i = GCNLink(nfeat=nfeat,
nhid=args.hidden,
nout=args.embed_dim,
dropout=args.dropout)
train_twostage(model_i, [i], [i], features_train, 'ts-ft-only')
            test_twostage(model_i, [i], 'ts-ft-only')
for algo in algs:
if 'ts-ft-only' in algo:
print(algo, np.mean(vals[algo]), np.std(vals[algo]))
if True:
def get_average_loss(model, adj, bin_adj, bin_adj_for_loss, objectives, instances, features, num_reps = 1, hardmax = False, update = False, algoname = None):
loss = 0
for _ in range(num_reps):
for i in instances:
if args.objective == 'modularity':
r = model(features[i], adj[i])
r = torch.softmax(r, dim = 1)
if hardmax:
r = torch.softmax(100*r, dim=1)
this_loss = -loss_fn(None, r, None, None, bin_adj_for_loss[i], objectives[i], args)
elif args.objective == 'kcenter':
x = model(features[i], adj[i])
if x.sum() > K:
x = K*x/x.sum()
this_loss = objectives[i](x)
loss += this_loss
if update:
vals[algoname][test_instances.index(i)] = this_loss.item()
return loss/(len(instances)*num_reps)
def get_kcenter_test_loss(model, adj, bin_adj, train_objectives, test_objectives, instances, features, num_reps = 10, hardmax = False, update = False, algoname = None):
loss = 0
for i in instances:
best_loss = 100
x_best = None
for _ in range(num_reps):
x = model(features[i], adj[i])
if x.sum() > args.K:
x = args.K*x/x.sum()
train_loss = train_objectives[i](x)
if train_loss.item() < best_loss:
best_loss = train_loss.item()
x_best = x
testvals = []; trainvals = []
for _ in range(50):
y = rounding(x_best)
testvals.append(test_objectives[i](y).item())
trainvals.append(train_objectives[i](y).item())
loss += testvals[np.argmin(trainvals)]
if update:
vals[algoname][test_instances.index(i)] = testvals[np.argmin(trainvals)]
return loss/(len(instances))
print('just GCN')
        def train_gcn_model(model, train_instances, num_iters = 1000, verbose=False):
            #build the optimizer for the model that was passed in, so that fine-tuned copies
            #are updated with their own parameters rather than model_gcn's
            optimizer_gcn = optim.Adam(model.parameters(), lr = args.lr,
                    weight_decay = args.weight_decay)
            for t in range(num_iters):
                i = np.random.choice(train_instances)
                loss = get_average_loss(model, adj_train, bin_adj_train, bin_adj_train, train_object, [i], features_train)
                optimizer_gcn.zero_grad()
                loss.backward()
                losses.append(loss.item())
                optimizer_gcn.step()
                if t % 100 == 0 and verbose:
                    loss_train = get_average_loss(model, adj_all, bin_adj_all, bin_adj_all, test_object, train_instances, features_all)
                    loss_test = get_average_loss(model, adj_train, bin_adj_train, bin_adj_all, test_object, test_instances, features_train, hardmax=True)
                    losses_test.append(loss_test.item())
                    print(t, loss.item(), loss_train.item(), loss_test.item())
train_gcn_model(model_gcn, train_instances, num_iters = 1000, verbose = True)
if args.objective == 'kcenter':
loss_round = get_kcenter_test_loss(model_gcn, adj_train, bin_adj_train, train_object, test_object, test_instances, features_train , update = True, algoname = 'GCN-e2e')
elif args.objective == 'modularity':
loss_gcne2e = get_average_loss(model_gcn, adj_train, bin_adj_train, bin_adj_all, test_object, test_instances, features_train, hardmax=True, update = True, algoname = 'GCN-e2e').item()
print('GCN-e2e', np.mean(vals['GCN-e2e']), np.std(vals['GCN-e2e']))
#################
#GCN FINETUNE
#################
loss_finetune = 0
loss_round = 0
for i in test_instances:
model_i = copy.deepcopy(model_gcn)
train_gcn_model(model_i, [i], num_iters = 500)
loss_finetune += get_average_loss(model_i, adj_train, bin_adj_train, bin_adj_all, test_object, [i], features_train, hardmax=True, update = True, algoname = 'GCN-e2e-ft').item()
if args.objective == 'kcenter':
loss_round += get_kcenter_test_loss(model_i, adj_train, bin_adj_train, train_object, test_object, [i], features_train, update = True, algoname = 'GCN-e2e-ft')
        print('GCN-e2e-ft', np.mean(vals['GCN-e2e-ft']), np.std(vals['GCN-e2e-ft']))
import numpy as np
from base_test import ArkoudaTest
from context import arkouda as ak
from arkouda.dtypes import npstr
"""
Encapsulates unit tests for the numeric module with the exception
of the where method, which is in the where_test module
"""
class NumericTest(ArkoudaTest):
def testSeededRNG(self):
N = 100
seed = 8675309
numericdtypes = [ak.int64, ak.float64, ak.bool, ak.uint64]
for dt in numericdtypes:
# Make sure unseeded runs differ
a = ak.randint(0, 2**32, N, dtype=dt)
b = ak.randint(0, 2**32, N, dtype=dt)
self.assertFalse((a == b).all())
# Make sure seeded results are same
a = ak.randint(0, 2**32, N, dtype=dt, seed=seed)
b = ak.randint(0, 2**32, N, dtype=dt, seed=seed)
self.assertTrue((a == b).all())
# Uniform
self.assertFalse((ak.uniform(N) == ak.uniform(N)).all())
self.assertTrue((ak.uniform(N, seed=seed) == ak.uniform(N, seed=seed)).all())
# Standard Normal
self.assertFalse((ak.standard_normal(N) == ak.standard_normal(N)).all())
self.assertTrue((ak.standard_normal(N, seed=seed) == ak.standard_normal(N, seed=seed)).all())
# Strings (uniformly distributed length)
self.assertFalse(
(ak.random_strings_uniform(1, 10, N) == ak.random_strings_uniform(1, 10, N)).all()
)
self.assertTrue(
(
ak.random_strings_uniform(1, 10, N, seed=seed)
== ak.random_strings_uniform(1, 10, N, seed=seed)
).all()
)
# Strings (log-normally distributed length)
self.assertFalse(
(ak.random_strings_lognormal(2, 1, N) == ak.random_strings_lognormal(2, 1, N)).all()
)
self.assertTrue(
(
ak.random_strings_lognormal(2, 1, N, seed=seed)
== ak.random_strings_lognormal(2, 1, N, seed=seed)
).all()
)
def testCast(self):
N = 100
arrays = {
ak.int64: ak.randint(-(2**48), 2**48, N),
ak.float64: ak.randint(0, 1, N, dtype=ak.float64),
ak.bool: ak.randint(0, 2, N, dtype=ak.bool),
}
roundtripable = set(
(
(ak.bool, ak.bool),
(ak.int64, ak.int64),
(ak.int64, ak.float64),
(ak.int64, npstr),
(ak.float64, ak.float64),
(ak.float64, npstr),
(ak.uint8, ak.int64),
(ak.uint8, ak.float64),
(ak.uint8, npstr),
)
)
for t1, orig in arrays.items():
for t2 in ak.DTypes:
t2 = ak.dtype(t2)
other = ak.cast(orig, t2)
self.assertEqual(orig.size, other.size)
if (t1, t2) in roundtripable:
roundtrip = ak.cast(other, t1)
self.assertTrue(
(orig == roundtrip).all(), f"{t1}: {orig[:5]}, {t2}: {roundtrip[:5]}"
)
self.assertTrue((ak.array([1, 2, 3, 4, 5]) == ak.cast(ak.linspace(1, 5, 5), dt=ak.int64)).all())
self.assertEqual(ak.cast(ak.arange(0, 5), dt=ak.float64).dtype, ak.float64)
self.assertTrue(
(
ak.array([False, True, True, True, True]) == ak.cast(ak.linspace(0, 4, 5), dt=ak.bool)
).all()
)
def testStrCastErrors(self):
intNAN = -(2**63)
intstr = ak.array(["1", "2 ", "3?", "!4", " 5", "-45", "0b101", "0x30", "N/A"])
intans = np.array([1, 2, intNAN, intNAN, 5, -45, 0b101, 0x30, intNAN])
uintNAN = 0
uintstr = ak.array(["1", "2 ", "3?", "-4", " 5", "45", "0b101", "0x30", "N/A"])
uintans = np.array([1, 2, uintNAN, uintNAN, 5, 45, 0b101, 0x30, uintNAN])
floatstr = ak.array(["1.1", "2.2 ", "3?.3", "4.!4", " 5.5", "6.6e-6", "78.91E+4", "6", "N/A"])
floatans = np.array([1.1, 2.2, np.nan, np.nan, 5.5, 6.6e-6, 78.91e4, 6.0, np.nan])
boolstr = ak.array(
["True", "False ", "Neither", "N/A", " True", "true", "false", "TRUE", "NOTTRUE"]
)
boolans = np.array([True, False, False, False, True, True, False, True, False])
validans = ak.array([True, True, False, False, True, True, True, True, False])
for dt, arg, ans in [
(ak.int64, intstr, intans),
(ak.uint64, uintstr, uintans),
(ak.float64, floatstr, floatans),
(ak.bool, boolstr, boolans),
]:
with self.assertRaises(RuntimeError):
ak.cast(arg, dt, errors=ak.ErrorMode.strict)
res = ak.cast(arg, dt, errors=ak.ErrorMode.ignore)
self.assertTrue(np.allclose(ans, res.to_ndarray(), equal_nan=True))
res, valid = ak.cast(arg, dt, errors=ak.ErrorMode.return_validity)
self.assertTrue((valid == validans).all())
self.assertTrue(np.allclose(ans, res.to_ndarray(), equal_nan=True))
def testHistogram(self):
pda = ak.randint(10, 30, 40)
bins, result = ak.histogram(pda, bins=20)
self.assertIsInstance(result, ak.pdarray)
self.assertEqual(20, len(bins))
self.assertEqual(20, len(result))
self.assertEqual(int, result.dtype)
with self.assertRaises(TypeError):
ak.histogram([range(0,10)], bins=1)
with self.assertRaises(TypeError):
ak.histogram(pda, bins='1')
with self.assertRaises(TypeError):
ak.histogram([range(0,10)], bins='1')
def testLog(self):
na = np.linspace(1, 10, 10)
pda = ak.array(na)
self.assertTrue((np.log(na) == ak.log(pda).to_ndarray()).all())
with self.assertRaises(TypeError):
ak.log([range(0,10)])
def testExp(self):
na = np.linspace(1, 10, 10)
pda = ak.array(na)
self.assertTrue((np.exp(na) == ak.exp(pda).to_ndarray()).all())
with self.assertRaises(TypeError):
ak.exp([range(0,10)])
def testAbs(self):
na = np.linspace(1, 10, 10)
pda = ak.array(na)
self.assertTrue((np.abs(na) == ak.abs(pda).to_ndarray()).all())
self.assertTrue((ak.arange(5, 1, -1) == ak.abs(ak.arange(-5, -1))).all())
self.assertTrue((ak.array([5, 4, 3, 2, 1]) == ak.abs(ak.linspace(-5, -1, 5))).all())
with self.assertRaises(TypeError):
ak.abs([range(0, 10)])
def testCumSum(self):
na = np.linspace(1, 10, 10)
pda = ak.array(na)
self.assertTrue((np.cumsum(na) == ak.cumsum(pda).to_ndarray()).all())
# Test uint case
        na = np.linspace(1, 10, 10, dtype="uint64")
pda = ak.cast(pda, ak.uint64)
self.assertTrue((np.cumsum(na) == ak.cumsum(pda).to_ndarray()).all())
with self.assertRaises(TypeError):
ak.cumsum([range(0, 10)])
def testCumProd(self):
        # mirrors the cumsum test above
        na = np.linspace(1, 10, 10)
        pda = ak.array(na)
        self.assertTrue((np.cumprod(na) == ak.cumprod(pda).to_ndarray()).all())
        with self.assertRaises(TypeError):
            ak.cumprod([range(0, 10)])
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 9 13:22:31 2021
Model Simulation & Grid Interpolation
@authors: <NAME> & <NAME>
"""
import numpy as np
import sys
from scipy.stats import norm
from scipy.stats import uniform
import scipy.special as sc
import mpmath
import scipy.integrate as si
import scipy.interpolate as interp
import scipy.optimize as optim
from scipy.stats import genextreme
## integration.cpp
## -------------------------------------------------------------------------- ##
## -------------------------------------------------------------------------- ##
## Import C++ function library
## -------------------------------------------------------------------------- ##
## -------------------------------------------------------------------------- ##
##
##
## i.e., RW_marginal, pRW_me_interp, find_xrange_pRW_me
##
import os, ctypes
# g++ -std=c++11 -shared -fPIC -o p_integrand.so p_integrand.cpp
lib = ctypes.CDLL(os.path.abspath('./nonstat_model_noXs_global/p_integrand.so'))
i_and_o_type = np.ctypeslib.ndpointer(ndim=1, dtype=np.float64)
grid_type = np.ctypeslib.ndpointer(ndim=1, dtype=np.float64)
bool_type = np.ctypeslib.ndpointer(ndim=1, dtype='bool')
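# Convenience aliases: contiguous float64 vectors for function inputs/outputs and for the
# interpolation grids, plus a boolean mask type used by the likelihood routines below.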
lib.pRW_me_interp_C.restype = ctypes.c_int
lib.pRW_me_interp_C.argtypes = (i_and_o_type, grid_type, grid_type,
ctypes.c_double, ctypes.c_double, ctypes.c_double, ctypes.c_int, ctypes.c_int,
i_and_o_type)
lib.RW_marginal_C.restype = ctypes.c_int
lib.RW_marginal_C.argtypes = (i_and_o_type,
ctypes.c_double, ctypes.c_double, ctypes.c_int,
i_and_o_type)
lib.RW_me_2_unifs.restype = ctypes.c_int
lib.RW_me_2_unifs.argtypes = (i_and_o_type, grid_type, grid_type, ctypes.c_double, i_and_o_type, ctypes.c_double,
ctypes.c_int, ctypes.c_int, ctypes.c_int, i_and_o_type)
lib.find_xrange_pRW_me_C.restype = ctypes.c_int
lib.find_xrange_pRW_me_C.argtypes = (ctypes.c_double, ctypes.c_double, ctypes.c_double, ctypes.c_double,
grid_type, grid_type, ctypes.c_double, ctypes.c_double, ctypes.c_double,
ctypes.c_int, i_and_o_type)
lib.pchip.restype = ctypes.c_int
lib.pchip.argtypes = (grid_type, grid_type, grid_type, grid_type, ctypes.c_int, ctypes.c_int)
lib.qRW_me_interp.restype = ctypes.c_int
lib.qRW_me_interp.argtypes = (i_and_o_type, grid_type, grid_type, ctypes.c_double, ctypes.c_double, ctypes.c_double,
ctypes.c_int, ctypes.c_int, i_and_o_type,
grid_type, grid_type, ctypes.c_int, ctypes.c_double, ctypes.c_double)
lib.RW_density_C.restype = ctypes.c_int
lib.RW_density_C.argtypes = (i_and_o_type,
ctypes.c_double, ctypes.c_double, ctypes.c_int,
i_and_o_type)
lib.dRW_me_interp_C.restype = ctypes.c_int
lib.dRW_me_interp_C.argtypes = (i_and_o_type, grid_type, grid_type,
ctypes.c_double, ctypes.c_double, ctypes.c_double, ctypes.c_int, ctypes.c_int,
i_and_o_type)
lib.density_interp_grid.restype = ctypes.c_int
lib.density_interp_grid.argtypes = (grid_type, i_and_o_type,
ctypes.c_double, ctypes.c_int, ctypes.c_int,
i_and_o_type, i_and_o_type)
lib.dgev_C.restype = ctypes.c_double
lib.dgev_C.argtypes = (ctypes.c_double, ctypes.c_double, ctypes.c_double, ctypes.c_double, ctypes.c_bool)
lib.dnorm_C.restype = ctypes.c_double
lib.dnorm_C.argtypes = (ctypes.c_double, ctypes.c_double, ctypes.c_double, ctypes.c_bool)
lib.marg_transform_data_mixture_me_likelihood_C.restype = ctypes.c_double
lib.marg_transform_data_mixture_me_likelihood_C.argtypes = (i_and_o_type, i_and_o_type, i_and_o_type,
bool_type, bool_type, i_and_o_type, i_and_o_type, i_and_o_type,
ctypes.c_double, i_and_o_type, ctypes.c_double,
grid_type, grid_type, ctypes.c_int, ctypes.c_int)
lib.marg_transform_data_mixture_me_likelihood_F.restype = ctypes.c_double
lib.marg_transform_data_mixture_me_likelihood_F.argtypes = (i_and_o_type, i_and_o_type, i_and_o_type,
bool_type, bool_type, i_and_o_type, i_and_o_type, i_and_o_type,
ctypes.c_double, ctypes.c_double, ctypes.c_double,
grid_type, grid_type, ctypes.c_int, ctypes.c_int)
lib.marg_transform_data_mixture_me_likelihood_global.restype = ctypes.c_double
lib.marg_transform_data_mixture_me_likelihood_global.argtypes = (i_and_o_type, i_and_o_type, i_and_o_type,
bool_type, bool_type, i_and_o_type, i_and_o_type, i_and_o_type,
ctypes.c_double, i_and_o_type, ctypes.c_double,
grid_type, grid_type, ctypes.c_int, ctypes.c_int, ctypes.c_int)
lib.Thresh_X_try.restype = ctypes.c_int
lib.Thresh_X_try.argtypes = (i_and_o_type, grid_type, grid_type, ctypes.c_double, ctypes.c_double, ctypes.c_double, ctypes.c_double,
ctypes.c_double, ctypes.c_double,
ctypes.c_int, ctypes.c_int, i_and_o_type, i_and_o_type)
lib.X_update.restype = ctypes.c_int
lib.X_update.argtypes = (i_and_o_type, grid_type, grid_type, i_and_o_type, ctypes.c_double, ctypes.c_double,
ctypes.c_double, ctypes.c_double,
ctypes.c_int, ctypes.c_int, ctypes.c_int, i_and_o_type)
lib.unifs_2_RW_me.restype = ctypes.c_int
lib.unifs_2_RW_me.argtypes = (i_and_o_type, grid_type, grid_type, ctypes.c_double, i_and_o_type, ctypes.c_double,
ctypes.c_double, ctypes.c_double,
ctypes.c_int, ctypes.c_int, ctypes.c_int, i_and_o_type)
lib.print_Vec.restype = ctypes.c_double
lib.print_Vec.argtypes = (i_and_o_type, ctypes.c_int, ctypes.c_int)
## -------------------------------------------------------------------------- ##
## -------------------------------------------------------------------------- ##
## Generate Levy random samples
## -------------------------------------------------------------------------- ##
## -------------------------------------------------------------------------- ##
##
##
## i.e., Stable variables with alpha=1/2
##
def rlevy(n, m = 0, s = 1):
if np.any(s < 0):
sys.exit("s must be positive")
return s/norm.ppf(uniform.rvs(0,1,n)/2)**2 + m
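## Quick sanity check (illustrative only, not part of the original workflow): the Levy(m, s)
## distribution function is F(x) = erfc(sqrt(s/(2*(x-m)))), so the empirical CDF of rlevy draws
## should match it; the sample size and evaluation point below are arbitrary choices.
# draws = rlevy(100000, m=0, s=1)
# empirical = np.mean(draws <= 5.0)
# exact = sc.erfc(np.sqrt(1/(2*5.0)))
# print(empirical, exact)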
## The density for R^phi in which R is levy distributed
def dR_power_phi(x, phi, m=0, s=1, log=False):
x_phi = x**(1/phi)
if np.any(x_phi <= m):
sys.exit("some x**phi <= m")
if np.any(s <= 0):
sys.exit("s must be positive")
tmp = np.sum(np.log(s/(2 * np.pi))/2 - 3 * np.log(x_phi - m)/2 - s/(2 * (x_phi -
m)) + (1/phi-1)*np.log(x)-np.log(phi))
if np.invert(log):
tmp = np.exp(tmp)
return tmp
## -------------------------------------------------------------------------- ##
## -------------------------------------------------------------------------- ##
## -------------------------------------------------------------------------- ##
## Calculate unregularized upper incomplete gamma function
## -------------------------------------------------------------------------- ##
## -------------------------------------------------------------------------- ##
##
##
## The negative a values are allowed
##
def gammaincc_unregulized(a,x):
if(isinstance(x, (int, np.int64, float))): x=np.array([x])
    if np.any(x < 0): sys.exit("x must be positive")
if a>0:
return sc.gamma(a)*sc.gammaincc(a,x)
elif a<0:
return gammaincc_unregulized(a+1,x)/a-(x**a)*np.exp(-x)/a
else:
return mpmath.gammainc(0,x)
## Compare with mpmath.gammainc
## gammaincc_unregulized is more efficient
# import time
#
# start_time = time.time()
# gammaincc_unregulized(-3.62,5)
# time.time() - start_time
# start_time = time.time()
# mpmath.gammainc(-3.62,5)
# time.time() - start_time
## -------------------------------------------------------------------------- ##
## -------------------------------------------------------------------------- ##
## -------------------------------------------------------------------------- ##
## Calculate the exact marginal survival function for R^phi*W
## -------------------------------------------------------------------------- ##
## -------------------------------------------------------------------------- ##
##
##
##
def RW_marginal_uni(x,phi,gamma,survival = True):
tmp1 = gamma/(2*(x**(1/phi)))
tmp2 = (gamma/2)**phi/sc.gamma(0.5)
res = sc.gammainc(0.5,tmp1) + tmp2*gammaincc_unregulized(0.5-phi,tmp1)/x
if survival:
return res
else:
return 1-res
RW_marginal = np.vectorize(RW_marginal_uni)
def RW_marginal_asymp(x,phi,gamma):
if phi<0.5:
moment = ((2*gamma)**phi)*sc.gamma(1-2*phi)/sc.gamma(1-phi)
return moment/x
elif phi>0.5:
return np.sqrt(2*gamma/np.pi)*(x**(-1/(2*phi)))/(1-1/(2*phi))
else:
return np.sqrt(2*gamma/np.pi)*np.log(x)/x
def RW_quantile_asymp(p,phi,gamma):
if phi<0.5:
moment = ((2*gamma)**phi)*sc.gamma(1-2*phi)/sc.gamma(1-phi)
return moment/(1-p)
elif phi>0.5:
return (np.sqrt(2*gamma/np.pi)/(1-1/(2*phi))/(1-p))**(2*phi)
else:
tmp = (1-p)/np.sqrt(2*gamma/np.pi)
return tmp/sc.lambertw(tmp)
# # Compare the exact and asymptotic CDF
# gamma = 1.2; x =10; phi=0.3
# import matplotlib.pyplot as plt
# axes = plt.gca()
# axes.set_ylim([0,0.125])
# X_vals = np.linspace(100,1500,num=200)
# P_vals = RW_marginal(X_vals,phi,gamma)
# P_asym = RW_marginal_asymp(X_vals,phi,gamma)
# plt.plot(X_vals, P_vals, 'b')
# plt.plot(X_vals, P_asym, 'r',linestyle='--');
## -------------------------------------------------------------------------- ##
## -------------------------------------------------------------------------- ##
## -------------------------------------------------------------------------- ##
## Calculate the marginal survival function for R^phi*W + epsilon
## -------------------------------------------------------------------------- ##
## -------------------------------------------------------------------------- ##
##
##
# ---------------- 1. Define integrand in Python: exact form ---------------- #
def mix_distn_integrand(t, xval, phi, tmp1, tmp2, tau_sqd):
diff = xval - t
tmp3 = tmp1/(diff**(1/phi))
res = sc.gammainc(0.5,tmp3) + tmp2*gammaincc_unregulized(0.5-phi,tmp3)/diff
result = res * np.exp(-t**2/(2*tau_sqd))
return result
def pRW_me_uni(xval, phi, gamma, tau_sqd):
tmp1 = gamma/2
tmp2 = ((gamma/2)**phi)/sc.gamma(0.5)
sd = np.sqrt(tau_sqd)
I_1 = si.quad(mix_distn_integrand, -np.inf, xval, args=(xval, phi, tmp1, tmp2, tau_sqd)) # 0.00147
tmp = norm.cdf(xval, loc=0.0, scale=sd)-I_1[0]/np.sqrt(2*np.pi*tau_sqd)
if tmp<0.999:
return tmp
else:
return RW_marginal_uni(xval,phi,gamma,survival = False)
pRW_me = np.vectorize(pRW_me_uni)
# ----------- 2. Define integrand in Python: linear interpolation ----------- #
# Actually BETTER than numerical integration because there are no singular values.
# We use the Trapezoidal rule.
## **** (0). Generate a GRIDDED set of values for P(RW>x) ****
def survival_interp_grid(phi, gamma, grid_size=800):
xp_1 = np.linspace(0.000001, 200, grid_size, endpoint = False)
xp_2 = np.linspace(200.5, 900, int(grid_size/4), endpoint = False)
xp_3 = np.linspace(900.5, 100000, int(grid_size/10), endpoint = False)
xp = np.concatenate((xp_1, xp_2, xp_3))
xp = xp[::-1] # reverse order
xp = np.ascontiguousarray(xp, np.float64) #C contiguous order: xp.flags['C_CONTIGUOUS']=True?
n_xval = len(xp); surv_p = np.empty(n_xval)
tmp_int = lib.RW_marginal_C(xp, phi, gamma, n_xval, surv_p)
    if tmp_int!=1: sys.exit('C implementation failed.')
# surv_p = RW_marginal(xp, phi, gamma)
return (xp, surv_p)
## **** (1). Vectorize univariate function ****
def pRW_me_uni_interp(xval, xp, surv_p, tau_sqd):
tp = xval-xp
integrand_p = np.exp(-tp**2/(2*tau_sqd)) * surv_p
sd = np.sqrt(tau_sqd)
I_1 = sum(np.diff(tp)*(integrand_p[:-1] + integrand_p[1:])/2) # 0.00036
tmp = norm.cdf(xval, loc=0.0, scale=sd)-I_1/np.sqrt(2*np.pi*tau_sqd)
return tmp
def pRW_me_interp_slower(xval, xp, surv_p, tau_sqd):
return np.array([pRW_me_uni_interp(xval_i, xp, surv_p, tau_sqd) for xval_i in xval])
## **** (2). Broadcast matrices and vectorize columns ****
def pRW_me_interp_py(xval, xp, surv_p, tau_sqd, phi, gamma):
if(isinstance(xval, (int, np.int64, float))): xval=np.array([xval], dtype='float64')
tmp = np.zeros(xval.shape) # Store the results
# Use the smooth process CDF values if tau_sqd<0.05
if tau_sqd>0.05:
which = (xval<820)
else:
which = np.repeat(False, xval.shape)
# Calculate for values that are less than 820
if(np.sum(which)>0):
xval_less = xval[which]
tp = xval_less-xp[:,np.newaxis]
integrand_p = np.exp(-tp**2/(2*tau_sqd)) * surv_p[:,np.newaxis]
sd = np.sqrt(tau_sqd)
ncol = integrand_p.shape[1]
I_1 = np.array([np.sum(np.diff(tp[:,index])*(integrand_p[:-1,index] + integrand_p[1:,index])/2)
for index in np.arange(ncol)])
tmp_res = norm.cdf(xval_less, loc=0.0, scale=sd)-I_1/np.sqrt(2*np.pi*tau_sqd)
# Numerical negative when xval is very small
if(np.any(tmp_res<0)): tmp_res[tmp_res<0] = 0
tmp[which] = tmp_res
# Calculate for values that are greater than 820
if(xval.size-np.sum(which)>0):
tmp[np.invert(which)] = RW_marginal(xval[np.invert(which)],phi,gamma,survival = False)
return tmp
## **** (3). Use the C implementation ****
def pRW_me_interp(xval, xp, surv_p, tau_sqd, phi, gamma):
if(isinstance(xval, (int, np.int64, float))): xval=np.array([xval], dtype='float64')
n_xval = len(xval); n_grid = len(xp)
result = np.zeros(n_xval) # Store the results
tmp_int = lib.pRW_me_interp_C(xval, xp, surv_p, tau_sqd, phi, gamma, n_xval, n_grid, result)
    if tmp_int!=1: sys.exit('C implementation failed.')
return result
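## Illustrative usage sketch (the phi/gamma/tau_sqd values below are arbitrary assumptions):
## build the gridded survival function once, then evaluate the measurement-error-smoothed CDF
## through the trapezoidal interpolation (pRW_me_interp calls the C routine; pRW_me_interp_py
## is the pure-Python fallback with the same signature).
# phi_ex, gamma_ex, tau_sqd_ex = 0.7, 1.2, 0.5
# xp_ex, surv_p_ex = survival_interp_grid(phi_ex, gamma_ex)
# pRW_me_interp(np.linspace(1., 50., 10), xp_ex, surv_p_ex, tau_sqd_ex, phi_ex, gamma_ex)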
# ----------- 3. Define integrand in Python: linear interpolation ----------- #
# The grid in the previous version depends on gamma. It's not ideal.
## **** (0). Generate a GRIDDED set of values for the integrand ****
## When phi > 1 and gamma < 0.01 (fixed?) or gamma > 120, the abnormality kicks in quicker.
## When phi=0.7 and gamma=1e-05, it works fine.
def survival_interp_grid1(phi, grid_size=1000):
sp_1 = np.linspace(0.000001, 400, grid_size, endpoint = False)
sp_2 = np.linspace(400.5, 1100, int(grid_size/4), endpoint = False)
sp_3 = np.linspace(1100.5, 100000, int(grid_size/10), endpoint = False)
sp = np.concatenate((sp_1, sp_2, sp_3))
tmp = 1/(sp**(1/phi))
surv_p = sc.gammainc(0.5,tmp) + gammaincc_unregulized(0.5-phi,tmp)/(sp*sc.gamma(0.5))
return (sp, surv_p)
def pRW_me_interp1(xval, sp, surv_p, tau_sqd, phi, gamma):
if(isinstance(xval, (int, np.int64, float))): xval=np.array([xval], dtype='float64')
res = np.zeros(xval.size) # Store the results
tmp1 = (gamma/2)**phi
# If the asymp quantile level reaches 0.98, use the smooth distribution func.
thresh = max(RW_quantile_asymp(0.98,phi,gamma),7.5) # 7.5 is for gamma<0.0001
# Use the smooth process CDF values if tau_sqd<0.05
if tau_sqd>0.05:
which = (xval<thresh)
else:
which = np.repeat(False, xval.shape)
    # Calculate for values below the threshold
if(np.sum(which)>0):
xval_less = xval[which]
tp = xval_less-tmp1*sp[:,np.newaxis]
integrand_p = np.exp(-tp**2/(2*tau_sqd)) * surv_p[:,np.newaxis]
sd = np.sqrt(tau_sqd)
ncol = integrand_p.shape[1]
I_1 = np.array([np.sum(np.diff(sp)*(integrand_p[:-1,index] + integrand_p[1:,index])/2)
for index in np.arange(ncol)])
tmp_res = norm.cdf(xval_less, loc=0.0, scale=sd)-tmp1*I_1/np.sqrt(2*np.pi*tau_sqd)
# Numerical negative when xval is very small
if(np.any(tmp_res<0)): tmp_res[tmp_res<0] = 0
res[which] = tmp_res
    # Calculate for values above the threshold
if(xval.size-np.sum(which)>0):
        res[np.invert(which)] = RW_marginal(xval[np.invert(which)], phi, gamma, survival=False)
    return res
# -*- coding: utf-8 -*-
""" Implementation of cost-based feature selection/ranking algorithms.
Implementation of the cost-based version of the filter feature selection method
based on Maximal-Relevance-Minimal-Redundancy (mRMR), Joint Mutual Information
(JMI), Joint Mutual Information Maximization (JMIM), a version of
ReliefF that can compute nearest neighbors either with random forests, or with
an L1 distance. A cost-based ranking is also available by penalization of the
random forest feature importance, or by using the feature importance of
a random forest where the sampling of features at each internal node
is proportional to the inverse of their cost.
Moreover, to analyze the rankings for different penalization parameter values,
we also implement corresponding functions that return the different rankings
for each penalization value.
"""
import collections
import copy
import numpy as np
from sklearn.feature_selection import mutual_info_classif
from sklearn.feature_selection import mutual_info_regression
from sklearn.ensemble import RandomForestClassifier
from scipy import spatial
from .old.cost_based_methods import _private_proximity_matrix
from ._util import evaluate_proximity_matrix
# To use the R package ranger for RF importance computation
import rpy2.robjects
def random_ranking(X, y, is_disc, cost_vec=None, cost_param=0):
"""
Return a random feature ranking.
"""
# Select features sequentially proportional to their inverse selection probability.
if cost_vec is None:
proba = np.ones(X.shape[1])
else:
assert cost_vec.shape == (X.shape[1],)
proba = cost_vec ** -cost_param
# "Rank" the features by sequentially selecting them proportional to the given probability. We
# first convert to lists because they are easier to deal with for sequential sampling.
candidates = list(np.arange(proba.size))
proba = list(proba)
ranking = []
while candidates:
# We need to renormalize the probabilities each time.
idx = np.random.choice(len(proba), p=np.asarray(proba) / np.sum(proba))
ranking.append(candidates.pop(idx))
proba.pop(idx)
return ranking,
def evaluate_pairwise_mutual_information(X: np.ndarray, is_disc: np.ndarray,
random_seed: int = 123) -> np.ndarray:
"""
Compute all pairwise mutual information scores.
"""
_, num_features = X.shape
matrix_MI = np.zeros((num_features, num_features), dtype=float)
for ii in range(num_features):
if is_disc[ii]: # If the ii-th feature is discrete
# we use the classif version
matrix_MI[ii, :] = mutual_info_classif(X, X[:, ii], discrete_features=is_disc,
random_state=random_seed)
else:
# otherwise we use the continuous (regression) version
matrix_MI[ii, :] = mutual_info_regression(X, X[:, ii], discrete_features=is_disc,
random_state=random_seed)
return matrix_MI
def evaluate_conditional_mutual_information(X: np.ndarray, is_disc: np.ndarray, y: np.ndarray,
random_seed: int = 123) -> np.ndarray:
"""
Compute pairwise mutual information conditional on the class of `y`.
"""
_, num_features = X.shape
# Create a dictionary that will contains the corresponding MI matrices
# conditionally on the different unique values of y
MI_condY = dict()
# For each modality of y
for valY in np.unique(y):
# Initialize a new matrix
matTmp = np.zeros((num_features, num_features), dtype=float)
# Extract the rows of X with this modality of Y
subX = X[y == valY]
# proportion of this modality
proValY = np.mean(y == valY)
is_discForSubX = copy.deepcopy(is_disc)
for featIdx in range(num_features):
if is_disc[featIdx] and len(np.unique(subX[:, featIdx])) == subX.shape[0]:
is_discForSubX[featIdx] = False
# Fill the matrix
for ii in range(num_features):
if is_discForSubX[ii]:
matTmp[ii, :] = proValY * mutual_info_classif(
subX, subX[:, ii], discrete_features=is_discForSubX,
random_state=random_seed)
else:
matTmp[ii, :] = proValY * mutual_info_regression(
subX, subX[:, ii], discrete_features=is_discForSubX,
random_state=random_seed)
MI_condY[valY] = matTmp
return MI_condY
def mRMR(X, y, is_disc, cost_vec=None, cost_param=0, num_features_to_select=None, random_seed=123,
MI_matrix=None, MI_conditional=None):
"""
Cost-based feature ranking with maximum relevance minimum redundancy.
Cost-based adaptation of the filter feature selection algorithm Maximal-
Relevance-Minimal-Redundancy (mRMR, Peng et al. (2005)).
<NAME>, <NAME>, and <NAME>. Feature Selection Based on Mutual
Information: Criteria of Max-Dependency, Max-Relevance, and Min-Redundancy.
IEEE Transactions on pattern analysis and machine intelligence,
27:1226--1238, 2005.
Args:
X (numpy.ndarray):
the numerical features to use as training data, where
each row represents an individual, and each column a feature.
y (list):
a list of integers representing the training data labels.
is_disc (list):
a list of booleans indicating with True if the feature is discrete
and False if continuous.
cost_vec (numpy.ndarray):
the vector of costs represented by a numpy.ndarray with shape
(1, X.shape[1]). If None, the cost is set to zero for each feature.
cost_param (float):
the positive cost penalization parameter. 0 by default.
num_features_to_select (int):
the number of best features to select. If unspecified, does not
select a subset of features but keep all of them.
random_seed (int):
the random seed to use with the mutual_information function
(when computing the Mutual Information (MI) involving one or more
continuous features).
MI_matrix (numpy.ndarray):
the matrix of precomputed pairwise MI between pairs of features to
save times when wanting to use multiple cost values.
By default this matrix is computed in the function.
Returns:
ranking (list):
list containing the indices of the ranked features as specified in
X, in decreasing order of importance.
matrix_MI (numpy.ndarray):
the matrix of precomputed MI between pairs of features.
"""
num_features = X.shape[1]
if cost_vec is None:
# If no cost is specified, then all costs are set as equal to zero
cost_vec = np.zeros(num_features)
# Check on num_features_to_select
if (num_features_to_select is not None):
num_selected_features = min(num_features, num_features_to_select)
else:
num_selected_features = num_features
# unRanked contains the feature indices unranked
unRanked = list(range(num_features))
# If a feature is discrete but with always different values, then
# convert it into a continuous one
# (to handle errors with the MI computation function)
for featIdx in range(num_features):
if is_disc[featIdx] and len(np.unique(X[:, featIdx])) == X.shape[0]:
is_disc[featIdx] = False
# Computing all the MIs I(X_j; y)
initial_scores = mutual_info_classif(X, y, discrete_features=is_disc, random_state=random_seed)
    # The cost-based criterion subtracts lambda*cost from each element of initial_scores
initial_scores_mcost = initial_scores - cost_param*cost_vec
if MI_matrix is None:
matrix_MI = evaluate_pairwise_mutual_information(X, is_disc, random_seed)
else:
matrix_MI = MI_matrix
# ranking contains the indices of the final ranking in decreasing order of importance
ranking = []
# The first selected feature is the one with the maximal penalized I(X_j, Y) value
selected = np.argmax(initial_scores_mcost)
ranking.append(selected)
unRanked.pop(selected)
# Until we have the desired number of selected_features, we apply the selection criterion
for k in range(1, num_selected_features):
featureRel = []
# Compute the criterion to maximize for each unranked covariate
for idx in unRanked:
featureRel.append(initial_scores_mcost[idx] - np.mean(matrix_MI[ranking, idx]))
tmp_idx = np.argmax(featureRel)
ranking.append(unRanked[tmp_idx])
unRanked.pop(tmp_idx)
return ranking, matrix_MI
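# Minimal usage sketch for the ranking function above; everything here (data, costs, penalty
# value) is synthetic and chosen only for illustration, not taken from any experiment.
def _mrmr_usage_example():
    """Rank five synthetic features with mRMR while penalizing per-feature acquisition costs."""
    rng = np.random.default_rng(0)
    X = rng.normal(size=(200, 5))
    y = (X[:, 0] + 0.5 * X[:, 1] > 0).astype(int)
    is_disc = [False] * X.shape[1]
    cost_vec = np.array([1.0, 1.0, 5.0, 5.0, 10.0])  # pretend the last feature is expensive
    ranking, _ = mRMR(X, y, is_disc, cost_vec=cost_vec, cost_param=0.1)
    return ranking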
def JMI(X, y, is_disc, cost_vec=None, cost_param=0, num_features_to_select=None, random_seed=123,
MI_matrix=None, MI_conditional=None):
"""
Cost-based feature ranking based on Joint Mutual Information.
Cost-based adaptation of the filter feature selection algorithm based on
Joint Mutual Information (Yang and Moody (1999)).
    H. H. Yang and J. Moody. Feature selection based on joint mutual information.
In Advances in intelligent data analysis, proceedings of international
ICSC symposium, pages 22—-25, 1999.
Args:
X (numpy.ndarray):
the numerical features to use as training data, where
each row represents an individual, and each column a feature.
y (list):
a list of integers representing the training data labels.
is_disc (list):
a list of booleans indicating with True if the feature is discrete
and False if continuous.
cost_vec (numpy.ndarray):
the vector of costs represented by a numpy.ndarray with shape
(1, X.shape[1]). If None, the cost is set to zero for each feature.
cost_param (float):
the positive cost penalization parameter. 0 by default.
num_features_to_select (int):
the number of best features to select. If unspecified, does not
select a subset of features but keep all of them.
random_seed (int):
the random seed to use with the mutual_information function
(when computing the Mutual Information (MI) involving one or more
continuous features).
MI_matrix (numpy.ndarray):
the matrix of precomputed pairwise MI between pairs of features to
save times when wanting to use multiple cost values.
By default this matrix is computed in the function.
MI_conditional (dict):
a dictionary that contains the precomputed numpy.ndarray of conditional
pairwise MI between features, conditioned to the response values.
Each key is a response modality, and each value is a conditional
MI matrix between features I(X_i, X_j | y=key). Useful to save
computational times when wanting to use multiple cost values, but
by default it is computed in the function.
Returns:
ranking (list):
list containing the indices of the ranked features as specified in
X, in decreasing order of importance.
matrix_MI_Xk_Xj (numpy.ndarray):
the matrix of precomputed MI between pairs of features.
MI_condY (dict):
a dictionary that contains the precomputed numpy.ndarray of conditional
pairwise MI between features, conditioned to the response values.
Each key is a response modality, and each value is a conditional
MI matrix between features I(X_i, X_j | y=key).
"""
num_features = X.shape[1]
if cost_vec is None:
# If no cost is specified, then all costs are set as equal to zero
cost_vec = np.zeros(num_features)
# Check on num_features_to_select
if num_features_to_select is not None:
num_selected_features = min(num_features, num_features_to_select)
else:
num_selected_features = num_features
# unRanked contains the feature indices unranked
unRanked = list(range(num_features))
# If a feature is discrete but with always different values, then
# convert it into a continuous one
# (to handle errors with the MI computation function)
for featIdx in range(num_features):
if is_disc[featIdx] and len(np.unique(X[:, featIdx])) == X.shape[0]:
is_disc[featIdx] = False
# Computing all the MIs I(X_j; y)
initial_scores = mutual_info_classif(X, y, discrete_features=is_disc, random_state=random_seed)
    # The cost-based criterion subtracts lambda*cost from each element of initial_scores
initial_scores_mcost = initial_scores - cost_param * cost_vec
if MI_matrix is None:
matrix_MI_Xk_Xj = evaluate_pairwise_mutual_information(X, is_disc, random_seed)
else:
matrix_MI_Xk_Xj = MI_matrix
# For the Joint mutual information, we also need to compute the matrices
# I(Xk, Xj | Y=y) for y in Y
# Create a dictionary that will contains the corresponding MI matrices given the different
# unique values of y.
if MI_conditional is None:
MI_condY = evaluate_conditional_mutual_information(X, is_disc, y, random_seed)
else:
MI_condY = MI_conditional
# ranking contains the indices of the final ranking in decreasing order of importance
ranking = []
# The first selected feature is the one with the maximal penalized I(X_j, Y) value
selected = np.argmax(initial_scores_mcost)
ranking.append(selected)
unRanked.pop(selected)
# Until we have the desired number of selected_features, we apply the selection criterion
for k in range(1, num_selected_features):
featureRel = []
# Compute the criterion to maximize for each unranked covariate
for idx in unRanked:
vecSummed = np.zeros(len(ranking))
for valY in np.unique(y):
# Compute I(Xk; Xj | Y)
vecSummed += MI_condY[valY][ranking, idx]
criterionVal = initial_scores_mcost[idx] - np.mean(matrix_MI_Xk_Xj[ranking, idx]) \
+ np.mean(vecSummed)
featureRel.append(criterionVal)
        tmp_idx = np.argmax(featureRel)
        ranking.append(unRanked[tmp_idx])
        unRanked.pop(tmp_idx)
    return ranking, matrix_MI_Xk_Xj, MI_condY
from os import path
import numpy as np
from numpy.testing import *
import datetime
class TestDateTime(TestCase):
def test_creation(self):
for unit in ['Y', 'M', 'W', 'B', 'D',
'h', 'm', 's', 'ms', 'us',
'ns', 'ps', 'fs', 'as']:
dt1 = np.dtype('M8[750%s]' % unit)
assert dt1 == np.dtype('datetime64[750%s]' % unit)
dt2 = np.dtype('m8[%s]' % unit)
assert dt2 == np.dtype('timedelta64[%s]' % unit)
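    # Divisor units such as 'M8[Y/4]' split the base unit into equal parts, so they normalize
    # to an equivalent smaller unit (a quarter year is 3 months, a seventh of a week is a day).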
def test_divisor_conversion_year(self):
assert np.dtype('M8[Y/4]') == np.dtype('M8[3M]')
assert np.dtype('M8[Y/13]') == np.dtype('M8[4W]')
assert np.dtype('M8[3Y/73]') == np.dtype('M8[15D]')
def test_divisor_conversion_month(self):
assert np.dtype('M8[M/2]') == np.dtype('M8[2W]')
assert np.dtype('M8[M/15]') == np.dtype('M8[2D]')
assert np.dtype('M8[3M/40]') == np.dtype('M8[54h]')
def test_divisor_conversion_week(self):
assert np.dtype('m8[W/5]') == np.dtype('m8[B]')
        assert np.dtype('m8[W/7]') == np.dtype('m8[D]')
from __future__ import print_function, division
import os
import sys
import pytest
import warnings
import numpy
from galpy.util import galpyWarning
from test_actionAngle import reset_warning_registry
_TRAVIS= bool(os.getenv('TRAVIS'))
PY2= sys.version < '3'
# Print all galpyWarnings always for tests of warnings
warnings.simplefilter("always",galpyWarning)
#Basic sanity checking: circular orbit should have constant R, zero vR, vT=vc
def test_actionAngleTorus_basic():
from galpy.actionAngle import actionAngleTorus
from galpy.potential import MWPotential, rl, vcirc, \
FlattenedPowerPotential, PlummerPotential
tol= -4.
jr= 10.**-10.
jz= 10.**-10.
aAT= actionAngleTorus(pot=MWPotential)
# at R=1, Lz=1
jphi= 1.
angler= numpy.linspace(0.,2.*numpy.pi,101)
anglephi= numpy.linspace(0.,2.*numpy.pi,101)+1.
anglez= numpy.linspace(0.,2.*numpy.pi,101)+2.
RvR= aAT(jr,jphi,jz,angler,anglephi,anglez).T
assert numpy.all(numpy.fabs(RvR[0]-rl(MWPotential,jphi)) < 10.**tol), \
'circular orbit does not have constant radius for actionAngleTorus'
assert numpy.all(numpy.fabs(RvR[1]) < 10.**tol), \
'circular orbit does not have zero radial velocity for actionAngleTorus'
assert numpy.all(numpy.fabs(RvR[2]-vcirc(MWPotential,rl(MWPotential,jphi))) < 10.**tol), \
'circular orbit does not have constant vT=vc for actionAngleTorus'
assert numpy.all(numpy.fabs(RvR[3]) < 10.**tol), \
'circular orbit does not have zero vertical height for actionAngleTorus'
assert numpy.all(numpy.fabs(RvR[4]) < 10.**tol), \
'circular orbit does not have zero vertical velocity for actionAngleTorus'
# at Lz=1.5, using Plummer
tol= -3.25
pp= PlummerPotential(normalize=1.)
aAT= actionAngleTorus(pot=pp)
jphi= 1.5
RvR= aAT(jr,jphi,jz,angler,anglephi,anglez).T
assert numpy.all(numpy.fabs(RvR[0]-rl(pp,jphi)) < 10.**tol), \
'circular orbit does not have constant radius for actionAngleTorus'
assert numpy.all(numpy.fabs(RvR[1]) < 10.**tol), \
'circular orbit does not have zero radial velocity for actionAngleTorus'
assert numpy.all(numpy.fabs(RvR[2]-vcirc(pp,rl(pp,jphi))) < 10.**tol), \
'circular orbit does not have constant vT=vc for actionAngleTorus'
assert numpy.all(numpy.fabs(RvR[3]) < 10.**tol), \
'circular orbit does not have zero vertical height for actionAngleTorus'
assert numpy.all(numpy.fabs(RvR[4]) < 10.**tol), \
'circular orbit does not have zero vertical velocity for actionAngleTorus'
# at Lz=0.5, using FlattenedPowerPotential
tol= -4.
fp= FlattenedPowerPotential(normalize=1.)
aAT= actionAngleTorus(pot=fp)
jphi= 0.5
RvR= aAT(jr,jphi,jz,angler,anglephi,anglez).T
assert numpy.all(numpy.fabs(RvR[0]-rl(fp,jphi)) < 10.**tol), \
'circular orbit does not have constant radius for actionAngleTorus'
assert numpy.all(numpy.fabs(RvR[1]) < 10.**tol), \
'circular orbit does not have zero radial velocity for actionAngleTorus'
assert numpy.all(numpy.fabs(RvR[2]-vcirc(fp,rl(fp,jphi))) < 10.**tol), \
'circular orbit does not have constant vT=vc for actionAngleTorus'
assert numpy.all(numpy.fabs(RvR[3]) < 10.**tol), \
'circular orbit does not have zero vertical height for actionAngleTorus'
assert numpy.all(numpy.fabs(RvR[4]) < 10.**tol), \
'circular orbit does not have zero vertical velocity for actionAngleTorus'
return None
#Basic sanity checking: close-to-circular orbit should have freq. = epicycle freq.
def test_actionAngleTorus_basic_freqs():
from galpy.actionAngle import actionAngleTorus
from galpy.potential import epifreq, omegac, verticalfreq, rl, \
JaffePotential, PowerSphericalPotential, HernquistPotential
tol= -3.
jr= 10.**-6.
jz= 10.**-6.
jp= JaffePotential(normalize=1.)
aAT= actionAngleTorus(pot=jp)
# at Lz=1
jphi= 1.
om= aAT.Freqs(jr,jphi,jz)
assert numpy.fabs((om[0]-epifreq(jp,rl(jp,jphi)))/om[0]) < 10.**tol, \
'Close-to-circular orbit does not have Or=kappa for actionAngleTorus'
assert numpy.fabs((om[1]-omegac(jp,rl(jp,jphi)))/om[1]) < 10.**tol, \
'Close-to-circular orbit does not have Ophi=omega for actionAngleTorus'
assert numpy.fabs((om[2]-verticalfreq(jp,rl(jp,jphi)))/om[2]) < 10.**tol, \
'Close-to-circular orbit does not have Oz=nu for actionAngleTorus'
# at Lz=1.5, w/ different potential
pp= PowerSphericalPotential(normalize=1.)
aAT= actionAngleTorus(pot=pp)
jphi= 1.5
om= aAT.Freqs(jr,jphi,jz)
assert numpy.fabs((om[0]-epifreq(pp,rl(pp,jphi)))/om[0]) < 10.**tol, \
'Close-to-circular orbit does not have Or=kappa for actionAngleTorus'
assert numpy.fabs((om[1]-omegac(pp,rl(pp,jphi)))/om[1]) < 10.**tol, \
'Close-to-circular orbit does not have Ophi=omega for actionAngleTorus'
assert numpy.fabs((om[2]-verticalfreq(pp,rl(pp,jphi)))/om[2]) < 10.**tol, \
'Close-to-circular orbit does not have Oz=nu for actionAngleTorus'
# at Lz=0.5, w/ different potential
tol= -2.5 # appears more difficult
hp= HernquistPotential(normalize=1.)
aAT= actionAngleTorus(pot=hp)
jphi= 0.5
om= aAT.Freqs(jr,jphi,jz)
assert numpy.fabs((om[0]-epifreq(hp,rl(hp,jphi)))/om[0]) < 10.**tol, \
'Close-to-circular orbit does not have Or=kappa for actionAngleTorus'
assert numpy.fabs((om[1]-omegac(hp,rl(hp,jphi)))/om[1]) < 10.**tol, \
'Close-to-circular orbit does not have Ophi=omega for actionAngleTorus'
assert numpy.fabs((om[2]-verticalfreq(hp,rl(hp,jphi)))/om[2]) < 10.**tol, \
'Close-to-circular orbit does not have Oz=nu for actionAngleTorus'
return None
#Test that orbit from actionAngleTorus is the same as an integrated orbit
def test_actionAngleTorus_orbit():
from galpy.actionAngle import actionAngleTorus
from galpy.potential import MWPotential2014
from galpy.orbit import Orbit
# Set up instance
aAT= actionAngleTorus(pot=MWPotential2014,tol=10.**-5.)
jr,jphi,jz= 0.05,1.1,0.025
# First calculate frequencies and the initial RvR
RvRom= aAT.xvFreqs(jr,jphi,jz,
numpy.array([0.]),
numpy.array([1.]),
numpy.array([2.]))
om= RvRom[1:]
# Angles along an orbit
ts= numpy.linspace(0.,100.,1001)
angler= ts*om[0]
anglephi= 1.+ts*om[1]
anglez= 2.+ts*om[2]
# Calculate the orbit using actionAngleTorus
RvR= aAT(jr,jphi,jz,angler,anglephi,anglez).T
# Calculate the orbit using orbit integration
orb= Orbit([RvRom[0][0,0],RvRom[0][0,1],RvRom[0][0,2],
RvRom[0][0,3],RvRom[0][0,4],RvRom[0][0,5]])
orb.integrate(ts,MWPotential2014)
# Compare
tol= -3.
assert numpy.all(numpy.fabs(orb.R(ts)-RvR[0]) < 10.**tol), \
'Integrated orbit does not agree with torus orbit in R'
assert numpy.all(numpy.fabs(orb.vR(ts)-RvR[1]) < 10.**tol), \
'Integrated orbit does not agree with torus orbit in vR'
assert numpy.all(numpy.fabs(orb.vT(ts)-RvR[2]) < 10.**tol), \
'Integrated orbit does not agree with torus orbit in vT'
assert numpy.all(numpy.fabs(orb.z(ts)-RvR[3]) < 10.**tol), \
'Integrated orbit does not agree with torus orbit in z'
assert numpy.all(numpy.fabs(orb.vz(ts)-RvR[4]) < 10.**tol), \
'Integrated orbit does not agree with torus orbit in vz'
assert numpy.all(numpy.fabs((orb.phi(ts)-RvR[5]+numpy.pi) % (2.*numpy.pi) -numpy.pi) < 10.**tol), \
'Integrated orbit does not agree with torus orbit in phi'
return None
# Test that actionAngleTorus w/ interp pot gives same freqs as regular pot
# Doesn't work well: TM aborts because our interpolated forces aren't
# consistent enough with the potential for TM's taste, but we test that it
# at least works somewhat
def test_actionAngleTorus_interppot_freqs():
from galpy.actionAngle import actionAngleTorus
from galpy.potential import LogarithmicHaloPotential, interpRZPotential
lp= LogarithmicHaloPotential(normalize=1.)
ip= interpRZPotential(RZPot=lp,
interpPot=True,
interpDens=True,interpRforce=True,interpzforce=True,
enable_c=True)
aAT= actionAngleTorus(pot=lp)
aATi= actionAngleTorus(pot=ip)
jr,jphi,jz= 0.05,1.1,0.02
om= aAT.Freqs(jr,jphi,jz)
omi= aATi.Freqs(jr,jphi,jz)
assert numpy.fabs((om[0]-omi[0])/om[0]) < 0.2, 'Radial frequency computed using the torus machine does not agree between potential and interpolated potential'
assert numpy.fabs((om[1]-omi[1])/om[1]) < 0.2, 'Azimuthal frequency computed using the torus machine does not agree between potential and interpolated potential'
assert numpy.fabs((om[2]-omi[2])/om[2]) < 0.8, 'Vertical frequency computed using the torus machine does not agree between potential and interpolated potential'
return None
#Test the actionAngleTorus against an isochrone potential: actions
def test_actionAngleTorus_Isochrone_actions():
from galpy.potential import IsochronePotential
from galpy.actionAngle import actionAngleTorus, \
actionAngleIsochrone
ip= IsochronePotential(normalize=1.,b=1.2)
aAI= actionAngleIsochrone(ip=ip)
tol= -6.
aAT= actionAngleTorus(pot=ip,tol=tol)
jr,jphi,jz= 0.075,1.1,0.05
angler= numpy.array([0.])
anglephi= numpy.array([numpy.pi])
anglez= numpy.array([numpy.pi/2.])
# Calculate position from aAT
RvR= aAT(jr,jphi,jz,angler,anglephi,anglez).T
# Calculate actions from aAI
ji= aAI(*RvR)
djr= numpy.fabs((ji[0]-jr)/jr)
dlz= numpy.fabs((ji[1]-jphi)/jphi)
djz= numpy.fabs((ji[2]-jz)/jz)
assert djr < 10.**tol, 'actionAngleTorus and actionAngleIsochrone applied to isochrone potential disagree for Jr at %f%%' % (djr*100.)
assert dlz < 10.**tol, 'actionAngleTorus and actionAngleIsochrone applied to isochrone potential disagree for Lz at %f%%' % (dlz*100.)
assert djz < 10.**tol, 'actionAngleTorus and actionAngleIsochrone applied to isochrone potential disagree for Jz at %f%%' % (djz*100.)
return None
#Test the actionAngleTorus against an isochrone potential: frequencies and angles
def test_actionAngleTorus_Isochrone_freqsAngles():
from galpy.potential import IsochronePotential
from galpy.actionAngle import actionAngleTorus, \
actionAngleIsochrone
ip= IsochronePotential(normalize=1.,b=1.2)
aAI= actionAngleIsochrone(ip=ip)
tol= -6.
aAT= actionAngleTorus(pot=ip,tol=tol)
jr,jphi,jz= 0.075,1.1,0.05
angler= numpy.array([0.1])+numpy.linspace(0.,numpy.pi,101)
angler= angler % (2.*numpy.pi)
anglephi= numpy.array([numpy.pi])+numpy.linspace(0.,numpy.pi,101)
anglephi= anglephi % (2.*numpy.pi)
anglez= numpy.array([numpy.pi/2.])+numpy.linspace(0.,numpy.pi,101)
anglez= anglez % (2.*numpy.pi)
# Calculate position from aAT
RvRom= aAT.xvFreqs(jr,jphi,jz,angler,anglephi,anglez)
# Calculate actions, frequencies, and angles from aAI
ws= aAI.actionsFreqsAngles(*RvRom[0].T)
dOr= numpy.fabs((ws[3]-RvRom[1]))
dOp= numpy.fabs((ws[4]-RvRom[2]))
dOz= numpy.fabs((ws[5]-RvRom[3]))
dar= numpy.fabs((ws[6]-angler))
dap= numpy.fabs((ws[7]-anglephi))
daz= numpy.fabs((ws[8]-anglez))
dar[dar > numpy.pi]-= 2.*numpy.pi
dar[dar < -numpy.pi]+= 2.*numpy.pi
dap[dap > numpy.pi]-= 2.*numpy.pi
dap[dap < -numpy.pi]+= 2.*numpy.pi
daz[daz > numpy.pi]-= 2.*numpy.pi
daz[daz < -numpy.pi]+= 2.*numpy.pi
assert numpy.all(dOr < 10.**tol), 'actionAngleTorus and actionAngleIsochrone applied to isochrone potential disagree for Or at %f%%' % (numpy.nanmax(dOr)*100.)
assert numpy.all(dOp < 10.**tol), 'actionAngleTorus and actionAngleIsochrone applied to isochrone potential disagree for Ophi at %f%%' % (numpy.nanmax(dOp)*100.)
assert numpy.all(dOz < 10.**tol), 'actionAngleTorus and actionAngleIsochrone applied to isochrone potential disagree for Oz at %f%%' % (numpy.nanmax(dOz)*100.)
assert numpy.all(dar < 10.**tol), 'actionAngleTorus and actionAngleIsochrone applied to isochrone potential disagree for ar at %f' % (numpy.nanmax(dar))
assert numpy.all(dap < 10.**tol), 'actionAngleTorus and actionAngleIsochrone applied to isochrone potential disagree for aphi at %f' % (numpy.nanmax(dap))
assert numpy.all(daz < 10.**tol), 'actionAngleTorus and actionAngleIsochrone applied to isochrone potential disagree for az at %f' % (numpy.nanmax(daz))
return None
#Test the actionAngleTorus against a Staeckel potential: actions
def test_actionAngleTorus_Staeckel_actions():
from galpy.potential import KuzminKutuzovStaeckelPotential
from galpy.actionAngle import actionAngleTorus, \
actionAngleStaeckel
delta= 1.2
kp= KuzminKutuzovStaeckelPotential(normalize=1.,Delta=delta)
aAS= actionAngleStaeckel(pot=kp,delta=delta,c=True)
tol= -3.
aAT= actionAngleTorus(pot=kp,tol=tol)
jr,jphi,jz= 0.075,1.1,0.05
angler= numpy.array([0.])
anglephi= numpy.array([numpy.pi])
anglez= numpy.array([numpy.pi/2.])
# Calculate position from aAT
RvR= aAT(jr,jphi,jz,angler,anglephi,anglez).T
# Calculate actions from aAI
ji= aAS(*RvR)
djr= numpy.fabs((ji[0]-jr)/jr)
dlz= numpy.fabs((ji[1]-jphi)/jphi)
djz= numpy.fabs((ji[2]-jz)/jz)
assert djr < 10.**tol, 'actionAngleTorus and actionAngleStaeckel applied to Staeckel potential disagree for Jr at %f%%' % (djr*100.)
assert dlz < 10.**tol, 'actionAngleTorus and actionAngleStaeckel applied to Staeckel potential disagree for Lz at %f%%' % (dlz*100.)
assert djz < 10.**tol, 'actionAngleTorus and actionAngleStaeckel applied to Staeckel potential disagree for Jz at %f%%' % (djz*100.)
return None
#Test the actionAngleTorus against an isochrone potential: frequencies and angles
def test_actionAngleTorus_Staeckel_freqsAngles():
from galpy.potential import KuzminKutuzovStaeckelPotential
from galpy.actionAngle import actionAngleTorus, \
actionAngleStaeckel
delta= 1.2
kp= KuzminKutuzovStaeckelPotential(normalize=1.,Delta=delta)
aAS= actionAngleStaeckel(pot=kp,delta=delta,c=True)
tol= -3.
aAT= actionAngleTorus(pot=kp,tol=tol)
jr,jphi,jz= 0.075,1.1,0.05
angler= numpy.array([0.1])+numpy.linspace(0.,numpy.pi,101)
angler= angler % (2.*numpy.pi)
anglephi= numpy.array([numpy.pi])+numpy.linspace(0.,numpy.pi,101)
anglephi= anglephi % (2.*numpy.pi)
anglez= numpy.array([numpy.pi/2.])+numpy.linspace(0.,numpy.pi,101)
anglez= anglez % (2.*numpy.pi)
# Calculate position from aAT
RvRom= aAT.xvFreqs(jr,jphi,jz,angler,anglephi,anglez)
# Calculate actions, frequencies, and angles from aAI
ws= aAS.actionsFreqsAngles(*RvRom[0].T)
dOr= numpy.fabs((ws[3]-RvRom[1]))
dOp= numpy.fabs((ws[4]-RvRom[2]))
dOz= numpy.fabs((ws[5]-RvRom[3]))
dar= numpy.fabs((ws[6]-angler))
dap= numpy.fabs((ws[7]-anglephi))
daz= numpy.fabs((ws[8]-anglez))
dar[dar > numpy.pi]-= 2.*numpy.pi
dar[dar < -numpy.pi]+= 2.*numpy.pi
dap[dap > numpy.pi]-= 2.*numpy.pi
dap[dap < -numpy.pi]+= 2.*numpy.pi
daz[daz > numpy.pi]-= 2.*numpy.pi
daz[daz < -numpy.pi]+= 2.*numpy.pi
assert numpy.all(dOr < 10.**tol), 'actionAngleTorus and actionAngleStaeckel applied to Staeckel potential disagree for Or at %f%%' % (numpy.nanmax(dOr)*100.)
assert numpy.all(dOp < 10.**tol), 'actionAngleTorus and actionAngleStaeckel applied to Staeckel potential disagree for Ophi at %f%%' % (numpy.nanmax(dOp)*100.)
assert numpy.all(dOz < 10.**tol), 'actionAngleTorus and actionAngleStaeckel applied to Staeckel potential disagree for Oz at %f%%' % (numpy.nanmax(dOz)*100.)
assert numpy.all(dar < 10.**tol), 'actionAngleTorus and actionAngleStaeckel applied to Staeckel potential disagree for ar at %f' % (numpy.nanmax(dar))
assert numpy.all(dap < 10.**tol), 'actionAngleTorus and actionAngleStaeckel applied to Staeckel potential disagree for aphi at %f' % (numpy.nanmax(dap))
assert numpy.all(daz < 10.**tol), 'actionAngleTorus and actionAngleStaeckel applied to Staeckel potential disagree for az at %f' % (numpy.nanmax(daz))
return None
#Test the actionAngleTorus against a general potential w/ actionAngleIsochroneApprox: actions
def test_actionAngleTorus_isochroneApprox_actions():
from galpy.potential import MWPotential2014
from galpy.actionAngle import actionAngleTorus, \
actionAngleIsochroneApprox
aAIA= actionAngleIsochroneApprox(pot=MWPotential2014,b=0.8)
tol= -2.5
aAT= actionAngleTorus(pot=MWPotential2014,tol=tol)
jr,jphi,jz= 0.075,1.1,0.05
angler= numpy.array([0.])
anglephi= numpy.array([numpy.pi])
anglez= numpy.array([numpy.pi/2.])
# Calculate position from aAT
RvR= aAT(jr,jphi,jz,angler,anglephi,anglez).T
# Calculate actions from aAIA
ji= aAIA(*RvR)
djr= numpy.fabs((ji[0]-jr)/jr)
dlz= numpy.fabs((ji[1]-jphi)/jphi)
djz= numpy.fabs((ji[2]-jz)/jz)
assert djr < 10.**tol, 'actionAngleTorus and actionAngleIsochroneApprox applied to MWPotential2014 potential disagree for Jr at %f%%' % (djr*100.)
assert dlz < 10.**tol, 'actionAngleTorus and actionAngleIsochroneApprox applied to MWPotential2014 potential disagree for Lz at %f%%' % (dlz*100.)
assert djz < 10.**tol, 'actionAngleTorus and actionAngleIsochroneApprox applied to MWPotential2014 potential disagree for Jz at %f%%' % (djz*100.)
return None
#Test the actionAngleTorus against a general potential w/ actionAngleIsochrone: frequencies and angles
def test_actionAngleTorus_isochroneApprox_freqsAngles():
from galpy.potential import MWPotential2014
from galpy.actionAngle import actionAngleTorus, \
actionAngleIsochroneApprox
aAIA= actionAngleIsochroneApprox(pot=MWPotential2014,b=0.8)
tol= -3.5
aAT= actionAngleTorus(pot=MWPotential2014,tol=tol)
jr,jphi,jz= 0.075,1.1,0.05
angler= numpy.array([0.1])+numpy.linspace(0.,numpy.pi,21)
angler= angler % (2.*numpy.pi)
anglephi= numpy.array([numpy.pi])+numpy.linspace(0.,numpy.pi,21)
anglephi= anglephi % (2.*numpy.pi)
anglez= numpy.array([numpy.pi/2.])+numpy.linspace(0.,numpy.pi,21)
anglez= anglez % (2.*numpy.pi)
# Calculate position from aAT
RvRom= aAT.xvFreqs(jr,jphi,jz,angler,anglephi,anglez)
# Calculate actions, frequencies, and angles from aAI
ws= aAIA.actionsFreqsAngles(*RvRom[0].T)
dOr= numpy.fabs((ws[3]-RvRom[1]))
dOp= numpy.fabs((ws[4]-RvRom[2]))
dOz= numpy.fabs((ws[5]-RvRom[3]))
dar= numpy.fabs((ws[6]-angler))
dap= numpy.fabs((ws[7]-anglephi))
daz= numpy.fabs((ws[8]-anglez))
dar[dar > numpy.pi]-= 2.*numpy.pi
dar[dar < -numpy.pi]+= 2.*numpy.pi
dap[dap > numpy.pi]-= 2.*numpy.pi
dap[dap < -numpy.pi]+= 2.*numpy.pi
daz[daz > numpy.pi]-= 2.*numpy.pi
daz[daz < -numpy.pi]+= 2.*numpy.pi
assert numpy.all(dOr < 10.**tol), 'actionAngleTorus and actionAngleIsochroneApprox applied to MWPotential2014 potential disagree for Or at %f%%' % (numpy.nanmax(dOr)*100.)
assert numpy.all(dOp < 10.**tol), 'actionAngleTorus and actionAngleIsochroneApprox applied to MWPotential2014 potential disagree for Ophi at %f%%' % (numpy.nanmax(dOp)*100.)
assert numpy.all(dOz < 10.**tol), 'actionAngleTorus and actionAngleIsochroneApprox applied to MWPotential2014 potential disagree for Oz at %f%%' % (numpy.nanmax(dOz)*100.)
assert numpy.all(dar < 10.**tol), 'actionAngleTorus and actionAngleIsochroneApprox applied to MWPotential2014 potential disagree for ar at %f' % (numpy.nanmax(dar))
assert numpy.all(dap < 10.**tol), 'actionAngleTorus and actionAngleIsochroneApprox applied to MWPotential2014 potential disagree for aphi at %f' % (numpy.nanmax(dap))
assert numpy.all(daz < 10.**tol), 'actionAngleTorus and actionAngleIsochroneApprox applied to MWPotential2014 potential disagree for az at %f' % (numpy.nanmax(daz))
return None
import numpy as np
from statesegmentation import GSBS
import gsbs_extra
from typing import Tuple
from scipy.spatial.distance import cdist
from scipy.stats import pearsonr, ttest_ind, mannwhitneyu
from sklearn.model_selection import KFold
from hrf_estimation import hrf
import timeit
from help_functions import fit_metrics_simulation, compute_fits_hmm, deltas_states
from brainiak.eventseg.event import EventSegment as HMM
from importlib import reload
import itertools
def basic_simulation():
results = dict()
real_bounds, subData, _ = generate_simulated_data_HRF(length_std=l, nstates=n, TRfactor=TRfactor, rep=rep)
states = gsbs_extra.GSBS(kmax=n, x=subData[0, :, :], finetune=finetune)
states.fit()
recovered_bounds = np.double(states.get_bounds(k=n)>0)
permut_accuracy, permut_zaccuracy, distance = fit_metrics_simulation(real_bounds, recovered_bounds)
results['perm_accuracy'] = permut_accuracy
results['perm_zaccuracy'] = permut_zaccuracy
results['distance'] = distance
results['real_bounds'] = states.bounds
results['real_states'] = deltas_states(real_bounds)
return results
# simulation 1, vary state length and estimate how accurately we can recover state boundaries
def run_simulation_evlength(length_std, nstates_list, run_HMM, rep, TRfactor=1, finetune=1):
res = dict()
list2 = ['dists_GS','dists_HMM', 'dists_HMMsplit']
nstd = len(length_std)
nstates = len(nstates_list)
for key in list2:
res[key] = np.zeros([nstd, nstates, nstates_list[-1]])
list = ['sim_GS', 'sim_HMM','sim_HMMsplit', 'simz_GS', 'simz_HMM', 'simz_HMMsplit']
for key in list:
res[key] = np.zeros([nstd, nstates])
res['statesreal'] = np.zeros([nstd, nstates, ntime])
res['bounds'] = np.zeros([nstd, nstates, ntime])
res['bounds_HMMsplit'] = np.zeros([nstd, nstates, ntime])
for idxl, l in enumerate(length_std):
for idxn, n in enumerate(nstates_list):
print(rep, l)
bounds, subData, _ = generate_simulated_data_HRF(length_std=l, nstates=n, TRfactor=TRfactor, rep=rep)
states = gsbs_extra.GSBS(kmax=n, x=subData[0,:,:], finetune=finetune)
states.fit()
res['sim_GS'][idxl,idxn], res['simz_GS'][idxl, idxn],res['dists_GS'][idxl,idxn,0:n] = fit_metrics_simulation(bounds, np.double(states.get_bounds(k=n)>0))
res['bounds'][idxl,idxn,:] = states.bounds
res['statesreal'][idxl,idxn,:] = deltas_states(bounds)
if run_HMM is True:
ev = HMM(n, split_merge=False)
ev.fit(subData[0, :, :])
hmm_bounds = np.insert(np.diff(np.argmax(ev.segments_[0], axis=1)), 0, 0).astype(int)
res['sim_HMM'][idxl, idxn], res['simz_HMM'][idxl, idxn], res['dists_HMM'][idxl, idxn, 0:n] = fit_metrics_simulation(bounds, hmm_bounds)
ev = HMM(n, split_merge=True)
ev.fit(subData[0, :, :])
hmm_bounds_split = np.insert(np.diff(np.argmax(ev.segments_[0], axis=1)), 0, 0).astype(int)
res['sim_HMMsplit'][idxl, idxn], res['simz_HMMsplit'][idxl, idxn], res['dists_HMMsplit'][idxl, idxn, 0:n] = fit_metrics_simulation(bounds, hmm_bounds_split)
res['bounds_HMMsplit'][idxl, idxn, :] = hmm_bounds_split
return res
#simulation 2, how do the different fit measures compare, depending on how many states there are (more states should cause more similarity between distinct states)
def run_simulation_compare_nstates( nstates_list, mindist, run_HMM, finetune, zs, rep):
res2 = dict()
metrics = ['LL', 'WAC', 'tdist', 'mdist', 'meddist', 'mwu']
methods = ['HMM', 'GS']
types = ['optimum']
for metric, method, type_ in itertools.product(metrics, methods, types):
key = type_+"_"+metric+"_"+method
res2[key] = np.zeros(len(nstates_list))
metrics = ['LL', 'WAC', 'tdist']
methods = ['GS', 'HMM', 'HMMsplit']
types = ['sim', 'simz']
for metric, method, type_ in itertools.product(metrics, methods, types):
key = type_+"_"+metric+"_"+method
res2[key] = np.zeros(len(nstates_list))
list2 = ['tdist', 'WAC', 'mdist', 'meddist', 'LL_HMM', 'WAC_HMM',
'tdist_HMM', 'fit_W_mean', 'fit_W_std', 'fit_Ball_mean',
'fit_Ball_std', 'fit_Bcon_mean', 'fit_Bcon_std']
for i in list2:
res2[i] = np.zeros([len(nstates_list), maxK+1])
for idxl, l in enumerate(nstates_list):
print(rep, l)
real_bounds, subData,_ = generate_simulated_data_HRF(nstates=l, rep=rep)
data = subData[0, :, :]
states = gsbs_extra.GSBS(x=data, kmax=maxK, outextra=True, dmin=mindist, finetune=finetune)
states.fit()
res2['sim_GS_tdist'][idxl], res2['simz_GS_tdist'][idxl], dist = \
fit_metrics_simulation(real_bounds, states.deltas)
res2['sim_GS_WAC'][idxl], res2['simz_GS_WAC'][idxl], dist = \
fit_metrics_simulation(real_bounds, states.get_deltas(k=states.optimum_WAC))
for metric in ['tdist', 'WAC', 'meddist', 'mdist']:
res2[metric][idxl, :] = getattr(states, metric)
attr = "optimum_" + metric
optimum = getattr(states, attr)
key = "optimum_" + metric + "_GS"
res2[key][idxl] = optimum
res2['fit_W_mean'][idxl, :] = states.all_m_W
res2['fit_W_std'][idxl, :] = states.all_sd_W
res2['fit_Ball_mean'][idxl, :] = states.all_m_Ball
res2['fit_Ball_std'][idxl, :] = states.all_sd_Ball
res2['fit_Bcon_mean'][idxl, :] = states.all_m_Bcon
res2['fit_Bcon_std'][idxl, :] = states.all_sd_Bcon
# HMM
t = None
ind = None
for i in range(2, maxK):
res2['LL_HMM'][idxl, i], res2['WAC_HMM'][idxl, i], res2['tdist_HMM'][idxl, i], \
hmm_bounds, t, ind = compute_fits_hmm(data, i, mindist, type='HMM', y=None, t1=t, ind1=ind, zs=zs)
for metric in ['LL', 'WAC', 'tdist']:
keyword = "%s_HMM" % (metric)
optimum_ncluster = np.argmax(res2[keyword][idxl])
if metric == 'LL':
optimum_ncluster = np.argmax(res2[keyword][idxl][2:90]) + 2
res2['optimum_%s' % keyword] = optimum_ncluster
for method in ['HMM', 'HMMsplit']:
_, _, _, recovered_bounds, t, ind = compute_fits_hmm(
data=data,
k=optimum_ncluster,
mindist=1,
type=method,
y=None,
t1=t,
ind1=ind)
simm, simz, dist = fit_metrics_simulation(real_bounds, recovered_bounds)
keyword = 'sim_%s_%s' % (metric, method)
res2[keyword][idxl] = simm
keyword = 'simz_%s_%s' % (metric, method)
res2[keyword][idxl] = simz
return res2
#simulation 3, can we correctly estimate the number of states in the group when there is idiosyncrasy in state boundaries between participants?
def run_simulation_sub_noise( CV_list, sub_std_list, kfold_list, nsub, rep):
res3=dict()
list=['optimum', 'sim_GS','sim_GS_fixK', 'simz_GS', 'simz_GS_fixK']
for key in list:
res3[key] = np.zeros([np.shape(CV_list)[0], np.shape(sub_std_list)[0], np.shape(kfold_list)[0]])
res3['tdist'] = np.zeros([np.shape(CV_list)[0], np.shape(sub_std_list)[0], np.shape(kfold_list)[0], maxK + 1])
list = ['optimum_subopt', 'sim_GS_subopt', 'simz_GS_subopt']
for key in list:
res3[key] = np.zeros([np.shape(sub_std_list)[0], nsub])
for idxs, s in enumerate(sub_std_list):
bounds, subData,_ = generate_simulated_data_HRF(sub_std=s, nsub=nsub, rep=rep)
for idxi, i in enumerate(kfold_list):
print(rep, s, i)
if i>1:
kf = KFold(n_splits=i, shuffle=True)
for idxl, l in enumerate(CV_list):
tdist_temp = np.zeros([i,maxK+1]); optimum_temp = np.zeros(i); GS_sim_temp = np.zeros(i)
GS_sim_temp_fixK = np.zeros(i); simz_temp = np.zeros(i); simz_temp_fixK = np.zeros(i)
count=-1
for train_index, test_index in kf.split(np.arange(0,np.max(kfold_list))):
count=count+1
print(count)
if l is False:
states = gsbs_extra.GSBS(x=np.mean(subData[test_index, :, :], axis=0), kmax=maxK)
elif l is True:
states = gsbs_extra.GSBS(x=np.mean(subData[train_index, :, :], axis=0), y=np.mean(subData[test_index, :, :], axis=0), kmax=maxK)
states.fit()
optimum_temp[count] = states.nstates
tdist_temp[count, :] = states.tdists
GS_sim_temp[count], simz_temp[count], dist = fit_metrics_simulation(bounds, states.deltas)
GS_sim_temp_fixK[count] , simz_temp_fixK[count], dist = fit_metrics_simulation(bounds, states.get_deltas(k=nstates))
res3['optimum'][idxl, idxs, idxi] = np.mean(optimum_temp)
res3['sim_GS'][idxl, idxs, idxi] = np.mean(GS_sim_temp)
res3['sim_GS_fixK'][idxl, idxs, idxi] = np.mean(GS_sim_temp_fixK)
# COVID dataset input readers
#
# <EMAIL>, 2020
import sys
import numpy as np
from datetime import datetime,timedelta
from termcolor import colored
import os
import pandas as pd
#from datetime import datetime
#a = datetime.strptime(dt[0], '%Y-%m-%d')
def todiff(series):
"""
Turn cumulative series into differential
"""
series = np.diff(series, prepend=0)
# Fix possible NaN
series[~np.isfinite(series)] = 0
# Fix possible errors in data (cumulative were not monotonic)
ind = series < 0
if np.sum(series[ind]) != 0:
print(colored(f'{__name__}.todiff: fixing non-monotonic input (negative dx set to 0)', 'red'))
print(series)
series[ind] = 0
return series
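# Illustrative sketch (hypothetical numbers, not from any dataset) of what
# todiff() does: a cumulative series with a non-monotonic step has the
# resulting negative daily increment clipped to zero, e.g.
#
#   todiff(np.array([0, 1, 3, 3, 2, 6]))   # -> array([0, 1, 2, 0, 0, 4])
#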
def data_processor(meta):
"""
Dataset processor wrapper
"""
evalstr = f"{meta['function']}(meta)"
print(evalstr)
try:
d = eval(evalstr)
return d
except:
print(__name__ + f".data_processor: {colored('Failed to process','yellow')} {meta['isocode']}")
print(f'Error: {sys.exc_info()[0]} {sys.exc_info()[1]}')
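# Minimal usage sketch of data_processor(); the dict keys mirror those used by
# the reader functions below, but the concrete values here are hypothetical:
#
#   meta = {'function': 'data_reader_LA', 'isocode': 'US-LA-county',
#           'filename': 'la_county.csv', 'ascending': True, 'population': 1.0e7}
#   d = data_processor(meta)   # parsed dict with 'dt', 'cases', ... (None if the reader raises)
#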
def get_isocodes():
isodata = pd.read_csv('./data/iso.csv', comment='#')
code = np.array(isodata['code'])
return code
def get_european_isocodes():
isodata = pd.read_csv('./data/iso.csv', comment='#')
code = np.array(isodata['code'])
continent = np.array(isodata['continent'])
return code[continent == 4] # Europe only
def data_reader_swiss(meta):
"""
Swiss data format reader
"""
# --------------------------------------------------------------------
# DEATHS
df = pd.read_csv('./data/' + meta['filename_deaths'], comment='#')
df = df.sort_index(ascending=meta['ascending'], axis=0)
d = {}
d['dt'] = np.array(df["Date"])
# Turn cumulative into daily
d['deaths'] = todiff(df[meta['region']])
# --------------------------------------------------------------------
# Cases
df = pd.read_csv('./data/' + meta['filename_cases'], comment='#')
df = df.sort_index(ascending=meta['ascending'], axis=0)
# Turn cumulative into daily
d['cases'] = todiff(df[meta['region']])
# --------------------------------------------------------------------
# Tests
df = pd.read_csv('./data/' + meta['filename_tested'], comment='#')
df = df.sort_index(ascending=meta['ascending'], axis=0)
# Turn cumulative into daily
d['tests'] = todiff(df[meta['region']])
# --------------------------------------------------------------------
d['population'] = meta['population']
d['isocode'] = meta['isocode']
# --------------------------------------------------------------------
if (len(d['deaths']) != len(d['cases'])):
raise Exception(__name__ + '.data_reader_swiss: len(deaths) != len(cases)')
if (len(d['cases']) != len(d['tests'])):
raise Exception(__name__ + '.data_reader_swiss: len(cases) != len(tests)')
return d
def data_reader_sweden(meta):
d = {}
d['isocode'] = meta['isocode']
d['population'] = meta['population']
df = pd.read_csv('./data/' + meta['filename_cases'], comment='#')
df = df.loc[df["Region"] == meta['region']]
# --------------------------------------------------------------------
# Iterating the columns, find date columns
dt=list()
for col in df.columns:
if "2020-" in col:
dt.append(col)
d['dt'] = dt
# --------------------------------------------------------------------
# Cases
d['cases'] = np.array(df[dt])[0]
# --------------------------------------------------------------------
# Deaths
df = pd.read_csv('./data/' + meta['filename_deaths'], comment='#')
df = df.loc[df["Region"] == meta['region']]
d['deaths'] = np.array(df[dt])[0]
# --------------------------------------------------------------------
# Tests
# ** NOT AVAILABLE **
d['tests'] = np.zeros(len(dt))*np.nan
return d
def data_reader_usa(meta):
d = {}
d['population'] = meta['population']
d['isocode'] = meta['isocode']
# --------------------------------------------------------------------
# Deaths
df = pd.read_csv('./data/' + meta['filename'], comment='#')
df = df.loc[df["county"] == meta['region']]
d['dt'] = np.array(df['date'])
d['deaths'] = todiff(df['deaths'])
# --------------------------------------------------------------------
# Cases
d['cases'] = todiff(df['cases'])
# --------------------------------------------------------------------
# Tests
d['tests'] = np.zeros(len(d['dt']))*np.nan
return d
def data_reader_heinsberg(meta):
d = {}
d['population'] = meta['population']
d['isocode'] = meta['isocode']
# Cases data
#df = pd.read_csv('./data/' + meta['filename_cases'], comment='#')
#data = df.loc[df["county"] == meta['region']]
# --------------------------------------------------------------------
# Deaths
df = pd.read_csv('./data/' + meta['filename_deaths'], comment='#')
d['dt'] = np.array(df['date'])
d['deaths'] = np.array(df['deaths'])
# --------------------------------------------------------------------
# Cases
d['cases'] = np.zeros(len(d['dt']))*np.nan
# --------------------------------------------------------------------
# Tests
d['tests'] = np.zeros(len(d['dt']))*np.nan
return d
def data_reader_florida(meta):
d = {}
d['population'] = meta['population']
d['isocode'] = meta['isocode']
# Cases data
#df = pd.read_csv('./data/' + meta['filename_cases'], comment='#')
#data = df.loc[df["county"] == meta['region']]
# --------------------------------------------------------------------
# Deaths
df = pd.read_csv('./data/' + meta['filename_deaths'], comment='#')
d['dt'] = np.array(df['date'])
d['deaths'] = np.array(df['deaths'])
# --------------------------------------------------------------------
# Cases
d['cases'] = np.zeros(len(d['dt']))*np.nan #np.array(data["frequency"])
# --------------------------------------------------------------------
# Tests
d['tests'] = np.zeros(len(d['dt']))*np.nan
return d
def data_reader_LA(meta):
"""
LA County data format reader
"""
df = pd.read_csv('./data/' + meta['filename'], comment='#')
df = df.sort_index(ascending=meta['ascending'], axis=0)
d = {}
d['dt'] = np.array(df["date_dt"])
d['cases'] = np.array(df["new_case"])
import os
import time
import shutil
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
from torch.nn.utils import clip_grad_norm_
import numpy as np
from config import parser
args = parser.parse_args()
import pickle
from network import Two_Stream_RNN
from dataloader import Face_Dataset, UtteranceRecord
from sklearn.metrics import mean_squared_error
from torch.autograd import Variable as Variable
import copy
from tqdm import tqdm
import glob
from Same_Length_Sampler import SameLengthBatchSampler
import pandas as pd
class My_loss(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x, y):
vx = x - torch.mean(x)
vy = y - torch.mean(y)
rho = torch.sum(vx * vy) / (torch.sqrt(torch.sum(torch.pow(vx, 2))) * torch.sqrt(torch.sum(torch.pow(vy, 2))))
x_m = torch.mean(x)
y_m = torch.mean(y)
x_s = torch.std(x)
y_s = torch.std(y)
ccc = 2*rho*x_s*y_s/(torch.pow(x_s, 2) + torch.pow(y_s, 2) + torch.pow(x_m - y_m, 2))
return -ccc
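# Minimal usage sketch (illustrative tensors only): the module returns the
# negative concordance correlation coefficient, so minimizing it maximizes CCC.
#
#   criterion = My_loss()
#   pred = torch.tensor([0.1, 0.4, 0.3, 0.8])
#   target = torch.tensor([0.2, 0.5, 0.3, 0.9])
#   loss = criterion(pred, target)   # scalar tensor equal to -CCC(pred, target)
#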
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def ccc(y_true, y_pred):
true_mean = np.mean(y_true)
pred_mean = np.mean(y_pred)
v_pred = y_pred - pred_mean
v_true = y_true - true_mean
rho = np.sum(v_pred*v_true) / (np.sqrt(np.sum(v_pred**2)) * np.sqrt(np.sum(v_true**2)))
std_predictions = np.std(y_pred)
std_gt = np.std(y_true)
ccc = 2 * rho * std_gt * std_predictions / (
std_predictions ** 2 + std_gt ** 2 +
(pred_mean - true_mean) ** 2)
return ccc, rho
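# Quick sanity check (illustrative values): for identical, non-constant arrays
# both the CCC and the Pearson correlation are exactly 1.
#
#   y = np.array([0.1, 0.4, 0.3, 0.8])
#   ccc(y, y)   # -> (1.0, 1.0)
#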
def check_rootfolders():
"""Create log and model folder"""
folders_util = [args.root_log, args.root_model, args.root_output, args.root_tensorboard]
folders_util = ["%s/"%(args.save_root) +folder for folder in folders_util]
for folder in folders_util:
if not os.path.exists(folder):
print('creating folder ' + folder)
os.makedirs(folder)
def main():
root_path = args.root_path
label_name = args.label_name
if args.cnn == 'resnet50':
feature_root = '/media/newssd/OMG_experiments/Extracted_features/resnet50_ferplus_features_fps=30_pool5_7x7_s1'
elif args.cnn == 'vgg':
feature_root = '/media/newssd/OMG_experiments/Extracted_features/vgg_fer_features_fps=30_pool5'
if len(args.store_name)==0:
args.store_name = '_'.join( [label_name,
'cnn:{}'.format(args.cnn),
'loss_type:{}'.format(args.loss_type),
'batch_size:{}'.format(args.batch_size),
'cat_before_gru:{}'.format(args.cat_before_gru),
'freeze:{}'.format(args.freeze),
'fusion:{}'.format(args.fusion)])
if len(args.save_root)==0:
setattr(args, 'save_root', args.store_name)
else:
setattr(args, 'save_root', os.path.join(args.save_root, args.store_name))
print("save experiment to :{}".format(args.save_root))
check_rootfolders()
num_class = 1 if not "_" in args.label_name else 2
setattr(args, 'num_class', num_class)
if args.loss_type == 'mse':
criterion = nn.MSELoss().cuda()
elif args.loss_type=='ccc':
criterion = My_loss().cuda()
else: # unsupported loss type
raise ValueError("Unknown loss type")
L = args.length
train_dict = pickle.load(open(args.train_dict, 'rb'))
val_dict = pickle.load(open(args.val_dict, 'rb'))
train_dict.update(val_dict)
train_val_dict = copy.copy(train_dict)
video_names = sorted(list(train_dict.keys()))
np.random.seed(0)
video_indexes = np.random.permutation(len(video_names))
video_names = [video_names[i] for i in video_indexes]
if args.test:
run_5_fold_prediction_on_test_set(feature_root)
for i in range(5):
########################### Modify the classifier ###################
model = Two_Stream_RNN(mlp_hidden_units=args.hidden_units, phase_size=48, phase_channels=2*L,
phase_hidden_size=256, cat_before_gru=args.cat_before_gru, gru_hidden = 64, gru_num_layers=2, fusion=args.fusion)
########################### Modify the classifier ###################
pytorch_total_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
print("Total Params: {}".format(pytorch_total_params))
phasenet_param = sum(p.numel() for p in model.phase_net.parameters() if p.requires_grad)
print("Temporal Stream params: {} ({:.2f})".format( phasenet_param, phasenet_param/float(pytorch_total_params)))
mlp_param = sum(p.numel() for p in model.mlp.parameters() if p.requires_grad)
print("Spatial Stream params: {} ({:.2f})".format( mlp_param, mlp_param/float(pytorch_total_params)))
model.cuda()
if args.cat_before_gru:
params_dict = [{'params': model.rnns.parameters(), 'lr':args.lr},
{'params': model.classifier.parameters(), 'lr':args.lr},
{'params': model.fusion_module.parameters(), 'lr':args.lr}]
else:
params_dict = [{'params': model.rnns_spatial.parameters(), 'lr':args.lr},
{'params': model.rnns_temporal.parameters(), 'lr':args.lr},
{'params': model.classifier.parameters(), 'lr':args.lr},
{'params': model.fusion_module.parameters(), 'lr':args.lr}]
if not args.freeze:
params_dict += [{'params': model.mlp.parameters(), 'lr':args.lr},
{'params': model.phase_net.parameters(), 'lr':args.lr}]
optimizer = torch.optim.SGD(params_dict, # do not set learn rate for mlp and
args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
torch.cuda.empty_cache()
cudnn.benchmark = True
length = len(video_names)//5
# five fold cross validation
val_video_names = video_names[i*length:(i+1)*length]
if i==4:
val_video_names = video_names[i*length:]
train_video_names = [name for name in video_names if name not in val_video_names]
train_video_names = video_names # delete it later
train_dict = {key:train_val_dict[key] for key in train_video_names}
val_dict = {key:train_val_dict[key] for key in val_video_names}
train_dataset = Face_Dataset([os.path.join(root_path,'Train'), os.path.join(root_path,'Validation')], feature_root, train_dict, label_name, py_level=args.py_level,
py_nbands=args.py_nbands, sample_rate = args.sample_rate, num_phase=L, phase_size=48, test_mode=False,
return_phase=False)
val_dataset = Face_Dataset([os.path.join(root_path,'Train'), os.path.join(root_path,'Validation')], feature_root, val_dict, label_name, py_level=args.py_level,
py_nbands=args.py_nbands, sample_rate = args.sample_rate, num_phase=L, phase_size=48, test_mode=True,
return_phase=False)
train_batch_sampler = SameLengthBatchSampler(train_dataset.indices_list, batch_size=args.batch_size, drop_last=True)
val_batch_sampler = SameLengthBatchSampler(val_dataset.indices_list, batch_size = args.eval_batch_size, drop_last=True, random=False)
train_loader = torch.utils.data.DataLoader(
train_dataset,
batch_sampler=train_batch_sampler,
num_workers=args.workers, pin_memory=False)
val_loader = torch.utils.data.DataLoader(
val_dataset,
batch_sampler=val_batch_sampler,
num_workers=args.workers, pin_memory=False)
print("train dataset:{}".format(len(train_dataset)))
print("val dataset:{}".format(len(val_dataset)))
log = open(os.path.join(args.save_root, args.root_log, 'fold_{}.txt'.format(i)), 'w')
output = "\n Fold: {}\n".format(i)
log.write(output)
log.flush()
best_loss = 1000
best_ccc = -100
val_accum_epochs = 0
for epoch in range(args.epochs):
adjust_learning_rate(optimizer, epoch, args.lr_steps)
train_mean, train_std = train(train_loader, model, criterion, optimizer, epoch, log)
log_train_mean_std = open(os.path.join(args.save_root, args.root_log, 'mean_std_{}.txt'.format(i)), 'w')
log_train_mean_std.write("{} {}".format(train_mean, train_std))
log_train_mean_std.flush()
torch.cuda.empty_cache()
if (epoch + 1) % args.eval_freq == 0 or epoch == args.epochs - 1:
loss_val, ccc_current_val = validate(val_loader, model, criterion, (epoch + 1) * len(train_loader), log, train_mean, train_std)
is_best_loss = loss_val< best_loss
best_loss = min(loss_val, best_loss)
is_best_ccc = ccc_current_val >best_ccc
best_ccc = max(ccc_current_val , best_ccc)
save_checkpoint({
'epoch': epoch + 1,
'state_dict': model.state_dict(),
}, is_best_loss, is_best_ccc, filename='fold_{}'.format(i))
if not is_best_ccc:
val_accum_epochs+=1
else:
val_accum_epochs=0
if val_accum_epochs>=args.early_stop:
print("validation ccc did not improve over {} epochs, stop".format(args.early_stop))
break
run_5_fold_prediction_on_test_set(feature_root)
def run_5_fold_prediction_on_test_set(feature_root):
test_dataset = Face_Dataset(os.path.join(args.root_path,'Test'), feature_root, args.test_dict, args.label_name, py_level=args.py_level,
py_nbands=args.py_nbands, sample_rate = args.sample_rate, num_phase=args.num_phase, phase_size=48, test_mode=True,
return_phase=False)
print("test dataset:{}".format(len(test_dataset)))
test_batch_sampler = SameLengthBatchSampler(test_dataset.indices_list, batch_size = args.eval_batch_size, drop_last=False, random=False)
test_loader = torch.utils.data.DataLoader(
test_dataset,
batch_sampler=test_batch_sampler,
num_workers=args.workers, pin_memory=False)
for i in range(5):
file = open(os.path.join(args.save_root, args.root_log, 'mean_std_{}.txt'.format(i)), 'r')
string = file.readline()
train_mean, train_std = string.split(" ")
train_mean = float(train_mean)
train_std = float(train_std)
# resume
model = Two_Stream_RNN(mlp_hidden_units=args.hidden_units, phase_size=48, phase_channels=2*args.num_phase, phase_hidden_size=256, cat_before_gru=args.cat_before_gru)
model.cuda()
saved_model_path = os.path.join(args.save_root, args.root_model, 'fold_{}_best_ccc.pth.tar'.format(i))
checkpoint = torch.load(saved_model_path)
start_epoch = checkpoint['epoch']
model.load_state_dict(checkpoint['state_dict'])
print(("=> loading checkpoint '{}' epoch:{}".format(saved_model_path, start_epoch)))
preds, names = test(test_loader, model,train_mean=train_mean, train_std=train_std)
df= pd.DataFrame()
df['video'] = pd.Series([n.split(" ")[0] for n in names])
df['utterance'] = pd.Series([n.split(" ")[1] for n in names])
df[args.label_name] = pd.Series([v for v in preds])
df.to_csv(os.path.join(args.save_root, args.root_log, 'test_predictions_{}.csv'.format(i)), index=False)
def train(dataloader, model, criterion, optimizer, epoch, log):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
end = time.time()
optimizer.zero_grad()
model.train()
targets = []
for i, data_batch in enumerate(dataloader):
data_time.update(time.time() - end)
phase_0, phase_1, rgb_features, label, names = data_batch
phase_0 = Variable(phase_0.type('torch.FloatTensor').cuda())
phase_1 = Variable(phase_1.type('torch.FloatTensor').cuda())
rgb_features = Variable(rgb_features.type('torch.FloatTensor').cuda())
label_var = Variable(label.type('torch.FloatTensor').cuda())
out = model([phase_0, phase_1, rgb_features])
loss= criterion(out.squeeze(-1), label_var)
loss.backward()
optimizer.step() # We have accumulated enough gradients
optimizer.zero_grad()
targets.append(label_var.data.cpu().numpy() )
# measure elapsed time
batch_time.update(time.time() - end)
losses.update(loss.item(), label_var.size(0))
end = time.time()
if i % args.print_freq == 0:
output = ('Epoch: [{0}][{1}/{2}], lr: {lr:.6f}\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
.format( epoch, i, len(dataloader), batch_time=batch_time,
data_time=data_time, loss=losses, lr=optimizer.param_groups[-1]['lr']))
print(output)
log.write(output + '\n')
log.flush()
torch.cuda.empty_cache()
targets = np.concatenate([array for array in targets], axis=0)
train_mean, train_std = np.mean(targets), np.std(targets)
return train_mean, train_std
def validate(dataloader, model, criterion, iter, log, train_mean=None, train_std=None):
batch_time = AverageMeter()
losses = AverageMeter()
# switch to evaluate mode
model.eval()
# df = pd.DataFrame(columns = ['video','utterance',str(args.label_name)+'_target', str(args.label_name)+'_prediction'])
end = time.time()
targets, preds = [], []
for i, data_batch in enumerate(dataloader):
phase_0, phase_1, rgb_features, label, names = data_batch
if (torch.sum(torch.isnan(phase_0))>0) or (torch.sum(torch.isnan(phase_1))>0) or (torch.sum(torch.isnan(rgb_features))>0):
print()
with torch.no_grad():
phase_0 = Variable(phase_0.type('torch.FloatTensor').cuda())
phase_1 = Variable(phase_1.type('torch.FloatTensor').cuda())
rgb_features = Variable(rgb_features.type('torch.FloatTensor').cuda())
label_var = Variable(label.type('torch.FloatTensor').cuda())
out = model([phase_0, phase_1, rgb_features])
targets.append(label_var.data.cpu().numpy() )
preds.append(out.squeeze(-1).data.cpu().numpy())
loss = criterion(out.squeeze(-1), label_var)
losses.update(loss.item(), label_var.size(0))
# if np.isnan(losses.avg):
# print() # caused by batch size =1
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
output = ('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
.format(
i, len(dataloader), batch_time=batch_time, loss=losses))
print(output)
log.write(output + '\n')
log.flush()
torch.cuda.empty_cache()
targets, preds = np.concatenate([array for array in targets], axis=0), np.concatenate([array for array in preds], axis=0)
mse_func = mean_squared_error
ccc_score = ccc(targets, preds)[0]
mse_loss = mse_func(targets, preds)
if train_mean is None:
output = ' Validation : [{0}][{1}], ccc: {ccc_score:.4f} , loss:{loss_mse:.4f}'.format( i, len(dataloader),
ccc_score=ccc_score, loss_mse = loss)
else:
ccc_corr = ccc(targets, correct(preds, train_mean, train_std))[0]
output = ' Validation : [{0}][{1}], ccc: {ccc_score:.4f}({ccc_corr:.4f}) , mse:{loss_mse:.4f}({loss_mse_c:.4f})'.format( i, len(dataloader),
ccc_score=ccc_score, ccc_corr=ccc_corr, loss_mse = mse_loss, loss_mse_c = mse_func(targets, correct(preds, train_mean, train_std)))
ccc_score = ccc_corr
print(output)
log.write(output + '\n')
log.flush()
return loss, ccc_score
def test(dataloader, model, train_mean=None, train_std=None):
print("Testing...")
# switch to evaluate mode
model.eval()
preds = []
names = []
for i, data_batch in tqdm(enumerate(dataloader)):
phase_0, phase_1, rgb_features, label, name_batch = data_batch
with torch.no_grad():
phase_0 = Variable(phase_0.type('torch.FloatTensor').cuda())
phase_1 = Variable(phase_1.type('torch.FloatTensor').cuda())
rgb_features = Variable(rgb_features.type('torch.FloatTensor').cuda())
out = model([phase_0, phase_1, rgb_features])
preds.append(out.squeeze(-1).data.cpu().numpy())
names.append(name_batch)
preds = np.concatenate([array for array in preds], axis=0)
names = np.concatenate([array for array in names], axis=0)
preds = correct(preds, train_mean, train_std)
return preds, names
def adjust_learning_rate(optimizer, epoch, lr_steps):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
decay = 0.1 ** (sum(epoch >= np.array(lr_steps)))
"""Various utilities functions."""
import numpy as np
def pack_layers(i, hiddens, o):
"""Create the full NN topology from input size, hidden layers, and output."""
layers = []
layers.append(i)
for h in hiddens:
layers.append(h)
layers.append(o)
return layers
def scarcify(X, u, N):
"""Randomly split a dataset into train-val subsets."""
idx = np.random.choice(X.shape[0], N, replace=False)
# from __future__ import division
#-------------------------------------
#
# Started at 06/08/2018 (YuE)
#
# This script is based on the previous script
# threeApproachesComparison_v6.py
#
## Upgraded version of python (python3.4): the script was rewritten to take into
# account some differences in the descriptions and use of some functions
# (version cma_v3 and earlier scripts are written under python2).
#
# 07/24/2018: IT IS NOT FINISHED:
#
# Which are still unsatisfactory:
# 1) the absolute values of frictional forces for all methods of calculation,
# 2) their dependence on the ion velocity.
#
# But nevertheless, the dependences of the transmitted energy on the impact
# parameter are close to the inverse quadratic (as it should be!) at all velocities.
#
# 07/27/2018: IT IS NOT FINISHED:
#
# Which are still unsatisfactory:
# 1) the absolute values of frictional forces for all methods of calculation,
# 2) their dependence on the ion velocity.
# The investigation of that is in progress.
#
# Some features were improved, some figures were corrected.
#
#-------------------------------------
#========================================================
#
# This code compares two approaches: "classical" (from [1]) and
# "magnus" (from [2]).
#
# For "classical" approach the magnetized interaction between ion
# and electron is considered for ion velocities V_i > rmsTrnsvVe.
#
# References:
#
# [1] <NAME>, <NAME>, <NAME>, <NAME>.
# "Physics guide of BETACOOL code. Version 1.1". C-A/AP/#262, November
# 2006, Brookhaven National Laboratory, Upton, NY 11973.
# [2] <NAME>, <NAME>. "New Algorithm for Dynamical Friction
# of Ions in a Magnetized Electron Beam". AIP Conf. Proc. 1812, 05006 (2017).
#
#========================================================
#########################################################
#
# Main issues of the calculations:
#
# 1) Friction force (FF) is calculated in the (P)article (R)est (F)rame,
# i.e. in the frame moving together with both (cooled and cooling)
# beams at a velocity V0;
# 2) Friction force is calculated for each value of ion velocity
# in the interval from .1*rmsTrnsvVe till 10*rmsTrnsvVe;
# 3) Initially it is assumed that all electrons have a longitudinal
# velocity rmsLongVe and transversal velocity rmsTrnsvVe;
# 4) For each ion velocity the minimal and maximal values of the
# impact parameter are defined. Radius of the shielding of the
# electric field of the ion equals to the value of the maximal
# impact parameter;
# 5) For each impact parameter in the interval from minimal till
#    maximal values the transferred momenta deltap_x,y,z are
# calculated;
# 6) The found transferred momenta allow one to calculate the transferred
# energy delta_E =deltap^2/(2*m_e) and to integrate it over
# impact parameter; then (expressions (3.4), (3.5) from [1]):
# FF =-2*pi*n_e*integral_rhoMin^rhoMax delta_E*rho*drho;
# 7) For taking into account the velocity distribution of the
# electrons it is necessary to repeat these calculations for
# each value of the electron's velocity and then integrate result
# over distribution of the velocities.
#
# 10/26/2018:
#
# 8) Item 6 is wrong and the correct expression for the transferred
# energy delta_E will be used;
# 9) Method (my own) Least Squares Method - LSM is used to fit the
# dependence of transferred momenta on impact parameter;
#
#
# 11/08/2018:
#
# 10) Two functions ('fitting' and 'errFitAB') are defined to realize
#     my LSM to find the parameters of the fitting and the error of this
# fitting;
#
# 11) Analysis of different dependences between values; graphical
# presentation of these dependences;
#
#########################################################
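#
# Illustrative sketch only (simplified relative to the expressions actually
# used below; see item 8 above): once the transferred energy deltaE(rho) has
# been tabulated on a grid of impact parameters rho, the radial integral of
# item 6 can be evaluated numerically, e.g.
#
#   # frctnForce = -2.*pi*n_e*np.trapz(deltaE*rho,rho)   # erg/cm
#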
import os, sys
import numpy as np
import math
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib.colors import LogNorm
from matplotlib import ticker
from matplotlib import markers
import matplotlib as mpl
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from matplotlib.legend_handler import HandlerLine2D
import scipy.integrate as integrate
from scipy.integrate import quad, nquad, dblquad
from scipy.constants import pi
from scipy import optimize
from statistics import mean
from array import array
#
# All physical constants have its dimension in units in the system CI.
# This code uses units in the system CGS!
#
from scipy.constants import speed_of_light as clight
from scipy.constants import epsilon_0 as eps0
from scipy.constants import mu_0 as mu0
from scipy.constants import elementary_charge as qe
from scipy.constants import electron_mass as me
from scipy.constants import proton_mass as mp
from scipy.constants import Boltzmann as kB
pi=3.14159265358
#
# Physical constants:
#
m_e=9.10938356e-28 # electron mass, g
m_elec=m_e # to keep variable from previous script
m_p=1.672621898e-24 # proton mass, g
M_ion = m_p # to keep variable from previous script
q_e=4.803204673e-10 # electron charge, CGSE unit: sqrt(g*cm^3/sec^2)
q_elec=q_e # to keep variable from previous script
Z_ion = q_e # to keep variable from previous script
cLight=2.99792458e10 # speed of light, cm/sec
eVtoErg=1.6021766208e-12 # 1 eV = 1.6...e-12 erg
CtoPart=2.99792458e9 # 1 C = 1 A*sec = 2.9...e9 particles
m_e_eV = m_e*cLight**2/eVtoErg
#
# Electron beam parameters:
#
Ekin=3.0e4 # kinetic energy, eV
curBeam=0.5 # current density, A/cm^2
dBeam=3.0 # beam diameter, cm
angSpread=3.0 # angular spread, mrad
trnsvT=0.5 # transversal temperature, eV
longT=2.0e-4 # longitudinal temperature, eV (was 2.0e-4)
nField=1 # number of values of the magnetic field
fieldB=np.zeros(nField) # magnetic field
fieldB[0]=3.e3 # Gs
omega_p=1.0e9 # plasma frequency, 1/sec
n_e=omega_p**2*m_e/(4.*pi*q_e**2) # plasma density, 3.1421e+08 cm-3
n_e1=8.e7 # plasma density, cm-3
omega_p1=np.sqrt(4.*pi*n_e1*q_e**2/m_e) # plasma frequency, 5.0459e+08 1/s
#
# Cooling system parameter:
#
coolLength=150.0 # typical length of the cooling section, cm
#
# HESR:
#
Ekin=90.8e4 # HESR kinetic energy, eV
curBeam=0.5 # HESR current beam, A
dBeam=2.0 # HESR beam diameter, cm
angSpread=0.0 # HESR angular spread, mrad
trnsvT=0.2 # HESR transversal temperature, eV
longT=1.0e-2 # HESR longitudinal temperature, eV (was 2.0e-4)
fieldB[0]=1.e3 # HESR, Gs
coolLength=270.0 # HESR typical length of the cooling section, cm
#
# EIC:
#
angSpread=0.0 # EIC angular spread, mrad
fieldB[0]=5.e4 # EIC, Gs
coolLength=300.0 # EIC typical length of the cooling section, cm
#
# Calculated parameters of the electron beam:
#
V0 = cLight*np.sqrt(Ekin/m_e_eV*(Ekin/m_e_eV+2.))/(Ekin/m_e_eV+1.)
print ('V0 =%e' % V0)
tetaV0=0. # angle between V0 and magnetic field, rad
B_mag=fieldB[0]*np.cos(tetaV0) # magnetic field acting on an electron, Gs
rmsTrnsvVe=np.sqrt(2.*trnsvT*eVtoErg/m_e) # RMS transversal velocity, cm/s
rmsLongVe=np.sqrt(2.*longT*eVtoErg/m_e) # RMS longitudinal velocity, cm/s
# HESR:
dens=curBeam*(CtoPart/q_e)/(pi*(.5*dBeam)**2*V0) # density, 1/cm^3
omega=np.sqrt(4.*pi*dens*q_e**2/m_e) # plasma frequency, 1/s
n_e=dens
omega_p=omega
print ('HESR: dens = %e,omega_p = %e' % (dens,omega_p))
# EIC:
rmsLongVe = 1.0e+7 # cm/s
longT = .5*m_e*rmsLongVe**2/eVtoErg
rmsTrnsvVe = 4.2e+7 # cm/s
trnsvT = .5*m_e*rmsTrnsvVe**2/eVtoErg
print ('EIC: rmsLongVe = %e, longT = %e, rmsTrnsvVe = %e, trnsvT = %e' % \
(rmsLongVe,longT,rmsTrnsvVe,trnsvT))
dens=2.e9 # density, 1/cm^3
omega=np.sqrt(4.*pi*dens*q_e**2/m_e) # plasma frequency, 1/s
n_e=dens
omega_p=omega
print ('EIC: dens = %e,omega_p = %e' % (dens,omega_p))
cyclFreq=q_e*B_mag/(m_e*cLight) # cyclotron frequency, 1/s
rmsRoLarm=rmsTrnsvVe*cyclFreq**(-1) # RMS Larmor radius, cm
dens=omega_p**2*m_e/(4.*pi*q_e**2) # density, 1/cm^3
likeDebyeR=(3./dens)**(1./3.) # "Debye" sphere with 3 electrons, cm
eTempTran=trnsvT # to keep variable from previous script
eTempLong=longT # to keep variable from previous script
coolPassTime=coolLength/V0 # time to pass through the cooling section, sec
thetaVi=0. # polar angle ion and cooled electron beams, rad
phiVi=0. # azimuth angle ion and cooled electron beams, rad
powV0=round(np.log10(V0))
mantV0=V0/(10**powV0)
pow_n_e=round(np.log10(n_e))
mant_n_e=n_e/(10**pow_n_e)
#
# Formfactor ffForm for friction force:
#
# ffForm = 2*pi*dens*q_e**4/(m_e*V0**2)=
# = 0.5*omega_p**2*q_e**2/V0**2
#
# Dimension of ffForm is force: g*cm/sec**2=erg/cm
#
# 1 MeV/m = 1.e6*eVtoErg/100. g*cm/sec**2 = 1.e4*eVtoErg erg/cm
MeV_mToErg_cm=1.e4*eVtoErg
# ffForm=-.5*omega_p**2*q_e**2/V0**2/MeV_mToErg_cm # MeV/m
eV_mToErg_m=100.*eVtoErg
# ffForm=-.5*omega_p**2*q_e**2/V0**2/eV_mToErg_m # =-6.8226e-12 eV/m
eV_mInErg_cm=100.*eVtoErg
ffForm=-.5*omega_p**2*q_e**2/V0**2/eVtoErg # =-6.8226e-10 eV/cm
ffForm=100.*ffForm # =-6.8226e-08 eV/m
ergToEV = 1./1.60218e-12
#
# Relative velocities of electrons:
#
relVeTrnsv=rmsTrnsvVe/V0
relVeLong=rmsLongVe/V0
print ('V0=%e cm/s, rmsTrnsvVe=%e cm/s (rel = %e), rmsLongVe=%e cm/s (rel = %e)' % \
(V0,rmsTrnsvVe,relVeTrnsv,rmsLongVe,relVeLong))
# Indices:
(Ix, Ipx, Iy, Ipy, Iz, Ipz) = range(6)
stepsNumberOnGyro = 25 # number of the steps on each Larmor period
'''
#
# Opening the input file:
#
inputFile='areaOfImpactParameter_tAC-v6_fig110.data'
print ('Open input file "%s"...' % inputFile)
inpfileFlag=0
try:
inpfile = open(inputFile,'r')
inpfileFlag=1
except:
print ('Problem to open input file "%s"' % inputFile)
if inpfileFlag == 1:
print ('No problem to open input file "%s"' % inputFile)
lines=0 # Number of current line from input file
dataNumber=0 # Number of current value of any types of Data
xAboundary=np.zeros(100)
xBboundary=np.zeros(100)
while True:
lineData=inpfile.readline()
# print ('line=%d: %s' % (lines,lineData))
if not lineData:
break
lines += 1
if lines > 4:
words=lineData.split()
nWords=len(words)
# print ('Data from %d: words=%s, number of entries = %d' % (lines,words,nWords))
xAboundary[dataNumber]=float(words[0])
xBboundary[dataNumber]=float(words[1])
dataNumber += 1
inpfile.close()
print ('Close input file "%s"' % inputFile)
'''
#====================================================================
#
#------------------ Begin of defined functions -----------------------
#
# Larmor frequency electron:
#
def omega_Larmor(mass,B_mag):
return (q_elec)*B_mag/(mass*clight*1.e+2) # rad/sec
#
# Derived quantities:
#
omega_L = omega_Larmor(m_elec,B_mag) # rad/sec
T_larm = 2*pi/omega_L # sec
timeStep = T_larm/stepsNumberOnGyro # time step, sec
print ('omega_Larmor= %e rad/sec, T_larm = %e sec, timeStep = %e sec' % \
(omega_L,T_larm,timeStep))
nLarmorAvrgng=10 # number of averaged Larmor rotations
#
# Data to integrate transferred momenta over the track:
#
timeStep_c=nLarmorAvrgng*stepsNumberOnGyro*timeStep # sec
print ('timeStep_c = %e s' % timeStep_c)
eVrmsTran = np.sqrt(2.*eTempTran*eVtoErg/m_elec) # cm/sec
eVrmsLong = np.sqrt(2.*eTempLong*eVtoErg/m_elec) # cm/sec
kinEnergy = m_elec*(eVrmsTran**2+eVrmsLong**2)/2. # kinetic energy; erg
print ('eVrmsTran = %e cm/sec, eVrmsLong = %e cm/sec, kinEnergy = %e eV' % \
(eVrmsTran,eVrmsLong,ergToEV*kinEnergy))
ro_larmRMS = eVrmsTran/omega_L # cm
print ('ro_larmRMS =%e mkm' % (1.e4*ro_larmRMS))
#
# Electrons are magnetized for impact parameter >> rhoCrit:
#
rhoCrit=math.pow(q_elec**2/(m_elec*omega_L**2),1./3) # cm
print ('rhoCrit (mkm) = ' , 1.e+4*rhoCrit)
#
# Conversion from 6-vector of electron's "coordinates" to 6-vector
# of guiding-center coordinates:
# z_e=(x_e,px_e,y_e,py_e,z_e,pz_e) --> zgc_e=(phi,p_phi,y_gc,p_gc,z_e,pz_e);
#
def toGuidingCenter(z_e):
mOmega=m_elec*omega_L # g/sec
zgc_e=z_e.copy() # 6-vector
zgc_e[Ix] = np.arctan2(z_e[Ipx]+mOmega*z_e[Iy],z_e[Ipy]) # radians
zgc_e[Ipx]= (((z_e[Ipx]+mOmega*z_e[Iy])**2+z_e[Ipy]**2)/(2.*mOmega)) # g*cm**2/sec
zgc_e[Iy] =-z_e[Ipx]/mOmega # cm
zgc_e[Ipy]= z_e[Ipy]+mOmega*z_e[Ix] # g/sec
return zgc_e
#
# Conversion from 6-vector of guiding-center coordinates to 6-vector
# of electron's "coordinates":
# zgc_e=(phi,p_phi,y_gc,p_gc,z_e,pz_e) --> z_e=(x_e,px_e,y_e,py_e,z_e,pz_e);
#
def fromGuidingCenter(zgc_e):
mOmega=m_elec*omega_L # g/sec
rho_larm=np.sqrt(2.*zgc_e[Ipx]/mOmega) # cm
z_e = zgc_e.copy() # 6-vector
z_e[Ix] = zgc_e[Ipy]/mOmega-rho_larm*np.cos(zgc_e[Ix]) # cm
z_e[Ipx]=-mOmega*zgc_e[Iy] # g*cm/sec
z_e[Iy] = zgc_e[Iy]+rho_larm*np.sin(zgc_e[Ix]) # cm
z_e[Ipy]= mOmega*rho_larm*np.cos(zgc_e[Ix]) # g*cm/sec
return z_e
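#
# Consistency-check sketch (arbitrary illustrative values): the two conversions
# are mutually inverse, so the round trip reproduces z_e up to round-off:
#
#   # z_e = np.array([1.e-4,m_elec*1.e7,-2.e-4,m_elec*2.e7,0.,m_elec*1.e6])
#   # print (np.max(np.abs(fromGuidingCenter(toGuidingCenter(z_e))-z_e)))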
#
# Matrix to drag electron through the solenoid with field 'B_mag'
# during time interval 'deltaT':
#
def solenoid_eMatrix(B_mag,deltaT):
slndMtrx=np.identity(6)
omega_L=omega_Larmor(m_elec,B_mag) # rad/sec
mOmega= m_elec*omega_L # g/sec
phi=omega_L*deltaT # phase, rad
cosPhi=math.cos(phi) # dimensionless
sinPhi=math.sin(phi) # dimensionless
cosPhi_1=2.*math.sin(phi/2.)**2 # dimensionless
slndMtrx[Iy, Iy ]= cosPhi # dimensionless
slndMtrx[Ipy,Ipy]= cosPhi # dimensionless
slndMtrx[Iy, Ipy]= sinPhi/mOmega # sec/g
slndMtrx[Ipy,Iy ]=-mOmega*sinPhi # g/sec
slndMtrx[Iz, Ipz]= deltaT/m_elec # sec/g
slndMtrx[Ix, Ipx]= sinPhi/mOmega # sec/g
slndMtrx[Ix, Iy ]= sinPhi # dimensionless
slndMtrx[Ix, Ipy]= cosPhi_1/mOmega # sec/g
slndMtrx[Iy, Ipx]=-cosPhi_1/mOmega # sec/g
slndMtrx[Ipy,Ipx]=-sinPhi # dimensionless
return slndMtrx
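#
# Usage sketch (illustrative variable name 'z_elec'): one tracking step of
# duration 'timeStep' is a matrix-vector product with the electron 6-vector:
#
#   # z_elec = np.dot(solenoid_eMatrix(B_mag,timeStep),z_elec)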
#
# Matrix to drag particle through the drift during time interval 'deltaT':
#
def drift_Matrix(M_prtcl,deltaT):
driftMtrx = np.identity(6)
for i in (Ix,Iy,Iz):
driftMtrx[i,i+1]=deltaT/M_prtcl # sec/g
return driftMtrx
#
# Matrix to drag electron in the "guiding center" system during time interval 'deltaT':
#
def guidingCenter_Matrix(deltaT):
gcMtrx = np.identity(6)
gcMtrx[Iz,Ipz]=deltaT/m_elec # sec/g
return gcMtrx
#
# Description of the collision during time interval 'deltaT'
# in the system coordinates of "guiding center" of electron
# input - 6-vectors for electron and ion before collision and time step deltaT;
# output - transfered momenta to ion and electron:
#
def guidingCenterCollision(vectrElec_gc,vectrIon,deltaT):
dpIon=np.zeros(3)
dpElec=np.zeros(3)
mOmegaLarm=m_elec*omega_L # g/sec
dpFactor_gc=q_elec**2 # g*cm^3/sec^2
rhoLarm_gc=np.sqrt(2.*vectrElec_gc[1]/mOmegaLarm) # cm
sinOmega_gc=math.sin(vectrElec_gc[0])
cosOmega_gc=math.cos(vectrElec_gc[0])
x_gc=vectrElec_gc[3]/mOmegaLarm # cm
numer=(vectrIon[0]-x_gc)*cosOmega_gc- \
(vectrIon[2]-vectrElec_gc[2])*sinOmega_gc # cm
denom=((vectrIon[0]-x_gc)**2+(vectrIon[2]-vectrElec_gc[2])**2+ \
(vectrIon[4]-vectrElec_gc[4])**2+rhoLarm_gc**2)**(3/2) # cm^3
action=vectrElec_gc[1]+dpFactor_gc*numer*rhoLarm_gc/(omega_L*denom) # g*cm^2/sec
b_gc=np.sqrt((vectrIon[0]-x_gc)**2+ \
(vectrIon[2]-vectrElec_gc[2])**2+ \
(vectrIon[4]-vectrElec_gc[4])**2+2.*action/mOmegaLarm) # cm
# Dimensions of dpIon, dpElec are g*cm/sec:
dpIon[0]=-dpFactor_gc*deltaT*(vectrIon[0]-x_gc)/b_gc**3
dpIon[1]=-dpFactor_gc*deltaT*(vectrIon[2]-vectrElec_gc[2])/b_gc**3
dpIon[2]=-dpFactor_gc*deltaT*(vectrIon[4]-vectrElec_gc[4])/b_gc**3
dpElec[0]=-dpIon[0]
dpElec[1]=-dpIon[1]
dpElec[2]=-dpIon[2]
# print ('dpIon[0]=%e, dpIon[1]=%e, dpIon[2]=%e' % \
# (dpIon[0],dpIon[1],dpIon[2]))
return dpIon,dpElec,action,b_gc
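#
# Usage sketch (hypothetical variable names): inside a tracking loop the
# returned momentum transfer updates the ion momentum components, e.g.
#
#   # dpIon,dpElec,action,b_gc = guidingCenterCollision(z_elecCrrnt_gc,z_ionCrrnt,timeStep_c)
#   # for k in range(3):
#   #    z_ionCrrnt[2*k+1] += dpIon[k]   # Ipx,Ipy,Ipz components of the ion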
#
# "Magnus expansion" description of the collision during time interval 'deltaT'
# in the system coordinates of "guiding center" of electron
# input - 6-vectors for electron and ion before collision and time step deltaT;
# output - transfered momenta to ion and electron and electron y_gc coordinate
# as well calculated parameters C1,C2,C3,b,D1,D2,q for testing:
#
def MagnusExpansionCollision(vectrElec_gc,vectrIon,deltaT):
# print ('Ion: x=%e, y=%e, z=%e' % (vectrIon[0],vectrIon[2],vectrIon[4]))
# print ('Electron: x=%e, y=%e, z=%e' %
# (vectrElec_gc[0],vectrElec_gc[4],vectrElec_gc[4]))
dpIon=np.zeros(3)
dpElec=np.zeros(3)
mOmegaLarm=m_elec*omega_L # g/sec
dpFactor_gc=q_elec**2 # g*cm^3/sec^2
rhoLarm_gc=np.sqrt(2.*vectrElec_gc[1]/mOmegaLarm) # cm
sinOmega_gc=math.sin(vectrElec_gc[0])
cosOmega_gc=math.cos(vectrElec_gc[0])
x_gc=vectrElec_gc[3]/mOmegaLarm # cm
numer=(vectrIon[0]-x_gc)*cosOmega_gc- \
(vectrIon[2]-vectrElec_gc[2])*sinOmega_gc # cm
denom=((vectrIon[0]-x_gc)**2+(vectrIon[2]-vectrElec_gc[2])**2+ \
(vectrIon[4]-vectrElec_gc[4])**2+rhoLarm_gc**2)**(3./2.) # cm^3
action=vectrElec_gc[1]+dpFactor_gc*numer*rhoLarm_gc/(omega_L*denom) # g*cm^2/sec
# C1=np.sqrt((vectrIon[0]-x_gc)**2+ \
# (vectrIon[2]-vectrElec_gc[2])**2+ \
# (vectrIon[4]-vectrElec_gc[4])**2+2.*action/mOmegaLarm) # cm^2
C1=(vectrIon[0]-x_gc)**2+(vectrIon[2]-vectrElec_gc[2])**2+ \
(vectrIon[4]-vectrElec_gc[4])**2+2.*action/mOmegaLarm # cm^2
C2=2.*((vectrIon[0]-x_gc)*vectrIon[1]/M_ion+ \
(vectrIon[2]-vectrElec_gc[2])*vectrIon[3]/M_ion+ \
(vectrIon[4]-vectrElec_gc[4])* \
(vectrIon[5]/M_ion-vectrElec_gc[5]/m_elec)) # cm^2/sec
C3=(vectrIon[1]/M_ion)**2+(vectrIon[3]/M_ion)**2+ \
(vectrIon[5]/M_ion-vectrElec_gc[5]/m_elec)**2 # cm^2/sec^2
b=np.sqrt(C1+C2*deltaT+C3*deltaT**2) # cm
D1=(2.*C3*deltaT+C2)/b-C2/np.sqrt(C1) # cm/sec
D2=(C2*deltaT+2.*C1)/b-2.*np.sqrt(C1) # cm
q=4.*C1*C3-C2**2 # cm^4/sec^2
# Dimensions of dpIon, dpElec are g*cm/sec:
dpIon[0]=-2.*dpFactor_gc/q*((vectrIon[0]-x_gc)*D1-vectrIon[1]/M_ion*D2)
dpIon[1]=-2.*dpFactor_gc/q*((vectrIon[2]-vectrElec_gc[2])*D1- \
vectrIon[3]/M_ion*D2)
dpIon[2]=-2.*dpFactor_gc/q*((vectrIon[4]-vectrElec_gc[4])*D1- \
(vectrIon[5]/M_ion-vectrElec_gc[5]/m_elec)*D2)
dpElec[0]=-dpIon[0]
dpElec[1]=-dpIon[1]
dpElec[2]=-dpIon[2]
dy_gc=dpIon[0]/mOmegaLarm # cm
# print ('dpIon[0]=%e, dpIon[1]=%e, dpIon[2]=%e' % \
# (dpIon[0],dpIon[1],dpIon[2]))
return dpIon,dpElec,action,dy_gc,C1,C2,C3,b,D1,D2,q
#
# Minimized functional (my own Least Squares Method - LSM;
# Python has its own routine for LSM - see site
# http://scipy-cookbook.readthedocs.io/items/FittingData.html):
#
# Functional = {log10(funcY) - [fitB*log10(argX) + fitA]}^2
#
def fitting(nPar1,nPar2,argX,funcY):
log10argX = np.zeros((nPar1,nPar2))
log10funcY = np.zeros((nPar1,nPar2))
for i in range(nPar2):
for n in range(nPar1):
log10argX[n,i] = np.log10(argX[n,i])
log10funcY[n,i] = np.log10(funcY[n,i])
sumArgX = np.zeros(nPar2)
sumArgX2 = np.zeros(nPar2)
sumFuncY = np.zeros(nPar2)
sumArgXfuncY= np.zeros(nPar2)
fitA = np.zeros(nPar2)
fitB = np.zeros(nPar2)
for i in range(nPar2):
for n in range(nPar1):
sumArgX[i] += log10argX[n,i]
sumArgX2[i] += log10argX[n,i]**2
sumFuncY[i] += log10funcY[n,i]
sumArgXfuncY[i] += log10argX[n,i]*log10funcY[n,i]
delta = sumArgX[i]**2-nPar1*sumArgX2[i]
fitA[i] = (sumArgX[i]*sumArgXfuncY[i]-sumArgX2[i]*sumFuncY[i])/delta
fitB[i] = (sumArgX[i]*sumFuncY[i]-nPar1*sumArgXfuncY[i])/delta
# print ('fitA(%d) = %e, fitB(%d) = %e' % (i,fitA[i],i,fitB[i]))
argXfit = np.zeros((nPar1,nPar2))
funcYfit = np.zeros((nPar1,nPar2))
funcHi2 = np.zeros(nPar2)
for i in range(nPar2):
factorA = math.pow(10.,fitA[i])
for n in range(nPar1):
argXfit[n,i] = math.pow(10.,log10argX[n,i])
funcYfit[n,i] = factorA*math.pow(argXfit[n,i],fitB[i])
funcHi2[i] += (np.log10(abs(funcY[n,i])) - np.log10(abs(funcYfit[n,i])))**2
return fitA,fitB,funcHi2,argXfit,funcYfit
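#
# Illustrative cross-check of 'fitting' (commented out so it does not affect the run).
# For hypothetical power-law data funcY = 2*argX^(-1.5) the least-squares fit in log-log
# space must return fitB ~ -1.5 and fitA ~ log10(2) ~ 0.301 (the same coefficients as a
# first-order polynomial fit of log10(funcY) against log10(argX)):
#
# nP1,nP2 = 8,1
# argX_test = np.tile(np.linspace(1.,10.,nP1)[:,np.newaxis],(1,nP2))
# funcY_test = 2.*argX_test**(-1.5)
# fitA_t,fitB_t,funcHi2_t,argXfit_t,funcYfit_t = fitting(nP1,nP2,argX_test,funcY_test)
# print ('fitA = %e (expected %e), fitB = %e (expected -1.5)' % \
#        (fitA_t[0],math.log10(2.),fitB_t[0]))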
#
# +-Errors for fitted parameters fitA and fitB:
#
def errFitAB(nPar1,nPar2,argX,funcY,fitA,fitB,funcHi2,errVar,errType):
log10argX = np.zeros((nPar1,nPar2))
log10funcY = np.zeros((nPar1,nPar2))
sumArgX = np.zeros(nPar2)
sumArgX2 = np.zeros(nPar2)
posErrFit = np.zeros(nPar2)
negErrFit = np.zeros(nPar2)
# return posErrFit,negErrFit
stepA = 5.e-4*mean(funcHi2)
stepB = 1.e-4*mean(funcHi2)
# print ('errFitAB: mean(funcHi2) = %e, stepA = %e, stepB = %e' % (mean(funcHi2),stepA,stepB))
for i in range(nPar2):
for n in range(nPar1):
log10argX[n,i] = np.log10(argX[n,i])
log10funcY[n,i] = np.log10(funcY[n,i])
sumArgX[i] += log10argX[n,i]
sumArgX2[i] += log10argX[n,i]**2
for i in range(nPar2):
k = 0
deltaFuncHi2 = 0.
while (deltaFuncHi2 < 1.):
k += 1
if k > 2000:
print ('Break in errFitAB (Fit funcY: case %d); positive error) for %d' % (errVar,i))
break
# print ('i=%d: fitParamtr = %e, funcHi2 = %e' % (i,fitParamtr[i], funcHi2[i]))
curFitA = fitA[i]
if (int(errVar) == 1):
curFitA = fitA[i] + k*stepA
curFuncHi2 = 0.
factorA = math.pow(10.,curFitA)
curFitB = fitB[i]
if (int(errVar) == 2):
curFitB = fitB[i] + k*stepB
curFuncHi2 = 0.
for n in range(nPar1):
curArgX = math.pow(10.,log10argX[n,i])
curFuncYfit = factorA*math.pow(curArgX,curFitB)
curFuncHi2 += (np.log10(abs(curFuncYfit)) - log10funcY[n,i])**2
deltaFuncHi2 = curFuncHi2 - funcHi2[i]
if (int(errVar) == 1):
posErrFit[i] = abs(curFitA - fitA[i])
else:
posErrFit[i] = abs(curFitB - fitB[i])
func1sigma2 = funcHi2[i]/(nPar2-3)
if (int(errVar) == 1):
fitSigma = np.sqrt(sumArgX2[i]/(nPar2*sumArgX2[i]-sumArgX[i]**2)*func1sigma2)
else:
fitSigma = np.sqrt(nPar2/(nPar2*sumArgX2[i]-sumArgX[i]**2)*func1sigma2)
if (int(errType) == 2):
posErrFit[i] = fitSigma
# if (int(errVar) == 1):
# print ('i=%d: fitA = %e + %e (%e), funcHi2 = %e (for %d steps curFuncHi2 = %e)' % \
# (i,fitA[i],posErrFit[i],fitSigma,funcHi2[i],k,curFuncHi2))
# else:
# print ('i=%d: fitB = %e + %e (%e), funcHi2 = %e (for %d steps curFuncHi2 = %e)' % \
# (i,fitB[i],posErrFit[i],fitSigma,funcHi2[i],k,curFuncHi2))
for i in range(nPar2):
k = 0
deltaFuncHi2 = 0.
while (deltaFuncHi2 < 1.):
k += 1
if k > 2000:
print ('Break in errFitAB (Fit funcY: case %d); negative error) for %d' % (errVar,i))
break
curFitA = fitA[i]
if (int(errVar) == 1):
curFitA = fitA[i] - k*stepA
factorA = math.pow(10.,curFitA)
curFitB = fitB[i]
if (int(errVar) == 2):
curFitB = fitB[i] - k*stepB
curFuncHi2 = 0.
for n in range(nPar1):
curArgX = math.pow(10.,log10argX[n,i])
curFuncYfit = factorA*math.pow(curArgX,curFitB)
curFuncHi2 += (np.log10(abs(curFuncYfit)) - log10funcY[n,i])**2
deltaFuncHi2 = curFuncHi2 - funcHi2[i]
if (int(errVar) == 1):
negErrFit[i] = abs(curFitA - fitA[i])
else:
negErrFit[i] = abs(curFitB - fitB[i])
if (int(errType) == 2):
negErrFit[i] = posErrFit[i]
# if (errVar == 1):
# print ('i=%d: fitA = %e - %e, funcHi2 = %e (for %d steps curFuncHi2 = %e)' % \
# (i,fitA[i],posErrFit[i],funcHi2[i],k,curFuncHi2))
# else:
# print ('i=%d: fitB = %e - %e, funcHi2 = %e (for %d steps curFuncHi2 = %e)' % \
# (i,fitB[i],negErrFit[i],funcHi2[i],k,curFuncHi2))
return posErrFit,negErrFit
def fittedGKintegration(xMin,xMax,fitA,fitB):
#
# "Gauss-Kronrod" method of integration (GK)
#
#
    # Points (psi_i) and weights (w_i) to integrate over the interval from -1 to 1;
# These data are from <NAME>. "Handbook of Mathematical Science".
# 5th Edition, CRC Press, Inc, 1978.
#
# To integrate for interval from 0 to 1 it is necessary to change points
# psi_i with points ksi_i=(1+psi_i)/2;
#
# For method with order N for function F(x):
# int_(-1)^1 = sum_1^N [w_i* F(psi_i)];
#
# In case of integration over interval from a to b:
# int_(a)^b = (b-a)/2 * sum_1^N [w_i* F(x_i)], where
# x_i = (b-a)*psi_i/2+(a+b)/2.
#
#----------------------------------------------------
#
# Data for GK:
#
#----------------------------------------------------
nPoints_GK = 16
psi_16=np.array([-0.9894009, -0.9445750, -0.8656312, -0.7554044, -0.6178762, \
-0.4580168, -0.2816036, -0.0950125, 0.0950125, 0.2816036, \
0.4580168, 0.6178762, 0.7554044, 0.8656312, 0.9445750, \
0.9894009])
w_16 =np.array([ 0.0271525, 0.0622535, 0.0951585, 0.1246290, 0.1495960, \
0.1691565, 0.1826034, 0.1894506, 0.1894506, 0.1826034, \
0.1691565, 0.1495960, 0.1246290, 0.0951585, 0.0622535, \
0.0271525])
y = np.zeros(nPoints_GK)
yIntegrated = 0.
for n in range(nPoints_GK):
xCrrnt = psi_16[n]*(xMax-xMin)/2 + (xMax+xMin)/2.
factorA = math.pow(10.,fitA)
y[n] = factorA*math.pow(xCrrnt,fitB)
yIntegrated += (xMax-xMin)*w_16[n]*y[n]*xCrrnt
return y,yIntegrated
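#
# Illustrative check of 'fittedGKintegration' (commented out so it does not affect the
# run).  As written, the accumulated sum weights the fitted power law F(x)=10^fitA*x^fitB
# by an extra factor 2*x, i.e. yIntegrated corresponds to 2*int_xMin^xMax x*F(x)dx.  For a
# hypothetical polynomial case (fitB=1) the quadrature should reproduce the analytic value
# 2*10^fitA*(xMax^3-xMin^3)/3:
#
# fitA_chk,fitB_chk = 0.3,1.0
# y_chk,yIntegrated_chk = fittedGKintegration(1.e-3,1.e-1,fitA_chk,fitB_chk)
# exact_chk = 2.*math.pow(10.,fitA_chk)*((1.e-1)**3-(1.e-3)**3)/3.
# print ('GK check: %e (analytic %e)' % (yIntegrated_chk,exact_chk))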
#------------------ End of defined functions -----------------------
#
#====================================================================
sphereNe=3.
R_e=math.pow(sphereNe/n_e,1./3) # cm
print ('R_e (cm)=%e' % R_e)
ro_Larm = eVrmsTran/omega_L # cm
print ('ro_Larm (cm)=%e' % ro_Larm)
impctPrmtrMin=2.*ro_Larm
# rhoDependenceFlag = 1 # skip calculation of rho dependence if = 0!
#============ Important flags ===========================
#
# Taking into account the transfer of momenta for both particles
# (for "classical" only):
dpTransferFlag = 1 # no taking into account if = 0!
#
saveFilesFlag = 0 # no saving if = 0!
#
plotFigureFlag = 1            # plotting blocks below are executed only if this flag = 0!
#
#========================================================
nVion=50
Vion=np.zeros(nVion)
VionLong=np.zeros(nVion)
VionTrnsv=np.zeros(nVion)
VionRel=np.zeros(nVion)
vIonMin=4.e-3*eVrmsTran
vIonMax=10.*eVrmsTran
vIonMinRel=vIonMin/V0
vIonMaxRel=vIonMax/V0
print ('VionMin=%e (vIonMinRel=%e), vIonMax=%e (vIonMaxRel=%e)' % \
(vIonMin,vIonMinRel,vIonMax,vIonMaxRel))
vIonLogStep=math.log10(vIonMax/vIonMin)/(nVion-1)
R_debye=np.zeros(nVion)
R_pass=np.zeros(nVion)
R_pass_1=np.zeros(nVion) # for longT=0. --> eVrmsLong=0.
impctPrmtrMax=np.zeros(nVion)
impctPrmtrMax_1=np.zeros(nVion) # for longT=0. --> eVrmsLong=0.
for i in range(nVion):
crrntLogVionRel=math.log10(vIonMinRel)+i*vIonLogStep
VionRel[i]=math.pow(10.,crrntLogVionRel)
Vion[i]=VionRel[i]*V0
VionLong[i]=Vion[i]*np.cos(thetaVi)
VionTrnsv[i]=Vion[i]*np.sin(thetaVi)
R_debye[i]=np.sqrt(Vion[i]**2+eVrmsTran**2+eVrmsLong**2)/omega_p
R_pass[i]=np.sqrt(Vion[i]**2+eVrmsLong**2)*coolPassTime
R_pass_1[i]=np.sqrt(Vion[i]**2+0.*eVrmsLong**2)*coolPassTime
help=max(R_debye[i],R_e)
impctPrmtrMax[i]=min(help,R_pass[i])
impctPrmtrMax_1[i]=min(help,R_pass_1[i])
#-----------------------------------------------------------------
# Checking the correction of the maximal impact parameter depending
# on the preset minimal number of Larmor turns
#
larmorTurnsMin=[10,20,30,40]
impctPrmtrMaxCrrctd=np.zeros((nVion,4))
impctPrmtrMaxCrrctdRel=np.zeros((nVion,4))
for n in range (4):
for i in range(nVion):
impctPrmtrMaxCrrctd[i,n]=impctPrmtrMax[i]* \
np.sqrt(1.- (pi*larmorTurnsMin[n]*eVrmsLong/omega_L/impctPrmtrMax[i])**2)
impctPrmtrMaxCrrctdRel[i,n]=impctPrmtrMaxCrrctd[i,n]/impctPrmtrMax[i]
#
# First plotting:
#
if (plotFigureFlag == 0):
fig10 = plt.figure(10)
plt.semilogx(impctPrmtrMax,impctPrmtrMaxCrrctdRel[:,0],'-r', \
impctPrmtrMax,impctPrmtrMaxCrrctdRel[:,1],'-b', \
impctPrmtrMax,impctPrmtrMaxCrrctdRel[:,2],'-g', \
impctPrmtrMax,impctPrmtrMaxCrrctdRel[:,3],'-m',linewidth=2)
plt.grid(True)
hold=True
plt.xlabel('Maximal Impact parameter $R_{max}$, cm',color='m',fontsize=16)
plt.ylabel('$R_{max}^{Crrctd}/R_{Max}$',color='m',fontsize=16)
# plt.xlim([.9*min(impctPrmtrMax),1.1*max(impctPrmtrMax)])
plt.xlim([1.e-2,1.1*max(impctPrmtrMax)])
plt.ylim([.986,1.001])
titleHeader='$R_{max}^{Crrctd}=R_{Max} \cdot [1-(\pi\cdot N_{Larm} \cdot'
   titleHeader += '\Delta_{e||}/(\omega_{Larm} \cdot R_{max}))^2]^{1/2}$'
plt.title(titleHeader,color='m',fontsize=16)
plt.legend([('$N_{Larm}=$%2d' % larmorTurnsMin[0]), \
('$N_{Larm}=$%2d' % larmorTurnsMin[1]), \
('$N_{Larm}=$%2d' % larmorTurnsMin[2]), \
('$N_{Larm}=$%2d' % larmorTurnsMin[3])],loc='lower center',fontsize=14)
if (saveFilesFlag == 1):
fig10.savefig('picturesCMA/correctedRmax_fig10cma.png')
print ('File "picturesCMA/correctedRmax_fig10cma.png" is written')
xLimit=[.9*VionRel[0],1.1*VionRel[nVion-1]]
#
# Types of collisions:
#
if (plotFigureFlag == 0):
fig3151=plt.figure (3151)
plt.loglog(VionRel,impctPrmtrMax,'-r', VionRel,impctPrmtrMax_1,'--r', \
[VionRel[0],VionRel[nVion-1]],[impctPrmtrMin,impctPrmtrMin],'-b',linewidth=2)
plt.grid(True)
hold=True
plt.xlabel('Relative Ion Velocity, $V_i/V_{e0}$',color='m',fontsize=14)
plt.ylabel('Impact Parameter, cm',color='m',fontsize=14)
titleHeader= \
'Types of Collisions: $V_{e0}=%4.2f\cdot10^{%2d}$ cm/s, $B=%6.1f$ Gs'
plt.title(titleHeader % (mantV0,powV0,fieldB[0]),color='m',fontsize=16)
plt.xlim(xLimit)
yLimit=[8.e-4,.6]
plt.ylim(yLimit)
plt.plot([relVeTrnsv,relVeTrnsv],yLimit,'--m',linewidth=1)
plt.text(1.6e-3,5.e-4,'$ \Delta V_{e\perp}/ V_{e0}$',color='m',fontsize=14)
plt.plot([relVeLong,relVeLong],yLimit,'--m',linewidth=1)
plt.text(4.4e-5,.0018,'$ \Delta V_{e||}/ V_{e0}$',color='m',fontsize=14)
plt.text(3.e-4,1.75e-3,'$R_{min}=2\cdot<rho_\perp>$',color='k',fontsize=16)
plt.text(7.e-4,5.e-2,'$R_{max}$',color='k',fontsize=16)
plt.text(2.85e-5,3.3e-3,'$R_{max}$ $for$ $T_{e||}=0$',color='k',fontsize=16)
plt.plot([VionRel[0],VionRel[nVion-1]],[20.*rhoCrit,20.*rhoCrit],color='k')
plt.text(1.e-4,7.e-3,'Magnetized Collisions',color='r',fontsize=20)
plt.text(1.e-4,10.e-4,'Adiabatic or Fast Collisions',color='r',fontsize=20)
plt.text(2.25e-5,.275,'Collisions are Screened',color='r',fontsize=20)
plt.text(1.6e-5,1.e-3,'$ \cong 20\cdot R_{Crit}$',color='k',fontsize=16)
if (saveFilesFlag == 1):
fig3151.savefig('picturesCMA_v7/impctPrmtr_fig3151cma.png')
print ('File "picturesCMA_v7/impctPrmtr_fig3151cma.png" is written')
#
# Picture for HESR:
#
if (plotFigureFlag == 0):
fig3151=plt.figure (3151)
plt.loglog(VionRel,impctPrmtrMax,'-r', VionRel,impctPrmtrMax_1,'--r', \
[VionRel[0],VionRel[nVion-1]],[impctPrmtrMin,impctPrmtrMin],'-b',linewidth=2)
plt.grid(True)
hold=True
plt.xlabel('Relative Ion Velocity, $V_i/V_{e0}$',color='m',fontsize=14)
plt.ylabel('Impact Parameter, cm',color='m',fontsize=14)
titleHeader= \
'HESR Types of Collisions: $V_{e0}=%3.1f\cdot10^{%2d}$cm/s, $B=%3.1f$T'
plt.title(titleHeader % (mantV0,powV0,1.e-4*fieldB[0]),color='m',fontsize=16)
plt.xlim(xLimit)
yLimit=[8.e-4,.6]
plt.ylim(yLimit)
plt.plot([relVeTrnsv,relVeTrnsv],yLimit,'--m',linewidth=1)
plt.text(4.4e-4,8.4e-4,'$ \Delta V_{e\perp}/ V_{e0}$',color='m',fontsize=14)
plt.plot([relVeLong,relVeLong],yLimit,'--m',linewidth=1)
plt.text(1.e-4,8.4e-4,'$ \Delta V_{e||}/ V_{e0}$',color='m',fontsize=14)
plt.text(3.7e-6,3.4e-3,'$R_{min}=2\cdot<rho_\perp>$',color='b',fontsize=16)
plt.text(2.8e-4,.1,'$R_{max}$',color='k',fontsize=16)
plt.text(1.e-4,1.8e-2,'$R_{max}$ $for$ $T_{e||}=0$',color='k',fontsize=16)
plt.plot([VionRel[0],VionRel[nVion-1]],[20.*rhoCrit,20.*rhoCrit],color='k')
plt.text(6.8e-5,7.e-3,'Magnetized Collisions',color='r',fontsize=20)
plt.text(6.8e-5,1.2e-3,'Weak Collisions',color='r',fontsize=20)
plt.text(2.3e-5,1.95e-3,'Adiabatic or Fast Collisions',color='r',fontsize=20)
plt.text(2.e-5,.275,'Screened Collisions',color='r',fontsize=20)
plt.text(3.58e-6,2.05e-3,'$\cong$20$\cdot$$R_{Crit}$',color='k',fontsize=16)
if (saveFilesFlag == 1):
# fig3151.savefig('picturesCMA_v7/impctPrmtr_fig3151cma.png')
# print ('File "picturesCMA_v7/impctPrmtr_fig3151cma.png" is written')
fig3151.savefig('HESRimpctPrmtr_fig3151cma.png')
print ('File "HESRimpctPrmtr_fig3151cma.png" is written')
#
# Picture for EIC:
#
if (plotFigureFlag == 0):
fig3151=plt.figure (3151)
plt.loglog(VionRel,impctPrmtrMax,'-r', VionRel,impctPrmtrMax_1,'--r', \
[VionRel[0],VionRel[nVion-1]],[impctPrmtrMin,impctPrmtrMin],'-b',linewidth=2)
plt.grid(True)
hold=True
plt.xlabel('Relative Ion Velocity, $V_i/V_{e0}$',color='m',fontsize=14)
plt.ylabel('Impact Parameter, cm',color='m',fontsize=14)
titleHeader= \
'EIC Types of Collisions: $V_{e0}=%3.1f\cdot10^{%2d}$cm/s, $B=%3.1f$T'
plt.title(titleHeader % (mantV0,powV0,1.e-4*fieldB[0]),color='m',fontsize=16)
plt.xlim(xLimit)
yLimit=[5.e-5,.3]
plt.ylim(yLimit)
plt.plot([relVeTrnsv,relVeTrnsv],yLimit,'--m',linewidth=1)
plt.text(9.e-4,4.e-5,'$ \Delta V_{e\perp}/ V_{e0}$',color='m',fontsize=14)
plt.plot([relVeLong,relVeLong],yLimit,'--m',linewidth=1)
plt.text(1.7e-4,3.e-5,'$ \Delta V_{e||}/ V_{e0}$',color='m',fontsize=14)
plt.text(6.3e-6,1.1e-4,'$R_{min}=2\cdot<rho_\perp>$',color='b',fontsize=16)
plt.text(1.e-4,2.1e-2,'$R_{max}$',color='k',fontsize=16)
plt.text(2.57e-5,5.e-3,'$R_{max}$ $for$ $T_{e||}=0$',color='k',fontsize=16)
plt.plot([VionRel[0],VionRel[nVion-1]],[20.*rhoCrit,20.*rhoCrit],color='k')
plt.text(2.3e-5,1.e-3,'Magnetized Collisions',color='r',fontsize=20)
# plt.text(6.8e-5,1.2e-3,'Weak Collisions',color='r',fontsize=20)
plt.text(1.1e-5,5.7e-5,'Weak or Adiabatic or Fast Collisions',color='r',fontsize=16)
plt.text(2.e-5,.15,'Screened Collisions',color='r',fontsize=20)
plt.text(2.5e-3,1.7e-4,'$\cong$20$\cdot$$R_{Crit}$',color='k',fontsize=16)
if (saveFilesFlag == 1):
# fig3151.savefig('picturesCMA_v7/impctPrmtr_fig3151cma.png')
# print ('File "picturesCMA_v7/impctPrmtr_fig3151cma.png" is written')
fig3151.savefig('EICimpctPrmtr_fig3151cma.png')
print ('File "EICimpctPrmtr_fig3151cma.png" is written')
# plt.show()
# sys.exit()
#
# Magnetized collisions:
#
if (plotFigureFlag == 0):
fig209=plt.figure (209)
plt.loglog(VionRel,R_debye,'-r',VionRel,R_pass,'-b', \
VionRel,R_pass_1,'--b',linewidth=2)
plt.grid(True)
hold=True
plt.plot([VionRel[0],VionRel[nVion-1]],[R_e,R_e],color='m',linewidth=2)
plt.xlabel('Relative Ion Velocity, $V_i/V_{e0}$',color='m',fontsize=16)
plt.ylabel('$R_{Debye}$, $R_{Pass}$, $R_e$, cm',color='m',fontsize=16)
# titleHeader='Magnetized Collision: $R_{Debye}$, $R_{Pass}$, $R_e$: $V_{e0}=%5.3f\cdot10^{%2d}$cm/s'
# plt.title(titleHeader % (mantV0,powV0),color='m',fontsize=16)
plt.title('Magnetized Collisions: $R_{Debye}$, $R_{Pass}$, $R_e$',color='m',fontsize=16)
plt.xlim(xLimit)
yLimit=[1.e-3,10.]
plt.ylim(yLimit)
plt.plot([relVeTrnsv,relVeTrnsv],yLimit,'--m',linewidth=1)
plt.text(1.6e-3,5.5e-4,'$ \Delta V_{e\perp}/ V_{e0}$',color='m',fontsize=14)
plt.plot([relVeLong,relVeLong],yLimit,'--m',linewidth=1)
plt.text(4.4e-5,0.001175,'$ \Delta V_{e||}/ V_{e0}$',color='m',fontsize=14)
plt.text(3.e-5,2.45e-3,'$R_e$',color='k',fontsize=16)
plt.text(3.e-5,5.e-2,'$R_{Debye}$',color='k',fontsize=16)
plt.text(3.e-5,1.8e-2,'$R_{Pass}$',color='k',fontsize=16)
plt.text(4.5e-5,4.8e-3,'$R_{Pass}$ $for$ $T_{e||}=0$',color='k',fontsize=16)
plt.text(8.3e-5,4.0,('$V_{e0}=%5.3f\cdot10^{%2d}$cm/s' % (mantV0,powV0)), \
color='m',fontsize=16)
if (saveFilesFlag == 1):
fig209.savefig('picturesCMA/rDebye_rLikeDebye_rPass_fig209cma.png')
print ('File "picturesCMA/rDebye_rLikeDebye_rPass_fig209cma.png" is written')
#
# Coulomb logarithm evaluation:
#
clmbLog = np.zeros(nVion)
for i in range(nVion):
clmbLog[i] = math.log(impctPrmtrMax[i]/impctPrmtrMin)
# clmbLog[i] = math.log(impctPrmtrMax_1[i]/impctPrmtrMin)
if (plotFigureFlag == 0):
fig3155=plt.figure (3155)
plt.semilogx(VionRel,clmbLog,'-xr',linewidth=2)
plt.xlabel('Relative Ion Velocity, $V_i/V_{e0}$',color='m',fontsize=14)
plt.ylabel('Coulomb Logarithm $L_c$',color='m',fontsize=14)
plt.title('Coulomb Logarithm: $L_c$ = $ln(R_{max}/R_{min})$',color='m',fontsize=16)
yLimit=[min(clmbLog)-.1,max(clmbLog)+.1]
plt.ylim(yLimit)
plt.plot([relVeTrnsv,relVeTrnsv],yLimit,'--m',linewidth=1)
plt.text(1.6e-3,5.,'$ \Delta V_{e\perp}/ V_{e0}$',color='m',fontsize=14)
plt.plot([relVeLong,relVeLong],yLimit,'--m',linewidth=1)
plt.text(3.4e-5,5.,'$ \Delta V_{e||}/ V_{e0}$',color='m',fontsize=14)
if (saveFilesFlag == 1):
fig3155.savefig('picturesCMA_v7/coulombLogrthm_fig3155cma.png')
print ('File "picturesCMA_v7/coulombLogrthm_fig3155cma.png" is written')
#
# matrix for electron with .5*timeStep_c:
#
matr_elec_c=guidingCenter_Matrix(.5*timeStep_c)
#
# matrix for ion with mass M_ion and .5*timeStep_c:
#
matr_ion_c=drift_Matrix(M_ion,.5*timeStep_c)
larmorTurns = 10
nImpctPrmtr = 50
rhoMin = impctPrmtrMin
rhoMax = np.zeros(nVion)
log10rhoMin = math.log10(rhoMin)
crrntImpctPrmtr = np.zeros(nImpctPrmtr)
halfLintr = np.zeros((nImpctPrmtr,nVion))
pointAlongTrack = np.zeros((nImpctPrmtr,nVion))
totalPoints = 0
for i in range(nVion):
rhoMax[i] = impctPrmtrMax[i]* \
np.sqrt(1.- (pi*larmorTurns*eVrmsLong/omega_L/impctPrmtrMax[i])**2)
rhoMax[i] = impctPrmtrMax[i]
# rhoMax[i] = impctPrmtrMax_1[i] # for checking!
# print ('rhoMax(%d) = %e' % (i,rhoMax[i]))
log10rhoMax = math.log10(rhoMax[i])
log10rhoStep = (log10rhoMax-log10rhoMin)/(nImpctPrmtr)
# print ('Vion(%d) = %e, rhoMax = %e' % (i,Vion[i],rhoMax[i]))
for n in range(nImpctPrmtr):
log10rhoCrrnt = log10rhoMin+(n+0.5)*log10rhoStep
rhoCrrnt = math.pow(10.,log10rhoCrrnt)
# print (' rhoCrrnt(%d) = %e' % (n,rhoCrrnt))
halfLintr[n,i] = np.sqrt(rhoMax[i]**2-rhoCrrnt**2) # half length of interaction; cm
timeHalfPath = halfLintr[n,i]/eVrmsLong # 0.5 time of interaction; sec
numbLarmor = int(2.*timeHalfPath/T_larm)
pointAlongTrack[n,i] = int(2.*timeHalfPath/timeStep_c)
totalPoints += pointAlongTrack[n,i]
# print (' %d: rhoCrrnt = %e, numbLarmor = %d, pointAlongTrack = %d' % \
# (n,rhoCrrnt,numbLarmor,pointAlongTrack[n,i]))
# print ('totalPoints = %d' % totalPoints)
totalPoints = int(totalPoints)
nnTotalPoints=np.arange(0,2*totalPoints-1,1)
arrayA=np.zeros(2*totalPoints)
arrayB=np.zeros(2*totalPoints)
bCrrnt_c = np.zeros(2*totalPoints)
#
# Variables for different testing:
#
b_gc = np.zeros(totalPoints)
action_gc = np.zeros(totalPoints)
C1test = np.zeros(totalPoints)
C2test = np.zeros(totalPoints)
C3test = np.zeros(totalPoints)
b_ME = np.zeros(totalPoints)
D1test = np.zeros(totalPoints)
D2test = np.zeros(totalPoints)
qTest = np.zeros(totalPoints)
action_ME = np.zeros(totalPoints)
actn_gc_ME_rel = np.zeros(totalPoints)
indxTest = 0
rhoInit = np.zeros((nImpctPrmtr,nVion))
#
# "Classical" approach:
#
deltaPx_c = np.zeros((nImpctPrmtr,nVion))
deltaPy_c = np.zeros((nImpctPrmtr,nVion))
deltaPz_c = np.zeros((nImpctPrmtr,nVion))
ionVx_c = np.zeros((nImpctPrmtr,nVion))
ionVy_c = np.zeros((nImpctPrmtr,nVion))
ionVz_c = np.zeros((nImpctPrmtr,nVion))
# Copyright (c) Facebook, Inc. and its affiliates.
import itertools
import json
import logging
import numpy as np
import os
from collections import OrderedDict
import PIL.Image as Image
import pycocotools.mask as mask_util
import torch
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.utils.comm import all_gather, is_main_process, synchronize
from detectron2.utils.file_io import PathManager
from .evaluator import DatasetEvaluator
class SemSegEvaluator(DatasetEvaluator):
"""
Evaluate semantic segmentation metrics.
"""
def __init__(self, dataset_name, distributed=True, output_dir=None, *, num_classes=None, ignore_label=None, write_outputs=False):
"""
Args:
dataset_name (str): name of the dataset to be evaluated.
distributed (bool): if True, will collect results from all ranks for evaluation.
Otherwise, will evaluate the results in the current process.
output_dir (str): an output directory to dump results.
num_classes, ignore_label: deprecated argument
"""
self._logger = logging.getLogger(__name__)
if num_classes is not None:
self._logger.warn(
"SemSegEvaluator(num_classes) is deprecated! It should be obtained from metadata."
)
if ignore_label is not None:
self._logger.warn(
"SemSegEvaluator(ignore_label) is deprecated! It should be obtained from metadata."
)
self._dataset_name = dataset_name
self._distributed = distributed
self._output_dir = output_dir
self._write_outputs = write_outputs
self._cpu_device = torch.device("cpu")
self.input_file_to_gt_file = {
dataset_record["file_name"]: dataset_record["sem_seg_file_name"]
for dataset_record in DatasetCatalog.get(dataset_name)
}
meta = MetadataCatalog.get(dataset_name)
# Dict that maps contiguous training ids to COCO category ids
try:
c2d = meta.stuff_dataset_id_to_contiguous_id
self._contiguous_id_to_dataset_id = {v: k for k, v in c2d.items()}
except AttributeError:
self._contiguous_id_to_dataset_id = None
self._class_names = meta.stuff_classes
self._num_classes = len(meta.stuff_classes)
if num_classes is not None:
assert self._num_classes == num_classes, f"{self._num_classes} != {num_classes}"
self._ignore_label = ignore_label if ignore_label is not None else meta.ignore_label
def reset(self):
self._conf_matrix = np.zeros((self._num_classes + 1, self._num_classes + 1), dtype=np.int64)
self._predictions = []
def process(self, inputs, outputs):
"""
Args:
inputs: the inputs to a model.
It is a list of dicts. Each dict corresponds to an image and
contains keys like "height", "width", "file_name".
outputs: the outputs of a model. It is either list of semantic segmentation predictions
(Tensor [H, W]) or list of dicts with key "sem_seg" that contains semantic
segmentation prediction in the same format.
"""
from cityscapesscripts.helpers.labels import trainId2label
pred_output = os.path.join(self._output_dir, 'predictions')
if not os.path.exists(pred_output):
os.makedirs(pred_output)
pred_colour_output = os.path.join(self._output_dir, 'colour_predictions')
if not os.path.exists(pred_colour_output):
os.makedirs(pred_colour_output)
for input, output in zip(inputs, outputs):
output = output["sem_seg"].argmax(dim=0).to(self._cpu_device)
pred = np.array(output, dtype=np.uint8)
pred64 = np.array(output, dtype=np.int64) # to use it on bitcount for conf matrix
with PathManager.open(self.input_file_to_gt_file[input["file_name"]], "rb") as f:
gt = np.array(Image.open(f), dtype=np.int64)
gt[gt == self._ignore_label] = self._num_classes
self._conf_matrix += np.bincount(
(self._num_classes + 1) * pred64.reshape(-1) + gt.reshape(-1),
minlength=self._conf_matrix.size,
).reshape(self._conf_matrix.shape)
if self._write_outputs:
file_name = input["file_name"]
basename = os.path.splitext(os.path.basename(file_name))[0]
pred_filename = os.path.join(pred_output, basename + '.png')
Image.fromarray(pred).save(pred_filename)
# colour prediction
output = output.numpy()
pred_colour_filename = os.path.join(pred_colour_output, basename + '.png')
pred_colour = 255 * np.ones([output.shape[0],output.shape[1],3], dtype=np.uint8)
for train_id, label in trainId2label.items():
#if label.ignoreInEval:
# continue
#pred_colour[np.broadcast_to(output == train_id, pred_colour.shape)] = 0 #label.color
pred_colour[(output == train_id),0] = label.color[0]
pred_colour[(output == train_id),1] = label.color[1]
pred_colour[(output == train_id),2] = label.color[2]
Image.fromarray(pred_colour).save(pred_colour_filename)
#self._predictions.extend(self.encode_json_sem_seg(pred, input["file_name"]))
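    # Illustrative sketch (not part of the evaluator) of the bincount trick used in
    # process() above: every (pred, gt) pixel pair is encoded as the single integer
    # (num_classes + 1) * pred + gt, so a bincount of the encoded array reshaped to
    # (num_classes + 1, num_classes + 1) gives the confusion matrix with rows indexed
    # by prediction and columns by ground truth.  Toy 2-class example:
    #   pred = np.array([0, 0, 1, 1]); gt = np.array([0, 1, 1, 1])
    #   conf = np.bincount(3 * pred + gt, minlength=9).reshape(3, 3)
    #   # conf[0, 0] == 1, conf[0, 1] == 1, conf[1, 1] == 2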
def evaluate(self):
"""
Evaluates standard semantic segmentation metrics (http://cocodataset.org/#stuff-eval):
* Mean intersection-over-union averaged across classes (mIoU)
* Frequency Weighted IoU (fwIoU)
* Mean pixel accuracy averaged across classes (mACC)
* Pixel Accuracy (pACC)
"""
if self._distributed:
synchronize()
conf_matrix_list = all_gather(self._conf_matrix)
self._predictions = all_gather(self._predictions)
self._predictions = list(itertools.chain(*self._predictions))
if not is_main_process():
return
self._conf_matrix = np.zeros_like(self._conf_matrix)
for conf_matrix in conf_matrix_list:
self._conf_matrix += conf_matrix
'''if self._output_dir:
PathManager.mkdirs(self._output_dir)
file_path = os.path.join(self._output_dir, "sem_seg_predictions.json")
with PathManager.open(file_path, "w") as f:
f.write(json.dumps(self._predictions))'''
print(self._conf_matrix)
acc = np.full(self._num_classes, np.nan, dtype=np.float)
iou = np.full(self._num_classes, np.nan, dtype=np.float)
tp = self._conf_matrix.diagonal()[:-1].astype(np.float)
pos_gt = np.sum(self._conf_matrix[:-1, :-1], axis=0).astype(np.float)
class_weights = pos_gt / np.sum(pos_gt)
pos_pred = np.sum(self._conf_matrix[:-1, :-1], axis=1).astype(np.float)
acc_valid = pos_gt > 0
acc[acc_valid] = tp[acc_valid] / pos_gt[acc_valid]
iou_valid = (pos_gt + pos_pred) > 0
union = pos_gt + pos_pred - tp
iou[acc_valid] = tp[acc_valid] / union[acc_valid]
macc = np.sum(acc[acc_valid]) / np.sum(acc_valid)
        miou = np.sum(iou[acc_valid]) / np.sum(acc_valid)
# -*- coding: utf-8 -*-
"""
@author: <NAME>, University of Bristol, <EMAIL>
This programme will take an input array of peaks in 1D I vs q data (such as those returned from the finder programme),
and returns a dictionary of possible phases that the data can take on, along with the miller plane index and the peaks
used for that possible phase assignment. There are separate (but almost identical) methods for distinguishing cubic phases and
Lamellar/Inverse Hexagonal ones. It is recommended that having used the peak finding programme, the phase is attempted to be assigned
by using the number of peaks found in the data. In general from the author's experience, the La and HII phases produce fewer Bragg peaks,
such that if a condition were used along the lines of if len(peaks)<3: La_HII_possible_phases(peaks, etc) else: Q_possible_phases(peaks etc)
then there should be a good chance of assigning the correct phase. Otherwise there is a risk of simultaneously assigning the HII along
with a cubic one. Worst comes to worst... The old fashioned hand method won't fail...
The information passed to the dictionary at the end should be enough to plot I vs q data with information about which peak has been
indexed as which, along with information about the lattice parameter and phase. See the optional plot in the finder.py programme for
more of an idea about the kind of way that matplotlib can plot something like this, using a combination of plt.axvline and plt.text.
At the bottom of this programme there is an example set of data in a comment that can be run through to see what result to expect at the end.
"""
import numpy as np
"""
La_HII_possible_phases works similarly to Q_possible_phases, in that it uses a statistical methodology to work out which peaks can
be assigned to which phase. However, as fewer peaks are expected to be passed to this module, it simply determines the phase by finding
a consistent lattice parameter, and taking the longest assignment from La or HII given to it.
La_HII_possible_phases will return a dictionary keyed by phase name, with values of lattice parameter, hkl plane factors, and the peaks
correspondingly assigned.
pass the following parameters to this function:
peaks - an array of peaks that have previously been found elsewhere
"""
def La_HII_possible_phases(peaks):
La_ratios=np.array([1,2,3])[:,np.newaxis]
HII_ratios=np.sqrt(np.array([1,3,4])[:,np.newaxis])
La_init = 2*np.pi*(1/peaks)*La_ratios
HII_init = (2/np.sqrt(3))*2*np.pi*(1/peaks)*HII_ratios
La=np.ndarray.flatten(La_init)
HII=np.ndarray.flatten(HII_init)
values=np.concatenate((La,HII))
hist,bin_edges=np.histogram(values,bins=2*np.size(values))
inds=np.digitize(values,bin_edges)-1
hist_max_bin_pos=np.where(inds==np.argmax(hist))[0]
La_sourced=hist_max_bin_pos[np.where(hist_max_bin_pos<len(La))]
HII_sourced=hist_max_bin_pos[np.where(hist_max_bin_pos>len(La)-1)]
n=np.reshape(np.arange(0,np.size(La_init)),np.shape(La_init))
La_peaks=np.zeros(0)
La_factors=np.zeros(0)
HII_peaks=np.zeros(0)
HII_factors=np.zeros(0)
for a in range(0,len(La_sourced)):
La_hkl=La_ratios[np.where(np.mod(La_sourced[a],np.size(n))==n)[0]][0][0]
La_peak=peaks[np.where(np.mod(La_sourced[a],np.size(n))==n)[1]][0]
La_peaks=np.append(La_peaks,La_peak)
La_factors=np.append(La_factors,La_hkl)
for b in range(0,len(HII_sourced)):
HII_hkl=HII_ratios[np.where(np.mod(HII_sourced[b],np.size(n))==n)[0]][0][0]
HII_peak=peaks[np.where(np.mod(HII_sourced[b],np.size(n))==n)[1]][0]
HII_peaks=np.append(HII_peaks,HII_peak)
HII_factors=np.append(HII_factors,HII_hkl)
phase_dict={}
if len(La_peaks)>len(HII_peaks):
phase_dict['La']=np.mean(values[np.where(inds==np.argmax(hist))]),La_factors,La_peaks
elif len(HII_peaks)>len(La_peaks):
phase_dict['HII']=np.mean(values[np.where(inds==np.argmax(hist))]),HII_factors,HII_peaks
return phase_dict
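"""
Illustrative usage sketch (hypothetical peak positions): three peaks in the ratio 1:2:3
should be indexed as a lamellar phase with lattice parameter 2*pi/q_1, e.g.
    test_peaks = np.array([0.1, 0.2, 0.3])
    La_HII_possible_phases(test_peaks)
is expected to return {'La': (~62.8, array([1., 2., 3.]), array([0.1, 0.2, 0.3]))}.
"""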
"""
Q_possible_phases works by creating matrices of lattice parameter values that can arise having declared that any peak that
has been found can be indexed as any miller index for any phase. These values are then collapsed into a single 1D array,
which is investigated as a histogram. The number of bins in the histogram is arbitrarily taken as twice the number of values,
so care should be taken. Peaks in the histogram will arise at the points where there are matching values
resulting from peaks being correctly indexed in the correct phase. The possible_phases takes a threshold number, such that
bins with more values in it than the threshold are considered to be possible phase values. This is due to the fact
that because of symmetry degeneracies, 'correct' phase values may arise from more than a single phase matrix. The values
in the bins which exceed threshold population are then investigated for their origins: which peak and index were
responsible for bringing them about?
The Q_possible_phases will return a dictionary, keyed through lattice parameters, with associated values of the phase (D=0, P=1, G=2),
the peaks that have been indexed, and the indices assigned to the peak.
pass the following parameters to this function:
peaks - an array of peaks that have previously been found elsewhere
"""
def Q_possible_phases(peaks):
#define the characteristic peak ratios
QIID=np.array([2,3,4,6,8,9,10,11])[:,np.newaxis]
QIIP=np.array([2,4,6,8,10,12,14])[:,np.newaxis]
QIIG=np.array([6,8,14,16,20,22,24])[:,np.newaxis]
QIID_ratios=np.sqrt(QIID)
QIIP_ratios=np.sqrt(QIIP)
QIIG_ratios=np.sqrt(QIIG)
'''
1) create matrices of all possible lattice parameter values
2) flatten each matrix to one dimension
3) combine the matricies into one
'''
D_init = 2*np.pi*(1/peaks)*QIID_ratios
P_init = 2*np.pi*(1/peaks)*QIIP_ratios
G_init = 2*np.pi*(1/peaks)*QIIG_ratios
'''
n_D, n_P, n_G are arrays of integers running from 0 to the size of the respective initial arrays. They will be used later
on to determine the source of where matching lattice parameter values have arisen from.
'''
n_D=np.reshape(np.arange(0,np.size(D_init)),np.shape(D_init))
n_P=np.reshape(np.arange(0,np.size(P_init)),np.shape(P_init))
n_G=np.reshape(np.arange(0,np.size(G_init)),np.shape(G_init))
n=np.reshape(np.arange(0,np.size(np.ndarray.flatten(np.concatenate((n_D,n_G,n_P))))),np.shape(np.concatenate((n_D,n_G,n_P))))
D=np.ndarray.flatten(D_init)
P=np.ndarray.flatten(P_init)
G=np.ndarray.flatten(G_init)
values=np.concatenate((D,P,G))
#histogram the data so that we have some bins. bin number increase is arbitrary.
hist, bin_edges=np.histogram(values,bins=np.int(2*np.size(values)))
#digitise the data (see numpy docs for explanations)
inds=np.digitize(values,bin_edges)
#will return the possible phases, their lattice parameters, and the peaks and hkl index from which they arise as a dictionary.
phase_dict={}
for i in range(0, np.size(values)):
try:
#find the values from the values array which are actually present in each bin and put them in the values array
binned_values=values[np.where(inds==i)]
#this size filtering is completely arbitrary.
if np.size(binned_values)>5:
#trace where the values in the bin originated from in the arrays.
positions_array=np.zeros(0)
for k in range(0, np.size(binned_values)):
positions_array=np.append(positions_array,np.where(binned_values[k]==values)[0])
#look at the distribution of the origin of the arrays - they should be group dependent on the phase.
#D_sourced, P_sourced, G_sourced are the positions in the values array where the matching peaks have come from
final_pos_array=np.unique(positions_array)
#split the positions up into which cubic phase calculation they have come from.
D_factors=np.where(final_pos_array<np.size(D))[0][0:]
P_factors=(np.where(final_pos_array<=(np.size(P)+np.size(D))-1)[0][0:])[np.size(D_factors):]
G_factors=np.where(final_pos_array> (np.size(P)+np.size(D))-1)[0][0:]
#correspond the positions in the factors arrays to where they come from in the final positions array
D_sourced=final_pos_array[D_factors].astype(int)
P_sourced=final_pos_array[P_factors].astype(int)
G_sourced=final_pos_array[G_factors].astype(int)
'''
want to find where the matching phases have come from in the array to see which one is the real one.
e.g. np.mod(o_sourced[a],n) corrects the position in the o array for running the same length as the sourced array
then find where the value is the same to identify the row
then find from which ratio factor the peak originated from.
'''
D_sourced_factors=np.zeros(0,dtype=np.int)
P_sourced_factors=np.zeros(0,dtype=np.int)
G_sourced_factors=np.zeros(0,dtype=np.int)
D_sourced_peaks=np.zeros(0)
P_sourced_peaks=np.zeros(0)
G_sourced_peaks=np.zeros(0)
for a in range(0,len(D_sourced)):
D_array_position=D_sourced[a]
D_array_comparison_pos=np.mod(D_array_position,np.size(D))
D_position=np.where(D_array_comparison_pos==n)
D_hkl=QIID[D_position[0][0]][0]
D_peak_hkl=peaks[D_position[1][0]]
D_sourced_factors=np.append(D_sourced_factors,np.int(D_hkl))
D_sourced_peaks=np.append(D_sourced_peaks,D_peak_hkl)
for b in range(0,len(P_sourced)):
P_array_position=P_sourced[b]
P_array_comparison_pos=P_array_position-np.size(D)
P_position=np.where(P_array_comparison_pos==n)
P_hkl=QIIP[P_position[0][0]][0]
P_peak_hkl=peaks[P_position[1][0]]
P_sourced_factors=np.append(P_sourced_factors,np.int(P_hkl))
P_sourced_peaks=np.append(P_sourced_peaks,P_peak_hkl)
for c in range(0,len(G_sourced)):
G_array_position=G_sourced[c]
G_array_comparison_pos=G_array_position-np.size(P)-np.size(D)
G_position=np.where(G_array_comparison_pos==n)
G_hkl=QIIG[G_position[0][0]][0]
G_peak_hkl=peaks[G_position[1][0]]
G_sourced_factors=np.append(G_sourced_factors,np.int(G_hkl))
G_sourced_peaks=np.append(G_sourced_peaks,G_peak_hkl)
'''
Only save the phase (as keyed number: D=0, P=1,G=2), and related data to the returned dictionary if
there are more than 3 peaks in there.
As the coincidence of factors between the QIID and QIIP is high, attempt to clarify which phase
is actually present if the same factors have been assigned to the same peaks.
'''
if len(D_sourced_factors) >3 and len(P_sourced_factors) >3:
lp=np.mean((np.mean(values[D_sourced]),np.mean(values[P_sourced])))
#find which set of values is longer and which is shorter
if len(D_sourced_factors)>len(P_sourced_factors):
shorter_factors=P_sourced_factors
shorter_peaks=P_sourced_peaks
longer_factors=D_sourced_factors
longer_peaks=D_sourced_peaks
switch=0
else:
shorter_factors=D_sourced_factors
shorter_peaks=D_sourced_peaks
longer_factors=P_sourced_factors
longer_peaks=P_sourced_peaks
switch=1
#find which pairs of peaks and factors have been assigned.
matching_factors=np.intersect1d(shorter_factors,longer_factors)
matching_peaks=np.intersect1d(shorter_peaks,longer_peaks)
'''
if the shorter set of factors is completely incidental into the longer set, then
the phase can be assigned as being the longer set of factors.
'''
if (len(matching_factors)==len(shorter_factors)) and (len(matching_peaks)==len(shorter_peaks)):
phase_dict[switch]=lp,longer_factors,longer_peaks
elif len(D_sourced_factors) >3 and len(P_sourced_factors) <4:
phase_dict[0] = np.mean(values[D_sourced]), D_sourced_factors, D_sourced_peaks
elif len(D_sourced_factors) <4 and len(P_sourced_factors) >3:
phase_dict[1] = np.mean(values[P_sourced]), P_sourced_factors, P_sourced_peaks
if len(G_sourced_factors) >3:
phase_dict[2] = np.mean(values[G_sourced]), G_sourced_factors, G_sourced_peaks
except IndexError:
pass
return phase_dict
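"""
Illustrative usage sketch (hypothetical peaks following the QIID spacing ratios for a
lattice parameter of 2*pi/0.05 ~ 125.7):
    test_peaks = 0.05*np.sqrt(np.array([2,3,4,6,8,9,10,11]))
    candidates = Q_possible_phases(test_peaks)
The returned dictionary should contain key 0 (QIID) with a lattice parameter close to
125.7; because of the symmetry degeneracies discussed above, further candidate keys may
also appear, which is why each candidate is afterwards checked with Q_projection_testing.
"""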
"""
projection_testing is the final clarification stage of identifying which of the possible identified phases are 'real'.
The phases are checked against a fundamental 'mode' determined by the identified lattice parameter and phase. From this fundamental
value, the peaks in q which should exist can be calculated. These proposed peaks are subsequently checked against the peaks
which actually exist in the data. This is done through constructing a difference matrix, populated by the differences between
the peaks in the projected and physical arrays. The matrix is then searched for where the value is very small - ie. the proposed
peak is present in the physical data. If all or all but one or two of the proposed peaks are present in the physical data,
then it is said that the proposed phase is real, and not a feature of degenerate symmetry in the data. NB! you might want to
change the number of peaks that are acceptably omissible depending on how successful you are. Alternatively: change the
number of peak indicies used for calculations throughout the code.
pass the following parameters to this function:
phase_array - the integer spacing ratios of the proposed phase that needs to be tested.
fundamental - the ratio of a peak value of a phase to the square root of its index. Defined in the main below as the average
of these values across a set of peaks in a proposed phase.
peak_array - the full set of peaks that have been actually been physically found in the data, to test against a set of peaks
which should exist given the peaks present.
lo_q - the same low limit in q that was used to define the width in which peaks are to be found
"""
def Q_projection_testing(phase_array, fundamental, peak_array,lo_q):
#now project the fundamental q value over the phase
projected_values=(np.sqrt(phase_array)*fundamental)[:,np.newaxis]
#check that the first projected peak is within the finding q width:
if projected_values[0]>lo_q:
'''
the matches variable is an evaluation of where peaks that have been projected correspond to peaks that actually exist.
arbitrarily, if the difference in the lengths of the arrays is less than 2, (Ie. all peaks are present or only one or two
are missing in the data) then return a confirmation that the phase is a real assignment of the peaks.
'''
matches=np.where(np.abs(np.subtract(projected_values,peak_array))<0.001)[0]
if len(matches)>3:
return 1
#if the lowest peak is not in the desired q range
else:
return 0
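"""
Illustrative check continuing the hypothetical QIID sketch above: the fundamental is the
mean ratio of each assigned peak to the square root of its index, and projecting it back
over the full QIID ratio list recovers the measured peaks, so the phase is confirmed:
    QIID = np.array([2,3,4,6,8,9,10,11])
    fundamental = np.mean(test_peaks/np.sqrt(QIID))               # ~0.05
    Q_projection_testing(QIID, fundamental, test_peaks, 0.04)     # returns 1
"""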
"""
the main module runs the above modules, passing the required data from one to the other.
pass the following parameters to this function:
peaks - an array of peaks that have previously been found elsewhere
lo_q - the same low limit in q that was used to define the width in which peaks are to be found
"""
def Q_main(peaks,lo_q):
QIID_ratios=np.array([2,3,4,6,8,9,10,11])
QIIP_ratios=np.array([2,4,6,8,10,12,14])
    QIIG_ratios=np.array([6,8,14,16,20,22,24])
# === Start Python 2/3 compatibility
from __future__ import absolute_import, division, print_function, unicode_literals
from future.builtins import * # noqa pylint: disable=W0401, W0614
from future.builtins.disabled import * # noqa pylint: disable=W0401, W0614
# === End Python 2/3 compatibility
import pytest
import numpy as np
import h5py
from kotekan import runner
from kotekan import visutil
from kotekan import visbuffer
import time
# Skip if HDF5 support not built into kotekan
if not runner.has_hdf5():
pytest.skip("HDF5 support not available.", allow_module_level=True)
start_time = 1_500_000_000
old_timestamp = start_time - 10.0
new_timestamp = start_time + 5.0
old_update_id = f"gains{old_timestamp}"
new_update_id = f"gains{new_timestamp}"
transition_interval = 10.0
new_state = True
global_params = {
"num_elements": 16,
"num_ev": 4,
"total_frames": 20,
"start_time": start_time,
"cadence": 1.0,
"mode": "fill_ij",
"freq_ids": [250],
"buffer_depth": 8,
"updatable_config": "/gains",
"gains": {
"kotekan_update_endpoint": "json",
"start_time": old_timestamp,
"update_id": old_update_id,
"transition_interval": transition_interval,
"new_state": new_state,
},
"wait": True,
"sleep_before": 2.0,
"num_threads": 4,
"dataset_manager": {"use_dataset_broker": False},
}
def gen_gains(filename, mult_factor, num_elements, freq):
"""Create gain file and return the gains and weights."""
nfreq = len(freq)
gain = (
np.arange(nfreq)[:, None] * 1j * np.arange(num_elements)[None, :] * mult_factor
).astype(np.complex64)
# Make some weights zero to test the behaviour of apply_gains
weight = np.ones((nfreq, num_elements), dtype=np.bool8)
weight[:, 1] = False
weight[:, 3] = False
with h5py.File(str(filename), "w") as f:
dset = f.create_dataset("gain", data=gain)
dset2 = f.create_dataset("weight", data=weight)
dset2[...] = weight
freq_ds = f.create_dataset("index_map/freq", (nfreq,), dtype="f")
ipt_ds = f.create_dataset("index_map/input", (num_elements,), dtype="i")
freq_ds[...] = freq
        ipt_ds[:] = np.arange(num_elements)
    return gain, weight
import numpy as np
import pandas as pd
import time
import csv
def file_processing(file_path, encode):
data = []
# 每一個維度儲存一種污染物的資訊
for i in range(18):
data.append([])
with open(file_path, encoding=encode) as file:
rows = csv.reader(file , delimiter=",")
n_row = 0
for row in rows:
if n_row != 0:
for i in range(3, 27):
if row[i] != 'NR':
data[(n_row-1)%18].append(row[i])
else:
data[(n_row-1)%18].append(0)
n_row += 1
data = np.array(data)
return data
def data_processing(data):
    # the data covers 12 months x 20 days = 240 days
    # taking 10 consecutive hours within one month (20 days) -> 20x24-9 = 471 samples
    # taking 10 consecutive hours over the whole year -> y has 240x24-9 = 5751 samples
#x = np.zeros((5751, 9))
#y = np.zeros((5751, 1))
x_total = []
y_total = []
for n in range(18):
x = np.zeros((5751, 9))
y = np.zeros((5751, 1))
for i in range(5751):
x[i] = data[n][i:i+9]
y[i] = data[n][i+9]
x_total.append(x)
y_total.append(y)
    x_total, y_total = np.array(x_total), np.array(y_total)
    return x_total, y_total
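# Illustrative note (hypothetical file name and encoding): each of the 18 pollutant
# series above holds 12 months x 20 days x 24 hours = 5760 hourly values, so the sliding
# window of 9 input hours plus 1 target hour yields 5751 samples per pollutant:
# data = file_processing('train.csv', 'big5')   # placeholders for the real arguments
# x_total, y_total = data_processing(data)      # shapes (18, 5751, 9) and (18, 5751, 1)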
import pandas as pd
from multiprocessing import Pool, RawArray, cpu_count
from portfolio_methods import PortfolioMethods
import numpy as np
import copy
from decorators import timeit
from sklearn.linear_model import LinearRegression
import numpy.random as npr
import json
import datetime as dt
from portfolio import PM, Portfolio
GLOBAL_DICT = {}
def init_worker(returnMatrix, excessReturnMatrix, excessMarketReturn, dates, shapes):
global GLOBAL_DICT
GLOBAL_DICT['returnMatrix'] = np.frombuffer(returnMatrix).reshape(shapes['return'])
GLOBAL_DICT['excessReturnMatrix'] = np.frombuffer(excessReturnMatrix).reshape(shapes['return'])
GLOBAL_DICT['excessMarketReturn'] = np.frombuffer(excessMarketReturn).reshape(shapes['market'])
GLOBAL_DICT['dates'] = dates
def test_global(i):
global GLOBAL_DICT
return GLOBAL_DICT['returnMatrix']
class PortfolioOptimizer:
@timeit
def __init__(self, universeObj):
self.universeObj = universeObj
self.universeObj.make()
self.init()
@timeit
def init(self):
self.returnCols = self.universeObj.assets
self.returns = self.universeObj.data['assetsReturnOnFreq']
self.riskFree = self.universeObj.data['rfOnFreq']
self.market = self.universeObj.data['factorsReturnOnFreq']
self.makeMatrices()
@timeit
def makeMatrices(self):
self.returnMatrix = self.returns.values
self.rfVector = self.riskFree.values
self.excessReturn = self.returnMatrix - self.rfVector
self.marketVec = self.market.values
self.excessMarketReturn = self.marketVec - self.rfVector
self.shapes = {'return': self.returnMatrix.shape,
'market': self.excessMarketReturn.shape}
@timeit
def getRawArrays(self):
returnMatrix = RawArray('d', self.returnMatrix.reshape(np.prod(self.shapes['return'])))
excessReturnMatrix = RawArray('d', self.excessReturn.reshape(np.prod(self.shapes['return'])))
excessMarketReturn = RawArray('d', self.excessMarketReturn.reshape(np.prod(self.shapes['market'])))
return returnMatrix, excessReturnMatrix, excessMarketReturn
@staticmethod
def calculateAllocation(vector):
#pickSize = max([int(abs(vector[-1])), 1])
pickSize = 30
vector = np.abs(vector)#np.abs(np.array(vector[:-1]))##
# Selecting stocks, idx of the # pickSize biggest
idx = (-vector).argsort()[:pickSize]
weights = vector[idx]
allocation = weights/weights.sum()
direction = np.ones(pickSize)
return idx, allocation, direction
@staticmethod
def calculateAllocationLongShort(vector):
#pickSize = max(np.int(np.abs(vector[-1])), 10)
#pickSize = min(pickSize, 30)
pickSize = 30
#vector = vector[:-1]#np.array(vector,dtype=np.float32)[:-1]
direction = (vector > 0).astype(np.float32)
direction[direction == 0] = -1
allocWeight = np.abs(vector)
# Selecting stocks, idx of the # pickSize biggest
idx = ((-allocWeight).argsort()[:pickSize])
weights = allocWeight[idx]
allocation = (weights / weights.sum())
direction = direction[idx]
return idx, allocation, direction
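    # Illustrative sketch of the long/short allocation above (with pickSize set to 2
    # instead of the hard-coded 30): the largest absolute weights are kept, normalised,
    # and their signs become the trade directions, e.g.
    #   vector = np.array([0.5, -1.5, 0.1, 1.0])
    #   -> idx = [1, 3], allocation = [0.6, 0.4], direction = [-1., 1.]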
@staticmethod
def fitness_print(vector):
return PortfolioOptimizer.fitness_test1(vector, printData=True)
@staticmethod
def fitness_strategy1(vector, printData=False):
global GLOBAL_DICT
idx, allocation, direction = PortfolioOptimizer.calculateAllocationLongShort(vector)
if (allocation > 0.3).any() and printData == False:
return 0
selectedReturn = GLOBAL_DICT['returnMatrix'][:, idx]
selectedExcessReturn = GLOBAL_DICT['excessReturnMatrix'][:, idx]
marketReturn = GLOBAL_DICT['excessMarketReturn']
dates = GLOBAL_DICT['dates']
pReturn = PM._pReturn(selectedReturn.astype(np.float32), allocation, direction)
pReturnExcess = PM._pReturn(selectedExcessReturn.astype(np.float32), allocation, direction)
mktError = (pReturnExcess - marketReturn)
trackingError = np.std(mktError)
# Portfolio Metrics
alpha, beta = PM._alphaBeta(pReturnExcess, marketReturn, intercept=True)
beta = np.sum(beta)
targetBeta = 0.2
betaPenalty = np.abs(targetBeta - beta)
volMkt = marketReturn.std()
vol = pReturn.std()
mean = pReturnExcess.mean()
maximizeTerm = (1.0 + alpha + pReturnExcess.mean()) * np.sign(alpha)
minimizeTerm = (1 + volMkt + betaPenalty/10 + trackingError/10)
fitness = maximizeTerm / minimizeTerm
return fitness
@staticmethod
def fitness_strategy2(vector, printData=False):
global GLOBAL_DICT
idx, allocation, direction = PortfolioOptimizer.calculateAllocation(vector)
if (allocation > 0.5).any() and printData == False:
return 0
selectedReturn = GLOBAL_DICT['returnMatrix'][:, idx]
selectedExcessReturn = GLOBAL_DICT['excessReturnMatrix'][:, idx]
marketReturn = GLOBAL_DICT['excessMarketReturn']
dates = GLOBAL_DICT['dates']
pReturn = PM._pReturn(selectedReturn.astype(np.float32), allocation, direction)
pReturnExcess = PM._pReturn(selectedExcessReturn.astype(np.float32), allocation, direction)
mktError = (pReturnExcess - marketReturn)
trackingError = np.std(mktError)
# Portfolio Metrics
alpha, beta = PM._alphaBeta(pReturnExcess, marketReturn, intercept=True)
beta = np.sum(beta)
#targetBeta = 0.5
betaPenalty = 0#np.abs(targetBeta - beta)
volMkt = marketReturn.std()
vol = pReturn.std()
mean = pReturnExcess.mean()
maximizeTerm = (1.0 + alpha + pReturnExcess.mean())
minimizeTerm = (1 + volMkt + betaPenalty/10 + trackingError/10)
fitness = maximizeTerm / minimizeTerm
return fitness
@staticmethod
def fitness_best_sharp(vector):
global GLOBAL_DICT
idx, allocation, direction = PortfolioOptimizer.calculateAllocationLongShort(vector)
if (allocation > 0.4).any():
return 0
selectedReturn = GLOBAL_DICT['returnMatrix'][:, idx]
selectedExcessReturn = GLOBAL_DICT['excessReturnMatrix'][:, idx]
marketReturn = GLOBAL_DICT['excessMarketReturn']
dates = GLOBAL_DICT['dates']
cumulative, _ = PM._pReturn(selectedExcessReturn.astype(np.float32), allocation, direction)
        pReturnExcess = np.concatenate(([0], np.diff(cumulative)))
import murnaghan2017 as m
import numpy as np
# EXAMPLE RUN FOR BCC STRUCTURE USING MURNAGHAN EOS AND
# THE ABINIT CODE.
# THE TEMPLATEDIR DIRECTORY WITH THE TEMPLATE FILE
# AND PSEUDOPOTENTIALS/PAWs NEEDS TO BE SET UP AS DESCRIBED
# IN THE README
energy_driver = 'abinit'
# template file is usually 'abinit.in.template' for abinit,
# 'crystal.template' for socorro,
# or 'elk.in.template' for elk
template_file = 'abinit.in.template'
# scales to loop though. These get multiplied by abc_guess
s = [0.95, 0.975, 1.0, 1.025, 1.05]
# lattice parameter guesses
abc_guess = [6.3, 6.3, 6.3]
# this list of abc arrays gets passed to actual sweep
abc_list = [si*np.array(abc_guess) for si in s]
"""
Copyright 2020, the e-prop team
Full paper: A solution to the learning dilemma for recurrent networks of spiking neurons
Authors: <NAME>*, <NAME>*, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
Training LSNN model to solve framewise phone classification of TIMIT dataset
CUDA_VISIBLE_DEVICES=0 python3 -u solve_timit_with_framewise_lsnn.py
"""
import tensorflow as tf
import numpy as np
import numpy.random as rd
from alif_eligibility_propagation import CustomALIF, exp_convolve
from toolbox.matplotlib_extension import raster_plot, strip_right_top_axis
from toolbox.file_saver_dumper_no_h5py import NumpyAwareEncoder
from tools import TimitDataset, einsum_bij_jk_to_bik, pad_vector
import time
import os
import errno
import json
import datetime
def flag_to_dict(FLAG):
if float(tf.__version__[2:]) >= 5:
flag_dict = FLAG.flag_values_dict()
else:
flag_dict = FLAG.__flags
return flag_dict
script_name = os.path.basename(__file__)[:-3]
time_stamp = datetime.datetime.now().strftime("%Y_%m_%d__%H_%M__%S_%f")
try:
os.makedirs('results')
except OSError as e:
if e.errno != errno.EEXIST:
raise
FLAGS = tf.app.flags.FLAGS
# Accessible parameter from the shell
tf.app.flags.DEFINE_string('comment', '', 'comment attached to output filenames')
tf.app.flags.DEFINE_string('run_id', '', 'comment attached to output filenames')
tf.app.flags.DEFINE_string('checkpoint', '', 'optionally load the pre-trained weights from checkpoint')
tf.app.flags.DEFINE_string('preproc', 'htk', 'Input preprocessing: fbank, mfccs, cochspec, cochspike, htk')
tf.app.flags.DEFINE_string('eprop', None, 'options: [None, symmetric, adaptive, random], None means use BPTT')
tf.app.flags.DEFINE_bool('adam', True, 'use Adam optimizer')
tf.app.flags.DEFINE_bool('plot', False, 'Interactive plot during training (useful for debugging)')
tf.app.flags.DEFINE_bool('reduced_phns', False, 'Use reduced phone set')
tf.app.flags.DEFINE_bool('psp_out', True, 'Use accumulated PSP instead of raw spikes of model as output')
tf.app.flags.DEFINE_bool('verbose', True, '')
tf.app.flags.DEFINE_bool('ramping_learning_rate', True, 'Ramp up the learning rate from 0 to lr_init in first epoch')
tf.app.flags.DEFINE_bool('BAglobal', False, 'Enable broadcast alignment with uniform weights to all neurons')
tf.app.flags.DEFINE_bool('cell_train', True, 'Train the RNN cell')
tf.app.flags.DEFINE_bool('readout_bias', True, 'Use bias variable in readout')
tf.app.flags.DEFINE_bool('rec', True, 'Use recurrent weights. Used to provide a baseline.')
tf.app.flags.DEFINE_string('dataset', '../datasets/timit_processed', 'Path to dataset to use')
tf.app.flags.DEFINE_float('readout_decay', 1e-2, 'Decay readout [and broadcast] weights')
tf.app.flags.DEFINE_bool('loss_from_all_layers', True, 'For multi-layer setup, make readout from all layers.')
#
tf.app.flags.DEFINE_integer('seed', -1, 'seed number')
tf.app.flags.DEFINE_integer('n_epochs', 80, 'number of iteration ')
tf.app.flags.DEFINE_integer('n_layer', 1, 'number of layers')
tf.app.flags.DEFINE_integer('n_regular', 300, 'number of regular spiking units in the recurrent layer.')
tf.app.flags.DEFINE_integer('n_adaptive', 100, 'number of adaptive spiking units in the recurrent layer')
tf.app.flags.DEFINE_integer('print_every', 100, 'print every and store accuracy')
tf.app.flags.DEFINE_integer('lr_decay_every', -1, 'Decay every')
tf.app.flags.DEFINE_integer('batch', 32, 'mini_batch size')
tf.app.flags.DEFINE_integer('test_batch', 32, 'mini_batch size')
tf.app.flags.DEFINE_integer('n_ref', 2, 'Number of refractory steps')
tf.app.flags.DEFINE_integer('n_repeat', 5, 'repeat each input time step for this many simulation steps (ms)')
tf.app.flags.DEFINE_integer('reg_rate', 10, 'target rate for regularization')
tf.app.flags.DEFINE_integer('truncT', -1, 'truncate time to this many input steps (truncT * n_repeat ms)')
#
tf.app.flags.DEFINE_float('dt', 1., 'Membrane time constant of output readouts')
tf.app.flags.DEFINE_float('tau_a', 200, 'Adaptation time constant')
tf.app.flags.DEFINE_bool('tau_a_spread', False, 'Spread time constants uniformly from 0 to tau_a')
tf.app.flags.DEFINE_float('tau_v', 20, 'Membrane time constant of recurrent neurons')
tf.app.flags.DEFINE_bool('tau_v_spread', False, 'Spread time constants uniformly from 0 to tau_v')
tf.app.flags.DEFINE_float('beta', 1.8, 'Scaling constant of the adaptive threshold')
tf.app.flags.DEFINE_float('clip', 0., 'Proportion of connected synpases at initialization')
tf.app.flags.DEFINE_float('l2', 1e-5, '')
tf.app.flags.DEFINE_float('lr_decay', .3, '')
tf.app.flags.DEFINE_float('lr_init', 0.01, '')
tf.app.flags.DEFINE_float('adam_epsilon', 1e-5, '')
tf.app.flags.DEFINE_float('momentum', 0.9, '')
tf.app.flags.DEFINE_float('gd_noise', 0.06 ** 2 * 10, 'Used only when noise_step_start > 0')
tf.app.flags.DEFINE_float('noise_step_start', -1, 'was 1000')
tf.app.flags.DEFINE_float('thr', 0.01, 'Baseline threshold voltage')
tf.app.flags.DEFINE_float('proportion_excitatory', 0.75, 'proportion of excitatory neurons')
tf.app.flags.DEFINE_float('l1', 1e-2, 'l1 regularization that goes with rewiring (irrelevant without rewiring)')
tf.app.flags.DEFINE_float('rewiring_temperature', 0., 'regularization coefficient')
tf.app.flags.DEFINE_float('dampening_factor', 0.3, 'Parameter necessary to approximate the spike derivative')
tf.app.flags.DEFINE_float('tau_out', 3, 'Mikolov: tau for PSP decay at output')
tf.app.flags.DEFINE_float('reg', 50, 'regularization coefficient')
tf.app.flags.DEFINE_float('drop_out_probability', -1., '')
tf.app.flags.DEFINE_integer('cuda_device', -1, '')
if FLAGS.plot:
import matplotlib.pyplot as plt
#
key0 = list(dir(FLAGS))[0]
getattr(FLAGS, key0)
if FLAGS.cuda_device >= 0:
os.environ["CUDA_VISIBLE_DEVICES"] = str(FLAGS.cuda_device)
filename = time_stamp + '_' + FLAGS.comment + '_' + FLAGS.run_id
storage_path = os.path.join('results', script_name, filename)
print("STORING EVERYTHING TO: ", storage_path)
try:
os.makedirs(storage_path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
if FLAGS.n_repeat < 1:
FLAGS.n_repeat = 1
flagdict = flag_to_dict(FLAGS)
assert isinstance(flagdict, dict)
# After processing the data, this object loads it and prepare it.
dataset = TimitDataset(FLAGS.batch, data_path=FLAGS.dataset, preproc=FLAGS.preproc,
use_reduced_phonem_set=FLAGS.reduced_phns)
n_in = dataset.n_features
# Placeholders loaded from data
features = tf.placeholder(shape=(None, None, dataset.n_features), dtype=tf.float32, name='Features')
audio = tf.placeholder(shape=(None, None), dtype=tf.float32, name='Audio')
phns = tf.placeholder(shape=(None, None), dtype=tf.int64, name='Labels')
seq_len = tf.placeholder(dtype=tf.int32, shape=[None], name="SeqLen")
keep_prob = tf.placeholder(dtype=tf.float32, shape=(), name="KeepProb")
weighted_relevant_mask = tf.placeholder(shape=(None, None), dtype=tf.float32, name="RelevanceMask")
batch_size = tf.Variable(0, dtype=tf.int32, trainable=False, name="BatchSize")
# Non-trainable variables that are used to implement a decaying learning rate and count the iterations
lr = tf.Variable(FLAGS.lr_init, dtype=tf.float32, trainable=False, name="LearningRate")
global_step = tf.Variable(0, dtype=tf.int32, trainable=False, name="GlobalStep")
lr_update = tf.assign(lr, lr * FLAGS.lr_decay)
gd_noise = tf.Variable(0, dtype=tf.float32, trainable=False, name="GDNoise")
# Op to ramping learning rate
n_iteration_per_epoch = 100
ramping_learning_rate_values = tf.linspace(0., 1., num=n_iteration_per_epoch)
clipped_global_step = tf.minimum(global_step, n_iteration_per_epoch - 1)
ramping_learning_rate_op = tf.assign(lr, FLAGS.lr_init * ramping_learning_rate_values[clipped_global_step])
# Frequencies
regularization_f0 = FLAGS.reg_rate / 1000
def batch_to_feed_dict(batch, is_train):
'''
    Create the dictionary that is fed into the Session.run(..) calls.
:param batch:
:return:
'''
features_np, phns_np, seq_len_np, wav_np = batch
n_time = max([len(i) for i in wav_np])
wav_np = np.stack([pad_vector(w, n_time) for w in wav_np], axis=0)
# print("input max ", np.max(features_np))
n_batch, n_time, n_features = features_np.shape
relevance_mask_np = [(np.arange(n_time) < seq_len_np[i]) / seq_len_np[i] for i in range(n_batch)]
relevance_mask_np = np.array(relevance_mask_np)
if FLAGS.n_repeat > 1:
# Extend sequences with the repeat in time
features_np = np.repeat(features_np, FLAGS.n_repeat, axis=1)
seq_len_np *= FLAGS.n_repeat
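    # When FLAGS.truncT > 0, training uses a random crop of truncT label steps (and the
    # corresponding truncT * n_repeat feature frames) from each sequence.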
if FLAGS.truncT > 0 and is_train:
in_steps_len = phns_np.shape[1]
if in_steps_len <= FLAGS.truncT:
print("truncT (", FLAGS.truncT, ") too long! setting to smaller size found = ", in_steps_len - 1)
FLAGS.truncT = in_steps_len - 1
max_step_offset = in_steps_len - FLAGS.truncT
rnd_step_offset = rd.randint(low=0, high=max_step_offset)
features_np = features_np[:, rnd_step_offset * FLAGS.n_repeat:(rnd_step_offset + FLAGS.truncT) * FLAGS.n_repeat,
:]
phns_np = phns_np[:, rnd_step_offset:rnd_step_offset + FLAGS.truncT]
seq_len_np = np.array(seq_len_np)
seq_len_np[seq_len_np > FLAGS.truncT] = FLAGS.truncT
relevance_mask_np = relevance_mask_np[:, rnd_step_offset:rnd_step_offset + FLAGS.truncT]
n_batch, n_time, n_features = features_np.shape
phns_labels = phns_np
return {features: features_np, phns: phns_labels, seq_len: seq_len_np, weighted_relevant_mask: relevance_mask_np,
batch_size: n_batch, keep_prob: FLAGS.drop_out_probability if is_train else 1., audio: wav_np}
if FLAGS.tau_a_spread:
taua = rd.choice([1, 0.5], size=FLAGS.n_regular + FLAGS.n_adaptive) * FLAGS.tau_a
else:
taua = FLAGS.tau_a
if FLAGS.tau_v_spread:
tauv = rd.choice([1, 0.5], size=FLAGS.n_regular + FLAGS.n_adaptive) * FLAGS.tau_v
else:
tauv = FLAGS.tau_v
flagdict['tauas'] = taua.tolist() if type(taua) is not float else taua
flagdict['tauvs'] = tauv.tolist() if type(tauv) is not float else tauv
with open(os.path.join(storage_path, 'flags.json'), 'w') as f:
json.dump(flagdict, f, indent=2)
def get_cell(tag, n_input=n_in):
# converting thr and beta parameters
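    # The raw threshold and adaptation strength beta are rescaled by the per-neuron
    # decay factors (1 - exp(-dt/tau)) of the voltage/adaptation filters; the first
    # n_regular neurons get beta = 0 and are therefore non-adaptive.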
thr_new = FLAGS.thr / (1 - np.exp(-FLAGS.dt / tauv)) if np.isscalar(tauv) else \
[FLAGS.thr / (1 - np.exp(-FLAGS.dt / tv)) for tv in tauv]
if np.isscalar(tauv) and np.isscalar(taua):
beta_new = FLAGS.beta * (1 - np.exp(-FLAGS.dt / taua)) / (1 - np.exp(-FLAGS.dt / tauv))
print("565 new threshold = {:.4g}\n565 new beta = {:.4g}".format(thr_new, beta_new))
beta_new = np.concatenate([np.zeros(FLAGS.n_regular), np.ones(FLAGS.n_adaptive) * beta_new])
elif np.isscalar(tauv) and not np.isscalar(taua):
beta_new = np.array([FLAGS.beta * (1 - np.exp(-FLAGS.dt / ta)) / (1 - np.exp(-FLAGS.dt / tauv)) for ta in taua])
beta_new[:FLAGS.n_regular] = 0
elif not np.isscalar(tauv) and np.isscalar(taua):
beta_new = np.array([FLAGS.beta * (1 - np.exp(-FLAGS.dt / taua)) / (1 - np.exp(-FLAGS.dt / tv)) for tv in tauv])
beta_new[:FLAGS.n_regular] = 0
elif not np.isscalar(tauv) and not np.isscalar(taua):
beta_new = np.array(
[FLAGS.beta * (1 - np.exp(-FLAGS.dt / ta)) / (1 - np.exp(-FLAGS.dt / tv)) for ta, tv in zip(taua, tauv)])
beta_new[:FLAGS.n_regular] = 0
else:
        raise NotImplementedError("Nonexistent combination of taua and tauv")
return CustomALIF(n_in=n_input, n_rec=FLAGS.n_regular + FLAGS.n_adaptive, tau=tauv,
dt=FLAGS.dt, tau_adaptation=taua, beta=beta_new, thr=thr_new,
dampening_factor=FLAGS.dampening_factor,
tag=tag, n_refractory=FLAGS.n_ref,
stop_gradients=FLAGS.eprop is not None, rec=FLAGS.rec
)
# Cell model used to solve the task, we have two because we used a bi-directional network
cell_forward = get_cell("FW")
cell_backward = get_cell("BW")
# Define the graph for the RNNs processing
with tf.variable_scope('RNNs'):
def bi_directional_lstm(inputs, layer_number):
if FLAGS.drop_out_probability > 0:
inputs = tf.nn.dropout(inputs, keep_prob=keep_prob)
with tf.variable_scope('BiDirectionalLayer' + str(layer_number)):
if layer_number == 0:
cell_f = cell_forward
cell_b = cell_backward
else:
cell_f = get_cell("FW" + str(layer_number), n_input=2 * (FLAGS.n_regular + FLAGS.n_adaptive))
cell_b = get_cell("BW" + str(layer_number), n_input=2 * (FLAGS.n_regular + FLAGS.n_adaptive))
outputs_forward, _ = tf.nn.dynamic_rnn(cell_f, inputs, dtype=tf.float32, scope='ForwardRNN')
outputs_backward, _ = tf.nn.dynamic_rnn(cell_b, tf.reverse(inputs, axis=[1]), dtype=tf.float32,
scope='BackwardRNN')
outputs_forward, _, _, _ = outputs_forward
outputs_backward, _, _, _ = outputs_backward
outputs_backward = tf.reverse(outputs_backward, axis=[1])
outputs = tf.concat([outputs_forward, outputs_backward], axis=2)
return outputs
inputs = features
output_list = []
for k_layer in range(FLAGS.n_layer):
outputs = bi_directional_lstm(inputs, k_layer)
output_list.append(outputs)
inputs = outputs
if FLAGS.loss_from_all_layers:
outputs = tf.concat(output_list, axis=2)
n_outputs = (FLAGS.n_regular + FLAGS.n_adaptive) * 2 * FLAGS.n_layer
else:
outputs = output_list[-1]
n_outputs = (FLAGS.n_regular + FLAGS.n_adaptive) * 2
if FLAGS.drop_out_probability > 0:
outputs = tf.nn.dropout(outputs, keep_prob=keep_prob)
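# Broadcast-alignment readout: the forward pass below is an ordinary linear readout
# (psp einsum W_out), but the backward pass sends the error through the feedback matrix
# BA_out instead of W_out's transpose. W_out always receives the standard gradient;
# BA_out gets the same update only when FLAGS.eprop == 'adaptive'.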
@tf.custom_gradient
def BA_logits(psp, W_out, BA_out):
logits = einsum_bij_jk_to_bik(psp, W_out)
def grad(dy):
dloss_dw_out = tf.einsum('btj,btk->jk', psp, dy)
dloss_dba_out = tf.einsum('btj,btk->jk', psp, dy) if FLAGS.eprop == 'adaptive' else tf.zeros_like(BA_out)
dloss_dpsp = tf.einsum('bik,jk->bij', dy, BA_out)
return [dloss_dpsp, dloss_dw_out, dloss_dba_out]
return logits, grad
# Define the graph for the output processing
with tf.name_scope('Output'):
n_neurons = (FLAGS.n_regular + FLAGS.n_adaptive) * 2
if FLAGS.n_repeat > 1:
new_shape = tf.convert_to_tensor((batch_size, -1, FLAGS.n_repeat, n_outputs), dtype=tf.int32)
outputs_ds = tf.reshape(outputs, shape=new_shape)
outputs_ds = tf.reduce_mean(outputs_ds, axis=2)
else:
outputs_ds = outputs
psp_decay_new = FLAGS.psp_out * (1 - np.exp(-FLAGS.dt / taua)) / (1 - np.exp(-FLAGS.dt / tauv))
print("566 psp readout decay = {:.4g}".format(psp_decay_new))
lsnn_out = exp_convolve(outputs_ds, decay=np.exp(-FLAGS.dt / FLAGS.tau_out)) if FLAGS.psp_out else outputs_ds
N_output_classes_with_blank = dataset.n_phns + 1
w_out = tf.Variable(rd.randn(n_outputs, N_output_classes_with_blank) / np.sqrt(n_outputs),
dtype=tf.float32, name="OutWeights")
if FLAGS.eprop in ['adaptive', 'random']:
if FLAGS.BAglobal:
BA_out = tf.constant(np.ones((n_outputs, N_output_classes_with_blank)) / np.sqrt(n_outputs),
dtype=tf.float32, name='BroadcastWeights')
else:
if FLAGS.eprop == 'adaptive':
init_w_out = rd.randn(n_outputs, N_output_classes_with_blank) / np.sqrt(n_outputs)
BA_out = tf.Variable(init_w_out, dtype=tf.float32, name='BroadcastWeights')
else:
init_w_out = rd.randn(n_outputs, N_output_classes_with_blank)
BA_out = tf.constant(init_w_out, dtype=tf.float32, name='BroadcastWeights')
phn_logits = BA_logits(lsnn_out, w_out, BA_out)
else:
print("Broadcast alignment disabled!")
phn_logits = einsum_bij_jk_to_bik(lsnn_out, w_out)
if FLAGS.readout_bias:
b_out = tf.Variable(np.zeros(N_output_classes_with_blank), dtype=tf.float32, name="OutBias")
phn_logits += b_out
if FLAGS.eprop == 'adaptive':
weight_decay = tf.constant(FLAGS.readout_decay, dtype=tf.float32)
w_out_decay = tf.assign(w_out, w_out - weight_decay * w_out)
BA_decay = tf.assign(BA_out, BA_out - weight_decay * BA_out)
KolenPollackDecay = [BA_decay, w_out_decay]
# Firing rate regularization
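# av below is the mean output per neuron in spikes/ms (dt is in ms); its squared
# deviation from the target rate regularization_f0 (= FLAGS.reg_rate / 1000, i.e.
# Hz converted to spikes/ms) is penalized with weight FLAGS.reg per neuron.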
with tf.name_scope('RegularizationLoss'):
av = tf.reduce_mean(outputs, axis=(0, 1)) / FLAGS.dt
regularization_coeff = tf.Variable(np.ones(n_outputs) * FLAGS.reg,
dtype=tf.float32, trainable=False)
loss_reg = tf.reduce_sum(tf.square(av - regularization_f0) * regularization_coeff)
# Define the graph for the loss function and the definition of the error
with tf.name_scope('Loss'):
loss_pred = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=phns, logits=phn_logits)
loss_pred = tf.reduce_sum(loss_pred * weighted_relevant_mask, axis=1)
loss_pred = tf.reduce_mean(loss_pred)
loss = loss_pred + loss_reg
if FLAGS.l2 > 0:
losses_l2 = [tf.reduce_sum(tf.square(w)) for w in tf.trainable_variables()]
loss += FLAGS.l2 * tf.reduce_sum(losses_l2)
phn_prediction = tf.argmax(phn_logits, axis=2)
is_correct = tf.equal(phns, phn_prediction)
is_correct_float = tf.cast(is_correct, dtype=tf.float32)
ler = tf.reduce_sum(is_correct_float * weighted_relevant_mask, axis=1)
ler = 1. - tf.reduce_mean(ler)
decoded = phn_prediction
# Define the training step operation
with tf.name_scope('Train'):
if not FLAGS.adam:
opt = tf.train.MomentumOptimizer(lr, momentum=FLAGS.momentum)
else:
opt = tf.train.AdamOptimizer(lr, epsilon=FLAGS.adam_epsilon, beta1=FLAGS.momentum)
get_noise = lambda var: tf.random_normal(shape=tf.shape(var), stddev=gd_noise)
grads = opt.compute_gradients(loss)
if not FLAGS.cell_train:
grads = [(g + get_noise(v), v) for g, v in grads if 'CustomALIF_' not in v.name]
else:
grads = [(g + get_noise(v), v) for g, v in grads]
train_var_list = [var for g, var in grads]
train_step = opt.apply_gradients(grads, global_step=global_step)
print("NUM OF TRAINABLE", len(train_var_list))
for v in train_var_list:
print(v.name)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
if len(FLAGS.checkpoint) > 0:
ckpt_vars = [v[0] for v in tf.train.list_variables(FLAGS.checkpoint[:-11])]
var_names = [v.name for v in tf.get_collection_ref(tf.GraphKeys.GLOBAL_VARIABLES)]
variables_can_be_restored = [v for v in tf.get_collection_ref(tf.GraphKeys.GLOBAL_VARIABLES) if
v.name[:-2] in ckpt_vars]
saver = tf.train.Saver(variables_can_be_restored)
saver.restore(sess, FLAGS.checkpoint)
print("Model restored from ", FLAGS.checkpoint)
else:
saver = tf.train.Saver()
if FLAGS.plot:
plt.ion()
fig, ax_list = plt.subplots(nrows=3, figsize=(12, 6))
def sparse_tensor_to_string(sp_phn_tensor, i_batch):
selection = sp_phn_tensor.indices[:, 0] == i_batch
phn_list = sp_phn_tensor.values[selection]
str_phn_list = [dataset.vocabulary[k] for k in phn_list]
str_phn_list = ['_' if phn == 'sil' else phn for phn in str_phn_list]
return ' '.join(str_phn_list)
def update_plot(result_plot_values):
for k in range(ax_list.shape[0]):
ax = ax_list[k]
ax.clear()
strip_right_top_axis(ax)
txt = dataset.meta_data_develop[0]['text']
if FLAGS.preproc == 'cochspike':
seq_len = max([i[-1] for i in np.nonzero(result_plot_values['features'][0])])
else:
seq_len = np.argmax(
(result_plot_values['features'][0] == 0.).all(axis=1)) + 50 # add 50ms to see full net activity
ax_list[0].set_title(txt)
if "cochspike" in FLAGS.preproc:
raster_plot(ax_list[0], result_plot_values['features'][0])
else:
ax_list[0].imshow(result_plot_values['features'][0].T, aspect="auto")
ax_list[0].set_xticklabels([])
ax_list[0].set_ylabel('Audio features')
i = 0 # len(dataset.meta_data_develop[0]) - 1
ind_change = np.where(np.diff(dataset.phonem_stack_develop[i]) != 0)[0]
phns_change = dataset.phonem_stack_develop[i][ind_change]
if FLAGS.n_repeat > 1:
ind_change *= FLAGS.n_repeat
ax_list[0].set_xticks(np.concatenate([[0], ind_change]))
tick_labels = [dataset.vocabulary[k] for k in phns_change]
tick_labels = ['_' if lab == 'sil' else lab for lab in tick_labels]
tick_labels.append(' ')
ax_list[0].set_xticklabels(tick_labels)
# raster_plot(ax_list[1], result_plot_values['outputs'][0][:, 0:(FLAGS.n_regular + FLAGS.n_adaptive):5])
raster_plot(ax_list[1], result_plot_values['outputs'][0])
ax_list[1].set_ylabel('LSNN\nsubsampled')
ax_list[1].set_xticklabels([])
logits = result_plot_values['phn_logits'][0].T
if FLAGS.n_repeat > 1:
logits = np.repeat(logits, repeats=FLAGS.n_repeat, axis=1)
# print("logits shape", logits.shape)
ax_list[2].imshow(logits, aspect="auto")
ax_list[2].set_ylabel('logits')
ax_list[2].set_xlabel('time in ms')
fig.subplots_adjust(hspace=0.2)
for ax in ax_list:
ax.set_xlim([0, seq_len])
ax.grid(color='black', alpha=0.4, linewidth=0.4)
plt.draw()
plt.pause(1)
results = {
'loss_list': [],
'ler_list': [],
'ler_test_list': [],
'n_synapse': [],
'iteration_list': [],
'epoch_list': [],
'training_time_list': [],
'fr_max_list': [],
'fr_avg_list': [],
}
training_time = 0
testing_time = 0
test_result_tensors = {'ler': ler,
'loss': loss,
'loss_pred': loss_pred,
'loss_reg': loss_reg,
'learning_rate': lr,
'av': av,
}
train_result_tensors = {'train_step': train_step}
total_parameters = 0
for variable in tf.trainable_variables():
shape = variable.get_shape()
variable_parameters = 1
for dim in shape:
variable_parameters *= dim.value
total_parameters += variable_parameters
print("TOTAL NUM OF PARAMETERS = ", total_parameters)
def compute_result(type="validation"):
assert type in ["validation", "test"]
total_batch_size = dataset.n_develop if type == "validation" else dataset.n_test
n_minibatch = total_batch_size // FLAGS.test_batch
mini_batch_sizes = [FLAGS.test_batch for _ in range(n_minibatch)]
if total_batch_size - (n_minibatch * FLAGS.test_batch) != 0:
mini_batch_sizes = mini_batch_sizes + [total_batch_size - (n_minibatch * FLAGS.test_batch)]
feed_dict = None
collect_results = {k: [] for k in test_result_tensors.keys()}
for idx, mb_size in enumerate(mini_batch_sizes):
selection = np.arange(mb_size)
selection = selection + np.ones_like(selection) * idx * FLAGS.test_batch
if type == "validation":
data = dataset.get_next_validation_batch(selection)
elif type == "test":
data = dataset.get_next_test_batch(selection)
feed_dict = batch_to_feed_dict(data, is_train=False)
run_output = sess.run(test_result_tensors, feed_dict=feed_dict)
for k, value in run_output.items():
collect_results[k].append(value)
plot_result = None
if type == "validation":
plot_result = sess.run(result_plot_tensors, feed_dict=feed_dict)
mean_result = {key: np.mean(collect_results[key]) for key in collect_results.keys()}
return mean_result, plot_result
min_valid_err = 1.
while dataset.current_epoch <= FLAGS.n_epochs:
k_iteration = sess.run(global_step)
if k_iteration == FLAGS.noise_step_start:
sess.run(tf.assign(gd_noise, FLAGS.gd_noise))
print('Setting gradient noise standard deviation to: {}'.format(sess.run(gd_noise)))
if k_iteration < 100 and FLAGS.ramping_learning_rate:
old_lr = sess.run(lr)
new_lr = sess.run(ramping_learning_rate_op)
if k_iteration == 0:
print('Ramping learning rate during first epoch: {:.2g} -> {:.2g}'.format(old_lr, new_lr))
    if FLAGS.lr_decay_every > 0 and np.mod(k_iteration, FLAGS.lr_decay_every)
import numpy as np
def ApplyWindowFunction(t,v,WindowFunction=None,Param=None):
'''
Apply a window function to a time series.
Inputs
======
t : float
Time array
v : float
Time series data to be windowed
WindowFunction : None | str
If None - no window is applied, otherwise the string names the
window function to be applied (see below for list of functions)
Param : float
Sometimes a window function may be modified by some parameter,
setting this keyword to None will force the routine to use a
default value where needed.
Returns
=======
vw : float
Time series data, v, with the appropriate window function
applied to it.
Window Functions
================
Function | Param
--------------------|-------------
None | N/A
'cosine-bell' | float (percentage)
    'hamming' | float (percentage)
    'hann' | float (percentage)
'triangle' | float (percentage)
'welch' | float (percentage)
'blackman' | float (percentage)
'nuttall' | float (percentage)
'blackman-nuttall' | float (percentage)
'flat-top' | float (percentage)
'cosine' | float (percentage)
'gaussian' | tuple: (float (width),float (percentage))
'''
WF = { 'none': (_WFNone,0.0),
'cosine-bell': (_WFCosineBell,10.0),
'hamming': (_WFHamming,50.0),
'hann': (_WFHann,50.0),
'triangle': (_WFTriangle,50.0),
'welch': (_WFWelch,50.0),
'blackman': (_WFBlackman,50.0),
'nuttall': (_WFNuttall,50.0),
'blackman-nuttall': (_WFBlackmanNuttall,50.0),
'flat-top': (_WFFlatTop,50.0),
'cosine': (_WFCosine,10.0),
'gaussian': (_WFGaussian,(0.4,50.0))}
# get the appropriate window function and parameters
Func,Pdef = WF.get(WindowFunction,(_WFNone,0.0))
    # check if any custom parameters are being used
if Param is None:
P = Pdef
else:
P = Param
#apply to data
return Func(t,v,P)
def WindowScaleFactor(WindowFunction=None,Param=None):
'''
Work out the scaling factor for the amplitude due to the choice of
window function.
'''
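    # The factor is the mean value of the window over the record: the untapered
    # fraction contributes 1 and the tapered fraction contributes the mean of the
    # window function itself (the Iwind constant inside each _SF* routine).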
SF = { 'none': (_SFNone,0.0),
'cosine-bell': (_SFCosineBell,10.0),
'hamming': (_SFHamming,50.0),
'hann': (_SFHann,50.0),
'triangle': (_SFTriangle,50.0),
'welch': (_SFWelch,50.0),
'blackman': (_SFBlackman,50.0),
'nuttall': (_SFNuttall,50.0),
'blackman-nuttall': (_SFBlackmanNuttall,50.0),
'flat-top': (_SFFlatTop,50.0),
'cosine': (_SFCosine,10.0),
'gaussian': (_SFGaussian,(0.4,50.0))}
# get the appropriate window function and parameters
Func,Pdef = SF.get(WindowFunction,(_SFNone,0.0))
    # check if any custom parameters are being used
if Param is None:
P = Pdef
else:
P = Param
return Func(P)
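# Minimal usage sketch (illustrative only; the signal below is assumed and not part
# of this module): window a time series, then divide the FFT amplitudes by the
# matching scale factor to compensate for the amplitude removed by the taper.
#   t = np.arange(0.0, 10.0, 0.01)
#   v = np.sin(2.0*np.pi*0.5*t)
#   vw = ApplyWindowFunction(t, v, WindowFunction='hann', Param=50.0)
#   sf = WindowScaleFactor(WindowFunction='hann', Param=50.0)
#   amp = 2.0*np.abs(np.fft.rfft(vw))/(t.size*sf)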
def _WFNone(t,v,P):
'''
No window function - just return original array.
'''
return v
def _SFNone(P):
'''
Scaling factor of the uniform window.
'''
return 1.0
def _WFCosineBell(t,v,P=10.0):
'''
    This will multiply the data by the Split cosine bell function.
Inputs
======
t : float
Time array
v : float
Time series to be windowed
P : float
Percentage of time series at each end to be part of the cosine.
The remaining 100 - 2*P % is left unchanged (if you set to 50.0,
then the whole window has the function applied to it).
Returns
=======
out : float
Windowed version of v
'''
#get the time range
t0 = np.nanmin(t)
t1 = np.nanmax(t)
tr = t1 - t0
#get a scaled time array
ts = 100.0*(t - t0)/tr
#work out the indices for each section
i0 = np.where(ts < P)[0]
i2 = np.where(ts > (100 - P))[0]
#calculate the window function
w = np.ones(t.size,dtype=v.dtype)
#first section
w[i0] = 0.5 + 0.5*np.cos((ts[i0]/P + 1.0)*np.pi)
#last section
w[i2] = 0.5 + 0.5*np.cos(np.pi*(ts[i2] - (100 - P))/P)
#multiply by v
out = v*w
return out
def _SFCosineBell(P):
'''
Scaling factor of the cosine-bell function.
'''
#work out the proportions of the window to which the function is
#applied
#untouched portion
p0 = 0.01*(100 - 2*P)
#touched portion
p1 = 1.0 - p0
#this is the integral of the window function if it were to apply
#to the entire window of data
Iwind = 0.5
#calculate the overall factor
sf = p0 + Iwind*p1
return sf
def _WFHamming(t,v,P=50.0):
'''
    This will multiply the data by the Hamming window function.
Inputs
======
t : float
Time array
v : float
Time series to be windowed
P : float
Percentage of window to have the function applied at each end
(if you set to 50.0, then the whole window has the function
applied to it).
Returns
=======
out : float
Windowed version of v
'''
#get the time range
t0 = np.nanmin(t)
t1 = np.nanmax(t)
tr = t1 - t0
#get a scaled time array
ts = 100.0*(t - t0)/tr
#work out the indices for each section
i0 = np.where(ts < P)[0]
i2 = np.where(ts > (100 - P))[0]
#calculate the window function
w = np.ones(t.size,dtype=v.dtype)
#first section
w[i0] = 0.53836 - 0.46164*np.cos(np.pi*ts[i0]/P)
#last section
w[i2] = 0.53836 - 0.46164*np.cos(np.pi*(1.0 + (ts[i2] - (100 - P))/P))
#multiply by v
out = v*w
return out
def _SFHamming(P):
'''
Scaling factor of the Hamming function.
'''
#work out the proportions of the window to which the function is
#applied
#untouched portion
p0 = 0.01*(100 - 2*P)
#touched portion
p1 = 1.0 - p0
#this is the integral of the window function if it were to apply
#to the entire window of data
Iwind = 0.53836
#calculate the overall factor
sf = p0 + Iwind*p1
return sf
def _WFHann(t,v,P=50.0):
'''
    This will multiply the data by the Hann window (sometimes
    erroneously called the "Hanning" window). This is similar to the
    Hamming window, but it touches zero at each end.
Inputs
======
t : float
Time array
v : float
Time series to be windowed
P : float
Percentage of window to have the function applied at each end
(if you set to 50.0, then the whole window has the function
applied to it).
Returns
=======
out : float
Windowed version of v
'''
#get the time range
t0 = np.nanmin(t)
t1 = np.nanmax(t)
tr = t1 - t0
#get a scaled time array
ts = 100.0*(t - t0)/tr
#work out the indices for each section
i0 = np.where(ts < P)[0]
i2 = np.where(ts > (100 - P))[0]
#calculate the window function
w = np.ones(t.size,dtype=v.dtype)
#first section
w[i0] = 0.5 - 0.5*np.cos(np.pi*ts[i0]/P)
#last section
w[i2] = 0.5 - 0.5*np.cos(np.pi*(1.0 + (ts[i2] - (100 - P))/P))
#multiply by v
out = v*w
return out
def _SFHann(P):
'''
Scaling factor of the Hann function.
'''
#work out the proportions of the window to which the function is
#applied
#untouched portion
p0 = 0.01*(100 - 2*P)
#touched portion
p1 = 1.0 - p0
#this is the integral of the window function if it were to apply
#to the entire window of data
Iwind = 0.5
#calculate the overall factor
sf = p0 + Iwind*p1
return sf
def _WFTriangle(t,v,P=50.0):
'''
    This will multiply the data by the Triangle window.
Inputs
======
t : float
Time array
v : float
Time series to be windowed
P : float
Percentage of window to have the function applied at each end
(if you set to 50.0, then the whole window has the function
applied to it).
Returns
=======
out : float
Windowed version of v
'''
#get the time range
t0 = np.nanmin(t)
t1 = np.nanmax(t)
tr = t1 - t0
#get a scaled time array
ts = 100.0*(t - t0)/tr
#work out the indices for each section
i0 = np.where(ts < P)[0]
i2 = np.where(ts > (100 - P))[0]
#calculate the window function
w = np.ones(t.size,dtype=v.dtype)
#first part
w[i0] = 1.0 - np.abs(ts[i0] - P)/P
#second part
w[i2] = 1.0 - np.abs(ts[i2] - (100 - P))/P
out = w*v
return out
def _SFTriangle(P):
'''
Scaling factor of the Triangle function.
'''
#work out the proportions of the window to which the function is
#applied
#untouched portion
p0 = 0.01*(100 - 2*P)
#touched portion
p1 = 1.0 - p0
#this is the integral of the window function if it were to apply
#to the entire window of data
Iwind = 0.5
#calculate the overall factor
sf = p0 + Iwind*p1
return sf
def _WFWelch(t,v,P=5.0):
'''
    This will multiply the data by the Welch window.
Inputs
======
t : float
Time array
v : float
Time series to be windowed
P : float
Percentage of window to have the function applied at each end
(if you set to 50.0, then the whole window has the function
applied to it).
Returns
=======
out : float
Windowed version of v
'''
#get the time range
t0 = np.nanmin(t)
t1 = np.nanmax(t)
tr = t1 - t0
#get a scaled time array
ts = 100.0*(t - t0)/tr
#work out the indices for each section
i0 = np.where(ts < P)[0]
i2 = np.where(ts > (100 - P))[0]
#calculate the window function
w = np.ones(t.size,dtype=v.dtype)
#first part
w[i0] = 1 - ((ts[i0] - P)/P)**2
#second part
w[i2] = 1 - ((ts[i2] - (100 - P))/P)**2
out = w*v
return out
def _SFWelch(P):
'''
Scaling factor of the Welch function.
'''
#work out the proportions of the window to which the function is
#applied
#untouched portion
p0 = 0.01*(100 - 2*P)
#touched portion
p1 = 1.0 - p0
#this is the integral of the window function if it were to apply
#to the entire window of data
Iwind = 2.0/3.0
#calculate the overall factor
sf = p0 + Iwind*p1
return sf
def _WFBlackman(t,v,P=50.0):
'''
    This will multiply the data by the Blackman window.
Inputs
======
t : float
Time array
v : float
Time series to be windowed
P : float
Percentage of window to have the function applied at each end
(if you set to 50.0, then the whole window has the function
applied to it).
Returns
=======
out : float
Windowed version of v
'''
#get the time range
t0 = np.nanmin(t)
t1 = np.nanmax(t)
tr = t1 - t0
#get a scaled time array
ts = 100.0*(t - t0)/tr
#work out the indices for each section
i0 = np.where(ts < P)[0]
i2 = np.where(ts > (100 - P))[0]
#calculate the window function
w = np.ones(t.size,dtype=v.dtype)
#some constants
a0 = 7938.0/18608
a1 = 9240.0/18608
a2 = 1430.0/18608
#first part
w[i0] = a0 - a1*np.cos(np.pi*ts[i0]/P) + a2*np.cos(2*np.pi*ts[i0]/P)
#second part
w[i2] = a0 - a1*np.cos(np.pi*(ts[i2] - (100 - 2*P))/P) + a2*np.cos(2*np.pi*(ts[i2] - (100 - 2*P))/P)
out = w*v
return out
def _SFBlackman(P):
'''
Scaling factor of the Blackman function.
'''
#work out the proportions of the window to which the function is
#applied
#untouched portion
p0 = 0.01*(100 - 2*P)
#touched portion
p1 = 1.0 - p0
#this is the integral of the window function if it were to apply
#to the entire window of data
Iwind = 7938.0/18608
#calculate the overall factor
sf = p0 + Iwind*p1
return sf
def _WFNuttall(t,v,P=50.0):
'''
    This will multiply the data by the Nuttall window.
Inputs
======
t : float
Time array
v : float
Time series to be windowed
P : float
Percentage of window to have the function applied at each end
(if you set to 50.0, then the whole window has the function
applied to it).
Returns
=======
out : float
Windowed version of v
'''
#get the time range
t0 = np.nanmin(t)
t1 = np.nanmax(t)
tr = t1 - t0
#get a scaled time array
ts = 100.0*(t - t0)/tr
#work out the indices for each section
i0 = np.where(ts < P)[0]
i2 = np.where(ts > (100 - P))[0]
#calculate the window function
w = np.ones(t.size,dtype=v.dtype)
#some constants
a0 = 0.355768
a1 = 0.487396
a2 = 0.144232
a3 = 0.012604
#first part
w[i0] = a0 - a1*np.cos(np.pi*ts[i0]/P) + a2*np.cos(2*np.pi*ts[i0]/P) - a3*np.cos(3*np.pi*ts[i0]/P)
#second part
w[i2] = a0 - a1*np.cos(np.pi*(ts[i2] - (100 - 2*P))/P) + a2*np.cos(2*np.pi*(ts[i2] - (100 - 2*P))/P) - a3*np.cos(3*np.pi*(ts[i2] - (100 - 2*P))/P)
out = w*v
return out
def _SFNuttall(P):
'''
Scaling factor of the Nuttall function.
'''
#work out the proportions of the window to which the function is
#applied
#untouched portion
p0 = 0.01*(100 - 2*P)
#touched portion
p1 = 1.0 - p0
#this is the integral of the window function if it were to apply
#to the entire window of data
Iwind = 0.355768
#calculate the overall factor
sf = p0 + Iwind*p1
return sf
def _WFBlackmanNuttall(t,v,P=50.0):
'''
    This will multiply the data by the Blackman-Nuttall window.
Inputs
======
t : float
Time array
v : float
Time series to be windowed
P : float
Percentage of window to have the function applied at each end
(if you set to 50.0, then the whole window has the function
applied to it).
Returns
=======
out : float
Windowed version of v
'''
#get the time range
t0 = np.nanmin(t)
t1 = np.nanmax(t)
tr = t1 - t0
#get a scaled time array
ts = 100.0*(t - t0)/tr
#work out the indices for each section
i0 = np.where(ts < P)[0]
i2 = np.where(ts > (100 - P))[0]
#calculate the window function
w = np.ones(t.size,dtype=v.dtype)
#some constants
a0 = 0.3635819
a1 = 0.4891775
a2 = 0.1365995
a3 = 0.0106411
#first part
w[i0] = a0 - a1*np.cos(np.pi*ts[i0]/P) + a2*np.cos(2*np.pi*ts[i0]/P) - a3*np.cos(3*np.pi*ts[i0]/P)
#second part
w[i2] = a0 - a1*np.cos(np.pi*(ts[i2] - (100 - 2*P))/P) + a2*np.cos(2*np.pi*(ts[i2] - (100 - 2*P))/P) - a3*np.cos(3*np.pi*(ts[i2] - (100 - 2*P))/P)
out = w*v
return out
def _SFBlackmanNuttall(P):
'''
Scaling factor of the Blackman-Nuttall function.
'''
#work out the proportions of the window to which the function is
#applied
#untouched portion
p0 = 0.01*(100 - 2*P)
#touched portion
p1 = 1.0 - p0
#this is the integral of the window function if it were to apply
#to the entire window of data
Iwind = 0.3635819
#calculate the overall factor
sf = p0 + Iwind*p1
return sf
def _WFFlatTop(t,v,P=0.0):
'''
    This will multiply the data by the flat top window.
Inputs
======
t : float
Time array
v : float
Time series to be windowed
P : float
Percentage of window to have the function applied at each end
(if you set to 50.0, then the whole window has the function
applied to it).
Returns
=======
out : float
Windowed version of v
'''
#get the time range
t0 = np.nanmin(t)
    t1 = np.nanmax(t)
import pygame
import sys
import numpy
pygame.init()
FPS = 60
WIN_WIDTH = 600
WIN_HEIGHT = 400
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
RUNNING = True
RADIUS = 4
EPS = 0.0000001
clock = pygame.time.Clock()
sc = pygame.display.set_mode((WIN_WIDTH, WIN_HEIGHT))
FONT = pygame.font.Font(None, 20)
def lieInBoardersOfPolinom(dots, p0): #dimensionalTest
minx = dots[0][0]
miny = dots[0][1]
maxx = dots[0][0]
    maxy = dots[0][1]
for i in dots:
if i[0] > maxx:
maxx = i[0]
if i[0] < minx:
minx = i[0]
if i[1] > maxy:
maxy = i[1]
if i[1] < miny:
miny = i[1]
if minx <= p0[0] <= maxx and miny <= p0[1] <= maxy:
return True
else:
return False
def isZero(number):
return numpy.abs(number) <= EPS
def det(p1, p2, p3, p4):
return numpy.linalg.det(numpy.array([p2 - p1, p4 - p3]))
def dot(p1, p2, p3, p4):
return numpy.dot(p2 - p1, p4 - p3)
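# det() returns twice the signed area spanned by (p2 - p1) and (p4 - p3), so its sign
# encodes the relative orientation of the two segments; dot() projects one segment
# onto another and is used below for the collinear-overlap test.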
def isIntersected(p1, p2, p3, p4):
d1 = det(p1, p2, p1, p4)
d2 = det(p1, p2, p1, p3)
d3 = det(p3, p4, p3, p1)
d4 = det(p3, p4, p3, p2)
if d1 * d2 <= 0 and d3 * d4 <= 0:
return True
else:
return False
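# The test above declares the segments intersecting when each one straddles (or
# touches) the supporting line of the other, i.e. both pairs of orientation values
# change sign or vanish.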
def lieInside(p1, p2, p3, p4):
d1 = det(p1, p2, p1, p4)
d2 = det(p1, p2, p1, p3)
d3 = det(p3, p4, p3, p1)
d4 = det(p3, p4, p3, p2)
if isZero(d1) and isZero(d2) and isZero(d3) and isZero(d4):
c1 = dot(p1, p3, p1, p4)
        c2 = dot(p2, p3, p2, p4)
c3 = dot(p3, p1, p3, p2)
c4 = dot(p4, p1, p4, p2)
if c1 <= 0 or c2 <= 0 or c3 <= 0 or c4 <= 0:
return True
else:
return False
else:
return False
def next(i, n):
return i+1 if i+1 != n else 0
def prev(i, n):
return i-1 if i-1 != -1 else n-1
def getSide(p0, p1, p2):
    d = numpy.linalg.det(numpy.array([p0 - p1, p2 - p1]))
    return -1 if d > 0 else 1 if d < 0 else 0
def belongToLine(p0, p1, p2):
return True if getSide(p0, p1, p2) == 0 else False
p0 = numpy.array([200, 200])
DOTS = [numpy.array([100, 100]),
numpy.array([150, 200]),
numpy.array([150, 220]),
numpy.array([120, 200]),
numpy.array([100, 200]),
numpy.array([250, 280]),
        numpy.array([400, 300])
#-*- coding:utf-8 -*-
#'''
# Created on 19-5-11 2:25 PM
#
# @Author: <NAME>(laygin)
#'''
import numpy as np
import cv2
def quads_area(quads):
'''
:param quads:(n, 4, 2) for quadrilateral points
:return:
'''
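    # Split each quadrilateral along the p0-p2 diagonal into triangles (p0, p1, p2)
    # and (p0, p3, p2); each triangle's area is half the magnitude of a cross product.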
p0, p1, p2, p3 = quads[:, 0], quads[:, 1], quads[:, 2], quads[:, 3]
a1 = np.abs(np.cross(p0-p1, p1-p2)) / 2
a2 = np.abs(np.cross(p0-p3, p3-p2)) / 2
return a1+a2
def clip_quads(quads, clip_box, alpha=0.25):
'''
:param quads: shape is (n, 4, 2)
:param clip_box:[0, 0, w, h]
:param alpha:
:return:
'''
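    # Clip every corner to the image rectangle, then keep only quads that retain more
    # than a fraction alpha of their original area and did not collapse to zero area.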
areas_ = quads_area(quads)
quads[:,:,0] = np.minimum(np.maximum(quads[:,:,0], clip_box[0]), clip_box[2]) # 0<= x <= w
quads[:, :, 1] = np.minimum(np.maximum(quads[:, :, 1], clip_box[1]), clip_box[3]) # 0<= y <= h
delta_area = (areas_ - quads_area(quads)) / (areas_ + 1e-6)
mask = (delta_area < (1-alpha)) & (quads_area(quads)>0)
return quads[mask, :, :]
class RandomCrop(object):
"""Crop randomly the image in a sample.
Args:
output_size (tuple or int): Desired output size. If int, square crop
is made.
"""
def __init__(self, output_size):
assert isinstance(output_size, (int, tuple))
if isinstance(output_size, int):
self.output_size = (output_size, output_size)
else:
assert len(output_size) == 2
self.output_size = output_size
def __call__(self, image, coors):
h, w = image.shape[:2]
new_h, new_w = self.output_size
top = np.random.randint(0, h - new_h)
left = np.random.randint(0, w - new_w)
image = image[top: top + new_h,
left: left + new_w]
for i in range(len(coors)):
coors[i] -= [left, top]
coors[i] = clip_quads(coors[i], [0, 0, new_w, new_h], 0.25)
return image, coors
class CenterCrop(object):
def __init__(self, output_size):
assert isinstance(output_size, (int, tuple))
if isinstance(output_size, int):
self.output_size = (output_size, output_size)
else:
assert len(output_size) == 2
self.output_size = output_size
def __call__(self, image, coors):
h, w = image.shape[:2]
new_h, new_w = self.output_size
top = int((h - new_h) // 2)
left = int((w - new_w) // 2)
image = image[top: top + new_h,
left: left + new_w]
for i in range(len(coors)):
coors[i] -= [left, top]
coors[i] = clip_quads(coors[i], [0, 0, new_w, new_h], 0.25)
return image, coors
class Resize(object):
def __init__(self, output_size):
assert isinstance(output_size, (int, tuple))
self.output_size = output_size
def __call__(self, image, coors, size=None):
h, w = image.shape[:2]
if size is not None:
self.output_size = size
if isinstance(self.output_size, int):
if h > w:
new_h, new_w = self.output_size * h / w, self.output_size
else:
new_h, new_w = self.output_size, self.output_size * w / h
else:
new_h, new_w = self.output_size
new_h, new_w = int(new_h), int(new_w)
img = cv2.resize(image, (new_w, new_h))
for i in range(len(coors)):
coors[i][:, :, 0] = coors[i][:, :, 0] * new_w / w
coors[i][:,:,1] = coors[i][:,:,1] * new_h / h
return img, coors
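# Minimal usage sketch (illustrative; the image path and quad coordinates below are
# assumed): the transforms share an (image, coors) calling convention, where coors is
# a list of (n, 4, 2) quadrilateral arrays, so they can be chained directly.
#   img = cv2.imread('sample.jpg')
#   quads = [np.array([[[10, 10], [60, 10], [60, 40], [10, 40]]], dtype=np.float32)]
#   img, quads = Resize(512)(img, quads)
#   img, quads = RandomCrop(448)(img, quads)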
'''augmentation'''
class RandomBrightness(object):
def __init__(self, delta=32):
assert 255 >= delta >= 0, 'delta is invalid'
self.delta = delta
def __call__(self, img, coors=None):
img = img.astype(np.float32)
if np.random.randint(0,2):
delta = np.random.uniform(-self.delta, self.delta)
img += delta
        return np.clip(img, 0, 255)