| prompt | completion | api |
|---|---|---|
| stringlengths 19 to 879k | stringlengths 3 to 53.8k | stringlengths 8 to 59 |
#!/usr/bin/env python
# use python3
import numpy as np
import time
#import sys
import argparse
import scipy
from scipy import ndimage as nd
#from dpLoadh5 import dpLoadh5
from dpFRAG import dpFRAG
import _FRAG_extension as FRAG_extension
# python wrapper for new RAG building routine
def build_frag_new(supervoxels, nsupervoxels, pad=True, nbhd=1, conn=3, steps=None, max_step=None, nalloc_rag=50,
nalloc_borders=1000):
dtype=np.uint32; test=np.zeros((2,2),dtype=dtype)
if type(supervoxels) != type(test):
raise Exception( 'In build_frag, supervoxels is not *NumPy* array')
if len(supervoxels.shape) != 3:
raise Exception( 'In build_frag, supervoxels shape not 3 dimensional')
if not supervoxels.flags.contiguous or np.isfortran(supervoxels):
raise Exception( 'In build_frag, supervoxels not C-order contiguous')
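# Illustration (added sketch, not part of the original module): an input that passes the
# checks above must be a NumPy array, 3-D, and C-contiguous; uint32 matches the dtype of
# the test array created above.
if __name__ == '__main__':
    sv = np.zeros((4, 4, 4), dtype=np.uint32)
    print(type(sv) is np.ndarray, sv.ndim == 3, sv.flags.contiguous and not np.isfortran(sv))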
# -*- coding: utf-8 -*-
# Copyright (c) 2020 <NAME>
# Adapted and modified from a MATLAB routine written by <NAME>
"""Functions to compute pseudo transfer entropy (pTE).
This module provides a set of functions to compute pTE between different
time series.
Functions
---------------------
* normalisa -- L2 normalization, can be replaced by the
sklearn.preprocessing.normalize(*args) function
* embed -- generates matrices containing segments of the original time
series, depending on the embedding size chosen.
* timeshifted -- creates time-shifted surrogates. The sign of the shift determines
which of the two time series is treated as the independent one.
* pTE -- Computes the pseudo transfer entropy between time series.
Libraries required
---------------------
import numpy as np
import scipy.signal as sps
from collections import deque
"""
import numpy as np
import scipy.signal as sps
from collections import deque
def normalisa(a, order=2, axis=-1):
l2 = np.atleast_1d(np.linalg.norm(a, order, axis))
l2[l2 == 0] = 1
return a / np.expand_dims(l2, axis)
def embed(x, embd, lag):
N = len(x)
hidx = np.arange(embd * lag, step=lag)
vidx = np.arange(N - (embd - 1) * lag)
vidx = vidx.T
Nv = len(vidx)
U = np.array([x, ] * embd)
W = np.array([hidx, ] * Nv).T + np.array([vidx, ] * embd)
u = np.zeros((embd, Nv))
for i in range(embd):
for j in range(Nv):
u[i, j] = U[i, W[i, j]]
return u.T
def timeshifted(timeseries, shift):
ts = deque(timeseries)
ts.rotate(shift)
return np.asarray(ts)
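# Example (added sketch): delay-embed a short series and build a time-shifted surrogate,
# illustrating the two helpers defined above.
if __name__ == '__main__':
    x_demo = np.arange(6)
    print(embed(x_demo, 2, 1))     # rows are [x[t], x[t+1]] for t = 0..4
    print(timeshifted(x_demo, 1))  # [5 0 1 2 3 4], i.e. rotated right by one step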
def iaaft(x, maxiter=1000, atol=1e-8, rtol=1e-10):
"""Return iterative amplitude adjusted Fourier transform surrogates.
This function has been taken from the NoLiTSA package,
Copyright (c) 2015-2016, <NAME>.
All rights reserved.
Returns phase randomized, amplitude adjusted (IAAFT) surrogates with
the same power spectrum (to a very high accuracy) and distribution
as the original data using an iterative scheme (Schreiber & Schmitz
1996).
Parameters
----------
x : array
1-D real input array of length N containing the time series.
maxiter : int, optional (default = 1000)
Maximum iterations to be performed while checking for
convergence. The scheme may converge before this number as
well (see Notes).
atol : float, optional (default = 1e-8)
Absolute tolerance for checking convergence (see Notes).
rtol : float, optional (default = 1e-10)
Relative tolerance for checking convergence (see Notes).
Returns
-------
y : array
Surrogate series with (almost) the same power spectrum and
distribution.
i : int
Number of iterations that have been performed.
e : float
Root-mean-square deviation (RMSD) between the absolute squares
of the Fourier amplitudes of the surrogate series and that of
the original series.
Notes
-----
To check if the power spectrum has converged, we see if the absolute
difference between the current (cerr) and previous (perr) RMSDs is
within the limits set by the tolerance levels, i.e., if abs(cerr -
perr) <= atol + rtol*perr. This follows the convention used in
the NumPy function numpy.allclose().
Additionally, atol and rtol can be both set to zero in which
case the iterations end only when the RMSD stops changing or when
maxiter is reached.
"""
# Calculate "true" Fourier amplitudes and sort the series.
ampl = np.abs(np.fft.rfft(x))
sort = np.sort(x)
# Previous and current error.
perr, cerr = (-1, 1)
# Start with a random permutation.
t = np.fft.rfft(np.random.permutation(x))
for i in range(maxiter):
# Match power spectrum.
s = np.real(np.fft.irfft(ampl * t / np.abs(t), n=len(x)))
# Match distribution by rank ordering.
y = sort[np.argsort(np.argsort(s))]
t = np.fft.rfft(y)
cerr = np.sqrt(np.mean((ampl ** 2 - np.abs(t) ** 2) ** 2))
# Check convergence.
if abs(cerr - perr) <= atol + rtol * abs(perr):
break
else:
perr = cerr
# Normalize error w.r.t. mean of the "true" power spectrum.
return y, i, cerr / np.mean(ampl ** 2)
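# Example usage (added sketch): build one IAAFT surrogate of a noisy sine wave. The
# surrogate keeps exactly the same value distribution as the input, and its power
# spectrum matches to within the reported error.
if __name__ == '__main__':
    x_demo = np.sin(np.linspace(0, 20 * np.pi, 512)) + 0.1 * np.random.randn(512)
    y_surr, n_iter, spec_err = iaaft(x_demo)
    print(n_iter, spec_err)
    print(np.allclose(np.sort(y_surr), np.sort(x_demo)))  # True: same value distribution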
def pTE(z, tau=1, dimEmb=1, surr=None, Nsurr=19):
"""Returns pseudo transfer entropy.
Parameters
----------
z : array
array of arrays, containing all the time series.
tau : integer
delay of the embedding.
dimEmb : integer
embedding dimension, or model order.
surr : string
if 'ts' it computes the maximum value obtained using Nsurr (default 19)
time-shifted surrogates
if 'iaaft' it computes the maximum value obtained using Nsurr (default 19)
IAAFT surrogates
Returns
-------
pte : array
array of arrays. The dimension is (# time series, # time series).
The diagonal is 0, while the off diagonal term (i, j) corresponds
to the pseudo transfer entropy from time series i to time series j.
ptesurr : array
array of arrays. The dimension is (# time series, # time series).
The diagonal is 0, while the off diagonal term (i, j) corresponds
to the pseudo transfer entropy from time series i to surrogate time
series j.
In case of surrogates it returns pte and the maximum value obtained with the
surrogates, ptesurr.
"""
NN, T = np.shape(z)
Npairs = NN * (NN - 1)
pte = np.zeros((NN, NN))
ptesurr = np.zeros((NN, NN))
z = normalisa(sps.detrend(z))
channels = np.arange(NN, step=1)
for i in channels:
EmbdDumm = embed(z[i], dimEmb + 1, tau)
Xtau = EmbdDumm[:, :-1]
for j in channels:
if i != j:
Yembd = embed(z[j], dimEmb + 1, tau)
Y = Yembd[:, -1]
Ytau = Yembd[:, :-1]
XtYt = np.concatenate((Xtau, Ytau), axis=1)
YYt = np.concatenate((Y[:, np.newaxis], Ytau), axis=1)
YYtXt = np.concatenate((YYt, Xtau), axis=1)
if dimEmb > 1:
ptedum = np.linalg.det(np.cov(XtYt.T)) * np.linalg.det(np.cov(YYt.T)) / (
np.linalg.det(np.cov(YYtXt.T)) * np.linalg.det(np.cov(Ytau.T)))
else:
ptedum = np.linalg.det(np.cov(XtYt.T)) * np.linalg.det(np.cov(YYt.T)) / (
np.linalg.det(np.cov(YYtXt.T)) * np.cov(Ytau.T))
import unittest
import astropy_healpix as aph
import healvis
import numpy as np
import pytest
from astropy.units import sday, rad
from pyuvsim.analyticbeam import AnalyticBeam
from vis_cpu import HAVE_GPU
from hera_sim.defaults import defaults
from hera_sim import io
from hera_sim import vis
from hera_sim.antpos import linear_array
from hera_sim.visibilities import VisCPU, HealVis
SIMULATORS = (HealVis, VisCPU)
if HAVE_GPU:
class VisGPU(VisCPU):
"""Simple mock class to make testing VisCPU with use_gpu=True easier"""
def __init__(self, *args, **kwargs):
super().__init__(*args, use_gpu=True, **kwargs)
SIMULATORS = SIMULATORS + (VisGPU,)
np.random.seed(0)
NTIMES = 10
BM_PIX = 31
NPIX = 12 * 16 ** 2
NFREQ = 5
@pytest.fixture
def uvdata():
defaults.set("h1c")
return io.empty_uvdata(
Nfreqs=NFREQ,
integration_time=sday.to("s") / NTIMES,
Ntimes=NTIMES,
array_layout={
0: (0, 0, 0),
},
start_time=2456658.5,
conjugation="ant1<ant2",
)
@pytest.fixture
def uvdataJD():
defaults.set("h1c")
return io.empty_uvdata(
Nfreqs=NFREQ,
integration_time=sday.to("s") / NTIMES,
Ntimes=NTIMES,
array_layout={
0: (0, 0, 0),
},
start_time=2456659,
)
def test_healvis_beam(uvdata):
freqs = np.unique(uvdata.freq_array)
# just anything
point_source_pos = np.array([[0, uvdata.telescope_location_lat_lon_alt[0]]])
point_source_flux = np.array([[1.0]] * len(freqs))
hv = HealVis(
uvdata=uvdata,
sky_freqs=np.unique(uvdata.freq_array),
point_source_flux=point_source_flux,
point_source_pos=point_source_pos,
nside=2 ** 4,
)
assert len(hv.beams) == 1
assert isinstance(hv.beams[0], healvis.beam_model.AnalyticBeam)
def test_healvis_beam_obsparams(tmpdir):
# Now try creating with an obsparam file
direc = tmpdir.mkdir("test_healvis_beam")
with open(direc.join("catalog.txt"), "w") as fl:
fl.write(
"""SOURCE_ID RA_J2000 [deg] Dec_J2000 [deg] Flux [Jy] Frequency [Hz]
HERATEST0 68.48535 -28.559917 1 100000000.0
"""
)
with open(direc.join("telescope_config.yml"), "w") as fl:
fl.write(
"""
beam_paths:
0 : 'uniform'
telescope_location: (-30.72152777777791, 21.428305555555557, 1073.0000000093132)
telescope_name: MWA
"""
)
with open(direc.join("layout.csv"), "w") as fl:
fl.write(
"""Name Number BeamID E N U
Tile061 40 0 -34.8010 -41.7365 1.5010
Tile062 41 0 -28.0500 -28.7545 1.5060
Tile063 42 0 -11.3650 -29.5795 1.5160
Tile064 43 0 -9.0610 -20.7885 1.5160
"""
)
with open(direc.join("obsparams.yml"), "w") as fl:
fl.write(
"""
freq:
Nfreqs: 1
channel_width: 80000.0
start_freq: 100000000.0
sources:
catalog: {0}/catalog.txt
telescope:
array_layout: {0}/layout.csv
telescope_config_name: {0}/telescope_config.yml
time:
Ntimes: 1
integration_time: 11.0
start_time: 2458098.38824015
""".format(
direc.strpath
)
)
hv = HealVis(obsparams=direc.join("obsparams.yml").strpath)
beam = hv.beams[0]
print(beam)
print(type(beam))
print(beam.__class__)
assert isinstance(beam, healvis.beam_model.AnalyticBeam)
def test_JD(uvdata, uvdataJD):
freqs = np.unique(uvdata.freq_array)
# put a point source in
point_source_pos = np.array([[0, uvdata.telescope_location_lat_lon_alt[0]]])
point_source_flux = np.array([[1.0]] * len(freqs))
viscpu1 = VisCPU(
uvdata=uvdata,
sky_freqs=np.unique(uvdata.freq_array),
point_source_flux=point_source_flux,
point_source_pos=point_source_pos,
nside=2 ** 4,
).simulate()
viscpu2 = VisCPU(
uvdata=uvdataJD,
sky_freqs=np.unique(uvdataJD.freq_array),
point_source_flux=point_source_flux,
point_source_pos=point_source_pos,
nside=2 ** 4,
).simulate()
assert viscpu1.shape == viscpu2.shape
assert not np.allclose(viscpu1, viscpu2, atol=0.1)
@pytest.fixture
def uvdata2():
defaults.set("h1c")
return io.empty_uvdata(
Nfreqs=NFREQ,
integration_time=sday.to("s") / NTIMES,
Ntimes=NTIMES,
array_layout={0: (0, 0, 0), 1: (1, 1, 0)},
start_time=2456658.5,
conjugation="ant1<ant2",
)
def create_uniform_sky(nbase=4, scale=1, nfreq=NFREQ):
"""Create a uniform sky with total (integrated) flux density of `scale`"""
nside = 2 ** nbase
npix = 12 * nside ** 2
return np.ones((nfreq, npix)) * scale / (4 * np.pi)
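@pytest.mark.parametrize("scale", [1, 3.5])
def test_uniform_sky_total_flux(scale):
    # Added sketch (not part of the original suite): the uniform sky defined above should
    # integrate to `scale` when each pixel is weighted by its solid angle 4*pi/npix.
    nbase = 4
    sky = create_uniform_sky(nbase=nbase, scale=scale)
    npix = 12 * (2 ** nbase) ** 2
    np.testing.assert_allclose(sky.sum(axis=1) * 4 * np.pi / npix, scale)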
@pytest.mark.parametrize("simulator", SIMULATORS)
def test_shapes(uvdata, simulator):
I_sky = create_uniform_sky()
v = simulator(
uvdata=uvdata,
sky_freqs=np.unique(uvdata.freq_array),
sky_intensity=I_sky,
)
assert v.simulate().shape == (uvdata.Nblts, 1, NFREQ, 1)
@pytest.mark.parametrize("precision, cdtype", [(1, np.complex64), (2, complex)])
def test_dtypes(uvdata, precision, cdtype):
I_sky = create_uniform_sky()
sim = VisCPU(
uvdata=uvdata,
sky_freqs=np.unique(uvdata.freq_array),
sky_intensity=I_sky,
precision=precision,
)
v = sim.simulate()
assert v.dtype == cdtype
@pytest.mark.parametrize("simulator", SIMULATORS)
def test_zero_sky(uvdata, simulator):
I_sky = create_uniform_sky(scale=0)
sim = simulator(
uvdata=uvdata, sky_freqs=np.unique(uvdata.freq_array), sky_intensity=I_sky
)
v = sim.simulate()
np.testing.assert_equal(v, 0)
@pytest.mark.parametrize("simulator", SIMULATORS)
def test_autocorr_flat_beam(uvdata, simulator):
I_sky = create_uniform_sky(nbase=6)
sim = simulator(
uvdata=uvdata,
sky_freqs=np.unique(uvdata.freq_array),
sky_intensity=I_sky,
)
v = sim.simulate()
# Account for factor of 2 between Stokes I and 'xx' pol for vis_cpu
if simulator == VisCPU:
v *= 2.0
np.testing.assert_allclose(np.abs(v), np.mean(v), rtol=1e-5)
np.testing.assert_almost_equal(np.abs(v), 0.5, 2)
@pytest.mark.parametrize("simulator", SIMULATORS)
def test_single_source_autocorr(uvdata, simulator):
freqs = np.unique(uvdata.freq_array)
# put a point source in that will go through zenith.
point_source_pos = np.array([[0, uvdata.telescope_location_lat_lon_alt[0]]])
point_source_flux = np.array([[1.0]] * len(freqs))
v = simulator(
uvdata=uvdata,
sky_freqs=np.unique(uvdata.freq_array),
point_source_flux=point_source_flux,
point_source_pos=point_source_pos,
nside=2 ** 4,
).simulate()
# Account for factor of 2 between Stokes I and 'xx' pol for vis_cpu
if simulator == VisCPU:
v *= 2.0
# Make sure the source is over the horizon half the time
# (+/- 1 because of the discreteness of the times)
# 1e-3 on either side to account for float inaccuracies.
assert (
-1e-3 + (NTIMES / 2.0 - 1.0) / NTIMES
<= np.round(np.abs(np.mean(v)), 3)
<= (NTIMES / 2.0 + 1.0) / NTIMES + 1e-3
)
@pytest.mark.parametrize("simulator", SIMULATORS)
def test_single_source_autocorr_past_horizon(uvdata, simulator):
freqs = np.unique(uvdata.freq_array)
# put a point source in that will never be up
point_source_pos = np.array(
[[0, uvdata.telescope_location_lat_lon_alt[0] + 1.1 * np.pi / 2]]
)
point_source_flux = np.array([[1.0]] * len(freqs))
v = simulator(
uvdata=uvdata,
sky_freqs=np.unique(uvdata.freq_array),
point_source_flux=point_source_flux,
point_source_pos=point_source_pos,
nside=2 ** 4,
).simulate()
assert np.abs(np.mean(v)) == 0
import numpy as np
def C_b_v(angles):
"""
:param angles: Euler angles, np.ndarray, shape: (3,1)
:return: transition matrix from b-frame to v-frame, np.ndarray, shape: (3,3)
"""
phi, theta, psi = angles.flatten()
result = np.zeros(shape=(3, 3))
# first row
result[0, 0] = np.cos(psi) * np.cos(theta)
result[0, 1] = np.cos(psi) * np.sin(theta) * np.sin(phi) - np.sin(psi) * np.cos(phi)
result[0, 2] = np.cos(psi) * np.sin(theta) * np.cos(phi) + np.sin(psi) * np.sin(phi)
# second row
result[1, 0] = np.sin(psi) * np.cos(theta)
result[1, 1] = np.sin(psi) * np.sin(theta) * np.sin(phi) + np.cos(psi) * np.cos(phi)
result[1, 2] = np.sin(psi) * np.sin(theta) * np.cos(phi) - np.cos(psi) * np.sin(phi)
# third row
result[2, 0] = -np.sin(theta)
result[2, 1] = np.cos(theta) * np.sin(phi)
result[2, 2] = np.cos(theta) * np.cos(phi)
return result
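# Sanity check (added sketch): C_b_v is a rotation matrix, so it should reduce to the
# identity at zero Euler angles and always satisfy C @ C.T == I.
if __name__ == '__main__':
    assert np.allclose(C_b_v(np.zeros((3, 1))), np.eye(3))
    C_demo = C_b_v(np.array([[0.1], [0.2], [0.3]]))
    assert np.allclose(C_demo @ C_demo.T, np.eye(3))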
def f_euler_update(x, u, w, delta_t):
"""
:param x: state vector, np.ndarray, shape: (15,1)
:param u: measurements vector, np.ndarray, shape: (6,1)
:param w: noise vector, np.ndarray, shape: (6,1)
:param delta_t: time step, scalar
:return: deltas of Euler angles, np.ndarray, shape: (3,1)
"""
omega_x, omega_y, omega_z = u.flatten()[:3]
phi, theta, psi = x.flatten()[:3]
result = np.zeros(shape=3)
result[0] = (omega_y * np.sin(phi) + omega_z * np.cos(phi)) * np.tan(theta) + omega_x
result[1] = omega_y * np.cos(phi) - omega_z * np.sin(phi)
result[2] = (omega_y * np.sin(phi) + omega_z * np.cos(phi)) * (1. / np.cos(theta))
return result.reshape(-1, 1) * delta_t
def omega_unbiased(omega, bias, noise):
return omega - bias - noise
def acc_unbiased(acc, bias, noise):
return acc - bias - noise
def f(x, u, w, delta_t, g_v=None):
"""
:param x: state vector, np.ndarray, shape: (15,1)
:param u: measurements vector, np.ndarray, shape: (6,1)
:param w: noise vector, np.ndarray, shape: (6,1)
:param delta_t: time step, scalar
:param g_v: acceleration of gravity, np.ndarray: shape: (3,)
:return: state vector at the next time step, np.ndarray, shape: (15,1)
"""
if g_v is None:
g_v = np.array([0, 0, -9.81])
result = np.zeros(shape=15)
angles = x.flatten()[:3]
pose_coordinates = x.flatten()[3:6] # x,y,z
velocity = x.flatten()[6:9] # v_x, v_y, v_z
omega_imu = u.flatten()[:3] # measurements from gyroscope
acc_imu = u.flatten()[3:] # measurements from accelerometer
noise_omega = w.flatten()[:3] # omega noise
noise_acc = w.flatten()[3:] # acceleration noise
bias_omega = x.flatten()[9:12] # bias in gyroscope
bias_acc = x.flatten()[12:] # bias in accelerometer
u_unbiased = np.hstack((omega_unbiased(omega=omega_imu, bias=bias_omega, noise=noise_omega),
acc_unbiased(acc=acc_imu, bias=bias_acc, noise=noise_acc)))
trans_matrix = C_b_v(angles)
result[:3] = angles + f_euler_update(x=x, u=u_unbiased, w=w, delta_t=delta_t).flatten()
result[3:6] = pose_coordinates + velocity * delta_t + \
0.5 * delta_t ** 2 * (trans_matrix @ u_unbiased[3:] + g_v)
result[6:9] = velocity + delta_t * (trans_matrix @ u_unbiased[3:] + g_v)
result[9:12] = bias_omega
result[12:15] = bias_acc
return result.reshape(-1, 1)
def jac_f_euler_angles(x, u, delta_t):
"""
:param x: state vector, np.ndarray, shape: (15,1)
:param u: measurements vector, np.ndarray, shape: (6,1)
:param delta_t: time step, scalar
:return: the derivative of the f_euler_update function with respect to the angles.
np.ndarray, shape: (3, 3)
"""
phi, theta, psi = x.flatten()[:3]
omega_x, omega_y, omega_z = u.flatten()[:3]
result = np.zeros(shape=(3, 3))
# first row
result[0, 0] = (omega_y * np.cos(phi) - omega_z * np.sin(phi)) * np.tan(theta)
result[0, 1] = (omega_y * np.sin(phi) + omega_z * np.cos(phi)) * (1. / np.cos(theta)) ** 2
# second row
result[1, 0] = -omega_y * np.sin(phi) - omega_z * np.cos(phi)
# third row
result[2, 0] = (omega_y * np.cos(phi) - omega_z * np.sin(phi)) * (1. / np.cos(theta))
result[2, 1] = (omega_y * np.sin(phi) + omega_z * np.cos(phi)) * (np.sin(theta) / (np.cos(theta) ** 2))
return result * delta_t
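# Verification (added sketch): compare the analytic Jacobian above against a central
# finite-difference approximation of f_euler_update; the two should agree closely.
if __name__ == '__main__':
    x0 = np.zeros((15, 1)); x0[:3, 0] = [0.1, 0.2, 0.3]
    u0 = np.array([[0.01], [0.02], [0.03], [0.0], [0.0], [9.81]])
    w0 = np.zeros((6, 1))
    dt, eps = 0.01, 1e-6
    J = jac_f_euler_angles(x0, u0, dt)
    J_fd = np.zeros((3, 3))
    for k in range(3):
        x_p, x_m = x0.copy(), x0.copy()
        x_p[k, 0] += eps
        x_m[k, 0] -= eps
        J_fd[:, k] = (f_euler_update(x_p, u0, w0, dt) - f_euler_update(x_m, u0, w0, dt)).flatten() / (2 * eps)
    print(np.max(np.abs(J - J_fd)))  # should agree to within finite-difference error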
def jac_c_b_v_angles(angles, acc): # uff...
"""
:param angles: Euler angles, np.ndarray, shape: (3,1)
:param acc: accelerations, np.ndarray, shape: (3, 1)
:return: the derivative of the C_b_v @ acc function with respect to the angles.
np.ndarray, shape: (3, 3)
"""
phi, theta, psi = angles.flatten()
a_x, a_y, a_z = acc.flatten()
result = np.zeros(shape=(3, 3))
# first row
result[0, 0] = a_y * (np.cos(psi) * np.sin(theta) * np.cos(phi) + np.sin(psi) * np.sin(phi)) + \
a_z * (-np.cos(psi) * np.sin(theta) * np.sin(phi) + np.sin(psi) * np.cos(phi))
result[0, 1] = a_x * (-np.cos(psi) * np.sin(theta)) + \
a_y * (np.cos(psi) * np.cos(theta) * np.sin(phi)) + \
a_z * (np.cos(psi) * np.cos(theta) * np.cos(phi))
result[0, 2] = a_x * (-np.sin(psi) * np.cos(theta)) + \
a_y * (-np.sin(psi) * np.sin(theta) * np.sin(phi) - np.cos(psi) * np.cos(phi)) + \
a_z * (-np.sin(psi) * np.sin(theta) * np.cos(phi) + np.cos(psi) * np.sin(phi))
# second row
result[1, 0] = a_y * (np.sin(psi) * np.sin(theta) * np.cos(phi) - np.cos(psi) * np.sin(phi)) + \
a_z * (-np.sin(psi) * np.sin(theta) * np.sin(phi) - np.cos(psi) * np.cos(phi))
result[1, 1] = a_x * (-np.sin(psi) * np.sin(theta)) + \
a_y * (np.sin(psi) * np.cos(theta) * np.sin(phi)) + \
a_z * (np.sin(psi) * np.cos(theta) * np.cos(phi))
result[1, 2] = a_x * (np.cos(psi) * np.cos(theta)) + \
a_y * (np.cos(psi) * np.sin(theta) * np.sin(phi) - np.sin(psi) * np.cos(phi)) + \
a_z * (np.cos(psi) * np.sin(theta) * np.cos(phi) + np.sin(psi) * np.sin(phi))
result[2, 0] = a_y * (np.cos(theta) * np.cos(phi)) + \
a_z * (-np.cos(theta) * np.sin(phi))
result[2, 1] = a_x * (-np.cos(theta)) + \
a_y * (-np.sin(theta) * np.sin(phi)) + \
a_z * (-np.sin(theta) * np.cos(phi))
import numpy as np
from numpy.testing import assert_almost_equal
import pytest
from sequgen.deterministic.triangular_peak import triangular_peak
def test_with_required_args():
t_predict = np.linspace(0, 5, 11)
actual = triangular_peak(t_predict, width_base_left=1.0, width_base_right=1.0, location=1.0)
expected = np.array([0, 0.5, 1.0, 0.5, 0., 0., 0., 0., 0., 0., 0.])
assert_almost_equal(actual, expected)
def test_with_required_args_and_height():
t_predict = np.linspace(0, 5, 11)
actual = triangular_peak(t_predict, width_base_left=1.0, width_base_right=1.0, location=1.0, height=2.0)
expected = np.array([0, 1.0, 2.0, 1.0, 0., 0., 0., 0., 0., 0., 0.])
assert_almost_equal(actual, expected)
def test_with_required_args_and_skewness():
t_predict = np.linspace(0, 5, 11)
location = 1.0
actual = triangular_peak(t_predict, width_base_left=1.0, width_base_right=2.0, location=location)
expected = np.array([0, 0.5, 1.0, 0.75, 0.5, 0.25, 0., 0., 0., 0., 0.])
default_height = 1.0
assert actual[t_predict == location] == default_height, "expected maximum height at t == location"
assert_almost_equal(actual, expected)
def test_with_zero_base_left():
t_predict = np.linspace(0, 5, 11)
with pytest.raises(AssertionError) as excinfo:
triangular_peak(t_predict, width_base_left=0.0, width_base_right=2.0, location=1.0)
assert "width_base_left should be > 0" in str(excinfo.value)
def test_with_small_base_left_regular_sampling():
t_predict = np.linspace(0, 5, 11)
actual = triangular_peak(t_predict, width_base_left=1e-9, width_base_right=2.0, location=1.0)
expected = np.array([0., 0., 1., 0.75, 0.50, 0.25, 0., 0., 0., 0., 0.])
assert_almost_equal(actual, expected)
def test_with_small_base_left_irregular_sampling():
t_predict = np.asarray([0, 1-1e-9, 1, 2, 3, 10])
actual = triangular_peak(t_predict, width_base_left=1e-9, width_base_right=2.0, location=1.0)
expected = np.array([0., 0., 1., 0.50, 0., 0.])
assert_almost_equal(actual, expected)
def test_with_zero_base_right_regular_sampling():
t_predict = np.linspace(0, 5, 11)
with pytest.raises(AssertionError) as excinfo:
triangular_peak(t_predict, width_base_left=2.0, width_base_right=0.0, location=3.0)
assert "width_base_right should be > 0" in str(excinfo.value)
def test_with_small_base_right_regular_sampling():
t_predict = np.linspace(0, 5, 11)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# import logging, sys
# logging.disable(sys.maxsize)
# import warnings
# warnings.filterwarnings("ignore", category=RuntimeWarning)
from tqdm import tqdm, trange
import os
import numpy as np
from scipy.optimize import curve_fit
from scipy.stats import gmean
import matplotlib.pyplot as plt
from matplotlib.ticker import ScalarFormatter, LogFormatter, NullFormatter
## astylo
from astylo.iolib import fclean, read_fits, write_fits
from astylo.ipro import iconvolve, iswarp, sextract, icrop
from astylo.calib import intercalib
from astylo.mlib import f_lin, f_lin0
from astylo.alib import pix2sr, get_pc
##---------------------------
## Initialisation
##---------------------------
## Local
from param_irc_Y12 import (
verbose, src, Nmc, path_idl, path_irc, path_ker,
parobs, fits_irc, csv_ker,
path_tmp, path_out, path_build, path_tests,
phot, path_phot, path_cal,
)
Nobs = len(parobs)
Nmc = 0
##---------------------------
## Build slits
##---------------------------
for i in range(Nobs):
sext = sextract(path_irc, parobs[i])
Nsub = 1
## Check Y12 spectra
if sext.slit_width==3:
## Ns
Nsub = 2
elif sext.slit_width==2:
## Nh
Nsub = 6
## MC add pointing unc
for j in range(Nmc+1):
if j==0:
sext.spec_build(fits_irc[i], Nsub=Nsub)
else:
sext.spec_build(fits_irc[i]+'_'+str(j), Nsub=Nsub)
##---------------------------
## Combine slits
##---------------------------
hdr_n3 = read_fits(path_irc + parobs[0][0] + '/irc_specred_out_' + parobs[0][1]+'/'+parobs[0][2]).header
swp = iswarp(fits_irc, hdr_n3,
# center='9:55:52,69:40:45',
pixscale=6, tmpdir=path_build,
verbose=verbose)
## Reprendre MC adding spec unc
##------------------------------
for j in trange(Nmc+1, #leave=False,
desc='<iswarp> IRC Combining [MC]'):
if j==0:
comb = swp.combine(fits_irc, \
'wgt_avg', keepedge=True, \
tmpdir=path_build+'MC_no/', \
filOUT=path_out+src+'_IRC_0')
else:
fits_irc_mc = []
for f in fits_irc:
fits_irc_mc.append(f+'_'+str(j))
comb = swp.combine(fits_irc_mc, \
keepedge=True, uncpdf='splitnorm', \
tmpdir=path_build+'MC_'+str(j)+'/', \
filOUT=path_out+src+'_IRC_'+str(j))
## Cal unc
##---------
mcimage = []
for j in trange(Nmc+1, #leave=False,
desc='IRC Reading [MC]'):
if j==0:
hd0 = read_fits(path_out+src+'_IRC_0')
header = hd0.header
wvl = hd0.wave
else:
hd = read_fits(path_out+src+'_IRC_'+str(j))
mcimage.append(hd.data)
if Nmc>1:
mcimage = np.array(mcimage)
unc = np.nanstd(mcimage, axis=0)
"""
========================================================================
Comparison of kernel regression (KR) and support vector regression (SVR)
========================================================================
Toy example of 1D regression using kernel regression (KR) and support vector
regression (SVR). KR provides an efficient way of selecting a kernel's
bandwidth via leave-one-out cross-validation, which is considerably faster
that an explicit grid-search as required by SVR. The main disadvantages are
that it does not support regularization and is not robust to outliers.
"""
from py_qt import bootstrap as bs
import matplotlib.pyplot as plt
from py_qt import npr_methods
import numpy as np
from py_qt import nonparam_regression as smooth
from py_qt import plot_fit
import tensorflow as tf
import requests
import time
import numpy as np
from sklearn.svm import SVR
from sklearn.grid_search import GridSearchCV
from sklearn.learning_curve import learning_curve
import matplotlib.pyplot as plt
from kernel_regression import KernelRegression
np.random.seed(0)
def f(x):
return 3*np.cos(x/2) + x**2/5 + 3
xs = np.random.rand(200) * 10
ys = f(xs) + 2*np.random.randn(*xs.shape)
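# Added sketch: following the module docstring, fit both estimators on the toy data
# generated above. The hyper-parameter grids are illustrative assumptions, and the
# KernelRegression estimator is assumed to follow the scikit-learn fit/predict API
# with leave-one-out selection over the supplied gamma grid.
X = xs[:, np.newaxis]
kr = KernelRegression(kernel="rbf", gamma=np.logspace(-2, 2, 10))
svr = GridSearchCV(SVR(kernel="rbf"),
                   param_grid={"C": [0.1, 1.0, 10.0, 100.0], "gamma": np.logspace(-2, 2, 10)})
t0 = time.time()
y_kr = kr.fit(X, ys).predict(X)
print("KR fit/predict time: %.3f s" % (time.time() - t0))
t0 = time.time()
y_svr = svr.fit(X, ys).predict(X)
print("SVR fit/predict time: %.3f s" % (time.time() - t0))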
# -*- coding: utf-8 -*-
"""
transform.py
This module contains functions that transform matrix inputs into different
forms that are of use in bigger functions where they are called. These
functions focus mainly on overlapping repeated structures and annotation
markers.
The module contains the following functions:
* remove_overlaps
Removes any pairs of repeats with the same length and annotation marker
where at least one pair of repeats overlap in time.
* __create_anno_remove_overlaps
Turns rows of repeats into marked rows with annotation markers for the
start indices and zeroes otherwise. After removing the annotations that
have overlaps, the function creates separate arrays for annotations with
overlaps and annotations without overlaps. Finally, the annotation markers
are checked and fixed if necessary.
* __separate_anno_markers
Expands vector of non-overlapping repeats into a matrix representation.
The matrix representation is a visual record of where all of the
repeats in a song start and end.
"""
import numpy as np
from .utilities import reconstruct_full_block, add_annotations
def remove_overlaps(input_mat, song_length):
"""
Removes any pairs of repeat length and specific annotation marker
where there exists at least one pair of repeats that overlap in time.
Args
----
input_mat : np.ndarray[int]
List of pairs of repeats with annotations marked. The first
two columns refer to the first repeat or the pair, the second
two refer to the second repeat of the pair, the fifth column
refers to the length of the repeats, and the sixth column
contains the annotation markers.
song_length : int
Number of audio shingles.
Returns
-------
lst_no_overlaps : np.ndarray[int]
List of pairs of repeats with annotations marked. All the
repeats of a given length and with a specific annotation
marker do not overlap in time.
matrix_no_overlaps : np.ndarray[int]
Matrix representation of lst_no_overlaps with one row for
each group of repeats.
key_no_overlaps : np.ndarray[int]
Vector containing the lengths of the repeats encoded in
each row of matrix_no_overlaps.
annotations_no_overlaps : np.ndarray[int]
Vector containing the annotation markers of the repeats
encoded in each row of matrix_no_overlaps.
all_overlap_lst : np.ndarray[int]
List of pairs of repeats with annotations marked removed
from input_mat. For each pair of repeat length and specific
annotation marker, there exist at least one pair of repeats
that do overlap in time.
"""
# Create a vector of unique repeat lengths
bw_vec = np.unique(input_mat[:, 4])
# Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cmath
import numpy as np
import pytest
import cirq
from cirq.linalg import matrix_commutes
def test_is_diagonal():
assert cirq.is_diagonal(np.empty((0, 0)))
assert cirq.is_diagonal(np.empty((1, 0)))
assert cirq.is_diagonal(np.empty((0, 1)))
assert cirq.is_diagonal(np.array([[1]]))
assert cirq.is_diagonal(np.array([[-1]]))
assert cirq.is_diagonal(np.array([[5]]))
assert cirq.is_diagonal(np.array([[3j]]))
assert cirq.is_diagonal(np.array([[1, 0]]))
assert cirq.is_diagonal(np.array([[1], [0]]))
assert not cirq.is_diagonal(np.array([[1, 1]]))
assert not cirq.is_diagonal(np.array([[1], [1]]))
assert cirq.is_diagonal(np.array([[5j, 0], [0, 2]]))
assert cirq.is_diagonal(np.array([[1, 0], [0, 1]]))
assert not cirq.is_diagonal(np.array([[1, 0], [1, 1]]))
assert not cirq.is_diagonal(np.array([[1, 1], [0, 1]]))
assert not cirq.is_diagonal(np.array([[1, 1], [1, 1]]))
assert not cirq.is_diagonal(np.array([[1, 0.1], [0.1, 1]]))
assert cirq.is_diagonal(np.array([[1, 1e-11], [1e-10, 1]]))
def test_is_diagonal_tolerance():
atol = 0.5
# Pays attention to specified tolerance.
assert cirq.is_diagonal(np.array([[1, 0], [-0.5, 1]]), atol=atol)
assert not cirq.is_diagonal(np.array([[1, 0], [-0.6, 1]]), atol=atol)
# Error isn't accumulated across entries.
assert cirq.is_diagonal(np.array([[1, 0.5], [-0.5, 1]]), atol=atol)
assert not cirq.is_diagonal(np.array([[1, 0.5], [-0.6, 1]]), atol=atol)
def test_is_hermitian():
assert cirq.is_hermitian(np.empty((0, 0)))
assert not cirq.is_hermitian(np.empty((1, 0)))
assert not cirq.is_hermitian(np.empty((0, 1)))
assert cirq.is_hermitian(np.array([[1]]))
assert cirq.is_hermitian(np.array([[-1]]))
assert cirq.is_hermitian(np.array([[5]]))
assert not cirq.is_hermitian(np.array([[3j]]))
assert not cirq.is_hermitian(np.array([[0, 0]]))
assert not cirq.is_hermitian(np.array([[0], [0]]))
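def test_matrix_commutes_sketch():
    # Added sketch (not part of the original file): matrix_commutes, imported above,
    # reports whether two matrices commute.
    x = np.array([[0, 1], [1, 0]])
    z = np.array([[1, 0], [0, -1]])
    assert matrix_commutes(np.eye(2), x)
    assert not matrix_commutes(x, z)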
import os
import sys
import math
import pickle
import pdb
import argparse
import random
from tqdm import tqdm
from shutil import copy
import torch
from torch import nn, optim
from torch.optim.lr_scheduler import ReduceLROnPlateau
import numpy as np
import scipy.io
from scipy.linalg import qr
import igraph
from random import shuffle
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from util import *
from models import *
from sklearn import manifold
# from dataset import *
parser = argparse.ArgumentParser(description='Train Variational Autoencoders for DAGs')
# general settings
parser.add_argument('--data-name', default='threeStageOpamp', help='graph dataset name')
parser.add_argument('--save-appendix', default='',
help='what to append to data-name as save-name for results')
parser.add_argument('--only-test', action='store_true', default=False,
help='if True, perform some experiments without training the model')
parser.add_argument('--backup', action='store_true', default=True,
help='if True, copy current py files to result dir')
parser.add_argument('--save-interval', type=int, default=1, metavar='N',
help='how many epochs to wait each time to save model states')
parser.add_argument('--sample-number', type=int, default=10, metavar='N',
help='how many samples to generate each time')
parser.add_argument('--gpu', type=int, default=3, help='which gpu to use')
# training settings
# parser.add_argument('--model', default='DVAE_hybirdLoss', help='model to use')
parser.add_argument('--model', default='DVAE', help='model to use')
# parser.add_argument('--data_file', type=str, default='dataset_withoutY', help='dataset original file to use')
parser.add_argument('--trainSet_size', type=int, default=2000, help='control the size of training set')
parser.add_argument('--hs', type=int, default=501, metavar='N',
help='hidden size of GRUs')
parser.add_argument('--nz', type=int, default=10, metavar='N',
help='number of dimensions of latent vectors z')
parser.add_argument('--load_model_path', default='', help='model path to loaded')
parser.add_argument('--load_model_name', default='500', help='model name to loaded')
# optimization settings
parser.add_argument('--lr', type=float, default=5e-4, metavar='LR',
help='learning rate (default: 1e-4)')
parser.add_argument('--epochs', type=int, default=500, metavar='N',
help='number of epochs to train')
parser.add_argument('--batch_size', type=int, default=16, metavar='N',
help='batch size during training')
parser.add_argument('--infer-batch-size', type=int, default=128, metavar='N',
help='batch size during inference')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
args = parser.parse_args()
torch.manual_seed(args.seed)
gpu = 'cuda:' + str(args.gpu)
device = torch.device(gpu if torch.cuda.is_available() else 'cpu')
np.random.seed(args.seed)
random.seed(args.seed)
print(args)
'''Prepare data'''
args.file_dir = os.getcwd()
args.res_dir = os.path.join(args.file_dir, 'results/{}{}'.format(args.data_name,
args.save_appendix))
if not os.path.exists(args.res_dir):
os.makedirs(args.res_dir)
pkl_name = os.path.join(args.res_dir, args.data_name + '.pkl')
# check whether to load pre-stored pickle data
if os.path.isfile(pkl_name):
with open(pkl_name, 'rb') as f:
train_data, test_data, graph_args = pickle.load(f)
# otherwise process the raw data and save to .pkl
else:
# data_file = args.data_file
# train_data, test_data, graph_args = load_CIRCUIT_graphs(data_file)
train_data, test_data, graph_args = load_CIRCUIT_graphs()
train_data = train_data[:args.trainSet_size]
with open(pkl_name, 'wb') as f:
pickle.dump((train_data, test_data, graph_args), f)
if args.backup:
# backup current .py files
copy('train.py', args.res_dir)
copy('models.py', args.res_dir)
copy('util.py', args.res_dir)
# save command line input
cmd_input = 'python ' + ' '.join(sys.argv) + '\n'
with open(os.path.join(args.res_dir, 'cmd_input.txt'), 'a') as f:
f.write(cmd_input)
print('Command line input: ' + cmd_input + ' is saved.')
'''Prepare the model'''
# model
model = eval(args.model)(
max_n=graph_args.max_n,
fs=graph_args.edge_feature,
nvt=graph_args.nvt,
START_TYPE=0,
END_TYPE=1,
hs=args.hs,
nz=args.nz
)
# optimizer and scheduler
optimizer = optim.Adam(model.parameters(), lr=args.lr)
scheduler = ReduceLROnPlateau(optimizer, 'min', factor=0.1, patience=10, verbose=True)
model.to(device)
'''
# plot sample train/test graphs
if not (os.path.exists(os.path.join(args.res_dir, 'train_graph_id0.pdf')) or os.path.exists(os.path.join(args.res_dir, 'train_graph_id0.png'))):
for data in ['train_data', 'test_data']:
G = [g for g, y in eval(data)[:10]]
for i, g in enumerate(G):
name = '{}_graph_id{}'.format(data[:-5], i)
plot_DAG(g, args.res_dir, name)
'''
'''Define some train/test functions'''
def train(epoch):
model.train()
train_loss = 0
recon_loss = 0
kld_loss = 0
pred_loss = 0
pbar = tqdm(train_data)
g_batch = []
y_batch = []
min_dist = 1
max_dist = 0
for i, (g, y) in enumerate(pbar):
g_batch.append(g)
y_batch.append(y)
if len(g_batch) == args.batch_size or i == len(train_data) - 1:
optimizer.zero_grad()
g_batch = model._collate_fn(g_batch)
'''
mu, logvar = model.encode(g_batch)
loss, recon, kld = model.loss(mu, logvar, g_batch)
'''
loss, recon, kld = model(g_batch)
# if epoch % 100 ==0 and i == len(train_data) - 1:
# Hv
for vi in range(0, model.max_n):
# print("vi:", vi)
Hvi = model._get_vertex_state(g_batch, vi)
'''
for j in range(Hvi.size()[0]):
for k in range(j+1, Hvi.size()[0]):
dist = torch.cosine_similarity(Hvi[j], Hvi[k], dim=0)
min_dist = min(dist, min_dist)
max_dist = max(dist, max_dist)
'''
# print("min_dist:", min_dist)
# print("max_dist:", max_dist)
# print(Hvi.size()[0])
# print(i, Hvi)
pbar.set_description('Epoch: %d, loss: %0.4f, recon: %0.4f, kld: %0.4f' % (
epoch, loss.item() / len(g_batch), recon.item() / len(g_batch), kld.item() / len(g_batch)))
loss.backward()
# train_loss += float(loss)
# recon_loss += float(recon)
# kld_loss += float(kld)
train_loss += loss.item()
recon_loss += recon.item()
kld_loss += kld.item()
optimizer.step()
g_batch = []
y_batch = []
print('====> Epoch: {} Average loss: {:.4f}'.format(epoch, train_loss / len(train_data)))
return train_loss, recon_loss, kld_loss
def test():
# test recon accuracy
test_model.eval()
encode_times = 1
decode_times = 1
Nll = 0
n_perfect = 0
print('Testing begins...')
print('Performance on the train data: ')
pbar1 = tqdm(train_data)
g_batch = []
y_batch = []
for i, (g, y) in enumerate(pbar1):
g_batch.append(g)
y_batch.append(y)
if len(g_batch) == args.infer_batch_size or i == len(train_data) - 1:
g = test_model._collate_fn(g_batch)
mu, logvar = test_model.encode(g)
_, nll, _ = test_model.loss(mu, logvar, g)
pbar1.set_description('recon loss: {:.4f}'.format(nll.item() / len(g_batch)))
Nll += nll.item()
# construct igraph g from tensor g to check recon quality
for _ in range(encode_times):
z = test_model.reparameterize(mu, logvar)
for _ in range(decode_times):
g_recon = test_model.decode(z)
n_perfect += sum(is_same_DAG(g0, g1) for g0, g1 in zip(g, g_recon))
g_batch = []
y_batch = []
Nll /= len(train_data)
acc = n_perfect / (len(train_data) * encode_times * decode_times)
print('Trainset average recon loss: {0}, recon accuracy: {1:.4f}'.format(Nll, acc))
print('Performance on the test data: ')
pbar = tqdm(test_data)
g_batch = []
y_batch = []
Nll = 0
n_perfect = 0
for i, (g, y) in enumerate(pbar):
g_batch.append(g)
y_batch.append(y)
if len(g_batch) == args.infer_batch_size or i == len(test_data) - 1:
g = test_model._collate_fn(g_batch)
mu, logvar = test_model.encode(g)
print("mu", mu)
print("logvar", logvar)
_, nll, _ = test_model.loss(mu, logvar, g)
pbar.set_description('recon loss: {:.4f}'.format(nll.item() / len(g_batch)))
# Nll += nll.item()
Nll += float(nll)
# construct igraph g from tensor g to check recon quality
for _ in range(encode_times):
z = test_model.reparameterize(mu, logvar)
for _ in range(decode_times):
g_recon = test_model.decode(z)
n_perfect += sum(is_same_DAG(g0, g1) for g0, g1 in zip(g, g_recon))
if i == len(test_data) - 1:
for j in range(g_batch[-1].vcount()):
print("True paramaters of graph node ", j)
print(g_batch[-1].vs[j]['param'])
print("Decoded paramaters of graph node ", j)
print(g_recon[-1].vs[j]['param'])
g_batch = []
y_batch = []
Nll /= len(test_data)
acc = n_perfect / (len(test_data) * encode_times * decode_times)
print('Testset average recon loss: {0}, recon accuracy: {1:.4f}'.format(Nll, acc))
# return Nll, acc
def visualize_recon(epoch, current_model):
current_model.eval()
# draw some reconstructed train/test graphs to visualize recon quality
for i, (g, y) in enumerate(test_data[:10] + train_data[:10]):
g_recon = current_model.encode_decode(g)[0] # remove []
name0 = 'graph_epoch{}_id{}_original'.format(epoch, i)
plot_DAG(g, args.res_dir, name0)
name1 = 'graph_epoch{}_id{}_recon'.format(epoch, i)
plot_DAG(g_recon, args.res_dir, name1)
def extract_latent(data):
model.eval()
Z = []
Y = []
g_batch = []
for i, (g, y) in enumerate(tqdm(data)):
# copy igraph
# otherwise original igraphs will save the H states and consume more GPU memory
g_ = g.copy()
g_batch.append(g_)
if len(g_batch) == args.infer_batch_size or i == len(data) - 1:
g_batch = model._collate_fn(g_batch)
mu, _ = model.encode(g_batch)
mu = mu.cpu().detach().numpy()
Z.append(mu)
g_batch = []
Y.append(y)
return np.concatenate(Z, 0), np.array(Y)
def save_latent_representations(epoch):
Z_train, Y_train = extract_latent(train_data)
Z_test, Y_test = extract_latent(test_data)
latent_pkl_name = os.path.join(args.res_dir, args.data_name +
'_latent_epoch{}.pkl'.format(epoch))
latent_mat_name = os.path.join(args.res_dir, args.data_name +
'_latent_epoch{}.mat'.format(epoch))
with open(latent_pkl_name, 'wb') as f:
pickle.dump((Z_train, Y_train, Z_test, Y_test), f)
print('Saved latent representations to ' + latent_pkl_name)
scipy.io.savemat(latent_mat_name,
mdict={
'Z_train': Z_train,
'Z_test': Z_test,
'Y_train': Y_train,
'Y_test': Y_test
}
)
def visualize_tsne(current_model):
# latent_mat_name = os.path.join(args.res_dir, args.data_name + '_latent_epoch{}.mat'.format(args.epochs))
load_model_path = os.path.join(args.file_dir, 'results\\{}'.format(args.data_name))
load_mat_name = 'threeStageOpamp_latent_epoch500.mat'
latent_mat_name = os.path.join(load_model_path, load_mat_name)
latent_data = scipy.io.loadmat(latent_mat_name)
# print(np.shape(latent_data['Z_train']))
Z = np.concatenate((latent_data['Z_train'], latent_data['Z_test']), axis=0)
Y = np.concatenate((latent_data['Y_train'], latent_data['Y_test']), axis=0)
Y_min = np.min(Y, axis=0)
Y_max = np.max(Y, axis=0)
Y = (Y - Y_min) / (Y_max - Y_min)
data = np.concatenate((train_data, test_data), axis=0)
id = np.zeros(args.trainSet_size)
for i, (g, _) in enumerate(data):
if g.vcount() == 8:
id[i] = 1
tsne = manifold.TSNE(n_components=2, init='pca', random_state=501)
Z_tsne = tsne.fit_transform(Z)
print(Z_tsne)
d = np.where(id == 0)
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import networkx as networkx
import numpy as numpy
import scipy as scipy
import scipy.integrate
class SEIRSModel():
"""
A class to simulate the Deterministic SEIRS Model
===================================================
Params: beta Rate of transmission (exposure)
sigma Rate of infection (upon exposure)
gamma Rate of recovery (upon infection)
xi Rate of re-susceptibility (upon recovery)
mu_I Rate of infection-related death
mu_0 Rate of baseline death
nu Rate of baseline birth
beta_D Rate of transmission (exposure) for individuals with detected infections
sigma_D Rate of infection (upon exposure) for individuals with detected infections
gamma_D Rate of recovery (upon infection) for individuals with detected infections
mu_D Rate of infection-related death for individuals with detected infections
theta_E Rate of baseline testing for exposed individuals
theta_I Rate of baseline testing for infectious individuals
psi_E Probability of positive test results for exposed individuals
psi_I Probability of positive test results for infectious individuals
q Probability of quarantined individuals interacting with others
initE Init number of exposed individuals
initI Init number of infectious individuals
initD_E Init number of detected infectious individuals
initD_I Init number of detected infectious individuals
initR Init number of recovered individuals
initF Init number of infection-related fatalities
(all remaining nodes initialized susceptible)
"""
def __init__(self, initN, beta, sigma, gamma, xi=0, mu_I=0, mu_0=0, nu=0, p=0,
beta_D=None, sigma_D=None, gamma_D=None, mu_D=None,
theta_E=0, theta_I=0, psi_E=0, psi_I=0, q=0,
initE=0, initI=10, initD_E=0, initD_I=0, initR=0, initF=0):
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Model Parameters:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.beta = beta
self.sigma = sigma
self.gamma = gamma
self.xi = xi
self.mu_I = mu_I
self.mu_0 = mu_0
self.nu = nu
self.p = p
# Testing-related parameters:
self.beta_D = beta_D if beta_D is not None else self.beta
self.sigma_D = sigma_D if sigma_D is not None else self.sigma
self.gamma_D = gamma_D if gamma_D is not None else self.gamma
self.mu_D = mu_D if mu_D is not None else self.mu_I
self.theta_E = theta_E if theta_E is not None else self.theta_E
self.theta_I = theta_I if theta_I is not None else self.theta_I
self.psi_E = psi_E if psi_E is not None else self.psi_E
self.psi_I = psi_I if psi_I is not None else self.psi_I
self.q = q if q is not None else self.q
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Initialize Timekeeping:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.t = 0
self.tmax = 0 # will be set when run() is called
self.tseries = numpy.array([0])
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Initialize Counts of inidividuals with each state:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.N = numpy.array([int(initN)])
self.numE = numpy.array([int(initE)])
self.numI = numpy.array([int(initI)])
self.numD_E = numpy.array([int(initD_E)])
self.numD_I = numpy.array([int(initD_I)])
self.numR = numpy.array([int(initR)])
self.numF = numpy.array([int(initF)])
self.numS = numpy.array([self.N[-1] - self.numE[-1] - self.numI[-1] - self.numD_E[-1] - self.numD_I[-1] - self.numR[-1] - self.numF[-1]])
assert(self.numS[0] >= 0), "The specified initial population size N must be greater than or equal to the initial compartment counts."
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@staticmethod
def system_dfes(t, variables, beta, sigma, gamma, xi, mu_I, mu_0, nu,
beta_D, sigma_D, gamma_D, mu_D, theta_E, theta_I, psi_E, psi_I, q):
S, E, I, D_E, D_I, R, F = variables # varibles is a list with compartment counts as elements
N = S + E + I + D_E + D_I + R
dS = - (beta*S*I)/N - q*(beta_D*S*D_I)/N + xi*R + nu*N - mu_0*S
dE = (beta*S*I)/N + q*(beta_D*S*D_I)/N - sigma*E - theta_E*psi_E*E - mu_0*E
dI = sigma*E - gamma*I - mu_I*I - theta_I*psi_I*I - mu_0*I
dDE = theta_E*psi_E*E - sigma_D*D_E - mu_0*D_E
dDI = theta_I*psi_I*I + sigma_D*D_E - gamma_D*D_I - mu_D*D_I - mu_0*D_I
dR = gamma*I + gamma_D*D_I - xi*R - mu_0*R
dF = mu_I*I + mu_D*D_I
return [dS, dE, dI, dDE, dDI, dR, dF]
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def run_epoch(self, runtime, dt=0.1):
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Create a list of times at which the ODE solver should output system values.
# Append this list of times as the model's timeseries
t_eval = numpy.arange(start=self.t, stop=self.t+runtime, step=dt)
# Define the range of time values for the integration:
t_span = (self.t, self.t+runtime)
# Define the initial conditions as the system's current state:
# (which will be the t=0 condition if this is the first run of this model,
# else where the last sim left off)
init_cond = [self.numS[-1], self.numE[-1], self.numI[-1], self.numD_E[-1], self.numD_I[-1], self.numR[-1], self.numF[-1]]
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Solve the system of differential eqns:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
solution = scipy.integrate.solve_ivp(lambda t, X: SEIRSModel.system_dfes(t, X, self.beta, self.sigma, self.gamma, self.xi, self.mu_I, self.mu_0, self.nu,
self.beta_D, self.sigma_D, self.gamma_D, self.mu_D, self.theta_E, self.theta_I, self.psi_E, self.psi_I, self.q
),
t_span=[self.t, self.tmax], y0=init_cond, t_eval=t_eval
)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Store the solution output as the model's time series and data series:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.tseries = numpy.append(self.tseries, solution['t'])
self.numS = numpy.append(self.numS, solution['y'][0])
self.numE = numpy.append(self.numE, solution['y'][1])
self.numI = numpy.append(self.numI, solution['y'][2])
self.numD_E = numpy.append(self.numD_E, solution['y'][3])
self.numD_I = numpy.append(self.numD_I, solution['y'][4])
self.numR = numpy.append(self.numR, solution['y'][5])
self.numF = numpy.append(self.numF, solution['y'][6])
self.t = self.tseries[-1]
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def run(self, T, dt=0.1, checkpoints=None, verbose=False):
if(T>0):
self.tmax += T
else:
return False
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Pre-process checkpoint values:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if(checkpoints):
numCheckpoints = len(checkpoints['t'])
paramNames = ['beta', 'sigma', 'gamma', 'xi', 'mu_I', 'mu_0', 'nu',
'beta_D', 'sigma_D', 'gamma_D', 'mu_D',
'theta_E', 'theta_I', 'psi_E', 'psi_I', 'q']
for param in paramNames:
# For params that don't have given checkpoint values (or bad value given),
# set their checkpoint values to the value they have now for all checkpoints.
if(param not in list(checkpoints.keys())
or not isinstance(checkpoints[param], (list, numpy.ndarray))
or len(checkpoints[param])!=numCheckpoints):
checkpoints[param] = [getattr(self, param)]*numCheckpoints
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Run the simulation loop:
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
if(not checkpoints):
self.run_epoch(runtime=self.tmax, dt=dt)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
print("t = %.2f" % self.t)
if(verbose):
print("\t S = " + str(self.numS[-1]))
print("\t E = " + str(self.numE[-1]))
print("\t I = " + str(self.numI[-1]))
print("\t D_E = " + str(self.numD_E[-1]))
print("\t D_I = " + str(self.numD_I[-1]))
print("\t R = " + str(self.numR[-1]))
print("\t F = " + str(self.numF[-1]))
else: # checkpoints provided
for checkpointIdx, checkpointTime in enumerate(checkpoints['t']):
# Run the sim until the next checkpoint time:
self.run_epoch(runtime=checkpointTime-self.t, dt=dt)
# Having reached the checkpoint, update applicable parameters:
print("[Checkpoint: Updating parameters]")
for param in paramNames:
setattr(self, param, checkpoints[param][checkpointIdx])
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
print("t = %.2f" % self.t)
if(verbose):
print("\t S = " + str(self.numS[-1]))
print("\t E = " + str(self.numE[-1]))
print("\t I = " + str(self.numI[-1]))
print("\t D_E = " + str(self.numD_E[-1]))
print("\t D_I = " + str(self.numD_I[-1]))
print("\t R = " + str(self.numR[-1]))
print("\t F = " + str(self.numF[-1]))
if(self.t < self.tmax):
self.run_epoch(runtime=self.tmax-self.t, dt=dt)
return True
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def total_num_infections(self, t_idx=None):
if(t_idx is None):
return (self.numE[:] + self.numI[:] + self.numD_E[:] + self.numD_I[:])
else:
return (self.numE[t_idx] + self.numI[t_idx] + self.numD_E[t_idx] + self.numD_I[t_idx])
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def plot(self, ax=None, plot_S='line', plot_E='line', plot_I='line',plot_R='line', plot_F='line',
plot_D_E='line', plot_D_I='line', combine_D=True,
color_S='tab:green', color_E='orange', color_I='crimson', color_R='tab:blue', color_F='black',
color_D_E='mediumorchid', color_D_I='mediumorchid', color_reference='#E0E0E0',
dashed_reference_results=None, dashed_reference_label='reference',
shaded_reference_results=None, shaded_reference_label='reference',
vlines=[], vline_colors=[], vline_styles=[], vline_labels=[],
ylim=None, xlim=None, legend=True, title=None, side_title=None, plot_percentages=True):
import matplotlib.pyplot as pyplot
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Create an Axes object if None provided:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if(not ax):
fig, ax = pyplot.subplots()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Prepare data series to be plotted:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Fseries = self.numF/self.N if plot_percentages else self.numF
Eseries = self.numE/self.N if plot_percentages else self.numE
Dseries = (self.numD_E+self.numD_I)/self.N if plot_percentages else (self.numD_E+self.numD_I)
D_Eseries = self.numD_E/self.N if plot_percentages else self.numD_E
D_Iseries = self.numD_I/self.N if plot_percentages else self.numD_I
Iseries = self.numI/self.N if plot_percentages else self.numI
Rseries = self.numR/self.N if plot_percentages else self.numR
Sseries = self.numS/self.N if plot_percentages else self.numS
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Draw the reference data:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if(dashed_reference_results):
dashedReference_tseries = dashed_reference_results.tseries[::int(self.N/100)]
dashedReference_IDEstack = (dashed_reference_results.numI + dashed_reference_results.numD_I + dashed_reference_results.numD_E + dashed_reference_results.numE)[::int(self.N/100)] / (self.N if plot_percentages else 1)
ax.plot(dashedReference_tseries, dashedReference_IDEstack, color='#E0E0E0', linestyle='--', label='$I+D+E$ ('+dashed_reference_label+')', zorder=0)
if(shaded_reference_results):
shadedReference_tseries = shaded_reference_results.tseries
shadedReference_IDEstack = (shaded_reference_results.numI + shaded_reference_results.numD_I + shaded_reference_results.numD_E + shaded_reference_results.numE) / (self.N if plot_percentages else 1)
ax.fill_between(shaded_reference_results.tseries, shadedReference_IDEstack, 0, color='#EFEFEF', label='$I+D+E$ ('+shaded_reference_label+')', zorder=0)
ax.plot(shaded_reference_results.tseries, shadedReference_IDEstack, color='#E0E0E0', zorder=1)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Draw the stacked variables:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
topstack = numpy.zeros_like(self.tseries)
if(any(Fseries) and plot_F=='stacked'):
ax.fill_between(numpy.ma.masked_where(Fseries<=0, self.tseries), numpy.ma.masked_where(Fseries<=0, topstack+Fseries), topstack, color=color_F, alpha=0.5, label='$F$', zorder=2)
ax.plot( numpy.ma.masked_where(Fseries<=0, self.tseries), numpy.ma.masked_where(Fseries<=0, topstack+Fseries), color=color_F, zorder=3)
topstack = topstack+Fseries
if(any(Eseries) and plot_E=='stacked'):
ax.fill_between(numpy.ma.masked_where(Eseries<=0, self.tseries), numpy.ma.masked_where(Eseries<=0, topstack+Eseries), topstack, color=color_E, alpha=0.5, label='$E$', zorder=2)
ax.plot( numpy.ma.masked_where(Eseries<=0, self.tseries), numpy.ma.masked_where(Eseries<=0, topstack+Eseries), color=color_E, zorder=3)
topstack = topstack+Eseries
if(combine_D and plot_D_E=='stacked' and plot_D_I=='stacked'):
ax.fill_between(numpy.ma.masked_where(Dseries<=0, self.tseries), numpy.ma.masked_where(Dseries<=0, topstack+Dseries), topstack, color=color_D_E, alpha=0.5, label='$D_{all}$', zorder=2)
ax.plot( numpy.ma.masked_where(Dseries<=0, self.tseries), numpy.ma.masked_where(Dseries<=0, topstack+Dseries), color=color_D_E, zorder=3)
topstack = topstack+Dseries
else:
if(any(D_Eseries) and plot_D_E=='stacked'):
ax.fill_between(numpy.ma.masked_where(D_Eseries<=0, self.tseries), numpy.ma.masked_where(D_Eseries<=0, topstack+D_Eseries), topstack, color=color_D_E, alpha=0.5, label='$D_E$', zorder=2)
ax.plot( numpy.ma.masked_where(D_Eseries<=0, self.tseries), numpy.ma.masked_where(D_Eseries<=0, topstack+D_Eseries), color=color_D_E, zorder=3)
topstack = topstack+D_Eseries
if(any(D_Iseries) and plot_D_I=='stacked'):
ax.fill_between(numpy.ma.masked_where(D_Iseries<=0, self.tseries), numpy.ma.masked_where(D_Iseries<=0, topstack+D_Iseries), topstack, color=color_D_I, alpha=0.5, label='$D_I$', zorder=2)
ax.plot( numpy.ma.masked_where(D_Iseries<=0, self.tseries), numpy.ma.masked_where(D_Iseries<=0, topstack+D_Iseries), color=color_D_I, zorder=3)
topstack = topstack+D_Iseries
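# Usage sketch (added; the remainder of plot() is truncated in this excerpt): the
# deterministic model defined above is typically driven like this. Parameter values
# here are illustrative assumptions only.
if __name__ == '__main__':
    model = SEIRSModel(initN=100000, beta=0.15, sigma=1 / 5.2, gamma=1 / 12.4, initI=100)
    model.run(T=300)
    print(model.total_num_infections(t_idx=-1))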
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import tensorflow as tf
# Runtime data augmentation
def get_augmented(
X_train,
Y_train,
X_val=None,
Y_val=None,
batch_size=32,
seed=0,
data_gen_args = dict(
rotation_range=10.,
#width_shift_range=0.02,
height_shift_range=0.02,
shear_range=5,
#zoom_range=0.3,
horizontal_flip=True,
vertical_flip=False,
fill_mode='constant'
)):
# Train data, provide the same seed and keyword arguments to the fit and flow methods
X_datagen = ImageDataGenerator(**data_gen_args)
Y_datagen = ImageDataGenerator(**data_gen_args)
X_datagen.fit(X_train, augment=True, seed=seed)
Y_datagen.fit(Y_train, augment=True, seed=seed)
X_train_augmented = X_datagen.flow(X_train, batch_size=batch_size, shuffle=True, seed=seed)
Y_train_augmented = Y_datagen.flow(Y_train, batch_size=batch_size, shuffle=True, seed=seed)
def make_into_dataset(it):
def get_i(i):
return it[i]
r_dataset = tf.data.Dataset.range(len(it))
dataset = r_dataset.map(lambda i: tf.numpy_function(get_i, [i], tf.float32))
shape = it[0].shape
dataset = dataset.map(lambda x: tf.ensure_shape(x, shape))
return dataset
X_train_dataset = make_into_dataset(X_train_augmented)
Y_train_dataset = make_into_dataset(Y_train_augmented)
#train_generator = zip(X_train_augmented, Y_train_augmented)
train_generator = tf.data.Dataset.zip((X_train_dataset, Y_train_dataset))
if not (X_val is None) and not (Y_val is None):
# Validation data, no data augmentation, but we create a generator anyway
X_datagen_val = ImageDataGenerator(**data_gen_args)
Y_datagen_val = ImageDataGenerator(**data_gen_args)
X_datagen_val.fit(X_val, augment=True, seed=seed)
Y_datagen_val.fit(Y_val, augment=True, seed=seed)
X_val_augmented = X_datagen_val.flow(X_val, batch_size=batch_size, shuffle=True, seed=seed)
Y_val_augmented = Y_datagen_val.flow(Y_val, batch_size=batch_size, shuffle=True, seed=seed)
# combine generators into one which yields image and masks
val_generator = zip(X_val_augmented.x, Y_val_augmented.x)
return train_generator, val_generator
else:
return train_generator
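# Example usage (added sketch): build an augmented tf.data pipeline from small random
# image/mask arrays. Shapes and batch size are illustrative only.
if __name__ == '__main__':
    X_demo = np.random.rand(8, 64, 64, 1).astype(np.float32)
    Y_demo = (np.random.rand(8, 64, 64, 1) > 0.5).astype(np.float32)
    train_ds = get_augmented(X_demo, Y_demo, batch_size=4)
    imgs, masks = next(iter(train_ds))
    print(imgs.shape, masks.shape)  # (4, 64, 64, 1) (4, 64, 64, 1)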
def plot_segm_history(history, metrics=['iou', 'val_iou'], losses=['loss', 'val_loss']):
# summarize history for iou
plt.figure(figsize=(12,6))
for metric in metrics:
plt.plot(history.history[metric], linewidth=3)
plt.suptitle('metrics over epochs', fontsize=20)
plt.ylabel('metric', fontsize=20)
plt.xlabel('epoch', fontsize=20)
#plt.yticks(np.arange(0.3, 1, step=0.02), fontsize=35)
#plt.xticks(fontsize=35)
plt.legend(metrics, loc='center right', fontsize=15)
plt.show()
# summarize history for loss
plt.figure(figsize=(12,6))
for loss in losses:
plt.plot(history.history[loss], linewidth=3)
plt.suptitle('loss over epochs', fontsize=20)
plt.ylabel('loss', fontsize=20)
plt.xlabel('epoch', fontsize=20)
#plt.yticks(np.arange(0, 0.2, step=0.005), fontsize=35)
#plt.xticks(fontsize=35)
plt.legend(losses, loc='center right', fontsize=15)
plt.show()
def mask_to_red(mask):
'''
Converts binary segmentation mask from white to red color.
Also adds alpha channel to make black background transparent.
'''
img_size = mask.shape[0]
c1 = mask.reshape(img_size,img_size)
c2 = np.zeros((img_size,img_size))
c3 = | np.zeros((img_size,img_size)) | numpy.zeros |
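# Editor's sketch (not part of the original utilities): minimal use of
# get_augmented() above, assuming float32 image/mask arrays of shape (N, H, W, C).
def _example_get_augmented_usage():
    X_train = np.random.rand(8, 64, 64, 3).astype(np.float32)
    Y_train = (np.random.rand(8, 64, 64, 1) > 0.5).astype(np.float32)
    train_ds = get_augmented(X_train, Y_train, batch_size=4)
    # Each element is an identically augmented (image batch, mask batch) pair.
    x_batch, y_batch = next(iter(train_ds))
    return x_batch.shape, y_batch.shape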
import logging
import numpy as np
import scipy.integrate
class ZNDSolver(object):
"""Solver for steady solution"""
def __init__(self, config, reaction_rate):
self._config = config
self._reaction_rate = reaction_rate
self._max_lamda = 1.0 - self._config.lambda_tol
self._logger = logging.getLogger(__name__)
self._compute_parameters()
def compute(self, grid):
self._logger.info('Starting ZND structure computations')
assert grid[0] < 0.0, 'Left boundary should be negative'
#assert grid[-1] == 0.0, 'Right boundary should be zero'
msg = ('Domain length {0:.16f} is smaller than computed steady '
'reaction length {1:.16f}')
msg = msg.format(np.abs(grid[0]), self.reaction_length)
#assert grid[0] <= -self.reaction_length, msg
self._grid = grid
# self._positive_grid = np.linspace(
# 0.0, np.abs(grid[0]), len(self._grid))
self._positive_grid = np.flipud( | np.abs(self._grid) | numpy.abs |
#!/usr/bin/env python
# Edit this script to add your team's training code.
from helper_code import *
import numpy as np, os, sys, joblib
from sklearn.impute import SimpleImputer
from sklearn.ensemble import RandomForestClassifier
from keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.layers.experimental import preprocessing
from tensorflow.keras.applications import EfficientNetB0
from tensorflow.keras.models import Sequential
from tensorflow.keras import layers
import csv
import pandas as pd
from numpy.lib import stride_tricks
#import torch
#from torch.utils.data import Dataset, DataLoader
#import torchvision
#from torchvision.transforms import transforms
import tensorflow as tf
import cv2
################################################################################
#
# Training function
#
################################################################################
# Train your model. This function is **required**. Do **not** change the arguments of this function.
def training_code(data_directory, model_directory):
# Find header and recording files.
print('Finding header and recording files...')
header_files, recording_files = find_challenge_files(data_directory)
num_recordings = len(recording_files)
# Create a folder for the model if it does not already exist.
if not os.path.isdir(model_directory):
os.mkdir(model_directory)
# Extract classes from dataset.
print('Extracting classes...')
classes = set()
for header_file in header_files:
header = load_header(header_file)
classes |= set(get_labels(header))
#print(classes)
if all(is_integer(x) for x in classes):
classes = sorted(classes, key=lambda x: int(x))
else:
classes = sorted(classes)
num_classes = len(classes)
print('Extracting classes...###################')
# Extract features and labels from dataset.
print('Extracting features and labels...')
data = np.zeros((num_recordings, 14), dtype=np.float32) # 14 features: one feature for each lead, one feature for age, and one feature for sex
    labels = np.zeros((num_recordings, num_classes), dtype=bool) # One-hot encoding of classes
    if os.path.exists("myfile.csv"):
        os.remove("myfile.csv")
for i in range(num_recordings):
print(' {}/{}...'.format(i+1, num_recordings))
# Load header and recording.
header = load_header(header_files[i])
recording = load_recording(recording_files[i])
#age, sex, rms = get_features(header, recording, twelve_leads)
#data[i, 0:12] = rms
current_labels = get_labels(header)
for label in current_labels:
if label in classes:
train_data = get_signal_spectrum(header, recording, twelve_leads, label,i)
j = classes.index(label)
#labels[i, j] = 1
#Train 12-lead ECG model.
print('Training 12-lead ECG model...')
leads = twelve_leads
filename = os.path.join(model_directory, 'twelve_lead_ecg_model.sav')
col_Names=["filepath", "label"]
train_data = pd.read_csv('myfile.csv',names=col_Names)
#channel112 = (ch1, ch2, ch3,ch4,ch5,ch6,ch7, ch8, ch9,ch10,ch11,ch12)
train_data_chl12 = train_data[train_data['filepath'].str.contains('chl_1|chl_2|chl_3|chl_4|chl_5|chl_6|chl_7|chl_8|chl_9|chl_10|chl_11|chl_12')]
classes, leads, imputer, classifier = our_clsf_model(data_directory,train_data_chl12,leads)
save_model(filename, classes, leads, imputer, classifier)
# Train 6-lead ECG model.
print('Training 6-lead ECG model...')
leads = six_leads
filename = os.path.join(model_directory, 'six_lead_ecg_model.sav')
col_Names=["filepath", "label"]
train_data = pd.read_csv('myfile.csv',names=col_Names)
#channel6 = (ch1, ch2, ch3,ch4,ch5,ch6)
    train_data_chl6 = train_data[train_data['filepath'].str.contains('chl_1/|chl_2/|chl_3/|chl_4/|chl_5/|chl_6/')]  # trailing '/' avoids also matching chl_10-chl_12
classes, leads, imputer, classifier = our_clsf_model(data_directory,train_data_chl6,leads)
save_model(filename, classes, leads, imputer, classifier)
# Train 3-lead ECG model.
print('Training 3-lead ECG model...')
leads = three_leads
filename = os.path.join(model_directory, 'three_lead_ecg_model.sav')
col_Names=["filepath", "label"]
train_data = pd.read_csv('myfile.csv',names=col_Names)
#channel3 = (ch1, ch2, ch8)
    train_data_chl3 = train_data[train_data['filepath'].str.contains('chl_1/|chl_2/|chl_8/')]
classes, leads, imputer, classifier = our_clsf_model(data_directory,train_data_chl3,leads)
save_model(filename, classes, leads, imputer, classifier)
# Train 2-lead ECG model.
print('Training 2-lead ECG model...')
leads = two_leads
filename = os.path.join(model_directory, 'two_lead_ecg_model.sav')
col_Names=["filepath", "label"]
train_data = pd.read_csv('myfile.csv',names=col_Names)
#channel2 = (ch1, ch11)
    train_data_chl2 = train_data[train_data['filepath'].str.contains('chl_1/|chl_11/')]
classes, leads, imputer, classifier = our_clsf_model(data_directory,train_data_chl2,leads)
save_model(filename, classes, leads, imputer, classifier)
################################################################################
#
# File I/O functions
#
################################################################################
# Save your trained models.
def save_model(filename, classes, leads, imputer, classifier):
# Construct a data structure for the model and save it.
d = {'classes': classes, 'leads': leads, 'imputer': imputer, 'classifier': classifier}
joblib.dump(d, filename, protocol=0)
# Load your trained 12-lead ECG model. This function is **required**. Do **not** change the arguments of this function.
def load_twelve_lead_model(model_directory):
filename = os.path.join(model_directory, 'twelve_lead_ecg_model.sav')
return load_model(filename)
# Load your trained 6-lead ECG model. This function is **required**. Do **not** change the arguments of this function.
def load_six_lead_model(model_directory):
filename = os.path.join(model_directory, 'six_lead_ecg_model.sav')
return load_model(filename)
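# Load your trained 3-lead ECG model. Added by the editor for symmetry: the
# training code above saves a three-lead model, so a matching loader is assumed
# to be needed (filename mirrors the one used in training_code).
def load_three_lead_model(model_directory):
    filename = os.path.join(model_directory, 'three_lead_ecg_model.sav')
    return load_model(filename)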
# Load your trained 2-lead ECG model. This function is **required**. Do **not** change the arguments of this function.
def load_two_lead_model(model_directory):
filename = os.path.join(model_directory, 'two_lead_ecg_model.sav')
return load_model(filename)
# Generic function for loading a model.
def load_model(filename):
return joblib.load(filename)
################################################################################
#
# Running trained model functions
#
################################################################################
# Run your trained 12-lead ECG model. This function is **required**. Do **not** change the arguments of this function.
def run_twelve_lead_model(model, header, recording):
return run_model(model, header, recording)
# Run your trained 6-lead ECG model. This function is **required**. Do **not** change the arguments of this function.
def run_six_lead_model(model, header, recording):
return run_model(model, header, recording)
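# Run your trained 3-lead ECG model. Added by the editor to mirror the runners
# above (assumed to be required alongside the three-lead loader).
def run_three_lead_model(model, header, recording):
    return run_model(model, header, recording)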
# Run your trained 2-lead ECG model. This function is **required**. Do **not** change the arguments of this function.
def run_two_lead_model(model, header, recording):
return run_model(model, header, recording)
# Generic function for running a trained model.
def run_model(model, header, recording):
classes = model['classes']
leads = model['leads']
imputer = model['imputer']
classifier = model['classifier']
    if os.path.exists("myfile.csv"):
        os.remove("myfile.csv")
# Load features.
num_leads = len(leads)
current_labels = get_labels(header)
for label in current_labels:
if label in classes:
train_data = get_signal_spectrum(header, recording, leads, label, 1)
#labels[i, j] = 1
col_Names=["filepath", "label"]
train_data = pd.read_csv('myfile.csv',names=col_Names)
if num_leads == 12:
#channel112 = (ch1, ch2, ch3,ch4,ch5,ch6,ch7, ch8, ch9,ch10,ch11,ch12)
train_data_chl = train_data[train_data['filepath'].str.contains('chl_1|chl_2|chl_3|chl_4|chl_5|chl_6|chl_7|chl_8|chl_9|chl_10|chl_11|chl_12')]
elif num_leads == 6:
#channel6 = (ch1, ch2, ch3,ch4,ch5,ch6)
        train_data_chl = train_data[train_data['filepath'].str.contains('chl_1/|chl_2/|chl_3/|chl_4/|chl_5/|chl_6/')]  # trailing '/' avoids also matching chl_10-chl_12
elif num_leads == 3:
#channel3 = (ch1, ch2, ch8)
        train_data_chl = train_data[train_data['filepath'].str.contains('chl_1/|chl_2/|chl_8/')]
else:
#channel2 = (ch1, ch11)
        train_data_chl = train_data[train_data['filepath'].str.contains('chl_1/|chl_11/')]
image_dir = "Database_Image"
IMG_SIZE = 224
idg = ImageDataGenerator(width_shift_range=0.1, height_shift_range=0.1, zoom_range=0.3,fill_mode='nearest',horizontal_flip = True,rescale=1./255)
filepath="model/weights_best.hdf5"
classifier.load_weights(filepath)
train_data_generator = idg.flow_from_dataframe(train_data_chl, directory = image_dir, x_col = "filepath", y_col = "label", target_size=(IMG_SIZE , IMG_SIZE ),
class_mode = "categorical", shuffle = True)
imputer=SimpleImputer().fit(train_data_generator)
train_data_generator = imputer.transform(train_data_generator)
# Predict labels and probabilities.
labels = classifier.predict(train_data_generator)
    labels = np.asarray(labels, dtype=int)[0]
probabilities = classifier.predict_proba(train_data_generator)
probabilities = np.asarray(probabilities, dtype=np.float32)[:, 0, 1]
return classes, labels, probabilities
################################################################################
#
# Other functions
#
################################################################################
# Extract features from the header and recording.
def get_features(header, recording, leads):
# Extract age.
age = get_age(header)
if age is None:
age = float('nan')
# Extract sex. Encode as 0 for female, 1 for male, and NaN for other.
sex = get_sex(header)
if sex in ('Female', 'female', 'F', 'f'):
sex = 0
elif sex in ('Male', 'male', 'M', 'm'):
sex = 1
else:
sex = float('nan')
# Reorder/reselect leads in recordings.
available_leads = get_leads(header)
indices = list()
for lead in leads:
i = available_leads.index(lead)
indices.append(i)
recording = recording[indices, :]
# Pre-process recordings.
amplitudes = get_amplitudes(header, leads)
baselines = get_baselines(header, leads)
num_leads = len(leads)
for i in range(num_leads):
recording[i, :] = amplitudes[i] * recording[i, :] - baselines[i]
# Compute the root mean square of each ECG lead signal.
rms = np.zeros(num_leads, dtype=np.float32)
for i in range(num_leads):
x = recording[i, :]
rms[i] = np.sqrt(np.sum(x**2) / np.size(x))
return age, sex, rms
def get_signal_spectrum(header, recording, leads, label, k):
# print('Storing classes...')
mapped_scored_labels = np.array(list(csv.reader(open('dx_mapping_scored.csv'))))
data_directory_image = 'Database_Image/'
if not os.path.isdir(data_directory_image):
os.mkdir(data_directory_image)
# Reorder/reselect leads in recordings.
available_leads = get_leads(header)
indices = list()
for lead in leads:
i = available_leads.index(lead)
indices.append(i)
recording = recording[indices, :]
# Pre-process recordings.
amplitudes = get_amplitudes(header, leads)
baselines = get_baselines(header, leads)
num_leads = len(leads)
for i in range(num_leads):
recording[i, :] = amplitudes[i] * recording[i, :] - baselines[i]
index_value = np.where(mapped_scored_labels == label)[0]
for i in range(num_leads):
        if index_value.size:
index_lbl = ''.join(filter(str.isalpha,str(mapped_scored_labels[index_value,2])))
label_img_path = (data_directory_image +"chl_"+str(i+1)+"/"+ ''.join(filter(str.isalpha,str(mapped_scored_labels[index_value,2])))+"/")
if not os.path.isdir(label_img_path):
os.makedirs(label_img_path, exist_ok = True)
else:
index_lbl = str('NR')
label_img_path = data_directory_image+"chl_"+str(i+1)+"/"+ "NR/"
if not os.path.isdir(label_img_path):
os.makedirs(label_img_path, exist_ok = True)
x = recording[i, :]
sample_rate = get_frequency(header)
samples = x
Time = np.linspace(0, len(samples) / sample_rate, num=len(samples))
filename = str(k+1) +'_'+str(i+1)+'.png'
plotpath1 = str(label_img_path + filename)
with open('myfile.csv', 'a') as f:
writer = csv.writer(f)
writer.writerow([str(plotpath1), index_lbl])
img = plotstft(samples,sample_rate, plotpath=plotpath1)
return img,index_lbl
""" short time fourier transform of signal """
def stft(sig, frameSize, overlapFac=0.5, window=np.hanning):
win = window(frameSize)
hopSize = int(frameSize - np.floor(overlapFac * frameSize))
# zeros at beginning (thus center of 1st window should be for sample nr. 0)
samples = np.append(np.zeros(int(np.floor(frameSize/2.0))), sig)
# cols for windowing
cols = np.ceil( (len(samples) - frameSize) / float(hopSize)) + 1
# zeros at end (thus samples can be fully covered by frames)
samples = np.append(samples, np.zeros(frameSize))
frames = stride_tricks.as_strided(samples, shape=(int(cols), frameSize),
strides=(samples.strides[0]*hopSize,
samples.strides[0])).copy()
frames *= win
return np.fft.rfft(frames)
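# Editor's sketch (not in the original script): quick sanity check of stft() above
# on a synthetic tone; energy should concentrate in a single frequency bin.
def _example_stft_usage():
    fs = 1000.0
    t = np.arange(0, 1, 1.0 / fs)
    sig = np.sin(2 * np.pi * 50 * t)                      # 50 Hz tone
    spec = stft(sig, frameSize=256)                       # complex, (num_frames, 129)
    magnitude_db = 20.0 * np.log10(np.abs(spec) + 1e-10)  # offset avoids log(0)
    return magnitude_db.shape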
""" scale frequency axis logarithmically """
def logscale_spec(spec, sr, factor=20.):
timebins, freqbins = | np.shape(spec) | numpy.shape |
"""
Plot figures for the TreeTime validation, comparison with other methods on the
simulated dataset.
To plot the validation results, CSV files generated by the
'generate_simulated_data.py' script are required.
The script plots the reconstruction of the mutation rate and the tiome of the
most recent common ancestor in comparison with other methods (LSD, BEAST)
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as mplcm
import matplotlib.colors as colors
import os, sys
import pandas
from Bio import Phylo
import utility_functions_beast as beast_utils
import utility_functions_simulated_data as sim_utils
from plot_defaults import *
def read_treetime_results_csv(fname):
"""
Read results of the TreeTime simulations
Args:
- fname: path to the input file
Returns:
- df: Table of results as pandas data-frame
"""
columns = ['File', 'Sim_Tmrca', 'Tmrca', 'mu', 'R', 'R2_int']
df = pandas.read_csv(fname, names=columns,header=0)
#filter obviously failed simulations
df = df[[len(str(k)) > 10 for k in df.File]]
df = df[df.R > 0.1]
# some very basic preprocessing
df['dTmrca'] = -(df['Sim_Tmrca'] - df['Tmrca'])
    df['Sim_mu'] = list(map(lambda x: float(x.split("/")[-1].split('_')[6][2:]), df.File))
    df['Ns'] = list(map(lambda x: int(x.split("/")[-1].split('_')[3][2:]), df.File))
    df['Ts'] = list(map(lambda x: int(x.split("/")[-1].split('_')[4][2:]), df.File))
    df['N'] = list(map(lambda x: int(x.split("/")[-1].split('_')[2][1:]), df.File))
df['T'] = df['Ns']*df['Ts']
df['Nmu'] = (df['N']*df['Sim_mu'])
return df
def read_lsd_results_csv(fname):
"""
    Read results of the LSD simulations
Args:
- fname: path to the input file
Returns:
- df: Table of results as pandas data-frame
"""
columns = ['File', 'Sim_Tmrca', 'Tmrca', 'mu', 'obj']
df = pandas.read_csv(fname, names=columns,header=0)
# Filter out obviously wrong data
df = df[[len(k) > 10 for k in df.File]]
#Some basic preprocessing
df['dTmrca'] = -(df['Sim_Tmrca'] - df['Tmrca'])
    df['Sim_mu'] = list(map(lambda x: float(x.split("/")[-1].split('_')[6][2:]), df.File))
    df['Ns'] = list(map(lambda x: int(x.split("/")[-1].split('_')[3][2:]), df.File))
    df['Ts'] = list(map(lambda x: int(x.split("/")[-1].split('_')[4][2:]), df.File))
    df['N'] = list(map(lambda x: int(x.split("/")[-1].split('_')[2][1:]), df.File))
df['T'] = df['Ns']*df['Ts']
df['Nmu'] = (df['N']*df['Sim_mu'])
return df
def read_beast_results_csv(fname):
"""
Read results of the BEAST simulations
Args:
- fname: path to the input file
Returns:
- df: Table of results as pandas data-frame
"""
columns = ['File', 'N', 'Sim_Tmrca', 'Sim_mu', 'Ns', 'Ts', 'T', 'Nmu',
'LH', 'LH_std', 'Tmrca', 'Tmrca_std', 'mu', 'mu_std']
df = pandas.read_csv(fname, names=columns,header=0)
df = df[[len(k) > 10 for k in df.File]]
#import ipdb; ipdb.set_trace()
df['dTmrca'] = -(df['Sim_Tmrca'] - df['Tmrca'])
return df
def create_pivot_table(df, T_over_N=None, mean_or_median='median'):
"""
Create the pivot table to plot from the raw dataframe.
Args:
- df (pandas.DataFrame): the raw dataframe as read from a CSV file. Regardless
    of the source data (TreeTime, LSD, or BEAST), the dataframe is processed in the
    same way as long as it has the following columns:
        - T: (the total evolution time, or the tree diameter)
- N: (population size)
        - Nmu: (N*Mu - product of the population size and the mutation rate used in simulations)
- Sim_mu: mutation rate used in simulations
- mu: reconstructed mutation rate
- dTmrca: difference between the real and reconstructed Tmrca values
- T_over_N(float or None): the total evolution time expressed in the expected
    coalescence time scale. If not None, only those datapoints with corresponding
    T/N values will be kept for the pivot. Otherwise, no filtering is performed.
By default: the following values are available:
- 2.
- 4.
- 10.
NOTE: any other values can be produced by re-running the simulations
(generate_simulated_data_submit.py script) with other parameters
- mean_or_median(str, possible values: 'mean', 'median'): how errorbars should
be calculated.
        - 'mean': the datapoint is placed at the mean position, errorbars show
        the standard deviation
        - 'median': the datapoint is the median of the distribution, the errorbars are
        the 25th and 75th percentiles (quartiles).
"""
if T_over_N is not None:
DF = df[ df["T"] / df["N"] == T_over_N ]
else:
DF = df
N_MUS = np.unique(DF.Nmu)
N_MUS_idxs = np.ones(N_MUS.shape, dtype=bool)
mu_mean = []
mu_err = []
tmrca_mean = []
tmrca_err = []
for idx, N_MU in enumerate(N_MUS):
idxs = DF.Nmu == N_MU
if idxs.sum() == 0:
N_MUS_idxs[idx] = False
continue
dMu = -(DF.Sim_mu[idxs] - DF.mu[idxs])/DF.Sim_mu[idxs]
dMu.sort_values(inplace=True)
#dMu = dMu[int(dMu.shape[0]*0.05) : int(dMu.shape[0]*0.95)]
dTmrca = DF.dTmrca[idxs]/DF.N[idxs]
dTmrca.sort_values(inplace=True)
#dTmrca = dTmrca[int(dTmrca.shape[0]*0.05) : int(dTmrca.shape[0]*0.95)]
if mean_or_median == "mean":
mu_mean.append(np.mean(dMu))
mu_err.append(np.std(dMu))
tmrca_mean.append(np.mean(dTmrca))
tmrca_err.append(np.std(dTmrca))
else:
q75, q25 = np.percentile(dMu, [75 ,25])
mu_err.append((q75 - q25)) #np.std(DF.dTmrca[idxs])
mu_mean.append(np.median(dMu))
q75, q25 = | np.percentile(dTmrca, [75 ,25]) | numpy.percentile |
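# Editor's sketch (not in the original script): the two errorbar conventions
# described in create_pivot_table(), demonstrated on synthetic relative errors.
def _example_errorbar_conventions():
    dMu = np.random.normal(loc=0.0, scale=0.05, size=1000)
    mean_point, mean_err = np.mean(dMu), np.std(dMu)        # 'mean' mode
    q75, q25 = np.percentile(dMu, [75, 25])
    median_point, median_err = np.median(dMu), q75 - q25    # 'median' mode
    return (mean_point, mean_err), (median_point, median_err)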
#!/usr/bin/env python3
import click
import os
import random
import numpy as np
import torch
from torch import nn
from emtgan.common import *
from emtgan.datasets import *
from emtgan.models import *
from emtgan.utils import *
random.seed(1234)
np.random.seed(1234)
# set hyperparameters
discriminator_lr = 0.001
generator_lr = 0.001
num_epochs = 200
ensembles = 10
weight_decay = 0
betas = (
0.5,
0.999
)
lambda_adv = 1
lambda_cycle = 10
lambda_ident = 5
lambda_comp = 1e-4
CC0 = False
variant = ''
if CC0:
lambda_cycle = 0
variant = 'CC0'
enable_scheduling = True
def model_error(G, x, y):
input_branch_1, input_branch_2 = np.split(x, 2, 1)
input_1 = np2torch(input_branch_1)
input_2 = np2torch(input_branch_2)
op_branch_1 = G(input_1)
op_branch_2 = G(input_2)
op_branch_1 = torch2np(torch.cat([input_1[:,:2], op_branch_1], 1))
op_branch_2 = torch2np(torch.cat([input_2[:,:2], op_branch_2], 1))
y_1, y_2 = np.split(y, 2, 1)
dcap = np.linalg.norm(y_1 - y_2, axis=1)
d = np.linalg.norm((unnormalize(op_branch_1) - unnormalize(op_branch_2))[:,:3], axis=1)
return d - dcap
def model_MSE(G, x, y):
d_err = model_error(G, x, y)
err = d_err
return np.sum(np.square(err)) / x.shape[0]
def train_iteration(epoch, iteration, D_cl, opt_D_cl, D_lc, opt_D_lc, G_cl, G_lc, opt_G, Xlab, Xcarm, ycarm):
real, fake = make_labels_hard(Xlab.size(0))
lab_1, lab_2 = torch.split(Xlab, len(input_features), 1)
carm_1, carm_2 = torch.split(Xcarm, len(input_features), 1)
### train generators ###
opt_G.zero_grad()
fake_lab_1 = torch.cat([carm_1[:,:2], G_cl(carm_1)], 1)
fake_lab_2 = torch.cat([carm_2[:,:2], G_cl(carm_2)], 1)
fake_carm_1 = torch.cat([lab_1[:,:2], G_lc(lab_1)], 1)
fake_carm_2 = torch.cat([lab_2[:,:2], G_lc(lab_2)], 1)
## adversarial loss ##
# how well can G fool D?
loss_D_cl_adv = bceloss(D_cl(torch.cat([fake_lab_1, fake_lab_2], 1)), real)
loss_D_lc_adv = bceloss(D_lc(torch.cat([fake_carm_1, fake_carm_2], 1)), real)
loss_adv = (loss_D_cl_adv + loss_D_lc_adv) / 2
## cycle loss ##
# enforce cycle consistency
recov_lab = torch.cat([fake_carm_1[:,:2], G_cl(fake_carm_1)], 1)
recov_carm = torch.cat([fake_lab_1[:,:2], G_lc(fake_lab_1)], 1)
loss_recov_lab = mse(recov_lab, lab_1)
loss_recov_carm = mse(recov_carm, carm_1)
loss_cycle = (loss_recov_lab + loss_recov_carm) / 2
## identity loss ##
loss_ident_lab = mse(lab_1, torch.cat([lab_1[:,:2], G_cl(lab_1)], 1))
loss_ident_carm = mse(carm_1, torch.cat([carm_1[:,:2], G_lc(carm_1)], 1))
loss_ident = (loss_ident_lab + loss_ident_carm) / 2
d_fake = torch.norm(tensor_unnormalize(fake_lab_1)[:,:3] - tensor_unnormalize(fake_lab_2)[:,:3], 2, 1)
y_1, y_2 = torch.split(ycarm, 3, 1)
d_real = torch.norm(y_1 - y_2, 2, 1)
loss_comp = mse(d_fake, d_real)
## total loss for both generators ##
loss_G = lambda_adv * loss_adv + lambda_cycle * loss_cycle + lambda_ident * loss_ident + lambda_comp * loss_comp
torch.nn.utils.clip_grad_norm_(G_lc.parameters(), 1.0)
torch.nn.utils.clip_grad_norm_(G_cl.parameters(), 1.0)
loss_G.backward()
opt_G.step()
real, fake = make_labels_soft(Xlab.size(0))
### train discriminators
## D_cl
opt_D_cl.zero_grad()
fake_lab_1 = torch.cat([carm_1[:,:2], G_cl(carm_1)], 1)
fake_lab_2 = torch.cat([carm_2[:,:2], G_cl(carm_2)], 1)
loss_real = bceloss(D_cl(Xlab), real) + bceloss(D_cl(Xcarm), fake)
loss_fake = bceloss(D_cl(torch.cat([fake_lab_1, fake_lab_2], 1)), fake)
loss_D_cl = (loss_real + loss_fake) / 3
torch.nn.utils.clip_grad_norm_(D_cl.parameters(), 1.0)
loss_D_cl.backward()
opt_D_cl.step()
## D_lc
opt_D_lc.zero_grad()
fake_carm_1 = torch.cat([lab_1[:,:2], G_lc(lab_1)], 1)
fake_carm_2 = torch.cat([lab_2[:,:2], G_lc(lab_2)], 1)
loss_real = bceloss(D_lc(Xcarm), real) + bceloss(D_lc(Xlab), fake)
loss_fake = bceloss(D_lc(torch.cat([fake_carm_1, fake_carm_2], 1)), fake)
loss_D_lc = (loss_real + loss_fake) / 3
torch.nn.utils.clip_grad_norm_(D_lc.parameters(), 1.0)
loss_D_lc.backward()
opt_D_lc.step()
return dict(
discriminator_CL=loss_D_cl,
discriminator_LC=loss_D_lc,
cycle=lambda_cycle * loss_cycle,
adversarial=lambda_adv * loss_adv,
ident=lambda_ident * loss_ident,
comp=lambda_comp * loss_comp,
generator=loss_G
)
def train_model():
val_losses = np.array([])
min_val_loss_total = np.inf
num_iterations = min(len(lab_dataloader), len(carm_dataloader))
for model_num in range(ensembles):
#### Discriminators ####
## D for c-arm --> lab conversion
D_cl = CycleGANDiscriminatorNetwork().to(cuda)
initialize_weights_normal(D_cl)
opt_D_cl = optim.Adam(D_cl.parameters(), lr=discriminator_lr, betas=betas)
## D for lab --> c-arm conversion
D_lc = CycleGANDiscriminatorNetwork().to(cuda)
initialize_weights_normal(D_lc)
opt_D_lc = optim.Adam(D_lc.parameters(), lr=discriminator_lr, betas=betas)
#### Generators ####
## G for c-arm --> lab conversion
G_cl = CycleGANGeneratorNetwork().to(cuda)
initialize_weights_normal(G_cl)
## G for lab --> c-arm conversion
G_lc = CycleGANGeneratorNetwork().to(cuda)
initialize_weights_normal(G_lc)
opt_G = optim.Adam(chain(G_lc.parameters(), G_cl.parameters()), lr=generator_lr, betas=betas)
min_val_loss = np.inf
min_val_index = 0
hist_epoch = np.array([])
hist_train_losses = {}
hist_val_loss = np.array([])
if enable_scheduling:
sched_G = optim.lr_scheduler.LambdaLR(opt_G, lr_lambda=DecayLambda(num_epochs, 0, num_epochs // 2).step)
sched_D_cl = optim.lr_scheduler.LambdaLR(opt_D_cl, lr_lambda=DecayLambda(num_epochs, 0, num_epochs // 2).step)
sched_D_lc = optim.lr_scheduler.LambdaLR(opt_D_lc, lr_lambda=DecayLambda(num_epochs, 0, num_epochs // 2).step)
## adversarial training
for epoch in range(num_epochs):
train_losses = {}
for iteration in range(num_iterations):
lab_batch = next(iter(lab_dataloader))
carm_batch = next(iter(carm_dataloader))
Xlab = lab_batch['x'].float().to(cuda)
Xcarm = carm_batch['x'].float().to(cuda)
ycarm = carm_batch['gt'].float().to(cuda)
losses = train_iteration(
epoch,
iteration,
D_cl, opt_D_cl,
D_lc, opt_D_lc,
G_cl, G_lc, opt_G,
Xlab, Xcarm,
ycarm
)
for key, value in losses.items():
if key not in train_losses:
train_losses[key] = np.array([])
train_losses[key] = np.append(train_losses[key], np.mean(torch2np(losses[key])))
#update_loss_dict(hist_train_losses, train_losses)
if enable_scheduling:
sched_G.step()
sched_D_cl.step()
sched_D_lc.step()
# average training loss
hist_epoch = np.append(hist_epoch, epoch)
# compute validation loss
val_loss = model_MSE(G_cl, xval_N, yval)#np.mean(train_losses['generator'])
hist_val_loss = | np.append(hist_val_loss, val_loss) | numpy.append |
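# Editor's sketch (not in the original script): the distance-consistency error used
# by model_error()/model_MSE(), illustrated with plain NumPy and no network involved.
def _example_distance_error():
    y_1 = np.random.rand(16, 3)                       # ground-truth positions, branch 1
    y_2 = np.random.rand(16, 3)                       # ground-truth positions, branch 2
    pred_1 = y_1 + 0.01 * np.random.randn(16, 3)      # stand-ins for generator outputs
    pred_2 = y_2 + 0.01 * np.random.randn(16, 3)
    dcap = np.linalg.norm(y_1 - y_2, axis=1)          # true pairwise distances
    d = np.linalg.norm(pred_1 - pred_2, axis=1)       # predicted pairwise distances
    return np.sum(np.square(d - dcap)) / 16           # mean squared distance error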
import numpy as np
from itertools import count
def remove_linebreak(path):
# path = 'clustalo-500_ph_new.fasta'
stem = path.split('.')[0]
ext = path.split('.')[1]
input_data = path
output_data = stem + '_stripped.' + ext
with open(input_data, 'r') as in_file, \
open(output_data, 'w') as out_file:
for ii,row in enumerate(in_file):
row = row.strip('\n')
if ii > 0 and ">" in row:
out_file.write('\n')
out_file.write(row)
if ">" in row:
out_file.write('\n')
return output_data
def remove_mafft_train(path):
stem = path.split('.')[0]
ext = path.split('.')[1]
input_data = path
output_data = stem + '_predict_chunk.' + ext
with open(input_data, 'r') as fas_file,\
open(output_data, 'w') as out_file:
read_iter = iter(fas_file.readline, '')
for fas_head, fas_data in zip(read_iter, read_iter):
if ">TEST" in fas_head:
# print(fas_head)
out_file.write(fas_head.strip('\n\r')+'\n')
out_file.write(fas_data.strip('\n\r')+'\n')
return output_data
def change_to_sparse_categorical(symbols):
canonical_symbols = '-ACDEFGHIKLMNPQRSTVWY'
canonical_symbols = dict(zip(canonical_symbols, count()))
# canonical_symbols = {'-': 0, 'A': 1, 'C': 2, 'D': 3, ...}
new_symbols = []
for s in symbols:
if s in canonical_symbols:
new_symbols.append(canonical_symbols[s])
else:
new_symbols.append(0)
return | np.array(new_symbols) | numpy.array |
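# Editor's sketch (not part of the original utilities): encoding a short sequence;
# symbols outside the canonical alphabet (e.g. 'X') fall back to 0, the gap index.
def _example_sparse_categorical_usage():
    encoded = change_to_sparse_categorical("AC-DXW")
    # '-ACDEFGHIKLMNPQRSTVWY' gives A=1, C=2, '-'=0, D=3, X (unknown)=0, W=19
    return encoded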
"""Generate torsional profiles for all rotatable bonds.
This script will read in an SD file containing one or more molecules and add an
SD property encoding the torsional profile (generated using TorsionNet) for each
rotatable bond.
This script requires you to have a trained TorsionNet model and the associated
standard scaler. Use the notebooks in the notebooks/ folder to generate these.
Example:
python generate_profiles.py data/test_molecules.sdf test_molecules_out.sdf notebooks/model.h5 notebooks/scaler.pkl
"""
import time
import pickle
import logging
import argparse
import numpy as np
import tensorflow as tf
from openeye import oechem
from torsion.model import get_sf_elements
from torsion.confs import get_torsional_confs
from torsion.dihedral import (
extract_torsion_atoms,
get_canonical_torsions,
extract_molecule_torsion_data,
get_molecule_torsion_fragments,
)
from torsion.inchi_keys import get_specific_dihedral_inchi_key
from torsion.utils.process_sd_data import (
get_sd_data,
TOTAL_STRAIN_TAG,
reorder_sd_props,
generate_energy_profile_sd_data_1d,
extract_numeric_data_from_profile_str,
)
from torsion.utils.interpolate import get_global_min_interp1d
SPECIFIC_INCHI_TAG = 'specific_inchi'
NUM_TORSION_PROFILES_TAG = 'NUM_TORSIONNET_TORSION_PROFILES'
PROFILE_TAG = "TORSIONNET_PROFILE"
HAS_PROFILES_TAG = 'has_profiles'
SKIP_TORSION_TAG = 'skip_torsion'
ENERGY_PROFILE_TAG = 'ENERGY_PROFILE'
TORSION_ATOMS_FRAGMENT_TAG = 'TORSION_ATOMS_FRAGMENT'
LOW_PREDICTION_CONFIDENCE_TAG = 'LOW'
NUM_LOW_CONFIDENCE_TORSIONS_TAG = 'NUM_LOW_CONFIDENCE_TORSIONS'
OFFSET_THRESHOLD = 1.0
HIGH_PREDICTION_CONFIDENCE_TAG = 'HIGH'
PROFILE_OFFSET_TAG = 'profile_offset'
STRAIN_TAG = 'TORSIONNET_STRAIN'
def has_undesirable_elements(mol):
'''
returns True if molecule contains any element other than
H, C, N, O, F, S, Cl, or P
@param mol:
@type mol: OEGraphMol
@return: bool
'''
atomsHC = oechem.OEOrAtom(oechem.OEIsHydrogen(), oechem.OEIsCarbon())
atomsNO = oechem.OEOrAtom(oechem.OEIsNitrogen(), oechem.OEIsOxygen())
atomsFS = oechem.OEOrAtom(oechem.OEHasAtomicNum(9), oechem.OEIsSulfur())
atomsHCNO = oechem.OEOrAtom(atomsHC, atomsNO)
atomsHCNOFS = oechem.OEOrAtom(atomsHCNO, atomsFS)
atomsHCNOFSCl = oechem.OEOrAtom(atomsHCNOFS, oechem.OEHasAtomicNum(17))
atomsHCNOFSClP = oechem.OEOrAtom(atomsHCNOFSCl, oechem.OEIsPhosphorus())
undesirable_atom = mol.GetAtom(oechem.OENotAtom(atomsHCNOFSClP))
if undesirable_atom is not None:
return True
return False
def parse_args():
"""Parse command line arguments"""
parser = argparse.ArgumentParser()
# Logging arguments
verbosity_group = parser.add_mutually_exclusive_group()
verbosity_group.add_argument(
"-v",
"--verbose",
dest="verbose_count",
action="count",
default=0,
help="increases log verbosity for each occurence.",
)
verbosity_group.add_argument(
"-q",
"--quiet",
action="store_const",
const=-1,
default=0,
dest="verbose_count",
help="quiet output (show errors only)",
)
# Program arguments
parser.add_argument(
"--in", dest='infile', type=str, help="Input file (.sdf) containing one or more molecules."
)
parser.add_argument(
"--out",
dest="outfile",
type=str,
help="Output file (.sdf) containing molecules annotated with torsional profiles.",
)
parser.add_argument("--model", dest="model_file", type=str, help="Path to trained TorsionNet model.")
parser.add_argument("--scaler", dest="scaler_file", type=str, help="Path to scaler object used to scale input features to TorsionNet.")
args = parser.parse_args()
# Setup logging basics
base_loglevel = 30
verbosity = max(min(args.verbose_count, 2), -1)
loglevel = base_loglevel - (verbosity * 10)
logging.basicConfig(level=loglevel, format="%(message)s")
logging.getLogger().setLevel(loglevel)
logging.debug("DEBUG messages will be shown.") # 10 verbosity = 2
logging.info("INFO messages will be shown.") # 20 verbosity = 1
logging.warning("WARNING messages will be shown.") # 30 verbosity = 0
logging.error("ERROR messages will be shown.") # 40 verbosity = -1
return args
def generate_torsion_profile(mol_list):
sf_map = {}
for graph_mol in mol_list:
if oechem.OECount(graph_mol, oechem.OEIsRotor()) == 0:
logging.warning('WARNING: Skipping molecule %s... rotor count is zero', graph_mol.GetTitle())
continue
frag_mols = get_molecule_torsion_fragments(graph_mol)
if len(frag_mols) == 0:
logging.warning('WARNING: Skipping molecule %s... cannot identify torsional fragments', graph_mol.GetTitle())
continue
_, torsion_data = extract_molecule_torsion_data(graph_mol, frag_mols)
for frag_mol in frag_mols:
if has_undesirable_elements(frag_mol) or oechem.OECount(frag_mol, oechem.OEIsPhosphorus()) > 0:
logging.warning('WARNING: Skipping a fragment in molecule %s... fragment has undesirable elements', graph_mol.GetTitle())
continue
# skip fragments with one or more formal charge
skip_torsion = False
if oechem.OECount(frag_mol, oechem.OEHasFormalCharge(1)) > 0 \
or oechem.OECount(frag_mol, oechem.OEHasFormalCharge(2)) > 0:
skip_torsion = True
specific_inchi = get_specific_dihedral_inchi_key(frag_mol)
if specific_inchi not in sf_map:
sf_list = get_profile_sf(frag_mol)
sf_map[specific_inchi] = sf_list
torsion_data_items = torsion_data[specific_inchi]
for torsion_data_item in torsion_data_items:
a_idx, b_idx, c_idx, d_idx, _ = torsion_data_item
b = graph_mol.GetAtom(oechem.OEHasAtomIdx(b_idx))
c = graph_mol.GetAtom(oechem.OEHasAtomIdx(c_idx))
bond = graph_mol.GetBond(b, c)
if skip_torsion:
bond.SetData(SKIP_TORSION_TAG, True)
tor_atoms_str = ' '.join(list(
map(str, [a_idx, b_idx, c_idx, d_idx])))
if not bond.HasData(TORSION_ATOMS_FRAGMENT_TAG):
bond.SetData(TORSION_ATOMS_FRAGMENT_TAG, tor_atoms_str)
bond.SetData(SPECIFIC_INCHI_TAG, specific_inchi)
else:
tmp_data = bond.GetData(TORSION_ATOMS_FRAGMENT_TAG)
tmp_data = tmp_data + ':' + tor_atoms_str
bond.SetData(TORSION_ATOMS_FRAGMENT_TAG, tmp_data)
graph_mol.SetData(HAS_PROFILES_TAG, False)
for bond in graph_mol.GetBonds(oechem.OEIsRotor()):
if bond.HasData(TORSION_ATOMS_FRAGMENT_TAG):
graph_mol.SetData(HAS_PROFILES_TAG, True)
break
return mol_list, sf_map
def get_profile_sf(mol):
'''
Generates torsional conformations and corresponding
symmetric functions. Returns list containing
symmetry functions and angles
@param mol: OEMol
@return: list[list, list]
'''
torsional_mols = get_torsional_confs(mol)
sf_list = []
for torsional_mol in torsional_mols:
rsf = get_sf_elements(torsional_mol)
torsion_angle = float(get_sd_data(torsional_mol, 'TORSION_ANGLE'))
if torsion_angle > 180.0:
torsion_angle = torsion_angle - 360.0
sf_list.append((torsion_angle, rsf))
sf_list.sort()
return sf_list
def calculate_ml_profiles(mols, sf_map, model, scaler):
num_mols = len(mols)
for count, mol in enumerate(mols):
logging.info("Generating molecule profile %d/%d", (count+1), num_mols)
if mol.HasData(HAS_PROFILES_TAG) and mol.GetData(HAS_PROFILES_TAG) == True:
for bond in mol.GetBonds(oechem.OEIsRotor()):
if bond.HasData(ENERGY_PROFILE_TAG) and bond.HasData(PROFILE_OFFSET_TAG):
continue
if bond.HasData(SPECIFIC_INCHI_TAG):
specific_inchi = bond.GetData(SPECIFIC_INCHI_TAG)
profile, offset = calculate_sf_ML_profile(model, scaler,
sf_map[specific_inchi])
bond.SetData(ENERGY_PROFILE_TAG, profile)
bond.SetData(PROFILE_OFFSET_TAG, float(offset))
def calculate_sf_ML_profile(regr, scaler, sf_list):
X = | np.array([sf for angle, sf in sf_list]) | numpy.array |
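# Editor's sketch (not in the original script): the shape of the data handed to
# calculate_sf_ML_profile() - a sorted list of (torsion_angle, symmetry_function)
# pairs stacked into a 2-D feature matrix before scaling and prediction. The
# feature length used here is an arbitrary placeholder, not TorsionNet's real size.
def _example_sf_feature_matrix(sf_length=64, n_angles=24):
    sf_list = [(angle, np.random.rand(sf_length).tolist())
               for angle in np.linspace(-180.0, 165.0, n_angles)]
    X = np.array([sf for angle, sf in sf_list])
    return X.shape   # (n_angles, sf_length)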
import argparse
import numpy as np
import sklearn
from parallelm.mlops import mlops as mlops
# use below import if user wants to user RegressionMetrics predefined metrics names.
from parallelm.mlops.metrics_constants import RegressionMetrics
from parallelm.mlops.stats.bar_graph import BarGraph
from sklearn.datasets import make_regression
from sklearn.svm import SVR
def parse_args():
"""
Parse Arguments from component
:return:
"""
parser = argparse.ArgumentParser()
parser.add_argument("--num_samples", help="# samples")
parser.add_argument("--num_features", help="# features")
parser.add_argument("--kernel", help="Kernel")
parser.add_argument("--degree", help="Degree")
parser.add_argument("--gamma", help="Gamma")
parser.add_argument("--tol", help="Tol")
parser.add_argument("--max_iter", dest="max_iter", type=int, required=False, default=100,
help='Maximum number of iterations')
parser.add_argument("--output-model", help="Data file to save model")
options = parser.parse_args()
return options
def main():
pm_options = parse_args()
print("PM: Configuration:")
print("PM: # Sample: [{}]".format(pm_options.num_samples))
print("PM: # Features: [{}]".format(pm_options.num_features))
print("PM: Kernel: [{}]".format(pm_options.kernel))
print("PM: Degree: [{}]".format(pm_options.degree))
print("PM: Gamma: [{}]".format(pm_options.gamma))
print("PM: Tolerance: [{}]".format(pm_options.tol))
print("PM: Maximum iterations: [{}]".format(pm_options.max_iter))
print("PM: Output model: [{}]".format(pm_options.output_model))
# Initialize MLOps Library
mlops.init()
num_samples = int(pm_options.num_samples)
num_features = int(pm_options.num_features)
# Create synthetic data using scikit learn
X, y = make_regression(n_samples=num_samples,
n_features=num_features,
n_informative=2,
random_state=42)
# for making labels all positive
y = y + -1 * | np.min(y) | numpy.min |
"""
Test for file IO
"""
import pytest
import numpy as np
from bioptim import OdeSolver
from .utils import TestUtils
@pytest.mark.parametrize("ode_solver", [OdeSolver.RK4, OdeSolver.RK8, OdeSolver.IRK])
def test_muscle_driven_ocp(ode_solver):
bioptim_folder = TestUtils.bioptim_folder()
static_arm = TestUtils.load_module(bioptim_folder + "/examples/muscle_driven_ocp/static_arm.py")
ode_solver = ode_solver()
ocp = static_arm.prepare_ocp(
bioptim_folder + "/examples/muscle_driven_ocp/arm26.bioMod",
final_time=2,
n_shooting=10,
weight=1,
ode_solver=ode_solver,
)
sol = ocp.solve()
# Check constraints
g = np.array(sol.constraints)
np.testing.assert_equal(g.shape, (40, 1))
np.testing.assert_almost_equal(g, np.zeros((40, 1)), decimal=6)
# Check some of the results
q, qdot, tau, mus = sol.states["q"], sol.states["qdot"], sol.controls["tau"], sol.controls["muscles"]
if isinstance(ode_solver, OdeSolver.IRK):
# Check objective function value
f = np.array(sol.cost)
np.testing.assert_equal(f.shape, (1, 1))
np.testing.assert_almost_equal(f[0, 0], 0.14351611580879933)
# initial and final position
np.testing.assert_almost_equal(q[:, 0], np.array([0.07, 1.4]))
np.testing.assert_almost_equal(q[:, -1], np.array([-0.94511299, 3.07048865]))
# initial and final velocities
np.testing.assert_almost_equal(qdot[:, 0], np.array([0.0, 0.0]))
np.testing.assert_almost_equal(qdot[:, -1], np.array([0.41149114, -0.55863385]))
# initial and final controls
np.testing.assert_almost_equal(tau[:, 0], np.array([0.00147561, 0.00520749]))
np.testing.assert_almost_equal(tau[:, -1], np.array([-0.00027953, 0.00069257]))
np.testing.assert_almost_equal(
mus[:, 0],
np.array([2.29029533e-06, 1.64976642e-01, 1.00004898e-01, 4.01974257e-06, 4.13014984e-06, 1.03945583e-01]),
)
np.testing.assert_almost_equal(
mus[:, -1],
np.array([4.25940361e-03, 3.21754460e-05, 3.12984790e-05, 2.00725054e-03, 1.99993619e-03, 1.81725854e-03]),
)
elif isinstance(ode_solver, OdeSolver.RK8):
# Check objective function value
f = np.array(sol.cost)
np.testing.assert_equal(f.shape, (1, 1))
np.testing.assert_almost_equal(f[0, 0], 0.14350914060136277)
# initial and final position
np.testing.assert_almost_equal(q[:, 0], np.array([0.07, 1.4]))
np.testing.assert_almost_equal(q[:, -1], np.array([-0.94510844, 3.07048231]))
# initial and final velocities
np.testing.assert_almost_equal(qdot[:, 0], np.array([0.0, 0.0]))
np.testing.assert_almost_equal(qdot[:, -1], np.array([0.41151235, -0.55866253]))
# initial and final controls
np.testing.assert_almost_equal(tau[:, 0], np.array([0.00147777, 0.00520795]))
np.testing.assert_almost_equal(tau[:, -1], np.array([-0.00027953, 0.00069258]))
np.testing.assert_almost_equal(
mus[:, 0],
np.array([2.28863414e-06, 1.65011897e-01, 1.00017224e-01, 4.01934660e-06, 4.12974244e-06, 1.03954780e-01]),
)
np.testing.assert_almost_equal(
mus[:, -1],
np.array([4.25990460e-03, 3.21893307e-05, 3.13077447e-05, 2.01209936e-03, 2.00481801e-03, 1.82353344e-03]),
)
else:
# Check objective function value
f = np.array(sol.cost)
np.testing.assert_equal(f.shape, (1, 1))
np.testing.assert_almost_equal(f[0, 0], 0.14350464848810182)
# initial and final position
np.testing.assert_almost_equal(q[:, 0], np.array([0.07, 1.4]))
np.testing.assert_almost_equal(q[:, -1], np.array([-0.9451058, 3.0704789]))
# initial and final velocities
np.testing.assert_almost_equal(qdot[:, 0], np.array([0.0, 0.0]))
np.testing.assert_almost_equal(qdot[:, -1], np.array([0.4115254, -0.5586797]))
# initial and final controls
np.testing.assert_almost_equal(tau[:, 0], np.array([0.0014793, 0.0052082]))
np.testing.assert_almost_equal(tau[:, -1], np.array([-0.0002795, 0.0006926]))
np.testing.assert_almost_equal(
mus[:, 0],
np.array([2.2869218e-06, 1.6503522e-01, 1.0002514e-01, 4.0190181e-06, 4.1294041e-06, 1.0396051e-01]),
)
np.testing.assert_almost_equal(
mus[:, -1],
np.array([4.2599283e-03, 3.2188697e-05, 3.1307377e-05, 2.0121186e-03, 2.0048373e-03, 1.8235679e-03]),
)
# save and load
TestUtils.save_and_load(sol, ocp, False)
# simulate
TestUtils.simulate(sol)
@pytest.mark.parametrize("ode_solver", [OdeSolver.RK4]) # Only one solver since it is very long
def test_muscle_activations_with_contact_driven_ocp(ode_solver):
# TODO: This test should be removed when DynamicsFcn.MUSCLE_ACTIVATIONS_AND_TORQUE_DRIVEN_WITH_CONTACT is
# unitary tested
# Load static_arm_with_contact
bioptim_folder = TestUtils.bioptim_folder()
static_arm = TestUtils.load_module(bioptim_folder + "/examples/muscle_driven_ocp/static_arm_with_contact.py")
ode_solver = ode_solver()
ocp = static_arm.prepare_ocp(
bioptim_folder + "/examples/muscle_driven_ocp/arm26_with_contact.bioMod",
final_time=2,
n_shooting=10,
weight=1,
ode_solver=ode_solver,
)
sol = ocp.solve()
if isinstance(ode_solver, OdeSolver.IRK):
# Check objective function value
f = np.array(sol.cost)
np.testing.assert_equal(f.shape, (1, 1))
np.testing.assert_almost_equal(f[0, 0], 0.14351397970185203)
# Check constraints
g = np.array(sol.constraints)
np.testing.assert_equal(g.shape, (60, 1))
np.testing.assert_almost_equal(g, np.zeros((60, 1)), decimal=6)
# Check some of the results
q, qdot, tau, mus = sol.states["q"], sol.states["qdot"], sol.controls["tau"], sol.controls["muscles"]
# initial and final position
np.testing.assert_almost_equal(q[:, 0], np.array([0, 0.07, 1.4]))
np.testing.assert_almost_equal(q[:, -1], np.array([0.0081671, -0.94509584, 3.07047323]))
# initial and final velocities
np.testing.assert_almost_equal(qdot[:, 0], np.array([0, 0.0, 0.0]))
np.testing.assert_almost_equal(qdot[:, -1], np.array([0.00093981, 0.41157421, -0.55870943]))
# initial and final controls
np.testing.assert_almost_equal(tau[:, 0], np.array([-3.49332839e-07, 1.47494809e-03, 5.20721575e-03]))
np.testing.assert_almost_equal(tau[:, -1], np.array([-2.72476211e-06, -2.79524486e-04, 6.92600551e-04]))
np.testing.assert_almost_equal(
mus[:, 0],
np.array([2.29081617e-06, 1.64961906e-01, 9.99986809e-02, 4.01995665e-06, 4.13036938e-06, 1.03940164e-01]),
)
np.testing.assert_almost_equal(
mus[:, -1],
np.array([4.25988708e-03, 3.21882769e-05, 3.13076618e-05, 2.01160287e-03, 2.00431774e-03, 1.82289866e-03]),
)
elif isinstance(ode_solver, OdeSolver.RK8):
# Check objective function value
f = np.array(sol.cost)
np.testing.assert_equal(f.shape, (1, 1))
np.testing.assert_almost_equal(f[0, 0], 0.14350699571954104)
# Check constraints
g = np.array(sol.constraints)
np.testing.assert_equal(g.shape, (60, 1))
np.testing.assert_almost_equal(g, np.zeros((60, 1)), decimal=6)
# Check some of the results
q, qdot, tau, mus = sol.states["q"], sol.states["qdot"], sol.controls["tau"], sol.controls["muscles"]
# initial and final position
np.testing.assert_almost_equal(q[:, 0], np.array([0, 0.07, 1.4]))
np.testing.assert_almost_equal(q[:, -1], np.array([0.00816709, -0.94509077, 3.07046606]))
# initial and final velocities
np.testing.assert_almost_equal(qdot[:, 0], np.array([0, 0.0, 0.0]))
np.testing.assert_almost_equal(qdot[:, -1], np.array([0.00093983, 0.411599, -0.55874465]))
# initial and final controls
np.testing.assert_almost_equal(tau[:, 0], np.array([-3.77284867e-07, 1.47710422e-03, 5.20766354e-03]))
np.testing.assert_almost_equal(tau[:, -1], np.array([-2.72484502e-06, -2.79525145e-04, 6.92616311e-04]))
np.testing.assert_almost_equal(
mus[:, 0],
np.array([2.28911678e-06, 1.64996819e-01, 1.00010798e-01, 4.01956674e-06, 4.12996816e-06, 1.03949142e-01]),
)
np.testing.assert_almost_equal(
mus[:, -1],
np.array([4.25994595e-03, 3.21879960e-05, 3.13075455e-05, 2.01165125e-03, 2.00436616e-03, 1.82298538e-03]),
)
else:
# Check objective function value
f = np.array(sol.cost)
np.testing.assert_equal(f.shape, (1, 1))
np.testing.assert_almost_equal(f[0, 0], 0.1435025030068162)
# Check constraints
g = np.array(sol.constraints)
np.testing.assert_equal(g.shape, (60, 1))
np.testing.assert_almost_equal(g, np.zeros((60, 1)), decimal=6)
# Check some of the results
q, qdot, tau, mus = sol.states["q"], sol.states["qdot"], sol.controls["tau"], sol.controls["muscles"]
# initial and final position
np.testing.assert_almost_equal(q[:, 0], np.array([0, 0.07, 1.4]))
np.testing.assert_almost_equal(q[:, -1], np.array([0.0081671, -0.9450881, 3.0704626]))
# initial and final velocities
np.testing.assert_almost_equal(qdot[:, 0], np.array([0, 0.0, 0.0]))
np.testing.assert_almost_equal(qdot[:, -1], np.array([0.0009398, 0.4116121, -0.5587618]))
# initial and final controls
np.testing.assert_almost_equal(tau[:, 0], np.array([-3.9652660e-07, 1.4785825e-03, 5.2079505e-03]))
np.testing.assert_almost_equal(tau[:, -1], np.array([-2.7248808e-06, -2.7952503e-04, 6.9262306e-04]))
np.testing.assert_almost_equal(
mus[:, 0],
np.array([2.2873915e-06, 1.6502014e-01, 1.0001872e-01, 4.0192359e-06, 4.1296273e-06, 1.0395487e-01]),
)
np.testing.assert_almost_equal(
mus[:, -1],
np.array([4.2599697e-03, 3.2187363e-05, 3.1307175e-05, 2.0116712e-03, 2.0043861e-03, 1.8230214e-03]),
)
# save and load
TestUtils.save_and_load(sol, ocp, False)
# simulate
TestUtils.simulate(sol)
@pytest.mark.parametrize("ode_solver", [OdeSolver.RK4]) # Only one solver since it is very long
def test_muscle_excitation_with_contact_driven_ocp(ode_solver):
# Load contact_forces_inequality_constraint_muscle_excitations
bioptim_folder = TestUtils.bioptim_folder()
contact = TestUtils.load_module(
bioptim_folder
+ "/examples/muscle_driven_with_contact/contact_forces_inequality_constraint_muscle_excitations.py"
)
boundary = 50
ode_solver = ode_solver()
ocp = contact.prepare_ocp(
bioptim_folder + "/examples/muscle_driven_with_contact/2segments_4dof_2contacts_1muscle.bioMod",
phase_time=0.3,
n_shooting=10,
min_bound=boundary,
ode_solver=ode_solver,
)
sol = ocp.solve()
# Check objective function value
f = np.array(sol.cost)
np.testing.assert_equal(f.shape, (1, 1))
np.testing.assert_almost_equal(f[0, 0], 0.14525619)
# Check some of the results
q, qdot, mus_states, tau, mus_controls = (
sol.states["q"],
sol.states["qdot"],
sol.states["muscles"],
sol.controls["tau"],
sol.controls["muscles"],
)
if isinstance(ode_solver, OdeSolver.IRK):
# Check constraints
g = np.array(sol.constraints)
np.testing.assert_equal(g.shape, (110, 1))
np.testing.assert_almost_equal(g[:90], np.zeros((90, 1)), decimal=6)
np.testing.assert_array_less(-g[90:], -boundary)
expected_pos_g = np.array(
[
[51.5414325],
[52.77742181],
[57.57780262],
[62.62940016],
[65.1683722],
[66.33551167],
[65.82614885],
[63.06016376],
[57.23683342],
[50.47124118],
[156.35594176],
[136.1362431],
[89.86994764],
[63.41325331],
[57.493027],
[55.09716611],
[53.77813649],
[52.90987628],
[52.19502561],
[50.56093511],
]
)
np.testing.assert_almost_equal(g[90:], expected_pos_g)
# initial and final position
np.testing.assert_almost_equal(q[:, 0], np.array([0.0, 0.0, -0.75, 0.75]))
np.testing.assert_almost_equal(
q[:, -1], np.array([-3.40708085e-01, 1.34155553e-01, -2.22589697e-04, 2.22589697e-04])
)
# initial and final velocities
np.testing.assert_almost_equal(qdot[:, 0], np.array([0.0, 0.0, 0.0, 0.0]))
np.testing.assert_almost_equal(
qdot[:, -1], np.array([-2.01858700e00, 4.49316671e-04, 4.03717411e00, -4.03717411e00])
)
# initial and final muscle state
np.testing.assert_almost_equal(mus_states[:, 0], np.array([0.5]))
np.testing.assert_almost_equal(mus_states[:, -1], np.array([0.52946019]))
# initial and final controls
np.testing.assert_almost_equal(tau[:, 0], np.array([-54.08860398]))
np.testing.assert_almost_equal(tau[:, -1], np.array([-26.70209712]))
np.testing.assert_almost_equal(mus_controls[:, 0], np.array([0.48071638]))
np.testing.assert_almost_equal(mus_controls[:, -1], np.array([0.40159522]))
elif isinstance(ode_solver, OdeSolver.RK8):
# Check constraints
g = np.array(sol.constraints)
np.testing.assert_equal(g.shape, (110, 1))
np.testing.assert_almost_equal(g[:90], np.zeros((90, 1)), decimal=6)
| np.testing.assert_array_less(-g[90:], -boundary) | numpy.testing.assert_array_less |
import cv2
import argparse
import numpy as np
from tqdm import trange
import imageio
import time
W = 256
H = 256
ref_ptr = []
src_ptr = []
dst_ptr = []
src_flag = False
#src: [77,132;109,144;184,116;149,143;115,176;151,177]
#dst: [77,112;120,107;196,89;154,101;119,154;161,146]
def clickndraw(event, x, y, flags, param):
global ref_ptr, src_flag, src_ptr, dst_ptr
if event == cv2.EVENT_LBUTTONDOWN:
ref_ptr.append((x, y))
src_flag = not src_flag
if src_flag:
src_ptr.append((x, y))
else:
if x - W < 0:
x = 0
else:
x = x - W
dst_ptr.append((x, y))
elif event == cv2.EVENT_LBUTTONUP:
ref_ptr.append((x, y))
if src_flag:
src_ptr.append((x, y))
else:
if x - W < 0:
x = 0
else:
x = x - W
dst_ptr.append((x, y))
def hardware_beier_neely(src_out, dst_out, src_img, dst_img):
src_coord = src_out.read().split('\n')
dst_coord = dst_out.read().split('\n')
morphed_img = np.zeros(src_img.shape)
alpha = 1.0
for i in range(len(src_coord)-1):
src_point = eval(src_coord[i])
dst_point = eval(dst_coord[i])
#print(src_point)
#print(dst_point)
morphed_img[i % 256, i // 256, :] = alpha * src_img[src_point[1], src_point[0], :] + (1-alpha) * dst_img[dst_point[1], dst_point[0], :]
return morphed_img
def beier_neely(input_line, target_line, img):
h, w, c = img.shape
point_num = input_line.shape[0]
line_num = point_num // 2
morphed_img = np.zeros(img.shape)
for y in range(1, h+1):
for x in range(1, w+1):
X = np.array([y, x])
X_source = np.zeros(2,)
d_sum = np.zeros(2, )
weighted_sum = 0.
if line_num > 0:
for i in range(1, line_num+1):
i_1 = (i-1) * 2 + 1
P_i = np.array([target_line[i_1 - 1][0], target_line[i_1 - 1][1]])
Q_i = np.array([target_line[i_1][0], target_line[i_1][1]])
QP_i = Q_i - P_i
P_i_src = np.array([input_line[i_1 - 1][0], input_line[i_1 - 1][1]])
Q_i_src = np.array([input_line[i_1][0], input_line[i_1][1]])
QP_i_src = Q_i_src - P_i_src
u = np.dot((X - P_i), QP_i) / (np.dot(QP_i, QP_i))
v = np.dot((X - P_i), np.array([-1 * QP_i[1], QP_i[0]])) / np.sqrt(np.dot(QP_i, QP_i))
X_i_src = P_i_src + u * QP_i_src + (v * np.array([-1 * QP_i_src[1], QP_i_src[0]]) / np.sqrt(np.dot(QP_i_src, QP_i_src)))
D_i = X_i_src - X
if u < 0:
dist = np.sqrt(np.dot(X - P_i, X - P_i))
elif u > 1:
dist = np.sqrt(np.dot(X - Q_i, X - Q_i))
else:
dist = np.abs(v)
length = np.sqrt( | np.dot(QP_i, QP_i) | numpy.dot |
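# Editor's sketch (not in the original script): the (u, v) line-coordinate projection
# used inside beier_neely(), shown for a single horizontal line P->Q of length 10.
def _example_line_projection():
    P = np.array([0.0, 0.0])
    Q = np.array([10.0, 0.0])
    X = np.array([5.0, 2.0])
    PQ = Q - P
    u = np.dot(X - P, PQ) / np.dot(PQ, PQ)                # 0.5: halfway along the line
    perp = np.array([-PQ[1], PQ[0]])                      # perpendicular to the line
    v = np.dot(X - P, perp) / np.sqrt(np.dot(PQ, PQ))     # 2.0: signed distance from it
    return u, v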
"""
created on Jan 29, 2014
@author: <NAME>, jajcay(at)cs.cas.cz
based on class by <NAME> -- https://github.com/vejmelkam/ndw-climate --
last update on Sep 26, 2017
"""
import csv
from datetime import date, timedelta, datetime
import numpy as np
from dateutil.relativedelta import relativedelta
from pyclits.functions import detrend_with_return
class DataField:
"""
Class holds the time series of a geophysical field. The fields for reanalysis data are
3-dimensional - two spatial and one temporal dimension. The fields for station data contains
temporal dimension and location specification.
"""
def __init__(self, data_folder='', data=None, lons=None, lats=None, time=None, verbose=False):
"""
Initializes either an empty data set or with given values.
"""
self.data_folder = data_folder
self.data = data
self.lons = lons
self.lats = lats
self.time = time
self.location = None # for station data
self.missing = None # for station data where could be some missing values
self.station_id = None # for station data
self.station_elev = None # in metres, for station data
self.var_name = None
self.nans = False
self.cos_weights = None
self.data_mask = None
self.verbose = verbose
def __str__(self):
"""
String representation.
"""
if self.data is not None:
return ("Geo data of shape %s as time x lat x lon." % str(self.data.shape))
else:
return("Empty DataField instance.")
def shape(self):
"""
Prints shape of data field.
"""
if self.data is not None:
return self.data.shape
else:
raise Exception("DataField is empty.")
def __getitem__(self, key):
"""
getitem representation.
"""
if self.data is not None:
return self.data[key]
else:
raise Exception("DataField is empty.")
def load(self, filename=None, variable_name=None, dataset='ECA-reanalysis', print_prog=True):
"""
Loads geophysical data from netCDF file for reanalysis or from text file for station data.
Now supports following datasets: (dataset - keyword passed to function)
ECA&D E-OBS gridded dataset reanalysis - 'ECA-reanalysis'
ECMWF gridded reanalysis - 'ERA'
NCEP/NCAR Reanalysis 1 - 'NCEP'
"""
from netCDF4 import Dataset
if dataset == 'ECA-reanalysis':
d = Dataset(self.data_folder + filename, 'r')
v = d.variables[variable_name]
data = v[:] # masked array - only land data, not ocean/sea
self.data = data.data.copy() # get only data, not mask
self.data[data.mask] = np.nan # filled masked values with NaNs
self.lons = d.variables['longitude'][:]
self.lats = d.variables['latitude'][:]
self.time = d.variables['time'][:] # days since 1950-01-01 00:00
self.time += date.toordinal(date(1950, 1, 1))
self.var_name = variable_name
if np.any(np.isnan(self.data)):
self.nans = True
if print_prog:
print("Data saved to structure. Shape of the data is %s" % (str(self.data.shape)))
print("Lats x lons saved to structure. Shape is %s x %s" % (str(self.lats.shape[0]), str(self.lons.shape[0])))
print("Time stamp saved to structure as ordinal values where Jan 1 of year 1 is 1")
print("The first data value is from %s and the last is from %s" % (str(self.get_date_from_ndx(0)), str(self.get_date_from_ndx(-1))))
print("Default temporal sampling in the data is %.2f day(s)" % (np.nanmean(np.diff(self.time))))
if np.any(np.isnan(self.data)):
print("The data contains NaNs! All methods are compatible with NaNs, just to let you know!")
d.close()
elif dataset == 'ERA':
d = Dataset(self.data_folder + filename, 'r')
v = d.variables[variable_name]
data = v[:]
if isinstance(data, np.ma.masked_array):
self.data = data.data.copy() # get only data, not mask
self.data[data.mask] = np.nan # filled masked values with NaNs
else:
self.data = data
self.lons = d.variables['longitude'][:]
self.lats = d.variables['latitude'][:]
if 'level' in d.variables.keys():
self.level = d.variables['level'][:]
self.time = d.variables['time'][:] # hours since 1900-01-01 00:00
self.time = self.time / 24.0 + date.toordinal(date(1900, 1, 1))
self.var_name = variable_name
if np.any(np.isnan(self.data)):
self.nans = True
if print_prog:
print("Data saved to structure. Shape of the data is %s" % (str(self.data.shape)))
print("Lats x lons saved to structure. Shape is %s x %s" % (str(self.lats.shape[0]), str(self.lons.shape[0])))
print("Time stamp saved to structure as ordinal values where Jan 1 of year 1 is 1")
print("The first data value is from %s and the last is from %s" % (str(self.get_date_from_ndx(0)), str(self.get_date_from_ndx(-1))))
print("Default temporal sampling in the data is %.2f day(s)" % (np.nanmean(np.diff(self.time))))
if np.any(np.isnan(self.data)):
print("The data contains NaNs! All methods are compatible with NaNs, just to let you know!")
d.close()
elif dataset == 'NCEP':
d = Dataset(self.data_folder + filename, 'r')
v = d.variables[variable_name]
data = v[:] # masked array - only land data, not ocean/sea
if isinstance(data, np.ma.masked_array):
self.data = data.data.copy() # get only data, not mask
self.data[data.mask] = np.nan # filled masked values with NaNs
else:
self.data = data
self.lons = d.variables['lon'][:]
if np.any(self.lons < 0):
self._shift_lons_to_360()
self.lats = d.variables['lat'][:]
if 'level' in d.variables.keys():
self.level = d.variables['level'][:]
self.time = d.variables['time'][:] # hours or days since some date
date_since = self._parse_time_units(d.variables['time'].units)
if "hours" in d.variables['time'].units:
self.time = self.time / 24.0 + date.toordinal(date_since)
elif "days" in d.variables['time'].units:
self.time += date.toordinal(date_since)
elif "months" in d.variables['time'].units:
from dateutil.relativedelta import relativedelta
for t in range(self.time.shape[0]):
self.time[t] = date.toordinal(date_since + relativedelta(months=+int(self.time[t])))
self.var_name = variable_name
if np.any(np.isnan(self.data)):
self.nans = True
if print_prog:
print("Data saved to structure. Shape of the data is %s" % (str(self.data.shape)))
print("Lats x lons saved to structure. Shape is %s x %s" % (str(self.lats.shape[0]), str(self.lons.shape[0])))
print("Time stamp saved to structure as ordinal values where Jan 1 of year 1 is 1")
print("The first data value is from %s and the last is from %s" % (str(self.get_date_from_ndx(0)), str(self.get_date_from_ndx(-1))))
print("Default temporal sampling in the data is %.2f day(s)" % (np.nanmean(np.diff(self.time))))
if np.any(np.isnan(self.data)):
print("The data contains NaNs! All methods are compatible with NaNs, just to let you know!")
d.close()
elif dataset == 'arbitrary':
d = Dataset(self.data_folder + filename, 'r')
v = d.variables[variable_name]
data = v[:] # masked array - only land data, not ocean/sea
if isinstance(data, np.ma.masked_array):
self.data = data.data.copy() # get only data, not mask
self.data[data.mask] = np.nan # filled masked values with NaNs
self.data_mask = data.mask.copy()
else:
self.data = data.copy()
self.data = np.squeeze(self.data)
for key in d.variables.keys():
if key == variable_name:
continue
if 'lat' in str(d.variables[key].name):
self.lats = d.variables[key][:]
if 'lon' in str(d.variables[key].name):
self.lons = d.variables[key][:]
if np.any(self.lons < 0):
self._shift_lons_to_360()
try: # handle netCDF variables that have no units attribute assigned
if 'since' in d.variables[key].units:
self.time = d.variables[key][:]
date_since = self._parse_time_units(d.variables[key].units)
if "hours" in d.variables[key].units:
self.time = self.time / 24.0 + date.toordinal(date_since)
elif "seconds" in d.variables[key].units:
self.time = self.time / 86400. + date.toordinal(date_since)
elif "days" in d.variables[key].units:
self.time += date.toordinal(date_since)
elif "months" in d.variables[key].units:
from dateutil.relativedelta import relativedelta
for t in range(self.time.shape[0]):
self.time[t] = date.toordinal(date_since + relativedelta(months = +int(self.time[t])))
except AttributeError:
pass
self.var_name = variable_name
if np.any(np.isnan(self.data)):
self.nans = True
if print_prog:
print("Data saved to structure. Shape of the data is %s" % (str(self.data.shape)))
print("Lats x lons saved to structure. Shape is %s x %s" % (str(self.lats.shape[0]), str(self.lons.shape[0])))
print("Time stamp saved to structure as ordinal values where Jan 1 of year 1 is 1")
print("The first data value is from %s and the last is from %s" % (str(self.get_date_from_ndx(0)), str(self.get_date_from_ndx(-1))))
print("Default temporal sampling in the data is %.2f day(s)" % (np.nanmean(np.diff(self.time))))
if np.any(np.isnan(self.data)):
print("The data contains NaNs! All methods are compatible with NaNs, just to let you know!")
d.close()
else:
raise Exception("Unknown or unsupported dataset!")
def _shift_lons_to_360(self):
"""
Shifts lons to 0-360 degree east.
"""
self.lons[self.lons < 0] += 360
ndx = np.argsort(self.lons)
self.lons = self.lons[ndx]
self.data = self.data[..., ndx]
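# Example: longitudes [-180.0, ..., -2.5, 0.0, 2.5, ...] are first mapped to [180.0, ..., 357.5, 0.0, 2.5, ...]
# and then sorted to [0.0, 2.5, ..., 357.5]; the data are reordered along the last (longitude) axis to match.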
@staticmethod
def _parse_time_units(time_string):
"""
Parses time units from netCDF file, returns date since the record.
"""
date_split = time_string.split('-')
y = ("%04d" % int(date_split[0][-4:]))
m = ("%02d" % int(date_split[1]))
d = ("%02d" % int(date_split[2][:2]))
return datetime.strptime("%s-%s-%s" % (y, m, d), '%Y-%m-%d')
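# Example: for a units string such as "hours since 1900-01-01 00:00:0.0" this returns datetime(1900, 1, 1);
# the loaders above then combine it with date.toordinal() and the hour/day/month offsets to build self.time
# as ordinal days (Jan 1 of year 1 == 1).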
def load_station_data(self, filename, dataset='ECA-station', print_prog=True, offset_in_file=0):
"""
Loads station data, usually from text file. Uses numpy.loadtxt reader.
"""
if dataset == 'Klem_day':
raw_data = np.loadtxt(self.data_folder + filename) # first column is continous year and second is actual data
self.data = np.array(raw_data[:, 1])
time = []
# use time iterator to go through the dates
y = int(np.modf(raw_data[0, 0])[1])
if np.modf(raw_data[0, 0])[0] == 0:
start_date = date(y, 1, 1)
delta = timedelta(days = 1)
d = start_date
while len(time) < raw_data.shape[0]:
time.append(d.toordinal())
d += delta
self.time = np.array(time)
self.location = 'Praha-Klementinum, Czech Republic'
print("Station data from %s saved to structure. Shape of the data is %s" % (self.location, str(self.data.shape)))
print("Time stamp saved to structure as ordinal values where Jan 1 of year 1 is 1")
if dataset == 'ECA-station':
with open(self.data_folder + filename, 'r') as f:
time = []
data = []
missing = []
i = 0 # line-counter
reader = csv.reader(f)
for row in reader:
i += 1
if i == 16 + offset_in_file: # line with location
c_list = list(filter(None, row[1].split(" ")))
del c_list[-2:]
country = ' '.join(c_list).lower()
station = ' '.join(row[0].split(" ")[7:]).lower()
self.location = station.title() + ', ' + country.title()
if i > 20 + offset_in_file: # actual data - len(row) = 5 as STAID, SOUID, DATE, TG, Q_TG
staid = int(row[0])
value = float(row[3])
year = int(row[2][:4])
month = int(row[2][4:6])
day = int(row[2][6:])
time.append(date(year, month, day).toordinal())
if value == -9999.:
missing.append(date(year, month, day).toordinal())
data.append(np.nan)
else:
data.append(value/10.)
self.station_id = staid
self.data = np.array(data)
self.time = np.array(time)
self.missing = np.array(missing)
if print_prog:
print("Station data from %s saved to structure. Shape of the data is %s" % (self.location, str(self.data.shape)))
print("Time stamp saved to structure as ordinal values where Jan 1 of year 1 is 1")
if self.missing.shape[0] != 0 and self.verbose:
print("** WARNING: There were some missing values! To be precise, %d missing values were found!" % (self.missing.shape[0]))
def copy_data(self):
"""
Returns the copy of data.
"""
return self.data.copy()
def copy(self, temporal_ndx=None):
"""
Returns a copy of DataField with data, lats, lons and time fields.
If temporal_ndx is not None, copies only selected temporal part of data.
"""
copied = DataField()
copied.data = self.data.copy()
copied.time = self.time.copy()
if temporal_ndx is not None:
copied.data = copied.data[temporal_ndx]
copied.time = copied.time[temporal_ndx]
if self.lats is not None:
copied.lats = self.lats.copy()
if self.lons is not None:
copied.lons = self.lons.copy()
if self.location is not None:
copied.location = self.location
if self.missing is not None:
copied.missing = self.missing.copy()
if self.station_id is not None:
copied.station_id = self.station_id
if self.station_elev is not None:
copied.station_elev = self.station_elev
if self.var_name is not None:
copied.var_name = self.var_name
if self.cos_weights is not None:
copied.cos_weights = self.cos_weights
if self.data_mask is not None:
copied.data_mask = self.data_mask
copied.nans = self.nans
return copied
def select_date(self, date_from, date_to, apply_to_data=True, exclusive=True):
"""
Selects the date range - date_from is inclusive; date_to is exclusive unless exclusive=False. Input is date(year, month, day).
"""
d_start = date_from.toordinal()
d_to = date_to.toordinal()
if exclusive:
ndx = np.logical_and(self.time >= d_start, self.time < d_to)
else:
ndx = np.logical_and(self.time >= d_start, self.time <= d_to)
if apply_to_data:
self.time = self.time[ndx] # slice time stamp
self.data = self.data[ndx, ...] # slice data
if self.data_mask is not None and self.data_mask.ndim > 2:
self.data_mask = self.data_mask[ndx, ...] # slice missing if exists
if self.missing is not None:
missing_ndx = np.logical_and(self.missing >= d_start, self.missing < d_to)
self.missing = self.missing[missing_ndx] # slice missing if exists
return ndx
def get_sliding_window_indexes(self, window_length, window_shift, unit='m', return_half_dates=False):
"""
Returns list of indices for sliding window analysis.
If return_half_dates is True, also returns dates in the middle of the interval for reference.
"""
from dateutil.relativedelta import relativedelta
if unit == 'm':
length = relativedelta(months = +window_length)
shift = relativedelta(months = +window_shift)
elif unit == 'd':
length = relativedelta(days = +window_length)
shift = relativedelta(days = +window_shift)
elif unit == 'y':
length = relativedelta(years = +window_length)
shift = relativedelta(years = +window_shift)
else:
raise Exception("Unknown time unit! Please, use one of the 'd', 'm', 'y'!")
ndxs = []
if return_half_dates:
half_dates = []
window_start = self.get_date_from_ndx(0)
window_end = window_start + length
while window_end <= self.get_date_from_ndx(-1):
ndx = self.select_date(window_start, window_end, apply_to_data=False)
ndxs.append(ndx)
if return_half_dates:
half_dates.append(window_start + (window_end - window_start) / 2)
window_start += shift
window_end = window_start + length
# add last
ndxs.append(self.select_date(window_start, window_end, apply_to_data=False))
if return_half_dates:
half_dates.append(window_start + (self.get_date_from_ndx(-1) - window_start) / 2)
if np.sum(ndxs[-1]) != np.sum(ndxs[-2]) and self.verbose:
print("**WARNING: last sliding window is shorter than others! (%d vs. %d in others)"
% (np.sum(ndxs[-1]), np.sum(ndxs[-2])))
if return_half_dates:
return ndxs, half_dates
else:
return ndxs
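# Usage sketch (assuming a DataField instance g with data and time already loaded):
#   ndxs, half_dates = g.get_sliding_window_indexes(window_length=10, window_shift=1,
#                                                    unit='y', return_half_dates=True)
#   for ndx in ndxs:
#       window = g.copy(temporal_ndx=ndx)  # analyse each 10-year window separately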
def create_time_array(self, date_from, sampling='m'):
"""
Creates time array for already saved data in 'self.data'.
From date_from to date_from + data length. date_from is inclusive.
Sampling:
'm' for monthly, could be just 'm' or '3m' as three-monthly
'd' for daily
'xh' where x = {1, 6, 12} for sub-daily.
"""
if 'm' in sampling:
if 'm' != sampling:
n_months = int(sampling[:-1])
timedelta = relativedelta(months=+n_months)
elif 'm' == sampling:
timedelta = relativedelta(months=+1)
elif sampling == 'd':
timedelta = relativedelta(days=+1)
elif sampling in ['1h', '6h', '12h']:
hourly_data = int(sampling[:-1])
timedelta = relativedelta(hours=+hourly_data)
elif sampling == 'y':
timedelta = relativedelta(years=+1)
else:
raise Exception("Unknown sampling.")
d_now = date_from
self.time = np.zeros((self.data.shape[0],))
for t in range(self.data.shape[0]):
self.time[t] = d_now.toordinal()
d_now += timedelta
def get_date_from_ndx(self, ndx):
"""
Returns the date of the variable from given index.
"""
return date.fromordinal(int(self.time[ndx]))
def get_spatial_dims(self):
"""
Returns the spatial dimensions of the data as list.
"""
return list(self.data.shape[-2:])
def find_date_ndx(self, date):
"""
Returns index which corresponds to the date. Returns None if the date is not contained in the data.
"""
d = date.toordinal()
pos = np.nonzero(self.time == d)
if not np.all(np.isnan(pos)):
return int(pos[0])
else:
return None
def get_closest_lat_lon(self, lat, lon):
"""
Returns closest lat, lon index in the data.
"""
return [np.abs(self.lats - lat).argmin(), np.abs(self.lons - lon).argmin()]
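# e.g. get_closest_lat_lon(50.1, 14.4) returns the [lat, lon] grid indices closest to Prague,
# which can then be used to index self.data[..., lat_ndx, lon_ndx].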
def select_months(self, months, apply_to_data=True):
"""
Subselects only certain months. Input is a list of month numbers.
"""
ndx = list(filter(lambda i: date.fromordinal(int(self.time[i])).month in months, range(len(self.time))))
if apply_to_data:
self.time = self.time[ndx]
self.data = self.data[ndx, ...]
return ndx
def select_lat_lon(self, lats, lons, apply_to_data = True):
"""
Selects region in lat/lon. Input is for both [from, to], both are inclusive. If None, the dimension is not modified.
"""
if self.lats is not None and self.lons is not None:
if lats is not None:
lat_ndx = np.nonzero(np.logical_and(self.lats >= lats[0], self.lats <= lats[1]))[0]
else:
lat_ndx = np.arange(len(self.lats))
if lons is not None:
if lons[0] < lons[1]:
lon_ndx = np.nonzero(np.logical_and(self.lons >= lons[0], self.lons <= lons[1]))[0]
elif lons[0] > lons[1]:
l1 = list(np.nonzero(np.logical_and(self.lons >= lons[0], self.lons <= 360))[0])
l2 = list(np.nonzero(np.logical_and(self.lons >= 0, self.lons <= lons[1]))[0])
lon_ndx = np.array(l1 + l2)
else:
lon_ndx = np.arange(len(self.lons))
if apply_to_data:
if self.data.ndim >= 3:
d = self.data.copy()
d = d[..., lat_ndx, :]
self.data = d[..., lon_ndx].copy()
self.lats = self.lats[lat_ndx]
self.lons = self.lons[lon_ndx]
if self.data_mask is not None:
d = self.data_mask
d = d[..., lat_ndx, :]
self.data_mask = d[..., lon_ndx]
elif self.data.ndim == 2: # multiple stations data
d = self.data.copy()
d = d[:, lat_ndx]
self.lons = self.lons[lat_ndx]
self.lats = self.lats[lat_ndx]
if lons is not None:
if lons[0] < lons[1]:
lon_ndx = np.nonzero(np.logical_and(self.lons >= lons[0], self.lons <= lons[1]))[0]
elif lons[0] > lons[1]:
l1 = list(np.nonzero(np.logical_and(self.lons >= lons[0], self.lons <= 360))[0])
l2 = list(np.nonzero(np.logical_and(self.lons >= 0, self.lons <= lons[1]))[0])
lon_ndx = np.array(l1 + l2)
else:
lon_ndx = np.arange(len(self.lons))
self.data = d[:, lon_ndx].copy()
self.lons = self.lons[lon_ndx]
self.lats = self.lats[lon_ndx]
if np.any(np.isnan(self.data)):
self.nans = True
else:
self.nans = False
return lat_ndx, lon_ndx
else:
raise Exception('Slicing data with no spatial dimensions, probably station data.')
def cut_lat_lon(self, lats_to_cut, lons_to_cut):
"""
Cuts region in lats/lons (puts NaNs in the selected regions).
Input is for both [from, to], both are inclusive. If None, the dimension is not modified.
"""
if self.lats is not None and self.lons is not None:
if lats_to_cut is not None:
lat_ndx = np.nonzero(np.logical_and(self.lats >= lats_to_cut[0], self.lats <= lats_to_cut[1]))[0]
if lons_to_cut is None:
self.data[..., lat_ndx, :] = np.nan
if lons_to_cut is not None:
if lons_to_cut[0] < lons_to_cut[1]:
lon_ndx = np.nonzero(np.logical_and(self.lons >= lons_to_cut[0], self.lons <= lons_to_cut[1]))[0]
elif lons_to_cut[0] > lons_to_cut[1]:
l1 = list(np.nonzero(np.logical_and(self.lons >= lons_to_cut[0], self.lons <= 360))[0])
l2 = list(np.nonzero(np.logical_and(self.lons >= 0, self.lons <= lons_to_cut[1]))[0])
lon_ndx = np.array(l1 + l2)
if lats_to_cut is None:
self.data[..., lon_ndx] = np.nan
if lats_to_cut is not None and lons_to_cut is not None:
for lat in lat_ndx:
for lon in lon_ndx:
self.data[..., lat, lon] = np.nan
else:
raise Exception('Slicing data with no spatial dimensions, probably station data.')
def select_level(self, level):
"""
Selects the proper level from the data. Input should be integer >= 0.
"""
if self.data.ndim > 3:
self.data = self.data[:, level, ...]
self.level = self.level[level]
else:
raise Exception('Slicing level in single-level data.')
def extract_day_month_year(self):
"""
Extracts the self.time field into three fields containing days, months and years.
"""
n_days = len(self.time)
days = np.zeros((n_days,), dtype=int)
months = np.zeros((n_days,), dtype=int)
years = np.zeros((n_days,), dtype=int)
for i,d in zip(range(n_days), self.time):
dt = date.fromordinal(int(d))
days[i] = dt.day
months[i] = dt.month
years[i] = dt.year
return days, months, years
def latitude_cos_weights(self):
"""
Returns a grid with scaling weights based on cosine of latitude.
"""
if (self.cos_weights is not None) and (list(self.cos_weights.shape) == self.get_spatial_dims()):
return self.cos_weights
cos_weights = np.zeros(self.get_spatial_dims())
for ndx in range(self.lats.shape[0]):
cos_weights[ndx, :] = np.cos(self.lats[ndx] * np.pi/180.) ** 0.5
self.cos_weights = cos_weights
return cos_weights
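# The square root is used so that products of two weighted fields (e.g. covariances in PCA/EOF
# analysis, a common convention) scale with cos(latitude), i.e. with the grid-cell area.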
def missing_day_month_year(self):
"""
Extracts the self.missing field (if exists and is non-empty) into three fields containing days, months and years.
"""
if (self.missing is not None) and (self.missing.shape[0] != 0):
n_days = len(self.missing)
days = np.zeros((n_days,), dtype=int)
months = np.zeros((n_days,), dtype=int)
years = np.zeros((n_days,), dtype=int)
for i,d in zip(range(n_days), self.missing):
dt = date.fromordinal(int(d))
days[i] = dt.day
months[i] = dt.month
years[i] = dt.year
return days, months, years
else:
raise Exception('Luckily for you, there are no missing values!')
def flatten_field(self, f = None):
"""
Reshape the field to 2 dimensions such that axis 0 is temporal and axis 1 is spatial.
If f is None, reshape the self.data field, else reshape the f field.
Should only be used with single-level data.
"""
if f is None:
if self.data.ndim == 3:
self.data = np.reshape(self.data, (self.data.shape[0], np.prod(self.data.shape[1:])))
'''
Source: https://www.kaggle.com/helmehelmuto/cnn-keras-and-innvestigate
Use as a test benchmark
'''
import numpy as np
import pandas as pd
# Merge the two Data set together
df = pd.read_csv('../input/pdb_data_no_dups.csv').merge(pd.read_csv('../input/pdb_data_seq.csv'), how='inner', on='structureId')
# Drop rows with missing labels
df = df[[type(c) == type('') for c in df.classification.values]]
df = df[[type(c) == type('') for c in df.sequence.values]]
# select proteins
df = df[df.macromoleculeType_x == 'Protein']
df.reset_index()
df.shape
import matplotlib.pyplot as plt
from collections import Counter
# count numbers of instances per class
cnt = Counter(df.classification)
# select only 10 most common classes!
top_classes = 10
# sort classes
sorted_classes = cnt.most_common()[:top_classes]
classes = [c[0] for c in sorted_classes]
counts = [c[1] for c in sorted_classes]
print("at least " + str(counts[-1]) + " instances per class")
# apply to dataframe
print(str(df.shape[0]) + " instances before")
df = df[[c in classes for c in df.classification]]
print(str(df.shape[0]) + " instances after")
seqs = df.sequence.values
lengths = [len(s) for s in seqs]
# visualize
fig, axarr = plt.subplots(1,2, figsize=(20,5))
axarr[0].bar(range(len(classes)), counts)
plt.sca(axarr[0])
plt.xticks(range(len(classes)), classes, rotation='vertical')
axarr[0].set_ylabel('frequency')
axarr[1].hist(lengths, bins=100, normed=False)
axarr[1].set_xlabel('sequence length')
axarr[1].set_ylabel('# sequences')
plt.show()
from sklearn.preprocessing import LabelBinarizer
# Transform labels to one-hot
lb = LabelBinarizer()
Y = lb.fit_transform(df.classification)
from keras.preprocessing import text, sequence
from keras.preprocessing.text import Tokenizer
from sklearn.model_selection import train_test_split
# maximum length of sequence, everything afterwards is discarded!
max_length = 256
#create and fit tokenizer
tokenizer = Tokenizer(char_level=True)
tokenizer.fit_on_texts(seqs)
#represent input data as word rank number sequences
X = tokenizer.texts_to_sequences(seqs)
X = sequence.pad_sequences(X, maxlen=max_length)
from keras.models import Sequential
from keras.layers import Dense, Conv1D, MaxPooling1D, Flatten
from keras.layers import LSTM
from keras.layers.embeddings import Embedding
embedding_dim = 8
# create the model
model = Sequential()
model.add(Embedding(len(tokenizer.word_index)+1, embedding_dim, input_length=max_length))
model.add(Conv1D(filters=64, kernel_size=6, padding='same', activation='relu'))
model.add(MaxPooling1D(pool_size=2))
model.add(Conv1D(filters=32, kernel_size=3, padding='same', activation='relu'))
model.add(MaxPooling1D(pool_size=2))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dense(top_classes, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
print(model.summary())
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=.2)
model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=15, batch_size=128)
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
import itertools
train_pred = model.predict(X_train)
test_pred = model.predict(X_test)
print("train-acc = " + str(accuracy_score(np.argmax(y_train, axis=1), np.argmax(train_pred, axis=1))))
print("test-acc = " + str(accuracy_score(np.argmax(y_test, axis=1), np.argmax(test_pred, axis=1))))
# Compute confusion matrix
cm = confusion_matrix(np.argmax(y_test, axis=1), np.argmax(test_pred, axis=1))
# Plot normalized confusion matrix
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
np.set_printoptions(precision=2)
plt.figure(figsize=(10,10))
plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)
plt.title('Confusion matrix')
plt.colorbar()
tick_marks = np.arange(len(lb.classes_))
plt.xticks(tick_marks, lb.classes_, rotation=90)
plt.yticks(tick_marks, lb.classes_)
#for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
# plt.text(j, i, format(cm[i, j], '.2f'), horizontalalignment="center", color="white" if cm[i, j] > cm.max() / 2. else "black")
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.show()
print(classification_report(np.argmax(y_test, axis=1), np.argmax(test_pred, axis=1)))
# @author <NAME> <<EMAIL>>, Interactive Robotics Lab, Arizona State University
import json
import matplotlib.pyplot as plt
import numpy as np
from sklearn.metrics import mean_absolute_error
import pickle
from utils.voice import Voice
# Where to find the results file?
# FILE_PATH = "ours_full_cl.json"
FILE_PATH = "val_result.json"
# Where to find the normalization
# NORM_PATH = "../GDrive/normalization_v2.pkl"
NORM_PATH = "../GDrive/normalization_custom.pkl"
normalization = pickle.load(open(NORM_PATH, mode="rb"), encoding="latin1")
norm = np.take(normalization["values"], indices=[0,1,2,3,4,5,30], axis=1)
voice_class = Voice(load=False)
def normalize(value, v_min, v_max):
if (value.shape[1] != v_min.shape[0] or v_min.shape[0] != v_max.shape[0] or
len(value.shape) != 2 or len(v_min.shape) != 1 or len(v_max.shape) != 1):
raise ValueError("Array dimension mismatch between 'value', 'v_min' and 'v_max'")
value = np.copy(value)
v_min = np.tile(np.expand_dims(v_min, 0), [value.shape[0], 1])
v_max = np.tile(np.expand_dims(v_max, 0), [value.shape[0], 1])
value = (value - v_min) / (v_max - v_min)
return value
def rotateCoordinates(px, py, angle=-45):
r_px = px * np.cos(np.deg2rad(angle)) + py * np.sin(np.deg2rad(angle))
r_py = py * np.cos(np.deg2rad(angle)) - px * np.sin(np.deg2rad(angle))
return r_px, r_py
# !/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 16 13:34:51 2019
@author: jaime
#"""
import h5py as h5
from circle_fit import least_squares_circle
import pandas as pd
import re as re
from sys import platform
import numpy as np
import os
cmy = 365 * 24 * 60 * 60. * 100
class UserChoice(Exception):
def __init__(self, message):
self.message = message
# Print iterations progress
def printProgressBar(iteration, total, prefix='', suffix='', decimals=1, length=100, fill='█', printEnd="\r"):
"""
Call in a loop to create terminal progress bar
@params:
iteration - Required : current iteration (Int)
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals in per cent complete (Int)
length - Optional : character length of bar (Int)
fill - Optional : bar fill character (Str)
printEnd - Optional : end character (e.g. "\r", "\r\n") (Str)
"""
percent = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total)))
filledLength = int(length * iteration // total)
bar = fill * filledLength + '-' * (length - filledLength)
print(f'\r{prefix} |{bar}| {percent}% {suffix}', end=printEnd)
# Print New Line on Complete
if iteration == total:
print()
def get_model_name(model_dir):
if 'win' in platform:
if model_dir.endswith('\\'):
model_dir = model_dir[:-1]
return re.split(r'\\', model_dir)[-1]
else:
if model_dir.endswith('/'):
model_dir = model_dir[:-1]
return re.split('/', model_dir)[-1]
def velocity_rescale(df, scf):
df = df / scf * cmy
return df
def viscosity_rescale(df, scf):
df = np.log10(df * scf)
return df
def dim_eval(res):
# Not likely to be a 1D model.
if len(res) > 2:
return 3
else:
return 2
def get_res(model_dir):
# Make the file path
filename = model_dir + 'Mesh.linearMesh.00000.h5'
# Read everything
data = h5.File(filename, 'r')
res = data.attrs['mesh resolution']
# Get the dimensions:
ndims = dim_eval(res)
if ndims == 2:
return {'x': res[0] + 1, 'y': res[1] + 1}, ndims
else:
return {'x': res[0] + 1, 'y': res[1] + 1, 'z': res[2] + 1}, ndims
def ts_writer(ts_in):
# Making the timestep text:
return str(ts_in).zfill(5)
def get_time(mdir, ts):
data = h5.File(mdir + 'timeInfo.' + ts + '.h5', 'r')
time_out = data['currentTime'][0]
return time_out
def get_nproc(mdir):
data = h5.File(mdir + '/timeInfo.00000.h5', 'r')
return data['nproc'][0]
# %%
class UwLoader:
def __init__(self, model_dir, ts=0, scf=1e22, get_time_only=False):
if model_dir[-1] != '/':
self.model_dir = model_dir + '/'
else:
self.model_dir = model_dir
# Verify if the path is correct:
if not os.path.isdir(model_dir):
raise FileNotFoundError('No such model exists.')
self.res, self.dim = get_res(self.model_dir)
# Cores are not needed for now.
# Initiate a boundary coordinate
self.boundary = {}
# Set the default scaling:
self.scf = scf
# Save the model name
self.model_name = get_model_name(model_dir)
# Save an empty list/dict for any slicing that will be done
self.performed_slices = []
# Get the number of processors used
self.nproc = get_nproc(model_dir)
# set th initial timestep:
self.current_step = ts_writer(ts)
self.time_Ma = np.round(get_time(self.model_dir, self.current_step) * self.scf / (365 * 24 * 3600) / 1e6, 3)
if not get_time_only:
# Initiate a output dataframe
self.output = None
self._get_mesh()
# if get_all:
self.get_all()
self.starting_output = self.output # for slices
def set_current_ts(self, step):
"""
Function to reset the model output and replace the output object.
"""
# Re-instantiate the object with a new timestep:
self.__init__(model_dir=self.model_dir, ts=step, scf=self.scf)
##################################################
# RETRIEVING INFORMATION #
##################################################
def get_all(self):
"""
Function to get all existing variables from the current working directory.
"""
# print('Getting all variables...')
self.get_material()
self.get_velocity()
self.get_strain()
self.get_stress()
self.get_viscosity()
self.get_temperature()
# Get mesh information:
def _get_mesh(self):
# Set the file path:
filename = self.model_dir + 'Mesh.linearMesh.' + \
self.current_step + '.h5'
# Read the h5 file:
data = h5.File(filename, 'r')
# Get the information from the file:
mesh_info = data['vertices'][()]
# Write the info accordingly:
if self.dim == 2:
self.output = pd.DataFrame(data=mesh_info, columns=['x', 'y'], dtype='float')
else:
# in 3D:
self.output = pd.DataFrame(data=mesh_info, columns=['x', 'y', 'z'], dtype='float')
# Save the model dimensions:
axes = self.output.columns.values
max_dim = self.output.max().values
min_dim = self.output.min().values
for axis, min_val, max_val in zip(axes, min_dim, max_dim):
self.boundary[axis] = [min_val, max_val]
def get_velocity(self):
try:
self.scf
except NameError:
raise ValueError('No Scaling Factor detected!')
if type(self.output) == dict:
self._get_mesh()
# Set the file path:
filename = self.model_dir + 'VelocityField.' + \
self.current_step + '.h5'
# Read the h5 file:
data = h5.File(filename, 'r')
# Get the information from the file:
vel_info = data['data'][()]
# Write the info accordingly:
if self.dim == 2:
velocity = pd.DataFrame(data=vel_info, columns=['vx', 'vy'])
else:
# in 3D:
velocity = pd.DataFrame(data=vel_info, columns=['vx', 'vy', 'vz'])
# Rescale
velocity = velocity_rescale(velocity, self.scf)
# Merge with the current output dataframe
self.output = self.output.merge(velocity, left_index=True, right_index=True)
def get_viscosity(self, convert_to_log=True):
try:
self.scf
except:
raise ValueError('No Scaling Factor detected!')
if self.output is None:
self._get_mesh()
# Set the file path:
filename = self.model_dir + 'ViscosityField.' + \
self.current_step + '.h5'
# Read the h5 file:
data = h5.File(filename, 'r')
# Get the information from the file:
mat_info = data['data'][()]
# Write the info accordingly:
viscosity = pd.DataFrame(data=mat_info,
columns=['eta'])
# Rescale
if convert_to_log:
viscosity = viscosity_rescale(viscosity, self.scf)
else:
viscosity *= self.scf
# Merge:
self.output = self.output.merge(viscosity, left_index=True, right_index=True)
def get_material(self):
# Set the file path:
filename = self.model_dir + 'MaterialIndexField.' + \
self.current_step + '.h5'
# Read the h5 file:
data = h5.File(filename, 'r')
# Get the information from the file:
mat_info = data['data'][()]
# Write the info accordingly:
material = pd.DataFrame(data=mat_info, columns=['mat'])
# Merge
self.output = self.output.merge(material, left_index=True, right_index=True)
def get_temperature(self):
# Set the file path:
filename = self.model_dir + 'TemperatureField.' + \
self.current_step + '.h5'
# Read the h5 file:
data = h5.File(filename, 'r')
# Get the information from the file:
temp_info = data['data'][()]
# Write the info accordingly:
temperature = pd.DataFrame(data=temp_info, columns=['temp_K'])
temperature['temp_C'] = temperature.temp_K - 273.15
# Merge:
self.output = self.output.merge(temperature, left_index=True, right_index=True)
# Get the strain information
def get_strain(self):
# Set the file path:
filename = self.model_dir + 'recoveredStrainRateField.' + \
self.current_step + '.h5'
filename2 = self.model_dir + 'recoveredStrainRateInvariantField.' + \
self.current_step + '.h5'
# Read the h5 file:
data = h5.File(filename, 'r')
invariant = True
try:
data2 = h5.File(filename2, 'r')
except OSError:
invariant = False
# Get the information from the file:
strain_info = data['data'][()]
if invariant:
invariant_info = data2['data'][()]
# Write the info accordingly:
if self.dim == 2:
strain = pd.DataFrame(data=strain_info,
columns=['e_xx', 'e_yy', 'e_xy'])
else:
# in 3D:
strain = pd.DataFrame(data=strain_info,
columns=['e_xx', 'e_yy', 'e_zz',
'e_xy', 'e_xz', 'e_yz'])
# Rescale this variable, strain scales inversely to scf:
strain /= self.scf
# Add the invariant
if invariant:
strain['e_II'] = invariant_info
else:
# Calculate the invariant using the known components!
if self.dim == 2:
strain['e_II'] = np.sqrt(0.5 * (strain.e_xx ** 2 + strain.e_yy ** 2) + strain.e_xy ** 2)
else:
strain['e_II'] = np.sqrt(0.5 * (strain.e_xx ** 2 + strain.e_yy ** 2 + strain.e_zz ** 2) +
strain.e_xy ** 2 + strain.e_xz ** 2 + strain.e_yz ** 2)
# Merge with the output dataframe
self.output = self.output.merge(strain, left_index=True, right_index=True)
# Get the stress information
def get_stress(self):
# Set the file path:
filename = self.model_dir + 'recoveredDeviatoricStressField.' + \
self.current_step + '.h5'
filename2 = self.model_dir + 'recoveredDeviatoricStressInvariantField.' + \
self.current_step + '.h5'
# Read the h5 file:
data = h5.File(filename, 'r')
invariant = True
try:
data2 = h5.File(filename2, 'r')
except OSError:
invariant = False
# Get the information from the file:
stress_info = data['data'][()]
if invariant:
invariant_info = data2['data'][()]
# Write the info accordingly:
if self.dim == 2:
stress = pd.DataFrame(data=stress_info,
columns=['s_xx', 's_yy', 's_xy'])
else:
# in 3D:
stress = pd.DataFrame(data=stress_info,
columns=['s_xx', 's_yy', 's_zz',
's_xy', 's_xz', 's_yz'])
# Add the invariant
if invariant:
stress['s_II'] = invariant_info
# Merge:
self.output = self.output.merge(stress, left_index=True, right_index=True)
class SubductionModel(UwLoader, ):
def __init__(self, model_dir, horizontal_direction='x',
vertical_direction='y', surface_value=0, **kwargs):
# Initiate the uwobject
super().__init__(model_dir=model_dir, **kwargs)
self.horizontal_direction = horizontal_direction
self.vertical_direction = vertical_direction
self.surface_value = surface_value
self.get_material()
# Correct the depth scale:
self.correct_depth(vertical_direction=vertical_direction)
# Detect the trench position
self.trench = self.find_trench()
def get_curvature_radius(self, plate_id=4):
# TODO: FIX THIS SHIT
# Get the passive tracer position
MI, Pos = [], []
for core in np.arange(1, self.nproc + 1):
# Load the PTS file:
PTS = h5.File('{}/passiveTracerSwarm.{}.{:g}of{:g}.h5'.format(self.model_dir, self.current_step,
core,
self.nproc), mode='r')
# if there's an output from the file
if len(PTS.keys()) != 0:
MI.append(PTS['MaterialIndex'][()])
Pos.append(PTS['Position'][()])
# Get the values
MI = np.array(np.vstack(MI))
Pos = np.array(np.vstack(Pos))
# Prepare a dataframe for filtering
temp = {'mat': MI[:, 0], 'x': Pos[:, 0], 'y': Pos[:, 1]}
data = pd.DataFrame(temp)
data = data.sort_values(by='x')
# Correct the depth:
data.y = np.abs(data.y - data.y.max())
# Limit the data vertically?
# for dx in np.arange(100, 2000, 10)*1e3:
# TODO: add an automatic detection system for this
if plate_id == 4:
data = data[self.trench - 750e3 < data.x]
data = data[data.x <= self.trench + 200e3]
# elif plate_id == 6:
# data = data[self.trench - 400e3 < data.x]
# data = data[data.x <= self.trench + 200e3]
elif plate_id not in [6, 4]:
raise Exception('Currently invalid plate_id')
# data = data[data.y <= 200e3]
# Deal with the zigzagging by applying a window mean:
# avg_position = data[data.mat == int(plate_id)].rolling(window=5).mean().dropna()
avg_position = data[data.mat == int(plate_id)].dropna()
# Adjust for slab buckling and draping, clear the first "curvature" change using the 2nd derivative
x = avg_position.x.to_numpy()
y = avg_position.y.to_numpy()
# Fit a circle by least squares:
X = np.array([x, y])
# Different approaches
xc, yc, r, res = least_squares_circle(X.T)
# print('dx = {}, r = {}, res = {}'.format(dx, r, res))
return r, (xc, yc)
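# In short: the slab curvature radius is estimated by least-squares fitting a circle
# (least_squares_circle) to the passive-tracer positions of the selected plate in a window
# around the trench; r is the fitted radius and (xc, yc) the fitted circle centre.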
def find_trench(self, filter=True): # , horizontal_plane='xz', override_dim_check=False
"""
Function that returns the surface position of the subduction trench, following the
minimum divergence method.
TODO: 3D
Returns:
2D: Horizontal position of the trench
3D: Coordinate array for a line that represents the trench along the horizontal plane
"""
# Check for dimensions
# if self.dim == 2:
# Get the vertical coordinates
hdir = self.output[self.horizontal_direction]
vdir = self.output[self.vertical_direction]
# Get a surface slice
surface_index = vdir[vdir == self.surface_value].index
# Get velocity fields
condition = False
while not condition:
try:
# If this is being called by an external object, try and detect the velocities
vx = self.output.vx.iloc[surface_index].to_numpy()
condition = True
except AttributeError:
# If this is the first loading of the SubductionModel object or the velocities aren't present
self.get_velocity()
# Extract just the vertical velocity
vy = self.output.vy.iloc[surface_index].to_numpy()
# Calculate the fields 1st derivative
dvx = np.gradient(vx)
dx = np.gradient(hdir[surface_index])
# Calculate divergence (i.e. scalar change of a vector field)
div_v = dvx / dx
if filter:
div_v = div_v[30:-30]
# Store the trench id:
trench_id = div_v == min(div_v)
trench_id = np.array(trench_id)
trench_id = np.pad(trench_id, 30, mode='constant', constant_values=0)
# trench_id = np.argmax(trench_id == 1) + 30
else:
# Store the trench id:
trench_id = div_v == min(div_v)
return float(hdir[surface_index][trench_id])
# return trench_id
# elif self.ndims == 3:
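# In short: at the surface the trench sits where horizontal convergence is strongest,
# i.e. where the divergence d(vx)/dx is most negative, hence taking the minimum above.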
def get_polarity(self, op_material=4, plate_thickness=100., horizontal_plane='xz', trench_direction='z'):
# TODO: Adapt 2D
"""
Function for finding the overriding plate at a critical depth. This depth is 25% deeper than the expected thickness.
Parameters
> uw_object: an object created with the uw_model script, loaded with timestep, mesh and material.
> op_material: the ID or range of IDS for the overriding plate crust.
> plate_thickness: self-explanatory, maximum expected thickness for the lithosphere in km
> horizontal_plane: indicate the horizontal plane directions, by default 'xz'.
Options: 'xy', 'yz', 'xz'
> trench_direction: indicate the along trench direction, by default 'z'.
Options: 'x', 'y', 'z'
Returns:
New dataframe under model.polarity.
model.polarity with two columns: along trench axis positions and polarity state.
Zero (0) represents normal (i.e. initial polarity) while one (1) represents a reversed state.
Example use:
model = uw_model('path/to/model')
model.set_current_ts(time)
model.get_material()
model.get_polarity()
"""
# Set the critical depth:
critical_depth = 1.25 * plate_thickness * 1e3
if self.dim == 3:
# Catch a few errors:
if type(horizontal_plane) != str:
raise TypeError('Plane must be a string!')
if len(horizontal_plane) != 2:
raise ValueError('Plane can only contain two letters!')
if len(trench_direction) != 1:
raise ValueError('Trench direction is a single letter!')
# ====================================== CHECK VALIDITY ======================================
# Ensure the strings are correctly formatted.
horizontal_plane = "".join(sorted(horizontal_plane.lower())) # Correctly sorted and in lower case.
trench_direction = trench_direction.lower()
# Check if the plane is valid:
valid_planes = ['xy', 'yz', 'xz']
check = np.sum([sorted(horizontal_plane) == sorted(valid) for valid in valid_planes])
if check == 0:
raise ValueError('Plane is invalid. Please try a combination of ''x'', ''y'' and ''z''.')
# Check the plane direction:
slice_direction = 'xyz'
for char in horizontal_plane:
slice_direction = slice_direction.replace(char, '')
# Check if the direction of the trench is valid:
valid_direction = ['x', 'y', 'z']
check = np.sum([trench_direction == valid for valid in valid_direction])
if check == 0:
raise ValueError('Trench is invalid. Please try ''x'', ''y'' or ''z''.')
# Remove any slices:
self.remove_slices()
# Create a slice at that depth:
self.set_slice(slice_direction, value=self.output.y.max() - critical_depth, find_closest=True)
else:
# ================================ DETECT THE POLARITY ========================================
# Create a slice at that depth:
self.set_slice('y', value=self.output.y.max() - critical_depth, find_closest=True)
# Create a database just for the next operations, saves on memory and code:
reversed_index = self.output[self.output.mat == op_material].index.to_numpy()
# Detect along trench direction where it is reversed:
trench_dir_reverse = self.output[trench_direction].loc[reversed_index].unique()
# Remove any slices:
self.remove_slices()
# Create a zeros array, each zero will represent the normal polarity
polarity = pd.DataFrame(data=np.array([self.output[trench_direction].to_numpy(),
np.zeros(self.output.x.shape)]).T,
columns=(trench_direction, 'state'))
# Check all locations where trench direction reversed is found:
_, _, reversed_index = np.intersect1d(trench_dir_reverse,
self.output[trench_direction].to_numpy(),
return_indices=True)
# This only flags a single row for each reversed trench_direction value:
polarity.loc[reversed_index, 'state'] = 1
# Copy those values for all trench_direction values:
for td in trench_dir_reverse:
polarity.state[polarity[trench_direction] == td] = 1
# Add polarity to the main frame
self.output = self.output.merge(polarity, left_index=True, right_index=True)
# Check slices that were made before:
needed_slices = self.performed_slices.copy()
# Remake the ones deleted:
for slices in needed_slices:
print(f'Making slice: {slices}')
self.set_slice(**slices)
# Broadcast the polarity into the output?
def get_swarm(self, n_particles=5e3, assume_yes=False, correct_depth=False):
"""TODO: WRITE THE DOCUMENTATION"""
# CHECK if the user is sure of what they're doing
if not assume_yes:
while True:
user_input = input('Reading swarms could potentially take a VERY long time. Do you wish to continue? '
'(Y/N) ')
if user_input.lower() == 'y':
break
elif user_input.lower() == 'n':
raise UserChoice('User terminated the operation.')
# Start the output lists:
density, position, material = [], [], []
# for each of the cores
print('Amount of particles per core: {}'.format(int(n_particles)))
for core in range(1, self.nproc + 1):
# Load their respective file
data = h5.File(self.model_dir + "/materialSwarm.{}.{}of{}.h5".format(self.current_step, core, self.nproc),
mode='r')
# Get a "low" amount of random points (around 10k):
index = np.random.choice(len(data['Position']), int(n_particles))
# Append to the list:
density.append(data['DensityLabel'][()][index])
position.append(data['Position'][()][index])
material.append(data['MaterialIndex'][()][index])
# Add a progress bar to this VERY lengthy progress
printProgressBar(core, self.nproc, prefix='Reading swarm data at timestep {}:'.format(self.current_step),
suffix='complete', length=50)
# Concatenate all the information
position = np.concatenate(position)
density = np.concatenate(density)
material = np.concatenate(material)
# add these properties to the object
self.particle_data = pd.DataFrame(position, columns=['x', 'y', 'z'])
self.particle_data['density'] = density
self.particle_data['material'] = material
if correct_depth:
self.particle_data.y = np.abs(self.particle_data.y - self.particle_data.y.max())
def swarms_to_nodes(self):
"""TODO: combine all the output DFS into a single one.
For now this will just merge nodal positions with the swarm data"""
import scipy.spatial as spatial
# Get nodal positions:
if self.dim == 3:
mesh = self.output[['x', 'y', 'z']].to_numpy()
else:
mesh = self.output[['x', 'y']].to_numpy()
# Initiate the tree:
self._particle_tree = spatial.cKDTree(self.particle_data[['x', 'y', 'z']])
# Get the grid spacing (this assumes regular grids) TODO: allow irregular grids
dx = np.diff(self.output.x.unique())[0]
# Create a final density list:
density = np.zeros(self.output.x.shape)
for point, k in zip(mesh, range(mesh.shape[0])):
# add a progress bar:
printProgressBar(k, mesh.shape[0] - 1,
prefix='Interpolating density data at timestep {}:'.format(self.current_step),
suffix='complete', length=50)
# At each nodal point get the k closest particles (k=10 below):
swarm_index = self._get_neighbour_swarms(point, k=10)
# At each point, integrate the density of the swarms into the node point
density[k] = self.particle_data.iloc[swarm_index].density.mean()
if np.isnan(density[k]):
import time
import subprocess
from collections import namedtuple, defaultdict
import logging
import json
import os
import yaml
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import sys
import threading
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class State:
def __init__(self, model, optim, criterion):
self.model = model
self.optim = optim
self.criterion = criterion
self.epoch, self.iteration = 0, 0
def loadTensorBoard(outdir):
t = threading.Thread(target=launchTensorBoard, args=([outdir]))
t.start()
def launchTensorBoard(tensorBoardPath):
print("tensorboard --logdir=" + tensorBoardPath)
ret = os.system("tensorboard --logdir=" + tensorBoardPath)
if ret != 0:
syspath = os.path.dirname(sys.executable)
print(os.path.dirname(sys.executable))
ret = os.system(syspath + "/" + "tensorboard --logdir=" + tensorBoardPath)
return
class Orn_Uhlen:
def __init__(self, n_actions, mu=0, theta=0.15, sigma=0.2):
self.n_actions = n_actions
self.X = np.ones(n_actions) * mu
self.mu = mu
self.sigma = sigma
self.theta = theta
def reset(self):
self.X = np.ones(self.n_actions) * self.mu
def sample(self):
dX = self.theta * (self.mu - self.X)
dX += self.sigma * np.random.randn(self.n_actions)
self.X += dX
return torch.FloatTensor(self.X)
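# Ornstein-Uhlenbeck noise: each call applies dX = theta * (mu - X) + sigma * N(0, 1) with a unit
# time step, giving temporally correlated exploration noise (commonly used with DDPG-style agents).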
class FeatureExtractor(object):
def __init__(self):
super().__init__()
def getFeatures(self, obs):
pass
class NothingToDo(FeatureExtractor):
def __init__(self, env):
super().__init__()
ob = env.reset()
self.outSize = len(ob)
def getFeatures(self, obs):
return obs
###### For Gridworld #############
class MapFromDumpExtractor(FeatureExtractor):
def __init__(self, env):
super().__init__()
outSize = env.start_grid_map.reshape(1, -1).shape[1]
self.outSize = outSize
def getFeatures(self, obs):
# prs(obs)
return obs.reshape(1, -1)
class MapFromDumpExtractor2(FeatureExtractor):
def __init__(self, env):
super().__init__()
outSize = env.start_grid_map.reshape(1, -1).shape[1]
self.outSize = outSize * 3
def getFeatures(self, obs):
state = np.zeros((3, np.shape(obs)[0], np.shape(obs)[1]))
state[0] = np.where(obs == 2, 1, state[0])
state[1] = np.where(obs == 4, 1, state[1])
state[2] = np.where(obs == 6, 1, state[2])
return state.reshape(1, -1)
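# Builds one binary mask per special cell code (2, 4 and 6) of the gridworld map, so each cell
# type gets its own channel before the stacked masks are flattened into a single feature vector.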
class DistsFromStates(FeatureExtractor):
def __init__(self, env):
super().__init__()
self.outSize = 16
def getFeatures(self, obs):
# prs(obs)
# x=np.loads(obs)
x = obs
# print(x)
astate = list(map(lambda x: x[0] if len(x) > 0 else None, np.where(x == 2)))
astate = np.array(astate)
a3 = np.where(x == 3)
d3 = np.array([0])
if len(a3[0]) > 0:
astate3 = np.concatenate(a3).reshape(2, -1).T
d3 = np.power(astate - astate3, 2).sum(1).min().reshape(1)
# d3 = np.array(d3).reshape(1)
a4 = np.where(x == 4)
d4 = np.array([0])
if len(a4[0]) > 0:
astate4 = np.concatenate(a4).reshape(2, -1).T
d4 = np.power(astate - astate4, 2).sum(1).min().reshape(1)
# d4 = np.array(d4)
a5 = np.where(x == 5)
d5 = np.array([0])
# prs(a5)
if len(a5[0]) > 0:
astate5 = np.concatenate(a5).reshape(2, -1).T
d5 = np.power(astate - astate5, 2).sum(1).min().reshape(1)
# d5 = np.array(d5)
a6 = np.where(x == 6)
d6 = np.array([0])
if len(a6[0]) > 0:
astate6 = np.concatenate(a6).reshape(2, -1).T
d6 = np.power(astate - astate6, 2).sum(1).min().reshape(1)
# d6=np.array(d6)
# prs("::",d3,d4,d5,d6)
ret = np.concatenate((d3, d4, d5, d6)).reshape(1, -1)
ret = np.dot(ret.T, ret)
# assumed completion: flatten the 4x4 product so the feature length matches self.outSize == 16
return ret.reshape(1, -1)
import numpy as np
from scipy.spatial import distance_matrix
def get_num_points(config):
if config.test_model is None:
return config.num_training_points
else:
return config.num_test_points
def get_random_capacities(n):
capacities = np.random.randint(9, size=n) + 1
depot_capacity_map = {
10: 20,
20: 30,
50: 40,
100: 50
}
capacities[0] = depot_capacity_map.get(n - 1, 50)
return capacities
class Problem:
def __init__(self, locations, capacities):
self.locations = locations.copy()
self.capacities = capacities.copy()
self.distance_matrix = distance_matrix(self.locations, self.locations)
self.total_customer_capacities = np.sum(capacities[1:])
self.change_at = np.zeros([len(self.locations) + 1])
self.no_improvement_at = {}
self.num_solutions = 0
self.num_traversed = np.zeros((len(locations), len(locations)))
self.distance_hashes = set()
def record_solution(self, solution, distance):
inv_dist = 1.0 / distance
self.num_solutions += inv_dist
for path in solution:
if len(path) > 2:
for to_index in range(1, len(path)):
self.num_traversed[path[to_index - 1]][path[to_index]] += inv_dist
self.num_traversed[path[to_index]][path[to_index - 1]] += inv_dist
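# Each traversed edge is credited with weight 1/distance, so edges appearing in shorter (better)
# solutions accumulate more weight; get_frequency() below divides by (1 + num_solutions), the
# accumulated 1/distance mass, to turn these counts into frequencies.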
def add_distance_hash(self, distance_hash):
self.distance_hashes.add(distance_hash)
def get_location(self, index):
return self.locations[index]
def get_capacity(self, index):
return self.capacities[index]
def get_num_customers(self):
return len(self.locations) - 1
def get_distance(self, from_index, to_index):
return self.distance_matrix[from_index][to_index]
def get_frequency(self, from_index, to_index):
return self.num_traversed[from_index][to_index] / (1.0 + self.num_solutions)
def reset_change_at_and_no_improvement_at(self):
self.change_at = np.zeros([len(self.locations) + 1])
self.no_improvement_at = {}
def mark_change_at(self, step, path_indices):
for path_index in path_indices:
self.change_at[path_index] = step
def mark_no_improvement(self, step, action, index_first, index_second=-1, index_third=-1):
key = '{}_{}_{}_{}'.format(action, index_first, index_second, index_third)
self.no_improvement_at[key] = step
def should_try(self, action, index_first, index_second=-1, index_third=-1):
key = '{}_{}_{}_{}'.format(action, index_first, index_second, index_third)
no_improvement_at = self.no_improvement_at.get(key, -1)
return self.change_at[index_first] >= no_improvement_at or \
self.change_at[index_second] >= no_improvement_at or \
self.change_at[index_third] >= no_improvement_at
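# A move is retried only if at least one of the routes it touches has changed since the last
# step at which this exact (action, indices) combination failed to improve the solution.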
def generate_problem(config):
np.random.seed(config.problem_seed)
import numpy as np
import pandas as pd
import sys
import os
import pandas.core.indexes
sys.modules['pandas.indexes'] = pandas.core.indexes
import time
import yaml
import json
import matplotlib.pyplot as plt
import keras
import tensorflow as tf
from keras.models import Sequential, load_model, Model
from keras.layers import Dense, Dropout, Flatten, Conv3D, MaxPooling3D, BatchNormalization, Activation, Input, concatenate
from keras.callbacks import EarlyStopping
from keras.backend.tensorflow_backend import set_session
from keras.utils import multi_gpu_model
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import ParameterGrid
from helper import dataset, model
from imaging_predictive_models import imaging_dataset
from clinical_predictive_models import clinical_dataset, MLP
from multimodal_prediction_helper import multimodal_dataset
from keras_helper import EpochEvaluation
#### ENVIRONMENT AND SESSION SET UP ####################################################################
# set the environment variable
os.environ["KERAS_BACKEND"] = "tensorflow"
# Silence INFO logs
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'
# create a configuration protocol
config = tf.ConfigProto()
# set the allow_growth option to true in the protocol
config.gpu_options.allow_growth = True
# define GPU to use
config.gpu_options.visible_device_list = "0,1"
# start a sesstion that uses the configuration protocol
set_session(tf.Session(config=config))
#### READ CONFIGURATION FILE ###########################################################################
def join(loader,node):
seq = loader.construct_sequence(node)
return ''.join(str(i) for i in seq)
yaml.add_constructor('!join',join)
cfg = yaml.load(open('config.yml', 'r'))
#### ASSIGN PATHS AND VARIABLES #########################################################################
dataset_name = cfg['dataset name']
img_splits_path = cfg['imaging dataset']['splits path']
img_feat_splits_path = 'data/' + cfg['imaging dataset']['feature splits path']
img_models_path = cfg['imaging dataset']['models path']
img_params_folder = '../TOF-based/modeling_results/1kplus_multimodal/params/'
img_scores_folder = '../TOF-based/modeling_results/1kplus_multimodal/performance_scores/'
clin_splits_path = cfg['clinical dataset']['splits path']
clin_feat_splits_path = 'data/'+ cfg['clinical dataset']['feature splits path']
clin_models_path = cfg['clinical dataset']['models path']
clin_params_folder = '../clinical parameter-based/modeling_results/1kplus_multimodal/params/'
clin_scores_folder = '../clinical parameter-based/modeling_results/1kplus_multimodal/performance_scores/'
num_splits = cfg['number of runs']
#### LOAD BOTH CLINICAL AND IMAGING DATA #################################################################
img_data = imaging_dataset(dataset_name)
img_sets = img_data.assign_train_val_test_sets(img_splits_path)
clin_data = clinical_dataset(dataset_name)
clin_sets = clin_data.assign_train_val_test_sets(clin_splits_path)
features = multimodal_dataset(dataset_name)
features.load_feature_sets(img_feat_splits_path, clin_feat_splits_path)
def train_and_evaluate_CNN(training_data, test_data, params, num_training_runs = 100):
X_tr, y_tr = training_data
X_te, y_te = test_data
AUC_trs = []
AUC_tes = []
for i in range(num_training_runs):
model = Sequential()
model.add(Conv3D(params['num_filters'][0], params['arc_params']['filter_size'], strides = params['arc_params']['filter_stride'],
padding="same",kernel_regularizer= keras.regularizers.l2(params['l2_reg']),input_shape=(156,192,64,1)))
model.add(Activation('relu'))
model.add(MaxPooling3D(pool_size= params['arc_params']['pool_size']))
model.add(Conv3D(params['num_filters'][1], params['arc_params']['filter_size'], strides = params['arc_params']['filter_stride'],
padding="same",kernel_regularizer= keras.regularizers.l2(params['l2_reg']) ))
model.add(Activation('relu'))
model.add(MaxPooling3D(pool_size=params['arc_params']['pool_size']))
model.add(Conv3D(params['num_filters'][2], params['arc_params']['filter_size'], strides = params['arc_params']['filter_stride'],
padding="same",kernel_regularizer= keras.regularizers.l2(params['l2_reg'])))
model.add(Activation('relu'))
model.add(MaxPooling3D(pool_size=params['arc_params']['pool_size']))
model.add(Flatten())
model.add(Dense(params['num_neurons_in_powers']*params['num_filters'][2], activation='relu',kernel_regularizer= keras.regularizers.l2(params['l2_reg'])))
model.add(Dropout(params['dropout']))
model.add(Dense(2 , activation='softmax',kernel_regularizer= keras.regularizers.l2(params['l2_reg'])))
optimizer = keras.optimizers.Adam(lr = params['learning_rate'])
model.compile(loss='binary_crossentropy',optimizer=optimizer)
parallel_model = multi_gpu_model(model, 2)
parallel_model.compile(loss='binary_crossentropy',optimizer=optimizer)
e_stop = EarlyStopping(monitor = 'val_loss', min_delta = 0.02, patience = 2
, mode='auto')
callbacks = [e_stop]
start = time.time()
history = parallel_model.fit(X_tr, y_tr, callbacks = callbacks, validation_data = (X_te,y_te),
batch_size = params['batch_size'], epochs=20,verbose = 0)
end = time.time()
model.set_weights(parallel_model.get_weights())
probs_tr = model.predict(X_tr, batch_size = 8)
probs_te = model.predict(X_te, batch_size = 8)
score_tr = roc_auc_score(y_tr, probs_tr)
score_te = roc_auc_score(y_te, probs_te)
AUC_trs.append(score_tr)
AUC_tes.append(score_te)
print('Training time for run %i was around %i minutes'%(i, np.floor((end-start)/60)))
keras.backend.clear_session()
return AUC_trs, AUC_tes
def train_and_evaluate_MLP(training_data, test_data, params, num_training_runs = 100):
X_tr, y_tr = training_data
X_te, y_te = test_data
AUC_trs = []
AUC_tes = []
for i in range(num_training_runs):
e_stop = EarlyStopping(monitor = 'val_loss', min_delta = 0.01, patience = 5, mode='min')
callbacks = [e_stop]
optimizer = keras.optimizers.Adam(lr = params['learning_rate'])
model = Sequential()
model.add(Dense(params['num_neurons'],input_dim = 7, kernel_initializer = 'glorot_uniform', activation = 'relu', kernel_regularizer = keras.regularizers.l2(params['l2_ratio'])))
model.add(Dropout(params['dropout_rate']))
model.add(Dense(2, kernel_initializer = 'glorot_uniform', activation = 'softmax', kernel_regularizer = keras.regularizers.l2(params['l2_ratio'])))
model.compile(loss = 'binary_crossentropy', optimizer = optimizer)
history = model.fit(X_tr, y_tr, callbacks= callbacks, validation_data = (X_te, y_te), epochs = 100, batch_size = params['batch_size'], verbose = 0)
probs_tr = model.predict(X_tr, batch_size = 8)
probs_te = model.predict(X_te, batch_size = 8)
score_tr = roc_auc_score(y_tr, probs_tr)
score_te = roc_auc_score(y_te, probs_te)
AUC_trs.append(score_tr)
AUC_tes.append(score_te)
keras.backend.clear_session()
return AUC_trs, AUC_tes
def train_and_evaluate_end_to_end(img_X_tr, clin_X_tr, y_tr, img_X_te, clin_X_te, y_te, params,num_training_runs = 100):
AUC_trs = []
AUC_tes = []
for i in range(num_training_runs):
img_input = Input(shape= (156,192,64,1), name='image_input')
clin_input = Input(shape= (clin_X_tr.shape[1],), name='clinical_input')
x1 = Conv3D(params['num_filters'][0], (3,3,3), strides = (1,1,1),padding="same",
kernel_regularizer= keras.regularizers.l2(params['l2_ratio']))(img_input)
x1 = Activation('relu')(x1)
x1 = MaxPooling3D(pool_size=(3,3,3))(x1)
x1 = Conv3D(params['num_filters'][1], (3,3,3), strides = (1,1,1),padding="same",
kernel_regularizer= keras.regularizers.l2(params['l2_ratio']))(x1)
x1 = Activation('relu')(x1)
x1 = MaxPooling3D(pool_size=(3,3,3))(x1)
x1 = Conv3D(params['num_filters'][2], (3,3,3), strides = (1,1,1),padding="same",
kernel_regularizer= keras.regularizers.l2(params['l2_ratio']))(x1)
x1 = Activation('relu')(x1)
x1 = MaxPooling3D(pool_size=(3,3,3))(x1)
x1 = Flatten()(x1)
x1 = Dense(params['num_filters'][2]*2, activation='relu',
kernel_regularizer= keras.regularizers.l2(params['l2_ratio']))(x1)
x1 = Dropout(params['dropout_rate'])(x1)
x1 = Dense(params['num_neurons_embedding'][1], activation='relu',
kernel_regularizer= keras.regularizers.l2(params['l2_ratio']))(x1)
x2 = Dense(params['num_neurons_MLP'], activation = 'relu',
kernel_regularizer= keras.regularizers.l2(params['l2_ratio']))(clin_input)
x2 = Dropout(params['dropout_rate'])(x2)
x2 = Dense(params['num_neurons_embedding'][0], activation='relu',
kernel_regularizer= keras.regularizers.l2(params['l2_ratio']))(x2)
x = concatenate([x1, x2])
x = Dense(params['num_neurons_final'], activation = 'relu',
kernel_regularizer= keras.regularizers.l1(params['l2_ratio']))(x)
x= Dropout(params['dropout_rate'])(x)
output = Dense(2,activation= 'softmax', kernel_regularizer= keras.regularizers.l2(params['l2_ratio']))(x)
model = Model(inputs=[img_input, clin_input], outputs=[output])
optimizer = keras.optimizers.Adam(lr = params['learning_rate'])
model.compile(loss='binary_crossentropy', optimizer = optimizer)
e_stop = EarlyStopping(monitor = 'val_loss', min_delta = 0.02, patience = 2, mode='auto')
callbacks = [e_stop]
start= time.time()
history = model.fit(
{'image_input' : img_X_tr,
'clinical_input' : clin_X_tr},#inputs
y_tr, #output
callbacks = callbacks,
validation_data= ([img_X_te, clin_X_te],y_te),
epochs=20,
batch_size= params['batch_size'],
verbose=0)
end= time.time()
probs_tr = model.predict([img_X_tr,clin_X_tr],batch_size = 8)
probs_te = model.predict([img_X_te,clin_X_te],batch_size = 8)
score_tr = roc_auc_score(y_tr, probs_tr)
score_te = roc_auc_score(y_te, probs_te)
AUC_trs.append(score_tr)
AUC_tes.append(score_te)
print('Training time for run %i was around %i minutes'%(i, np.floor((end-start)/60)))
keras.backend.clear_session()
return AUC_trs, AUC_tes
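# The end-to-end model joins a 3D-CNN branch (TOF image volume) and an MLP branch (clinical
# parameters) via concatenate() and trains both branches jointly against the softmax output;
# each of the num_training_runs repetitions records train/test AUC for later aggregation.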
def train_and_evaluate_feat_extract(img_X_tr, clin_X_tr, y_tr, img_X_te, clin_X_te, y_te, params,num_training_runs = 100):
AUC_trs = []
AUC_tes = []
for i in range(num_training_runs):
img_input = Input(shape= (img_X_tr.shape[1],), name='image_input')
clin_input = Input(shape= (clin_X_tr.shape[1],), name='clinical_input')
dense1 = Dense(params['num_neurons_embedding'][0], activation = 'relu',
kernel_regularizer= keras.regularizers.l2(params['l2_ratio']))(clin_input)
dense2 = Dense(params['num_neurons_embedding'][1], activation = 'relu',
kernel_regularizer= keras.regularizers.l2(params['l2_ratio']))(img_input)
x = concatenate([dense1, dense2])
x = Dense(params['num_neurons_final'], activation = 'relu',
kernel_regularizer= keras.regularizers.l2(params['l2_ratio']))(x)
x= Dropout(params['dropout_rate'])(x)
output = Dense(2, activation= 'softmax', kernel_regularizer= keras.regularizers.l2(params['l2_ratio']))(x)
optimizer = keras.optimizers.Adam(lr = params['learning_rate'])
model = Model(inputs=[img_input, clin_input], outputs=[output])
model.compile(loss='binary_crossentropy', optimizer = optimizer)
e_stop = EarlyStopping(monitor = 'val_loss', min_delta = 0.01, patience = 5, mode='auto')
callbacks = [e_stop]
history = model.fit({'image_input' : img_X_tr,
'clinical_input' : clin_X_tr},
y_tr,
callbacks = callbacks,
validation_data= ([img_X_te, clin_X_te],y_te),
epochs=100,
batch_size= params['batch_size'],
verbose=0)
probs_tr = model.predict([img_X_tr,clin_X_tr],batch_size = 8)
probs_te = model.predict([img_X_te,clin_X_te],batch_size = 8)
score_tr = roc_auc_score(y_tr, probs_tr)
score_te = roc_auc_score(y_te, probs_te)
AUC_trs.append(score_tr)
AUC_tes.append(score_te)
keras.backend.clear_session()
return AUC_trs, AUC_tes
# fix seed
np.random.seed(1)
tf.set_random_seed(2)
import random as rn
rn.seed(3)
options = [ 'CNN', 'end-to-end']
if 'MLP' in options:
for i in range(num_splits):
X_tr = clin_sets[i]['train_data']
y_tr = clin_sets[i]['train_labels']
X_val = clin_sets[i]['val_data']
y_val = clin_sets[i]['val_labels']
X_te = clin_sets[i]['test_data']
y_te = clin_sets[i]['test_labels']
X_train = np.concatenate((X_tr,X_val))
y_train = np.concatenate((y_tr,y_val))
y_tr = pd.get_dummies(y_tr)
y_val = pd.get_dummies(y_val)
y_te = pd.get_dummies(y_te)
y_train = pd.get_dummies(y_train.reshape(250,))
with open(clin_params_folder+ 'best_MLP_multimodal_tuning_parameters_split_'+str(i+1)+'.json') as json_file:
tuning_params = json.load(json_file)
print(tuning_params)
AUC_trs, AUC_tes = train_and_evaluate_MLP((X_train,y_train),(X_te,y_te),tuning_params,num_training_runs=100)
np.savetxt('../clinical parameter-based/modeling_results/1kplus_multimodal/performance_scores/outer_loop_AUC_performance_over_100_runs_model_'+str(i+1)+'.csv', [AUC_trs, AUC_tes], delimiter=",")
if 'CNN' in options:
for i in range(num_splits):
X_tr = img_sets[i]['train_data']
y_tr = img_sets[i]['train_labels']
X_val = img_sets[i]['val_data']
y_val = img_sets[i]['val_labels']
X_te = img_sets[i]['test_data']
y_te = img_sets[i]['test_labels']
X_train = np.concatenate((X_tr,X_val))
y_train = np.concatenate((y_tr,y_val))
y_tr = pd.get_dummies(y_tr)
y_val = pd.get_dummies(y_val)
y_te = pd.get_dummies(y_te)
y_train = pd.get_dummies(y_train)
with open(img_params_folder+ 'best_tuning_params_split_'+str(i+1)+'.json') as json_file:
tuning_params = json.load(json_file)
print(tuning_params)
AUC_trs, AUC_tes = train_and_evaluate_CNN((X_train,y_train),(X_te,y_te),tuning_params,num_training_runs=100)
np.savetxt('../TOF-based/modeling_results/1kplus_multimodal/performance_scores/outer_loop_AUC_performance_over_100_runs_model_'+str(i+1)+'.csv', [AUC_trs, AUC_tes], delimiter=",")
if 'feature' in options:
for i in range(num_splits):
img_X_tr = features.img_sets[i]['train_data']
img_X_val = features.img_sets[i]['val_data']
        img_X_train = np.concatenate((img_X_tr,img_X_val))
import cv2 # state of the art computer vision algorithms library
import numpy as np # fundamental package for scientific computing
import matplotlib.pyplot as plt # 2D plotting library producing publication quality figures
import pyrealsense2 as rs # Intel RealSense cross-platform open-source API
import math
import time
# Constants
COLS = 1280
ROWS = 720
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
PLOT UTIL
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
def plot_data(z_axis, x_vec, y_vec, plot_size, line1, mean_line, title):
if(len(y_vec) >= plot_size):
y_vec[-1] = z_axis
line1, mean_line = live_plotter(x_vec, y_vec, line1, mean_line, title)
y_vec = np.append(y_vec[1:],0.0)
else:
y_vec.append(z_axis)
def live_plotter(x_vec, y1_data, line1, mean_line, identifier='', pause_time=0.001):
if line1==[]:
# this is the call to matplotlib that allows dynamic plotting
plt.ion()
fig = plt.figure(figsize=(13,6))
ax = fig.add_subplot(111)
# create a variable for the line so we can later update it
line1, = ax.plot(x_vec, y1_data, '-o', alpha=0.8)
mean_line, = ax.plot(x_vec, [np.mean(y1_data)] * len(x_vec), label='Mean', linestyle='--')
ax.legend((line1, line1), ('mean:' + str(np.mean(y1_data)), 'std:' + str(np.std(y1_data))))
#update plot label/title
plt.ylabel('Z axis')
plt.title('{}'.format(identifier))
plt.show()
# after the figure, axis, and line are created, we only need to update the y-data
line1.set_ydata(y1_data)
mean_line.set_ydata([np.mean(y1_data)] * len(x_vec))
plt.legend((line1, line1), ('mean:' + str(np.mean(y1_data)), 'std:' + str(np.std(y1_data))))
# adjust limits if new data goes beyond bounds
    if np.min(y1_data) <= line1.axes.get_ylim()[0] or np.max(y1_data) >= line1.axes.get_ylim()[1]:
        plt.ylim([np.min(y1_data) - np.std(y1_data), np.max(y1_data) + np.std(y1_data)])
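    # Assumed tail of this helper (not in the original snippet): pause briefly so the
    # figure refreshes, then hand the artists back so plot_data can reuse them next frame.
    plt.pause(pause_time)
    return line1, mean_line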
'''Trains CGAN on MNIST using Keras
CGAN is Conditional Generative Adversarial Network.
This version of CGAN is similar to DCGAN. The difference mainly
is that the z-vector of geneerator is conditioned by a one-hot label
to produce specific fake images. The discriminator is trained to
discriminate real from fake images that are conditioned on
specific one-hot labels.
[1] Radford, Alec, <NAME>, and <NAME>.
"Unsupervised representation learning with deep convolutional
generative adversarial networks." arXiv preprint arXiv:1511.06434 (2015).
[2] <NAME>, and <NAME>. "Conditional generative
adversarial nets." arXiv preprint arXiv:1411.1784 (2014).
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.keras.layers import Activation, Dense, Input
from tensorflow.keras.layers import Conv2D, Flatten
from tensorflow.keras.layers import Reshape, Conv2DTranspose
from tensorflow.keras.layers import LeakyReLU
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.layers import concatenate
from tensorflow.keras.optimizers import RMSprop
from tensorflow.keras.models import Model
from tensorflow.keras.datasets import mnist
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.models import load_model
import numpy as np
import math
import matplotlib.pyplot as plt
import os
import argparse
def build_generator(inputs, labels, image_size):
"""Build a Generator Model
Inputs are concatenated before Dense layer.
Stack of BN-ReLU-Conv2DTranpose to generate fake images.
Output activation is sigmoid instead of tanh in orig DCGAN.
Sigmoid converges easily.
Arguments:
inputs (Layer): Input layer of the generator (the z-vector)
labels (Layer): Input layer for one-hot vector to condition
the inputs
image_size: Target size of one side (assuming square image)
Returns:
generator (Model): Generator Model
"""
image_resize = image_size // 4
# network parameters
kernel_size = 5
layer_filters = [128, 64, 32, 1]
x = concatenate([inputs, labels], axis=1)
x = Dense(image_resize * image_resize * layer_filters[0])(x)
x = Reshape((image_resize, image_resize, layer_filters[0]))(x)
for filters in layer_filters:
# first two convolution layers use strides = 2
# the last two use strides = 1
if filters > layer_filters[-2]:
strides = 2
else:
strides = 1
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = Conv2DTranspose(filters=filters,
kernel_size=kernel_size,
strides=strides,
padding='same')(x)
x = Activation('sigmoid')(x)
# input is conditioned by labels
generator = Model([inputs, labels], x, name='generator')
return generator
def build_discriminator(inputs, labels, image_size):
"""Build a Discriminator Model
Inputs are concatenated after Dense layer.
Stack of LeakyReLU-Conv2D to discriminate real from fake.
The network does not converge with BN so it is not used here
unlike in DCGAN paper.
Arguments:
inputs (Layer): Input layer of the discriminator (the image)
labels (Layer): Input layer for one-hot vector to condition
the inputs
image_size: Target size of one side (assuming square image)
Returns:
discriminator (Model): Discriminator Model
"""
kernel_size = 5
layer_filters = [32, 64, 128, 256]
x = inputs
y = Dense(image_size * image_size)(labels)
y = Reshape((image_size, image_size, 1))(y)
x = concatenate([x, y])
for filters in layer_filters:
# first 3 convolution layers use strides = 2
# last one uses strides = 1
if filters == layer_filters[-1]:
strides = 1
else:
strides = 2
x = LeakyReLU(alpha=0.2)(x)
x = Conv2D(filters=filters,
kernel_size=kernel_size,
strides=strides,
padding='same')(x)
x = Flatten()(x)
x = Dense(1)(x)
x = Activation('sigmoid')(x)
# input is conditioned by labels
discriminator = Model([inputs, labels], x, name='discriminator')
return discriminator
def train(models, data, params):
"""Train the Discriminator and Adversarial Networks
Alternately train Discriminator and Adversarial networks by batch.
Discriminator is trained first with properly labelled real and fake images.
Adversarial is trained next with fake images pretending to be real.
Discriminator inputs are conditioned by train labels for real images,
and random labels for fake images.
Adversarial inputs are conditioned by random labels.
Generate sample images per save_interval.
Arguments:
models (list): Generator, Discriminator, Adversarial models
data (list): x_train, y_train data
params (list): Network parameters
"""
# the GAN models
generator, discriminator, adversarial = models
# images and labels
x_train, y_train = data
# network parameters
batch_size, latent_size, train_steps, num_labels, model_name = params
# the generator image is saved every 500 steps
save_interval = 500
# noise vector to see how the generator output evolves during training
noise_input = np.random.uniform(-1.0, 1.0, size=[16, latent_size])
# one-hot label the noise will be conditioned to
noise_class = np.eye(num_labels)[np.arange(0, 16) % num_labels]
# number of elements in train dataset
train_size = x_train.shape[0]
print(model_name,
"Labels for generated images: ",
np.argmax(noise_class, axis=1))
for i in range(train_steps):
# train the discriminator for 1 batch
# 1 batch of real (label=1.0) and fake images (label=0.0)
# randomly pick real images from dataset
rand_indexes = np.random.randint(0, train_size, size=batch_size)
real_images = x_train[rand_indexes]
# corresponding one-hot labels of real images
real_labels = y_train[rand_indexes]
# generate fake images from noise using generator
# generate noise using uniform distribution
noise = np.random.uniform(-1.0,
1.0,
size=[batch_size, latent_size])
# assign random one-hot labels
fake_labels = np.eye(num_labels)[np.random.choice(num_labels,
batch_size)]
# generate fake images conditioned on fake labels
fake_images = generator.predict([noise, fake_labels])
# real + fake images = 1 batch of train data
x = np.concatenate((real_images, fake_images))
# real + fake one-hot labels = 1 batch of train one-hot labels
labels = np.concatenate((real_labels, fake_labels))
# label real and fake images
# real images label is 1.0
y = np.ones([2 * batch_size, 1])
# fake images label is 0.0
y[batch_size:, :] = 0.0
# train discriminator network, log the loss and accuracy
loss, acc = discriminator.train_on_batch([x, labels], y)
log = "%d: [discriminator loss: %f, acc: %f]" % (i, loss, acc)
# train the adversarial network for 1 batch
# 1 batch of fake images conditioned on fake 1-hot labels
# w/ label=1.0
# since the discriminator weights are frozen in
# adversarial network only the generator is trained
# generate noise using uniform distribution
noise = np.random.uniform(-1.0,
1.0,
size=[batch_size, latent_size])
# assign random one-hot labels
fake_labels = np.eye(num_labels)[np.random.choice(num_labels,
batch_size)]
# label fake images as real or 1.0
y = np.ones([batch_size, 1])
# train the adversarial network
# note that unlike in discriminator training,
# we do not save the fake images in a variable
# the fake images go to the discriminator input
# of the adversarial for classification
# log the loss and accuracy
loss, acc = adversarial.train_on_batch([noise, fake_labels], y)
log = "%s [adversarial loss: %f, acc: %f]" % (log, loss, acc)
print(log)
if (i + 1) % save_interval == 0:
# plot generator images on a periodic basis
plot_images(generator,
noise_input=noise_input,
noise_class=noise_class,
show=False,
step=(i + 1),
model_name=model_name)
# save the model after training the generator
# the trained generator can be reloaded for
# future MNIST digit generation
generator.save(model_name + ".h5")
def plot_images(generator,
noise_input,
noise_class,
show=False,
step=0,
model_name="gan"):
"""Generate fake images and plot them
For visualization purposes, generate fake images
then plot them in a square grid
Arguments:
generator (Model): The Generator Model for fake images generation
noise_input (ndarray): Array of z-vectors
show (bool): Whether to show plot or not
step (int): Appended to filename of the save images
model_name (string): Model name
"""
os.makedirs(model_name, exist_ok=True)
filename = os.path.join(model_name, "%05d.png" % step)
images = generator.predict([noise_input, noise_class])
print(model_name , " labels for generated images: ", np.argmax(noise_class, axis=1))
plt.figure(figsize=(2.2, 2.2))
num_images = images.shape[0]
image_size = images.shape[1]
rows = int(math.sqrt(noise_input.shape[0]))
for i in range(num_images):
plt.subplot(rows, rows, i + 1)
image = np.reshape(images[i], [image_size, image_size])
plt.imshow(image, cmap='gray')
plt.axis('off')
plt.savefig(filename)
if show:
plt.show()
else:
plt.close('all')
def build_and_train_models():
# load MNIST dataset
(x_train, y_train), (_, _) = mnist.load_data()
# reshape data for CNN as (28, 28, 1) and normalize
image_size = x_train.shape[1]
x_train = np.reshape(x_train, [-1, image_size, image_size, 1])
x_train = x_train.astype('float32') / 255
num_labels = np.amax(y_train) + 1
y_train = to_categorical(y_train)
model_name = "cgan_mnist"
# network parameters
# the latent or z vector is 100-dim
latent_size = 100
batch_size = 64
train_steps = 40000
lr = 2e-4
decay = 6e-8
input_shape = (image_size, image_size, 1)
label_shape = (num_labels, )
# build discriminator model
inputs = Input(shape=input_shape, name='discriminator_input')
labels = Input(shape=label_shape, name='class_labels')
discriminator = build_discriminator(inputs, labels, image_size)
# [1] or original paper uses Adam,
# but discriminator converges easily with RMSprop
optimizer = RMSprop(lr=lr, decay=decay)
discriminator.compile(loss='binary_crossentropy',
optimizer=optimizer,
metrics=['accuracy'])
discriminator.summary()
# build generator model
input_shape = (latent_size, )
inputs = Input(shape=input_shape, name='z_input')
generator = build_generator(inputs, labels, image_size)
generator.summary()
# build adversarial model = generator + discriminator
optimizer = RMSprop(lr=lr*0.5, decay=decay*0.5)
# freeze the weights of discriminator during adversarial training
discriminator.trainable = False
outputs = discriminator([generator([inputs, labels]), labels])
adversarial = Model([inputs, labels],
outputs,
name=model_name)
adversarial.compile(loss='binary_crossentropy',
optimizer=optimizer,
metrics=['accuracy'])
adversarial.summary()
# train discriminator and adversarial networks
models = (generator, discriminator, adversarial)
data = (x_train, y_train)
params = (batch_size, latent_size, train_steps, num_labels, model_name)
train(models, data, params)
def test_generator(generator, class_label=None):
noise_input = np.random.uniform(-1.0, 1.0, size=[16, 100])
step = 0
if class_label is None:
num_labels = 10
        noise_class = np.eye(num_labels)[np.random.choice(num_labels, 16)]
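    else:
        # Assumed remainder of this helper (mirrors the conditioned-generation utility the
        # rest of this script follows): one-hot the requested class for all 16 samples.
        noise_class = np.zeros((16, 10))
        noise_class[:, class_label] = 1
        step = class_label

    plot_images(generator,
                noise_input=noise_input,
                noise_class=noise_class,
                show=True,
                step=step,
                model_name="test_outputs")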
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for miscellaneous functionality in tensorflow.ops.nn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from absl.testing import parameterized
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import nn_impl
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.ops.nn_impl import _compute_sampled_logits
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.platform import test as test_lib
class ZeroFractionTest(test_lib.TestCase):
def _ZeroFraction(self, x):
assert x.shape
total_elements = np.prod(x.shape)
nonzeros = np.count_nonzero(x.flatten())
return 1.0 - nonzeros / total_elements
@test_util.run_deprecated_v1
def testZeroFraction(self):
x_shape = [5, 17]
x_np = np.random.randint(0, 2, size=x_shape).astype(np.float32)
y_np = self._ZeroFraction(x_np)
x_tf = constant_op.constant(x_np)
x_tf.set_shape(x_shape)
y_tf = nn_impl.zero_fraction(x_tf)
y_tf_np = self.evaluate(y_tf)
eps = 1e-8
self.assertAllClose(y_tf_np, y_np, eps)
@test_util.run_deprecated_v1
def testZeroFractionEmpty(self):
x = np.zeros(0)
y = self.evaluate(nn_impl.zero_fraction(x))
self.assertTrue(np.isnan(y))
@test_util.run_deprecated_v1
def testZeroFraction2_27Zeros(self):
sparsity = nn_impl.zero_fraction(
array_ops.zeros([int(2**27 * 1.01)], dtype=dtypes.int8))
self.assertAllClose(1.0, self.evaluate(sparsity))
@test_util.run_deprecated_v1
def testZeroFraction2_27Ones(self):
sparsity = nn_impl.zero_fraction(
array_ops.ones([int(2**27 * 1.01)], dtype=dtypes.int8))
self.assertAllClose(0.0, self.evaluate(sparsity))
@test_util.run_deprecated_v1
def testUnknownSize(self):
value = array_ops.placeholder(dtype=dtypes.float32)
sparsity = nn_impl.zero_fraction(value)
with self.cached_session() as sess:
self.assertAllClose(
0.25,
sess.run(sparsity, {value: [[0., 1.], [0.3, 2.]]}))
class SoftmaxTest(test_lib.TestCase, parameterized.TestCase):
def _softmax(self, x):
assert len(x.shape) == 2
m = x.max(1)[:, np.newaxis]
u = np.exp(x - m)
z = u.sum(1)[:, np.newaxis]
return u / z
@test_util.run_in_graph_and_eager_modes
def testSoftmax(self):
x_shape = [5, 10]
x_np = np.random.randn(*x_shape).astype(np.float32)
y_np = self._softmax(x_np)
x_tf = constant_op.constant(x_np)
y_tf = nn_ops.softmax_v2(x_tf)
y_tf_last_dim = nn_ops.softmax_v2(x_tf, 1)
y_tf_np = self.evaluate(y_tf)
y_tf_last_dim_np = self.evaluate(y_tf_last_dim)
eps = 1e-3
self.assertAllClose(y_tf_np, y_np, eps)
self.assertAllClose(y_tf_last_dim_np, y_np, eps)
def testSoftmaxAxes(self):
arr = np.linspace(0., 1, 12).reshape(3, 4)
x_neg_axis = nn_ops.softmax_v2(arr, axis=-2)
y_pos_axis = nn_ops.softmax_v2(arr, axis=0)
z_gt_axis = nn_ops.softmax_v2(arr, axis=0)
x_neg_axis_tf = self.evaluate(x_neg_axis)
y_pos_axis_tf = self.evaluate(y_pos_axis)
z_gt_axis_tf = self.evaluate(z_gt_axis)
eps = 1e-3
self.assertAllClose(x_neg_axis_tf, y_pos_axis_tf, eps)
self.assertAllClose(y_pos_axis_tf, z_gt_axis_tf, eps)
@parameterized.parameters(((5, 10),), ((2, 3, 4),))
@test_util.run_deprecated_v1
def testGradient(self, x_shape):
x_np = np.random.randn(*x_shape).astype(np.float64)
with self.cached_session():
x_tf = constant_op.constant(x_np)
y_tf = nn_ops.softmax_v2(x_tf)
err = gradient_checker.compute_gradient_error(x_tf, x_shape, y_tf,
x_shape)
eps = 2e-8
self.assertLess(err, eps)
class LogPoissonLossTest(test_lib.TestCase):
def _log_poisson_loss(self, x, z, compute_full_loss=False):
lpl = np.exp(x) - z * x
if compute_full_loss:
stirling_approx = z * np.log(z) - z + 0.5 * np.log(2. * np.pi * z)
lpl += np.ma.masked_array(stirling_approx, mask=(z <= 1)).filled(0.)
return lpl
@test_util.run_in_graph_and_eager_modes
def testLogPoissonLoss(self):
x_shape = [5, 10]
x_np = np.random.randn(*x_shape).astype(np.float32)
z_np = np.random.randint(0, 5, size=x_shape).astype(np.float32)
y_np = self._log_poisson_loss(x_np, z_np, compute_full_loss=False)
y_np_stirling = self._log_poisson_loss(x_np, z_np, compute_full_loss=True)
y_tf = nn_impl.log_poisson_loss(z_np, x_np, compute_full_loss=False)
y_tf_stirling = nn_impl.log_poisson_loss(z_np, x_np, compute_full_loss=True)
y_tf_np = self.evaluate(y_tf)
y_tf_np_stirling = self.evaluate(y_tf_stirling)
eps = 1e-3
self.assertAllClose(y_tf_np, y_np, eps)
self.assertAllClose(y_tf_np_stirling, y_np_stirling, eps)
@test_util.run_deprecated_v1
def testGradient(self):
x_shape = [5, 10]
x_np = np.random.randn(*x_shape).astype(np.float64)
z_np = np.random.randint(0, 5, size=x_shape).astype(np.float64)
with self.cached_session():
x_tf = constant_op.constant(x_np)
y_tf = nn_impl.log_poisson_loss(z_np, x_tf, compute_full_loss=False)
y_tf_stirling = nn_impl.log_poisson_loss(
z_np, x_tf, compute_full_loss=True)
err = gradient_checker.compute_gradient_error(x_tf, x_shape, y_tf,
x_shape)
err_stirling = gradient_checker.compute_gradient_error(
x_tf, x_shape, y_tf_stirling, x_shape)
eps = 1e-6
self.assertLess(err, eps)
self.assertLess(err_stirling, eps)
class LogSoftmaxTest(test_lib.TestCase, parameterized.TestCase):
def _log_softmax(self, x):
assert len(x.shape) == 2
m = x.max(1)[:, np.newaxis]
u = x - m
return u - np.log(np.sum(np.exp(u), 1, keepdims=True))
@test_util.run_in_graph_and_eager_modes
def testLogSoftmax(self):
x_shape = [5, 10]
x_np = np.random.randn(*x_shape).astype(np.float32)
y_np = self._log_softmax(x_np)
x_tf = constant_op.constant(x_np)
y_tf = nn_ops.log_softmax_v2(x_tf)
y_tf_np = self.evaluate(y_tf)
eps = 1e-3
self.assertAllClose(y_tf_np, y_np, eps)
def testLogSoftmaxAxes(self):
arr = np.linspace(0., 1, 12).reshape(3, 4)
x_neg_axis = nn_ops.log_softmax_v2(arr, axis=-2)
y_pos_axis = nn_ops.log_softmax_v2(arr, axis=0)
z_gt_axis = nn_ops.log_softmax_v2(arr, axis=0)
x_neg_axis_tf = self.evaluate(x_neg_axis)
y_pos_axis_tf = self.evaluate(y_pos_axis)
z_gt_axis_tf = self.evaluate(z_gt_axis)
eps = 1e-3
self.assertAllClose(x_neg_axis_tf, y_pos_axis_tf, eps)
self.assertAllClose(y_pos_axis_tf, z_gt_axis_tf, eps)
@parameterized.parameters(((5, 10),), ((2, 3, 4),))
@test_util.run_deprecated_v1
def testGradient(self, x_shape):
x_np = np.random.randn(*x_shape).astype(np.float64)
with self.cached_session():
x_tf = constant_op.constant(x_np)
y_tf = nn_ops.log_softmax_v2(x_tf)
err = gradient_checker.compute_gradient_error(x_tf, x_shape, y_tf,
x_shape)
eps = 1e-7
self.assertLess(err, eps)
class L2LossTest(test_lib.TestCase):
@test_util.run_in_graph_and_eager_modes
def testL2Loss(self):
for dtype in [dtypes.float32, dtypes.float64]:
x = constant_op.constant(
[1.0, 0.0, 3.0, 2.0], shape=[2, 2], name="x", dtype=dtype)
l2loss = nn_ops.l2_loss(x)
value = self.evaluate(l2loss)
self.assertAllClose(7.0, value)
@test_util.run_deprecated_v1
def testGradient(self):
x_shape = [20, 7, 3]
np.random.seed(1) # Make it reproducible.
x_val = np.random.random_sample(x_shape).astype(np.float64)
with self.cached_session():
x = constant_op.constant(x_val, name="x")
output = nn_ops.l2_loss(x)
err = gradient_checker.compute_gradient_error(x, x_shape, output, [1])
print("L2Loss gradient err = %g " % err)
err_tolerance = 1e-10
self.assertLess(err, err_tolerance)
class L2NormalizeTest(test_lib.TestCase):
def _l2Normalize(self, x, dim):
if isinstance(dim, list):
norm = np.linalg.norm(x, axis=tuple(dim))
for d in dim:
norm = np.expand_dims(norm, d)
return x / norm
else:
norm = np.apply_along_axis(np.linalg.norm, dim, x)
return x / np.expand_dims(norm, dim)
@test_util.run_in_graph_and_eager_modes
def testL2Normalize(self):
x_shape = [20, 7, 3]
np.random.seed(1)
x_np = np.random.random_sample(x_shape).astype(np.float32)
for dim in range(len(x_shape)):
y_np = self._l2Normalize(x_np, dim)
x_tf = constant_op.constant(x_np, name="x")
y_tf = nn_impl.l2_normalize_v2(x_tf, dim)
self.assertAllClose(y_np, self.evaluate(y_tf))
@test_util.run_in_graph_and_eager_modes
def testL2NormalizeDimArray(self):
x_shape = [20, 7, 3]
np.random.seed(1)
x_np = np.random.random_sample(x_shape).astype(np.float32)
dim = [1, 2]
y_np = self._l2Normalize(x_np, dim)
x_tf = constant_op.constant(x_np, name="x")
y_tf = nn_impl.l2_normalize_v2(x_tf, dim)
self.assertAllClose(y_np, self.evaluate(y_tf))
@test_util.run_deprecated_v1
def testL2NormalizeGradient(self):
x_shape = [20, 7, 3]
np.random.seed(1)
x_np = np.random.random_sample(x_shape).astype(np.float64)
for dim in range(len(x_shape)):
with self.cached_session():
x_tf = constant_op.constant(x_np, name="x")
y_tf = nn_impl.l2_normalize_v2(x_tf, dim)
err = gradient_checker.compute_gradient_error(x_tf, x_shape, y_tf,
x_shape)
print("L2Normalize gradient err = %g " % err)
self.assertLess(err, 1e-4)
class DropoutTest(test_lib.TestCase):
def testDropout(self):
# Runs dropout with 0-1 tensor 10 times, sum the number of ones and validate
# that it is producing approximately the right number of ones over a large
# number of samples, based on the keep probability.
x_dim = 40
y_dim = 30
num_iter = 10
for keep_prob in [0.1, 0.5, 0.8]:
t = constant_op.constant(1.0, shape=[x_dim, y_dim], dtype=dtypes.float32)
dropout = nn_ops.dropout(t, rate=(1 - keep_prob))
final_count = 0
self.assertEqual([x_dim, y_dim], dropout.get_shape())
for _ in xrange(0, num_iter):
value = self.evaluate(dropout)
final_count += np.count_nonzero(value)
# Verifies that there are only two values: 0 and 1/keep_prob.
sorted_value = np.unique(np.sort(value))
self.assertEqual(0, sorted_value[0])
self.assertAllClose(1 / keep_prob, sorted_value[1])
# Check that we are in the 15% error range
expected_count = x_dim * y_dim * keep_prob * num_iter
rel_error = math.fabs(final_count - expected_count) / expected_count
print(rel_error)
self.assertTrue(rel_error < 0.15)
def testShapedDropout(self):
# Runs dropout with 0-1 tensor 10 times, sum the number of ones and validate
# that it is producing approximately the right number of ones over a large
# number of samples, based on the keep probability. This time with shaped
# noise.
x_dim = 40 * 30
y_dim = 3
num_iter = 10
for keep_prob in [0.1, 0.5, 0.8]:
t = constant_op.constant(1.0, shape=[x_dim, y_dim], dtype=dtypes.float32)
dropout = nn_ops.dropout(t, rate=(1 - keep_prob), noise_shape=[x_dim, 1])
self.assertEqual([x_dim, y_dim], dropout.get_shape())
final_count = 0
for _ in xrange(0, num_iter):
value = self.evaluate(dropout)
final_count += np.count_nonzero(value)
# Verifies that there are only two values: 0 and 1/keep_prob.
sorted_value = np.unique(np.sort(value))
self.assertEqual(0, sorted_value[0])
self.assertAllClose(1 / keep_prob, sorted_value[1])
# Check that we are in the 15% error range
expected_count = x_dim * y_dim * keep_prob * num_iter
rel_error = math.fabs(final_count - expected_count) / expected_count
print(rel_error)
self.assertTrue(rel_error < 0.15)
def testShapedDropoutCorrelation(self):
# Runs a shaped dropout and tests that the correlations are correct.
x_dim = 40
y_dim = 30
num_iter = 10
for keep_prob in [0.1, 0.5, 0.8]:
t = constant_op.constant(1.0, shape=[x_dim, y_dim], dtype=dtypes.float32)
dropout = nn_ops.dropout(t, rate=(1 - keep_prob), noise_shape=[x_dim, 1])
self.assertEqual([x_dim, y_dim], dropout.get_shape())
for _ in xrange(0, num_iter):
value = self.evaluate(dropout)
# Verifies that each y column as only one type of activation.
for i in xrange(x_dim):
sorted_value = np.unique(np.sort(value[i, :]))
self.assertEqual(sorted_value.size, 1)
@test_util.run_deprecated_v1
def testDropoutPlaceholderKeepProb(self):
# Runs dropout with 0-1 tensor 10 times, sum the number of ones and validate
# that it is producing approximately the right number of ones over a large
# number of samples, based on the keep probability.
x_dim = 40
y_dim = 30
num_iter = 10
for keep_prob in [0.1, 0.5, 0.8]:
with self.cached_session():
t = constant_op.constant(
1.0, shape=[x_dim, y_dim], dtype=dtypes.float32)
keep_prob_placeholder = array_ops.placeholder(dtypes.float32)
dropout = nn_ops.dropout(t, keep_prob_placeholder)
final_count = 0
self.assertEqual([x_dim, y_dim], dropout.get_shape())
for _ in xrange(0, num_iter):
value = dropout.eval(feed_dict={keep_prob_placeholder: keep_prob})
final_count += np.count_nonzero(value)
# Verifies that there are only two values: 0 and 1/keep_prob.
sorted_value = np.unique(np.sort(value))
self.assertEqual(0, sorted_value[0])
self.assertAllClose(1 / keep_prob, sorted_value[1])
# Check that we are in the 15% error range
expected_count = x_dim * y_dim * keep_prob * num_iter
rel_error = math.fabs(final_count - expected_count) / expected_count
print(rel_error)
self.assertTrue(rel_error < 0.15)
@test_util.run_deprecated_v1
def testShapedDropoutUnknownShape(self):
x_dim = 40
y_dim = 30
keep_prob = 0.5
x = constant_op.constant(1.0, shape=[x_dim, y_dim], dtype=dtypes.float32)
dropout_x = nn_ops.dropout(
x,
rate=(1 - keep_prob),
noise_shape=array_ops.placeholder(dtypes.int32))
self.assertEqual(x.get_shape(), dropout_x.get_shape())
def testPartialShapedDropout(self):
x_dim = 40 * 30
y_dim = 3
num_iter = 10
for keep_prob in [0.1, 0.5, 0.8]:
t = constant_op.constant(1.0, shape=[x_dim, y_dim], dtype=dtypes.float32)
# Set noise_shape=[None, 1] which means [x_dim, 1].
dropout = nn_ops.dropout(t, rate=(1 - keep_prob), noise_shape=[None, 1])
self.assertEqual([x_dim, y_dim], dropout.get_shape())
final_count = 0
for _ in xrange(0, num_iter):
value = self.evaluate(dropout)
final_count += np.count_nonzero(value)
# Verifies that there are only two values: 0 and 1/keep_prob.
sorted_value = np.unique(np.sort(value))
self.assertEqual(0, sorted_value[0])
self.assertAllClose(1 / keep_prob, sorted_value[1])
# Check that we are in the 15% error range
expected_count = x_dim * y_dim * keep_prob * num_iter
rel_error = math.fabs(final_count - expected_count) / expected_count
print(rel_error)
self.assertTrue(rel_error < 0.15)
@test_util.run_deprecated_v1
def testInvalidKeepProb(self):
x_dim = 40
y_dim = 30
t = constant_op.constant(1.0, shape=[x_dim, y_dim], dtype=dtypes.float32)
with self.assertRaises(ValueError):
nn_ops.dropout(t, -1.0)
with self.assertRaises(ValueError):
nn_ops.dropout(t, 1.1)
with self.assertRaises(ValueError):
nn_ops.dropout(t, [0.0, 1.0])
with self.assertRaises(ValueError):
nn_ops.dropout(t, array_ops.placeholder(dtypes.float64))
with self.assertRaises(ValueError):
nn_ops.dropout(t, array_ops.placeholder(dtypes.float32, shape=[2]))
@test_util.run_deprecated_v1
def testInvalidRate(self):
x_dim = 40
y_dim = 30
t = constant_op.constant(1.0, shape=[x_dim, y_dim], dtype=dtypes.float32)
with self.assertRaises(ValueError):
nn_ops.dropout_v2(t, -1.0)
with self.assertRaises(ValueError):
nn_ops.dropout_v2(t, 1.1)
with self.assertRaises(ValueError):
nn_ops.dropout_v2(t, [0.0, 1.0])
def testLargeRate(self):
x_dim = 40
y_dim = 30
t = constant_op.constant(1.0, shape=[x_dim, y_dim], dtype=dtypes.float32)
_ = nn_ops.dropout_v2(t, 0.9)
def testVariableRef(self):
x = variable_scope.get_variable("x", shape=[10, 10], dtype=dtypes.float32)
_ = nn_ops.dropout(x, keep_prob=0.1)
@test_util.run_deprecated_v1
def testShapedDropoutShapeError(self):
# Runs shaped dropout and verifies an error is thrown on misshapen noise.
x_dim = 40
y_dim = 30
keep_prob = 0.5
t = constant_op.constant(1.0, shape=[x_dim, y_dim], dtype=dtypes.float32)
with self.assertRaises(ValueError):
_ = nn_ops.dropout(
t, rate=(1 - keep_prob), noise_shape=[x_dim, y_dim + 10])
with self.assertRaises(ValueError):
_ = nn_ops.dropout(t, rate=(1 - keep_prob), noise_shape=[x_dim, y_dim, 5])
with self.assertRaises(ValueError):
_ = nn_ops.dropout(t, rate=(1 - keep_prob), noise_shape=[x_dim + 3])
with self.assertRaises(ValueError):
_ = nn_ops.dropout(t, rate=(1 - keep_prob), noise_shape=[x_dim])
# test that broadcasting proceeds
_ = nn_ops.dropout(t, rate=(1 - keep_prob), noise_shape=[y_dim])
_ = nn_ops.dropout(t, rate=(1 - keep_prob), noise_shape=[1, y_dim])
_ = nn_ops.dropout(t, rate=(1 - keep_prob), noise_shape=[x_dim, 1])
_ = nn_ops.dropout(t, rate=(1 - keep_prob), noise_shape=[1, 1])
def testNoDropout(self):
x = array_ops.zeros((5,))
y = nn_ops.dropout(x, rate=0)
self.assertAllEqual(x, y)
y = nn_ops.dropout_v2(x, rate=0)
self.assertAllEqual(x, y)
def testDropoutWithIntegerInputs(self):
x = constant_op.constant([1, 1, 1, 1, 1])
with self.assertRaises(ValueError):
_ = nn_ops.dropout(x, 0.5)
class ComputeSampledLogitsTest(test_lib.TestCase):
def setUp(self):
self._eps = 1e-3
def _GenerateTestData(self, num_classes, dim, batch_size, num_true, labels,
sampled, subtract_log_q):
"""Randomly generates input/output data for a single test case.
This function returns numpy constants for use in a test case.
Args:
num_classes: An int. The number of embedding classes in the test case.
dim: An int. The dimension of the embedding.
batch_size: An int. The batch size.
num_true: An int. The number of target classes per training example.
labels: A list of batch_size * num_true ints. The target classes.
sampled: A list of indices in [0, num_classes).
subtract_log_q: A bool corresponding to the parameter in
_compute_sampled_logits().
Returns:
weights: Embedding weights to use as test input. It is a numpy array
of shape [num_classes, dim]
biases: Embedding biases to use as test input. It is a numpy array
of shape [num_classes].
hidden_acts: Forward activations of the network to use as test input.
It is a numpy array of shape [batch_size, dim].
sampled_vals: A tuple based on `sampled` to use as test input in the
format returned by a *_candidate_sampler function.
exp_logits: The output logits expected from _compute_sampled_logits().
It is a numpy array of shape [batch_size, num_true + len(sampled)].
exp_labels: The output labels expected from _compute_sampled_logits().
It is a numpy array of shape [batch_size, num_true + len(sampled)].
"""
weights = np.random.randn(num_classes, dim).astype(np.float32)
biases = np.random.randn(num_classes).astype(np.float32)
hidden_acts = np.random.randn(batch_size, dim).astype(np.float32)
true_exp = np.full([batch_size, 1], fill_value=0.5, dtype=np.float32)
sampled_exp = np.full([len(sampled)], fill_value=0.5, dtype=np.float32)
sampled_vals = (sampled, true_exp, sampled_exp)
sampled_w, sampled_b = weights[sampled], biases[sampled]
true_w, true_b = weights[labels], biases[labels]
true_logits = np.sum(
hidden_acts.reshape((batch_size, 1, dim)) * true_w.reshape(
(batch_size, num_true, dim)),
axis=2)
true_b = true_b.reshape((batch_size, num_true))
true_logits += true_b
sampled_logits = np.dot(hidden_acts, sampled_w.T) + sampled_b
if subtract_log_q:
true_logits -= np.log(true_exp)
sampled_logits -= np.log(sampled_exp[np.newaxis, :])
exp_logits = np.concatenate([true_logits, sampled_logits], axis=1)
exp_labels = np.hstack((np.ones_like(true_logits) / num_true,
np.zeros_like(sampled_logits)))
return weights, biases, hidden_acts, sampled_vals, exp_logits, exp_labels
def _ShardTestEmbeddings(self, weights, biases, num_shards):
"""Shards the weights and biases returned by _GenerateTestData.
Args:
weights: The weights returned by _GenerateTestData.
biases: The biases returned by _GenerateTestData.
num_shards: The number of shards to create.
Returns:
sharded_weights: A list of size `num_shards` containing all the weights.
sharded_biases: A list of size `num_shards` containing all the biases.
"""
with ops.Graph().as_default() as g:
sharded_weights = variable_scope.get_variable(
"w",
partitioner=partitioned_variables.fixed_size_partitioner(num_shards),
initializer=constant_op.constant(weights))
sharded_biases = variable_scope.get_variable(
"b",
partitioner=partitioned_variables.fixed_size_partitioner(num_shards),
initializer=constant_op.constant(biases))
with self.session(graph=g) as sess:
variables.global_variables_initializer().run()
return self.evaluate([list(sharded_weights), list(sharded_biases)])
def testShapes(self):
np.random.seed(0)
num_classes = 5
batch_size = 3
for num_true in range(1, 5):
labels = np.random.randint(
low=0, high=num_classes, size=batch_size * num_true)
(weights, biases, hidden_acts, sampled_vals, exp_logits,
exp_labels) = self._GenerateTestData(
num_classes=num_classes,
dim=10,
batch_size=batch_size,
num_true=num_true,
labels=labels,
sampled=[1, 0, 2, 3],
subtract_log_q=False)
logits_tensor, labels_tensor = _compute_sampled_logits(
weights=constant_op.constant(weights),
biases=constant_op.constant(biases),
labels=constant_op.constant(
labels, dtype=dtypes.int64, shape=(batch_size, num_true)),
inputs=constant_op.constant(hidden_acts),
num_sampled=4,
num_classes=num_classes,
num_true=num_true,
sampled_values=sampled_vals,
subtract_log_q=False,
remove_accidental_hits=False,
partition_strategy="div",
name="sampled_logits_basic_num_true_%d" % num_true)
got_logits, got_labels = self.evaluate([logits_tensor, labels_tensor])
self.assertEqual(exp_logits.shape, got_logits.shape, self._eps)
self.assertEqual(exp_labels.shape, got_labels.shape, self._eps)
def testBasic(self):
"""Without accidental hit removal or subtract_log_q."""
np.random.seed(0)
num_classes = 5
batch_size = 3
for num_true in range(1, 5):
labels = np.random.randint(
low=0, high=num_classes, size=batch_size * num_true)
(weights, biases, hidden_acts, sampled_vals, exp_logits,
exp_labels) = self._GenerateTestData(
num_classes=num_classes,
dim=10,
batch_size=batch_size,
num_true=num_true,
labels=labels,
sampled=[1, 0, 2, 3],
subtract_log_q=False)
logits_tensor, labels_tensor = _compute_sampled_logits(
weights=constant_op.constant(weights),
biases=constant_op.constant(biases),
labels=constant_op.constant(
labels, dtype=dtypes.int64, shape=(batch_size, num_true)),
inputs=constant_op.constant(hidden_acts),
num_sampled=4,
num_classes=num_classes,
num_true=num_true,
sampled_values=sampled_vals,
subtract_log_q=False,
remove_accidental_hits=False,
partition_strategy="div",
name="sampled_logits_basic_num_true_%d" % num_true)
got_logits, got_labels = self.evaluate([logits_tensor, labels_tensor])
self.assertAllClose(exp_logits, got_logits, self._eps)
self.assertAllClose(exp_labels, got_labels, self._eps)
def testAccidentalHitRemoval(self):
"""With accidental hit removal, no subtract_log_q."""
np.random.seed(0)
num_classes = 5
batch_size = 3
sampled = [1, 0, 2, 3]
for num_true in range(1, 5):
labels = np.random.randint(
low=0, high=num_classes, size=batch_size * num_true)
(weights, biases, hidden_acts, sampled_vals, _,
_) = self._GenerateTestData(
num_classes=num_classes,
dim=10,
batch_size=batch_size,
num_true=num_true,
labels=labels,
sampled=sampled,
subtract_log_q=False)
logits_tensor, _ = _compute_sampled_logits(
weights=constant_op.constant(weights),
biases=constant_op.constant(biases),
labels=constant_op.constant(
labels, dtype=dtypes.int64, shape=(batch_size, num_true)),
inputs=constant_op.constant(hidden_acts),
num_sampled=len(sampled),
num_classes=num_classes,
num_true=num_true,
sampled_values=sampled_vals,
subtract_log_q=False,
remove_accidental_hits=True,
partition_strategy="div",
name="sampled_logits_accidental_hit_removal_num_true_%d" % num_true)
# Test that the exponentiated logits of accidental hits are near 0.
# First we need to find the hits in this random test run:
labels_reshape = labels.reshape((batch_size, num_true))
got_logits = self.evaluate(logits_tensor)
for row in xrange(batch_size):
row_labels = labels_reshape[row, :]
for col in xrange(len(sampled)):
if sampled[col] in row_labels:
# We need to add the num_true_test offset into logits_*
self.assertNear(
np.exp(got_logits[row, col + num_true]), 0., self._eps)
def testSubtractLogQ(self):
"""With subtract_log_q, no accidental hit removal."""
np.random.seed(0)
num_classes = 5
batch_size = 3
for num_true in range(1, 5):
labels = np.random.randint(
low=0, high=num_classes, size=batch_size * num_true)
(weights, biases, hidden_acts, sampled_vals, exp_logits,
exp_labels) = self._GenerateTestData(
num_classes=num_classes,
dim=10,
batch_size=batch_size,
num_true=num_true,
labels=labels,
sampled=[1, 0, 2, 3],
subtract_log_q=True)
logits_tensor, labels_tensor = _compute_sampled_logits(
weights=constant_op.constant(weights),
biases=constant_op.constant(biases),
labels=constant_op.constant(
labels, dtype=dtypes.int64, shape=(batch_size, num_true)),
inputs=constant_op.constant(hidden_acts),
num_sampled=4,
num_classes=num_classes,
num_true=num_true,
sampled_values=sampled_vals,
subtract_log_q=True,
remove_accidental_hits=False,
partition_strategy="div",
name="sampled_logits_subtract_log_q_num_true_%d" % num_true)
got_logits, got_labels = self.evaluate([logits_tensor, labels_tensor])
self.assertAllClose(exp_logits, got_logits, self._eps)
self.assertAllClose(exp_labels, got_labels, self._eps)
def testSharded(self):
"""With sharded weights and sharded biases."""
np.random.seed(0)
num_classes = 5
batch_size = 3
for num_true in range(1, 5):
labels = np.random.randint(
low=0, high=num_classes, size=batch_size * num_true)
(weights, biases, hidden_acts, sampled_vals, exp_logits,
exp_labels) = self._GenerateTestData(
num_classes=num_classes,
dim=10,
batch_size=batch_size,
num_true=num_true,
labels=labels,
sampled=[1, 0, 2, 3],
subtract_log_q=False)
weight_shards, bias_shards = self._ShardTestEmbeddings(
weights, biases, num_shards=3)
logits_tensor, labels_tensor = _compute_sampled_logits(
weights=[constant_op.constant(shard) for shard in weight_shards],
biases=[constant_op.constant(shard) for shard in bias_shards],
labels=constant_op.constant(
labels, dtype=dtypes.int64, shape=(batch_size, num_true)),
inputs=constant_op.constant(hidden_acts),
num_sampled=4,
num_classes=num_classes,
num_true=num_true,
sampled_values=sampled_vals,
subtract_log_q=False,
remove_accidental_hits=False,
partition_strategy="div",
name="sampled_logits_sharded_num_true_%d" % num_true)
got_logits, got_labels = self.evaluate([logits_tensor, labels_tensor])
self.assertAllClose(exp_logits, got_logits, self._eps)
self.assertAllClose(exp_labels, got_labels, self._eps)
def testNCELoss(self):
# A simple test to verify the numerics.
def _SigmoidCrossEntropyWithLogits(logits, targets):
# logits, targets: float arrays of the same shape.
assert logits.shape == targets.shape
pred = 1. / (1. + np.exp(-logits))
eps = 0.0001
pred = np.minimum(np.maximum(pred, eps), 1 - eps)
return -targets * np.log(pred) - (1. - targets) * np.log(1. - pred)
np.random.seed(0)
num_classes = 5
batch_size = 3
labels = [0, 1, 2]
(weights, biases, hidden_acts, sampled_vals, exp_logits,
exp_labels) = self._GenerateTestData(
num_classes=num_classes,
dim=10,
batch_size=batch_size,
num_true=1,
labels=labels,
sampled=[1, 0, 2, 3],
subtract_log_q=True)
exp_nce_loss = np.sum(
_SigmoidCrossEntropyWithLogits(exp_logits, exp_labels), 1)
got_nce_loss = nn_impl.nce_loss_v2(
weights=constant_op.constant(weights),
biases=constant_op.constant(biases),
labels=constant_op.constant(labels, shape=(batch_size, 1)),
inputs=constant_op.constant(hidden_acts),
num_sampled=4,
num_classes=num_classes,
num_true=1,
sampled_values=sampled_vals)
self.assertAllClose(exp_nce_loss, self.evaluate(got_nce_loss), 1e-4)
# Test with sharded weights and sharded biases.
weight_shards, bias_shards = self._ShardTestEmbeddings(
weights, biases, num_shards=3)
got_nce_loss = nn_impl.nce_loss_v2(
weights=[constant_op.constant(shard) for shard in weight_shards],
biases=[constant_op.constant(shard) for shard in bias_shards],
labels=constant_op.constant(labels, shape=(batch_size, 1)),
inputs=constant_op.constant(hidden_acts),
num_sampled=4,
num_classes=num_classes,
num_true=1,
sampled_values=sampled_vals)
self.assertAllClose(exp_nce_loss, self.evaluate(got_nce_loss), 1e-4)
def testSampledSoftmaxLoss(self):
# A simple test to verify the numerics.
def _SoftmaxCrossEntropyWithLogits(logits, targets):
# logits, targets: float arrays of the same shape.
assert logits.shape == targets.shape
stable_exp_logits = np.exp(
logits - np.amax(logits, axis=1, keepdims=True))
pred = stable_exp_logits / np.sum(stable_exp_logits, 1, keepdims=True)
return -np.sum(targets * np.log(pred + 1.0e-20), axis=1)
    np.random.seed(0)
"""Test :class:`qibo.abstractions.gates.M` as standalone and as part of circuit."""
import pytest
import numpy as np
from qibo import models, gates, K
def assert_result(result, decimal_samples=None, binary_samples=None,
decimal_frequencies=None, binary_frequencies=None):
if decimal_frequencies is not None:
assert result.frequencies(False) == decimal_frequencies
if binary_frequencies is not None:
assert result.frequencies(True) == binary_frequencies
if decimal_samples is not None:
K.assert_allclose(result.samples(False), decimal_samples)
if binary_samples is not None:
K.assert_allclose(result.samples(True), binary_samples)
@pytest.mark.parametrize("n", [0, 1])
@pytest.mark.parametrize("nshots", [100, 1000000])
def test_measurement_gate(backend, n, nshots):
state = np.zeros(4)
state[-n] = 1
result = gates.M(0)(K.cast(state), nshots=nshots)
assert_result(result, n * np.ones(nshots), n * np.ones((nshots, 1)),
{n: nshots}, {str(n): nshots})
def test_multiple_qubit_measurement_gate(backend):
state = np.zeros(4)
state[2] = 1
result = gates.M(0, 1)(K.cast(state), nshots=100)
target_binary_samples = np.zeros((100, 2))
target_binary_samples[:, 0] = 1
assert_result(result, 2 * np.ones((100,)), target_binary_samples,
{2: 100}, {"10": 100})
def test_measurement_gate_errors(backend):
gate = gates.M(0)
# attempting to use `controlled_by`
with pytest.raises(NotImplementedError):
gate.controlled_by(1)
# attempting to construct unitary
with pytest.raises(ValueError):
matrix = gate.matrix
# calling on bad state
with pytest.raises(TypeError):
gate("test", 100)
def test_measurement_circuit(backend, accelerators):
c = models.Circuit(4, accelerators)
c.add(gates.X(0))
c.add(gates.M(0))
result = c(nshots=100)
assert_result(result,
                  np.ones((100,)), np.ones((100, 1)),
                  {1: 100}, {"1": 100})
"""
analyze EEG data
Created by <NAME> on 13-06-2018.
Copyright (c) 2018 DvM. All rights reserved.
"""
import os
import mne
import pickle
import math
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from mne.filter import filter_data
from mne.time_frequency import tfr_array_morlet
from mne.baseline import rescale
from scipy.signal import hilbert
from numpy.fft import fft, ifft,rfft, irfft
from support.FolderStructure import *
from support.support import trial_exclusion
from signals.signal_processing import *
from IPython import embed
class TF(FolderStructure):
def __init__(self, beh, eeg, laplacian=True):
'''
Arguments
- - - - -
Returns
- - - -
'''
self.beh = beh
self.EEG = eeg
self.laplacian = laplacian
def selectTFData(self, laplacian, excl_factor):
'''
Arguments
- - - - -
Returns
- - - -
'''
# load processed behavior and eeg
beh = self.beh
EEG = self.EEG
# check whether trials need to be excluded
if type(excl_factor) == dict: # remove unwanted trials from beh
beh, EEG = trial_exclusion(beh, EEG, excl_factor)
# select electrodes of interest
picks = mne.pick_types(EEG.info, eeg=True, exclude='bads')
eegs = EEG._data[:,picks,:]
if laplacian:
pass
# TODO: Implement laplacian
# x,y,z = np.vstack([EEG.info['chs'][i]['loc'][:3] for i in picks]).T
# leg_order = 10 if picks.size <=100 else 12
# eegs = laplacian_filter(eegs, x, y, z, leg_order = leg_order, smoothing = 1e-5)
return eegs, beh
def RESS(self, sfreq, time_oi, peakwidth = .5, neighfreq = 1, neighwidt = 1, peak_freqs = [6, 7.5], elec_oi = ['Oz','O2']):
# set FFT parameters
sfreq = eeg.info['sfreq']
nfft = np.ceil(sfreq/.1 ) # .1 Hz resolution
t_idx_s, t_idx_e = [np.argmin(abs(eeg.times - t)) for t in time_oi]
hz = np.linspace(0,sfreq,nfft)
# extract eeg data (should be implemented in cnd loop)
data = eeg._data[cnd_mask,:,:]
#dataX = np.mean(abs(fft(data[:,t_idx_s:t_idx_e, nfft, axis = 2)/ (t_idx_e - t_idx_s)), axis = 0) # This needs to be checked!!!!!
def FGFilter(self, X, sfreq, f, fwhm):
"""[summary]
Arguments:
X {[type]} -- [description]
sfreq { [type]} -- [description]
f {[type]} -- [description]
fwhm {[type]} -- [description]
Returns:
[type] -- [description]
"""
# compute and apply filter
# frequencies
hz = np.linspace(0,sfreq,X.shape[1])
# create Gaussian (CHECK THIS)
s = fwhm*(2 * np.pi-1)/(4*np.pi) # normalized width
x = hz-f # shifted frequencies
fx = np.exp(-.5*(x/s)**2) # gaussian
fx = fx/np.max(fx) # gain-normalized
# filter data
		filtX = 2 * np.real(ifft(fft(X, axis=1) * fx, axis=1))
		# compute empirical frequency of the applied filter
		idx = np.argmin(np.abs(hz - f))
		emp_vals = [hz[idx]]
		# empirical FWHM: values closest to .5 after MINUS before the peak
		emp_vals.append(hz[idx - 1 + np.argmin(np.abs(fx[idx:] - .5))] - hz[np.argmin(np.abs(fx[:idx] - .5))])
		return filtX, emp_vals
@staticmethod
def nextpow2(i):
'''
Gives the exponent of the next higher power of 2
'''
n = 1
while 2**n < i:
n += 1
return n
@staticmethod
def topoFlip(eegs, var, ch_names, left = []):
'''
Flips the topography of trials where the stimuli of interest was presented
on the left (i.e. right hemifield). After running this function it is as if
all stimuli are presented right (i.e. the left hemifield)
Arguments
- - - - -
eegs(array): eeg data
var (array|list): location info per trial
ch_names (list): list of channel names
left (list): list containing stimulus labels indicating spatial position
Returns
- - - -
inst (instance of ERP): The modified instance
'''
# dictionary to flip topographic layout
flip_dict = {'Fp1':'Fp2','AF7':'AF8','AF3':'AF4','F7':'F8','F5':'F6','F3':'F4',\
'F1':'F2','FT7':'FT8','FC5':'FC6','FC3':'FC4','FC1':'FC2','T7':'T8',\
'C5':'C6','C3':'C4','C1':'C2','TP7':'TP8','CP5':'CP6','CP3':'CP4',\
'CP1':'CP2','P9':'P10','P7':'P8','P5':'P6','P3':'P4','P1':'P2',\
'PO7':'PO8','PO3':'PO4','O1':'O2'}
		idx_l = np.sort(np.hstack([np.where(var == l)[0] for l in left]))
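		# Assumed remainder of the routine: swap each left/right electrode pair for the
		# selected trials so that all stimuli end up as if presented in the same hemifield.
		flipped = np.copy(eegs[idx_l])
		for l_elec, r_elec in flip_dict.items():
			flipped[:, ch_names.index(l_elec)] = eegs[idx_l][:, ch_names.index(r_elec)]
			flipped[:, ch_names.index(r_elec)] = eegs[idx_l][:, ch_names.index(l_elec)]
		eegs[idx_l] = flipped

		return eegs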
# coding:utf-8
import os
import argparse
import numpy as np
import json
import cv2
import torch
import copy
from typing import Any, Iterator, List, Union
from detectron2.structures import (
Boxes,
PolygonMasks,
BoxMode,
polygons_to_bitmask
)
from mask_encoding import DctMaskEncoding
class IOUMetric(object):
"""
Class to calculate mean-iou using fast_hist method
"""
def __init__(self, num_classes):
self.num_classes = num_classes
self.hist = np.zeros((num_classes, num_classes))
def _fast_hist(self, label_pred, label_true):
mask = (label_true >= 0) & (label_true < self.num_classes)
hist = np.bincount(
self.num_classes * label_true[mask].astype(int) +
label_pred[mask], minlength=self.num_classes ** 2).reshape(self.num_classes, self.num_classes)
return hist
def add_batch(self, predictions, gts):
for lp, lt in zip(predictions, gts):
self.hist += self._fast_hist(lp.flatten(), lt.flatten())
def evaluate(self):
acc = np.diag(self.hist).sum() / self.hist.sum()
acc_cls = np.diag(self.hist) / self.hist.sum(axis=1)
acc_cls = np.nanmean(acc_cls)
        iu = np.diag(self.hist) / (self.hist.sum(axis=1) + self.hist.sum(axis=0) - np.diag(self.hist))
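        # Assumed continuation of the standard IoU bookkeeping: mean IoU plus the
        # frequency-weighted IoU, returned together with the accuracies computed above.
        mean_iu = np.nanmean(iu)
        freq = self.hist.sum(axis=1) / self.hist.sum()
        fwavacc = (freq[freq > 0] * iu[freq > 0]).sum()
        return acc, acc_cls, iu, mean_iu, fwavacc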
import math
import warnings
import numpy as np
import scipy.sparse as sp
# The slow fallbacks below rely on these SciPy helpers; the imports are assumed from the
# original module (scipy.stats.nanmedian and scipy.stats.ss exist in the older SciPy
# releases this code targets).
from scipy.stats import nanmedian as scipy_nanmedian
from scipy.stats import rankdata as scipy_rankdata
from scipy.stats import ss as scipy_ss
__all__ = ['median', 'nanmedian', 'nansum', 'nanmean', 'nanvar', 'nanstd',
'nanmin', 'nanmax', 'nanargmin', 'nanargmax', 'rankdata',
'nanrankdata', 'ss', 'nn', 'partsort', 'argpartsort', 'replace',
'anynan', 'allnan',
'bincount', 'valuecount', 'countnans', 'stats',
'contingency', 'nanequal']
def median(arr, axis=None):
"Slow median function used for unaccelerated ndim/dtype combinations."
arr = np.asarray(arr)
y = np.median(arr, axis=axis)
if y.dtype != arr.dtype:
if issubclass(arr.dtype.type, np.inexact):
y = y.astype(arr.dtype)
return y
def nansum(arr, axis=None):
"Slow nansum function used for unaccelerated ndim/dtype combinations."
arr = np.asarray(arr)
y = np.nansum(arr, axis=axis)
if not hasattr(y, "dtype"):
y = arr.dtype.type(y)
if y.dtype != arr.dtype:
if issubclass(arr.dtype.type, np.inexact):
y = y.astype(arr.dtype)
return y
def nanmedian(arr, axis=None):
"Slow nanmedian function used for unaccelerated ndim/dtype combinations."
arr = np.asarray(arr)
y = scipy_nanmedian(arr, axis=axis)
if not hasattr(y, "dtype"):
if issubclass(arr.dtype.type, np.inexact):
y = arr.dtype.type(y)
else:
y = np.float64(y)
if y.dtype != arr.dtype:
if issubclass(arr.dtype.type, np.inexact):
y = y.astype(arr.dtype)
if (y.size == 1) and (y.ndim == 0):
y = y[()]
return y
def nanmean(arr, axis=None):
"Slow nanmean function used for unaccelerated ndim/dtype combinations."
return np.nanmean(arr, axis=axis)
def nanvar(arr, axis=None, ddof=0):
"Slow nanvar function used for unaccelerated ndim/dtype combinations."
return np.nanvar(arr, axis=axis, ddof=ddof)
def nanstd(arr, axis=None, ddof=0):
"Slow nanstd function used for unaccelerated ndim/dtype combinations."
return np.nanstd(arr, axis=axis, ddof=ddof)
def nanmin(arr, axis=None):
"Slow nanmin function used for unaccelerated ndim/dtype combinations."
y = np.nanmin(arr, axis=axis)
if not hasattr(y, "dtype"):
# Numpy 1.5.1 doesn't return object with dtype when input is all NaN
y = arr.dtype.type(y)
return y
def nanmax(arr, axis=None):
"Slow nanmax function used for unaccelerated ndim/dtype combinations."
y = np.nanmax(arr, axis=axis)
if not hasattr(y, "dtype"):
# Numpy 1.5.1 doesn't return object with dtype when input is all NaN
y = arr.dtype.type(y)
return y
def nanargmin(arr, axis=None):
"Slow nanargmin function used for unaccelerated ndim/dtype combinations."
with warnings.catch_warnings():
warnings.simplefilter("ignore")
return np.nanargmin(arr, axis=axis)
def nanargmax(arr, axis=None):
"Slow nanargmax function used for unaccelerated ndim/dtype combinations."
with warnings.catch_warnings():
warnings.simplefilter("ignore")
return np.nanargmax(arr, axis=axis)
def rankdata(arr, axis=None):
"Slow rankdata function used for unaccelerated ndim/dtype combinations."
arr = np.asarray(arr)
if axis is None:
arr = arr.ravel()
axis = 0
elif axis < 0:
axis = range(arr.ndim)[axis]
y = np.empty(arr.shape)
itshape = list(arr.shape)
itshape.pop(axis)
for ij in np.ndindex(*itshape):
ijslice = list(ij[:axis]) + [slice(None)] + list(ij[axis:])
y[ijslice] = scipy_rankdata(arr[ijslice].astype('float'))
return y
def nanrankdata(arr, axis=None):
"Slow nanrankdata function used for unaccelerated ndim/dtype combinations."
arr = np.asarray(arr)
if axis is None:
arr = arr.ravel()
axis = 0
elif axis < 0:
axis = range(arr.ndim)[axis]
y = np.empty(arr.shape)
y.fill(np.nan)
itshape = list(arr.shape)
itshape.pop(axis)
for ij in np.ndindex(*itshape):
ijslice = list(ij[:axis]) + [slice(None)] + list(ij[axis:])
x1d = arr[ijslice].astype(float)
mask1d = ~np.isnan(x1d)
x1d[mask1d] = scipy_rankdata(x1d[mask1d])
y[ijslice] = x1d
return y
def ss(arr, axis=0):
"Slow sum of squares used for unaccelerated ndim/dtype combinations."
return scipy_ss(arr, axis)
def nn(arr, arr0, axis=1):
"Slow nearest neighbor used for unaccelerated ndim/dtype combinations."
arr = np.array(arr, copy=False)
arr0 = np.array(arr0, copy=False)
if arr.ndim != 2:
raise ValueError("`arr` must be 2d")
if arr0.ndim != 1:
raise ValueError("`arr0` must be 1d")
if axis == 1:
d = (arr - arr0) ** 2
elif axis == 0:
d = (arr - arr0.reshape(-1,1)) ** 2
else:
raise ValueError("`axis` must be 0 or 1.")
d = d.sum(axis)
idx = np.argmin(d)
return np.sqrt(d[idx]), idx
def partsort(arr, n, axis=-1):
"Slow partial sort used for unaccelerated ndim/dtype combinations."
return np.sort(arr, axis)
def argpartsort(arr, n, axis=-1):
"Slow partial argsort used for unaccelerated ndim/dtype combinations."
return np.argsort(arr, axis)
def replace(arr, old, new):
"Slow replace (inplace) used for unaccelerated ndim/dtype combinations."
if type(arr) is not np.ndarray:
raise TypeError("`arr` must be a numpy array.")
if not issubclass(arr.dtype.type, np.inexact):
if old != old:
# int arrays do not contain NaN
return
if int(old) != old:
raise ValueError("Cannot safely cast `old` to int.")
if int(new) != new:
raise ValueError("Cannot safely cast `new` to int.")
if old != old:
        mask = np.isnan(arr)
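    else:
        mask = arr == old
    # Assumed tail of the original in-place replace: overwrite the matching entries.
    arr[mask] = new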
"""
larsen.py
Created by <NAME>, Jul. 2016.
Brigham Young University
"""
from openmdao.api import IndepVarComp, Component, Group
import numpy as np
from fusedwake.gcl.python.gcl import GCLarsen #use w/GCL.f
def add_larsen_params_IndepVarComps(openmdao_object, nTurbines, datasize):
# add variable tree and indep-var stuff for Larsen
    openmdao_object.add('lp0', IndepVarComp('model_params:Ia', val=0.0, pass_by_obj=True,
                                            desc='ambient turbulence intensity'), promotes=['*'])
    openmdao_object.add('lp1', IndepVarComp('model_params:air_density', val=0.0, units='kg/m*m*m',
                                            pass_by_obj=True), promotes=['*'])
openmdao_object.add('lp2', IndepVarComp('model_params:windSpeedToCPCT_wind_speed', np.zeros(datasize), units='m/s',
desc='range of wind speeds', pass_by_obj=True), promotes=['*'])
openmdao_object.add('lp3', IndepVarComp('model_params:windSpeedToCPCT_CP', np.zeros(datasize),
desc='power coefficients', pass_by_obj=True), promotes=['*'])
openmdao_object.add('lp4', IndepVarComp('model_params:windSpeedToCPCT_CT', np.zeros(datasize),
desc='thrust coefficients', pass_by_obj=True), promotes=['*'])
# TODO make hubHeight a standard connection
openmdao_object.add('lp5', IndepVarComp('hubHeight', np.zeros(nTurbines)), promotes=['*'])
class GC_Larsen(Component):
"""
This component was written by <NAME>
"""
def __init__(self, nTurbines, direction_id=0, model_options=None):
super(GC_Larsen, self).__init__()
self.add_param('wind_speed', val=8.0, units='m/s')
self.add_param('wind_direction', val=0.0)
self.direction_id = direction_id
self.datasize = model_options['datasize']
self.wf_instance = model_options['wf_instance']
# coordinates
self.add_param('turbineXw', val=np.zeros(nTurbines), units='m')
self.add_param('turbineYw', val=np.zeros(nTurbines), units='m')
self.add_param('turbineZ', val=np.zeros(nTurbines), units='m')
self.add_param('rotorDiameter', val=np.zeros(nTurbines), units='m')
self.add_param('hubHeight', np.zeros(nTurbines), units='m')
self.add_param('model_params:Ia', val=0.0) # Ambient Turbulence Intensity
self.add_param('model_params:air_density', val=0.0, units='kg/m*m*m')
self.add_param('Ct_length', val = 0.0)
self.add_param('model_params:windSpeedToCPCT_wind_speed', np.zeros(self.datasize), units='m/s',
desc='range of wind speeds', pass_by_obj=True)
self.add_param('model_params:windSpeedToCPCT_CP', np.zeros(self.datasize),
desc='power coefficients', pass_by_obj=True)
        self.add_param('model_params:windSpeedToCPCT_CT', np.zeros(self.datasize),
                       desc='thrust coefficients', pass_by_obj=True)
import numpy as np
import cv2
from collections import deque
import pickle
import os
class ImageProcessor:
"""
Class used to process an image for the LaneDetector. Applies both color and gradient thresholding and produces a set of
    images (undistorted, thresholded and warped) that can be used for debugging.
"""
def __init__(self, calibration_data_file):
# Camera calibration data
calibration_data = self._load_calibration_data(file_path = calibration_data_file)
self.mtx = calibration_data['mtx']
self.dist = calibration_data['dist']
# Gradient and color thresholding parameters
self.sobel_kernel = 5
self.grad_x_thresh = (15, 255) # Sobel x threshold
self.grad_y_thresh = (25, 255) # Sobel y threshold
self.grad_mag_thresh = (40, 255) # Sobel mag threshold
self.grad_dir_thresh = (0.7, 1.3) # Sobel direction range
self.grad_v_thresh = (180, 255) # HSV, V channel threshold to filter gradient
self.r_thresh = (195, 255) # RGB, Red channel threshold
self.s_thresh = (100, 255) # HSL, S channel threshold
self.l_thresh = (195, 255) # HSL, L channel threshold
self.b_thresh = (150, 255) # LAB, B channel threshold
self.v_thresh = (140, 255) # HSV, V channel threshold
# Perspective transformation parameters
# slope = (y2 - y1) / (x2 - x1)
# intercept = y1 - slope * x1
# top left, top right = (570, 470), (722, 470)
# bottom left, bottom right = (220, 720), (1110, 720)
self.persp_src_left_line = (-0.7142857143, 877.142857146) # Slope and intercept for left line
self.persp_src_right_line = (0.6443298969, 4.793814441) # Slope and intercept for right line
self.persp_src_top_pct = 0.645 # Percentage from the top
self.persp_src_bottom_pct = 0.02 # Percentage from bottom
self.persp_dst_x_pct = 0.22 # Destination offset percent
self.persp_src = None
self.persp_dst = None
def _load_calibration_data(self, file_path = os.path.join('camera_cal', 'calibration.p')):
with open(file_path, 'rb') as f:
return pickle.load(f)
def _warp_coordinates(self, img):
if self.persp_src is None or self.persp_dst is None:
cols = img.shape[1]
rows = img.shape[0]
src_top_offset = rows * self.persp_src_top_pct
src_bottom_offset = rows * self.persp_src_bottom_pct
left_slope, left_intercept = self.persp_src_left_line
right_slope, right_intercept = self.persp_src_right_line
top_left = [(src_top_offset - left_intercept) / left_slope, src_top_offset]
top_right = [(src_top_offset - right_intercept) / right_slope, src_top_offset]
bottom_left = [(rows - src_bottom_offset - left_intercept) / left_slope, rows - src_bottom_offset]
bottom_right = [(rows - src_bottom_offset - right_intercept) / right_slope, rows - src_bottom_offset]
#Top left, Top right, Bottom right, Bottom left
src = np.float32([top_left, top_right, bottom_right, bottom_left])
dst_x_offset = cols * self.persp_dst_x_pct
top_left = [dst_x_offset, 0]
top_right = [cols - dst_x_offset, 0]
bottom_left = [dst_x_offset, rows]
bottom_right = [cols - dst_x_offset, rows]
            dst = np.float32([top_left, top_right, bottom_right, bottom_left])
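            # Sketch (not original code): downstream these quadrilaterals are
            # typically cached and turned into OpenCV warp matrices, e.g.:
            #   self.persp_src, self.persp_dst = src, dst
            #   M = cv2.getPerspectiveTransform(self.persp_src, self.persp_dst)
            #   warped = cv2.warpPerspective(img, M, (cols, rows))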
from bin.process import load_names
from time import time
import numpy as np
import sys
from scipy.io import mmwrite
from scipy.sparse import vstack
from scanorama.scanorama import *
if __name__ == '__main__':
from bin.config import data_names, names, namespace, path, output, metadata, write, tsne, uncorrected, dimred
datasets, genes_list, cells_list, n_cells = load_names(data_names, norm=False)
t0 = time()
datasets_moved, datasets_dimred, datasets_norm, datasets, genes = correct(
datasets, genes_list, ds_names=names,
sigma=150, return_matrices=True, dimred=dimred
)
if VERBOSE:
print('Integrated and batch corrected panoramas in {:.3f}s'
.format(time() - t0))
if write or tsne:
cells = []
for c, name in zip(cells_list, names):
for cell in c:
cells.append('%s:%s' % (cell, name))
if write:
mmwrite(output + '%s_counts.mtx' % namespace, vstack(datasets), field='integer')
mmwrite(output + '%s_lognorm.mtx' % namespace, vstack(datasets_norm))
mmwrite(output + '%s_dimred.mtx' % namespace, vstack(datasets_dimred))
mmwrite(output + '%s_moved.mtx' % namespace, vstack(datasets_moved))
with open(output + '%s_genes_list.txt' % namespace, 'w') as o:
o.write('\n'.join(genes))
with open(output + '%s_cells_list.txt' % namespace, 'w') as o:
o.write('\n'.join(cells))
if tsne:
calculate_tsne(vstack(datasets_moved), cells, namespace, output)
# metadata_into_file(embedding, labels, names, output, cells_list, namespace, metadata)
if uncorrected:
# Uncorrected.
datasets, genes_list, cells_list, n_cells = load_names(data_names)
datasets, genes = merge_datasets(datasets, genes_list)
datasets_dimred = dimensionality_reduce(datasets)
labels = []
names = []
curr_label = 0
for i, a in enumerate(datasets):
            labels += list(np.zeros(a.shape[0]) + curr_label)
"""
voxel.py
-----------
Convert meshes to a simple voxel data structure and back again.
"""
import numpy as np
from . import util
from . import remesh
from . import caching
from . import grouping
from .constants import log, _log_time
class Voxel(object):
def __init__(self, *args, **kwargs):
self._data = caching.DataStore()
self._cache = caching.Cache(id_function=self._data.crc)
@caching.cache_decorator
def marching_cubes(self):
"""
A marching cubes Trimesh representation of the voxels.
No effort was made to clean or smooth the result in any way;
it is merely the result of applying the scikit-image
measure.marching_cubes function to self.matrix.
Returns
---------
meshed: Trimesh object representing the current voxel
object, as returned by marching cubes algorithm.
"""
meshed = matrix_to_marching_cubes(matrix=self.matrix,
pitch=self.pitch,
origin=self.origin)
return meshed
@property
def pitch(self):
# stored as TrackedArray with a single element
return self._data['pitch'][0]
@pitch.setter
def pitch(self, value):
self._data['pitch'] = value
@property
def shape(self):
"""
The shape of the matrix for the current voxel object.
Returns
---------
shape: (3,) int, what is the shape of the 3D matrix
for these voxels
"""
return self.matrix.shape
@caching.cache_decorator
def filled_count(self):
"""
Return the number of voxels that are occupied.
Returns
--------
filled: int, number of voxels that are occupied
"""
return int(self.matrix.sum())
@caching.cache_decorator
def volume(self):
"""
What is the volume of the filled cells in the current voxel object.
Returns
---------
volume: float, volume of filled cells
"""
volume = self.filled_count * (self.pitch**3)
return volume
@caching.cache_decorator
def points(self):
"""
The center of each filled cell as a list of points.
Returns
----------
points: (self.filled, 3) float, list of points
"""
points = matrix_to_points(matrix=self.matrix,
pitch=self.pitch,
origin=self.origin)
return points
def point_to_index(self, point):
"""
Convert a point to an index in the matrix array.
Parameters
----------
point: (3,) float, point in space
Returns
---------
index: (3,) int tuple, index in self.matrix
"""
point = np.asanyarray(point)
if point.shape != (3,):
raise ValueError('to_index requires a single point')
index = np.round((point - self.origin) /
self.pitch).astype(int)
index = tuple(index)
return index
def is_filled(self, point):
"""
Query a point to see if the voxel cell it lies in is filled or not.
Parameters
----------
point: (3,) float, point in space
Returns
---------
is_filled: bool, is cell occupied or not
"""
index = self.point_to_index(point)
in_range = (np.array(index) < np.array(self.shape)).all()
if in_range:
is_filled = self.matrix[index]
else:
is_filled = False
return is_filled
class VoxelMesh(Voxel):
def __init__(self,
mesh,
pitch,
max_iter=10,
size_max=None,
method='subdivide'):
"""
A voxel representation of a mesh that will track changes to
the mesh.
At the moment the voxels are not filled in and only represent
the surface.
Parameters
----------
mesh: Trimesh object
pitch: float, how long should each edge of the voxel be
size_max: float, maximum size (in mb) of a data structure that
may be created before raising an exception
"""
super(VoxelMesh, self).__init__()
self._method = method
self._data['mesh'] = mesh
self._data['pitch'] = pitch
self._data['max_iter'] = max_iter
@caching.cache_decorator
def matrix_surface(self):
"""
The voxels on the surface of the mesh as a 3D matrix.
Returns
---------
matrix: self.shape np.bool, if a cell is True it is occupied
"""
matrix = sparse_to_matrix(self.sparse_surface)
return matrix
@caching.cache_decorator
def matrix_solid(self):
"""
The voxels in a mesh as a 3D matrix.
Returns
---------
matrix: self.shape np.bool, if a cell is True it is occupied
"""
matrix = sparse_to_matrix(self.sparse_solid)
return matrix
@property
def matrix(self):
"""
A matrix representation of the surface voxels.
In the future this is planned to return a filled voxel matrix
if the source mesh is watertight, and a surface voxelization
otherwise.
Returns
---------
matrix: self.shape np.bool, cell occupancy
"""
if self._data['mesh'].is_watertight:
return self.matrix_solid
return self.matrix_surface
@property
def origin(self):
"""
The origin of the voxel array.
Returns
------------
origin: (3,) float, point in space
"""
populate = self.sparse_surface
return self._cache['origin']
@caching.cache_decorator
def sparse_surface(self):
"""
Filled cells on the surface of the mesh.
Returns
----------------
voxels: (n, 3) int, filled cells on mesh surface
"""
if self._method == 'ray':
func = voxelize_ray
elif self._method == 'subdivide':
func = voxelize_subdivide
else:
raise ValueError('voxelization method incorrect')
voxels, origin = func(
mesh=self._data['mesh'],
pitch=self._data['pitch'],
max_iter=self._data['max_iter'][0])
self._cache['origin'] = origin
return voxels
@caching.cache_decorator
def sparse_solid(self):
"""
Filled cells inside and on the surface of mesh
Returns
----------------
filled: (n, 3) int, filled cells in or on mesh.
"""
filled = fill_voxelization(self.sparse_surface)
return filled
def as_boxes(self, solid=False):
"""
A rough Trimesh representation of the voxels with a box
for each filled voxel.
Parameters
-----------
solid: bool, if True return boxes for sparse_solid
Returns
---------
mesh: Trimesh object made up of one box per filled cell.
"""
if solid:
filled = self.sparse_solid
else:
filled = self.sparse_surface
# center points of voxels
centers = (filled * self.pitch).astype(np.float64)
centers += self.origin - (self.pitch / 2.0)
mesh = multibox(centers=centers, pitch=self.pitch)
return mesh
def show(self, solid=False):
"""
Convert the current set of voxels into a trimesh for visualization
        and show that via its built-in preview method.
"""
self.as_boxes(solid=solid).show()
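# Usage sketch (not part of the original module): voxelize a unit cube and query it.
# Assumes `trimesh.creation.box()` is available as the usual primitive helper.
def _voxel_mesh_example():
    from trimesh import creation
    mesh = creation.box()                      # unit cube centred at the origin
    v = VoxelMesh(mesh, pitch=0.1)
    assert v.is_filled(mesh.centroid)          # watertight, so the solid fill is used
    return v.shape, v.filled_count, v.volume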
@_log_time
def voxelize_subdivide(mesh,
pitch,
max_iter=10,
edge_factor=2.0):
"""
Voxelize a surface by subdividing a mesh until every edge is
shorter than: (pitch / edge_factor)
Parameters
-----------
mesh: Trimesh object
pitch: float, side length of a single voxel cube
max_iter: int, cap maximum subdivisions or None for no limit.
edge_factor: float,
Returns
-----------
voxels_sparse: (n,3) int, (m,n,p) indexes of filled cells
origin_position: (3,) float, position of the voxel
grid origin in space
"""
max_edge = pitch / edge_factor
if max_iter is None:
longest_edge = np.linalg.norm(mesh.vertices[mesh.edges[:, 0]] -
mesh.vertices[mesh.edges[:, 1]],
axis=1).max()
max_iter = max(int(np.ceil(np.log2(longest_edge / max_edge))), 0)
    # get the same mesh subdivided so every edge is shorter
# than a factor of our pitch
v, f = remesh.subdivide_to_size(mesh.vertices,
mesh.faces,
max_edge=max_edge,
max_iter=max_iter)
# convert the vertices to their voxel grid position
hit = v / pitch
# Provided edge_factor > 1 and max_iter is large enough, this is
# sufficient to preserve 6-connectivity at the level of voxels.
hit = np.round(hit).astype(int)
# remove duplicates
unique, inverse = grouping.unique_rows(hit)
# get the voxel centers in model space
occupied_index = hit[unique]
origin_index = occupied_index.min(axis=0)
origin_position = origin_index * pitch
voxels_sparse = (occupied_index - origin_index)
return voxels_sparse, origin_position
def local_voxelize(mesh, point, pitch, radius, fill=True, **kwargs):
"""
Voxelize a mesh in the region of a cube around a point. When fill=True,
uses proximity.contains to fill the resulting voxels so may be meaningless
for non-watertight meshes. Useful to reduce memory cost for small values of
pitch as opposed to global voxelization.
Parameters
-----------
mesh : trimesh.Trimesh
Source geometry
point : (3, ) float
Point in space to voxelize around
pitch : float
Side length of a single voxel cube
radius : int
Number of voxel cubes to return in each direction.
kwargs : parameters to pass to voxelize_subdivide
Returns
-----------
voxels : (m, m, m) bool
Array of local voxels where m=2*radius+1
origin_position : (3,) float
Position of the voxel grid origin in space
"""
from scipy import ndimage
# make sure point is correct type/shape
point = np.asanyarray(point, dtype=np.float64).reshape(3)
# this is a gotcha- radius sounds a lot like it should be in
# float model space, not int voxel space so check
if not isinstance(radius, int):
raise ValueError('radius needs to be an integer number of cubes!')
# Bounds of region
bounds = np.concatenate((point - (radius + 0.5) * pitch,
point + (radius + 0.5) * pitch))
# faces that intersect axis aligned bounding box
faces = list(mesh.triangles_tree.intersection(bounds))
# didn't hit anything so exit
if len(faces) == 0:
return np.array([], dtype=np.bool), np.zeros(3)
local = mesh.submesh([[f] for f in faces], append=True)
# Translate mesh so point is at 0,0,0
local.apply_translation(-point)
sparse, origin = voxelize_subdivide(local, pitch, **kwargs)
matrix = sparse_to_matrix(sparse)
# Find voxel index for point
center = np.round(-origin / pitch).astype(np.int64)
# pad matrix if necessary
prepad = np.maximum(radius - center, 0)
postpad = np.maximum(center + radius + 1 - matrix.shape, 0)
matrix = np.pad(matrix, np.stack((prepad, postpad), axis=-1),
mode='constant')
center += prepad
# Extract voxels within the bounding box
voxels = matrix[center[0] - radius:center[0] + radius + 1,
center[1] - radius:center[1] + radius + 1,
center[2] - radius:center[2] + radius + 1]
local_origin = point - radius * pitch # origin of local voxels
# Fill internal regions
if fill:
regions, n = ndimage.measurements.label(~voxels)
distance = ndimage.morphology.distance_transform_cdt(~voxels)
representatives = [np.unravel_index((distance * (regions == i)).argmax(),
distance.shape) for i in range(1, n + 1)]
contains = mesh.contains(
np.asarray(representatives) *
pitch +
local_origin)
where = np.where(contains)[0] + 1
# use in1d vs isin for older numpy versions
internal = np.in1d(regions.flatten(), where).reshape(regions.shape)
voxels = np.logical_or(voxels, internal)
return voxels, local_origin
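# Usage sketch (not part of the original module): voxelize an 11x11x11 block
# around a point on the surface of a unit sphere.
def _local_voxelize_example():
    from trimesh import creation
    mesh = creation.icosphere()                # watertight unit sphere
    voxels, origin = local_voxelize(mesh, point=[1.0, 0.0, 0.0],
                                    pitch=0.1, radius=5)
    return voxels.shape, origin                # ((11, 11, 11), corner of the block)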
@_log_time
def voxelize_ray(mesh,
pitch,
per_cell=[2, 2],
**kwargs):
"""
Voxelize a mesh using ray queries.
Parameters
-------------
mesh : Trimesh object
Mesh to be voxelized
pitch : float
Length of voxel cube
per_cell : (2,) int
How many ray queries to make per cell
Returns
-------------
voxels : (n, 3) int
Voxel positions
origin : (3, ) int
Origin of voxels
"""
# how many rays per cell
per_cell = np.array(per_cell).astype(np.int).reshape(2)
# edge length of cube voxels
pitch = float(pitch)
# create the ray origins in a grid
bounds = mesh.bounds[:, :2].copy()
# offset start so we get the requested number per cell
bounds[0] += pitch / (1.0 + per_cell)
# offset end so arange doesn't short us
bounds[1] += pitch
# on X we are doing multiple rays per voxel step
step = pitch / per_cell
# 2D grid
ray_ori = util.grid_arange(bounds, step=step)
# a Z position below the mesh
z = np.ones(len(ray_ori)) * (mesh.bounds[0][2] - pitch)
ray_ori = np.column_stack((ray_ori, z))
# all rays are along positive Z
ray_dir = np.ones_like(ray_ori) * [0, 0, 1]
# if you have pyembree this should be decently fast
hits = mesh.ray.intersects_location(ray_ori, ray_dir)[0]
# just convert hit locations to integer positions
voxels = np.round(hits / pitch).astype(np.int64)
# offset voxels by min, so matrix isn't huge
origin = voxels.min(axis=0)
voxels -= origin
return voxels, origin
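# Usage sketch (not part of the original module): ray-based voxelization of a box.
# With pyembree installed the ray queries are much faster, but the pure-python
# ray backend works as well.
def _voxelize_ray_example():
    from trimesh import creation
    mesh = creation.box()
    voxels, origin = voxelize_ray(mesh, pitch=0.1, per_cell=[2, 2])
    return sparse_to_matrix(voxels), origin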
def fill_voxelization(occupied):
"""
Given a sparse surface voxelization, fill in between columns.
Parameters
--------------
occupied: (n, 3) int, location of filled cells
Returns
--------------
filled: (m, 3) int, location of filled cells
"""
# validate inputs
    occupied = np.asanyarray(occupied, dtype=np.int64)
'''
Author: Dr. <NAME>
Required packages: scikit-learn, scipy, numpy
This library contains a class for individual pointclouds ( ProcessPC() ), and classes for lists of pointclouds
( iterator_points_pointlabels() , iterator_points() , iterator_binaryVoxels() , iterator_binaryVoxels_pointlabels() ).
ProcessPC() is useful for various applications where you want a single pointcloud to be an object, and you wish
to mutate that object in different ways (e.g. voxelise, rasterise, normalise, rotate, etc.)
The iterator classes are designed for machine learning applications. They are useful as iterators when you want to
use several pointclouds to train (or predict with) a machine learning model (e.g. return the next batch of
pointclouds to train on).
'''
import numpy as np
import copy
import random
from scipy import signal #only for bev vertical density
from scipy import ndimage #only for bev max height
from sklearn.neighbors import KDTree # only for ground normalisation
# Idea: insert another class which, given a list of pointclouds, splits the data into training and validation sets upon init.
# It would contain a get_batch method which, when called, pulls a random batch of pointclouds as a list and loops through it using ProcessPC.
def gaussian_kernel(size,mu=0.0,sigma=1.0):
x, y = np.meshgrid(np.linspace(-1, 1, size), np.linspace(-1, 1, size))
d = np.sqrt(x * x + y * y)
g = np.exp(-((d - mu) ** 2 / (2.0 * sigma ** 2)))
return g
class iterator_points_pointlabels():
def __init__(self, xyz_list=None,labels=None,returns=None, numClasses=None, batchsize=None ):
self.data_num = len(xyz_list)
if numClasses is None:
self.nb_class = np.size(np.unique(labels[0])) # only counts number of classes in first pcd
else:
self.nb_class = numClasses
self.pc_list = []
for i in range(len(xyz_list)):
if (labels is None) & (returns is None):
self.pc_list.append(ProcessPC(xyz_data=xyz_list[i]))
elif (labels is None) & (returns is not None):
self.pc_list.append(ProcessPC(xyz_data=xyz_list[i], pc_returns=returns[i]))
elif (labels is not None) & (returns is None):
self.pc_list.append(ProcessPC(xyz_data=xyz_list[i], pc_labels=labels[i]))
else:
self.pc_list.append(ProcessPC(xyz_data=xyz_list[i],pc_labels=labels[i],pc_returns=returns[i]))
if labels is None:
self.flag_label = 0
else:
self.flag_label = 1
if returns is None:
self.flag_return = 0
else:
self.flag_return = 1
if batchsize is not None:
self.batchsize = batchsize
else:
self.batchsize = self.data_num
self.current_batch = np.arange(self.batchsize)
def next_batch( self, augment=False, pre_process=False, addAxis=False ):
# augments the current batch once
# use with network_pointnet_bs1 (only works for batchsize of 1)
pc_temp = copy.deepcopy(self.pc_list[self.current_batch[0]])
if augment==True:
pc_temp.augmentation_Rotation()
pc_temp.augmentation_Flipping()
pc_batch = pc_temp.pc.copy()
if addAxis: # if using bsK, want output to be K x 1 x N x 3, where k=1
pc_batch = pc_batch[np.newaxis,...]
pc_batch = pc_batch[np.newaxis, ...]
if pre_process:
pc_batch[0, ...] -= np.min(pc_batch[i, ...], axis=0)
pc_batch[0, ...] /= np.max(pc_batch[i, ...])
if self.flag_label == 1:
labels = np.array(pc_temp.pc_labels)
labels_onehot = np.zeros((len(labels), self.nb_class))
labels_onehot[np.arange(len(labels)), labels] = 1
if self.flag_return == 1:
pc_returns_batch = np.array(pc_temp.pc_returns)
# update current batch
self.current_batch += self.batchsize
self.current_batch[self.current_batch >= self.data_num] = \
self.current_batch[self.current_batch >= self.data_num] - self.data_num
if (self.flag_label == 1) & (self.flag_return == 0):
return pc_batch, labels_onehot, labels
elif (self.flag_label == 0) & (self.flag_return == 1):
return pc_batch, pc_returns_batch
elif (self.flag_label == 1) & (self.flag_return == 1):
return pc_batch, labels_onehot, labels, pc_returns_batch
else:
return pc_batch
def next_batch2( self, augment=False, numAugs=0, pre_process=False, angle_x_randLim=0, angle_y_randLim=0, normalise=False, newAxis_loc=1 ):
# appends numAugs different augmentations to the current batch
n_points = self.pc_list[self.current_batch[0]].pc.shape[0]
pc_batch = np.empty( ( [self.batchsize*(numAugs+1),n_points,3] ) )
labels = np.empty( ( [self.batchsize*(numAugs+1),n_points] ) )
returns_batch = np.empty( ( [self.batchsize*(numAugs+1),n_points] ) )
for j in range(numAugs+1):
for i in range(self.batchsize):
pc_temp = copy.deepcopy(self.pc_list[self.current_batch[i]])
if (augment==True)&(j>0): # leave one set un-augmented
pc_temp.augmentation_Rotation(angle_x_randLim=angle_x_randLim, angle_y_randLim=angle_y_randLim)
pc_temp.augmentation_Flipping()
if normalise:
pc_temp.normalisation()
pc_batch[(j*self.batchsize)+i,...] = pc_temp.pc.copy()
labels[(j*self.batchsize)+i,...] = pc_temp.pc_labels.copy()
if self.flag_return == 1:
returns_batch[(j * self.batchsize) + i, ...] = pc_temp.pc_returns.copy()
# pre-process
if pre_process:
pc_batch[0, ...] -= np.min(pc_batch[i, ...], axis=0)
pc_batch[0, ...] /= np.max(pc_batch[i, ...])
if newAxis_loc == 1:
pc_batch = pc_batch[:, np.newaxis, ...]
elif newAxis_loc == 0:
pc_batch = pc_batch[np.newaxis, ...]
#labels = np.array(pc_temp.pc_labels)
#labels = np.tile(labels[:,np.newaxis],(1,numAugs+1))
labels_onehot = np.zeros((self.batchsize*(numAugs+1) , n_points , self.nb_class))
#labels_onehot[:,np.arange(n_points), labels.astype(np.int).T] = 1
xv, yv = np.meshgrid(np.arange(0, (self.batchsize*(numAugs+1))), np.arange(0, n_points))
labels_onehot[np.ravel(xv), np.ravel(yv), np.ravel( labels.astype(np.int).T )] = 1
#labels = np.tile(labels[np.newaxis,:],[numAugs+1,1])
# update current batch
self.current_batch += self.batchsize
self.current_batch[self.current_batch >= self.data_num] = \
self.current_batch[self.current_batch >= self.data_num] - self.data_num
if self.flag_return == 1:
return pc_batch, labels_onehot,labels, returns_batch
else:
return pc_batch, labels_onehot,labels
def get_pc(self, idx=[1], augment=False, angle_x=0, angle_y=0, angle_z=30, pre_process=False, addAxis=False, normalise=False, newAxis_loc=1 ):
# default not to augment, but if so you can specify the rotations. Default rotation only about z
n_points = self.pc_list[idx[0]].pc.shape[0]
pc_batch = np.empty(([len(idx), n_points, 3]))
if self.flag_label==1:
labels = np.empty(([len(idx), n_points]))
if self.flag_return == 1:
returns_batch = np.empty(([len(idx), n_points]))
for i in range(len(idx)):
pc_temp = copy.deepcopy(self.pc_list[idx[i]])
if augment==True:
pc_temp.augmentation_Rotation(angle_x=angle_x, angle_y=angle_y, angle_z=angle_z)
#pc_temp.augmentation_Flipping()
if normalise:
pc_temp.normalisation()
pc_batch[i,...] = pc_temp.pc.copy()
if self.flag_label == 1:
labels[i,:] = np.array(pc_temp.pc_labels)
if self.flag_return == 1:
returns_batch[i,:] = np.array(pc_temp.pc_returns)
if addAxis:
if newAxis_loc == 1:
pc_batch = pc_batch[:, np.newaxis, ...]
elif newAxis_loc == 0:
pc_batch = pc_batch[np.newaxis, ...]
if self.flag_label == 1:
labels_onehot = np.zeros((len(idx), n_points, self.nb_class))
xv, yv = np.meshgrid(np.arange(0, (len(idx))), np.arange(0, n_points))
labels_onehot[np.ravel(xv), np.ravel(yv), np.ravel(labels.astype(np.int).T)] = 1
if (self.flag_label == 1) & (self.flag_return == 0):
return pc_batch, labels_onehot, labels
elif (self.flag_label == 0) & (self.flag_return == 1):
return pc_batch, returns_batch
elif (self.flag_label == 1) & (self.flag_return == 1):
return pc_batch, labels_onehot, labels, returns_batch
else:
return pc_batch
def indexed_batch( self, idx=[1], augment=False, numAugs=0, pre_process=False, angle_x_randLim=0, angle_y_randLim=0, normalise=False, newAxis_loc=1, adapt_num_classes=False ):
# appends numAugs different augmentations to the current batch
n_points = self.pc_list[idx[0]].pc.shape[0]
pc_batch = np.empty( ( [len(idx)*(numAugs+1),n_points,3] ) )
labels = np.empty( ( [len(idx)*(numAugs+1),n_points] ) )
returns_batch = np.empty(([len(idx)*(numAugs+1),n_points]))
for j in range(numAugs+1):
for i in range(len(idx)):
pc_temp = copy.deepcopy(self.pc_list[idx[i]])
if (augment==True)&(j>0): # leave one set un-augmented
pc_temp.augmentation_Rotation(angle_x_randLim=angle_x_randLim, angle_y_randLim=angle_y_randLim)
pc_temp.augmentation_Flipping()
#pc_temp.augmentation_Shuffle()
if normalise:
pc_temp.normalisation()
pc_batch[(j*len(idx))+i,...] = pc_temp.pc.copy()
labels[(j*len(idx))+i,...] = pc_temp.pc_labels.copy()
if self.flag_return == 1:
returns_batch[(j*len(idx))+i,...] = pc_temp.pc_returns.copy()
# pre-process
if pre_process:
pc_batch[0, ...] -= np.min(pc_batch[i, ...], axis=0)
pc_batch[0, ...] /= np.max(pc_batch[i, ...])
if newAxis_loc == 1:
pc_batch = pc_batch[:,np.newaxis, ...]
elif newAxis_loc == 0:
pc_batch = pc_batch[np.newaxis, ...]
#labels = np.array(pc_temp.pc_labels)
#labels = np.tile(labels[:,np.newaxis],(1,numAugs+1))
if adapt_num_classes: # allows number of classes (and hence size of onehot) to be modified each batch
self.nb_class = len(np.unique(labels))
labels_onehot = np.zeros((len(idx)*(numAugs+1) , n_points , self.nb_class))
#labels_onehot[:,np.arange(n_points), labels.astype(np.int).T] = 1
xv, yv = np.meshgrid(np.arange(0, (len(idx)*(numAugs+1))), np.arange(0, n_points))
labels_onehot[np.ravel(xv), np.ravel(yv), np.ravel( labels.astype(np.int).T )] = 1
#labels = np.tile(labels[np.newaxis,:],[numAugs+1,1])
if self.flag_return == 1:
return pc_batch, labels_onehot,labels, returns_batch
else:
return pc_batch, labels_onehot,labels
def reset_batch(self):
""" Resets the current batch to the beginning.
"""
self.current_batch = np.arange(self.batchsize)
def shuffle(self):
""" Randomly permutes all dataSamples (and corresponding targets).
"""
random.shuffle(self.pc_list)
class iterator_points():
def __init__(self, xyz_list=None,labels=None, batchsize=None ):
self.data_num = len(xyz_list)
self.nb_class = np.max(labels)+1
self.first_batch = 1
self.current_batch = []
self.labels = labels
self.pc_list = []
for i in range(len(xyz_list)):
self.pc_list.append(ProcessPC(xyz_list[i]))
if batchsize is not None:
self.batchsize = batchsize
else:
self.batchsize = self.data_num
self.current_batch = np.arange(self.batchsize)
def next_batch( self, augment=False, pre_process=False ):
# augments the current batch once
if self.first_batch:
self.current_batch = (np.arange(self.batchsize)).tolist()
self.first_batch = 0
else:
self.current_batch = (np.array(self.current_batch) + self.batchsize).tolist()
if self.current_batch[-1] > (self.data_num - self.batchsize):
self.first_batch = 1
pc_temp = copy.deepcopy(self.pc_list[self.current_batch[0]])
if augment==True:
pc_temp.augmentation_Rotation()
pc_temp.augmentation_Flipping()
pc_batch = pc_temp.pc.copy()
pc_batch = pc_batch[np.newaxis,...]
# pre-process - scale between [-1,1]
#if pre_process:
#og_batch = 2*(og_batch-0.5)
labels = np.array(self.labels)
labels = labels[self.current_batch]
labels_onehot = np.zeros((len(labels), self.nb_class))
labels_onehot[np.arange(len(labels)), labels] = 1
# update current batch
self.current_batch += self.batchsize
self.current_batch[self.current_batch >= self.data_num] = \
self.current_batch[self.current_batch >= self.data_num] - self.data_num
return pc_batch, labels_onehot, labels
def next_batch2( self, augment=False, numAugs=0, pre_process=False ):
# appends numAugs different augmentations to the current batch
if self.first_batch:
self.current_batch = (np.arange(self.batchsize)).tolist()
self.first_batch = 0
else:
self.current_batch = (np.array(self.current_batch) + self.batchsize).tolist()
if self.current_batch[-1] > (self.data_num - self.batchsize):
self.first_batch = 1
n_points = self.pc_list[self.current_batch[0]].pc.shape[0]
pc_batch = np.empty( ( [self.batchsize*(numAugs+1),n_points,3] ) )
for j in range(numAugs+1):
pc_temp = copy.deepcopy(self.pc_list[self.current_batch[0]])
if (augment==True)&(j>1): # leave one set un-augmented
pc_temp.augmentation_Rotation()
pc_temp.augmentation_Flipping()
pc_batch[j,...] = pc_temp.pc.copy()
# pre-process - scale between [-1,1]
#if pre_process:
#og_batch = 2*(og_batch-0.5)
labels = np.array(self.labels)
labels = labels[self.current_batch]
labels = np.tile(labels,(numAugs+1))
labels_onehot = np.zeros((len(labels), self.nb_class))
labels_onehot[np.arange(len(labels)), labels] = 1
# update current batch
self.current_batch += self.batchsize
self.current_batch[self.current_batch >= self.data_num] = \
self.current_batch[self.current_batch >= self.data_num] - self.data_num
return pc_batch, labels_onehot, labels
def get_pc(self, idx=[1], augment=False, angle_x=0, angle_y=0, angle_z=30):
# default not to augment, but if so you can specify the rotations. Default rotation only about z
pc_temp = copy.deepcopy(self.pc_list[idx[0]])
if augment==True:
pc_temp.augmentation_Rotation(angle_x=angle_x, angle_y=angle_y, angle_z=angle_z)
#pc_temp.augmentation_Flipping()
pc_batch = pc_temp.pc.copy()
pc_batch = pc_batch[np.newaxis, ...]
labels = np.array(self.labels)
labels = labels[idx]
labels_onehot = np.zeros((len(labels), self.nb_class))
labels_onehot[np.arange(len(labels)), labels] = 1
return pc_batch, labels_onehot, labels
def reset_batch(self):
""" Resets the current batch to the beginning.
"""
self.current_batch = np.arange(self.batchsize)
def shuffle(self):
""" Randomly permutes all dataSamples (and corresponding targets).
"""
if self.labels is not None:
zipped = list(zip(self.pc_list, self.labels))
random.shuffle(zipped)
self.pc_list, self.labels = list(zip(*zipped))
else:
random.shuffle(self.pc_list)
class iterator_binaryVoxels_pointlabels():
def __init__(self, xyz_list=None,labels=None,returns=None, res=0.1, gridSize=[32,32,32], numClasses=None, batchsize=None):
# make sure to input more than one pcd
self.data_num = len(xyz_list)
if numClasses is None:
self.nb_class = np.size(np.unique(labels[0])) # only counts number of classes in first pcd
else:
self.nb_class = numClasses
self.pc_list = []
for i in range(len(xyz_list)):
if (labels is None) & (returns is None):
self.pc_list.append(ProcessPC(xyz_data=xyz_list[i]))
elif (labels is None) & (returns is not None):
self.pc_list.append(ProcessPC(xyz_data=xyz_list[i], pc_returns=returns[i]))
elif (labels is not None) & (returns is None):
self.pc_list.append(ProcessPC(xyz_data=xyz_list[i], pc_labels=labels[i]))
else:
self.pc_list.append(ProcessPC(xyz_data=xyz_list[i],pc_labels=labels[i],pc_returns=returns[i]))
self.res=res
self.gridSize=np.array(gridSize)
if labels is None:
self.flag_label = 0
else:
self.flag_label = 1
if returns is None:
self.flag_return = 0
else:
self.flag_return = 1
if batchsize is not None:
self.batchsize = batchsize
else:
self.batchsize = self.data_num
self.current_batch = np.arange(self.batchsize)
def next_batch( self, augment=False, pre_process=False, outputOffset=False ):
# augments the current batch once
og_batch = np.empty( ( [self.batchsize,1] + self.gridSize.tolist() ) )
offset = np.empty(([self.batchsize] + [3]))
if self.flag_label == 1:
og_labels_batch = np.empty(([self.batchsize, self.nb_class+1] + self.gridSize.tolist())) #+1 for free space
if self.flag_return == 1:
og_returns_batch = np.empty(([self.batchsize, 1] + self.gridSize.tolist()))
for i in range(self.batchsize):
pc_temp = copy.deepcopy(self.pc_list[self.current_batch[i]])
if augment==True:
pc_temp.augmentation_Rotation( )
pc_temp.augmentation_Flipping( )
pc_temp.occupancyGrid_Binary( res_=self.res, gridSize_=self.gridSize )
og_batch[i,...] = pc_temp.og.copy()
offset[i, :] = pc_temp._ProcessPC__centre
if self.flag_label == 1:
pc_temp.occupancyGrid_Labels()
og_labels_batch[i, ...] = pc_temp.og_labels.copy()
if self.flag_return == 1:
pc_temp.occupancyGrid_Returns()
og_returns_batch[i,...] = pc_temp.og_returns.copy()
# pre-process - scale between [-1,1]
if pre_process:
og_batch = 2*(og_batch-0.5)
# update current batch
self.current_batch += self.batchsize
self.current_batch[self.current_batch >= self.data_num] = \
self.current_batch[self.current_batch >= self.data_num] - self.data_num
if outputOffset is False:
if (self.flag_label == 1)&(self.flag_return == 0):
return og_batch, og_labels_batch
elif (self.flag_label == 0)&(self.flag_return == 1):
return og_batch, og_returns_batch
elif (self.flag_label == 1)&(self.flag_return == 1):
return og_batch, og_labels_batch, og_returns_batch
else:
return og_batch
else:
if (self.flag_label == 1)&(self.flag_return == 0):
return og_batch, og_labels_batch, offset
elif (self.flag_label == 0)&(self.flag_return == 1):
return og_batch, og_returns_batch, offset
elif (self.flag_label == 1)&(self.flag_return == 1):
return og_batch, og_labels_batch, og_returns_batch, offset
else:
return og_batch, offset
def next_batch2( self, augment=False, numAugs=0, pre_process=False,angle_x_randLim=0, angle_y_randLim=0 ):
# appends numAugs different augmentations to the current batch
og_batch = np.empty( ( [self.batchsize*(numAugs+1),1] + self.gridSize.tolist() ) )
if self.flag_label == 1:
og_labels_batch = np.empty( ( [self.batchsize*(numAugs+1),self.nb_class+1] + self.gridSize.tolist() ) )
if self.flag_return == 1:
og_returns_batch = np.empty(([self.batchsize * (numAugs + 1), 1] + self.gridSize.tolist()))
for j in range(numAugs+1):
for i in range(self.batchsize):
pc_temp = copy.deepcopy(self.pc_list[self.current_batch[i]])
# augment pointcloud
if (augment==True)&(j>0): # leave one set un-augmented
pc_temp.augmentation_Rotation(angle_x_randLim=angle_x_randLim, angle_y_randLim=angle_y_randLim )
pc_temp.augmentation_Flipping( )
# occupancy grid
pc_temp.occupancyGrid_Binary( res_=self.res, gridSize_=self.gridSize )
og_batch[(j*self.batchsize)+i,...] = pc_temp.og.copy()
# labelled occupancy grid
if self.flag_label == 1:
pc_temp.occupancyGrid_Labels()
og_labels_batch[(j*self.batchsize)+i, ...] = pc_temp.og_labels.copy()
# occupancy grid with returns
if self.flag_return == 1:
pc_temp.occupancyGrid_Returns()
og_returns_batch[(j*self.batchsize)+i,...] = pc_temp.og_returns.copy()
# pre-process - scale between [-1,1]
if pre_process:
og_batch = 2*(og_batch-0.5)
# update current batch
self.current_batch += self.batchsize
self.current_batch[self.current_batch >= self.data_num] = \
self.current_batch[self.current_batch >= self.data_num] - self.data_num
if (self.flag_label == 1) & (self.flag_return == 0):
return og_batch, og_labels_batch
elif (self.flag_label == 0) & (self.flag_return == 1):
return og_batch, og_returns_batch
elif (self.flag_label == 1) & (self.flag_return == 1):
return og_batch, og_labels_batch, og_returns_batch
else:
return og_batch
def get_pc(self, idx=[1,2,3], augment=False, angle_x=0, angle_y=0, angle_z=30, angle_x_randLim=0, angle_y_randLim=0, outputOffset=False ):
# default not to augment, but if so you can specify the rotations. Default rotation only about z. set to none for random rotation
# useful for inference because doesnt need labels
og_batch = np.empty( ( [len(idx),1] + self.gridSize.tolist() ) )
offset = np.empty( ( [len(idx)] + [3] ) )
if self.flag_label == 1:
og_labels_batch = np.empty(([len(idx), self.nb_class+1] + self.gridSize.tolist()))
if self.flag_return == 1:
og_returns_batch = np.empty(([len(idx), 1] + self.gridSize.tolist()))
for i in range(len(idx)):
pc_temp = copy.deepcopy(self.pc_list[idx[i]])
if augment==True:
pc_temp.augmentation_Rotation(angle_x=angle_x, angle_y=angle_y, angle_z=angle_z, angle_x_randLim=angle_x_randLim, angle_y_randLim=angle_y_randLim )
#pc_temp.augmentation_Flipping()
pc_temp.occupancyGrid_Binary( res_=self.res, gridSize_=self.gridSize )
og_batch[i,...] = pc_temp.og.copy()
offset[i,:] = pc_temp._ProcessPC__centre
if self.flag_label == 1:
pc_temp.occupancyGrid_Labels()
og_labels_batch[i,...] = pc_temp.og_labels.copy()
if self.flag_return == 1:
pc_temp.occupancyGrid_Returns()
og_returns_batch[i,...] = pc_temp.og_returns.copy()
if outputOffset is False:
if (self.flag_label == 1)&(self.flag_return == 0):
return og_batch, og_labels_batch
elif (self.flag_label == 0)&(self.flag_return == 1):
return og_batch, og_returns_batch
elif (self.flag_label == 1)&(self.flag_return == 1):
return og_batch, og_labels_batch, og_returns_batch
else:
return og_batch
else:
if (self.flag_label == 1)&(self.flag_return == 0):
return og_batch, og_labels_batch, offset
elif (self.flag_label == 0)&(self.flag_return == 1):
return og_batch, og_returns_batch, offset
elif (self.flag_label == 1)&(self.flag_return == 1):
return og_batch, og_labels_batch, og_returns_batch, offset
else:
return og_batch, offset
def reset_batch(self):
""" Resets the current batch to the beginning.
"""
self.current_batch = np.arange(self.batchsize)
def shuffle(self):
""" Randomly permutes all dataSamples (and corresponding targets).
"""
random.shuffle(self.pc_list)
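# Usage sketch (not part of the original library): build an iterator over two toy
# pointclouds and pull one augmented batch of labelled occupancy grids. With
# numClasses=2 the labelled grid has nb_class+1 = 3 channels
# (background / foliage+clutter / stem), matching occupancyGrid_Labels above.
def _iterator_og_example():
    xyz = [np.random.rand(1000, 3) for _ in range(2)]
    lab = [np.random.randint(0, 4, 1000) for _ in range(2)]
    it = iterator_binaryVoxels_pointlabels(xyz_list=xyz, labels=lab, res=0.1,
                                           gridSize=[32, 32, 32], numClasses=2,
                                           batchsize=2)
    og, og_labels = it.next_batch(augment=True)
    return og.shape, og_labels.shape  # (2, 1, 32, 32, 32), (2, 3, 32, 32, 32)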
class iterator_binaryVoxels(): # onehot, object labels
def __init__(self, xyz_list=None,labels=None, res=0.1, gridSize=[32,32,32], batchsize=None):
self.data_num = len(xyz_list)
self.nb_class = np.max(labels)+1
self.labels = labels
self.pc_list = []
for i in range(len(xyz_list)):
self.pc_list.append(ProcessPC(xyz_list[i]))
self.res=res
self.gridSize=np.array(gridSize)
if batchsize is not None:
self.batchsize = batchsize
else:
self.batchsize = self.data_num
self.current_batch = np.arange(self.batchsize)
def next_batch( self, augment=False, pre_process=False ):
# augments the current batch once
og_batch = np.empty( ( [self.batchsize,1] + self.gridSize.tolist() ) )
for i in range(self.batchsize):
pc_temp = copy.deepcopy(self.pc_list[self.current_batch[i]])
if augment==True:
pc_temp.augmentation_Rotation()
pc_temp.augmentation_Flipping()
pc_temp.occupancyGrid_Binary( res_=self.res, gridSize_=self.gridSize )
og_batch[i,...] = pc_temp.og.copy()
# pre-process - scale between [-1,1]
if pre_process:
og_batch = 2*(og_batch-0.5)
labels = np.array(self.labels)
labels = labels[self.current_batch]
labels_onehot = np.zeros((len(labels), self.nb_class))
labels_onehot[np.arange(len(labels)), labels] = 1
# update current batch
self.current_batch += self.batchsize
self.current_batch[self.current_batch >= self.data_num] = \
self.current_batch[self.current_batch >= self.data_num] - self.data_num
return og_batch, labels_onehot, labels
def next_batch2( self, augment=False, numAugs=0, pre_process=False ):
# appends numAugs different augmentations to the current batch
og_batch = np.empty( ( [self.batchsize*(numAugs+1),1] + self.gridSize.tolist() ) )
for j in range(numAugs+1):
for i in range(self.batchsize):
pc_temp = copy.deepcopy(self.pc_list[self.current_batch[i]])
if (augment==True)&(j>1): # leave one set un-augmented
pc_temp.augmentation_Rotation()
pc_temp.augmentation_Flipping()
pc_temp.occupancyGrid_Binary( res_=self.res, gridSize_=self.gridSize )
og_batch[(j*self.batchsize)+i,...] = pc_temp.og.copy()
# pre-process - scale between [-1,1]
if pre_process:
og_batch = 2*(og_batch-0.5)
labels = np.array(self.labels)
labels = labels[self.current_batch]
labels = np.tile(labels,(numAugs+1))
labels_onehot = np.zeros((len(labels), self.nb_class))
labels_onehot[np.arange(len(labels)), labels] = 1
# update current batch
self.current_batch += self.batchsize
self.current_batch[self.current_batch >= self.data_num] = \
self.current_batch[self.current_batch >= self.data_num] - self.data_num
return og_batch, labels_onehot, labels
def get_pc(self, idx=[1,2,3], augment=False, angle_x=0, angle_y=0, angle_z=30):
# default not to augment, but if so you can specify the rotations. Default rotation only about z
og_batch = np.empty( ( [len(idx),1] + self.gridSize.tolist() ) )
for i in range(len(idx)):
pc_temp = copy.deepcopy(self.pc_list[idx[i]])
if augment==True:
pc_temp.augmentation_Rotation(angle_x=angle_x, angle_y=angle_y, angle_z=angle_z)
#pc_temp.augmentation_Flipping()
pc_temp.occupancyGrid_Binary( res_=self.res, gridSize_=self.gridSize )
og_batch[i,...] = pc_temp.og.copy()
labels = np.array(self.labels)
labels = labels[idx]
labels_onehot = np.zeros((len(labels), self.nb_class))
labels_onehot[np.arange(len(labels)), labels] = 1
return og_batch, labels_onehot, labels
def reset_batch(self):
""" Resets the current batch to the beginning.
"""
self.current_batch = np.arange(self.batchsize)
def shuffle(self):
""" Randomly permutes all dataSamples (and corresponding targets).
"""
if self.labels is not None:
zipped = list(zip(self.pc_list, self.labels))
random.shuffle(zipped)
self.pc_list, self.labels = list(zip(*zipped))
else:
random.shuffle(self.pc_list)
class ProcessPC():
# this class is designed to store one pointcloud.
# it can augment that pointcloud and convert it to an occupancy grid
def __init__( self, xyz_data = None , pc_labels=None , pc_returns=None ):
# accepts N x 3 pointcloud array (x,y,z) and stores it
self.og = [] # occupancy grid
self.og_labels = []
self.og_returns = []
self.raster = []
if np.shape(xyz_data)[1]==3:
self.pc = xyz_data # pointcloud
else:
raise ValueError('Input pointcloud incorrect.')
self.pc_labels = pc_labels
self.pc_returns = pc_returns
self.bev_verticalDensity = []
self.bev_maxHeight = []
self.bev_meanReturn = []
self.__flag_recentred = False
self.__index_x = 0
self.__index_y = 0
self.__index_z = 0
self.__centre = [0,0,0]
def recentre( self, centre_ ):
self.pc -= centre_
self.__flag_recentred = True
self.__centre = centre_
def occupancyGrid_Binary( self, res_=0.1, gridSize_=np.array( (32,32,32) ), centre=None ):
""" Convert point cloud to occupancy grid representation
Assumes point cloud has been recentred
- input:
pc_: point cloud Mx3 numpy matrix
res_: resolution of each cell in metres
gridSize_: size of the occupancy grid (DEFAULT: 32x32x32)
- output:
---: occupancy grid is a class variable
Note: this fnc modifies the input pcd, so make a copy
"""
# initialise grid
self.og = np.zeros( gridSize_ )
# recentre the point cloud about the mean if it is not done already
if not self.__flag_recentred:
if centre is None: # no centre specified
c = np.mean( self.pc, axis=0 )
c[2] = np.max( self.pc[:,2] )
self.recentre( c )
if isinstance(res_,(list)):
h_offset = gridSize_[2] / 2. * res_[2]
else:
h_offset = gridSize_[2] / 2. * res_
self.pc[:, 2] += h_offset # additional offset
self.__centre[2] -= h_offset # update offset info
else:
c = centre # centre specified
self.recentre(c)
# get index of points within grid
if isinstance(res_,(list)): # possible to specify different res for each dimension
self.__index_x = np.array(np.clip(np.floor((self.pc[:, 0] - (-gridSize_[0] / 2. * res_[0])) / res_[0]), 0, gridSize_[0] - 1), dtype=int)
self.__index_y = np.array(np.clip(np.floor((self.pc[:, 1] - (-gridSize_[1] / 2. * res_[1])) / res_[1]), 0, gridSize_[1] - 1), dtype=int)
self.__index_z = np.array(np.clip(np.floor((self.pc[:, 2] - (-gridSize_[2] / 2. * res_[2])) / res_[2]), 0, gridSize_[2] - 1), dtype=int)
else:
self.__index_x = np.array( np.clip( np.floor( ( self.pc[:,0]-(-gridSize_[0]/2.*res_) )/res_ ), 0, gridSize_[0]-1 ), dtype=int)
self.__index_y = np.array( np.clip( np.floor( ( self.pc[:,1]-(-gridSize_[1]/2.*res_) )/res_ ), 0, gridSize_[1]-1 ), dtype=int)
self.__index_z = np.array( np.clip( np.floor( ( self.pc[:,2]-(-gridSize_[2]/2.*res_) )/res_ ), 0, gridSize_[2]-1 ), dtype=int)
# set cells to occupied
self.og[self.__index_x,self.__index_y,self.__index_z] = 1.
#self.og_labels = np.zeros( np.hstack( (3,gridSize_) ) , dtype=np.int) #2 is for one-hot, two_classes
self.og_labels = np.zeros(np.hstack((3, gridSize_)), dtype=np.int)
self.og_returns = np.zeros( gridSize_ )
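    # Example (sketch, not original code): typical call sequence for this method --
    #   p = ProcessPC(xyz_data=xyz, pc_labels=lab, pc_returns=ret)
    #   p.occupancyGrid_Binary(res_=0.1, gridSize_=np.array((32, 32, 32)))
    #   p.occupancyGrid_Labels(); p.occupancyGrid_Returns()
    # after which p.og, p.og_labels and p.og_returns hold the voxelised cloud.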
def og_label_util(self, cube, class_label, occupancy):
# cube - which cube to work on
# class_label - class number of point
# occupancy - binary
self.og_labels[cube, self._ProcessPC__index_x[self.pc_labels == class_label], self._ProcessPC__index_y[
self.pc_labels == class_label],
self._ProcessPC__index_z[self.pc_labels == class_label]] = occupancy # foliage
def occupancyGrid_Labels( self ):
# cube 0: background, 1: foliage_clutter, 2:stem
# class label 0: foliage, 1: lower stem, 2: upper stem, 3: clutter
self.og_labels[0, ...] = 1
self.og_label_util(1, 0, 1)
self.og_label_util(1, 3, 1)
self.og_label_util(0, 0, 0)
self.og_label_util(0, 3, 0)
self.og_label_util(2, 1, 1)
self.og_label_util(2, 2, 1)
self.og_label_util(0, 1, 0)
self.og_label_util(1, 1, 0)
self.og_label_util(0, 2, 0)
self.og_label_util(1, 2, 0)
def occupancyGrid_Returns( self ):
self.og_returns[self.__index_x, self.__index_y, self.__index_z] = self.pc_returns
def writeOG(self, filename_='/tmp/og.txt'):
f = open(filename_, 'w')
for i in range(len(self.__index_x)):
f.write(
str(self.__index_x[i]) + ',' + str(self.__index_y[i]) + ',' + str(self.__index_z[i]) + ',' + str(1) + '\n')
f.close()
def augmentation_Rotation( self, angle_x=None, angle_y=None, angle_z=None, angle_x_randLim=10, angle_y_randLim=10 ):
""" Augment the input point cloud by randomly rotating it
- input:
pc_: point cloud Mx3 numpy matrix
rotation (degrees): default rotation is random. Put 0 if you want to fix rotation about an axis
*add in z lim, but set default to full circle
- output:
---: augmentations are applied in place
"""
# randomly sample angles
if angle_x==None:
angle_x = ( np.random.rand(1)-0.5 )*np.deg2rad( float(angle_x_randLim) ) # default max roll, pitch rotation is 10.
else:
angle_x = np.deg2rad( float(angle_x) )
if angle_y == None:
angle_y = (np.random.rand(1) - 0.5) * np.deg2rad( float(angle_y_randLim) )
else:
angle_y = np.deg2rad( float(angle_y) )
if angle_z == None:
angle_z = np.random.rand(1)[0]*2.*np.pi # rotate full circle
else:
angle_z = np.deg2rad(float(angle_z))
# generation rotation matrix
Rx = self.__RotationX( angle_x )
        Ry = self.__RotationY( angle_y )
Rz = self.__RotationZ( angle_z )
R = np.dot( np.dot( Rz, Ry ), Rx )
# rotate point cloud
self.pc = np.vstack( [ self.pc.T, np.ones( self.pc.shape[0] ) ] ) # append ones
self.pc = np.dot( R, self.pc ) # rotate -> pc_rot = R*X
self.pc = self.pc[0:3,:].T # remove ones
def __RotationX( self, angle_ ):
""" 3d rotation about the x axis
- input:
angle_: angle of rotation in radians
- output:
Rx: rotation matrix (no translation)
"""
Rx = np.array( [ [ 1., 0., 0., 0. ],
[ 0., np.cos( angle_ ), -np.sin( angle_ ), 0. ],
[ 0., np.sin( angle_ ), np.cos( angle_ ), 0. ],
[ 0., 0., 0., 1. ]
] )
return Rx
def __RotationY( self, angle_ ):
""" 3d rotation about the y axis
- input:
angle_: angle of rotation in radians
- output:
Ry: rotation matrix (no translation)
"""
Ry = np.array( [ [ np.cos( angle_ ), 0., np.sin( angle_ ), 0. ],
[ 0., 1., 0., 0. ],
[ -np.sin( angle_ ), 0., np.cos( angle_ ), 0. ],
[ 0., 0., 0., 1. ]
] )
return Ry
def __RotationZ( self, angle_ ):
""" 3d rotation about the z axis
- input:
angle_: angle of rotation in radians
- output:
            Rz: rotation matrix (no translation)
"""
Rz = np.array( [ [ np.cos( angle_ ), -np.sin( angle_ ), 0., 0. ],
[ np.sin( angle_ ), np.cos( angle_ ), 0., 0. ],
[ 0., 0., 1., 0. ],
[ 0., 0., 0., 1. ]
] )
return Rz
def augmentation_Translation( self ):
""" Augment the input point cloud by randomly translating it
- input:
pc_: point cloud Mx3 numpy matrix
- output:
---: augmentations are applied in place
"""
pass
def augmentation_Flipping( self ):
""" Augment the input point cloud by randomly flipping it
- input:
pc_: point cloud Mx3 numpy matrix
- output:
---: augmentations are applied in place
"""
selectFlip = np.int( np.random.rand(1)[0]*4. )
# 0 - nothing
if selectFlip == 1: # about y axis
self.pc[:,0] *= -1.
elif selectFlip == 2: # about x axis
self.pc[:,1] *= -1.
elif selectFlip == 3: # about x and y axis
self.pc[:,[0,1]] *= -1.
def augmentation_Shuffle( self ):
temp = np.hstack((self.pc, self.pc_labels[:,np.newaxis]))
np.random.shuffle(temp)
self.pc = temp[:,:3]
self.pc_labels = temp[:,3]
def normalisation( self ):
l = self.pc.shape[0]
centroid = np.mean(self.pc, axis=0)
self.pc = self.pc - centroid
m = np.max(np.sqrt(np.sum(self.pc ** 2, axis=1)))
self.pc = self.pc / m
def vertical_density( self , support_window=1, weighted=True ):
if support_window==1:
self.bev_verticalDensity = np.sum(self.og, axis=2) / np.shape(self.og)[2]
else:
self.bev_verticalDensity = np.sum(self.og, axis=2)
if weighted is True:
g = gaussian_kernel(support_window, mu=0.0, sigma=1.0)
self.bev_verticalDensity = signal.convolve2d(self.bev_verticalDensity,g,mode='same')
self.bev_verticalDensity = self.bev_verticalDensity / (np.shape(self.og)[2] * np.sum(g))
else:
self.bev_verticalDensity = signal.convolve2d(self.bev_verticalDensity,np.ones((support_window,support_window)),mode='same')
self.bev_verticalDensity = self.bev_verticalDensity / (np.shape(self.og)[2]*(support_window**2))
def max_height( self, support_window=1, weighted=True ):
if support_window==1: #old way
self.bev_maxHeight = np.zeros((np.shape(self.og)[0],np.shape(self.og)[1]))
for i in range(np.shape(self.og)[0]):
for j in range(np.shape(self.og)[1]):
if np.sum(self.og[i,j,:]>0) > 0:
self.bev_maxHeight[i,j] = np.where(self.og[i,j,:]>0)[0][0]
else: #new way
self.bev_maxHeight = np.zeros((np.shape(self.og)[0], np.shape(self.og)[1]))
non_empties = np.where(self.og > 0)
self.bev_maxHeight[non_empties[0], non_empties[1]] = non_empties[2]
if weighted is True: # dont do these for support_window==1
g = gaussian_kernel(support_window, mu=0.0, sigma=1.0)
self.bev_maxHeight = ndimage.maximum_filter(self.bev_maxHeight, footprint=g>0.6)
else:
self.bev_maxHeight = ndimage.maximum_filter(self.bev_maxHeight, footprint=np.ones((support_window,support_window)))
def mean_returns( self, support_window=1, weighted=True ):
if support_window==1:
self.bev_meanReturn = np.mean(self.og_returns, axis=2)
else:
self.bev_meanReturn = np.sum(self.og_returns, axis=2)
if weighted is True:
g = gaussian_kernel(support_window, mu=0.0, sigma=1.0)
self.bev_meanReturn = signal.convolve2d(self.bev_meanReturn,g,mode='same')
self.bev_meanReturn = self.bev_meanReturn / (np.shape(self.og)[2] * np.sum(g))
else:
self.bev_meanReturn = signal.convolve2d(self.bev_meanReturn,np.ones((support_window, support_window)),mode='same')
self.bev_meanReturn = self.bev_meanReturn / (np.shape(self.og)[2] * (support_window ** 2))
def max_returns( self, support_window=1, weighted=True ):
self.bev_maxReturn = np.max(self.og_returns, axis=2)
if support_window>1:
if weighted is True:
g = gaussian_kernel(support_window, mu=0.0, sigma=1.0)
self.bev_maxReturn = signal.convolve2d(self.bev_maxReturn,g,mode='same')
else:
self.bev_maxReturn = signal.convolve2d(self.bev_maxReturn,np.ones((support_window, support_window)),mode='same')
def rasterise( self, res_=0.1, gridSize_=np.array( (32,32) ) ):
""" Convert point cloud to 2D raster representation
Assumes point cloud has been recentred
- input:
pc_: point cloud Mx3 numpy matrix
res_: resolution of each cell in metres
gridSize_: size of the occupancy grid (DEFAULT: 32x32)
- output:
---: 2D raster is a class variable
"""
# initialise grid
self.raster = np.zeros( gridSize_ )
# recentre the point cloud about the mean if it is not done already
if not self.__flag_recentred:
c = np.mean( self.pc, axis=0 )
self.recentre( c )
# get index of points within grid
if isinstance(res_,(list)): # possible to specify different res for each dimension
self.__index_x = np.array(np.clip(np.floor((self.pc[:, 0] - (-gridSize_[0] / 2. * res_[0])) / res_[0]), 0, gridSize_[0] - 1), dtype=int)
self.__index_y = np.array(np.clip(np.floor((self.pc[:, 1] - (-gridSize_[1] / 2. * res_[1])) / res_[1]), 0, gridSize_[1] - 1), dtype=int)
else:
            self.__index_x = np.array( np.clip( np.floor( ( self.pc[:,0]-(-gridSize_[0]/2.*res_) )/res_ ), 0, gridSize_[0]-1 ), dtype=int)
import json
from typing import Union, Optional, Tuple, List
import numpy as np
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer, TfidfTransformer
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from shared import LANG_TO_INT
class DataSplitter:
def __init__(self, path: str, vectorizer: Optional[Union[DictVectorizer, TfidfVectorizer, CountVectorizer]] = None, seed: Optional[int] = None, scale: bool = True):
self.data_path = path
self.vectorizer = vectorizer or DictVectorizer(sparse=False)
self.transformer = TfidfTransformer() if type(self.vectorizer) == CountVectorizer else None
self.scale = type(self.vectorizer) not in (TfidfVectorizer, CountVectorizer) and scale
self.scaler = StandardScaler()
self.random_seed = seed
def collect_features_data(self) -> Tuple[Union[np.ndarray, List[str]], np.ndarray]:
if type(self.vectorizer) == DictVectorizer:
return self._collect_dict_vectorizer_features()
elif type(self.vectorizer) in (TfidfVectorizer, CountVectorizer):
return self._collect_tfidf_features()
else:
raise NotImplementedError
def _collect_dict_vectorizer_features(self) -> Tuple[np.ndarray, np.ndarray]:
examples = []
ys = []
with open(self.data_path, "r") as file:
for line in file:
info = json.loads(line)
examples.append(info["features"])
ys.append(LANG_TO_INT[info["lang"]])
        return np.array(examples), np.array(ys)
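# Usage sketch (not part of the original module): vectorise hand-crafted JSON
# features and split them; the path below is a placeholder.
def _data_splitter_example():
    splitter = DataSplitter("data/features.jsonl", seed=13)
    X, y = splitter.collect_features_data()
    X = splitter.vectorizer.fit_transform(X)
    return train_test_split(X, y, test_size=0.25, random_state=splitter.random_seed)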
import os
import numpy as np
import pandas as pd
from scipy import stats
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from matplotlib_logo import logo
import seaborn as sns
import pysam
RC = str.maketrans('ACGT', 'TGCA')
def rev_comp(seq):
return seq.translate(RC)[::-1]
IUPAC = {
'A': 'A', 'C': 'C', 'G': 'G', 'U': 'U',
'S': 'GC', 'W': 'AU', 'R': 'AG', 'Y': 'CU',
'B': 'CGU', 'H': 'ACU',
'N': 'ACGU',
}
IUPAC_INV = {
'A': 'B', 'C': 'D', 'G': 'H', 'U': 'V',
'S': 'W', 'W': 'S', 'R': 'Y', 'Y': 'R',
'N': 'N'
}
SE_CARTOON = mpimg.imread(os.path.join(os.path.split(__file__)[0], 'data/se_cartoon.png'))
A3_CARTOON = mpimg.imread(os.path.join(os.path.split(__file__)[0], 'data/a3_cartoon.png'))
def iupac_classify(seq, consensus):
clss = []
for o, e in zip(seq, consensus):
if o in IUPAC[e]:
clss.append(e)
else:
clss.append(IUPAC_INV[e])
return ''.join(clss)
def u5_classify(donor_seq):
assert not len(donor_seq) % 2
ws = len(donor_seq) // 2
return iupac_classify(donor_seq[ws - 2: ws], 'AG')
def u6_classify(donor_seq):
assert not len(donor_seq) % 2
ws = len(donor_seq) // 2
return iupac_classify(donor_seq[ws + 2: ws + 5], 'RAG')
def acceptor_classify(acceptor_seq):
assert not len(acceptor_seq) % 2
ws = len(acceptor_seq) // 2
return iupac_classify(acceptor_seq[ws - 5: ws - 2], 'UGC')
def edit_distance(seq1, seq2):
ed = 0
for i, j in zip(seq1, seq2):
if i != j:
ed += 1
return ed
def _seq_test(seq, exp):
exp = IUPAC[exp]
return seq in exp
def perc_seq_pos(seqs, pos, nt):
i = len(seqs[0]) // 2 + pos - 1
res = [_seq_test(s[i], nt) for s in seqs]
return res, np.mean(res) * 100
def perc_seq_switch_pos(seqs_1, seqs_2, pos, nt1, nt2):
i = len(seqs_1[0]) // 2 + pos - 1
res = [_seq_test(s1[i], nt1) & _seq_test(s2[i], nt2) for s1, s2 in zip(seqs_1, seqs_2)]
    return res, np.mean(res) * 100
from greenonbrown import green_on_brown
from imutils.video import count_frames, FileVideoStream
import numpy as np
import imutils
import glob
import cv2
import csv
import os
def frame_analysis(exgFile: str, exgsFile: str, hueFile: str, exhuFile: str, HDFile: str):
baseName = os.path.splitext(os.path.basename(exhuFile))[0]
exgVideo = cv2.VideoCapture(exgFile)
print("[INFO] Loaded {}".format(exgFile))
lenexg = count_frames(exgFile, override=True) - 1
exgsVideo = cv2.VideoCapture(exgsFile)
print("[INFO] Loaded {}".format(exgsFile))
lenexgs = count_frames(exgsFile, override=True) - 1
hueVideo = cv2.VideoCapture(hueFile)
print("[INFO] Loaded {}".format(hueFile))
lenhue = count_frames(hueFile, override=True) - 1
exhuVideo = cv2.VideoCapture(exhuFile)
print("[INFO] Loaded {}".format(exhuFile))
lenexhu = count_frames(exhuFile, override=True) - 1
videoHD = cv2.VideoCapture(HDFile)
print("[INFO] Loaded {}".format(HDFile))
lenHD = count_frames(HDFile, override=True) - 1
hdFrame = None
exgFrame = None
exgsFrame = None
hueFrame = None
exhuFrame = None
hdframecount = 0
exgframecount = 0
exgsframecount = 0
hueframecount = 0
exhuframecount = 0
hdFramesAll = []
exgFramesAll = []
exgsFramesAll = []
hueFramesAll = []
exhuFramesAll = []
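    # Keyboard controls implemented in the loop below (summarising the cv2.waitKey checks):
    #   'v', 'q', 'w', 'e', 'r' -> step the HD / exg / exgs / hue / exhu streams forward
    #   'b', 'a', 's', 'd', 'f' -> step the corresponding stream backward
    #   'y'                     -> save the current exg/exgs/hue/exhu frames to images/frameGrabs/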
while True:
k = cv2.waitKey(1) & 0xFF
if k == ord('v') or hdFrame is None:
if hdframecount >= len(hdFramesAll):
hdFrame = next(frame_processor(videoHD, 'hd'))
hdFrame = imutils.resize(hdFrame, height=640)
hdFrame = imutils.rotate(hdFrame, angle=180)
hdframecount += 1
hdFramesAll.append(hdFrame)
else:
hdFrame = hdFramesAll[hdframecount]
hdframecount += 1
if k == ord('q') or exgFrame is None:
if exgframecount >= len(exgFramesAll):
exgFrame = next(frame_processor(exgVideo, 'exg'))
exgframecount += 1
exgFramesAll.append(exgFrame)
else:
exgFrame = exgFramesAll[exgframecount]
exgframecount += 1
if k == ord('w') or exgsFrame is None:
if exgsframecount >= len(exgsFramesAll):
exgsFrame = next(frame_processor(exgsVideo, 'exgs'))
exgsframecount += 1
exgsFramesAll.append(exgsFrame)
else:
exgsFrame = exgsFramesAll[exgsframecount]
exgsframecount += 1
if k == ord('e') or hueFrame is None:
if hueframecount >= len(hueFramesAll):
hueFrame = next(frame_processor(hueVideo, 'hsv'))
hueframecount += 1
hueFramesAll.append(hueFrame)
else:
hueFrame = hueFramesAll[hueframecount]
hueframecount += 1
if k == ord('r') or exhuFrame is None:
if exhuframecount >= len(exhuFramesAll):
exhuFrame = next(frame_processor(exhuVideo, 'exhu'))
exhuframecount += 1
exhuFramesAll.append(exhuFrame)
else:
exhuFrame = exhuFramesAll[exhuframecount]
exhuframecount += 1
if k == ord('b'):
if hdframecount > 0:
hdframecount -= 1
hdFrame = hdFramesAll[hdframecount]
else:
hdFrame = hdFramesAll[hdframecount]
if k == ord('a'):
if exgframecount > 0:
exgframecount -= 1
exgFrame = exgFramesAll[exgframecount]
else:
exgFrame = exgFramesAll[exgframecount]
if k == ord('s'):
if exgsframecount > 0:
exgsframecount -= 1
exgsFrame = exgsFramesAll[exgsframecount]
else:
exgsFrame = exgsFramesAll[exgsframecount]
if k == ord('d'):
if hueframecount > 0:
hueframecount -= 1
hueFrame = hueFramesAll[hueframecount]
else:
hueFrame = hueFramesAll[hueframecount]
if k == ord('f'):
if exhuframecount > 0:
exhuframecount -= 1
exhuFrame = exhuFramesAll[exhuframecount]
else:
exhuFrame = exhuFramesAll[exhuframecount]
# save current frames for the video comparison
if k == ord('y'):
cv2.imwrite('images/frameGrabs/{}_frame{}_exg.png'.format(baseName, exgframecount), exgFrame)
cv2.imwrite('images/frameGrabs/{}_frame{}_exgs.png'.format(baseName, exgsframecount), exgsFrame)
cv2.imwrite('images/frameGrabs/{}_frame{}_hue.png'.format(baseName, hueframecount), hueFrame)
cv2.imwrite('images/frameGrabs/{}_frame{}_exhu.png'.format(baseName, exhuframecount), exhuFrame)
print('[INFO] All frames written.')
# write text on each video frame
exgVis = exgFrame.copy()
exgsVis = exgsFrame.copy()
hueVis = hueFrame.copy()
exhuVis = exhuFrame.copy()
cv2.putText(exhuVis, 'exhu: {} / {}'.format(exhuframecount, lenexhu), (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
(0, 255, 0), 2)
cv2.putText(hueVis, 'hue: {} / {}'.format(hueframecount, lenhue), (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
(0, 255, 0), 2)
cv2.putText(exgsVis, 'exgs: {} / {}'.format(exgsframecount, lenexgs), (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
(0, 255, 0), 2)
cv2.putText(exgVis, 'exg: {} / {}'.format(exgframecount, lenexg), (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
(0, 255, 0), 2)
cv2.putText(hdFrame, 'HD: {} / {}'.format(hdframecount, lenHD), (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
(0, 255, 0), 2)
# stack the video frames
topRow = np.hstack((exgVis, exgsVis))
bottomRow = np.hstack((hueVis, exhuVis))
combined = np.vstack((topRow, bottomRow))
        combined = np.hstack((combined, hdFrame))
# regression y=Xw using block sparse Bayesian learning framework
#
# {y,X} are known, and w is assumed to be 'sparse' or 'block sparse'
# the indices of the non-zero blocks can be either known or unknown
#
# Authors: <NAME> <<EMAIL>>
# License: BSD 3 Clause
#
# For the BSBL-BO algorithm:
#
#@article{zhang2013extension,
# author={<NAME>. and <NAME>.},
# journal={Signal Processing, IEEE Transactions on},
# title={Extension of SBL Algorithms for the Recovery of Block Sparse Signals With Intra-Block Correlation},
# year={2013},
# month={April},
# volume={61},
# number={8},
# pages={2009-2015},
# doi={10.1109/TSP.2013.2241055},
# ISSN={1053-587X},}
#
# For the BSBL-FM algorithm:
#
#@article{liu2014energy,
# author = "<NAME> and <NAME> and <NAME> and <NAME> and <NAME>",
# title = "Energy efficient telemonitoring of physiological signals via compressed sensing: A fast algorithm and power consumption evaluation ",
# journal = "Biomedical Signal Processing and Control ",
# volume = "11",
# number = "0",
# pages = "80 - 88",
# year = "2014",
# issn = "1746-8094",
# doi = "http://dx.doi.org/10.1016/j.bspc.2014.02.010",
# url = "http://www.sciencedirect.com/science/article/pii/S1746809414000366",
# }
#
# For the application of wireless telemonitoring via CS:
#
#@article{zhang2013compressed,
# author={<NAME> and <NAME> and <NAME>. and <NAME>.},
# journal={Biomedical Engineering, IEEE Transactions on},
# title={Compressed Sensing for Energy-Efficient Wireless Telemonitoring of Noninvasive Fetal ECG Via Block Sparse Bayesian Learning},
# year={2013},
# month={Feb},
# volume={60},
# number={2},
# pages={300-309},
# doi={10.1109/TBME.2012.2226175},
# ISSN={0018-9294},}
#
from __future__ import print_function
import numpy as np
import scipy.linalg as lp
# print parameters
def print_vars(clf):
print ('----------------------------INFO------------------------------')
print ('apply lambda learning rule (learn_lambda) = %d' % clf.learn_lambda)
print ('initial guess of noise (lambda_init) = %g' % clf.lamb)
print ('BSBL algorithm exit criterion (epsilon) = %g' % clf.epsilon)
print ('BSBL maximum iterations (max_iters) = %d' % clf.max_iters)
print ('intra-block correlation (learn_type) = %d' % clf.learn_type)
print ('Gamma pruning rules (prune_gamma) = %g' % clf.prune_gamma)
print ('--------------------------------------------------------------')
# vector to column (M,1) vector
def v2m(v):
return v.reshape((v.shape[0],1))
# M = A*B*C
def dot3(A,B,C):
return np.dot(np.dot(A, B), C)
# ravel list of 'unequal arrays' into a row vector
def ravel_list(d):
r = np.array([], dtype='int')
    for i in range(d.shape[0]):
r = np.r_[r,d[i]]
return r
# extract block spacing information
def block_parse(blk_start_loc, N):
blk_len_list = np.r_[blk_start_loc[1:], N] - blk_start_loc
is_equal_block = (np.sum(np.abs(blk_len_list - blk_len_list.mean())) == 0)
return blk_len_list, is_equal_block
# exploit AR(1) correlation in Covariance matrices
# r_scale : scale the estimated coefficient
# r_init : initial guess of r when no-basis is included
# r_thd : the threshold of r to make the covariance matrix p.s.d
# the larger the block, the smaller the value
def coeff_r(Cov, gamma, index, r_scale=1.1, r_init=0.90, r_thd=0.999):
r0 = 0.
r1 = 0.
for i in index:
temp = Cov[i] / gamma[i]
r0 += temp.trace()
r1 += temp.trace(offset=1)
# this method tend to under estimate the correlation
if np.size(index) == 0:
r = r_init
else:
r = r_scale * r1/(r0 + 1e-8)
# constrain the Toeplitz matrix to be p.s.d
if (np.abs(r) >= r_thd):
r = r_thd * np.sign(r)
return r
# generate toeplitz matrix
def gen_toeplitz(r,l):
jup = np.arange(l)
bs = r**jup
B = lp.toeplitz(bs)
return B
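# For example, gen_toeplitz(r, 3) yields the AR(1) intra-block covariance template
#   [[1,   r,   r^2],
#    [r,   1,   r  ],
#    [r^2, r,   1  ]]
# whose coefficient r is what coeff_r estimates from the main and first off-diagonal traces.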
#
class bo:
"""
BSBL-BO : Bound Optimization Algos of BSBL framework
Recover block sparse signal (1D) exploiting intra-block correlation,
given the block partition.
The algorithm solves the inverse problem for the block sparse
model with known block partition:
y = X * w + v
Variables
---------
X : array, shape = (n_samples, n_features)
Training vectors.
y : array, shape = (n_samples)
Target values for training vectors
w : array, shape = (n_features)
sparse/block sparse weight vector
Parameters
----------
'learn_lambda' : (1) if (SNR<10dB), learn_lambda=1
(2) if (SNR>10dB), learn_lambda=2
(3) if noiseless, learn_lambda=0
[ Default value: learn_lambda=2 ]
'lambda_init' : initial guess of the noise variance
[ Default value: lambda_init=1e-2 ]
'r_init' : initial value for correlation coefficient
[ Default value: 0.90 ]
'epsilon' : convergence criterion
'max_iters' : Maximum number of iterations.
[ Default value: max_iters = 500 ]
    'verbose'       : print debugging information
'prune_gamma' : threshold to prune out small gamma_i
(generally, 10^{-3} or 10^{-2})
'learn_type' : learn_type = 0: Ignore intra-block correlation
learn_type = 1: Exploit intra-block correlation
[ Default: learn_type = 1 ]
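
    Usage sketch (illustrative only; the names X, y and the uniform 8-wide block
    partition below are assumptions, not taken from the original module):

        clf = bo(learn_lambda=2, prune_gamma=1e-2)
        w_est = clf.fit_transform(X, y)              # default: 16 uniform blocks
        w_est = clf.fit_transform(X, y, blk_start_loc=np.arange(0, X.shape[1], 8))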
"""
# constructor
def __init__(self, learn_lambda=2, lambda_init=1e-2, r_init=0.90,
epsilon=1e-8, max_iters=500, verbose=0,
learn_type=1, prune_gamma=1e-2):
self.learn_lambda = learn_lambda
self.lamb = lambda_init
self.r_init = r_init
self.epsilon = epsilon
self.max_iters = max_iters
self.verbose = verbose
self.learn_type = learn_type
self.prune_gamma = prune_gamma
# fit y
def fit_transform(self, X, y, blk_start_loc=None):
#
self.scale = y.std()
y = y / self.scale
M, N = X.shape
# automatically set block partition
        if blk_start_loc is None:
blkLen = int(N/16.)
blk_start_loc = np.arange(0,N,blkLen)
blk_len_list, self.is_equal_block = block_parse(blk_start_loc, N)
# init variables
nblock = blk_start_loc.shape[0]
self.nblock = nblock
w = np.zeros(N,dtype='float')
Sigma0 = [np.identity(blk_len_list[i]) for i in range(nblock)]
Sigma_w = [np.identity(blk_len_list[i]) for i in range(nblock)]
Cov_x = [np.identity(blk_len_list[i]) for i in range(nblock)]
B = [np.identity(blk_len_list[i]) for i in range(nblock)]
invB = [np.identity(blk_len_list[i]) for i in range(nblock)]
        block_slice = np.array([blk_start_loc[i] + np.arange(blk_len_list[i]) for i in range(nblock)])
gamma = np.ones(nblock, dtype='float')
HX = [np.identity(blk_len_list[i]) for i in range(nblock)]
Hy = [np.zeros(blk_len_list[i]) for i in range(nblock)]
# loops
        for count in range(self.max_iters):
# prune weights as their hyperparameter goes to zero
# index -- 0:unused, 1:used
index = np.argwhere(gamma > self.prune_gamma).ravel()
if (index.shape[0] == 0):
self.print_zero_vector()
raise TypeError('w is a zero-vector, exiting.')
# calculate XBX^T
XBX = np.zeros((M,M), dtype=float)
for i in index:
Xi = X[:, block_slice[i]]
XBX += np.dot(np.dot(Xi, Sigma0[i]), Xi.T)
invXBX = lp.inv(XBX + self.lamb * np.identity(M))
#
for i in index:
Xi = X[:, block_slice[i]]
Hi = np.dot(Xi.T, invXBX)
Hy[i] = np.dot(Hi, y)
HX[i] = np.dot(Hi, Xi)
# now we update basis
w_old = w.copy()
for i in index:
seg = block_slice[i]
w[seg] = np.dot(Sigma0[i], Hy[i])
Sigma_w[i] = Sigma0[i] - np.dot(np.dot(Sigma0[i], HX[i]), Sigma0[i])
mu_v = v2m(w[seg])
Cov_x[i] = Sigma_w[i] + np.dot(mu_v, mu_v.T)
#=========== Learn correlation structure in blocks ===========
# 0: do not consider correlation structure in each block
# 1: constrain all the blocks have the same correlation structure
if self.learn_type == 1:
r = coeff_r(Cov_x, gamma, index, r_init=self.r_init)
if self.is_equal_block:
jup = np.arange(Cov_x[0].shape[0])
bs = r**jup
B0 = lp.toeplitz(bs)
invB0 = lp.inv(B0)
for i in index:
B[i] = B0
invB[i] = invB0
else:
for i in index:
jup = np.arange(B[i].shape[0])
bs = r**jup
B[i] = lp.toeplitz(bs)
invB[i] = lp.inv(B[i])
# estimate gammas
gamma_old = gamma.copy()
for i in index:
denom = np.sqrt(np.dot(HX[i], B[i]).trace())
gamma[i] = gamma_old[i] * lp.norm(np.dot(lp.sqrtm(B[i]), Hy[i])) / denom
Sigma0[i] = B[i] * gamma[i]
# estimate lambda
if self.learn_lambda == 1:
lambComp = 0.
for i in index:
Xi = X[:,block_slice[i]];
lambComp += np.dot(np.dot(Xi, Sigma_w[i]), Xi.T).trace()
self.lamb = lp.norm(y - np.dot(X, w))**2./N + lambComp/N;
elif self.learn_lambda == 2:
lambComp = 0.
for i in index:
lambComp += np.dot(Sigma_w[i], invB[i]).trace() / gamma_old[i]
self.lamb = lp.norm(y - np.dot(X, w))**2./N + self.lamb * (w.size - lambComp)/N
#================= Check stopping conditions, eyc. ==============
dmu = (np.abs(w_old - w)).max(0); # only SMV currently
if (dmu < self.epsilon):
break
if (count >= self.max_iters):
break
# exit
self.count = count + 1
self.gamma = gamma
self.index = index
# let's convert the backyard:
w_ret = np.zeros(N)
relevant_slice = ravel_list(block_slice[index])
w_ret[relevant_slice] = w[relevant_slice]
return w_ret * self.scale
# print zero-vector warning
def print_zero_vector(self):
print ('--------------------------WARNING-----------------------------')
print ('x becomes zero vector. The solution may be incorrect.')
print ('Current prune_gamma = %g, and Current epsilon = %g' % \
(self.prune_gamma, self.epsilon))
print ('Try smaller values of prune_gamma and epsilon or normalize y')
print ('--------------------------------------------------------------')
#
# compute logobj cost likelihood for BSBL-FM
# L(i) = log|I + A_i s_i| - q_i^T (I + A_i s_i)^{-1} A_i q_i
def logobj(s,q,A,L):
As = np.dot(A, s)
Aq = np.dot(A, q)
ml = np.log(np.abs(lp.det(np.identity(L) + As))) - \
dot3(q.T.conj(), lp.inv(np.identity(L) + As), Aq)
return ml
# calculate Sigma_ii:
# \Sigma_{ii} = (A^{-1} + S)^{-1} = (I + AS)^{-1}*A
def calc_sigmaii(A, S):
L = A.shape[0]
return np.dot(lp.inv(np.eye(L) + np.dot(A, S)), A)
# extract the ith block index 'within' current basis
def extract_segment(idx, basis_book, blk_len_list):
N = sum(blk_len_list[basis_book])
istart = 0
for i in basis_book:
if (i == idx):
seg = np.arange(istart, istart+blk_len_list[i])
break;
istart += blk_len_list[i]
#
seg_others = np.ones(N, dtype='bool')
seg_others[seg] = False
return seg, seg_others
#
class fm:
"""
BSBL-FM : fast marginalized bsbl algos
Recover block sparse signal (1D) exploiting intra-block correlation,
given the block partition.
The algorithm solves the inverse problem for the block sparse
model with known block partition:
y = X * w + v
Variables
---------
X : array, shape = (n_samples, n_features)
Training vectors.
y : array, shape = (n_samples)
Target values for training vectors
w : array, shape = (n_features)
sparse/block sparse weight vector
Parameters
----------
'learn_lambda' : (1) if (SNR<10dB), learn_lambda=1
(2) if (SNR>10dB), learn_lambda=2
(3) if noiseless, learn_lambda=0
[ Default value: learn_lambda=2 ]
'lambda_init' : initial guess of the noise variance
[ Default value: lambda_init=1e-2 ]
'r_init' : initial value for correlation coefficient
[ Default value: 0.90 ]
'epsilon' : convergence criterion
'max_iters' : Maximum number of iterations.
[ Default value: max_iters = 500 ]
    'verbose'       : print debugging information
'prune_gamma' : threshold to prune out small gamma_i
(generally, 10^{-3} or 10^{-2})
'learn_type' : learn_type = 0: Ignore intra-block correlation
learn_type = 1: Exploit intra-block correlation
[ Default: learn_type = 1 ]
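
    Usage mirrors the bo class above: fm(...).fit_transform(X, y[, blk_start_loc])
    returns the recovered weight vector w (X and y are placeholders, not defined here).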
"""
# constructor
def __init__(self, learn_lambda=2, r_init=0.90, lambda_init=1e-2,
epsilon=1e-4, max_iters=500, verbose=0,
learn_type=1, prune_gamma=1e-2):
self.learn_lambda = learn_lambda
self.lamb = lambda_init
self.r_init = r_init
self.epsilon = epsilon
self.max_iters = max_iters
self.verbose = verbose
self.learn_type = learn_type
self.prune_gamma = prune_gamma
# fit y
def fit_transform(self, X, y, blk_start_loc=None):
"""
solve y = Xw + v, with block indices specified by blk_start_loc
Parameters
----------
X : MxN np.array
y : M np.array
blk_start_loc : block indices, [Optional]
                        if unspecified, the weight vector w is uniformly
                        divided into 16 blocks
Output
------
w : N np.array
"""
# normalize y
self.scale = y.std()
y = y / self.scale
M, N = X.shape
# automatically set block partition
        if blk_start_loc is None:
            blkLen = int(N/16.)
            blk_start_loc = np.arange(0,N,blkLen)
r"""
srundplug: Undulator spectra calculations. An easy (or not too difficult)
interface to make these calculations using Srw, Urgent, and Us.
functions (summary):
calc1d<code> returns (e,f)
f=flux (phot/s/0.1%bw) versus e=photon energy in eV
calc2d<code> returns (h,v,p)
p=power density (W/mm^2) versus h and v slit
directions in mm
calc3d<code> returns (e,h,v,f)
f = flux (phot/s/0.1%bw/mm^2) versus e=energy in eV,
h and v slit directions in mm
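
A minimal usage sketch (the variable name 'bl' and all numerical values are illustrative
only; the dictionary keys are the ones actually read by the calc* functions below):

    bl = {'ElectronEnergy': 6.0, 'ElectronCurrent': 0.2, 'ElectronEnergySpread': 1e-3,
          'ElectronBeamSizeH': 4e-4, 'ElectronBeamSizeV': 1e-5,
          'ElectronBeamDivergenceH': 1e-5, 'ElectronBeamDivergenceV': 4e-6,
          'PeriodID': 0.018, 'NPeriods': 111, 'Kv': 1.68,
          'distance': 30.0, 'gapH': 1e-3, 'gapV': 1e-3, 'gapHcenter': 0.0, 'gapVcenter': 0.0}

    e, f = calc1d_srw(bl, photonEnergyMin=3000.0, photonEnergyMax=55000.0, photonEnergyPoints=500)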
"""
__author__ = "<NAME>"
__contact__ = "<EMAIL>"
__copyright__ = "ESRF, 2014-2019"
#
#---------------------------- IMPORT ------------------------------------------
#
import os
import sys
import time
import array
import platform
import numpy
import shutil # to copy files
#SRW
USE_URGENT= True
USE_US = True
USE_SRWLIB = True
USE_PYSRU = False
if USE_SRWLIB:
try:
import oasys_srw.srwlib as srwlib
except:
USE_SRWLIB = False
print("SRW is not available")
#catch standard output
try:
from io import StringIO # Python3
except ImportError:
from StringIO import StringIO # Python2
try:
import matplotlib.pylab as plt
except ImportError:
print("failed to import matplotlib. Do not try to do on-line plots.")
from srxraylib.plot.gol import plot, plot_contour, plot_surface, plot_image, plot_show
########################################################################################################################
#
# GLOBAL NAMES
#
########################################################################################################################
# #Physical constants (global, by now)
import scipy.constants as codata
codata_mee = numpy.array(codata.physical_constants["electron mass energy equivalent in MeV"][0])
m2ev = codata.c * codata.h / codata.e # lambda(m) = m2eV / energy(eV)
# counter for output files
scanCounter = 0
# try:
# from xoppylib.xoppy_util import locations
# except:
# raise Exception("IMPORT")
# directory where to find urgent and us binaries
try:
from xoppylib.xoppy_util import locations
home_bin = locations.home_bin()
except:
import platform
if platform.system() == 'Linux':
home_bin='/scisoft/xop2.4/bin.linux/'
print("srundplug: undefined home_bin. It has been set to ", home_bin)
elif platform.system() == 'Darwin':
home_bin = "/scisoft/xop2.4/bin.darwin/"
print("srundplug: undefined home_bin. It has been set to ", home_bin)
elif platform.system() == 'Windows':
home_bin = ""
print("srundplug: undefined home_bin. It has been set to ", home_bin)
else:
raise FileNotFoundError("srundplug: undefined home_bin")
#check
#if os.path.isfile(home_bin + 'us') == False:
# raise FileNotFoundError("srundplug: File not found: "+home_bin+'us')
#if os.path.isfile(home_bin + 'urgent') == False:
# raise FileNotFoundError("srundplug: File not found: " + home_bin + 'urgent')
# directory where to find urgent and us binaries
try:
home_bin
except NameError:
#home_bin='/users/srio/Oasys/Orange-XOPPY/orangecontrib/xoppy/bin.linux/'
home_bin='/scisoft/xop2.4/bin.linux/'
print("srundplug: undefined home_bin. It has been set to ",home_bin)
#check
#if os.path.isfile(home_bin+'us') == False:
# print("srundplug: File not found: "+home_bin+'us')
#if os.path.isfile(home_bin+'urgent') == False:
# sys.exit("srundplug: File not found: "+home_bin+'urgent')
########################################################################################################################
#
# 1D: calc1d<code> Flux calculations
#
########################################################################################################################
def calc1d_pysru(bl,photonEnergyMin=3000.0,photonEnergyMax=55000.0,photonEnergyPoints=5,
npoints_grid=51,zero_emittance=False,fileName=None,fileAppend=False):
r"""
run pySRU for calculating flux
input: a dictionary with beamline
    output: (photon energy, flux) arrays; results are also written to fileName if given
"""
global scanCounter
t0 = time.time()
print("Inside calc1d_pysru")
from pySRU.Simulation import create_simulation
from pySRU.ElectronBeam import ElectronBeam
from pySRU.MagneticStructureUndulatorPlane import MagneticStructureUndulatorPlane
from pySRU.TrajectoryFactory import TrajectoryFactory, TRAJECTORY_METHOD_ANALYTIC,TRAJECTORY_METHOD_ODE
from pySRU.RadiationFactory import RadiationFactory,RADIATION_METHOD_NEAR_FIELD, \
RADIATION_METHOD_APPROX_FARFIELD
myBeam = ElectronBeam(Electron_energy=bl['ElectronEnergy'], I_current=bl['ElectronCurrent'])
myUndulator = MagneticStructureUndulatorPlane(K=bl['Kv'], period_length=bl['PeriodID'], length=bl['PeriodID']*bl['NPeriods'])
is_quadrant = 1
if is_quadrant:
X = numpy.linspace(0,0.5*bl['gapH'],npoints_grid)
Y = numpy.linspace(0,0.5*bl['gapV'],npoints_grid)
else:
X = numpy.linspace(-0.5*bl['gapH'],0.5*bl['gapH'],npoints_grid)
Y = numpy.linspace(-0.5*bl['gapH'],0.5*bl['gapH'],npoints_grid)
#
    # Warning: The automatic calculation of Nb_pts_trajectory depends on the energy used at this setup and is
    #          then kept constant over the full spectrum. Therefore, the setup here is done for the most
# "difficult" case, i.e., the highest energy.
# Setting photon_energy=None will do it at the first harmonic, and it was found that the flux
# diverges at high energies in some cases (energy_radiated_approximation_and_farfield)
#
simulation_test = create_simulation(magnetic_structure=myUndulator,electron_beam=myBeam,
magnetic_field=None, photon_energy=photonEnergyMax,
traj_method=TRAJECTORY_METHOD_ODE,Nb_pts_trajectory=None,
rad_method=RADIATION_METHOD_NEAR_FIELD, Nb_pts_radiation=None,
initial_condition=None, distance=bl['distance'],XY_are_list=False,X=X,Y=Y)
# simulation_test.trajectory.plot()
simulation_test.print_parameters()
# simulation_test.radiation.plot(title=("radiation in a screen for first harmonic"))
print("Integrated flux at resonance: %g photons/s/0.1bw"%(simulation_test.radiation.integration(is_quadrant=is_quadrant)))
energies = numpy.linspace(photonEnergyMin,photonEnergyMax,photonEnergyPoints)
eArray,intensArray = simulation_test.calculate_spectrum_on_slit(abscissas_array=energies,use_eV=1,is_quadrant=is_quadrant,do_plot=0)
#**********************Saving results
if fileName is not None:
if fileAppend:
f = open(fileName,"a")
else:
scanCounter = 0
f = open(fileName,"w")
f.write("#F "+fileName+"\n")
f.write("\n")
scanCounter +=1
f.write("#S %d Undulator spectrum calculation using pySRU\n"%(scanCounter))
for i,j in bl.items(): # write bl values
f.write ("#UD %s = %s\n" % (i,j) )
f.write("#UD photonEnergyMin = %f\n"%(photonEnergyMin))
f.write("#UD photonEnergyMax = %f\n"%(photonEnergyMax))
f.write("#UD photonEnergyPoints = %d\n"%(photonEnergyPoints))
#
# write flux to file
#
header="#N 4 \n#L PhotonEnergy[eV] PhotonWavelength[A] Flux[phot/sec/0.1%bw] Spectral Power[W/eV]\n"
f.write(header)
for i in range(eArray.size):
f.write(' ' + repr(eArray[i]) + ' ' + repr(m2ev/eArray[i]*1e10) + ' ' +
repr(intensArray[i]) + ' ' +
repr(intensArray[i]*codata.e*1e3) + '\n')
f.close()
if fileAppend:
print("Data appended to file: %s"%(os.path.join(os.getcwd(),fileName)))
else:
print("File written to disk: %s"%(os.path.join(os.getcwd(),fileName)))
return (eArray,intensArray)
def calc1d_srw(bl,photonEnergyMin=3000.0,photonEnergyMax=55000.0,photonEnergyPoints=500,zero_emittance=False,
srw_max_harmonic_number=None,fileName=None,fileAppend=False):
r"""
run SRW for calculating flux
input: a dictionary with beamline
    output: (photon energy, flux) arrays; results are also written to fileName if given
"""
global scanCounter
t0 = time.time()
print("Inside calc1d_srw")
#derived
#TODO calculate the numerical factor using codata
#B0 = bl['Kv']/0.934/(bl['PeriodID']*1e2)
cte = codata.e/(2*numpy.pi*codata.electron_mass*codata.c)
B0 = bl['Kv']/bl['PeriodID']/cte
try:
B0x = bl['Kh']/bl['PeriodID']/cte
except:
B0x = 0.0
try:
Kphase = bl['Kphase']
except:
Kphase = 0.0
if srw_max_harmonic_number == None:
gamma = bl['ElectronEnergy'] / (codata_mee * 1e-3)
try:
Kh = bl['Kh']
except:
Kh = 0.0
resonance_wavelength = (1 + (bl['Kv']**2 + Kh**2) / 2.0) / 2 / gamma**2 * bl["PeriodID"]
resonance_energy = m2ev / resonance_wavelength
srw_max_harmonic_number = int(photonEnergyMax / resonance_energy * 2.5)
print ("Max harmonic considered:%d ; Resonance energy: %g eV\n"%(srw_max_harmonic_number,resonance_energy))
Nmax = srw_max_harmonic_number # 21,61
print('Running SRW (SRWLIB Python)')
if B0x == 0: #*********** Conventional Undulator
harmB = srwlib.SRWLMagFldH() #magnetic field harmonic
        harmB.n = 1 #harmonic number of the magnetic field
        harmB.h_or_v = 'v' #magnetic field plane: horizontal ('h') or vertical ('v')
harmB.B = B0 #magnetic field amplitude [T]
und = srwlib.SRWLMagFldU([harmB])
und.per = bl['PeriodID'] #period length [m]
und.nPer = bl['NPeriods'] #number of periods (will be rounded to integer)
#Container of all magnetic field elements
magFldCnt = srwlib.SRWLMagFldC([und], srwlib.array('d', [0]), srwlib.array('d', [0]), srwlib.array('d', [0]))
else: #***********Undulator (elliptical)
magnetic_fields = []
magnetic_fields.append(srwlib.SRWLMagFldH(1, 'v',
_B=B0,
_ph=0.0,
_s=1, # 1=symmetrical, -1=antisymmetrical
_a=1.0))
magnetic_fields.append(srwlib.SRWLMagFldH(1, 'h',
_B=B0x,
_ph=Kphase,
_s=1,
_a=1.0))
und = srwlib.SRWLMagFldU(_arHarm=magnetic_fields, _per=bl['PeriodID'], _nPer=bl['NPeriods'])
magFldCnt = srwlib.SRWLMagFldC(_arMagFld=[und],
_arXc=srwlib.array('d', [0.0]),
_arYc=srwlib.array('d', [0.0]),
_arZc=srwlib.array('d', [0.0]))
#***********Electron Beam
eBeam = srwlib.SRWLPartBeam()
eBeam.Iavg = bl['ElectronCurrent'] #average current [A]
eBeam.partStatMom1.x = 0. #initial transverse positions [m]
eBeam.partStatMom1.y = 0.
# eBeam.partStatMom1.z = 0 #initial longitudinal positions (set in the middle of undulator)
eBeam.partStatMom1.z = - bl['PeriodID']*(bl['NPeriods']+4)/2 # initial longitudinal positions
eBeam.partStatMom1.xp = 0 #initial relative transverse velocities
eBeam.partStatMom1.yp = 0
eBeam.partStatMom1.gamma = bl['ElectronEnergy']*1e3/codata_mee #relative energy
if zero_emittance:
sigX = 1e-25
sigXp = 1e-25
sigY = 1e-25
sigYp = 1e-25
sigEperE = 1e-25
else:
sigX = bl['ElectronBeamSizeH'] #horizontal RMS size of e-beam [m]
sigXp = bl['ElectronBeamDivergenceH'] #horizontal RMS angular divergence [rad]
sigY = bl['ElectronBeamSizeV'] #vertical RMS size of e-beam [m]
sigYp = bl['ElectronBeamDivergenceV'] #vertical RMS angular divergence [rad]
sigEperE = bl['ElectronEnergySpread']
        print("calc1dSrw: starting calculation using ElectronEnergySpread=%e \n"%((sigEperE)))
#2nd order stat. moments:
eBeam.arStatMom2[0] = sigX*sigX #<(x-<x>)^2>
eBeam.arStatMom2[1] = 0 #<(x-<x>)(x'-<x'>)>
eBeam.arStatMom2[2] = sigXp*sigXp #<(x'-<x'>)^2>
eBeam.arStatMom2[3] = sigY*sigY #<(y-<y>)^2>
eBeam.arStatMom2[4] = 0 #<(y-<y>)(y'-<y'>)>
eBeam.arStatMom2[5] = sigYp*sigYp #<(y'-<y'>)^2>
eBeam.arStatMom2[10] = sigEperE*sigEperE #<(E-<E>)^2>/<E>^2
#***********Precision Parameters
arPrecF = [0]*5 #for spectral flux vs photon energy
arPrecF[0] = 1 #initial UR harmonic to take into account
arPrecF[1] = Nmax #final UR harmonic to take into account
arPrecF[2] = 1.5 #longitudinal integration precision parameter
arPrecF[3] = 1.5 #azimuthal integration precision parameter
arPrecF[4] = 1 #calculate flux (1) or flux per unit surface (2)
#***********UR Stokes Parameters (mesh) for Spectral Flux
stkF = srwlib.SRWLStokes() #for spectral flux vs photon energy
#srio stkF.allocate(10000, 1, 1) #numbers of points vs photon energy, horizontal and vertical positions
stkF.allocate(photonEnergyPoints, 1, 1) #numbers of points vs photon energy, horizontal and vertical positions
stkF.mesh.zStart = bl['distance'] #longitudinal position [m] at which UR has to be calculated
stkF.mesh.eStart = photonEnergyMin #initial photon energy [eV]
stkF.mesh.eFin = photonEnergyMax #final photon energy [eV]
stkF.mesh.xStart = bl['gapHcenter'] - bl['gapH']/2 #initial horizontal position [m]
stkF.mesh.xFin = bl['gapHcenter'] + bl['gapH']/2 #final horizontal position [m]
stkF.mesh.yStart = bl['gapVcenter'] - bl['gapV']/2 #initial vertical position [m]
stkF.mesh.yFin = bl['gapVcenter'] + bl['gapV']/2 #final vertical position [m]
#**********************Calculation (SRWLIB function calls)
print('Performing Spectral Flux (Stokes parameters) calculation ... ') # , end='')
srwlib.srwl.CalcStokesUR(stkF, eBeam, und, arPrecF)
print('Done calc1dSrw calculation in %10.3f s'%(time.time()-t0))
#**********************Saving results
if fileName is not None:
if fileAppend:
f = open(fileName,"a")
else:
scanCounter = 0
f = open(fileName,"w")
f.write("#F "+fileName+"\n")
f.write("\n")
scanCounter +=1
f.write("#S %d Undulator spectrum calculation using SRW\n"%(scanCounter))
for i,j in bl.items(): # write bl values
f.write ("#UD %s = %s\n" % (i,j) )
f.write("#UD photonEnergyMin = %f\n"%(photonEnergyMin))
f.write("#UD photonEnergyMax = %f\n"%(photonEnergyMax))
f.write("#UD photonEnergyPoints = %d\n"%(photonEnergyPoints))
f.write("#UD B0 = %f\n"%(B0))
#
# write flux to file
#
header="#N 4 \n#L PhotonEnergy[eV] PhotonWavelength[A] Flux[phot/sec/0.1%bw] Spectral Power[W/eV]\n"
f.write(header)
eArray = numpy.zeros(photonEnergyPoints)
intensArray = numpy.zeros(photonEnergyPoints)
for i in range(stkF.mesh.ne):
ener = stkF.mesh.eStart+i*(stkF.mesh.eFin-stkF.mesh.eStart)/numpy.array((stkF.mesh.ne-1)).clip(min=1)
if fileName is not None: f.write(' ' + repr(ener) + ' ' + repr(m2ev/ener*1e10) + ' ' +
repr(stkF.arS[i]) + ' ' +
repr(stkF.arS[i]*codata.e*1e3) + '\n')
eArray[i] = ener
intensArray[i] = stkF.arS[i]
if fileName is not None:
f.close()
if fileAppend:
print("Data appended to file: %s"%(os.path.join(os.getcwd(),fileName)))
else:
print("File written to disk: %s"%(os.path.join(os.getcwd(),fileName)))
return (eArray,intensArray)
def calc1d_urgent(bl,photonEnergyMin=1000.0,photonEnergyMax=100000.0,photonEnergyPoints=500,zero_emittance=False,fileName=None,fileAppend=False):
r"""
run Urgent for calculating flux
input: a dictionary with beamline
    output: (photon energy, flux) arrays; results are also written to fileName if given
"""
global scanCounter
global home_bin
print("Inside calc1d_urgent")
t0 = time.time()
for file in ["urgent.inp","urgent.out"]:
try:
os.remove(os.path.join(locations.home_bin_run(),file))
except:
pass
try:
Kh = bl['Kh']
except:
Kh = 0.0
try:
Kphase = bl['Kphase']
except:
Kphase = 0.0
with open("urgent.inp","wt") as f:
f.write("%d\n"%(1)) # ITYPE
f.write("%f\n"%(bl['PeriodID'])) # PERIOD
f.write("%f\n"%(Kh)) #KX
f.write("%f\n"%(bl['Kv'])) #KY
f.write("%f\n"%(Kphase*180.0/numpy.pi)) #PHASE
f.write("%d\n"%(bl['NPeriods'])) #N
f.write("%f\n"%(photonEnergyMin)) #EMIN
f.write("%f\n"%(photonEnergyMax)) #EMAX
f.write("%d\n"%(photonEnergyPoints)) #NENERGY
f.write("%f\n"%(bl['ElectronEnergy'])) #ENERGY
f.write("%f\n"%(bl['ElectronCurrent'])) #CUR
f.write("%f\n"%(bl['ElectronBeamSizeH']*1e3)) #SIGX
f.write("%f\n"%(bl['ElectronBeamSizeV']*1e3)) #SIGY
f.write("%f\n"%(bl['ElectronBeamDivergenceH']*1e3)) #SIGX1
f.write("%f\n"%(bl['ElectronBeamDivergenceV']*1e3)) #SIGY1
f.write("%f\n"%(bl['distance'])) #D
f.write("%f\n"%(bl['gapHcenter']*1e3)) #XPC
f.write("%f\n"%(bl['gapVcenter']*1e3)) #YPC
f.write("%f\n"%(bl['gapH']*1e3)) #XPS
f.write("%f\n"%(bl['gapV']*1e3)) #YPS
f.write("%d\n"%(50)) #NXP
f.write("%d\n"%(50)) #NYP
f.write("%d\n"%(4)) #MODE
if zero_emittance: #ICALC
f.write("%d\n"%(3))
else:
f.write("%d\n"%(1))
f.write("%d\n"%(-1)) #IHARM
f.write("%d\n"%(0)) #NPHI
f.write("%d\n"%(0)) #NSIG
f.write("%d\n"%(0)) #NALPHA
f.write("%f\n"%(0.00000)) #DALPHA
f.write("%d\n"%(0)) #NOMEGA
f.write("%f\n"%(0.00000)) #DOMEGA
if platform.system() == "Windows":
command = os.path.join(home_bin,'urgent.exe < urgent.inp')
else:
command = "'" + os.path.join(home_bin,"urgent' < urgent.inp")
print("Running command '%s' in directory: %s \n"%(command,os.getcwd()))
os.system(command)
print('Done calc1dUrgent calculation in %10.3f s'%(time.time()-t0))
# write spec file
txt = open("urgent.out").readlines()
if fileName is not None:
if fileAppend:
f = open(fileName,"a")
else:
scanCounter = 0
f = open(fileName,"w")
f.write("#F "+fileName+"\n")
f.write("\n")
scanCounter +=1
f.write("#S %d Undulator spectrum calculation using Urgent\n"%(scanCounter))
for i,j in bl.items(): # write bl values
f.write ("#UD %s = %s\n" % (i,j) )
f.write("#UD photonEnergyMin = %f\n"%(photonEnergyMin))
f.write("#UD photonEnergyMax = %f\n"%(photonEnergyMax))
f.write("#UD photonEnergyPoints = %d\n"%(photonEnergyPoints))
f.write("#N 10\n")
f.write("#L Energy(eV) Wavelength(A) Flux(ph/s/0.1%bw) Spectral Power(W/eV) imin imax p1 p2 p3 p4\n")
nArray = 0
for i in txt:
tmp = i.strip(" ")
if tmp[0].isdigit():
nArray += 1
tmp = tmp.replace('D','e')
if fileName is not None: f.write(tmp)
else:
if fileName is not None: f.write("#UD "+tmp)
if fileName is not None:
f.close()
if fileAppend:
print("Data appended to file: %s"%(os.path.join(os.getcwd(),fileName)))
else:
print("File written to disk: %s"%(os.path.join(os.getcwd(),fileName)))
# stores results in numpy arrays for return
eArray = numpy.zeros(nArray)
intensArray = numpy.zeros(nArray)
iArray = -1
for i in txt:
tmp = i.strip(" ")
if tmp[0].isdigit():
iArray += 1
tmp = tmp.replace('D','e')
tmpf = numpy.array( [float(j) for j in tmp.split()] )
eArray[iArray] = tmpf[0]
intensArray[iArray] = tmpf[2]
return (eArray,intensArray)
def calc1d_us(bl,photonEnergyMin=1000.0,photonEnergyMax=100000.0,photonEnergyPoints=500,zero_emittance=False,fileName=None,fileAppend=False):
r"""
run US for calculating flux
input: a dictionary with beamline
    output: (photon energy, flux) arrays; results are also written to fileName if given
"""
global scanCounter
global home_bin
t0 = time.time()
for file in ["us.inp","us.out"]:
try:
os.remove(os.path.join(locations.home_bin_run(),file))
except:
pass
print("Inside calc1d_us")
with open("us.inp","wt") as f:
f.write("US run\n")
f.write(" %f %f %f Ring-Energy Current\n"%
(bl['ElectronEnergy'],bl['ElectronCurrent']*1e3,bl['ElectronEnergySpread']))
f.write(" %f %f %f %f Sx Sy Sxp Syp\n"%
(bl['ElectronBeamSizeH']*1e3,bl['ElectronBeamSizeV']*1e3,
bl['ElectronBeamDivergenceH']*1e3,bl['ElectronBeamDivergenceV']*1e3) )
f.write(" %f %d 0.000 %f Period N Kx Ky\n"%
(bl['PeriodID']*1e2,bl['NPeriods'],bl['Kv']) )
f.write(" %f %f %d Emin Emax Ne\n"%
(photonEnergyMin,photonEnergyMax,photonEnergyPoints) )
f.write(" %f %f %f %f %f 50 50 D Xpc Ypc Xps Yps Nxp Nyp\n"%
(bl['distance'],bl['gapHcenter']*1e3,bl['gapVcenter']*1e3,bl['gapH']*1e3,bl['gapV']*1e3) )
# f.write(" 4 4 0 Mode Method Iharm\n")
if zero_emittance:
f.write(" 4 3 0 Mode Method Iharm\n")
else:
f.write(" 4 4 0 Mode Method Iharm\n")
f.write(" 0 0 0.0 64 8.0 0 Nphi Nalpha Dalpha2 Nomega Domega Nsigma\n")
f.write("foreground\n")
if platform.system() == "Windows":
command = os.path.join(home_bin,'us.exe < us.inp')
else:
command = "'" + os.path.join(home_bin,'us') + "'"
print("Running command '%s' in directory: %s \n"%(command,os.getcwd()))
os.system(command)
print('Done calc1dUs calculation in %10.3f s'%(time.time()-t0))
txt = open("us.out").readlines()
# write spec file
if fileName is not None:
if fileAppend:
f = open(fileName,"a")
else:
scanCounter = 0
f = open(fileName,"w")
f.write("#F "+fileName+"\n")
f.write("\n")
scanCounter +=1
f.write("#S %d Undulator spectrum calculation using US\n"%(scanCounter))
for i,j in bl.items(): # write bl values
f.write ("#UD %s = %s\n" % (i,j) )
f.write("#UD photonEnergyMin = %f\n"%(photonEnergyMin))
f.write("#UD photonEnergyMax = %f\n"%(photonEnergyMax))
f.write("#UD photonEnergyPoints = %d\n"%(photonEnergyPoints))
f.write("#N 8\n")
f.write("#L Energy(eV) Wavelength(A) Flux(ph/s/0.1%bw) SpectralPower(W/ev) p1 p2 p3 p4\n")
nArray = 0
for i in txt:
tmp = i.strip(" ")
if tmp[0].isdigit():
tmp = tmp.replace('D','e')
tmp = numpy.fromstring(tmp,dtype=float,sep=' ')
if fileName is not None:
f.write(("%g "*8+"\n")%(tmp[0],1e10*m2ev/tmp[0],tmp[1],tmp[1]*1e3*codata.e,tmp[2],tmp[3],tmp[4],tmp[5]))
nArray += 1
else:
if fileName is not None: f.write("#UD "+tmp)
if fileName is not None:
f.close()
if fileAppend:
print("Data appended to file: %s"%(os.path.join(os.getcwd(),fileName)))
else:
print("File written to disk: %s"%(os.path.join(os.getcwd(),fileName)))
# stores results in numpy arrays for return
eArray = numpy.zeros(nArray)
intensArray = numpy.zeros(nArray)
iArray = -1
for i in txt:
tmp = i.strip(" ")
if tmp[0].isdigit():
iArray += 1
tmp = tmp.replace('D','e')
tmpf = numpy.array( [float(j) for j in tmp.split()] )
eArray[iArray] = tmpf[0]
intensArray[iArray] = tmpf[1]
return (eArray,intensArray)
########################################################################################################################
#
# 2D: calc2d<code> Power density calculations
#
########################################################################################################################
def calc2d_pysru(bl,zero_emittance=False,hSlitPoints=51,vSlitPoints=51,
photonEnergyMin=50.0,photonEnergyMax=2500.0,photonEnergyPoints=2451,
fileName=None,fileAppend=False):
e,h,v,i = calc3d_pysru(bl,zero_emittance=zero_emittance,
photonEnergyMin=photonEnergyMin,photonEnergyMax=photonEnergyMax,photonEnergyPoints=photonEnergyPoints,
hSlitPoints=hSlitPoints,vSlitPoints=vSlitPoints,
fileName=fileName,fileAppend=fileAppend)
e_step = (photonEnergyMax - photonEnergyMin) / photonEnergyPoints
plot(e,(i.sum(axis=2)).sum(axis=1)*(v[1]-v[0])*(h[1]-h[0]),show=0,title="Spectrum for %s"%bl)
return (h,v,i.sum(axis=0)*e_step*codata.e*1e3)
def calc2d_srw(bl,zero_emittance=False,hSlitPoints=101,vSlitPoints=51,
srw_max_harmonic_number=51, # Not needed, kept for eventual compatibility
fileName=None,fileAppend=False,):
r"""
run SRW for calculating power density
input: a dictionary with beamline
    output: (h, v, power density) arrays; results are also written to fileName if given
"""
global scanCounter
print("Inside calc2d_srw")
#Maximum number of harmonics considered. This is critical for speed.
cte = codata.e/(2*numpy.pi*codata.electron_mass*codata.c)
B0 = bl['Kv']/bl['PeriodID']/cte
try:
B0x = bl['Kh'] / bl['PeriodID'] / cte
except:
B0x = 0.0
try:
Kphase = bl['Kphase']
except:
Kphase = 0.0
print('Running SRW (SRWLIB Python)')
if B0x == 0: #*********** Conventional Undulator
harmB = srwlib.SRWLMagFldH() #magnetic field harmonic
        harmB.n = 1 #harmonic number of the magnetic field
        harmB.h_or_v = 'v' #magnetic field plane: horizontal ('h') or vertical ('v')
harmB.B = B0 #magnetic field amplitude [T]
und = srwlib.SRWLMagFldU([harmB])
und.per = bl['PeriodID'] # period length [m]
und.nPer = bl['NPeriods'] # number of periods (will be rounded to integer)
magFldCnt = None
magFldCnt = srwlib.SRWLMagFldC([und], array.array('d', [0]), array.array('d', [0]), array.array('d', [0]))
else: #***********Undulator (elliptical)
magnetic_fields = []
magnetic_fields.append(srwlib.SRWLMagFldH(1, 'v',
_B=B0,
_ph=0.0,
_s=1, # 1=symmetrical, -1=antisymmetrical
_a=1.0))
magnetic_fields.append(srwlib.SRWLMagFldH(1, 'h',
_B=B0x,
_ph=Kphase,
_s=1,
_a=1.0))
und = srwlib.SRWLMagFldU(_arHarm=magnetic_fields, _per=bl['PeriodID'], _nPer=bl['NPeriods'])
magFldCnt = srwlib.SRWLMagFldC(_arMagFld=[und],
_arXc=srwlib.array('d', [0.0]),
_arYc=srwlib.array('d', [0.0]),
_arZc=srwlib.array('d', [0.0]))
#***********Electron Beam
eBeam = None
eBeam = srwlib.SRWLPartBeam()
eBeam.Iavg = bl['ElectronCurrent'] #average current [A]
eBeam.partStatMom1.x = 0. #initial transverse positions [m]
eBeam.partStatMom1.y = 0.
# eBeam.partStatMom1.z = 0. #initial longitudinal positions (set in the middle of undulator)
eBeam.partStatMom1.z = - bl['PeriodID']*(bl['NPeriods']+4)/2 # initial longitudinal positions
eBeam.partStatMom1.xp = 0. #initial relative transverse velocities
eBeam.partStatMom1.yp = 0.
eBeam.partStatMom1.gamma = bl['ElectronEnergy']*1e3/codata_mee #relative energy
if zero_emittance:
sigEperE = 1e-25
sigX = 1e-25
sigXp = 1e-25
sigY = 1e-25
sigYp = 1e-25
else:
sigEperE = bl['ElectronEnergySpread'] #relative RMS energy spread
sigX = bl['ElectronBeamSizeH'] #horizontal RMS size of e-beam [m]
sigXp = bl['ElectronBeamDivergenceH'] #horizontal RMS angular divergence [rad]
sigY = bl['ElectronBeamSizeV'] #vertical RMS size of e-beam [m]
sigYp = bl['ElectronBeamDivergenceV'] #vertical RMS angular divergence [rad]
#2nd order stat. moments:
eBeam.arStatMom2[0] = sigX*sigX #<(x-<x>)^2>
eBeam.arStatMom2[1] = 0.0 #<(x-<x>)(x'-<x'>)>
eBeam.arStatMom2[2] = sigXp*sigXp #<(x'-<x'>)^2>
eBeam.arStatMom2[3] = sigY*sigY #<(y-<y>)^2>
eBeam.arStatMom2[4] = 0.0 #<(y-<y>)(y'-<y'>)>
eBeam.arStatMom2[5] = sigYp*sigYp #<(y'-<y'>)^2>
eBeam.arStatMom2[10] = sigEperE*sigEperE #<(E-<E>)^2>/<E>^2
#***********Precision Parameters
arPrecP = [0]*5 #for power density
arPrecP[0] = 1.5 #precision factor
arPrecP[1] = 1 #power density computation method (1- "near field", 2- "far field")
arPrecP[2] = 0.0 #initial longitudinal position (effective if arPrecP[2] < arPrecP[3])
arPrecP[3] = 0.0 #final longitudinal position (effective if arPrecP[2] < arPrecP[3])
arPrecP[4] = 20000 #number of points for (intermediate) trajectory calculation
    #***********UR Stokes Parameters (mesh) for power density
stkP = None
stkP = srwlib.SRWLStokes() #for power density
stkP.allocate(1, hSlitPoints, vSlitPoints) #numbers of points vs horizontal and vertical positions (photon energy is not taken into account)
stkP.mesh.zStart = bl['distance'] #longitudinal position [m] at which power density has to be calculated
stkP.mesh.xStart = -bl['gapH']/2.0 #initial horizontal position [m]
stkP.mesh.xFin = bl['gapH']/2.0 #final horizontal position [m]
stkP.mesh.yStart = -bl['gapV']/2.0 #initial vertical position [m]
stkP.mesh.yFin = bl['gapV']/2.0 #final vertical position [m]
#**********************Calculation (SRWLIB function calls)
print('Performing Power Density calculation (from field) ... ')
t0 = time.time()
try:
srwlib.srwl.CalcPowDenSR(stkP, eBeam, 0, magFldCnt, arPrecP)
print('Done Performing Power Density calculation (from field).')
except:
print("Error running SRW")
        raise Exception("Error running SRW")
#**********************Saving results
if fileName is not None:
if fileAppend:
f = open(fileName,"a")
else:
scanCounter = 0
f = open(fileName,"w")
f.write("#F "+fileName+"\n")
#
# write power density to file as mesh scan
#
scanCounter +=1
f.write("\n#S %d Undulator power density calculation using SRW\n"%(scanCounter))
for i,j in bl.items(): # write bl values
f.write ("#UD %s = %s\n" % (i,j) )
f.write('\n#U B0 = ' + repr(B0 ) + '\n' )
f.write('\n#U hSlitPoints = ' + repr(hSlitPoints) + '\n' )
f.write('\n#U vSlitPoints = ' + repr(vSlitPoints) + '\n' )
f.write("#N 3 \n#L H[mm] V[mm] PowerDensity[W/mm^2] \n" )
hArray = numpy.zeros(stkP.mesh.nx)
vArray = numpy.zeros(stkP.mesh.ny)
totPower = numpy.array(0.0)
hProfile = numpy.zeros(stkP.mesh.nx)
vProfile = numpy.zeros(stkP.mesh.ny)
powerArray = numpy.zeros((stkP.mesh.nx,stkP.mesh.ny))
# fill arrays
ij = -1
for j in range(stkP.mesh.ny):
for i in range(stkP.mesh.nx):
ij += 1
xx = stkP.mesh.xStart + i*(stkP.mesh.xFin-stkP.mesh.xStart)/(stkP.mesh.nx-1)
yy = stkP.mesh.yStart + j*(stkP.mesh.yFin-stkP.mesh.yStart)/(stkP.mesh.ny-1)
#ij = i*stkP.mesh.nx + j
totPower += stkP.arS[ij]
powerArray[i,j] = stkP.arS[ij]
hArray[i] = xx*1e3 # mm
            vArray[j] = yy*1e3 # mm
    # central profiles through the power density map (used below for the FWHM estimates)
    hProfile = powerArray[:,int(stkP.mesh.ny/2)]
    vProfile = powerArray[int(stkP.mesh.nx/2),:]
# dump
if fileName is not None:
for i in range(stkP.mesh.nx):
for j in range(stkP.mesh.ny):
f.write(repr(hArray[i]) + ' ' + repr(vArray[j]) + ' ' + repr(powerArray[i,j]) + '\n')
totPower = totPower * \
(stkP.mesh.xFin-stkP.mesh.xStart)/(stkP.mesh.nx-1)*1e3 * \
(stkP.mesh.yFin-stkP.mesh.yStart)/(stkP.mesh.ny-1)*1e3
hStep = (stkP.mesh.xFin-stkP.mesh.xStart)/(stkP.mesh.nx-1)
# dump profiles
if fileName is not None:
scanCounter +=1
f.write("\n#S %d Undulator power density calculation using SRW: H profile\n"%(scanCounter))
for i,j in bl.items(): # write bl values
f.write ("#UD %s = %s\n" % (i,j) )
f.write( "#UD Total power [W]: "+repr(totPower)+"\n")
f.write( "#UD FWHM [mm] : "+repr(calc_fwhm(hProfile,hStep)[0]*1e3)+"\n")
f.write( "#N 2 \n")
f.write( "#L H[mm] PowerDensityCentralProfile[W/mm2] \n" )
for i in range(stkP.mesh.nx):
#xx = stkP.mesh.xStart + i*hStep
#f.write(repr(xx*1e3) + ' ' + repr(hProfile[i]) + '\n')
f.write(repr(hArray[i]) + ' ' + \
repr(powerArray[i,int(len(vArray)/2)]) + '\n')
scanCounter +=1
vStep = (stkP.mesh.yFin-stkP.mesh.yStart)/(stkP.mesh.ny-1)
f.write("\n#S %d Undulator power density calculation using SRW: V profile\n"%(scanCounter))
for i,j in bl.items(): # write bl values
f.write ("#UD %s = %s\n" % (i,j) )
f.write( "#UD Total power [W]: "+repr(totPower)+"\n")
f.write( "#UD FWHM [mm] : "+repr(calc_fwhm(vProfile,vStep)[0]*1e3)+"\n")
f.write( "#N 2 \n")
f.write( "#L V[mm] PowerDensityCentralProfile[W/mm2] \n" )
for j in range(stkP.mesh.ny):
f.write(repr(vArray[j]) + ' ' + \
repr(powerArray[int(len(hArray)/2),j]) + '\n')
f.close()
if fileAppend:
print("Data appended to file: %s"%(os.path.join(os.getcwd(),fileName)))
else:
print("File written to disk: %s"%(os.path.join(os.getcwd(),fileName)))
print( "Power density peak SRW: [W/mm2]: "+repr(powerArray.max()))
print( "Total power SRW [W]: "+repr(totPower))
return (hArray, vArray, powerArray)
def calc2d_us(bl,zero_emittance=False,hSlitPoints=51,vSlitPoints=51,fileName=None,fileAppend=False):
r"""
run US for calculating power density
input: a dictionary with beamline
    output: (h, v, power density) arrays; results are also written to fileName if given
"""
global scanCounter
global home_bin
print("Inside calc2d_us")
for file in ["us.inp","us.out"]:
try:
os.remove(os.path.join(locations.home_bin_run(),file))
except:
pass
with open("us.inp","wt") as f:
#f.write("%d\n"%(1)) # ITYPE
#f.write("%f\n"%(bl['PeriodID'])) # PERIOD
f.write("US run\n")
f.write(" %f %f %f Ring-Energy Current\n"%
(bl['ElectronEnergy'],bl['ElectronCurrent']*1e3,bl['ElectronEnergySpread']))
f.write(" %f %f %f %f Sx Sy Sxp Syp\n"%
(bl['ElectronBeamSizeH']*1e3,bl['ElectronBeamSizeV']*1e3,
bl['ElectronBeamDivergenceH']*1e3,bl['ElectronBeamDivergenceV']*1e3) )
f.write(" %f %d 0.000 %f Period N Kx Ky\n"%
(bl['PeriodID']*1e2,bl['NPeriods'],bl['Kv']) )
f.write(" 9972.1 55000.0 500 Emin Emax Ne\n")
f.write(" %f 0.000 0.000 %f %f %d %d D Xpc Ypc Xps Yps Nxp Nyp\n"%
(bl['distance'],bl['gapH']*1e3,bl['gapV']*1e3,hSlitPoints-1,vSlitPoints-1) )
if zero_emittance:
f.write(" 6 3 0 Mode Method Iharm\n")
else:
f.write(" 6 1 0 Mode Method Iharm\n")
f.write(" 0 0 0.0 64 8.0 0 Nphi Nalpha Dalpha2 Nomega Domega Nsigma\n")
f.write("foreground\n")
if platform.system() == "Windows":
command = os.path.join(home_bin,'us.exe < us.inp')
else:
command = "'" + os.path.join(home_bin,'us') + "'"
print("Running command '%s' in directory: %s \n"%(command,os.getcwd()))
print("\n--------------------------------------------------------\n")
os.system(command)
print("Done.")
print("\n--------------------------------------------------------\n")
txt = open("us.out").readlines()
# write spec file
if fileName is not None:
if fileAppend:
f = open(fileName,"a")
else:
scanCounter = 0
f = open(fileName,"w")
f.write("#F "+fileName+"\n")
f.write("\n")
scanCounter +=1
f.write("#S %d Undulator power density calculation using US\n"%(scanCounter))
for i,j in bl.items(): # write bl values
f.write ("#UD %s = %s\n" % (i,j) )
f.write("#UD hSlitPoints = %f\n"%(hSlitPoints))
f.write("#UD vSlitPoints = %f\n"%(vSlitPoints))
f.write("#N 7\n")
f.write("#L H[mm] V[mm] PowerDensity[W/mm^2] p1 p2 p3 p4\n")
mesh = numpy.zeros((7,(hSlitPoints)*(vSlitPoints)))
hh = numpy.zeros((hSlitPoints))
vv = numpy.zeros((vSlitPoints))
int_mesh = numpy.zeros( ((hSlitPoints),(vSlitPoints)) )
imesh = -1
for i in txt:
tmp = i.strip(" ")
if tmp[0].isdigit():
if fileName is not None: f.write(tmp)
tmpf = numpy.array( [float(j) for j in tmp.split()] )
imesh = imesh + 1
mesh[:,imesh] = tmpf
else:
if fileName is not None: f.write("#UD "+tmp)
imesh = -1
for i in range(hSlitPoints):
for j in range(vSlitPoints):
imesh = imesh + 1
hh[i] = mesh[0,imesh]
vv[j] = mesh[1,imesh]
int_mesh[i,j] = mesh[2,imesh]
hhh = numpy.concatenate((-hh[::-1],hh[1:]))
vvv = numpy.concatenate((-vv[::-1],vv[1:]))
tmp = numpy.concatenate( (int_mesh[::-1,:],int_mesh[1:,:]), axis=0)
int_mesh2 = numpy.concatenate( (tmp[:,::-1],tmp[:,1:]),axis=1)
if fileName is not None:
scanCounter += 1
f.write("\n#S %d Undulator power density calculation using US (whole slit)\n"%(scanCounter))
for i,j in bl.items(): # write bl values
f.write ("#UD %s = %s\n" % (i,j) )
f.write("#UD hSlitPoints = %f\n"%(hSlitPoints))
f.write("#UD vSlitPoints = %f\n"%(vSlitPoints))
f.write("#N 3\n")
f.write("#L H[mm] V[mm] PowerDensity[W/mm^2]\n")
for i in range(len(hhh)):
for j in range(len(vvv)):
f.write("%f %f %f\n"%(hhh[i],vvv[j],int_mesh2[i,j]) )
totPower = int_mesh2.sum() * (hh[1]-hh[0]) * (vv[1]-vv[0])
if fileName is not None:
scanCounter += 1
f.write("\n#S %d Undulator power density calculation using US: H profile\n"%(scanCounter))
for i,j in bl.items(): # write bl values
f.write ("#UD %s = %s\n" % (i,j) )
f.write("#UD hSlitPoints = %f\n"%(hSlitPoints))
f.write("#UD vSlitPoints = %f\n"%(vSlitPoints))
f.write("#UD Total power [W]: "+repr(totPower)+"\n")
f.write("#N 2\n")
f.write("#L H[mm] PowerDensity[W/mm2]\n")
for i in range(len(hhh)):
f.write("%f %f\n"%(hhh[i],int_mesh2[i,int(len(vvv)/2)]) )
scanCounter += 1
f.write("\n#S %d Undulator power density calculation using US: V profile\n"%(scanCounter))
for i,j in bl.items(): # write bl values
f.write ("#UD %s = %s\n" % (i,j) )
f.write("#UD hSlitPoints = %f\n"%(hSlitPoints))
f.write("#UD vSlitPoints = %f\n"%(vSlitPoints))
f.write("#UD Total power [W]: "+repr(totPower)+"\n")
f.write("#N 2\n")
f.write("#L V[mm] PowerDensity[W/mm2]\n")
for i in range(len(vvv)):
f.write("%f %f\n"%(vvv[i],int_mesh2[int(len(hhh)/2),i]) )
f.close()
if fileAppend:
print("Data appended to file: %s"%(os.path.join(os.getcwd(),fileName)))
else:
print("File written to disk: %s"%(os.path.join(os.getcwd(),fileName)))
print( "Power density peak US: [W/mm2]: "+repr(int_mesh2.max()))
print( "Total power US [W]: "+repr(totPower))
return (hhh, vvv, int_mesh2)
def calc2d_urgent(bl,zero_emittance=False,fileName=None,fileAppend=False,hSlitPoints=21,vSlitPoints=51):
r"""
run Urgent for calculating power density
input: a dictionary with beamline
    output: (h, v, power density) arrays; results are also written to fileName if given
"""
global scanCounter
global home_bin
print("Inside calc2d_urgent")
for file in ["urgent.inp","urgent.out"]:
try:
os.remove(os.path.join(locations.home_bin_run(),file))
except:
pass
try:
Kh = bl['Kh']
except:
Kh = 0.0
try:
Kphase = bl['Kphase']
except:
Kphase = 0.0
with open("urgent.inp","wt") as f:
f.write("%d\n"%(1)) # ITYPE
f.write("%f\n"%(bl['PeriodID'])) # PERIOD
f.write("%f\n"%(Kh)) #KX
f.write("%f\n"%(bl['Kv'])) #KY
f.write("%f\n"%(Kphase*180.0/numpy.pi)) #PHASE
f.write("%d\n"%(bl['NPeriods'])) #N
f.write("1000.0\n") #EMIN
f.write("100000.0\n") #EMAX
f.write("1\n") #NENERGY
f.write("%f\n"%(bl['ElectronEnergy'])) #ENERGY
f.write("%f\n"%(bl['ElectronCurrent'])) #CUR
f.write("%f\n"%(bl['ElectronBeamSizeH']*1e3)) #SIGX
f.write("%f\n"%(bl['ElectronBeamSizeV']*1e3)) #SIGY
f.write("%f\n"%(bl['ElectronBeamDivergenceH']*1e3)) #SIGX1
f.write("%f\n"%(bl['ElectronBeamDivergenceV']*1e3)) #SIGY1
f.write("%f\n"%(bl['distance'])) #D
f.write("%f\n"%(0.00000)) #XPC
f.write("%f\n"%(0.00000)) #YPC
f.write("%f\n"%(bl['gapH']*1e3)) #XPS
f.write("%f\n"%(bl['gapV']*1e3)) #YPS
f.write("%d\n"%(hSlitPoints-1)) #NXP
f.write("%d\n"%(vSlitPoints-1)) #NYP
f.write("%d\n"%(6)) #MODE
if zero_emittance: #ICALC
f.write("%d\n"%(2))
else:
f.write("%d\n"%(1))
f.write("%d\n"%(-200)) #IHARM TODO: check max harmonic number
f.write("%d\n"%(0)) #NPHI
f.write("%d\n"%(0)) #NSIG
f.write("%d\n"%(0)) #NALPHA
f.write("%f\n"%(0.00000)) #DALPHA
f.write("%d\n"%(0)) #NOMEGA
f.write("%f\n"%(0.00000)) #DOMEGA
if platform.system() == "Windows":
command = os.path.join(home_bin,'urgent.exe < urgent.inp')
else:
command = "'" + os.path.join(home_bin,"urgent' < urgent.inp")
print("\n\n--------------------------------------------------------\n")
print("Running command '%s' in directory: %s \n"%(command,os.getcwd()))
os.system(command)
print("Done.")
# write spec file
txt = open("urgent.out").readlines()
if fileName is not None:
if fileAppend:
f = open(fileName,"a")
else:
scanCounter = 0
f = open(fileName,"w")
f.write("#F "+fileName+"\n")
scanCounter += 1
f.write("\n#S %d Undulator power density calculation using Urgent (a slit quadrant)\n"%(scanCounter))
for i,j in bl.items(): # write bl values
f.write ("#UD %s = %s\n" % (i,j) )
f.write("#UD hSlitPoints = %f\n"%(hSlitPoints))
f.write("#UD vSlitPoints = %f\n"%(vSlitPoints))
f.write("#N 4\n")
f.write("#L H[mm] V[mm] PowerDensity[W/mm^2] Flux[Phot/s/0.1%bw]\n")
mesh = numpy.zeros((4,(hSlitPoints)*(vSlitPoints)))
hh = numpy.zeros((hSlitPoints))
vv = numpy.zeros((vSlitPoints))
int_mesh = numpy.zeros( ((hSlitPoints),(vSlitPoints)) )
imesh = -1
for i in txt:
tmp = i.strip(" ")
if tmp[0].isdigit():
if fileName is not None: f.write(tmp)
tmp = tmp.replace('D','e')
tmpf = numpy.array( [float(j) for j in tmp.split()] )
imesh = imesh + 1
mesh[:,imesh] = tmpf
else:
if len(tmp) > 0: # remove the last block
if tmp.split(" ")[0] == 'HARMONIC':
break
if fileName is not None: f.write("#UD "+tmp)
imesh = -1
for i in range(hSlitPoints):
for j in range(vSlitPoints):
imesh = imesh + 1
hh[i] = mesh[0,imesh]
vv[j] = mesh[1,imesh]
int_mesh[i,j] = mesh[2,imesh]
hhh = numpy.concatenate((-hh[::-1],hh[1:]))
vvv = numpy.concatenate((-vv[::-1],vv[1:]))
tmp = numpy.concatenate( (int_mesh[::-1,:],int_mesh[1:,:]), axis=0)
int_mesh2 = numpy.concatenate( (tmp[:,::-1],tmp[:,1:]),axis=1)
totPower = int_mesh2.sum() * (hh[1]-hh[0]) * (vv[1]-vv[0])
if fileName is not None:
scanCounter += 1
f.write("\n#S %d Undulator power density calculation using Urgent (whole slit)\n"%(scanCounter))
for i,j in bl.items(): # write bl values
f.write ("#UD %s = %s\n" % (i,j) )
f.write("#UD hSlitPoints = %f\n"%(hSlitPoints))
f.write("#UD vSlitPoints = %f\n"%(vSlitPoints))
f.write("#N 3\n")
f.write("#L H[mm] V[mm] PowerDensity[W/mm^2]\n")
for i in range(len(hhh)):
for j in range(len(vvv)):
f.write("%f %f %f\n"%(hhh[i],vvv[j],int_mesh2[i,j]) )
scanCounter += 1
f.write("\n#S %d Undulator power density calculation using Urgent: H profile\n"%(scanCounter))
for i,j in bl.items(): # write bl values
f.write ("#UD %s = %s\n" % (i,j) )
f.write("#UD hSlitPoints = %f\n"%(hSlitPoints))
f.write("#UD vSlitPoints = %f\n"%(vSlitPoints))
f.write("#UD Total power [W]: "+repr(totPower)+"\n")
f.write("#N 2\n")
f.write("#L H[mm] PowerDensity[W/mm2]\n")
for i in range(len(hhh)):
f.write("%f %f\n"%(hhh[i],int_mesh2[i,int(len(vvv)/2)]) )
scanCounter += 1
f.write("\n#S %d Undulator power density calculation using Urgent: V profile\n"%(scanCounter))
for i,j in bl.items(): # write bl values
f.write ("#UD %s = %s\n" % (i,j) )
f.write("#UD hSlitPoints = %f\n"%(hSlitPoints))
f.write("#UD vSlitPoints = %f\n"%(vSlitPoints))
f.write("#UD Total power [W]: "+repr(totPower)+"\n")
f.write("#N 2\n")
f.write("#L V[mm] PowerDensity[W/mm2]\n")
for i in range(len(vvv)):
f.write("%f %f\n"%(vvv[i],int_mesh2[int(len(hhh)/2),i]) )
f.close()
if fileAppend:
print("Data appended to file: %s"%(os.path.join(os.getcwd(),fileName)))
else:
print("File written to disk: %s"%(os.path.join(os.getcwd(),fileName)))
print( "Power density peak URGENT: [W/mm2]: "+repr(int_mesh2.max()))
print( "Total power URGENT [W]: "+repr(totPower))
print("\n--------------------------------------------------------\n\n")
return (hhh, vvv, int_mesh2)
########################################################################################################################
#
# 3D: calc3d<code> Emission calculations
#
########################################################################################################################
def calc3d_srw(bl,photonEnergyMin=3000.0,photonEnergyMax=55000.0,photonEnergyPoints=500,
zero_emittance=False,hSlitPoints=51,vSlitPoints=51,
fileName=None,fileAppend=False):
r"""
run SRW for calculating intensity vs H,V,energy
input: a dictionary with beamline
    output: (e, h, v, flux) arrays; results are also written to fileName if given
"""
global scanCounter
print("Inside calc3d_srw")
if fileName is not None:
if fileAppend:
fout = open(fileName,"a")
else:
scanCounter = 0
fout = open(fileName,"w")
fout.write("#F "+fileName+"\n")
if zero_emittance:
        eBeam = _srw_electron_beam(E=bl['ElectronEnergy'],Iavg=bl['ElectronCurrent'],) # no emittance now
else:
eBeam = _srw_electron_beam(E=bl['ElectronEnergy'], sigE = bl['ElectronEnergySpread'], Iavg=bl['ElectronCurrent'],
sigX=bl['ElectronBeamSizeH'], sigY=bl['ElectronBeamSizeV'],
sigXp=bl['ElectronBeamDivergenceH'], sigYp=bl['ElectronBeamDivergenceV'])
eBeam.partStatMom1.z = - bl['PeriodID'] * (bl['NPeriods'] + 4) / 2 # initial longitudinal positions
#***********Precision Parameters
mesh = srwlib.SRWLRadMesh(photonEnergyMin,photonEnergyMax,photonEnergyPoints,
-bl['gapH']/2,bl['gapH']/2,hSlitPoints,
-bl['gapV']/2,bl['gapV']/2,vSlitPoints,bl['distance'])
cte = codata.e/(2*numpy.pi*codata.electron_mass*codata.c)
B0 = bl['Kv']/bl['PeriodID']/cte
try:
B0x = bl['Kh'] / bl['PeriodID'] / cte
except:
B0x = 0.0
try:
Kphase = bl['Kphase']
except:
Kphase = 0.0
print('Running SRW (SRWLIB Python)')
if B0x == 0: #*********** Conventional Undulator
# harmB = srwlib.SRWLMagFldH() #magnetic field harmonic
# harmB.n = 1 #harmonic number ??? Mostly asymmetry
# harmB.h_or_v = 'v' #magnetic field plane: horzontal ('h') or vertical ('v')
# harmB.B = B0 #magnetic field amplitude [T]
# und = srwlib.SRWLMagFldU([harmB])
# und.per = bl['PeriodID'] # period length [m]
# und.nPer = bl['NPeriods'] # number of periods (will be rounded to integer)
#
# magFldCnt = None
# magFldCnt = srwlib.SRWLMagFldC([und], array.array('d', [0]), array.array('d', [0]), array.array('d', [0]))
und0 = srwlib.SRWLMagFldU([srwlib.SRWLMagFldH(1, 'v', B0)], bl['PeriodID'], bl['NPeriods'])
und = srwlib.SRWLMagFldC([und0], array.array('d', [0]), array.array('d', [0]), array.array('d', [0]))
else: #***********Undulator (elliptical)
magnetic_fields = []
magnetic_fields.append(srwlib.SRWLMagFldH(1, 'v',
_B=B0,
_ph=0.0,
_s=1, # 1=symmetrical, -1=antisymmetrical
_a=1.0))
magnetic_fields.append(srwlib.SRWLMagFldH(1, 'h',
_B=B0x,
_ph=Kphase,
_s=1,
_a=1.0))
und0 = srwlib.SRWLMagFldU(_arHarm=magnetic_fields, _per=bl['PeriodID'], _nPer=bl['NPeriods'])
und = srwlib.SRWLMagFldC([und0], array.array('d', [0]), array.array('d', [0]), array.array('d', [0]))
print('Running SRW (SRWLIB Python)')
#
# #***********UR Stokes Parameters (mesh) for Spectral Flux
# stkF = srwlib.SRWLStokes() #for spectral flux vs photon energy
# stkF.allocate(photonEnergyPoints, hSlitPoints, vSlitPoints) #numbers of points vs photon energy, horizontal and vertical positions
# stkF.mesh.zStart = bl['distance'] #longitudinal position [m] at which UR has to be calculated
# stkF.mesh.eStart = photonEnergyMin #initial photon energy [eV]
# stkF.mesh.eFin = photonEnergyMax #final photon energy [eV]
# stkF.mesh.xStart = -bl['gapH']/2 #initial horizontal position [m]
# stkF.mesh.xFin = bl['gapH']/2 #final horizontal position [m]
# stkF.mesh.yStart = -bl['gapV']/2 #initial vertical position [m]
# stkF.mesh.yFin = bl['gapV']/2 #final vertical position [m]
#**********************Calculation (SRWLIB function calls)
print('Performing Spectral Flux 3d calculation ... ') # , end='')
t0 = time.time()
if zero_emittance:
#
# single electron
#
# arPrecS = [0]*7 #for electric field and single-electron intensity
# arPrecS[0] = 1 #SR calculation method: 0- "manual", 1- "auto-undulator", 2- "auto-wiggler"
# arPrecS[1] = 0.01 #relative precision
# arPrecS[2] = 0 #longitudinal position to start integration (effective if < zEndInteg)
# arPrecS[3] = 0 #longitudinal position to finish integration (effective if > zStartInteg)
# arPrecS[4] = 20000 #Number of points for intermediate trajectory calculation
# arPrecS[5] = 1 #Use "terminating terms" (i.e. asymptotic expansions at zStartInteg and zEndInteg) or not (1 or 0 respectively)
# arPrecS[6] = -1 #0.1 #sampling factor for adjusting nx, ny (effective if > 0)
paramSE = [1, 0.01, 0, 0, 50000, 1, 0]
wfr = srwlib.SRWLWfr()
wfr.mesh = mesh
wfr.partBeam = eBeam
wfr.allocate(mesh.ne, mesh.nx, mesh.ny)
# eBeam = SrwDriftElectronBeam(eBeam, und)
srwlib.srwl.CalcElecFieldSR(wfr, 0, und, paramSE)
print('Extracting stokes ... ')
stk = srwlib.SRWLStokes()
stk.mesh = mesh
stk.allocate(mesh.ne, mesh.nx, mesh.ny)
# eBeam = SrwDriftElectronBeam(eBeam, -eBeam.moved)
wfr.calc_stokes(stk)
# Stokes0ToSpec(stk,fname=fileName)
#
# intensArray,eArray,hArray,vArray = Stokes0ToArrays(stk)
Shape = (4,stk.mesh.ny,stk.mesh.nx,stk.mesh.ne)
data = numpy.ndarray(buffer=stk.arS, shape=Shape,dtype=stk.arS.typecode)
data0 = data #[0]
hArray = numpy.linspace(stk.mesh.xStart,stk.mesh.xFin,stk.mesh.nx)
vArray = numpy.linspace(stk.mesh.yStart,stk.mesh.yFin,stk.mesh.ny)
eArray = numpy.linspace(stk.mesh.eStart,stk.mesh.eFin,stk.mesh.ne)
# intensArray = numpy.zeros((eArray.size,hArray.size,vArray.size))
print('Filling output array... ')
intensArray = numpy.zeros((eArray.size,hArray.size,vArray.size))
for ie in range(eArray.size):
for ix in range(hArray.size):
for iy in range(vArray.size):
# intensArray[ie,ix,iy] = data0[iy,ix,ie]
intensArray[ie,ix,iy,] = data[0,iy,ix,ie]
else:
#
# convolution
#
# arPrecS = [0]*7 #for electric field and single-electron intensity
# arPrecS[0] = 1 #SR calculation method: 0- "manual", 1- "auto-undulator", 2- "auto-wiggler"
# arPrecS[1] = 0.01 #relative precision
# arPrecS[2] = 0 #longitudinal position to start integration (effective if < zEndInteg)
# arPrecS[3] = 0 #longitudinal position to finish integration (effective if > zStartInteg)
# arPrecS[4] = 20000 #Number of points for intermediate trajectory calculation
# arPrecS[5] = 1 #Use "terminating terms" (i.e. asymptotic expansions at zStartInteg and zEndInteg) or not (1 or 0 respectively)
# arPrecS[6] = -1 #0.1 #sampling factor for adjusting nx, ny (effective if > 0)
paramME = [1, 0.01, 0, 0, 50000, 1, 0]
wfr = srwlib.SRWLWfr()
wfr.mesh = mesh
wfr.partBeam = eBeam
wfr.allocate(mesh.ne, mesh.nx, mesh.ny)
# eBeam = _srw_drift_electron_beam(eBeam, und)
srwlib.srwl.CalcElecFieldSR(wfr, 0, und, paramME)
#
# Extract intensity
#
print('Extracting stokes and filling output array... ')
mesh0 = wfr.mesh
# arI0 = array.array('f', [0]*mesh0.nx*mesh0.ny) #"flat" array to take 2D intensity data
# arI0 = array.array('f', [0]*mesh0.nx*mesh0.ny*mesh.ne) #"flat" array to take 2D intensity data
INTENSITY_TYPE_SINGLE_ELECTRON=0
INTENSITY_TYPE_MULTI_ELECTRON=1
hArray=numpy.linspace(wfr.mesh.xStart,wfr.mesh.xFin, wfr.mesh.nx)
vArray=numpy.linspace(wfr.mesh.yStart,wfr.mesh.yFin, wfr.mesh.ny)
eArray=numpy.linspace(wfr.mesh.eStart,wfr.mesh.eFin, wfr.mesh.ne)
intensArray = numpy.zeros((eArray.size,hArray.size,vArray.size,))
for ie in range(eArray.size):
arI0 = array.array('f', [0]*mesh0.nx*mesh0.ny) #"flat" array to take 2D intensity data
            # 6 is for total polarization; 0=H, 1=V
srwlib.srwl.CalcIntFromElecField(arI0, wfr, 6, INTENSITY_TYPE_MULTI_ELECTRON, 3, eArray[ie], 0, 0)
Shape = (mesh0.ny,mesh0.nx)
data = numpy.ndarray(buffer=arI0, shape=Shape,dtype=arI0.typecode)
for ix in range(hArray.size):
for iy in range(vArray.size):
intensArray[ie,ix,iy,] = data[iy,ix]
print(' done\n')
print('Done Performing Spectral Flux 3d calculation in sec '+str(time.time()-t0))
if fileName is not None:
        print(' saving results to file %s...'%fileName)
for ie in range(eArray.size):
scanCounter += 1
fout.write("\n#S %d Undulator 3d flux density (irradiance) calculation using SRW at E=%6.3f eV (whole slit )\n"%(scanCounter,eArray[ie]))
for i,j in bl.items(): # write bl values
fout.write ("#UD %s = %s\n" % (i,j) )
fout.write("#UD hSlitPoints = %f\n"%(hArray.size))
fout.write("#UD vSlitPoints = %f\n"%(vArray.size))
fout.write("#N 3\n")
fout.write("#L H[mm] V[mm] Flux[phot/s/0.1%bw/mm^2]\n")
for i in range(len(hArray)):
for j in range(len(vArray)):
fout.write("%f %f %f\n"%(hArray[i],vArray[j],intensArray[ie,i,j]) )
fout.close()
if fileAppend:
print("Data appended to file: %s"%(os.path.join(os.getcwd(),fileName)))
else:
print("File written to disk: %s"%(os.path.join(os.getcwd(),fileName)))
# grid in mm
return (eArray, 1e3*hArray, 1e3*vArray, intensArray)
def calc3d_srw_step_by_step(bl,photonEnergyMin=3000.0,photonEnergyMax=55000.0,photonEnergyPoints=500,
photonEnergyIntelligentGrid=False,
zero_emittance=False,hSlitPoints=51,vSlitPoints=51,
fileName=None,fileAppend=False,):
r"""
run SRW for calculating intensity vs H,V,energy
input: a dictionary with beamline
output: file name with results
"""
global scanCounter
print("Inside calc3d_srw_step_by_step")
if fileName is not None:
if fileAppend:
fout = open(fileName,"a")
else:
scanCounter = 0
fout = open(fileName,"w")
fout.write("#F "+fileName+"\n")
if photonEnergyIntelligentGrid and photonEnergyPoints > 1:
e, f = calc1d_srw(bl,photonEnergyMin=photonEnergyMin,photonEnergyMax=photonEnergyMax,photonEnergyPoints=photonEnergyPoints,
zero_emittance=zero_emittance,srw_max_harmonic_number=None,fileName=None,fileAppend=False)
# cs = numpy.cumsum(f)
from scipy.integrate import cumtrapz
cs = cumtrapz(f,e,initial=0)
cs /= cs[-1]
# plot(cs,e)
# plot(e, numpy.gradient(f,e))
abs = numpy.linspace(0,1.0,photonEnergyPoints)
e1 = numpy.interp(abs,cs,e)
e1[0] = photonEnergyMin
e1[-1] = photonEnergyMax
# print(">>>>>>>e ",e)
# print(">>>>>>>e1: ",e1)
eArray = e1
else:
eArray = numpy.linspace(photonEnergyMin, photonEnergyMax, photonEnergyPoints, )
if zero_emittance:
        eBeam = _srw_electron_beam(E=bl['ElectronEnergy'],Iavg=bl['ElectronCurrent'],) # no emittance now
else:
eBeam = _srw_electron_beam(E=bl['ElectronEnergy'], sigE = bl['ElectronEnergySpread'], Iavg=bl['ElectronCurrent'],
sigX=bl['ElectronBeamSizeH'], sigY=bl['ElectronBeamSizeV'],
sigXp=bl['ElectronBeamDivergenceH'], sigYp=bl['ElectronBeamDivergenceV'])
eBeam.partStatMom1.z = - bl['PeriodID'] * (bl['NPeriods'] + 4) / 2 # initial longitudinal positions
cte = codata.e/(2*numpy.pi*codata.electron_mass*codata.c)
B0 = bl['Kv']/bl['PeriodID']/cte
try:
B0x = bl['Kh'] / bl['PeriodID'] / cte
except:
B0x = 0.0
try:
Kphase = bl['Kphase']
except:
Kphase = 0.0
print('Running SRW (SRWLIB Python)')
if B0x == 0: #*********** Conventional Undulator
und0 = srwlib.SRWLMagFldU([srwlib.SRWLMagFldH(1, 'v', B0)], bl['PeriodID'], bl['NPeriods'])
und = srwlib.SRWLMagFldC([und0], array.array('d', [0]), array.array('d', [0]), array.array('d', [0]))
else: #***********Undulator (elliptical)
magnetic_fields = []
magnetic_fields.append(srwlib.SRWLMagFldH(1, 'v',
_B=B0,
_ph=0.0,
_s=1, # 1=symmetrical, -1=antisymmetrical
_a=1.0))
magnetic_fields.append(srwlib.SRWLMagFldH(1, 'h',
_B=B0x,
_ph=Kphase,
_s=1,
_a=1.0))
und0 = srwlib.SRWLMagFldU(_arHarm=magnetic_fields, _per=bl['PeriodID'], _nPer=bl['NPeriods'])
und = srwlib.SRWLMagFldC([und0], array.array('d', [0]), array.array('d', [0]), array.array('d', [0]))
print('Running SRW (SRWLIB Python)')
#
# #***********UR Stokes Parameters (mesh) for Spectral Flux
# stkF = srwlib.SRWLStokes() #for spectral flux vs photon energy
# stkF.allocate(photonEnergyPoints, hSlitPoints, vSlitPoints) #numbers of points vs photon energy, horizontal and vertical positions
# stkF.mesh.zStart = bl['distance'] #longitudinal position [m] at which UR has to be calculated
# stkF.mesh.eStart = photonEnergyMin #initial photon energy [eV]
# stkF.mesh.eFin = photonEnergyMax #final photon energy [eV]
# stkF.mesh.xStart = -bl['gapH']/2 #initial horizontal position [m]
# stkF.mesh.xFin = bl['gapH']/2 #final horizontal position [m]
# stkF.mesh.yStart = -bl['gapV']/2 #initial vertical position [m]
# stkF.mesh.yFin = bl['gapV']/2 #final vertical position [m]
#**********************Calculation (SRWLIB function calls)
print('Performing Spectral Flux 3d calculation ... ') # , end='')
t0 = time.time()
hArray = numpy.linspace(-bl['gapH'] / 2, bl['gapH'] / 2, hSlitPoints, )
vArray = numpy.linspace(-bl['gapV'] / 2, bl['gapV'] / 2, vSlitPoints, )
intensArray = numpy.zeros((eArray.size, hArray.size, vArray.size,))
timeArray = numpy.zeros_like(eArray)
#
# convolution
#
# arPrecS = [0]*7 #for electric field and single-electron intensity
# arPrecS[0] = 1 #SR calculation method: 0- "manual", 1- "auto-undulator", 2- "auto-wiggler"
# arPrecS[1] = 0.01 #relative precision
# arPrecS[2] = 0 #longitudinal position to start integration (effective if < zEndInteg)
# arPrecS[3] = 0 #longitudinal position to finish integration (effective if > zStartInteg)
# arPrecS[4] = 20000 #Number of points for intermediate trajectory calculation
# arPrecS[5] = 1 #Use "terminating terms" (i.e. asymptotic expansions at zStartInteg and zEndInteg) or not (1 or 0 respectively)
# arPrecS[6] = -1 #0.1 #sampling factor for adjusting nx, ny (effective if > 0)
paramME = [1, 0.01, 0, 0, 50000, 1, 0]
t00 = 0
for ie in range(eArray.size):
        print("Calculating photon energy: %f (point %d of %d) time:%g"%(eArray[ie],ie+1,eArray.size,time.time()-t00))
t00 = time.time()
try:
mesh = srwlib.SRWLRadMesh(eArray[ie], eArray[ie], 1,
-bl['gapH'] / 2, bl['gapH'] / 2, hSlitPoints,
-bl['gapV'] / 2, bl['gapV'] / 2, vSlitPoints, bl['distance'])
wfr = srwlib.SRWLWfr()
wfr.allocate(1, mesh.nx, mesh.ny)
# eBeam = _srw_drift_electron_beam(eBeam, und)
wfr.mesh = mesh
wfr.partBeam = eBeam
srwlib.srwl.CalcElecFieldSR(wfr, 0, und, paramME)
#
# Extract intensity
#
print('Extracting stokes and filling output array... ')
mesh0 = wfr.mesh
# arI0 = array.array('f', [0]*mesh0.nx*mesh0.ny) #"flat" array to take 2D intensity data
# arI0 = array.array('f', [0]*mesh0.nx*mesh0.ny*mesh.ne) #"flat" array to take 2D intensity data
INTENSITY_TYPE_SINGLE_ELECTRON=0
INTENSITY_TYPE_MULTI_ELECTRON=1
arI0 = array.array('f', [0]*mesh0.nx*mesh0.ny) #"flat" array to take 2D intensity data
            # 6 is for total polarization; 0=H, 1=V
if zero_emittance:
srwlib.srwl.CalcIntFromElecField(arI0, wfr, 6, INTENSITY_TYPE_SINGLE_ELECTRON, 3, eArray[ie], 0, 0)
else:
srwlib.srwl.CalcIntFromElecField(arI0, wfr, 6, INTENSITY_TYPE_MULTI_ELECTRON, 3, eArray[ie], 0, 0)
Shape = (mesh0.ny,mesh0.nx)
data = numpy.ndarray(buffer=arI0, shape=Shape,dtype=arI0.typecode)
for ix in range(hArray.size):
for iy in range(vArray.size):
intensArray[ie,ix,iy,] = data[iy,ix]
except:
print("Error running SRW")
timeArray[ie] = time.time() - t00
print(' done\n')
print('Done Performing Spectral Flux 3d calculation in sec '+str(time.time()-t0))
if fileName is not None:
        print(' saving results to file %s...'%fileName)
for ie in range(eArray.size):
scanCounter += 1
fout.write("\n#S %d Undulator 3d flux density (irradiance) calculation using SRW at E=%6.3f eV (whole slit )\n"%(scanCounter,eArray[ie]))
for i,j in bl.items(): # write bl values
fout.write ("#UD %s = %s\n" % (i,j) )
fout.write("#UD hSlitPoints = %f\n"%(hArray.size))
fout.write("#UD vSlitPoints = %f\n"%(vArray.size))
fout.write("#N 3\n")
fout.write("#L H[mm] V[mm] Flux[phot/s/0.1%bw/mm^2]\n")
for i in range(len(hArray)):
for j in range(len(vArray)):
fout.write("%f %f %f\n"%(hArray[i],vArray[j],intensArray[ie,i,j]) )
fout.close()
if fileAppend:
print("Data appended to file: %s"%(os.path.join(os.getcwd(),fileName)))
else:
print("File written to disk: %s"%(os.path.join(os.getcwd(),fileName)))
# grid in mm
# tmp = intensArray.sum(axis=2).sum(axis=1)
# f = open("tmp.dat",'w')
# for i in range(eArray.size):
# f.write("%f %f %f\n"%(eArray[i],timeArray[i],tmp[i]))
# f.close()
# print("File written to disk: tmp.dat")
return (eArray, 1e3*hArray, 1e3*vArray, intensArray)
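# Minimal self-contained sketch of the "intelligent grid" idea used in calc3d_srw_step_by_step above: photon energies
# are redistributed so that each point carries roughly the same share of the integrated flux, by inverting the
# cumulative integral (inverse-transform resampling). The spectrum (e, f) passed in is a placeholder; in the function
# above it comes from calc1d_srw.
def _example_intelligent_energy_grid(e, f, npoints):
    from scipy.integrate import cumtrapz
    cs = cumtrapz(f, e, initial=0)           # cumulative flux
    cs /= cs[-1]                             # normalize to [0, 1]
    u = numpy.linspace(0.0, 1.0, npoints)    # equally spaced quantiles
    e_new = numpy.interp(u, cs, e)           # invert the CDF: denser sampling where the flux is large
    e_new[0], e_new[-1] = e[0], e[-1]        # pin the end points, as done in the function above
    return e_new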
def calc3d_urgent(bl,photonEnergyMin=3000.0,photonEnergyMax=55000.0,photonEnergyPoints=500,
zero_emittance=False,hSlitPoints=50,vSlitPoints=50,
fileName=None,fileAppend=False,copyUrgentFiles=False):
r"""
run Urgent for calculating intensity vs H,V,energy
input: a dictionary with beamline
output: file name with results
"""
global scanCounter
global home_bin
print("Inside calc3d_urgent")
if fileName is not None:
if fileAppend:
fout = open(fileName,"a")
else:
scanCounter = 0
fout = open(fileName,"w")
fout.write("#F "+fileName+"\n")
if photonEnergyPoints == 1:
eStep = 0.0
else:
eStep = (photonEnergyMax-photonEnergyMin)/(photonEnergyPoints-1)
eArray = numpy.zeros( photonEnergyPoints )
intensArray = numpy.zeros( photonEnergyPoints )
hArray = numpy.zeros( (hSlitPoints*2-1) )
vArray = numpy.zeros( (vSlitPoints*2-1) )
int_mesh2integrated = numpy.zeros( (hSlitPoints*2-1,vSlitPoints*2-1) )
int_mesh3 = numpy.zeros( (photonEnergyPoints,hSlitPoints*2-1,vSlitPoints*2-1) )
for iEner in range(photonEnergyPoints):
ener = photonEnergyMin + iEner*eStep
eArray[iEner] = ener
for file in ["urgent.inp","urgent.out"]:
try:
os.remove(os.path.join(locations.home_bin_run(),file))
except:
pass
try:
Kh = bl['Kh']
except:
Kh = 0.0
try:
Kphase = bl['Kphase']
except:
Kphase = 0.0
with open("urgent.inp","wt") as f:
f.write("%d\n"%(1)) # ITYPE
f.write("%f\n"%(bl['PeriodID'])) # PERIOD
f.write("%f\n"%(Kh)) # KX
f.write("%f\n"%(bl['Kv'])) # KY
f.write("%f\n"%(Kphase)) # PHASE
f.write("%d\n"%(bl['NPeriods'])) # N
f.write("%f\n"%(ener)) #EMIN
f.write("100000.0\n") #EMAX
f.write("1\n") #NENERGY
f.write("%f\n"%(bl['ElectronEnergy'])) #ENERGY
f.write("%f\n"%(bl['ElectronCurrent'])) #CUR
f.write("%f\n"%(bl['ElectronBeamSizeH']*1e3)) #SIGX
f.write("%f\n"%(bl['ElectronBeamSizeV']*1e3)) #SIGY
f.write("%f\n"%(bl['ElectronBeamDivergenceH']*1e3)) #SIGX1
f.write("%f\n"%(bl['ElectronBeamDivergenceV']*1e3)) #SIGY1
f.write("%f\n"%(bl['distance'])) #D
f.write("%f\n"%(0.00000)) #XPC
f.write("%f\n"%(0.00000)) #YPC
f.write("%f\n"%(bl['gapH']*1e3)) #XPS
f.write("%f\n"%(bl['gapV']*1e3)) #YPS
f.write("%d\n"%(hSlitPoints-1)) #NXP
f.write("%d\n"%(vSlitPoints-1)) #NYP
f.write("%d\n"%(1)) #MODE
if zero_emittance: #ICALC
f.write("%d\n"%(3))
else:
f.write("%d\n"%(1))
f.write("%d\n"%(-1)) #IHARM TODO: check max harmonic number
f.write("%d\n"%(0)) #NPHI
f.write("%d\n"%(0)) #NSIG
f.write("%d\n"%(0)) #NALPHA
f.write("%f\n"%(0.00000)) #DALPHA
f.write("%d\n"%(0)) #NOMEGA
f.write("%f\n"%(0.00000)) #DOMEGA
if platform.system() == "Windows":
command = os.path.join(home_bin, 'urgent.exe < urgent.inp')
else:
command = "'" + os.path.join(home_bin, "urgent' < urgent.inp")
print("\n\n--------------------------------------------------------\n")
print("Running command '%s' in directory: %s \n"%(command,os.getcwd()))
os.system(command)
print("Done.")
if copyUrgentFiles:
shutil.copy2("urgent.inp","urgent_energy_index%d.inp"%iEner)
shutil.copy2("urgent.out","urgent_energy_index%d.out"%iEner)
# write spec file
txt = open("urgent.out").readlines()
if fileName is not None:
scanCounter += 1
fout.write("\n#S %d Undulator 3d flux density (irradiance) calculation using Urgent at E=%0.3f keV (a slit quadrant)\n"%(scanCounter,ener*1e-3))
for i,j in bl.items(): # write bl values
fout.write ("#UD %s = %s\n" % (i,j) )
fout.write("#UD hSlitPoints = %f\n"%(hSlitPoints))
fout.write("#UD vSlitPoints = %f\n"%(vSlitPoints))
fout.write("#N 7\n")
fout.write("#L H[mm] V[mm] Flux[Phot/s/mm^2/0.1%bw] l1 l2 l3 l4\n")
if zero_emittance:
mesh = numpy.zeros((8,(hSlitPoints)*(vSlitPoints)))
else:
mesh = numpy.zeros((7,(hSlitPoints)*(vSlitPoints)))
hh = numpy.zeros((hSlitPoints))
vv = numpy.zeros((vSlitPoints))
int_mesh = numpy.zeros( ((hSlitPoints),(vSlitPoints)) )
imesh = -1
for i in txt:
tmp = i.strip(" ")
if tmp[0].isdigit():
if fileName is not None:
fout.write(tmp)
tmp = tmp.replace('D','e')
tmpf = numpy.array( [float(j) for j in tmp.split()] )
imesh = imesh + 1
mesh[:,imesh] = tmpf
else:
if fileName is not None:
fout.write("#UD "+tmp)
imesh = -1
for i in range(hSlitPoints):
for j in range(vSlitPoints):
imesh = imesh + 1
hh[i] = mesh[0,imesh]
vv[j] = mesh[1,imesh]
int_mesh[i,j] = mesh[2,imesh]
hArray = numpy.concatenate((-hh[::-1],hh[1:]))
vArray = numpy.concatenate((-vv[::-1],vv[1:]))
#hArray = hhh*0.0
#vArray = vvv*0.0
totIntens = 0.0
tmp = numpy.concatenate( (int_mesh[::-1,:],int_mesh[1:,:]), axis=0)
int_mesh2 = numpy.concatenate( (tmp[:,::-1],tmp[:,1:]),axis=1)
if fileName is not None:
scanCounter += 1
fout.write("\n#S %d Undulator 3d flux density (irradiance) calculation using Urgent at E=%6.3f eV (whole slit )\n"%(scanCounter,ener))
for i,j in bl.items(): # write bl values
fout.write ("#UD %s = %s\n" % (i,j) )
fout.write("#UD hSlitPoints = %f\n"%(hSlitPoints))
fout.write("#UD vSlitPoints = %f\n"%(vSlitPoints))
fout.write("#N 3\n")
fout.write("#L H[mm] V[mm] Flux[phot/s/0.1%bw/mm^2]\n")
for i in range(len(hArray)):
for j in range(len(vArray)):
if fileName is not None: fout.write("%f %f %f\n"%(hArray[i],vArray[j],int_mesh2[i,j]) )
int_mesh3[iEner,i,j] = int_mesh2[i,j]
int_mesh2integrated[i,j] += int_mesh2[i,j]
totIntens += int_mesh2[i,j]
totIntens = totIntens * (hh[1]-hh[0]) * (vv[1]-vv[0])
intensArray[iEner] = totIntens
# now dump the integrated power
    # convert from phot/s/0.1%bw/mm2 to W/mm^2
int_mesh2integrated = int_mesh2integrated *codata.e*1e3 * eStep
if fileName is not None:
scanCounter += 1
fout.write("\n#S %d Undulator 3d flux density vs H,E (integrated in energy) calculation using Urgent\n"%(scanCounter))
for i,j in bl.items(): # write bl values
fout.write ("#UD %s = %s\n" % (i,j) )
fout.write("#UD hSlitPoints = %f\n"%(hSlitPoints))
fout.write("#UD vSlitPoints = %f\n"%(vSlitPoints))
fout.write("#UD IntegratedPower[W] = %f\n"%( int_mesh2integrated.sum()*(hArray[1]-hArray[0])*(vArray[1]-vArray[0])))
fout.write("#N 3\n")
fout.write("#L H[mm] V[mm] PowerDensity[W/mm^2]\n")
for i in range(len(hArray)):
for j in range(len(vArray)):
fout.write("%f %f %f\n"%(hArray[i],vArray[j],int_mesh2integrated[i,j]) )
#print(">>>>>>>>>>>>>>>power1",int_mesh2integrated.sum()*(hArray[1]-hArray[0])*(vArray[1]-vArray[0]))
#print(">>>>>>>>>>>>>>>power2",intensArray.sum()*codata.e*1e3*(eArray[1]-eArray[0]))
#print(">>>>>>>>>>>>>>>power3",int_mesh3.sum()*codata.e*1e3*(eArray[1]-eArray[0])*(hArray[1]-hArray[0])*(vArray[1]-vArray[0]))
# now dump the spectrum as the sum
scanCounter += 1
fout.write("\n#S %d Undulator 3d flux density vs energy (integrated in H,V) calculation using Urgent\n"%(scanCounter))
for i,j in bl.items(): # write bl values
fout.write ("#UD %s = %s\n" % (i,j) )
fout.write("#UD hSlitPoints = %f\n"%(hSlitPoints))
fout.write("#UD vSlitPoints = %f\n"%(vSlitPoints))
if photonEnergyPoints > 1:
fout.write("#UD IntegratedPower[W] = %f\n"%(intensArray.sum()*codata.e*1e3*(eArray[1]-eArray[0])))
fout.write("#N 3\n")
fout.write("#L photonEnergy[eV] Flux[phot/s/0.1%bw] PowerDensity[W/eV]\n")
for i in range(photonEnergyPoints):
fout.write("%f %f %f\n"%(eArray[i],intensArray[i],intensArray[i]*codata.e*1e3) )
fout.close()
if fileAppend:
print("Data appended to file: %s"%(os.path.join(os.getcwd(),fileName)))
else:
print("File written to disk: %s"%(os.path.join(os.getcwd(),fileName)))
print("\n--------------------------------------------------------\n\n")
# append direct calculation for comparison
# tmp = calc1d_urgent(bl,photonEnergyMin=photonEnergyMin,
# photonEnergyMax=photonEnergyMax,
# photonEnergyPoints=photonEnergyPoints,
# fileName=fileName,fileAppend=True)
# return abscissas in mm
return (eArray, hArray, vArray, int_mesh3)
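# Hedged usage sketch for calc3d_urgent (defined as an unexecuted helper). The dictionary lists only the keys that the
# function reads; all numerical values are placeholders, and the external 'urgent' binary must be reachable via home_bin.
def _example_calc3d_urgent():
    bl = {
        'ElectronEnergy': 6.0,             # GeV (placeholder)
        'ElectronCurrent': 0.2,            # A (placeholder)
        'ElectronBeamSizeH': 30e-6,        # m (placeholder)
        'ElectronBeamSizeV': 5e-6,         # m (placeholder)
        'ElectronBeamDivergenceH': 5e-6,   # rad (placeholder)
        'ElectronBeamDivergenceV': 2e-6,   # rad (placeholder)
        'PeriodID': 0.02,                  # m (placeholder)
        'NPeriods': 100,
        'Kv': 1.5,
        'distance': 30.0,                  # m (placeholder)
        'gapH': 0.002,                     # m (placeholder)
        'gapV': 0.002,                     # m (placeholder)
    }
    e, h, v, flux3d = calc3d_urgent(bl, photonEnergyMin=5000.0, photonEnergyMax=6000.0, photonEnergyPoints=3,
                                    hSlitPoints=11, vSlitPoints=11, fileName=None)
    return e, h, v, flux3d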
def calc3d_us(bl,photonEnergyMin=3000.0,photonEnergyMax=55000.0,photonEnergyPoints=500,
zero_emittance=False,hSlitPoints=50,vSlitPoints=50,
fileName=None,fileAppend=True,copyUsFiles=False):
r"""
run Us for calculating intensity vs H,V,energy
input: a dictionary with beamline
output: file name with results
"""
global scanCounter
global home_bin
print("Inside calc3d_us")
if fileName is not None:
if fileAppend:
fout = open(fileName,"a")
else:
scanCounter = 0
fout = open(fileName,"w")
fout.write("#F "+fileName+"\n")
if photonEnergyPoints == 1:
eStep = 0.0
else:
eStep = (photonEnergyMax-photonEnergyMin)/(photonEnergyPoints-1)
eArray = numpy.zeros( photonEnergyPoints )
intensArray = numpy.zeros( photonEnergyPoints )
hArray = numpy.zeros( (hSlitPoints*2-1) )
vArray = numpy.zeros( (vSlitPoints*2-1) )
int_mesh2integrated = numpy.zeros( (hSlitPoints*2-1,vSlitPoints*2-1) )
int_mesh3 = numpy.zeros( (photonEnergyPoints,hSlitPoints*2-1,vSlitPoints*2-1) )
for iEner in range(photonEnergyPoints):
ener = photonEnergyMin + iEner*eStep
eArray[iEner] = ener
for file in ["us.inp","us.out"]:
try:
os.remove(os.path.join(locations.home_bin_run(),file))
except:
pass
with open("us.inp","wt") as f:
#f.write("%d\n"%(1)) # ITYPE
#f.write("%f\n"%(bl['PeriodID'])) # PERIOD
f.write("US run\n")
f.write(" %f %f %f Ring-Energy Current\n"%
(bl['ElectronEnergy'],bl['ElectronCurrent']*1e3,bl['ElectronEnergySpread']))
f.write(" %f %f %f %f Sx Sy Sxp Syp\n"%
(bl['ElectronBeamSizeH']*1e3,bl['ElectronBeamSizeV']*1e3,
bl['ElectronBeamDivergenceH']*1e3,bl['ElectronBeamDivergenceV']*1e3) )
f.write(" %f %d 0.000 %f Period N Kx Ky\n"%
(bl['PeriodID']*1e2,bl['NPeriods'],bl['Kv']) )
f.write(" %f 55000.0 1 Emin Emax Ne\n"%(ener))
f.write(" %f 0.000 0.000 %f %f %d %d D Xpc Ypc Xps Yps Nxp Nyp\n"%
(bl['distance'],bl['gapH']*1e3,bl['gapV']*1e3,hSlitPoints-1,vSlitPoints-1) )
if zero_emittance:
f.write(" 1 3 0 Mode Method Iharm\n")
else:
f.write(" 1 1 0 Mode Method Iharm\n")
f.write(" 0 0 0.0 64 8.0 0 Nphi Nalpha Dalpha2 Nomega Domega Nsigma\n")
f.write("foreground\n")
if platform.system() == "Windows":
command = os.path.join(home_bin, 'us.exe < us.inp')
else:
command = "'" + os.path.join(home_bin,'us') + "'"
print("\n\n--------------------------------------------------------\n")
print("Running command '%s' in directory: %s \n"%(command,os.getcwd()))
os.system(command)
print("Done.")
if copyUsFiles:
shutil.copy2("us.inp","us_energy_index%d.inp"%iEner)
shutil.copy2("us.out","us_energy_index%d.out"%iEner)
# shutil.copy2("us.log","us%d.log"%iEner)
txt = open("us.out").readlines()
got_error = False
for line in txt:
if "unsuccessful" in line:
got_error = True
totIntens = 0.0
mesh = numpy.zeros((7,(hSlitPoints)*(vSlitPoints)))
hh = numpy.zeros((hSlitPoints))
vv = numpy.zeros((vSlitPoints))
int_mesh = numpy.zeros( ((hSlitPoints),(vSlitPoints)) )
imesh = -1
if not got_error:
# write spec file
if fileName is not None:
scanCounter += 1
fout.write("\n#S %d Undulator 3d flux density (irradiance) calculation using Us at E=%6.3f eV (a slit quadrant)\n"%(scanCounter,ener))
for i,j in bl.items(): # write bl values
fout.write ("#UD %s = %s\n" % (i,j) )
fout.write("#UD hSlitPoints = %f\n"%(hSlitPoints))
fout.write("#UD vSlitPoints = %f\n"%(vSlitPoints))
fout.write("#N 7\n")
fout.write("#L H[mm] V[mm] Flux[phot/s/0.1%bw/mm^2] p1 p2 p3 p4\n")
for i in txt:
tmp = i.strip(" ")
if tmp[0].isdigit():
if fileName is not None:
fout.write(tmp)
#tmp = tmp.replace('D','e')
tmpf = numpy.array( [float(j) for j in tmp.split()] )
imesh = imesh + 1
mesh[:,imesh] = tmpf
else:
if fileName is not None:
fout.write("#UD "+tmp)
imesh = -1
for i in range(hSlitPoints):
for j in range(vSlitPoints):
imesh = imesh + 1
hh[i] = mesh[0,imesh]
vv[j] = mesh[1,imesh]
int_mesh[i,j] = mesh[2,imesh]
hArray = numpy.concatenate((-hh[::-1],hh[1:]))
            vArray = numpy.concatenate((-vv[::-1],vv[1:]))
"""The Gaussian measure."""
import numpy as np
from ...core.optimization.context_manager import ContextManager
from ..typing import BoundsType
from .integration_measure import IntegrationMeasure
class IsotropicGaussianMeasure(IntegrationMeasure):
r"""The isotropic Gaussian measure.
The isotropic Gaussian measure has density
.. math::
p(x)=(2\pi\sigma^2)^{-\frac{D}{2}} e^{-\frac{1}{2}\frac{\|x-\mu\|^2}{\sigma^2}}
where :math:`\mu` is the mean vector and :math:`\sigma^2` is the scalar variance
parametrizing the measure.
:param mean: The mean of the Gaussian measure, shape (num_dimensions, ).
:param variance: The scalar variance of the Gaussian measure.
"""
def __init__(self, mean: np.ndarray, variance: float):
super().__init__("IsotropicGaussianMeasure")
# check mean
if not isinstance(mean, np.ndarray):
raise TypeError("Mean must be of type numpy.ndarray, {} given.".format(type(mean)))
if mean.ndim != 1:
raise ValueError("Dimension of mean must be 1, dimension {} given.".format(mean.ndim))
# check covariance
if not isinstance(variance, float):
raise TypeError("Variance must be of type float, {} given.".format(type(variance)))
if not variance > 0:
raise ValueError("Variance must be positive, current value is {}.".format(variance))
self.mean = mean
self.variance = variance
self.num_dimensions = mean.shape[0]
@property
def full_covariance_matrix(self):
"""The full covariance matrix of the Gaussian measure."""
return self.variance * np.eye(self.num_dimensions)
@property
def can_sample(self) -> bool:
"""Indicates whether the measure has sampling available.
:return: ``True`` if sampling is available. ``False`` otherwise.
"""
return True
def compute_density(self, x: np.ndarray) -> np.ndarray:
"""Evaluates the density at x.
:param x: Points at which density is evaluated, shape (n_points, input_dim).
:return: The density at x, shape (n_points, ).
"""
factor = (2 * np.pi * self.variance) ** (self.num_dimensions / 2)
scaled_diff = (x - self.mean) / (np.sqrt(2 * self.variance))
return np.exp(-np.sum(scaled_diff**2, axis=1)) / factor
def compute_density_gradient(self, x: np.ndarray) -> np.ndarray:
"""Evaluates the gradient of the density at x.
:param x: Points at which the gradient is evaluated, shape (n_points, input_dim).
:return: The gradient of the density at x, shape (n_points, input_dim).
"""
values = self.compute_density(x)
return ((-values / self.variance) * (x - self.mean).T).T
def get_box(self) -> BoundsType:
"""A meaningful box containing the measure.
Outside this box, the measure should be zero or virtually zero.
:return: The meaningful box.
"""
# Note: the factor 10 is somewhat arbitrary but well motivated. If this method is used to get a box for
# data-collection, the box will be 2x 10 standard deviations wide in all directions, centered around the mean.
# Outside the box the density is virtually zero.
factor = 10
lower = self.mean - factor * np.sqrt(self.variance)
upper = self.mean + factor * np.sqrt(self.variance)
return list(zip(lower, upper))
def get_samples(self, num_samples: int, context_manager: ContextManager = None) -> np.ndarray:
"""Samples from the measure.
:param num_samples: The number of samples to be taken.
:param context_manager: The context manager that contains variables to fix and the values to fix them to.
If a context is given, this method samples from the conditional distribution.
:return: The samples, shape (num_samples, input_dim).
"""
        samples = self.mean + np.sqrt(self.variance) * np.random.randn(num_samples, self.num_dimensions)
        return samples
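# Illustrative usage of IsotropicGaussianMeasure; the numbers below are arbitrary placeholders.
def _example_isotropic_gaussian_measure():
    measure = IsotropicGaussianMeasure(mean=np.array([0.0, 1.0]), variance=2.0)
    x = np.array([[0.0, 1.0], [1.0, 0.0]])
    density = measure.compute_density(x)            # shape (2, )
    gradient = measure.compute_density_gradient(x)  # shape (2, 2)
    samples = measure.get_samples(5)                # shape (5, 2)
    return density, gradient, samples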
"""Numba implementation of some PAC functions."""
import numpy as np
from scipy.special import erfinv
# if Numba not installed, this section should return a Numba-free jit wrapper
try:
import numba
def jit(signature=None, nopython=True, nogil=True, fastmath=True, # noqa
cache=True, **kwargs):
return numba.jit(signature_or_function=signature, cache=cache,
nogil=nogil, fastmath=fastmath, nopython=nopython,
**kwargs)
except:
def jit(*args, **kwargs): # noqa
def _jit(func):
return func
return _jit
@jit("f8[:,:,:](f8[:,:,:], f8[:,:,:])")
def mean_vector_length_nb(pha, amp):
"""Numba-based Mean Vector Length (MVL).
Parameters
----------
pha, amp : array_like
Respectively the arrays of phases of shape (n_pha, n_epochs, n_times)
and the array of amplitudes of shape (n_amp, n_epochs, n_times). Both
arrays should be of type float64 (np.float64)
Returns
-------
pac : array_like
Array of phase amplitude coupling of shape (n_amp, n_pha, n_epochs)
References
----------
Canolty et al. 2006 :cite:`canolty2006high`
"""
n_pha, n_epochs, n_times = pha.shape
n_amp, _, _ = amp.shape
pac = np.zeros((n_amp, n_pha, n_epochs), dtype=np.float64)
# single conversion
exp_pha = np.exp(1j * pha)
amp_comp = amp.astype(np.complex128)
for a in range(n_amp):
for p in range(n_pha):
for tr in range(n_epochs):
_pha = np.ascontiguousarray(exp_pha[p, tr, :])
_amp = np.ascontiguousarray(amp_comp[a, tr, :])
pac[a, p, tr] = abs(np.dot(_amp, _pha))
pac /= n_times
return pac
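# Illustrative toy check (an addition, not part of the original API): an amplitude locked to the phase yields a clearly
# larger MVL than an uncoupled, flat amplitude. Shapes follow the docstring: (n_pha, n_epochs, n_times) and
# (n_amp, n_epochs, n_times), both float64.
def _example_mvl():
    n_times = 1000
    t = np.arange(n_times)
    pha = np.mod(2 * np.pi * 0.01 * t, 2 * np.pi) - np.pi     # slow phase wrapped to [-pi, pi)
    amp_coupled = 1.0 + np.cos(pha)                           # amplitude modulated by the phase
    amp_flat = np.ones(n_times)                               # uncoupled control
    pha = pha.reshape(1, 1, -1).astype(np.float64)
    amps = np.stack([amp_coupled, amp_flat]).reshape(2, 1, -1).astype(np.float64)
    return mean_vector_length_nb(pha, amps)                   # shape (2, 1, 1); the coupled entry is larger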
@jit("f8[:](f8[:], f8[:], u8, b1)")
def _kl_hr_nb(pha, amp, n_bins=18, mean_bins=True):
"""Binarize the amplitude according to phase values.
This function is shared by the Kullback-Leibler Distance and the
Height Ratio.
"""
vecbin = np.linspace(-np.pi, np.pi, n_bins + 1)
phad = np.digitize(pha, vecbin) - 1
u_phad = np.unique(phad)
abin = np.zeros((len(u_phad)), dtype=np.float64)
for n_i, i in enumerate(u_phad):
# find where phase take vecbin values
idx = np.ascontiguousarray((phad == i).astype(np.float64))
m = idx.sum() if mean_bins else 1.
# take the sum of amplitude inside the bin
abin[n_i] = np.dot(np.ascontiguousarray(amp), idx) / m
return abin
@jit("f8[:,:,:](f8[:,:,:], f8[:,:,:], u8)")
def modulation_index_nb(pha, amp, n_bins=18):
"""Numba-based Modulation index (MI).
The modulation index is obtained using the Kullback Leibler Distance which
measures how much the distribution of binned amplitude differs from a
uniform distribution.
Parameters
----------
pha, amp : array_like
Respectively the arrays of phases of shape (n_pha, n_epochs, n_times)
and the array of amplitudes of shape (n_amp, n_epochs, n_times). Both
arrays should be of type float64 (np.float64)
n_bins : int | 18
Number of bins to binarize the amplitude according to phase intervals
(should be np.int64)
Returns
-------
pac : array_like
Array of phase amplitude coupling of shape (n_amp, n_pha, ...)
References
----------
Tort et al. 2010 :cite:`tort2010measuring`
"""
n_pha, n_epochs, n_times = pha.shape
n_amp, _, _ = amp.shape
pac = np.zeros((n_amp, n_pha, n_epochs), dtype=np.float64)
bin_log = np.log(n_bins)
for a in range(n_amp):
for p in range(n_pha):
for tr in range(n_epochs):
# select phase and amplitude
_pha = np.ascontiguousarray(pha[p, tr, :])
_amp = np.ascontiguousarray(amp[a, tr, :])
# get the probability of each amp bin
p_j = _kl_hr_nb(_pha, _amp, n_bins=n_bins, mean_bins=True)
p_j /= p_j.sum()
# log it (only if strictly positive)
                if np.all(p_j > 0.):
                    # Kullback-Leibler distance to a uniform distribution, normalized by log(n_bins)
                    pac[a, p, tr] = 1 + np.sum(p_j * np.log(p_j)) / bin_log
    return pac
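# Quick illustrative check of the formula above: for a perfectly uniform amplitude distribution p_j = 1/n_bins the term
# sum(p_j*log(p_j)) equals -log(n_bins), so the modulation index reduces to 0 (no coupling).
def _example_uniform_mi_is_zero(n_bins=18):
    p_j = np.full(n_bins, 1.0 / n_bins)
    return 1 + np.sum(p_j * np.log(p_j)) / np.log(n_bins)   # == 0.0 up to floating point error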
# coding: utf-8
# In[ ]:
#functions for running storm data
#functions for running storm data
def interpolate_storm_path(dsx):
import numpy as np
import geopy.distance
from scipy import interpolate
import xarray as xr
import datetime as dt
#after calculating the distance from the storm it became clear that the storm data is every 6 hours, no matter
#how much it may have moved. So if the storm moved 300 km in 6 hr, when calculating the distance to the storm
#there were points on the storm track that showed large distances because of the separation to the 6hrly storm points
#this subroutine interpolates the storm path onto a higher spatial resolution
#the new storm dataset is carefully put into an identical format with i2 and j2 as dims to match the old format
    date_1858 = dt.datetime(1858,11,17,0,0,0) # MJD epoch: start date is 11/17/1858
ynew = []
tnew = []
xnew = []
wnew = []
pnew = []
bnew = []
dsx['lon'] = (dsx.lon-180) % 360 - 180 #put -180 to 180
for istep in range(1,dsx.lon.shape[1]):
dif_lat = dsx.lat[0,istep]-dsx.lat[0,istep-1]
dif_lon = dsx.lon[0,istep]-dsx.lon[0,istep-1]
x,y,t = dsx.lon[0,istep-1:istep+1].values,dsx.lat[0,istep-1:istep+1].values,dsx.time[0,istep-1:istep+1].values
w,p,b = dsx.wind[0,istep-1:istep+1].values,dsx.pres[0,istep-1:istep+1].values,dsx.basin[0,istep-1:istep+1].values
x1,y1,t1 = dsx.lon[0,istep-1:istep].values,dsx.lat[0,istep-1:istep].values,dsx.time[0,istep-1:istep].values
w1,p1,b1 = dsx.wind[0,istep-1:istep].values,dsx.pres[0,istep-1:istep].values,dsx.basin[0,istep-1:istep].values
if abs(dif_lat)>abs(dif_lon):
isign = np.sign(dif_lat)
if abs(dif_lat)>0.75:
ynew1 = np.arange(y[0], y[-1], isign.data*0.75)
f = interpolate.interp1d(y, x, assume_sorted=False)
xnew1 = f(ynew1)
f = interpolate.interp1d(y, t, assume_sorted=False)
tnew1 = f(ynew1)
f = interpolate.interp1d(y, w, assume_sorted=False)
wnew1 = f(ynew1)
f = interpolate.interp1d(y, p, assume_sorted=False)
pnew1 = f(ynew1)
f = interpolate.interp1d(y, b, assume_sorted=False)
bnew1 = f(ynew1)
else:
xnew1,ynew1,tnew1,wnew1,pnew1,bnew1 = x1,y1,t1,w1,p1,b1
xnew,ynew,tnew = np.append(xnew,xnew1),np.append(ynew,ynew1),np.append(tnew,tnew1)
wnew,pnew,bnew = np.append(wnew,wnew1),np.append(pnew,pnew1),np.append(bnew,bnew1)
else:
isign = np.sign(dif_lon)
if abs(dif_lon)>0.75:
iwrap_interp = 1
if (x[0]<-90) & (x[-1]>90):
iwrap_interp = -1
x[0]=x[0]+360
if (x[0]>90) & (x[-1]<-90):
iwrap_interp = -1
x[-1]=x[-1]+360
xnew1 = np.arange(x[0], x[-1], iwrap_interp*isign.data*0.75)
f = interpolate.interp1d(x, y, assume_sorted=False)
ynew1 = f(xnew1)
f = interpolate.interp1d(x, t, assume_sorted=False)
tnew1 = f(xnew1)
f = interpolate.interp1d(x, w, assume_sorted=False)
wnew1 = f(xnew1)
f = interpolate.interp1d(x, p, assume_sorted=False)
pnew1 = f(xnew1)
f = interpolate.interp1d(x, b, assume_sorted=False)
bnew1 = f(xnew1)
xnew1 = (xnew1 - 180) % 360 - 180 #put -180 to 180
else:
xnew1,ynew1,tnew1 = x1,y1,t1
wnew1,pnew1,bnew1 = w1,p1,b1
xnew,ynew,tnew = np.append(xnew,xnew1),np.append(ynew,ynew1),np.append(tnew,tnew1)
wnew,pnew,bnew = np.append(wnew,wnew1),np.append(pnew,pnew1),np.append(bnew,bnew1)
x1,y1,t1 = dsx.lon[0,-1].values,dsx.lat[0,-1].values,dsx.time[0,-1].values
w1,p1,b1 = dsx.wind[0,-1].values,dsx.pres[0,-1].values,dsx.basin[0,-1].values
xnew1,ynew1,tnew1 = x1,y1,t1
wnew1,pnew1,bnew1 = w1,p1,b1
xnew,ynew,tnew = np.append(xnew,xnew1),np.append(ynew,ynew1),np.append(tnew,tnew1)
wnew,pnew,bnew = np.append(wnew,wnew1),np.append(pnew,pnew1),np.append(bnew,bnew1)
#print(xnew)
#remove any repeated points
ilen=xnew.size
outputx,outputy,outputt,outputw,outputp,outputb=[],[],[],[],[],[]
for i in range(0,ilen-1):
if (xnew[i]==xnew[i+1]) and (ynew[i]==ynew[i+1]):
continue
else:
outputx,outputy,outputt = np.append(outputx,xnew[i]),np.append(outputy,ynew[i]),np.append(outputt,tnew[i])
outputw,outputp,outputb = np.append(outputw,wnew[i]),np.append(outputp,pnew[i]),np.append(outputb,bnew[i])
outputx,outputy,outputt = np.append(outputx,xnew[-1]),np.append(outputy,ynew[-1]),np.append(outputt,tnew[-1])
outputw,outputp,outputb = np.append(outputw,wnew[-1]),np.append(outputp,pnew[-1]),np.append(outputb,bnew[-1])
xnew,ynew,tnew=outputx,outputy,outputt
wnew,pnew,bnew=outputw,outputp,outputb
#put into xarray
i2,j2=xnew.shape[0],1
tem = np.expand_dims(xnew, axis=0)
xx = xr.DataArray(tem.T,dims=['i2','j2'])
tem = np.expand_dims(ynew, axis=0)
yy = xr.DataArray(tem.T,dims=['i2','j2'])
tem = np.expand_dims(tnew, axis=0)
tt = xr.DataArray(tem.T,dims=['i2','j2'])
tem = np.expand_dims(wnew, axis=0)
ww = xr.DataArray(tem.T,dims=['i2','j2'])
tem = np.expand_dims(pnew, axis=0)
pp = xr.DataArray(tem.T,dims=['i2','j2'])
tem = np.expand_dims(bnew, axis=0)
bb = xr.DataArray(tem.T,dims=['i2','j2'])
dsx_new = xr.Dataset({'lon':xx.T,'lat':yy.T,'time':tt.T,'wind':ww.T,'pres':pp.T,'basin':bb.T})
#add storm translation speed to storm information
tdim_storm = dsx_new.time.size
storm_speed = dsx_new.time.copy(deep=True)*np.nan
for i in range(0,tdim_storm-1):
coords_1 = (dsx_new.lat[0,i], dsx_new.lon[0,i])
coords_2 = (dsx_new.lat[0,i+1], dsx_new.lon[0,i+1])
arclen_temp = geopy.distance.geodesic(coords_1, coords_2).km #distance in km
storm_date1 = np.datetime64(date_1858 + dt.timedelta(days=float(dsx_new.time[0,i])))
storm_date2 = np.datetime64(date_1858 + dt.timedelta(days=float(dsx_new.time[0,i+1])))
arclen_time = storm_date2 - storm_date1
arclen_hr = arclen_time / np.timedelta64(1, 'h')
storm_speed[0,i]=arclen_temp/(arclen_hr)
storm_speed[0,-1]=storm_speed[0,-2]
dsx_new['storm_speed_kmhr']=storm_speed
return dsx_new
def get_dist_grid(lat_point,lon_point,lat_grid,lon_grid):
import geopy.distance
from math import sin, pi
import numpy as np
#this routine takes a point and finds distance to all points in a grid of lat and lon
#it is slowwwwwww
dist_grid = np.empty(lat_grid.shape)
coords_1 = (lat_point, lon_point)
for i in range(0,lat_grid.shape[0]):
for j in range(0,lat_grid.shape[1]):
coords_2 = (lat_grid[i,j], lon_grid[i,j])
arclen_temp = geopy.distance.geodesic(coords_1, coords_2).km #distance in km
dist_grid[i,j]=arclen_temp
return dist_grid
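# Illustrative usage: distances (km) from a single point to every node of a small 2x2 lat/lon grid. The grid values are
# placeholders; in practice lat_grid/lon_grid come from the SST dataset handled below.
def _example_get_dist_grid():
    import numpy as np
    lat_grid, lon_grid = np.meshgrid(np.array([10.0, 11.0]), np.array([-40.0, -39.0]), indexing='ij')
    return get_dist_grid(15.0, -42.0, lat_grid, lon_grid)   # shape (2, 2)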
def closest_dist(ds_in,ds_storm):
import xarray as xr
import numpy as np
# m.garcia-reyes 2.4.2019, edited c.gentemann 2.4.2019
# calculate distance closest storm point
# point given as tla,tlo.... storm is in the program
# initialize distances (in km)
# ds_storm['lon'] = (ds_storm.lon + 180) % 360 - 180
# print('here')
dsx_input = ds_storm.copy(deep=True)
ds_storm_new = interpolate_storm_path(dsx_input)
tdim,xdim,ydim=ds_storm_new.lat.shape[1], ds_in.analysed_sst[0,:,0].shape[0], ds_in.analysed_sst[0,0,:].shape[0]
    dx_save = np.zeros([tdim,xdim,ydim])
import json
import random
import os
import numpy as np
import torch
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
import copy
import math
import h5py
import models.Constants as Constants
from bisect import bisect_left
import torch.nn.functional as F
import pickle
from pandas.io.json import json_normalize
def resampling(source_length, target_length):
return [round(i * (source_length-1) / (target_length-1)) for i in range(target_length)]
def get_frames_idx(length, n_frames, random_type, equally_sampling=False):
bound = [int(i) for i in np.linspace(0, length, n_frames+1)]
idx = []
all_idx = [i for i in range(length)]
if random_type == 'all_random' and not equally_sampling:
idx = random.sample(all_idx, n_frames)
else:
for i in range(n_frames):
if not equally_sampling:
tmp = np.random.randint(bound[i], bound[i+1])
else:
tmp = (bound[i] + bound[i+1]) // 2
idx.append(tmp)
return sorted(idx)
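# Illustrative behaviour of the two sampling helpers above: resampling() maps the source indices onto the target length,
# while get_frames_idx() draws n_frames indices either randomly inside equal segments or at the segment centres.
def _example_frame_sampling():
    idx_resampled = resampling(10, 5)                                               # [0, 2, 4, 7, 9]
    idx_centres = get_frames_idx(60, 8, 'segment_random', equally_sampling=True)    # [3, 11, 18, 26, 33, 41, 48, 56]
    idx_random = get_frames_idx(60, 8, 'segment_random')                            # one random index per segment
    return idx_resampled, idx_centres, idx_random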
class VideoDataset(Dataset):
def __init__(self, opt, mode, print_info=False, shuffle_feats=0, specific=-1):
super(VideoDataset, self).__init__()
self.mode = mode
self.random_type = opt.get('random_type', 'segment_random')
assert self.mode in ['train', 'validate', 'test', 'all', 'trainval']
assert self.random_type in ['segment_random', 'all_random']
# load the json file which contains information about the dataset
data = pickle.load(open(opt['info_corpus'], 'rb'))
info = data['info']
self.itow = info['itow']
self.wtoi = {v: k for k, v in self.itow.items()}
self.itoc = info.get('itoc', None)
self.itop = info.get('itop', None)
self.itoa = info.get('itoa', None)
self.length_info = info['length_info']
self.splits = info['split']
if self.mode == 'trainval':
self.splits['trainval'] = self.splits['train'] + self.splits['validate']
self.split_category = info.get('split_category', None)
self.id_to_vid = info.get('id_to_vid', None)
self.captions = data['captions']
self.pos_tags = data['pos_tags']
self.references = pickle.load(open(opt['reference'], 'rb'))
self.specific = specific
self.num_category = opt.get('num_category', 20)
self.max_len = opt["max_len"]
self.n_frames = opt['n_frames']
self.equally_sampling = opt.get('equally_sampling', False)
self.total_frames_length = opt.get('total_frames_length', 60)
self.data_i = [self.load_database(opt["feats_i"]), opt["dim_i"], opt.get("dummy_feats_i", False)]
self.data_m = [self.load_database(opt["feats_m"]), opt["dim_m"], opt.get("dummy_feats_m", False)]
#self.data_i = [[], opt["dim_i"], opt.get("dummy_feats_i", False)]
#self.data_m = [[], opt["dim_m"], opt.get("dummy_feats_m", False)]
self.data_a = [self.load_database(opt["feats_a"]), opt["dim_a"], opt.get("dummy_feats_a", False)]
self.data_s = [self.load_database(opt.get("feats_s", [])), opt.get("dim_s", 10), False]
self.data_t = [self.load_database(opt.get("feats_t", [])), opt.get('dim_t', 10), False]
self.mask_prob = opt.get('teacher_prob', 1)
self.decoder_type = opt['decoder_type']
self.random = np.random.RandomState(opt.get('seed', 0))
self.obj = self.load_database(opt.get('object_path', ''))
self.all_caps_a_round = opt['all_caps_a_round']
self.load_feats_type = opt['load_feats_type']
self.method = opt.get('method', 'mp')
self.demand = opt['demand']
self.opt = opt
if print_info: self.print_info(opt)
self.beta_low, self.beta_high = opt.get('beta', [0, 1])
if (opt.get('triplet', False) or opt.get('knowledge_distillation_with_bert', False)) and self.mode == 'train':
self.bert_embeddings = self.load_database(opt['bert_embeddings'])
else:
self.bert_embeddings = None
if opt.get('load_generated_captions', False):
self.generated_captions = pickle.load(open(opt['generated_captions'], 'rb'))
assert self.mode in ['test']
else:
self.generated_captions = None
self.infoset = self.make_infoset()
def get_references(self):
return self.references
def get_preprocessed_references(self):
return self.captions
def make_infoset(self):
infoset = []
# decide the size of infoset
if self.specific != -1:
# we only evaluate partial examples with a specific category (MSRVTT, [0, 19])
ix_set = [int(item) for item in self.split_category[self.mode][self.specific]]
else:
# we evaluate all examples
ix_set = [int(item) for item in self.splits[self.mode]]
vatex = self.opt['dataset'] == 'VATEX' and self.mode == 'test'
for ix in ix_set:
vid = 'video%d' % ix
if vatex:
category = 0
captions = [[0]]
pos_tags = [[0]]
length_target = [0]
else:
category = self.itoc[ix] if self.itoc is not None else 0
captions = self.captions[vid]
pos_tags = self.pos_tags[vid] if self.pos_tags is not None else ([None] * len(captions))
                # prepare length info for each video example, only if decoder_type == 'NARFormer'
# e.g., 'video1': [0, 0, 3, 5, 0]
if self.length_info is None:
length_target = np.zeros(self.max_len)
else:
length_target = self.length_info[vid]
#length_target = length_target[1:self.max_len+1]
length_target = length_target[:self.max_len]
if len(length_target) < self.max_len:
length_target += [0] * (self.max_len - len(length_target))
#right_sum = sum(length_target[self.max_len+1:])
#length_target[-1] += right_sum
length_target = np.array(length_target) / sum(length_target)
if self.mode == 'train' and self.all_caps_a_round:
# infoset will contain all captions
for i, (cap, pt) in enumerate(zip(captions, pos_tags)):
item = {
'vid': vid,
'labels': cap,
'pos_tags': pt,
'category': category,
'length_target': length_target,
'cap_id': i,
}
infoset.append(item)
else:
if self.generated_captions is not None:
# edit the generated captions
cap = self.generated_captions[vid][-1]['caption']
#print(cap)
labels = [Constants.BOS]
for w in cap.split(' '):
labels.append(self.wtoi[w])
labels.append(Constants.EOS)
#print(labels)
item = {
'vid': vid,
'labels': labels,
'pos_tags': pos_tags[0],
'category': category,
'length_target': length_target
}
else:
# infoset will contain partial captions, one caption per video clip
cap_ix = random.randint(0, len(self.captions[vid]) - 1) if self.mode == 'train' else 0
#print(captions[0])
item = {
'vid': vid,
'labels': captions[cap_ix],
'pos_tags': pos_tags[cap_ix],
'category': category,
'length_target': length_target,
'cap_id': cap_ix,
}
infoset.append(item)
return infoset
def shuffle(self):
random.shuffle(self.infoset)
def __getitem__(self, ix):
vid = self.infoset[ix]['vid']
labels = self.infoset[ix]['labels']
taggings = self.infoset[ix]['pos_tags']
category = self.infoset[ix]['category']
length_target = self.infoset[ix]['length_target']
cap_id = self.infoset[ix].get('cap_id', None)
if cap_id is not None and self.bert_embeddings is not None:
bert_embs = np.asarray(self.bert_embeddings[0][vid])#[cap_id]
else:
bert_embs = None
attribute = self.itoa[vid]
frames_idx = get_frames_idx(
self.total_frames_length,
self.n_frames,
self.random_type,
equally_sampling = True if self.mode != 'train' else self.equally_sampling
) if self.load_feats_type == 0 else None
load_feats_func = self.load_feats if self.load_feats_type == 0 else self.load_feats_padding
feats_i = load_feats_func(self.data_i, vid, frames_idx)
feats_m = load_feats_func(self.data_m, vid, frames_idx, padding=False)#, scale=0.1)
feats_a = load_feats_func(self.data_a, vid, frames_idx)#, padding=False)
feats_s = load_feats_func(self.data_s, vid, frames_idx)
feats_t = load_feats_func(self.data_t, vid, frames_idx)#, padding=False)
results = self.make_source_target(labels, taggings)
tokens, labels, pure_target, taggings = map(
lambda x: results[x],
["dec_source", "dec_target", "pure_target", "tagging"]
)
tokens_1 = results.get('dec_source_1', None)
labels_1 = results.get('dec_target_1', None)
data = {}
data['feats_i'] = torch.FloatTensor(feats_i)
data['feats_m'] = torch.FloatTensor(feats_m)#.mean(0).unsqueeze(0).repeat(self.n_frames, 1)
data['feats_a'] = torch.FloatTensor(feats_a)
data['feats_s'] = F.softmax(torch.FloatTensor(feats_s), dim=1)
#print(feats_t.shape)
data['feats_t'] = torch.FloatTensor(feats_t)
data['tokens'] = torch.LongTensor(tokens)
data['labels'] = torch.LongTensor(labels)
data['pure_target'] = torch.LongTensor(pure_target)
data['length_target'] = torch.FloatTensor(length_target)
data['attribute'] = torch.FloatTensor(attribute)
if tokens_1 is not None:
data['tokens_1'] = torch.LongTensor(tokens_1)
data['labels_1'] = torch.LongTensor(labels_1)
if taggings is not None:
data['taggings'] = torch.LongTensor(taggings)
if bert_embs is not None:
data['bert_embs'] = torch.FloatTensor(bert_embs)
if self.decoder_type == 'LSTM' or self.decoder_type == 'ENSEMBLE':
tmp = np.zeros(self.num_category)
tmp[category] = 1
data['category'] = torch.FloatTensor(tmp)
else:
data['category'] = torch.LongTensor([category])
if frames_idx is not None:
data['frames_idx'] = frames_idx
data['video_ids'] = vid
if len(self.obj):
data['obj'] = torch.FloatTensor(np.asarray(self.obj[0][vid]))
return data
def __len__(self):
return len(self.infoset)
def get_mode(self):
return self.id_to_vid, self.mode
def set_splits_by_json_path(self, json_path):
self.splits = json.load(open(json_path))['videos']
def get_vocab_size(self):
return len(self.get_vocab())
def get_vocab(self):
return self.itow
def print_info(self, opt):
print('vocab size is ', len(self.itow))
print('number of train videos: ', len(self.splits['train']))
print('number of val videos: ', len(self.splits['validate']))
print('number of test videos: ', len(self.splits['test']))
print('load image feats (%d) from %s' % (opt["dim_i"], opt["feats_i"]))
print('load motion feats (%d) from %s' % (opt["dim_m"], opt["feats_m"]))
        print('load audio feats (%d) from %s' % (opt["dim_a"], opt["feats_a"]))
print('max sequence length in data is', self.max_len)
print('load feats type: %d' % self.load_feats_type)
def load_database(self, path):
if not path:
return []
database = []
if isinstance(path, list):
for p in path:
if '.hdf5' in p:
database.append(h5py.File(p, 'r'))
else:
if '.hdf5' in path:
database.append(h5py.File(path, 'r'))
return database
def load_feats(self, data, vid, frames_idx, padding=True):
databases, dim, dummy = data
if not len(databases) or dummy:
return np.zeros((self.n_frames, dim))
feats = []
for database in databases:
if vid not in database.keys():
return np.zeros((self.n_frames, dim))
else:
data = np.asarray(database[vid])
if len(data.shape) == 1 and padding:
data = data[np.newaxis, :].repeat(self.total_frames_length, axis=0)
feats.append(data)
if len(feats[0].shape) == 1:
feats = np.concatenate(feats, axis=0)
return feats
feats = np.concatenate(feats, axis=1)
return feats[frames_idx]
def load_feats_padding(self, data, vid, dummy=None, padding=True, scale=1):
databases, dim, _ = data
if not len(databases):
return np.zeros((self.n_frames, dim))
feats = []
for database in databases:
if vid not in database.keys():
if padding:
return np.zeros((self.n_frames, dim))
else:
return np.zeros(dim)
else:
data = np.asarray(database[vid])
if len(data.shape) == 1 and padding:
data = data[np.newaxis, :].repeat(self.total_frames_length, axis=0)
feats.append(data * scale)
if len(feats[0].shape) == 1:
feats = np.concatenate(feats, axis=0)
return feats
feats = np.concatenate(feats, axis=1)
source_length = feats.shape[0]
if source_length > self.n_frames:
frames_idx = get_frames_idx(
source_length,
self.n_frames,
self.random_type,
equally_sampling = True if self.mode != 'train' else self.equally_sampling)
else:
frames_idx = resampling(source_length, self.n_frames)
#frames_idx = [i for i in range(feats.size(0))]
#frames_idx += [-1] * (self.n_frames - feats.size(0))
#print(vid, feats.sum(), feats.shape, frames_idx)
return feats[frames_idx]
def padding(self, seq, add_eos=True):
if seq is None:
return None
res = seq.copy()
if len(res) > self.max_len:
res = res[:self.max_len]
if add_eos:
res[-1] = Constants.EOS
else:
res += [Constants.PAD] * (self.max_len - len(res))
return res
def make_source_target(self, target, tagging):
if self.decoder_type == 'NARFormer':
results = self.source_target_mlm(target[1:-1]) # exclude <bos> <eos>
else:
# ARFormer
results = {
'dec_source': self.padding(target, add_eos=True),
'dec_target': self.padding(target, add_eos=True)
}
assert len(results['dec_source']) == len(results['dec_target'])
if self.method in ['ag', 'nv']:
results.update(self.source_target_visual_word(target=target, pos_tag=tagging))
if 'pure_target' not in results.keys():
results['pure_target'] = self.padding(target.copy(), add_eos=True)
if 'tagging' not in results.keys():
results['tagging'] = self.padding(tagging, add_eos=True)
return results
def source_target_mlm(self, target):
assert target[0] != Constants.BOS
assert target[-1] != Constants.EOS
min_num_masks = 1
dec_source = torch.LongTensor(target)
dec_target_cp = torch.LongTensor(target)
dec_target = torch.LongTensor([Constants.PAD] * len(dec_source))
if self.mode == 'train':
if min_num_masks >= len(dec_source):
ind = np.array([],dtype=np.uint8)
else:
low = max(int(len(dec_source) * self.beta_low), min_num_masks)
high = max(int(len(dec_source) * self.beta_high), min_num_masks+1)
sample_size = self.random.randint(low, high)
ind = self.random.choice(len(dec_source) , size=sample_size, replace=False)
dec_source[ind] = Constants.MASK
dec_target[ind] = dec_target_cp[ind]
else:
dec_source[dec_source!=Constants.PAD] = Constants.MASK
dec_target = dec_target_cp
dec_source = self.padding(dec_source.tolist(), add_eos=False)
dec_target = self.padding(dec_target.tolist(), add_eos=False)
pure_target = self.padding(target, add_eos=False)
return {'dec_source': dec_source, 'dec_target': dec_target, 'pure_target': pure_target}
def source_target_visual_word(self, **kwargs):
target = kwargs['target']
pos_tag = kwargs['pos_tag']
sent_length = len(target[1:-1]) # exclude <bos> <eos>
if self.decoder_type == 'NARFormer':
visual_tag = Constants.BOS
target_tag = Constants.MASK
else:
visual_tag = Constants.MASK
target_tag = Constants.BOS
if self.mode != 'train':
dec_target_1 = [0]
dec_source_1 = [0]
else:
assert len(target) == len(pos_tag)
assert self.itop is not None
dec_target_cp = torch.LongTensor(target[1:-1])
            dec_source_1 = self.padding([visual_tag] * (sent_length if self.decoder_type == 'NARFormer' else len(target)),
                                        add_eos=False if self.decoder_type == 'NARFormer' else True)
# get the position of tokens that have the pos_tag we demand
pos_satisfied_ind = []
for i, item in enumerate(pos_tag[1:-1]):
w = self.itow[target[i+1]]
# we ignore verb ``be''
if self.itop[item] in self.demand and w not in ['is', 'are', 'was', 'were', 'be']:
pos_satisfied_ind.append(i)
pos_satisfied_ind = np.array(pos_satisfied_ind)
# decoder1 need to predict tokens with satisfied pos_tag from scratch
# meanwhile, decoder1 should learn to keep the remaining tokens (i.e., <mask>) unchanged
dec_target_1 = torch.LongTensor([target_tag] * sent_length)
dec_target_1[pos_satisfied_ind] = dec_target_cp[pos_satisfied_ind]
if self.decoder_type == 'NARFormer':
dec_target_1 = self.padding(dec_target_1.tolist(), add_eos=False)
else:
# when training with autoregressive transformer, the first token will be ignored, i.e., label = dec_target_1[1:]
dec_target_1 = self.padding([target[0]] + dec_target_1.tolist() + [Constants.BOS], add_eos=True)
#print(dec_source_1, dec_target_1)
return {'dec_source_1': dec_source_1, 'dec_target_1': dec_target_1}
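# Standalone sketch of the masked-language-model style corruption performed by VideoDataset.source_target_mlm above:
# a random subset of target tokens is replaced by <mask> in the decoder input and only those positions carry a real
# label; at test time every token is masked. This helper is illustrative only and mirrors, not replaces, the method;
# mask_ratio stands in for the beta_low/beta_high range used there.
def _example_mlm_masking(target_ids, mask_ratio=0.5, seed=0):
    rng = np.random.RandomState(seed)
    dec_source = np.array(target_ids, dtype=np.int64)
    dec_target = np.full_like(dec_source, Constants.PAD)
    n_mask = max(1, int(mask_ratio * len(dec_source)))
    ind = rng.choice(len(dec_source), size=n_mask, replace=False)
    dec_target[ind] = dec_source[ind]    # supervise only the masked positions
    dec_source[ind] = Constants.MASK     # hide them from the decoder input
    return dec_source, dec_target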
class BD_Dataset(Dataset):
def __init__(self, opt, mode, print_info=False, shuffle_feats=0, specific=-1, target_ratio=-1):
super(BD_Dataset, self).__init__()
self.mode = mode
self.random_type = opt.get('random_type', 'segment_random')
self.total_frames_length = 60
assert self.mode in ['train', 'validate', 'trainval']
data = pickle.load(open(opt['info_corpus'], 'rb'))
info = data['info']
self.itoc = info.get('itoc', None)
self.splits = info['split']
self.data = pickle.load(open(opt['bd_training_data'], 'rb'))
if self.mode == 'trainval':
self.splits['trainval'] = self.splits['train'] + self.splits['validate']
self.max_len = opt["max_len"]
self.n_frames = opt['n_frames']
self.equally_sampling = opt.get('equally_sampling', False)
self.data_i = [self.load_database(opt["feats_i"]), opt["dim_i"], opt.get("dummy_feats_i", False)]
self.data_m = [self.load_database(opt["feats_m"]), opt["dim_m"], opt.get("dummy_feats_m", False)]
self.data_a = [self.load_database(opt["feats_a"]), opt["dim_a"], opt.get("dummy_feats_a", False)]
self.bd_load_feats = opt.get('bd_load_feats', False)
self.infoset = self.make_infoset()
def load_database(self, path):
if not path:
return []
database = []
if isinstance(path, list):
for p in path:
if '.hdf5' in p:
database.append(h5py.File(p, 'r'))
else:
if '.hdf5' in path:
database.append(h5py.File(path, 'r'))
return database
def load_feats_padding(self, data, vid, dummy=None, padding=True, scale=1):
databases, dim, _ = data
if not len(databases):
return np.zeros((self.n_frames, dim))
feats = []
for database in databases:
if vid not in database.keys():
if padding:
return np.zeros((self.n_frames, dim))
else:
return np.zeros(dim)
else:
data = np.asarray(database[vid])
import numpy as np
import matplotlib.pyplot as plt
import plotly.plotly as py
from sys import argv
#%matplotlib inline
from tf_shuffle import shuffle
def check_shuffle(deck):
count = 0
for i in range(len(deck)-2):
diff = deck[i+1] - deck[i]
if (abs(deck[i+2] - deck[i+1]) == diff) and (abs(deck[i+1] - deck[i]) == diff):
count += 1
else:
count = count
return count
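# Illustrative sketch (hypothetical deck, not one of the decks used below):
# check_shuffle([0, 1, 2, 7, 5, 6]) finds a single run of three equally spaced,
# increasing cards (0, 1, 2) and therefore returns 1.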
def recurse(deck):
count = 0
for i in range(len(deck)-1):
if deck[i] == deck[i+1]:
count+=1
else:
count = count
return count
D0 = np.array(range(0,0))
S0 = shuffle(D0)
DT26 = list(range(0, 26))
DT52 = list(range(0, 52))
DT104 = list(range(0, 104))
deck_list = [DT26, DT52, DT104]  # decks have different lengths, so keep a plain list (np.array would be ragged)
n = len(deck_list)
num_shuffles = 10
shuffle_deck_2 = np.zeros((num_shuffles+1, len(DT26)))
shuffle_deck_3 = np.zeros((num_shuffles+1, len(DT52)))
shuffle_deck_4 = np.zeros((num_shuffles+1, len(DT104)))
shuffle_deck_2[0] = DT26
shuffle_deck_3[0] = DT52
shuffle_deck_4[0] = DT104
print("Let's consider where the original top and bottom cards of the unshuffled deck end up after %s shuffles." %(num_shuffles))
print()
top_card_num_arr = np.zeros(n)
bottom_card_num_arr = np.zeros(n)
init_top_card_index = np.zeros(n)
init_bottom_card_index = np.zeros(n)
new_top_card_index = np.zeros(n)
new_bottom_card_index = np.zeros(n)
S2 = DT26
S3 = DT52
S4 = DT104
for i in range(1, num_shuffles + 1):
S2 = shuffle(S2).tolist()
S3 = shuffle(S3).tolist()
S4 = shuffle(S4).tolist()
shuffle_deck_2[i] = S2
shuffle_deck_3[i] = S3
shuffle_deck_4[i] = S4
shuffled_deck_list = [S2, S3, S4]
for i in range(n):
top_card_num_arr[i] = deck_list[i][0]
bottom_card_num_arr[i] = deck_list[i][-1]
init_bottom_card_index[i] = len(deck_list[i]) - 1
new_top_card_index[i] = shuffled_deck_list[i].index(top_card_num_arr[i])
new_bottom_card_index[i] = shuffled_deck_list[i].index(bottom_card_num_arr[i])
print("The shuffled deck %s is: \n %s \n" %(i+1, shuffled_deck_list[i]) )
for i in range(len(deck_list)):
print("%s cards: \n%s" %(len(deck_list[i]), shuffled_deck_list[i]))
print()
print("%s cards, initial index %s (top card) --> index %s" %(len(deck_list[i]), init_top_card_index[i], new_top_card_index[i]))
print("Top card moved %s positions" %(new_top_card_index[i] - init_top_card_index[i]))
print("%s cards, initial index %s (bottom card) --> index %s" %(len(deck_list[i]), init_bottom_card_index[i], new_bottom_card_index[i]))
print("Bottom card moved %s positions" %(init_bottom_card_index[i] - new_bottom_card_index[i]))
print()
###
#Test Cases
print("Let's look at whether there are still groups of consecutive cards.")
print("We'll consider a consecutive group to be 3 ordered cards in a row.")
print()
print("Let's compare consecutive shuffles of 26 cards:")
print()
grps = np.zeros(num_shuffles)
for row in range(num_shuffles):
print("Shuffle %s: %s\n" %(row, shuffle_deck_2[row]))
grps[row] = check_shuffle(shuffle_deck_2[row])
print("List of number of ordered sequences at each iteration: ", grps)
plt.plot(grps)
plt.show()
print("Let's compare consecutive shuffles of 52 cards:")
print()
grps = np.zeros(num_shuffles)
"""main threadcount module."""
import json
import csv
from types import SimpleNamespace
from collections import OrderedDict, UserList
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import lmfit
import mpdaf.obj
import astropy.units as u
from . import lines
from . import models
from . import mpdaf_ext # noqa: F401
FLAM16 = u.Unit(1e-16 * u.erg / (u.cm ** 2 * u.s * u.AA))
"""A header["BUNIT"] value we have."""
FLOAT_FMT = ".8g"
"""Default formatting for floats in output files."""
DEFAULT_FIT_INFO = "aic_real bic_real chisqr redchi success".split()
"""Define typical ModelResult information we might want."""
def open_fits_cube(
data_filename, data_hdu_index=None, var_filename=None, var_hdu_index=None, **kwargs
):
"""Load a fits file using :class:`mpdaf.obj.Cube`, and handle variance in separate file.
I highly recommend being explicit in the parameters and not relying on the
guessing that mpdaf can perform.
Parameters
----------
data_filename : str
Path to file containing data
data_hdu_index : int, optional
Index indicating which hdu contains the data (starting with 0), by default None (then the
:class:`mpdaf.obj.Cube` constructor will attempt
to guess the correct extension)
var_filename : str, optional
Path to file containing variance, by default None (No variance will be
loaded. Unless `data_hdu_index` = None, and then the
:class:`mpdaf.obj.Cube` constructor will attempt to
automatically load variance from `data_filename`)
var_hdu_index : int, optional
Index indicating which hdu contains the variance (starting with 0), by
default None (then the :class:`mpdaf.obj.Cube` constructor will attempt
to guess the correct extension)
**kwargs : dict, optional
Any keyword arguments to pass to :class:`mpdaf.obj.Cube`, such as `unit`
Returns
-------
:class:`mpdaf.obj.Cube`
A data cube.
"""
# no variance given:
if var_filename is None:
cube = mpdaf.obj.Cube(data_filename, ext=data_hdu_index, **kwargs)
# data and variance stored in same file:
elif data_filename == var_filename:
cube = mpdaf.obj.Cube(
data_filename, ext=(data_hdu_index, var_hdu_index), **kwargs
)
# data and variance stored in different files:
else:
cube = mpdaf.obj.Cube(data_filename, ext=data_hdu_index, **kwargs)
varcube = mpdaf.obj.Cube(var_filename, ext=var_hdu_index, **kwargs)
# varcube is loaded as masked array.
cube._var = varcube.data.data
cube._mask |= varcube.mask
# test for FLAM16:
if cube.unit == u.dimensionless_unscaled:
if cube.data_header.get("BUNIT") == "FLAM16":
cube.unit = FLAM16
return cube
def de_redshift(wavecoord, z=0, z_initial=0):
r"""De-redshift the WaveCoord in-place.
Parameters
----------
wavecoord : :class:`mpdaf.obj.WaveCoord`
The wavelength coordinate to be de-redshifted
z : float, optional
The redshift of the object whose wavecoord to de-redshift, by default 0 (i.e. no change)
z_initial : float, optional
The redshift currently applied to the wavecoord, by default 0 (i.e. none applied)
Notes
-----
I tried to make z a new attribute in `wavecoord`, but due to details in how
slicing works, this was not a simple change. Therefore z must be stored in
a variable externally to the wavecoord.
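    As a purely illustrative example (numbers invented here): with `z_initial` = 0
    and `z` = 0.03, a crval of 5157.21 A is rescaled to 5157.21 / 1.03 = 5007 A,
    i.e. the wavelength axis is moved to the rest frame of the object.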
"""
wavecoord.set_crval(wavecoord.get_crval() * (1 + z_initial) / (1 + z))
wavecoord.set_step(wavecoord.get_step() * (1 + z_initial) / (1 + z))
return z
# TODO: Add in a part where the user can input in a redshift and move the
# histogram or center line or whatever around. i.e. user input at the end.
def tweak_redshift(
cube,
z_gal,
center_wavelength=lines.OIII5007,
wavelength_range=(-15, 15), # This is in Angstroms
pixel_mask=None,
):
"""Interactively choose a new redshift.
This procedure has several steps.
1. Select which spaxels to use for calculating the redshift via one of these options:
* use the input parameter `pixel_mask`
* Select pixels with a high integrated flux value in the selected wavelength range.
These are likely to be the galaxy. The user will interact with the terminal
and view a plot to interactively change the lower threshold for the desired
pixels. To accept the value plotted, leave the entry blank and press enter.
2. Fit a :class:`~threadcount.models.Const_1GaussModel` to the selected spaxels.
3. Extract the parameter value for 'g1_center' to get the center wavelength
of the fitted gaussian and compute the median center.
4. Calculate the redshift required for the median center to be equal to
`center_wavelength` using the formula::
new_z = (median_center / `center_wavelength`) * (1 + `z_gal`) - 1
5. Display a plot showing the spaxels used and a histogram displaying all the
center wavelengths (with `center_wavelength` subtracted, so it displays
the change from ideal)
Parameters
----------
cube : :class:`mpdaf.obj.Cube`
A datacube containing the wavelength range set in these parameters
z_gal : float
The redshift of the object which has already been applied to the `cube`
center_wavelength : float, optional
The center wavelength of the emission line to fit, by default :const:`threadcount.lines.OIII5007`
wavelength_range : array-like [float, float], optional
The wavelength range to fit, in Angstroms. These are defined as a change
from the `center_wavelength`, by default (-15, 15)
Returns
-------
float
The redshift selected by the user.
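    Notes
    -----
    A purely illustrative example of the formula in step 4 (numbers invented
    here, not taken from any dataset): if the median fitted center is 5010 A,
    `center_wavelength` is 5007 A and `z_gal` = 0.03, then
    new_z = (5010 / 5007) * (1 + 0.03) - 1, which is approximately 0.0306.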
"""
plt.close()
print("====================================")
print("Tweak reshift procedure has started.")
print("====================================\n\n")
print("Using line {:.4g} +/- {} A".format(center_wavelength, wavelength_range[1]))
# retrieve spectral subcube from cube.
subcube = cube.select_lambda(
center_wavelength + wavelength_range[0],
center_wavelength + wavelength_range[1],
)
fluxmap = subcube.sum(axis=0)
if pixel_mask is None:
# use the sum of the flux and mask at value, changed by user interaction.
plot_title = (
"Tweak Redshift:\n"
"Spaxels to fit. Set mask level in console.\n"
"Val.=sum of spectrum (arb. units)"
)
limit = interactive_lower_threshold(fluxmap, title=plot_title)
pixel_mask = (fluxmap > limit).mask
fluxmap.mask = pixel_mask
fig, axs = plt.subplots(ncols=2, gridspec_kw={"top": 0.85})
fig.suptitle(
"Tweak z using line {:.4g} +/- {} A".format(
center_wavelength, wavelength_range[1]
)
)
fluxmap.plot(
ax=axs[0],
title="Spaxels included in histogram\nVal.=sum of spectrum (arb. units)",
colorbar="v",
zscale=True,
)
valid_pixels = np.where(~fluxmap.mask)
# loop over valid pixels, do the fit, and store in results list.
results = []
model = models.Const_1GaussModel()
params = None
print("Fitting selected spaxels with gaussian model...")
for y, x in zip(*valid_pixels):
this_mr = subcube[:, y, x].lmfit(model, params=params, method="least_squares")
if params is None:
params = this_mr.params
results += [this_mr]
fit_centers = vget_param_values(results, "g1_center")
# remove invalid values, specifically centers outside the range given:
fit_centers = fit_centers[fit_centers < (center_wavelength + wavelength_range[1])]
fit_centers = fit_centers[fit_centers > (center_wavelength + wavelength_range[0])]
plt.sca(axs[1])
plt.hist(fit_centers - center_wavelength, bins=20)
plt.title("Center wavelength histogram")
plt.xlabel(r"change from {:.5g} $\AA$ [$\AA$]".format(center_wavelength))
plt.axvline(0, color="black", label=r"{:.5g} $\AA$".format(center_wavelength))
plt.axvline(
np.nanmedian(fit_centers) - center_wavelength, color="red", label="median"
)
plt.legend()
plt.show(block=False)
print("Redshift from input settings (for reference) : {}".format(z_gal))
new_z = (np.nanmedian(fit_centers) / center_wavelength) * (1 + z_gal) - 1
print("Redshift calculated from the median of the fit centers: {}".format(new_z))
change_z = input(
"Do you want to update the redshift with the calculated value {} ([y]/n)? ".format(
new_z
)
)
if change_z.lower().startswith("n"):
return_z = z_gal
message = "The original redshift has been kept: {}".format(return_z)
else:
return_z = new_z
message = "The redshift has been updated to {}".format(return_z)
print("Tweak reshift procedure is finished. " + message)
return return_z
def interactive_lower_threshold(image, title=""):
"""Create plot and interact with user to determine the lower threshold for valid data.
The image is plotted, with a mask applied which initially masks the lower 95%
of data. A prompt is given in the console, asking for user input. If the user
enters no input and presses <enter>, that indicates the currently shown level
has been accepted by the user. Otherwise, the user may input a different
number. The plot will be redrawn and the input is requested again.
This function is primarily used to determine the cutoff indicating the
spaxels containing the most flux, hopefully indicating the galaxy center.
We then will use those pixels to fit an emission line, and find the centers.
This can be used to tweak the redshift if desired.
Parameters
----------
image : :class:`mpdaf.obj.image.Image`
An mpdaf image we wish to threshold interactively
title : str, optional
The title given to the plot displaying the `image`, by default ""
Returns
-------
limit : float
The determined threshold for valid data.
"""
limit = np.quantile(image.data, 0.95)
m_img = image > limit
m_img.plot(zscale=True, title=title, colorbar="v")
fig = plt.gcf()
plt.show(block=False)
while True:
print("Change the threshold for valid pixels.")
print(
"You may try multiple thresholds. Leave the entry blank and press Enter to confirm your choice."
)
print("current limit: {}".format(limit))
new_limit = input(
"Set new limit: (or leave blank and press Enter to continue) "
)
# if input is convertable to float, redo loop, otherwise exit loop
try:
limit = float(new_limit)
except ValueError or TypeError:
plt.close()
return limit
m_img = image > limit
plt.close(fig)
m_img.plot(zscale=True, title=title, colorbar="v")
fig = plt.gcf()
plt.show(block=False)
def get_param_values(params, param_name, default_value=np.nan):
"""Retrieve parameter value by name from lmfit objects.
Parameters
----------
params : :class:`lmfit.model.ModelResult` or :class:`lmfit.parameter.Parameters`
Input object containing the value you wish to extract
param_name : str
The :class:`lmfit.parameter.Parameter` name, whose value will be returned.
Also may be a :class:`lmfit.model.ModelResult` attribute, such as 'chisqr'
default_value : Any, optional
The return value if the function cannot find the `param_name`, by default np.nan
Returns
-------
float, bool, str, or type(`default_value`)
* If type(`params`) is :class:`~lmfit.parameter.Parameters`: `params`.get(`param_name`).value
* If type(`params`) is :class:`~lmfit.model.ModelResult`:
* Tries first: `params`.params.get(`param_name`).value
* Tries second: `params`.get(`param_name`), which allows for ModelResult attributes.
* If all these fail, returns `default_value`
See Also
--------
get_param_values : Use this version of the function on 1 input object
vget_param_values : Use this version of the function on an array of input objects.
This is a vectorized version of this function that you can apply to
arrays (see: https://numpy.org/doc/stable/reference/generated/numpy.vectorize.html)
Examples
--------
>>> import threadcount.fit
>>> from lmfit.models import GaussianModel
>>> model = GaussianModel()
>>> params = model.make_params()
>>> threadcount.fit.get_param_values(params,'sigma')
1.0
>>> # or use the vectorized version:
>>> params2 = model.make_params(sigma=4)
>>> a = np.array([params,params2], dtype=object)
>>> threadcount.fit.vget_param_values(a,"sigma")
array([1., 4.])
"""
# Quick test, because I know sometimes `params` will be None.
if params is None:
return default_value
# The order of the following try/except blocks is from most-nested to least-nested
# extraction.
# 1st: assume `params` is actually a lmfit ModelResult, and that we are
# trying to extract the parameter `param_name` value from that modelresult's params.
try:
return params.params.get(param_name).value
except AttributeError:
pass
# 2nd: assume `params` is a lmfit Parameters object, and that we are
# trying to extract the parameter `param_name` value from it.
try:
return params.get(param_name).value
except AttributeError:
pass
# 3rd: This works for everything else. If `params` is a modelresult and
# if `param_name` is a modelresult attribute, this will return it properly
# If `params` has no attribute `get` (such as if it is type int), then
# default value is returned.
try:
return params.get(param_name, default_value)
except AttributeError:
return default_value
vget_param_values = np.vectorize(get_param_values)
def iter_spaxel(image, index=False):
"""Create an iterator over the spaxels of successive image pixels in a 2d numpy array.
Each call to the iterator returns the value of the array `image` at a spaxel.
The first spaxel of `image` to be addressed is
pixel 0,0. Thereafter the X-axis pixel index is incremented by one
at each call (modulus the length of the X-axis), and the Y-axis
pixel index is incremented by one each time that the X-axis index
wraps back to zero.
The return value of iter_spaxel() is a python generator that can be
used in loops
Parameters
----------
image : 2d `numpy.ndarray`
The image to be iterated over.
index : bool
If False, return just a value at each iteration.
If True, return both a value and the pixel index
of that spaxel in the image (a tuple of image-array
indexes along the axes (y,x)).
Yields
------
dtype of `image`
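    Examples
    --------
    A minimal sketch on a small invented array:
    >>> import numpy as np
    >>> import threadcount as tc
    >>> data = np.array([[1, 2], [3, 4]])
    >>> [int(val) for val in tc.fit.iter_spaxel(data)]
    [1, 2, 3, 4]
    >>> [(int(val), idx) for val, idx in tc.fit.iter_spaxel(data, index=True)]
    [(1, (0, 0)), (2, (0, 1)), (3, (1, 0)), (4, (1, 1))]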
"""
if index:
for y, x in np.ndindex(image.shape):
yield image[y, x], (y, x)
else:
for y, x in np.ndindex(image.shape):
yield image[y, x]
def process_settings(default_settings, user_settings_string=""):
"""Combine the default settings with any user settings.
Process the user settings and override the default if a corresponding user
setting exists. Print a warning if there is a missing user setting.
Parameters
----------
default_settings : dict
A dictionary containing all required settings for the script to run.
user_settings_string : str, optional
A string (created by json.dumps(dictionary) containing user settings.),
by default ""
Returns
-------
:class:`types.SimpleNamespace`
A simple namespace containing the settings, for easier access to attributes.
"""
if user_settings_string == "":
return SimpleNamespace(**default_settings)
# otherwise process them.
user_settings = json.loads(user_settings_string)
# determine if there are missing settings in the user's settings and report them.
missing = {
k: default_settings[k] for k in default_settings.keys() - user_settings.keys()
}
for k, v in missing.items():
print("Missing setting {}, using default value {}".format(k, v))
final_settings = SimpleNamespace(**user_settings)
final_settings.__dict__.update(**missing)
return final_settings
def process_settings_dict(default_settings, user_settings=None):
"""Combine the default settings with any user settings.
Process the user settings and override the default if a corresponding user
setting exists. Print a warning if there is a missing user setting.
Parameters
----------
default_settings : dict
A dictionary containing all required settings for the script to run.
user_settings : dict, optional
A dictionary containing user settings, by default None
Returns
-------
:class:`types.SimpleNamespace`
A simple namespace containing the settings, for easier access to attributes.
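    Examples
    --------
    A minimal sketch with invented setting names:
    >>> import threadcount as tc
    >>> defaults = {"setup": "A", "line": 5007}
    >>> user = {"line": 6563}
    >>> settings = tc.fit.process_settings_dict(defaults, user)
    Missing setting setup, using default value A
    >>> settings.line, settings.setup
    (6563, 'A')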
"""
if not user_settings: # takes care of "", None, and {}
return SimpleNamespace(**default_settings)
# determine if there are missing settings in the user's settings and report them.
missing = {
k: default_settings[k] for k in default_settings.keys() - user_settings.keys()
}
for k, v in missing.items():
print("Missing setting {}, using default value {}".format(k, v))
final_settings = SimpleNamespace(**user_settings)
final_settings.__dict__.update(**missing)
return final_settings
def get_region(rx, ry=None):
"""Select pixels in ellipse of radius rx, ry from (0,0).
Return an array of np.array([row,col]) that are within an ellipse centered
at [0,0] with radius x of rx and radius y of ry.
Parameters
----------
rx : number or list of numbers [rx, ry]
ry : number
Returns
-------
numpy.ndarray
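    Examples
    --------
    A minimal sketch for a circle of radius 1 (each row is a (row, col) offset
    from (0, 0)):
    >>> import threadcount as tc
    >>> tc.fit.get_region(1).tolist()
    [[0, -1], [-1, 0], [0, 0], [1, 0], [0, 1]]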
"""
# try to process a list if it is given as parameter
try:
rx, ry = rx[0], rx[1]
# expect TypeError if rx is not a list.
except TypeError:
pass
# Defaults to a circle if ry=None
if ry is None:
ry = rx
rx = abs(rx)
ry = abs(ry)
rx_int = round(rx)
ry_int = round(ry)
indicies = (np.mgrid[-ry_int : ry_int + 1, -rx_int : rx_int + 1]).T.reshape(-1, 2)
# create boolean array of where inside ellipse is:
rx2 = rx * rx
ry2 = ry * ry
# remember python likes row, column convention, so y comes first.
inside = (
indicies[:, 0] * indicies[:, 0] / ry2 + indicies[:, 1] * indicies[:, 1] / rx2
<= 1
)
return indicies[inside]
def get_reg_image(region):
"""Create kernel image from list of pixels.
The input `region` is typically the output of :func:`get_region`.
This kernel image is used for spatial averaging, and it's values
are either 1 (if included in `region`) or 0.
Parameters
----------
region : list of pixel positions (y, x)
The list of pixel positions relative to an arbitrary point,
usually (0,0) in the case of output from :func:`get_region`, to
set to value 1 in the output image
Returns
-------
2d numpy array
An array consisting of the smallest area that will encompass the list of
pixels in `region`, with the relative shape of `region` preserved. The
array is 0 everywhere except the `region` pixels, which are set to 1.
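    Examples
    --------
    A minimal sketch combining this with :func:`get_region` for a circle of
    radius 1, which yields a plus-shaped kernel:
    >>> import threadcount as tc
    >>> tc.fit.get_reg_image(tc.fit.get_region(1)).tolist()
    [[0.0, 1.0, 0.0], [1.0, 1.0, 1.0], [0.0, 1.0, 0.0]]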
"""
# calculate the extent of the list of inputs:
mins = region.min(axis=0)
maxs = region.max(axis=0)
shape = maxs - mins + 1
# initialize output
output = np.zeros(shape)
# shift the pixel list by mins to reference the new origin.
inside = [tuple(pix - mins) for pix in region]
# set those pixels in the pixel list to 1.
output[tuple(zip(*inside))] = 1
return output
def spatial_average(cube, kernel_image, **kwargs):
"""Apply kernel image smoothing on every spatial image in a cube.
This function will correctly apply a smoothing image `kernel_image` to the
data and variance arrays in `cube`. The normalization is properly propegated
to the variance array.
Parameters
----------
cube : :class:`mpdaf.obj.cube.Cube`
The data you want smoothed
kernel_image : 2d numpy array
The smoothing image to apply
**kwargs : dict
key word arguments passed to :func:`.mpdaf_ext.correlate2d_norm`
Returns
-------
:class:`mpdaf.obj.cube.Cube`
Spatially smoothed cube.
"""
# determine if variance array of output should be initialized:
var_init = None
if cube.var is not None:
var_init = np.empty
# initialize empty loop output:
output = cube.clone(data_init=np.empty, var_init=var_init)
# loop over all images in cube, and set the output to output.
for ima, k in mpdaf.obj.iter_ima(cube, index=True):
output[k, :, :] = ima.correlate2d_norm(kernel_image)
return output
def get_SNR_map(cube, signal_idx=None, signal_Angstrom=None, nsigma=5, plot=False):
"""Create Image of signal to noise ratio in a given bandwidth.
This bandwidth may be selected in 3 different ways:
1. Choose the indices of the wavlength array to include (`signal_idx`)
2. Choose the wavelengths to include (`signal_Angstrom`)
3. Have the program fit a gaussian to the data, and choose how many sigmas
to include (`nsigma`). (Uses function: :func:`get_SignalBW_idx`)
If multiple of `signal_idx`, `signal_Angstrom`, and `nsigma` are given, the
order of preference is as follows: `signal_idx` overrides all others, then
`signal_Angstrom`, and finally the least preferenced is `nsigma`, which will
only be used if either `signal_idx` or `signal_Angstrom` are not specified.
Parameters
----------
cube : :class:`mpdaf.obj.Cube`
The cube containing data, var, and wave attributes
signal_idx : array [int, int], optional
The indices of the wavelength array to use, by default None
signal_Angstrom : array [float, float], optional
The wavelengths in Angstroms to use, by default None
nsigma : float, optional
Fit a gaussian, and use center wavelength +/- `nsigma` * sigma, by default 5
plot : bool, optional
Plot the whole image spectrum and highlight the SNR bandwidth,
by default False. A tool for troubleshooting/setup.
Returns
-------
:class:`mpdaf.obj.Image`
An Image where the pixel values indicate the signal to noise in the
selected bandwidth. Given a Spectrum for each spaxel, the SNR for the
spaxel is calculated by sum(Spectrum.data)/sqrt(sum(Spectrum.var)).
Examples
--------
Given a Cube with name `this_cube`, then the default bandwidth selection
is to fit a gaussian, and use the gaussian center +/- 5*sigma. This is
implemented by the following command:
>>> import threadcount as tc
>>> snr_image = tc.fit.get_SNR_map(this_cube)
To use the same method but change the width to, for example,
gaussian center +/- 3*sigma, (meaning nsigma=3), then use the following:
>>> snr_image = tc.fit.get_SNR_map(this_cube, nsigma=3)
If you know the specific wavelengths of the bandwidth you would like to use,
(for example, 5000-5020 A) then use the following:
>>> snr_image = tc.fit.get_SNR_map(this_cube, signal_Angstrom=[5000,5020])
And finally, if you know the pixel indices (for example, indices 31-60).
Note, this is an inclusive range, meaning in this case pixel 60 will be
included in the SNR calculation.
>>> snr_image = tc.fit.get_SNR_map(this_cube, signal_idx=[31,60])
"""
if signal_idx is None:
if signal_Angstrom is None:
signal_idx = get_SignalBW_idx(cube, nsigma=nsigma, plot=plot)
plot = False # This is taken care of inside the function.
else:
signal_idx = cube.wave.pixel(signal_Angstrom, nearest=True)
subcube = cube[signal_idx[0] : signal_idx[1] + 1, :, :]
if plot is True:
plt.figure()
spectrum = cube.sum(axis=(1, 2))
title = "Total image spectrum"
try:
title = " ".join([cube.label, title])
except AttributeError:
pass
spectrum.plot(title=title)
plt.axvspan(
*cube.wave.coord(signal_idx),
facecolor=plt.rcParams["axes.prop_cycle"].by_key()["color"][1],
alpha=0.25,
label="SNR range",
zorder=-3,
)
plt.legend()
subcube_sum = subcube.sum(axis=0)
result_image = subcube[0].clone()
result_image.data = subcube_sum.data / np.sqrt(subcube_sum.var)
return result_image
def get_SignalBW_idx(cube, nsigma=5, plot=False):
"""Determine the wavelength indices containing signal.
This function computes an average spectrum using the whole `cube`. Then,
fits a gaussian plus constant (:class:`~threadcount.models.Const_1GaussModel`).
The gaussian center and sigma, along with `nsigma`, are used to compute and
return the indices corresponding to
:math:`[center - nsigma*sigma, center + nsigma*sigma]`.
The plot option may be used for debugging for a visual of the spectrum and
the fit, and the computed range.
Parameters
----------
cube : :class:`mpdaf.obj.Cube`
The cube containing data, var, and wave attributes
nsigma : float, optional
The number of sigmas to include on each side of the gaussian center,
by default 5
plot : bool, optional
Display a plot of the spectrum and fit, with the bandwidth highlighted,
by default False
Returns
-------
array, [int, int]
The indices of the wavelength array corresponding to the calculated
bandwidth.
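    Notes
    -----
    A purely illustrative example of the bandwidth calculation (numbers invented
    here): if the fitted gaussian has center 5007 A and sigma 2 A, then with
    `nsigma` = 5 the selected wavelength range is [4997, 5017] A, and the returned
    values are the indices of the wavelength array closest to those two wavelengths.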
"""
ydata = np.nanmean(
np.nanmean(cube.data, axis=2), axis=1
) # gives 1d spectrum average for all of data.
x = cube.wave.coord()
gauss_model = models.Const_1GaussModel()
params = gauss_model.guess(data=ydata, x=x)
mod_result = gauss_model.fit(ydata, params, x=x)
center = mod_result.values["g1_center"]
sigma = mod_result.values["g1_sigma"]
low = center - nsigma * sigma
high = center + nsigma * sigma
if plot is True:
plt.figure()
mod_result.plot()
plt.axvspan(
low,
high,
facecolor=plt.rcParams["axes.prop_cycle"].by_key()["color"][2],
alpha=0.25,
label="SNR range",
zorder=-3,
)
plt.legend()
title = "Total image spectrum"
try:
title = " ".join([cube.label, title])
except AttributeError:
pass
plt.suptitle(title)
xrange = [low, high]
# find index of nearest element
xrange_idx = cube.wave.pixel(xrange, nearest=True)
return xrange_idx
def get_index(array, value):
"""Determine the index of 'array' which is closest to `value`.
Parameters
----------
array : float or array/list/iterable of floats
The list of numbers to search. Will be processed with np.array(`array`).
value : float or array/list/iterable of floats
The value(s) to search for in `array`
Returns
-------
int or list of ints
The index (or indices) of array where the value is closest to the search
value.
Examples
--------
>>> get_index([10,11,12,13,14],[13,22])
[3, 4]
>>> get_index(4,[3,0])
[0, 0]
>>> get_index([4,0],10)
0
"""
array = np.array(array)
# value may be a list of values.
try:
value_iter = iter(value)
except TypeError:
# This catches anything if value is not a list.
return (np.abs(array - value)).argmin()
return [(np.abs(array - this_value)).argmin() for this_value in value_iter]
def get_aic(model, error=np.nan):
"""Return the aic_real of a successful fit.
Parameters
----------
model : :class:`lmfit.model.ModelResult`
The modelresult to extract info from.
error : float, optional
The numeric value to assign any unsuccessful modelresult, by default np.nan
Returns
-------
float
The modelresult's aic_real, or `error`
"""
try:
if model.success is True:
return model.aic_real
except AttributeError:
pass
return error
vget_aic = np.vectorize(get_aic, doc="Vectorized :func:`get_aic`.")
def choose_model_aic_single(model_list, d_aic=-150):
r"""Determine best modelresult in a list, chosen by computing :math:`{\Delta}aic`.
Note: we now look at `aic_real`, defined in :meth:`threadcount.lmfit_ext.aic_real`
This function uses the aic (Akaike Information Criterion) to choose between
several models fit to the same data. Our general philosophy: choose simpler
models.
The default change in aic we consider
significant (-150) is quite high compared to a standard -10 you may see in
statistics, since we are intending to identify the model components with
physical processes in the galaxy. This value was chosen by observing
fits to several different spectra and choosing the desired number of gaussian
components by eye, then finding a :math:`{\Delta}aic` which came close to
accomplishing that.
Via Wikipedia: the quantity :math:`\exp((AIC_{min} - AIC_i)/2)` is known as the
relative likelihood of model i.
The numbers returned begin with 1, not 0 as is usual in python. If no results
in `model_list` are valid, then -1 will be returned.
The algorithm goes as follows:
* Lets say `model_list` = [model1, model2] (note the numbers begin with 1).
* If model2.aic_real - model1.aic_real < `d_aic`:
* return 2
* else:
* return 1.
* Lets now say `model_list` = [model1, model2, model3].
* If model2.aic_real - model1.aic_real < `d_aic`:
* This means that model2 is better. We will eliminate
model1 as an option, then apply bullet point 1, with [model2, model3],
returning whichever number is better (so the return value will be 2 or 3).
* else:
* This means that model2 is not better than model1. We will eliminate
model2 as an option, then apply bullet point 1, using [model1, model3],
returning either 1 or 3.
* TODO: I think if we get a choice of 3 from this way, we should flag
it for manual inspection, since it may only be slightly better than
model2 and so our philosophy of less complex is better would be violated.
Parameters
----------
model_list : list of :class:`lmfit.model.ModelResult`
A list of different model results which have been fit to the same data.
Right now, the length must be no longer than 3. The order of the models
is assumed to be from least complex -> more complex.
d_aic : float, optional
The change in fit aic (Akaike Information Criterion) indicating
a significantly better fit, by default -150.
Returns
-------
int
The index+1 of the model chosen with this algorithm. Returns -1 if all
models are invalid.
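    Notes
    -----
    A purely illustrative example with invented aic_real values (not real fits):
    suppose `d_aic` = -150 and the three models have aic_real values
    [-1000, -1200, -1210]. Model 2 beats model 1 because -1200 - (-1000) = -200 < -150,
    but model 3 does not beat model 2 because -1210 - (-1200) = -10, so the
    function returns 2.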
"""
# Python starts counting at 0 (0-based indexing); the returned choices start counting at 1.
# Pay attention that the returned value is the python index +1
# return -1 for invalid:
if model_list is None:
return -1
# return 1 if only one choice:
if len(model_list) == 1:
return 0 + 1
# model list is assumed to be in order simple -> complex.
# philosophy: prefer simpler models.
aic = vget_aic(model_list, error=np.nan)
# if all nans, then return -1 (invalid)
if np.all(np.isnan(aic)):
return -1
# print(np.array(aic)-aic[0])
# now we have different ways of choosing based on if 2 or 3 models:
# TODO: generalize to more than 3. Should be easy enough, given the
# explanation of the algorithm in the docstring.
if len(model_list) == 2:
if (aic[1] - aic[0]) < d_aic:
return (
1 + 1
) # these +1's are for translation to human interaction indexing....
else:
return 0 + 1
if len(model_list) == 3:
if (aic[1] - aic[0]) < d_aic:
# True, this means 2 gaussians better than 1.
# Eliminates id 0 as option and do more tests:
if (aic[2] - aic[1]) < d_aic:
# True, this means 3 gaussians better than 2. choose this.
return 2 + 1
else:
return 1 + 1
else:
# False, this means 2 gaussians not better than 1.
# Eliminates id 1 as option and do more tests:
if (aic[2] - aic[0]) < d_aic:
# True, this means 3 gaussians better than 1. choose this.
return 2 + 1
else:
return 0 + 1
# Fallback for unsupported list lengths: default to the simplest model.
return 0 + 1
def choose_model_aic(model_list, d_aic=-150):
"""Broadcast :func:`choose_model_aic_single` over array.
Parameters
----------
model_list : array-like, containing :class:`lmfit.model.ModelResult`
Array representing spatial dimensions and the last dimension contains
the model result for different models fitted to that spaxel. Works also
for simply a list of model results for one pixel.
d_aic : float, optional
The change in fit aic (Akaike Information Criterion) indicating
a significantly better fit, by default -150.
Returns
-------
array of shape model_list.shape[:-1] containing int, or int
Spatial array containing the chosen model number, starting with 1.
invalid entries are given the value -1.
See Also
--------
:func:`choose_model_aic_single` : A detailed discussion of this function.
"""
# assume the first dimensions of model_list are spatial and the last is
# the different models.
# Handle a single pixel:
model_list = np.array(model_list)
shape = model_list.shape
if len(shape) == 1:
single = choose_model_aic_single(model_list, d_aic=d_aic)
return single
# if we have passed that block, we know we have an array of size shape to loop over.
# create output
output = np.empty(shape[:-1], dtype=int)
import numpy as np
SOLVER_TYPE_VALUE_ITERATION = 'value_iteration'
class MDPSolver():
def __init__(self, mdp):
self.mdp = mdp
def _value_iteration(self, discount=1.0):
states = self.mdp.state_space()
actions = self.mdp.action_space()
num_states = len(states)
num_actions = len(actions)
state_key_to_idx = dict(zip([s.key for s in states], range(num_states)))
v = np.zeros(num_states)
sas_p = np.zeros((num_states, num_actions, num_states))
sas_r = np.zeros(sas_p.shape)
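# A typical value-iteration sweep over the transition and reward arrays allocated
# above (once they are filled) could look roughly like the sketch below.
# max_iterations, tolerance and the update itself are assumptions for illustration,
# not taken from this module:
#
# for _ in range(max_iterations):
#     q = np.einsum('ijk,ijk->ij', sas_p, sas_r + discount * v[np.newaxis, np.newaxis, :])
#     v_new = q.max(axis=1)
#     if np.max(np.abs(v_new - v)) < tolerance:
#         break
#     v = v_new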
import copy
import unittest
import numpy as np
import aspecd.dataset
import aspecd.exceptions
import aspecd.model
import aspecd.utils
class TestModel(unittest.TestCase):
def setUp(self):
self.model = aspecd.model.Model()
def test_instantiate_class(self):
pass
def test_has_name_property(self):
self.assertTrue(hasattr(self.model, 'name'))
def test_name_property_equals_full_class_name(self):
full_class_name = aspecd.utils.full_class_name(self.model)
self.assertEqual(self.model.name, full_class_name)
def test_has_parameters_property(self):
self.assertTrue(hasattr(self.model, 'parameters'))
def test_has_variables_property(self):
self.assertTrue(hasattr(self.model, 'variables'))
def test_has_description_property(self):
self.assertTrue(hasattr(self.model, 'description'))
def test_description_property_describes_abstract_model(self):
self.assertIn('abstract model', self.model.description.lower())
def test_has_references_property(self):
self.assertTrue(hasattr(self.model, 'references'))
def test_description_references_is_list(self):
self.assertTrue(isinstance(self.model.references, list))
def test_has_create_method(self):
self.assertTrue(hasattr(self.model, 'create'))
self.assertTrue(callable(self.model.create))
def test_create_without_parameters_raises(self):
with self.assertRaises(aspecd.exceptions.MissingParameterError):
self.model.create()
def test_create_without_variables_raises(self):
self.model.parameters = [0]
with self.assertRaises(aspecd.exceptions.MissingParameterError):
self.model.create()
def test_create_with_missing_parameter_raises(self):
class MyModel(aspecd.model.Model):
def _sanitise_parameters(self):
if "coefficient" not in self.parameters:
raise aspecd.exceptions.MissingParameterError(
message="Parameter 'coefficient' missing")
model = MyModel()
model.parameters["foo"] = "bar"
model.variables = [np.linspace(0, 1)]
with self.assertRaisesRegex(aspecd.exceptions.MissingParameterError,
"coefficient"):
model.create()
def test_create_returns_calculated_dataset(self):
self.model.parameters = [0]
self.model.variables = [np.linspace(0, 1)]
dataset = self.model.create()
self.assertEqual(aspecd.dataset.CalculatedDataset, type(dataset))
def test_create_sets_calculated_dataset_axis_values(self):
self.model.parameters = [0]
self.model.variables = [np.linspace(0, 1)]
dataset = self.model.create()
np.testing.assert_allclose(dataset.data.axes[0].values,
self.model.variables[0])
def test_create_sets_calculated_dataset_origdata_axis_values(self):
self.model.parameters = [0]
self.model.variables = [np.linspace(0, 1)]
dataset = self.model.create()
np.testing.assert_allclose(dataset._origdata.axes[0].values,
self.model.variables[0])
def test_create_with_2d_sets_calculated_dataset_axis_values(self):
self.model.parameters = [0]
self.model.variables = [np.linspace(0, 1), np.linspace(2, 3)]
dataset = self.model.create()
for index in range(len(self.model.variables)):
np.testing.assert_allclose(dataset.data.axes[index].values,
self.model.variables[index])
def test_create_sets_calculated_dataset_calculation_type(self):
self.model.parameters = [0]
self.model.variables = [np.linspace(0, 1)]
dataset = self.model.create()
self.assertEqual(self.model.name, dataset.metadata.calculation.type)
def test_create_sets_calculated_dataset_calculation_parameters(self):
self.model.parameters = [0]
self.model.variables = [np.linspace(0, 1)]
dataset = self.model.create()
self.assertEqual(self.model.parameters,
dataset.metadata.calculation.parameters)
def test_has_from_dataset_method(self):
self.assertTrue(hasattr(self.model, 'from_dataset'))
self.assertTrue(callable(self.model.from_dataset))
def test_from_dataset_without_dataset_raises(self):
with self.assertRaises(aspecd.exceptions.MissingDatasetError):
self.model.from_dataset()
def test_from_dataset_sets_values(self):
values = np.linspace(5, 50)
dataset = aspecd.dataset.Dataset()
dataset.data.data = np.linspace(0, 1)
dataset.data.axes[0].values = values
self.model.from_dataset(dataset=dataset)
np.testing.assert_allclose(values, self.model.variables[0])
def test_from_2d_dataset_sets_values(self):
dataset = aspecd.dataset.Dataset()
dataset.data.data = np.random.random([10, 5])
self.model.from_dataset(dataset=dataset)
for index in range(len(dataset.data.axes)):
np.testing.assert_allclose(dataset.data.axes[index].values,
self.model.variables[index])
def test_from_dataset_applies_dataset_axes_to_calculated_dataset(self):
dataset = aspecd.dataset.Dataset()
dataset.data.data = np.linspace(0, 1)
dataset.data.axes[0].quantity = 'foo'
dataset.data.axes[1].quantity = 'bar'
self.model.from_dataset(dataset=dataset)
self.model.parameters = [0]
calculated_dataset = self.model.create()
for index in range(len(dataset.data.axes)):
self.assertEqual(dataset.data.axes[index].quantity,
calculated_dataset.data.axes[index].quantity)
def test_has_from_dict_method(self):
self.assertTrue(hasattr(self.model, 'from_dict'))
self.assertTrue(callable(self.model.from_dict))
def test_from_dict_without_dict_raises(self):
with self.assertRaises(aspecd.exceptions.MissingDictError):
self.model.from_dict()
def test_from_dict_sets_parameters(self):
dict_ = {'parameters': {'foo': 42}, 'variables': [np.linspace(0, 1)]}
self.model.from_dict(dict_)
self.assertDictEqual(dict_["parameters"], self.model.parameters)
def test_from_dict_sets_variables(self):
dict_ = {'parameters': {'foo': 42}, 'variables': [np.linspace(0, 1)]}
self.model.from_dict(dict_)
self.assertEqual(dict_["variables"], self.model.variables)
def test_from_dict_sets_only_valid_properties(self):
dict_ = {'foo': 42}
self.model.from_dict(dict_)
self.assertFalse(hasattr(self.model, 'foo'))
class TestCompositeModel(unittest.TestCase):
def setUp(self):
self.model = aspecd.model.CompositeModel()
def test_instantiate_class(self):
pass
def test_has_appropriate_description(self):
self.assertIn('composite model consisting of several weighted models',
self.model.description.lower())
def test_has_models_property(self):
self.assertTrue(hasattr(self.model, 'models'))
def test_has_weights_property(self):
self.assertTrue(hasattr(self.model, 'weights'))
def test_create_with_single_model_equivalent_to_single_model(self):
variables = np.linspace(0, 5)
models = ['Polynomial']
parameters = {'coefficients': [1]}
# Create single model
single_model = aspecd.model.Polynomial()
single_model.parameters = parameters
single_model.variables = variables
single_model_result = single_model.create()
# Create composite model
self.model.models = models
self.model.parameters = [parameters]
self.model.variables = variables
composite_model_result = self.model.create()
self.assertListEqual(list(single_model_result.data.data),
list(composite_model_result.data.data))
def test_weighting_with_single_model(self):
variables = np.linspace(0, 5)
models = ['Polynomial']
parameters = {'coefficients': [1]}
weights = [2]
# Create single model
single_model = aspecd.model.Polynomial()
single_model.parameters = parameters
single_model.variables = variables
single_model_result = single_model.create()
# Create composite model
self.model.models = models
self.model.parameters = [parameters]
self.model.weights = weights
self.model.variables = variables
composite_model_result = self.model.create()
self.assertListEqual(list(single_model_result.data.data * weights[0]),
list(composite_model_result.data.data))
def test_create_with_multiple_models_equivalent_to_sum_of_models(self):
variables = np.linspace(0, 5)
models = ['Sine', 'Exponential']
parameters = [{'amplitude': 10}, {'rate': -4}]
# Create individual models
data = np.zeros(len(variables))
for idx, model_name in enumerate(models):
model = aspecd.utils.object_from_class_name('aspecd.model.' +
model_name)
for key in parameters[idx]:
# noinspection PyUnresolvedReferences
model.parameters[key] = parameters[idx][key]
model.variables = variables
# noinspection PyUnresolvedReferences
model_result = model.create()
data += model_result.data.data
# Create composite model
self.model.models = models
self.model.parameters = parameters
self.model.variables = variables
composite_model_result = self.model.create()
self.assertListEqual(list(data), list(composite_model_result.data.data))
def test_create_with_operator(self):
variables = np.linspace(0, 5)
models = ['Sine', 'Exponential']
parameters = [{'amplitude': 10}, {'rate': -4}]
operators = ['*']
# Create individual models
data = np.zeros(len(variables))
for idx, model_name in enumerate(models):
model = aspecd.utils.object_from_class_name('aspecd.model.' +
model_name)
for key in parameters[idx]:
# noinspection PyUnresolvedReferences
model.parameters[key] = parameters[idx][key]
model.variables = variables
# noinspection PyUnresolvedReferences
model_result = model.create()
if not idx:
data += model_result.data.data
else:
data *= model_result.data.data
# Create composite model
self.model.models = models
self.model.parameters = parameters
self.model.variables = variables
self.model.operators = operators
composite_model_result = self.model.create()
self.assertListEqual(list(data), list(composite_model_result.data.data))
def test_create_with_incompatible_no_of_models_and_parameters_raises(self):
self.model.models = ['Sine', 'Exponential']
self.model.parameters = [{'amplitude': 10}]
self.model.variables = np.linspace(0, 5)
with self.assertRaisesRegex(IndexError,
'Models and parameters count differs'):
self.model.create()
def test_create_with_incompatible_no_of_weights(self):
self.model.models = ['Sine', 'Exponential']
self.model.parameters = [{'amplitude': 10}, {'rate': -4}]
self.model.weights = [2]
self.model.variables = np.linspace(0, 5)
with self.assertRaisesRegex(IndexError,
'Models and weights count differs'):
self.model.create()
def test_create_with_incompatible_no_of_operators(self):
self.model.models = ['Sine', 'Exponential', 'Sine']
self.model.parameters = \
[{'amplitude': 10}, {'rate': -4}, {'frequency': 0.5}]
self.model.operators = ['*']
self.model.variables = np.linspace(0, 5)
with self.assertRaisesRegex(IndexError,
'Models and operators count differs'):
self.model.create()
class TestFamilyOfCurves(unittest.TestCase):
def setUp(self):
self.model = aspecd.model.FamilyOfCurves()
self.variables = np.linspace(0, 6*np.pi)
def test_instantiate_class(self):
pass
def test_has_appropriate_description(self):
self.assertIn('family of curves for a model with one parameter varied',
self.model.description.lower())
def test_has_model_property(self):
self.assertTrue(hasattr(self.model, 'model'))
def test_has_vary_property(self):
self.assertTrue(hasattr(self.model, 'vary'))
def test_create_without_model_raises(self):
with self.assertRaisesRegex(ValueError, 'Missing a model'):
self.model.create()
def test_create_model_with_scalar_varied_parameter_value(self):
self.model.model = "Sine"
self.model.vary["parameter"] = "amplitude"
self.model.vary["values"] = 4
self.model.variables = self.variables
simple_model = aspecd.model.Sine()
simple_model.variables = self.variables
simple_model.parameters["amplitude"] = self.model.vary["values"]
simple_dataset = simple_model.create()
family_of_curves = self.model.create()
self.assertListEqual(list(simple_dataset.data.data),
list(family_of_curves.data.data))
def test_create_model_with_actually_varied_parameter_value(self):
self.model.model = "Sine"
self.model.vary["parameter"] = "amplitude"
self.model.vary["values"] = [2, 4]
self.model.variables = self.variables
simple_model1 = aspecd.model.Sine()
simple_model1.variables = self.variables
simple_model1.parameters["amplitude"] = self.model.vary["values"][0]
simple_dataset1 = simple_model1.create()
simple_model2 = copy.deepcopy(simple_model1)
simple_model2.parameters["amplitude"] = self.model.vary["values"][1]
simple_dataset2 = simple_model2.create()
family_of_curves = self.model.create()
self.assertListEqual(list(simple_dataset1.data.data),
list(family_of_curves.data.data[:, 0]))
self.assertListEqual(list(simple_dataset2.data.data),
list(family_of_curves.data.data[:, 1]))
def test_create_model_sets_quantity_of_additional_axis(self):
self.model.model = "Sine"
self.model.vary["parameter"] = "amplitude"
self.model.vary["values"] = [2, 4]
self.model.variables = self.variables
family_of_curves = self.model.create()
self.assertEqual(self.model.vary["parameter"],
family_of_curves.data.axes[-1].quantity)
class TestZeros(unittest.TestCase):
def setUp(self):
self.model = aspecd.model.Zeros()
self.dataset = aspecd.dataset.CalculatedDataset()
self.dataset.data.data = np.random.randn(10)
'''
(*)~---------------------------------------------------------------------------
Pupil - eye tracking platform
Copyright (C) 2012-2018 Pupil Labs
Distributed under the terms of the GNU
Lesser General Public License (LGPL v3.0).
See COPYING and COPYING.LESSER for license details.
---------------------------------------------------------------------------~(*)
'''
"""
av_writer module uses PyAV (ffmpeg or libav backend) to write AV files.
requires:
- PyAV (imported as ``av``)
"""
import os,sys,platform
import av
from av.packet import Packet
import numpy as np
from time import time
from fractions import Fraction
#logging
import logging
logger = logging.getLogger(__name__)
from threading import Thread
from threading import Event
import multiprocessing as mp
"""
notes on time_bases and presentation timestamps:
Time_base (fraction) is the conversion factor to multiply the uint64 pts value into seconds
two time bases that we care about exist:
time_base of the stream (AVStream) this is used for the packet pts/dts
time_base of the codec (AVCodecContext) this is used for the frame
going from packet pts to frame pts when decoding:
frame.pts = av_rescale_q(packet.pts, packetTimeBase, frameTimeBase)
..when encoding:
packet.pts = av_rescale_q(frame.pts, frameTimeBase, packetTimeBase)
Setting the time_base:
The timebase of the codec is settable (and only settable at the beginning):
currently in PyAV this is done via container.add_stream(codec, codec_timebase)
The timebase of the stream is not user settable. It is determined by ffmpeg.
The stream timebase uses the codec timebase as a hint to find a good value.
The stream timebase is influenced by the constraints/rules of the container as well.
Only when the header of the stream is written is stream.time_base guaranteed
to be valid, and it should only be accessed after that.
"""
def write_timestamps(file_loc, timestamps):
directory, video_file = os.path.split(file_loc)
name, ext = os.path.splitext(video_file)
ts_file = '{}_timestamps.npy'.format(name)
ts_loc = os.path.join(directory, ts_file)
ts = np.array(timestamps)
np.save(ts_loc, ts)
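# Example usage sketch (hypothetical path and timestamps):
# write_timestamps('rec/world.mp4', [0.01, 0.043, 0.077]) saves the array to
# 'rec/world_timestamps.npy' in the same directory as the video file.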
class AV_Writer(object):
"""
AV_Writer class
- file_loc: path to file out
- video_stream: dict giving the video codec name and bit rate to use
We are creating a container with a single video stream; an audio recording
(with its timestamps) can optionally be muxed in via `audio_loc`.
"""
def __init__(self, file_loc,fps=30, video_stream={'codec':'mpeg4','bit_rate': 15000*10e3}, audio_loc=None, use_timestamps=False):
super().__init__()
self.use_timestamps = use_timestamps
self.timestamps = []
# the approximate capture rate.
self.fps = int(fps)
directory, video_file = os.path.split(file_loc)
name, ext = os.path.splitext(video_file)
if ext not in ('.mp4', '.mov', '.mkv'):
logger.warning("media file container should be mp4 or mov. Using a different container is risky.")
self.file_loc = file_loc
self.container = av.open(self.file_loc, 'w')
logger.debug("Opened '{}' for writing.".format(self.file_loc))
if self.use_timestamps:
self.time_base = Fraction(1, 65535) # highest resolution for mp4
else:
self.time_base = Fraction(1000, self.fps*1000) # timebase is fps
self.video_stream = self.container.add_stream(video_stream['codec'], 1/self.time_base)
self.video_stream.bit_rate = video_stream['bit_rate']
self.video_stream.bit_rate_tolerance = video_stream['bit_rate']/20
self.video_stream.thread_count = max(1, mp.cpu_count() - 1)
# self.video_stream.pix_fmt = "yuv420p"
if audio_loc:
audio_dir = os.path.split(audio_loc)[0]
audio_ts_loc = os.path.join(audio_dir, 'audio_timestamps.npy')
audio_exists = os.path.exists(audio_loc) and os.path.exists(audio_ts_loc)
if audio_exists:
self.audio_rec = av.open(audio_loc)
self.audio_ts = np.load(audio_ts_loc)
# from numba import njit
from enum import IntEnum
import numpy as np
# import numba
# from numba.experimental import jitclass
from numba import int32, int64, float64, complex128, typed
from numba.core import types
kv_ty = (types.unicode_type, types.int64)
class ModelType(IntEnum):
"""
Identification of different Model types.
"""
GEN_ORD_6 = 0 # 6th order model
VSC_1 = 1
DC_LINE = 2
VS = 3
SAVR = 4
GEN_2_2 = 5 # model 2.2
class CtrlMode(IntEnum):
"""
Identification of converter control modes.
"""
P_VAC = 0
P_Q = 1
VDC_Q = 2
# @njit
def d_vsc_dt(xm, um, model):
"""
Voltage Source Converter differential equations
Parameters
----------
xm : ndarray
State vector.
um : ndarray
Input vector.
model : object
Model parameters.
Returns
-------
dx : ndarray
State derivatives.
"""
i_d = xm[model.x_idx['Id']]
i_q = xm[model.x_idx['Iq']]
# i_dc = x[model.x_idx['Idc']]
Md = xm[model.x_idx['Md']]
Mq = xm[model.x_idx['Mq']]
Madd = xm[model.x_idx['Madd']]
Madq = xm[model.x_idx['Madq']]
Theta_pll = xm[model.x_idx['Theta']]
Xpll = xm[model.x_idx['Xpll']]
Xf = xm[model.x_idx['Xf']]
Xp = xm[model.x_idx['Xp']]
Xq = xm[model.x_idx['Xq']]
Pm = xm[model.x_idx['Pm']]
Qm = xm[model.x_idx['Qm']]
Vm = xm[model.x_idx['Vm']]
vx = um[0]
vy = um[1]
Vdc = um[2]
Pref = um[3]
Qref = um[4]
Vref = um[5]
vd = (vx * np.cos(Theta_pll) + vy * np.sin(Theta_pll))
vq = (-vx * np.sin(Theta_pll) + vy * np.cos(Theta_pll))
wpll = model.Kp_pll * vq + model.Ki_pll * Xpll
# wpll = np.clip(wpll, 0.8, 1.2) # TODO check the limits and make them part of the model
Pac = vd * i_d + vq * i_q
Qac = (vq * i_d - vd * i_q)
Vac = np.sqrt(vd ** 2 + vq ** 2)
if model.Tpm == 0:
Pm = Pac
Qm = Qac
if model.Tvm == 0:
Vm = Vac
if model.ctrl == CtrlMode.VDC_Q:  # TODO separate the control modes to avoid mixup (Vref is used for both ac and dc)
dP = Vdc / Vref - 1
else:
dP = Pref - Pm + model.Kpf * (1 - wpll) + model.Kif * Xf
id_ref = model.Kpp * dP + Xp * model.Kip
dQ = (model.Kq * (Qm - Qref) + model.Kv * (Vm - Vref))
iq_ref = dQ * model.Kpq + Xq * model.Kiq
# id_max = 1
# id_ref = np.clip(id_ref, -id_max, id_max)
# iq_max = np.sqrt(max(0,1-id_ref**2))
# iq_ref = np.clip(iq_ref, -iq_max, iq_max)
vmd = (Madd - wpll * model.Lt * i_q + model.Kpc * (id_ref - i_d) + model.Kic * Md) / Vdc
vmq = (Madq + wpll * model.Lt * i_d + model.Kpc * (iq_ref - i_q) + model.Kic * Mq) / Vdc
dx = np.zeros(len(xm))
dx[model.x_idx['Id']] = model.wn / model.Lt * (vmd - vd - model.Rt * i_d + wpll * model.Lt * i_q) # di_d
dx[model.x_idx['Iq']] = model.wn / model.Lt * (vmq - vq - model.Rt * i_q - wpll * model.Lt * i_d) # di_q
# dx[model.x_idx['Idc']]= (model.wn/(model.Ldc)*(Pac/Vdc-i_dc)) # TODO find a proper equation assuming power
# balance between AC and DC sides
dx[model.x_idx['Md']] = (id_ref - i_d) # dMd
dx[model.x_idx['Mq']] = (iq_ref - i_q) # dMq
dx[model.x_idx['Madd']] = (-Madd + vd) / model.Tad # dMadd
dx[model.x_idx['Madq']] = (-Madq + vq) / model.Tad # dMadq
dx[model.x_idx['Theta']] = (wpll - 1) * model.wn # dTheta_pll
dx[model.x_idx['Xpll']] = vq # dXpll
dx[model.x_idx['Xf']] = (1 - wpll) # dMf
dx[model.x_idx['Xp']] = dP # dMp
dx[model.x_idx['Xq']] = dQ # dMq
if model.Tpm > 0:
dx[model.x_idx['Pm']] = (Pac - Pm) / model.Tpm
dx[model.x_idx['Qm']] = (Qac - Qm) / model.Tpm
if model.Tvm > 0:
dx[model.x_idx['Vm']] = (Vac - Vm) / model.Tvm
return dx
# @njit
def d_dcline_dt(xm, um, model):
"""
DC line differential equations
Parameters
----------
xm : ndarray
State vector.
u : ndarray
Input vector.
model : object
Model parameters.
Returns
-------
dx : ndarray
State derivatives.
"""
Il = xm[model.x_idx['Il']]
Vf = xm[model.x_idx['Vf']]
Vt = xm[model.x_idx['Vt']]
If = um[0]
It = um[1]
dx = np.zeros(len(xm))
dx[model.x_idx['Il']] = model.wn * 1 / (model.L + 1e-6) * (Vf - Vt - model.R * Il)
dx[model.x_idx['Vf']] = model.wn * 2 / model.C * (If - Il - model.G / 2 * Vf)
dx[model.x_idx['Vt']] = model.wn * 2 / model.C * (Il - It - model.G / 2 * Vt)
return dx
# @njit
def d_vs_dt(xm, um, model):
"""
Voltage source differential equations
Parameters
----------
xm : ndarray
State vector.
um : ndarray
Input vector.
model : object
Model parameters.
Returns
-------
dx : ndarray
State derivatives.
"""
phi = xm[model.x_idx['phi']]
Ix = xm[model.x_idx['Ix']]
Iy = xm[model.x_idx['Iy']]
Vx = um[0]
Vy = um[1]
# f = um[2]
fpu = 1 # TODO this should be the measured grid frequency
dphi = 2 * np.pi * 50 * (fpu - 1)
ux_setp = model.V0 * np.cos(phi + dphi)
uy_setp = model.V0 * np.sin(phi + dphi)
dIx = model.wn / model.L * (ux_setp - Vx - model.R * Ix + model.L * Iy)
dIy = model.wn / model.L * (uy_setp - Vy - model.R * Iy - model.L * Ix)
dx = np.zeros(len(xm))
dx[model.x_idx['phi']] = dphi
dx[model.x_idx['Ix']] = dIx
dx[model.x_idx['Iy']] = dIy
return dx
# @njit
def d_gen_ord_6_rms_dt(xm, um, model):
"""
Sixth order generator differential equations.
Generator current is not a state variable and
is calculated from the terminal and subtransient
voltage.
Parameters
----------
xm : ndarray
State vector.
um : ndarray
Input vector.
model : object
Model parameters.
Returns
-------
dx : ndarray
State derivatives.
"""
d = xm[model.x_idx['d']]
w = xm[model.x_idx['w']]
Eqp = xm[model.x_idx['Eqp']]
Eqpp = xm[model.x_idx['Eqpp']]
Edp = xm[model.x_idx['Edp']]
Edpp = xm[model.x_idx['Edpp']]
Efq = xm[model.x_idx['Efq']]  # TODO separate the avr from the generator to simplify using different avr models
Vf = xm[model.x_idx['Vf']]
X_avr = xm[model.x_idx['Xavr']]
Efq = max(Efq, 0) # TODO add limits
vx = um[0]
vy = um[1]
Vref = um[2]
Vac = np.sqrt(vx ** 2 + vy ** 2)
Vd = (vx * np.cos(d) + vy * np.sin(d))
Vq = (-vx * np.sin(d) + vy * np.cos(d))
Id = -(-model.ra * (Vd - Edpp) - model.xqpp * (Vq - Eqpp)) / (model.ra ** 2 + model.xqpp * model.xdpp)
Iq = -(model.xdpp * (Vd - Edpp) - model.ra * (Vq - Eqpp)) / (model.ra ** 2 + model.xqpp * model.xdpp)
Pe = -(Vd * Id + Vq * Iq) + (Id ** 2 + Iq ** 2) * model.ra
# Pe = (Edpp*Id+Eqpp*Iq)+(model.xdpp-model.xqpp)*Id*Iq
delta_w = model.wn * (w - 1)
dx = np.zeros(len(xm))
dx[model.x_idx['d']] = delta_w
dx[model.x_idx['w']] = 1 / (model.Tj) * (model.Pm - Pe - model.D * w) # dw
dx[model.x_idx['Eqp']] = 1 / model.Tdp * (Efq - Eqp + Id * (model.xd - model.xdp))
dx[model.x_idx['Eqpp']] = 1 / model.Tdpp * (Eqp - Eqpp + Id * (model.xdp - model.xdpp))
dx[model.x_idx['Edp']] = 1 / model.Tqp * (-Edp - Iq * (model.xq - model.xqp))
dx[model.x_idx['Edpp']] = 1 / model.Tqpp * (Edp - Edpp - Iq * (model.xqp - model.xqpp))
dEfq = 1 / model.Te * (-Efq + model.Kc * (Vref - Vf) + model.Kc / model.Tc * X_avr)
dx[model.x_idx['Efq']] = dEfq
dx[model.x_idx['Vf']] = 1 / model.Tm * (-Vf + Vac)
dx[model.x_idx['Xavr']] = (Vref - Vf)
return dx
# @njit
def d_gen_ord_6_emt_dt(xm, um, model):
"""
Sixth order generator differential equations.
Generator current is included as a state variable.
Parameters
----------
xm : ndarray
State vector.
um : ndarray
Input vector.
model : object
Model parameters.
Returns
-------
dx : ndarray
State derivatives.
"""
Id = xm[model.x_idx['Id']]
Iq = xm[model.x_idx['Iq']]
d = xm[model.x_idx['d']]
w = xm[model.x_idx['w']]
Eqp = xm[model.x_idx['Eqp']]
Eqpp = xm[model.x_idx['Eqpp']]
Edp = xm[model.x_idx['Edp']]
Edpp = xm[model.x_idx['Edpp']]
Efq = xm[model.x_idx['Efq']] # TODO seperate the avr from the generator to simplify using different avr models
Vf = xm[model.x_idx['Vf']]
X_avr = xm[model.x_idx['Xavr']]
# Efq = np.clip(Efq, 0.0, 5.0)
vx = um[0]
vy = um[1]
Vref = um[2]
Vac = np.sqrt(vx ** 2 + vy ** 2)
Vd = vx * np.cos(d) + vy * np.sin(d)
Vq = -vx * np.sin(d) + vy * np.cos(d)
Pe = (Edpp * Id + Eqpp * Iq) + (model.xdpp - model.xqpp) * Id * Iq
delta_w = model.wn * (w - 1)
dx = np.zeros(len(xm))
dx[model.x_idx['d']] = delta_w
dx[model.x_idx['w']] = (1 / model.Tj) * (model.Pm - Pe - model.D * w)
dx[model.x_idx['Eqp']] = (1 / model.Tdp) * (Efq - Eqp - Id * (model.xd - model.xdp))
dx[model.x_idx['Eqpp']] = (1 / model.Tdpp) * (Eqp - Eqpp - Id * (model.xdp - model.xdpp))
dx[model.x_idx['Edp']] = (1 / model.Tqp) * (-Edp + Iq * (model.xq - model.xqp))
dx[model.x_idx['Edpp']] = (1 / model.Tqpp) * (Edp - Edpp + Iq * (model.xqp - model.xqpp))
dEfq = 1 / model.Te * (-Efq + model.Kc * (Vref - Vf) + model.Kc / model.Tc * X_avr)
dx[model.x_idx['Efq']] = dEfq
dx[model.x_idx['Vf']] = 1 / model.Tm * (-Vf + Vac)
dx[model.x_idx['Xavr']] = (Vref - Vf)
# TODO check the equations for w*E''
dx[model.x_idx['Id']] = model.wn / model.xdpp * (w * Edpp - Vd - model.ra * Id + w * model.xqpp * Iq)
dx[model.x_idx['Iq']] = model.wn / model.xqpp * (w * Eqpp - Vq - model.ra * Iq - w * model.xdpp * Id)
return dx
# @njit
def d_gen_model_2_2_dt(xm, um, model):
"""
Generator model 2.2 differential equations.
Generator current is included as a state variable.
Parameters
----------
xm : ndarray
State vector.
um : ndarray
Input vector.
model : object
Model parameters.
Returns
-------
dx : ndarray
State derivatives.
"""
Id = xm[model.x_idx['Id']]
Iq = xm[model.x_idx['Iq']]
d = xm[model.x_idx['d']]
w = xm[model.x_idx['w']]
psi_d = xm[model.x_idx['psi_d']]
psi_q = xm[model.x_idx['psi_q']]
psi_fd = xm[model.x_idx['psi_fd']]
psi_1d = xm[model.x_idx['psi_1d']]
psi_1q = xm[model.x_idx['psi_1q']]
psi_2q = xm[model.x_idx['psi_2q']]
Efd = xm[model.x_idx['Efd']] # TODO seperate the avr from the generator to simplify using different avr models
Vf = xm[model.x_idx['Vf']]
X_avr = xm[model.x_idx['Xavr']]
# Efd = np.clip(Efd, 0.0, 5.0)
vx = um[0]
vy = um[1]
Vref = um[2]
# Efd = um[3]
Vac = np.sqrt(vx ** 2 + vy ** 2)
Vd = (vx * | np.cos(d) | numpy.cos |
# Copyright (C) 2018, <NAME>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>
"""Controllers."""
import six
import abc
import warnings
import numpy as np
from scipy import optimize
from scipy.stats import entropy
@six.add_metaclass(abc.ABCMeta)
class BaseController():
"""Base trajectory optimizer controller."""
@abc.abstractmethod
def fit(self, x0, us_init, *args, **kwargs):
"""Computes the optimal controls.
Args:
x0: Initial state [state_size].
us_init: Initial control path [N, action_size].
*args, **kwargs: Additional positional and key-word arguments.
Returns:
Tuple of
xs: optimal state path [N+1, state_size].
us: optimal control path [N, action_size].
"""
raise NotImplementedError
class iLQR(BaseController):
"""Finite Horizon Iterative Linear Quadratic Regulator."""
def __init__(self, dynamics, cost, N, max_reg=1e10, hessians=False):
"""Constructs an iLQR solver.
Args:
dynamics: Plant dynamics.
cost: Cost function.
N: Horizon length.
max_reg: Maximum regularization term to break early due to
divergence. This can be disabled by setting it to None.
hessians: Use the dynamic model's second order derivatives.
Default: only use first order derivatives. (i.e. iLQR instead
of DDP).
"""
self.dynamics = dynamics
self.cost = cost
self.N = N
self._use_hessians = hessians and dynamics.has_hessians
if hessians and not dynamics.has_hessians:
warnings.warn("hessians requested but are unavailable in dynamics")
# Regularization terms: Levenberg-Marquardt parameter.
# See II F. Regularization Schedule.
self._mu = 1.0
self._mu_min = 1e-6
self._mu_max = max_reg
self._delta_0 = 2.0
self._delta = self._delta_0
self._k = np.zeros((N, dynamics.action_size))
self._K = np.zeros((N, dynamics.action_size, dynamics.state_size))
super(iLQR, self).__init__()
def fit(self, x0, us_init, n_iterations=100, tol=1e-6, on_iteration=None):
"""Computes the optimal controls.
Args:
x0: Initial state [state_size].
us_init: Initial control path [N, action_size].
n_iterations: Maximum number of interations. Default: 100.
tol: Tolerance. Default: 1e-6.
on_iteration: Callback at the end of each iteration with the
following signature:
(iteration_count, x, J_opt, accepted, converged) -> None
where:
iteration_count: Current iteration count.
xs: Current state path.
us: Current action path.
J_opt: Optimal cost-to-go.
accepted: Whether this iteration yielded an accepted result.
converged: Whether this iteration converged successfully.
Default: None.
Returns:
Tuple of
xs: optimal state path [N+1, state_size].
us: optimal control path [N, action_size].
"""
# Reset regularization term.
self._mu = 1.0
self._delta = self._delta_0
# Backtracking line search candidates 0 < alpha <= 1.
alphas = 1.1**(-np.arange(10)**2)
us = us_init.copy()
k = self._k
K = self._K
changed = True
converged = False
for iteration in range(n_iterations):
accepted = False
# Forward rollout only if it needs to be recomputed.
if changed:
(xs, F_x, F_u, L, L_x, L_u, L_xx, L_ux, L_uu, F_xx, F_ux,
F_uu) = self._forward_rollout(x0, us, us_init)
J_opt = L.sum()
changed = False
try:
# Backward pass.
k, K = self._backward_pass(F_x, F_u, L_x, L_u, L_xx, L_ux, L_uu,
F_xx, F_ux, F_uu)
# Backtracking line search.
for alpha in alphas:
xs_new, us_new = self._control(xs, us, k, K, alpha)
J_new = self._trajectory_cost(xs_new, us_new)
if J_new < J_opt:
if np.abs((J_opt - J_new) / J_opt) < tol:
converged = True
J_opt = J_new
xs = xs_new
us = us_new
changed = True
# Decrease regularization term.
self._delta = min(1.0, self._delta) / self._delta_0
self._mu *= self._delta
if self._mu <= self._mu_min:
self._mu = 0.0
# Accept this.
accepted = True
break
except np.linalg.LinAlgError as e:
# Quu was not positive-definite and this diverged.
# Try again with a higher regularization term.
warnings.warn(str(e))
if not accepted:
# Increase regularization term.
self._delta = max(1.0, self._delta) * self._delta_0
self._mu = max(self._mu_min, self._mu * self._delta)
if self._mu_max and self._mu >= self._mu_max:
warnings.warn("exceeded max regularization term")
break
if on_iteration:
on_iteration(iteration, xs, us, J_opt, accepted, converged)
if converged:
break
# Store fit parameters.
self._k = k
self._K = K
self._nominal_xs = xs
self._nominal_us = us
return xs, us
def _control(self, xs, us, k, K, alpha=1.0):
"""Applies the controls for a given trajectory.
Args:
xs: Nominal state path [N+1, state_size].
us: Nominal control path [N, action_size].
k: Feedforward gains [N, action_size].
K: Feedback gains [N, action_size, state_size].
alpha: Line search coefficient.
Returns:
Tuple of
xs: state path [N+1, state_size].
us: control path [N, action_size].
"""
xs_new = np.zeros_like(xs)
us_new = np.zeros_like(us)
xs_new[0] = xs[0].copy()
for i in range(self.N):
# Eq (12).
# Applying alpha only on k[i] as in the paper for some reason
# doesn't converge.
us_new[i] = us[i] + alpha * (k[i] + K[i].dot(xs_new[i] - xs[i]))
# Eq (8c).
xs_new[i + 1] = self.dynamics.f(xs_new[i], us_new[i], i)
return xs_new, us_new
def _trajectory_cost(self, xs, us):
"""Computes the given trajectory's cost.
Args:
xs: State path [N+1, state_size].
us: Control path [N, action_size].
Returns:
Trajectory's total cost.
"""
J = map(lambda args: self.cost.l(*args), zip(xs[:-1], us, range(self.N)))
return sum(J) + self.cost.l(xs[-1], None, self.N, terminal=True)
def _forward_rollout(self, x0, us, local_policy):
"""Apply the forward dynamics to have a trajectory from the starting
state x0 by applying the control path us.
Args:
x0: Initial state [state_size].
us: Control path [N, action_size].
Returns:
Tuple of:
xs: State path [N+1, state_size].
F_x: Jacobian of state path w.r.t. x
[N, state_size, state_size].
F_u: Jacobian of state path w.r.t. u
[N, state_size, action_size].
L: Cost path [N+1].
L_x: Jacobian of cost path w.r.t. x [N+1, state_size].
L_u: Jacobian of cost path w.r.t. u [N, action_size].
L_xx: Hessian of cost path w.r.t. x, x
[N+1, state_size, state_size].
L_ux: Hessian of cost path w.r.t. u, x
[N, action_size, state_size].
L_uu: Hessian of cost path w.r.t. u, u
[N, action_size, action_size].
F_xx: Hessian of state path w.r.t. x, x if Hessians are used
[N, state_size, state_size, state_size].
F_ux: Hessian of state path w.r.t. u, x if Hessians are used
[N, state_size, action_size, state_size].
F_uu: Hessian of state path w.r.t. u, u if Hessians are used
[N, state_size, action_size, action_size].
"""
state_size = self.dynamics.state_size
action_size = self.dynamics.action_size
N = us.shape[0]
xs = np.empty((N + 1, state_size))
F_x = np.empty((N, state_size, state_size))
F_u = np.empty((N, state_size, action_size))
if self._use_hessians:
F_xx = np.empty((N, state_size, state_size, state_size))
F_ux = np.empty((N, state_size, action_size, state_size))
F_uu = np.empty((N, state_size, action_size, action_size))
else:
F_xx = None
F_ux = None
F_uu = None
L = np.empty(N + 1)
L_x = np.empty((N + 1, state_size))
L_u = np.empty((N, action_size))
L_xx = np.empty((N + 1, state_size, state_size))
L_ux = np.empty((N, action_size, state_size))
L_uu = np.empty((N, action_size, action_size))
xs[0] = x0
for i in range(N):
x = xs[i]
u = us[i]
xs[i + 1] = self.dynamics.f(x, u, i)
F_x[i] = self.dynamics.f_x(x, u, i)
F_u[i] = self.dynamics.f_u(x, u, i)
L[i] = self.cost.l(x, u, i, terminal=False)
L_x[i] = self.cost.l_x(x, u, i, terminal=False)
L_u[i] = self.cost.l_u(x, u, i, terminal=False)
L_xx[i] = self.cost.l_xx(x, u, i, terminal=False)
L_ux[i] = self.cost.l_ux(x, u, i, terminal=False)
L_uu[i] = self.cost.l_uu(x, u, i, terminal=False)
if self._use_hessians:
F_xx[i] = self.dynamics.f_xx(x, u, i)
F_ux[i] = self.dynamics.f_ux(x, u, i)
F_uu[i] = self.dynamics.f_uu(x, u, i)
x = xs[-1]
L[-1] = self.cost.l(x, None, N, terminal=True)
L_x[-1] = self.cost.l_x(x, None, N, terminal=True)
L_xx[-1] = self.cost.l_xx(x, None, N, terminal=True)
return xs, F_x, F_u, L, L_x, L_u, L_xx, L_ux, L_uu, F_xx, F_ux, F_uu
def _backward_pass(self,
F_x,
F_u,
L_x,
L_u,
L_xx,
L_ux,
L_uu,
F_xx=None,
F_ux=None,
F_uu=None):
"""Computes the feedforward and feedback gains k and K.
Args:
F_x: Jacobian of state path w.r.t. x [N, state_size, state_size].
F_u: Jacobian of state path w.r.t. u [N, state_size, action_size].
L_x: Jacobian of cost path w.r.t. x [N+1, state_size].
L_u: Jacobian of cost path w.r.t. u [N, action_size].
L_xx: Hessian of cost path w.r.t. x, x
[N+1, state_size, state_size].
L_ux: Hessian of cost path w.r.t. u, x [N, action_size, state_size].
L_uu: Hessian of cost path w.r.t. u, u
[N, action_size, action_size].
F_xx: Hessian of state path w.r.t. x, x if Hessians are used
[N, state_size, state_size, state_size].
F_ux: Hessian of state path w.r.t. u, x if Hessians are used
[N, state_size, action_size, state_size].
F_uu: Hessian of state path w.r.t. u, u if Hessians are used
[N, state_size, action_size, action_size].
Returns:
Tuple of
k: feedforward gains [N, action_size].
K: feedback gains [N, action_size, state_size].
"""
V_x = L_x[-1]
V_xx = L_xx[-1]
k = np.empty_like(self._k)
K = np.empty_like(self._K)
for i in range(self.N - 1, -1, -1):
if self._use_hessians:
Q_x, Q_u, Q_xx, Q_ux, Q_uu = self._Q(
F_x[i], F_u[i], L_x[i], L_u[i], L_xx[i], L_ux[i], L_uu[i],
V_x, V_xx, F_xx[i], F_ux[i], F_uu[i])
else:
Q_x, Q_u, Q_xx, Q_ux, Q_uu = self._Q(F_x[i], F_u[i], L_x[i],
L_u[i], L_xx[i], L_ux[i],
L_uu[i], V_x, V_xx)
# Eq (6).
k[i] = -np.linalg.solve(Q_uu, Q_u)
K[i] = -np.linalg.solve(Q_uu, Q_ux)
# Eq (11b).
V_x = Q_x + K[i].T.dot(Q_uu).dot(k[i])
V_x += K[i].T.dot(Q_u) + Q_ux.T.dot(k[i])
# Eq (11c).
V_xx = Q_xx + K[i].T.dot(Q_uu).dot(K[i])
V_xx += K[i].T.dot(Q_ux) + Q_ux.T.dot(K[i])
V_xx = 0.5 * (V_xx + V_xx.T) # To maintain symmetry.
return np.array(k), np.array(K)
def _Q(self,
f_x,
f_u,
l_x,
l_u,
l_xx,
l_ux,
l_uu,
V_x,
V_xx,
f_xx=None,
f_ux=None,
f_uu=None):
"""Computes second order expansion.
Args:
F_x: Jacobian of state w.r.t. x [state_size, state_size].
F_u: Jacobian of state w.r.t. u [state_size, action_size].
L_x: Jacobian of cost w.r.t. x [state_size].
L_u: Jacobian of cost w.r.t. u [action_size].
L_xx: Hessian of cost w.r.t. x, x [state_size, state_size].
L_ux: Hessian of cost w.r.t. u, x [action_size, state_size].
L_uu: Hessian of cost w.r.t. u, u [action_size, action_size].
V_x: Jacobian of the value function at the next time step
[state_size].
V_xx: Hessian of the value function at the next time step w.r.t.
x, x [state_size, state_size].
F_xx: Hessian of state w.r.t. x, x if Hessians are used
[state_size, state_size, state_size].
F_ux: Hessian of state w.r.t. u, x if Hessians are used
[state_size, action_size, state_size].
F_uu: Hessian of state w.r.t. u, u if Hessians are used
[state_size, action_size, action_size].
Returns:
Tuple of
Q_x: [state_size].
Q_u: [action_size].
Q_xx: [state_size, state_size].
Q_ux: [action_size, state_size].
Q_uu: [action_size, action_size].
"""
# Eqs (5a), (5b) and (5c).
Q_x = l_x + f_x.T.dot(V_x)
Q_u = l_u + f_u.T.dot(V_x)
Q_xx = l_xx + f_x.T.dot(V_xx).dot(f_x)
# Eqs (11b) and (11c).
reg = self._mu * np.eye(self.dynamics.state_size)
Q_ux = l_ux + f_u.T.dot(V_xx + reg).dot(f_x)
Q_uu = l_uu + f_u.T.dot(V_xx + reg).dot(f_u)
if self._use_hessians:
Q_xx += np.tensordot(V_x, f_xx, axes=1)
Q_ux += np.tensordot(V_x, f_ux, axes=1)
Q_uu += np.tensordot(V_x, f_uu, axes=1)
return Q_x, Q_u, Q_xx, Q_ux, Q_uu
'''
iLQR_GPS: iLQR modified for GPS
'''
class iLQR_GPS(BaseController):
"""Finite Horizon Iterative Linear Quadratic Regulator."""
def __init__(self, dynamics, cost_GPS, N, A, B, C, max_reg=1e10, hessians=False, epsilon=1):
"""Constructs an iLQR solver.
Args:
dynamics: Plant dynamics.
cost: Cost function.
N: Horizon length.
max_reg: Maximum regularization term to break early due to
divergence. This can be disabled by setting it to None.
hessians: Use the dynamic model's second order derivatives.
Default: only use first order derivatives. (i.e. iLQR instead
of DDP).
"""
self.dynamics = dynamics
self.cost_GPS = cost_GPS
self.N = N
self._use_hessians = hessians and dynamics.has_hessians
if hessians and not dynamics.has_hessians:
warnings.warn("hessians requested but are unavailable in dynamics")
# Regularization terms: Levenberg-Marquardt parameter.
# See II F. Regularization Schedule.
self._mu = 1.0
self._mu_min = 1e-6
self._mu_max = max_reg
self._delta_0 = 2.0
self._delta = self._delta_0
self._k = np.random.uniform(-0.1, 0.1, (N, dynamics.action_size))
self._K = 0.01 * np.random.normal(0, np.eye(dynamics.action_size, dynamics.state_size),
(N, dynamics.action_size, dynamics.state_size))
cov_u = []
temp = 0.01 * np.eye(dynamics.action_size)
cov_u.append(temp)
self.cov_u = np.array(cov_u*self.N)
### New params
self.epsilon = epsilon
self.A = A
self.B = B
self.C = C
super(iLQR_GPS, self).__init__()
def generate_mean_cov(self, x, u, k, K, A, B, C, mean_old, cov_old, Q_uu):
### EQUATION 2.54, 2.55
mean_xu = []
cov_xu = []
mean = [mean_old[0]]
cov = [cov_old[0]]
for i in range(self.N):
temp = u[i][:, np.newaxis] + k[i][:, np.newaxis] + K[i].dot(mean[-1] - x[i][:, np.newaxis])
temp1 = np.concatenate((mean[-1], temp), axis=0)
mean_new = np.matmul(A, temp1) + B
temp2 = np.matmul(cov[-1], K[i].T)
temp3 = np.linalg.inv(Q_uu[i]) + np.matmul(K[i], temp2)
temp4 = np.matmul(np.matmul(A, np.block([[cov[-1], temp2], [temp2.T, temp3]])), A.T)
cov_new = temp4 + C
mean.append(mean_new)
cov.append(cov_new)
mean_xu.append(temp1)
cov_xu.append(np.block([[cov_old[i], temp2], [temp2.T, temp3]]))
return np.array(mean_xu), np.array(cov_xu), np.array(mean), np.array(cov)
def cost_estimation(self, eta, mean_xu, cov_xu, us, us_old):
### EQUATION 2.50 to 2.57
J_estimate_1 = self._trajectory_cost_estimate(mean_xu)
J_estimate_2 = 0
for i in range(mean_xu.shape[0]):
temp = np.trace(np.matmul(self.cost_GPS.Q, cov_xu[i]))
J_estimate_2 = J_estimate_2 + temp
J_estimate_3 = np.sum(entropy(np.abs(us), np.abs(us_old))) + self.epsilon
J_estimate = J_estimate_1/eta + J_estimate_2/eta - eta * J_estimate_3
return J_estimate
def eta_estimation(self, mean_xu, cov_xu, us, us_old):
### Page 54, 55 eta = [0.001, 10]
eta_max = self.cost_estimation(0.001, mean_xu, cov_xu, us, us_old)
eta_min = self.cost_estimation(10, mean_xu, cov_xu, us, us_old)
if eta_max*eta_min < 0:
print('Doing Brentq')
eta = optimize.brentq(self.cost_estimation, 0.001, 10, args=(mean_xu, cov_xu, us, us_old))
print('New eta ',eta)
else:
print('Doing Log search')
param_range = np.geomspace(0.001, 10, 30)
loss = []
for i in param_range:
temp = self.cost_estimation(i, mean_xu, cov_xu, us, us_old)
loss.append(temp)
opt_index = loss.index(min(loss))
eta = param_range[opt_index]
print('New eta ',eta)
return eta
def _control_GPS(self, xs, us, k, K, alpha=1.0):
"""Applies the controls for a given trajectory.
Args:
xs: Nominal state path [N+1, state_size + action_size].
us: Nominal control path [N, action_size].
k: Feedforward gains [N, action_size].
K: Feedback gains [N, action_size, state_size].
alpha: Line search coefficient.
Returns:
Tuple of
xs: state path [N+1, state_size + action_size].
us: control path [N, action_size].
"""
xs_new = np.zeros_like(xs)
us_new = | np.zeros_like(us) | numpy.zeros_like |
"""
Fichero con funciones implementadas en python, por ejemplo la función necesaria para cargar las imagenes.
<NAME>.
"""
import cv2 as cv
import numpy as np
import os
from sklearn.model_selection import train_test_split
import pandas as pd
PATH_POSITIVE_TRAIN = "ECI.Practica/data/train/pedestrians/"
PATH_NEGATIVE_TRAIN = "ECI.Practica/data/train/background/"
PATH_POSITIVE_TEST = "ECI.Practica/data/test/pedestrians/"
PATH_NEGATIVE_TEST = "ECI.Practica/data/test/background/"
EXAMPLE_POSITIVE = PATH_POSITIVE_TEST + "AnnotationsPos_0.000000_crop_000011b_0.png"
EXAMPLE_NEGATIVE = PATH_NEGATIVE_TEST+"AnnotationsNeg_0.000000_00000002a_0.png"
def loadImages(descriptor_class):
totalClases = []
totalData = []
totalData.extend([descriptor_class.compute(cv.imread(PATH_POSITIVE_TRAIN+file,cv.IMREAD_COLOR)).flatten() for file in os.listdir(PATH_POSITIVE_TRAIN)])
totalClases.extend(1 for file in os.listdir(PATH_POSITIVE_TRAIN))
print("Leidas " + str(len(
[name for name in os.listdir(PATH_POSITIVE_TRAIN) if os.path.isfile(os.path.join(PATH_POSITIVE_TRAIN, name)) ]))
+ " imágenes de entrenamiento -> positivas")
totalData.extend([descriptor_class.compute(cv.imread(PATH_NEGATIVE_TRAIN+file,cv.IMREAD_COLOR)).flatten() for file in os.listdir(PATH_NEGATIVE_TRAIN)])
totalClases.extend(0 for file in os.listdir(PATH_NEGATIVE_TRAIN))
print("Leidas " + str(len(
[name for name in os.listdir(PATH_NEGATIVE_TRAIN) if os.path.isfile(os.path.join(PATH_NEGATIVE_TRAIN, name)) ]))
+ " imágenes de entrenamiento -> negativas")
totalData.extend([descriptor_class.compute(cv.imread(PATH_POSITIVE_TEST+file,cv.IMREAD_COLOR)).flatten() for file in os.listdir(PATH_POSITIVE_TEST)])
totalClases.extend(1 for file in os.listdir(PATH_POSITIVE_TEST))
print("Leidas " + str(len(
[name for name in os.listdir(PATH_POSITIVE_TEST) if os.path.isfile(os.path.join(PATH_POSITIVE_TEST, name)) ]))
+ " imágenes de entrenamiento -> positivas")
totalData.extend([descriptor_class.compute(cv.imread(PATH_NEGATIVE_TEST+file,cv.IMREAD_COLOR)).flatten() for file in os.listdir(PATH_NEGATIVE_TEST)])
totalClases.extend(0 for file in os.listdir(PATH_NEGATIVE_TEST))
print("Leidas " + str(len(
[name for name in os.listdir(PATH_NEGATIVE_TEST) if os.path.isfile(os.path.join(PATH_NEGATIVE_TEST, name)) ]))
+ " imágenes de entrenamiento -> negativas")
totalData = np.array(totalData, dtype=np.float32)
totalClases = np.array(totalClases,dtype=np.int32)
return totalData, totalClases
def loadCompresedData(file_name):
arr = np.load(file_name)
arr = arr.f.arr_0
return arr
def train(trainingData,classes,kernel=cv.ml.SVM_LINEAR, degree = 2):
params = dict(kernel_type = kernel,
svm_type=cv.ml.SVM_C_SVC,
degree=1)
if(kernel == cv.ml.SVM_POLY):
params['degree'] = degree
svm = cv.ml.SVM_create()
svm.setKernel(params['kernel_type'])
svm.setType(params['svm_type'])
svm.setDegree(params['degree'])
svm.train(trainingData,cv.ml.ROW_SAMPLE,classes)
return svm
def calculateMetrics(predictedData,realData):
metrics = dict()
true_positive = sum(np.logical_and(predictedData == 1,realData == 1) == True)
false_positive = sum(np.logical_and(predictedData == 1,realData == 0) == True)
false_negative = sum(np.logical_and(predictedData == 0, realData == 1) == True)
true_negative = sum( | np.logical_and(predictedData == 0, realData == 0) | numpy.logical_and |
# Author: <NAME>(ICSRL)
# Created: 4/14/2020, 7:15 AM
# Email: <EMAIL>
import tensorflow as tf
import numpy as np
from network.loss_functions import huber_loss, mse_loss
from network.network import *
from numpy import linalg as LA
class initialize_network_DeepQLearning():
def __init__(self, cfg, name, vehicle_name):
self.g = tf.Graph()
self.vehicle_name = vehicle_name
self.first_frame = True
self.last_frame = []
with self.g.as_default():
stat_writer_path = cfg.network_path + self.vehicle_name + '/return_plot/'
loss_writer_path = cfg.network_path + self.vehicle_name + '/loss' + name + '/'
self.stat_writer = tf.summary.FileWriter(stat_writer_path)
# name_array = 'D:/train/loss'+'/'+name
self.loss_writer = tf.summary.FileWriter(loss_writer_path)
self.env_type = cfg.env_type
self.input_size = cfg.input_size
self.num_actions = cfg.num_actions
# Placeholders
self.batch_size = tf.placeholder(tf.int32, shape=())
self.learning_rate = tf.placeholder(tf.float32, shape=())
self.X1 = tf.placeholder(tf.float32, [None, cfg.input_size, cfg.input_size, 3], name='States')
# self.X = tf.image.resize_images(self.X1, (227, 227))
self.X = tf.map_fn(lambda frame: tf.image.per_image_standardization(frame), self.X1)
self.target = tf.placeholder(tf.float32, shape=[None], name='Qvals')
self.actions = tf.placeholder(tf.int32, shape=[None], name='Actions')
# self.model = AlexNetDuel(self.X, cfg.num_actions, cfg.train_fc)
self.model = C3F2(self.X, cfg.num_actions, cfg.train_fc)
self.predict = self.model.output
ind = tf.one_hot(self.actions, cfg.num_actions)
pred_Q = tf.reduce_sum(tf.multiply(self.model.output, ind), axis=1)
self.loss = huber_loss(pred_Q, self.target)
self.train = tf.train.AdamOptimizer(learning_rate=self.learning_rate, beta1=0.9, beta2=0.99).minimize(
self.loss, name="train")
self.sess = tf.InteractiveSession()
tf.global_variables_initializer().run()
tf.local_variables_initializer().run()
self.saver = tf.train.Saver()
self.all_vars = tf.trainable_variables()
self.sess.graph.finalize()
# Load custom weights from custom_load_path if required
if cfg.custom_load:
print('Loading weights from: ', cfg.custom_load_path)
self.load_network(cfg.custom_load_path)
def get_vars(self):
return self.sess.run(self.all_vars)
def initialize_graphs_with_average(self, agent, agent_on_same_network):
values = {}
var = {}
all_assign = {}
for name_agent in agent_on_same_network:
values[name_agent] = agent[name_agent].network_model.get_vars()
var[name_agent] = agent[name_agent].network_model.all_vars
all_assign[name_agent] = []
for i in range(len(values[name_agent])):
val = []
for name_agent in agent_on_same_network:
val.append(values[name_agent][i])
# Take mean here
mean_val = np.average(val, axis=0)
for name_agent in agent_on_same_network:
# all_assign[name_agent].append(tf.assign(var[name_agent][i], mean_val))
var[name_agent][i].load(mean_val, agent[name_agent].network_model.sess)
def Q_val(self, xs):
target = np.zeros(shape=[xs.shape[0]], dtype=np.float32)
actions = np.zeros(dtype=int, shape=[xs.shape[0]])
return self.sess.run(self.predict,
feed_dict={self.batch_size: xs.shape[0], self.learning_rate: 0, self.X1: xs,
self.target: target, self.actions: actions})
def train_n(self, xs, ys, actions, batch_size, dropout_rate, lr, epsilon, iter):
_, loss, Q = self.sess.run([self.train, self.loss, self.predict],
feed_dict={self.batch_size: batch_size, self.learning_rate: lr, self.X1: xs,
self.target: ys, self.actions: actions})
meanQ = np.mean(Q)
maxQ = np.max(Q)
# Log to tensorboard
self.log_to_tensorboard(tag='Loss', group=self.vehicle_name, value=LA.norm(loss) / batch_size, index=iter)
self.log_to_tensorboard(tag='Epsilon', group=self.vehicle_name, value=epsilon, index=iter)
self.log_to_tensorboard(tag='Learning Rate', group=self.vehicle_name, value=lr, index=iter)
self.log_to_tensorboard(tag='MeanQ', group=self.vehicle_name, value=meanQ, index=iter)
self.log_to_tensorboard(tag='MaxQ', group=self.vehicle_name, value=maxQ, index=iter)
def action_selection(self, state):
target = np.zeros(shape=[state.shape[0]], dtype=np.float32)
actions = np.zeros(dtype=int, shape=[state.shape[0]])
qvals = self.sess.run(self.predict,
feed_dict={self.batch_size: state.shape[0], self.learning_rate: 0.0001,
self.X1: state,
self.target: target, self.actions: actions})
if qvals.shape[0] > 1:
# Evaluating batch
action = np.argmax(qvals, axis=1)
else:
# Evaluating one sample
action = np.zeros(1)
action[0] = np.argmax(qvals)
return action.astype(int)
def log_to_tensorboard(self, tag, group, value, index):
summary = tf.Summary()
tag = group + '/' + tag
summary.value.add(tag=tag, simple_value=value)
self.stat_writer.add_summary(summary, index)
def save_network(self, save_path, episode=''):
save_path = save_path + self.vehicle_name + '/' + self.vehicle_name + '_' + str(episode)
self.saver.save(self.sess, save_path)
print('Model Saved: ', save_path)
def load_network(self, load_path):
self.saver.restore(self.sess, load_path)
def get_weights(self):
xs = np.zeros(shape=(32, 227, 227, 3))
actions = np.zeros(dtype=int, shape=[xs.shape[0]])
ys = np.zeros(shape=[xs.shape[0]], dtype=np.float32)
return self.sess.run(self.weights,
feed_dict={self.batch_size: xs.shape[0], self.learning_rate: 0,
self.X1: xs,
self.target: ys, self.actions: actions})
###########################################################################
# DeepREINFORCE: Class
###########################################################################
class initialize_network_DeepREINFORCE():
def __init__(self, cfg, name, vehicle_name):
self.g = tf.Graph()
self.vehicle_name = vehicle_name
self.iter_baseline = 0
self.iter_policy = 0
self.first_frame = True
self.last_frame = []
self.iter_combined = 0
with self.g.as_default():
stat_writer_path = cfg.network_path + self.vehicle_name + '/return_plot/'
loss_writer_path = cfg.network_path + self.vehicle_name + '/loss' + name + '/'
self.stat_writer = tf.summary.FileWriter(stat_writer_path)
# name_array = 'D:/train/loss'+'/'+name
self.loss_writer = tf.summary.FileWriter(loss_writer_path)
self.env_type = cfg.env_type
self.input_size = cfg.input_size
self.num_actions = cfg.num_actions
# Placeholders
self.batch_size = tf.placeholder(tf.int32, shape=())
self.learning_rate = tf.placeholder(tf.float32, shape=())
self.X1 = tf.placeholder(tf.float32, [None, cfg.input_size, cfg.input_size, 3], name='States')
# self.X = tf.image.resize_images(self.X1, (227, 227))
self.X = tf.map_fn(lambda frame: tf.image.per_image_standardization(frame), self.X1)
# self.target = tf.placeholder(tf.float32, shape=[None], name='action_probs')
# self.target_baseline = tf.placeholder(tf.float32, shape=[None], name='baseline')
self.actions = tf.placeholder(tf.int32, shape=[None, 1], name='Actions')
self.G = tf.placeholder(tf.float32, shape=[None, 1], name='G')
self.B = tf.placeholder(tf.float32, shape=[None, 1], name='B')
# Select the deep network
self.model = C3F2_REINFORCE_with_baseline(self.X, cfg.num_actions, cfg.train_fc)
self.predict = self.model.output
self.baseline = self.model.baseline
self.ind = tf.one_hot(tf.squeeze(self.actions), cfg.num_actions)
self.prob_action = tf.reduce_sum(tf.multiply(self.predict, self.ind), axis=1)
loss_policy = tf.reduce_mean(tf.log(tf.transpose([self.prob_action])) * (self.G - self.B))
loss_entropy = -tf.reduce_mean(tf.multiply((tf.log(self.predict) + 1e-8), self.predict))
self.loss_main = -loss_policy - .2 * loss_entropy
self.loss_branch = mse_loss(self.baseline, self.G)
self.train_main = tf.train.AdamOptimizer(learning_rate=self.learning_rate, beta1=0.9, beta2=0.99).minimize(
self.loss_main, name="train_main")
self.train_branch = tf.train.AdamOptimizer(learning_rate=self.learning_rate, beta1=0.9,
beta2=0.99).minimize(
self.loss_branch, name="train_branch")
# self.train_combined = tf.train.AdamOptimizer(learning_rate=self.learning_rate, beta1=0.9,
# beta2=0.99).minimize(
# self.loss_combined, name="train_combined")
self.sess = tf.InteractiveSession()
tf.global_variables_initializer().run()
tf.local_variables_initializer().run()
self.saver = tf.train.Saver()
self.all_vars = tf.trainable_variables()
self.sess.graph.finalize()
# Load custom weights from custom_load_path if required
if cfg.custom_load:
print('Loading weights from: ', cfg.custom_load_path)
self.load_network(cfg.custom_load_path)
def get_vars(self):
return self.sess.run(self.all_vars)
def initialize_graphs_with_average(self, agent, agent_on_same_network):
values = {}
var = {}
all_assign = {}
for name_agent in agent_on_same_network:
values[name_agent] = agent[name_agent].network_model.get_vars()
var[name_agent] = agent[name_agent].network_model.all_vars
all_assign[name_agent] = []
for i in range(len(values[name_agent])):
val = []
for name_agent in agent_on_same_network:
val.append(values[name_agent][i])
# Take mean here
mean_val = np.average(val, axis=0)
for name_agent in agent_on_same_network:
# all_assign[name_agent].append(tf.assign(var[name_agent][i], mean_val))
var[name_agent][i].load(mean_val, agent[name_agent].network_model.sess)
def prob_actions(self, xs):
G = np.zeros(shape=[1], dtype=np.float32)
B = np.zeros(shape=[1], dtype=np.float32)
actions = np.zeros(dtype=int, shape=[xs.shape[0]])
return self.sess.run(self.predict,
feed_dict={self.batch_size: xs.shape[0], self.learning_rate: 0, self.X1: xs,
self.actions: actions,
self.B: B,
self.G: G})
def train_baseline(self, xs, G, actions, lr, iter):
self.iter_baseline += 1
batch_size = xs.shape[0]
B = np.zeros(shape=[xs.shape[0], 1], dtype=np.float32)
_, loss, baseline_val = self.sess.run([self.train_branch, self.loss_branch, self.baseline],
feed_dict={self.batch_size: xs.shape[0], self.learning_rate: lr,
self.X1: xs,
self.actions: actions,
self.B: B,
self.G: G})
max_baseline = | np.max(baseline_val) | numpy.max |
import mock
import mpi4py.MPI
import numpy as np
import pytest
import unittest
import chainer
import chainer.cuda
import chainer.initializers
import chainer.links
import chainer.testing
import chainer.testing.attr
import chainermn
from chainermn.communicators import _communication_utility
from chainermn.communicators.flat_communicator \
import FlatCommunicator
from chainermn.communicators.hierarchical_communicator \
import HierarchicalCommunicator
from chainermn.communicators.naive_communicator \
import NaiveCommunicator
from chainermn.communicators.non_cuda_aware_communicator \
import NonCudaAwareCommunicator
from chainermn.communicators.pure_nccl_communicator \
import PureNcclCommunicator
from chainermn.communicators.single_node_communicator \
import SingleNodeCommunicator
from chainermn.communicators.two_dimensional_communicator \
import TwoDimensionalCommunicator
from chainermn import nccl
class ExampleModel(chainer.Chain):
def __init__(self, dtype=None):
W = None
bias = None
if dtype is not None:
self.dtype = dtype
W = chainer.initializers.Normal(dtype=self.dtype)
bias = chainer.initializers.Zero(dtype=self.dtype)
super(ExampleModel, self).__init__()
with self.init_scope():
self.a = chainer.links.Linear(2, 3, initialW=W, initial_bias=bias)
self.b = chainer.links.Linear(3, 4, initialW=W, initial_bias=bias)
self.c = chainer.links.Linear(None, 5, initialW=W,
initial_bias=bias)
class ExampleMixedModel(chainer.Chain):
def __init__(self):
W16 = chainer.initializers.Normal(dtype=np.float16)
W32 = chainer.initializers.Normal(dtype=np.float32)
bias16 = chainer.initializers.Zero(dtype=np.float16)
bias32 = chainer.initializers.Zero(dtype=np.float32)
super(ExampleMixedModel, self).__init__()
with self.init_scope():
self.a = chainer.links.Linear(2, 3, initialW=W32,
initial_bias=bias32)
self.b = chainer.links.Linear(3, 4, initialW=W16,
initial_bias=bias16)
self.c = chainer.links.Linear(None, 5, initialW=W16,
initial_bias=bias32)
class Param(object):
def __init__(self, param):
self.gpu = False
self.nccl1 = False
self.model_dtype = None
self.allreduce_grad_dtype = None
self.batched_copy = False
self.global_dtype = None
self.__dict__.update(param)
def __repr__(self):
import pprint
return pprint.pformat(self.__dict__)
cpu_params = [Param(p) for p in [
{
'communicator_class': NaiveCommunicator,
'multi_node': True,
}]]
gpu_params = [Param(p) for p in [
{
'communicator_class': NaiveCommunicator,
'multi_node': True,
}, {
'communicator_class': NaiveCommunicator,
'model_dtype': np.float16,
'multi_node': True,
}, {
'communicator_class': FlatCommunicator,
'multi_node': True,
}, {
'communicator_class': FlatCommunicator,
'model_dtype': np.float16,
'multi_node': True,
}, {
'communicator_class': HierarchicalCommunicator,
'multi_node': True,
}, {
'communicator_class': HierarchicalCommunicator,
'model_dtype': np.float16,
'multi_node': True,
}, {
'communicator_class': TwoDimensionalCommunicator,
'multi_node': True,
}, {
'communicator_class': TwoDimensionalCommunicator,
'model_dtype': np.float16,
'multi_node': True,
}, {
'communicator_class': SingleNodeCommunicator,
'multi_node': False,
}, {
'communicator_class': SingleNodeCommunicator,
'model_dtype': np.float16,
'multi_node': False,
}, {
'communicator_class': NonCudaAwareCommunicator,
'multi_node': True,
}, {
'communicator_class': NonCudaAwareCommunicator,
'model_dtype': np.float16,
'multi_node': False,
}, {
'communicator_class': PureNcclCommunicator,
'multi_node': True,
'nccl1': False,
}, {
'communicator_class': PureNcclCommunicator,
'multi_node': True,
'nccl1': False,
'allreduce_grad_dtype': np.float16,
}, {
'communicator_class': PureNcclCommunicator,
'multi_node': True,
'nccl1': False,
'model_dtype': np.float16,
'allreduce_grad_dtype': np.float16,
}, {
'communicator_class': PureNcclCommunicator,
'multi_node': True,
'nccl1': False,
'model_dtype': np.float64,
'allreduce_grad_dtype': np.float64,
}, {
'communicator_class': PureNcclCommunicator,
'multi_node': True,
'nccl1': False,
'model_dtype': np.float16,
'allreduce_grad_dtype': np.float16,
'batched_copy': True,
}, {
'communicator_class': PureNcclCommunicator,
'multi_node': True,
'nccl1': False,
'model_dtype': np.float16,
'allreduce_grad_dtype': np.float32,
'batched_copy': True,
}, {
'communicator_class': PureNcclCommunicator,
'multi_node': True,
'nccl1': False,
'model_dtype': np.float32,
'allreduce_grad_dtype': np.float32,
'batched_copy': True,
}, {
'communicator_class': PureNcclCommunicator,
'multi_node': True,
'nccl1': False,
'model_dtype': np.float32,
'allreduce_grad_dtype': np.float16,
'batched_copy': True,
}]]
gpu_mixed_dtype_params = [Param(p) for p in [
{
'communicator_class': NonCudaAwareCommunicator,
'multi_node': True,
}, {
'communicator_class': NaiveCommunicator,
'multi_node': True,
}, {
'communicator_class': TwoDimensionalCommunicator,
'multi_node': True,
}, {
'communicator_class': HierarchicalCommunicator,
'multi_node': True,
}, {
'communicator_class': FlatCommunicator,
'multi_node': True,
}, {
'communicator_class': SingleNodeCommunicator,
'multi_node': False,
}
]]
for global_dtype in [np.float32, np.float16, chainer.mixed16, None]:
for allreduce_dtype in [np.float32, np.float16, None]:
if global_dtype is None and allreduce_dtype is None:
continue
for batched_copy in [True, False]:
gpu_mixed_dtype_params.append(Param({
'communicator_class': PureNcclCommunicator,
'multi_node': True,
'global_dtype': global_dtype,
'allreduce_grad_dtype': allreduce_dtype,
'batched_copy': batched_copy,
}))
mpi_comm = mpi4py.MPI.COMM_WORLD
def create_communicator(param, use_gpu):
if not param.multi_node:
ranks = _communication_utility.init_ranks(mpi_comm)
inter_size = ranks[4]
if inter_size > 1:
pytest.skip('This test is for single node only')
if use_gpu and not param.nccl1 and nccl.get_build_version() < 2000:
pytest.skip('This test requires NCCL version >= 2.0')
if param.communicator_class is PureNcclCommunicator:
communicator = param.communicator_class(
mpi_comm, allreduce_grad_dtype=param.allreduce_grad_dtype,
batched_copy=param.batched_copy)
else:
communicator = param.communicator_class(mpi_comm)
if use_gpu:
chainer.cuda.get_device_from_id(communicator.intra_rank).use()
return communicator
def destroy_communicator(comm):
"""Destroy internal NCCL communicator.
When too many NCCL communicator are alive, NCCL produces
unhandled CUDA error. To avoid this, we need to make sure to
destory NCCL communicator after every use.
"""
if hasattr(comm, 'nccl_comm') and comm.nccl_comm is not None:
comm.nccl_comm.destroy()
comm.nccl_comm = None
if hasattr(comm, 'intra_nccl_cojmm') and comm.intra_nccl_comm is not None:
comm.intra_nccl_comm.destroy()
comm.intra_nccl_comm = None
def check_send_and_recv(communicator, *shape):
if communicator.size < 2:
pytest.skip('This test is for multiple nodes')
if communicator.rank > 0:
rank_prev = (communicator.rank - 1) % communicator.size
data_recv = communicator.recv(source=rank_prev, tag=0)
chainer.testing.assert_allclose(
data_recv, rank_prev * np.ones((shape)))
if communicator.rank < communicator.size - 1:
rank_next = (communicator.rank + 1) % communicator.size
data_send = communicator.rank * \
np.ones((shape)).astype(np.float32)
communicator.send(data_send, dest=rank_next, tag=0)
def check_send_and_recv_tuple(communicator, data):
if communicator.size < 2:
pytest.skip('This test is for multiple nodes')
if communicator.rank > 0:
rank_prev = (communicator.rank - 1) % communicator.size
data_recv = communicator.recv(source=rank_prev, tag=0)
for array0, array1 in zip(data, data_recv):
chainer.testing.assert_allclose(array0, array1)
if communicator.rank < communicator.size - 1:
rank_next = (communicator.rank + 1) % communicator.size
communicator.send(data, dest=rank_next, tag=0)
def check_bcast_data(communicator, model):
model.a.W.data[:] = communicator.rank
model.b.W.data[:] = communicator.rank + 1
model.c.b.data[:] = communicator.rank + 2
communicator.bcast_data(model)
chainer.testing.assert_allclose(model.a.W.data, 0 * np.ones((3, 2)))
chainer.testing.assert_allclose(model.b.W.data, 1 * np.ones((4, 3)))
chainer.testing.assert_allclose(model.c.b.data, 2 * np.ones((5, )))
def check_allreduce_grad(communicator, model):
# We need to repeat twice for regressions on lazy initialization of
# sub communicators.
for _ in range(2):
model.a.W.grad[:] = communicator.rank
model.b.W.grad[:] = communicator.rank + 1
model.c.b.grad[:] = communicator.rank + 2
communicator.allreduce_grad(model)
base = (communicator.size - 1.0) / 2
chainer.testing.assert_allclose(model.a.W.grad,
(base + 0) * np.ones((3, 2)))
chainer.testing.assert_allclose(model.b.W.grad,
(base + 1) * np.ones((4, 3)))
chainer.testing.assert_allclose(model.c.b.grad,
(base + 2) * np.ones((5, )))
def check_allreduce_grad_empty(communicator, model):
# We need to repeat twice for regressions on lazy initialization of
# sub communicators.
for _ in range(2):
model.a.W.grad[:] = communicator.rank
model.b.W.grad[:] = communicator.rank + 1
model.c.b.grad = None
communicator.allreduce_grad(model)
base = (communicator.size - 1.0) / 2
chainer.testing.assert_allclose(model.a.W.grad,
(base + 0) * np.ones((3, 2)))
chainer.testing.assert_allclose(model.b.W.grad,
(base + 1) * np.ones((4, 3)))
def check_allreduce_grad_empty_half(communicator, model):
# We need to repeat twice for regressions on lazy initialization of
# sub communicators.
for _ in range(2):
model.a.W.data[:] = communicator.rank
model.b.W.data[:] = communicator.rank + 1
model.c.b.data[:] = communicator.rank + 2
model.a.W.grad[:] = communicator.rank
model.b.W.grad[:] = communicator.rank + 1
if communicator.rank % 2 == 0:
model.c.b.grad[:] = communicator.rank + 2
else:
model.c.b.grad = None
communicator.allreduce_grad(model, zero_fill=True)
base = (communicator.size - 1.0) / 2
chainer.testing.assert_allclose(model.a.W.grad,
(base + 0) * np.ones((3, 2)))
chainer.testing.assert_allclose(model.b.W.grad,
(base + 1) * np.ones((4, 3)))
v = 0
for i in range(communicator.size):
if i % 2 == 0:
v += i + 2
v /= communicator.size
chainer.testing.assert_allclose(model.c.b.grad,
v * np.ones((5, )))
def check_send_recv(param, use_gpu):
communicator = create_communicator(param, use_gpu)
assert mpi_comm.Get_rank() == communicator.rank
assert mpi_comm.Get_size() == communicator.size
check_send_and_recv(communicator, 50)
check_send_and_recv(communicator, 50, 20)
check_send_and_recv(communicator, 50, 20, 5)
check_send_and_recv(communicator, 50, 20, 5, 3)
data = [np.ones((50)).astype(np.float32)]
check_send_and_recv_tuple(communicator, data)
data = [
np.ones((50)).astype(np.float32),
np.ones((50, 20)).astype(np.float32),
np.ones((50, 20, 5)).astype(np.float32)]
check_send_and_recv_tuple(communicator, data)
destroy_communicator(communicator)
def check_allreduce_grad_mixed_dtype(param, model, use_gpu):
# Checks the actual allreduce communication is performed
# in the correct data type (FP16 or FP32)
comm_class = param.communicator_class
if not param.multi_node:
ranks = _communication_utility.init_ranks(mpi_comm)
inter_size = ranks[4]
if inter_size > 1:
pytest.skip('This test is for single node only')
if comm_class is PureNcclCommunicator:
communicator = comm_class(
mpi_comm, allreduce_grad_dtype=param.allreduce_grad_dtype,
batched_copy=param.batched_copy)
else:
communicator = comm_class(mpi_comm)
mpi_comm.barrier()
# answer type: see the document of `create_communicator`
global_dtype = param.global_dtype
allreduce_dtype = param.allreduce_grad_dtype
# assert test configuration.
assert chainer.get_dtype() == global_dtype
answer_dtype = None
if allreduce_dtype == np.float16:
answer_dtype = np.float16
elif allreduce_dtype == np.float32:
answer_dtype = np.float32
else:
if global_dtype == np.float32:
answer_dtype = np.float32
else:
answer_dtype = np.float16
if use_gpu:
model.to_gpu()
model.a.W.grad[:] = communicator.rank
model.b.W.grad[:] = communicator.rank + 1
model.c.b.grad[:] = communicator.rank + 2
if isinstance(communicator, PureNcclCommunicator):
communicator._init_comms()
with mock.patch.object(communicator, 'nccl_comm',
wraps=communicator.nccl_comm) as mc:
answer_dtype = _communication_utility._get_nccl_type_id(
answer_dtype)
communicator.allreduce_grad(model)
# dtype that was used in the actual communication,
# which is nccl_comm.allReduce
call_args = mc.allReduce.call_args[0]
actual_dtype = call_args[3]
assert answer_dtype == actual_dtype
else:
# For other MPI-based communicators,
# all communication should happen in FP32 as of now, so
# here we just check the results are correct for
# 16-32 mixed models.
communicator.allreduce_grad(model)
base = (communicator.size - 1.0) / 2
chainer.testing.assert_allclose(model.a.W.grad,
(base + 0) * np.ones((3, 2)))
chainer.testing.assert_allclose(model.b.W.grad,
(base + 1) * np.ones((4, 3)))
mpi_comm.barrier()
destroy_communicator(communicator)
def check_collective_communication(param, use_gpu):
communicator = create_communicator(param, use_gpu)
mpi_comm.barrier()
model = ExampleModel(param.model_dtype)
if use_gpu:
model.to_gpu()
check_bcast_data(communicator, model)
model = ExampleModel(param.model_dtype)
if use_gpu:
model.to_gpu()
check_allreduce_grad(communicator, model)
model = ExampleModel(param.model_dtype)
if use_gpu:
model.to_gpu()
check_allreduce_grad_empty(communicator, model)
model = ExampleModel(param.model_dtype)
if use_gpu:
model.to_gpu()
check_allreduce_grad_empty_half(communicator, model)
# Check allreduce debug mode
model = ExampleModel()
if use_gpu:
model.to_gpu()
# The example model includes some nan parameters so the debug mode
# must detect it.
chainer.set_debug(True)
with pytest.raises(ValueError, match=r'.* diverged .*'):
check_allreduce_grad(communicator, model)
chainer.set_debug(False)
# barrier() requires before destructor of PureNcclCommunicator
# because communication may not be finished.
mpi_comm.barrier()
destroy_communicator(communicator)
# chainer.testing.parameterize is not available at functions
@pytest.mark.parametrize('param', cpu_params)
def test_communicator_cpu(param):
check_send_recv(param, False)
check_collective_communication(param, False)
@pytest.mark.parametrize('param', gpu_params)
@chainer.testing.attr.gpu
def test_communicator_gpu(param):
check_send_recv(param, True)
check_collective_communication(param, True)
@pytest.mark.parametrize('param', gpu_mixed_dtype_params)
@chainer.testing.attr.gpu
def test_mixed_dtype_communicator_gpu(param):
model = ExampleMixedModel()
with chainer.using_config('dtype', param.global_dtype):
check_allreduce_grad_mixed_dtype(param, model, True)
class TestPureNcclCommunicator(unittest.TestCase):
def setUp(self):
if nccl.get_build_version() < 2000:
pytest.skip('This test requires NCCL version >= 2.0')
self.mpi_comm = mpi4py.MPI.COMM_WORLD
@chainer.testing.attr.gpu
def test_invalid_allreduce_grad_dtype(self):
with self.assertRaises(ValueError):
PureNcclCommunicator(self.mpi_comm, allreduce_grad_dtype=np.int32)
class TestDifferentDtype(unittest.TestCase):
def setup(self, gpu):
if gpu:
self.communicator = chainermn.create_communicator('flat')
self.device = self.communicator.intra_rank
chainer.cuda.get_device_from_id(self.device).use()
else:
self.communicator = chainermn.create_communicator('naive')
self.device = -1
if self.communicator.size != 2:
pytest.skip('This test is for two processes')
# dtypes to be tested
# DO NOT USE chainer.testing.parameterize
# (because running order of generated test cases is not unique)
self.dtypes = [np.int32, np.int64, np.float32, np.float64]
def teardown(self):
if self.communicator:
destroy_communicator(self.communicator)
def check_send_recv(self, x):
if self.communicator.rank == 0:
self.communicator.send(x, dest=1, tag=0)
y = x
elif self.communicator.rank == 1:
y = self.communicator.recv(source=0, tag=0)
chainer.testing.assert_allclose(y, x)
def test_send_recv_cpu(self):
self.setup(False)
for dtype in self.dtypes:
x = np.arange(18).astype(dtype)
self.check_send_recv(x)
x = np.array(1).astype(dtype)
self.check_send_recv(x)
self.teardown()
@chainer.testing.attr.gpu
def test_send_recv_gpu(self):
self.setup(True)
for dtype in self.dtypes:
x = np.arange(18).astype(dtype)
x = chainer.cuda.to_gpu(x, device=self.device)
self.check_send_recv(x)
self.teardown()
def check_alltoall(self, xs):
x = xs[self.communicator.rank]
ys = self.communicator.alltoall(
tuple([x for _ in range(self.communicator.size)]))
for x, y in zip(xs, ys):
chainer.testing.assert_allclose(x, y)
def test_alltoall_cpu(self):
self.setup(False)
for dtype in self.dtypes:
xs = np.arange(4 * self.communicator.size) \
.reshape(self.communicator.size, 4) \
.astype(dtype)
xs = np.split(xs, self.communicator.size)
self.check_alltoall(xs)
xs = [np.array(1).astype(dtype)] * 4
self.check_alltoall(xs)
self.teardown()
@chainer.testing.attr.gpu
def test_alltoall_gpu(self):
self.setup(True)
for dtype in self.dtypes:
xs = np.arange(4 * self.communicator.size) \
.reshape(self.communicator.size, 4) \
.astype(dtype)
xs = np.split(xs, self.communicator.size)
xs = [chainer.cuda.to_gpu(x, device=self.device) for x in xs]
self.check_alltoall(xs)
xs = [np.array(1).astype(dtype)] * 4
xs = [chainer.cuda.to_gpu(x, device=self.device) for x in xs]
self.check_alltoall(xs)
self.teardown()
def check_allgather(self, xs):
x = xs[self.communicator.rank]
ys = self.communicator.allgather(x)
for x, y in zip(xs, ys):
chainer.testing.assert_allclose(x, y)
def test_allgather_cpu(self):
self.setup(False)
for dtype in self.dtypes:
xs = np.arange(4 * self.communicator.size) \
.reshape(self.communicator.size, 4) \
.astype(dtype)
xs = np.split(xs, self.communicator.size)
self.check_allgather(xs)
x = np.array(1).astype(dtype)
ys = self.communicator.allgather(x)
for y in ys:
chainer.testing.assert_allclose(x, y)
self.teardown()
@chainer.testing.attr.gpu
def test_allgather_gpu(self):
self.setup(True)
for dtype in self.dtypes:
xs = np.arange(4 * self.communicator.size) \
.reshape(self.communicator.size, 4) \
.astype(dtype)
xs = np.split(xs, self.communicator.size)
xs = [chainer.cuda.to_gpu(x, device=self.device) for x in xs]
self.check_allgather(xs)
x = np.array(1).astype(dtype)
x = chainer.cuda.to_gpu(x, device=self.device)
ys = self.communicator.allgather(x)
for y in ys:
chainer.testing.assert_allclose(x, y)
self.teardown()
def check_bcast(self, x):
if self.communicator.rank == 0:
y = self.communicator.bcast(x, root=0)
else:
y = self.communicator.bcast(None, root=0)
chainer.testing.assert_allclose(x, y)
def test_bcast_cpu(self):
self.setup(False)
for dtype in self.dtypes:
x = np.arange(4).astype(dtype)
self.check_bcast(x)
x = np.array(42).astype(dtype)
y = self.communicator.bcast(x)
chainer.testing.assert_allclose(x, y)
self.teardown()
@chainer.testing.attr.gpu
def test_bcast_gpu(self):
self.setup(True)
for dtype in self.dtypes:
x = np.arange(4).astype(dtype)
x = chainer.cuda.to_gpu(x, device=self.device)
self.check_bcast(x)
x = np.array(42).astype(dtype)
x = chainer.cuda.to_gpu(x, device=self.device)
y = self.communicator.bcast(x)
chainer.testing.assert_allclose(x, y)
self.teardown()
def check_gather(self, xs, x1, ans):
x = xs[self.communicator.rank]
ys = self.communicator.gather(x, root=0)
if self.communicator.rank == 0:
for x, y in zip(xs, ys):
chainer.testing.assert_allclose(x, y)
ys = self.communicator.gather(x1, root=0)
if self.communicator.rank == 0:
for a, y in zip(ans, ys):
chainer.testing.assert_allclose(a, y)
def test_gather_cpu(self):
self.setup(False)
for dtype in self.dtypes:
xs = np.arange(4 * self.communicator.size) \
.reshape(self.communicator.size, 4) \
.astype(dtype)
xs = np.split(xs, self.communicator.size)
x = np.array(self.communicator.rank).astype(dtype)
ans = np.arange(self.communicator.size, dtype=dtype)
self.check_gather(xs, x, ans)
self.teardown()
@chainer.testing.attr.gpu
def test_gather_gpu(self):
self.setup(True)
for dtype in self.dtypes:
xs = np.arange(4 * self.communicator.size) \
.reshape(self.communicator.size, 4) \
.astype(dtype)
xs = np.split(xs, self.communicator.size)
xs = [chainer.cuda.to_gpu(x, device=self.device) for x in xs]
x = np.array(self.communicator.rank).astype(dtype)
x = chainer.cuda.to_gpu(x, device=self.device)
ans = np.arange(self.communicator.size, dtype=dtype)
self.check_gather(xs, x, ans)
self.teardown()
def check_scatter(self, xs):
x = xs[self.communicator.rank]
if self.communicator.rank == 0:
y = self.communicator.scatter(xs, root=0)
else:
y = self.communicator.scatter(None, root=0)
chainer.testing.assert_allclose(x, y)
def test_scatter_cpu(self):
self.setup(False)
for dtype in self.dtypes:
xs = np.arange(4 * self.communicator.size) \
.reshape(self.communicator.size, 4) \
.astype(dtype)
xs = np.split(xs, self.communicator.size)
self.check_scatter(xs)
x = | np.array(42) | numpy.array |
# define a class for networks
class Network(object):
'''
  Networks have two states: a data state, in which they are stored as a matrix
  and nodes, and a viz state, in which they are stored as viz.links,
  viz.row_nodes, and viz.col_nodes.
  The goal is to start in the data state and produce a viz state of the
  network that will be used as input to clustergram.js.
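
  Example (illustrative sketch only; the file name is hypothetical):

      net = Network()
      net.load_tsv_to_net('my_matrix.txt')
      net.dat['mat']             # numpy array of values
      net.dat['nodes']['row']    # list of row labels
      net.dat['nodes']['col']    # list of column labels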
'''
def __init__(self):
# network: data-state
self.dat = {}
self.dat['nodes'] = {}
self.dat['nodes']['row'] = []
self.dat['nodes']['col'] = []
# node_info holds the orderings (ini, clust, rank), classification ('cl'),
# and other general information
self.dat['node_info'] = {}
for inst_rc in self.dat['nodes']:
self.dat['node_info'][inst_rc] = {}
self.dat['node_info'][inst_rc]['ini'] = []
self.dat['node_info'][inst_rc]['clust'] = []
self.dat['node_info'][inst_rc]['rank'] = []
self.dat['node_info'][inst_rc]['info'] = []
# classification is specifically used to color the class triangles
self.dat['node_info'][inst_rc]['cl'] = []
self.dat['node_info'][inst_rc]['value'] = []
# initialize matrix
self.dat['mat'] = []
# mat_info is an optional dictionary
# so I'm not including it by default
# network: viz-state
self.viz = {}
self.viz['row_nodes'] = []
self.viz['col_nodes'] = []
self.viz['links'] = []
def load_tsv_to_net(self, filename):
f = open(filename,'r')
lines = f.readlines()
f.close()
self.load_lines_from_tsv_to_net(lines)
def pandas_load_tsv_to_net(self, file_buffer):
'''
    A user can optionally add category information to the columns by including
    a second header row whose first cell is left empty, as sketched below.
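
    Illustrative sketch of the expected layout (names are hypothetical; fields
    are tab-separated, and the category row is detected by its empty first
    cell):

                col-1   col-2   col-3
                cat-A   cat-A   cat-B
        row-1   1.0     -0.5    2.3
        row-2   0.2     0.7     -1.1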
'''
import pandas as pd
# get lines and check for category and value info
lines = file_buffer.getvalue().split('\n')
# check for category info in headers
cat_line = lines[1].split('\t')
add_cat = False
if cat_line[0] == '':
add_cat = True
tmp_df = {}
if add_cat:
# read in names and categories
tmp_df['mat'] = pd.read_table(file_buffer, index_col=0, header=[0,1])
else:
# read in names only
tmp_df['mat'] = pd.read_table(file_buffer, index_col=0, header=0)
# save to self
self.df_to_dat(tmp_df)
# add categories if necessary
if add_cat:
cat_line = [i.strip() for i in cat_line]
self.dat['node_info']['col']['cl'] = cat_line[1:]
# make a dict of columns in categories
##########################################
col_in_cat = {}
for i in range(len(self.dat['node_info']['col']['cl'])):
inst_cat = self.dat['node_info']['col']['cl'][i]
inst_col = self.dat['nodes']['col'][i]
if inst_cat not in col_in_cat:
col_in_cat[inst_cat] = []
# collect col names for categories
col_in_cat[inst_cat].append(inst_col)
# save to node_info
self.dat['node_info']['col_in_cat'] = col_in_cat
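      # Illustrative sketch (hypothetical names): with categories
      # ['cat-A', 'cat-A', 'cat-B'] over columns ['col-1', 'col-2', 'col-3'],
      # col_in_cat ends up as {'cat-A': ['col-1', 'col-2'], 'cat-B': ['col-3']}.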
def load_lines_from_tsv_to_net(self, lines):
import numpy as np
# get row/col labels and data from lines
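    # Expected input layout (illustrative sketch; the labels are hypothetical):
    # the first line holds the tab-separated column labels (its first cell is
    # the unused corner label) and every later line is a row label followed by
    # numeric values, e.g.
    #   corner\tcol-1\tcol-2
    #   row-1\t1.0\t-0.5
    #   row-2\t0.2\t0.7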
for i in range(len(lines)):
# get inst_line
inst_line = lines[i].rstrip().split('\t')
# strip each element
inst_line = [z.strip() for z in inst_line]
# get column labels from first row
if i == 0:
tmp_col_labels = inst_line
# add the labels
for inst_elem in range(len(tmp_col_labels)):
# skip the first element
if inst_elem > 0:
# get the column label
inst_col_label = tmp_col_labels[inst_elem]
# add to network data
self.dat['nodes']['col'].append(inst_col_label)
# get row info
if i > 0:
# save row labels
self.dat['nodes']['row'].append(inst_line[0])
# get data - still strings
inst_data_row = inst_line[1:]
# convert to float
inst_data_row = [float(tmp_dat) for tmp_dat in inst_data_row]
# save the row data as an array
inst_data_row = np.asarray(inst_data_row)
        # initialize the matrix
if i == 1:
self.dat['mat'] = inst_data_row
# add rows to matrix
if i > 1:
self.dat['mat'] = np.vstack( ( self.dat['mat'], inst_data_row ) )
def load_l1000cds2(self, l1000cds2):
import scipy
import numpy as np
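    # Illustrative sketch (not from the original code): l1000cds2 is expected to
    # be the parsed JSON response of the L1000CDS2 API. Based on the keys used
    # below, a gene-set query looks roughly like
    #   {'input': {'aggravate': False, 'data': {'upGenes': [...], ...}},
    #    'result': [{'name': 'drug-A', 'score': 1.8,
    #                'overlap': {'up/dn': [...], 'dn/up': [...]}}, ...]}
    # while a characteristic-direction query instead carries 'up'/'dn' dicts
    # with 'genes' and 'vals' lists; any key not referenced in this method is an
    # assumption.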
# process gene set result
if 'upGenes' in l1000cds2['input']['data']:
# add the names from all the results
all_results = l1000cds2['result']
      # grab col nodes - one column per returned drug/perturbation result
self.dat['nodes']['col'] = []
for i in range(len(all_results)):
inst_result = all_results[i]
self.dat['nodes']['col'].append(inst_result['name']+'#'+str(i))
self.dat['node_info']['col']['value'].append(inst_result['score'])
for type_overlap in inst_result['overlap']:
self.dat['nodes']['row'].extend( inst_result['overlap'][type_overlap] )
self.dat['nodes']['row'] = sorted(list(set(self.dat['nodes']['row'])))
# initialize the matrix
      self.dat['mat'] = np.zeros([ len(self.dat['nodes']['row']), len(self.dat['nodes']['col']) ])
      # fill in the matrix with L1000CDS2 data
########################################
      # record the input gene signature as +1 (up) / -1 (down) row values
for i in range(len(self.dat['nodes']['row'])):
inst_gene = self.dat['nodes']['row'][i]
# get gene index
inst_gene_index = self.dat['nodes']['row'].index(inst_gene)
# if gene is in up add 1 otherwise add -1
if inst_gene in l1000cds2['input']['data']['upGenes']:
self.dat['node_info']['row']['value'].append(1)
else:
self.dat['node_info']['row']['value'].append(-1)
# save the name as a class
for i in range(len(self.dat['nodes']['col'])):
self.dat['node_info']['col']['cl'].append(self.dat['nodes']['col'][i])
      # choose the overlap keys depending on the mode:
      # reverse (aggravate == False) vs mimic (aggravate == True)
if l1000cds2['input']['aggravate'] == False:
# reverse gene set
up_type = 'up/dn'
dn_type = 'dn/up'
else:
# mimic gene set
up_type = 'up/up'
dn_type = 'dn/dn'
# loop through drug results
for inst_result_index in range(len(all_results)):
inst_result = all_results[inst_result_index]
# for non-mimic if up/dn then it should be negative since the drug is dn
# for mimic if up/up then it should be positive since the drug is up
for inst_dn in inst_result['overlap'][up_type]:
# get gene index
inst_gene_index = self.dat['nodes']['row'].index(inst_dn)
# save -1 to gene row and drug column
if up_type == 'up/dn':
self.dat['mat'][ inst_gene_index, inst_result_index ] = -1
else:
self.dat['mat'][ inst_gene_index, inst_result_index ] = 1
# for non-mimic if dn/up then it should be positive since the drug is up
# for mimic if dn/dn then it should be negative since the drug is dn
for inst_up in inst_result['overlap'][dn_type]:
# get gene index
inst_gene_index = self.dat['nodes']['row'].index(inst_up)
# save 1 to gene row and drug column
if dn_type == 'dn/up':
self.dat['mat'][ inst_gene_index, inst_result_index ] = 1
else:
self.dat['mat'][ inst_gene_index, inst_result_index ] = -1
# process a characteristic direction vector result
else:
all_results = l1000cds2['result']
# get gene names
self.dat['nodes']['row'] = l1000cds2['input']['data']['up']['genes'] + l1000cds2['input']['data']['dn']['genes']
# save gene expression values
tmp_exp_vect = l1000cds2['input']['data']['up']['vals'] + l1000cds2['input']['data']['dn']['vals']
for i in range(len(self.dat['nodes']['row'])):
self.dat['node_info']['row']['value'].append(tmp_exp_vect[i])
# gather result names
for i in range(len(all_results)):
inst_result = all_results[i]
# add result to list
self.dat['nodes']['col'].append(inst_result['name']+'#'+str(i))
self.dat['node_info']['col']['cl'].append(inst_result['name'])
# reverse signature, score [1,2]
if l1000cds2['input']['aggravate'] == False:
self.dat['node_info']['col']['value'].append( inst_result['score']-1 )
else:
self.dat['node_info']['col']['value'].append( 1 - inst_result['score'] )
# concat up and down lists
inst_vect = inst_result['overlap']['up'] + inst_result['overlap']['dn']
inst_vect = np.transpose(np.asarray(inst_vect))
inst_vect = inst_vect.reshape(-1,1)
# initialize or add to matrix
if type(self.dat['mat']) is list:
self.dat['mat'] = inst_vect
else:
self.dat['mat'] = np.hstack(( self.dat['mat'], inst_vect))
def load_vect_post_to_net(self, vect_post):
import numpy as np
# get all signatures (a.k.a. columns)
sigs = vect_post['columns']
# get all rows from signatures
all_rows = []
all_sigs = []
for inst_sig in sigs:
# gather sig names
all_sigs.append(inst_sig['col_name'])
# get column
col_data = inst_sig['data']
# gather row names
for inst_row_data in col_data:
# get gene name
all_rows.append( inst_row_data['row_name'] )
# get unique sorted list of genes
all_rows = sorted(list(set(all_rows)))
all_sigs = sorted(list(set(all_sigs)))
print( 'found ' + str(len(all_rows)) + ' rows' )
print( 'found ' + str(len(all_sigs)) + ' columns\n' )
# save genes and sigs to nodes
self.dat['nodes']['row'] = all_rows
self.dat['nodes']['col'] = all_sigs
# initialize numpy matrix of nans
self.dat['mat'] = np.empty((len(all_rows),len(all_sigs)))
self.dat['mat'][:] = np.nan
is_up_down = False
if 'is_up_down' in vect_post:
if vect_post['is_up_down'] == True:
is_up_down = True
if is_up_down == True:
self.dat['mat_up'] = np.empty((len(all_rows),len(all_sigs)))
self.dat['mat_up'][:] = np.nan
self.dat['mat_dn'] = np.empty((len(all_rows),len(all_sigs)))
self.dat['mat_dn'][:] = np.nan
# loop through all signatures and rows
# and place information into self.dat
for inst_sig in sigs:
# get sig name
inst_sig_name = inst_sig['col_name']
# get row data
col_data = inst_sig['data']
# loop through column
for inst_row_data in col_data:
# add row data to signature matrix
inst_row = inst_row_data['row_name']
inst_value = inst_row_data['val']
# find index of row and sig in matrix
row_index = all_rows.index(inst_row)
col_index = all_sigs.index(inst_sig_name)
# save inst_value to matrix
self.dat['mat'][row_index, col_index] = inst_value
if is_up_down == True:
self.dat['mat_up'][row_index, col_index] = inst_row_data['val_up']
self.dat['mat_dn'][row_index, col_index] = inst_row_data['val_dn']
def load_data_file_to_net(self, filename):
# load json from file to new dictionary
inst_dat = self.load_json_to_dict(filename)
# convert dat['mat'] to numpy array and add to network
self.load_data_to_net(inst_dat)
def load_data_to_net(self, inst_net):
''' load data into nodes and mat, also convert mat to numpy array'''
self.dat['nodes'] = inst_net['nodes']
self.dat['mat'] = inst_net['mat']
# convert to numpy array
self.mat_to_numpy_arr()
def export_net_json(self, net_type, indent='no-indent'):
''' export json string of dat '''
import json
from copy import deepcopy
if net_type == 'dat':
exp_dict = deepcopy(self.dat)
# convert numpy array to list
if type(exp_dict['mat']) is not list:
exp_dict['mat'] = exp_dict['mat'].tolist()
elif net_type == 'viz':
exp_dict = self.viz
# make json
if indent == 'indent':
exp_json = json.dumps(exp_dict, indent=2)
else:
exp_json = json.dumps(exp_dict)
return exp_json
def write_json_to_file(self, net_type, filename, indent='no-indent'):
import json
# get dat or viz representation as json string
if net_type == 'dat':
exp_json = self.export_net_json('dat', indent)
elif net_type == 'viz':
exp_json = self.export_net_json('viz', indent)
# save to file
fw = open(filename, 'w')
fw.write( exp_json )
fw.close()
def set_node_names(self, row_name, col_name):
'''give names to the rows and columns'''
self.dat['node_names'] = {}
self.dat['node_names']['row'] = row_name
self.dat['node_names']['col'] = col_name
def mat_to_numpy_arr(self):
''' convert list to numpy array - numpy arrays can not be saved as json '''
import numpy as np
self.dat['mat'] = np.asarray( self.dat['mat'] )
def swap_nan_for_zero(self):
import numpy as np
self.dat['mat'][ np.isnan( self.dat['mat'] ) ] = 0
def filter_row_thresh( self, row_filt_int, filter_type='value' ):
'''
Remove rows from matrix that do not meet some threshold
value: The default filtering is value, in that at least one value in the row
has to be higher than some threshold.
num: Rows can be filtered by the number of non-zero values it has.
sum: Rows can be filtered by the sum of the values
'''
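# illustrative usage (assuming a populated network object named net):
#   net.filter_row_thresh(0.1, filter_type='value')
# keeps rows that have at least one entry above 10% of the matrix-wide
# maximum absolute value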
import scipy
import numpy as np
# max value in matrix
mat = self.dat['mat']
max_mat = abs(max(mat.min(), mat.max(), key=abs))
# maximum number of measurements
max_num = len(self.dat['nodes']['col'])
mat_abs = abs(mat)
sum_row = np.sum(mat_abs, axis=1)
max_sum = max(sum_row)
# transfer the nodes
nodes = {}
nodes['row'] = []
nodes['col'] = self.dat['nodes']['col']
# transfer the 'info' part of node_info if necessary
node_info = {}
node_info['row'] = []
node_info['col'] = self.dat['node_info']['col']['info']
# filter rows
#################################
for i in range(len(self.dat['nodes']['row'])):
# get row name
inst_nodes_row = self.dat['nodes']['row'][i]
# get node info - disregard ini, clust, and rank orders
if len(self.dat['node_info']['row']['info']) > 0:
inst_node_info = self.dat['node_info']['row']['info'][i]
# get absolute value of row data
row_vect = np.absolute(self.dat['mat'][i,:])
# value: is there at least one value over cutoff
##################################################
if filter_type == 'value':
# calc cutoff
cutoff = row_filt_int * max_mat
# count the number of values above some thresh
found_tuple = np.where(row_vect >= cutoff)
if len(found_tuple[0])>=1:
# add name
nodes['row'].append(inst_nodes_row)
# add info if necessary
if len(self.dat['node_info']['row']['info']) > 0:
node_info['row'].append(inst_node_info)
elif filter_type == 'num':
num_nonzero = np.count_nonzero(row_vect)
# use integer number of non-zero measurements
cutoff = row_filt_int * 10
if num_nonzero>= cutoff:
# add name
nodes['row'].append(inst_nodes_row)
# add info if necessary
if len(self.dat['node_info']['row']['info']) > 0:
node_info['row'].append(inst_node_info)
elif filter_type == 'sum':
inst_row_sum = sum(abs(row_vect))
if inst_row_sum > row_filt_int*max_sum:
# add name
nodes['row'].append(inst_nodes_row)
# add info if necessary
if len(self.dat['node_info']['row']['info']) > 0:
node_info['row'].append(inst_node_info)
# cherrypick data from self.dat['mat']
##################################
# filtered matrix
filt_mat = np.zeros([ len(nodes['row']), len(nodes['col']) ])
if 'mat_up' in self.dat:
filt_mat_up = np.zeros([ len(nodes['row']), len(nodes['col']) ])
filt_mat_dn = np.zeros([ len(nodes['row']), len(nodes['col']) ])
if 'mat_info' in self.dat:
# initialize filtered mat_info dictionary with tuple keys
filt_mat_info = {}
# loop through the rows
for i in range(len(nodes['row'])):
inst_row = nodes['row'][i]
# loop through the cols
for j in range(len(nodes['col'])):
inst_col = nodes['col'][j]
# get row and col index
pick_row = self.dat['nodes']['row'].index(inst_row)
pick_col = self.dat['nodes']['col'].index(inst_col)
# cherrypick
###############
filt_mat[i,j] = self.dat['mat'][pick_row, pick_col]
if 'mat_up' in self.dat:
filt_mat_up[i,j] = self.dat['mat_up'][pick_row, pick_col]
filt_mat_dn[i,j] = self.dat['mat_dn'][pick_row, pick_col]
if 'mat_info' in self.dat:
filt_mat_info[str((i,j))] = self.dat['mat_info'][str((pick_row,pick_col))]
# save nodes array - list of node names
self.dat['nodes'] = nodes
# save node_info array - list of node infos
self.dat['node_info']['row']['info'] = node_info['row']
self.dat['node_info']['col']['info'] = node_info['col']
# overwrite with new filtered data
self.dat['mat'] = filt_mat
# overwrite with up/dn data if necessary
if 'mat_up' in self.dat:
self.dat['mat_up'] = filt_mat_up
self.dat['mat_dn'] = filt_mat_dn
# overwrite mat_info if necessary
if 'mat_info' in self.dat:
self.dat['mat_info'] = filt_mat_info
print( 'final mat shape: ' + str(self.dat['mat'].shape ) + '\n')
def filter_col_thresh( self, cutoff, min_num_meet ):
'''
remove rows and columns from matrix that do not have at least
min_num_meet instances of a value with an absolute value above cutoff
'''
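# illustrative usage (assuming a populated network object named net):
#   net.filter_col_thresh(0.001, 1)
# keeps columns with at least one absolute value above 0.001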
import scipy
import numpy as np
# transfer the nodes
nodes = {}
nodes['row'] = self.dat['nodes']['row']
nodes['col'] = []
# transfer the 'info' part of node_info if necessary
node_info = {}
node_info['row'] = self.dat['node_info']['row']['info']
node_info['col'] = []
# add cols with non-zero values
#################################
for i in range(len(self.dat['nodes']['col'])):
# get col name
inst_nodes_col = self.dat['nodes']['col'][i]
# get node info - disregard ini, clust, and rank orders
if len(self.dat['node_info']['col']['info']) > 0:
inst_node_info = self.dat['node_info']['col']['info'][i]
# get col vect
col_vect = np.absolute(self.dat['mat'][:,i])
# check if there are nonzero values
found_tuple = np.where(col_vect >= cutoff)
if len(found_tuple[0])>=min_num_meet:
# add name
nodes['col'].append(inst_nodes_col)
# add info if necessary
if len(self.dat['node_info']['col']['info']) > 0:
node_info['col'].append(inst_node_info)
# cherrypick data from self.dat['mat']
##################################
# filtered matrix
filt_mat = np.zeros([ len(nodes['row']), len(nodes['col']) ])
if 'mat_up' in self.dat:
filt_mat_up = np.zeros([ len(nodes['row']), len(nodes['col']) ])
filt_mat_dn = np.zeros([ len(nodes['row']), len(nodes['col']) ])
if 'mat_info' in self.dat:
# initialize filtered mat_info dictionary with tuple keys
filt_mat_info = {}
# loop through the rows
for i in range(len(nodes['row'])):
inst_row = nodes['row'][i]
# loop through the cols
for j in range(len(nodes['col'])):
inst_col = nodes['col'][j]
# get row and col index
pick_row = self.dat['nodes']['row'].index(inst_row)
pick_col = self.dat['nodes']['col'].index(inst_col)
# cherrypick
###############
filt_mat[i,j] = self.dat['mat'][pick_row, pick_col]
if 'mat_up' in self.dat:
filt_mat_up[i,j] = self.dat['mat_up'][pick_row, pick_col]
filt_mat_dn[i,j] = self.dat['mat_dn'][pick_row, pick_col]
if 'mat_info' in self.dat:
filt_mat_info[str((i,j))] = self.dat['mat_info'][str((pick_row,pick_col))]
# save nodes array - list of node names
self.dat['nodes'] = nodes
# save node_info array - list of node infos
self.dat['node_info']['row']['info'] = node_info['row']
self.dat['node_info']['col']['info'] = node_info['col']
# overwrite with new filtered data
self.dat['mat'] = filt_mat
# overwrite with up/dn data if necessary
if 'mat_up' in self.dat:
self.dat['mat_up'] = filt_mat_up
self.dat['mat_dn'] = filt_mat_dn
# overwrite mat_info if necessary
if 'mat_info' in self.dat:
self.dat['mat_info'] = filt_mat_info
print( 'final mat shape: ' + str(self.dat['mat'].shape ) + '\n')
def filter_network_thresh( self, cutoff, min_num_meet ):
'''
remove rows and columns from matrix that do not have at least
min_num_meet instances of a value with an absolute value above cutoff
'''
import scipy
import numpy as np
# transfer the nodes
nodes = {}
nodes['row'] = []
nodes['col'] = []
# transfer the 'info' part of node_info if necessary
node_info = {}
node_info['row'] = []
node_info['col'] = []
# add rows with non-zero values
#################################
for i in range(len(self.dat['nodes']['row'])):
# get row name
inst_nodes_row = self.dat['nodes']['row'][i]
# get node info - disregard ini, clust, and rank orders
if len(self.dat['node_info']['row']['info']) > 0:
inst_node_info = self.dat['node_info']['row']['info'][i]
# get row vect
row_vect = np.absolute(self.dat['mat'][i,:])
# check if there are nonzero values
found_tuple = np.where(row_vect >= cutoff)
if len(found_tuple[0])>=min_num_meet:
# add name
nodes['row'].append(inst_nodes_row)
# add info if necessary
if len(self.dat['node_info']['row']['info']) > 0:
node_info['row'].append(inst_node_info)
# add cols with non-zero values
#################################
for i in range(len(self.dat['nodes']['col'])):
# get col name
inst_nodes_col = self.dat['nodes']['col'][i]
# get node info - disregard ini, clust, and rank orders
if len(self.dat['node_info']['col']['info']) > 0:
inst_node_info = self.dat['node_info']['col']['info'][i]
# get col vect
col_vect = np.absolute(self.dat['mat'][:,i])
# check if there are nonzero values
found_tuple = np.where(col_vect >= cutoff)
if len(found_tuple[0])>=min_num_meet:
# add name
nodes['col'].append(inst_nodes_col)
# add info if necessary
if len(self.dat['node_info']['col']['info']) > 0:
node_info['col'].append(inst_node_info)
# cherrypick data from self.dat['mat']
##################################
# filtered matrix
filt_mat = np.zeros([ len(nodes['row']), len(nodes['col']) ])
if 'mat_up' in self.dat:
filt_mat_up = np.zeros([ len(nodes['row']), len(nodes['col']) ])
filt_mat_dn = np.zeros([ len(nodes['row']), len(nodes['col']) ])
if 'mat_info' in self.dat:
# initialize filtered mat_info dictionary with tuple keys
filt_mat_info = {}
# loop through the rows
for i in range(len(nodes['row'])):
inst_row = nodes['row'][i]
# loop through the cols
for j in range(len(nodes['col'])):
inst_col = nodes['col'][j]
# get row and col index
pick_row = self.dat['nodes']['row'].index(inst_row)
pick_col = self.dat['nodes']['col'].index(inst_col)
# cherrypick
###############
filt_mat[i,j] = self.dat['mat'][pick_row, pick_col]
if 'mat_up' in self.dat:
filt_mat_up[i,j] = self.dat['mat_up'][pick_row, pick_col]
filt_mat_dn[i,j] = self.dat['mat_dn'][pick_row, pick_col]
if 'mat_info' in self.dat:
filt_mat_info[str((i,j))] = self.dat['mat_info'][str((pick_row,pick_col))]
# save nodes array - list of node names
self.dat['nodes'] = nodes
# save node_info array - list of node infos
self.dat['node_info']['row']['info'] = node_info['row']
self.dat['node_info']['col']['info'] = node_info['col']
# overwrite with new filtered data
self.dat['mat'] = filt_mat
# overwrite with up/dn data if necessary
if 'mat_up' in self.dat:
self.dat['mat_up'] = filt_mat_up
self.dat['mat_dn'] = filt_mat_dn
# overwrite mat_info if necessary
if 'mat_info' in self.dat:
self.dat['mat_info'] = filt_mat_info
print( 'final mat shape: ' + str(self.dat['mat'].shape ) + '\n')
def keep_max_num_links(self, keep_num_links):
print('\trun keep_max_num_links')
max_mat_value = abs(self.dat['mat']).max()
# check the total number of links
inst_thresh = 0
inst_pct_max = 0
inst_num_links = (abs(self.dat['mat'])>inst_thresh).sum()
print('initially there are '+str(inst_num_links)+' links\n')
thresh_fraction = 100
while (inst_num_links > keep_num_links):
# increase the threshold as a pct of max value in mat
inst_pct_max = inst_pct_max + 1
# increase threshold
inst_thresh = max_mat_value*(float(inst_pct_max)/thresh_fraction)
# check the number of links above the curr threshold
inst_num_links = (abs(self.dat['mat'])>inst_thresh).sum()
print('there are '+str(inst_num_links)+ ' links at threshold '+str(inst_pct_max)+'pct and value of ' +str(inst_thresh)+'\n')
# if the threshold removed all links, step the threshold back down to the previous value
if inst_num_links == 0:
inst_pct_max = inst_pct_max - 1
inst_thresh = max_mat_value*(float(inst_pct_max)/thresh_fraction)
print('final number of links '+str(inst_num_links))
# replace values that are less than thresh with zero
self.dat['mat'][ abs(self.dat['mat']) < inst_thresh] = 0
# return number of links
return (abs(self.dat['mat'])>inst_thresh).sum()
def cluster_row_and_col(self, dist_type='cosine', linkage_type='average', dendro=True, \
run_clustering=True, run_rank=True):
'''
cluster net.dat and make visualization json, net.viz.
optionally leave out dendrogram colorbar groups with dendro argument
'''
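# the resulting orderings (ini, clust, rank, group) are stored in
# self.dat['node_info'] and serialized for the visualization by viz_json below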
import scipy
import numpy as np
from scipy.spatial.distance import pdist
from copy import deepcopy
# do not make a dendrogram if you are not running clustering
if run_clustering == False:
dendro = False
# make distance matrices
##########################
# get number of rows and columns from self.dat
num_row = len(self.dat['nodes']['row'])
num_col = len(self.dat['nodes']['col'])
# initialize distance matrices
row_dm = np.zeros([num_row,num_row])
col_dm = np.zeros([num_col,num_col])
# make copy of matrix
tmp_mat = deepcopy(self.dat['mat'])
# calculate distance matrix
row_dm = pdist( tmp_mat, metric=dist_type )
col_dm = pdist( tmp_mat.transpose(), metric=dist_type )
# prevent negative values
row_dm[row_dm < 0] = float(0)
col_dm[col_dm < 0] = float(0)
# initialize clust order
clust_order = self.ini_clust_order()
# initial ordering
###################
clust_order['row']['ini'] = range(num_row, -1, -1)
clust_order['col']['ini'] = range(num_col, -1, -1)
# cluster
if run_clustering == True:
clust_order['row']['clust'], clust_order['row']['group'] = \
self.clust_and_group(row_dm, linkage_type=linkage_type)
clust_order['col']['clust'], clust_order['col']['group'] = \
self.clust_and_group(col_dm, linkage_type=linkage_type)
# rank
if run_rank == True:
clust_order['row']['rank'] = self.sort_rank_nodes('row')
clust_order['col']['rank'] = self.sort_rank_nodes('col')
# save clustering orders to node_info
if run_clustering == True:
self.dat['node_info']['row']['clust'] = clust_order['row']['clust']
self.dat['node_info']['col']['clust'] = clust_order['col']['clust']
else:
self.dat['node_info']['row']['clust'] = clust_order['row']['ini']
self.dat['node_info']['col']['clust'] = clust_order['col']['ini']
if run_rank == True:
self.dat['node_info']['row']['rank'] = clust_order['row']['rank']
self.dat['node_info']['col']['rank'] = clust_order['col']['rank']
else:
self.dat['node_info']['row']['rank'] = clust_order['row']['ini']
self.dat['node_info']['col']['rank'] = clust_order['col']['ini']
# transfer orderings
# row
self.dat['node_info']['row']['ini'] = clust_order['row']['ini']
self.dat['node_info']['row']['group'] = clust_order['row']['group']
# col
self.dat['node_info']['col']['ini'] = clust_order['col']['ini']
self.dat['node_info']['col']['group'] = clust_order['col']['group']
#!! disabled temporarily
# if len(self.dat['node_info']['col']['cl']) > 0:
# self.calc_cat_clust_order()
# make the viz json - can optionally leave out dendrogram
self.viz_json(dendro)
def calc_cat_clust_order(self):
from clustergrammer import Network
from copy import deepcopy
col_in_cat = self.dat['node_info']['col_in_cat']
# alpha order categories
all_cats = sorted(col_in_cat.keys())
# cluster each category
##############################
# calc clustering of each category
all_cat_orders = []
# this is the ordering of the columns based on their category, not
# including their clustering order on top of their category
tmp_col_names_list = []
for inst_cat in all_cats:
inst_cols = col_in_cat[inst_cat]
# keep a list of the columns
tmp_col_names_list.extend(inst_cols)
cat_net = deepcopy(Network())
cat_net.dat['mat'] = deepcopy(self.dat['mat'])
cat_net.dat['nodes'] = deepcopy(self.dat['nodes'])
# get dataframe, to simplify column filtering
cat_df = cat_net.dat_to_df()
# get subset of dataframe
sub_df = {}
sub_df['mat'] = cat_df['mat'][inst_cols]
# load back to dat
cat_net.df_to_dat(sub_df)
try:
cat_net.cluster_row_and_col('cos')
inst_cat_order = cat_net.dat['node_info']['col']['clust']
except:
inst_cat_order = range(len(cat_net.dat['nodes']['col']))
prev_order_len = len(all_cat_orders)
# add previous order length to the current order number
inst_cat_order = [i+prev_order_len for i in inst_cat_order]
all_cat_orders.extend(inst_cat_order)
# sort tmp_col_names_list by the integers in all_cat_orders
names_col_cat_clust = [x for (y,x) in sorted(zip(all_cat_orders,tmp_col_names_list))]
# calc category-cluster order
##############################
final_order = []
for i in range(len(self.dat['nodes']['col'])):
# get the rank of the col in the order of col_nodes
inst_col_name = self.dat['nodes']['col'][i]
inst_col_num = names_col_cat_clust.index(inst_col_name)
final_order.append(inst_col_num)
self.dat['node_info']['col']['cl_index'] = final_order
def clust_and_group( self, dm, linkage_type='average' ):
import scipy.cluster.hierarchy as hier
# calculate linkage
Y = hier.linkage( dm, method=linkage_type )
Z = hier.dendrogram( Y, no_plot=True )
# get ordering
inst_clust_order = Z['leaves']
all_dist = self.group_cutoffs()
# assign flat-cluster group labels at each distance cutoff
inst_groups = {}
for inst_dist in all_dist:
inst_key = str(inst_dist).replace('.','')
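# fcluster cuts the tree at a fraction (inst_dist) of the maximum pairwise
# distance and returns a flat group label for every node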
inst_groups[inst_key] = hier.fcluster(Y, inst_dist*dm.max(), 'distance')
inst_groups[inst_key] = inst_groups[inst_key].tolist()
return inst_clust_order, inst_groups
def sort_rank_node_values( self, rowcol ):
import numpy as np
from operator import itemgetter
from copy import deepcopy
# make a copy of nodes and node_info
inst_nodes = deepcopy(self.dat['nodes'][rowcol])
inst_vals = deepcopy(self.dat['node_info'][rowcol]['value'])
tmp_arr = []
for i in range(len(inst_nodes)):
inst_dict = {}
# get name of the node
inst_dict['name'] = inst_nodes[i]
# get value
inst_dict['value'] = inst_vals[i]
tmp_arr.append(inst_dict)
# sort dictionary by value
tmp_arr = sorted( tmp_arr, key=itemgetter('value') )
# get list of sorted nodes
tmp_sort_nodes = []
for inst_dict in tmp_arr:
tmp_sort_nodes.append( inst_dict['name'] )
# get the sorted index
sort_index = []
for inst_node in inst_nodes:
sort_index.append( tmp_sort_nodes.index(inst_node) )
return sort_index
def sort_rank_nodes( self, rowcol ):
import numpy as np
from operator import itemgetter
from copy import deepcopy
# make a copy of node information
inst_nodes = deepcopy(self.dat['nodes'][rowcol])
inst_mat = deepcopy(self.dat['mat'])
sum_term = []
for i in range(len(inst_nodes)):
inst_dict = {}
# get name of the node
inst_dict['name'] = inst_nodes[i]
# sum values of the node
if rowcol == 'row':
inst_dict['total'] = np.sum(inst_mat[i,:])
else:
inst_dict['total'] = np.sum(inst_mat[:,i])
# add this to the list of dicts
sum_term.append(inst_dict)
# sort dictionary by number of terms
sum_term = sorted( sum_term, key=itemgetter('total'), reverse=False )
# get list of sorted nodes
tmp_sort_nodes = []
for inst_dict in sum_term:
tmp_sort_nodes.append(inst_dict['name'])
# get the sorted index
sort_index = []
for inst_node in inst_nodes:
sort_index.append( tmp_sort_nodes.index(inst_node) )
return sort_index
def viz_json(self, dendro=True):
''' make the dictionary for the clustergram.js visualization '''
# get dendrogram cutoff distances
all_dist = self.group_cutoffs()
# make nodes for viz
#####################
# make rows and cols
for inst_rc in self.dat['nodes']:
for i in range(len( self.dat['nodes'][inst_rc] )):
inst_dict = {}
inst_dict['name'] = self.dat['nodes'][inst_rc][i]
inst_dict['ini'] = self.dat['node_info'][inst_rc]['ini'][i]
#!! clean this up so I do not have to get the index here
inst_dict['clust'] = self.dat['node_info'][inst_rc]['clust'].index(i)
inst_dict['rank'] = self.dat['node_info'][inst_rc]['rank'][i]
# add node class cl
if len(self.dat['node_info'][inst_rc]['cl']) > 0:
inst_dict['cl'] = self.dat['node_info'][inst_rc]['cl'][i]
# add node class cl_index
if 'cl_index' in self.dat['node_info'][inst_rc]:
inst_dict['cl_index'] = self.dat['node_info'][inst_rc]['cl_index'][i]
# add node class val
if len(self.dat['node_info'][inst_rc]['value']) > 0:
inst_dict['value'] = self.dat['node_info'][inst_rc]['value'][i]
# add node information
# if 'info' in self.dat['node_info'][inst_rc]:
if len(self.dat['node_info'][inst_rc]['info']) > 0:
inst_dict['info'] = self.dat['node_info'][inst_rc]['info'][i]
# group info
if dendro==True:
inst_dict['group'] = []
for tmp_dist in all_dist:
# read group info in correct order
tmp_dist = str(tmp_dist).replace('.','')
inst_dict['group'].append( float( self.dat['node_info'][inst_rc]['group'][tmp_dist][i] ) )
# append dictionary to list of nodes
self.viz[inst_rc+'_nodes'].append(inst_dict)
# links
########
for i in range(len( self.dat['nodes']['row'] )):
for j in range(len( self.dat['nodes']['col'] )):
if abs( self.dat['mat'][i,j] ) > 0:
inst_dict = {}
inst_dict['source'] = i
inst_dict['target'] = j
inst_dict['value'] = self.dat['mat'][i,j]
# add up/dn values if necessary
if 'mat_up' in self.dat:
inst_dict['value_up'] = self.dat['mat_up'][i,j]
inst_dict['value_dn'] = self.dat['mat_dn'][i,j]
# add information if necessary - use dictionary with tuple key
# each element of the matrix needs to have information
if 'mat_info' in self.dat:
# use tuple string
inst_dict['info'] = self.dat['mat_info'][str((i,j))]
# add highlight if necessary - use dictionary with tuple key
if 'mat_hl' in self.dat:
inst_dict['highlight'] = self.dat['mat_hl'][i,j]
# append link
self.viz['links'].append( inst_dict )
def df_to_dat(self, df):
import numpy as np
import pandas as pd
self.dat['mat'] = df['mat'].values
self.dat['nodes']['row'] = df['mat'].index.tolist()
self.dat['nodes']['col'] = df['mat'].columns.tolist()
# check if there is category information in the column names
if type(self.dat['nodes']['col'][0]) is tuple:
self.dat['nodes']['col'] = [i[0] for i in self.dat['nodes']['col']]
if 'mat_up' in df:
self.dat['mat_up'] = df['mat_up'].values
self.dat['mat_dn'] = df['mat_dn'].values
def dat_to_df(self):
import numpy as np
import pandas as pd
df = {}
# always return 'mat' dataframe
df['mat'] = pd.DataFrame(data = self.dat['mat'], columns=self.dat['nodes']['col'], index=self.dat['nodes']['row'])
if 'mat_up' in self.dat:
df['mat_up'] = pd.DataFrame(data = self.dat['mat_up'], columns=self.dat['nodes']['col'], index=self.dat['nodes']['row'])
df['mat_dn'] = pd.DataFrame(data = self.dat['mat_dn'], columns=self.dat['nodes']['col'], index=self.dat['nodes']['row'])
return df
def make_filtered_views(self, dist_type='cosine', run_clustering=True, \
dendro=True, views=['filter_row_sum','N_row_sum'], calc_col_cats=True, \
linkage_type='average'):
from copy import deepcopy
'''
This will calculate multiple views of a clustergram by filtering the data
and clustering after each filtering. This filtering will keep the top N
rows based on some quantity (sum, num-non-zero, etc).
'''
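# illustrative usage (assuming a populated network object named net):
#   net.make_filtered_views(dist_type='cosine', views=['N_row_sum'])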
print('running make_filtered_views')
print('dist_type '+str(dist_type))
# get dataframe dictionary of network and remove rows/cols with all zero values
df = self.dat_to_df()
# each row or column must have at least one non-zero value
threshold = 0.0001
df = self.df_filter_row(df, threshold)
df = self.df_filter_col(df, threshold)
# calculate initial view with no row filtering
##################################################
# swap back in the filtered df to dat
self.df_to_dat(df)
# cluster initial view
self.cluster_row_and_col(dist_type=dist_type, linkage_type=linkage_type, \
run_clustering=run_clustering, dendro=dendro)
# set up views
all_views = []
# generate views for each column category (default to only one)
all_col_cat = ['all_category']
# check for column categories and check whether category specific clustering
# should be calculated
if len(self.dat['node_info']['col']['cl']) > 0 and calc_col_cats:
tmp_cats = sorted(list(set(self.dat['node_info']['col']['cl'])))
# gather all col_cats
all_col_cat.extend(tmp_cats)
for inst_col_cat in all_col_cat:
# make a copy of df to send to filters
send_df = deepcopy(df)
# add N_row_sum views
if 'N_row_sum' in views:
print('add N top views')
all_views = self.add_N_top_views( send_df, all_views, dist_type=dist_type, current_col_cat=inst_col_cat )
if 'filter_row_sum' in views:
all_views = self.add_pct_top_views( send_df, all_views, dist_type=dist_type, current_col_cat=inst_col_cat )
# add views to viz
self.viz['views'] = all_views
print('finished make_filtered_views')
def add_pct_top_views(self, df, all_views, dist_type='cosine', \
current_col_cat='all_category'):
from clustergrammer import Network
from copy import deepcopy
import numpy as np
# make a copy of the network so that filtering is not propagated
copy_net = deepcopy(self)
# filter columns by category if necessary - do this on df, which is a copy
if current_col_cat != 'all_category':
keep_cols = copy_net.dat['node_info']['col_in_cat'][current_col_cat]
df['mat'] = copy_net.grab_df_subset(df['mat'], keep_rows='all', keep_cols=keep_cols)
# gather category key
is_col_cat = False
if len(self.dat['node_info']['col']['cl']) > 0 and current_col_cat=='all_category':
is_col_cat = True
cat_key_col = {}
for i in range(len(self.dat['nodes']['col'])):
cat_key_col[ self.dat['nodes']['col'][i] ] = self.dat['node_info']['col']['cl'][i]
# filter between 0% and 90% of some threshold
all_filt = range(10)
all_filt = [i/float(10) for i in all_filt]
# row filtering values
mat = deepcopy(df['mat'])
sum_row = np.sum(mat, axis=1)
max_sum = max(sum_row)
for inst_filt in all_filt:
cutoff = inst_filt * max_sum
# make a copy of the network so that filtering is not propagated
copy_net = deepcopy(self)
# make copy of df
inst_df = deepcopy(df)
# filter row in df
inst_df = copy_net.df_filter_row(inst_df, cutoff, take_abs=False)
# filter columns by category if necessary
if current_col_cat != 'all_category':
keep_cols = copy_net.dat['node_info']['col_in_cat'][current_col_cat]
inst_df['mat'] = copy_net.grab_df_subset(inst_df['mat'], keep_rows='all', keep_cols=keep_cols)
if 'mat_up' in inst_df:
# grab up and down data
inst_df['mat_up'] = copy_net.grab_df_subset(inst_df['mat_up'], keep_rows='all', keep_cols=keep_cols)
inst_df['mat_dn'] = copy_net.grab_df_subset(inst_df['mat_dn'], keep_rows='all', keep_cols=keep_cols)
# ini net
net = deepcopy(Network())
# transfer to dat
net.df_to_dat(inst_df)
# add col categories if necessary
if is_col_cat:
inst_col_cats = []
for inst_col_name in copy_net.dat['nodes']['col']:
inst_col_cats.append( cat_key_col[inst_col_name] )
# transfer category information
net.dat['node_info']['col']['cl'] = inst_col_cats
# add col_in_cat
net.dat['node_info']['col_in_cat'] = copy_net.dat['node_info']['col_in_cat']
# try to cluster
try:
try:
# cluster
net.cluster_row_and_col(dist_type=dist_type,run_clustering=True)
except:
# cluster
net.cluster_row_and_col(dist_type=dist_type,run_clustering=False)
# add view
inst_view = {}
inst_view['filter_row_sum'] = inst_filt
inst_view['dist'] = 'cos'
inst_view['col_cat'] = current_col_cat
inst_view['nodes'] = {}
inst_view['nodes']['row_nodes'] = net.viz['row_nodes']
inst_view['nodes']['col_nodes'] = net.viz['col_nodes']
all_views.append(inst_view)
except:
print('\t*** did not cluster pct filtered view')
return all_views
def add_N_top_views(self, df, all_views, dist_type='cosine',\
current_col_cat='all_category'):
from clustergrammer import Network
from copy import deepcopy
# make a copy of the network
copy_net = deepcopy(self)
# filter columns by category if necessary
if current_col_cat != 'all_category':
keep_cols = copy_net.dat['node_info']['col_in_cat'][current_col_cat]
df['mat'] = copy_net.grab_df_subset(df['mat'], keep_rows='all', keep_cols=keep_cols)
# gather category key
is_col_cat = False
if len(self.dat['node_info']['col']['cl']) > 0 and current_col_cat=='all_category':
is_col_cat = True
cat_key_col = {}
for i in range(len(self.dat['nodes']['col'])):
cat_key_col[ self.dat['nodes']['col'][i] ] = self.dat['node_info']['col']['cl'][i]
# keep the following number of top rows
keep_top = ['all',500,400,300,200,100,90,80,70,60,50,40,30,20,10]
# get copy of df and take abs value, cell line cols and gene rows
df_abs = deepcopy(df['mat'])
# transpose to get gene columns
df_abs = df_abs.transpose()
# sum the values of the genes in the cell lines
tmp_sum = df_abs.sum(axis=0)
# take absolute value to keep most positive and most negative rows
tmp_sum = tmp_sum.abs()
# sort rows by value
tmp_sum = tmp_sum.sort_values(ascending=False)
rows_sorted = tmp_sum.index.values.tolist()
for inst_keep in keep_top:
# initialize df
tmp_df = deepcopy(df)
# filter columns by category if necessary
if current_col_cat != 'all_category':
keep_cols = copy_net.dat['node_info']['col_in_cat'][current_col_cat]
tmp_df['mat'] = copy_net.grab_df_subset(tmp_df['mat'], keep_rows='all', keep_cols=keep_cols)
if 'mat_up' in df:
# grab up and down data
tmp_df['mat_up'] = copy_net.grab_df_subset(tmp_df['mat_up'], keep_rows='all', keep_cols=keep_cols)
tmp_df['mat_dn'] = copy_net.grab_df_subset(tmp_df['mat_dn'], keep_rows='all', keep_cols=keep_cols)
if inst_keep == 'all' or inst_keep < len(rows_sorted):
# initialize network
net = deepcopy(Network())
# filter the rows
if inst_keep != 'all':
# get the labels of the rows that will be kept
keep_rows = rows_sorted[0:inst_keep]
# filter the matrix
tmp_df['mat'] = tmp_df['mat'].loc[keep_rows]
if 'mat_up' in tmp_df:
tmp_df['mat_up'] = tmp_df['mat_up'].loc[keep_rows]
tmp_df['mat_dn'] = tmp_df['mat_dn'].loc[keep_rows]
# filter columns - some columns may have all zero values
tmp_df = self.df_filter_col(tmp_df,0.001)
# transfer to dat
net.df_to_dat(tmp_df)
else:
net.df_to_dat(tmp_df)
# add col categories if necessary
if is_col_cat:
inst_col_cats = []
for inst_col_name in self.dat['nodes']['col']:
inst_col_cats.append( cat_key_col[inst_col_name] )
# transfer category information
net.dat['node_info']['col']['cl'] = inst_col_cats
# add col_in_cat
net.dat['node_info']['col_in_cat'] = copy_net.dat['node_info']['col_in_cat']
# try to cluster
try:
try:
# cluster
net.cluster_row_and_col(dist_type,run_clustering=True)
except:
# cluster
net.cluster_row_and_col(dist_type,run_clustering=False)
# add view
inst_view = {}
inst_view['N_row_sum'] = inst_keep
inst_view['dist'] = 'cos'
inst_view['col_cat'] = current_col_cat
inst_view['nodes'] = {}
inst_view['nodes']['row_nodes'] = net.viz['row_nodes']
inst_view['nodes']['col_nodes'] = net.viz['col_nodes']
all_views.append(inst_view)
except:
print('\t*** did not cluster N filtered view')
return all_views
def fast_mult_views(self, dist_type='cos', run_clustering=True, dendro=True):
import numpy as np
import pandas as pd
from clustergrammer import Network
from copy import deepcopy
'''
This will use Pandas to calculate multiple views of a clustergram
Currently, it is only filtering based on row-sum and it is disregarding
link information (used to add click functionality).
'''
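# each generated view stores its filter level ('filter_row_sum') together
# with the reclustered row and column node orderings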
# gather category key
is_col_cat = False
if len(self.dat['node_info']['col']['cl']) > 0:
is_col_cat = True
cat_key_col = {}
for i in range(len(self.dat['nodes']['col'])):
cat_key_col[ self.dat['nodes']['col'][i] ] = self.dat['node_info']['col']['cl'][i]
# get dataframe dictionary of network and remove rows/cols with all zero values
df = self.dat_to_df()
# each row or column must have at least one non-zero value
threshold = 0.001
df = self.df_filter_row(df, threshold)
df = self.df_filter_col(df, threshold)
# calculate initial view with no row filtering
#################################################
# swap back in filtered df to dat
self.df_to_dat(df)
# cluster initial view
self.cluster_row_and_col('cos',run_clustering=run_clustering, dendro=dendro)
# set up views
all_views = []
# set up initial view
inst_view = {}
inst_view['filter_row_sum'] = 0
inst_view['dist'] = 'cos'
inst_view['nodes'] = {}
inst_view['nodes']['row_nodes'] = self.viz['row_nodes']
inst_view['nodes']['col_nodes'] = self.viz['col_nodes']
# add view with no filtering
all_views.append(inst_view)
# filter between 0% and 90% of some threshold
all_filt = range(10)
all_filt = [i/float(10) for i in all_filt]
# row filtering values
mat = self.dat['mat']
mat_abs = abs(mat)
sum_row = np.sum(mat_abs, axis=1)
max_sum = max(sum_row)
for inst_filt in all_filt:
# skip zero filtering
if inst_filt > 0:
cutoff = inst_filt * max_sum
# filter row
df = self.df_filter_row(df, cutoff, take_abs=True)
print('\tfiltering at cutoff ' + str(inst_filt) + ' mat shape: ' + str(df['mat'].shape))
# ini net
net = deepcopy(Network())
# transfer to dat
net.df_to_dat(df)
# add col categories if necessary
if is_col_cat:
inst_col_cats = []
for inst_col_name in self.dat['nodes']['col']:
inst_col_cats.append( cat_key_col[inst_col_name] )
net.dat['node_info']['col']['cl'] = inst_col_cats
# try to cluster
try:
# cluster
net.cluster_row_and_col('cos')
# add view
inst_view = {}
inst_view['filter_row_sum'] = inst_filt
inst_view['dist'] = 'cos'
inst_view['nodes'] = {}
inst_view['nodes']['row_nodes'] = net.viz['row_nodes']
inst_view['nodes']['col_nodes'] = net.viz['col_nodes']
all_views.append(inst_view)
except:
print('\t*** did not cluster filtered view')
# add views to viz
self.viz['views'] = all_views
print('\tfinished fast_mult_views')
def make_mult_views(self, dist_type='cos',filter_row=['value'], filter_col=False, run_clustering=True, dendro=True):
'''
This will calculate multiple views of a clustergram by filtering the
data and clustering after each filtering. By default row filtering will
be turned on and column filtering will not. The filtering steps are defined
as a percentage of the maximum value found in the network.
'''
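# illustrative usage (assuming a populated network object named net):
#   net.make_mult_views(filter_row=['value','sum'], filter_col=False)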
from clustergrammer import Network
from copy import deepcopy
# filter between 0% and 90% of some to be determined value
all_filt = range(10)
all_filt = [i/float(10) for i in all_filt]
# cluster default view
self.cluster_row_and_col('cos', run_clustering=run_clustering, dendro=dendro)
self.viz['views'] = []
all_views = []
# Perform row filterings
###########################
if len(filter_row) > 0:
# perform multiple types of row filtering
###########################################
for inst_type in filter_row:
for row_filt_int in all_filt:
# initialize new net
net = deepcopy(Network())
net.dat = deepcopy(self.dat)
# filter rows
net.filter_row_thresh(row_filt_int, filter_type=inst_type)
# filter columns since some columns might be all zero
net.filter_col_thresh(0.001,1)
# try to cluster - will not work if there is one row
try:
# cluster
net.cluster_row_and_col('cos')
inst_name = 'filter_row'+'_'+inst_type
# add view
inst_view = {}
inst_view[inst_name] = row_filt_int
inst_view['dist'] = 'cos'
inst_view['nodes'] = {}
inst_view['nodes']['row_nodes'] = net.viz['row_nodes']
inst_view['nodes']['col_nodes'] = net.viz['col_nodes']
all_views.append(inst_view)
except:
print('\t***did not cluster filtered view')
# Default col Filtering
###########################
inst_meet = 1
if filter_col == True:
# col filtering
#####################
for col_filt in all_filt:
# print(col_filt)
# initialize new net
net = deepcopy(Network())
net.dat = deepcopy(self.dat)
# threshold as a fraction of the maximum absolute value in the matrix
max_mat = abs(net.dat['mat']).max()
filt_value = col_filt * max_mat
# filter cols
net.filter_col_thresh(filt_value, inst_meet)
# try to cluster - will not work if there is one col
try:
# cluster
net.cluster_row_and_col('cos')
# add view
inst_view = {}
inst_view['filter_col'] = col_filt
inst_view['dist'] = 'cos'
inst_view['nodes'] = {}
inst_view['nodes']['row_nodes'] = net.viz['row_nodes']
inst_view['nodes']['col_nodes'] = net.viz['col_nodes']
all_views.append(inst_view)
except:
print('did not cluster filtered view')
# add views to viz
self.viz['views'] = all_views
@staticmethod
def df_filter_row(df, threshold, take_abs=True):
''' filter rows in the matrix, keeping only rows whose
(absolute) sum is above the given threshold '''
import pandas as pd
from copy import deepcopy
from clustergrammer import Network
net = Network()
# take absolute value if necessary
if take_abs == True:
df_copy = deepcopy(df['mat'].abs())
else:
df_copy = deepcopy(df['mat'])
ini_rows = df_copy.index.values.tolist()
# transpose df
df_copy = df_copy.transpose()
# sum the values of the rows
tmp_sum = df_copy.sum(axis=0)
# take absolute value to keep most positive and most negative rows
tmp_sum = tmp_sum.abs()
# sort rows by value
tmp_sum = tmp_sum.sort_values(ascending=False)
# filter series using threshold
tmp_sum = tmp_sum[tmp_sum>threshold]
# get keep_row names
keep_rows = sorted(tmp_sum.index.values.tolist())
if len(keep_rows) < len(ini_rows):
# grab the subset of the data
df['mat'] = net.grab_df_subset(df['mat'], keep_rows=keep_rows)
if 'mat_up' in df:
# grab up and down data
df['mat_up'] = net.grab_df_subset(df['mat_up'], keep_rows=keep_rows)
df['mat_dn'] = net.grab_df_subset(df['mat_dn'], keep_rows=keep_rows)
return df
@staticmethod
def df_filter_col(df, threshold, take_abs=True):
''' filter columns in matrix at some threshold
and remove rows that have all zero values '''
import pandas
from copy import deepcopy
from clustergrammer import Network
net = Network()
# take absolute value if necessary
if take_abs == True:
df_copy = deepcopy(df['mat'].abs())
else:
df_copy = deepcopy(df['mat'])
# filter columns to remove columns with all zero values
# transpose
df_copy = df_copy.transpose()
df_copy = df_copy[df_copy.sum(axis=1) > threshold]
# transpose back
df_copy = df_copy.transpose()
# filter rows
df_copy = df_copy[df_copy.sum(axis=1) > 0]
# get df ready for export
if take_abs == True:
inst_rows = df_copy.index.tolist()
inst_cols = df_copy.columns.tolist()
df['mat'] = net.grab_df_subset(df['mat'], inst_rows, inst_cols)
else:
# just transfer the copied data
df['mat'] = df_copy
return df
@staticmethod
def grab_df_subset(df, keep_rows='all', keep_cols='all'):
if keep_cols != 'all':
# filter columns
df = df[keep_cols]
if keep_rows != 'all':
# filter rows
df = df.loc[keep_rows]
return df
@staticmethod
def load_gmt(filename):
f = open(filename, 'r')
lines = f.readlines()
f.close()
gmt = {}
# loop through the lines of the gmt
for i in range(len(lines)):
# get the inst line, strip off the new line character
inst_line = lines[i].rstrip()
inst_term = inst_line.split('\t')[0]
# get the elements
inst_elems = inst_line.split('\t')[2:]
# save the drug-kinase sets
gmt[inst_term] = inst_elems
return gmt
@staticmethod
def load_json_to_dict(filename):
''' load json to python dict and return dict '''
import json
f = open(filename, 'r')
inst_dict = json.load(f)
f.close()
return inst_dict
@staticmethod
def save_dict_to_json(inst_dict, filename, indent='no-indent'):
import json
# save as a json
fw = open(filename, 'w')
if indent == 'indent':
fw.write( json.dumps(inst_dict, indent=2) )
else:
fw.write( json.dumps(inst_dict) )
fw.close()
@staticmethod
def ini_clust_order():
rowcol = ['row','col']
orderings = ['clust','rank','group','ini']
clust_order = {}
for inst_node in rowcol:
clust_order[inst_node] = {}
for inst_order in orderings:
clust_order[inst_node][inst_order] = []
return clust_order
@staticmethod
def threshold_vect_comparison(x, y, cutoff):
import numpy as np
# x vector
############
# take absolute value of x
x_abs = np.absolute(x)
# this returns a tuple
found_tuple = np.where(x_abs >= cutoff)
# get index array
found_index_x = found_tuple[0]
# y vector
############
# take absolute value of y
y_abs = np.absolute(y)
from scipy.spatial.distance import cdist
import heapq
import numpy as np
import random
from hashlib import sha1
from itertools import zip_longest
def batch_unit_norm(b, epsilon=1e-8):
"""
Give all vectors unit norm along the last dimension
"""
return b / (np.linalg.norm(b, axis=-1, keepdims=True) + epsilon)
def unit_vectors(n_examples, n_dims):
"""
Create n_examples of synthetic data on the unit
sphere in n_dims
"""
dense = np.random.normal(0, 1, (n_examples, n_dims))
# normalize to the unit sphere and return (per the docstring above)
return batch_unit_norm(dense)
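# illustrative usage: unit_vectors(1000, 64) draws 1000 Gaussian vectors in
# 64 dimensions and rescales each to (approximately) unit norm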
"""
Analyze results and plot figures
"""
# Imports
#==============#
import pandas as pd
import numpy as np
import scipy
import random
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings("ignore")
import bioinf
# Plots for HMM method 5-fold cross validation
#===============================================#
fnt='Arial'
ticks_font = {'fontname':fnt, 'size':'18'}
legend_font = {'family':fnt, 'size':'12'}
label_font = {'family':fnt, 'size':'20'}
plt.rcParams['figure.figsize'] = [6,3]
ec = 'black'
legend_label = ['CBH', 'EG']
# SwissProt Dataset
ex = pd.read_csv('results_final/swiss_kfold.csv')
lw = 0.8
out1 = plt.bar(range(30), ex.diff_score[:30], color='blue',
linewidth=lw, edgecolor=ec)
out2 = plt.bar(range(30,44), ex.diff_score[30:], color='red',
linewidth=lw, edgecolor=ec)
pltout = [x[0] for x in [out1, out2]]
plt.xlabel('Sequence', **label_font)
plt.ylabel('Score difference', **label_font)
plt.xticks(**ticks_font)
plt.yticks([-300,-150,0,150,300,450], **ticks_font)
plt.xlim([-0.6,43.6])
plt.axhline(color='black', linewidth=1)
plt.legend(pltout, legend_label, prop=legend_font,
loc='upper right')
plt.tight_layout()
plt.savefig('plots/swiss_kfold.pdf')
plt.close()
# NCBI dataset
ex = pd.read_csv('results_final/ncbi_kfold.csv')
lw = 0.15
cbhs = list(ex.diff_score[:291])
egs = list(ex.diff_score[291:])
random.shuffle(cbhs)
random.shuffle(egs)
out1 = plt.bar(range(291), cbhs, color='blue', linewidth=lw,
edgecolor='blue')
out2 = plt.bar(range(291,427), egs, color='red', linewidth=lw,
edgecolor='red')
pltout = [x[0] for x in [out1, out2]]
plt.xlabel('Sequence', **label_font)
plt.ylabel('Score difference', **label_font)
plt.xticks(**ticks_font)
plt.yticks([-300,-150,0,150,300,450], **ticks_font)
plt.xlim([-1,428])
plt.axhline(color='black', linewidth=1)
plt.legend(pltout, legend_label, prop=legend_font,
loc='upper right')
plt.tight_layout()
plt.savefig('plots/ncbi_kfold.pdf')
plt.close()
# Pymol commands for loop positions in TreCel7A and TreCel7B
#==============================================================#
# Cel7A
loopstart = [98, 399, 369, 383, 51, 194, 244, 339]
length = [5,13,5,10,6,8,10,4]
cel7a_start = list(loopstart)
cel7a_stop = [loopstart[i] + length[i] - 1 for i in range(8)]
cel7a_pymol = 'select cel7a_loops, '
for i in range(8):
cel7a_pymol += f'resi {cel7a_start[i]}-{cel7a_stop[i]} or '
# Cel7B
fasta = 'fasta/structure_based_alignment/structure6_mafft.fasta'
heads, seqs = bioinf.split_fasta(fasta)
seq7a_msa, seq7b_msa = seqs[0], seqs[3]
seq7a, seq7b = seq7a_msa.replace('-', ''), seq7b_msa.replace('-','')
msastart = [bioinf.resid_to_msa(seq7a_msa, x-1) for x in cel7a_start]
msastop = [bioinf.resid_to_msa(seq7a_msa, x-1) for x in cel7a_stop]
cel7b_start = [bioinf.msa_to_resid(seq7b_msa, x) for x in msastart]
cel7b_stop = [bioinf.msa_to_resid(seq7b_msa, x+1) for x in msastop]
cel7b_pymol = 'select cel7b_loops, '
for i in range(8):
cel7b_pymol += f'resi {cel7b_start[i] + 1}-{cel7b_stop[i]} or '
# Write
with open('plots/loops_pymol.txt', 'w') as pymol:
pymol.write(cel7a_pymol[:-4] + '\n\n')
pymol.write(cel7b_pymol[:-4])
# Pymol selection command to visualize rules on structure
#=========================================================#
pymol_positions = 'select rules, ('
for pos in positions:
pymol_positions += f'resi {pos} or '
pymol_positions = pymol_positions[:-4]
pymol_positions += ') and name ca'
with open('plots/rules_pymol.txt', 'w') as txt:
txt.write(pymol_positions)
# Table for ML subtype performance
#=====================================#
mlkeys = ['dec', 'svm', 'knn', 'log']
features = ['A1', 'A2', 'A3', 'A4', 'B1', 'B2', 'B3', 'B4', 'all8']
store2 = []
columns = []
for key in mlkeys:
excel = pd.read_csv(f'results_final/ml_subtype_pred/{key}.csv', index_col=0)
sens_store, spec_store, acc_store = [], [], []
columns.extend([key + '_sens', key + '_spec', key + '_acc'])
for i in range(len(features)):
sens_store.append(str(round(excel.sens_mean[i], 1)) + ' ± ' + \
str(round(excel.sens_std[i], 1)))
spec_store.append(str(round(excel.spec_mean[i], 1)) + ' ± ' + \
str(round(excel.spec_std[i], 1)))
acc_store.append(str(round(excel.acc_mean[i], 1)) + ' ± ' + \
str(round(excel.acc_std[i], 1)))
store2.extend([sens_store, spec_store, acc_store])
store2 = pd.DataFrame(store2).transpose()
store2.index = features
store2.columns = columns
store2.to_csv('plots/ml_subtype_table.csv')
# Plot MCC values for subtype prediction with ML
#===================================================#
# Variables
mlkeys = ['dec', 'log', 'knn', 'svm']
labels = ['Decision tree', 'Logistic regression', 'KNN', 'SVM']
features = ['A1', 'A2', 'A3', 'A4', 'B1', 'B2', 'B3', 'B4', 'All-8']
colors = ['goldenrod', 'magenta', 'cadetblue', 'red']
# Plot specifications
fnt = 'Arial'
ticks_font = {'fontname':fnt, 'size':'14'}
legend_font = {'family':fnt, 'size':'11'}
label_font = {'family':fnt, 'size':'18'}
plt.rcParams["figure.figsize"] = [11,3]
plt.rcParams['grid.alpha'] = 0.5
for i,key in zip(range(len(mlkeys)), mlkeys):
# Get data
data = pd.read_csv(f'results_final/mcc_data/{key}.csv', index_col=0)
# Boxplot specifications
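# space the boxes so the four classifiers sit side by side within each of the
# nine feature groups, with a gap between groups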
positions = np.arange(9) * (len(mlkeys) + 3) + i
color = colors[i]
meanprops = {'marker':'o',
'markerfacecolor':color,
'markeredgecolor':'black',
'markersize':2.0,
'linewidth':1.0}
medianprops = {'linestyle':'-',
'linewidth':1.0,
'color':'black'}
boxprops = {'facecolor':color,
'color':'black',
'linewidth':1.0}
flierprops = {'marker':'o',
'markerfacecolor':'black',
'markersize':1,
'markeredgecolor':'black'}
whiskerprops = {'linewidth':1.0}
capprops = {'linewidth':1.0}
# Plot the boxplot
_ = plt.boxplot(
data,
positions=positions,
widths=0.85,#(1, 1, 1),
whis=(0,100), # Percentiles for whiskers
showmeans=False, # Show means in addition to median
patch_artist=True, # Fill with color
meanprops=meanprops, # Customize mean points
medianprops=medianprops, # Customize median points
boxprops=boxprops,
showfliers=False, # Show/hide points beyond whiskers
flierprops=flierprops,
whiskerprops=whiskerprops,
capprops=capprops
)
# Plot dummy scatter points for legend
for i in range(len(mlkeys)):
plt.bar([100], [100], color=colors[i], label=labels[i], edgecolor='black',
linewidth=0.5)
# Specifications
plt.legend(frameon=1, numpoints=1, shadow=0, loc='best',
prop=legend_font)
plt.xticks(np.arange(9) * 7 + 1.5, features, **ticks_font)
plt.yticks(**ticks_font)
plt.ylabel('MCC', **label_font)
plt.ylim((-1.1, 1.1))
plt.xlim((-1,61))
plt.tight_layout()
# Save plot
plt.savefig('plots/mcc_boxwhiskerplot.pdf')
plt.show(); plt.close()
# Plots for outlier detection
#===============================#
looplength = pd.read_csv('results_final/looplength.csv', index_col=0)
subtype = pd.read_csv('results_final/cel7_subtypes.csv', index_col=0)['ncbi_pred_class']
looplength.index = range(len(looplength))
subtype.index = range(len(subtype))
fnt='Arial'
ticks_font = {'fontname':fnt, 'size':'18'}
label_font = {'family':fnt, 'size':'22'}
legend_font = {'family':'Arial', 'size':'14'}
title_font = {'family':fnt, 'size':'30'}
plt.rcParams['figure.figsize'] = [6,4]
# View the distribution to intuitively determine outliers
maxlength = [14, 20, 25, 16, 52, 141, 50, 14] # values greater than or equal to these are treated as outliers
topcode_vals = [] # Change the outlier values to top-coded values
for i in range(8):
sortedvals = sorted(looplength.iloc[:,i])
maxval = maxlength[i]
topcode_vals.append(sortedvals[sortedvals.index(maxval) - 1])
color = ['blue' if x<maxval else 'red' for x in sortedvals]
loop = looplength.columns[i]
plt.scatter(range(len(looplength)), sortedvals, color=color,
marker='o')
plt.xticks(**ticks_font)
plt.yticks(**ticks_font)
plt.xlabel('Index', **label_font)
plt.ylabel('Length', **label_font)
plt.title(loop, **title_font)
plt.tight_layout()
#plt.savefig(f'plots/outlier_detection/{loop}.pdf')
plt.show()
plt.close()
# drop the last column, then cap outliers at the top-coded values
looplength = looplength.iloc[:,:-1]
for i in range(len(looplength.columns)):
vals = list(looplength.iloc[:,i])
vals = [x if x<maxlength[i] else topcode_vals[i] for x in vals]
looplength.iloc[:,i] = pd.Series(vals)
# Plot loop lengths (box/whisker plot)
#=======================================#
# Get data
cbh_looplength = looplength.iloc[subtype[subtype==1].index]
eg_looplength = looplength.iloc[subtype[subtype==0].index]
data = [cbh_looplength, eg_looplength]
labels = ['CBH', 'EG']
colors = ['lightblue', 'pink']
# Plot specifications
fnt='Arial'
ticks_font = {'fontname':fnt, 'size':'16'}
label_font = {'family':fnt, 'size':'18'}
legend_font = {'family':'Arial', 'size':'12'}
title_font = {'family':fnt, 'size':'20'}
plt.rcParams['figure.figsize'] = [6,3]
plt.rcParams['grid.alpha'] = 0.3
plt.rcParams['axes.axisbelow'] = True
legend_label = ['CBH', 'EG']
for i in range(2):
positions = np.arange(8) * (len(data) + 1) + i
color = colors[i]
medianprops = {'linestyle':'-',
'linewidth':1.0,
'color':'black'}
boxprops = {'facecolor':color,
'color':'black',
'linewidth':1.0}
flierprops = {'marker':'o',
'markerfacecolor':'black',
'markersize':1,
'markeredgecolor':'black'}
whiskerprops = {'linewidth':1.0}
capprops = {'linewidth':1.0}
# Plot the boxplot
_ = plt.boxplot(
data[i],
positions=positions,
widths=0.75,#(1, 1, 1),
whis=(0,100), # Percentiles for whiskers
showmeans=False, # Show means in addition to median
patch_artist=True, # Fill with color
meanprops=meanprops, # Customize mean points
medianprops=medianprops, # Customize median points
boxprops=boxprops,
showfliers=False, # Show/hide points beyond whiskers
flierprops=flierprops,
whiskerprops=whiskerprops,
capprops=capprops
)
# Plot dummy scatter points for legend
for i in range(2):
plt.bar([100], [100], color=colors[i], label=labels[i], edgecolor='black',
linewidth=1.0)
# Plot specifications
plt.legend(frameon=1, numpoints=1, shadow=0, loc='upper center',
prop=legend_font)
plt.xticks(np.arange(8) * 3 + 0.5, cbh_looplength.columns, **ticks_font)
plt.yticks(np.arange(-4, 24, step=4), **ticks_font)
plt.ylabel('Number of residues', **label_font)
plt.ylim((-0.5, 22))
plt.xlim((-1,23))
plt.tight_layout()
plt.savefig('plots/looplength_boxwhiskerplot.pdf')
plt.show(); plt.close()
# Plot relative standard deviation
#===================================#
mean = np.mean(looplength, axis=0)
std = np.std(looplength, axis=0)
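# relative standard deviation (std / mean, expressed in percent) for each loop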
cov = std/mean*100
fnt='Arial'
ticks_font = {'fontname':fnt, 'size':'14'}
label_font = {'family':fnt, 'size':'15'}
plt.rcParams['figure.figsize'] = [6,3]
lw=1.3
plt.bar(range(len(cov)), cov, color='brown', linewidth=lw,
edgecolor='black')
plt.xticks(range(len(cov)), cov.index, **ticks_font)
plt.yticks([20,40,60,80,100], **ticks_font)
plt.xlim([-0.45,7.45])
plt.ylim([0,80])
plt.ylabel('Relative standard deviation (%)', **label_font)
plt.tight_layout()
plt.savefig('plots/rsd.pdf')
# Density plots of loop lengths
#=============================================#
fnt='Arial'
ticks_font = {'fontname':fnt, 'size':'18'}
label_font = {'family':fnt, 'size':'22'}
legend_font = {'family':'Arial', 'size':'14'}
title_font = {'family':fnt, 'size':'30'}
plt.rcParams['figure.figsize'] = [6.5, 5]
bw = 0.5 # Change this to control the steepness of the density kernel function
xmin = [-0.5, -0.5, -0.5, -0.5, -0.5, -1.0, -0.5, -0.6]
xmax = [10, 16, 8, 13, 10, 11, 14, 8]
ymax = [0.5, 0.8, 0.8, 0.7, 0.8, 0.9, 0.5, 0.8]
legend_label = ['CBH', 'EG']
for i in range(len(looplength.columns)):
col = looplength.columns[i]
ax1 = sns.kdeplot(cbh_looplength[col], bw=bw, legend=True,
shade=False, color='blue')
ax2 = sns.kdeplot(eg_looplength[col], bw=bw, legend=True,
shade=False, color='red')
ax1.legend(legend_label, loc='best', prop=legend_font)
plt.xticks(**ticks_font)
plt.yticks(np.arange(0,11,2)*0.1, **ticks_font)
plt.xlim((0, xmax[i]))
plt.ylim((0,ymax[i]))
plt.title(col, **title_font)
plt.xlabel('Number of residues', **label_font)
plt.ylabel('Density', **label_font)
plt.tight_layout()
plt.savefig(f'plots/density_plots/{col}.pdf')
plt.show()
plt.close()
# Heatmap of loop length correlation
#====================================#
p_corr, s_corr = [], [] # Pearson's and Spearman's correlation coefficients
for i in range(len(looplength.columns)):
corr_p, corr_s = [], []
for k in range(len(looplength.columns)):
corr_p.append(np.corrcoef(looplength.iloc[:,i],
looplength.iloc[:,k])[0][1])
corr_s.append(scipy.stats.spearmanr(looplength.iloc[:,i],
looplength.iloc[:,k])[0])
p_corr.append(corr_p)
s_corr.append(corr_s)
p_corr = pd.DataFrame(p_corr)
s_corr = pd.DataFrame(s_corr)
p_corr.index = looplength.columns
p_corr.columns = looplength.columns
s_corr.index = looplength.columns
s_corr.columns = looplength.columns
sns.set(font='Arial', font_scale=0.6)
cluster = sns.clustermap(p_corr, cmap='Reds', metric='euclidean',
method='average', figsize=(3.15,3.15),
annot=True, fmt='.2f', annot_kws={'size':6})
cluster.savefig('plots/looplength_corr.pdf')
# Table of classification/association rules
#===========================================#
from subtype_rules import Cel7MSA
cbhmsa = 'fasta/trecel7a_positions_only/cbh_cat.fasta'
eglmsa = 'fasta/trecel7a_positions_only/egl_cat.fasta'
cel7msa = Cel7MSA(cbhmsa, eglmsa)
cel7msa.get_freq(include_gaps=True)
rules = pd.read_csv('results_final/rules/rules_all.csv', index_col=0)
rules_amino = pd.read_csv('results_final/rules/rules_amino.csv', index_col=0)
rules_type = pd.read_csv('results_final/rules/rules_type.csv', index_col=0)
mcc = list(rules.mcc)
min_mcc = np.percentile(mcc, 95) # mcc > 0.73
rules_mcc = rules[rules.mcc >= min_mcc]
rules_amino_mcc = rules_amino[rules_amino.mcc >= min_mcc] # 45 rules
rules_type_mcc = rules_type[rules_type.mcc >= min_mcc] # 45 rules
positions = sorted(set(rules_mcc.tre_pos)) # 42 positions
rules_mcc.to_csv('results_final/rules/rules_mcc.csv')
rules_amino_mcc.to_csv('results_final/rules/rules_amino_mcc.csv')
rules_type_mcc.to_csv('results_final/rules/rules_type_mcc.csv')
rules_amino_table = rules_amino_mcc.loc[:,['tre_pos','rule', 'closest_subsite',
'dist_subsite','sens', 'spec', 'acc', 'mcc']]
rules_amino_table.columns = ['Position', 'Rule', 'Closest subsite',
'Distance to closest subsite (Å)', 'Sensitivity',
'Specificity', 'Accuracy', 'MCC']
rules_amino_table.to_csv('plots/rules_amino_table.csv')
rules_type_table = rules_type_mcc.loc[:,['tre_pos','rule', 'closest_subsite',
'dist_subsite', 'sens', 'spec', 'acc', 'mcc']]
rules_type_table.columns = ['Position', 'Rule', 'Closest subsite',
'Distance to closest subsite (Å)', 'Sensitivity',
'Specificity', 'Accuracy', 'MCC']
rules_type_table.to_csv('plots/rules_type_table.csv')
# Plot Histogram for MCC of rules
#=================================#
fnt='Arial'
ticks_font = {'fontname':fnt, 'size':'20'}
label_font = {'family':fnt, 'size':'22'}
title_font = {'family':fnt, 'size':'24'}
plt.rcParams['figure.figsize'] = [6,3.5]
plt.rcParams['grid.alpha'] = 0.5
plt.rcParams['axes.axisbelow'] = True
weights = np.zeros_like(mcc) + 1/len(mcc)
plt.hist(mcc, bins=12, rwidth=1, color='darkgreen', weights=weights)
plt.xticks(np.arange(-80,101,40)*0.01, **ticks_font)
plt.yticks(np.arange(0,28,5)*0.01, **ticks_font)
plt.xlabel('MCC', **label_font)
plt.ylabel('Relative frequency', **label_font)
plt.tight_layout()
plt.savefig('plots/rules_mcc_dist.pdf')
# Minimum distance between rules' positions and substrate
#============================================================#
dist50 = | np.percentile(rules_mcc.dist_subsite, 50) | numpy.percentile |
# detect peak
from repli1d.analyse_RFD import detect_peaks, compare, smooth
from repli1d.fast_sim import get_fast_MRT_RFDs
from repli1d.expeData import replication_data
from repli1d.single_mol_analysis import compute_info,compute_real_inter_ori
from repli1d.pso import PSO
import sys
import argparse
from repli1d.visu_browser import plotly_blocks
import pylab
import numpy as np
parser = argparse.ArgumentParser()
parser.add_argument('--start', type=int, default=5000)
parser.add_argument('--end', type=int, default=120000)
parser.add_argument('--ch', type=int, default=1)
parser.add_argument('--resolution', type=int, default=5)
parser.add_argument('--ndiff', type=int, default=60)
parser.add_argument('--percentile', type=int, default=82)
parser.add_argument('--cell', type=str, default="K562")
parser.add_argument('--visu', action="store_true")
parser.add_argument('--name', type=str, default="tmp.html")
parser.add_argument('--nsim', type=int, default=500)
parser.add_argument('--signal', type=str, default="peak")
parser.add_argument('--input', action="store_true")
parser.add_argument('--correct', action="store_true")
parser.add_argument('--continuous', action="store_true")
parser.add_argument('--noise', type=float, default=.1)
parser.add_argument('--fspeed', type=float, default=.3)
parser.add_argument('--RFDo', action="store_true")
args = parser.parse_args()
start = args.start
end = args.end
ch = args.ch
cell = args.cell
resolution_polarity = args.resolution
resolution = args.resolution
exp_factor = 4
percentile = args.percentile
fork_speed = args.fspeed
kon = 0.005
ndiff = args.ndiff
nsim = args.nsim
if args.signal == "peak":
x, d3p = detect_peaks(start, end, ch,
resolution_polarity=resolution_polarity,
exp_factor=exp_factor,
percentile=percentile, cell=cell,nanpolate=True)
if args.correct:
x, DNaseI = replication_data(cell, "DNaseI", chromosome=ch,
start=start, end=end,
resolution=resolution, raw=False)
x, CNV = replication_data(cell, "CNV", chromosome=ch,
start=start, end=end,
resolution=resolution, raw=False)
CNV[CNV == 0] = 2
DNaseI[np.isnan(DNaseI)] = 0
DNaseI /= CNV
DNaseIsm = smooth(DNaseI, 100)
DNaseIsm /= | np.mean(DNaseIsm) | numpy.mean |
import shutil
from pathlib import Path
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import matplotlib.ticker as mtick
from scipy.optimize import minimize_scalar
import filter
kBarWidth = 0.2
def fitLine(row, formantName, start, end, outputDir):
key = '@'.join([row['Filename'], row['Annotation'], formantName])
x = np.arange(2, 11)
y = row[formantName + '_' +
str(start): formantName + '_' + str(end)].to_numpy(dtype='float')
coeff = np.polyfit(x, y, 4)
line1 = np.poly1d(coeff)
line1d = np.polyder(line1, 1)
line1dd = np.polyder(line1, 2)
line1dd_max = minimize_scalar(-line1dd, bounds=(2, 10), method='bounded')
inflection = line1dd_max.x
plt.plot(x, y, 'o')
plt.plot(x, line1(x), label='fitted line')
plt.plot(x, line1d(x), label='1st deriv')
plt.plot(x, line1dd(x), label='2nd deriv')
plt.axvline(x=inflection, linestyle='dashed', label='inflection')
plt.legend(loc='best')
plt.title(key)
# plt.show()
plt.savefig(outputDir / (key + '.png'))
plt.clf()
plt.cla()
# return pd.Series(coeff, index=['x4', 'x3', 'x2', 'x1', 'x0'])
return pd.Series(inflection, index=['Inflection_'+formantName])
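# Illustrative sketch (not part of the original analysis): shows the same
# quartic-fit / inflection-point idea used in fitLine on synthetic data.
# The helper name `_demo_inflection_fit` and the synthetic values are
# assumptions for demonstration only; np and minimize_scalar come from the
# module imports above.
def _demo_inflection_fit():
    x = np.arange(2, 11)
    y = np.array([3.0, 3.1, 3.4, 4.0, 5.0, 6.2, 7.1, 7.6, 7.8])  # synthetic trajectory
    coeff = np.polyfit(x, y, 4)            # 4th-order polynomial fit
    line = np.poly1d(coeff)
    line_dd = np.polyder(line, 2)          # second derivative
    res = minimize_scalar(-line_dd, bounds=(2, 10), method='bounded')
    return res.x                           # location of the inflection estimate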
def removeChars(s):
for c in [' ', '\\', '/', '^']:
s = s.replace(c, '')
return s
class Analyzer(object):
def RunAnalysis(self, df, group_name, output_base_dir):
raise NotImplementedError
def GetName(self):
raise NotImplementedError
class FormantQuantiles(Analyzer):
def GetName(self):
return "FormantQuantiles"
def GetInputType(self):
return "Formant"
def RunAnalysis(self, df, group_name, output_dir):
# output = df[['Filename']].copy()
# output['Annotation'] = df[['Annotation']]
df['barkF1_25p'] = df[['barkF1_3', 'barkF1_4']].mean(axis=1)
df['barkF1_75p'] = df[['barkF1_8', 'barkF1_9']].mean(axis=1)
df['barkF1_50p'] = df[['barkF1_6']]
df['barkF2_25p'] = df[['barkF2_3', 'barkF2_4']].mean(axis=1)
df['barkF2_75p'] = df[['barkF2_8', 'barkF2_9']].mean(axis=1)
df['barkF2_50p'] = df[['barkF2_6']]
        df['diff_F1F2_25p'] = df['barkF1_25p'] - df['barkF2_25p']
        df['diff_F1F2_50p'] = df['barkF1_50p'] - df['barkF2_50p']
        df['diff_F1F2_75p'] = df['barkF1_75p'] - df['barkF2_75p']
output_debug = pd.concat(
[df[['Filename']],
df[['Annotation']],
df.loc[:, df.columns.str.startswith("barkF1")],
df.loc[:, df.columns.str.startswith("barkF2")],
df.loc[:, df.columns.str.startswith("diff")],
], axis=1)
output = pd.DataFrame(
df.loc[:, df.columns.str.startswith("diff")].mean()).T
output_path = output_dir / (group_name + '.csv')
output_debug_path = output_dir / (group_name + '.debug.csv')
output_debug.to_csv(output_debug_path, index=False)
output.to_csv(output_path, index=False)
class FormantQuantilesByDemographic(Analyzer):
def GetName(self):
return "FormantQuantilesByDemographic"
def GetInputType(self):
return "Formant"
def RunAnalysis(self, df, outer_filters, inner_filters, group_name, output_dir):
for outer_f in outer_filters:
key = outer_f.GetValue()
matched_rows = dict()
for _, row in df.iterrows():
if not outer_f.IsMatched(row):
continue
for inner_f in inner_filters:
if inner_f.IsMatched(row):
matched_rows.setdefault(
inner_f.GetValue(), []).append(row)
if len(matched_rows) == 0:
continue
x = np.arange(3)
for k, v in matched_rows.items():
matched_df = pd.DataFrame(v)
full_group_name = group_name + '@' + outer_f.GetValue() + '@@' + k
df_mean = self.ComputeMean(
matched_df, full_group_name, output_dir)
y = [df_mean['diff_F1F2_25p'][0],
df_mean['diff_F1F2_50p'][0],
df_mean['diff_F1F2_75p'][0]]
plt.bar(x, y, width=kBarWidth, label=k)
x = [xval + kBarWidth for xval in x]
plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left")
plt.xticks([r + kBarWidth for r in range(3)],
('25%', '50%', '75%'))
plt.title(key)
plt.savefig(output_dir / (group_name + '@' +
key + '.png'), bbox_inches="tight")
plt.clf()
plt.cla()
def ComputeMean(self, df, full_group_name, output_dir):
df['barkF1_25p'] = df[['barkF1_3', 'barkF1_4']].mean(axis=1)
df['barkF1_75p'] = df[['barkF1_8', 'barkF1_9']].mean(axis=1)
df['barkF1_50p'] = df[['barkF1_6']]
df['barkF2_25p'] = df[['barkF2_3', 'barkF2_4']].mean(axis=1)
df['barkF2_75p'] = df[['barkF2_8', 'barkF2_9']].mean(axis=1)
df['barkF2_50p'] = df[['barkF2_6']]
df['diff_F1F2_25p'] = df['barkF1_25p'] - df['barkF2_25p']
df['diff_F1F2_50p'] = df['barkF1_50p'] - df['barkF2_50p']
df['diff_F1F2_75p'] = df['barkF1_75p'] - df['barkF2_75p']
output = pd.DataFrame(
df.loc[:, df.columns.str.startswith("diff")].mean()).T
output_path = output_dir / (full_group_name + '.csv')
output_debug_path = output_dir / (full_group_name + '.debug.csv')
output.to_csv(output_path, index=False)
df.to_csv(output_debug_path, index=False)
return output
class FormantRegression(Analyzer):
def GetName(self):
return "FormantRegression"
def GetInputType(self):
return "Formant"
def RunAnalysis(self, df, group_name, output_dir):
s_f1 = df.loc[:, df.columns.str.startswith("barkF1")].mean()
s_f2 = df.loc[:, df.columns.str.startswith("barkF2")].mean()
x = np.arange(0, 9)
y1 = s_f1['barkF1_2': 'barkF1_10'].to_numpy(dtype='float')
y2 = s_f2['barkF2_2': 'barkF2_10'].to_numpy(dtype='float')
coeff1 = np.polyfit(x, y1, 4)
coeff2 = np.polyfit(x, y2, 4)
line1 = np.poly1d(coeff1)
line2 = np.poly1d(coeff2)
# line1d = np.polyder(line1, 1)
# line2d = np.polyder(line2, 1)
line1dd = np.polyder(line1, 2)
line2dd = np.polyder(line2, 2)
line1dd_max = minimize_scalar(-line1dd,
bounds=(0, 8), method='bounded')
line2dd_max = minimize_scalar(-line2dd,
bounds=(0, 8), method='bounded')
inflection1 = line1dd_max.x
inflection2 = line2dd_max.x
df_inflex = pd.DataFrame(
data={'f1_inflection': [inflection1], 'f2_inflection': [inflection2]})
df_inflex.to_csv(output_dir / (group_name + '.csv'), index=False)
# Plot f1/f2
plt.plot(x, y1, 'o')
plt.plot(x, y2, 'x')
plt.plot(x, line1(x), label='F1 fitted')
plt.plot(x, line2(x), label='F2 fitted')
plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left")
plt.title(group_name)
plt.savefig(output_dir / (group_name + '.fitted.png'),
bbox_inches="tight")
plt.clf()
plt.cla()
# plt.plot(x, line1d(x), label='F1 1st deriv')
# plt.plot(x, line2d(x), label='F2 1st deriv')
# Plot deriv and inflection
plt.plot(x, line1dd(x), label='F1 2nd deriv')
plt.plot(x, line2dd(x), label='F2 2nd deriv')
plt.axvline(x=inflection1, linestyle=':', label='F1 inflection')
plt.axvline(x=inflection2, linestyle='-.', label='F2 inflection')
plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left")
plt.title(group_name)
plt.savefig(output_dir / (group_name + '.inflection.png'),
bbox_inches="tight")
plt.clf()
plt.cla()
output_debug_path = output_dir / (group_name + '.debug.csv')
df.to_csv(output_debug_path, index=False)
class HnrRegression(Analyzer):
def GetName(self):
return "HnrRegression"
def GetInputType(self):
return "HNR"
def RunAnalysis(self, df, group_name, output_dir):
for i in range(1, 10):
df['mid_'+str(i)] = df[['HNR_'+str(i),
'HNR_'+str(i+1)]].mean(axis=1)
sy = df.loc[:, df.columns.str.startswith('mid_')].mean()
y = sy['mid_1': 'mid_9'].to_numpy(dtype='float')
x = np.arange(0, 9)
coeff = np.polyfit(x, y, 4)
line1 = np.poly1d(coeff)
line1dd = np.polyder(line1, 2)
line1dd_max = minimize_scalar(-line1dd,
bounds=(0, 8), method='bounded')
inflection = line1dd_max.x
df_inflex = pd.DataFrame(data={'inflection': [inflection]})
df_inflex.to_csv(output_dir / (group_name + '.csv'), index=False)
plt.plot(x, y, 'o')
plt.plot(x, line1(x), label='fitted')
plt.plot(x, line1dd(x), label='2nd deriv')
plt.axvline(x=inflection, linestyle=':', label='inflection')
plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left")
plt.title(group_name)
plt.savefig(output_dir / (group_name + '.png'), bbox_inches="tight")
plt.clf()
plt.cla()
output_debug_path = output_dir / (group_name + '.debug.csv')
df.to_csv(output_debug_path, index=False)
class HnrQuantilesMean(Analyzer):
def GetName(self):
return "HnrQuantilesMean"
def GetInputType(self):
return "HNR"
def RunAnalysis(self, df, group_name, output_dir):
df['HNR_p25'] = df[['HNR_2', 'HNR_3']].mean(axis=1)
df['HNR_p75'] = df[['HNR_7', 'HNR_8']].mean(axis=1)
df['HNR_p50'] = df[['HNR_5']]
output = pd.DataFrame(
df.loc[:, df.columns.str.startswith("HNR_p")].mean()).T
output_path = output_dir / (group_name + '.csv')
output.to_csv(output_path, index=False)
output_debug_path = output_dir / (group_name + '.debug.csv')
df.to_csv(output_debug_path, index=False)
class HnrTTest(Analyzer):
def GetName(self):
return "HnrTTest"
def GetInputType(self):
return "HNR"
def RunAnalysis(self, df, group_name, output_dir):
df['HNR_25p'] = df[['HNR_2', 'HNR_3']].mean(axis=1)
df['HNR_75p'] = df[['HNR_7', 'HNR_8']].mean(axis=1)
df['HNR_50p'] = df[['HNR_5']]
output = pd.DataFrame(
df.loc[:, df.columns.str.startswith("diff")].mean()).T
output_path = output_dir / (group_name + '.csv')
output.to_csv(output_path, index=False)
output_debug_path = output_dir / (group_name + '.debug.csv')
df.to_csv(output_debug_path, index=False)
def ComputeF1F2Diff(df):
df['barkF1_25p'] = df[['barkF1_3', 'barkF1_4']].mean(axis=1)
df['barkF1_75p'] = df[['barkF1_8', 'barkF1_9']].mean(axis=1)
df['barkF2_25p'] = df[['barkF2_3', 'barkF2_4']].mean(axis=1)
df['barkF2_75p'] = df[['barkF2_8', 'barkF2_9']].mean(axis=1)
df['diff_F1_7525'] = df['barkF1_75p'] - df['barkF1_25p']
df['diff_F2_7525'] = df['barkF2_75p'] - df['barkF2_25p']
return df
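# Minimal usage sketch (illustrative only; the helper name and the single row
# of bark values below are made up): ComputeF1F2Diff expects the barkF1_*/
# barkF2_* columns produced upstream and adds the 75%-25% differences.
def _demo_compute_f1f2_diff():
    demo = pd.DataFrame([{
        'barkF1_3': 5.0, 'barkF1_4': 5.2, 'barkF1_8': 6.0, 'barkF1_9': 6.2,
        'barkF2_3': 11.0, 'barkF2_4': 11.2, 'barkF2_8': 12.4, 'barkF2_9': 12.6,
    }])
    demo = ComputeF1F2Diff(demo)
    # diff_F1_7525 = mean(barkF1_8, barkF1_9) - mean(barkF1_3, barkF1_4) = 1.0
    return demo[['diff_F1_7525', 'diff_F2_7525']]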
class FormantQuantilesF1F2Base(Analyzer):
def __init__(self, filter_map):
self.filter_map = filter_map
def RunAnalysis(self, df, group_name, output_dir):
matched_rows_map = {}
for key, _ in self.filter_map.items():
matched_rows_map[key] = []
for _, row in df.iterrows():
for key, filters in self.filter_map.items():
is_all_matched = [f.IsMatched(row) for f in filters]
if np.all(is_all_matched):
matched_rows_map[key].append(row)
matched_df = {}
for key, rows in matched_rows_map.items():
matched_df[key] = pd.DataFrame(rows)
x = np.arange(2)
for key, mdf in matched_df.items():
mdf = ComputeF1F2Diff(mdf)
df_mean = pd.DataFrame(
mdf.loc[:, mdf.columns.str.startswith("diff")].mean()).T
mdf.to_csv(output_dir / (group_name + '@@@' +
key + '.debug.csv'), index=False)
df_mean.to_csv(output_dir / (group_name + '@@@' +
key+'Mean.debug.csv'), index=False)
y = [df_mean['diff_F1_7525'][0], df_mean['diff_F2_7525'][0]]
plt.bar(x, y, width=kBarWidth, label=key)
x = [xval + kBarWidth for xval in x]
plt.xticks([r + kBarWidth for r in range(2)], ('delta_F1', 'delta_F2'))
plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left")
plt.title(group_name)
plt.savefig(output_dir / (group_name + '.png'), bbox_inches="tight")
plt.clf()
plt.cla()
class FormantQuantilesF1F2SaSb(FormantQuantilesF1F2Base):
def __init__(self):
super().__init__({
'Sa': [filter.IsShanghainese(), filter.IsPosition('a')],
'Sb': [filter.IsShanghainese(), filter.IsPosition('b')],
})
class FormantQuantilesF1F2SbMb(FormantQuantilesF1F2Base):
def __init__(self):
super().__init__({
'Sb': [filter.IsShanghainese(), filter.IsPosition('b')],
'Mb': [filter.IsMandarin(), filter.IsPosition('b')],
})
class FormantQuantilesSbMbBase(Analyzer):
def __init__(self, formant):
self.formant = formant
def RunAnalysis(self, df, group_name, output_dir):
rows_sb = []
rows_mb = []
for _, row in df.iterrows():
if filter.IsShanghainese().IsMatched(row) and filter.IsPosition('b').IsMatched(row):
rows_sb.append(row)
continue
if filter.IsMandarin().IsMatched(row) and filter.IsPosition('b').IsMatched(row):
rows_mb.append(row)
continue
df_sb = pd.DataFrame(rows_sb)
df_sb = ComputeF1F2Diff(df_sb)
df_sb_avg = pd.DataFrame(
df_sb.loc[:, df_sb.columns.str.startswith("diff")].mean()).T
df_sb.to_csv(output_dir / (group_name +
'@@@Sb.debug.csv'), index=False)
df_sb_avg.to_csv(output_dir / (group_name +
'@@@SbMean.debug.csv'), index=False)
df_mb = pd.DataFrame(rows_mb)
df_mb = ComputeF1F2Diff(df_mb)
df_mb_avg = pd.DataFrame(
df_mb.loc[:, df_mb.columns.str.startswith("diff")].mean()).T
df_mb.to_csv(output_dir / (group_name +
'@@@Mb.debug.csv'), index=False)
df_mb_avg.to_csv(output_dir / (group_name +
'@@@MbMean.debug.csv'), index=False)
x = ['Sb', 'Mb']
y = [df_sb_avg['diff_' + self.formant + '_7525'][0],
df_mb_avg['diff_'+self.formant+'_7525'][0]]
plt.bar(x, y, width=kBarWidth)
plt.title(group_name)
plt.savefig(output_dir / (group_name + '.png'), bbox_inches="tight")
plt.clf()
plt.cla()
class FormantQuantilesF1SbMb(FormantQuantilesSbMbBase):
def __init__(self):
super().__init__('F1')
class FormantQuantilesF2SbMb(FormantQuantilesSbMbBase):
def __init__(self):
super().__init__('F2')
class FormantRegressionBase(Analyzer):
def __init__(self, filters):
self.filters = filters
def RunAnalysis(self, df, group_name, output_dir):
matched_rows = []
for _, row in df.iterrows():
is_all_matched = [f.IsMatched(row) for f in self.filters]
if np.all(is_all_matched):
matched_rows.append(row)
df = pd.DataFrame(matched_rows)
filter_name = '_'.join([f.GetValue() for f in self.filters])
full_group_name = group_name + '@@' + filter_name
s_f1 = df.loc[:, df.columns.str.startswith("barkF1")].mean()
s_f2 = df.loc[:, df.columns.str.startswith("barkF2")].mean()
x = np.arange(0, 9)
y1 = s_f1['barkF1_2': 'barkF1_10'].to_numpy(dtype='float')
y2 = s_f2['barkF2_2': 'barkF2_10'].to_numpy(dtype='float')
coeff1 = np.polyfit(x, y1, 4)
coeff2 = np.polyfit(x, y2, 4)
line1 = np.poly1d(coeff1)
line2 = np.poly1d(coeff2)
line1dd = np.polyder(line1, 2)
line2dd = np.polyder(line2, 2)
# line1ddd = np.polyder(line1, 3)
# line2ddd = np.polyder(line2, 3)
line1dd_max = minimize_scalar(-line1dd,
bounds=(0, 8), method='bounded')
line2dd_max = minimize_scalar(-line2dd,
bounds=(0, 8), method='bounded')
inflection1 = line1dd_max.x
inflection2 = line2dd_max.x
# line1ddd_max_left = minimize_scalar(-line1ddd,
# bounds=(0, inflection1), method='bounded')
# line1ddd_max_right = minimize_scalar(-line1ddd,
# bounds=(inflection1, 8), method='bounded')
# line2ddd_max_left = minimize_scalar(-line2ddd,
# bounds=(0, inflection2), method='bounded')
# line2ddd_max_right = minimize_scalar(-line2ddd,
# bounds=(inflection2, 8), method='bounded')
# inflection1d_left = line1ddd_max_left.x
# inflection1d_right = line1ddd_max_right.x
# inflection2d_left = line2ddd_max_left.x
# inflection2d_right = line2ddd_max_right.x
df_inflex = pd.DataFrame(
data={'f1_inflection': [inflection1], 'f2_inflection': [inflection2]})
df_inflex.to_csv(output_dir / (full_group_name + '.csv'), index=False)
# Plot f1/f2
plt.plot(x, y1, 'o')
plt.plot(x, y2, 'x')
plt.plot(x, line1(x), label='F1 fitted')
plt.plot(x, line2(x), label='F2 fitted')
plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left")
plt.title(full_group_name)
plt.savefig(output_dir / (full_group_name + '.fitted.png'),
bbox_inches="tight")
plt.clf()
plt.cla()
# Plot deriv and inflection
plt.plot(x, line1dd(x), label='F1 2nd deriv')
plt.plot(x, line2dd(x), label='F2 2nd deriv')
plt.axvline(x=inflection1, linestyle=':', label='F1 inflection')
plt.axvline(x=inflection2, linestyle='-.', label='F2 inflection')
plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left")
plt.title(full_group_name)
plt.savefig(output_dir / (full_group_name + '.inflection.png'),
bbox_inches="tight")
plt.clf()
plt.cla()
# Plot 3rd deriv and inflection
# plt.plot(x, line1ddd(x), label='F1 3rd deriv')
# plt.plot(x, line2ddd(x), label='F2 3rd deriv')
# plt.axvline(x=inflection1d_left, linestyle=':', label='F1 inf L')
# plt.axvline(x=inflection1d_right, linestyle=':', label='F1 inf R')
# plt.axvline(x=inflection2d_left, linestyle='-.', label='F2 inf L')
# plt.axvline(x=inflection2d_right, linestyle='-.', label='F2 inf R')
# plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left")
# plt.title(full_group_name)
# plt.savefig(output_dir / (full_group_name + '.inflection3rd.png'),
# bbox_inches="tight")
# plt.clf()
# plt.cla()
# output_debug_path = output_dir / (full_group_name + '.debug.csv')
# df.to_csv(output_debug_path, index=False)
class FormantRegressionSa(FormantRegressionBase):
def __init__(self):
super().__init__([filter.IsShanghainese(), filter.IsPosition('a')])
class FormantRegressionSb(FormantRegressionBase):
def __init__(self):
super().__init__([filter.IsShanghainese(), filter.IsPosition('b')])
class FormantRegressionMb(FormantRegressionBase):
def __init__(self):
super().__init__([filter.IsMandarin(), filter.IsPosition('b')])
class FormantInflectionBase(Analyzer):
def __init__(self, filter_map):
self.filter_map = filter_map
def RunAnalysis(self, df, group_name, output_dir):
matched_rows_map = {}
for key, _ in self.filter_map.items():
matched_rows_map[key] = []
for _, row in df.iterrows():
for key, filters in self.filter_map.items():
is_all_matched = [f.IsMatched(row) for f in filters]
if np.all(is_all_matched):
matched_rows_map[key].append(row)
matched_df = {}
for key, rows in matched_rows_map.items():
matched_df[key] = pd.DataFrame(rows)
x_all = []
f1_front = []
f1_back = []
f2_front = []
f2_back = []
for key, mdf in matched_df.items():
s_f1 = mdf.loc[:, mdf.columns.str.startswith("barkF1")].mean()
s_f2 = mdf.loc[:, mdf.columns.str.startswith("barkF2")].mean()
x = np.arange(0, 9)
y1 = s_f1['barkF1_2': 'barkF1_10'].to_numpy(dtype='float')
y2 = s_f2['barkF2_2': 'barkF2_10'].to_numpy(dtype='float')
coeff1 = np.polyfit(x, y1, 4)
coeff2 = np.polyfit(x, y2, 4)
line1 = np.poly1d(coeff1)
line2 = np.poly1d(coeff2)
line1dd = np.polyder(line1, 2)
line2dd = | np.polyder(line2, 2) | numpy.polyder |
import os, glob
from statistics import NormalDist
import pandas as pd
import numpy as np
import input_representation as ir
SAMPLE_DIR = os.getenv('SAMPLE_DIR', './samples')
OUT_FILE = os.getenv('OUT_FILE', './metrics.csv')
MAX_SAMPLES = int(os.getenv('MAX_SAMPLES', 1024))
METRICS = [
'inst_prec', 'inst_rec', 'inst_f1',
'chord_prec', 'chord_rec', 'chord_f1',
'time_sig_acc',
'note_dens_oa', 'pitch_oa', 'velocity_oa', 'duration_oa',
'chroma_crossent', 'chroma_kldiv', 'chroma_sim',
'groove_crossent', 'groove_kldiv', 'groove_sim',
]
DF_KEYS = ['id', 'original', 'sample'] + METRICS
keys = ['C', 'C#', 'D', 'D#', 'E', 'F', 'F#', 'G', 'G#', 'A', 'A#', 'B']
qualities = ['maj', 'min', 'dim', 'aug', 'dom7', 'maj7', 'min7', 'None']
CHORDS = [f"{k}:{q}" for k in keys for q in qualities] + ['N:N']
def get_group_id(file):
# change this depending on name of generated samples
name = os.path.basename(file)
return name.split('.')[0]
def get_file_groups(path, max_samples=MAX_SAMPLES):
# change this depending on file structure of generated samples
files = glob.glob(os.path.join(path, '*.mid'), recursive=True)
assert len(files), f"provided directory was empty: {path}"
samples = sorted(files)
origs = sorted([os.path.join(path, 'gt', os.path.basename(file)) for file in files])
pairs = list(zip(origs, samples))
pairs = list(filter(lambda pair: os.path.exists(pair[0]), pairs))
if max_samples > 0:
pairs = pairs[:max_samples]
groups = dict()
for orig, sample in pairs:
sample_id = get_group_id(sample)
orig_id = get_group_id(orig)
assert sample_id == orig_id, f"Sample id doesn't match original id: {sample} and {orig}"
if sample_id not in groups:
groups[sample_id] = list()
groups[sample_id].append((orig, sample))
return list(groups.values())
def read_file(file):
with open(file, 'r') as f:
events = f.read().split('\n')
events = [e for e in events if e]
return events
def get_chord_groups(desc):
bars = [1 if 'Bar_' in item else 0 for item in desc]
bar_ids = np.cumsum(bars) - 1
groups = [[] for _ in range(bar_ids[-1] + 1)]
for i, item in enumerate(desc):
if 'Chord_' in item:
chord = item.split('_')[-1]
groups[bar_ids[i]].append(chord)
return groups
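# Small illustrative example (assumed token strings, not taken from the dataset):
# get_chord_groups buckets Chord_* tokens by the bar they fall in, so
# ['Bar_1', 'Chord_C:maj', 'Bar_2', 'Chord_G:maj'] -> [['C:maj'], ['G:maj']].
def _demo_chord_groups():
    desc = ['Bar_1', 'Chord_C:maj', 'Bar_2', 'Chord_G:maj']
    return get_chord_groups(desc)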
def instruments(events):
insts = [128 if item.instrument == 'drum' else int(item.instrument) for item in events[1:-1] if item.name == 'Note']
insts = np.bincount(insts, minlength=129)
return (insts > 0).astype(int)
def chords(events):
chords = [CHORDS.index(item) for item in events]
chords = np.bincount(chords, minlength=129)
return (chords > 0).astype(int)
def chroma(events):
pitch_classes = [item.pitch % 12 for item in events[1:-1] if item.name == 'Note' and item.instrument != 'drum']
if len(pitch_classes):
count = | np.bincount(pitch_classes, minlength=12) | numpy.bincount |
# -*- coding: utf-8 -*-
from Voicelab.pipeline.Node import Node
from parselmouth.praat import call
from Voicelab.toolkits.Voicelab.VoicelabNode import VoicelabNode
import numpy as np
from scipy.fftpack import fft
from scipy.interpolate import interp1d
from scipy.io import wavfile
from scipy.io.wavfile import read as wavread
import librosa
class MeasureHNRVoiceSauceNode(VoicelabNode):
###############################################################################################
# process: WARIO hook called once for each voice file.
###############################################################################################
def process(self):
#try:
"""Returns harmonic-to-harmonic ratio Voice Sauce Style."""
filename = self.args["file_path"]
fs, y = wavread(filename)
print(f'{y=}')
#fmin = self.args["min f0"]
#fmax = self.args["max f0"]
librosa_y, sr = librosa.load(filename)
F0 = librosa.yin(librosa_y, fmin=40, fmax=600, sr=sr)
variables = {
"Nperiods_EC": 5,
"frameshift": 1,
}
hnrs = run(y, fs, F0, variables)
print(hnrs)
return {
"harmonic-to-harmonic ratio": hrs,
}
#except:
# return {
# "subharmonic-to-harmonic ratio": "Measurement failed",
# "Subharmonic Mean Pitch": "Measurement failed",
# "Subharmonic Pitch Values": "Measurement failed",
# "Subharmonic Pitch": "Measurement failed",
# }
"""
Created on Mon Apr 14 21:51:49 2014
@author: Helene
"""
# import numpy as np moved to top of file
def run(y, Fs, F0, variables):
"""
INPUT
y, FS - fr wav-read
F0 - vector of fundamental frequency
variables - global settings
OUTPUT
N vectors
NOTES
Calculates the harmonic to noise ration based on the method described in de
Krom, 1993 - A cepstrum-based technique for determining a harmonic-to-noise
ratio in speech signals, JSHR.
AUTHOR
<NAME>, Speech Processing and Auditory Perception Laboratory, UCLA
Copyright UCLA SPAPL 2009
"""
N_periods = int(variables['Nperiods_EC'])
    sampleshift = Fs / 1000 * int(variables['frameshift'])  # samples per frame shift (frameshift given in ms)
HNR05 = np.zeros(len(F0))# * None
HNR15 = np.zeros(len(F0))# * None
HNR25 = np.zeros(len(F0))# * None
HNR35 = np.zeros(len(F0))# * None
print('reached the first for loop')
for k in range(1, len(F0)): # check this with the k multiplcation stuff below
print('loop!')
ks = np.round(k * sampleshift)
if ks <= 0 or ks > len(y):
continue
F0_curr = F0[k]
if F0_curr == 0:
continue
        N0_curr = Fs / F0_curr  # samples per glottal period (matches N0 = Fs/F0 in getHNR)
if not F0_curr:
continue
ystart = round(ks - N_periods/2*N0_curr)
yend = round(ks + N_periods/2*N0_curr)-1
if (yend-ystart + 1) % 2 == 0:
yend -= 1
if ystart <= 0 or yend > len(y):
continue
yseg = y[ystart:yend]
hnr = getHNR(yseg, Fs, F0_curr, [500, 1500, 2500, 3500])
HNR05[k] = hnr[0]
HNR15[k] = hnr[1]
HNR25[k] = hnr[2]
HNR35[k] = hnr[3]
print([HNR05, HNR15, HNR25, HNR35])
return [HNR05, HNR15, HNR25, HNR35]
def getHNR(y, Fs, F0, Nfreqs):
print('holla')
print(f'{y=}')
NBins = len(y)
print(f'{NBins=}')
N0 = round(Fs/F0)
N0_delta = round(N0 * 0.1)
y = [x*z for x, z in zip(np.hamming(len(y)), y)]
fftY = np.fft.fft(y, NBins)
aY = np.log10(abs(fftY))
    ay = np.fft.ifft(aY)
    peakinx = np.zeros(int(np.floor(len(y) / 2 / N0)))
for k in range(1, len(peakinx)):
ayseg = ay[(k*N0 - N0_delta):(k*N0 + N0_delta)]
        inx = int(np.argmax(np.abs(ayseg)))  # MATLAB max returns [val, inx]; use argmax for the index
        val = np.abs(ayseg)[inx]
        peakinx[k] = inx + (k * N0) - N0_delta - 1
        s_ayseg = np.sign(np.diff(ayseg))
        # np.find does not exist; np.nonzero(...)[0][0] gives the first matching index as in the MATLAB port
        l_inx = inx - np.nonzero(np.sign(s_ayseg[inx-1::-1]) != np.sign(inx))[0][0] + 1
        r_inx = inx + np.nonzero(np.sign(s_ayseg[inx+1:]) == np.sign(inx))[0][0]
l_inx = l_inx + k*N0 - N0_delta - 1
r_inx = r_inx + k*N0 - N0_delta - 1
for num in range(l_inx, r_inx):
ay[num] = 0
midL = round(len(y)/2)+1
    ay[midL:] = ay[(midL - 1):(midL - 1 - (len(ay) - midL)):-1]  # mirror the cepstrum; MATLAB start:step:stop reordered to Python start:stop:step
    Nap = np.real(np.fft.fft(ay))
import numpy as np
from scipy import interpolate, optimize
from scipy.integrate import cumtrapz
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
from frenet_path import *
Kern = lambda x: (3/4)*(1-np.power(x,2))*(np.abs(x)<1)
Kern_bis = lambda x,delta: np.power((1 - np.power((np.abs(x)/delta),3)), 3)
class Trajectory:
"""
A class used to represent a 3D curve.
...
Attributes
----------
data : numpy array of shape (N,3) that contained the coordinates of the curve
t : numpy array of shape N, time of each point in data, supposed to be croissant
dim : 3
t0 : float, initial time value
tmax : float, final time value
scale : Boolean, True if the curve is scale, False otherwise
dX1 : function, estimated first derivative
dX2 : function, estimated second derivative
dX3 : function, estimated third derivative
S : function, estimated of the arclenght function
Sdot : function, estimated derivative of the arclenght function
L : float, estimated lenght of the curve
curv_extrins : function, extrinsic estimates of the curvature
tors_extrins : function, extrinsic estimates of the torsion
Methods
-------
loc_poly_estimation(t_out, deg, h):
estimation of derivatives using local polynomial regression with parameters "h" and degree "deg" evaluted of the grid "t_out"
compute_S(scale=False):
compute the arclenght function, the lenght of the curve and scale it if "scale" equals True.
scale():
scale the curve, needs to have run compute_S before.
TNB_GramSchmidt(t):
compute the T,N,B frame of the curve from the pointwise estimated derivatives (of Higher Order : 1,2,3) by Gram-Schmidt Orthonormalization on t
return: instance of class FrenetPath
theta_extrinsic_formula(t):
compute the curvature and torsion functions from the pointwise estimated derivatives (of Higher Order : 1,2,3) computed by the classical formulas
BECAREFUL very unstable (numerically ill-posed).
return: pointwise estimate of curvature, pointwise estimate of torsion
TNB_locPolyReg(grid_in, grid_out, h, p=3, iflag=[1,1], ibound=0, local=True):
TNB estimates based on constrained local polynomial regression |T|=1, <T,N>=0
b0 + b1(t-t_0)+b2(t-t0)^2/2 + b3(t-t0)^3/6 + ... + bp(t-t0)^p/p!, |b1|=1, <b1,b2>=0
minimize (Y-XB)'W(Y-XB) -la*(|b1|^2-1) - mu(2*<b1,b2>)
inputs:
grid_in - input grid
grid_out - output grid
h - scalar
p - degree of polynomial (defaul = 3)
iflag - [1,1] for both constraints, [1,0] for |b1|=1, [0,1] for <b1,b2>=0
ibound - 1 for boundary correction, 0 by default
local - True for local version, False for regular version
return:
Q - instance of class FrenetPath
kappa - [kappa, kappap, tau]
Param - estimates with constraints
Param0 - estimates without constraints
vparam - [la, mu, vla, vmu] tuning parameters
[la, mu]: optimal values amongst vla, and vmu
success - True if a solution was found for all point, False otherwise
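    Example
    -------
    Illustrative sketch (not from the original code; assumes a helix sampled on
    a uniform grid and a user-chosen bandwidth h):

    >>> t = np.linspace(0, 1, 200)
    >>> data = np.stack([np.cos(4*np.pi*t), np.sin(4*np.pi*t), t], axis=1)
    >>> traj = Trajectory(data, t)
    >>> traj.loc_poly_estimation(t, deg=4, h=0.05)
    >>> traj.compute_S(scale=True)
    >>> Q = traj.TNB_GramSchmidt(t)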
"""
def __init__(self, data, t):
self.t = t
self.data = data
self.dim = data.shape[1]
self.t0 = np.min(t)
self.tmax = np.max(t)
self.scale = False
def loc_poly_estimation(self, t_out, deg, h):
pre_process = PolynomialFeatures(degree=deg)
deriv_estim = np.zeros((len(t_out),(deg+1)*self.dim))
for i in range(len(t_out)):
T = self.t - t_out[i]
# print(T)
W = Kern(T/h)
# print(W)
T_poly = pre_process.fit_transform(T.reshape(-1,1))
for j in range(deg+1):
T_poly[:,j] = T_poly[:,j]/np.math.factorial(j)
pr_model = LinearRegression(fit_intercept = False)
pr_model.fit(T_poly, self.data, W)
B = pr_model.coef_
deriv_estim[i,:] = B.reshape(1,(deg+1)*self.dim, order='F')
self.derivatives = deriv_estim
def dx1(t): return interpolate.griddata(self.t, deriv_estim[:,3:6], t, method='cubic')
self.dX1 = dx1
def dx2(t): return interpolate.griddata(self.t, deriv_estim[:,6:9], t, method='cubic')
self.dX2 = dx2
def dx3(t): return interpolate.griddata(self.t, deriv_estim[:,9:12], t, method='cubic')
self.dX3 = dx3
def compute_S(self, scale=False):
def Sdot_fun(t): return np.linalg.norm(self.dX1(t), axis=1)
self.Sdot = Sdot_fun
def S_fun(t): return cumtrapz(self.Sdot(t), t, initial=0)
# S_fun = interpolate.interp1d(self.t, cumtrapz(self.Sdot(self.t), self.t, initial=0))
self.L = S_fun(self.t)[-1]
# print(self.L)
if scale==True:
self.scale = True
def S_fun_scale(t): return cumtrapz(self.Sdot(t), t, initial=0)/self.L
# S_fun_scale = interpolate.interp1d(self.t, cumtrapz(self.Sdot(self.t), self.t, initial=0)/self.L)
self.S = S_fun_scale
self.data = self.data/self.L
else:
self.S = S_fun
def scale(self):
self.scale = True
def S_fun_scale(t): return cumtrapz(self.Sdot(t), t, initial=0)/self.L
self.S = S_fun_scale
self.data = self.data/self.L
def TNB_GramSchmidt(self, t_grid):
def GramSchmidt(DX1, DX2, DX3):
normdX1 = np.linalg.norm(DX1)
normdX2 = np.linalg.norm(DX2)
normdX3 = np.linalg.norm(DX3)
T = DX1/normdX1
N = DX2 - np.dot(np.transpose(T),DX2)*T
N = N/np.linalg.norm(N)
B = DX3 - np.dot(np.transpose(N),DX3)*N - np.dot(np.transpose(T),DX3)*T
B = B/np.linalg.norm(B)
Q = np.stack((T, N, B))
if np.linalg.det(Q)<0:
B = -B
Q = np.stack((T, N, B))
return np.transpose(Q)
dX1 = self.dX1(t_grid)
dX2 = self.dX2(t_grid)
dX3 = self.dX3(t_grid)
nb_t = len(t_grid)
Q = np.zeros((self.dim, self.dim, nb_t))
for i in range(nb_t):
Qi = GramSchmidt(dX1[i,:],dX2[i,:],dX3[i,:])
Q[:,:,i]= Qi
Q_fin = FrenetPath(self.S(t_grid), self.S(t_grid), data=Q)
return Q_fin
def theta_extrinsic_formula(self, t_grid):
dX1 = self.dX1(t_grid)
dX2 = self.dX2(t_grid)
dX3 = self.dX3(t_grid)
nb_t = len(t_grid)
crossvect = np.zeros(dX1.shape)
norm_crossvect = np.zeros(nb_t)
curv = np.zeros(nb_t)
tors = np.zeros(nb_t)
for t in range(nb_t):
crossvect[t,:] = np.cross(dX1[t,:],dX2[t,:])
norm_crossvect[t] = np.linalg.norm(crossvect[t,:],1)
curv[t]= norm_crossvect[t]/np.power(np.linalg.norm(dX1[t,:]),3)
tors[t]= (np.dot(crossvect[t,:],np.transpose(dX3[t,:])))/(norm_crossvect[t]**2)
if self.scale==True:
curv = curv*self.L
tors = tors*self.L
def curv_extrins_fct(s): return interpolate.interp1d(self.S(t_grid), curv)(s)
def tors_extrins_fct(s): return interpolate.interp1d(self.S(t_grid), tors)(s)
self.curv_extrins = curv_extrins_fct
self.tors_extrins = tors_extrins_fct
return curv, tors
def TNB_locPolyReg(self, grid_in, grid_out, h, p=3, iflag=[1,1], ibound=0, local=True):
(n,d) = self.data.shape
nout = len(grid_out)
s0 = np.min(grid_in)
smax = np.max(grid_in)
if ibound>0:
# bandwidth correction at the boundary
hvec = h + np.maximum(np.maximum(s0 - (grid_out-h), (grid_out+h) - smax),np.zeros(nout))
else:
hvec = h* | np.ones(nout) | numpy.ones |
import cv2
import numpy as np
def scale_bi_linear(img, rate: float):
"""bi-linear補間による拡大縮小
Arguments:
img {numpy.ndarray} -- 元画像
rate {float} -- 拡大縮小率
Returns:
[numpy.ndarray] -- 拡大縮小後画像
"""
h = img.shape[0]
w = img.shape[1]
out_h = int(h * rate)
out_w = int(w * rate)
ex_x = np.tile(np.arange(out_w), (out_h, 1))
ex_y = np.arange(out_h).repeat(out_w).reshape(out_h, out_w)
    out_x = np.minimum(ex_x / rate, w - 2).astype(np.float64)
    out_y = np.minimum(ex_y / rate, h - 2).astype(np.float64)
out_idx_x = np.minimum( | np.floor(ex_x / rate) | numpy.floor |
import numpy as np
import math
import itertools
def _p_val_1d(A, B, metric=np.mean, numResamples=10000):
"""Return p value of observed difference between 1-dimensional A and B"""
observedDiff = abs(metric(A) - metric(B))
combined = np.concatenate([A, B])
numA = len(A)
resampleDiffs = np.zeros(numResamples,dtype='float')
for resampleInd in range(numResamples):
permutedCombined = np.random.permutation(combined)
diff = metric(permutedCombined[:numA]) - metric(permutedCombined[numA:])
resampleDiffs[resampleInd] = diff
pVal = (np.sum(resampleDiffs > observedDiff) + np.sum(resampleDiffs < -observedDiff))/float(numResamples)
return pVal
def _ma_p_val_1d(A, B, metric=np.mean, numResamples=10000):
A = np.ma.masked_invalid(A, copy=True)
A = A.compressed()
B = np.ma.masked_invalid(B, copy=True)
B = B.compressed()
pVal = _p_val_1d(A, B, metric, numResamples)
return pVal
def _ma_p_val_concatenated(C, numSamplesFirstGroup, metric=np.mean, numResamples=10000):
A = C[:numSamplesFirstGroup]
B = C[numSamplesFirstGroup:]
pVal = _ma_p_val_1d(A, B, metric, numResamples)
return pVal
def p_val(A, B, axis=None, metric=np.mean, numResamples=10000):
"""Return the p value that metric(A) and metric(B) differ along an axis ignoring NaNs and masked elements.
Parameters
----------
A : array_like
Array containing numbers of first group.
B : array_like
Array containing numbers of second group.
axis : int, optional
Axis along which the p value is computed.
The default is to compute the p value of the flattened arrays.
metric : numpy function, optional
metric to calculate p value for.
The default is numpy.mean
numResamples : int, optional
number of permutations. The default is 10000.
Returns
-------
pValue : ndarray
An array with the same shape as `A` and `B`, with the specified axis removed.
If axis is None, a scalar is returned.
See Also
--------
fast_p_val : uses the same random permutation for all entries.
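    Examples
    --------
    Illustrative sketch only (values are made up; the result varies with the
    random permutations):

    >>> A = np.array([1.1, 0.9, 1.2, 1.0, 1.3])
    >>> B = np.array([0.4, 0.5, 0.3, 0.6, 0.5])
    >>> p_val(A, B, numResamples=1000)  # doctest: +SKIP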
"""
A = A.copy()
B = B.copy()
if axis is None:
A = A.ravel()
B = B.ravel()
pVal = _ma_p_val_1d(A, B, metric, numResamples)
else:
numSamplesFirstGroup = A.shape[axis]
C = np.concatenate((A,B),axis=axis)
pVal = np.apply_along_axis(_ma_p_val_concatenated, axis, C, numSamplesFirstGroup, metric, numResamples)
return pVal
def fast_p_val(A, B, axis=0, metric=np.mean, numResamples=10000):
"""Return the p value that metric(A) and metric(B) differ along an axis.
Parameters
----------
A : array_like
Array containing numbers of first group.
B : array_like
Array containing numbers of second group.
axis : int, optional
Axis along which the p value is computed.
The default is to compute the p value along the first dimension.
metric : numpy function, optional
metric to calculate p value for.
The default is numpy.mean
numResamples : int, optional
number of permutations. The default is 10000.
Returns
-------
pValue : ndarray
An array with the same shape as `A` and `B`, with the specified axis removed.
See Also
--------
p_val : ignores NaNs and masked elements, and independently calculates random
permutations for each entry.
"""
rolledA = | np.rollaxis(A,axis) | numpy.rollaxis |
from __future__ import print_function
import numpy as np
import random
import pickle
from tqdm import tqdm
import os, sys, pdb, math, time
import networkx as nx
import argparse
import scipy.io as sio
import scipy.sparse as ssp
from sklearn import metrics
from gensim.models import Word2Vec
import warnings
import pickle
warnings.simplefilter('ignore', ssp.SparseEfficiencyWarning)
cur_dir = os.path.dirname(os.path.realpath(__file__))
sys.path.append('%s/../pytorch_DGCNN' % cur_dir)
sys.path.append('%s/software/node2vec/src' % cur_dir)
from util import GNNGraph
import node2vec
import multiprocessing as mp
from itertools import islice
def sample_neg(net, test_ratio=0.1, train_pos=None, test_pos=None, max_train_num=None,
all_unknown_as_negative=False):
# get upper triangular matrix
drug_proteinnet = net[:732][:,1172:3087].toarray()
#print(drug_proteinnet)
drug_drugnet = net[:732][:,:732].toarray()
#print(drug_drugnet)
protein_proteinnet = net[1172:3087][:,1172:3087].toarray()
#print(protein_proteinnet)
protein_drugnet = drug_proteinnet.T
drugrownet = np.concatenate((drug_drugnet,drug_proteinnet), axis=1)
proteinrownet = np.concatenate((protein_drugnet,protein_proteinnet), axis=1)
dpnet = np.concatenate((drugrownet, proteinrownet), axis=0)
print(np.shape(dpnet))
dpnet = ssp.csr_matrix(dpnet)
#print(dpnet)
net_triu = ssp.triu(dpnet, k=1)
# sample positive links for train/test
row, col, _ = ssp.find(ssp.csr_matrix(net_triu)[:732][:,732:])
col = col + 1172
print(net_triu.count_nonzero())
# sample positive links if not specified
if train_pos is None and test_pos is None:
perm = random.sample(range(len(row)), len(row))
row, col = row[perm], col[perm]
split = int(math.ceil(len(row) * (1 - test_ratio)))
train_pos = (row[:split], col[:split])
test_pos = (row[split:], col[split:])
# if max_train_num is set, randomly sample train links
if max_train_num is not None and train_pos is not None:
perm = np.random.permutation(len(train_pos[0]))[:max_train_num]
train_pos = (train_pos[0][perm], train_pos[1][perm])
# sample negative links for train/test
train_num = len(train_pos[0]) if train_pos else 0
test_num = len(test_pos[0]) if test_pos else 0
neg = ([], [])
newnet = ssp.csr_matrix(net_triu)[:732][:,732:]
n = newnet.shape[0]
m = newnet.shape[1]
print('sampling negative links for train and test')
if not all_unknown_as_negative:
# sample a portion unknown links as train_negs and test_negs (no overlap)
while len(neg[0]) < train_num + test_num:
i, j = random.randint(0, n-1), random.randint(0, m-1)
if i < j and newnet[i, j] == 0:
neg[0].append(i)
neg[1].append(j+1172)
else:
continue
train_neg = (neg[0][:train_num], neg[1][:train_num])
test_neg = (neg[0][train_num:], neg[1][train_num:])
return train_pos, train_neg, test_pos, test_neg
def links2subgraphs(A, train_pos, train_neg, test_pos, test_neg, h=1,
max_nodes_per_hop=None, node_information=None, no_parallel=False):
# automatically select h from {1, 2}
if h == 'auto':
# split train into val_train and val_test
_, _, val_test_pos, val_test_neg = sample_neg(A, 0.1)
val_A = A.copy()
val_A[val_test_pos[0], val_test_pos[1]] = 0
val_A[val_test_pos[1], val_test_pos[0]] = 0
val_auc_CN = CN(val_A, val_test_pos, val_test_neg)
val_auc_AA = AA(val_A, val_test_pos, val_test_neg)
print('\033[91mValidation AUC of AA is {}, CN is {}\033[0m'.format(
val_auc_AA, val_auc_CN))
if val_auc_AA >= val_auc_CN:
h = 2
print('\033[91mChoose h=2\033[0m')
else:
h = 1
print('\033[91mChoose h=1\033[0m')
# extract enclosing subgraphs
max_n_label = {'value': 0}
def helper(A, links, g_label):
g_list = []
if no_parallel:
for i, j in tqdm(zip(links[0], links[1])):
g, n_labels, n_features = subgraph_extraction_labeling(
(i, j), A, h, max_nodes_per_hop, node_information
)
max_n_label['value'] = max(max(n_labels), max_n_label['value'])
g_list.append(GNNGraph(g, g_label, n_labels, n_features))
return g_list
else:
# the parallel extraction code
start = time.time()
pool = mp.Pool(15)
results = pool.map_async(
parallel_worker,
[((i, j), A, h, max_nodes_per_hop, node_information) for i, j in zip(links[0], links[1])]
)
remaining = results._number_left
pbar = tqdm(total=remaining)
while True:
pbar.update(remaining - results._number_left)
if results.ready(): break
remaining = results._number_left
time.sleep(1)
results = results.get()
pool.close()
pbar.close()
print('ready for multiprocessing.')
g_list = [GNNGraph(g, g_label, n_labels, n_features) for g, n_labels, n_features in results]
print('ready g_list.')
max_n_label['value'] = max(
max([max(n_labels) for _, n_labels, _ in results]), max_n_label['value']
)
end = time.time()
print("Time eplased for subgraph extraction: {}s".format(end-start))
return g_list
print('Enclosing subgraph extraction begins...')
train_graphs, test_graphs = None, None
if train_pos and train_neg:
#print(len(train_pos[0]))
train_graphs = helper(A, train_pos, 1) + helper(A, train_neg, 0)
if test_pos and test_neg:
#print(len(test_pos[0]))
test_graphs = helper(A, test_pos, 1) + helper(A, test_neg, 0)
elif test_pos:
test_graphs = helper(A, test_pos, 1)
return train_graphs, test_graphs, max_n_label['value']
def parallel_worker(x):
return subgraph_extraction_labeling(*x)
def subgraph_extraction_labeling(ind, A, h=1, max_nodes_per_hop=None,
node_information=None):
# extract the h-hop enclosing subgraph around link 'ind'
dist = 0
nodes = set([ind[0], ind[1]])
visited = set([ind[0], ind[1]])
fringe = set([ind[0], ind[1]])
nodes_dist = [0, 0]
for dist in range(1, h+1):
fringe = neighbors(fringe, A)
#print(fringe)
fringe = fringe - visited
visited = visited.union(fringe)
if max_nodes_per_hop is not None:
if max_nodes_per_hop < len(fringe):
fringe = random.sample(fringe, max_nodes_per_hop)
if len(fringe) == 0:
break
nodes = nodes.union(fringe)
nodes_dist += [dist] * len(fringe)
#print(nodes_dist)
# move target nodes to top
'''
if (ind[1]==79) and (79 not in nodes):
print(nodes)
'''
nodes.remove(ind[0])
nodes.remove(ind[1])
nodes = [ind[0], ind[1]] + list(nodes)
subgraph = A[nodes, :][:, nodes]
#print(subgraph)
# apply node-labeling
labels = node_label(subgraph)
# get node features
features = None
if node_information is not None:
features = node_information[nodes]
# construct nx graph
g = nx.from_scipy_sparse_matrix(subgraph)
# remove link between target nodes
if g.has_edge(0, 1):
g.remove_edge(0, 1)
return g, labels.tolist(), features
def neighbors(fringe, A):
# find all 1-hop neighbors of nodes in fringe from A
res = set()
for node in fringe:
nei, _, _ = ssp.find(A[:, node])
nei = set(nei)
res = res.union(nei)
return res
def node_label(subgraph):
# an implementation of the proposed double-radius node labeling (DRNL)
K = subgraph.shape[0]
subgraph_wo0 = subgraph[1:, 1:]
subgraph_wo1 = subgraph[[0]+list(range(2, K)), :][:, [0]+list(range(2, K))]
dist_to_0 = ssp.csgraph.shortest_path(subgraph_wo0, directed=False, unweighted=True)
dist_to_0 = dist_to_0[1:, 0]
dist_to_1 = ssp.csgraph.shortest_path(subgraph_wo1, directed=False, unweighted=True)
dist_to_1 = dist_to_1[1:, 0]
d = (dist_to_0 + dist_to_1).astype(int)
d_over_2, d_mod_2 = np.divmod(d, 2)
labels = 1 + np.minimum(dist_to_0, dist_to_1).astype(int) + d_over_2 * (d_over_2 + d_mod_2 - 1)
labels = np.concatenate((np.array([1, 1]), labels))
labels[np.isinf(labels)] = 0
labels[labels>1e6] = 0 # set inf labels to 0
labels[labels<-1e6] = 0 # set -inf labels to 0
return labels
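# Illustrative sketch (the helper name and toy graph are assumptions, not part
# of the original pipeline): builds a 6-node ring, extracts the 1-hop enclosing
# subgraph around the target link (0, 1), and returns it with its DRNL labels.
def _demo_enclosing_subgraph():
    edges = [(i, (i + 1) % 6) for i in range(6)]
    A = ssp.lil_matrix((6, 6))
    for i, j in edges:
        A[i, j] = 1
        A[j, i] = 1
    A = ssp.csr_matrix(A)
    g, labels, _ = subgraph_extraction_labeling((0, 1), A, h=1)
    return g, labels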
def generate_node2vec_embeddings(A, emd_size=128, negative_injection=False, train_neg=None, mode='node2vec'):
if mode == 'node2vec':
if negative_injection:
row, col = train_neg
A = A.copy()
A[row, col] = 1 # inject negative train
A[col, row] = 1 # inject negative train
nx_G = nx.from_scipy_sparse_matrix(A)
G = node2vec.Graph(nx_G, is_directed=False, p=1, q=1)
G.preprocess_transition_probs()
walks = G.simulate_walks(num_walks=10, walk_length=100)
walks = [list(map(str, walk)) for walk in walks]
model = Word2Vec(walks, size=emd_size, window=7, min_count=0, sg=1,
workers=8, iter=1)
wv = model.wv
embeddings = | np.zeros([A.shape[0], emd_size], dtype='float32') | numpy.zeros |
# coding: utf-8
import sys
import os
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
__all__ = ['Ackley','Sphere','Rosenbrock','Beale','GoldsteinPrice','Booth',
'BukinN6','Matyas','LeviN13','ThreeHumpCamel','Easom','Eggholder',
'McCormick','SchafferN2','SchafferN4','StyblinskiTang','DeJongsF1',
'DeJongsF2','DeJongsF3','DeJongsF4','DeJongsF5','Ellipsoid','KTablet',
'FiveWellPotential','WeightedSphere','HyperEllipsodic',
'SumOfDifferentPower','Griewank','Michalewicz','Perm','Rastrigin',
'Schwefel','SixHumpCamel','Shuberts','XinSheYang','Zakharov']
__oneArgument__ = ['Beale','GoldsteinPrice','Booth','BukinN6','Matyas','LeviN13',
'ThreeHumpCamel','Easom','Eggholder','McCormick','SchafferN2',
'SchafferN4','DeJongsF3','DeJongsF4','DeJongsF5',
'FiveWellPotential','SixHumpCamel','Shuberts']
__twoArgument__ = ['Ackley','Sphere','Rosenbrock','StyblinskiTang','DeJongsF1',
'DeJongsF2','Ellipsoid','KTablet','WeightedSphere',
'HyperEllipsodic','SumOfDifferentPower','Griewank',
'Michalewicz','Rastrigin','Schwefel','XinSheYang','Zakharov']
__threeArgument__ = ['Perm']
##### Basic function #####
class OptimalBasic:
def __init__(self, variable_num):
self.variable_num = variable_num
self.max_search_range = np.array([0]*self.variable_num)
self.min_search_range = np.array([0]*self.variable_num)
self.optimal_solution = np.array([0]*self.variable_num)
self.global_optimum_solution = 0
self.plot_place = 0.25
self.func_name = ''
self.save_dir = os.path.dirname(os.path.abspath(__file__))+'\\img\\'
if(os.path.isdir(self.save_dir) == False):
os.mkdir(self.save_dir)
def get_global_optimum_solution(self):
return self.global_optimum_solution
def get_optimal_solution(self):
return self.optimal_solution
def get_search_range(self):
return [self.max_search_range, self.min_search_range]
def get_func_val(self, variables):
return -1
def plot(self):
x = np.arange(self.min_search_range[0],self.max_search_range[0], self.plot_place, dtype=np.float32)
y = np.arange(self.min_search_range[1],self.max_search_range[1], self.plot_place, dtype=np.float32)
X, Y = np.meshgrid(x,y)
Z = []
for xy_list in zip(X,Y):
z = []
for xy_input in zip(xy_list[0],xy_list[1]):
tmp = list(xy_input)
tmp.extend(list(self.optimal_solution[0:self.variable_num-2]))
z.append(self.get_func_val(np.array(tmp)))
Z.append(z)
Z = np.array(Z)
fig = plt.figure()
ax = Axes3D(fig)
ax.plot_wireframe(X,Y,Z)
plt.show()
def save_fig(self):
x = np.arange(self.min_search_range[0],self.max_search_range[0], self.plot_place, dtype=np.float32)
y = np.arange(self.min_search_range[1],self.max_search_range[1], self.plot_place, dtype=np.float32)
X, Y = np.meshgrid(x,y)
Z = []
for xy_list in zip(X,Y):
z = []
for xy_input in zip(xy_list[0],xy_list[1]):
tmp = list(xy_input)
tmp.extend(list(self.optimal_solution[0:self.variable_num-2]))
z.append(self.get_func_val(np.array(tmp)))
Z.append(z)
Z = np.array(Z)
fig = plt.figure()
ax = Axes3D(fig)
ax.plot_wireframe(X,Y,Z)
plt.savefig(self.save_dir+self.func_name+'.png')
plt.close()
##### Optimization benchmark function group #####
##### Class Ackley function #####
class Ackley(OptimalBasic):
def __init__(self,variable_num):
super().__init__(variable_num)
self.max_search_range = np.array([32.768]*self.variable_num)
self.min_search_range = np.array([-32.768]*self.variable_num)
self.optimal_solution = np.array([0]*self.variable_num)
self.global_optimum_solution = 0
self.func_name = 'Ackley'
def get_func_val(self, variables):
tmp1 = 20.-20.*np.exp(-0.2*np.sqrt(1./self.variable_num*np.sum(np.square(variables))))
tmp2 = np.e-np.exp(1./self.variable_num*np.sum(np.cos(variables*2.*np.pi)))
return tmp1+tmp2
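# Quick usage sketch (illustrative; the helper name is an assumption): every
# benchmark class exposes the same interface, e.g. a 2-variable Ackley
# evaluated at its known optimum should return ~0.
def _demo_ackley_usage():
    f = Ackley(2)
    x_opt = f.get_optimal_solution()
    return f.get_func_val(x_opt)  # approximately 0 (up to floating-point error)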
##### Class Sphere function #####
class Sphere(OptimalBasic):
def __init__(self, variable_num):
super().__init__(variable_num)
self.max_search_range = np.array([1000]*self.variable_num) # nearly inf
self.min_search_range = np.array([-1000]*self.variable_num) # nearly inf
        self.optimal_solution = np.array([0]*self.variable_num)
self.global_optimum_solution = 0
self.plot_place = 10
self.func_name = 'Sphere'
def get_func_val(self, variables):
return np.sum(np.square(variables))
##### Class Rosenbrock function #####
class Rosenbrock(OptimalBasic):
def __init__(self, variable_num):
super().__init__(variable_num)
self.max_search_range = np.array([5]*self.variable_num)
self.min_search_range = np.array([-5]*self.variable_num)
self.optimal_solution = np.array([1]*self.variable_num)
self.global_optimum_solution = 0
self.plot_place = 0.25
self.func_name = 'Rosenbrock'
def get_func_val(self, variables):
f = 0
for i in range(self.variable_num-1):
f += 100*np.power(variables[i+1]-np.power(variables[i],2),2)+np.power(variables[i]-1,2)
return f
##### Class Beale function #####
class Beale(OptimalBasic):
def __init__(self):
super().__init__(2)
self.max_search_range = np.array([4.5]*self.variable_num)
self.min_search_range = np.array([-4.5]*self.variable_num)
self.optimal_solution = np.array([3.,0.5])
self.global_optimum_solution = 0
self.plot_place = 0.25
self.func_name = 'Beale'
def get_func_val(self, variables):
tmp1 = np.power(1.5 - variables[0] + variables[0] * variables[1],2)
tmp2 = np.power(2.25 - variables[0] + variables[0] * np.power(variables[1],2),2)
tmp3 = np.power(2.625 - variables[0] + variables[0] * np.power(variables[1],3),2)
return tmp1+tmp2+tmp3
##### Class Goldstein-Price function #####
class GoldsteinPrice(OptimalBasic):
def __init__(self):
super().__init__(2)
self.max_search_range = np.array([2.]*self.variable_num)
self.min_search_range = np.array([-2.]*self.variable_num)
self.optimal_solution = np.array([0.,-1.])
self.global_optimum_solution = 3
self.plot_place = 0.25
self.func_name = 'GoldsteinPrice'
def get_func_val(self, variables):
tmp1 = (1+np.power(variables[0]+variables[1]+1,2)*(19-14*variables[0]+3*np.power(variables[0],2)-14*variables[1]+6*variables[0]*variables[1]+3*np.power(variables[1],2)))
tmp2 = (30+(np.power(2*variables[0]-3*variables[1],2)*(18-32*variables[0]+12*np.power(variables[0],2)+48*variables[1]-36*variables[0]*variables[1]+27*np.power(variables[1],2))))
return tmp1*tmp2
##### Class Booth function #####
class Booth(OptimalBasic):
def __init__(self):
super().__init__(2)
self.max_search_range = np.array([10.]*self.variable_num)
self.min_search_range = np.array([-10.]*self.variable_num)
        self.optimal_solution = np.array([1.,3.])
self.global_optimum_solution = 0
self.func_name = 'Booth'
def get_func_val(self, variables):
tmp1 = np.power(variables[0]+2*variables[1]-7,2)
tmp2 = np.power(2*variables[0]+variables[1]-5,2)
return tmp1+tmp2
##### Class Bukin function N.6 #####
class BukinN6(OptimalBasic):
def __init__(self):
super().__init__(2)
self.max_search_range = np.array([-5.,3.])
self.min_search_range = np.array([-15.,-3.])
self.optimal_solution = np.array([-10.,1.])
self.global_optimum_solution = 0
self.func_name = 'BukinN6'
def get_func_val(self, variables):
        tmp1 = 100*np.sqrt(np.absolute(variables[1]-0.01*np.power(variables[0],2)))
tmp2 = 0.01*np.absolute(variables[0]+10)
return tmp1+tmp2
##### Class Matyas function #####
class Matyas(OptimalBasic):
def __init__(self):
super().__init__(2)
self.max_search_range = np.array([10.]*self.variable_num)
self.min_search_range = np.array([-10.]*self.variable_num)
self.optimal_solution = np.array([0.,0.])
self.global_optimum_solution = 0
self.func_name = 'Matyas'
def get_func_val(self, variables):
tmp1 = 0.26*(np.power(variables[0],2)+np.power(variables[1],2))
tmp2 = 0.48*variables[0]*variables[1]
return tmp1-tmp2
##### Class Levi function N.13 #####
class LeviN13(OptimalBasic):
def __init__(self):
super().__init__(2)
self.max_search_range = np.array([10.]*self.variable_num)
self.min_search_range = np.array([-10.]*self.variable_num)
self.optimal_solution = np.array([1.,1.])
self.global_optimum_solution = 0
self.func_name = 'LeviN13'
def get_func_val(self, variables):
tmp1 = np.power(np.sin(3*np.pi*variables[0]),2)
tmp2 = np.power(variables[0]-1,2)*(1+np.power(np.sin(3*np.pi*variables[1]),2))
tmp3 = np.power(variables[1]-1,2)*(1+np.power(np.sin(2*np.pi*variables[1]),2))
return tmp1+tmp2+tmp3
##### Class Three-hump camel function #####
class ThreeHumpCamel(OptimalBasic):
def __init__(self):
super().__init__(2)
self.max_search_range = np.array([5.]*self.variable_num)
self.min_search_range = np.array([-5.]*self.variable_num)
self.optimal_solution = np.array([0.,0.])
self.global_optimum_solution = 0
self.func_name = 'ThreeHumpCamel'
def get_func_val(self, variables):
return 2*np.power(variables[0],2)-1.05*np.power(variables[0],4)+np.power(variables[0],6)/6+variables[0]*variables[1]+np.power(variables[1],2)
##### Class Easom function #####
class Easom(OptimalBasic):
def __init__(self):
super().__init__(2)
self.max_search_range = np.array([100.]*self.variable_num)
self.min_search_range = np.array([-100.]*self.variable_num)
self.optimal_solution = np.array([np.pi,np.pi])
self.global_optimum_solution = -1
self.plot_place = 10
self.func_name = 'Easom'
def get_func_val(self, variables):
return -1.0*np.cos(variables[0])*np.cos(variables[1])*np.exp(-(np.power(variables[0]-np.pi,2)+np.power(variables[1]-np.pi,2)))
##### Class Eggholder function #####
class Eggholder(OptimalBasic):
def __init__(self):
super().__init__(2)
self.max_search_range = np.array([512.]*self.variable_num)
self.min_search_range = np.array([-512.]*self.variable_num)
self.optimal_solution = np.array([512.,404.2319])
self.global_optimum_solution = -959.6407
self.plot_place = 5
self.func_name = 'Eggholder'
def get_func_val(self, variables):
tmp1 = -(variables[1]+47)*np.sin(np.sqrt(np.absolute(variables[1]+variables[0]/2+47)))
tmp2 = -variables[0]*np.sin(np.sqrt(np.absolute(variables[0]-(variables[1]+47))))
return tmp1+tmp2
##### Class McCormick function #####
class McCormick(OptimalBasic):
def __init__(self):
super().__init__(2)
self.max_search_range = np.array([4.]*self.variable_num)
self.min_search_range = np.array([-1.5,-3.])
self.optimal_solution = np.array([-0.54719,-1.54719])
self.global_optimum_solution = -1.9133
self.func_name = 'McCormick'
def get_func_val(self, variables):
tmp1 = np.sin(variables[0]+variables[1])+np.power(variables[0]-variables[1],2)
tmp2 = -1.5*variables[0]+2.5*variables[1]+1
return tmp1+tmp2
##### Class Schaffer function N.2 #####
class SchafferN2(OptimalBasic):
def __init__(self):
super().__init__(2)
self.max_search_range = np.array([100.]*self.variable_num)
self.min_search_range = np.array([-100]*self.variable_num)
self.optimal_solution = np.array([0.,0.])
self.global_optimum_solution = 0
self.plot_place = 10
self.func_name = 'SchafferN2'
def get_func_val(self, variables):
tmp1 = np.power(np.sin(np.power(variables[0],2)-np.power(variables[1],2)),2)-0.5
tmp2 = np.power(1+0.001*(np.power(variables[0],2)+np.power(variables[1],2)),2)
return 0.5+tmp1/tmp2
##### Class Schaffer function N.4 #####
class SchafferN4(OptimalBasic):
def __init__(self):
super().__init__(2)
self.max_search_range = np.array([100.]*self.variable_num)
self.min_search_range = np.array([-100]*self.variable_num)
self.optimal_solution = np.array([0.,1.25313])
        self.global_optimum_solution = 0.292579
self.plot_place = 10
self.func_name = 'SchafferN4'
def get_func_val(self, variables):
tmp1 = np.power(np.cos(np.sin(np.absolute(np.power(variables[0],2)-np.power(variables[1],2)))),2)-0.5
tmp2 = np.power(1+0.001*(np.power(variables[0],2)+np.power(variables[1],2)),2)
return 0.5+tmp1/tmp2
##### Class Styblinski-Tang function #####
class StyblinskiTang(OptimalBasic):
def __init__(self,variable_num):
super().__init__(variable_num)
self.max_search_range = np.array([5.]*self.variable_num)
self.min_search_range = np.array([-5.]*self.variable_num)
self.optimal_solution = np.array([-2.903534]*self.variable_num)
self.global_optimum_solution = -39.166165*self.variable_num
self.func_name = 'StyblinskiTang'
def get_func_val(self, variables):
tmp1 = 0
for i in range(self.variable_num):
tmp1 += np.power(variables[i],4)-16*np.power(variables[i],2)+5*variables[i]
return tmp1/2
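##### Usage sketch (illustrative addition) #####
# A minimal example of how the benchmark classes above are intended to be
# used: instantiate one, check that the documented optimum reproduces the
# documented minimum, then evaluate a random point from the search box.
# Only attributes defined in the classes above are used.
def _demo_benchmark():
    import numpy as np
    func = Matyas()
    assert np.isclose(func.get_func_val(func.optimal_solution),
                      func.global_optimum_solution)
    x = np.random.uniform(func.min_search_range, func.max_search_range)
    return func.get_func_val(x)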
##### Class De Jong's function F1 #####
class DeJongsF1(Sphere):
def __init__(self,variable_num):
super().__init__(variable_num)
self.func_name = 'DeJongsF1'
##### Class De Jong's function F2 #####
class DeJongsF2(Rosenbrock):
def __init__(self,variable_num):
super().__init__(variable_num)
self.func_name = 'DeJongsF2'
##### Class De Jong's function F3 #####
class DeJongsF3(OptimalBasic):
def __init__(self):
super().__init__(5)
self.max_search_range = np.array([5.12]*self.variable_num)
self.min_search_range = np.array([-5.12]*self.variable_num)
self.optimal_solution = np.array([-5.12]*self.variable_num)
        self.global_optimum_solution = -30.
self.func_name = 'DeJongsF3'
def get_func_val(self, variables):
tmp1 = 0
for i in range(self.variable_num):
tmp1 += np.floor(variables[i])
return tmp1
##### Class De Jong's function F4 #####
class DeJongsF4(OptimalBasic):
def __init__(self):
super().__init__(30)
self.max_search_range = np.array([1.28]*self.variable_num)
self.min_search_range = np.array([-1.28]*self.variable_num)
self.optimal_solution = np.array([0.]*self.variable_num)
self.global_optimum_solution = np.random.normal(0,1)
self.func_name = 'DeJongsF4'
def get_func_val(self, variables):
tmp1 = 0
for i in range(self.variable_num):
tmp1 += (i+1)*np.power(variables[i],4)
return tmp1 + np.random.normal(0, 1)
##### Class De Jong's function F5 #####
class DeJongsF5(OptimalBasic):
def __init__(self):
super().__init__(25)
self.max_search_range = np.array([65.536]*self.variable_num)
self.min_search_range = np.array([-65.536]*self.variable_num)
self.optimal_solution = np.array([-32.32]*self.variable_num)
self.global_optimum_solution = 1.
self.plot_place = 1.5
self.func_name = 'DeJongsF5'
def get_func_val(self, variables):
A = np.zeros([2,25])
        a = [-32,-16,0,16,32]
A[0,:] = np.tile(a,(1,5))
tmp = []
for x in a:
tmp_list = [x]*5
tmp.extend(tmp_list)
A[1,:] = tmp
sum = 0
for i in range(self.variable_num):
a1i = A[0,i]
a2i = A[1,i]
            term1 = i + 1
term2 = np.power(variables[0]-a1i,6)
term3 = np.power(variables[1]-a2i,6)
new = 1/(term1+term2+term3)
sum += new
return 1/(0.002+sum)
##### Class Ellipsoid function #####
class Ellipsoid(OptimalBasic):
def __init__(self,variable_num):
super().__init__(variable_num)
self.max_search_range = np.array([5.12]*self.variable_num)
self.min_search_range = np.array([-5.12]*self.variable_num)
self.optimal_solution = np.array([0.]*self.variable_num)
self.global_optimum_solution = 0.
self.func_name = 'Ellipsoid'
def get_func_val(self, variables):
tmp = 0
for i in range(self.variable_num):
tmp += np.power(np.power(1000,i/(self.variable_num-1))*variables[i],2)
return tmp
##### Class k-tablet function #####
class KTablet(OptimalBasic):
def __init__(self,variable_num):
super().__init__(variable_num)
self.max_search_range = np.array([5.12]*self.variable_num)
self.min_search_range = np.array([-5.12]*self.variable_num)
self.optimal_solution = np.array([0.]*self.variable_num)
self.global_optimum_solution = 0.
self.func_name = 'KTablet'
def get_func_val(self, variables):
tmp = 0
k = int(self.variable_num/4)
for i in range(k):
            tmp += np.power(variables[i],2)
for i in range(k,self.variable_num):
tmp += np.power(100*variables[i],2)
return tmp
##### Class Five-well potential function #####
# Not yet verified to work properly
class FiveWellPotential(OptimalBasic):
def __init__(self):
super().__init__(2)
self.max_search_range = np.array([20.]*self.variable_num)
self.min_search_range = np.array([-20.]*self.variable_num)
self.optimal_solution = np.array([4.92,-9.89])
self.global_optimum_solution = -1.4616
self.plot_place = 1
self.func_name = 'FiveWellPotential'
def get_func_val(self, variables):
tmp1 = []
        tmp1.append(1-1/(1+0.05*(np.power(variables[0],2)+np.power(variables[1]-10,2))))
        tmp1.append(-1/(1+0.05*(np.power(variables[0]-10,2)+np.power(variables[1],2))))
        tmp1.append(-1.5/(1+0.03*(np.power(variables[0]+10,2)+np.power(variables[1],2))))
        tmp1.append(-2/(1+0.05*(np.power(variables[0]-5,2)+np.power(variables[1]+10,2))))
        tmp1.append(-1/(1+0.1*(np.power(variables[0]+5,2)+np.power(variables[1]+10,2))))
tmp1_sum = 0
for x in tmp1:
tmp1_sum += x
tmp2 = 1+0.0001*np.power((np.power(variables[0],2)+np.power(variables[1],2)),1.2)
return tmp1_sum*tmp2
##### Class Weighted Sphere function or hyper ellipsodic function #####
class WeightedSphere(OptimalBasic):
def __init__(self,variable_num):
super().__init__(variable_num)
self.max_search_range = np.array([5.12]*self.variable_num)
self.min_search_range = np.array([-5.12]*self.variable_num)
        self.optimal_solution = np.array([0.]*self.variable_num)
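        # Hedged completion (illustrative addition): the source is truncated
        # here. Assuming the conventional weighted-sphere definition
        # f(x) = sum_i (i+1)*x_i**2 and the attribute pattern of the classes
        # above, the remainder of the class would plausibly read:
        self.global_optimum_solution = 0.
        self.func_name = 'WeightedSphere'
    def get_func_val(self, variables):
        tmp = 0
        for i in range(self.variable_num):
            tmp += (i+1)*np.power(variables[i],2)
        return tmp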
# simple algorithm for the orthorhombic case
def find_nearest_ortho(positions,cell,i,j):
import numpy as np
distance=positions[j]-positions[i]
rv=cell
    # cell is orthorhombic, so only the diagonal elements need to be considered
xinit=distance[0]-2.0*rv[0,0]
yinit=distance[1]-2.0*rv[1,1]
zinit=distance[2]-2.0*rv[2,2]
    # consider distances over the 27 = 3x3x3 neighbouring periodic images
ii=np.array([i//9+1 for i in range(27)],dtype=float)
jj=np.array([(i//3)%3+1 for i in range(27)],dtype=float)
kk=np.array([i%3+1 for i in range(27)],dtype=float)
xcan=xinit+rv[0,0]*ii
ycan=yinit+rv[1,1]*jj
zcan=zinit+rv[2,2]*kk
candidate=np.stack((xcan,ycan,zcan),axis=1)
dist=[np.linalg.norm(candidate[i,:]) for i in range(27)]
    min = np.min(dist)
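# Hedged sketch (illustrative addition): find_nearest_ortho above stops after
# computing the minimum candidate distance, presumably the value it is meant
# to return. A compact equivalent using broadcasting over the 27 periodic
# images (same orthorhombic convention; an assumption, not the original code):
def nearest_ortho_distance(positions, cell, i, j):
    import numpy as np
    d = positions[j] - positions[i]
    shifts = np.array([-1.0, 0.0, 1.0])
    sx, sy, sz = np.meshgrid(shifts, shifts, shifts, indexing='ij')
    images = d + np.stack([sx.ravel()*cell[0, 0],
                           sy.ravel()*cell[1, 1],
                           sz.ravel()*cell[2, 2]], axis=1)
    return np.linalg.norm(images, axis=1).min()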
import sys
import time
#import csv
import numpy as np
from numpy.linalg import inv
def Regression(X, Y, lambda_value) :
    ''' Add a bias column of ones to X '''
col_Ones = np.ones((len(X), 1))
X = np.append(col_Ones, X, 1)
I = np.identity(len(X[0]))
#print(len(I))
I[0][0] = 0
    temp_1 = np.dot(np.transpose(X), X)
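# Hedged sketch (illustrative addition): the Regression function above is
# truncated after forming X^T X. The usual closed-form ridge solution it
# appears to be building, w = (X^T X + lambda*I)^(-1) X^T Y with an
# unpenalised intercept column, could be completed along these lines
# (an assumption, not the recovered original):
def ridge_weights(X, Y, lambda_value):
    X = np.append(np.ones((len(X), 1)), X, 1)    # bias column
    I = np.identity(X.shape[1])
    I[0][0] = 0                                  # do not penalise the bias
    XtX = np.dot(np.transpose(X), X)
    return np.dot(inv(XtX + lambda_value * I), np.dot(np.transpose(X), Y))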
#=======================================================================
def load(Fcmn): # Load imaging datasets from caiman output to custom object
#=======================================================================
import os
import re
from scipy import io
import numpy as np
# These things ideally need to be defined in the Fish files
#-----------------------------------------------------------------------------
spacing = [1.6, 1.6, 8]
xy_size = [451, 298]
# Arrange optional prefix
#-----------------------------------------------------------------------------
prefx = ''
if prefx: prefx = prefx + '_'
# Find all fish folders
#-----------------------------------------------------------------------------
dirlist = os.listdir(Fcmn)
r = re.compile(prefx + '[A-Z].*')
fishies = list(filter(r.match, dirlist))
print('I found ' + str(len(fishies)) + ' datasets')
# Go through through folder tree Fish > Condition > Plane.mat
#-----------------------------------------------------------------------------
Fish = []
for f in fishies:
print('Loading dataset ' + f)
condlist = os.listdir(Fcmn + os.sep + f)
r = re.compile('^[A-Z]')
condlist = list(filter(r.match, condlist))
Condition = []
for c in condlist:
files = os.listdir(Fcmn + os.sep + f + os.sep + c)
r = re.compile('^[A-Z]')
files = list(filter(r.match, files))
files.sort()
testp = io.loadmat(Fcmn + os.sep + f + os.sep + c + os.sep + files[0])
pid = 0
xyz = np.empty((0,3))
pixco = np.empty((0,3))
dff = np.empty((0,testp["Temporal"].shape[1]))
for p in files:
tp = io.loadmat(Fcmn + os.sep + f + os.sep + c + os.sep + p)
# Unpack coordinates
#---------------------------------------------------------------
for s in range(tp["Spatial"].shape[1]):
xy = np.reshape(tp["Spatial"][:,s].toarray(), xy_size, order='C')
x,y = np.where(xy)
PIXCO = np.array([int(np.mean(x)), int(np.mean(y)), pid])
pixco = np.vstack((pixco,PIXCO))
XYZ = np.array([np.mean(x) * spacing[0], np.mean(y) * spacing[1], pid * spacing[2]])
xyz = np.vstack((xyz, XYZ))
DFF = tp["Temporal"]
dff = np.vstack((dff, DFF))
pid = pid + 1
Condition.append({"Name":c, "Data":dff, "Coordinates":xyz, "Pixels":pixco})
Fish.append({"Name":f, "Cond":Condition, "Path":Fcmn, "xy_size":xy_size, "spacing":spacing})
return Fish
#=======================================================================
def nneigh(cs, rng = None, dim = [1,1,1], cnt=5, fullmat = False): # xyz (or xy) coordinates of nodes
#=======================================================================
import numpy as np
# Set up nearest neighbour graph
#---------------------------------------------------------------------------
mcs = np.multiply(cs, dim) # metrically scaled coordinates (in microns)
# Initialise full distance matrix and nearest neighbour graph (binary) matrix
#---------------------------------------------------------------------------
nnb = np.zeros((cs.shape[0],cs.shape[0]))
dismat = np.zeros((cs.shape[0], cs.shape[0]))
    if rng is None: rng = cs.shape[0] * 2 + 1
# Loop through all cells to fill in distances
#---------------------------------------------------------------------------
for r in range(cs.shape[0]):
        dis = np.ones((1,cs.shape[0]))
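# Hedged sketch (illustrative addition): nneigh above is truncated inside its
# loop. Judging from its arguments (rng, cnt, fullmat), it appears meant to
# fill the distance matrix and link each cell to its cnt nearest neighbours
# within range rng. A plausible shape for that loop (an assumption, not the
# original code) is:
def nneigh_sketch(cs, rng=None, dim=[1, 1, 1], cnt=5, fullmat=False):
    import numpy as np
    mcs = np.multiply(cs, dim)
    n = cs.shape[0]
    nnb = np.zeros((n, n))
    dismat = np.zeros((n, n))
    if rng is None:
        rng = n * 2 + 1
    for r in range(n):
        dis = np.linalg.norm(mcs - mcs[r], axis=1)
        dismat[r, :] = dis
        nearest = np.argsort(dis)[1:cnt + 1]          # skip the cell itself
        nnb[r, nearest[dis[nearest] <= rng]] = 1
    return (nnb, dismat) if fullmat else nnb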
#!/usr/bin/env python
"""
Homogeneous Transformation Matrices
"""
import math
import numpy as np
# Local modules
import baldor as br
def are_equal(T1, T2, rtol=1e-5, atol=1e-8):
"""
Returns True if two homogeneous transformation are equal within a tolerance.
Parameters
----------
T1: array_like
First input homogeneous transformation
T2: array_like
Second input homogeneous transformation
rtol: float
The relative tolerance parameter.
atol: float
The absolute tolerance parameter.
Returns
-------
equal : bool
True if `T1` and `T2` are `almost` equal, False otherwise
See Also
--------
numpy.allclose: Contains the details about the tolerance parameters
"""
M1 = np.array(T1, dtype=np.float64, copy=True)
M1 /= M1[3,3]
M2 = np.array(T2, dtype=np.float64, copy=True)
M2 /= M2[3,3]
return np.allclose(M1, M2, rtol, atol)
def between_axes(axis_a, axis_b):
"""
Compute the transformation that aligns two vectors/axes.
Parameters
----------
axis_a: array_like
The initial axis
axis_b: array_like
The goal axis
Returns
-------
transform: array_like
The transformation that transforms `axis_a` into `axis_b`
"""
a_unit = br.vector.unit(axis_a)
b_unit = br.vector.unit(axis_b)
c = np.dot(a_unit, b_unit)
angle = np.arccos(c)
if np.isclose(c, -1.0) or np.allclose(a_unit, b_unit):
axis = br.vector.perpendicular(b_unit)
else:
axis = br.vector.unit(np.cross(a_unit, b_unit))
transform = br.axis_angle.to_transform(axis, angle)
return transform
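# Usage sketch (illustrative addition): the transform returned by
# between_axes should rotate axis_a onto axis_b. A quick self-check using
# only functions defined in this module:
def _example_between_axes():
    import numpy as np
    a = np.array([1., 0., 0.])
    b = np.array([0., 0., 1.])
    T = between_axes(a, b)
    return np.allclose(np.dot(T[:3, :3], a), b)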
def inverse(transform):
"""
Compute the inverse of an homogeneous transformation.
.. note:: This function is more efficient than :obj:`numpy.linalg.inv` given
the special properties of homogeneous transformations.
Parameters
----------
transform: array_like
The input homogeneous transformation
Returns
-------
inv: array_like
The inverse of the input homogeneous transformation
"""
R = transform[:3,:3].T
p = transform[:3,3]
inv = np.eye(4)
inv[:3,:3] = R
inv[:3,3] = np.dot(-R, p)
return inv
def random(max_position=1.):
"""
Generate a random homogeneous transformation.
Parameters
----------
max_position: float, optional
Maximum value for the position components of the transformation
Returns
-------
T: array_like
The random homogeneous transformation
Examples
--------
>>> import numpy as np
>>> import baldor as br
>>> T = br.transform.random()
>>> Tinv = br.transform.inverse(T)
>>> np.allclose(np.dot(T, Tinv), np.eye(4))
True
"""
quat = br.quaternion.random()
T = br.quaternion.to_transform(quat)
T[:3,3] = np.random.rand(3)*max_position
return T
def to_axis_angle(transform):
"""
Return rotation angle and axis from rotation matrix.
Parameters
----------
transform: array_like
The input homogeneous transformation
Returns
-------
axis: array_like
axis around which rotation occurs
angle: float
angle of rotation
point: array_like
point around which the rotation is performed
Examples
--------
>>> import numpy as np
>>> import baldor as br
>>> axis = np.random.sample(3) - 0.5
>>> angle = (np.random.sample(1) - 0.5) * (2*np.pi)
>>> point = np.random.sample(3) - 0.5
>>> T0 = br.axis_angle.to_transform(axis, angle, point)
>>> axis, angle, point = br.transform.to_axis_angle(T0)
>>> T1 = br.axis_angle.to_transform(axis, angle, point)
>>> br.transform.are_equal(T0, T1)
True
"""
R = np.array(transform, dtype=np.float64, copy=False)
R33 = R[:3,:3]
# direction: unit eigenvector of R33 corresponding to eigenvalue of 1
w, W = np.linalg.eig(R33.T)
i = np.where(abs(np.real(w) - 1.0) < 1e-8)[0]
if not len(i):
raise ValueError("no unit eigenvector corresponding to eigenvalue 1")
axis = np.real(W[:, i[-1]]).squeeze()
# point: unit eigenvector of R corresponding to eigenvalue of 1
w, Q = np.linalg.eig(R)
i = np.where(abs(np.real(w) - 1.0) < 1e-8)[0]
if not len(i):
raise ValueError("no unit eigenvector corresponding to eigenvalue 1")
point = np.real(Q[:, i[-1]]).squeeze()
point /= point[3]
# rotation angle depending on axis
cosa = (np.trace(R33) - 1.0) / 2.0
if abs(axis[2]) > 1e-8:
sina = (R[1, 0] + (cosa-1.0)*axis[0]*axis[1]) / axis[2]
elif abs(axis[1]) > 1e-8:
sina = (R[0, 2] + (cosa-1.0)*axis[0]*axis[2]) / axis[1]
else:
sina = (R[2, 1] + (cosa-1.0)*axis[1]*axis[2]) / axis[0]
angle = math.atan2(sina, cosa)
return axis, angle, point
def to_dual_quaternion(transform):
"""
Return quaternion from the rotation part of an homogeneous transformation.
Parameters
----------
transform: array_like
Rotation matrix. It can be (3x3) or (4x4)
isprecise: bool
If True, the input transform is assumed to be a precise rotation matrix and
a faster algorithm is used.
Returns
-------
qr: array_like
Quaternion in w, x, y z (real, then vector) for the rotation component
qt: array_like
Quaternion in w, x, y z (real, then vector) for the translation component
Notes
-----
Some literature prefers to use :math:`q` for the rotation component and
:math:`q'` for the translation component
"""
cot = lambda x: 1./np.tan(x)
R = np.eye(4)
R[:3,:3] = transform[:3,:3]
l,theta,_ = to_axis_angle(R)
t = transform[:3,3]
# Pitch d
d = np.dot(l.reshape(1,3), t.reshape(3,1))
# Point c
c = 0.5*(t-d*l) + cot(theta/2.)*np.cross(l,t)
# Moment vector
m = np.cross(c, l)
# Rotation quaternion
    qr = np.zeros(4)
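# Hedged sketch (illustrative addition): to_dual_quaternion above is truncated
# right after allocating qr. One standard way to finish it (an assumption, not
# the recovered code) is qr = (cos(theta/2), sin(theta/2)*l) and
# qt = 0.5 * (0, t) * qr, where * is the Hamilton product, written here
# explicitly so no extra API is assumed:
def _screw_to_dual_quaternion(l, theta, t):
    import numpy as np
    qr = np.concatenate(([np.cos(theta / 2.)],
                         np.sin(theta / 2.) * np.asarray(l)))
    def qmult(a, b):
        w1, x1, y1, z1 = a
        w2, x2, y2, z2 = b
        return np.array([w1*w2 - x1*x2 - y1*y2 - z1*z2,
                         w1*x2 + x1*w2 + y1*z2 - z1*y2,
                         w1*y2 - x1*z2 + y1*w2 + z1*x2,
                         w1*z2 + x1*y2 - y1*x2 + z1*w2])
    qt = 0.5 * qmult(np.concatenate(([0.], np.asarray(t).ravel())), qr)
    return qr, qt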
# https://scikit-learn.org/stable/auto_examples/inspection/plot_permutation_importance_multicollinear.html#sphx-glr-auto-examples-inspection-plot-permutation-importance-multicollinear-py
# https://orbi.uliege.be/bitstream/2268/155642/1/louppe13.pdf
# https://proceedings.neurips.cc/paper/2019/file/702cafa3bb4c9c86e4a3b6834b45aedd-Paper.pdf
# https://indico.cern.ch/event/443478/contributions/1098668/attachments/1157598/1664920/slides.pdf
import time
import warnings
from collections import defaultdict
from typing import Callable, Tuple
import joblib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy.cluster import hierarchy
from scipy.spatial.distance import squareform
from scipy.stats import spearmanr
from sklearn.ensemble import ExtraTreesClassifier, RandomForestClassifier
from sklearn.inspection import permutation_importance
from sklearn.model_selection import cross_val_score, train_test_split
from sklearn.neural_network import MLPClassifier
from sklearn.tree import DecisionTreeClassifier
def evaluate_cv(model, dataset_x, dataset_y, cv=None):
start_time = time.time()
scores = cross_val_score(model, dataset_x, dataset_y, cv=cv)
elapsed_time = time.time() - start_time
# print(f"mean CV score: {np.mean(scores):.3f} (in {elapsed_time:.3f} seconds)")
return scores
def check_class_imbalance(dataset_y):
_, occurrences = np.unique(dataset_y, return_counts=True)
print(f"{occurrences = }")
# highest fraction of class over samples
imbalance = np.max(occurrences / dataset_y.size)
print(f"{np.max(occurrences / dataset_y.size) = :.4f}")
print(f"{np.min(occurrences / dataset_y.size) = :.4f}")
print(f". . . . . . . . . . . . 1 / #classes = {1/(np.max(dataset_y)+1):.4f}")
return imbalance
def compare_score_imbalance(score: float, imbalance: float):
if score < 1.5 * imbalance:
warnings.warn(
f"{score = :.3f} is below {1.5*imbalance:.3f}, results may not be "
f"indicative (class_{imbalance = :.3f})"
)
else:
print(f"{score = :.3f} ({imbalance = :.3f})")
def check_correlation(dataset_x):
for i in range(dataset_x.shape[1]):
for j in range(i + 1, dataset_x.shape[1]):
coeff = np.corrcoef(dataset_x[:, i], dataset_x[:, j])[0, 1]
if np.abs(coeff) > 0.8:
# dataset_x[:, j] = np.random.rand(dataset_x.shape[0])
print(f"{i=} {j=} {coeff=}")
def new_model(
random_state,
n_estimators: int = 1000,
max_features: int = None,
max_depth: int = None,
) -> ExtraTreesClassifier:
return ExtraTreesClassifier(
n_estimators=n_estimators,
max_features=max_features,
max_depth=max_depth,
random_state=random_state,
)
def get_feature_idx(dataset_x, dataset_y, start=(), random_state=48):
cv = 5
def get_score_partial_features(indices: tuple):
partial_x = dataset_x[:, indices]
# model = new_model(random_state)
# model = new_model(random_state=random_state)
model = ExtraTreesClassifier(random_state=random_state)
return indices[-1], np.mean(evaluate_cv(model, partial_x, dataset_y, cv))
delayed_score = joblib.delayed(get_score_partial_features)
last_score = 0.0
selected = tuple(start)
candidates = list(set(range(dataset_x.shape[1])) - set(selected))
while True:
results = joblib.Parallel(n_jobs=-1)(
delayed_score(selected + (c,)) for c in candidates
)
best_idx, best_score = results[0]
for idx_, score_ in results[1:]:
if score_ > best_score:
best_score = score_
best_idx = idx_
if best_score - last_score < 0.01:
break
selected += (best_idx,)
candidates.remove(best_idx)
print(f"{best_score=:.3f} {selected=}")
last_score = best_score
return selected
def add_input_noise(dataset_x: np.ndarray, rel_scale: float):
scale = rel_scale * np.mean(np.abs(dataset_x), axis=1)
# numpy needs the first axis to be the same as the scale
size = dataset_x.shape[::-1]
noise = np.random.normal(scale=scale, size=size).T
return dataset_x + noise
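# Usage sketch (illustrative addition): add_input_noise perturbs each sample
# with Gaussian noise whose standard deviation is rel_scale times that
# sample's mean absolute value. Quick sanity check on synthetic data:
def _demo_add_input_noise():
    ds_x = np.random.rand(100, 5)
    noisy = add_input_noise(ds_x, rel_scale=0.1)
    assert noisy.shape == ds_x.shape
    return np.mean(np.abs(noisy - ds_x))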
def do_plot(dataset_x, dataset_y, stratify_classes=True, random_state=48):
model = new_model(random_state)
cv = 10
# check_correlation(dataset_x)
# imbalance = check_class_imbalance(dataset_y)
# split dataset
stratify = dataset_y if stratify_classes else None
X_train, X_test, Y_train, Y_test = train_test_split(
dataset_x,
dataset_y,
test_size=0.33,
random_state=random_state,
stratify=stratify,
)
# cv_scores = evaluate_cv(model, dataset_x, dataset_y, cv)
# print(
# f"{np.min(cv_scores)=}",
# f"{np.mean(cv_scores)=}",
# f"{np.median(cv_scores)=}",
# f"{np.max(cv_scores)=}",
# )
# compare_score_imbalance(np.mean(cv_scores), imbalance)
model.fit(X_train, Y_train)
ts_score = model.score(X_test, Y_test)
print(f"{ts_score=}")
feature_names = list(map(str, range(dataset_x.shape[1])))
# find the most important features (see sklearn doc)
# result = permutation_importance(
# model, X_train, Y_train, n_repeats=10, random_state=42
# )
# perm_sorted_idx = result.importances_mean.argsort()
# tree_importance_sorted_idx = np.argsort(model.feature_importances_)
# tree_indices = np.arange(0, len(model.feature_importances_)) + 0.5
# fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 8))
# ax1.barh(
# tree_indices, model.feature_importances_[tree_importance_sorted_idx], height=0.7
# )
# ax1.set_yticks(tree_indices)
# ax1.set_yticklabels([feature_names[i] for i in tree_importance_sorted_idx])
# ax1.set_ylim((0, len(model.feature_importances_)))
# ax2.boxplot(
# result.importances[perm_sorted_idx].T,
# vert=False,
# labels=[feature_names[i] for i in perm_sorted_idx],
# )
# fig.tight_layout()
# plt.show()
# find the correlated features
# fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 8))
fig, ax1 = plt.subplots(1, 1, figsize=(12, 8))
corr = spearmanr(dataset_x).correlation
# Ensure the correlation matrix is symmetric
corr = (corr + corr.T) / 2
np.fill_diagonal(corr, 1)
# We convert the correlation matrix to a distance matrix before performing
# hierarchical clustering using Ward's linkage.
distance_matrix = 1 - np.abs(corr)
dist_linkage = hierarchy.ward(squareform(distance_matrix))
dendro = hierarchy.dendrogram(
dist_linkage, labels=feature_names, ax=ax1, leaf_rotation=90
)
# dendro_idx = np.arange(0, len(dendro["ivl"]))
# ax2.imshow(corr[dendro["leaves"], :][:, dendro["leaves"]])
# ax2.set_xticks(dendro_idx)
# ax2.set_yticks(dendro_idx)
# ax2.set_xticklabels(dendro["ivl"], rotation="vertical")
# ax2.set_yticklabels(dendro["ivl"])
fig.tight_layout()
plt.show()
# for threshold in [3.5, 2.5, 1.5, 1.0, 0.8, 0.6, 0.4, 0.2, 0.1, 0.05]:
for threshold in [0.4]:
cluster_ids = hierarchy.fcluster(dist_linkage, threshold, criterion="distance")
cluster_id_to_feature_ids = defaultdict(list)
for idx, cluster_id in enumerate(cluster_ids):
cluster_id_to_feature_ids[cluster_id].append(idx)
selected_features = [v[0] for v in cluster_id_to_feature_ids.values()]
X_train_sel = X_train[:, selected_features]
X_test_sel = X_test[:, selected_features]
clf_sel = new_model(random_state=random_state)
clf_sel.fit(X_train_sel, Y_train)
score = clf_sel.score(X_test_sel, Y_test)
print(f"{threshold=:.3f} {score=:.3f} {len(selected_features)=}")
print(f"{selected_features=}")
def get_mdi_importance(ds_x, ds_y, model):
model.fit(ds_x, ds_y)
try:
importances = model.feature_importances_
if hasattr(model, "estimators_"):
std = np.std(
[tree.feature_importances_ for tree in model.estimators_], axis=0
)
else:
std = np.full_like(importances, np.nan)
return importances, std
except AttributeError as _:
return None
def get_permutation_importance(ds_x, ds_y, model, random_state):
# permutation importance
X_train, X_test, Y_train, Y_test = train_test_split(
ds_x, ds_y, test_size=0.33, random_state=random_state
)
model.fit(X_train, Y_train)
result = permutation_importance(
model, X_test, Y_test, random_state=random_state, n_repeats=10, n_jobs=-1,
)
return (result.importances_mean, result.importances_std)
def get_feature_importances(ds_x, ds_y, model_fn: Callable, random_state):
return (
get_permutation_importance(ds_x, ds_y, model_fn(random_state), random_state),
get_mdi_importance(ds_x, ds_y, model_fn(random_state)),
)
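# Usage sketch (illustrative addition): get_feature_importances expects a
# factory that builds a fresh model from a random_state. A minimal call on a
# synthetic dataset (make_classification is used only for this illustration):
def _demo_feature_importances():
    from sklearn.datasets import make_classification
    ds_x, ds_y = make_classification(n_samples=200, n_features=8,
                                     n_informative=4, random_state=0)
    (perm_mean, perm_std), (mdi_mean, mdi_std) = get_feature_importances(
        ds_x, ds_y, lambda rs: new_model(rs, n_estimators=50), random_state=0)
    return perm_mean, mdi_mean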
def study_model(ds_x, ds_y, random_state):
model_builders = [
lambda: ExtraTreesClassifier(
n_estimators=1000, max_features=None, n_jobs=-1, random_state=random_state
),
lambda: RandomForestClassifier(
n_estimators=1000, max_features=None, n_jobs=-1, random_state=random_state
),
lambda: MLPClassifier(hidden_layer_sizes=(128, 128), random_state=random_state),
]
df = pd.DataFrame()
# TODO
def add_features(
dataset_x: np.ndarray,
*,
n_comb_lin_droppout: int = 0,
n_noise: int = 0,
n_lin_comb: int = 0,
n_redundant: int = 0,
) -> np.ndarray:
"""add some correlated or noisy features to a dataset.
Args:
dataset_x (np.ndarray): original dataset
n_comb_lin_droppout (int): first apply a 30% dropout to the dataset and
then apply a linear combination with a small noise (scale=0.1*std)
n_noise (int): number of gaussian noise features to add (scale=1.0)
n_lin_comb (int): linear combination of the features with added
gaussian noise (scale=0.1*std) to add
n_redundant (int): number of redundant features to add with a gaussian
noise (scale=0.1*std)
Returns:
np.ndarray: the dataset, columns are added in order, at the right edge
"""
def _dropout() -> np.ndarray:
"compute one correlated noisy feature column"
weights = np.random.normal(loc=0, scale=1, size=(dataset_x.shape[1], 1))
        dropout = np.copy(dataset_x)
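# Hedged sketch (illustrative addition): add_features above is truncated
# inside its nested _dropout helper. Based on the docstring ("first apply a
# 30% dropout to the dataset and then apply a linear combination with a small
# noise (scale=0.1*std)"), one plausible shape for that helper is sketched
# below as a standalone function; the 0.3 dropout rate and the column-wise
# noise are taken from the docstring, not recovered code.
def _dropout_lin_comb(dataset_x: np.ndarray) -> np.ndarray:
    weights = np.random.normal(loc=0, scale=1, size=(dataset_x.shape[1], 1))
    dropped = np.copy(dataset_x)
    dropped[np.random.rand(*dropped.shape) < 0.3] = 0.0   # 30% dropout
    column = dropped @ weights                            # linear combination
    noise = np.random.normal(scale=0.1 * np.std(column), size=column.shape)
    return column + noise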
#!/usr/bin/env python3
from functools import reduce
import numpy as np
from numpy.linalg import inv
def matrix_power(a, n):
m = a.shape[0]
if n > 0:
return reduce(lambda x,y: x @ y, (a for b in range(n)))
elif n == 0:
return np.eye(m)
else:
        return reduce(lambda x,y: x @ y, (inv(a) for b in range(-n)))
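# Usage sketch (illustrative addition): matrix_power should agree with
# numpy.linalg.matrix_power for an invertible square matrix, including the
# zero and (via the inverse branch) negative exponents:
def _demo_matrix_power():
    a = np.array([[2., 1.], [1., 1.]])
    assert np.allclose(matrix_power(a, 3), np.linalg.matrix_power(a, 3))
    assert np.allclose(matrix_power(a, 0), np.eye(2))
    assert np.allclose(matrix_power(a, -2), np.linalg.matrix_power(a, -2))
    return True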
# -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import numpy as np
from scipy.constants import pi
from scipy.special import binom
import warnings
from .utilities import _initialize_figure, _format_axes
class _BasePipe(object):
"""
Template for pipe classes.
Pipe classes inherit from this class.
Attributes
----------
borehole : Borehole object
Borehole class object of the borehole containing the U-Tube.
    nPipes : int
        Number of U-Tubes, equal to 1.
    nInlets : int
        Total number of pipe inlets, equal to 1.
    nOutlets : int
        Total number of pipe outlets, equal to 1.
Notes
-----
The expected array shapes of input parameters and outputs are documented
for each class method. `nInlets` and `nOutlets` are the number of inlets
and outlets to the borehole, and both correspond to the number of
independent parallel pipes. `nSegments` is the number of discretized
segments along the borehole. `nPipes` is the number of pipes (i.e. the
number of U-tubes) in the borehole. `nDepths` is the number of depths at
which temperatures are evaluated.
"""
def __init__(self, borehole):
self.b = borehole
self.nPipes = 1
self.nInlets = 1
self.nOutlets = 1
def get_temperature(self, z, T_f_in, T_b, m_flow_borehole, cp_f):
"""
Returns the fluid temperatures of the borehole at a depth (z).
Parameters
----------
z : float or (nDepths,) array
Depths (in meters) to evaluate the fluid temperatures.
T_f_in : float or (nInlets,) array
Inlet fluid temperatures (in Celsius).
T_b : float or (nSegments,) array
Borehole wall temperatures (in Celsius).
m_flow_borehole : float or (nInlets,) array
Inlet mass flow rates (in kg/s) into the borehole.
cp_f : float or (nInlets,) array
Fluid specific isobaric heat capacity (in J/kg.degC).
Returns
-------
T_f : (2*nPipes,) or (nDepths, 2*nPipes,) array
Fluid temperature (in Celsius) in each pipe. The returned shape
depends on the type of the parameter `z`.
"""
T_b = np.atleast_1d(T_b)
nSegments = len(T_b)
z_all = np.atleast_1d(z).flatten()
AB = list(zip(*[self.coefficients_temperature(
zi, m_flow_borehole, cp_f, nSegments) for zi in z_all]))
a_in = np.stack(AB[0], axis=-1)
a_b = np.stack(AB[1], axis=-1)
T_f = np.einsum('ijk,j->ki', a_in, np.atleast_1d(T_f_in)) \
+ np.einsum('ijk,j->ki', a_b, T_b)
# Return 1d array if z was supplied as scalar
if np.isscalar(z):
T_f = T_f.flatten()
return T_f
def get_inlet_temperature(self, Q_f, T_b, m_flow_borehole, cp_f):
"""
Returns the outlet fluid temperatures of the borehole.
Parameters
----------
Q_f : float or (nInlets,) array
Heat extraction from the fluid circuits (in Watts).
T_b : float or (nSegments,) array
Borehole wall temperatures (in Celsius).
m_flow_borehole : float or (nInlets,) array
Inlet mass flow rates (in kg/s) into the borehole.
cp_f : float or (nInlets,) array
Fluid specific isobaric heat capacity (in J/kg.degC).
Returns
-------
T_in : float or (nOutlets,) array
Inlet fluid temperatures (in Celsius) into each inlet pipe. The
returned type corresponds to the type of the parameter `Q_f`.
"""
T_b = np.atleast_1d(T_b)
nSegments = len(T_b)
# Build coefficient matrices
a_qf, a_b = self.coefficients_inlet_temperature(
m_flow_borehole, cp_f, nSegments)
# Evaluate outlet temperatures
T_f_in = a_qf @ np.atleast_1d(Q_f) + a_b @ T_b
# Return float if Qf was supplied as scalar
if np.isscalar(Q_f) and not np.isscalar(T_f_in):
T_f_in = T_f_in.item()
return T_f_in
def get_outlet_temperature(self, T_f_in, T_b, m_flow_borehole, cp_f):
"""
Returns the outlet fluid temperatures of the borehole.
Parameters
----------
T_f_in : float or (nInlets,) array
Inlet fluid temperatures (in Celsius).
T_b : float or (nSegments,) array
Borehole wall temperatures (in Celsius).
m_flow_borehole : float or (nInlets,) array
Inlet mass flow rates (in kg/s) into the borehole.
cp_f : float or (nInlets,) array
Fluid specific isobaric heat capacity (in J/kg.degC).
Returns
-------
T_f_out : float or (nOutlets,) array
Outlet fluid temperatures (in Celsius) from each outlet pipe. The
returned type corresponds to the type of the parameter `T_f_in`.
"""
T_b = np.atleast_1d(T_b)
nSegments = len(T_b)
# Build coefficient matrices
a_in, a_b = self.coefficients_outlet_temperature(
m_flow_borehole, cp_f, nSegments)
# Evaluate outlet temperatures
T_f_out = a_in @ np.atleast_1d(T_f_in) + a_b @ T_b
# Return float if Tin was supplied as scalar
if np.isscalar(T_f_in) and not np.isscalar(T_f_out):
T_f_out = T_f_out.item()
return T_f_out
def get_borehole_heat_extraction_rate(
self, T_f_in, T_b, m_flow_borehole, cp_f):
"""
Returns the heat extraction rates of the borehole.
Parameters
----------
T_f_in : float or (nInlets,) array
Inlet fluid temperatures (in Celsius).
T_b : float or (nSegments,) array
Borehole wall temperatures (in Celsius).
m_flow_borehole : float or (nInlets,) array
Inlet mass flow rates (in kg/s) into the borehole.
cp_f : float or (nInlets,) array
Fluid specific isobaric heat capacity (in J/kg.degC).
Returns
-------
Q_b : float or (nSegments,) array
Heat extraction rates along each borehole segment (in Watts). The
returned type corresponds to the type of the parameter `T_b`.
"""
T_b = np.atleast_1d(T_b)
nSegments = len(T_b)
a_in, a_b = self.coefficients_borehole_heat_extraction_rate(
m_flow_borehole, cp_f, nSegments)
Q_b = a_in @ np.atleast_1d(T_f_in) + a_b @ T_b
# Return float if Tb was supplied as scalar
if np.isscalar(T_b) and not np.isscalar(Q_b):
Q_b = Q_b.item()
return Q_b
def get_fluid_heat_extraction_rate(
self, T_f_in, T_b, m_flow_borehole, cp_f):
"""
Returns the heat extraction rates of the borehole.
Parameters
----------
T_f_in : float or (nInlets,) array
Inlet fluid temperatures (in Celsius).
T_b : float or (nSegments,) array
Borehole wall temperatures (in Celsius).
m_flow_borehole : float or (nInlets,) array
Inlet mass flow rates (in kg/s) into the borehole.
cp_f : float or (nInlets,) array
Fluid specific isobaric heat capacity (in J/kg.degC).
Returns
-------
Q_f : float or (nOutlets,) array
Heat extraction rates from each fluid circuit (in Watts). The
returned type corresponds to the type of the parameter `T_f_in`.
"""
T_b = np.atleast_1d(T_b)
nSegments = len(T_b)
a_in, a_b = self.coefficients_fluid_heat_extraction_rate(
m_flow_borehole, cp_f, nSegments)
Q_f = a_in @ np.atleast_1d(T_f_in) + a_b @ T_b
# Return float if Tb was supplied as scalar
if np.isscalar(T_f_in) and not np.isscalar(Q_f):
Q_f = Q_f.item()
return Q_f
def get_total_heat_extraction_rate(
self, T_f_in, T_b, m_flow_borehole, cp_f):
"""
Returns the total heat extraction rate of the borehole.
Parameters
----------
T_f_in : float or (nInlets,) array
Inlet fluid temperatures (in Celsius).
T_b : float or (nSegments,) array
Borehole wall temperatures (in Celsius).
m_flow_borehole : float or (nInlets,) array
Inlet mass flow rates (in kg/s) into the borehole.
cp_f : float or (nInlets,) array
Fluid specific isobaric heat capacity (in J/kg.degC).
Returns
-------
Q_t : float
Total net heat extraction rate of the borehole (in Watts).
"""
Q_f = self.get_fluid_heat_extraction_rate(
T_f_in, T_b, m_flow_borehole, cp_f)
Q_t = np.sum(Q_f)
return Q_t
def coefficients_inlet_temperature(self, m_flow_borehole, cp_f, nSegments):
"""
Build coefficient matrices to evaluate outlet fluid temperature.
Returns coefficients for the relation:
.. math::
\\mathbf{T_{f,in}} = \\mathbf{a_{q,f}} \\mathbf{Q_{f}}
+ \\mathbf{a_{b}} \\mathbf{T_b}
Parameters
----------
m_flow_borehole : float or (nInlets,) array
Inlet mass flow rates (in kg/s) into the borehole.
cp_f : float or (nInlets,) array
Fluid specific isobaric heat capacity (in J/kg.degC).
nSegments : int
Number of borehole segments.
Returns
-------
a_qf : (nOutlets, nInlets,) array
Array of coefficients for inlet fluid temperature.
a_b : (nOutlets, nSegments,) array
Array of coefficients for borehole wall temperatures.
"""
# method_id for coefficients_inlet_temperature is 3
method_id = 3
# Check if stored coefficients are available
if self._check_coefficients(
m_flow_borehole, cp_f, nSegments, method_id):
a_qf, a_b = self._get_stored_coefficients(method_id)
else:
# Coefficient matrices for fluid heat extraction rates:
# [Q_{f}] = [b_in]*[T_{f,in}] + [b_b]*[T_{b}]
b_in, b_b = self.coefficients_fluid_heat_extraction_rate(
m_flow_borehole, cp_f, nSegments)
b_in_m1 = np.linalg.inv(b_in)
# Matrices for fluid heat extraction rates:
# [T_{f,in}] = [a_qf]*[Q_{f}] + [a_b]*[T_{b}]
a_qf = b_in_m1
a_b = -b_in_m1 @ b_b
# Store coefficients
self._set_stored_coefficients(
m_flow_borehole, cp_f, nSegments, (a_qf, a_b), method_id)
return a_qf, a_b
def coefficients_outlet_temperature(
self, m_flow_borehole, cp_f, nSegments):
"""
Build coefficient matrices to evaluate outlet fluid temperature.
Returns coefficients for the relation:
.. math::
\\mathbf{T_{f,out}} = \\mathbf{a_{in}} \\mathbf{T_{f,in}}
+ \\mathbf{a_{b}} \\mathbf{T_b}
Parameters
----------
m_flow_borehole : float or (nInlets,) array
Inlet mass flow rates (in kg/s) into the borehole.
cp_f : float or (nInlets,) array
Fluid specific isobaric heat capacity (in J/kg.degC).
nSegments : int
Number of borehole segments.
Returns
-------
a_in : (nOutlets, nInlets,) array
Array of coefficients for inlet fluid temperature.
a_b : (nOutlets, nSegments,) array
Array of coefficients for borehole wall temperatures.
"""
# method_id for coefficients_outlet_temperature is 4
method_id = 4
# Check if stored coefficients are available
if self._check_coefficients(
m_flow_borehole, cp_f, nSegments, method_id):
a_in, a_b = self._get_stored_coefficients(method_id)
else:
# Check if _continuity_condition_base need to be called
# method_id for _continuity_condition_base is 0
if self._check_coefficients(m_flow_borehole, cp_f, nSegments, 0):
b_in, b_out, b_b = self._get_stored_coefficients(0)
else:
# Coefficient matrices from continuity condition:
# [b_out]*[T_{f,out}] = [b_in]*[T_{f,in}] + [b_b]*[T_b]
b_in, b_out, b_b = self._continuity_condition_base(
m_flow_borehole, cp_f, nSegments)
# Store coefficients
self._set_stored_coefficients(
m_flow_borehole, cp_f, nSegments, (b_in, b_out, b_b), 0)
# Final coefficient matrices for outlet temperatures:
# [T_{f,out}] = [a_in]*[T_{f,in}] + [a_b]*[T_b]
b_out_m1 = np.linalg.inv(b_out)
a_in = b_out_m1 @ b_in
a_b = b_out_m1 @ b_b
# Store coefficients
self._set_stored_coefficients(
m_flow_borehole, cp_f, nSegments, (a_in, a_b), method_id)
return a_in, a_b
def coefficients_temperature(self, z, m_flow_borehole, cp_f, nSegments):
"""
Build coefficient matrices to evaluate fluid temperatures at a depth
(z).
Returns coefficients for the relation:
.. math::
\\mathbf{T_f}(z) = \\mathbf{a_{in}} \\mathbf{T_{f,in}}
+ \\mathbf{a_{b}} \\mathbf{T_b}
Parameters
----------
z : float
Depth (in meters) to evaluate the fluid temperature coefficients.
m_flow_borehole : float or (nInlets,) array
Inlet mass flow rate (in kg/s) into the borehole.
cp_f : float or (nInlets,) array
Fluid specific isobaric heat capacity (in J/kg.degC).
nSegments : int
Number of borehole segments.
Returns
-------
a_in : (2*nPipes, nInlets,) array
Array of coefficients for inlet fluid temperature.
a_b : (2*nPipes, nSegments,) array
Array of coefficients for borehole wall temperatures.
"""
# method_id for coefficients_temperature is 5
method_id = 5
# Coefficient matrices for outlet temperatures:
# [T_{f,out}] = [b_in]*[T_{f,in}] + [b_b]*[T_b]
b_in, b_b = self.coefficients_outlet_temperature(
m_flow_borehole, cp_f, nSegments)
# Check if _continuity_condition_head need to be called
# method_id for _continuity_condition_head is 1
if self._check_coefficients(m_flow_borehole, cp_f, nSegments, 1):
c_in, c_out, c_b = self._get_stored_coefficients(1)
else:
# Coefficient matrices for temperatures at depth (z = 0):
# [T_f](0) = [c_in]*[T_{f,in}] + [c_out]*[T_{f,out}]
# + [c_b]*[T_b]
c_in, c_out, c_b = self._continuity_condition_head(
m_flow_borehole, cp_f, nSegments)
# Store coefficients
self._set_stored_coefficients(
m_flow_borehole, cp_f, nSegments, (c_in, c_out, c_b), 1)
# Coefficient matrices from general solution:
# [T_f](z) = [d_f0]*[T_f](0) + [d_b]*[T_b]
d_f0, d_b = self._general_solution(z, m_flow_borehole, cp_f, nSegments)
# Final coefficient matrices for temperatures at depth (z):
# [T_f](z) = [a_in]*[T_{f,in}] + [a_b]*[T_b]
a_in = d_f0 @ (c_in + c_out @ b_in)
a_b = d_f0 @ (c_b + c_out @ b_b) + d_b
return a_in, a_b
def coefficients_borehole_heat_extraction_rate(
self, m_flow_borehole, cp_f, nSegments):
"""
Build coefficient matrices to evaluate heat extraction rates.
Returns coefficients for the relation:
.. math::
\\mathbf{Q_b} = \\mathbf{a_{in}} \\mathbf{T_{f,in}}
+ \\mathbf{a_{b}} \\mathbf{T_b}
Parameters
----------
m_flow_borehole : float or (nInlets,) array
Inlet mass flow rate (in kg/s) into the borehole.
cp_f : float or (nInlets,) array
Fluid specific isobaric heat capacity (in J/kg.degC).
nSegments : int
Number of borehole segments.
Returns
-------
a_in : (nSegments, nInlets,) array
Array of coefficients for inlet fluid temperature.
a_b : (nSegments, nSegments,) array
Array of coefficients for borehole wall temperatures.
"""
# method_id for coefficients_borehole_heat_extraction_rate is 6
method_id = 6
nPipes = self.nPipes
# Check if stored coefficients are available
if self._check_coefficients(m_flow_borehole, cp_f, nSegments, method_id):
a_in, a_b = self._get_stored_coefficients(method_id)
else:
# Update input variables
self._format_inputs(m_flow_borehole, cp_f, nSegments)
m_flow_pipe = self._m_flow_pipe
cp_pipe = self._cp_pipe
mcp = np.hstack((-m_flow_pipe[0:nPipes],
m_flow_pipe[-nPipes:]))*cp_pipe
# Initialize coefficient matrices
a_in = np.zeros((nSegments, self.nInlets))
a_b = np.zeros((nSegments, nSegments))
# Heat extraction rates are calculated from an energy balance on a
# borehole segment.
z1 = 0.
aTf1, bTf1 = self.coefficients_temperature(
z1, m_flow_borehole, cp_f, nSegments)
for i in range(nSegments):
z2 = (i + 1) * self.b.H / nSegments
aTf2, bTf2 = self.coefficients_temperature(
z2, m_flow_borehole, cp_f, nSegments)
a_in[i, :] = mcp @ (aTf1 - aTf2)
a_b[i, :] = mcp @ (bTf1 - bTf2)
aTf1, bTf1 = aTf2, bTf2
# Store coefficients
self._set_stored_coefficients(
m_flow_borehole, cp_f, nSegments, (a_in, a_b), method_id)
return a_in, a_b
def coefficients_fluid_heat_extraction_rate(
self, m_flow_borehole, cp_f, nSegments):
"""
Build coefficient matrices to evaluate heat extraction rates.
Returns coefficients for the relation:
.. math::
\\mathbf{Q_f} = \\mathbf{a_{in}} \\mathbf{T_{f,in}}
+ \\mathbf{a_{b}} \\mathbf{T_b}
Parameters
----------
m_flow_borehole : float or (nInlets,) array
Inlet mass flow rate (in kg/s) into the borehole.
cp_f : float or (nInlets,) array
Fluid specific isobaric heat capacity (in J/kg.degC).
nSegments : int
Number of borehole segments.
Returns
-------
a_in : (nOutlets, nInlets,) array
Array of coefficients for inlet fluid temperature.
a_b : (nOutlets, nSegments,) array
Array of coefficients for borehole wall temperatures.
"""
# method_id for coefficients_fluid_heat_extraction_rate is 7
method_id = 7
# Check if stored coefficients are available
if self._check_coefficients(m_flow_borehole, cp_f, nSegments, method_id):
a_in, a_b = self._get_stored_coefficients(method_id)
else:
# Update input variables
self._format_inputs(m_flow_borehole, cp_f, nSegments)
# Coefficient matrices for outlet temperatures:
# [T_{f,out}] = [b_in]*[T_{f,in}] + [b_b]*[T_b]
b_in, b_b = self.coefficients_outlet_temperature(
m_flow_borehole, cp_f, nSegments)
# Intermediate matrices for fluid heat extraction rates:
# [Q_{f}] = [c_in]*[T_{f,in}] + [c_out]*[T_{f,out}]
MCP = self._m_flow_in * self._cp_in
c_in = -np.diag(MCP)
c_out = np.diag(MCP)
# Matrices for fluid heat extraction rates:
# [Q_{f}] = [a_in]*[T_{f,in}] + [a_b]*[T_{b}]
a_in = c_in + c_out @ b_in
a_b = c_out @ b_b
# Store coefficients
self._set_stored_coefficients(
m_flow_borehole, cp_f, nSegments, (a_in, a_b), method_id)
return a_in, a_b
def visualize_pipes(self):
"""
Plot the cross-section view of the borehole.
Returns
-------
fig : figure
Figure object (matplotlib).
"""
# Configure figure and axes
fig = _initialize_figure()
ax = fig.add_subplot(111)
ax.set_xlabel(r'$x$ [m]')
ax.set_ylabel(r'$y$ [m]')
ax.axis('equal')
_format_axes(ax)
# Color cycle
prop_cycle = plt.rcParams['axes.prop_cycle']
colors = prop_cycle.by_key()['color']
lw = plt.rcParams['lines.linewidth']
# Borehole wall outline
ax.plot([-self.b.r_b, 0., self.b.r_b, 0.],
[0., self.b.r_b, 0., -self.b.r_b],
'k.', alpha=0.)
borewall = plt.Circle(
(0., 0.), radius=self.b.r_b, fill=False,
color='k', linestyle='--', lw=lw)
ax.add_patch(borewall)
# Pipes
for i in range(self.nPipes):
# Coordinates of pipes
(x_in, y_in) = self.pos[i]
(x_out, y_out) = self.pos[i + self.nPipes]
# Pipe outline (inlet)
pipe_in_in = plt.Circle(
(x_in, y_in), radius=self.r_in,
fill=False, linestyle='-', color=colors[i], lw=lw)
pipe_in_out = plt.Circle(
(x_in, y_in), radius=self.r_out,
fill=False, linestyle='-', color=colors[i], lw=lw)
ax.text(x_in, y_in, i, ha="center", va="center")
# Pipe outline (outlet)
pipe_out_in = plt.Circle(
(x_out, y_out), radius=self.r_in,
fill=False, linestyle='-', color=colors[i], lw=lw)
pipe_out_out = plt.Circle(
(x_out, y_out), radius=self.r_out,
fill=False, linestyle='-', color=colors[i], lw=lw)
ax.text(x_out, y_out, i + self.nPipes,
ha="center", va="center")
ax.add_patch(pipe_in_in)
ax.add_patch(pipe_in_out)
ax.add_patch(pipe_out_in)
ax.add_patch(pipe_out_out)
plt.tight_layout()
return fig
def _initialize_stored_coefficients(self):
nMethods = 8 # Number of class methods
self._stored_coefficients = [() for i in range(nMethods)]
self._stored_m_flow_cp = [np.empty(self.nInlets)
for i in range(nMethods)]
self._stored_nSegments = [np.nan for i in range(nMethods)]
self._m_flow_cp_model_variables = np.empty(self.nInlets)
self._nSegments_model_variables = np.nan
def _set_stored_coefficients(
self, m_flow_borehole, cp_f, nSegments, coefficients, method_id):
self._stored_coefficients[method_id] = coefficients
self._stored_m_flow_cp[method_id] = m_flow_borehole*cp_f
self._stored_nSegments[method_id] = nSegments
def _get_stored_coefficients(self, method_id):
coefficients = self._stored_coefficients[method_id]
return coefficients
def _check_model_variables(self, m_flow_borehole, cp_f, nSegments, tol=1e-6):
stored_m_flow_cp = self._m_flow_cp_model_variables
stored_nSegments = self._nSegments_model_variables
if (np.all(np.abs(m_flow_borehole*cp_f - stored_m_flow_cp) < np.abs(stored_m_flow_cp)*tol)
and nSegments == stored_nSegments):
check = True
else:
self._update_model_variables(m_flow_borehole, cp_f, nSegments)
self._m_flow_cp_model_variables = m_flow_borehole*cp_f
self._nSegments_model_variables = nSegments
check = False
return check
def _check_coefficients(
self, m_flow_borehole, cp_f, nSegments, method_id, tol=1e-6):
stored_m_flow_cp = self._stored_m_flow_cp[method_id]
stored_nSegments = self._stored_nSegments[method_id]
if (np.all(np.abs(m_flow_borehole*cp_f - stored_m_flow_cp) < np.abs(stored_m_flow_cp)*tol)
and nSegments == stored_nSegments):
check = True
else:
check = False
return check
def _check_geometry(self):
""" Verifies the inputs to the pipe object and raises an error if
the geometry is not valid.
"""
# Verify that thermal properties are greater than 0.
if not self.k_s > 0.:
raise ValueError(
'The ground thermal conductivity must be greater than zero. '
'A value of {} was provided.'.format(self.k_s))
if not self.k_g > 0.:
raise ValueError(
'The grout thermal conductivity must be greater than zero. '
'A value of {} was provided.'.format(self.k_g))
if not self.R_fp > 0.:
raise ValueError(
'The fluid to outer pipe wall thermal resistance must be'
'greater than zero. '
'A value of {} was provided.'.format(self.R_fp))
# Verify that the pipe radius is greater than zero.
if not self.r_in > 0.:
raise ValueError(
'The pipe inner radius must be greater than zero. '
'A value of {} was provided.'.format(self.r_in))
# Verify that the outer pipe radius is greater than the inner pipe
# radius.
if not self.r_out > self.r_in:
raise ValueError(
'The pipe outer radius must be greater than the pipe inner'
' radius. '
'A value of {} was provided.'.format(self.r_out))
# Verify that the number of multipoles is zero or greater.
if not self.J >= 0:
raise ValueError(
'The number of terms in the multipole expansion must be zero'
' or greater. '
'A value of {} was provided.'.format(self.J))
# Verify that the pipes are contained within the borehole.
for i in range(2*self.nPipes):
r_pipe = np.sqrt(self.pos[i][0]**2 + self.pos[i][1]**2)
if not r_pipe + self.r_out <= self.b.r_b:
raise ValueError(
'Pipes must be entirely contained within the borehole. '
'Pipe {} is partly or entirely outside the '
'borehole.'.format(i))
# Verify that the pipes do not collide to one another.
for i in range(2*self.nPipes):
for j in range(i+1, 2*self.nPipes):
dx = self.pos[i][0] - self.pos[j][0]
dy = self.pos[i][1] - self.pos[j][1]
dis = np.sqrt(dx**2 + dy**2)
if not dis >= 2*self.r_out:
raise ValueError(
'Pipes {} and {} are overlapping.'.format(i, j))
return True
def _continuity_condition_base(self, m_flow_borehole, cp_f, nSegments):
""" Returns coefficients for the relation
[a_out]*[T_{f,out}] = [a_in]*[T_{f,in}] + [a_b]*[T_b]
"""
raise NotImplementedError(
'_continuity_condition_base class method not implemented, '
'this method should return matrices for the relation: '
'[a_out]*[T_{f,out}] = [a_in]*[T_{f,in}] + [a_b]*[T_b]')
def _continuity_condition_head(self, m_flow_borehole, cp_f, nSegments):
""" Returns coefficients for the relation
[T_f](z=0) = [a_in]*[T_{f,in}] + [a_out]*[T_{f,out}] + [a_b]*[T_b]
"""
raise NotImplementedError(
'_continuity_condition_head class method not implemented, '
'this method should return matrices for the relation: '
'[T_f](z=0) = [a_in]*[T_{f,in}] + [a_out]*[T_{f,out}] '
'+ [a_b]*[T_b]')
def _general_solution(self, z, m_flow_borehole, cp_f, nSegments):
""" Returns coefficients for the relation
[T_f](z) = [a_f0]*[T_f](0) + [a_b]*[T_b]
"""
raise NotImplementedError(
'_general_solution class method not implemented, '
'this method should return matrices for the relation: '
'[T_f](z) = [a_f0]*[T_f](0) + [a_b]*[T_b]')
def _update_model_variables(self, m_flow_borehole, cp_f, nSegments):
"""
Evaluate common coefficients needed in other class methods.
"""
raise NotImplementedError(
'_update_coefficients class method not implemented, '
'this method should evaluate common coefficients needed in other '
'class methods.')
def _format_inputs(self, m_flow_borehole, cp_f, nSegments):
"""
Format arrays of mass flow rates and heat capacity.
"""
raise NotImplementedError(
'_format_inputs class method not implemented, '
'this method should format 1d arrays for the inlet mass flow '
'rates (_m_flow_in), mass flow rates in each pipe (_m_flow_pipe), '
'heat capacity at each inlet (_cp_in) and heat capacity in each '
'pipe (_cp_pipe).')
class SingleUTube(_BasePipe):
"""
Class for single U-Tube boreholes.
Contains information regarding the physical dimensions and thermal
characteristics of the pipes and the grout material, as well as methods to
evaluate fluid temperatures and heat extraction rates based on the work of
Hellstrom [#Single-Hellstrom1991]_.
Attributes
----------
pos : list of tuples
Position (x, y) (in meters) of the pipes inside the borehole.
r_in : float
Inner radius (in meters) of the U-Tube pipes.
r_out : float
Outer radius (in meters) of the U-Tube pipes.
borehole : Borehole object
Borehole class object of the borehole containing the U-Tube.
k_s : float
Soil thermal conductivity (in W/m-K).
k_g : float
Grout thermal conductivity (in W/m-K).
R_fp : float
Fluid to outer pipe wall thermal resistance (m-K/W).
J : int, optional
Number of multipoles per pipe to evaluate the thermal resistances.
Default is 2.
    nPipes : int
        Number of U-Tubes, equal to 1.
    nInlets : int
        Total number of pipe inlets, equal to 1.
    nOutlets : int
        Total number of pipe outlets, equal to 1.
Notes
-----
The expected array shapes of input parameters and outputs are documented
for each class method. `nInlets` and `nOutlets` are the number of inlets
and outlets to the borehole, and both are equal to 1 for a single U-tube
borehole. `nSegments` is the number of discretized segments along the
borehole. `nPipes` is the number of pipes (i.e. the number of U-tubes) in
the borehole, equal to 1. `nDepths` is the number of depths at which
temperatures are evaluated.
References
----------
.. [#Single-Hellstrom1991] <NAME>. (1991). Ground heat storage.
Thermal Analyses of Duct Storage Systems I: Theory. PhD Thesis.
University of Lund, Department of Mathematical Physics. Lund, Sweden.
"""
def __init__(self, pos, r_in, r_out, borehole, k_s, k_g, R_fp, J=2):
self.pos = pos
self.r_in = r_in
self.r_out = r_out
self.b = borehole
self.k_s = k_s
self.k_g = k_g
self.R_fp = R_fp
self.J = J
self.nPipes = 1
self.nInlets = 1
self.nOutlets = 1
self._check_geometry()
# Delta-circuit thermal resistances
self._Rd = thermal_resistances(pos, r_out, borehole.r_b,
k_s, k_g, self.R_fp, J=self.J)[1]
# Initialize stored_coefficients
self._initialize_stored_coefficients()
def _continuity_condition_base(self, m_flow_borehole, cp_f, nSegments):
"""
Equation that satisfies equal fluid temperatures in both legs of
each U-tube pipe at depth (z = H).
Returns coefficients for the relation:
.. math::
\\mathbf{a_{out}} T_{f,out} =
\\mathbf{a_{in}} \\mathbf{T_{f,in}}
+ \\mathbf{a_{b}} \\mathbf{T_b}
Parameters
----------
m_flow_borehole : float or (nInlets,) array
Inlet mass flow rate (in kg/s) into the borehole.
cp_f : float or (nInlets,) array
Fluid specific isobaric heat capacity (in J/kg.degC).
nSegments : int
Number of borehole segments.
Returns
-------
a_in : (nOutlets, nInlets,) array
Array of coefficients for inlet fluid temperature.
a_out : (nOutlets, nOutlets,) array
Array of coefficients for outlet fluid temperature.
a_b : (nOutlets, nSegments,) array
Array of coefficients for borehole wall temperatures.
"""
# Check if model variables need to be updated
self._check_model_variables(m_flow_borehole, cp_f, nSegments)
# Evaluate coefficient matrices from Hellstrom (1991):
a_in = ((self._f1(self.b.H) + self._f2(self.b.H))
/ (self._f3(self.b.H) - self._f2(self.b.H)))
a_in = np.array([[a_in]])
a_out = np.array([[1.0]])
a_b = np.zeros((self.nOutlets, nSegments))
z = (nSegments - np.arange(nSegments + 1)) * self.b.H / nSegments
F4 = self._F4(z)
dF4 = F4[:-1] - F4[1:]
F5 = self._F5(z)
dF5 = F5[:-1] - F5[1:]
a_b[0, :] = (dF4 + dF5) / (self._f3(self.b.H) - self._f2(self.b.H))
return a_in, a_out, a_b
def _continuity_condition_head(self, m_flow_borehole, cp_f, nSegments):
"""
Build coefficient matrices to evaluate fluid temperatures at depth
(z = 0). These coefficients take into account connections between
U-tube pipes.
Returns coefficients for the relation:
.. math::
\\mathbf{T_f}(z=0) = \\mathbf{a_{in}} \\mathbf{T_{f,in}}
+ \\mathbf{a_{out}} \\mathbf{T_{f,out}}
+ \\mathbf{a_{b}} \\mathbf{T_{b}}
Parameters
----------
m_flow_borehole : float or (nInlets,) array
Inlet mass flow rate (in kg/s) into the borehole.
cp_f : float or (nInlets,) array
Fluid specific isobaric heat capacity (in J/kg.degC).
nSegments : int
Number of borehole segments.
Returns
-------
a_in : (2*nPipes, nInlets,) array
Array of coefficients for inlet fluid temperature.
a_out : (2*nPipes, nOutlets,) array
Array of coefficients for outlet fluid temperature.
a_b : (2*nPipes, nSegments,) array
Array of coefficients for borehole wall temperature.
"""
# Check if model variables need to be updated
self._check_model_variables(m_flow_borehole, cp_f, nSegments)
# There is only one pipe
a_in = np.array([[1.0], [0.0]])
a_out = np.array([[0.0], [1.0]])
a_b = np.zeros((2, nSegments))
return a_in, a_out, a_b
def _general_solution(self, z, m_flow_borehole, cp_f, nSegments):
"""
General solution for fluid temperatures at a depth (z).
Returns coefficients for the relation:
.. math::
\\mathbf{T_f}(z) = \\mathbf{a_{f0}} \\mathbf{T_{f}}(z=0)
+ \\mathbf{a_{b}} \\mathbf{T_b}
Parameters
----------
z : float
Depth (in meters) to evaluate the fluid temperature coefficients.
m_flow_borehole : float or (nInlets,) array
Inlet mass flow rate (in kg/s) into the borehole.
cp_f : float or (nInlets,) array
Fluid specific isobaric heat capacity (in J/kg.degC).
nSegments : int
Number of borehole segments.
Returns
-------
a_f0 : (2*nPipes, 2*nPipes,) array
Array of coefficients for inlet fluid temperature.
a_b : (2*nPipes, nSegments,) array
Array of coefficients for borehole wall temperatures.
"""
# Check if model variables need to be updated
self._check_model_variables(m_flow_borehole, cp_f, nSegments)
a_f0 = np.array([[self._f1(z), self._f2(z)],
[-self._f2(z), self._f3(z)]])
a_b = np.zeros((2*self.nPipes, nSegments))
N = int(np.ceil(z/self.b.H*nSegments))
z1 = z - np.minimum(np.arange(1, N+1)*self.b.H/nSegments, z)
z2 = z - np.arange(N) * self.b.H / nSegments
dF4 = self._F4(z2) - self._F4(z1)
dF5 = self._F5(z2) - self._F5(z1)
a_b[0, :N] = dF4
a_b[1, :N] = -dF5
return a_f0, a_b
def _update_model_variables(self, m_flow_borehole, cp_f, nSegments):
"""
Evaluate dimensionless resistances for Hellstrom (1991) solution.
Parameters
----------
m_flow_borehole : float or (nInlets,) array
Inlet mass flow rate (in kg/s) into the borehole.
cp_f : float or (nInlets,) array
Fluid specific isobaric heat capacity (in J/kg.degC).
nSegments : int
Number of borehole segments.
"""
# Format mass flow rate and heat capacity inputs
self._format_inputs(m_flow_borehole, cp_f, nSegments)
m_flow_in = self._m_flow_in
cp_in = self._cp_in
# Dimensionless delta-circuit conductances
self._beta1 = 1./(self._Rd[0][0]*m_flow_in[0]*cp_in[0])
self._beta2 = 1./(self._Rd[1][1]*m_flow_in[0]*cp_in[0])
self._beta12 = 1./(self._Rd[0][1]*m_flow_in[0]*cp_in[0])
self._beta = 0.5*(self._beta2 - self._beta1)
# Eigenvalues
self._gamma = np.sqrt(0.25*(self._beta1+self._beta2)**2
+ self._beta12*(self._beta1+self._beta2))
self._delta = 1./self._gamma \
* (self._beta12 + 0.5*(self._beta1+self._beta2))
def _format_inputs(self, m_flow_borehole, cp_f, nSegments):
"""
Format mass flow rate and heat capacity inputs.
Parameters
----------
m_flow_borehole : float or (nInlets,) array
Inlet mass flow rate (in kg/s) into the borehole.
cp_f : float or (nInlets,) array
Fluid specific isobaric heat capacity (in J/kg.degC).
nSegments : int
Number of borehole segments.
"""
# Format mass flow rate inputs
if np.isscalar(m_flow_borehole):
# Mass flow rate in each fluid circuit
m_flow_in = m_flow_borehole*np.ones(self.nInlets)
else:
# Mass flow rate in each fluid circuit
m_flow_in = m_flow_borehole
self._m_flow_in = m_flow_in
# Mass flow rate in pipes
m_flow_pipe = np.tile(m_flow_in, 2*self.nPipes)
self._m_flow_pipe = m_flow_pipe
# Format heat capacity inputs
if np.isscalar(cp_f):
# Heat capacity in each fluid circuit
cp_in = cp_f*np.ones(self.nInlets)
else:
# Heat capacity in each fluid circuit
cp_in = cp_f
self._cp_in = cp_in
# Heat capacity in pipes
cp_pipe = np.tile(cp_in, 2*self.nPipes)
self._cp_pipe = cp_pipe
def _f1(self, z):
"""
Calculate function f1 from Hellstrom (1991)
Parameters
----------
z : float
Depth (in meters) to evaluate the fluid temperature coefficients.
"""
f1 = np.exp(self._beta*z)*(np.cosh(self._gamma*z)
- self._delta*np.sinh(self._gamma*z))
return f1
def _f2(self, z):
"""
Calculate function f2 from Hellstrom (1991)
Parameters
----------
z : float
Depth (in meters) to evaluate the fluid temperature coefficients.
"""
f2 = np.exp(self._beta*z)*self._beta12/self._gamma \
* np.sinh(self._gamma*z)
return f2
def _f3(self, z):
"""
Calculate function f3 from Hellstrom (1991)
Parameters
----------
z : float
Depth (in meters) to evaluate the fluid temperature coefficients.
"""
f3 = np.exp(self._beta*z)*(np.cosh(self._gamma*z)
+ self._delta*np.sinh(self._gamma*z))
return f3
def _f4(self, z):
"""
Calculate function f4 from Hellstrom (1991)
Parameters
----------
z : float
Depth (in meters) to evaluate the fluid temperature coefficients.
"""
A = self._delta*self._beta1 + self._beta2*self._beta12/self._gamma
f4 = np.exp(self._beta*z) \
* (self._beta1*np.cosh(self._gamma*z) - A*np.sinh(self._gamma*z))
return f4
def _f5(self, z):
"""
Calculate function f5 from Hellstrom (1991)
Parameters
----------
z : float
Depth (in meters) to evaluate the fluid temperature coefficients.
"""
B = self._delta*self._beta2 + self._beta1*self._beta12/self._gamma
f5 = np.exp(self._beta*z) \
* (self._beta2*np.cosh(self._gamma*z) + B*np.sinh(self._gamma*z))
return f5
def _F4(self, z):
"""
Calculate integral of function f4 from Hellstrom (1991)
Parameters
----------
z : float
Depth (in meters) to evaluate the fluid temperature coefficients.
"""
A = self._delta*self._beta1 + self._beta2*self._beta12/self._gamma
C = self._beta1*self._beta + A*self._gamma
S = - (self._beta1*self._gamma + self._beta*A)
denom = (self._beta**2 - self._gamma**2)
F4 = np.exp(self._beta*z) / denom \
* (C*np.cosh(self._gamma*z) + S*np.sinh(self._gamma*z))
return F4
def _F5(self, z):
"""
Calculate integral of function f5 from Hellstrom (1991)
Parameters
----------
z : float
Depth (in meters) to evaluate the fluid temperature coefficients.
"""
B = self._delta*self._beta2 + self._beta1*self._beta12/self._gamma
C = self._beta2*self._beta - B*self._gamma
S = - (self._beta2*self._gamma - self._beta*B)
denom = (self._beta**2 - self._gamma**2)
F5 = np.exp(self._beta*z) / denom \
* (C*np.cosh(self._gamma*z) + S*np.sinh(self._gamma*z))
return F5
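# Standalone check (illustration only, not part of the class): verify numerically that
# the closed-form expression in _F4 above is an antiderivative of _f4. The dimensionless
# coefficients are made-up values chosen for the test, and scipy is assumed available.
def _demo_check_F4():
    import numpy as np
    from scipy.integrate import quad
    beta1, beta2, beta12 = 0.3, 0.4, 0.2
    beta = 0.5 * (beta2 - beta1)
    gamma = np.sqrt(0.25 * (beta1 + beta2)**2 + beta12 * (beta1 + beta2))
    delta = (beta12 + 0.5 * (beta1 + beta2)) / gamma
    A = delta * beta1 + beta2 * beta12 / gamma
    # f4 and its closed-form antiderivative, written exactly as in the methods above
    f4 = lambda z: np.exp(beta * z) * (beta1 * np.cosh(gamma * z) - A * np.sinh(gamma * z))
    C = beta1 * beta + A * gamma
    S = -(beta1 * gamma + beta * A)
    F4 = lambda z: np.exp(beta * z) / (beta**2 - gamma**2) \
        * (C * np.cosh(gamma * z) + S * np.sinh(gamma * z))
    assert np.isclose(F4(10.0) - F4(0.0), quad(f4, 0.0, 10.0)[0])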
class MultipleUTube(_BasePipe):
"""
Class for multiple U-Tube boreholes.
Contains information regarding the physical dimensions and thermal
characteristics of the pipes and the grout material, as well as methods to
evaluate fluid temperatures and heat extraction rates based on the work of
Cimmino [#Cimmino2016]_ for boreholes with any number of U-tubes.
Attributes
----------
pos : list of tuples
Position (x, y) (in meters) of the pipes inside the borehole.
r_in : float
Inner radius (in meters) of the U-Tube pipes.
r_out : float
Outer radius (in meters) of the U-Tube pipes.
borehole : Borehole object
Borehole class object of the borehole containing the U-Tube.
k_s : float
Soil thermal conductivity (in W/m-K).
k_g : float
Grout thermal conductivity (in W/m-K).
R_fp : float
Fluid to outer pipe wall thermal resistance (m-K/W).
J : int, optional
Number of multipoles per pipe to evaluate the thermal resistances.
Default is 2.
nPipes : int
Number of U-Tubes.
config : str, defaults to 'parallel'
Configuration of the U-Tube pipes:
'parallel' : U-tubes are connected in parallel.
'series' : U-tubes are connected in series.
nInlets : int
Total number of pipe inlets, equal to 1.
nOutlets : int
Total number of pipe outlets, equal to 1.
Notes
-----
The expected array shapes of input parameters and outputs are documented
for each class method. `nInlets` and `nOutlets` are the number of inlets
and outlets to the borehole, and both are equal to 1 for a multiple U-tube
borehole. `nSegments` is the number of discretized segments along the
borehole. `nPipes` is the number of pipes (i.e. the number of U-tubes) in
the borehole. `nDepths` is the number of depths at which temperatures are
evaluated.
References
----------
.. [#Cimmino2016] Cimmino, M. (2016). Fluid and borehole wall temperature
profiles in vertical geothermal boreholes with multiple U-tubes.
Renewable Energy, 96, 137-147.
"""
def __init__(self, pos, r_in, r_out, borehole, k_s,
k_g, R_fp, nPipes, config='parallel', J=2):
self.pos = pos
self.r_in = r_in
self.r_out = r_out
self.b = borehole
self.k_s = k_s
self.k_g = k_g
self.R_fp = R_fp
self.J = J
self.nPipes = nPipes
self.nInlets = 1
self.nOutlets = 1
self.config = config.lower()
self._check_geometry()
# Delta-circuit thermal resistances
self._Rd = thermal_resistances(pos, r_out, borehole.r_b,
k_s, k_g, self.R_fp, J=self.J)[1]
# Initialize stored_coefficients
self._initialize_stored_coefficients()
def _continuity_condition_base(self, m_flow_borehole, cp_f, nSegments):
"""
Equation that satisfies equal fluid temperatures in both legs of
each U-tube pipe at depth (z = H).
Returns coefficients for the relation:
.. math::
\\mathbf{a_{out}} T_{f,out} = \\mathbf{a_{in}} T_{f,in}
+ \\mathbf{a_{b}} \\mathbf{T_b}
Parameters
----------
m_flow_borehole : float or (nInlets,) array
Inlet mass flow rate (in kg/s) into the borehole.
cp_f : float or (nInlets,) array
Fluid specific isobaric heat capacity (in J/kg.degC).
nSegments : int
Number of borehole segments.
Returns
-------
a_in : (nOutlets, nInlets,) array
Array of coefficients for inlet fluid temperature.
a_out : (nOutlets, nOutlets,) array
Array of coefficients for outlet fluid temperature.
a_b : (nOutlets, nSegments,) array
Array of coefficients for borehole wall temperatures.
"""
# Check if model variables need to be updated
self._check_model_variables(m_flow_borehole, cp_f, nSegments)
# Coefficient matrices from continuity condition:
# [b_u]*[T_{f,u}](z=0) = [b_d]*[T_{f,d}](z=0) + [b_b]*[T_b]
b_d, b_u, b_b = self._continuity_condition(
m_flow_borehole, cp_f, nSegments)
b_u_m1 = np.linalg.inv(b_u)
if self.config == 'parallel':
# Intermediate coefficient matrices:
# [T_{f,d}](z=0) = [c_in]*[T_{f,in}]
c_in = np.ones((self.nPipes, 1))
# Intermediate coefficient matrices:
# [T_{f,out}] = d_u*[T_{f,u}](z=0)
mcp = self._m_flow_pipe[-self.nPipes:]*self._cp_pipe[-self.nPipes:]
d_u = np.reshape(mcp/np.sum(mcp), (1, -1))
# Final coefficient matrices for continuity at depth (z = H):
# [a_out][T_{f,out}] = [a_in]*[T_{f,in}] + [a_b]*[T_b]
a_in = d_u @ b_u_m1 @ b_d @ c_in
a_out = np.array([[1.0]])
a_b = d_u @ b_u_m1 @ b_b
elif self.config == 'series':
# Intermediate coefficient matrices:
# [T_{f,d}](z=0) = [c_in]*[T_{f,in}] + [c_u]*[T_{f,u}](z=0)
c_in = np.eye(self.nPipes, M=1)
c_u = np.eye(self.nPipes, k=-1)
# Intermediate coefficient matrices:
# [d_u]*[T_{f,u}](z=0) = [d_in]*[T_{f,in}] + [d_b]*[T_b]
d_u = b_u - b_d @ c_u
d_in = b_d @ c_in
d_b = b_b
d_u_m1 = np.linalg.inv(d_u)
# Intermediate coefficient matrices:
# [T_{f,out}] = e_u*[T_{f,u}](z=0)
e_u = np.eye(self.nPipes, M=1, k=-self.nPipes+1).T
# Final coefficient matrices for continuity at depth (z = H):
# [a_out][T_{f,out}] = [a_in]*[T_{f,in}] + [a_b]*[T_b]
a_in = e_u @ d_u_m1 @ d_in
a_out = np.array([[1.0]])
a_b = e_u @ d_u_m1 @ d_b
else:
raise NotImplementedError("Configuration '{}' not implemented.".format(self.config))
return a_in, a_out, a_b
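# Illustration (standalone sketch, not part of the class): the connectivity matrices
# used in the 'series' branch above, written out for nPipes = 3. c_in routes the
# borehole inlet into the first downward pipe, c_u feeds the outlet of U-tube i into
# the inlet of U-tube i+1, and e_u picks the last upward pipe as the borehole outlet.
def _demo_series_connectivity(nPipes=3):
    import numpy as np
    c_in = np.eye(nPipes, M=1)                  # [[1.], [0.], [0.]]
    c_u = np.eye(nPipes, k=-1)                  # ones on the first subdiagonal
    e_u = np.eye(nPipes, M=1, k=-nPipes + 1).T  # [[0., 0., 1.]]
    return c_in, c_u, e_u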
def _continuity_condition_head(self, m_flow_borehole, cp_f, nSegments):
"""
Build coefficient matrices to evaluate fluid temperatures at depth
(z = 0). These coefficients take into account connections between
U-tube pipes.
Returns coefficients for the relation:
.. math::
\\mathbf{T_f}(z=0) = \\mathbf{a_{in}} \\mathbf{T_{f,in}}
+ \\mathbf{a_{out}} \\mathbf{T_{f,out}}
+ \\mathbf{a_{b}} \\mathbf{T_{b}}
Parameters
----------
m_flow_borehole : float or (nInlets,) array
Inlet mass flow rate (in kg/s) into the borehole.
cp_f : float or (nInlets,) array
Fluid specific isobaric heat capacity (in J/kg.degC).
nSegments : int
Number of borehole segments.
Returns
-------
a_in : (2*nPipes, nInlets,) array
Array of coefficients for inlet fluid temperature.
a_out : (2*nPipes, nOutlets,) array
Array of coefficients for outlet fluid temperature.
a_b : (2*nPipes, nSegments,) array
Array of coefficients for borehole wall temperature.
"""
# Check if model variables need to be updated
self._check_model_variables(m_flow_borehole, cp_f, nSegments)
if self.config == 'parallel':
a_in = np.vstack((np.ones((self.nPipes, self.nInlets)),
np.zeros((self.nPipes, self.nInlets))))
a_out = np.vstack((np.zeros((self.nPipes, self.nOutlets)),
np.ones((self.nPipes, self.nOutlets))))
a_b = np.zeros((2*self.nPipes, nSegments))
elif self.config == 'series':
# Coefficient matrices from continuity condition:
# [b_u]*[T_{f,u}](z=0) = [b_d]*[T_{f,d}](z=0) + [b_b]*[T_b]
b_d, b_u, b_b = self._continuity_condition(
m_flow_borehole, cp_f, nSegments)
# Intermediate coefficient matrices:
# [T_{f,d}](z=0) = [c_in]*[T_{f,in}] + [c_u]*[T_{f,u}](z=0)
c_in = np.eye(self.nPipes, M=1)
c_u = np.eye(self.nPipes, k=-1)
# Intermediate coefficient matrices:
# [d_u]*[T_{f,u}](z=0) = [d_in]*[T_{f,in}] + [d_b]*[T_b]
d_u = b_u - b_d @ c_u
d_in = b_d @ c_in
d_b = b_b
d_u_m1 = np.linalg.inv(d_u)
# Intermediate coefficient matrices:
# [T_f](z=0) = [e_d]*[T_{f,d}](z=0) + [e_u]*[T_{f,u}](z=0)
e_d = np.eye(2*self.nPipes, M=self.nPipes)
e_u = np.eye(2*self.nPipes, M=self.nPipes, k=-self.nPipes)
# Final coefficient matrices for temperatures at depth (z = 0):
# [T_f](z=0) = [a_in]*[T_{f,in}]+[a_out]*[T_{f,out}]+[a_b]*[T_b]
a_in = e_d @ (c_in + c_u @ d_u_m1 @ d_in) + e_u @ d_u_m1 @ d_in
a_out = np.zeros((2*self.nPipes, self.nOutlets))
a_b = e_d @ c_u @ d_u_m1 @ d_b + e_u @ d_u_m1 @ d_b
else:
raise NotImplementedError("Configuration '{}' not implemented.".format(self.config))
return a_in, a_out, a_b
def _continuity_condition(self, m_flow_borehole, cp_f, nSegments):
"""
Build coefficient matrices to evaluate fluid temperatures in downward
and upward flowing pipes at depth (z = 0).
Returns coefficients for the relation:
.. math::
\\mathbf{a_{u}} \\mathbf{T_{f,u}}(z=0) =
+ \\mathbf{a_{d}} \\mathbf{T_{f,d}}(z=0)
+ \\mathbf{a_{b}} \\mathbf{T_{b}}
Parameters
----------
m_flow_borehole : float or (nInlets,) array
Inlet mass flow rate (in kg/s) into the borehole.
cp_f : float or (nInlets,) array
Fluid specific isobaric heat capacity (in J/kg.degC).
nSegments : int
Number of borehole segments.
Returns
-------
a_d : (nPipes, nPipes,) array
Array of coefficients for fluid temperature in downward flowing
pipes.
a_u : (nPipes, nPipes,) array
Array of coefficients for fluid temperature in upward flowing
pipes.
a_b : (nPipes, nSegments,) array
Array of coefficients for borehole wall temperature.
"""
# Load coefficients
sumA = self._sumA
V = self._V
Vm1 = self._Vm1
L = self._L
Dm1 = self._Dm1
# Matrix exponential at depth (z = H)
H = self.b.H
E = np.real(V @ np.diag(np.exp(L*H)) @ Vm1)
# Coefficient matrix for borehole wall temperatures
IIm1 = np.hstack((np.eye(self.nPipes), -np.eye(self.nPipes)))
a_b = np.zeros((self.nPipes, nSegments))
z = H - np.arange(nSegments + 1) * H / nSegments
exp_Lz = np.exp(np.multiply.outer(L, z))
dexp_Lz = exp_Lz[:,:-1] - exp_Lz[:,1:]
a_b = np.real(((IIm1 @ V @ Dm1) * (Vm1 @ sumA)) @ dexp_Lz)
# Configuration-specific inlet and outlet coefficient matrices
IZER = np.vstack((np.eye(self.nPipes),
np.zeros((self.nPipes, self.nPipes))))
ZERI = np.vstack((np.zeros((self.nPipes, self.nPipes)),
np.eye(self.nPipes)))
a_u = IIm1 @ E @ ZERI
a_d = -IIm1 @ E @ IZER
return a_d, a_u, a_b
def _general_solution(self, z, m_flow_borehole, cp_f, nSegments):
"""
General solution for fluid temperatures at a depth (z).
Returns coefficients for the relation:
.. math::
\\mathbf{T_f}(z) = \\mathbf{a_{f0}} \\mathbf{T_{f}}(z=0)
+ \\mathbf{a_{b}} \\mathbf{T_b}
Parameters
----------
z : float
Depth (in meters) to evaluate the fluid temperature coefficients.
m_flow_borehole : float or (nInlets,) array
Inlet mass flow rate (in kg/s) into the borehole.
cp_f : float or (nInlets,) array
Fluid specific isobaric heat capacity (in J/kg.degC).
nSegments : int
Number of borehole segments.
Returns
-------
a_f0 : (2*nPipes, 2*nPipes,) array
Array of coefficients for the fluid temperatures at the top of the borehole (z = 0).
a_b : (2*nPipes, nSegments,) array
Array of coefficients for borehole wall temperatures.
"""
# Check if model variables need to be updated
self._check_model_variables(m_flow_borehole, cp_f, nSegments)
# Load coefficients
sumA = self._sumA
V = self._V
Vm1 = self._Vm1
L = self._L
Dm1 = self._Dm1
# Matrix exponential at depth (z)
a_f0 = np.real(V @ np.diag(np.exp(L*z)) @ Vm1)
# Coefficient matrix for borehole wall temperatures
a_b = np.zeros((2*self.nPipes, nSegments))
dz = z - np.minimum(z, np.arange(nSegments+1)*self.b.H/nSegments)
exp_Lz = np.exp(np.multiply.outer(L, dz))
dexp_Lz = exp_Lz[:,1:] - exp_Lz[:,:-1]
a_b = np.real(((V @ Dm1) * (Vm1 @ sumA)) @ dexp_Lz)
return a_f0, a_b
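# Standalone sketch (illustration only, not part of the class): _general_solution above
# evaluates the matrix exponential exp(A*z) through the stored eigen-decomposition
# A = V diag(L) V^-1, so that exp(A*z) = V diag(exp(L*z)) V^-1. The check below uses a
# made-up 4x4 coupling matrix and compares against scipy.linalg.expm (scipy assumed).
def _demo_eigen_matrix_exponential():
    import numpy as np
    from scipy.linalg import expm
    rng = np.random.default_rng(0)
    A = 0.3 * rng.normal(size=(4, 4))   # stand-in for the pipe coupling matrix
    L, V = np.linalg.eig(A)
    Vm1 = np.linalg.inv(V)
    z = 0.5
    E_eig = np.real(V @ np.diag(np.exp(L * z)) @ Vm1)
    assert np.allclose(E_eig, expm(A * z))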
def _update_model_variables(self, m_flow_borehole, cp_f, nSegments):
"""
Evaluate eigenvalues and eigenvectors for the system of differential
equations.
Parameters
----------
m_flow_borehole : float or (nInlets,) array
Inlet mass flow rate (in kg/s) into the borehole.
cp_f : float or (nInlets,) array
Fluid specific isobaric heat capacity (in J/kg.degC).
nSegments : int
Number of borehole segments.
"""
nPipes = self.nPipes
# Format mass flow rate and heat capacity inputs
self._format_inputs(m_flow_borehole, cp_f, nSegments)
m_flow_pipe = self._m_flow_pipe
cp_pipe = self._cp_pipe
# Coefficient matrix for differential equations
self._A = 1.0 / (self._Rd.T * m_flow_pipe * cp_pipe).T
for i in range(2*nPipes):
self._A[i, i] = -self._A[i, i] - sum(
[self._A[i, j] for j in range(2*nPipes) if not i == j])
for i in range(nPipes, 2*nPipes):
self._A[i, :] = - self._A[i, :]
self._sumA = | np.sum(self._A, axis=1) | numpy.sum |
#!/usr/bin/env python
from __future__ import print_function
import ctypes
from functools import partial
from collections import namedtuple
import sys
if sys.version_info[0] < 3:
from collections import Sequence
else:
from collections.abc import Sequence
import numpy as np
import cv2 as cv
from tests_common import NewOpenCVTests, unittest
def is_numeric(dtype):
return np.issubdtype(dtype, np.integer) or np.issubdtype(dtype, np.floating)
def get_limits(dtype):
if not is_numeric(dtype):
return None, None
if np.issubdtype(dtype, np.integer):
info = np.iinfo(dtype)
else:
info = np.finfo(dtype)
return info.min, info.max
def get_conversion_error_msg(value, expected, actual):
return 'Conversion "{}" of type "{}" failed\nExpected: "{}" vs Actual "{}"'.format(
value, type(value).__name__, expected, actual
)
def get_no_exception_msg(value):
return 'Exception is not raised for {} of type {}'.format(value, type(value).__name__)
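# Quick illustration (not part of the test suite): what the helpers above return. numpy
# accepts ctypes integer types wherever a dtype is expected, which is why get_limits
# works for both ctypes and numpy types.
def _demo_get_limits():
    print(get_limits(ctypes.c_int))        # (-2147483648, 2147483647) on common platforms
    print(get_limits(np.uint8))            # (0, 255)
    print(get_limits(np.float32))          # (most negative, largest finite float32)
    print(is_numeric(np.dtype(np.int16)))  # True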
class Bindings(NewOpenCVTests):
def test_inheritance(self):
bm = cv.StereoBM_create()
bm.getPreFilterCap() # from StereoBM
bm.getBlockSize() # from StereoMatcher
boost = cv.ml.Boost_create()
boost.getBoostType() # from ml::Boost
boost.getMaxDepth() # from ml::DTrees
boost.isClassifier() # from ml::StatModel
def test_raiseGeneralException(self):
with self.assertRaises((cv.error,),
msg='C++ exception is not propagated to Python in the right way') as cm:
cv.utils.testRaiseGeneralException()
self.assertEqual(str(cm.exception), 'exception text')
def test_redirectError(self):
try:
cv.imshow("", None) # This causes an assert
self.assertEqual("Dead code", 0)
except cv.error as _e:
pass
handler_called = [False]
def test_error_handler(status, func_name, err_msg, file_name, line):
handler_called[0] = True
cv.redirectError(test_error_handler)
try:
cv.imshow("", None) # This causes an assert
self.assertEqual("Dead code", 0)
except cv.error as _e:
self.assertEqual(handler_called[0], True)
pass
cv.redirectError(None)
try:
cv.imshow("", None) # This causes an assert
self.assertEqual("Dead code", 0)
except cv.error as _e:
pass
def test_overload_resolution_can_choose_correct_overload(self):
val = 123
point = (51, 165)
self.assertEqual(cv.utils.testOverloadResolution(val, point),
'overload (int={}, point=(x={}, y={}))'.format(val, *point),
"Can't select first overload if all arguments are provided as positional")
self.assertEqual(cv.utils.testOverloadResolution(val, point=point),
'overload (int={}, point=(x={}, y={}))'.format(val, *point),
"Can't select first overload if one of the arguments are provided as keyword")
self.assertEqual(cv.utils.testOverloadResolution(val),
'overload (int={}, point=(x=42, y=24))'.format(val),
"Can't select first overload if one of the arguments has default value")
rect = (1, 5, 10, 23)
self.assertEqual(cv.utils.testOverloadResolution(rect),
'overload (rect=(x={}, y={}, w={}, h={}))'.format(*rect),
"Can't select second overload if all arguments are provided")
def test_overload_resolution_fails(self):
def test_overload_resolution(msg, *args, **kwargs):
no_exception_msg = 'Overload resolution failed without any exception for: "{}"'.format(msg)
wrong_exception_msg = 'Overload resolution failed with wrong exception type for: "{}"'.format(msg)
with self.assertRaises((cv.error, Exception), msg=no_exception_msg) as cm:
res = cv.utils.testOverloadResolution(*args, **kwargs)
self.fail("Unexpected result for {}: '{}'".format(msg, res))
self.assertEqual(type(cm.exception), cv.error, wrong_exception_msg)
test_overload_resolution('wrong second arg type (keyword arg)', 5, point=(1, 2, 3))
test_overload_resolution('wrong second arg type', 5, 2)
test_overload_resolution('wrong first arg', 3.4, (12, 21))
test_overload_resolution('wrong first arg, no second arg', 4.5)
test_overload_resolution('wrong args number for first overload', 3, (12, 21), 123)
test_overload_resolution('wrong args number for second overload', (3, 12, 12, 1), (12, 21))
# One of the common problems
test_overload_resolution('rect with float coordinates', (4.5, 4, 2, 1))
test_overload_resolution('rect with wrong number of coordinates', (4, 4, 1))
class Arguments(NewOpenCVTests):
def _try_to_convert(self, conversion, value):
try:
result = conversion(value).lower()
except Exception as e:
self.fail(
'{} "{}" is risen for conversion {} of type {}'.format(
type(e).__name__, e, value, type(value).__name__
)
)
else:
return result
def test_InputArray(self):
res1 = cv.utils.dumpInputArray(None)
# self.assertEqual(res1, "InputArray: noArray()") # not supported
self.assertEqual(res1, "InputArray: empty()=true kind=0x00010000 flags=0x01010000 total(-1)=0 dims(-1)=0 size(-1)=0x0 type(-1)=CV_8UC1")
res2_1 = cv.utils.dumpInputArray((1, 2))
self.assertEqual(res2_1, "InputArray: empty()=false kind=0x00010000 flags=0x01010000 total(-1)=2 dims(-1)=2 size(-1)=1x2 type(-1)=CV_64FC1")
res2_2 = cv.utils.dumpInputArray(1.5) # Scalar(1.5, 1.5, 1.5, 1.5)
self.assertEqual(res2_2, "InputArray: empty()=false kind=0x00010000 flags=0x01010000 total(-1)=4 dims(-1)=2 size(-1)=1x4 type(-1)=CV_64FC1")
a = np.array([[1, 2], [3, 4], [5, 6]])
res3 = cv.utils.dumpInputArray(a) # 32SC1
self.assertEqual(res3, "InputArray: empty()=false kind=0x00010000 flags=0x01010000 total(-1)=6 dims(-1)=2 size(-1)=2x3 type(-1)=CV_32SC1")
a = np.array([[[1, 2], [3, 4], [5, 6]]], dtype='f')
res4 = cv.utils.dumpInputArray(a) # 32FC2
self.assertEqual(res4, "InputArray: empty()=false kind=0x00010000 flags=0x01010000 total(-1)=3 dims(-1)=2 size(-1)=3x1 type(-1)=CV_32FC2")
a = np.array([[[1, 2]], [[3, 4]], [[5, 6]]], dtype=float)
res5 = cv.utils.dumpInputArray(a) # 64FC2
self.assertEqual(res5, "InputArray: empty()=false kind=0x00010000 flags=0x01010000 total(-1)=3 dims(-1)=2 size(-1)=1x3 type(-1)=CV_64FC2")
a = np.zeros((2,3,4), dtype='f')
res6 = cv.utils.dumpInputArray(a)
self.assertEqual(res6, "InputArray: empty()=false kind=0x00010000 flags=0x01010000 total(-1)=6 dims(-1)=2 size(-1)=3x2 type(-1)=CV_32FC4")
a = np.zeros((2,3,4,5), dtype='f')
res7 = cv.utils.dumpInputArray(a)
self.assertEqual(res7, "InputArray: empty()=false kind=0x00010000 flags=0x01010000 total(-1)=120 dims(-1)=4 size(-1)=[2 3 4 5] type(-1)=CV_32FC1")
def test_InputArrayOfArrays(self):
res1 = cv.utils.dumpInputArrayOfArrays(None)
# self.assertEqual(res1, "InputArray: noArray()") # not supported
self.assertEqual(res1, "InputArrayOfArrays: empty()=true kind=0x00050000 flags=0x01050000 total(-1)=0 dims(-1)=1 size(-1)=0x0")
res2_1 = cv.utils.dumpInputArrayOfArrays((1, 2)) # { Scalar:all(1), Scalar::all(2) }
self.assertEqual(res2_1, "InputArrayOfArrays: empty()=false kind=0x00050000 flags=0x01050000 total(-1)=2 dims(-1)=1 size(-1)=2x1 type(0)=CV_64FC1 dims(0)=2 size(0)=1x4")
res2_2 = cv.utils.dumpInputArrayOfArrays([1.5])
self.assertEqual(res2_2, "InputArrayOfArrays: empty()=false kind=0x00050000 flags=0x01050000 total(-1)=1 dims(-1)=1 size(-1)=1x1 type(0)=CV_64FC1 dims(0)=2 size(0)=1x4")
a = np.array([[1, 2], [3, 4], [5, 6]])
b = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
res3 = cv.utils.dumpInputArrayOfArrays([a, b])
self.assertEqual(res3, "InputArrayOfArrays: empty()=false kind=0x00050000 flags=0x01050000 total(-1)=2 dims(-1)=1 size(-1)=2x1 type(0)=CV_32SC1 dims(0)=2 size(0)=2x3")
c = np.array([[[1, 2], [3, 4], [5, 6]]], dtype='f')
res4 = cv.utils.dumpInputArrayOfArrays([c, a, b])
self.assertEqual(res4, "InputArrayOfArrays: empty()=false kind=0x00050000 flags=0x01050000 total(-1)=3 dims(-1)=1 size(-1)=3x1 type(0)=CV_32FC2 dims(0)=2 size(0)=3x1")
a = np.zeros((2,3,4), dtype='f')
res5 = cv.utils.dumpInputArrayOfArrays([a, b])
self.assertEqual(res5, "InputArrayOfArrays: empty()=false kind=0x00050000 flags=0x01050000 total(-1)=2 dims(-1)=1 size(-1)=2x1 type(0)=CV_32FC4 dims(0)=2 size(0)=3x2")
# TODO: fix conversion error
#a = np.zeros((2,3,4,5), dtype='f')
#res6 = cv.utils.dumpInputArray([a, b])
#self.assertEqual(res6, "InputArrayOfArrays: empty()=false kind=0x00050000 flags=0x01050000 total(-1)=2 dims(-1)=1 size(-1)=2x1 type(0)=CV_32FC1 dims(0)=4 size(0)=[2 3 4 5]")
def test_20968(self):
pixel = np.uint8([[[40, 50, 200]]])
_ = cv.cvtColor(pixel, cv.COLOR_RGB2BGR) # should not raise exception
def test_parse_to_bool_convertible(self):
try_to_convert = partial(self._try_to_convert, cv.utils.dumpBool)
for convertible_true in (True, 1, 64, np.bool(1), np.int8(123), np.int16(11), np.int32(2),
np.int64(1), np.bool_(3), np.bool8(12)):
actual = try_to_convert(convertible_true)
self.assertEqual('bool: true', actual,
msg=get_conversion_error_msg(convertible_true, 'bool: true', actual))
for convertible_false in (False, 0, np.uint8(0), np.bool_(0), np.int_(0)):
actual = try_to_convert(convertible_false)
self.assertEqual('bool: false', actual,
msg=get_conversion_error_msg(convertible_false, 'bool: false', actual))
def test_parse_to_bool_not_convertible(self):
for not_convertible in (1.2, np.float(2.3), 's', 'str', (1, 2), [1, 2], complex(1, 1),
complex(imag=2), complex(1.1), np.array([1, 0], dtype=np.bool)):
with self.assertRaises((TypeError, OverflowError),
msg=get_no_exception_msg(not_convertible)):
_ = cv.utils.dumpBool(not_convertible)
def test_parse_to_bool_convertible_extra(self):
try_to_convert = partial(self._try_to_convert, cv.utils.dumpBool)
_, max_size_t = get_limits(ctypes.c_size_t)
for convertible_true in (-1, max_size_t):
actual = try_to_convert(convertible_true)
self.assertEqual('bool: true', actual,
msg=get_conversion_error_msg(convertible_true, 'bool: true', actual))
def test_parse_to_bool_not_convertible_extra(self):
for not_convertible in (np.array([False]), np.array([True], dtype=np.bool)):
with self.assertRaises((TypeError, OverflowError),
msg=get_no_exception_msg(not_convertible)):
_ = cv.utils.dumpBool(not_convertible)
def test_parse_to_int_convertible(self):
try_to_convert = partial(self._try_to_convert, cv.utils.dumpInt)
min_int, max_int = get_limits(ctypes.c_int)
for convertible in (-10, -1, 2, int(43.2), np.uint8(15), np.int8(33), np.int16(-13),
np.int32(4), np.int64(345), (23), min_int, max_int, np.int_(33)):
expected = 'int: {0:d}'.format(convertible)
actual = try_to_convert(convertible)
self.assertEqual(expected, actual,
msg=get_conversion_error_msg(convertible, expected, actual))
def test_parse_to_int_not_convertible(self):
min_int, max_int = get_limits(ctypes.c_int)
for not_convertible in (1.2, np.float(4), float(3), np.double(45), 's', 'str',
np.array([1, 2]), (1,), [1, 2], min_int - 1, max_int + 1,
complex(1, 1), complex(imag=2), complex(1.1)):
with self.assertRaises((TypeError, OverflowError, ValueError),
msg=get_no_exception_msg(not_convertible)):
_ = cv.utils.dumpInt(not_convertible)
def test_parse_to_int_not_convertible_extra(self):
for not_convertible in (np.bool_(True), True, False, np.float32(2.3),
np.array([3, ], dtype=int), np.array([-2, ], dtype=np.int32),
np.array([1, ], dtype=np.int), np.array([11, ], dtype=np.uint8)):
with self.assertRaises((TypeError, OverflowError),
msg=get_no_exception_msg(not_convertible)):
_ = cv.utils.dumpInt(not_convertible)
def test_parse_to_size_t_convertible(self):
try_to_convert = partial(self._try_to_convert, cv.utils.dumpSizeT)
_, max_uint = get_limits(ctypes.c_uint)
for convertible in (2, max_uint, (12), np.uint8(34), np.int8(12), np.int16(23),
np.int32(123), np.int64(344), np.uint64(3), np.uint16(2), np.uint32(5),
np.uint(44)):
expected = 'size_t: {0:d}'.format(convertible).lower()
actual = try_to_convert(convertible)
self.assertEqual(expected, actual,
msg=get_conversion_error_msg(convertible, expected, actual))
def test_parse_to_size_t_not_convertible(self):
min_long, _ = get_limits(ctypes.c_long)
for not_convertible in (1.2, True, False, np.bool_(True), np.float(4), float(3),
| np.double(45) | numpy.double |
"""
Copyright: Intel Corp. 2018
Author: <NAME>
Email: <EMAIL>
Created Date: May 17th 2018
Updated Date: May 17th 2018
Training environment callbacks preset
"""
from pathlib import Path
from functools import partial
import numpy as np
from PIL.Image import Image
from ..Util.ImageProcess import array_to_img, img_to_array, imresize
def _sub_residual(**kwargs):
img = kwargs.get('input')
res = kwargs.get('output') or np.zeros_like(img)
res = res[0] if isinstance(res, list) else res
return img - res
def _save_model_predicted_images(output, index, mode='YCbCr', **kwargs):
save_dir = kwargs.get('save_dir') or '.'
name = kwargs.get('name')
if output is not None:
img = output[index] if isinstance(output, list) else output
img = _to_normalized_image(img, mode)
path = Path(f'{save_dir}/{name}_PR.png')
path.parent.mkdir(parents=True, exist_ok=True)
rep = 1
while path.exists():
path = Path(f'{save_dir}/{name}_PR_{rep}.png')
rep += 1
img.convert('RGB').save(str(path))
return output
def _colored_grayscale_image(outputs, input, **kwargs):
ret = []
for img in outputs:
assert img.shape[-1] == 1
scale = np.array(img.shape[1:3]) // np.array(input.shape[1:3])
uv = array_to_img(input[0], 'YCbCr')
uv = imresize(uv, scale)
uv = img_to_array(uv)[..., 1:]
img = np.concatenate([img[0], uv], axis=-1)
img = np.clip(img, 0, 255)
ret.append(array_to_img(img, 'YCbCr'))
return ret
def _to_normalized_image(img, mode):
img = np.asarray(img)
# squeeze to [H, W, C]
for i in range(np.ndim(img)):
try:
img = np.squeeze(img, i)
except ValueError:
pass
img = np.clip(img, 0, 255)
if img.ndim < 2 or img.ndim > 3:
raise ValueError('Invalid img data, must be a 2D image with an optional channel axis (2 or 3 dimensions)')
return array_to_img(img, mode)
def _add_noise(feature, stddev, mean, clip, **kwargs):
x = feature.astype('float') + np.random.normal(mean, stddev, feature.shape)
return np.clip(x, 0, 255) if clip else x
def _add_random_noise(feature, low, high, step, mean, clip, **kwargs):
n = list(range(low, high, step))
i = np.random.randint(len(n))
stddev = n[i]
return _add_noise(feature, stddev, mean, clip)
def _gaussian_blur(feature, width, size, **kwargs):
from scipy.ndimage.filters import gaussian_filter as gf
y = []
for img in np.split(feature, feature.shape[0]):
c = []
for channel in np.split(img, img.shape[-1]):
channel = np.squeeze(channel).astype('float')
c.append(gf(channel, width, mode='constant', truncate=(size // 2) / width))
y.append(np.stack(c, axis=-1))
return np.stack(y)
def _exponential_decay(lr, start_lr, epochs, steps, decay_step, decay_rate):
return start_lr * decay_rate ** (steps / decay_step)
def _poly_decay(lr, start_lr, end_lr, epochs, steps, decay_step, power):
return (start_lr - end_lr) * (1 - steps / decay_step) ** power + end_lr
def _stair_decay(lr, start_lr, epochs, steps, decay_step, decay_rate):
return start_lr * decay_rate ** (steps // decay_step)
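# Standalone illustration (not part of the original callbacks): how the three decay
# schedules above behave for a few step counts. The lr and epochs arguments are
# accepted but unused by these schedules.
def _demo_decay_schedules():
    start_lr, decay_step, decay_rate = 1e-3, 1000, 0.5
    for steps in (0, 500, 1000, 2000):
        print(steps,
              _exponential_decay(None, start_lr, None, steps, decay_step, decay_rate),
              _stair_decay(None, start_lr, None, steps, decay_step, decay_rate),
              _poly_decay(None, start_lr, 1e-5, None, steps, 2000, power=1))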
def _eval_psnr(outputs, label, max_val, name, **kwargs):
if not isinstance(outputs, list):
outputs = [outputs]
if isinstance(label, Image):
label = img_to_array(label.convert('RGB'))
for outp in outputs:
if isinstance(outp, Image):
outp = img_to_array(outp.convert('RGB'))
label = | np.squeeze(label) | numpy.squeeze |
# -*- coding: utf-8 -*-
# Python Libraries
import numpy as np
def intrensic_matrix_IC(camera_params: dict):
"""
Return the intrensic matrix for converting from image
to camera coordinates.
"""
fx = camera_params["img_width"]/camera_params["sen_width"] # px/mm ## Relationship between px and mm
fy = camera_params["img_height"]/camera_params["sen_height"] # px/mm ## Relationship between px and mm
cx = camera_params["img_width"]/2 # px ## Center of the image in x
cy = camera_params["img_height"]/2 # px ## Center of the image in y
# Matrix
K = np.array([[1/fx, 0, -cx/fx],
[0, 1/fy, -cy/fy],
[0, 0, 1]])
transform = camera_params["focal_length"]*np.array([[0, -1, 0],
[-1, 0, 0],
[0, 0, 1]])
return np.dot(transform, K)
def extrensic_matrix_IC(params: dict):
"""
Return the extrinsic matrix for converting from camera
to vehicle (world) coordinates.
"""
from math import sin as s
from math import cos as c
from math import radians
y = radians(params["yaw"]) # yaw [radians]
p = radians(params["pitch"]) # pitch [radians]
r = radians(params["roll"]) # roll [radians]
x_ext = params["x_t"] # Translation x [meters]
y_ext = params["y_t"] # Translation y [meters]
z_ext = params["z_t"] # Translation z [meters]
# Rotation from camera to vehicle
R_CV = np.array([[c(r)*c(p), c(r)*s(p)*s(y) - s(r)*c(y), c(r)*s(p)*c(y) + s(r)*s(y)],
[s(r)*c(p), s(r)*s(p)*s(y) + c(r)*c(y), s(r)*s(p)*c(y) - c(r)*s(y)],
[-s(p) , c(p)*s(y) , c(p)*c(y) ]])
# Translation from camera to vehicle
t_CV = np.array([[x_ext], [y_ext], [z_ext]])
return np.hstack((R_CV, t_CV))
if __name__=="__main__":
####################
# Intrinsic Matrix #
####################
camera_params= dict(img_width = 1920, # px ## Width of the image
img_height = 1080, # px ## Height of the image
sen_width = 5.18, # mm ## Width of the sensor array
sen_height = 3.89, # mm ## Height of the sensor array
focal_length = 3.93) # mm ## Focal length of the camera
K = intrensic_matrix_IC(camera_params)
print("\n##################################")
print("Interensic Matrix of shape ", K.shape)
print(K)
print("##################################\n")
# Image coordinates
u, v = 720, 619
image_cord = | np.array([u, v, 1]) | numpy.array |
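    # Continuation sketch (added for illustration; the original script is truncated at
    # this point): K maps the homogeneous pixel coordinate to camera-frame coordinates
    # on the sensor plane, in the units of the focal length (mm), with the axis swap
    # applied by `transform` above.
    camera_cord = np.dot(K, image_cord)
    print("Pixel ({}, {}) in camera coordinates [mm]:".format(u, v), camera_cord)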
"""Test the constraints processing."""
import numpy as np
import pandas as pd
import pytest
from estimagic.optimization.optimize import minimize
from numpy.testing import assert_array_almost_equal
def rosen(x):
"""The Rosenbrock function
Args:
x (pd.DataFrame): DataFrame with the parameters in the "value" column.
"""
x = x["value"].to_numpy()
return sum(100.0 * (x[1:] - x[:-1] ** 2.0) ** 2.0 + (1 - x[:-1]) ** 2.0)
params = pd.DataFrame()
params["value"] = np.array([1.3, 0.7, 1.0, 1.9, 1.2])
params["lower_bound"] = [-1.0, -1, -1, -1, -1]
params["upper_bound"] = [5.0, 5, 5, 5, 5]
def test_single_optimization_with_list_arguments():
"""Test an easy single optimization."""
batch_options = {"error_handling": "raise"}
result = minimize(
criterion=[rosen],
params=[params],
algorithm=["scipy_lbfgsb"],
batch_evaluator_options=batch_options,
numdiff_options={"error_handling": "raise_strict"},
)
expected_result = np.ones(5)
assert_array_almost_equal(
result["solution_params"]["value"].to_numpy(), expected_result, decimal=4
)
def test_parallel_optimizations_all_arguments_have_same_length():
"""Test a parallel optimization: All inputs are a list of the same length."""
result = minimize(
[rosen, rosen],
[params, params],
["scipy_lbfgsb", "scipy_lbfgsb"],
batch_evaluator_options={"n_cores": 4},
logging=False,
)
res1 = result[0]["solution_params"]["value"].to_numpy()
res2 = result[1]["solution_params"]["value"].to_numpy()
expected_result = np.ones(5)
assert_array_almost_equal(res1, expected_result, decimal=4)
assert_array_almost_equal(res2, expected_result, decimal=4)
def test_parallel_optimizations_with_logging(tmp_path):
"""Test a parallel optimization: All inputs are a list of the same length."""
paths = [tmp_path / "1.db", tmp_path / "2.db"]
result = minimize(
[rosen, rosen],
[params, params],
["scipy_lbfgsb", "scipy_lbfgsb"],
batch_evaluator_options={"n_cores": 4},
logging=paths,
)
res1 = result[0]["solution_params"]["value"].to_numpy()
res2 = result[1]["solution_params"]["value"].to_numpy()
expected_result = np.ones(5)
assert_array_almost_equal(res1, expected_result, decimal=4)
assert_array_almost_equal(res2, expected_result, decimal=4)
def test_lists_different_size():
"""Test if error is raised if arguments entered as list are of different length."""
with pytest.raises(ValueError):
minimize(
[rosen, rosen],
[params, params, params],
["scipy_lbfgsb", "scipy_lbfgsb"],
)
def test_missing_argument():
"""Test if error is raised if an important argument is entered as empty list."""
with pytest.raises(ValueError):
minimize(criterion=rosen, params=params, algorithm=[])
with pytest.raises(ValueError):
minimize(criterion=rosen, params=[], algorithm="scipy_lbfgsb")
with pytest.raises(ValueError):
minimize(criterion=[], params=params, algorithm="scipy_lbfgsb")
def test_wrong_type_criterion():
"""Make sure an error is raised if an argument has a wrong type."""
with pytest.raises(TypeError):
minimize(
[rosen, "error"],
[params, params],
["scipy_lbfgsb", "scipy_lbfgsb"],
)
with pytest.raises(TypeError):
minimize("error", params, "scipy_lbfgsb")
def test_broadcasting():
"""Test if broadcasting of arguments that are not entered as list works."""
result = minimize(
rosen,
params,
["scipy_lbfgsb", "scipy_lbfgsb"],
batch_evaluator_options={"n_cores": 4},
logging=False,
)
assert len(result) == 2
res1 = result[0]["solution_params"]["value"].to_numpy()
res2 = result[1]["solution_params"]["value"].to_numpy()
expected_result = | np.ones(5) | numpy.ones |
import numpy as np
from numpy import exp, inf, log, mean, sqrt
from scipy.stats import bernoulli
from .ashr import my_e2truncnorm, my_etruncnorm
from .output import result_in_output
from .r_utils import length, numeric, pmax, pmin, rep, stop, unlist
from .r_utils.stats import dnorm, pnorm, rtruncnorm
from .workhorse_parametric import check_g_init
def laplacemix(pi, mean, scale):
return dict(pi=pi, mean=mean, scale=scale)
def pl_checkg(g_init, fix_g, mode, scale, pointmass):
return check_g_init(
g_init=g_init,
fix_g=fix_g,
mode=mode,
scale=scale,
pointmass=pointmass,
class_name="laplacemix",
scale_name="scale",
)
def pl_initpar(g_init, mode, scale, pointmass, x, s):
if g_init is not None and length(g_init["pi"]) == 1:
par = dict(alpha=inf, beta=-log(g_init["scale"]), mu=g_init["mean"])
elif g_init is not None and length(g_init["pi"]) == 2:
par = dict(
alpha=log(1 / g_init["pi"][0] - 1) if g_init["pi"][0] != 0 else inf,
beta=-log(g_init["scale"][1]),
mu=g_init["mean"][0],
)
else:
par = dict()
if not pointmass:
par["alpha"] = inf
else:
par["alpha"] = 0
if scale != "estimate":
if length(scale) != 1:
stop("Argument 'scale' must be either 'estimate' or a scalar.")
par["beta"] = -log(scale)
else:
par["beta"] = -0.5 * log(mean(x ** 2) / 2)
if mode != "estimate":
par["mu"] = mode
else:
par["mu"] = mean(x)
return par
def pl_scalepar(par, scale_factor):
if par["beta"] is not None:
par["beta"] = par["beta"] - log(scale_factor)
if par["mu"] is not None:
par["mu"] = scale_factor * par["mu"]
return par
def pl_precomp(x, s, par_init, fix_par):
fix_mu = fix_par[2]
if not fix_mu and np.any(s == 0):
stop("The mode cannot be estimated if any SE is zero (the gradient does not exist).")
return dict()
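# Note on logscale_add (used by pl_nllik below): it is assumed to be defined elsewhere
# in this package. A minimal numerically stable sketch of the operation it must perform,
# log(exp(a) + exp(b)) without overflow, is numpy's logaddexp:
def _logscale_add_sketch(log_x, log_y):
    # equivalent to log(exp(log_x) + exp(log_y)), computed stably
    return np.logaddexp(log_x, log_y)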
def pl_nllik(par, x, s, par_init, fix_par, calc_grad, calc_hess):
fix_pi0, fix_a, fix_mu = fix_par
p = unlist(par_init)
p[~np.array(fix_par)] = par
w = 1 - 1 / (1 + exp(p[0]))
a = exp(p[1])
mu = p[2]
lf = -0.5 * log(2 * np.pi * s ** 2) - 0.5 * (x - mu) ** 2 / s ** 2
xleft = (x - mu) / s + s * a
lpnormleft = pnorm(xleft, log_p=True, lower_tail=False)
lgleft = log(a / 2) + s ** 2 * a ** 2 / 2 + a * (x - mu) + lpnormleft
xright = (x - mu) / s - s * a
lpnormright = pnorm(xright, log_p=True)
lgright = log(a / 2) + s ** 2 * a ** 2 / 2 - a * (x - mu) + lpnormright
lg = logscale_add(lgleft, lgright)
llik = logscale_add(log(1 - w) + lf, log(w) + lg)
nllik = - | np.nansum(llik) | numpy.nansum |
"""
Synapse probabilistic m
"""
import numpy as np
from scipy import ndimage
from scipy.ndimage.interpolation import shift
from scipy.stats import norm
from skimage.morphology import remove_small_objects
def fg_prob(im):
im = im.astype(np.float64)
probs = np.zeros_like(im)
for i in range(im.shape[0]):
mean = | np.mean(im[i]) | numpy.mean |
#!/usr/bin/env python
"""Carry out standard MBAR analysis on MWUS simulation output."""
import argparse
import json
import numpy as np
from origamipy import biases
from origamipy import conditions
from origamipy import decorrelate
from origamipy import files
from origamipy import mbar_wrapper
from origamipy import outputs
from origamipy import us_process
from origamipy import utility
def main():
args = parse_args()
system_file = files.JSONStructInpFile(args.system_filename)
inp_filebase = create_input_filepathbase(args)
fileformatter = construct_fileformatter()
all_conditions = construct_conditions(
args, fileformatter, inp_filebase, system_file)
staple_lengths = system_file._staple_lengths
sim_collections = create_simplesim_collections(args, inp_filebase,
all_conditions)
decor_outs = decorrelate.SimpleDecorrelatedOutputs(
sim_collections, all_conditions)
decor_outs.read_decors_from_files()
mbarw = mbar_wrapper.MBARWrapper(decor_outs)
mbarw.perform_mbar()
out_filebase = create_output_filepathbase(args)
conds = conditions.SimConditions({'temp': args.temp, 'staple_m': args.staple_m,
'bias': biases.NoBias()}, fileformatter, staple_lengths)
all_tags = decor_outs.all_conditions.condition_tags
aves = []
stds = []
for tag in decor_outs.all_series_tags:
all_tags.append(tag)
series = decor_outs.get_concatenated_series(tag)
ave, std = mbarw.calc_expectation(series, conds)
aves.append(ave)
stds.append(std)
# Hack calculate LFEs
values = decor_outs.get_concatenated_series(tag)
decor_enes = decor_outs.get_concatenated_datatype('enes')
decor_ops = decor_outs.get_concatenated_datatype('ops')
bins = list(set(values))
bins.sort()
value_to_bin = {value: i for i, value in enumerate(bins)}
bin_index_series = [value_to_bin[i] for i in values]
bin_index_series = np.array(bin_index_series)
rpots = utility.calc_reduced_potentials(decor_enes, decor_ops,
conds)
lfes, lfe_stds = mbarw._mbar.computePMF(
rpots, bin_index_series, len(bins))
# Hack write LFEs to file
header = | np.array(['ops', args.temp]) | numpy.array |
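# Appended illustration (not part of the original script, which is truncated above):
# the bin-index construction passed to computePMF, reduced to plain numpy. Each sampled
# order-parameter value is mapped to the index of its bin in the sorted unique values.
def _demo_bin_indexing():
    values = np.array([3, 5, 3, 7, 5, 5])
    bins = sorted(set(values.tolist()))
    value_to_bin = {value: i for i, value in enumerate(bins)}
    bin_index_series = np.array([value_to_bin[v] for v in values.tolist()])
    return bins, bin_index_series  # ([3, 5, 7], array([0, 1, 0, 2, 1, 1]))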
from skimage.segmentation import quickshift, mark_boundaries
import numpy as np
import matplotlib.pyplot as plt
from IPython.core.display import display, HTML
import cv2
def show_vis_explanation(explanation_image, cmap=None):
"""
Get the environment and show the image.
Args:
explanation_image:
cmap:
Returns: None
"""
plt.imshow(explanation_image, cmap)
plt.axis("off")
plt.show()
# def is_jupyter():
# # ref: https://stackoverflow.com/a/39662359/4834515
# try:
# shell = get_ipython().__class__.__name__
# if shell == 'ZMQInteractiveShell':
# return True # Jupyter notebook or qtconsole
# elif shell == 'TerminalInteractiveShell':
# return False # Terminal running IPython
# else:
# return False # Other type (?)
# except NameError:
# return False # Probably standard Python interpreter
def explanation_to_vis(batched_image: np.ndarray, explanation: np.ndarray, style='grayscale') -> np.ndarray:
"""
Args:
batched_image: e.g., (1, height, width, 3).
explanation: should have the same width and height as image.
style: ['grayscale', 'heatmap', 'overlay_grayscale', 'overlay_heatmap', 'overlay_threshold'].
Returns:
"""
if len(batched_image.shape) == 4:
assert batched_image.shape[0] == 1, "For one image only"
batched_image = batched_image[0]
assert len(batched_image.shape) == 3
assert len(explanation.shape) == 2, f"image shape {batched_image.shape} vs " \
f"explanation {explanation.shape}"
image = batched_image
if style == 'grayscale':
# explanation has the same size as image, no need to scale.
# usually for gradient-based explanations w.r.t. the image.
return _grayscale(explanation)
elif style == 'heatmap':
# explanation's width and height are usually smaller than image.
# usually for CAM, GradCAM etc, which produce lower-resolution explanations.
return _heatmap(explanation, (image.shape[1], image.shape[0])) # image just for the shape.
elif style == 'overlay_grayscale':
return overlay_grayscale(image, explanation)
elif style == 'overlay_heatmap':
return overlay_heatmap(image, explanation)
elif style == 'overlay_threshold':
# usually for LIME etc, which originally shows positive and negative parts.
return overlay_threshold(image, explanation)
else:
raise KeyError("Unknown visualization style.")
def _grayscale(explanation: np.ndarray, percentile=99) -> np.ndarray:
"""
Args:
explanation: numpy.ndarray, 2d.
percentile:
Returns: numpy.ndarray, uint8, same shape as explanation
"""
assert len(explanation.shape) == 2, f"{explanation.shape}. " \
"Currently support 2D explanation results for visualization. " \
"Reduce higher dimensions to 2D for visualization."
assert np.max(explanation) <= 1.0
assert isinstance(percentile, int)
assert 0 <= percentile <= 100
image_2d = explanation
vmax = np.percentile(image_2d, percentile)
vmin = np.min(image_2d)
x = np.clip((image_2d - vmin) / (vmax - vmin), 0, 1) * 255
x = np.uint8(x)
return x
def overlay_grayscale(image, explanation, percentile=99) -> np.ndarray:
x = _grayscale(explanation, percentile)
overlay_vis = np.zeros_like(image, dtype=np.uint8)
overlay_vis[:, :, 1] = x
overlay_vis = overlay_vis * 0.4 + image * 0.6
return np.uint8(overlay_vis)
def _heatmap(explanation, resize_shape=(224, 224)) -> np.ndarray:
"""
Args:
explanation:
resize_shape: (width, height)
Returns:
"""
assert len(explanation.shape) == 2, f"{explanation.shape}. " \
f"Currently support 2D explanation results for visualization. " \
"Reduce higher dimensions to 2D for visualization."
# explanation = np.maximum(explanation, 0)
# ex_max = np.max(explanation)
# explanation /= ex_max
explanation = (explanation - explanation.min()) / (explanation.max() - explanation.min())
explanation = cv2.resize(explanation, resize_shape)
explanation = np.uint8(255 * explanation)
explanation = cv2.applyColorMap(explanation, cv2.COLORMAP_JET)
explanation = cv2.cvtColor(explanation, cv2.COLOR_BGR2RGB)
return explanation
def overlay_heatmap(image, explanation) -> np.ndarray:
x = _heatmap(explanation, (image.shape[1], image.shape[0]))
overlay_vis = x * 0.4 + image * 0.6
return np.uint8(overlay_vis)
def overlay_threshold(image, explanation_mask) -> np.ndarray:
overlay_vis = np.zeros_like(image, dtype=np.uint8)
overlay_vis[:, :, 1] = explanation_mask * 255
overlay_vis = overlay_vis * 0.6 + image * 0.4
return | np.uint8(overlay_vis) | numpy.uint8 |
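# Standalone illustration (not part of the module): the percentile clipping performed by
# _grayscale above, applied to a synthetic attribution map. Clipping at the 99th
# percentile keeps a few extreme attributions from washing out the rest of the map.
def _demo_percentile_normalization():
    rng = np.random.default_rng(0)
    sal = np.abs(rng.normal(size=(8, 8))) / 10.0   # fake attribution map, values < 1.0
    vmax = np.percentile(sal, 99)
    vmin = np.min(sal)
    vis = np.uint8(np.clip((sal - vmin) / (vmax - vmin), 0, 1) * 255)
    return vis.min(), vis.max()  # (0, 255)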
import math
import os
from os.path import join as pjoin
import json
import copy
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import GPUtil
import pandas as pd
from multiprocessing import Pool
from tqdm import tqdm
import sklearn.metrics
from .config import print_config, class_labels
from .utils import (
anno_to_binary, cut_score, debug, display_imgs, info, gen_cwd_slash, labels_to_str, load_config, load_img,
np_macro_f1, str_to_labels, class_id_to_label, class_ids_to_label, combine_windows, chunk, compute_i_coords,
format_macro_f1_details, vec_to_str
)
# from .utils_heavy import predict, model_from_config
from .ignite_trainer import predict as predict
# def predict_and_save_scores(
# config,
# path_to_anno=None,
# path_to_imgs=None,
# save_scores_to=None,
# to_csv=None,
# ):
# model = model_from_config(config, which='latest')
# valid_anno = pd.read_csv(path_to_anno, index_col=0)
# predict(config)
# return valid_anno_predicted
def remove_scores_predicted(config):
cwd_slash = gen_cwd_slash(config)
pd.read_csv(cwd_slash('validation_predictions.csv'), index_col=0) \
.drop('Scores Predicted', 1) \
.to_csv(cwd_slash('validation_predictions.csv'))
def evaluate_validation_prediction(config):
info('evaluate_validation_prediction()')
cwd_slash = gen_cwd_slash(config)
anno = pd.read_csv(config['path_to_valid_anno_cache'], index_col=0, dtype=object)
prediction_df = pd.read_csv(cwd_slash('valid_predicted.csv'), index_col=0, dtype=object)
anno = anno.join(prediction_df, how='left')
# DEBUG BEGIN
anno.loc[:, ['Target', 'Predicted', 'folder', 'extension']].to_csv(cwd_slash('valid_anno_predicted.csv'))
# DEBUG END
y_true, y_pred = anno_to_binary(anno, config)
macro_f1_score, f1_details = np_macro_f1(y_true, y_pred, config, return_details=True)
print(format_macro_f1_details(f1_details, config))
print(f'macro_f1_score = {macro_f1_score}')
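# Standalone sketch (illustration only; np_macro_f1 is imported from .utils and may
# differ in detail): macro F1 over binary label matrices is the unweighted mean of the
# per-class F1 scores, e.g. via sklearn:
def _demo_macro_f1():
    y_true = np.array([[1, 0, 1], [0, 1, 0], [1, 1, 0]])
    y_pred = np.array([[1, 0, 1], [0, 1, 0], [1, 0, 0]])
    return sklearn.metrics.f1_score(y_true, y_pred, average='macro')  # ~0.889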
def final_corrections(config):
info('final_corrections()')
cwd_slash = gen_cwd_slash(config)
anno = pd.read_csv(cwd_slash('test_predicted.csv'), index_col=0)
# correct best submission [TODO: REMOVE: not for private leaderboard] --------------
# best_anno = pd.read_csv(cwd_slash('submission_587.csv'), index_col=0)
# rare_classes = [15, 27, 10, 8, 9, 17, 20, 24, 26]
# comparison_anno = anno.copy()
# comparison_anno['best'] = best_anno['Predicted']
# plot_imgs(
# config,
# comparison_anno.query('best != Predicted').sample(28),
# save_as='./tmp/best_submission_corrections.png',
# folder='data/test_minimaps',
# extension='jpg',
# )
# new_rows = []
# for id_, row in comparison_anno.iterrows():
# current_labels = str_to_labels(row['Predicted'])
# best_labels = str_to_labels(row['best'])
# for c in rare_classes:
# if c in current_labels and c not in best_labels:
# debug(f"removing {c} from {id_}")
# current_labels.remove(c)
# if c not in current_labels and c in best_labels:
# debug(f"adding {c} to {id_}")
# current_labels.append(c)
# new_row = {
# 'Id': id_,
# 'Predicted': labels_to_str(current_labels),
# }
# new_rows.append(new_row)
# anno = pd.DataFrame.from_records(new_rows).set_index('Id')
# debug(f"anno ({len(anno)}) =\n{anno.head(10)}")
# correct leaked --------------
# pairs_anno = pd.read_csv('data/identical_pairs.csv')
# hpa_anno = pd.read_csv('data/hpa_public_imgs.csv', index_col=0)
# correction_anno = pairs_anno.join(hpa_anno, how='left', on=['hpa_id'])\
# .join(anno, how='left', on=['test_id'])
# correction_anno['Target'] = [labels_to_str(str_to_labels(x)) for x in correction_anno['Target']]
# debug(f"correction_anno['test_id'] = {correction_anno['test_id']}")
# debug(f"len = {len(anno.loc[correction_anno['test_id'], 'Predicted'].values)}")
# correction_anno['Predicted'] = anno.loc[correction_anno['test_id'], 'Predicted'].values
# actual_corrections = correction_anno.query('Predicted != Target').set_index('test_id')
# # DEBUG BEGIN
# # plot_imgs(config, actual_corrections, folder='data/test_minimaps', extension='jpg')
# # DEBUG END
# debug(f"making {len(correction_anno)} corrections, {len(actual_corrections)} are actually different")
# debug(f"actual_corrections =\n{actual_corrections}")
# anno.loc[correction_anno['test_id'], 'Predicted'] = correction_anno['Target'].values
# correct leaked 2 --------------
pairs_anno = pd.read_csv('data/identical_pairs_new_fixed.csv')
for i_begin, i_end in chunk(len(pairs_anno), 24):
plot_imgs(
config,
pairs_anno.iloc[i_begin:i_end].drop('test_id', axis=1).set_index('hpa_id'),
save_as=f'./tmp/diff_{i_begin}_hpa.jpg',
folder='data/hpa_public_imgs',
extension='jpg',
background_color=None,
channel=None,
dpi=100,
)
plot_imgs(
config,
pairs_anno.iloc[i_begin:i_end].drop('hpa_id', axis=1).set_index('test_id'),
save_as=f'./tmp/diff_{i_begin}_test.jpg',
folder='data/test_full_size',
extension='tif',
background_color=None,
channel=['red', 'green', 'blue'],
dpi=100,
)
hpa_anno = pd.read_csv('data/hpa_public_imgs.csv', index_col=0)
correction_anno = pairs_anno.join(hpa_anno, how='left', on=['hpa_id'])\
.join(anno, how='left', on=['test_id'])
correction_anno['Target'] = [labels_to_str(str_to_labels(x)) for x in correction_anno['Target']]
debug(f"correction_anno['test_id'] = {correction_anno['test_id']}")
debug(f"len = {len(anno.loc[correction_anno['test_id'], 'Predicted'].values)}")
correction_anno['Predicted'] = anno.loc[correction_anno['test_id'], 'Predicted'].values
actual_corrections = correction_anno.query('Predicted != Target').set_index('test_id')
# DEBUG BEGIN
# plot_imgs(config, actual_corrections, folder='data/test_minimaps', extension='jpg')
# DEBUG END
debug(f"making {len(correction_anno)} corrections, {len(actual_corrections)} are actually different")
debug(f"actual_corrections =\n{actual_corrections}")
anno.loc[correction_anno['test_id'], 'Predicted'] = correction_anno['Target'].values
# DEBUG BEGIN
# plot_imgs(
# config,
# anno.loc[[27 in str_to_labels(p) for p in anno['Predicted']]],
# folder='data/test_minimaps',
# extension='jpg'
# )
# DEBUG END
anno.to_csv(cwd_slash('test_predicted_corrected.csv'))
# def list_confusion(config):
# fn_counts_list = {}
# class_labels = [f'{k}-{classes[k]}' for k in range(n_classes)]
# for which_class in tqdm(range(n_classes)):
# cwd_slash = gen_cwd_slash(config)
# anno = pd.read_csv(cwd_slash('validation_predictions.csv'), index_col=0)
# y_true, y_pred = anno_to_binary(anno)
# fn = y_true * (1 - y_pred)
# fp = (1 - y_true) * y_pred
# i_fn_predictions = np.nonzero(fn[:, which_class])[0]
# fn_counts = fp[i_fn_predictions, :].sum(axis=0) / len(i_fn_predictions)
# fn_counts_list[class_labels[which_class]] = fn_counts
# # out = pd.Series(fn_counts, index=pd.Index(range(n_classes), name='class'))\
# # .sort_values(ascending=False)\
# # .head(3)
# pd.DataFrame(fn_counts_list, index=class_labels).to_csv('./tmp/confusion.csv')
def plot_imgs(
config,
anno,
save_as='./tmp/imgs.jpg',
folder=None,
extension=None,
background_color=None,
channel=None,
dpi=100,
):
img_list = []
for id_, row in anno.iterrows():
img = load_img(
id_,
config,
resize=False,
folder=row.get('folder') or folder,
channel=channel,
extension=row.get('extension') or extension,
)
# if type(channel) is str:
# channel = {
# 'red': 0,
# 'green': 1,
# 'blue': 2,
# 'yellow': 3,
# }.get(channel)
# if channel is not None:
# img = img[:, :, channel]
debug(f' - Loaded image {id_} with size {img.shape}')
img_label = '\n'.join([f'{id_}'] + [f'{k} = {v}' for k, v in row.items()])
img_list.append((img, img_label))
display_imgs(
img_list,
save_as=save_as,
background_color=background_color,
dpi=dpi,
)
def plot_tfpn_examples(config, which_class, max_n_imgs=28, output_folder='./tmp'):
cwd_slash = gen_cwd_slash(config)
anno = pd.read_csv(cwd_slash('validation_predictions.csv'), index_col=0)
y_true, y_pred = anno_to_binary(anno)
y_true = y_true[:, which_class]
y_pred = y_pred[:, which_class]
def plot_imgs(selector, filename, background_color):
debug(f'selector = {selector}')
if type(config['score_threshold']) is list:
score_threshold = config['score_threshold'][which_class]
else:
score_threshold = config['score_threshold']
tp_idxs = np.nonzero(selector > score_threshold)[0]
if len(tp_idxs) > max_n_imgs:
sample_idxs = np.sort(np.random.choice(range(len(tp_idxs)), max_n_imgs, replace=False))
tp_idxs = tp_idxs[sample_idxs]
img_list = []
for idx in tp_idxs:
row = anno.iloc[idx]
img_id = row.name
labels_true = class_ids_to_label(str_to_labels(row['Target']), config)
labels_pred = class_ids_to_label(str_to_labels(row['Predicted']), config)
img_label = '\n'.join([
f'{img_id}',
f'T: {labels_true}',
f'P: {labels_pred}',
])
# img = load_img(img_id, self.config, resize=False, folder='./data/train_full_size', extension='tif')
img = load_img(
img_id,
config,
resize=False,
folder=config['path_to_valid'],
channel=None,
extension=config['img_extension'],
)
debug(f' - Loaded image {img_id} with size {img.shape}')
img_list.append((img, img_label))
display_imgs(
img_list,
save_as=filename,
background_color=background_color,
)
def out_slash(fn):
return pjoin(output_folder, fn)
plot_imgs(y_true * y_pred, out_slash(f'class_{which_class}_true_positives.png'), 'white')
plot_imgs((1 - y_true) * y_pred, out_slash(f'class_{which_class}_false_positives.png'), 'yellow')
plot_imgs(y_true * (1 - y_pred), out_slash(f'class_{which_class}_false_negatives.png'), 'blue')
# plot_imgs((1 - y_true) * (1 - y_pred), out_slash(f'class_{which_class}_true_negatives.png'), 'black')
def add_extra_data_into_train_anno(config):
cwd_slash = gen_cwd_slash(config)
train_anno = pd.read_csv(cwd_slash('train_windowed_anno.csv'), index_col=0)
valid_anno = pd.read_csv(cwd_slash('valid_windowed_anno.csv'), index_col=0)
train_with_hpa_anno = pd.read_csv('data/train_with_hpa.csv', index_col=0)
train_windowed_anno = pd.read_csv('data/train_windowed.csv', index_col=0)
hpa_ids = set(train_with_hpa_anno.index)
existing_ids = set(valid_anno['source_img_id']).union(train_anno['source_img_id'])
new_ids = hpa_ids.difference(existing_ids)
extra_train_anno = train_with_hpa_anno.loc[new_ids]
debug(f'extra_train_anno ({len(extra_train_anno)}) =\n{extra_train_anno.head(10)}')
extra_train_windowed_anno = train_windowed_anno.join(extra_train_anno, how='right', on=['source_img_id'])
debug(f'extra_train_windowed_anno ({len(extra_train_windowed_anno)}) =\n{extra_train_windowed_anno.head(10)}')
pd.concat([train_anno, extra_train_windowed_anno]).to_csv(cwd_slash('train_windowed_anno.csv'))
# def calibrate_one_task(task):
# i_class = task['i_class']
# mat_pred_windowed = task['mat_pred_windowed']
# mat_true = task['mat_true']
# alpha = task['alpha']
# i_windowss = task['i_windowss']
# beta_values = task['beta_values']
# config = task['config']
# details_list = []
# for beta in beta_values:
# vec_true = mat_true[:, i_class]
# vec_pred_windowed = mat_pred_windowed[:, i_class]
# list_pred = []
# for i_source, i_windows in enumerate(i_windowss):
# combined_prediction = vec_pred_windowed[i_windows].mean() + vec_pred_windowed[i_windows].mean()
# list_pred.append(combined_prediction)
# vec_pred = np.array(list_pred)
# f1 = np_macro_f1(vec_true, vec_pred, config)
# details_list.append({
# 'i_class': i_class,
# 'alpha': alpha,
# 'beta': beta,
# 'f1': f1,
# })
# # debug(f'i_class = {i_class}, alpha = {alpha}, beta = {beta}, f1 = {f1}, best_f1 = {best_f1}')
# details_df = pd.DataFrame.from_records(details_list)
# return {
# 'task': task,
# 'details_df': details_df,
# }
# def calibrate_windowed_score(
# config,
# n_threads=70,
# n_cols=7,
# save_graph_to='./tmp/calibrate_score_threshold.png',
# epsilon=1e-7,
# ):
# info('calibrate_windowed_score()')
# cwd_slash = gen_cwd_slash(config)
# alpha_values = range(10)
# beta_values = np.linspace(0, 1, 21)
# mat_pred_windowed = np.load(cwd_slash('valid_windowed_scores.npy'))
# valid_anno = pd.read_csv(config['path_to_valid_anno_cache'])
# mat_true = np.zeros((valid_anno.shape[0], 28))
# for i, target_str in enumerate(valid_anno['Target']):
# targets = str_to_labels(target_str)
# mat_true[np.ix_([i], targets)] = 1
# valid_windowed_anno = pd.read_csv(cwd_slash('valid_windowed_anno.csv'))
# valid_windowed_anno['row_number'] = valid_windowed_anno.index
# grouped = valid_windowed_anno.groupby('source_img_id')
# source_id_to_window_row_nums = {id_: group['row_number'].values.tolist() for id_, group in grouped}
# i_windowss = [source_id_to_window_row_nums[id_] for id_ in valid_anno['Id']]
# task_list = [
# {
# 'i_class': i_class,
# 'alpha': alpha,
# 'mat_pred_windowed': mat_pred_windowed,
# 'mat_true': mat_true,
# 'i_windowss': i_windowss,
# 'beta_values': beta_values,
# 'config': config,
# } for i_class in range(config['_n_classes']) for alpha in alpha_values
# ]
# details_dfs = []
# with Pool(n_threads) as p:
# result_iter = p.imap_unordered(calibrate_one_task, task_list)
# for i_result, result in enumerate(result_iter):
# info(
# f"({i_result}/{len(task_list)}) "
# f"i_class = {result['task']['i_class']}, "
# f"alpha = {result['task']['alpha']} is done"
# )
# details_dfs.append(result['details_df'])
# details_df = pd.concat(details_dfs)
# if save_graph_to is not None:
# n_rows = math.ceil(config['_n_classes'] / n_cols)
# plt.figure(figsize=(n_cols * 10, n_rows * 10))
# for i_class, group_df in details_df.groupby('i_class'):
# mat = group_df.pivot(index='beta', columns='alpha', values='f1')
# plt.subplot(n_rows, n_cols, i_class + 1, sharex=plt.gca(), sharey=plt.gca())
# plt.imshow(mat, aspect='auto')
# plt.xticks(range(len(alpha_values)), alpha_values)
# plt.yticks(range(len(beta_values)), beta_values)
# plt.text(0, 1, f'{i_class}', transform=plt.gca().transAxes)
# plt.savefig(save_graph_to, dpi=100)
# debug(f'Saved graph to {save_graph_to}')
# print(details_df)
# details_df.to_csv(cwd_slash('calibrate_windowed_score_details.csv'), index=False)
# debug(f"saved to {cwd_slash('calibrate_windowed_score_details.csv')}")
# best_df = pd.concat([group.sort_values('f1').tail(1) for i_class, group in details_df.groupby('i_class')])
# best_df['manually_modified'] = False
# best_df.to_csv(cwd_slash('calibrate_windowed_score.csv'), index=False)
# debug(f"saved to {cwd_slash('calibrate_windowed_score.csv')}")
# def calibrate_score_threshold(config, n_cols=7, save_graph_to='./tmp/calibrate_score_threshold.png', epsilon=1e-7):
# info('calibrate_score_threshold()')
# cwd_slash = gen_cwd_slash(config)
# n_rows = math.ceil(config['_n_classes'] / n_cols)
# mat_pred = np.load(cwd_slash('valid_scores.npy'))
# anno = pd.read_csv(cwd_slash('valid_windowed_anno.csv'))
# mat_true = np.zeros_like(mat_pred)
# for i, target_str in enumerate(anno['Target']):
# targets = str_to_labels(target_str)
# mat_true[np.ix_([i], targets)] = 1
# if save_graph_to is not None:
# plt.figure(figsize=(n_cols * 10, n_rows * 10))
# best_ths = []
# for class_id in tqdm(config['classes']):
# thresholds = np.round(np.linspace(0, 1, 1001), 3)
# f1_scores = np.zeros_like(thresholds)
# ps = []
# rs = []
# for i_th, th in enumerate(thresholds):
# y_pred = mat_pred[:, i_class]
# y_pred = np.where(y_pred < th, np.zeros_like(y_pred), np.ones_like(y_pred))
# y_true = mat_true[:, i_class]
# tp = np.sum(y_true * y_pred, axis=0)
# # tn = np.sum((1 - y_true) * (1 - y_pred), axis=0)
# fp = np.sum((1 - y_true) * y_pred, axis=0)
# fn = np.sum(y_true * (1 - y_pred), axis=0)
# p = tp / (tp + fp + epsilon)
# r = tp / (tp + fn + epsilon)
# ps.append(p)
# rs.append(r)
# out = 2 * p * r / (p + r + epsilon)
# # replace all NaN's with 0's
# out = np.where(np.isnan(out), np.zeros_like(out), out)
# f1_scores[i_th] = out
# if save_graph_to is not None:
# plt.subplot(n_rows, n_cols, i_class + 1, sharex=plt.gca(), sharey=plt.gca())
# plt.plot(thresholds, f1_scores)
# plt.plot(thresholds, ps)
# plt.plot(thresholds, rs)
# plt.text(0, 1, f'{i_class}', transform=plt.gca().transAxes)
# # debug(f'thresholds = {thresholds}')
# # debug(f'f1_scores = {f1_scores}')
# best_th = thresholds[np.argmax(f1_scores)]
# best_ths.append(best_th)
# if save_graph_to is not None:
# plt.savefig(save_graph_to, dpi=100)
# debug(f'Saved graph to {save_graph_to}')
# debug(f'best_ths = {best_ths}')
# with open(cwd_slash('calibrated_score_threshold.json'), 'w') as f:
# json.dump(best_ths, f)
def predict_for_valid(config):
cwd_slash = gen_cwd_slash(config)
valid_windowed_anno = pd.read_csv(config['path_to_valid_windowed_anno_cache'], index_col=0)
predict(
config,
valid_windowed_anno,
cwd_slash('model.pth'),
save_numpy_to='valid_windowed_predicted.npy',
save_csv_to='valid_windowed_anno_predicted.csv',
target_col='corrected_target',
)
# predict(
# anno=cwd_slash('valid_windowed_anno.csv'),
# config=config,
# extension=config['img_extension'],
# folder=config['path_to_valid'],
# to_npy=cwd_slash('valid_windowed_scores.npy'),
# )
# def cut_score_for_valid(config):
# info('cut_score_for_valid()')
# cwd_slash = gen_cwd_slash(config)
# path_to_score = cwd_slash('calibrate_windowed_score.csv')
# if os.path.exists(path_to_score):
# tb = pd.read_csv(path_to_score)
# debug(f"read from {path_to_score}")
# score_threshold = tb.sort_values('i_class')['beta'].values
# debug(f'score_threshold = {score_threshold}')
# min_n_windows = tb.sort_values('i_class')['alpha'].values
# debug(f'min_n_windows = {min_n_windows}')
# else:
# debug(f'WARNING: using default score_threshold and min_n_windows')
# score_threshold = config['score_threshold']
# min_n_windows = 3
# # if os.path.exists(cwd_slash('calibrated_score_threshold.json')):
# # with open(cwd_slash('calibrated_score_threshold.json'), 'r') as f:
# # score_threshold = json.load(f)
# # else:
# # score_threshold = config['score_threshold']
# debug('cut_score()')
# cut_score(
# anno=cwd_slash('valid_windowed_anno.csv'),
# scores_mat=cwd_slash('valid_windowed_scores.npy'),
# config=config,
# prediction_col='Predicted',
# score_threshold=score_threshold,
# to_csv=cwd_slash('valid_windowed_predicted.csv'),
# )
# debug('combine_windows()')
# combine_windows(
# cwd_slash('valid_windowed_predicted.csv'),
# min_n_windows,
# config,
# save_combined_anno_to=cwd_slash('valid_predicted.csv'),
# group_col='source_img_id',
# )
def predict_for_test(config):
info('predict_for_test()')
cwd_slash = gen_cwd_slash(config)
test_windowed_anno = pd.read_csv(config['path_to_test_anno'], index_col=0)
test_windowed_anno = compute_i_coords(test_windowed_anno, config)
test_windowed_anno['group'] = 'test_full_size'
predict(
config,
test_windowed_anno,
cwd_slash('model.pth'),
save_numpy_to='test_windowed_predicted.npy',
save_csv_to='test_windowed_anno_predicted.csv',
)
# anno = pd.read_csv('./data/test_windowed.csv', index_col=0)
# if config['submission_subsampling'] is not None:
# anno = anno.sample(config['submission_subsampling'])
# predict(
# anno=anno,
# config=config,
# extension=config['img_extension'],
# folder=config['path_to_test'],
# to_npy=cwd_slash('test_windowed_scores.npy'),
# )
def create_csv_for_debugger(config):
info('create_csv_for_debugger()')
cwd_slash = gen_cwd_slash(config)
anno = pd.read_csv(cwd_slash('valid_windowed_anno.csv'), index_col=0)
pred_mat = np.load(cwd_slash('valid_windowed_scores.npy'))
pred_anno = pd.DataFrame(pred_mat, columns=[f'score_of_{x}' for x in config['class_ids']], index=anno.index)
anno.join(pred_anno, how='left').to_csv(cwd_slash('valid_windowed_scores.csv'))
def take_top_n_for_test(config):
info('take_top_n_for_test()')
cwd_slash = gen_cwd_slash(config)
class_distrib = pd.read_csv('tmp/class_distribution.csv', index_col=0)
# test_scores = pd.read_csv(cwd_slash('test_aggregated_prediction.csv'), index_col=0)
test_scores = pd.read_csv(cwd_slash('stacking_v3_test.csv'), index_col=0)
# correct class 17 for LB613
# test_scores_LB613 = pd.read_csv(
# './working/__613__190104-001629__P1T500_/test_aggregated_prediction.csv', index_col=0
# )
# test_scores_LB613['17'] = test_scores['17']
# test_scores = test_scores_LB613
# test_scores = pd.read_csv('tmp/yuanhao.csv', index_col=0)
submission_df = pd.read_csv('data/sample_submission.csv', index_col=0)
test_scores = test_scores.loc[submission_df.index]
def get_order(col):
fixed_n_samples = class_distrib.loc[int(col.name), 'LB613']
if not np.isnan(fixed_n_samples):
n_samples = fixed_n_samples
else:
n_samples = class_distrib.loc[int(col.name), 'expected_n_samples_in_test'] * 1.2
return np.where(np.argsort(np.argsort(-col)) >= n_samples, 0, 1)
submission_df['Predicted'] = test_scores.apply(get_order).apply(vec_to_str, axis=1)
submission_df.to_csv(cwd_slash('submission_top_n_stacking_v3.csv'))
# submission_df.to_csv('tmp/yuanhao_submission.csv')
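# Note on the double-argsort trick in get_order() above: np.argsort(np.argsort(-col))
# yields the 0-based rank of each score from largest to smallest, e.g. for
# col = [0.2, 0.9, 0.5] the ranks are [2, 0, 1], so only entries with rank < n_samples
# (the top n scores) are marked with 1.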
def cut_score_for_valid(config):
info('cut_score_for_valid()')
cwd_slash = gen_cwd_slash(config)
threshold_df = pd.read_csv(cwd_slash('calibrated_threshold_17_corrected.csv'), index_col=0)
thresholds = threshold_df['best_threshold']
valid_scores = pd.read_csv(cwd_slash('valid_aggregated_prediction_17_corrected.csv'), index_col=0)
submission_df = pd.read_csv(cwd_slash('valid_anno.csv'))
valid_scores = valid_scores.loc[submission_df['Id']]
pick_mat = valid_scores.values > [thresholds]
preds = [vec_to_str(row) for row in pick_mat]
submission_df['Predicted'] = preds
submission_df.to_csv(cwd_slash('valid_anno_predicted.csv'), index=False)
def cut_score_for_test(config):
info('cut_score_for_test()')
cwd_slash = gen_cwd_slash(config)
threshold_df = pd.read_csv(cwd_slash('calibrated_threshold.csv'), index_col=0)
thresholds = threshold_df['best_threshold']
# thresholds = threshold_df['best_threshold'] * 0.4
test_scores = pd.read_csv(cwd_slash('test_aggregated_prediction.csv'), index_col=0)
submission_df = pd.read_csv('data/sample_submission.csv')
test_scores = test_scores.loc[submission_df['Id']]
pick_mat = test_scores.values > [thresholds]
def get_order(col, n_samples):
return np.where(np.argsort(np.argsort(-col)) >= n_samples, 0, 1)
for class_id in test_scores:
i_class = int(class_id)
manual_top_n = threshold_df.loc[i_class, 'manual_top_n']
if not np.isnan(manual_top_n):
debug(f"manually set {class_id} to pick the top {manual_top_n}")
pick_vec = get_order(test_scores[class_id], manual_top_n)
pick_mat[:, i_class] = pick_vec
preds = [vec_to_str(row) for row in pick_mat]
submission_df['Predicted'] = preds
submission_df.to_csv(cwd_slash('submission.csv'), index=False)
def compare_with_best_submssion(config):
info('compare_with_best_submssion()')
cwd_slash = gen_cwd_slash(config)
best_submission = pd.read_csv(cwd_slash('submission_587.csv'), index_col=0)
current_submission = pd.read_csv(cwd_slash('test_predicted_corrected.csv'), index_col=0)
current_submission['best'] = best_submission['Predicted']
debug(f"index all equal = {np.all(current_submission.index.values == best_submission.index.values)}")
diff = current_submission.query('Predicted != best')
# DEBUG BEGIN
plot_imgs(
config,
diff.loc[[10 in (str_to_labels(row['Predicted']) + str_to_labels(row['best'])) for i, row in diff.iterrows()]],
folder='data/test_minimaps',
extension='jpg',
channel='green',
)
# DEBUG END
# debug(f"diff =\n{diff}")
save_path = './tmp/diff.csv'
diff.to_csv(save_path)
debug(f"saved to {save_path}")
def show_score_details(config, id_='94c0f350-bada-11e8-b2b9-ac1f6b6435d0'):
info('show_score_details()')
cwd_slash = gen_cwd_slash(config)
windowed_anno = pd.read_csv('./data/test_windowed.csv')
scores_mat = np.load(cwd_slash('test_windowed_scores.npy'))
idxs = windowed_anno.loc[windowed_anno['source_img_id'] == id_].index.values
print(pd.DataFrame(np.round(scores_mat[idxs, :], 3), index=windowed_anno.loc[idxs]['img_id']))
def aggregate_prediction_for_valid(config):
info('aggregate_prediction_for_valid()')
cwd_slash = gen_cwd_slash(config)
anno = pd.read_csv(cwd_slash('valid_windowed_anno_predicted.csv'))
score_cols = [str(class_id) for class_id in config['class_ids']]
anno_agg = anno.groupby('source_img_id')[score_cols].agg([np.mean, np.max])
result_df = pd.DataFrame(index=anno_agg.index)
for score_col in score_cols:
result_df[score_col] = (anno_agg[score_col, 'mean'] + anno_agg[score_col, 'amax']) / 2
# result_df[score_col] = anno_agg[score_col, 'mean']
print(result_df.head())
save_path = cwd_slash('valid_aggregated_prediction.csv')
result_df.to_csv(save_path)
debug(f"saved to {save_path}")
def aggregate_prediction_for_test(config):
info('aggregate_prediction_for_test()')
cwd_slash = gen_cwd_slash(config)
anno = pd.read_csv(cwd_slash('test_windowed_anno_predicted.csv'))
score_cols = [str(class_id) for class_id in config['class_ids']]
anno_agg = anno.groupby('source_img_id')[score_cols].agg([np.mean, np.max])
result_df = pd.DataFrame(index=anno_agg.index)
for score_col in score_cols:
result_df[score_col] = (anno_agg[score_col, 'mean'] + anno_agg[score_col, 'amax']) / 2
# result_df[score_col] = anno_agg[score_col, 'mean']
print(result_df.head())
save_path = cwd_slash('test_aggregated_prediction.csv')
result_df.to_csv(save_path)
debug(f"saved to {save_path}")
def calibrate_thresholds(config, epsilon=1e-7):
info('calibrate_thresholds()')
cwd_slash = gen_cwd_slash(config)
anno = pd.read_csv(config['path_to_valid_anno_cache'], index_col=0)
pred_df = pd.read_csv(cwd_slash('valid_aggregated_prediction.csv'), index_col=0).sort_index()
truth_rows = []
for id_, row in anno.iterrows():
labels = str_to_labels(row['Target'])
truth_row = np.array([class_id in labels for class_id in config['class_ids']]).astype(int)
truth_row = pd.Series(truth_row, name=id_)
truth_rows.append(truth_row)
truth_df = pd.concat(truth_rows, axis=1).transpose().sort_index()
truth_df.columns = config['class_ids']
macro_f1, details = np_macro_f1(pred_df.values, truth_df.values, config, return_details=True)
print(format_macro_f1_details(details, config))
debug(f"macro_f1 = {macro_f1}")
os.makedirs(cwd_slash('threshold_calibration_dfs'), exist_ok=True)
plt.figure(figsize=(20, 15))
threshold_rows = {}
for class_id in config['class_ids']:
y_pred = pred_df.iloc[:, class_id]
y = truth_df[class_id]
compare_df = pd.DataFrame({
'y_pred': y_pred,
'y': y,
})
compare_df = compare_df.sort_values('y_pred')
compare_df['tn'] = (1 - compare_df['y']).cumsum()
compare_df['fn'] = compare_df['y'].cumsum()
compare_df = compare_df.sort_values('y_pred', ascending=False)
compare_df['fp'] = np.concatenate([[0], (1 - compare_df['y']).cumsum()[:-1]])
compare_df['tp'] = np.concatenate([[0], compare_df['y'].cumsum()[:-1]])
compare_df['precision'] = compare_df['tp'] / (compare_df['tp'] + compare_df['fp'] + epsilon)
compare_df['recall'] = compare_df['tp'] / (compare_df['tp'] + compare_df['fn'] + epsilon)
compare_df['f1'] = 2 * compare_df['precision'] * compare_df['recall'] / (
compare_df['precision'] + compare_df['recall'] + epsilon
)
compare_df['f1_smoothed'] = np.convolve(compare_df['f1'], np.ones(1) / 1, mode='same')
best_row_idx = compare_df['f1_smoothed'].idxmax()
picked_threshold = compare_df['y_pred'][best_row_idx]
best_f1 = compare_df['f1'][best_row_idx]
threshold_rows[class_id] = compare_df.loc[best_row_idx].copy()
precisions, recalls, _ = sklearn.metrics.precision_recall_curve(y, y_pred)
threshold_rows[class_id]['rp_auc'] = auc = sklearn.metrics.auc(recalls, precisions)
plt.plot(
recalls,
precisions,
label=f"{class_id_to_label(class_id, config)} (rp_auc={auc:.4f})",
)
compare_df.to_csv(
cwd_slash('threshold_calibration_dfs', f"{class_id:02d}_t{picked_threshold:.4f}_f1{best_f1:.4f}.csv")
)
threshold_df = pd.DataFrame(threshold_rows).transpose()
threshold_df.index.name = 'class_id'
threshold_df['best_threshold'] = threshold_df['y_pred']
threshold_df['manual_top_n'] = np.nan
threshold_df['manual_top_n'][15] = 5
threshold_df['manual_top_n'][27] = 9
threshold_df['manual_top_n'][20] = 59
threshold_df['manual_top_n'][8] = 12
threshold_df['manual_top_n'][9] = 20
threshold_df['manual_top_n'][10] = 16
threshold_df['manual_top_n'][16] = 204
threshold_df['manual_top_n'][17] = 300
threshold_df = threshold_df[[
'best_threshold',
'manual_top_n',
'tn',
'fn',
'fp',
'tp',
'precision',
'recall',
'f1',
'f1_smoothed',
'rp_auc',
]]
print(threshold_df)
print(f"best total f1: {threshold_df['f1'].mean()}")
plt.legend()
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.title(f"{config['job_title']} (mean rp_auc={threshold_df['rp_auc'].mean():.4f})")
save_path = cwd_slash('recall_precision_curve.png')
plt.savefig(save_path, dpi=200)
debug(f"saved to {save_path}")
threshold_df.to_csv(cwd_slash('calibrated_threshold.csv'))
# for class_id in config['class_ids']:
# pd.DataFrame({'y_pred': pred_df[class_id].sort_values(), 'y':
def logit(x, epsilon=1e-15):
return x  # NOTE: this early return currently disables the logit transform below
x = np.clip(x, epsilon, 1 - epsilon)
return np.log(x / (1 - x))
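# --- Added illustration (not part of the original pipeline) ---
# A standalone sketch of the cumulative-sum threshold search used in
# calibrate_thresholds() above: sorting each class's scores once lets precision,
# recall and F1 be evaluated at every candidate threshold without re-thresholding
# the column for each candidate. `best_threshold_sketch` and its toy inputs are
# illustrative, not names from this codebase.
def best_threshold_sketch(y, y_pred, epsilon=1e-7):
    order = np.argsort(-y_pred)          # sort scores from highest to lowest
    y_sorted = y[order]
    tp = np.cumsum(y_sorted)             # positives captured above each cut
    fp = np.cumsum(1 - y_sorted)         # negatives captured above each cut
    fn = y.sum() - tp                    # positives missed below each cut
    precision = tp / (tp + fp + epsilon)
    recall = tp / (tp + fn + epsilon)
    f1 = 2 * precision * recall / (precision + recall + epsilon)
    best = np.argmax(f1)
    return y_pred[order][best], f1[best]
# Example: best_threshold_sketch(np.array([0, 1, 1, 0]), np.array([0.1, 0.8, 0.6, 0.4]))
# returns (0.6, ~1.0): cutting at 0.6 keeps both positives and rejects both negatives.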
# coding=utf-8
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for android_env.components.coordinator."""
import time
from absl.testing import absltest
from android_env.components import action_type
from android_env.components import coordinator
from android_env.components import errors
from android_env.components import task_manager
from android_env.components.simulators.emulator import emulator_simulator
import mock
import numpy as np
class CoordinatorTest(absltest.TestCase):
def setUp(self):
super().setUp()
self.addCleanup(mock.patch.stopall) # Disable previous patches.
self._simulator = mock.create_autospec(emulator_simulator.EmulatorSimulator)
self._task_manager = mock.create_autospec(task_manager.TaskManager)
self._coordinator = coordinator.Coordinator(
simulator=self._simulator,
task_manager=self._task_manager,
step_timeout_sec=2,
max_steps_per_sec=60,
periodic_restart_time_min=0)
def test_restart_simulator(self):
self._coordinator._restart_simulator()
def test_reset(self):
self._coordinator.reset_environment_state()
def test_lift_all_fingers(self):
self._simulator.num_fingers.return_value = 3
self._coordinator.reset_environment_state()
expected_lift_action = {
'action_type': np.array(action_type.ActionType.LIFT),
import tkinter as tk
from tkinter import filedialog
from tkinter import *
import tkinter.ttk as ttk
from tkinter.scrolledtext import ScrolledText
import PIL
from PIL import Image, ImageTk
import cv2
from functools import partial
import json
import math
import numpy as np
import os
import scipy
import sys
import time
import urllib
import OSMTGC
import tgc_tools
import tree_mapper
from usgs_lidar_parser import *
# Parameters
desired_visible_points_per_pixel = 1.0
lidar_sample = 1 # Use every Nths lidar point. 1 is use all, 10 is use one of out 10
lidar_to_disk = False
status_print_duration = 1.0 # Print progress every n seconds
# 1 Unassigned
# 2 Ground
# 3 Low Vegetation
# 4 Medium Vegetation
# 5 High Vegetation
# 6 Building
# 7 Noise
# 8 Model Key Points
# 9 Water
wanted_classifications = [2, 8] # These are considered "bare earth"
# Global Variables for the UI
rect = None
rectid = None
rectx0 = 0
recty0 = 0
rectx1 = 10
recty1 = 10
lower_x = 0
lower_y = 0
upper_x = 10
upper_y = 10
running_as_main = False
canvas = None
im_img = None
sat_canvas = None
sat_img = None
move = False
def normalize_image(im):
# Set Nans and Infs to minimum value
finite_pixels = im[np.isfinite(im)]
im[np.isnan(im)] = np.min(finite_pixels)
# Limit outlier pixels
# Use the median of valid pixels only to ensure that the contrast is good
im = np.clip(im, 0.0, 3.5*np.median(finite_pixels))
# Scale from 0.0 to 1.0
min_value = np.min(im)
max_value = np.max(im)
return (im - min_value) / (max_value - min_value)
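# Worked example (illustrative only): for a float32 image [[1, 2], [nan, 50]] the
# finite pixels are [1, 2, 50] with median 2, so the NaN becomes 1, the 50 outlier
# is clipped to 3.5 * 2 = 7, and the result is rescaled to [[0.0, ~0.17], [0.0, 1.0]].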
def createCanvasBinding():
global canvas
global move
global rect
global rectid
global rectx0
global rectx1
global recty0
global recty1
canvas.bind( "<Button-1>", startRect )
canvas.bind( "<ButtonRelease-1>", stopRect )
canvas.bind( "<Motion>", movingRect )
def startRect(event):
global canvas
global move
global rect
global rectid
global rectx0
global rectx1
global recty0
global recty1
move = True
rectx0 = canvas.canvasx(event.x)
recty0 = canvas.canvasy(event.y)
if rect is not None:
canvas.delete(rect)
rect = canvas.create_rectangle(
rectx0, recty0, rectx0, recty0, outline="#ff0000")
rectid = canvas.find_closest(rectx0, recty0, halo=2)
def movingRect(event):
global canvas
global move
global rectid
global rectx0
global rectx1
global recty0
global recty1
if move:
rectx1 = canvas.canvasx(event.x)
recty1 = canvas.canvasy(event.y)
canvas.coords(rectid, rectx0, recty0,
rectx1, recty1)
def stopRect(event):
global canvas
global move
global rectid
global rectx0
global rectx1
global recty0
global recty1
move = False
rectx1 = canvas.canvasx(event.x)
recty1 = canvas.canvasy(event.y)
canvas.coords(rectid, rectx0, recty0,
rectx1, recty1)
def closeWindow(main, bundle, input_size, canvas_size, printf):
global lower_x
global lower_y
global upper_x
global upper_y
main.destroy()
# TODO im.thumbnail may return the actual image size and not the resized size, investigate
# Need to determine the preview size
max_canvas_dimension = max([canvas_size[0], canvas_size[1]]) # Probably the same value
width_over_height_ratio = float(input_size[0])/float(input_size[1])
canvas_width = max_canvas_dimension * width_over_height_ratio
canvas_height = max_canvas_dimension
if width_over_height_ratio > 1.0: # Width is actually wider
tmp = canvas_width
canvas_width = max_canvas_dimension
canvas_height = max_canvas_dimension / width_over_height_ratio
width_ratio = float(input_size[0])/float(canvas_width)
height_ratio = float(input_size[1])/float(canvas_height)
lower_x = int(width_ratio*rectx0)
upper_x = int(width_ratio*rectx1)
if lower_x > upper_x:
tmp = lower_x
lower_x = upper_x
upper_x = tmp
lower_y = int(height_ratio*(canvas_size[1] - recty0))
upper_y = int(height_ratio*(canvas_size[1] - recty1))
if lower_y > upper_y:
tmp = lower_y
lower_y = upper_y
upper_y = tmp
generate_lidar_heightmap(*bundle, printf=printf)
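# Worked example of the mapping above (numbers are illustrative): a 4000x3000 lidar
# image previewed as a 600x450 thumbnail gives width_ratio = height_ratio ~= 6.67,
# so a rectangle corner drawn at canvas (90, 60) maps to image column ~600 and,
# because the preview was flipped top-to-bottom, to image row ~6.67 * (450 - 60) ~= 2600.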
def request_course_outline(course_image, sat_image=None, bundle=None, printf=print):
global running_as_main
global canvas
global im_img
global sat_canvas
global sat_img
input_size = (course_image.shape[1], course_image.shape[0]) # width, height
preview_size = (600, 600) # Size of image previews
# Create new window since this tool could be used as main
if running_as_main:
popup = tk.Tk()
else:
popup = tk.Toplevel()
popup.geometry("1250x700")
popup.wm_title("Select Course Boundaries")
# Convert and resize for display
im = Image.fromarray((255.0*course_image).astype(np.uint8), 'RGB')
im = im.transpose(Image.FLIP_TOP_BOTTOM)
im.thumbnail(preview_size, PIL.Image.LANCZOS) # Thumbnail is just resize but preserves aspect ratio
cim = ImageTk.PhotoImage(image=im)
instruction_frame = tk.Frame(popup)
B1 = ttk.Button(instruction_frame, text="Accept", command = partial(closeWindow, popup, bundle, input_size, im.size, printf))
label = ttk.Label(instruction_frame, text="Draw the rectangle around the course on the left (in black and white)\n \
Then close this window using the Accept Button.", justify=CENTER)
label.pack(fill="x", padx=10, pady=10)
B1.pack()
instruction_frame.pack()
# Show both images
image_frame = tk.Frame(popup)
image_frame.pack()
canvas = tk.Canvas(image_frame, width=preview_size[0], height=preview_size[1])
im_img = canvas.create_image(0,0,image=cim,anchor=tk.NW)
canvas.itemconfig(im_img, image=cim)
canvas.image = im_img
canvas.grid(row=0, column=0, sticky='w')
if sat_image is not None:
sim = Image.fromarray((sat_image).astype(np.uint8), 'RGB')
sim.thumbnail(preview_size, PIL.Image.LANCZOS) # Thumbnail is just resize but preserves aspect ratio
scim = ImageTk.PhotoImage(image=sim)
sat_canvas = tk.Canvas(image_frame, width=preview_size[0], height=preview_size[1])
sat_img = sat_canvas.create_image(0,0,image=scim,anchor=tk.NW)
sat_canvas.itemconfig(sat_img, image=scim)
sat_canvas.image = sat_img
sat_canvas.grid(row=0, column=preview_size[0]+10, sticky='e')
createCanvasBinding()
popup.mainloop()
def generate_lidar_previews(lidar_dir_path, sample_scale, output_dir_path, force_epsg=None, force_unit=None, printf=print):
# Create directory for intermediate files
tgc_tools.create_directory(output_dir_path)
# Use provided las or get las files
pc = load_usgs_directory(lidar_dir_path, force_epsg=force_epsg, force_unit=force_unit, printf=printf)
if pc is None:
# Can't do anything with nothing
return
image_width = math.ceil(pc.width/sample_scale)+1 # If image is exact multiple, then need one more pixel. Example: 1500m -> 750 pixels, @1500, 750 isn't a valid pixel otherwise
image_height = math.ceil(pc.height/sample_scale)+1
printf("Generating lidar intensity image")
im = np.full((image_height,image_width,1), math.nan, np.float32)
img_points = pc.pointsAsCV2(sample_scale)
num_points = len(img_points)
point_density = float(num_points) / (image_width * image_height)
visible_sampling = math.floor(point_density/desired_visible_points_per_pixel) # Roughly get 1 sample per pixel for the visible image
if visible_sampling < 1.0:
visible_sampling = 1
# Some pointclouds don't have intensity channel, so try to visualize elevation instead?
visualization_axis = 3
if pc.imin == pc.imax:
printf("No lidar intensity found, using elevation instead")
visualization_axis = 2
last_print_time = time.time()
for n, i in enumerate(img_points[0::visible_sampling]):
if time.time() > last_print_time + status_print_duration:
last_print_time = time.time()
printf(str(round(100.0*float(n*visible_sampling) / num_points, 2)) + "% visualizing lidar")
im[int(i[0]), int(i[1])] = i[visualization_axis]
# Download OpenStreetMaps Data
printf("Adding golf features to lidar data")
# Convert to RGB for pretty golf colors
im = normalize_image(im)
im = cv2.cvtColor(im, cv2.COLOR_GRAY2RGB)
# Use this data to draw features on the intensity image to help with masking
upper_left_enu = pc.ulENU()
lower_right_enu = pc.lrENU()
upper_left_latlon = pc.enuToLatLon(*upper_left_enu)
lower_right_latlon = pc.enuToLatLon(*lower_right_enu)
# Order is South, West, North, East
result = OSMTGC.getOSMData(lower_right_latlon[0], upper_left_latlon[1], upper_left_latlon[0], lower_right_latlon[1], printf=printf)
im = OSMTGC.addOSMToImage(result.ways, im, pc, sample_scale, printf=printf)
# Keep API out of code
mapquest_api_key = None
im_map = None
try:
this_file_directory = os.path.dirname(os.path.realpath(__file__))
with open(this_file_directory + os.sep + "MAPQUEST_API_KEY.txt", "r") as f:
mapquest_api_key = f.read()
except:
pass
if mapquest_api_key is not None:
# Grab a preview image approximately the same to help reference the lidar data.
# Set margin to be 1/8 of image size to get preview to about 1 pixel per two meters
origin_projected_coordinates = pc.origin
gps_center = pc.projToLatLon(origin_projected_coordinates[0] + pc.width / 2.0, origin_projected_coordinates[1] + pc.height / 2.0)
# Determine how zoomed in the map should be
zoom_level = 20 # Most zoomed in possible
max_dimension = max([image_width, image_height])
if sample_scale*max_dimension < 500:
zoom_level = 19 # roughly 437m
elif sample_scale*max_dimension < 900:
zoom_level = 18 # roughly 875m
elif sample_scale*max_dimension < 1800:
zoom_level = 17 # roughly 1750m
elif sample_scale*max_dimension < 3600:
zoom_level = 16 # roughly 3500m
elif sample_scale*max_dimension < 7000:
zoom_level = 15 # roughly 7000m
else:
zoom_level = 14 # Over 7000m
# Determine the aspect ratio
req_height = 1500
req_width = 1500
if max_dimension == image_width: # Shrink height
req_height = int(1500.0*float(image_height)/float(image_width))
else: # Shrink width
req_width = int(1500.0*float(image_width)/float(image_height))
img_url_request = "https://open.mapquestapi.com/staticmap/v5/map?key=MAPQUEST_API_KEY&scalebar=true&format=png¢er=" + \
str(gps_center[0]) + "," + str(gps_center[1]) + \
"&type=hyb&zoom=" + str(zoom_level) + "&size=" + str(req_width) + "," + str(req_height)
printf("Mapquest Image URL Request: " + img_url_request)
# Don't print the Mapquest API Key to users
img_url_request = img_url_request.replace("MAPQUEST_API_KEY", mapquest_api_key)
try:
# TODO switch to requests ?
with urllib.request.urlopen(img_url_request) as url:
map_image = url.read()
nparr = np.frombuffer(map_image, np.uint8)
im_map = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
im_map = cv2.cvtColor(im_map, cv2.COLOR_BGR2RGB)
except urllib.error.HTTPError as err:
printf("Could not get sat preview: " + str(err))
request_course_outline(im, im_map, bundle=(pc, img_points, sample_scale, output_dir_path, result), printf=printf)
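# Worked example of the zoom selection above: at sample_scale = 2 m/pixel and a
# 1000-pixel-wide area, sample_scale * max_dimension = 2000 m, which falls in the
# "< 3600" bucket, so zoom_level 16 (roughly 3500 m across) is requested.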
def generate_lidar_heightmap(pc, img_points, sample_scale, output_dir_path, osm_results=None, printf=print):
global lower_x
global lower_y
global upper_x
global upper_y
image_width = math.ceil(pc.width/sample_scale)+1 # If image is exact multiple, then need one more pixel. Example: 1500m -> 750 pixels, @1500, 750 isn't a valid pixel otherwise
image_height = math.ceil(pc.height/sample_scale)+1
printf("Generating heightmap")
om = np.full((image_height,image_width,1), math.nan, np.float32)
high_res_visual = np.full((image_height,image_width,1), math.nan, np.float32)
# Make sure selected limits are in bounds, otherwise limit them
# This can happen if the rectangle goes outside the image
lower_x = max(0, lower_x)
lower_y = max(0, lower_y)
upper_x = min(image_width, upper_x)
upper_y = min(image_height, upper_y)
## Start cropping data and saving it for future steps
# Save only the relevant points from the raw pointcloud
printf("Selecting only needed data from lidar")
llenu = pc.cv2ToENU(upper_y, lower_x, sample_scale)
urenu = pc.cv2ToENU(lower_y, upper_x, sample_scale)
# Remove the points not in the selection
# Use numpy to efficiently reduce the number of points we loop over to create the terrain image
selected_points = img_points[np.where(lower_y <= img_points[:,0])]
selected_points = selected_points[np.where(selected_points[:,0] < upper_y)]
selected_points = selected_points[np.where(lower_x <= selected_points[:,1])]
selected_points = selected_points[np.where(selected_points[:,1] < upper_x)]
# Remove points that aren't useful for ground heightmaps
ground_points = np.copy(selected_points) # Copy to preserve selected points for other uses like tree detection
ground_points = ground_points[np.isin(ground_points[:,4], wanted_classifications)]
if len(ground_points) == 0:
printf("\n\n\nSorry, this lidar data is not classified and I can't support it right now. Ask for help on the forum or your lidar provider if they have a classified version.")
printf("Classification is where they determine which points are the ground and which are trees, buildings, etc. I can't make a nice looking course without clean input.")
return
# Some pointclouds don't have intensity channel, so try to visualize elevation instead?
visualization_axis = 3
if pc.imin == pc.imax:
printf("No lidar intensity found, using elevation instead")
visualization_axis = 2
# Generate heightmap only for the selected area
num_points = len(ground_points)
last_print_time = time.time()
for n, i in enumerate(ground_points[0::lidar_sample]):
if time.time() > last_print_time + status_print_duration:
last_print_time = time.time()
printf(str(round(100.0*float(n*lidar_sample) / num_points, 2)) + "% generating heightmap")
c = (int(i[0]), int(i[1]))
# Add visual data
value = high_res_visual[c]
if math.isnan(value):
value = i[visualization_axis]
else:
value = (i[visualization_axis] - value) * 0.3 + value
high_res_visual[c] = value
# Add elevation data
elevation = om[c]
if math.isnan(elevation):
elevation = i[2]
else:
alpha = 0.1
if i[2] < elevation:
# Trend lower faster
alpha = 0.4
elevation = (i[2] - elevation) * alpha + elevation
om[c] = elevation
printf("Finished generating heightmap")
printf("Starting tree detection")
trees = []
# Make a maximum heightmap
# Must be around 1 meter grid size and a power of 2 from sample_scale
tree_ratio = 2**(math.ceil(math.log2(1.0/sample_scale)))
tree_scale = sample_scale * tree_ratio
printf("Tree ratio is: " + str(tree_ratio))
treemap = np.full((int(image_height/tree_ratio),int(image_width/tree_ratio),1), math.nan, np.float32)
num_points = len(selected_points)
last_print_time = time.time()
for n, i in enumerate(selected_points[0::lidar_sample]):
if time.time() > last_print_time + status_print_duration:
last_print_time = time.time()
printf(str(round(100.0*float(n*lidar_sample) / num_points, 2)) + "% generating object map")
c = (int(i[0]/tree_ratio), int(i[1]/tree_ratio))
# Add elevation data
if math.isnan(treemap[c]) or i[2] > treemap[c]:
# Just take the maximum value possible for this pixel
treemap[c] = i[2]
# Make a resized copy of the ground height that matches the object detection image size
groundmap = np.copy(om[lower_y:upper_y, lower_x:upper_x])
groundmap = np.array(Image.fromarray(groundmap[:,:,0], mode='F').resize((int(groundmap.shape[1]/tree_ratio), int(groundmap.shape[0]/tree_ratio)), resample=Image.NEAREST))
groundmap = np.expand_dims(groundmap, axis=2)
import numpy as np
import scipy
import matplotlib.pyplot as plt
from astropy.io import fits
from .lightcurve import KeplerLightCurve, LightCurve
from .utils import KeplerQualityFlags, plot_image
__all__ = ['KeplerTargetPixelFile']
class TargetPixelFile(object):
"""
TargetPixelFile class
"""
def to_lightcurve(self, method=None, subtract_bkg=False, **kwargs):
"""Returns a raw light curve of the TPF.
Attributes
----------
method : str or None
Method to detrend the light curve.
kwargs : dict
Keyword arguments passed to the detrending method.
Returns
-------
lc : LightCurve object
Array containing the summed or detrended flux within the aperture
for each cadence.
"""
pass
class KeplerTargetPixelFile(TargetPixelFile):
"""
Defines a TargetPixelFile class for the Kepler/K2 Mission.
Enables extraction of raw lightcurves and centroid positions.
Attributes
----------
path : str
Path to fits file.
quality_bitmask : str or int
Bitmask specifying quality flags of cadences that should be ignored.
If a string is passed, it has the following meaning:
* "default": recommended quality mask
* "conservative": removes more flags, known to remove good data
* "hard": removes all data that has been flagged
References
----------
.. [1] Kepler: A Search for Terrestrial Planets. Kepler Archive Manual.
http://archive.stsci.edu/kepler/manuals/archive_manual.pdf
"""
def __init__(self, path, quality_bitmask=KeplerQualityFlags.DEFAULT_BITMASK,
**kwargs):
self.path = path
self.hdu = fits.open(self.path, **kwargs)
self.quality_bitmask = quality_bitmask
self.quality_mask = self._quality_mask(quality_bitmask)
def _quality_mask(self, bitmask):
"""Returns a boolean mask which flags all good-quality cadences.
Parameters
----------
bitmask : str or int
Bitmask. See ref. [1], table 2-3.
"""
if bitmask is None:
return np.ones(len(self.hdu[1].data['TIME']), dtype=bool)
elif isinstance(bitmask, str):
bitmask = KeplerQualityFlags.OPTIONS[bitmask]
return (self.hdu[1].data['QUALITY'] & bitmask) == 0
def header(self, ext=0):
"""Returns the header for a given extension."""
return self.hdu[ext].header
@property
def keplerid(self):
return self.header()['KEPLERID']
@property
def module(self):
return self.header()['MODULE']
@property
def channel(self):
return self.header()['CHANNEL']
@property
def output(self):
return self.header()['OUTPUT']
@property
def column(self):
return self.hdu['TARGETTABLES'].header['1CRV5P']
@property
def row(self):
return self.hdu['TARGETTABLES'].header['2CRV5P']
@property
def pipeline_mask(self):
"""Returns the aperture mask used by the Kepler pipeline"""
return self.hdu[-1].data > 2
@property
def n_good_cadences(self):
"""Returns the number of good-quality cadences."""
return self.quality_mask.sum()
@property
def shape(self):
"""Return the cube dimension shape."""
return self.flux.shape
@property
def time(self):
"""Returns the time for all good-quality cadences."""
return self.hdu[1].data['TIME'][self.quality_mask]
@property
def cadenceno(self):
"""Return the cadence number for all good-quality cadences."""
return self.hdu[1].data['CADENCENO'][self.quality_mask]
@property
def nan_time_mask(self):
"""Returns a boolean mask flagging cadences whose time is `nan`."""
return ~np.isfinite(self.time)
@property
def flux(self):
"""Returns the flux for all good-quality cadences."""
return self.hdu[1].data['FLUX'][self.quality_mask]
@property
def flux_err(self):
"""Returns the flux uncertainty for all good-quality cadences."""
return self.hdu[1].data['FLUX_ERR'][self.quality_mask]
@property
def flux_bkg(self):
"""Returns the background flux for all good-quality cadences."""
return self.hdu[1].data['FLUX_BKG'][self.quality_mask]
@property
def flux_bkg_err(self):
return self.hdu[1].data['FLUX_BKG_ERR'][self.quality_mask]
@property
def quality(self):
"""Returns the quality flag integer of every good cadence."""
return self.hdu[1].data['QUALITY'][self.quality_mask]
@property
def quarter(self):
"""Quarter number"""
try:
return self.header(ext=0)['QUARTER']
except KeyError:
return None
@property
def campaign(self):
"""Campaign number"""
try:
return self.header(ext=0)['CAMPAIGN']
except KeyError:
return None
@property
def mission(self):
"""Mission name"""
return self.header(ext=0)['MISSION']
def to_fits(self):
"""Save the TPF to fits"""
raise NotImplementedError
def to_lightcurve(self, aperture_mask=None):
"""Performs aperture photometry.
Attributes
----------
aperture_mask : array-like
A boolean array describing the aperture such that `False` means
that the pixel will be masked out.
The default behaviour is to use all pixels.
Returns
-------
lc : KeplerLightCurve object
Array containing the summed flux within the aperture for each
cadence.
"""
if aperture_mask is None:
mask = ~np.isnan(self.hdu[1].data['FLUX'][100])
aperture_mask = np.ones((self.shape[1], self.shape[2]),
dtype=bool) * mask
centroid_col, centroid_row = self.centroids(aperture_mask)
return KeplerLightCurve(flux=np.nansum(self.flux[:, aperture_mask], axis=1),
time=self.time,
flux_err=np.nansum(self.flux_err[:, aperture_mask]**2, axis=1)**0.5,
centroid_col=centroid_col,
centroid_row=centroid_row,
quality=self.quality,
channel=self.channel,
campaign=self.campaign,
quarter=self.quarter,
mission=self.mission,
cadenceno=self.cadenceno)
def centroids(self, aperture_mask=None):
"""Returns centroids based on sample moments.
Parameters
----------
aperture_mask : array-like or None
A boolean array describing the aperture such that `False` means
that the pixel will be masked out. The default behaviour is to
use all pixels.
Returns
-------
col_centr, row_centr : tuple
Arrays containing centroids for column and row at each cadence
"""
if aperture_mask is None:
mask = ~np.isnan(self.hdu[1].data['FLUX'][100])
aperture_mask = np.ones((self.shape[1], self.shape[2]),
dtype=bool) * mask
yy, xx = np.indices(self.shape[1:]) + 0.5
yy = self.row + yy
xx = self.column + xx
total_flux = np.nansum(self.flux[:, aperture_mask], axis=1)
col_centr = np.nansum(xx * aperture_mask * self.flux, axis=(1, 2)) / total_flux
row_centr = np.nansum(yy * aperture_mask * self.flux, axis=(1, 2)) / total_flux
return col_centr, row_centr
def plot(self, frame=None, cadenceno=None, **kwargs):
"""
Plot a target pixel file at a given frame (index) or cadence number.
Parameters
----------
frame : int
Frame number.
cadenceno : int
Alternatively, a cadence number can be provided.
This argument has priority over frame number.
"""
if cadenceno is not None:
frame = np.argwhere(cadenceno == self.cadenceno)[0][0]
elif frame is None:
raise ValueError("Either frame or cadenceno must be provided.")
pflux = self.flux[frame]
plot_image(pflux, title='Kepler ID: {}'.format(self.keplerid),
extent=(self.column, self.column + self.shape[2],
self.row, self.row + self.shape[1]), **kwargs)
def get_bkg_lightcurve(self, aperture_mask=None):
if aperture_mask is None:
mask = self.hdu[1].data['FLUX'][100] == self.hdu[1].data['FLUX'][100]
aperture_mask = np.ones((self.shape[1], self.shape[2]), dtype=bool) * mask
return LightCurve(flux=np.nansum(self.flux_bkg[:, aperture_mask], axis=1), time=self.time)
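# Illustrative usage sketch (added for clarity; the file path is a placeholder):
#
#   tpf = KeplerTargetPixelFile('/path/to/target_pixel_file.fits', quality_bitmask='hard')
#   lc = tpf.to_lightcurve(aperture_mask=tpf.pipeline_mask)
#   col_centr, row_centr = tpf.centroids(aperture_mask=tpf.pipeline_mask)
#   bkg_lc = tpf.get_bkg_lightcurve(aperture_mask=tpf.pipeline_mask)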
import pytest
import numpy as np
import torch
import time
from htvlearn.delaunay import Delaunay
from htvlearn.plots.plot_cpwl import Plot
from htvlearn.data import (
BoxSpline,
SimplicialSpline,
CutPyramid,
SimpleJunction,
DistortedGrid,
Data
)
@pytest.fixture(autouse=True)
def set_seed(request):
"""Set random seed."""
# Code that will run before
seed = request.config.getoption("--seed")
torch.manual_seed(int(seed))
torch.cuda.manual_seed_all(int(seed))
np.random.seed(int(seed))
# toy datasets that have an htv attribute
toy_dataset_list = [BoxSpline, CutPyramid, SimpleJunction]
dataset_dict = {
'toy': toy_dataset_list,
'all': toy_dataset_list + [SimplicialSpline, DistortedGrid],
'simple_junction': [SimpleJunction],
'distorted_grid': [DistortedGrid]
}
# receives dataset as parameter
@pytest.fixture(scope="module")
def dataset(request):
dt = request.param
ret_dict = {
'name': dt.__name__,
'points': dt.points.copy(),
'values': dt.values.copy()
}
if hasattr(dt, 'htv'):
ret_dict['htv'] = dt.htv
return ret_dict
@pytest.fixture(scope='module')
def skip_plot(request):
if 'plot' not in request.config.getoption("-m"):
raise pytest.skip('Skipping!')
@pytest.mark.filterwarnings("ignore::UserWarning")
class TestDelaunay:
@pytest.mark.plot
@pytest.mark.parametrize("dataset", dataset_dict["all"], indirect=True)
def test_plot_delaunay(self, dataset, request):
""" """
plot_arg = request.config.getoption("--plot")
if plot_arg is None or plot_arg not in dataset['name']:
pytest.skip()
cpwl = Delaunay(**dataset)
plot = Plot(log_dir='/tmp')
plot.plot_delaunay(cpwl)
def test_is_admissible(self):
points, values = Data.init_zero_boundary_planes()
values = Data.add_linear_func(points, values)
cpwl = Delaunay(points=points, values=values)
assert cpwl.is_admissible is True
@pytest.mark.parametrize("dataset", dataset_dict["toy"], indirect=True)
def test_exact_htv(self, dataset):
""" """
cpwl = Delaunay(**dataset)
assert np.allclose(cpwl.get_exact_HTV(), dataset['htv'])
@pytest.mark.parametrize("dataset", dataset_dict["all"], indirect=True)
def test_exact_grad_trace_htv(self, dataset):
""" """
if dataset['name'].endswith('Junction') or \
dataset['name'].endswith('DistortedGrid'):
cpwl = Delaunay(**dataset)
else:
cpwl = Delaunay(**dataset, add_extreme_points=True)
h = (cpwl.tri.points[:, 0].max() - cpwl.tri.points[:, 0].min()) / 5000
exact_grad_trace_htv = cpwl.get_exact_grad_trace_HTV(h=h)
exact_htv = cpwl.get_exact_HTV()
print('(Discrete, Exact) : ({:.4f}, {:.4f})'
.format(exact_grad_trace_htv, exact_htv))
assert np.allclose(exact_grad_trace_htv, exact_htv, rtol=1e-3)
@pytest.mark.parametrize("dataset", dataset_dict["all"], indirect=True)
def test_lefkimiattis_HTV(self, dataset):
""" """
if dataset['name'].endswith('Junction') or \
dataset['name'].endswith('DistortedGrid'):
cpwl = Delaunay(**dataset)
else:
cpwl = Delaunay(**dataset, add_extreme_points=True)
h = (cpwl.tri.points[:, 0].max() - cpwl.tri.points[:, 0].min()) / 5000
lefkimiattis_htv = cpwl.get_lefkimiattis_schatten_HTV(h=h)
exact_htv = cpwl.get_exact_HTV()
print('(Discrete, Exact) : ({:.4f}, {:.4f})'
.format(lefkimiattis_htv, exact_htv))
assert not np.allclose(lefkimiattis_htv, exact_htv, rtol=1e-3)
@pytest.mark.parametrize("dataset", dataset_dict["all"], indirect=True)
def test_lefkimiattis_trace_HTV(self, dataset):
""" """
if dataset['name'].endswith('Junction') or \
dataset['name'].endswith('DistortedGrid'):
cpwl = Delaunay(**dataset)
else:
cpwl = Delaunay(**dataset, add_extreme_points=True)
h = (cpwl.tri.points[:, 0].max() - cpwl.tri.points[:, 0].min()) / 5000
lefkimiattis_trace_htv = cpwl.get_lefkimiattis_trace_HTV(h=h)
exact_htv = cpwl.get_exact_HTV()
print('(Discrete, Exact) : ({:.4f}, {:.4f})'
.format(lefkimiattis_trace_htv, exact_htv))
assert np.allclose(lefkimiattis_trace_htv, exact_htv, rtol=2e-3)
@pytest.mark.parametrize("dataset", dataset_dict["all"], indirect=True)
def test_exact_grad_schatten_HTV(self, dataset):
""" """
if dataset['name'].endswith('Junction') or \
dataset['name'].endswith('DistortedGrid'):
cpwl = Delaunay(**dataset)
else:
cpwl = Delaunay(**dataset, add_extreme_points=True)
h = (cpwl.tri.points[:, 0].max() - cpwl.tri.points[:, 0].min()) / 5000
exact_grad_schatten_htv = cpwl.get_exact_grad_schatten_HTV(h=h)
exact_htv = cpwl.get_exact_HTV()
print('(Discrete, Exact) : ({:.4f}, {:.4f})'
.format(exact_grad_schatten_htv, exact_htv))
assert not np.allclose(exact_grad_schatten_htv, exact_htv, rtol=1e-3)
@pytest.mark.parametrize("dataset",
dataset_dict["simple_junction"],
indirect=True)
def test_simple_junction(self, dataset):
""" """
cpwl = Delaunay(**dataset)
assert np.array_equal(cpwl.tri.points, dataset['points'])
assert np.array_equal(cpwl.tri.values, dataset['values'])
pos_mask = (cpwl.tri.simplices_affine_coeff[:, 0] > 0)
assert np.allclose(
(cpwl.tri.simplices_affine_coeff[np.where(pos_mask)[0], :] -
SimpleJunction.a1_affine_coeff[np.newaxis, :]),
np.zeros((np.sum(pos_mask), 3)))
assert np.allclose(
(cpwl.tri.simplices_affine_coeff[np.where(~pos_mask)[0], :] -
SimpleJunction.a2_affine_coeff[np.newaxis, :]),
np.zeros((np.sum(pos_mask), 3)))
grid = cpwl.get_grid(h=0.01)
z, x_grad = cpwl.evaluate_with_grad(grid.x)
assert np.allclose(
(np.abs(x_grad) -
SimpleJunction.a1_affine_coeff[np.newaxis, 0:2]),
np.zeros_like(x_grad))
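# Quick manual check mirroring the tests above (illustrative only, same imports assumed):
#
#   cpwl = Delaunay(points=SimpleJunction.points, values=SimpleJunction.values)
#   print(cpwl.get_exact_HTV(), SimpleJunction.htv)  # the two values should agree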
import numpy as np
import matplotlib.pyplot as pyplot
import h5py
import scipy
from PIL import Image
from scipy import ndimage
# extras for debugging
import math
# dataset loader
import utils
# dataset parameters
training_path = './training_data' # path of training data containing class sub-directories (image files)
image_size = 128 # length and width to uniformly format training data
classes = ['apple', 'orange', 'banana'] # classes of images to classify
c_len = len(classes) # number of classes to be used for training
validation_size = 0.2 # randomly chosen 20% of training data to be used as validation data
# model parameters
iteration_count = 1000 # number of times to apply gradient descent
learning_rate = 0.005 # size of gradient step
show_cost = True # show cost every 100 iterations
# loading data_set object
data_set = utils.read_data_sets(training_path, image_size, classes, validation_size)
# designating training objects
original_training_images = data_set.train.images # image np.array w/ shape: (image_size, image_size, channel_depth)
original_training_labels = data_set.train.labels # class label array (exempli gratia '[1.0, 0, 0]' from apple)
training_class_set = data_set.train.class_set # class label string array (e.g. 'apple')
training_file_name = data_set.train.image_names # original unique image file names
# designating validation objects
original_validation_images = data_set.valid.images
original_validation_labels = data_set.valid.labels
validation_class_set = data_set.valid.class_set
validation_file_name = data_set.valid.image_names
"""
Reshaping data arrays using matrix transposition
flattening color pixels to single array using transpose function of image pixel matrix
*_images shape: (image_size * image_size * channel_depth, data_set_size)
*_labels shape: (data_set_size, channel_depth)
"""
training_images = original_training_images.reshape(original_training_images.shape[0], -1).T
validation_images = original_validation_images.reshape(original_validation_images.shape[0], -1).T
training_labels = original_training_labels.T
validation_labels = original_validation_labels.T
# data is now properly formatted and defined respectively
def sigmoid(z):
"""
Computing the sigmoid of z
Parameters:
-- z = w^T * x^i + b
-- w^T: specific weight associated with neuron index from previous layer
-- x^i: specific neuron value from previous layer
-- b: bias associated with neuron
Return:
s: result of applying sigmoid activation function (domain in R, returns monotonically increasing value between 0 and 1)
s = 1 / (1 + e^-z)
"""
s = 1 / (1 + np.exp(-z)) #definition of the sigmoid function
return s
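# Worked example: sigmoid(0) returns 0.5, and sigmoid(np.array([-2.0, 0.0, 2.0]))
# returns approximately [0.119, 0.5, 0.881].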
def init_zero(dimension):
"""
Parameters:
-- dimension: the length of matrix to be initialized
Initializes:
-- w (weight array): zero array w/ shape: (image_size * image_size * channel_depth, number of classes)
-- b (bias value): as zero
"""
w = np.zeros(shape=(dimension, 3))
b = 0
return w, b
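# --- Added illustration ---
# A minimal sketch (not the training loop of this script) of one forward/backward
# pass for the sigmoid classifier set up above. Shapes follow this file's
# conventions: X is (features, m), Y is (classes, m), w is (features, classes),
# b is a scalar. `propagate_sketch` is a new name, not an existing function here.
def propagate_sketch(w, b, X, Y):
    m = X.shape[1]
    A = sigmoid(np.dot(w.T, X) + b)      # forward pass: per-class activations, shape (classes, m)
    cost = -np.sum(Y * np.log(A) + (1 - Y) * np.log(1 - A)) / m  # cross-entropy cost
    dw = np.dot(X, (A - Y).T) / m        # gradient w.r.t. the weights, shape (features, classes)
    db = np.sum(A - Y) / m               # gradient w.r.t. the shared bias
    return dw, db, cost
# A single gradient-descent update would then be:
#   w = w - learning_rate * dw
#   b = b - learning_rate * db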
"""
Created on Thu Jan 26 17:04:11 2017
@author: <NAME>, <EMAIL>
"""
#%matplotlib inline
import numpy as np
import pandas as pd
import dicom
import os
import scipy.ndimage as ndimage
import matplotlib.pyplot as plt
import scipy.ndimage # added for scaling
import cv2
import time
import glob
from skimage import measure, morphology, segmentation
import SimpleITK as sitk
RESIZE_SPACING = [2,2,2] # z, y, x (x & y MUST be the same)
RESOLUTION_STR = "2x2x2"
img_rows = 448
img_cols = 448 # global values
DO_NOT_USE_SEGMENTED = True
#STAGE = "stage1"
STAGE_DIR_BASE = "../input/%s/" # on one cluster we had input_shared
LUNA_MASKS_DIR = "../luna/data/original_lung_masks/"
luna_subset = 0 # initial
LUNA_BASE_DIR = "../luna/data/original_lungs/subset%s/" # added on AWS; data as well
LUNA_DIR = LUNA_BASE_DIR % luna_subset
CSVFILES = "../luna/data/original_lungs/CSVFILES/%s"
LUNA_ANNOTATIONS = CSVFILES % "annotations.csv"
LUNA_CANDIDATES = CSVFILES % "candidates.csv"
# Load the scans in given folder path (loads the most recent acquisition)
def load_scan(path):
slices = [dicom.read_file(path + '/' + s) for s in os.listdir(path)]
#slices.sort(key = lambda x: int(x.InstanceNumber))
acquisitions = [x.AcquisitionNumber for x in slices]
vals, counts = np.unique(acquisitions, return_counts=True)
vals = vals[::-1] # reverse order so the later acquisitions come first (np.unique returns the values in ascending order)
counts = counts[::-1]
## take the acquistions that has more entries; if these are identical take the later entrye
acq_val_sel = vals[np.argmax(counts)]
##acquisitions = sorted(np.unique(acquisitions), reverse=True)
if len(vals) > 1:
print ("WARNING ##########: MULTIPLE acquisitions & counts, acq_val_sel, path: ", vals, counts, acq_val_sel, path)
slices2= [x for x in slices if x.AcquisitionNumber == acq_val_sel]
slices = slices2
## ONE path includes 2 acquisitions (2 sets); take the latter acquisition only, which typically is better than the first/previous ones.
## example of the '../input/stage1/b8bb02d229361a623a4dc57aa0e5c485'
#slices.sort(key = lambda x: int(x.ImagePositionPatient[2])) # from v 8, BUG should be float
slices.sort(key = lambda x: float(x.ImagePositionPatient[2])) # from v 9
try:
slice_thickness = np.abs(slices[0].ImagePositionPatient[2] - slices[1].ImagePositionPatient[2])
except:
slice_thickness = np.abs(slices[0].SliceLocation - slices[1].SliceLocation)
for s in slices:
s.SliceThickness = slice_thickness
return slices
def get_3d_data_slices(slices): # get data in Hunsfield Units
slices.sort(key = lambda x: float(x.ImagePositionPatient[2])) # from v 9
image = np.stack([s.pixel_array for s in slices])
image = image.astype(np.int16) # ensure int16 (it may be here uint16 for some images )
image[image == -2000] = 0  # correcting cylindrical bound entries to 0
# Convert to Hounsfield units (HU)
# The intercept is usually -1024
for slice_number in range(len(slices)): # from v 8
intercept = slices[slice_number].RescaleIntercept
slope = slices[slice_number].RescaleSlope
if slope != 1: # added 16 Jan 2016, evening
image[slice_number] = slope * image[slice_number].astype(np.float64)
image[slice_number] = image[slice_number].astype(np.int16)
image[slice_number] += np.int16(intercept)
return np.array(image, dtype=np.int16)
def get_pixels_hu(slices):
image = np.stack([s.pixel_array for s in slices])
image = image.astype(np.int16)
# Set outside-of-scan pixels to 0
# The intercept is usually -1024, so air is approximately 0
image[image == -2000] = 0
# Convert to Hounsfield units (HU)
### slope can differ per slice -- so do it individually (case in point black_tset, slices 95 vs 96)
### Changes/correction - 31.01.2017
for slice_number in range(len(slices)):
intercept = slices[slice_number].RescaleIntercept
slope = slices[slice_number].RescaleSlope
if slope != 1:
image[slice_number] = slope * image[slice_number].astype(np.float64)
image[slice_number] = image[slice_number].astype(np.int16)
image[slice_number] += np.int16(intercept)
return np.array(image, dtype=np.int16)
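# Worked example of the HU conversion above: with the usual rescale slope 1 and
# intercept -1024, a raw pixel of 1024 maps to 0 HU (water), while the sentinel
# -2000 pixels outside the circular field of view are zeroed first and therefore
# end up at -1024 HU, i.e. air.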
MARKER_INTERNAL_THRESH = -400
MARKER_FRAME_WIDTH = 9 # 9 seems OK for the half special case ...
def generate_markers(image):
#Creation of the internal Marker
useTestPlot = False
if useTestPlot:
timg = image
plt.imshow(timg, cmap='gray')
plt.show()
add_frame_vertical = True
if add_frame_vertical: # add frame for potentially closing the lungs that touch the edge, but only vertically
fw = MARKER_FRAME_WIDTH # frame width (it looks that 2 is the minimum width for the algorithms implemented here, namely the first 2 operations for the marker_internal)
xdim = image.shape[1]
#ydim = image.shape[0]
img2 = np.copy(image)
#y3 = ydim // 3
img2 [:, 0] = -1024
img2 [:, 1:fw] = 0
img2 [:, xdim-1:xdim] = -1024
img2 [:, xdim-fw:xdim-1] = 0
marker_internal = img2 < MARKER_INTERNAL_THRESH
else:
marker_internal = image < MARKER_INTERNAL_THRESH # was -400
useTestPlot = False
if useTestPlot:
timg = marker_internal
plt.imshow(timg, cmap='gray')
plt.show()
correct_edges2 = False ## NOT a good idea - no added value
if correct_edges2:
marker_internal[0,:] = 0
marker_internal[:,0] = 0
#marker_internal[:,1] = True
#marker_internal[:,2] = True
marker_internal[511,:] = 0
marker_internal[:,511] = 0
marker_internal = segmentation.clear_border(marker_internal, buffer_size=0)
marker_internal_labels = measure.label(marker_internal)
areas = [r.area for r in measure.regionprops(marker_internal_labels)]
areas.sort()
if len(areas) > 2:
for region in measure.regionprops(marker_internal_labels):
if region.area < areas[-2]:
for coordinates in region.coords:
marker_internal_labels[coordinates[0], coordinates[1]] = 0
marker_internal = marker_internal_labels > 0
#Creation of the external Marker
external_a = ndimage.binary_dilation(marker_internal, iterations=10) # was 10
external_b = ndimage.binary_dilation(marker_internal, iterations=55) # was 55
marker_external = external_b ^ external_a
#Creation of the Watershed Marker matrix
#marker_watershed = np.zeros((512, 512), dtype=np.int) # origi
marker_watershed = np.zeros((marker_external.shape), dtype=np.int)
marker_watershed += marker_internal * 255
marker_watershed += marker_external * 128
return marker_internal, marker_external, marker_watershed
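# Note (added for clarity): the returned marker_watershed image holds three grey
# levels - 255 inside the candidate lung regions (internal marker), 128 on the
# dilated ring around them (external marker) and 0 elsewhere - which is the seed
# layout that morphology.watershed() consumes in seperate_lungs() below.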
# Some of the starting Code is taken from ArnavJain, since it's more readable then my own
def generate_markers_3d(image):
#Creation of the internal Marker
marker_internal = image < -400
marker_internal_labels = np.zeros(image.shape).astype(np.int16)
for i in range(marker_internal.shape[0]):
marker_internal[i] = segmentation.clear_border(marker_internal[i])
marker_internal_labels[i] = measure.label(marker_internal[i])
#areas = [r.area for r in measure.regionprops(marker_internal_labels)]
areas = [r.area for i in range(marker_internal.shape[0]) for r in measure.regionprops(marker_internal_labels[i])]
for i in range(marker_internal.shape[0]):
areas = [r.area for r in measure.regionprops(marker_internal_labels[i])]
areas.sort()
if len(areas) > 2:
for region in measure.regionprops(marker_internal_labels[i]):
if region.area < areas[-2]:
for coordinates in region.coords:
marker_internal_labels[i, coordinates[0], coordinates[1]] = 0
marker_internal = marker_internal_labels > 0
#Creation of the external Marker
# 3x3 structuring element with connectivity 1, used by default
struct1 = ndimage.generate_binary_structure(2, 1)
struct1 = struct1[np.newaxis,:,:] # expand by z axis .
external_a = ndimage.binary_dilation(marker_internal, structure=struct1, iterations=10)
external_b = ndimage.binary_dilation(marker_internal, structure=struct1, iterations=55)
marker_external = external_b ^ external_a
#Creation of the Watershed Marker matrix
#marker_watershed = np.zeros((512, 512), dtype=np.int) # origi
marker_watershed = np.zeros((marker_external.shape), dtype=np.int)
marker_watershed += marker_internal * 255
marker_watershed += marker_external * 128
return marker_internal, marker_external, marker_watershed
BINARY_CLOSING_SIZE = 7 #was 7 before final; 5 for disk seems sufficient - for safety let's go with 6 or even 7
def seperate_lungs(image):
#Creation of the markers as shown above:
marker_internal, marker_external, marker_watershed = generate_markers(image)
#Creation of the Sobel-Gradient
sobel_filtered_dx = ndimage.sobel(image, 1)
sobel_filtered_dy = ndimage.sobel(image, 0)
sobel_gradient = np.hypot(sobel_filtered_dx, sobel_filtered_dy)
sobel_gradient *= 255.0 / np.max(sobel_gradient)
#Watershed algorithm
watershed = morphology.watershed(sobel_gradient, marker_watershed)
#Reducing the image created by the Watershed algorithm to its outline
outline = ndimage.morphological_gradient(watershed, size=(3,3))
outline = outline.astype(bool)
#Performing Black-Tophat Morphology for reinclusion
#Creation of the disk-kernel and increasing its size a bit
blackhat_struct = [[0, 0, 1, 1, 1, 0, 0],
[0, 1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[0, 1, 1, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 0, 0]]
blackhat_struct = ndimage.iterate_structure(blackhat_struct, 8)
#Perform the Black-Hat
outline += ndimage.black_tophat(outline, structure=blackhat_struct)
#Use the internal marker and the Outline that was just created to generate the lungfilter
lungfilter = np.bitwise_or(marker_internal, outline)
#Close holes in the lungfilter
#fill_holes is not used here, since in some slices the heart would be reincluded by accident
##structure = np.ones((BINARY_CLOSING_SIZE,BINARY_CLOSING_SIZE)) # 5 is not enough, 7 is
structure = morphology.disk(BINARY_CLOSING_SIZE) # better , 5 seems sufficient, we use 7 for safety/just in case
lungfilter = ndimage.morphology.binary_closing(lungfilter, structure=structure, iterations=3) #, iterations=3) # was structure=np.ones((5,5))
### NOTE if no iterattions, i.e. default 1 we get holes within lungs for the disk(5) and perhaps more
#Apply the lungfilter (note the filtered areas being assigned -2000 HU)
segmented = np.where(lungfilter == 1, image, -2000*np.ones((512, 512))) ### was -2000
return segmented, lungfilter, outline, watershed, sobel_gradient, marker_internal, marker_external, marker_watershed
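# Illustrative usage on a single slice (sketch only; patient_id and the slice index
# are placeholders):
#
#   slices = load_scan((STAGE_DIR_BASE % "stage1") + patient_id)
#   image_hu = get_pixels_hu(slices)
#   segmented, lungfilter, outline, watershed, sobel_gradient, mi, me, mw = seperate_lungs(image_hu[80])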
def rescale_n(n,reduce_factor):
return max( 1, int(round(n / reduce_factor)))
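# Example: for a 256x256 slice, reduce_factor = 512/256 = 2 (as computed in
# seperate_lungs_cv2() below), so rescale_n(8, 2) returns 4 and rescale_n(1, 2)
# returns 1; the max(1, ...) floor keeps structuring elements from collapsing to zero size.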
def seperate_lungs_cv2(image): # for increased speed
#Creation of the markers as shown above:
marker_internal, marker_external, marker_watershed = generate_markers(image)
#image_size = image.shape[0]
reduce_factor = 512 / image.shape[0]
#Creation of the Sobel-Gradient
sobel_filtered_dx = ndimage.sobel(image, 1)
sobel_filtered_dy = ndimage.sobel(image, 0)
sobel_gradient = np.hypot(sobel_filtered_dx, sobel_filtered_dy)
sobel_gradient *= 255.0 / np.max(sobel_gradient)
useTestPlot = False
if useTestPlot:
timg = sobel_gradient
plt.imshow(timg, cmap='gray')
plt.show()
#Watershed algorithm
watershed = morphology.watershed(sobel_gradient, marker_watershed)
if useTestPlot:
timg = marker_external
plt.imshow(timg, cmap='gray')
plt.show()
#Reducing the image created by the Watershed algorithm to its outline
#wsize = rescale_n(3,reduce_factor) # THIS IS TOO SMALL, dynamically adjusting the size for the watersehed algorithm
outline = ndimage.morphological_gradient(watershed, size=(3,3)) # original (3,3), (wsize, wsize) is too small to create an outline
outline = outline.astype(bool)
outline_u = outline.astype(np.uint8) #added
#Performing Black-Tophat Morphology for reinclusion
#Creation of the disk-kernel and increasing its size a bit
blackhat_struct = [[0, 0, 1, 1, 1, 0, 0],
[0, 1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[0, 1, 1, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 0, 0]]
use_reduce_factor = True
if use_reduce_factor:
blackhat_struct = ndimage.iterate_structure(blackhat_struct, rescale_n(8,reduce_factor)) # dyanmically adjust the number of iterattions; original was 8
else:
blackhat_struct = ndimage.iterate_structure(blackhat_struct, 8)
blackhat_struct_cv2 = blackhat_struct.astype(np.uint8)
#Perform the Black-Hat
#outline += ndimage.black_tophat(outline, structure=blackhat_struct) # original slow
#outline1 = outline + (cv2.morphologyEx(outline_u, cv2.MORPH_BLACKHAT, kernel=blackhat_struct_cv2)).astype(np.bool)
#outline2 = outline + ndimage.black_tophat(outline, structure=blackhat_struct)
#np.array_equal(outline1,outline2) # True
outline += (cv2.morphologyEx(outline_u, cv2.MORPH_BLACKHAT, kernel=blackhat_struct_cv2)).astype(np.bool) # fats
if useTestPlot:
timg = outline
plt.imshow(timg, cmap='gray')
plt.show()
#Use the internal marker and the Outline that was just created to generate the lungfilter
lungfilter = np.bitwise_or(marker_internal, outline)
if useTestPlot:
timg = lungfilter
plt.imshow(timg, cmap='gray')
plt.show()
#Close holes in the lungfilter
#fill_holes is not used here, since in some slices the heart would be reincluded by accident
##structure = np.ones((BINARY_CLOSING_SIZE,BINARY_CLOSING_SIZE)) # 5 is not enough, 7 is
structure2 = morphology.disk(2) # used to fill the gaos/holes close to the border (otherwise the large sttructure would create a gap by the edge)
if use_reduce_factor:
structure3 = morphology.disk(rescale_n(BINARY_CLOSING_SIZE,reduce_factor)) # dynanically adjust; better , 5 seems sufficient, we use 7 for safety/just in case
else:
structure3 = morphology.disk(BINARY_CLOSING_SIZE) # dynanically adjust; better , 5 seems sufficient, we use 7 for safety/just in case
##lungfilter = ndimage.morphology.binary_closing(lungfilter, structure=structure, iterations=3) #, ORIGINAL iterations=3) # was structure=np.ones((5,5))
lungfilter2 = ndimage.morphology.binary_closing(lungfilter, structure=structure2, iterations=3) # ADDED
lungfilter3 = ndimage.morphology.binary_closing(lungfilter, structure=structure3, iterations=3)
lungfilter = np.bitwise_or(lungfilter2, lungfilter3)
### NOTE if no iterattions, i.e. default 1 we get holes within lungs for the disk(5) and perhaps more
#Apply the lungfilter (note the filtered areas being assigned -2000 HU)
#image.shape
#segmented = np.where(lungfilter == 1, image, -2000*np.ones((512, 512)).astype(np.int16)) # was -2000 someone suggested 30
segmented = np.where(lungfilter == 1, image, -2000*np.ones(image.shape).astype(np.int16)) # was -2000 someone suggested 30
return segmented, lungfilter, outline, watershed, sobel_gradient, marker_internal, marker_external, marker_watershed
def seperate_lungs_3d(image):
#Creation of the markers as shown above:
marker_internal, marker_external, marker_watershed = generate_markers_3d(image)
#Creation of the Sobel-Gradient
sobel_filtered_dx = ndimage.sobel(image, axis=2)
sobel_filtered_dy = ndimage.sobel(image, axis=1)
sobel_gradient = np.hypot(sobel_filtered_dx, sobel_filtered_dy)
sobel_gradient *= 255.0 / np.max(sobel_gradient)
#Watershed algorithm
watershed = morphology.watershed(sobel_gradient, marker_watershed)
#Reducing the image created by the Watershed algorithm to its outline
outline = ndimage.morphological_gradient(watershed, size=(1,3,3))
outline = outline.astype(bool)
#Performing Black-Tophat Morphology for reinclusion
#Creation of the disk-kernel and increasing its size a bit
blackhat_struct = [[0, 0, 1, 1, 1, 0, 0],
[0, 1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[0, 1, 1, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 0, 0]]
blackhat_struct = ndimage.iterate_structure(blackhat_struct, 8)
blackhat_struct = blackhat_struct[np.newaxis,:,:]
#Perform the Black-Hat
outline += ndimage.black_tophat(outline, structure=blackhat_struct) # very long time
#Use the internal marker and the Outline that was just created to generate the lungfilter
lungfilter = np.bitwise_or(marker_internal, outline)
#Close holes in the lungfilter
#fill_holes is not used here, since in some slices the heart would be reincluded by accident
##structure = np.ones((BINARY_CLOSING_SIZE,BINARY_CLOSING_SIZE)) # 5 is not enough, 7 is
structure = morphology.disk(BINARY_CLOSING_SIZE) # better , 5 seems sufficient, we use 7 for safety/just in case
structure = structure[np.newaxis,:,:]
lungfilter = ndimage.morphology.binary_closing(lungfilter, structure=structure, iterations=3) #, iterations=3) # was structure=np.ones((5,5))
### NOTE if no iterattions, i.e. default 1 we get holes within lungs for the disk(5) and perhaps more
#Apply the lungfilter (note the filtered areas being assigned -2000 HU)
segmented = np.where(lungfilter == 1, image, -2000*np.ones(marker_internal.shape))
return segmented, lungfilter, outline, watershed, sobel_gradient, marker_internal, marker_external, marker_watershed
def get_slice_location(dcm):
return float(dcm[0x0020, 0x1041].value)
def thru_plane_position(dcm):
"""Gets spatial coordinate of image origin whose axis
is perpendicular to image plane.
"""
orientation = tuple((float(o) for o in dcm.ImageOrientationPatient))
position = tuple((float(p) for p in dcm.ImagePositionPatient))
rowvec, colvec = orientation[:3], orientation[3:]
normal_vector = np.cross(rowvec, colvec)
slice_pos = np.dot(position, normal_vector)
return slice_pos
def resample(image, scan, new_spacing=[1,1,1]):
# Determine current pixel spacing
spacing = map(float, ([scan[0].SliceThickness] + scan[0].PixelSpacing))
spacing = np.array(list(spacing))
#scan[2].SliceThickness
resize_factor = spacing / new_spacing
new_real_shape = image.shape * resize_factor
new_shape = np.round(new_real_shape)
real_resize_factor = new_shape / image.shape
new_spacing = spacing / real_resize_factor
image = scipy.ndimage.interpolation.zoom(image, real_resize_factor, mode='nearest') ### early orig modified
return image, new_spacing
def segment_all(stage, part=0, processors=1, showSummaryPlot=True): # stage added to simplify the stage1 and stage2 calculations
count = 0
STAGE_DIR = STAGE_DIR_BASE % stage
folders = glob.glob(''.join([STAGE_DIR,'*']))
if len(folders) == 0:
print ("ERROR, check directory, no folders found in: ", STAGE_DIR )
for folder in folders:
count += 1
if count % processors == part: # do this part in this process, otherwise skip
path = folder
slices = load_scan(path)
image_slices = get_3d_data_slices(slices)
#mid = len(image_slices) // 2
#img_sel = mid
useTestPlot = False
if useTestPlot:
print("Shape before segmenting\t", image_slices.shape)
plt.hist(image_slices.flatten(), bins=80, color='c')
plt.xlabel("Hounsfield Units (HU)")
plt.ylabel("Frequency")
plt.show()
start = time.time()
resampleImages = True
if resampleImages:
image_resampled, spacing = resample(image_slices, slices, RESIZE_SPACING) # let's start wkith this small resolutuion for workign our the system (then perhaps 2, 0.667, 0.667)
print("Shape_before_&_after_resampling\t", image_slices.shape,image_resampled.shape)
if useTestPlot:
plt.imshow(image_slices[image_slices.shape[0]//2], cmap=plt.cm.bone)
plt.show()
plt.imshow(image_resampled[image_resampled.shape[0]//2], cmap=plt.cm.bone)
np.max(image_slices)
np.max(image_resampled)
np.min(image_slices)
np.min(image_resampled)
plt.show()
image_slices = image_resampled
shape = image_slices.shape
l_segmented = np.zeros(shape).astype(np.int16)
l_lungfilter = np.zeros(shape).astype(np.bool)
l_outline = np.zeros(shape).astype(np.bool)
l_watershed = np.zeros(shape).astype(np.int16)
l_sobel_gradient = np.zeros(shape).astype(np.float32)
l_marker_internal = np.zeros(shape).astype(np.bool)
l_marker_external = np.zeros(shape).astype(np.bool)
l_marker_watershed = np.zeros(shape).astype(np.int16)
# start = time.time()
i=0
for i in range(shape[0]):
l_segmented[i], l_lungfilter[i], l_outline[i], l_watershed[i], l_sobel_gradient[i], l_marker_internal[i], l_marker_external[i], l_marker_watershed[i] = seperate_lungs_cv2(image_slices[i])
print("Rescale & Seg time, and path: ", ((time.time() - start)), path )
if useTestPlot:
plt.hist(image_slices.flatten(), bins=80, color='c')
plt.xlabel("Hounsfield Units (HU)")
plt.ylabel("Frequency")
plt.show()
plt.hist(l_segmented.flatten(), bins=80, color='c')
plt.xlabel("Hounsfield Units (HU)")
plt.ylabel("Frequency")
plt.show()
img_sel_i = shape[0] // 2
# Show some slice in the middle
plt.imshow(image_slices[img_sel_i], cmap=plt.cm.gray)
plt.show()
# Show some slice in the middle
plt.imshow(l_segmented[img_sel_i], cmap='gray')
plt.show()
path_rescaled = path.replace(stage, ''.join([stage, "_", RESOLUTION_STR]), 1)
path_segmented = path.replace(stage, ''.join([stage, "_segmented_", RESOLUTION_STR]), 1)
path_segmented_crop = path.replace(stage, ''.join([stage, "_segmented_", RESOLUTION_STR, "_crop"]), 1)
np.savez_compressed (path_rescaled, image_slices)
np.savez_compressed (path_segmented, l_segmented)
mask = l_lungfilter.astype(np.int8)
regions = measure.regionprops(mask) # this measures the largest region and is a bug when the mask is not the largest region !!!
bb = regions[0].bbox
#print(bb)
zlen = bb[3] - bb[0]
ylen = bb[4] - bb[1]
xlen = bb[5] - bb[2]
dx = 0 # could be reduced
## have to reduce dx as for istance at least image the lungs stretch right to the border evebn without cropping
## namely for '../input/stage1/be57c648eb683a31e8499e278a89c5a0'
crop_max_ratio_z = 0.6 # 0.8 is to big make_submit2(45, 1)
crop_max_ratio_y = 0.4
crop_max_ratio_x = 0.6
bxy_min = np.min(bb[1:3])
bxy_max = np.max(bb[4:6])
mask_shape= mask.shape
image_shape = l_segmented.shape
mask_volume = zlen*ylen*zlen /(mask_shape[0] * mask_shape[1] * mask_shape[2])
mask_volume_thresh = 0.08 # anything below is too small (maybe just one half of the lung or something very small0)
mask_volume_check = mask_volume > mask_volume_thresh
# print ("Mask Volume: ", mask_volume )
### DO NOT allow the mask to touch x & y ---> if it does it is likely a wrong one as for:
## folders[3] , path = '../input/stage1/9ba5fbcccfbc9e08edcfe2258ddf7
maskOK = False
if bxy_min >0 and bxy_max < 512 and mask_volume_check and zlen/mask_shape[0] > crop_max_ratio_z and ylen/mask_shape[1] > crop_max_ratio_y and xlen/mask_shape[2] > crop_max_ratio_x:
## square crop and at least dx elements on both sides on x & y
bxy_min = np.min(bb[1:3])
bxy_max = np.max(bb[4:6])
if bxy_min == 0 or bxy_max == 512:
# Mask to bigg, auto-correct
print("The following mask likely too big, autoreducing by:", dx)
bxy_min = np.max((bxy_min, dx))
bxy_max = np.min ((bxy_max, mask_shape[1] - dx))
image = l_segmented[bb[0]:bb[3], bxy_min:bxy_max, bxy_min:bxy_max]
mask = mask[bb[0]:bb[3], bxy_min:bxy_max, bxy_min:bxy_max]
#maskOK = True
print ("Shape, cropped, bbox ", mask_shape, mask.shape, bb)
elif bxy_min> 0 and bxy_max < 512 and mask_volume_check and zlen/mask.shape[0] > crop_max_ratio_z:
## cut on z at least
image = l_segmented[bb[0]:bb[3], dx: image_shape[1] - dx, dx: image_shape[2] - dx]
#mask = mask[bb[0]:bb[3], dx: mask_shape[1] - dx, dx: mask_shape[2] - dx]
print("Mask too small, NOT auto-cropping x-y: shape, cropped, bbox, ratios, violume:", mask_shape, image.shape, bb, path, zlen/mask_shape[0], ylen/mask_shape[1], xlen/mask_shape[2], mask_volume)
else:
image = l_segmented[0:mask_shape[0], dx: image_shape[1] - dx, dx: image_shape[2] - dx]
#mask = mask[0:mask_shape[0], dx: mask_shape[1] - dx, dx: mask_shape[2] - dx]
print("Mask wrong, NOT auto-cropping: shape, cropped, bbox, ratios, volume:", mask_shape, image.shape, bb, path, zlen/mask_shape[0], ylen/mask_shape[1], xlen/mask_shape[2], mask_volume)
if showSummaryPlot:
img_sel_i = shape[0] // 2
# Show some slice in the middle
useSeparatePlots = False
if useSeparatePlots:
plt.imshow(image_slices[img_sel_i], cmap=plt.cm.gray)
plt.show()
# Show some slice in the middle
plt.imshow(l_segmented[img_sel_i], cmap='gray')
plt.show()
else:
f, ax = plt.subplots(1, 2, figsize=(6,3))
ax[0].imshow(image_slices[img_sel_i],cmap=plt.cm.bone)
ax[1].imshow(l_segmented[img_sel_i],cmap=plt.cm.bone)
plt.show()
# Show some slice in the middle
#plt.imshow(image[image.shape[0] // 2], cmap='gray') # don't show it for simpler review
#plt.show()
np.savez_compressed(path_segmented_crop, image)
#print("Mask count: ", count)
#print ("Shape: ", image.shape)
return part, processors, count
# the following 3 functions to read LUNA files are from: https://www.kaggle.com/arnavkj95/data-science-bowl-2017/candidate-generation-and-luna16-preprocessing/notebook
'''
This funciton reads a '.mhd' file using SimpleITK and return the image array,
origin and spacing of the image.
'''
def load_itk(filename):
# Reads the image using SimpleITK
itkimage = sitk.ReadImage(filename)
# Convert the image to a numpy array first and then shuffle the dimensions to get axis in the order z,y,x
ct_scan = sitk.GetArrayFromImage(itkimage)
# Read the origin of the ct_scan, will be used to convert the coordinates from world to voxel and vice versa.
origin = np.array(list(reversed(itkimage.GetOrigin())))
# Read the spacing along each dimension
spacing = np.array(list(reversed(itkimage.GetSpacing())))
return ct_scan, origin, spacing
'''
This function is used to convert the world coordinates to voxel coordinates using
the origin and spacing of the ct_scan
'''
def world_2_voxel(world_coordinates, origin, spacing):
stretched_voxel_coordinates = np.absolute(world_coordinates - origin)
voxel_coordinates = stretched_voxel_coordinates / spacing
return voxel_coordinates
'''
This function is used to convert the voxel coordinates to world coordinates using
the origin and spacing of the ct_scan.
'''
def voxel_2_world(voxel_coordinates, origin, spacing):
stretched_voxel_coordinates = voxel_coordinates * spacing
world_coordinates = stretched_voxel_coordinates + origin
return world_coordinates
def seq(start, stop, step=1):
n = int(round((stop - start)/float(step)))
if n > 1:
return([start + step*i for i in range(n+1)])
else:
return([])
'''
This function is used to create spherical regions in binary masks
at the given locations and radius.
'''
#image = lung_img
#spacing = new_spacing
def draw_circles(image,cands,origin,spacing):
#make empty matrix, which will be filled with the mask
image_mask = np.zeros(image.shape, dtype=np.int16)
#run over all the nodules in the lungs
for ca in cands.values:
#get middel x-,y-, and z-worldcoordinate of the nodule
#radius = np.ceil(ca[4])/2 ## original: replaced the ceil with a very minor increase of 1% ....
radius = (ca[4])/2 + 0.51 * spacing[0] # increasing by circa half of distance in z direction .... (trying to capture wider region/border for learning ... and adress the rough net .
coord_x = ca[1]
coord_y = ca[2]
coord_z = ca[3]
image_coord = np.array((coord_z,coord_y,coord_x))
#determine voxel coordinate given the worldcoordinate
image_coord = world_2_voxel(image_coord,origin,spacing)
#determine the range of the nodule
#noduleRange = seq(-radius, radius, RESIZE_SPACING[0]) # original, uniform spacing
noduleRange_z = seq(-radius, radius, spacing[0])
noduleRange_y = seq(-radius, radius, spacing[1])
noduleRange_x = seq(-radius, radius, spacing[2])
#x = y = z = -2
#create the mask
for x in noduleRange_x:
for y in noduleRange_y:
for z in noduleRange_z:
coords = world_2_voxel(np.array((coord_z+z,coord_y+y,coord_x+x)),origin,spacing)
#if (np.linalg.norm(image_coord-coords) * RESIZE_SPACING[0]) < radius: ### original (contrained to a uniofrm RESIZE)
if (np.linalg.norm((image_coord-coords) * spacing)) < radius:
image_mask[int(np.round(coords[0])),int(np.round(coords[1])),int(np.round(coords[2]))] = int(1)
return image_mask
'''
This function takes the path to a '.mhd' file as input and
is used to create the nodule masks and segmented lungs after
rescaling to 1mm size in all directions. It saved them in the .npz
format. It also takes the list of nodule locations in that CT Scan as
input.
'''
def load_scans_masks(luna_subset, useAll, use_unsegmented=True):
#luna_subset = "[0-6]"
LUNA_DIR = LUNA_BASE_DIR % luna_subset
files = glob.glob(''.join([LUNA_DIR,'*.mhd']))
annotations = pd.read_csv(LUNA_ANNOTATIONS)
annotations.head()
sids = []
scans = []
masks = []
cnt = 0
skipped = 0
for file in files:
imagePath = file
seriesuid = file[file.rindex('/')+1:] # everything after the last slash
seriesuid = seriesuid[:len(seriesuid)-len(".mhd")] # cut out the suffix to get the uid
path = imagePath[:len(imagePath)-len(".mhd")] # cut out the suffix to get the uid
if use_unsegmented:
path_segmented = path.replace("original_lungs", "lungs_2x2x2", 1)
else:
path_segmented = path.replace("original_lungs", "segmented_2x2x2", 1)
cands = annotations[seriesuid == annotations.seriesuid] # select the annotations for the current series
#useAll = True
if (len(cands) > 0 or useAll):
sids.append(seriesuid)
if use_unsegmented:
scan_z = np.load(''.join((path_segmented + '_lung' + '.npz')))
else:
scan_z = np.load(''.join((path_segmented + '_lung_seg' + '.npz')))
#scan_z.keys()
scan = scan_z['arr_0']
mask_z = np.load(''.join((path_segmented + '_nodule_mask' + '.npz')))
mask = mask_z['arr_0']
scans.append(scan)
masks.append(mask)
cnt += 1
else:
print("Skipping non-nodules entry ", seriesuid)
skipped += 1
print ("Summary: cnt & skipped: ", cnt, skipped)
return scans, masks, sids
def load_scans_masks_or_blanks(luna_subset, useAll, use_unsegmented=True):
#luna_subset = "[0-6]"
LUNA_DIR = LUNA_BASE_DIR % luna_subset
files = glob.glob(''.join([LUNA_DIR,'*.mhd']))
annotations = pd.read_csv(LUNA_ANNOTATIONS)
annotations.head()
candidates = pd.read_csv(LUNA_CANDIDATES)
candidates_false = candidates[candidates["class"] == 0] # only select the false candidates
candidates_true = candidates[candidates["class"] == 1] # only select the false candidates
sids = []
scans = []
masks = []
blankids = [] # class/id whether scan is with nodule or without, 0 - with, 1 - without
cnt = 0
skipped = 0
#file=files[7]
for file in files:
imagePath = file
seriesuid = file[file.rindex('/')+1:] # everything after the last slash
seriesuid = seriesuid[:len(seriesuid)-len(".mhd")] # cut out the suffix to get the uid
path = imagePath[:len(imagePath)-len(".mhd")] # cut out the suffix to get the uid
if use_unsegmented:
path_segmented = path.replace("original_lungs", "lungs_2x2x2", 1)
else:
path_segmented = path.replace("original_lungs", "segmented_2x2x2", 1)
cands = annotations[seriesuid == annotations.seriesuid] # select the annotations for the current series
ctrue = candidates_true[seriesuid == candidates_true.seriesuid]
cfalse = candidates_false[seriesuid == candidates_false.seriesuid]
#useAll = True
blankid = 1 if (len(cands) == 0 and len(ctrue) == 0 and len(cfalse) > 0) else 0
skip_nodules_entirely = False # was False
use_only_nodules = False # was True
if skip_nodules_entirely and blankid ==0:
## manual switch to generate extra data for the corrupted set
print("Skipping nodules (skip_nodules_entirely) ", seriesuid)
skipped += 1
elif use_only_nodules and (len(cands) == 0):
## manual switch to generate only nodules data due lack of time and repeat etc time pressures
print("Skipping blanks (use_only_nodules) ", seriesuid)
skipped += 1
else: # NORMAL operations
if (len(cands) > 0 or
(blankid >0) or
useAll):
sids.append(seriesuid)
blankids.append(blankid)
if use_unsegmented:
scan_z = np.load(''.join((path_segmented + '_lung' + '.npz')))
else:
scan_z = np.load(''.join((path_segmented + '_lung_seg' + '.npz')))
#scan_z.keys()
scan = scan_z['arr_0']
#mask_z = np.load(''.join((path_segmented + '_nodule_mask' + '.npz')))
mask_z = np.load(''.join((path_segmented + '_nodule_mask_wblanks' + '.npz')))
mask = mask_z['arr_0']
testPlot = False
if testPlot:
maskcheck_z = np.load(''.join((path_segmented + '_nodule_mask' + '.npz')))
maskcheck = maskcheck_z['arr_0']
f, ax = plt.subplots(1, 2, figsize=(10,5))
ax[0].imshow(np.sum(np.abs(maskcheck), axis=0),cmap=plt.cm.gray)
ax[1].imshow(np.sum(np.abs(mask), axis=0),cmap=plt.cm.gray)
#ax[2].imshow(masks1[i,:,:],cmap=plt.cm.gray)
plt.show()
scans.append(scan)
masks.append(mask)
cnt += 1
else:
print("Skipping non-nodules and non-blank entry ", seriesuid)
skipped += 1
print ("Summary: cnt & skipped: ", cnt, skipped)
return scans, masks, sids, blankids
#return scans, masks, sids # not yet, old style
def load_scans_masks_no_nodules(luna_subset, use_unsegmented=True): # load only the ones that do not contain nodules
#luna_subset = "[0-6]"
LUNA_DIR = LUNA_BASE_DIR % luna_subset
files = glob.glob(''.join([LUNA_DIR,'*.mhd']))
annotations = pd.read_csv(LUNA_ANNOTATIONS)
annotations.head()
sids = []
scans = []
masks = []
cnt = 0
skipped = 0
for file in files:
imagePath = file
seriesuid = file[file.rindex('/')+1:] # everything after the last slash
seriesuid = seriesuid[:len(seriesuid)-len(".mhd")] # cut out the suffix to get the uid
path = imagePath[:len(imagePath)-len(".mhd")] # cut out the suffix to get the uid
if use_unsegmented:
path_segmented = path.replace("original_lungs", "lungs_2x2x2", 1)
else:
path_segmented = path.replace("original_lungs", "segmented_2x2x2", 1)
cands = annotations[seriesuid == annotations.seriesuid] # select the annotations for the current series
#useAll = True
if (len(cands)):
print("Skipping entry with nodules ", seriesuid)
skipped += 1
else:
sids.append(seriesuid)
if use_unsegmented:
scan_z = np.load(''.join((path_segmented + '_lung' + '.npz')))
else:
scan_z = np.load(''.join((path_segmented + '_lung_seg' + '.npz')))
#scan_z.keys()
scan = scan_z['arr_0']
mask_z = np.load(''.join((path_segmented + '_nodule_mask' + '.npz')))
mask = mask_z['arr_0']
scans.append(scan)
masks.append(mask)
cnt += 1
print ("Summary: cnt & skipped: ", cnt, skipped)
return scans, masks, sids
MIN_BOUND = -1000.0
MAX_BOUND = 400.0
def normalize(image):
image = (image - MIN_BOUND) / (MAX_BOUND - MIN_BOUND)
image[image>1] = 1.
image[image<0] = 0.
return image
PIXEL_MEAN = 0.028 ## for LUNA subset 0 and our preprocessing, only with nudels was 0.028, all was 0.020421744071562546 (in the tutorial they used 0.25)
def zero_center(image):
image = image - PIXEL_MEAN
return image
def load_scans(path): # function used for testing
slices = [dicom.read_file(path + '/' + s) for s in os.listdir(path)]
slices.sort(key=lambda x: int(x.InstanceNumber))
return np.stack([s.pixel_array for s in slices])
def get_scans(df,scans_list):
scans=np.stack([load_scans(scan_folder+df.id[i_scan[0]])[i_scan[1]] for i_scan in scans_list])
scans=process_scans(scans)
view_scans(scans)
return(scans)
def process_scans(scans): # used for tesing
scans1=np.zeros((scans.shape[0],1,img_rows,img_cols))
for i in range(scans.shape[0]):
img=scans[i,:,:]
img = 255.0 / np.amax(img) * img
img =img.astype(np.uint8)
img =cv2.resize(img, (img_rows, img_cols))
scans1[i,0,:,:]=img
return (scans1)
only_with_nudels = True
def convert_scans_and_masks(scans, masks, only_with_nudels):
flattened1 = [val for sublist in scans for val in sublist[1:-1]] # skip one element at the beginning and at the end
scans1 = np.stack(flattened1)
flattened1 = [val for sublist in masks for val in sublist[1:-1]] # skip one element at the beginning and at the end
masks1 = np.stack(flattened1) # 10187
#only_with_nudels = True
if only_with_nudels:
nudels_pix_count = np.sum(masks1, axis = (1,2))
scans1 = scans1[nudels_pix_count>0]
masks1 = masks1[nudels_pix_count>0] # 493 -- circa 5 % with nudeles oters without
#nudels2 = np.where(masks1 == 1, scans1, -4000*np.ones(( masks1.shape[1], masks1.shape[2))) ### was -2000
#nudels1 = np.where(masks1 == 1, scans1, masks1 - 4000) ### was -2000
#nudles1_rf = nudels1.flatten()
#nudles1_rf = nudles1_rf[nudles1_rf > -4000]
scans = normalize(scans1)
useTestPlot = False
if useTestPlot:
plt.hist(scans1.flatten(), bins=80, color='c')
plt.xlabel("Hounsfield Units (HU)")
plt.ylabel("Frequency")
plt.show()
#for i in range(scans.shape[0]):
for i in range(20):
print ('scan '+str(i))
f, ax = plt.subplots(1, 3, figsize=(15,5))
ax[0].imshow(scans1[i,:,:],cmap=plt.cm.gray)
ax[1].imshow(scans[i,:,:],cmap=plt.cm.gray)
ax[2].imshow(masks1[i,:,:],cmap=plt.cm.gray)
plt.show()
#np.mean(scans) # 0.028367 / 0.0204
#np.min(scans) # 0
#np.max(scans) #
scans = zero_center(scans)
masks = np.copy(masks1)
## if needed do the resize here ....
img_rows = scans.shape[1] ### redefine img_rows/ cols and add resize if needed
img_cols = scans.shape[2]
scans1=np.zeros((scans.shape[0],1,img_rows,img_cols))
for i in range(scans.shape[0]):
img=scans[i,:,:]
###img =cv2.resize(img, (img_rows, img_cols)) ## add/test resizing if needed
scans1[i,0,:,:]=img
masks1=np.zeros((masks.shape[0],1,img_rows,img_cols))
for i in range(masks.shape[0]):
img=masks[i,:,:]
###img =cv2.resize(img, (img_rows, img_cols)) ## add/test resizing if needed
masks1[i,0,:,:]=img
return scans1, masks1
#scans = [scans[i]]
#masks = [masks[i]]
def convert_scans_and_masks_xd_ablanks(scans, masks, blankids, only_with_nudels, dim=3):
# reuse scan to reduce memory footprint
dim_orig = dim
add_blank_spacing_size = dim * 8 #### use 4 for [0 - 3] and 8 for [4 - 7] ???initial trial (should perhaps be just dim ....)
#skip = dim // 2 # old
skip_low = dim // 2 # dim shoudl be uneven -- it is recalculated anyway to this end
skip_high = dim -skip_low - 1
do_not_allow_even_dim = False ## now we allow odd numbers ...
if do_not_allow_even_dim:
dim = 2 * skip_low + 1
skip_low = dim // 2
skip_high = dim -skip_low - 1
if dim != dim_orig:
print ("convert_scans_and_masks_x: Dim must be uneven, corrected from .. to:", dim_orig, dim)
work = [] # 3 layers
#scan = scans[0]
for scan in scans: ##TEMP
tmp = []
#i = 1
#for i in range(1, scan.shape[0]-1, 3): # SKIP EVERY 3
for i in range(skip_low, scan.shape[0]-skip_high):
#img1 = scan[i-1]
#img2 = scan[i]
#img3 = scan[i+1]
#rgb = np.stack((img1, img2, img3))
rgb = np.stack(scan[i-skip_low:i+skip_high+1])
tmp.append(rgb)
work.append(np.array(tmp))
#flattened1 = [val for sublist in work for val in sublist ] # NO skipping as we have already cut the first and the last layer
#scans1 = np.stack(flattened1)
scans1 = np.stack([val for sublist in work for val in sublist ]) # NO skipping as we have already cut the first and the last layer
work = []
### ADD ariticial mask pixel every add_blank_spacing layers for each blankids ...
# set the (0,0) pixel to -1 every add_blank_spacing_size for blanks ..
blanks_per_axis = 4 # skip border
crop = 16
dx = (img_cols - 2 * crop) // (blanks_per_axis + 2)
dy = (img_rows - 2 * crop) // (blanks_per_axis + 2)
for mask in masks:
if (np.sum(mask) < 0):
## we have a blank
### ADD ariticial mask pixel every add_blank_spacing layers for each blankids ...
# set the (0,0) pixel to -1 every add_blank_spacing_size for blanks ..
for i in range(skip_low, mask.shape[0]-skip_high, add_blank_spacing_size):
for ix in range(blanks_per_axis):
xpos = crop + (ix+1)*dx + dx //2
for iy in range(blanks_per_axis):
ypos = crop + (iy+1)*dy + dy //2
#print (xpos, ypos)
mask[skip_low, ypos, xpos] = -1 # negative pixel to be picked up below and corrected back to none
#for k in range(len(blankids)):
# if blankids[k] > 0:
# mask = masks[k]
# ## add the blanls
# for i in range(skip_low, mask.shape[0]-skip_high, add_blank_spacing_size):
# mask[skip_low, 0, 0] = -1 # negative pixel to be picked up below and corrected back to none
use_3d_mask = True ##
if use_3d_mask:
work = [] # 3 layers
#mask = masks[0]
for mask in masks:
tmp = []
#i = 0
for i in range(skip_low, mask.shape[0]-skip_high):
#img1 = mask[i-1]
#img2 = mask[i]
#img3 = mask[i+1]
#rgb = np.stack((img1, img2, img3))
rgb = np.stack(mask[i-skip_low:i+skip_high+1])
tmp.append(rgb)
work.append(np.array(tmp))
masks1 = np.stack([val for sublist in work for val in sublist ] )# NO skipping as we have already cut the first and the last layer
else:
masks1 = np.stack([val for sublist in masks for val in sublist[skip_low:-skip_high]] ) # skip one element at the beginning and at the end
#masks1 = np.stack(flattened1) # 10187
#only_with_nudels = True
if only_with_nudels:
if use_3d_mask:
nudels_pix_count = | np.sum(masks1[:,skip_low], axis = (1,2)) | numpy.sum |
from __future__ import division
from __future__ import with_statement
from __future__ import absolute_import
from future.builtins import zip
import math
import os
import xml.etree.ElementTree as etree
from collections import defaultdict
from operator import itemgetter
import re
import numpy as np
import csv
import spacy
import config
from io import open
verbose = 1
# ACE 2005 tasks types
EVENT = [u'Conflict', u'Life', u'Movement', u'Justice', u'Personnel', u'Contact',
u'Transaction', u'Business']
# ACE 2005 Event Subtypes
EVENT_SUBTYPE = [
u'Be-Born', u'Die', u'Marry', u'Divorce', u'Injure', u'Transfer-Ownership',
u'Transfer-Money', u'Transport', u'Start-Org', u'End-Org', u'Declare-Bankruptcy',
u'Merge-Org', u'Attack', u'Demonstrate', u'Meet', u'Phone-Write', u'Start-Position',
u'End-Position', u'Nominate', u'Elect', u'Arrest-Jail', u'Release-Parole',
u'Charge-Indict', u'Trial-Hearing', u'Sue', u'Convict', u'Sentence', u'Fine',
u'Execute', u'Extradite', u'Acquit', u'Pardon', u'Appeal']
# ACE 2005 role for events
ROLES = [u'Person', u'Place', u'Buyer', u'Seller', u'Beneficiary', u'Price',
u'Artifact', u'Origin', u'Destination', u'Giver', u'Recipient', u'Money',
u'Org', u'Agent', u'Victim', u'Instrument', u'Entity', u'Attacker', u'Target',
u'Defendant', u'Adjudicator', u'Prosecutor', u'Plaintiff', u'Crime',
u'Position', u'Sentence', u'Vehicle', u'Time-Within', u'Time-Starting',
u'Time-Ending', u'Time-Before', u'Time-After', u'Time-Holds',
u'Time-At-Beginning', u'Time-At-End']
# ACE 2005 roles for each tasks subtype
EVENT_SUBTYPE_ROLES = {
u'Acquit': set([u'Defendant', u'Time-Within', u'Adjudicator', u'Crime']),
u'Appeal': set([u'Adjudicator',
u'Crime',
u'Place',
u'Plaintiff',
u'Time-Holds',
u'Time-Within']),
u'Arrest-Jail': set([u'Agent',
u'Crime',
u'Person',
u'Place',
u'Time-At-Beginning',
u'Time-Before',
u'Time-Ending',
u'Time-Holds',
u'Time-Starting',
u'Time-Within']),
u'Attack': set([u'Agent',
u'Attacker',
u'Instrument',
u'Place',
u'Target',
u'Time-After',
u'Time-At-Beginning',
u'Time-At-End',
u'Time-Before',
u'Time-Ending',
u'Time-Holds',
u'Time-Starting',
u'Time-Within',
u'Victim']),
u'Be-Born': set([u'Time-Within', u'Place', u'Time-Holds', u'Person']),
u'Charge-Indict': set([u'Adjudicator',
u'Crime',
u'Defendant',
u'Place',
u'Prosecutor',
u'Time-Before',
u'Time-Ending',
u'Time-Within']),
u'Convict': set([u'Adjudicator',
u'Crime',
u'Defendant',
u'Place',
u'Time-At-Beginning',
u'Time-Within']),
u'Declare-Bankruptcy': set([u'Org',
u'Place',
u'Time-After',
u'Time-At-Beginning',
u'Time-Within']),
u'Demonstrate': set([u'Entity',
u'Place',
u'Time-At-End',
u'Time-Starting',
u'Time-Within']),
u'Die': set([u'Agent',
u'Instrument',
u'Person',
u'Place',
u'Time-After',
u'Time-At-Beginning',
u'Time-Before',
u'Time-Ending',
u'Time-Holds',
u'Time-Starting',
u'Time-Within',
u'Victim']),
u'Divorce': set([u'Place', u'Time-Within', u'Person']),
u'Elect': set([u'Entity',
u'Person',
u'Place',
u'Position',
u'Time-At-Beginning',
u'Time-Before',
u'Time-Holds',
u'Time-Starting',
u'Time-Within']),
u'End-Org': set([u'Org',
u'Place',
u'Time-After',
u'Time-At-Beginning',
u'Time-Holds',
u'Time-Within']),
u'End-Position': set([u'Entity',
u'Person',
u'Place',
u'Position',
u'Time-After',
u'Time-At-End',
u'Time-Before',
u'Time-Ending',
u'Time-Holds',
u'Time-Starting',
u'Time-Within']),
u'Execute': set([u'Agent',
u'Crime',
u'Person',
u'Place',
u'Time-After',
u'Time-At-Beginning',
u'Time-Within']),
u'Extradite': set([u'Destination', u'Time-Within', u'Origin', u'Agent', u'Person']),
u'Fine': set([u'Time-Within', u'Adjudicator', u'Place', u'Money', u'Crime', u'Entity']),
u'Injure': set([u'Place', u'Time-Within', u'Victim', u'Agent', u'Instrument']),
u'Marry': set([u'Place', u'Time-Within', u'Time-Before', u'Time-Holds', u'Person']),
u'Meet': set([u'Entity',
u'Place',
u'Time-After',
u'Time-At-Beginning',
u'Time-Ending',
u'Time-Holds',
u'Time-Starting',
u'Time-Within']),
u'Merge-Org': set([u'Org', u'Time-Ending']),
u'Nominate': set([u'Agent', u'Time-Within', u'Position', u'Person']),
u'Pardon': set([u'Defendant', u'Place', u'Time-At-End', u'Adjudicator']),
u'Phone-Write': set([u'Entity',
u'Place',
u'Time-After',
u'Time-Before',
u'Time-Holds',
u'Time-Starting',
u'Time-Within']),
u'Release-Parole': set([u'Crime',
u'Entity',
u'Person',
u'Place',
u'Time-After',
u'Time-Within']),
u'Sentence': set([u'Adjudicator',
u'Crime',
u'Defendant',
u'Place',
u'Sentence',
u'Time-At-End',
u'Time-Starting',
u'Time-Within']),
u'Start-Org': set([u'Agent',
u'Org',
u'Place',
u'Time-After',
u'Time-Before',
u'Time-Starting',
u'Time-Within']),
u'Start-Position': set([u'Entity',
u'Person',
u'Place',
u'Position',
u'Time-After',
u'Time-At-Beginning',
u'Time-Before',
u'Time-Holds',
u'Time-Starting',
u'Time-Within']),
u'Sue': set([u'Adjudicator',
u'Crime',
u'Defendant',
u'Place',
u'Plaintiff',
u'Time-Holds',
u'Time-Within']),
u'Transfer-Money': set([u'Beneficiary',
u'Giver',
u'Money',
u'Place',
u'Recipient',
u'Time-After',
u'Time-Before',
u'Time-Holds',
u'Time-Starting',
u'Time-Within']),
u'Transfer-Ownership': set([u'Artifact',
u'Beneficiary',
u'Buyer',
u'Place',
u'Price',
u'Seller',
u'Time-At-Beginning',
u'Time-Before',
u'Time-Ending',
u'Time-Within']),
u'Transport': set([u'Agent',
u'Artifact',
u'Destination',
u'Origin',
u'Place',
u'Time-After',
u'Time-At-Beginning',
u'Time-At-End',
u'Time-Before',
u'Time-Ending',
u'Time-Holds',
u'Time-Starting',
u'Time-Within',
u'Vehicle',
u'Victim']),
u'Trial-Hearing': set([u'Adjudicator',
u'Crime',
u'Defendant',
u'Place',
u'Prosecutor',
u'Time-At-End',
u'Time-Holds',
u'Time-Starting',
u'Time-Within'])}
ENTITY_TYPE = [u'FAC', u'PER', u'LOC', u'GPE', u'ORG', u'WEA', u'VEH']
VALUE_TYPE = [u'Sentence', u'Job-Title', u'Crime', u'Contact-Info', u'Numeric']
TIME_TYPE = [u'Time']
ENTITY_VALUE_TIME = ENTITY_TYPE + VALUE_TYPE + TIME_TYPE
ENTITY_VALUE_TIME_SIZE = len(ENTITY_VALUE_TIME)
ENTITY_VALUE_LOOKUP = dict()
for i, x in enumerate(ENTITY_VALUE_TIME):
ENTITY_VALUE_LOOKUP[x] = i
BIO_ENTITY_TYPE = [u'O'] + [u'B-'+x for x in ENTITY_VALUE_TIME] + [u'I-'+x for x in ENTITY_VALUE_TIME]
def get_bio_index(type_name, is_begin):
u"""Retrun integer index corresponding to BIO entity annotation"""
if is_begin:
return ENTITY_VALUE_LOOKUP[type_name] + 1
else:
return ENTITY_VALUE_LOOKUP[type_name] + ENTITY_VALUE_TIME_SIZE + 1
def extract_text(node, replace_newline_with_space=False):
return extract_text2(node, [], replace_newline_with_space)
def extract_text2(node, all_text, replace_newline_with_space):
u""" Return list of text from XML element subtree.
Python 2 version"""
tag = node.tag
if not isinstance(tag, str) and tag is not None:
return
text = node.text
if text:
if replace_newline_with_space:
text = text.replace('\n', ' ')
all_text.append(unicode(text))
for e in node:
extract_text2(e, all_text, replace_newline_with_space)
text = e.tail
if text:
if replace_newline_with_space:
text = text.replace('\n', ' ')
all_text.append(unicode(text))
return all_text
def extract_text3(node, replace_newline_with_space):
u""" Return list of text from XML element subtree.
Python 3 version"""
all = [];
for text in node.itertext():
if replace_newline_with_space:
text = text.replace('\n', ' ')
all.append(text)
return all
def find_sentence_index_by_string(search_string, start_idx, end_idx, all_text, sent_text_idx):
u""" Find the sentence containing 'search_string'.
Use 'start_idx' and 'end_idx' of 'all_text' as hints to the location of the sentence.
'sent_text_idx' is a list of pairs indicating the beginning and end of sentences.
"""
text_string = all_text[start_idx:end_idx]
if not text_string == search_string:
best_match = (len(all_text), None)
start = 0
while True:
match_pos = all_text.find(search_string, start)
if match_pos < 0:
break
dist = abs(start_idx - match_pos)
if dist < best_match[0]:
best_match = (dist, match_pos)
start = match_pos + 1
# for match in re.finditer(search_string, all_text):
# dist = abs(start_idx - match.start())
# if dist < best_match[0]:
# best_match = (dist, match)
if best_match[1]:
if verbose:
print(u' Search string and indices mismatch: "{0}" != "{1}". ' +
u'Found match by shifting {2} chars'.format(
search_string, all_text[start_idx:end_idx],
start_idx-best_match[1]))
start_idx = best_match[1]
end_idx = best_match[1]+len(search_string)
# if verbose:
# print(' Search string and indices mismatch: "{}" != "{}". Found match by shifting {} chars'.format(
# search_string, all_text[start_idx:end_idx], start_idx-best_match[1].start()))
# start_idx = best_match[1].start()
# end_idx = best_match[1].end()
else:
print(u' !! Search string ({0}) not in text.'.format(search_string))
return -1
sent_idx = [i for i in xrange(len(sent_text_idx))
if (sent_text_idx[i][0] <= start_idx and
end_idx <= sent_text_idx[i][1])]
if len(sent_idx) == 0:
if verbose:
print(u' !! Search string ({0}) not in sentence.'.format(search_string))
return -1
if len(sent_idx) > 1:
print(u' !! findSentByString: Multiple sentence matches for {0}'.format(search_string))
return sent_idx[0]
def file_iterator(data_dir, suffix):
for dir_name, subDirs, files in os.walk(data_dir):
for afile in [x for x in files if x.endswith(suffix)]:
yield (dir_name, afile)
def filelist_iterator(data_dir, file_base_list, suffix):
for base in file_base_list:
yield (data_dir, base + suffix)
def read_file_prefixes(filename):
result = []
with open(filename, u'r') as f:
for prefix in f:
name = prefix.strip()
result.append(name)
return result
def sentence_stats(data_dir, display=0, nlp=None, num_tokens_long_sent=4):
sentence_stats_by_iter(file_iterator(data_dir, u'.sgm'), display=display, nlp=nlp,
num_tokens_long_sent=num_tokens_long_sent)
def sentence_stats_by_iter(sgml_file_iter, display=0, nlp=None, n_threads=8, num_tokens_long_sent=4):
if nlp is None:
nlp = spacy.load(u'en')
sent_count = 0
long_sent_count = 0
max_length = 0
sum_length = 0
sum_squared_length = 0
length_list = []
docs = []
psum = [0]
for dir_name, sgml_file in sgml_file_iter:
if display > 1:
print(sgml_file)
tree = etree.parse(os.path.join(dir_name, sgml_file))
root = tree.getroot()
text_list = extract_text(root)
for i, doc in enumerate(nlp.pipe(text_list, batch_size=10,
n_threads=n_threads)):
docs.append(doc)
psum.append(psum[-1] + len(text_list[i]))
for span in doc.sents:
sent_count += 1
num_tokens = span.end - span.start
if num_tokens > max_length:
max_length = num_tokens
if num_tokens >= num_tokens_long_sent:
length_list.append(num_tokens)
long_sent_count += 1
sum_length += num_tokens
sum_squared_length += num_tokens * num_tokens
if display > 2:
print(num_tokens)
sent = u''.join(doc[i].string for i in xrange(span.start, span.end)).strip()
print(sent)
print(u'Sentence statistics (ignoring short sentences <{0} tokens):'.format(
num_tokens_long_sent))
print(u'Number of sentences: {0}'.format(sent_count))
print(u'Number of long sentences: {0}'.format(long_sent_count))
print(u'Max long sentence length: {0}'.format(max_length))
print(u'Average long sentence length: {0:.2f}'.format(sum_length/long_sent_count))
std = math.sqrt((sum_squared_length - (sum_length*sum_length)
/long_sent_count)/(long_sent_count-1))
print(u'Std long sentence length: {0:.2f}'.format(std))
length_list.sort()
larray = np.asarray(length_list)
larray = np.floor((1+larray)/10)
d = defaultdict(int)
for x in larray:
d[x] += 1
print(u'Length distribution')
for k in sorted(d.keys()):
print(u' {0:3}: {1:5} {2:6.3f}'.format(int(10*k), d[k], d[k]/len(length_list)))
def entity_stats(apf_xml_file_iter, display=0):
type_count = defaultdict(lambda: defaultdict(int))
for dir_name, xmlfile in apf_xml_file_iter:
if display > 1:
print(xmlfile)
tree = etree.parse(os.path.join(dir_name, xmlfile))
root = tree.getroot()
entities = root[0].findall(u'entity')
for entity in entities:
etype = entity.attrib[u'TYPE']
esubtype = entity.attrib[u'SUBTYPE']
sub_dict = type_count[etype]
sub_dict[esubtype] += 1
if display > 0:
for etype in type_count.keys():
sub_dict = type_count[etype]
total = sum(sub_dict.values())
print(u' {0}: {1}'.format(etype, total))
for key, value in sub_dict.items():
print(u' {0}: {1}'.format(key, value))
return type_count
def value_stats(apf_xml_file_iter, display=0):
type_count = defaultdict(lambda: defaultdict(int))
for dir_name, xmlfile in apf_xml_file_iter:
if display > 1:
print(xmlfile)
tree = etree.parse(os.path.join(dir_name, xmlfile))
root = tree.getroot()
entities = root[0].findall(u'value')
for entity in entities:
etype = entity.attrib[u'TYPE']
sub_dict = type_count[etype]
if u'SUBTYPE' in entity.attrib:
esubtype = entity.attrib[u'SUBTYPE']
else:
esubtype = u'None'
sub_dict[esubtype] += 1
if display > 0:
for etype in type_count.keys():
sub_dict = type_count[etype]
total = sum(sub_dict.values())
print(u' {0}: {1}'.format(etype, total))
for key, value in sub_dict.items():
print(u' {0}: {1}'.format(key, value))
return type_count
def event_stats(data_dir, display=0):
event_stats_by_iter(file_iterator(data_dir, u'apf.xml'), display=display)
def event_stats_by_partition(data_dir, display=0):
list_dict = read_hengji_partition_lists()
for name in list_dict.keys():
print(u'== Parition = ' + name)
event_stats_by_iter(filelist_iterator(data_dir, list_dict[name], u'.apf.xml'), display=display)
print()
def event_stats_by_iter(apf_xml_file_iter, display=0):
u"""Return statistics on events, tasks type and tasks subtype"""
file_count = 0
event_count = 0
event_mention_count = 0
event_mention_argument_count = 0
anchor_word_count = 0
type_count = defaultdict(int)
subtype_count = defaultdict(int)
dir_count = defaultdict(int)
anchor_word_dist = [0] * 10
for dir_name, xmlfile in apf_xml_file_iter:
if display > 1:
print(xmlfile)
tree = etree.parse(os.path.join(dir_name, xmlfile))
root = tree.getroot()
events = root[0].findall(u'tasks')
for event in events:
type_count[event.attrib[u'TYPE']] += 1
subtype_count[event.attrib[u'SUBTYPE']] += 1
dir_count[dir_name] += 1
event_mentions = event.findall(u'event_mention')
for mention in event_mentions:
event_mention_count += 1
arguments = mention.findall(u'event_mention_argument')
event_mention_argument_count += len(arguments)
anchor = mention.find(u'anchor')
anchor_words = anchor[0].text.split(u' ')
num_words = sum([len(x)>0 for x in anchor_words])
if verbose and num_words > 2: # print multi-work anchors
print('multi-word anchor: {}'.format(anchor_words))
anchor_word_dist[num_words if num_words < 10 else 9] += 1
anchor_word_count += num_words
file_count += 1
event_count += len(events)
if display > 0:
print(u'Number of apf.xml files: {0}'.format(file_count))
print(u'Number of events: {0}'.format(event_count))
print(u'Number of tasks mentions: {0}'.format(event_mention_count))
print(u'Number of tasks mention arguments: {0}'.format(event_mention_argument_count))
print(u'Average anchor length: {0:.3f}'.format(anchor_word_count/event_mention_argument_count))
print(u'Anchor length distribution: ', u','.join([unicode(x) for x in anchor_word_dist]))
print(u'Types')
for etype, count in sorted([(etype, count)
for (etype, count) in type_count.items()],
key=itemgetter(1), reverse=True):
print(u' {0}: {1:.4f}'.format(etype, count/event_count))
print(u'Subtypes')
for etype, count in sorted([(etype, count)
for (etype, count) in subtype_count.items()],
key=itemgetter(1), reverse=True):
print(u' {0}: {1:.4f}'.format(etype, count/event_count))
print(u'Directory:')
for etype, count in sorted([(etype, count)
for (etype, count) in dir_count.items()],
key=itemgetter(1), reverse=True):
print(u' {0}: {1:.4f}'.format(etype, count/event_count))
return (event_count, type_count, subtype_count)
def ace_eval(prediction, label, doutput, num_skipped_events=None, num_total_events=None, additional_predictions=0):
"""
('- label=', array([[0, 0, 0, 1],
[0, 0, 1, 0],
[0, 0, 0, 1],
...,
[0, 0, 0, 1],
[0, 0, 0, 1],
[0, 0, 0, 1]], dtype=int32))
label: matrix of size (#instance, #label-types)
So doing an argmax along 2nd dimension amounts to
extracting the index of the true/predicted label, for each instance
('- label_arg_max=', array([3, 2, 3, ..., 3, 3, 3])
:param prediction:
:param label:
:param doutput: number of label types
:param num_skipped_events:
:param num_total_events:
:param additional_predictions:
:return:
"""
print('\nace_utils.py : ace_eval()')
print('- prediction=', prediction)
print('- label=', label)
print('- doutput=', doutput)
print('- num_skipped_events=', num_skipped_events)
print('- num_total_events=', num_total_events)
print('- additional_predictions=', additional_predictions)
# number of instances in data set
num_instances = label.shape[0]
# doutput: number of label types
none_class_index = doutput -1
label_arg_max = np.argmax(label, axis=1)
pred_arg_max = np.argmax(prediction, axis=1)
# label_arg_max is a vector of size #examples
# pred_arg_max is a vector of size #examples
# you can now directly compare these label_arg_max vs pred_arg_max
# to see how many of their elements match. These are the examples that we predicted correctly.
print('- none_class_index=', none_class_index)
print('- label_arg_max=', label_arg_max)
print('- pred_arg_max=', pred_arg_max)
# check whether each element in label_arg_max != none_class_index
# So event_instances is a 1-dim vector of size #instances,
# where each element is True or False
event_instances = label_arg_max != none_class_index
print('- event_instances=', event_instances)
# sum up the number of True elements to obtain the num# of true events
num_events = np.sum(event_instances)
if num_skipped_events:
if num_total_events:
assert num_total_events == num_events + num_skipped_events
else:
num_total_events = num_events + num_skipped_events
else:
if num_total_events:
num_skipped_events = num_total_events - num_events
else:
num_total_events = num_events
num_skipped_events = 0
print('- num_skipped_events=', num_skipped_events)
accuracy = np.sum(pred_arg_max==label_arg_max)/num_instances
correct_event_predictions = pred_arg_max[event_instances] == label_arg_max[event_instances]
# since event_instances is a vector of True/False elements,
# pred_arg_max[event_instances] does the following:
# ret = []
# for i, v in enumerate(event_instances):
# if v == True:
# ret.append(pred_arg_max[i])
#
# correct_event_predictions : stores whether each element of
# pred_arg_max[event_instances] matches each element label_arg_max[event_instances]
print('- pred_arg_max[event_instances]=', pred_arg_max[event_instances])
print('- label_arg_max[event_instances]=', label_arg_max[event_instances])
print('- correct_event_predictions=', correct_event_predictions)
precision = np.sum(correct_event_predictions) / (np.sum(pred_arg_max != none_class_index)
+ additional_predictions)
recall = np.sum(correct_event_predictions) / (num_events + num_skipped_events)
f1 = 2.0*precision*recall/(precision+recall)
print(u' number of events = {0}'.format(num_events))
print(u' number of events (including skipped) = {0}'.format(num_events + num_skipped_events))
print(u' number of tasks prediction = {0}'.format(np.sum(pred_arg_max != none_class_index)))
print(u' number of tasks prediction (including additional) = {0}'.format(
np.sum(pred_arg_max != none_class_index) + additional_predictions))
print(u' number of correct tasks prediction = {0}'.format(np.sum(correct_event_predictions)))
print(u' classification accuracy = {0}'.format(accuracy))
print(u' classification f1 = {0}'.format(f1))
print(u' classification precision = {0}'.format(precision))
print(u' classification recall = {0}'.format(recall))
# True, if instance is tasks
ident_label = label_arg_max < none_class_index
# True, if predict tasks
ident_pred = pred_arg_max < none_class_index
ident_accuracy = 1.0 * np.sum(ident_pred==ident_label) / num_instances
num_correct_identifications = np.sum(ident_pred[event_instances]==True)
denom = np.sum(ident_pred) + additional_predictions
if denom < 1e-8:
denom = 1e-8
ident_precision = 1.0 * num_correct_identifications / denom
ident_recall = 1.0 * num_correct_identifications / (num_events + num_skipped_events)
if ident_precision < 1e-8:
ident_precision = 1e-8
if ident_recall < 1e-8:
ident_recall = 1e-8
ident_f1 = 2.0*ident_precision*ident_recall/(ident_precision+ident_recall)
print('')
print(u' number of correct tasks identication = {0}'.format(np.sum(num_correct_identifications)))
print(u' identification accuracy = {0}'.format(ident_accuracy))
print(u' identification f1 = {0}'.format(ident_f1))
print(u' identification precision = {0}'.format(ident_precision))
print(u' identification recall = {0}'.format(ident_recall))
result = {}
result[u'accuracy'] = accuracy
result[u'precision'] = precision
result[u'recall'] = recall
result[u'f1'] = f1
result[u'identification-accuracy'] = ident_accuracy
result[u'identification-precision'] = ident_precision
result[u'identification-recall'] = ident_recall
result[u'identification-f1'] = ident_f1
return result
def ace_eval_cutoff(prediction_prob, label, class_index, cutoff, num_skipped_events=0):
# number of instances in data set
num_instances = label.shape[0]
# none_class_index = doutput -1
ground_truth = label[:,class_index] == 1
predicted = prediction_prob[:,class_index] > cutoff
# instances that are actual events
event_instances = ground_truth
accuracy = np.sum(predicted==ground_truth)/num_instances
correct_event_predictions = predicted[event_instances] == ground_truth[event_instances]
precision = | np.sum(correct_event_predictions) | numpy.sum |
"""Spatial statistical tools to estimate uncertainties related to DEMs"""
from __future__ import annotations
import math as m
import multiprocessing as mp
import os
import warnings
from functools import partial
from typing import Callable, Union, Iterable, Optional, Sequence, Any
import itertools
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from numba import njit
import numpy as np
import pandas as pd
from scipy import integrate
from scipy.optimize import curve_fit
from skimage.draw import disk
from scipy.interpolate import RegularGridInterpolator, LinearNDInterpolator, griddata
from scipy.stats import binned_statistic, binned_statistic_2d, binned_statistic_dd
from geoutils.spatial_tools import subsample_raster, get_array_and_mask
from geoutils.georaster import RasterType, Raster
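# skgstat may emit DeprecationWarning at import time with some dependency versions;
# the import below is wrapped so that importing this module stays silent.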
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
import skgstat as skg
from skgstat import models
def nmad(data: np.ndarray, nfact: float = 1.4826) -> float:
"""
Calculate the normalized median absolute deviation (NMAD) of an array.
Default scaling factor is 1.4826 to scale the median absolute deviation (MAD) to the dispersion of a normal
distribution (see https://en.wikipedia.org/wiki/Median_absolute_deviation#Relation_to_standard_deviation, and
e.g. http://dx.doi.org/10.1016/j.isprsjprs.2009.02.003)
:param data: input data
:param nfact: normalization factor for the data
:returns nmad: (normalized) median absolute deviation of data.
"""
if isinstance(data, np.ma.masked_array):
data_arr = get_array_and_mask(data, check_shape=False)[0]
else:
data_arr = np.asarray(data)
return nfact * np.nanmedian(np.abs(data_arr - np.nanmedian(data_arr)))
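# Quick sketch of the expected behaviour (illustrative only; the exact value depends on the
# random draw): for a normally distributed sample, the NMAD converges towards the standard deviation.
# >>> rng = np.random.default_rng(42)
# >>> round(nmad(rng.normal(0, 2, size=100_000)), 1)  # doctest: +SKIP
# 2.0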
def interp_nd_binning(df: pd.DataFrame, list_var_names: Union[str, list[str]], statistic: Union[str, Callable[[np.ndarray], float]] = nmad,
min_count: Optional[int] = 100) -> Callable[[tuple[np.ndarray, ...]], np.ndarray]:
"""
Estimate an interpolant function for an N-dimensional binning. Preferably based on the output of nd_binning.
For more details on the input dataframe, and associated list of variable name and statistic, see nd_binning.
    If the variable pd.Series corresponds to an interval (as the output of nd_binning), uses the middle of the interval.
Otherwise, uses the variable as such.
Workflow of the function:
Fills the no-data present on the regular N-D binning grid with nearest neighbour from scipy.griddata, then provides an
interpolant function that linearly interpolates/extrapolates using scipy.RegularGridInterpolator.
:param df: dataframe with statistic of binned values according to explanatory variables (preferably output of nd_binning)
:param list_var_names: explanatory variable data series to select from the dataframe (containing interval or float dtype)
:param statistic: statistic to interpolate, stored as a data series in the dataframe
:param min_count: minimum number of samples to be used as a valid statistic (replaced by nodata)
:return: N-dimensional interpolant function
:examples
# Using a dataframe created from scratch
>>> df = pd.DataFrame({"var1": [1, 2, 3, 1, 2, 3, 1, 2, 3], "var2": [1, 1, 1, 2, 2, 2, 3, 3, 3], "statistic": [1, 2, 3, 4, 5, 6, 7, 8, 9]})
# In 2 dimensions, the statistic array looks like this
# array([
# [1, 2, 3],
# [4, 5, 6],
# [7, 8, 9]
# ])
>>> fun = interp_nd_binning(df, list_var_names=["var1", "var2"], statistic="statistic", min_count=None)
# Right on point.
>>> fun((2, 2))
array(5.)
# Interpolated linearly inside the 2D frame.
>>> fun((1.5, 1.5))
array(3.)
# Extrapolated linearly outside the 2D frame.
>>> fun((-1, 1))
array(-1.)
"""
# if list of variable input is simply a string
if isinstance(list_var_names,str):
list_var_names = [list_var_names]
# check that the dataframe contains what we need
for var in list_var_names:
if var not in df.columns:
raise ValueError('Variable "'+var+'" does not exist in the provided dataframe.')
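    # a callable statistic is referenced by its __name__ (e.g. nmad -> "nmad"), so the dataframe
    # must contain a column with that exact name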
statistic_name = statistic if isinstance(statistic,str) else statistic.__name__
if statistic_name not in df.columns:
raise ValueError('Statistic "' + statistic_name + '" does not exist in the provided dataframe.')
if min_count is not None and 'count' not in df.columns:
raise ValueError('Statistic "count" is not in the provided dataframe, necessary to use the min_count argument.')
if df.empty:
raise ValueError('Dataframe is empty.')
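    # work on a copy so that the input dataframe is left untouched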
df_sub = df.copy()
# if the dataframe is an output of nd_binning, keep only the dimension of interest
if 'nd' in df_sub.columns:
df_sub = df_sub[df_sub.nd == len(list_var_names)]
# compute the middle values instead of bin interval if the variable is a pandas interval type
for var in list_var_names:
check_any_interval = [isinstance(x, pd.Interval) for x in df_sub[var].values]
if any(check_any_interval):
df_sub[var] = pd.IntervalIndex(df_sub[var]).mid.values
# otherwise, leave as is
# check that explanatory variables have valid binning values which coincide along the dataframe
df_sub = df_sub[np.logical_and.reduce([np.isfinite(df_sub[var].values) for var in list_var_names])]
if df_sub.empty:
raise ValueError('Dataframe does not contain a nd binning with the variables corresponding to the list of variables.')
# check that the statistic data series contain valid data
if all(~np.isfinite(df_sub[statistic_name].values)):
raise ValueError('Dataframe does not contain any valid statistic values.')
# remove statistic values calculated with a sample count under the minimum count
if min_count is not None:
df_sub.loc[df_sub['count'] < min_count,statistic_name] = np.nan
values = df_sub[statistic_name].values
ind_valid = np.isfinite(values)
# re-check that the statistic data series contain valid data after filtering with min_count
if all(~ind_valid):
raise ValueError("Dataframe does not contain any valid statistic values after filtering with min_count = "+str(min_count)+".")
# get a list of middle values for the binning coordinates, to define a nd grid
list_bmid = []
shape = []
for var in list_var_names:
bmid = sorted(np.unique(df_sub[var][ind_valid]))
list_bmid.append(bmid)
shape.append(len(bmid))
# griddata first to perform nearest interpolation with NaNs (irregular grid)
# valid values
values = values[ind_valid]
# coordinates of valid values
points_valid = tuple([df_sub[var].values[ind_valid] for var in list_var_names])
# grid coordinates
bmid_grid = np.meshgrid(*list_bmid, indexing='ij')
points_grid = tuple([bmid_grid[i].flatten() for i in range(len(list_var_names))])
# fill grid no data with nearest neighbour
values_grid = griddata(points_valid, values, points_grid, method='nearest')
values_grid = values_grid.reshape(shape)
# RegularGridInterpolator to perform linear interpolation/extrapolation on the grid
# (will extrapolate only outside of boundaries not filled with the nearest of griddata as fill_value = None)
interp_fun = RegularGridInterpolator(tuple(list_bmid), values_grid, method='linear', bounds_error=False, fill_value=None)
return interp_fun
def nd_binning(values: np.ndarray, list_var: Iterable[np.ndarray], list_var_names: Iterable[str], list_var_bins: Optional[Union[int,Iterable[Iterable]]] = None,
               statistics: Iterable[Union[str, Callable, None]] = ['count', np.nanmedian, nmad], list_ranges: Optional[Iterable[Sequence]] = None) \
        -> pd.DataFrame:
"""
N-dimensional binning of values according to one or several explanatory variables.
Values input is a (N,) array and variable input is a list of flattened arrays of similar dimensions (N,).
For more details on the format of input variables, see documentation of scipy.stats.binned_statistic_dd.
:param values: values array (N,)
:param list_var: list (L) of explanatory variables array (N,)
:param list_var_names: list (L) of names of the explanatory variables
:param list_var_bins: count, or list (L) of counts or custom bin edges for the explanatory variables; defaults to 10 bins
:param statistics: list (X) of statistics to be computed; defaults to count, median and nmad
:param list_ranges: list (L) of minimum and maximum ranges to bin the explanatory variables; defaults to min/max of the data
:return:
"""
# we separate 1d, 2d and nd binning, because propagating statistics between different dimensional binning is not always feasible
# using scipy because it allows for several dimensional binning, while it's not straightforward in pandas
if list_var_bins is None:
list_var_bins = (10,) * len(list_var_names)
elif isinstance(list_var_bins,int):
list_var_bins = (list_var_bins,) * len(list_var_names)
# flatten the arrays if this has not been done by the user
values = values.ravel()
list_var = [var.ravel() for var in list_var]
# remove no data values
valid_data = np.logical_and.reduce([np.isfinite(values)]+[np.isfinite(var) for var in list_var])
values = values[valid_data]
list_var = [var[valid_data] for var in list_var]
statistics_name = [f if isinstance(f,str) else f.__name__ for f in statistics]
# get binned statistics in 1d: a simple loop is sufficient
list_df_1d = []
for i, var in enumerate(list_var):
df_stats_1d = pd.DataFrame()
# get statistics
for j, statistic in enumerate(statistics):
stats_binned_1d, bedges_1d = binned_statistic(var,values,statistic=statistic,bins=list_var_bins[i],range=list_ranges)[:2]
# save in a dataframe
df_stats_1d[statistics_name[j]] = stats_binned_1d
# we need to get the middle of the bins from the edges, to get the same dimension length
df_stats_1d[list_var_names[i]] = pd.IntervalIndex.from_breaks(bedges_1d,closed='left')
# report number of dimensions used
df_stats_1d['nd'] = 1
list_df_1d.append(df_stats_1d)
# get binned statistics in 2d: all possible 2d combinations
list_df_2d = []
if len(list_var)>1:
combs = list(itertools.combinations(list_var_names, 2))
for i, comb in enumerate(combs):
var1_name, var2_name = comb
# corresponding variables indexes
i1, i2 = list_var_names.index(var1_name), list_var_names.index(var2_name)
df_stats_2d = pd.DataFrame()
for j, statistic in enumerate(statistics):
stats_binned_2d, bedges_var1, bedges_var2 = binned_statistic_2d(list_var[i1],list_var[i2],values,statistic=statistic
,bins=[list_var_bins[i1],list_var_bins[i2]]
,range=list_ranges)[:3]
# get statistics
df_stats_2d[statistics_name[j]] = stats_binned_2d.flatten()
# derive interval indexes and convert bins into 2d indexes
ii1 = pd.IntervalIndex.from_breaks(bedges_var1,closed='left')
ii2 = pd.IntervalIndex.from_breaks(bedges_var2,closed='left')
df_stats_2d[var1_name] = [i1 for i1 in ii1 for i2 in ii2]
df_stats_2d[var2_name] = [i2 for i1 in ii1 for i2 in ii2]
# report number of dimensions used
df_stats_2d['nd'] = 2
list_df_2d.append(df_stats_2d)
# get binned statistics in nd, without redoing the same stats
df_stats_nd = pd.DataFrame()
if len(list_var)>2:
for j, statistic in enumerate(statistics):
stats_binned_2d, list_bedges = binned_statistic_dd(list_var,values,statistic=statistic,bins=list_var_bins,range=list_ranges)[0:2]
df_stats_nd[statistics_name[j]] = stats_binned_2d.flatten()
list_ii = []
        # loop through the bin edges and create IntervalIndexes from them (to get both the left and right edge of each bin)
for bedges in list_bedges:
list_ii.append(pd.IntervalIndex.from_breaks(bedges,closed='left'))
# create nd indexes in nd-array and flatten for each variable
iind = np.meshgrid(*list_ii)
for i, var_name in enumerate(list_var_names):
df_stats_nd[var_name] = iind[i].flatten()
# report number of dimensions used
df_stats_nd['nd'] = len(list_var_names)
# concatenate everything
list_all_dfs = list_df_1d + list_df_2d + [df_stats_nd]
df_concat = pd.concat(list_all_dfs)
# commenting for now: pd.MultiIndex can be hard to use
# df_concat = df_concat.set_index(list_var_names)
return df_concat
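# Hedged usage sketch (not part of the original module): shows how nd_binning and
# interp_nd_binning defined above are typically chained. The data and variable names
# (dh, slope, curvature) are hypothetical; relies on the module-level numpy import.
def _example_nd_binning_to_interpolant():
    rng = np.random.default_rng(42)
    dh = rng.normal(0, 1, 10000)            # hypothetical elevation differences
    slope = rng.uniform(0, 60, 10000)       # hypothetical slope (degrees)
    curvature = rng.uniform(-5, 5, 10000)   # hypothetical curvature
    # Bin count, median and NMAD of dh along the two explanatory variables (default statistics)
    df = nd_binning(values=dh, list_var=[slope, curvature], list_var_names=['slope', 'curvature'])
    # Turn the 2D binned NMAD into a continuous interpolant, ignoring poorly-sampled bins
    fun = interp_nd_binning(df, list_var_names=['slope', 'curvature'], statistic='nmad', min_count=10)
    # Predicted dispersion at 20 degrees of slope and zero curvature
    return fun((20., 0.))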
def create_circular_mask(shape: Union[int, Sequence[int]], center: Optional[list[float]] = None,
radius: Optional[float] = None) -> np.ndarray:
"""
    Create circular mask on a raster, defaults to the center of the array and its half-width
:param shape: shape of array
:param center: center
:param radius: radius
:return:
"""
w, h = shape
if center is None: # use the middle of the image
center = (int(w / 2), int(h / 2))
if radius is None: # use the smallest distance between the center and image walls
radius = min(center[0], center[1], w - center[0], h - center[1])
# skimage disk is not inclusive (correspond to distance_from_center < radius and not <= radius)
mask = np.zeros(shape, dtype=bool)
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "invalid value encountered in true_divide")
rr, cc = disk(center=center,radius=radius,shape=shape)
mask[rr, cc] = True
# manual solution
# Y, X = np.ogrid[:h, :w]
# dist_from_center = np.sqrt((X - center[0]) ** 2 + (Y - center[1]) ** 2)
# mask = dist_from_center < radius
return mask
def create_ring_mask(shape: Union[int, Sequence[int]], center: Optional[list[float]] = None, in_radius: float = 0.,
out_radius: Optional[float] = None) -> np.ndarray:
"""
Create ring mask on a raster, defaults to the center of the array and a circle mask of half width of the array
:param shape: shape of array
:param center: center
:param in_radius: inside radius
:param out_radius: outside radius
:return:
"""
w, h = shape
if out_radius is None:
center = (int(w / 2), int(h / 2))
out_radius = min(center[0], center[1], w - center[0], h - center[1])
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "invalid value encountered in true_divide")
mask_inside = create_circular_mask((w,h),center=center,radius=in_radius)
mask_outside = create_circular_mask((w,h),center=center,radius=out_radius)
mask_ring = np.logical_and(~mask_inside,mask_outside)
return mask_ring
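# Hedged sketch (not part of the original module): builds a disk and a ring mask on a small
# grid and checks that the ring equals the outer disk minus the inner disk.
def _example_masks():
    disk_inner = create_circular_mask((100, 100), radius=10)
    disk_outer = create_circular_mask((100, 100), radius=30)
    ring = create_ring_mask((100, 100), in_radius=10, out_radius=30)
    return np.array_equal(ring, np.logical_and(disk_outer, ~disk_inner))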
def _subsample_wrapper(values: np.ndarray, coords: np.ndarray, shape: tuple[int,int] = None, subsample: int = 10000,
subsample_method: str = 'pdist_ring', inside_radius = None, outside_radius = None,
random_state: None | np.random.RandomState | np.random.Generator | int = None) -> tuple[np.ndarray, np.ndarray]:
"""
(Not used by default)
Wrapper for subsampling pdist methods
"""
nx, ny = shape
# Define state for random subsampling (to fix results during testing)
if random_state is None:
rnd = np.random.default_rng()
elif isinstance(random_state, (np.random.RandomState, np.random.Generator)):
rnd = random_state
else:
rnd = np.random.RandomState(np.random.MT19937(np.random.SeedSequence(random_state)))
# Subsample spatially for disk/ring methods
if subsample_method in ['pdist_disk', 'pdist_ring']:
# Select random center coordinates
center_x = rnd.choice(nx, 1)[0]
center_y = rnd.choice(ny, 1)[0]
if subsample_method == 'pdist_ring':
subindex = create_ring_mask((nx, ny), center=[center_x, center_y], in_radius=inside_radius,
out_radius=outside_radius)
else:
subindex = create_circular_mask((nx, ny), center=[center_x, center_y], radius=inside_radius)
index = subindex.flatten()
values_sp = values[index]
coords_sp = coords[index, :]
else:
values_sp = values
coords_sp = coords
index = subsample_raster(values_sp, subsample=subsample, return_indices=True, random_state=rnd)
values_sub = values_sp[index[0]]
coords_sub = coords_sp[index[0], :]
return values_sub, coords_sub
def _aggregate_pdist_empirical_variogram(values: np.ndarray, coords: np.ndarray, subsample: int, shape: tuple,
subsample_method: str, gsd: float,
pdist_multi_ranges: Optional[list[float]] = None, **kwargs) -> pd.DataFrame:
"""
(Not used by default)
Aggregating subfunction of sample_empirical_variogram for pdist methods.
The pairwise differences are calculated within each subsample.
"""
# If no multi_ranges are provided, define a logical default behaviour with the pixel size and grid size
if subsample_method in ['pdist_disk', 'pdist_ring']:
if pdist_multi_ranges is None:
            # Define the list of ranges by successive doubling of the resolution until the maximum lag
pdist_multi_ranges = []
# We start at 10 times the ground sampling distance
new_range = gsd * 10
while new_range < kwargs.get('maxlag') / 2:
pdist_multi_ranges.append(new_range)
new_range *= 2
pdist_multi_ranges.append(kwargs.get('maxlag'))
# Define subsampling parameters
list_inside_radius, list_outside_radius = ([] for i in range(2))
binned_ranges = [0] + pdist_multi_ranges
for i in range(len(binned_ranges) - 1):
# Radiuses need to be passed as pixel sizes, dividing by ground sampling distance
outside_radius = binned_ranges[i + 1]/gsd
if subsample_method == 'pdist_ring':
inside_radius = binned_ranges[i]/gsd
else:
inside_radius = None
list_outside_radius.append(outside_radius)
list_inside_radius.append(inside_radius)
else:
# For random point selection, no need for multi-range parameters
pdist_multi_ranges = [kwargs.get('maxlag')]
list_outside_radius = [None]
list_inside_radius = [None]
# Estimate variogram with specific subsampling at multiple ranges
list_df_range = []
for j in range(len(pdist_multi_ranges)):
values_sub, coords_sub = _subsample_wrapper(values, coords, shape = shape, subsample = subsample,
subsample_method = subsample_method,
inside_radius = list_inside_radius[j],
outside_radius = list_outside_radius[j],
random_state= kwargs.get('random_state'))
if len(values_sub) == 0:
continue
df_range = _get_pdist_empirical_variogram(values=values_sub, coords=coords_sub, **kwargs)
# Aggregate runs
list_df_range.append(df_range)
df = pd.concat(list_df_range)
return df
def _get_pdist_empirical_variogram(values: np.ndarray, coords: np.ndarray, **kwargs) -> pd.DataFrame:
"""
Get empirical variogram from skgstat.Variogram object calculating pairwise distances within the sample
:param values: values
:param coords: coordinates
:return: empirical variogram (variance, lags, counts)
"""
# Remove random_state keyword argument that is not used
kwargs.pop('random_state')
# Get arguments of Variogram class init function
vgm_args = skg.Variogram.__init__.__code__.co_varnames[:skg.Variogram.__init__.__code__.co_argcount]
# Check no other argument is left to be passed
remaining_kwargs = kwargs.copy()
for arg in vgm_args:
remaining_kwargs.pop(arg, None)
if len(remaining_kwargs) != 0:
warnings.warn('Keyword arguments: '+','.join(list(remaining_kwargs.keys()))+ ' were not used.')
# Filter corresponding arguments before passing
filtered_kwargs = {k:kwargs[k] for k in vgm_args if k in kwargs}
# Derive variogram with default MetricSpace (equivalent to scipy.pdist)
V = skg.Variogram(coordinates=coords, values=values, normalize=False, fit_method=None, **filtered_kwargs)
# Get bins, empirical variogram values, and bin count
bins, exp = V.get_empirical()
count = V.bin_count
# Write to dataframe
df = pd.DataFrame()
df = df.assign(exp=exp, bins=bins, count=count)
return df
def _get_cdist_empirical_variogram(values: np.ndarray, coords: np.ndarray, subsample_method: str,
**kwargs) -> pd.DataFrame:
"""
Get empirical variogram from skgstat.Variogram object calculating pairwise distances between two sample collections
of a MetricSpace (see scikit-gstat documentation for more details)
:param values: values
:param coords: coordinates
:return: empirical variogram (variance, lags, counts)
"""
# Rename the "subsample" argument into "samples", which is used by skgstat Metric subclasses
kwargs['samples'] = kwargs.pop('subsample')
# Rename the "random_state" argument into "rnd", also used by skgstat Metric subclasses
kwargs['rnd'] = kwargs.pop('random_state')
# Define MetricSpace function to be used, fetch possible keywords arguments
if subsample_method == 'cdist_point':
# List keyword arguments of the Probabilistic class init function
ms_args = skg.ProbabalisticMetricSpace.__init__.__code__.co_varnames[:skg.ProbabalisticMetricSpace.__init__.__code__.co_argcount]
ms = skg.ProbabalisticMetricSpace
else:
# List keyword arguments of the RasterEquidistant class init function
ms_args = skg.RasterEquidistantMetricSpace.__init__.__code__.co_varnames[:skg.RasterEquidistantMetricSpace.__init__.__code__.co_argcount]
ms = skg.RasterEquidistantMetricSpace
# Get arguments of Variogram class init function
vgm_args = skg.Variogram.__init__.__code__.co_varnames[:skg.Variogram.__init__.__code__.co_argcount]
# Check no other argument is left to be passed, accounting for MetricSpace arguments
remaining_kwargs = kwargs.copy()
for arg in vgm_args + ms_args:
remaining_kwargs.pop(arg, None)
if len(remaining_kwargs) != 0:
warnings.warn('Keyword arguments: ' + ', '.join(list(remaining_kwargs.keys())) + ' were not used.')
# Filter corresponding arguments before passing to MetricSpace function
filtered_ms_kwargs = {k: kwargs[k] for k in ms_args if k in kwargs}
M = ms(coords=coords, **filtered_ms_kwargs)
# Filter corresponding arguments before passing to Variogram function
filtered_var_kwargs = {k: kwargs[k] for k in vgm_args if k in kwargs}
V = skg.Variogram(M, values=values, normalize=False, fit_method=None, **filtered_var_kwargs)
# Get bins, empirical variogram values, and bin count
bins, exp = V.get_empirical()
count = V.bin_count
# Write to dataframe
df = pd.DataFrame()
df = df.assign(exp=exp, bins=bins, count=count)
return df
def _wrapper_get_empirical_variogram(argdict: dict) -> pd.DataFrame:
"""
    Multiprocessing wrapper for _get_pdist_empirical_variogram and _get_cdist_empirical_variogram
:param argdict: Keyword argument to pass to get_pdist/cdist_empirical_variogram
:return: empirical variogram (variance, lags, counts)
"""
if argdict['verbose']:
print('Working on run '+str(argdict['i']) + ' out of '+str(argdict['imax']))
argdict.pop('i')
argdict.pop('imax')
if argdict['subsample_method'] in ['cdist_equidistant', 'cdist_point']:
# Simple wrapper for the skgstat Variogram function for cdist methods
get_variogram = _get_cdist_empirical_variogram
else:
# Aggregating several skgstat Variogram after iterative subsampling of specific points in the Raster
get_variogram = _aggregate_pdist_empirical_variogram
return get_variogram(**argdict)
def sample_empirical_variogram(values: Union[np.ndarray, RasterType], gsd: float = None, coords: np.ndarray = None,
subsample: int = 10000, subsample_method: str = 'cdist_equidistant',
n_variograms: int = 1, n_jobs: int = 1, verbose=False,
random_state: None | np.random.RandomState | np.random.Generator | int = None,
**kwargs) -> pd.DataFrame:
"""
Sample empirical variograms with binning adaptable to multiple ranges and spatial subsampling adapted for raster data.
By default, subsampling is based on RasterEquidistantMetricSpace implemented in scikit-gstat. This method samples more
effectively large grid data by isolating pairs of spatially equidistant ensembles for distributed pairwise comparison.
In practice, two subsamples are drawn for pairwise comparison: one from a disk of certain radius within the grid, and
another one from rings of larger radii that increase steadily between the pixel size and the extent of the raster.
Those disk and rings are sampled several times across the grid using random centers.
If values are provided as a Raster subclass, nothing else is required.
If values are provided as a 2D array (M,N), a ground sampling distance is sufficient to derive the pairwise distances.
If values are provided as a 1D array (N), an array of coordinates (N,2) or (2,N) is expected. If the coordinates
do not correspond to all points of the grid, a ground sampling distance is needed to correctly get the grid size.
Spatial subsampling method argument subsample_method can be one of "cdist_equidistant", "cdist_point", "pdist_point",
"pdist_disk" and "pdist_ring".
The cdist methods use MetricSpace classes of scikit-gstat and do pairwise comparison of two ensembles as in
scipy.spatial.cdist.
The pdist methods use methods to subsample the Raster points directly and do pairwise comparison within a single
ensemble as in scipy.spatial.pdist.
For the cdist methods, the variogram is estimated in a single run from the MetricSpace.
For the pdist methods, an iterative process is required: a list of ranges subsampled independently is used.
Variograms are derived independently for several runs and ranges using each pairwise sample, and later aggregated.
If the subsampling method selected is "random_point", the multi-range argument is ignored as range has no effect on
this subsampling method.
For pdist methods, keyword arguments are passed to skgstat.Variogram.
For cdist methods, keyword arguments are passed to both skgstat.Variogram and skgstat.MetricSpace.
:param values: values
:param gsd: ground sampling distance
:param coords: coordinates
:param subsample: number of samples to randomly draw from the values
:param subsample_method: spatial subsampling method
:param n_variograms: number of independent empirical variogram estimations
:param n_jobs: number of processing cores
:param verbose: print statements during processing
:param random_state: random state or seed number to use for calculations (to fix random sampling during testing)
:return: empirical variogram (variance, lags, counts)
"""
# First, check all that the values provided are OK
if isinstance(values, Raster):
gsd = values.res[0]
values, mask = get_array_and_mask(values.data)
elif isinstance(values, (np.ndarray, np.ma.masked_array)):
values, mask = get_array_and_mask(values)
else:
raise TypeError('Values must be of type np.ndarray, np.ma.masked_array or Raster subclass.')
values = values.squeeze()
# Then, check if the logic between values, coords and gsd is respected
if (gsd is not None or subsample_method in ['cdist_equidistant', 'pdist_disk','pdist_ring']) and values.ndim == 1:
raise TypeError('Values array must be 2D when using any of the "cdist_equidistant", "pdist_disk" and '
'"pdist_ring" methods, or providing a ground sampling distance instead of coordinates.')
elif coords is not None and values.ndim != 1:
raise TypeError('Values array must be 1D when providing coordinates.')
elif coords is not None and (coords.shape[0] != 2 and coords.shape[1] != 2):
raise TypeError('The coordinates array must have one dimension with length equal to 2')
# Check the subsample method provided exists, otherwise list options
if subsample_method not in ['cdist_equidistant','cdist_point','pdist_point','pdist_disk','pdist_ring']:
raise TypeError('The subsampling method must be one of "cdist_equidistant, "cdist_point", "pdist_point", '
'"pdist_disk" or "pdist_ring".')
# Check that, for several runs, the binning function is an Iterable, otherwise skgstat might provide variogram
# values over slightly different binnings due to randomly changing subsample maximum lags
if n_variograms > 1 and 'bin_func' in kwargs.keys() and not isinstance(kwargs.get('bin_func'), Iterable):
warnings.warn('Using a named binning function of scikit-gstat might provide different binnings for each '
'independent run. To remediate that issue, pass bin_func as an Iterable of right bin edges, '
'(or use default bin_func).')
# Defaulting to coordinates if those are provided
if coords is not None:
nx = None
ny = None
# Making the shape of coordinates consistent if they are transposed
if coords.shape[0] == 2 and coords.shape[1] != 2:
coords = np.transpose(coords)
# If no coordinates provided, we use the shape of the array and the provided ground sampling distance to derive
# relative coordinates (starting at zero)
else:
nx, ny = np.shape(values)
x, y = np.meshgrid(np.arange(0, values.shape[0] * gsd, gsd), np.arange(0, values.shape[1] * gsd, gsd))
coords = np.dstack((x.flatten(), y.flatten())).squeeze()
values = values.flatten()
# Get the ground sampling distance from the coordinates before keeping only valid data, if it was not provided
if gsd is None:
gsd = np.mean([coords[0, 0] - coords[0, 1], coords[0, 0] - coords[1, 0]])
# Get extent
extent = (np.min(coords[:, 0]), np.max(coords[:, 0]), np.min(coords[:, 1]), np.max(coords[:, 1]))
# Get the maximum lag from the coordinates before keeping only valid data, if it was not provided
if 'maxlag' not in kwargs.keys():
# We define maximum lag as the maximum distance between coordinates (needed to provide custom bins, otherwise
# skgstat rewrites the maxlag with the subsample of coordinates provided)
        maxlag = np.sqrt((np.max(coords[:, 0]) - np.min(coords[:, 0])) ** 2
                         + (np.max(coords[:, 1]) - np.min(coords[:, 1])) ** 2)
kwargs.update({'maxlag': maxlag})
# Keep only valid data for cdist methods, remove later for pdist methods
if 'cdist' in subsample_method:
ind_valid = np.isfinite(values)
values = values[ind_valid]
coords = coords[ind_valid, :]
if 'bin_func' not in kwargs.keys():
# If no bin_func is provided, we provide an Iterable to provide a custom binning function to skgstat,
        # because otherwise bins might be inconsistent across runs
bin_func = []
right_bin_edge = np.sqrt(2) * gsd
while right_bin_edge < kwargs.get('maxlag'):
bin_func.append(right_bin_edge)
# We use the default exponential increasing factor of RasterEquidistantMetricSpace, adapted for grids
right_bin_edge *= np.sqrt(2)
bin_func.append(kwargs.get('maxlag'))
kwargs.update({'bin_func': bin_func})
# Prepare necessary arguments to pass to variogram subfunctions
args = {'values': values, 'coords': coords, 'subsample_method': subsample_method, 'subsample': subsample,
'verbose': verbose}
if subsample_method in ['cdist_equidistant','pdist_ring','pdist_disk', 'pdist_point']:
# The shape is needed for those three methods
args.update({'shape': (nx, ny)})
if subsample_method == 'cdist_equidistant':
# The coordinate extent is needed for this method
args.update({'extent':extent})
else:
args.update({'gsd': gsd})
# If a random_state is passed, each run needs to be passed an independent child random state, otherwise they will
# provide exactly the same sampling and results
if random_state is not None:
# Define the random state if only a seed is provided
if isinstance(random_state, (np.random.RandomState, np.random.Generator)):
rnd = random_state
else:
rnd = np.random.RandomState(np.random.MT19937(np.random.SeedSequence(random_state)))
# Create a list of child random states
if n_variograms == 1:
# No issue if there is only one variogram run
list_random_state = [rnd]
else:
# Otherwise, pass a list of seeds
list_random_state = list(rnd.choice(n_variograms, n_variograms, replace=False))
else:
list_random_state = [None for i in range(n_variograms)]
# Derive the variogram
# Differentiate between 1 core and several cores for multiple runs
# All variogram runs have random sampling inherent to their subfunctions, so we provide the same input arguments
if n_jobs == 1:
if verbose:
print('Using 1 core...')
list_df_run = []
for i in range(n_variograms):
argdict = {'i': i, 'imax': n_variograms, 'random_state': list_random_state[i], **args, **kwargs}
df_run = _wrapper_get_empirical_variogram(argdict=argdict)
list_df_run.append(df_run)
else:
if verbose:
print('Using ' + str(n_jobs) + ' cores...')
pool = mp.Pool(n_jobs, maxtasksperchild=1)
argdict = [{'i': i, 'imax': n_variograms, 'random_state': list_random_state[i], **args, **kwargs} for i in range(n_variograms)]
list_df_run = pool.map(_wrapper_get_empirical_variogram, argdict, chunksize=1)
pool.close()
pool.join()
# Aggregate multiple ranges subsampling
df = pd.concat(list_df_run)
# For a single run, no multi-run sigma estimated
if n_variograms == 1:
df['err_exp'] = np.nan
# For several runs, group results, use mean as empirical variogram, estimate sigma, and sum the counts
else:
df_grouped = df.groupby('bins', dropna=False)
df_mean = df_grouped[['exp']].mean()
df_std = df_grouped[['exp']].std()
df_count = df_grouped[['count']].sum()
df_mean['bins'] = df_mean.index.values
df_mean['err_exp'] = df_std['exp']
df_mean['count'] = df_count['count']
df = df_mean
return df
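# Hedged usage sketch (not part of the original module): samples an empirical variogram from
# a synthetic 2D array with a hypothetical 30 m ground sampling distance, using the default
# equidistant subsampling. The array, subsample size and seed are illustrative only.
def _example_sample_empirical_variogram():
    rng = np.random.default_rng(42)
    dh = rng.normal(0, 1, (500, 500))       # hypothetical elevation differences
    df = sample_empirical_variogram(values=dh, gsd=30., subsample=100, n_variograms=2,
                                    random_state=42)
    # The dataframe holds the lag bins, experimental variance, its between-run dispersion and counts
    return df[['bins', 'exp', 'err_exp', 'count']]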
def fit_sum_model_variogram(list_model: list[str], empirical_variogram: pd.DataFrame,
bounds: list[tuple[float, float]] = None,
p0: list[float] = None) -> tuple[Callable, list[float]]:
"""
Fit a multi-range variogram model to an empirical variogram, weighted least-squares based on sampling errors
:param list_model: list of K variogram models to sum for the fit: from short-range to long-ranges
:param empirical_variogram: empirical variogram
:param bounds: bounds of ranges and sills for each model (shape K x 4 = K x range lower, range upper, sill lower, sill upper)
:param p0: initial guess of ranges and sills each model (shape K x 2 = K x range first guess, sill first guess)
:return: modelled variogram function, coefficients
"""
# TODO: expand to other models than spherical, exponential, gaussian (more than 2 arguments)
# Define a sum of variogram function
def vgm_sum(h, *args):
fn = 0
i = 0
for model in list_model:
fn += skg.models.spherical(h, args[i], args[i+1])
# fn += vgm(h, model=model,crange=args[i],psill=args[i+1])
i += 2
return fn
# First, filter outliers
empirical_variogram = empirical_variogram[np.isfinite(empirical_variogram.exp.values)]
# Use shape of empirical variogram to assess rough boundaries/first estimates
n_average = np.ceil(len(empirical_variogram.exp.values) / 10)
exp_movaverage = np.convolve(empirical_variogram.exp.values, np.ones(int(n_average)) / n_average, mode='valid')
grad = np.gradient(exp_movaverage, 2)
# Maximum variance of the process
max_var = np.max(exp_movaverage)
# Simplify things for scipy: let's provide boundaries and first guesses
if bounds is None:
bounds = []
for i in range(len(list_model)):
# Use largest boundaries possible for our problem
psill_bound = [0, max_var]
range_bound = [0, empirical_variogram.bins.values[-1]]
# Add bounds and guesses with same order as function arguments
bounds.append(range_bound)
bounds.append(psill_bound)
if p0 is None:
p0 = []
for i in range(len(list_model)):
# Use psill evenly distributed
psill_p0 = ((i+1)/len(list_model))*max_var
# Use corresponding ranges
# !! This fails when no empirical value crosses this (too wide binning/nugget)
# ind = np.array(np.abs(exp_movaverage-psill_p0)).argmin()
# range_p0 = empirical_variogram.bins.values[ind]
range_p0 = ((i+1)/len(list_model)) * empirical_variogram.bins.values[-1]
p0.append(range_p0)
p0.append(psill_p0)
bounds = np.transpose(np.array(bounds))
# If the error provided is all NaNs (single variogram run), or all zeros (two variogram runs), run without weights
if np.all(np.isnan(empirical_variogram.err_exp.values)) or np.all(empirical_variogram.err_exp.values == 0):
cof, cov = curve_fit(vgm_sum, empirical_variogram.bins.values, empirical_variogram.exp.values, method='trf',
p0=p0, bounds=bounds)
# Otherwise, use a weighted fit
else:
# We need to filter for possible no data in the error
valid = np.isfinite(empirical_variogram.err_exp.values)
cof, cov = curve_fit(vgm_sum, empirical_variogram.bins.values[valid], empirical_variogram.exp.values[valid],
method='trf', p0=p0, bounds=bounds, sigma=empirical_variogram.err_exp.values[valid])
# Provide the output function (couldn't find a way to pass this through functool.partial as arguments are unordered)
def vgm_sum_fit(h):
fn = 0
i = 0
for model in list_model:
fn += skg.models.spherical(h, cof[i], cof[i+1])
i += 2
return fn
return vgm_sum_fit, cof
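# Hedged sketch (not part of the original module): fits a sum of two spherical models to an
# empirical variogram (expected as the output of sample_empirical_variogram above), then
# evaluates the fitted model. The 1 km lag is an arbitrary illustration value.
def _example_fit_sum_model_variogram(df_empirical):
    fun, cof = fit_sum_model_variogram(['Sph', 'Sph'], empirical_variogram=df_empirical)
    # cof follows the argument order of the sum: (range1, sill1, range2, sill2)
    return fun(1000.), cof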
def exact_neff_sphsum_circular(area: float, crange1: float, psill1: float, crange2: float, psill2: float) -> float:
"""
Number of effective samples derived from exact integration of sum of 2 spherical variogram models over a circular area.
The number of effective samples serves to convert between standard deviation/partial sills and standard error
over the area.
If SE is the standard error, SD the standard deviation and N_eff the number of effective samples, we have:
SE = SD / sqrt(N_eff) => N_eff = SD^2 / SE^2 => N_eff = (PS1 + PS2)/SE^2 where PS1 and PS2 are the partial sills
estimated from the variogram models, and SE is estimated by integrating the variogram models with parameters PS1/PS2
and R1/R2 where R1/R2 are the correlation ranges.
    Source: Rolstad et al. (2009), appendix: http://dx.doi.org/10.3189/002214309789470950
:param area: circular area
:param crange1: range of short-range variogram model
:param psill1: partial sill of short-range variogram model
:param crange2: range of long-range variogram model
:param psill2: partial sill of long-range variogram model
:return: number of effective samples
"""
# short range variogram
c1 = psill1 # partial sill
a1 = crange1 # short correlation range
# long range variogram
c1_2 = psill2
a1_2 = crange2 # long correlation range
h_equiv = np.sqrt(area / np.pi)
# hypothesis of a circular shape to integrate variogram model
if h_equiv > a1_2:
std_err = np.sqrt(c1 * a1 ** 2 / (5 * h_equiv ** 2) + c1_2 * a1_2 ** 2 / (5 * h_equiv ** 2))
elif (h_equiv < a1_2) and (h_equiv > a1):
std_err = np.sqrt(c1 * a1 ** 2 / (5 * h_equiv ** 2) + c1_2 * (1-h_equiv / a1_2+1 / 5 * (h_equiv / a1_2) ** 3))
else:
std_err = np.sqrt(c1 * (1-h_equiv / a1+1 / 5 * (h_equiv / a1) ** 3) +
c1_2 * (1-h_equiv / a1_2+1 / 5 * (h_equiv / a1_2) ** 3))
return (psill1 + psill2)/std_err**2
def neff_circ(area: float, list_vgm: list[tuple[float, str, float]]) -> float:
"""
    Number of effective samples derived from numerical integration for any sum of variogram models over a circular area
(generalization of Rolstad et al. (2009): http://dx.doi.org/10.3189/002214309789470950)
The number of effective samples N_eff serves to convert between standard deviation/partial sills and standard error
over the area: SE = SD / sqrt(N_eff) if SE is the standard error, SD the standard deviation.
:param area: area
:param list_vgm: variogram functions to sum (range, model name, partial sill)
:returns: number of effective samples
"""
psill_tot = 0
for vario in list_vgm:
psill_tot += vario[2]
def hcov_sum(h):
fn = 0
for vario in list_vgm:
crange, model, psill = vario
fn += h*(cov(h, crange, model=model, psill=psill))
return fn
h_equiv = np.sqrt(area / np.pi)
full_int = integrate_fun(hcov_sum, 0, h_equiv)
std_err = np.sqrt(2*np.pi*full_int / area)
return psill_tot/std_err**2
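# Hedged numeric sketch (not part of the original module): converts a standard deviation into
# a standard error over a 10 km2 circular area, for a sum of a short-range and a long-range
# spherical model. All parameter values below are hypothetical.
def _example_neff_to_standard_error():
    area = 10 * 1e6                                        # 10 km2 in m2
    list_vgm = [(100., 'Sph', 0.3), (5000., 'Sph', 0.7)]   # (range, model, partial sill)
    n_eff = neff_circ(area, list_vgm)
    # Partial sills sum to a variance of 1, i.e. a standard deviation of 1
    return std_err(1., n_eff)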
def neff_rect(area: float, width: float, crange1: float, psill1: float, model1: str = 'Sph', crange2: float = None,
psill2: float = None, model2: str = None) -> float:
"""
Number of effective samples derived from numerical integration for a sum of 2 variogram functions over a rectangular area
:param area: area
:param width: width of rectangular area
:param crange1: correlation range of first variogram
:param psill1: partial sill of first variogram
:param model1: model of first variogram
:param crange2: correlation range of second variogram
:param psill2: partial sill of second variogram
:param model2: model of second variogram
:returns: number of effective samples
"""
def hcov_sum(h, crange1=crange1, psill1=psill1, model1=model1, crange2=crange2, psill2=psill2, model2=model2):
if crange2 is None or psill2 is None or model2 is None:
return h*(cov(h, crange1, model=model1, psill=psill1))
else:
return h*(cov(h, crange1, model=model1, psill=psill1)+cov(h, crange2, model=model2, psill=psill2))
width = min(width, area/width)
full_int = integrate_fun(hcov_sum, 0, width/2)
bin_int = np.linspace(width/2, area/width, 100)
for i in range(len(bin_int)-1):
low = bin_int[i]
upp = bin_int[i+1]
mid = bin_int[i] + (bin_int[i+1] - bin_int[i])/2
piec_int = integrate_fun(hcov_sum, low, upp)
full_int += piec_int * 2/np.pi*np.arctan(width/(2*mid))
std_err = np.sqrt(2*np.pi*full_int / area)
if crange2 is None or psill2 is None or model2 is None:
return psill1 / std_err ** 2
else:
return (psill1 + psill2) / std_err ** 2
def integrate_fun(fun: Callable, low_b: float, upp_b: float) -> float:
"""
Numerically integrate function between upper and lower bounds
:param fun: function
:param low_b: lower bound
:param upp_b: upper bound
:return: integral
"""
return integrate.quad(fun, low_b, upp_b)[0]
def cov(h: float, crange: float, model: str = 'Sph', psill: float = 1., kappa: float = 1/2, nugget: float = 0) -> float:
    """
    Covariance function based on variogram function (COV = SILL - VGM)
:param h: spatial lag
:param crange: correlation range
:param model: model
:param psill: partial sill
:param kappa: smoothing parameter for Exp Class
:param nugget: nugget
:returns: covariance function
"""
return (nugget + psill) - vgm(h, crange, model=model, psill=psill, kappa=kappa)
def vgm(h: float, crange: float, model: str = 'Sph', psill: float = 1., kappa: float = 1/2, nugget: float = 0):
"""
Compute variogram model function (Spherical, Exponential, Gaussian or Exponential Class)
:param h: spatial lag
:param crange: correlation range
:param model: model
:param psill: partial sill
:param kappa: smoothing parameter for Exp Class
:param nugget: nugget
:returns: variogram function
"""
c0 = nugget # nugget
c1 = psill # partial sill
a1 = crange # correlation range
    s = kappa # smoothness parameter for the stable exponential ('Exc') model
if model == 'Sph': # spherical model
if h < a1:
vgm = c0 + c1 * (3 / 2 * h / a1-1 / 2 * (h / a1) ** 3)
else:
vgm = c0 + c1
elif model == 'Exp': # exponential model
vgm = c0 + c1 * (1-np.exp(-h / a1))
elif model == 'Gau': # gaussian model
vgm = c0 + c1 * (1-np.exp(- (h / a1) ** 2))
elif model == 'Exc': # stable exponential model
vgm = c0 + c1 * (1-np.exp(-(h / a1)**s))
return vgm
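# Hedged sketch (not part of the original module): checks the covariance/variogram relation
# COV(h) = sill - VGM(h) used by cov() above, for a spherical model at half its range.
def _example_cov_vgm_relation():
    h, crange, psill = 50., 100., 2.
    return np.isclose(cov(h, crange, model='Sph', psill=psill)
                      + vgm(h, crange, model='Sph', psill=psill), psill)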
def std_err_finite(std: float, neff_tot: float, neff: float) -> float:
"""
Standard error of subsample of a finite ensemble
:param std: standard deviation
:param neff_tot: maximum number of effective samples
:param neff: number of effective samples
:return: standard error
"""
return std * np.sqrt(1 / neff_tot * (neff_tot - neff) / neff_tot)
def std_err(std: float, neff: float) -> float:
"""
Standard error
:param std: standard deviation
:param neff: number of effective samples
:return: standard error
"""
return std * np.sqrt(1 / neff)
def distance_latlon(tup1: tuple, tup2: tuple, earth_rad: float = 6373000) -> float:
"""
Distance between two lat/lon coordinates projected on a spheroid
ref: https://stackoverflow.com/questions/19412462/getting-distance-between-two-points-based-on-latitude-longitude
:param tup1: lon/lat coordinates of first point
:param tup2: lon/lat coordinates of second point
:param earth_rad: radius of the earth in meters
:return: distance
"""
lat1 = m.radians(abs(tup1[1]))
lon1 = m.radians(abs(tup1[0]))
lat2 = m.radians(abs(tup2[1]))
lon2 = m.radians(abs(tup2[0]))
dlon = lon2 - lon1
dlat = lat2 - lat1
a = m.sin(dlat / 2)**2 + m.cos(lat1) * m.cos(lat2) * m.sin(dlon / 2)**2
c = 2 * m.atan2(m.sqrt(a), m.sqrt(1 - a))
distance = earth_rad * c
return distance
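# Hedged sanity check (not part of the original module): one degree of latitude should be
# roughly 111 km on the spherical approximation used above.
def _example_distance_latlon():
    d = distance_latlon((0., 0.), (0., 1.))   # (lon, lat) tuples
    return 110.e3 < d < 112.e3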
def kernel_sph(xi: float, x0: float, a1: float) -> float:
# TODO: homogenize kernel/variogram use
"""
Spherical kernel
:param xi: position of first point
:param x0: position of second point
:param a1: range of kernel
:return: covariance between the two points
"""
if np.abs(xi - x0) > a1:
return 0
else:
return 1 - 3 / 2 * np.abs(xi-x0) / a1 + 1 / 2 * (np.abs(xi-x0) / a1) ** 3
def part_covar_sum(argsin: tuple) -> float:
"""
Multiprocessing wrapper for covariance summing
:param argsin: Tupled argument for covariance calculation
:return: covariance sum
"""
list_tuple_errs, corr_ranges, list_area_tot, list_lat, list_lon, i_range = argsin
n = len(list_tuple_errs)
part_var_err = 0
for i in i_range:
for j in range(n):
d = distance_latlon((list_lon[i], list_lat[i]), (list_lon[j], list_lat[j]))
for k in range(len(corr_ranges)):
part_var_err += kernel_sph(0, d, corr_ranges[k]) * list_tuple_errs[i][k] * list_tuple_errs[j][k] * \
list_area_tot[i] * list_area_tot[j]
return part_var_err
def double_sum_covar(list_tuple_errs: list[float], corr_ranges: list[float], list_area_tot: list[float],
list_lat: list[float], list_lon: list[float], nproc: int = 1) -> float:
"""
Double sum of covariances for propagating multi-range correlated errors between disconnected spatial ensembles
:param list_tuple_errs: list of tuples of correlated errors by range, by ensemble
:param corr_ranges: list of correlation ranges
:param list_area_tot: list of areas of ensembles
:param list_lat: list of center latitude of ensembles
:param list_lon: list of center longitude of ensembles
:param nproc: number of cores to use for multiprocessing
    :returns: propagated error (square root of the area-weighted double sum of covariances)
"""
n = len(list_tuple_errs)
if nproc == 1:
print('Deriving double covariance sum with 1 core...')
var_err = 0
for i in range(n):
for j in range(n):
d = distance_latlon((list_lon[i], list_lat[i]), (list_lon[j], list_lat[j]))
for k in range(len(corr_ranges)):
var_err += kernel_sph(0, d, corr_ranges[k]) * list_tuple_errs[i][k] * list_tuple_errs[j][k] * \
list_area_tot[i] * list_area_tot[j]
else:
print('Deriving double covariance sum with '+str(nproc)+' cores...')
pack_size = int(np.ceil(n/nproc))
argsin = [(list_tuple_errs, corr_ranges, list_area_tot, list_lon, list_lat, np.arange(
i, min(i+pack_size, n))) for k, i in enumerate(np.arange(0, n, pack_size))]
pool = mp.Pool(nproc, maxtasksperchild=1)
outputs = pool.map(part_covar_sum, argsin, chunksize=1)
pool.close()
pool.join()
var_err = np.sum(np.array(outputs))
area_tot = 0
for j in range(len(list_area_tot)):
area_tot += list_area_tot[j]
var_err /= np.nansum(area_tot) ** 2
return np.sqrt(var_err)
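# Hedged usage sketch (not part of the original module): propagates errors correlated over two
# hypothetical ranges (200 m and 5 km) between two disconnected regions. The error values,
# areas and center coordinates are made up for illustration.
def _example_double_sum_covar():
    corr_ranges = [200., 5000.]
    # one tuple of correlated errors per region, one value per correlation range
    list_tuple_errs = [(0.1, 0.05), (0.2, 0.08)]
    list_area_tot = [2.0e6, 5.0e6]            # m2
    list_lat = [61.0, 61.05]
    list_lon = [7.0, 7.1]
    return double_sum_covar(list_tuple_errs, corr_ranges, list_area_tot, list_lat, list_lon)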
def patches_method(values: np.ndarray, gsd: float, area: float, mask: Optional[np.ndarray] = None,
perc_min_valid: float = 80., statistics: Iterable[Union[str, Callable, None]] = ['count', np.nanmedian ,nmad],
patch_shape: str = 'circular', n_patches: int = 1000, verbose: bool = False,
random_state: None | int | np.random.RandomState | np.random.Generator = None) -> pd.DataFrame:
"""
Patches method for empirical estimation of the standard error over an integration area
:param values: values
:param gsd: ground sampling distance
:param mask: mask of sampled terrain
:param area: size of integration area
:param perc_min_valid: minimum valid area in the patch
:param statistics: list of statistics to compute in the patch
:param patch_shape: shape of patch ['circular' or 'rectangular']
:param n_patches: maximum number of patches to sample
:param verbose: print statement to console
:param random_state: random state or seed number to use for calculations (to fix random sampling during testing)
    :return: dataframe with the requested statistics for each sampled patch (one row per patch, with its tile index)
"""
# Define state for random subsampling (to fix results during testing)
if random_state is None:
rnd = np.random.default_rng()
elif isinstance(random_state, (np.random.RandomState, np.random.Generator)):
rnd = random_state
else:
rnd = np.random.RandomState(np.random.MT19937(np.random.SeedSequence(random_state)))
statistics_name = [f if isinstance(f,str) else f.__name__ for f in statistics]
values, mask_values = get_array_and_mask(values)
values = values.squeeze()
# Use all grid if no mask is provided
if mask is None:
mask = np.ones(np.shape(values),dtype=bool)
# First, remove non sampled area (but we need to keep the 2D shape of raster for patch sampling)
valid_mask = np.logical_and(~mask_values, mask)
values[~valid_mask] = np.nan
# Divide raster in cadrants where we can sample
nx, ny = np.shape(values)
valid_count = len(values[~np.isnan(values)])
count = nx * ny
if verbose:
        print('Number of valid pixels: ' + str(valid_count))
nb_cadrant = int(np.floor(np.sqrt((count * gsd ** 2) / area) + 1))
# For rectangular quadrants
nx_sub = int(np.floor((nx - 1) / nb_cadrant))
ny_sub = int(np.floor((ny - 1) / nb_cadrant))
# For circular patches
rad = np.sqrt(area/np.pi) / gsd
# Create list of all possible cadrants
list_cadrant = [[i, j] for i in range(nb_cadrant) for j in range(nb_cadrant)]
u = 0
# Keep sampling while there is cadrants left and below maximum number of patch to sample
remaining_nsamp = n_patches
list_df = []
while len(list_cadrant) > 0 and u < n_patches:
# Draw a random coordinate from the list of cadrants, select more than enough random points to avoid drawing
# randomly and differencing lists several times
list_idx_cadrant = rnd.choice(len(list_cadrant), size=min(len(list_cadrant), 10*remaining_nsamp))
for idx_cadrant in list_idx_cadrant:
if verbose:
print('Working on a new cadrant')
# Select center coordinates
i = list_cadrant[idx_cadrant][0]
j = list_cadrant[idx_cadrant][1]
if patch_shape == 'rectangular':
patch = values[nx_sub * i:nx_sub * (i + 1), ny_sub * j:ny_sub * (j + 1)].flatten()
elif patch_shape == 'circular':
center_x = np.floor(nx_sub*(i+1/2))
center_y = np.floor(ny_sub*(j+1/2))
mask = create_circular_mask((nx, ny), center=[center_x, center_y], radius=rad)
patch = values[mask]
else:
raise ValueError('Patch method must be rectangular or circular.')
nb_pixel_total = len(patch)
nb_pixel_valid = len(patch[np.isfinite(patch)])
if nb_pixel_valid >= np.ceil(perc_min_valid / 100. * nb_pixel_total):
u=u+1
if u > n_patches:
break
if verbose:
print('Found valid cadrant ' + str(u) + ' (maximum: ' + str(n_patches) + ')')
df = pd.DataFrame()
df = df.assign(tile=[str(i) + '_' + str(j)])
for j, statistic in enumerate(statistics):
if isinstance(statistic, str):
if statistic == 'count':
df[statistic] = [nb_pixel_valid]
else:
raise ValueError('No other string than "count" are supported for named statistics.')
else:
df[statistics_name[j]] = [statistic(patch)]
list_df.append(df)
# Get remaining samples to draw
remaining_nsamp = n_patches - u
# Remove cadrants already sampled from list
list_cadrant = [c for j, c in enumerate(list_cadrant) if j not in list_idx_cadrant]
if len(list_df)>0:
df_all = pd.concat(list_df)
else:
warnings.warn('No valid patch found covering this area: returning dataframe containing only nans' )
df_all = pd.DataFrame()
for j, statistic in enumerate(statistics):
df_all[statistics_name[j]] = [np.nan]
return df_all
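# Hedged usage sketch (not part of the original module): empirically estimates the dispersion
# of patch means over 1 km2 patches of a synthetic error field, which can be compared to the
# standard error predicted from a variogram model. Data and grid size are hypothetical.
def _example_patches_method():
    rng = np.random.default_rng(42)
    dh = rng.normal(0, 1, (500, 500))         # hypothetical elevation differences
    df_patches = patches_method(dh, gsd=30., area=1.0e6, n_patches=100,
                                statistics=['count', np.nanmean], random_state=42)
    # The spread of the patch means approximates the standard error over the patch area
    return np.nanstd(df_patches['nanmean'].values)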
def plot_vgm(df: pd.DataFrame, list_fit_fun: Optional[list[Callable[[float],float]]] = None,
list_fit_fun_label: Optional[list[str]] = None, ax: matplotlib.axes.Axes | None = None,
xscale='linear', xscale_range_split: Optional[list] = None,
xlabel = None, ylabel = None, xlim = None, ylim = None):
"""
Plot empirical variogram, and optionally also plot one or several model fits.
Input dataframe is expected to be the output of xdem.spatialstats.sample_empirical_variogram.
Input function model is expected to be the output of xdem.spatialstats.fit_sum_model_variogram.
:param df: dataframe of empirical variogram
:param list_fit_fun: list of model function fits
:param list_fit_fun_label: list of model function fits labels
:param ax: plotting ax to use, creates a new one by default
:param xscale: scale of x axis
:param xscale_range_split: list of ranges at which to split the figure
:param xlabel: label of x axis
:param ylabel: label of y axis
:param xlim: limits of x axis
:param ylim: limits of y axis
:return:
"""
# Create axes if they are not passed
if ax is None:
fig = plt.figure()
elif isinstance(ax, matplotlib.axes.Axes):
ax = ax
fig = ax.figure
else:
raise ValueError("ax must be a matplotlib.axes.Axes instance or None")
if ylabel is None:
ylabel = r'Variance [$\mu$ $\pm \sigma$]'
if xlabel is None:
xlabel = 'Spatial lag (m)'
init_gridsize = [10, 10]
# Create parameters to split x axis into different linear scales
# If there is no split, get parameters for a single subplot
if xscale_range_split is None:
nb_subpanels=1
if xscale == 'log':
xmin = [np.min(df.bins)/2]
else:
xmin = [0]
xmax = [np.max(df.bins)]
xgridmin = [0]
xgridmax = [init_gridsize[0]]
gridsize = init_gridsize
# Otherwise, derive a list for each subplot
else:
# Add initial zero if not in input
if xscale_range_split[0] != 0:
if xscale == 'log':
first_xmin = np.min(df.bins)/2
else:
first_xmin = 0
xscale_range_split = [first_xmin] + xscale_range_split
# Add maximum distance if not in input
if xscale_range_split[-1] != np.max(df.bins):
xscale_range_split.append(np.max(df.bins))
# Scale grid size by the number of subpanels
nb_subpanels = len(xscale_range_split)-1
gridsize = init_gridsize.copy()
gridsize[0] *= nb_subpanels
# Create list of parameters to pass to ax/grid objects of subpanels
xmin, xmax, xgridmin, xgridmax = ([] for i in range(4))
for i in range(nb_subpanels):
xmin.append(xscale_range_split[i])
xmax.append(xscale_range_split[i+1])
xgridmin.append(init_gridsize[0]*i)
xgridmax.append(init_gridsize[0]*(i+1))
# Need a grid plot to show the sample count and the statistic
grid = plt.GridSpec(gridsize[1], gridsize[0], wspace=0.5, hspace=0.5)
# Loop over each subpanel
for k in range(nb_subpanels):
# First, an axis to plot the sample histogram
ax0 = fig.add_subplot(grid[:3, xgridmin[k]:xgridmax[k]])
ax0.set_xscale(xscale)
ax0.set_xticks([])
# Plot the histogram manually with fill_between
interval_var = [0] + list(df.bins)
for i in range(len(df)):
count = df['count'].values[i]
ax0.fill_between([interval_var[i], interval_var[i+1]], [0] * 2, [count] * 2,
facecolor=plt.cm.Greys(0.75), alpha=1,
edgecolor='white', linewidth=0.5)
if k == 0:
ax0.set_ylabel('Sample count')
# Scientific format to avoid undesired additional space on the label side
ax0.ticklabel_format(axis='y', style='sci', scilimits=(0, 0))
else:
ax0.set_yticks([])
# Ignore warnings for log scales
ax0.set_xlim((xmin[k], xmax[k]))
# Now, plot the statistic of the data
ax = fig.add_subplot(grid[3:, xgridmin[k]:xgridmax[k]])
# Get the bins center
bins_center = np.subtract(df.bins, np.diff([0] + df.bins.tolist()) / 2)
# If all the estimated errors are all NaN (single run), simply plot the empirical variogram
if np.all(np.isnan(df.err_exp)):
ax.scatter(bins_center, df.exp, label='Empirical variogram', color='blue', marker='x')
# Otherwise, plot the error estimates through multiple runs
else:
ax.errorbar(bins_center, df.exp, yerr=df.err_exp, label='Empirical variogram (1-sigma s.d)', fmt='x')
# If a list of functions is passed, plot the modelled variograms
if list_fit_fun is not None:
for i, fit_fun in enumerate(list_fit_fun):
x = np.linspace(xmin[k], xmax[k], 1000)
y = fit_fun(x)
if list_fit_fun_label is not None:
ax.plot(x, y, linestyle='dashed', label=list_fit_fun_label[i], zorder=30)
else:
ax.plot(x, y, linestyle='dashed', color='black', zorder=30)
if list_fit_fun_label is None:
ax.plot([],[],linestyle='dashed',color='black',label='Model fit')
ax.set_xscale(xscale)
if nb_subpanels>1 and k == (nb_subpanels-1):
ax.xaxis.set_ticks(np.linspace(xmin[k], xmax[k], 3))
elif nb_subpanels>1:
ax.xaxis.set_ticks(np.linspace(xmin[k],xmax[k],3)[:-1])
if xlim is None:
ax.set_xlim((xmin[k], xmax[k]))
else:
ax.set_xlim(xlim)
if ylim is not None:
ax.set_ylim(ylim)
else:
ax.set_ylim((0, np.nanmax(df.exp)+np.nanmean(df.err_exp)))
if k == int(nb_subpanels/2):
ax.set_xlabel(xlabel)
if k == nb_subpanels - 1:
ax.legend(loc='best')
if k == 0:
ax.set_ylabel(ylabel)
else:
ax.set_yticks([])
def plot_1d_binning(df: pd.DataFrame, var_name: str, statistic_name: str, label_var: Optional[str] = None,
label_statistic: Optional[str] = None, min_count: int = 30, ax: matplotlib.axes.Axes | None = None):
"""
Plot a statistic and its count along a single binning variable.
Input is expected to be formatted as the output of the xdem.spatialstats.nd_binning function.
:param df: output dataframe of nd_binning
:param var_name: name of binning variable to plot
:param statistic_name: name of statistic of interest to plot
:param label_var: label of binning variable
:param label_statistic: label of statistic of interest
:param min_count: removes statistic values computed with a count inferior to this minimum value
:param ax: plotting ax to use, creates a new one by default
"""
# Create axes
if ax is None:
fig = plt.figure()
elif isinstance(ax, matplotlib.axes.Axes):
ax = ax
fig = ax.figure
else:
raise ValueError("ax must be a matplotlib.axes.Axes instance or None")
if label_var is None:
label_var = var_name
if label_statistic is None:
label_statistic = statistic_name
# Subsample to 1D and for the variable of interest
df_sub = df[np.logical_and(df.nd == 1, np.isfinite(pd.IntervalIndex(df[var_name]).mid))].copy()
# Remove statistic calculated in bins with too low count
df_sub.loc[df_sub['count']<min_count, statistic_name] = np.nan
# Need a grid plot to show the sample count and the statistic
grid = plt.GridSpec(10, 10, wspace=0.5, hspace=0.5)
# First, an axis to plot the sample histogram
ax0 = fig.add_subplot(grid[:3, :])
ax0.set_xticks([])
# Plot the histogram manually with fill_between
interval_var = pd.IntervalIndex(df_sub[var_name])
for i in range(len(df_sub) ):
count = df_sub['count'].values[i]
ax0.fill_between([interval_var[i].left, interval_var[i].right], [0] * 2, [count] * 2, facecolor=plt.cm.Greys(0.75), alpha=1,
edgecolor='white',linewidth=0.5)
ax0.set_ylabel('Sample count')
# Scientific format to avoid undesired additional space on the label side
ax0.ticklabel_format(axis='y',style='sci',scilimits=(0,0))
# Try to identify if the count is always the same
    # (np.quantile can have a couple of undesired effects, so leave an error margin of 2 wrong bins and 5 count difference)
if np.sum(~(np.abs(df_sub['count'].values[0] - df_sub['count'].values) < 5)) <= 2:
ax0.text(0.5, 0.5, "Fixed number of\n samples: "+'{:,}'.format(int(df_sub['count'].values[0])), ha='center', va='center',
fontweight='bold', transform=ax0.transAxes, bbox=dict(facecolor='white', alpha=0.8))
ax0.set_ylim((0,1.1*np.max(df_sub['count'].values)))
ax0.set_xlim((np.min(interval_var.left),np.max(interval_var.right)))
# Now, plot the statistic of the data
ax = fig.add_subplot(grid[3:, :])
ax.scatter(interval_var.mid, df_sub[statistic_name],marker='x')
ax.set_xlabel(label_var)
ax.set_ylabel(label_statistic)
ax.set_xlim((np.min(interval_var.left),np.max(interval_var.right)))
def plot_2d_binning(df: pd.DataFrame, var_name_1: str, var_name_2: str, statistic_name: str,
label_var_name_1: Optional[str] = None, label_var_name_2: Optional[str] = None,
label_statistic: Optional[str] = None, cmap: matplotlib.colors.Colormap = plt.cm.Reds, min_count: int = 30,
scale_var_1: str = 'linear', scale_var_2: str = 'linear', vmin: float = None, vmax: float = None,
nodata_color: Union[str,tuple[float,float,float,float]] = 'yellow', ax: matplotlib.axes.Axes | None = None):
"""
Plot one statistic and its count along two binning variables.
Input is expected to be formatted as the output of the xdem.spatialstats.nd_binning function.
:param df: output dataframe of nd_binning
:param var_name_1: name of first binning variable to plot
:param var_name_2: name of second binning variable to plot
:param statistic_name: name of statistic of interest to plot
:param label_var_name_1: label of first binning variable
:param label_var_name_2: label of second binning variable
:param label_statistic: label of statistic of interest
:param cmap: colormap
:param min_count: removes statistic values computed with a count inferior to this minimum value
:param scale_var_1: scale along the axis of the first variable
:param scale_var_2: scale along the axis of the second variable
:param vmin: minimum statistic value in colormap range
:param vmax: maximum statistic value in colormap range
:param nodata_color: color for no data bins
:param ax: plotting ax to use, creates a new one by default
"""
# Create axes
if ax is None:
fig = plt.figure(figsize=(8,6))
elif isinstance(ax, matplotlib.axes.Axes):
ax = ax
fig = ax.figure
else:
raise ValueError("ax must be a matplotlib.axes.Axes instance or None")
# Subsample to 2D and for the variables of interest
df_sub = df[np.logical_and.reduce((df.nd == 2, np.isfinite(pd.IntervalIndex(df[var_name_1]).mid),
np.isfinite(pd.IntervalIndex(df[var_name_2]).mid)))].copy()
# Remove statistic calculated in bins with too low count
df_sub.loc[df_sub['count']<min_count, statistic_name] = np.nan
# Let's do a 4 panel figure:
# two histograms for the binning variables
# + a colored grid to display the statistic calculated on the value of interest
# + a legend panel with statistic colormap and nodata color
# For some reason the scientific notation displays weirdly for default figure size
grid = plt.GridSpec(10, 10, wspace=0.5, hspace=0.5)
# First, an horizontal axis on top to plot the sample histogram of the first variable
ax0 = fig.add_subplot(grid[:3, :-3])
ax0.set_xscale(scale_var_1)
ax0.set_xticklabels([])
# Plot the histogram manually with fill_between
interval_var_1 = pd.IntervalIndex(df_sub[var_name_1])
df_sub['var1_mid'] = interval_var_1.mid.values
unique_var_1 = np.unique(df_sub.var1_mid)
list_counts = []
for i in range(len(unique_var_1)):
df_var1 = df_sub[df_sub.var1_mid == unique_var_1[i]]
count = np.nansum(df_var1['count'].values)
list_counts.append(count)
ax0.fill_between([df_var1[var_name_1].values[0].left, df_var1[var_name_1].values[0].right], [0] * 2, [count] * 2, facecolor=plt.cm.Greys(0.75), alpha=1,
edgecolor='white', linewidth=0.5)
ax0.set_ylabel('Sample count')
# In case the axis value does not agree with the scale (e.g., 0 for log scale)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
ax0.set_ylim((0,1.1*np.max(list_counts)))
ax0.set_xlim((np.min(interval_var_1.left),np.max(interval_var_1.right)))
ax0.ticklabel_format(axis='y',style='sci',scilimits=(0,0))
ax0.spines['top'].set_visible(False)
ax0.spines['right'].set_visible(False)
# Try to identify if the count is always the same
    if np.sum(~(np.abs(list_counts[0] - np.array(list_counts)) < 5)) <= 2:
        ax0.text(0.5, 0.5, "Fixed number of\n samples: " + '{:,}'.format(int(list_counts[0])), ha='center', va='center',
                 fontweight='bold', transform=ax0.transAxes, bbox=dict(facecolor='white', alpha=0.8))
import unittest
import numpy
import math
gravity = numpy.array([0, 0, -9.80665])
class LinkBuilder():
"""
For building up a robotic object for gravity compensation calculations
used like:
b = LinkBuilder()
b.addLink(Link(1, 2, 3, 4, (1,1,1), 1))
b.addLink(Link(2, 3, 4, 5, (1,1,1), 1))
b.addLink(Link(3, 4, 5, 6, (1,1,1), 1))
links = b.build()
addLink returns the builder for chaining
"""
def __init__(self):
self.links = None
def addLink(self, link):
if self.links is None:
self.links = link
else:
self.links.addLinkToEnd(link)
return self
def build(self):
return self.links
class Link():
"""
Uses DH parameters
https://en.wikipedia.org/wiki/Denavit%E2%80%93Hartenberg_parameters to
represent the location of each link
Center of mass is relative to the coordinates of the DH frame
all values are in SI units - I.E. m, kg, and radians
"""
def __init__(self, d, theta, r, alpha, centerOfMass, mass):
self.d = d # offset along previous z to the common normal # in meters
self.theta = theta # angle about previous z, from old x to new x # in radians
self.r = r # length of the common normal. Assuming a revolute joint, this is the radius about previous z (Sometimes called a) # in meters
self.alpha = alpha # angle about common normal, from old z axis to new z axis # in radians
self.com = centerOfMass # tuple (x, y, z), in meters
self.mass = mass # in kilograms
self.nextLink = None
def __repr__(self):
return str(self)
def __str__(self):
return "Link {{{}kg at {}}} [d: {}, \u03B8: {}, r: {}, \u03B1: {}] --> {}".format(
self.mass, self.com, self.d, self.theta, self.r, self.alpha, str(self.nextLink))
def __len__(self):
if self.nextLink is not None:
return len(self.nextLink) + 1
return 1
def addLinkToEnd(self, link):
if self.nextLink is None:
self.nextLink = link
else:
self.nextLink.addLinkToEnd(link)
def calculateInverseDynamics(self, *angles):
"""
Calculates the required torque for each motor to compensate
for gravity. Uses DH matrices to calculate the position on
the forward recursive step, then uses Newton-Euler laws of
motion to calculate the required moment acting on each motor
"""
reqArgs = len(self)
# https://en.wikipedia.org/wiki/Denavit%E2%80%93Hartenberg_parameters
if len(angles) == reqArgs:
res = [None for _ in angles] + [(0, 0, 0)]
self._calculateInverseDynamics(angles, 0, res)
res.pop()
return [tuple(x) for x in res]
raise TypeError("calculateInverseDynamics on this object requires exactly {} arguments. ({} given)".format(reqArgs, len(angles)))
def _calculateInverseDynamics(self, angles, index, moments):
# calculate position of center of mass and joint origin taking into account rotation
# TODO: use DH-matrix to calculate the absolute positions
        # Currently it doesn't do this, and will only work for 1-link systems
jointLoc = numpy.array([0,0,0])
com = numpy.array(self.com)
# call _calculateInverseDynamics on self.nextLink, incrementing index if has next
if self.nextLink is not None:
self.nextLink._calculateInverseDynamics(angles, index+1, moments)
# calculate moment caused by gravity, insert into moments[index], taking into account moments[index+1]
gravitationalForce = numpy.multiply(gravity, self.mass)
        radius = numpy.subtract(jointLoc, com)
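        # The remaining step is sketched below as an assumption (a simplified
        # Newton-Euler balance that ignores force propagation from child links);
        # it is not recovered from the original source. Gravity acting at the
        # center of mass produces a torque of (com - jointLoc) x F about the
        # joint, i.e. a lever arm of -radius, and the child link's moment is
        # carried through to this joint.
        gravityMoment = numpy.cross(-radius, gravitationalForce)
        moments[index] = gravityMoment + numpy.array(moments[index + 1])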
from pathlib import Path
from typing import Tuple, Union
import h5py
import numpy
import pandas
def read_expval(path: Union[Path, str]) -> Tuple[numpy.ndarray, numpy.ndarray]:
    # Dispatch on whether `path` points inside an HDF5 file. The helper name below
    # is an assumption: calling read_expval itself at this point would recurse
    # forever, so the split into (is_hdf5, file path, interior path) must come from
    # a dedicated path-inspection helper.
    is_hdf5, path, interior_path = split_hdf5_path(path)
if is_hdf5:
return read_expval_hdf5(path, interior_path)
return read_expval_ascii(path)
def read_expval_ascii(path: Union[Path, str]) -> Tuple[numpy.ndarray, numpy.ndarray]:
with open(path) as fp:
line = fp.readline().split()
if len(line) == 2:
return (
                numpy.array([0.0])
# pylint: disable=missing-function-docstring, missing-module-docstring/
import numpy as np
from pyccel.decorators import types, stack_array, allow_negative_index
a_1d = np.array([1 << i for i in range(21)], dtype=int)
a_2d_f = np.array([[1 << j for j in range(21)] for i in range(21)], dtype=int, order='F')
a_2d_c = np.array([[1 << j for j in range(21)] for i in range(21)], dtype=int)
#==============================================================================
# 1D ARRAYS OF INT-32
#==============================================================================
@types( 'int32[:]', 'int32' )
def array_int32_1d_scalar_add( x, a ):
x[:] += a
@types( 'int32[:]', 'int32' )
def array_int32_1d_scalar_sub( x, a ):
x[:] -= a
@types( 'int32[:]', 'int32' )
def array_int32_1d_scalar_mul( x, a ):
x[:] *= a
@types( 'int32[:]', 'int32' )
def array_int32_1d_scalar_div( x, a ):
x[:] = x / a
@types( 'int32[:]', 'int32' )
def array_int32_1d_scalar_idiv( x, a ):
x[:] = x // a
@types( 'int32[:]', 'int32[:]' )
def array_int32_1d_add( x, y ):
x[:] += y
@types( 'int32[:]', 'int32[:]' )
def array_int32_1d_sub( x, y ):
x[:] -= y
@types( 'int32[:]', 'int32[:]' )
def array_int32_1d_mul( x, y ):
x[:] *= y
@types( 'int32[:]', 'int32[:]' )
def array_int32_1d_idiv( x, y ):
x[:] = x // y
@types( 'int32[:]', 'int32[:]' )
def array_int32_1d_add_augassign( x, y ):
x += y
@types( 'int32[:]', 'int32[:]' )
def array_int32_1d_sub_augassign( x, y ):
x -= y
def array_int_1d_initialization_1():
import numpy as np
a = np.array([1, 2, 4, 8, 16])
b = np.array(a)
return np.sum(b), b[0], b[-1]
def array_int_1d_initialization_2():
import numpy as np
a = [1, 2, 4, 8, 16]
b = np.array(a)
return np.sum(b), b[0], b[-1]
def array_int_1d_initialization_3():
import numpy as np
a = (1, 2, 4, 8, 16)
b = np.array(a)
return np.sum(b), b[0], b[-1]
#==============================================================================
# 2D ARRAYS OF INT-32 WITH C ORDERING
#==============================================================================
@types( 'int32[:,:]', 'int32' )
def array_int32_2d_C_scalar_add( x, a ):
x[:,:] += a
@types( 'int32[:,:]', 'int32' )
def array_int32_2d_C_scalar_sub( x, a ):
x[:,:] -= a
@types( 'int32[:,:]', 'int32' )
def array_int32_2d_C_scalar_mul( x, a ):
x[:,:] *= a
@types( 'int32[:,:]', 'int32' )
def array_int32_2d_C_scalar_idiv( x, a ):
x[:,:] = x // a
@types( 'int32[:,:]', 'int32[:,:]' )
def array_int32_2d_C_add( x, y ):
x[:,:] += y
@types( 'int32[:,:]', 'int32[:,:]' )
def array_int32_2d_C_sub( x, y ):
x[:,:] -= y
@types( 'int32[:,:]', 'int32[:,:]' )
def array_int32_2d_C_mul( x, y ):
x[:,:] *= y
@types( 'int32[:,:]', 'int32[:,:]' )
def array_int32_2d_C_idiv( x, y ):
x[:,:] = x // y
#==============================================================================
# 2D ARRAYS OF INT-32 WITH F ORDERING
#==============================================================================
@types( 'int32[:,:](order=F)', 'int32' )
def array_int32_2d_F_scalar_add( x, a ):
x[:,:] += a
@types( 'int32[:,:](order=F)', 'int32' )
def array_int32_2d_F_scalar_sub( x, a ):
x[:,:] -= a
@types( 'int32[:,:](order=F)', 'int32' )
def array_int32_2d_F_scalar_mul( x, a ):
x[:,:] *= a
@types( 'int32[:,:](order=F)', 'int32' )
def array_int32_2d_F_scalar_idiv( x, a ):
x[:,:] = x // a
@types( 'int32[:,:](order=F)', 'int32[:,:](order=F)' )
def array_int32_2d_F_add( x, y ):
x[:,:] += y
@types( 'int32[:,:](order=F)', 'int32[:,:](order=F)' )
def array_int32_2d_F_sub( x, y ):
x[:,:] -= y
@types( 'int32[:,:](order=F)', 'int32[:,:](order=F)' )
def array_int32_2d_F_mul( x, y ):
x[:,:] *= y
@types( 'int32[:,:](order=F)', 'int32[:,:](order=F)' )
def array_int32_2d_F_idiv( x, y ):
x[:,:] = x // y
#==============================================================================
# 1D ARRAYS OF INT-64
#==============================================================================
@types( 'int[:]', 'int' )
def array_int_1d_scalar_add( x, a ):
x[:] += a
@types( 'int[:]', 'int' )
def array_int_1d_scalar_sub( x, a ):
x[:] -= a
@types( 'int[:]', 'int' )
def array_int_1d_scalar_mul( x, a ):
x[:] *= a
@types( 'int[:]', 'int' )
def array_int_1d_scalar_idiv( x, a ):
x[:] = x // a
@types( 'int[:]', 'int[:]' )
def array_int_1d_add( x, y ):
x[:] += y
@types( 'int[:]', 'int[:]' )
def array_int_1d_sub( x, y ):
x[:] -= y
@types( 'int[:]', 'int[:]' )
def array_int_1d_mul( x, y ):
x[:] *= y
@types( 'int[:]', 'int[:]' )
def array_int_1d_idiv( x, y ):
x[:] = x // y
#==============================================================================
# 2D ARRAYS OF INT-64 WITH C ORDERING
#==============================================================================
@types( 'int[:,:]', 'int' )
def array_int_2d_C_scalar_add( x, a ):
x[:,:] += a
@types( 'int[:,:]', 'int' )
def array_int_2d_C_scalar_sub( x, a ):
x[:,:] -= a
@types( 'int[:,:]', 'int' )
def array_int_2d_C_scalar_mul( x, a ):
x[:,:] *= a
@types( 'int[:,:]', 'int' )
def array_int_2d_C_scalar_idiv( x, a ):
x[:,:] = x // a
@types( 'int[:,:]', 'int[:,:]' )
def array_int_2d_C_add( x, y ):
x[:,:] += y
@types( 'int[:,:]', 'int[:,:]' )
def array_int_2d_C_sub( x, y ):
x[:,:] -= y
@types( 'int[:,:]', 'int[:,:]' )
def array_int_2d_C_mul( x, y ):
x[:,:] *= y
@types( 'int[:,:]', 'int[:,:]' )
def array_int_2d_C_idiv( x, y ):
x[:,:] = x // y
@types('int[:,:]')
def array_int_2d_C_initialization(a):
from numpy import array
tmp = array([[1, 2, 3], [4, 5, 6]])
a[:,:] = tmp[:,:]
#==============================================================================
# 2D ARRAYS OF INT-64 WITH F ORDERING
#==============================================================================
@types( 'int[:,:](order=F)', 'int' )
def array_int_2d_F_scalar_add( x, a ):
x[:,:] += a
@types( 'int[:,:](order=F)', 'int' )
def array_int_2d_F_scalar_sub( x, a ):
x[:,:] -= a
@types( 'int[:,:](order=F)', 'int' )
def array_int_2d_F_scalar_mul( x, a ):
x[:,:] *= a
@types( 'int[:,:](order=F)', 'int' )
def array_int_2d_F_scalar_idiv( x, a ):
x[:,:] = x // a
@types( 'int[:,:](order=F)', 'int[:,:](order=F)' )
def array_int_2d_F_add( x, y ):
x[:,:] += y
@types( 'int[:,:](order=F)', 'int[:,:](order=F)' )
def array_int_2d_F_sub( x, y ):
x[:,:] -= y
@types( 'int[:,:](order=F)', 'int[:,:](order=F)' )
def array_int_2d_F_mul( x, y ):
x[:,:] *= y
@types( 'int[:,:](order=F)', 'int[:,:](order=F)' )
def array_int_2d_F_idiv( x, y ):
x[:,:] = x // y
@types('int[:,:](order=F)')
def array_int_2d_F_initialization(a):
from numpy import array
tmp = array([[1, 2, 3], [4, 5, 6]], dtype='int', order='F')
a[:,:] = tmp[:,:]
#==============================================================================
# 1D ARRAYS OF REAL
#==============================================================================
@types( 'real[:]', 'real' )
def array_real_1d_scalar_add( x, a ):
x[:] += a
@types( 'real[:]', 'real' )
def array_real_1d_scalar_sub( x, a ):
x[:] -= a
@types( 'real[:]', 'real' )
def array_real_1d_scalar_mul( x, a ):
x[:] *= a
@types( 'real[:]', 'real' )
def array_real_1d_scalar_div( x, a ):
x[:] /= a
@types( 'real[:]', 'real' )
def array_real_1d_scalar_idiv( x, a ):
x[:] = x // a
@types( 'real[:]', 'real[:]' )
def array_real_1d_add( x, y ):
x[:] += y
@types( 'real[:]', 'real[:]' )
def array_real_1d_sub( x, y ):
x[:] -= y
@types( 'real[:]', 'real[:]' )
def array_real_1d_mul( x, y ):
x[:] *= y
@types( 'real[:]', 'real[:]' )
def array_real_1d_div( x, y ):
x[:] /= y
@types( 'real[:]', 'real[:]' )
def array_real_1d_idiv( x, y ):
x[:] = x // y
#==============================================================================
# 2D ARRAYS OF REAL WITH C ORDERING
#==============================================================================
@types( 'real[:,:]', 'real' )
def array_real_2d_C_scalar_add( x, a ):
x[:,:] += a
@types( 'real[:,:]', 'real' )
def array_real_2d_C_scalar_sub( x, a ):
x[:,:] -= a
@types( 'real[:,:]', 'real' )
def array_real_2d_C_scalar_mul( x, a ):
x[:,:] *= a
@types( 'real[:,:]', 'real' )
def array_real_2d_C_scalar_div( x, a ):
x[:,:] /= a
@types( 'real[:,:]', 'real[:,:]' )
def array_real_2d_C_add( x, y ):
x[:,:] += y
@types( 'real[:,:]', 'real[:,:]' )
def array_real_2d_C_sub( x, y ):
x[:,:] -= y
@types( 'real[:,:]', 'real[:,:]' )
def array_real_2d_C_mul( x, y ):
x[:,:] *= y
@types( 'real[:,:]', 'real[:,:]' )
def array_real_2d_C_div( x, y ):
x[:,:] /= y
@types('real[:,:]')
def array_real_2d_C_array_initialization(a):
from numpy import array
tmp = array([[1, 2, 3], [4, 5, 6]], dtype='float')
a[:,:] = tmp[:,:]
@types('real[:,:]','real[:,:]', 'real[:,:,:]')
def array_real_3d_C_array_initialization_1(x, y, a):
from numpy import array
tmp = array([x, y], dtype='float')
a[:,:,:] = tmp[:,:,:]
@types('real[:,:,:]')
def array_real_3d_C_array_initialization_2(a):
from numpy import array
x = array([[[0., 1., 2., 3.], [4., 5., 6., 7.], [8., 9., 10., 11.]],
[[12., 13., 14., 15.], [16., 17., 18., 19.], [20., 21., 22., 23.]]], order='C')
a[:,:,:] = x[:,:,:]
@types('real[:,:,:]','real[:,:,:]', 'real[:,:,:,:]')
def array_real_4d_C_array_initialization(x, y, a):
from numpy import array
tmp = array([x, y], dtype='float')
a[:,:,:,:] = tmp[:,:,:,:]
#==============================================================================
# 2D ARRAYS OF REAL WITH F ORDERING
#==============================================================================
@types( 'real[:,:](order=F)', 'real' )
def array_real_2d_F_scalar_add( x, a ):
x[:,:] += a
@types( 'real[:,:](order=F)', 'real' )
def array_real_2d_F_scalar_sub( x, a ):
x[:,:] -= a
@types( 'real[:,:](order=F)', 'real' )
def array_real_2d_F_scalar_mul( x, a ):
x[:,:] *= a
@types( 'real[:,:](order=F)', 'real' )
def array_real_2d_F_scalar_div( x, a ):
x[:,:] /= a
@types( 'real[:,:](order=F)', 'real[:,:](order=F)' )
def array_real_2d_F_add( x, y ):
x[:,:] += y
@types( 'real[:,:](order=F)', 'real[:,:](order=F)' )
def array_real_2d_F_sub( x, y ):
x[:,:] -= y
@types( 'real[:,:](order=F)', 'real[:,:](order=F)' )
def array_real_2d_F_mul( x, y ):
x[:,:] *= y
@types( 'real[:,:](order=F)', 'real[:,:](order=F)' )
def array_real_2d_F_div( x, y ):
x[:,:] /= y
@types('real[:,:](order=F)')
def array_real_2d_F_array_initialization(a):
from numpy import array
tmp = array([[1, 2, 3], [4, 5, 6]], dtype='float', order='F')
a[:,:] = tmp[:,:]
@types('real[:,:](order=F)','real[:,:](order=F)', 'real[:,:,:](order=F)')
def array_real_3d_F_array_initialization_1(x, y, a):
from numpy import array
tmp = array([x, y], dtype='float', order='F')
a[:,:,:] = tmp[:,:,:]
@types('real[:,:,:](order=F)')
def array_real_3d_F_array_initialization_2(a):
from numpy import array
x = array([[[0., 1., 2., 3.], [4., 5., 6., 7.], [8., 9., 10., 11.]],
[[12., 13., 14., 15.], [16., 17., 18., 19.], [20., 21., 22., 23.]]], order='F')
a[:,:,:] = x[:,:,:]
@types('real[:,:,:](order=F)','real[:,:,:](order=F)', 'real[:,:,:,:](order=F)')
def array_real_4d_F_array_initialization(x, y, a):
from numpy import array
tmp = array([x, y], dtype='float', order='F')
a[:,:,:,:] = tmp[:,:,:,:]
@types('real[:,:](order=F)', 'real[:,:,:,:](order=F)')
def array_real_4d_F_array_initialization_mixed_ordering(x, a):
import numpy as np
tmp = np.array(((((0., 1.), (2., 3.)),
((4., 5.), (6., 7.)),
((8., 9.), (10., 11.))),
(((12., 13.), (14., 15.)),
x,
((20., 21.), (22., 23.)))),
dtype='float', order='F')
a[:,:,:,:] = tmp[:,:,:,:]
#==============================================================================
# COMPLEX EXPRESSIONS IN 3D : TEST CONSTANT AND UNKNOWN SHAPES
#==============================================================================
@types( 'int32[:]', 'int32[:]' )
def array_int32_1d_complex_3d_expr( x, y ):
from numpy import full, int32
z = full(3,5, dtype=int32)
x[:] = (x // y) * x + z
@types( 'int32[:,:]', 'int32[:,:]' )
def array_int32_2d_C_complex_3d_expr( x, y ):
from numpy import full, int32
z = full((2,3),5, dtype=int32)
x[:] = (x // y) * x + z
@types( 'int32[:,:](order=F)', 'int32[:,:](order=F)' )
def array_int32_2d_F_complex_3d_expr( x, y ):
from numpy import full, int32
z = full((2,3),5,order='F', dtype=int32)
x[:] = (x // y) * x + z
@types( 'real[:]', 'real[:]' )
def array_real_1d_complex_3d_expr( x, y ):
from numpy import full
z = full(3,5)
x[:] = (x // y) * x + z
@types( 'real[:,:]', 'real[:,:]' )
def array_real_2d_C_complex_3d_expr( x, y ):
from numpy import full
z = full((2,3),5)
x[:] = (x // y) * x + z
@types( 'real[:,:](order=F)', 'real[:,:](order=F)' )
def array_real_2d_F_complex_3d_expr( x, y ):
from numpy import full
z = full((2,3),5,order='F')
x[:] = (x // y) * x + z
@types( 'int32[:]', 'int32[:]', 'bool[:]' )
def array_int32_in_bool_out_1d_complex_3d_expr( x, y, ri ):
from numpy import full, int32, empty
z = full(3,5, dtype=int32)
ri[:] = (x // y) * x > z
@types( 'int32[:,:]', 'int32[:,:]', 'bool[:,:]' )
def array_int32_in_bool_out_2d_C_complex_3d_expr( x, y, ri ):
from numpy import full, int32
z = full((2,3),5, dtype=int32)
ri[:] = (x // y) * x > z
@types( 'int32[:,:](order=F)', 'int32[:,:](order=F)', 'bool[:,:](order=F)' )
def array_int32_in_bool_out_2d_F_complex_3d_expr( x, y, ri ):
from numpy import full, int32
z = full((2,3),5,order='F', dtype=int32)
ri[:] = (x // y) * x > z
#==============================================================================
# 1D STACK ARRAYS OF REAL
#==============================================================================
@stack_array('a')
def array_real_1d_sum_stack_array():
from numpy import zeros
a = zeros(10)
s = 0.
for i in range(10):
s += a[i]
return s
@stack_array('a')
def array_real_1d_div_stack_array():
from numpy import ones
a = ones(10)
s = 0.
for i in range(10):
s += 1.0 / a[i]
return s
@stack_array('a')
@stack_array('b')
def multiple_stack_array_1():
from numpy import ones, array
a = ones(5)
b = array([1, 3, 5, 7, 9])
s = 0.0
for i in range(5):
s += a[i] / b[i]
return s
@stack_array('a')
@stack_array('b', 'c')
def multiple_stack_array_2():
from numpy import ones, array
a = ones(5)
b = array([2, 4, 6, 8, 10])
c = array([1, 3, 5, 7, 9])
s = 0.0
for i in range(5):
s = s + b[i] - a[i] / c[i]
return s
#==============================================================================
# 2D STACK ARRAYS OF REAL
#==============================================================================
@stack_array('a')
def array_real_2d_sum_stack_array():
from numpy import zeros
a = zeros((10, 10))
s = 0.
for i in range(10):
for j in range(10):
s += a[i][j]
return s
@stack_array('a')
def array_real_2d_div_stack_array():
from numpy import full
a = full((10, 10), 2)
s = 1.
for i in range(10):
for j in range(10):
s /= a[i][j]
return s
@stack_array('a')
@stack_array('b')
def multiple_2d_stack_array_1():
from numpy import ones, array
a = ones((2, 5))
b = array([[1, 3, 5, 7, 9], [11, 13, 17, 19, 23]])
s = 0.0
j = 0
for i in range(2):
for j in range(5):
s += a[i][j] / b[i][j]
return s
@stack_array('a')
@stack_array('b', 'c')
def multiple_2d_stack_array_2():
from numpy import ones, array
a = ones(5)
b = array([[2, 4, 6, 8, 10], [1, 3, 5, 7, 9]])
c = array([[1, 3, 5, 7, 9], [2, 4, 6, 8, 10]])
s = 0.0
for i in range(2):
for j in range(5):
s = s + b[i][j] - a[j] / c[i][j]
return s
#==============================================================================
# TEST: Product and matrix multiplication
#==============================================================================
@types('real[:], real[:]')
def array_real_1d_1d_prod(x, out):
from numpy import prod
out[:] = prod(x)
@types('real[:,:], real[:], real[:]')
def array_real_2d_1d_matmul(A, x, out):
from numpy import matmul
out[:] = matmul(A, x)
@types('real[:,:], real[:]')
def array_real_2d_1d_matmul_creation(A, x):
from numpy import matmul
out = matmul(A, x)
return out.sum()
@types('real[:,:](order=F), real[:], real[:]')
def array_real_2d_1d_matmul_order_F(A, x, out):
from numpy import matmul
out[:] = matmul(A, x)
@types('real[:], real[:,:], real[:]')
def array_real_1d_2d_matmul(x, A, out):
from numpy import matmul
out[:] = matmul(x, A)
@types('real[:,:], real[:,:], real[:,:]')
def array_real_2d_2d_matmul(A, B, out):
from numpy import matmul
out[:,:] = matmul(A, B)
@types('real[:,:](order=F), real[:,:](order=F), real[:,:](order=F)')
def array_real_2d_2d_matmul_F_F(A, B, out):
from numpy import matmul
out[:,:] = matmul(A, B)
# Mixed order, not supported currently, see #244
@types('real[:,:], real[:,:](order=F), real[:,:]')
def array_real_2d_2d_matmul_mixorder(A, B, out):
from numpy import matmul
out[:,:] = matmul(A, B)
@types('real[:,:], real[:,:], real[:,:]')
def array_real_2d_2d_matmul_operator(A, B, out):
out[:,:] = A @ B
@types('real[:], real[:], real[:]')
def array_real_loopdiff(x, y, out):
dxy = x - y
for k in range(len(x)):
out[k] = dxy[k]
#==============================================================================
# KEYWORD ARGUMENTS
#==============================================================================
def array_kwargs_full():
""" full(shape, fill_value, dtype=None, order='C')
"""
from numpy import sum as np_sum
from numpy import full
n = 3
a = full((n, n-1), 0.5, 'float', 'C')
b = full((n+1, 2*n), 2.0, order='F')
c = full((1, n), 3)
d = full(2+n, order='F', fill_value=5)
e = full(dtype=int, fill_value=1.0, shape=2*n)
return np_sum(a) + np_sum(b) + np_sum(c) + np_sum(d) + np_sum(e)
def array_kwargs_ones():
""" ones(shape, dtype=float, order='C')
"""
from numpy import sum as np_sum
from numpy import ones
n = 4
a = ones((n, n-1), 'float', 'C')
b = ones((n+1, 2*n), float, order='F')
c = ones((1, n), complex)
d = ones(dtype=int, shape=2+n)
return np_sum(a) + np_sum(b) + np_sum(c) + np_sum(d)
#==============================================================================
# NEGATIVE INDEXES
#==============================================================================
@types('int')
def constant_negative_index(n):
import numpy as np
a = np.empty(n, dtype=int)
for i in range(n):
a[i] = i
return a[-1], a[-2]
@types('int')
def almost_negative_index(n):
import numpy as np
a = np.empty(n, dtype=int)
for i in range(n):
a[i] = i
j = -1
return a[-j]
@allow_negative_index('a')
@types('int', 'int')
def var_negative_index(n, idx):
import numpy as np
a = np.empty(n, dtype=int)
for i in range(n):
a[i] = i
return a[idx]
@allow_negative_index('a')
@types('int', 'int', 'int')
def expr_negative_index(n, idx_1, idx_2):
import numpy as np
a = np.empty(n, dtype=int)
for i in range(n):
a[i] = i
return a[idx_1-idx_2]
@allow_negative_index('a')
@allow_negative_index('b')
@types('int', 'int')
def test_multiple_negative_index(c, d):
import numpy as np
a = np.array([1, 2, 3, 4, 5, 6])
b = np.array([1, 2, 3])
x = a[c]
y = b[d]
return x, y
@allow_negative_index('a', 'b')
@types('int', 'int')
def test_multiple_negative_index_2(c, d):
import numpy as np
a = np.array([1.2, 2.2, 3.2, 4.2])
b = np.array([1, 5, 9, 13])
x = a[c] * d
y = b[d] * c
return x, y
@allow_negative_index('a')
@allow_negative_index('b', 'c')
@types('int', 'int', 'int')
def test_multiple_negative_index_3(d, e, f):
import numpy as np
a = np.array([1.2, 2.2, 3.2, 4.2])
b = np.array([1])
c = np.array([1, 2, 3])
return a[d], b[e], c[f]
@allow_negative_index('a')
@types('int[:]')
def test_argument_negative_index_1(a):
c = -2
d = 5
return a[c], a[d]
@allow_negative_index('a', 'b')
@types('int[:]', 'int[:]')
def test_argument_negative_index_2(a, b):
c = -2
d = 3
return a[c], a[d], b[c], b[d]
#==============================================================================
# SHAPE INITIALISATION
#==============================================================================
def array_random_size():
import numpy as np
a = np.zeros(np.random.randint(23))
c = np.zeros_like(a)
return np.shape(a)[0], np.shape(c)[0]
@types('int','int')
def array_variable_size(n,m):
import numpy as np
s = n
a = np.zeros(s)
s = m
c = np.zeros_like(a)
return np.shape(a)[0], np.shape(c)[0]
#==============================================================================
# 1D ARRAY SLICING
#==============================================================================
@types('int[:]')
def array_1d_slice_1(a):
import numpy as np
b = a[:]
return np.sum(b), b[0], b[-1], len(b)
@types('int[:]')
def array_1d_slice_2(a):
import numpy as np
b = a[5:]
return np.sum(b), b[0], b[-1], len(b)
@types('int[:]')
def array_1d_slice_3(a):
import numpy as np
b = a[:5]
return np.sum(b), b[0], b[-1], len(b)
@types('int[:]')
def array_1d_slice_4(a):
import numpy as np
b = a[5:15]
return np.sum(b), b[0], b[-1], len(b)
@types('int[:]')
def array_1d_slice_5(a):
import numpy as np
b = a[:-5]
return np.sum(b), b[0], b[-1], len(b)
@types('int[:]')
def array_1d_slice_6(a):
import numpy as np
b = a[-5:]
return np.sum(b), b[0], b[-1], len(b)
@types('int[:]')
def array_1d_slice_7(a):
import numpy as np
b = a[-15:-5]
return np.sum(b), b[0], b[-1], len(b)
@types('int[:]')
def array_1d_slice_8(a):
import numpy as np
b = a[5:-5]
return np.sum(b), b[0], b[-1], len(b)
@types('int[:]')
def array_1d_slice_9(a):
import numpy as np
b = a[-15:15]
return np.sum(b), b[0], b[-1], len(b)
@allow_negative_index('a')
@types('int[:]')
def array_1d_slice_10(a):
import numpy as np
c = -15
b = a[c:]
return np.sum(b), b[0], b[-1], len(b)
@allow_negative_index('a')
@types('int[:]')
def array_1d_slice_11(a):
import numpy as np
c = -5
b = a[:c]
return np.sum(b), b[0], b[-1], len(b)
@allow_negative_index('a')
@types('int[:]')
def array_1d_slice_12(a):
import numpy as np
c = -15
d = -5
b = a[c:d]
return np.sum(b), b[0], b[-1], len(b)
#==============================================================================
# 2D ARRAY SLICE ORDER F
#==============================================================================
@types('int[:,:](order=F)')
def array_2d_F_slice_1(a):
import numpy as np
b = a[:]
return np.sum(b), b[0][0], b[-1][-1], len(b), len(b[0])
@types('int[:,:](order=F)')
def array_2d_F_slice_2(a):
import numpy as np
b = a[5:]
return np.sum(b), b[0][0], b[-1][-1], len(b), len(b[0])
@types('int[:,:](order=F)')
def array_2d_F_slice_3(a):
import numpy as np
b = a[:5]
return np.sum(b), b[0][0], b[-1][-1], len(b), len(b[0])
@types('int[:,:](order=F)')
def array_2d_F_slice_4(a):
import numpy as np
b = a[-15:]
return np.sum(b), b[0][0], b[-1][-1], len(b), len(b[0])
@types('int[:,:](order=F)')
def array_2d_F_slice_5(a):
import numpy as np
b = a[:-5]
return np.sum(b), b[0][0], b[-1][-1], len(b), len(b[0])
@types('int[:,:](order=F)')
def array_2d_F_slice_6(a):
import numpy as np
b = a[5:15]
return np.sum(b), b[0][0], b[-1][-1], len(b), len(b[0])
@types('int[:,:](order=F)')
def array_2d_F_slice_7(a):
import numpy as np
b = a[-15:-5]
return np.sum(b), b[0][0], b[-1][-1], len(b), len(b[0])
@types('int[:,:](order=F)')
def array_2d_F_slice_8(a):
import numpy as np
b = a[::]
return np.sum(b), b[0][0], b[-1][-1], len(b), len(b[0])
@types('int[:,:](order=F)')
def array_2d_F_slice_9(a):
import numpy as np
b = a[5:, :]
return np.sum(b), b[0][0], b[-1][-1], len(b), len(b[0])
@types('int[:,:](order=F)')
def array_2d_F_slice_10(a):
import numpy as np
b = a[:5, :]
return np.sum(b), b[0][0], b[-1][-1], len(b), len(b[0])
@types('int[:,:](order=F)')
def array_2d_F_slice_11(a):
import numpy as np
b = a[:, 5:]
return np.sum(b), b[0][0], b[-1][-1], len(b), len(b[0])
@types('int[:,:](order=F)')
def array_2d_F_slice_12(a):
import numpy as np
b = a[:, :5]
return np.sum(b), b[0][0], b[-1][-1], len(b), len(b[0])
@types('int[:,:](order=F)')
def array_2d_F_slice_13(a):
import numpy as np
b = a[:-5, :]
return np.sum(b), b[0][0], b[-1][-1], len(b), len(b[0])
@types('int[:,:](order=F)')
def array_2d_F_slice_14(a):
import numpy as np
b = a[-5:, :]
return np.sum(b), b[0][0], b[-1][-1], len(b), len(b[0])
@types('int[:,:](order=F)')
def array_2d_F_slice_15(a):
import numpy as np
b = a[:, -5:]
return np.sum(b), b[0][0], b[-1][-1], len(b), len(b[0])
@types('int[:,:](order=F)')
def array_2d_F_slice_16(a):
import numpy as np
b = a[:, :-5]
return np.sum(b), b[0][0], b[-1][-1], len(b), len(b[0])
@types('int[:,:](order=F)')
def array_2d_F_slice_17(a):
import numpy as np
b = a[:, 5:-5]
return np.sum(b), b[0][0], b[-1][-1], len(b), len(b[0])
@types('int[:,:](order=F)')
def array_2d_F_slice_18(a):
import numpy as np
b = a[5:15, :]
return np.sum(b), b[0][0], b[-1][-1], len(b), len(b[0])
@types('int[:,:](order=F)')
def array_2d_F_slice_19(a):
import numpy as np
b = a[5:15, -5:]
return np.sum(b), b[0][0], b[-1][-1], len(b), len(b[0])
@types('int[:,:](order=F)')
def array_2d_F_slice_20(a):
import numpy as np
b = a[5:15, 5:-5]
return np.sum(b), b[0][0], b[-1][-1], len(b), len(b[0])
@allow_negative_index('a')
@types('int[:,:](order=F)')
def array_2d_F_slice_21(a):
import numpy as np
c = -5
d = 5
b = a[d:15, 5:c]
return np.sum(b), b[0][0], b[-1][-1], len(b), len(b[0])
@allow_negative_index('a')
@types('int[:,:](order=F)')
def array_2d_F_slice_22(a):
import numpy as np
c = -5
d = -15
b = a[d:15, 5:c]
return np.sum(b), b[0][0], b[-1][-1], len(b), len(b[0])
@allow_negative_index('a')
@types('int[:,:](order=F)')
def array_2d_F_slice_23(a):
import numpy as np
c = -5
b = a[:c, :c]
return np.sum(b), b[0][0], b[-1][-1], len(b), len(b[0])
#==============================================================================
# 2D ARRAY SLICE ORDER C
#==============================================================================
@types('int[:,:]')
def array_2d_C_slice_1(a):
import numpy as np
b = a[:]
return np.sum(b), b[0][0], b[-1][-1], len(b), len(b[0])
@types('int[:,:]')
def array_2d_C_slice_2(a):
import numpy as np
b = a[5:]
return np.sum(b), b[0][0], b[-1][-1], len(b), len(b[0])
@types('int[:,:]')
def array_2d_C_slice_3(a):
import numpy as np
b = a[:5]
return np.sum(b), b[0][0], b[-1][-1], len(b), len(b[0])
@types('int[:,:]')
def array_2d_C_slice_4(a):
import numpy as np
b = a[-15:]
return np.sum(b), b[0][0], b[-1][-1], len(b), len(b[0])
@types('int[:,:]')
def array_2d_C_slice_5(a):
import numpy as np
b = a[:-5]
return np.sum(b), b[0][0], b[-1][-1], len(b), len(b[0])
@types('int[:,:]')
def array_2d_C_slice_6(a):
import numpy as np
b = a[5:15]
return np.sum(b), b[0][0], b[-1][-1], len(b), len(b[0])
@types('int[:,:]')
def array_2d_C_slice_7(a):
import numpy as np
b = a[-15:-5]
return np.sum(b), b[0][0], b[-1][-1], len(b), len(b[0])
@types('int[:,:]')
def array_2d_C_slice_8(a):
import numpy as np
b = a[::]
return np.sum(b), b[0][0], b[-1][-1], len(b), len(b[0])
@types('int[:,:]')
def array_2d_C_slice_9(a):
import numpy as np
b = a[5:, :]
return np.sum(b), b[0][0], b[-1][-1], len(b), len(b[0])
@types('int[:,:]')
def array_2d_C_slice_10(a):
import numpy as np
b = a[:5, :]
return np.sum(b), b[0][0], b[-1][-1], len(b), len(b[0])
@types('int[:,:]')
def array_2d_C_slice_11(a):
import numpy as np
b = a[:, 5:]
return np.sum(b), b[0][0], b[-1][-1], len(b), len(b[0])
@types('int[:,:]')
def array_2d_C_slice_12(a):
import numpy as np
b = a[:, :5]
return np.sum(b), b[0][0], b[-1][-1], len(b), len(b[0])
@types('int[:,:]')
def array_2d_C_slice_13(a):
import numpy as np
b = a[:-5, :]
return np.sum(b), b[0][0], b[-1][-1], len(b), len(b[0])
@types('int[:,:]')
def array_2d_C_slice_14(a):
import numpy as np
b = a[-5:, :]
return np.sum(b), b[0][0], b[-1][-1], len(b), len(b[0])
@types('int[:,:]')
def array_2d_C_slice_15(a):
import numpy as np
b = a[:, -5:]
return np.sum(b), b[0][0], b[-1][-1], len(b), len(b[0])
@types('int[:,:]')
def array_2d_C_slice_16(a):
import numpy as np
b = a[:, :-5]
return np.sum(b), b[0][0], b[-1][-1], len(b), len(b[0])
@types('int[:,:]')
def array_2d_C_slice_17(a):
import numpy as np
b = a[:, 5:-5]
return np.sum(b), b[0][0], b[-1][-1], len(b), len(b[0])
@types('int[:,:]')
def array_2d_C_slice_18(a):
import numpy as np
b = a[5:15, :]
return np.sum(b), b[0][0], b[-1][-1], len(b), len(b[0])
@types('int[:,:]')
def array_2d_C_slice_19(a):
import numpy as np
b = a[5:15, -5:]
return np.sum(b), b[0][0], b[-1][-1], len(b), len(b[0])
@types('int[:,:]')
def array_2d_C_slice_20(a):
import numpy as np
b = a[5:15, 5:-5]
return np.sum(b), b[0][0], b[-1][-1], len(b), len(b[0])
@allow_negative_index('a')
@types('int[:,:]')
def array_2d_C_slice_21(a):
import numpy as np
c = -5
d = 5
b = a[d:15, 5:c]
return np.sum(b), b[0][0], b[-1][-1], len(b), len(b[0])
@allow_negative_index('a')
@types('int[:,:]')
def array_2d_C_slice_22(a):
import numpy as np
c = -5
d = -15
b = a[d:15, 5:c]
return np.sum(b), b[0][0], b[-1][-1], len(b), len(b[0])
@allow_negative_index('a')
@types('int[:,:]')
def array_2d_C_slice_23(a):
import numpy as np
c = -5
b = a[:c, :c]
return np.sum(b), b[0][0], b[-1][-1], len(b), len(b[0])
#==============================================================================
# 1D ARRAY SLICE STRIDE
#==============================================================================
@types('int[:]')
def array_1d_slice_stride_1(a):
import numpy as np
b = a[::1]
return np.sum(b), b[0], b[-1], len(b)
@types('int[:]')
def array_1d_slice_stride_2(a):
import numpy as np
b = a[::-1]
return np.sum(b), b[0], b[-1], len(b)
@types('int[:]')
def array_1d_slice_stride_3(a):
import numpy as np
b = a[::2]
return np.sum(b), b[0], b[-1], len(b)
@types('int[:]')
def array_1d_slice_stride_4(a):
import numpy as np
b = a[::-2]
return np.sum(b), b[0], b[-1], len(b)
@types('int[:]')
def array_1d_slice_stride_5(a):
import numpy as np
b = a[5::2]
return np.sum(b), b[0], b[-1], len(b)
@types('int[:]')
def array_1d_slice_stride_6(a):
import numpy as np
b = a[5::-2]
return np.sum(b), b[0], b[-1], len(b)
@types('int[:]')
def array_1d_slice_stride_7(a):
import numpy as np
b = a[:15:2]
return np.sum(b), b[0], b[-1], len(b)
@types('int[:]')
def array_1d_slice_stride_8(a):
import numpy as np
b = a[:15:-2]
return np.sum(b), b[0], b[-1], len(b)
@types('int[:]')
def array_1d_slice_stride_9(a):
import numpy as np
b = a[5:15:2]
return np.sum(b), b[0], b[-1], len(b)
@types('int[:]')
def array_1d_slice_stride_10(a):
import numpy as np
b = a[15:5:-2]
return np.sum(b), b[0], b[-1], len(b)
@types('int[:]')
def array_1d_slice_stride_11(a):
import numpy as np
b = a[-15:-5:2]
return np.sum(b), b[0], b[-1], len(b)
@types('int[:]')
def array_1d_slice_stride_12(a):
import numpy as np
b = a[-5:-15:-2]
return np.sum(b), b[0], b[-1], len(b)
@types('int[:]')
def array_1d_slice_stride_13(a):
import numpy as np
b = a[-5::2]
return np.sum(b), b[0], b[-1], len(b)
@types('int[:]')
def array_1d_slice_stride_14(a):
import numpy as np
b = a[:-5:-2]
return np.sum(b), b[0], b[-1], len(b)
@types('int[:]')
def array_1d_slice_stride_15(a):
import numpy as np
b = a[::-5]
return np.sum(b), b[0], b[-1], len(b)
@types('int[:]')
def array_1d_slice_stride_16(a):
import numpy as np
b = a[-15::2]
return np.sum(b), b[0], b[-1], len(b)
@types('int[:]')
def array_1d_slice_stride_17(a):
import numpy as np
b = a[:-15:-2]
return np.sum(b), b[0], b[-1], len(b)
@types('int[:]')
def array_1d_slice_stride_18(a):
import numpy as np
b = a[5::-5]
return np.sum(b), b[0], b[-1], len(b)
@types('int[:]')
def array_1d_slice_stride_19(a):
import numpy as np
b = a[5:-5:5]
return np.sum(b), b[0], b[-1], len(b)
@types('int[:]')
def array_1d_slice_stride_20(a):
import numpy as np
b = a[-5:5:-5]
return np.sum(b), b[0], b[-1], len(b)
@allow_negative_index('a')
@types('int[:]')
def array_1d_slice_stride_21(a):
import numpy as np
c = -5
b = a[-5:5:c]
return np.sum(b), b[0], b[-1], len(b)
@types('int[:]')
def array_1d_slice_stride_22(a):
import numpy as np
c = 5
b = a[5:-5:c]
return np.sum(b), b[0], b[-1], len(b)
@allow_negative_index('a')
@types('int[:]')
def array_1d_slice_stride_23(a):
import numpy as np
c = -5
b = a[::c]
return np.sum(b), b[0], b[-1], len(b)
#==============================================================================
# 2D ARRAY SLICE STRIDE ORDER F
#==============================================================================
@types('int[:,:](order=F)')
def array_2d_F_slice_stride_1(a):
import numpy as np
b = a[::2]
return np.sum(b), b[0][0], b[-1][-1], len(b), len(b[0])
@types('int[:,:](order=F)')
def array_2d_F_slice_stride_2(a):
import numpy as np
b = a[::-1]
return np.sum(b), b[0][0], b[-1][-1], len(b), len(b[0])
@types('int[:,:](order=F)')
def array_2d_F_slice_stride_3(a):
import numpy as np
b = a[::-2]
return np.sum(b), b[0][0], b[-1][-1], len(b), len(b[0])
@types('int[:,:](order=F)')
def array_2d_F_slice_stride_4(a):
import numpy as np
b = a[::, ::2]
return np.sum(b), b[0][0], b[-1][-1], len(b), len(b[0])
@types('int[:,:](order=F)')
def array_2d_F_slice_stride_5(a):
import numpy as np
b = a[::, ::-2]
return np.sum(b), b[0][0], b[-1][-1], len(b), len(b[0])
@types('int[:,:](order=F)')
def array_2d_F_slice_stride_6(a):
import numpy as np
b = a[::2, ::]
return np.sum(b), b[0][0], b[-1][-1], len(b), len(b[0])
@types('int[:,:](order=F)')
def array_2d_F_slice_stride_7(a):
import numpy as np
b = a[::-2, ::]
return np.sum(b), b[0][0], b[-1][-1], len(b), len(b[0])
@types('int[:,:](order=F)')
def array_2d_F_slice_stride_8(a):
import numpy as np
b = a[::2, ::2]
return np.sum(b), b[0][0], b[-1][-1], len(b), len(b[0])
@types('int[:,:](order=F)')
def array_2d_F_slice_stride_9(a):
import numpy as np
b = a[::-2, ::2]
return np.sum(b), b[0][0], b[-1][-1], len(b), len(b[0])
@types('int[:,:](order=F)')
def array_2d_F_slice_stride_10(a):
import numpy as np
b = a[::2, ::-2]
return np.sum(b), b[0][0], b[-1][-1], len(b), len(b[0])
@types('int[:,:](order=F)')
def array_2d_F_slice_stride_11(a):
import numpy as np
b = a[::-2, ::-2]
return np.sum(b), b[0][0], b[-1][-1], len(b), len(b[0])
@types('int[:,:](order=F)')
def array_2d_F_slice_stride_12(a):
import numpy as np
b = a[5:15:2, 15:5:-2]
return np.sum(b), b[0][0], b[-1][-1], len(b), len(b[0])
@types('int[:,:](order=F)')
def array_2d_F_slice_stride_13(a):
import numpy as np
b = a[15:5:-2, 5:15]
return np.sum(b), b[0][0], b[-1][-1], len(b), len(b[0])
@types('int[:,:](order=F)')
def array_2d_F_slice_stride_14(a):
import numpy as np
b = a[-15:-5:2, -5:-15:-2]
return np.sum(b), b[0][0], b[-1][-1], len(b), len(b[0])
@types('int[:,:](order=F)')
def array_2d_F_slice_stride_15(a):
import numpy as np
b = a[-5:-15:-2, -15:-5:2]
return np.sum(b), b[0][0], b[-1][-1], len(b), len(b[0])
@types('int[:,:](order=F)')
def array_2d_F_slice_stride_16(a):
import numpy as np
b = a[::-5, ::5]
return np.sum(b), b[0][0], b[-1][-1], len(b), len(b[0])
@types('int[:,:](order=F)')
def array_2d_F_slice_stride_17(a):
import numpy as np
b = a[::5, ::-5]
return np.sum(b), b[0][0], b[-1][-1], len(b), len(b[0])
@types('int[:,:](order=F)')
def array_2d_F_slice_stride_18(a):
import numpy as np
b = a[::-1, ::-1]
    return np.sum(b), b[0][0], b[-1][-1], len(b), len(b[0])
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import networkx as networkx
import numpy as numpy
import scipy as scipy
import scipy.integrate
class SEIRSModel():
"""
A class to simulate the Deterministic SEIRS Model
===================================================
Params: beta Rate of transmission (exposure)
sigma Rate of infection (upon exposure)
gamma Rate of recovery (upon infection)
xi Rate of re-susceptibility (upon recovery)
mu_I Rate of infection-related death
mu_0 Rate of baseline death
nu Rate of baseline birth
beta_D Rate of transmission (exposure) for individuals with detected infections
sigma_D Rate of infection (upon exposure) for individuals with detected infections
gamma_D Rate of recovery (upon infection) for individuals with detected infections
mu_D Rate of infection-related death for individuals with detected infections
theta_E Rate of baseline testing for exposed individuals
theta_I Rate of baseline testing for infectious individuals
psi_E Probability of positive test results for exposed individuals
            psi_I   Probability of positive test results for infectious individuals
q Probability of quarantined individuals interacting with others
initE Init number of exposed individuals
initI Init number of infectious individuals
            initD_E Init number of detected exposed individuals
initD_I Init number of detected infectious individuals
initR Init number of recovered individuals
initF Init number of infection-related fatalities
(all remaining nodes initialized susceptible)
"""
def __init__(self, initN, beta, sigma, gamma, xi=0, mu_I=0, mu_0=0, nu=0, p=0,
beta_D=None, sigma_D=None, gamma_D=None, mu_D=None,
theta_E=0, theta_I=0, psi_E=0, psi_I=0, q=0,
initE=0, initI=10, initD_E=0, initD_I=0, initR=0, initF=0):
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Model Parameters:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.beta = beta
self.sigma = sigma
self.gamma = gamma
self.xi = xi
self.mu_I = mu_I
self.mu_0 = mu_0
self.nu = nu
self.p = p
# Testing-related parameters:
self.beta_D = beta_D if beta_D is not None else self.beta
self.sigma_D = sigma_D if sigma_D is not None else self.sigma
self.gamma_D = gamma_D if gamma_D is not None else self.gamma
self.mu_D = mu_D if mu_D is not None else self.mu_I
self.theta_E = theta_E if theta_E is not None else self.theta_E
self.theta_I = theta_I if theta_I is not None else self.theta_I
self.psi_E = psi_E if psi_E is not None else self.psi_E
self.psi_I = psi_I if psi_I is not None else self.psi_I
self.q = q if q is not None else self.q
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Initialize Timekeeping:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.t = 0
self.tmax = 0 # will be set when run() is called
self.tseries = numpy.array([0])
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        # Initialize Counts of individuals with each state:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.N = numpy.array([int(initN)])
self.numE = numpy.array([int(initE)])
self.numI = numpy.array([int(initI)])
self.numD_E = numpy.array([int(initD_E)])
self.numD_I = numpy.array([int(initD_I)])
self.numR = numpy.array([int(initR)])
self.numF = numpy.array([int(initF)])
self.numS = numpy.array([self.N[-1] - self.numE[-1] - self.numI[-1] - self.numD_E[-1] - self.numD_I[-1] - self.numR[-1] - self.numF[-1]])
assert(self.numS[0] >= 0), "The specified initial population size N must be greater than or equal to the initial compartment counts."
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@staticmethod
def system_dfes(t, variables, beta, sigma, gamma, xi, mu_I, mu_0, nu,
beta_D, sigma_D, gamma_D, mu_D, theta_E, theta_I, psi_E, psi_I, q):
        S, E, I, D_E, D_I, R, F = variables    # variables is a list with compartment counts as elements
N = S + E + I + D_E + D_I + R
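        # Compartment flows implemented below (parameters as in the class docstring):
        #   S -> E     : (beta*S*I)/N plus quarantine-scaled contact q*(beta_D*S*D_I)/N
        #   E -> I     : sigma;       E -> D_E : theta_E*psi_E (testing)
        #   I -> R     : gamma;       I -> D_I : theta_I*psi_I;  I -> F : mu_I
        #   D_E -> D_I : sigma_D;     D_I -> R : gamma_D;        D_I -> F : mu_D
        #   R -> S     : xi (waning immunity); nu and mu_0 are baseline birth/death rates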
dS = - (beta*S*I)/N - q*(beta_D*S*D_I)/N + xi*R + nu*N - mu_0*S
dE = (beta*S*I)/N + q*(beta_D*S*D_I)/N - sigma*E - theta_E*psi_E*E - mu_0*E
dI = sigma*E - gamma*I - mu_I*I - theta_I*psi_I*I - mu_0*I
dDE = theta_E*psi_E*E - sigma_D*D_E - mu_0*D_E
dDI = theta_I*psi_I*I + sigma_D*D_E - gamma_D*D_I - mu_D*D_I - mu_0*D_I
dR = gamma*I + gamma_D*D_I - xi*R - mu_0*R
dF = mu_I*I + mu_D*D_I
return [dS, dE, dI, dDE, dDI, dR, dF]
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def run_epoch(self, runtime, dt=0.1):
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Create a list of times at which the ODE solver should output system values.
# Append this list of times as the model's timeseries
t_eval = numpy.arange(start=self.t, stop=self.t+runtime, step=dt)
# Define the range of time values for the integration:
t_span = (self.t, self.t+runtime)
# Define the initial conditions as the system's current state:
# (which will be the t=0 condition if this is the first run of this model,
# else where the last sim left off)
init_cond = [self.numS[-1], self.numE[-1], self.numI[-1], self.numD_E[-1], self.numD_I[-1], self.numR[-1], self.numF[-1]]
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Solve the system of differential eqns:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
solution = scipy.integrate.solve_ivp(lambda t, X: SEIRSModel.system_dfes(t, X, self.beta, self.sigma, self.gamma, self.xi, self.mu_I, self.mu_0, self.nu,
self.beta_D, self.sigma_D, self.gamma_D, self.mu_D, self.theta_E, self.theta_I, self.psi_E, self.psi_I, self.q
),
t_span=[self.t, self.tmax], y0=init_cond, t_eval=t_eval
)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Store the solution output as the model's time series and data series:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.tseries = numpy.append(self.tseries, solution['t'])
self.numS = numpy.append(self.numS, solution['y'][0])
self.numE = numpy.append(self.numE, solution['y'][1])
self.numI = numpy.append(self.numI, solution['y'][2])
self.numD_E = numpy.append(self.numD_E, solution['y'][3])
self.numD_I = numpy.append(self.numD_I, solution['y'][4])
self.numR = numpy.append(self.numR, solution['y'][5])
self.numF = numpy.append(self.numF, solution['y'][6])
self.t = self.tseries[-1]
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def run(self, T, dt=0.1, checkpoints=None, verbose=False):
if(T>0):
self.tmax += T
else:
return False
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Pre-process checkpoint values:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if(checkpoints):
numCheckpoints = len(checkpoints['t'])
paramNames = ['beta', 'sigma', 'gamma', 'xi', 'mu_I', 'mu_0', 'nu',
'beta_D', 'sigma_D', 'gamma_D', 'mu_D',
'theta_E', 'theta_I', 'psi_E', 'psi_I', 'q']
for param in paramNames:
# For params that don't have given checkpoint values (or bad value given),
# set their checkpoint values to the value they have now for all checkpoints.
if(param not in list(checkpoints.keys())
or not isinstance(checkpoints[param], (list, numpy.ndarray))
or len(checkpoints[param])!=numCheckpoints):
checkpoints[param] = [getattr(self, param)]*numCheckpoints
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Run the simulation loop:
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
if(not checkpoints):
self.run_epoch(runtime=self.tmax, dt=dt)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
print("t = %.2f" % self.t)
if(verbose):
print("\t S = " + str(self.numS[-1]))
print("\t E = " + str(self.numE[-1]))
print("\t I = " + str(self.numI[-1]))
print("\t D_E = " + str(self.numD_E[-1]))
print("\t D_I = " + str(self.numD_I[-1]))
print("\t R = " + str(self.numR[-1]))
print("\t F = " + str(self.numF[-1]))
else: # checkpoints provided
for checkpointIdx, checkpointTime in enumerate(checkpoints['t']):
# Run the sim until the next checkpoint time:
self.run_epoch(runtime=checkpointTime-self.t, dt=dt)
# Having reached the checkpoint, update applicable parameters:
print("[Checkpoint: Updating parameters]")
for param in paramNames:
setattr(self, param, checkpoints[param][checkpointIdx])
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
print("t = %.2f" % self.t)
if(verbose):
print("\t S = " + str(self.numS[-1]))
print("\t E = " + str(self.numE[-1]))
print("\t I = " + str(self.numI[-1]))
print("\t D_E = " + str(self.numD_E[-1]))
print("\t D_I = " + str(self.numD_I[-1]))
print("\t R = " + str(self.numR[-1]))
print("\t F = " + str(self.numF[-1]))
if(self.t < self.tmax):
self.run_epoch(runtime=self.tmax-self.t, dt=dt)
return True
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def total_num_infections(self, t_idx=None):
if(t_idx is None):
return (self.numE[:] + self.numI[:] + self.numD_E[:] + self.numD_I[:])
else:
return (self.numE[t_idx] + self.numI[t_idx] + self.numD_E[t_idx] + self.numD_I[t_idx])
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def plot(self, ax=None, plot_S='line', plot_E='line', plot_I='line',plot_R='line', plot_F='line',
plot_D_E='line', plot_D_I='line', combine_D=True,
color_S='tab:green', color_E='orange', color_I='crimson', color_R='tab:blue', color_F='black',
color_D_E='mediumorchid', color_D_I='mediumorchid', color_reference='#E0E0E0',
dashed_reference_results=None, dashed_reference_label='reference',
shaded_reference_results=None, shaded_reference_label='reference',
vlines=[], vline_colors=[], vline_styles=[], vline_labels=[],
ylim=None, xlim=None, legend=True, title=None, side_title=None, plot_percentages=True):
import matplotlib.pyplot as pyplot
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Create an Axes object if None provided:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if(not ax):
fig, ax = pyplot.subplots()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Prepare data series to be plotted:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Fseries = self.numF/self.N if plot_percentages else self.numF
Eseries = self.numE/self.N if plot_percentages else self.numE
Dseries = (self.numD_E+self.numD_I)/self.N if plot_percentages else (self.numD_E+self.numD_I)
D_Eseries = self.numD_E/self.N if plot_percentages else self.numD_E
D_Iseries = self.numD_I/self.N if plot_percentages else self.numD_I
Iseries = self.numI/self.N if plot_percentages else self.numI
Rseries = self.numR/self.N if plot_percentages else self.numR
Sseries = self.numS/self.N if plot_percentages else self.numS
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Draw the reference data:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if(dashed_reference_results):
dashedReference_tseries = dashed_reference_results.tseries[::int(self.N/100)]
dashedReference_IDEstack = (dashed_reference_results.numI + dashed_reference_results.numD_I + dashed_reference_results.numD_E + dashed_reference_results.numE)[::int(self.N/100)] / (self.N if plot_percentages else 1)
ax.plot(dashedReference_tseries, dashedReference_IDEstack, color='#E0E0E0', linestyle='--', label='$I+D+E$ ('+dashed_reference_label+')', zorder=0)
if(shaded_reference_results):
shadedReference_tseries = shaded_reference_results.tseries
shadedReference_IDEstack = (shaded_reference_results.numI + shaded_reference_results.numD_I + shaded_reference_results.numD_E + shaded_reference_results.numE) / (self.N if plot_percentages else 1)
ax.fill_between(shaded_reference_results.tseries, shadedReference_IDEstack, 0, color='#EFEFEF', label='$I+D+E$ ('+shaded_reference_label+')', zorder=0)
ax.plot(shaded_reference_results.tseries, shadedReference_IDEstack, color='#E0E0E0', zorder=1)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Draw the stacked variables:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
topstack = numpy.zeros_like(self.tseries)
if(any(Fseries) and plot_F=='stacked'):
ax.fill_between(numpy.ma.masked_where(Fseries<=0, self.tseries), numpy.ma.masked_where(Fseries<=0, topstack+Fseries), topstack, color=color_F, alpha=0.5, label='$F$', zorder=2)
ax.plot( numpy.ma.masked_where(Fseries<=0, self.tseries), numpy.ma.masked_where(Fseries<=0, topstack+Fseries), color=color_F, zorder=3)
topstack = topstack+Fseries
if(any(Eseries) and plot_E=='stacked'):
ax.fill_between(numpy.ma.masked_where(Eseries<=0, self.tseries), numpy.ma.masked_where(Eseries<=0, topstack+Eseries), topstack, color=color_E, alpha=0.5, label='$E$', zorder=2)
ax.plot( numpy.ma.masked_where(Eseries<=0, self.tseries), numpy.ma.masked_where(Eseries<=0, topstack+Eseries), color=color_E, zorder=3)
topstack = topstack+Eseries
if(combine_D and plot_D_E=='stacked' and plot_D_I=='stacked'):
ax.fill_between(numpy.ma.masked_where(Dseries<=0, self.tseries), numpy.ma.masked_where(Dseries<=0, topstack+Dseries), topstack, color=color_D_E, alpha=0.5, label='$D_{all}$', zorder=2)
ax.plot( numpy.ma.masked_where(Dseries<=0, self.tseries), numpy.ma.masked_where(Dseries<=0, topstack+Dseries), color=color_D_E, zorder=3)
topstack = topstack+Dseries
else:
if(any(D_Eseries) and plot_D_E=='stacked'):
ax.fill_between(numpy.ma.masked_where(D_Eseries<=0, self.tseries), numpy.ma.masked_where(D_Eseries<=0, topstack+D_Eseries), topstack, color=color_D_E, alpha=0.5, label='$D_E$', zorder=2)
ax.plot( numpy.ma.masked_where(D_Eseries<=0, self.tseries), numpy.ma.masked_where(D_Eseries<=0, topstack+D_Eseries), color=color_D_E, zorder=3)
topstack = topstack+D_Eseries
if(any(D_Iseries) and plot_D_I=='stacked'):
ax.fill_between(numpy.ma.masked_where(D_Iseries<=0, self.tseries), numpy.ma.masked_where(D_Iseries<=0, topstack+D_Iseries), topstack, color=color_D_I, alpha=0.5, label='$D_I$', zorder=2)
ax.plot( numpy.ma.masked_where(D_Iseries<=0, self.tseries), numpy.ma.masked_where(D_Iseries<=0, topstack+D_Iseries), color=color_D_I, zorder=3)
topstack = topstack+D_Iseries
if(any(Iseries) and plot_I=='stacked'):
ax.fill_between(numpy.ma.masked_where(Iseries<=0, self.tseries), numpy.ma.masked_where(Iseries<=0, topstack+Iseries), topstack, color=color_I, alpha=0.5, label='$I$', zorder=2)
ax.plot( numpy.ma.masked_where(Iseries<=0, self.tseries), numpy.ma.masked_where(Iseries<=0, topstack+Iseries), color=color_I, zorder=3)
topstack = topstack+Iseries
if(any(Rseries) and plot_R=='stacked'):
ax.fill_between(numpy.ma.masked_where(Rseries<=0, self.tseries), numpy.ma.masked_where(Rseries<=0, topstack+Rseries), topstack, color=color_R, alpha=0.5, label='$R$', zorder=2)
ax.plot( numpy.ma.masked_where(Rseries<=0, self.tseries), numpy.ma.masked_where(Rseries<=0, topstack+Rseries), color=color_R, zorder=3)
topstack = topstack+Rseries
if(any(Sseries) and plot_S=='stacked'):
ax.fill_between(numpy.ma.masked_where(Sseries<=0, self.tseries), numpy.ma.masked_where(Sseries<=0, topstack+Sseries), topstack, color=color_S, alpha=0.5, label='$S$', zorder=2)
ax.plot( numpy.ma.masked_where(Sseries<=0, self.tseries), numpy.ma.masked_where(Sseries<=0, topstack+Sseries), color=color_S, zorder=3)
topstack = topstack+Sseries
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Draw the shaded variables:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if(any(Fseries) and plot_F=='shaded'):
ax.fill_between(numpy.ma.masked_where(Fseries<=0, self.tseries), numpy.ma.masked_where(Fseries<=0, Fseries), 0, color=color_F, alpha=0.5, label='$F$', zorder=4)
ax.plot( numpy.ma.masked_where(Fseries<=0, self.tseries), numpy.ma.masked_where(Fseries<=0, Fseries), color=color_F, zorder=5)
if(any(Eseries) and plot_E=='shaded'):
ax.fill_between(numpy.ma.masked_where(Eseries<=0, self.tseries), numpy.ma.masked_where(Eseries<=0, Eseries), 0, color=color_E, alpha=0.5, label='$E$', zorder=4)
ax.plot( numpy.ma.masked_where(Eseries<=0, self.tseries), numpy.ma.masked_where(Eseries<=0, Eseries), color=color_E, zorder=5)
        if(combine_D and (any(Dseries) and plot_D_E=='shaded' and plot_D_I=='shaded')):
ax.fill_between(numpy.ma.masked_where(Dseries<=0, self.tseries), numpy.ma.masked_where(Dseries<=0, Dseries), 0, color=color_D_E, alpha=0.5, label='$D_{all}$', zorder=4)
ax.plot( numpy.ma.masked_where(Dseries<=0, self.tseries), numpy.ma.masked_where(Dseries<=0, Dseries), color=color_D_E, zorder=5)
else:
if(any(D_Eseries) and plot_D_E=='shaded'):
ax.fill_between(numpy.ma.masked_where(D_Eseries<=0, self.tseries), numpy.ma.masked_where(D_Eseries<=0, D_Eseries), 0, color=color_D_E, alpha=0.5, label='$D_E$', zorder=4)
ax.plot( numpy.ma.masked_where(D_Eseries<=0, self.tseries), numpy.ma.masked_where(D_Eseries<=0, D_Eseries), color=color_D_E, zorder=5)
if(any(D_Iseries) and plot_D_I=='shaded'):
ax.fill_between(numpy.ma.masked_where(D_Iseries<=0, self.tseries), numpy.ma.masked_where(D_Iseries<=0, D_Iseries), 0, color=color_D_I, alpha=0.5, label='$D_I$', zorder=4)
ax.plot( numpy.ma.masked_where(D_Iseries<=0, self.tseries), numpy.ma.masked_where(D_Iseries<=0, D_Iseries), color=color_D_I, zorder=5)
if(any(Iseries) and plot_I=='shaded'):
ax.fill_between(numpy.ma.masked_where(Iseries<=0, self.tseries), numpy.ma.masked_where(Iseries<=0, Iseries), 0, color=color_I, alpha=0.5, label='$I$', zorder=4)
ax.plot( numpy.ma.masked_where(Iseries<=0, self.tseries), numpy.ma.masked_where(Iseries<=0, Iseries), color=color_I, zorder=5)
if(any(Sseries) and plot_S=='shaded'):
ax.fill_between(numpy.ma.masked_where(Sseries<=0, self.tseries), numpy.ma.masked_where(Sseries<=0, Sseries), 0, color=color_S, alpha=0.5, label='$S$', zorder=4)
ax.plot( numpy.ma.masked_where(Sseries<=0, self.tseries), numpy.ma.masked_where(Sseries<=0, Sseries), color=color_S, zorder=5)
if(any(Rseries) and plot_R=='shaded'):
ax.fill_between(numpy.ma.masked_where(Rseries<=0, self.tseries), numpy.ma.masked_where(Rseries<=0, Rseries), 0, color=color_R, alpha=0.5, label='$R$', zorder=4)
ax.plot( numpy.ma.masked_where(Rseries<=0, self.tseries), numpy.ma.masked_where(Rseries<=0, Rseries), color=color_R, zorder=5)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Draw the line variables:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if(any(Fseries) and plot_F=='line'):
ax.plot(numpy.ma.masked_where(Fseries<=0, self.tseries), numpy.ma.masked_where(Fseries<=0, Fseries), color=color_F, label='$F$', zorder=6)
if(any(Eseries) and plot_E=='line'):
ax.plot(numpy.ma.masked_where(Eseries<=0, self.tseries), numpy.ma.masked_where(Eseries<=0, Eseries), color=color_E, label='$E$', zorder=6)
        if(combine_D and (any(Dseries) and plot_D_E=='line' and plot_D_I=='line')):
ax.plot(numpy.ma.masked_where(Dseries<=0, self.tseries), numpy.ma.masked_where(Dseries<=0, Dseries), color=color_D_E, label='$D_{all}$', zorder=6)
else:
if(any(D_Eseries) and plot_D_E=='line'):
ax.plot(numpy.ma.masked_where(D_Eseries<=0, self.tseries), numpy.ma.masked_where(D_Eseries<=0, D_Eseries), color=color_D_E, label='$D_E$', zorder=6)
if(any(D_Iseries) and plot_D_I=='line'):
ax.plot(numpy.ma.masked_where(D_Iseries<=0, self.tseries), numpy.ma.masked_where(D_Iseries<=0, D_Iseries), color=color_D_I, label='$D_I$', zorder=6)
if(any(Iseries) and plot_I=='line'):
ax.plot(numpy.ma.masked_where(Iseries<=0, self.tseries), numpy.ma.masked_where(Iseries<=0, Iseries), color=color_I, label='$I$', zorder=6)
if(any(Sseries) and plot_S=='line'):
ax.plot(numpy.ma.masked_where(Sseries<=0, self.tseries), numpy.ma.masked_where(Sseries<=0, Sseries), color=color_S, label='$S$', zorder=6)
if(any(Rseries) and plot_R=='line'):
ax.plot(numpy.ma.masked_where(Rseries<=0, self.tseries), numpy.ma.masked_where(Rseries<=0, Rseries), color=color_R, label='$R$', zorder=6)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Draw the vertical line annotations:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if(len(vlines)>0 and len(vline_colors)==0):
vline_colors = ['gray']*len(vlines)
if(len(vlines)>0 and len(vline_labels)==0):
vline_labels = [None]*len(vlines)
if(len(vlines)>0 and len(vline_styles)==0):
vline_styles = [':']*len(vlines)
for vline_x, vline_color, vline_style, vline_label in zip(vlines, vline_colors, vline_styles, vline_labels):
if(vline_x is not None):
ax.axvline(x=vline_x, color=vline_color, linestyle=vline_style, alpha=1, label=vline_label)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Draw the plot labels:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
ax.set_xlabel('days')
ax.set_ylabel('percent of population' if plot_percentages else 'number of individuals')
ax.set_xlim(0, (max(self.tseries) if not xlim else xlim))
ax.set_ylim(0, ylim)
if(plot_percentages):
ax.set_yticklabels(['{:,.0%}'.format(y) for y in ax.get_yticks()])
if(legend):
legend_handles, legend_labels = ax.get_legend_handles_labels()
ax.legend(legend_handles[::-1], legend_labels[::-1], loc='upper right', facecolor='white', edgecolor='none', framealpha=0.9, prop={'size': 8})
if(title):
ax.set_title(title, size=12)
if(side_title):
ax.annotate(side_title, (0, 0.5), xytext=(-45, 0), ha='right', va='center',
size=12, rotation=90, xycoords='axes fraction', textcoords='offset points')
return ax
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def figure_basic(self, plot_S='line', plot_E='line', plot_I='line',plot_R='line', plot_F='line',
plot_D_E='line', plot_D_I='line', combine_D=True,
color_S='tab:green', color_E='orange', color_I='crimson', color_R='tab:blue', color_F='black',
color_D_E='mediumorchid', color_D_I='mediumorchid', color_reference='#E0E0E0',
dashed_reference_results=None, dashed_reference_label='reference',
shaded_reference_results=None, shaded_reference_label='reference',
vlines=[], vline_colors=[], vline_styles=[], vline_labels=[],
ylim=None, xlim=None, legend=True, title=None, side_title=None, plot_percentages=True,
figsize=(12,8), use_seaborn=True, show=True):
import matplotlib.pyplot as pyplot
fig, ax = pyplot.subplots(figsize=figsize)
if(use_seaborn):
import seaborn
seaborn.set_style('ticks')
seaborn.despine()
self.plot(ax=ax, plot_S=plot_S, plot_E=plot_E, plot_I=plot_I,plot_R=plot_R, plot_F=plot_F,
plot_D_E=plot_D_E, plot_D_I=plot_D_I, combine_D=combine_D,
color_S=color_S, color_E=color_E, color_I=color_I, color_R=color_R, color_F=color_F,
color_D_E=color_D_E, color_D_I=color_D_I, color_reference=color_reference,
dashed_reference_results=dashed_reference_results, dashed_reference_label=dashed_reference_label,
shaded_reference_results=shaded_reference_results, shaded_reference_label=shaded_reference_label,
vlines=vlines, vline_colors=vline_colors, vline_styles=vline_styles, vline_labels=vline_labels,
ylim=ylim, xlim=xlim, legend=legend, title=title, side_title=side_title, plot_percentages=plot_percentages)
if(show):
pyplot.show()
return fig, ax
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def figure_infections(self, plot_S=False, plot_E='stacked', plot_I='stacked',plot_R=False, plot_F=False,
plot_D_E='stacked', plot_D_I='stacked', combine_D=True,
color_S='tab:green', color_E='orange', color_I='crimson', color_R='tab:blue', color_F='black',
color_D_E='mediumorchid', color_D_I='mediumorchid', color_reference='#E0E0E0',
dashed_reference_results=None, dashed_reference_label='reference',
shaded_reference_results=None, shaded_reference_label='reference',
vlines=[], vline_colors=[], vline_styles=[], vline_labels=[],
ylim=None, xlim=None, legend=True, title=None, side_title=None, plot_percentages=True,
figsize=(12,8), use_seaborn=True, show=True):
import matplotlib.pyplot as pyplot
fig, ax = pyplot.subplots(figsize=figsize)
if(use_seaborn):
import seaborn
seaborn.set_style('ticks')
seaborn.despine()
self.plot(ax=ax, plot_S=plot_S, plot_E=plot_E, plot_I=plot_I,plot_R=plot_R, plot_F=plot_F,
plot_D_E=plot_D_E, plot_D_I=plot_D_I, combine_D=combine_D,
color_S=color_S, color_E=color_E, color_I=color_I, color_R=color_R, color_F=color_F,
color_D_E=color_D_E, color_D_I=color_D_I, color_reference=color_reference,
dashed_reference_results=dashed_reference_results, dashed_reference_label=dashed_reference_label,
shaded_reference_results=shaded_reference_results, shaded_reference_label=shaded_reference_label,
vlines=vlines, vline_colors=vline_colors, vline_styles=vline_styles, vline_labels=vline_labels,
ylim=ylim, xlim=xlim, legend=legend, title=title, side_title=side_title, plot_percentages=plot_percentages)
if(show):
pyplot.show()
return fig, ax
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
class SEIRSNetworkModel():
"""
A class to simulate the SEIRS Stochastic Network Model
===================================================
Params: G Network adjacency matrix (numpy array) or Networkx graph object.
beta Rate of transmission (exposure) (global)
beta_local Rate(s) of transmission (exposure) for adjacent individuals (optional)
sigma Rate of infection (upon exposure)
gamma Rate of recovery (upon infection)
xi Rate of re-susceptibility (upon recovery)
mu_I Rate of infection-related death
mu_0 Rate of baseline death
nu Rate of baseline birth
p Probability of interaction outside adjacent nodes
Q Quarantine adjacency matrix (numpy array) or Networkx graph object.
beta_D Rate of transmission (exposure) for individuals with detected infections (global)
            beta_D_local    Rate(s) of transmission (exposure) for adjacent individuals with detected infections (optional)
sigma_D Rate of infection (upon exposure) for individuals with detected infections
gamma_D Rate of recovery (upon infection) for individuals with detected infections
mu_D Rate of infection-related death for individuals with detected infections
theta_E Rate of baseline testing for exposed individuals
theta_I Rate of baseline testing for infectious individuals
phi_E Rate of contact tracing testing for exposed individuals
phi_I Rate of contact tracing testing for infectious individuals
psi_E Probability of positive test results for exposed individuals
            psi_I           Probability of positive test results for infectious individuals
            q               Probability of quarantined individuals interacting outside adjacent nodes
initE Init number of exposed individuals
initI Init number of infectious individuals
            initD_E         Init number of detected exposed individuals
initD_I Init number of detected infectious individuals
initR Init number of recovered individuals
initF Init number of infection-related fatalities
(all remaining nodes initialized susceptible)
"""
def __init__(self, G, beta, sigma, gamma, xi=0, mu_I=0, mu_0=0, nu=0, beta_local=None, p=0,
Q=None, beta_D=None, sigma_D=None, gamma_D=None, mu_D=None, beta_D_local=None,
theta_E=0, theta_I=0, phi_E=0, phi_I=0, psi_E=1, psi_I=1, q=0,
initE=0, initI=10, initD_E=0, initD_I=0, initR=0, initF=0,
node_groups=None, store_Xseries=False):
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Setup Adjacency matrix:
self.update_G(G)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Setup Quarantine Adjacency matrix:
if(Q is None):
Q = G # If no Q graph is provided, use G in its place
self.update_Q(Q)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Model Parameters:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.parameters = { 'beta':beta, 'sigma':sigma, 'gamma':gamma, 'xi':xi, 'mu_I':mu_I, 'mu_0':mu_0, 'nu':nu,
'beta_D':beta_D, 'sigma_D':sigma_D, 'gamma_D':gamma_D, 'mu_D':mu_D,
'beta_local':beta_local, 'beta_D_local':beta_D_local, 'p':p,'q':q,
                            'theta_E':theta_E, 'theta_I':theta_I, 'phi_E':phi_E, 'phi_I':phi_I, 'psi_E':psi_E, 'psi_I':psi_I }
self.update_parameters()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Each node can undergo up to 4 transitions (sans vitality/re-susceptibility returns to S state),
# so there are ~numNodes*4 events/timesteps expected; initialize numNodes*5 timestep slots to start
# (will be expanded during run if needed)
self.tseries = numpy.zeros(5*self.numNodes)
self.numE = numpy.zeros(5*self.numNodes)
self.numI = numpy.zeros(5*self.numNodes)
self.numD_E = numpy.zeros(5*self.numNodes)
self.numD_I = numpy.zeros(5*self.numNodes)
self.numR = numpy.zeros(5*self.numNodes)
self.numF = numpy.zeros(5*self.numNodes)
self.numS = numpy.zeros(5*self.numNodes)
self.N = numpy.zeros(5*self.numNodes)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Initialize Timekeeping:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.t = 0
self.tmax = 0 # will be set when run() is called
self.tidx = 0
self.tseries[0] = 0
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        # Initialize Counts of individuals with each state:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.numE[0] = int(initE)
self.numI[0] = int(initI)
self.numD_E[0] = int(initD_E)
self.numD_I[0] = int(initD_I)
self.numR[0] = int(initR)
self.numF[0] = int(initF)
self.numS[0] = self.numNodes - self.numE[0] - self.numI[0] - self.numD_E[0] - self.numD_I[0] - self.numR[0] - self.numF[0]
self.N[0] = self.numS[0] + self.numE[0] + self.numI[0] + self.numD_E[0] + self.numD_I[0] + self.numR[0]
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Node states:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.S = 1
self.E = 2
self.I = 3
self.D_E = 4
self.D_I = 5
self.R = 6
self.F = 7
self.X = numpy.array([self.S]*int(self.numS[0]) + [self.E]*int(self.numE[0]) + [self.I]*int(self.numI[0]) + [self.D_E]*int(self.numD_E[0]) + [self.D_I]*int(self.numD_I[0]) + [self.R]*int(self.numR[0]) + [self.F]*int(self.numF[0])).reshape((self.numNodes,1))
numpy.random.shuffle(self.X)
self.store_Xseries = store_Xseries
if(store_Xseries):
self.Xseries = numpy.zeros(shape=(5*self.numNodes, self.numNodes), dtype='uint8')
self.Xseries[0,:] = self.X.T
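        # Allowed state transitions: each entry maps a transition label to the node state it
        # applies to ('currentState', asserted in run_iteration() before the change is made)
        # and the state it produces ('newState').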
self.transitions = {
'StoE': {'currentState':self.S, 'newState':self.E},
'EtoI': {'currentState':self.E, 'newState':self.I},
'ItoR': {'currentState':self.I, 'newState':self.R},
'ItoF': {'currentState':self.I, 'newState':self.F},
'RtoS': {'currentState':self.R, 'newState':self.S},
'EtoDE': {'currentState':self.E, 'newState':self.D_E},
'ItoDI': {'currentState':self.I, 'newState':self.D_I},
'DEtoDI': {'currentState':self.D_E, 'newState':self.D_I},
'DItoR': {'currentState':self.D_I, 'newState':self.R},
'DItoF': {'currentState':self.D_I, 'newState':self.F},
'_toS': {'currentState':True, 'newState':self.S},
}
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Initialize node subgroup data series:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.nodeGroupData = None
if(node_groups):
self.nodeGroupData = {}
for groupName, nodeList in node_groups.items():
self.nodeGroupData[groupName] = {'nodes': numpy.array(nodeList),
'mask': numpy.isin(range(self.numNodes), nodeList).reshape((self.numNodes,1))}
self.nodeGroupData[groupName]['numS'] = numpy.zeros(5*self.numNodes)
self.nodeGroupData[groupName]['numE'] = numpy.zeros(5*self.numNodes)
self.nodeGroupData[groupName]['numI'] = numpy.zeros(5*self.numNodes)
self.nodeGroupData[groupName]['numD_E'] = numpy.zeros(5*self.numNodes)
self.nodeGroupData[groupName]['numD_I'] = numpy.zeros(5*self.numNodes)
self.nodeGroupData[groupName]['numR'] = numpy.zeros(5*self.numNodes)
self.nodeGroupData[groupName]['numF'] = numpy.zeros(5*self.numNodes)
self.nodeGroupData[groupName]['N'] = numpy.zeros(5*self.numNodes)
self.nodeGroupData[groupName]['numS'][0] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.S)
self.nodeGroupData[groupName]['numE'][0] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.E)
self.nodeGroupData[groupName]['numI'][0] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.I)
self.nodeGroupData[groupName]['numD_E'][0] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.D_E)
self.nodeGroupData[groupName]['numD_I'][0] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.D_I)
self.nodeGroupData[groupName]['numR'][0] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.R)
self.nodeGroupData[groupName]['numF'][0] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.F)
self.nodeGroupData[groupName]['N'][0] = self.nodeGroupData[groupName]['numS'][0] + self.nodeGroupData[groupName]['numE'][0] + self.nodeGroupData[groupName]['numI'][0] + self.nodeGroupData[groupName]['numD_E'][0] + self.nodeGroupData[groupName]['numD_I'][0] + self.nodeGroupData[groupName]['numR'][0]
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def update_parameters(self):
import time
updatestart = time.time()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Model parameters:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
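        # Each rate may be supplied as a scalar (applied uniformly to all nodes) or as a
        # list/array of length numNodes (node-specific values); either way it is stored as a
        # (numNodes, 1) column vector for use in the vectorized propensity calculations.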
self.beta = numpy.array(self.parameters['beta']).reshape((self.numNodes, 1)) if isinstance(self.parameters['beta'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['beta'], shape=(self.numNodes,1))
self.sigma = numpy.array(self.parameters['sigma']).reshape((self.numNodes, 1)) if isinstance(self.parameters['sigma'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['sigma'], shape=(self.numNodes,1))
self.gamma = numpy.array(self.parameters['gamma']).reshape((self.numNodes, 1)) if isinstance(self.parameters['gamma'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['gamma'], shape=(self.numNodes,1))
self.xi = numpy.array(self.parameters['xi']).reshape((self.numNodes, 1)) if isinstance(self.parameters['xi'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['xi'], shape=(self.numNodes,1))
self.mu_I = numpy.array(self.parameters['mu_I']).reshape((self.numNodes, 1)) if isinstance(self.parameters['mu_I'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['mu_I'], shape=(self.numNodes,1))
self.mu_0 = numpy.array(self.parameters['mu_0']).reshape((self.numNodes, 1)) if isinstance(self.parameters['mu_0'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['mu_0'], shape=(self.numNodes,1))
self.nu = numpy.array(self.parameters['nu']).reshape((self.numNodes, 1)) if isinstance(self.parameters['nu'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['nu'], shape=(self.numNodes,1))
self.p = numpy.array(self.parameters['p']).reshape((self.numNodes, 1)) if isinstance(self.parameters['p'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['p'], shape=(self.numNodes,1))
# Testing-related parameters:
self.beta_D = (numpy.array(self.parameters['beta_D']).reshape((self.numNodes, 1)) if isinstance(self.parameters['beta_D'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['beta_D'], shape=(self.numNodes,1))) if self.parameters['beta_D'] is not None else self.beta
self.sigma_D = (numpy.array(self.parameters['sigma_D']).reshape((self.numNodes, 1)) if isinstance(self.parameters['sigma_D'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['sigma_D'], shape=(self.numNodes,1))) if self.parameters['sigma_D'] is not None else self.sigma
self.gamma_D = (numpy.array(self.parameters['gamma_D']).reshape((self.numNodes, 1)) if isinstance(self.parameters['gamma_D'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['gamma_D'], shape=(self.numNodes,1))) if self.parameters['gamma_D'] is not None else self.gamma
self.mu_D = (numpy.array(self.parameters['mu_D']).reshape((self.numNodes, 1)) if isinstance(self.parameters['mu_D'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['mu_D'], shape=(self.numNodes,1))) if self.parameters['mu_D'] is not None else self.mu_I
self.theta_E = numpy.array(self.parameters['theta_E']).reshape((self.numNodes, 1)) if isinstance(self.parameters['theta_E'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['theta_E'], shape=(self.numNodes,1))
self.theta_I = numpy.array(self.parameters['theta_I']).reshape((self.numNodes, 1)) if isinstance(self.parameters['theta_I'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['theta_I'], shape=(self.numNodes,1))
self.phi_E = numpy.array(self.parameters['phi_E']).reshape((self.numNodes, 1)) if isinstance(self.parameters['phi_E'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['phi_E'], shape=(self.numNodes,1))
self.phi_I = numpy.array(self.parameters['phi_I']).reshape((self.numNodes, 1)) if isinstance(self.parameters['phi_I'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['phi_I'], shape=(self.numNodes,1))
self.psi_E = numpy.array(self.parameters['psi_E']).reshape((self.numNodes, 1)) if isinstance(self.parameters['psi_E'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['psi_E'], shape=(self.numNodes,1))
self.psi_I = numpy.array(self.parameters['psi_I']).reshape((self.numNodes, 1)) if isinstance(self.parameters['psi_I'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['psi_I'], shape=(self.numNodes,1))
self.q = numpy.array(self.parameters['q']).reshape((self.numNodes, 1)) if isinstance(self.parameters['q'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['q'], shape=(self.numNodes,1))
#Local transmission parameters:
if(self.parameters['beta_local'] is not None):
if(isinstance(self.parameters['beta_local'], (list, numpy.ndarray))):
if(isinstance(self.parameters['beta_local'], list)):
self.beta_local = numpy.array(self.parameters['beta_local'])
else: # is numpy.ndarray
self.beta_local = self.parameters['beta_local']
                if(self.beta_local.ndim == 1):
                    # ndarray.reshape returns a new array, so the result must be assigned back:
                    self.beta_local = self.beta_local.reshape((self.numNodes, 1))
                elif(self.beta_local.ndim == 2):
                    self.beta_local = self.beta_local.reshape((self.numNodes, self.numNodes))
else:
self.beta_local = numpy.full_like(self.beta, fill_value=self.parameters['beta_local'])
else:
self.beta_local = self.beta
#----------------------------------------
if(self.parameters['beta_D_local'] is not None):
if(isinstance(self.parameters['beta_D_local'], (list, numpy.ndarray))):
if(isinstance(self.parameters['beta_D_local'], list)):
self.beta_D_local = numpy.array(self.parameters['beta_D_local'])
else: # is numpy.ndarray
self.beta_D_local = self.parameters['beta_D_local']
                if(self.beta_D_local.ndim == 1):
                    # ndarray.reshape returns a new array, so the result must be assigned back:
                    self.beta_D_local = self.beta_D_local.reshape((self.numNodes, 1))
                elif(self.beta_D_local.ndim == 2):
                    self.beta_D_local = self.beta_D_local.reshape((self.numNodes, self.numNodes))
else:
self.beta_D_local = numpy.full_like(self.beta_D, fill_value=self.parameters['beta_D_local'])
else:
self.beta_D_local = self.beta_D
# Pre-multiply beta values by the adjacency matrix ("transmission weight connections")
if(self.beta_local.ndim == 1):
self.A_beta = scipy.sparse.csr_matrix.multiply(self.A, numpy.tile(self.beta_local, (1,self.numNodes))).tocsr()
elif(self.beta_local.ndim == 2):
self.A_beta = scipy.sparse.csr_matrix.multiply(self.A, self.beta_local).tocsr()
# Pre-multiply beta_D values by the quarantine adjacency matrix ("transmission weight connections")
if(self.beta_D_local.ndim == 1):
self.A_Q_beta_D = scipy.sparse.csr_matrix.multiply(self.A_Q, numpy.tile(self.beta_D_local, (1,self.numNodes))).tocsr()
elif(self.beta_D_local.ndim == 2):
self.A_Q_beta_D = scipy.sparse.csr_matrix.multiply(self.A_Q, self.beta_D_local).tocsr()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Update scenario flags:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.update_scenario_flags()
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def node_degrees(self, Amat):
return Amat.sum(axis=0).reshape(self.numNodes,1) # sums of adj matrix cols
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def update_G(self, new_G):
self.G = new_G
# Adjacency matrix:
if type(new_G)==numpy.ndarray:
self.A = scipy.sparse.csr_matrix(new_G)
elif type(new_G)==networkx.classes.graph.Graph:
self.A = networkx.adj_matrix(new_G) # adj_matrix gives scipy.sparse csr_matrix
else:
raise BaseException("Input an adjacency matrix or networkx object only.")
self.numNodes = int(self.A.shape[1])
self.degree = numpy.asarray(self.node_degrees(self.A)).astype(float)
return
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def update_Q(self, new_Q):
self.Q = new_Q
# Quarantine Adjacency matrix:
if type(new_Q)==numpy.ndarray:
self.A_Q = scipy.sparse.csr_matrix(new_Q)
elif type(new_Q)==networkx.classes.graph.Graph:
self.A_Q = networkx.adj_matrix(new_Q) # adj_matrix gives scipy.sparse csr_matrix
else:
raise BaseException("Input an adjacency matrix or networkx object only.")
self.numNodes_Q = int(self.A_Q.shape[1])
self.degree_Q = numpy.asarray(self.node_degrees(self.A_Q)).astype(float)
assert(self.numNodes == self.numNodes_Q), "The normal and quarantine adjacency graphs must be of the same size."
return
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def update_scenario_flags(self):
self.testing_scenario = ( (numpy.any(self.psi_I) and (numpy.any(self.theta_I) or numpy.any(self.phi_I)))
or (numpy.any(self.psi_E) and (numpy.any(self.theta_E) or numpy.any(self.phi_E))) )
self.tracing_scenario = ( (numpy.any(self.psi_E) and numpy.any(self.phi_E))
or (numpy.any(self.psi_I) and numpy.any(self.phi_I)) )
self.vitality_scenario = (numpy.any(self.mu_0) and numpy.any(self.nu))
self.resusceptibility_scenario = (numpy.any(self.xi))
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def total_num_infections(self, t_idx=None):
if(t_idx is None):
return (self.numE[:] + self.numI[:] + self.numD_E[:] + self.numD_I[:])
else:
return (self.numE[t_idx] + self.numI[t_idx] + self.numD_E[t_idx] + self.numD_I[t_idx])
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def calc_propensities(self):
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Pre-calculate matrix multiplication terms that may be used in multiple propensity calculations,
# and check to see if their computation is necessary before doing the multiplication
transmissionTerms_I = numpy.zeros(shape=(self.numNodes,1))
if(numpy.any(self.numI[self.tidx])
and numpy.any(self.beta!=0)):
transmissionTerms_I = numpy.asarray( scipy.sparse.csr_matrix.dot(self.A_beta, self.X==self.I) )
transmissionTerms_DI = numpy.zeros(shape=(self.numNodes,1))
if(self.testing_scenario
and numpy.any(self.numD_I[self.tidx])
and numpy.any(self.beta_D)):
transmissionTerms_DI = numpy.asarray( scipy.sparse.csr_matrix.dot(self.A_Q_beta_D, self.X==self.D_I) )
numContacts_D = numpy.zeros(shape=(self.numNodes,1))
if(self.tracing_scenario
and (numpy.any(self.numD_E[self.tidx]) or numpy.any(self.numD_I[self.tidx]))):
numContacts_D = numpy.asarray( scipy.sparse.csr_matrix.dot( self.A, ((self.X==self.D_E)|(self.X==self.D_I)) ) )
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
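        # S->E propensity: with probability p a susceptible node mixes with the global population
        # (mean-field term scaled by current prevalence), and with probability (1-p) it is exposed
        # through its network neighbors (transmission terms normalized by node degree).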
propensities_StoE = ( self.p*((self.beta*self.numI[self.tidx] + self.q*self.beta_D*self.numD_I[self.tidx])/self.N[self.tidx])
+ (1-self.p)*numpy.divide((transmissionTerms_I + transmissionTerms_DI), self.degree, out=numpy.zeros_like(self.degree), where=self.degree!=0)
)*(self.X==self.S)
propensities_EtoI = self.sigma*(self.X==self.E)
propensities_ItoR = self.gamma*(self.X==self.I)
propensities_ItoF = self.mu_I*(self.X==self.I)
# propensities_EtoDE = ( self.theta_E + numpy.divide((self.phi_E*numContacts_D), self.degree, out=numpy.zeros_like(self.degree), where=self.degree!=0) )*self.psi_E*(self.X==self.E)
propensities_EtoDE = (self.theta_E + self.phi_E*numContacts_D)*self.psi_E*(self.X==self.E)
# propensities_ItoDI = ( self.theta_I + numpy.divide((self.phi_I*numContacts_D), self.degree, out=numpy.zeros_like(self.degree), where=self.degree!=0) )*self.psi_I*(self.X==self.I)
propensities_ItoDI = (self.theta_I + self.phi_I*numContacts_D)*self.psi_I*(self.X==self.I)
propensities_DEtoDI = self.sigma_D*(self.X==self.D_E)
propensities_DItoR = self.gamma_D*(self.X==self.D_I)
propensities_DItoF = self.mu_D*(self.X==self.D_I)
propensities_RtoS = self.xi*(self.X==self.R)
propensities__toS = self.nu*(self.X!=self.F)
propensities = numpy.hstack([propensities_StoE, propensities_EtoI,
propensities_ItoR, propensities_ItoF,
propensities_EtoDE, propensities_ItoDI, propensities_DEtoDI,
propensities_DItoR, propensities_DItoF,
propensities_RtoS, propensities__toS])
columns = ['StoE', 'EtoI', 'ItoR', 'ItoF', 'EtoDE', 'ItoDI', 'DEtoDI', 'DItoR', 'DItoF', 'RtoS', '_toS']
return propensities, columns
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def increase_data_series_length(self):
self.tseries= numpy.pad(self.tseries, [(0, 5*self.numNodes)], mode='constant', constant_values=0)
self.numS = numpy.pad(self.numS, [(0, 5*self.numNodes)], mode='constant', constant_values=0)
self.numE = numpy.pad(self.numE, [(0, 5*self.numNodes)], mode='constant', constant_values=0)
self.numI = numpy.pad(self.numI, [(0, 5*self.numNodes)], mode='constant', constant_values=0)
self.numD_E = numpy.pad(self.numD_E, [(0, 5*self.numNodes)], mode='constant', constant_values=0)
self.numD_I = numpy.pad(self.numD_I, [(0, 5*self.numNodes)], mode='constant', constant_values=0)
self.numR = numpy.pad(self.numR, [(0, 5*self.numNodes)], mode='constant', constant_values=0)
self.numF = numpy.pad(self.numF, [(0, 5*self.numNodes)], mode='constant', constant_values=0)
self.N = numpy.pad(self.N, [(0, 5*self.numNodes)], mode='constant', constant_values=0)
if(self.store_Xseries):
            self.Xseries = numpy.pad(self.Xseries, [(0, 5*self.numNodes), (0,0)], mode='constant', constant_values=0)
if(self.nodeGroupData):
for groupName in self.nodeGroupData:
self.nodeGroupData[groupName]['numS'] = numpy.pad(self.nodeGroupData[groupName]['numS'], [(0, 5*self.numNodes)], mode='constant', constant_values=0)
self.nodeGroupData[groupName]['numE'] = numpy.pad(self.nodeGroupData[groupName]['numE'], [(0, 5*self.numNodes)], mode='constant', constant_values=0)
self.nodeGroupData[groupName]['numI'] = numpy.pad(self.nodeGroupData[groupName]['numI'], [(0, 5*self.numNodes)], mode='constant', constant_values=0)
self.nodeGroupData[groupName]['numD_E'] = numpy.pad(self.nodeGroupData[groupName]['numD_E'], [(0, 5*self.numNodes)], mode='constant', constant_values=0)
self.nodeGroupData[groupName]['numD_I'] = numpy.pad(self.nodeGroupData[groupName]['numD_I'], [(0, 5*self.numNodes)], mode='constant', constant_values=0)
self.nodeGroupData[groupName]['numR'] = numpy.pad(self.nodeGroupData[groupName]['numR'], [(0, 5*self.numNodes)], mode='constant', constant_values=0)
self.nodeGroupData[groupName]['numF'] = numpy.pad(self.nodeGroupData[groupName]['numF'], [(0, 5*self.numNodes)], mode='constant', constant_values=0)
self.nodeGroupData[groupName]['N'] = numpy.pad(self.nodeGroupData[groupName]['N'], [(0, 5*self.numNodes)], mode='constant', constant_values=0)
return None
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def finalize_data_series(self):
self.tseries= numpy.array(self.tseries, dtype=float)[:self.tidx+1]
self.numS = numpy.array(self.numS, dtype=float)[:self.tidx+1]
self.numE = numpy.array(self.numE, dtype=float)[:self.tidx+1]
self.numI = numpy.array(self.numI, dtype=float)[:self.tidx+1]
self.numD_E = numpy.array(self.numD_E, dtype=float)[:self.tidx+1]
self.numD_I = numpy.array(self.numD_I, dtype=float)[:self.tidx+1]
self.numR = numpy.array(self.numR, dtype=float)[:self.tidx+1]
self.numF = numpy.array(self.numF, dtype=float)[:self.tidx+1]
self.N = numpy.array(self.N, dtype=float)[:self.tidx+1]
if(self.store_Xseries):
self.Xseries = self.Xseries[:self.tidx+1, :]
if(self.nodeGroupData):
for groupName in self.nodeGroupData:
self.nodeGroupData[groupName]['numS'] = numpy.array(self.nodeGroupData[groupName]['numS'], dtype=float)[:self.tidx+1]
self.nodeGroupData[groupName]['numE'] = numpy.array(self.nodeGroupData[groupName]['numE'], dtype=float)[:self.tidx+1]
self.nodeGroupData[groupName]['numI'] = numpy.array(self.nodeGroupData[groupName]['numI'], dtype=float)[:self.tidx+1]
self.nodeGroupData[groupName]['numD_E'] = numpy.array(self.nodeGroupData[groupName]['numD_E'], dtype=float)[:self.tidx+1]
self.nodeGroupData[groupName]['numD_I'] = numpy.array(self.nodeGroupData[groupName]['numD_I'], dtype=float)[:self.tidx+1]
self.nodeGroupData[groupName]['numR'] = numpy.array(self.nodeGroupData[groupName]['numR'], dtype=float)[:self.tidx+1]
self.nodeGroupData[groupName]['numF'] = numpy.array(self.nodeGroupData[groupName]['numF'], dtype=float)[:self.tidx+1]
self.nodeGroupData[groupName]['N'] = numpy.array(self.nodeGroupData[groupName]['N'], dtype=float)[:self.tidx+1]
return None
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def run_iteration(self):
if(self.tidx >= len(self.tseries)-1):
            # Room has run out in the time series storage arrays; extend them by another 5*numNodes slots:
self.increase_data_series_length()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# 1. Generate 2 random numbers uniformly distributed in (0,1)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
r1 = numpy.random.rand()
r2 = numpy.random.rand()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# 2. Calculate propensities
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
propensities, transitionTypes = self.calc_propensities()
# Terminate when probability of all events is 0:
if(propensities.sum() <= 0.0):
self.finalize_data_series()
return False
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# 3. Calculate alpha
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
propensities_flat = propensities.ravel(order='F')
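        # Column-major flattening orders the propensities as [all nodes for transition type 0,
        # all nodes for type 1, ...], so the sampled flat index decomposes into a (transition
        # type, node) pair below.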
cumsum = propensities_flat.cumsum()
alpha = propensities_flat.sum()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# 4. Compute the time until the next event takes place
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
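        # Gillespie direct method: the waiting time to the next event is Exponential(alpha),
        # sampled here by inverse transform of the uniform random number r1.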
tau = (1/alpha)*numpy.log(float(1/r1))
self.t += tau
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# 5. Compute which event takes place
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
transitionIdx = numpy.searchsorted(cumsum,r2*alpha)
transitionNode = transitionIdx % self.numNodes
transitionType = transitionTypes[ int(transitionIdx/self.numNodes) ]
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# 6. Update node states and data series
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
assert(self.X[transitionNode] == self.transitions[transitionType]['currentState'] and self.X[transitionNode]!=self.F), "Assertion error: Node "+str(transitionNode)+" has unexpected current state "+str(self.X[transitionNode])+" given the intended transition of "+str(transitionType)+"."
self.X[transitionNode] = self.transitions[transitionType]['newState']
self.tidx += 1
self.tseries[self.tidx] = self.t
self.numS[self.tidx] = numpy.clip(numpy.count_nonzero(self.X==self.S), a_min=0, a_max=self.numNodes)
self.numE[self.tidx] = numpy.clip(numpy.count_nonzero(self.X==self.E), a_min=0, a_max=self.numNodes)
self.numI[self.tidx] = numpy.clip(numpy.count_nonzero(self.X==self.I), a_min=0, a_max=self.numNodes)
self.numD_E[self.tidx] = numpy.clip(numpy.count_nonzero(self.X==self.D_E), a_min=0, a_max=self.numNodes)
self.numD_I[self.tidx] = numpy.clip(numpy.count_nonzero(self.X==self.D_I), a_min=0, a_max=self.numNodes)
self.numR[self.tidx] = numpy.clip(numpy.count_nonzero(self.X==self.R), a_min=0, a_max=self.numNodes)
self.numF[self.tidx] = numpy.clip(numpy.count_nonzero(self.X==self.F), a_min=0, a_max=self.numNodes)
self.N[self.tidx] = numpy.clip((self.numS[self.tidx] + self.numE[self.tidx] + self.numI[self.tidx] + self.numD_E[self.tidx] + self.numD_I[self.tidx] + self.numR[self.tidx]), a_min=0, a_max=self.numNodes)
if(self.store_Xseries):
self.Xseries[self.tidx,:] = self.X.T
if(self.nodeGroupData):
for groupName in self.nodeGroupData:
self.nodeGroupData[groupName]['numS'][self.tidx] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.S)
self.nodeGroupData[groupName]['numE'][self.tidx] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.E)
self.nodeGroupData[groupName]['numI'][self.tidx] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.I)
self.nodeGroupData[groupName]['numD_E'][self.tidx] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.D_E)
self.nodeGroupData[groupName]['numD_I'][self.tidx] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.D_I)
self.nodeGroupData[groupName]['numR'][self.tidx] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.R)
self.nodeGroupData[groupName]['numF'][self.tidx] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.F)
                self.nodeGroupData[groupName]['N'][self.tidx] = numpy.clip((self.nodeGroupData[groupName]['numS'][self.tidx] + self.nodeGroupData[groupName]['numE'][self.tidx] + self.nodeGroupData[groupName]['numI'][self.tidx] + self.nodeGroupData[groupName]['numD_E'][self.tidx] + self.nodeGroupData[groupName]['numD_I'][self.tidx] + self.nodeGroupData[groupName]['numR'][self.tidx]), a_min=0, a_max=self.numNodes)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Terminate if tmax reached or num infectious and num exposed is 0:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if(self.t >= self.tmax or (self.numI[self.tidx]<1 and self.numE[self.tidx]<1 and self.numD_E[self.tidx]<1 and self.numD_I[self.tidx]<1)):
self.finalize_data_series()
return False
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
return True
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def run(self, T, checkpoints=None, print_interval=10, verbose='t'):
if(T>0):
self.tmax += T
else:
return False
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Pre-process checkpoint values:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
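        # 'checkpoints' is expected to be a dict with key 't' (list of checkpoint times) plus,
        # optionally, 'G', 'Q', and/or parameter names, each mapping to a list with one value
        # per checkpoint time.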
if(checkpoints):
numCheckpoints = len(checkpoints['t'])
for chkpt_param, chkpt_values in checkpoints.items():
assert(isinstance(chkpt_values, (list, numpy.ndarray)) and len(chkpt_values)==numCheckpoints), "Expecting a list of values with length equal to number of checkpoint times ("+str(numCheckpoints)+") for each checkpoint parameter."
checkpointIdx = numpy.searchsorted(checkpoints['t'], self.t) # Finds 1st index in list greater than given val
if(checkpointIdx >= numCheckpoints):
# We are out of checkpoints, stop checking them:
checkpoints = None
else:
checkpointTime = checkpoints['t'][checkpointIdx]
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Run the simulation loop:
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
print_reset = True
running = True
while running:
running = self.run_iteration()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Handle checkpoints if applicable:
if(checkpoints):
if(self.t >= checkpointTime):
if(verbose is not False):
print("[Checkpoint: Updating parameters]")
# A checkpoint has been reached, update param values:
if('G' in list(checkpoints.keys())):
self.update_G(checkpoints['G'][checkpointIdx])
if('Q' in list(checkpoints.keys())):
self.update_Q(checkpoints['Q'][checkpointIdx])
for param in list(self.parameters.keys()):
if(param in list(checkpoints.keys())):
self.parameters.update({param: checkpoints[param][checkpointIdx]})
# Update parameter data structures and scenario flags:
self.update_parameters()
# Update the next checkpoint time:
checkpointIdx = numpy.searchsorted(checkpoints['t'], self.t) # Finds 1st index in list greater than given val
if(checkpointIdx >= numCheckpoints):
# We are out of checkpoints, stop checking them:
checkpoints = None
else:
checkpointTime = checkpoints['t'][checkpointIdx]
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if(print_interval):
if(print_reset and (int(self.t) % print_interval == 0)):
if(verbose=="t"):
print("t = %.2f" % self.t)
if(verbose==True):
print("t = %.2f" % self.t)
print("\t S = " + str(self.numS[self.tidx]))
print("\t E = " + str(self.numE[self.tidx]))
print("\t I = " + str(self.numI[self.tidx]))
print("\t D_E = " + str(self.numD_E[self.tidx]))
print("\t D_I = " + str(self.numD_I[self.tidx]))
print("\t R = " + str(self.numR[self.tidx]))
print("\t F = " + str(self.numF[self.tidx]))
print_reset = False
                elif(not print_reset and (int(self.t) % print_interval != 0)):
print_reset = True
return True
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def plot(self, ax=None, plot_S='line', plot_E='line', plot_I='line',plot_R='line', plot_F='line',
plot_D_E='line', plot_D_I='line', combine_D=True,
color_S='tab:green', color_E='orange', color_I='crimson', color_R='tab:blue', color_F='black',
color_D_E='mediumorchid', color_D_I='mediumorchid', color_reference='#E0E0E0',
dashed_reference_results=None, dashed_reference_label='reference',
shaded_reference_results=None, shaded_reference_label='reference',
vlines=[], vline_colors=[], vline_styles=[], vline_labels=[],
ylim=None, xlim=None, legend=True, title=None, side_title=None, plot_percentages=True):
import matplotlib.pyplot as pyplot
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Create an Axes object if None provided:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if(not ax):
fig, ax = pyplot.subplots()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Prepare data series to be plotted:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Fseries = self.numF/self.numNodes if plot_percentages else self.numF
Eseries = self.numE/self.numNodes if plot_percentages else self.numE
Dseries = (self.numD_E+self.numD_I)/self.numNodes if plot_percentages else (self.numD_E+self.numD_I)
D_Eseries = self.numD_E/self.numNodes if plot_percentages else self.numD_E
D_Iseries = self.numD_I/self.numNodes if plot_percentages else self.numD_I
Iseries = self.numI/self.numNodes if plot_percentages else self.numI
Rseries = self.numR/self.numNodes if plot_percentages else self.numR
Sseries = self.numS/self.numNodes if plot_percentages else self.numS
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Draw the reference data:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if(dashed_reference_results):
dashedReference_tseries = dashed_reference_results.tseries[::int(self.numNodes/100)]
dashedReference_IDEstack = (dashed_reference_results.numI + dashed_reference_results.numD_I + dashed_reference_results.numD_E + dashed_reference_results.numE)[::int(self.numNodes/100)] / (self.numNodes if plot_percentages else 1)
ax.plot(dashedReference_tseries, dashedReference_IDEstack, color='#E0E0E0', linestyle='--', label='$I+D+E$ ('+dashed_reference_label+')', zorder=0)
if(shaded_reference_results):
shadedReference_tseries = shaded_reference_results.tseries
shadedReference_IDEstack = (shaded_reference_results.numI + shaded_reference_results.numD_I + shaded_reference_results.numD_E + shaded_reference_results.numE) / (self.numNodes if plot_percentages else 1)
ax.fill_between(shaded_reference_results.tseries, shadedReference_IDEstack, 0, color='#EFEFEF', label='$I+D+E$ ('+shaded_reference_label+')', zorder=0)
ax.plot(shaded_reference_results.tseries, shadedReference_IDEstack, color='#E0E0E0', zorder=1)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Draw the stacked variables:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
topstack = numpy.zeros_like(self.tseries)
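        # topstack accumulates the running upper boundary of the stacked series, so each
        # subsequent compartment is drawn on top of those already plotted.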
if(any(Fseries) and plot_F=='stacked'):
ax.fill_between(numpy.ma.masked_where(Fseries<=0, self.tseries), numpy.ma.masked_where(Fseries<=0, topstack+Fseries), topstack, color=color_F, alpha=0.5, label='$F$', zorder=2)
ax.plot( numpy.ma.masked_where(Fseries<=0, self.tseries), numpy.ma.masked_where(Fseries<=0, topstack+Fseries), color=color_F, zorder=3)
topstack = topstack+Fseries
if(any(Eseries) and plot_E=='stacked'):
ax.fill_between(numpy.ma.masked_where(Eseries<=0, self.tseries), numpy.ma.masked_where(Eseries<=0, topstack+Eseries), topstack, color=color_E, alpha=0.5, label='$E$', zorder=2)
ax.plot( numpy.ma.masked_where(Eseries<=0, self.tseries), numpy.ma.masked_where(Eseries<=0, topstack+Eseries), color=color_E, zorder=3)
topstack = topstack+Eseries
if(combine_D and plot_D_E=='stacked' and plot_D_I=='stacked'):
ax.fill_between(numpy.ma.masked_where(Dseries<=0, self.tseries), numpy.ma.masked_where(Dseries<=0, topstack+Dseries), topstack, color=color_D_E, alpha=0.5, label='$D_{all}$', zorder=2)
ax.plot( numpy.ma.masked_where(Dseries<=0, self.tseries), numpy.ma.masked_where(Dseries<=0, topstack+Dseries), color=color_D_E, zorder=3)
topstack = topstack+Dseries
else:
if(any(D_Eseries) and plot_D_E=='stacked'):
ax.fill_between(numpy.ma.masked_where(D_Eseries<=0, self.tseries), numpy.ma.masked_where(D_Eseries<=0, topstack+D_Eseries), topstack, color=color_D_E, alpha=0.5, label='$D_E$', zorder=2)
ax.plot( numpy.ma.masked_where(D_Eseries<=0, self.tseries), numpy.ma.masked_where(D_Eseries<=0, topstack+D_Eseries), color=color_D_E, zorder=3)
topstack = topstack+D_Eseries
if(any(D_Iseries) and plot_D_I=='stacked'):
ax.fill_between(numpy.ma.masked_where(D_Iseries<=0, self.tseries), numpy.ma.masked_where(D_Iseries<=0, topstack+D_Iseries), topstack, color=color_D_I, alpha=0.5, label='$D_I$', zorder=2)
ax.plot( numpy.ma.masked_where(D_Iseries<=0, self.tseries), numpy.ma.masked_where(D_Iseries<=0, topstack+D_Iseries), color=color_D_I, zorder=3)
topstack = topstack+D_Iseries
if(any(Iseries) and plot_I=='stacked'):
ax.fill_between(numpy.ma.masked_where(Iseries<=0, self.tseries), numpy.ma.masked_where(Iseries<=0, topstack+Iseries), topstack, color=color_I, alpha=0.5, label='$I$', zorder=2)
ax.plot( numpy.ma.masked_where(Iseries<=0, self.tseries), numpy.ma.masked_where(Iseries<=0, topstack+Iseries), color=color_I, zorder=3)
topstack = topstack+Iseries
if(any(Rseries) and plot_R=='stacked'):
ax.fill_between(numpy.ma.masked_where(Rseries<=0, self.tseries), numpy.ma.masked_where(Rseries<=0, topstack+Rseries), topstack, color=color_R, alpha=0.5, label='$R$', zorder=2)
ax.plot( numpy.ma.masked_where(Rseries<=0, self.tseries), numpy.ma.masked_where(Rseries<=0, topstack+Rseries), color=color_R, zorder=3)
topstack = topstack+Rseries
if(any(Sseries) and plot_S=='stacked'):
ax.fill_between(numpy.ma.masked_where(Sseries<=0, self.tseries), numpy.ma.masked_where(Sseries<=0, topstack+Sseries), topstack, color=color_S, alpha=0.5, label='$S$', zorder=2)
ax.plot( numpy.ma.masked_where(Sseries<=0, self.tseries), numpy.ma.masked_where(Sseries<=0, topstack+Sseries), color=color_S, zorder=3)
topstack = topstack+Sseries
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Draw the shaded variables:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if(any(Fseries) and plot_F=='shaded'):
ax.fill_between(numpy.ma.masked_where(Fseries<=0, self.tseries), numpy.ma.masked_where(Fseries<=0, Fseries), 0, color=color_F, alpha=0.5, label='$F$', zorder=4)
ax.plot( numpy.ma.masked_where(Fseries<=0, self.tseries), numpy.ma.masked_where(Fseries<=0, Fseries), color=color_F, zorder=5)
if(any(Eseries) and plot_E=='shaded'):
ax.fill_between(numpy.ma.masked_where(Eseries<=0, self.tseries), numpy.ma.masked_where(Eseries<=0, Eseries), 0, color=color_E, alpha=0.5, label='$E$', zorder=4)
ax.plot( numpy.ma.masked_where(Eseries<=0, self.tseries), numpy.ma.masked_where(Eseries<=0, Eseries), color=color_E, zorder=5)
if(combine_D and (any(Dseries) and plot_D_E=='shaded' and plot_D_I=='shaded')):
ax.fill_between(numpy.ma.masked_where(Dseries<=0, self.tseries), numpy.ma.masked_where(Dseries<=0, Dseries), 0, color=color_D_E, alpha=0.5, label='$D_{all}$', zorder=4)
ax.plot( numpy.ma.masked_where(Dseries<=0, self.tseries), numpy.ma.masked_where(Dseries<=0, Dseries), color=color_D_E, zorder=5)
else:
if(any(D_Eseries) and plot_D_E=='shaded'):
ax.fill_between(numpy.ma.masked_where(D_Eseries<=0, self.tseries), numpy.ma.masked_where(D_Eseries<=0, D_Eseries), 0, color=color_D_E, alpha=0.5, label='$D_E$', zorder=4)
ax.plot( numpy.ma.masked_where(D_Eseries<=0, self.tseries), numpy.ma.masked_where(D_Eseries<=0, D_Eseries), color=color_D_E, zorder=5)
if(any(D_Iseries) and plot_D_I=='shaded'):
ax.fill_between(numpy.ma.masked_where(D_Iseries<=0, self.tseries), numpy.ma.masked_where(D_Iseries<=0, D_Iseries), 0, color=color_D_I, alpha=0.5, label='$D_I$', zorder=4)
ax.plot( numpy.ma.masked_where(D_Iseries<=0, self.tseries), numpy.ma.masked_where(D_Iseries<=0, D_Iseries), color=color_D_I, zorder=5)
if(any(Iseries) and plot_I=='shaded'):
ax.fill_between(numpy.ma.masked_where(Iseries<=0, self.tseries), numpy.ma.masked_where(Iseries<=0, Iseries), 0, color=color_I, alpha=0.5, label='$I$', zorder=4)
ax.plot( numpy.ma.masked_where(Iseries<=0, self.tseries), numpy.ma.masked_where(Iseries<=0, Iseries), color=color_I, zorder=5)
if(any(Sseries) and plot_S=='shaded'):
ax.fill_between(numpy.ma.masked_where(Sseries<=0, self.tseries), numpy.ma.masked_where(Sseries<=0, Sseries), 0, color=color_S, alpha=0.5, label='$S$', zorder=4)
ax.plot( numpy.ma.masked_where(Sseries<=0, self.tseries), numpy.ma.masked_where(Sseries<=0, Sseries), color=color_S, zorder=5)
if(any(Rseries) and plot_R=='shaded'):
ax.fill_between(numpy.ma.masked_where(Rseries<=0, self.tseries), numpy.ma.masked_where(Rseries<=0, Rseries), 0, color=color_R, alpha=0.5, label='$R$', zorder=4)
ax.plot( numpy.ma.masked_where(Rseries<=0, self.tseries), numpy.ma.masked_where(Rseries<=0, Rseries), color=color_R, zorder=5)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Draw the line variables:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if(any(Fseries) and plot_F=='line'):
ax.plot(numpy.ma.masked_where(Fseries<=0, self.tseries), numpy.ma.masked_where(Fseries<=0, Fseries), color=color_F, label='$F$', zorder=6)
if(any(Eseries) and plot_E=='line'):
ax.plot(numpy.ma.masked_where(Eseries<=0, self.tseries), numpy.ma.masked_where(Eseries<=0, Eseries), color=color_E, label='$E$', zorder=6)
if(combine_D and (any(Dseries) and plot_D_E=='line' and plot_D_I=='line')):
ax.plot(numpy.ma.masked_where(Dseries<=0, self.tseries), numpy.ma.masked_where(Dseries<=0, Dseries), color=color_D_E, label='$D_{all}$', zorder=6)
else:
if(any(D_Eseries) and plot_D_E=='line'):
ax.plot(numpy.ma.masked_where(D_Eseries<=0, self.tseries), numpy.ma.masked_where(D_Eseries<=0, D_Eseries), color=color_D_E, label='$D_E$', zorder=6)
if(any(D_Iseries) and plot_D_I=='line'):
ax.plot(numpy.ma.masked_where(D_Iseries<=0, self.tseries), numpy.ma.masked_where(D_Iseries<=0, D_Iseries), color=color_D_I, label='$D_I$', zorder=6)
if(any(Iseries) and plot_I=='line'):
ax.plot(numpy.ma.masked_where(Iseries<=0, self.tseries), numpy.ma.masked_where(Iseries<=0, Iseries), color=color_I, label='$I$', zorder=6)
if(any(Sseries) and plot_S=='line'):
ax.plot(numpy.ma.masked_where(Sseries<=0, self.tseries), numpy.ma.masked_where(Sseries<=0, Sseries), color=color_S, label='$S$', zorder=6)
if(any(Rseries) and plot_R=='line'):
ax.plot(numpy.ma.masked_where(Rseries<=0, self.tseries), numpy.ma.masked_where(Rseries<=0, Rseries), color=color_R, label='$R$', zorder=6)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Draw the vertical line annotations:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if(len(vlines)>0 and len(vline_colors)==0):
vline_colors = ['gray']*len(vlines)
if(len(vlines)>0 and len(vline_labels)==0):
vline_labels = [None]*len(vlines)
if(len(vlines)>0 and len(vline_styles)==0):
vline_styles = [':']*len(vlines)
for vline_x, vline_color, vline_style, vline_label in zip(vlines, vline_colors, vline_styles, vline_labels):
if(vline_x is not None):
ax.axvline(x=vline_x, color=vline_color, linestyle=vline_style, alpha=1, label=vline_label)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Draw the plot labels:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
ax.set_xlabel('days')
ax.set_ylabel('percent of population' if plot_percentages else 'number of individuals')
ax.set_xlim(0, (max(self.tseries) if not xlim else xlim))
ax.set_ylim(0, ylim)
if(plot_percentages):
ax.set_yticklabels(['{:,.0%}'.format(y) for y in ax.get_yticks()])
if(legend):
legend_handles, legend_labels = ax.get_legend_handles_labels()
ax.legend(legend_handles[::-1], legend_labels[::-1], loc='upper right', facecolor='white', edgecolor='none', framealpha=0.9, prop={'size': 8})
if(title):
ax.set_title(title, size=12)
if(side_title):
ax.annotate(side_title, (0, 0.5), xytext=(-45, 0), ha='right', va='center',
size=12, rotation=90, xycoords='axes fraction', textcoords='offset points')
return ax
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def figure_basic(self, plot_S='line', plot_E='line', plot_I='line',plot_R='line', plot_F='line',
plot_D_E='line', plot_D_I='line', combine_D=True,
color_S='tab:green', color_E='orange', color_I='crimson', color_R='tab:blue', color_F='black',
color_D_E='mediumorchid', color_D_I='mediumorchid', color_reference='#E0E0E0',
dashed_reference_results=None, dashed_reference_label='reference',
shaded_reference_results=None, shaded_reference_label='reference',
vlines=[], vline_colors=[], vline_styles=[], vline_labels=[],
ylim=None, xlim=None, legend=True, title=None, side_title=None, plot_percentages=True,
figsize=(12,8), use_seaborn=True, show=True):
import matplotlib.pyplot as pyplot
fig, ax = pyplot.subplots(figsize=figsize)
if(use_seaborn):
import seaborn
seaborn.set_style('ticks')
seaborn.despine()
self.plot(ax=ax, plot_S=plot_S, plot_E=plot_E, plot_I=plot_I,plot_R=plot_R, plot_F=plot_F,
plot_D_E=plot_D_E, plot_D_I=plot_D_I, combine_D=combine_D,
color_S=color_S, color_E=color_E, color_I=color_I, color_R=color_R, color_F=color_F,
color_D_E=color_D_E, color_D_I=color_D_I, color_reference=color_reference,
dashed_reference_results=dashed_reference_results, dashed_reference_label=dashed_reference_label,
shaded_reference_results=shaded_reference_results, shaded_reference_label=shaded_reference_label,
vlines=vlines, vline_colors=vline_colors, vline_styles=vline_styles, vline_labels=vline_labels,
ylim=ylim, xlim=xlim, legend=legend, title=title, side_title=side_title, plot_percentages=plot_percentages)
if(show):
pyplot.show()
return fig, ax
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def figure_infections(self, plot_S=False, plot_E='stacked', plot_I='stacked',plot_R=False, plot_F=False,
plot_D_E='stacked', plot_D_I='stacked', combine_D=True,
color_S='tab:green', color_E='orange', color_I='crimson', color_R='tab:blue', color_F='black',
color_D_E='mediumorchid', color_D_I='mediumorchid', color_reference='#E0E0E0',
dashed_reference_results=None, dashed_reference_label='reference',
shaded_reference_results=None, shaded_reference_label='reference',
vlines=[], vline_colors=[], vline_styles=[], vline_labels=[],
ylim=None, xlim=None, legend=True, title=None, side_title=None, plot_percentages=True,
figsize=(12,8), use_seaborn=True, show=True):
import matplotlib.pyplot as pyplot
fig, ax = pyplot.subplots(figsize=figsize)
if(use_seaborn):
import seaborn
seaborn.set_style('ticks')
seaborn.despine()
self.plot(ax=ax, plot_S=plot_S, plot_E=plot_E, plot_I=plot_I,plot_R=plot_R, plot_F=plot_F,
plot_D_E=plot_D_E, plot_D_I=plot_D_I, combine_D=combine_D,
color_S=color_S, color_E=color_E, color_I=color_I, color_R=color_R, color_F=color_F,
color_D_E=color_D_E, color_D_I=color_D_I, color_reference=color_reference,
dashed_reference_results=dashed_reference_results, dashed_reference_label=dashed_reference_label,
shaded_reference_results=shaded_reference_results, shaded_reference_label=shaded_reference_label,
vlines=vlines, vline_colors=vline_colors, vline_styles=vline_styles, vline_labels=vline_labels,
ylim=ylim, xlim=xlim, legend=legend, title=title, side_title=side_title, plot_percentages=plot_percentages)
if(show):
pyplot.show()
return fig, ax
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
class SymptomaticSEIRSNetworkModel():
"""
A class to simulate the SEIRS Stochastic Network Model
with Symptom Presentation Compartments
===================================================
Params:
G Network adjacency matrix (numpy array) or Networkx graph object.
beta Rate of transmission (global interactions)
beta_local Rate(s) of transmission between adjacent individuals (optional)
            beta_A          Rate of transmission for infectious asymptomatic individuals (global interactions)
            beta_A_local    Rate(s) of transmission between adjacent individuals for infectious asymptomatic individuals (optional)
sigma Rate of progression to infectious state (inverse of latent period)
lamda Rate of progression to infectious (a)symptomatic state (inverse of prodromal period)
eta Rate of progression to hospitalized state (inverse of onset-to-admission period)
gamma Rate of recovery for non-hospitalized symptomatic individuals (inverse of symptomatic infectious period)
gamma_A Rate of recovery for asymptomatic individuals (inverse of asymptomatic infectious period)
gamma_H Rate of recovery for hospitalized symptomatic individuals (inverse of hospitalized infectious period)
mu_H Rate of death for hospitalized individuals (inverse of admission-to-death period)
xi Rate of re-susceptibility (upon recovery)
mu_0 Rate of baseline death
nu Rate of baseline birth
a Probability of an infected individual remaining asymptomatic
h Probability of a symptomatic individual being hospitalized
f Probability of death for hospitalized individuals (case fatality rate)
p Probability of individuals interacting with global population
Q Quarantine adjacency matrix (numpy array) or Networkx graph object.
beta_D Rate of transmission for individuals with detected infections (global interactions)
beta_D_local Rate(s) of transmission (exposure) for adjacent individuals with detected infections (optional)
sigma_D Rate of progression to infectious state for individuals with detected infections
lamda_D Rate of progression to infectious (a)symptomatic state for individuals with detected infections
eta_D Rate of progression to hospitalized state for individuals with detected infections
gamma_D_S Rate of recovery for non-hospitalized symptomatic individuals for individuals with detected infections
gamma_D_A Rate of recovery for asymptomatic individuals for individuals with detected infections
theta_E Rate of random testing for exposed individuals
theta_pre Rate of random testing for infectious pre-symptomatic individuals
theta_S Rate of random testing for infectious symptomatic individuals
theta_A Rate of random testing for infectious asymptomatic individuals
phi_E Rate of testing when a close contact has tested positive for exposed individuals
phi_pre Rate of testing when a close contact has tested positive for infectious pre-symptomatic individuals
phi_S Rate of testing when a close contact has tested positive for infectious symptomatic individuals
phi_A Rate of testing when a close contact has tested positive for infectious asymptomatic individuals
d_E Probability of positive test for exposed individuals
d_pre Probability of positive test for infectious pre-symptomatic individuals
d_S Probability of positive test for infectious symptomatic individuals
d_A Probability of positive test for infectious asymptomatic individuals
q Probability of individuals with detected infection interacting with global population
initE Initial number of exposed individuals
initI_pre Initial number of infectious pre-symptomatic individuals
initI_S Initial number of infectious symptomatic individuals
initI_A Initial number of infectious asymptomatic individuals
initH Initial number of hospitalized individuals
initR Initial number of recovered individuals
initF Initial number of infection-related fatalities
initD_E Initial number of detected exposed individuals
initD_pre Initial number of detected infectious pre-symptomatic individuals
initD_S Initial number of detected infectious symptomatic individuals
initD_A Initial number of detected infectious asymptomatic individuals
(all remaining nodes initialized susceptible)
"""
def __init__(self, G, beta, sigma, lamda, gamma,
eta=0, gamma_A=None, gamma_H=None, mu_H=0, xi=0, mu_0=0, nu=0, a=0, h=0, f=0, p=0,
beta_local=None, beta_A=None, beta_A_local=None,
Q=None, lamda_D=None, beta_D=None, beta_D_local=None, sigma_D=None, eta_D=None, gamma_D_S=None, gamma_D_A=None,
theta_E=0, theta_pre=0, theta_S=0, theta_A=0, phi_E=0, phi_pre=0, phi_S=0, phi_A=0,
d_E=1, d_pre=1, d_S=1, d_A=1, q=0,
initE=0, initI_pre=0, initI_S=0, initI_A=0, initH=0, initR=0, initF=0,
initD_E=0, initD_pre=0, initD_S=0, initD_A=0,
node_groups=None, store_Xseries=False):
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Setup Adjacency matrix:
self.update_G(G)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Setup Quarantine Adjacency matrix:
if(Q is None):
Q = G # If no Q graph is provided, use G in its place
self.update_Q(Q)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Model Parameters:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.parameters = { 'beta':beta, 'sigma':sigma, 'lamda':lamda, 'gamma':gamma,
'eta':eta, 'gamma_A':gamma_A, 'gamma_H':gamma_H, 'mu_H':mu_H,
'xi':xi, 'mu_0':mu_0, 'nu':nu, 'a':a, 'h':h, 'f':f, 'p':p,
'beta_local':beta_local, 'beta_A':beta_A, 'beta_A_local':beta_A_local,
'lamda_D':lamda_D, 'beta_D':beta_D, 'beta_D_local':beta_D_local, 'sigma_D':sigma_D,
'eta_D':eta_D, 'gamma_D_S':gamma_D_S, 'gamma_D_A':gamma_D_A,
'theta_E':theta_E, 'theta_pre':theta_pre, 'theta_S':theta_S, 'theta_A':theta_A,
'phi_E':phi_E, 'phi_pre':phi_pre, 'phi_S':phi_S, 'phi_A':phi_A,
'd_E':d_E, 'd_pre':d_pre, 'd_S':d_S, 'd_A':d_A, 'q':q,
'initE':initE, 'initI_pre':initI_pre, 'initI_S':initI_S, 'initI_A':initI_A,
'initH':initH, 'initR':initR, 'initF':initF,
'initD_E':initD_E, 'initD_pre':initD_pre, 'initD_S':initD_S, 'initD_A':initD_A }
self.update_parameters()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Each node can undergo 4-6 transitions (sans vitality/re-susceptibility returns to S state),
        # so there are ~numNodes*6 events/timesteps expected; initialize numNodes*5 timestep slots to start
        # (these arrays will be expanded during the run if needed)
self.tseries = numpy.zeros(5*self.numNodes)
self.numS = numpy.zeros(5*self.numNodes)
self.numE = numpy.zeros(5*self.numNodes)
self.numI_pre = numpy.zeros(5*self.numNodes)
self.numI_S = numpy.zeros(5*self.numNodes)
self.numI_A = numpy.zeros(5*self.numNodes)
self.numH = numpy.zeros(5*self.numNodes)
self.numR = numpy.zeros(5*self.numNodes)
self.numF = numpy.zeros(5*self.numNodes)
self.numD_E = numpy.zeros(5*self.numNodes)
self.numD_pre = numpy.zeros(5*self.numNodes)
self.numD_S = numpy.zeros(5*self.numNodes)
self.numD_A = numpy.zeros(5*self.numNodes)
self.N = numpy.zeros(5*self.numNodes)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Initialize Timekeeping:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.t = 0
self.tmax = 0 # will be set when run() is called
self.tidx = 0
self.tseries[0] = 0
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        # Initialize Counts of individuals with each state:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.numE[0] = int(initE)
self.numI_pre[0] = int(initI_pre)
self.numI_S[0] = int(initI_S)
self.numI_A[0] = int(initI_A)
self.numH[0] = int(initH)
self.numR[0] = int(initR)
self.numF[0] = int(initF)
self.numD_E[0] = int(initD_E)
self.numD_pre[0] = int(initD_pre)
self.numD_S[0] = int(initD_S)
self.numD_A[0] = int(initD_A)
self.numS[0] = (self.numNodes - self.numE[0] - self.numI_pre[0] - self.numI_S[0] - self.numI_A[0] - self.numH[0] - self.numR[0]
- self.numD_E[0] - self.numD_pre[0] - self.numD_S[0] - self.numD_A[0] - self.numF[0])
self.N[0] = self.numNodes - self.numF[0]
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Node states:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.S = 1
self.E = 2
self.I_pre = 3
self.I_S = 4
self.I_A = 5
self.H = 6
self.R = 7
self.F = 8
self.D_E = 9
self.D_pre = 10
self.D_S = 11
self.D_A = 12
self.X = numpy.array( [self.S]*int(self.numS[0]) + [self.E]*int(self.numE[0])
+ [self.I_pre]*int(self.numI_pre[0]) + [self.I_S]*int(self.numI_S[0]) + [self.I_A]*int(self.numI_A[0])
+ [self.H]*int(self.numH[0]) + [self.R]*int(self.numR[0]) + [self.F]*int(self.numF[0])
+ [self.D_E]*int(self.numD_E[0]) + [self.D_pre]*int(self.numD_pre[0]) + [self.D_S]*int(self.numD_S[0]) + [self.D_A]*int(self.numD_A[0])
).reshape((self.numNodes,1))
numpy.random.shuffle(self.X)
self.store_Xseries = store_Xseries
if(store_Xseries):
self.Xseries = numpy.zeros(shape=(5*self.numNodes, self.numNodes), dtype='uint8')
self.Xseries[0,:] = self.X.T
self.transitions = {
'StoE': {'currentState':self.S, 'newState':self.E},
'EtoIPRE': {'currentState':self.E, 'newState':self.I_pre},
'EtoDE': {'currentState':self.E, 'newState':self.D_E},
'IPREtoIS': {'currentState':self.I_pre, 'newState':self.I_S},
'IPREtoIA': {'currentState':self.I_pre, 'newState':self.I_A},
'IPREtoDPRE': {'currentState':self.I_pre, 'newState':self.D_pre},
'IStoH': {'currentState':self.I_S, 'newState':self.H},
'IStoR': {'currentState':self.I_S, 'newState':self.R},
'IStoDS': {'currentState':self.I_S, 'newState':self.D_S},
'IAtoR': {'currentState':self.I_A, 'newState':self.R},
'IAtoDA': {'currentState':self.I_A, 'newState':self.D_A},
'HtoR': {'currentState':self.H, 'newState':self.R},
'HtoF': {'currentState':self.H, 'newState':self.F},
'RtoS': {'currentState':self.R, 'newState':self.S},
'DEtoDPRE': {'currentState':self.D_E, 'newState':self.D_pre},
'DPREtoDS': {'currentState':self.D_pre, 'newState':self.D_S},
'DPREtoDA': {'currentState':self.D_pre, 'newState':self.D_A},
'DStoH': {'currentState':self.D_S, 'newState':self.H},
'DStoR': {'currentState':self.D_S, 'newState':self.R},
'DAtoR': {'currentState':self.D_A, 'newState':self.R},
'_toS': {'currentState':True, 'newState':self.S},
}
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Initialize node subgroup data series:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.nodeGroupData = None
if(node_groups):
self.nodeGroupData = {}
for groupName, nodeList in node_groups.items():
self.nodeGroupData[groupName] = {'nodes': numpy.array(nodeList),
'mask': numpy.isin(range(self.numNodes), nodeList).reshape((self.numNodes,1))}
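                # 'mask' is a boolean (numNodes x 1) column selecting this group's nodes;
                # multiplying it elementwise with X below restricts the compartment counts to this group.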
self.nodeGroupData[groupName]['numS'] = numpy.zeros(5*self.numNodes)
self.nodeGroupData[groupName]['numE'] = numpy.zeros(5*self.numNodes)
self.nodeGroupData[groupName]['numI_pre'] = numpy.zeros(5*self.numNodes)
self.nodeGroupData[groupName]['numI_S'] = numpy.zeros(5*self.numNodes)
self.nodeGroupData[groupName]['numI_A'] = numpy.zeros(5*self.numNodes)
self.nodeGroupData[groupName]['numH'] = numpy.zeros(5*self.numNodes)
self.nodeGroupData[groupName]['numR'] = numpy.zeros(5*self.numNodes)
self.nodeGroupData[groupName]['numF'] = numpy.zeros(5*self.numNodes)
self.nodeGroupData[groupName]['numD_E'] = numpy.zeros(5*self.numNodes)
self.nodeGroupData[groupName]['numD_pre'] = numpy.zeros(5*self.numNodes)
self.nodeGroupData[groupName]['numD_S'] = numpy.zeros(5*self.numNodes)
self.nodeGroupData[groupName]['numD_A'] = numpy.zeros(5*self.numNodes)
self.nodeGroupData[groupName]['N'] = numpy.zeros(5*self.numNodes)
self.nodeGroupData[groupName]['numS'][0] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.S)
self.nodeGroupData[groupName]['numE'][0] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.E)
self.nodeGroupData[groupName]['numI_pre'][0] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.I_pre)
self.nodeGroupData[groupName]['numI_S'][0] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.I_S)
self.nodeGroupData[groupName]['numI_A'][0] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.I_A)
self.nodeGroupData[groupName]['numH'][0] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.H)
self.nodeGroupData[groupName]['numR'][0] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.R)
self.nodeGroupData[groupName]['numF'][0] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.F)
self.nodeGroupData[groupName]['numD_E'][0] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.D_E)
self.nodeGroupData[groupName]['numD_pre'][0] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.D_pre)
                self.nodeGroupData[groupName]['numD_S'][0] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.D_S)
                self.nodeGroupData[groupName]['numD_A'][0] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.D_A)
                self.nodeGroupData[groupName]['N'][0] = (self.nodeGroupData[groupName]['numS'][0] + self.nodeGroupData[groupName]['numE'][0]
                                                         + self.nodeGroupData[groupName]['numI_pre'][0] + self.nodeGroupData[groupName]['numI_S'][0] + self.nodeGroupData[groupName]['numI_A'][0]
                                                         + self.nodeGroupData[groupName]['numH'][0] + self.nodeGroupData[groupName]['numR'][0]
                                                         + self.nodeGroupData[groupName]['numD_E'][0] + self.nodeGroupData[groupName]['numD_pre'][0]
                                                         + self.nodeGroupData[groupName]['numD_S'][0] + self.nodeGroupData[groupName]['numD_A'][0])
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def update_parameters(self):
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Model parameters:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
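        # Each parameter may be supplied either as a scalar (applied homogeneously to all nodes)
        # or as a list/array of per-node values; in both cases it is stored as a (numNodes x 1)
        # column vector so it broadcasts cleanly in the propensity calculations.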
self.beta = numpy.array(self.parameters['beta']).reshape((self.numNodes, 1)) if isinstance(self.parameters['beta'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['beta'], shape=(self.numNodes,1))
self.beta_A = (numpy.array(self.parameters['beta_A']).reshape((self.numNodes, 1)) if isinstance(self.parameters['beta_A'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['beta_A'], shape=(self.numNodes,1))) if self.parameters['beta_A'] is not None else self.beta
self.sigma = numpy.array(self.parameters['sigma']).reshape((self.numNodes, 1)) if isinstance(self.parameters['sigma'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['sigma'], shape=(self.numNodes,1))
self.lamda = numpy.array(self.parameters['lamda']).reshape((self.numNodes, 1)) if isinstance(self.parameters['lamda'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['lamda'], shape=(self.numNodes,1))
self.gamma = numpy.array(self.parameters['gamma']).reshape((self.numNodes, 1)) if isinstance(self.parameters['gamma'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['gamma'], shape=(self.numNodes,1))
self.eta = numpy.array(self.parameters['eta']).reshape((self.numNodes, 1)) if isinstance(self.parameters['eta'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['eta'], shape=(self.numNodes,1))
self.gamma_A = (numpy.array(self.parameters['gamma_A']).reshape((self.numNodes, 1))if isinstance(self.parameters['gamma_A'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['gamma_A'], shape=(self.numNodes,1))) if self.parameters['gamma_A'] is not None else self.gamma
self.gamma_H = (numpy.array(self.parameters['gamma_H']).reshape((self.numNodes, 1))if isinstance(self.parameters['gamma_H'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['gamma_H'], shape=(self.numNodes,1))) if self.parameters['gamma_H'] is not None else self.gamma
        self.mu_H = numpy.array(self.parameters['mu_H']).reshape((self.numNodes, 1)) if isinstance(self.parameters['mu_H'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['mu_H'], shape=(self.numNodes,1))
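        # ------------------------------------------------------------------
        # Illustrative usage sketch (assumptions, not confirmed by this snippet:
        # the enclosing class is an extended SEIRS network model in the style of
        # the seirsplus package, here called ExtSEIRSNetworkModel, constructed
        # from a networkx graph G plus the rate parameters referenced above;
        # the numeric values are arbitrary examples):
        #
        #   import networkx
        #   G = networkx.barabasi_albert_graph(n=10000, m=9)
        #   model = ExtSEIRSNetworkModel(G=G, beta=0.155, sigma=1/5.2, lamda=1/1.6,
        #                                gamma=1/12.4, initE=100)
        #   model.run(T=300)
        # ------------------------------------------------------------------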
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 10 17:17:41 2020
This script is for generating synthetic data.
You can use multi-class data to generate a balanced dataset.
<NAME>
<EMAIL>
BME Bogazici University
Istanbul / Uskudar
@author: abas
"""
import numpy as np
from sklearn import neighbors
from sklearn.cluster import KMeans
def EuclidianDistance(data1,data2):
"""Euclidian Distance implementation
Args:
data1 (float): data point 1
data2 (float): data point 2
Returns:
[float]: distance between two data points
"""
    dist=np.sqrt(sum([(np.square(x-y)) for x,y in zip(data1,data2)]))
    return dist
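# Example (illustrative): the Euclidean distance between (0,0,0) and (1,2,2) is 3.0
# print(EuclidianDistance([0, 0, 0], [1, 2, 2]))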