from datetime import timedelta
from functools import partial
from operator import attrgetter
import dateutil
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs import OutOfBoundsDatetime, conversion
import pandas as pd
from pandas import (
DatetimeIndex, Index, Timestamp, date_range, datetime, offsets,
to_datetime)
from pandas.core.arrays import DatetimeArray, period_array
import pandas.util.testing as tm
class TestDatetimeIndex(object):
@pytest.mark.parametrize('dt_cls', [DatetimeIndex,
DatetimeArray._from_sequence])
def test_freq_validation_with_nat(self, dt_cls):
# GH#11587 make sure we get a useful error message when generate_range
# raises
msg = ("Inferred frequency None from passed values does not conform "
"to passed frequency D")
with pytest.raises(ValueError, match=msg):
dt_cls([pd.NaT, pd.Timestamp('2011-01-01')], freq='D')
with pytest.raises(ValueError, match=msg):
dt_cls([pd.NaT, pd.Timestamp('2011-01-01').value],
freq='D')
def test_categorical_preserves_tz(self):
# GH#18664 retain tz when going DTI-->Categorical-->DTI
# TODO: parametrize over DatetimeIndex/DatetimeArray
# once CategoricalIndex(DTA) works
dti = pd.DatetimeIndex(
[pd.NaT, '2015-01-01', '1999-04-06 15:14:13', '2015-01-01'],
tz='US/Eastern')
ci = pd.CategoricalIndex(dti)
carr = pd.Categorical(dti)
cser = pd.Series(ci)
for obj in [ci, carr, cser]:
result = pd.DatetimeIndex(obj)
tm.assert_index_equal(result, dti)
def test_dti_with_period_data_raises(self):
# GH#23675
data = pd.PeriodIndex(['2016Q1', '2016Q2'], freq='Q')
with pytest.raises(TypeError, match="PeriodDtype data is invalid"):
DatetimeIndex(data)
with pytest.raises(TypeError, match="PeriodDtype data is invalid"):
to_datetime(data)
with pytest.raises(TypeError, match="PeriodDtype data is invalid"):
DatetimeIndex(period_array(data))
with pytest.raises(TypeError, match="PeriodDtype data is invalid"):
to_datetime(period_array(data))
def test_dti_with_timedelta64_data_deprecation(self):
# GH#23675
data = np.array([0], dtype='m8[ns]')
with tm.assert_produces_warning(FutureWarning):
result = DatetimeIndex(data)
assert result[0] == Timestamp('1970-01-01')
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = to_datetime(data)
assert result[0] == Timestamp('1970-01-01')
with tm.assert_produces_warning(FutureWarning):
result = DatetimeIndex(pd.TimedeltaIndex(data))
assert result[0] == Timestamp('1970-01-01')
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = to_datetime(pd.TimedeltaIndex(data))
assert result[0] == Timestamp('1970-01-01')
def test_construction_caching(self):
df = pd.DataFrame({'dt': pd.date_range('20130101', periods=3),
'dttz': pd.date_range('20130101', periods=3,
tz='US/Eastern'),
'dt_with_null': [pd.Timestamp('20130101'), pd.NaT,
pd.Timestamp('20130103')],
'dtns': pd.date_range('20130101', periods=3,
freq='ns')})
assert df.dttz.dtype.tz.zone == 'US/Eastern'
@pytest.mark.parametrize('kwargs', [
{'tz': 'dtype.tz'},
{'dtype': 'dtype'},
{'dtype': 'dtype', 'tz': 'dtype.tz'}])
def test_construction_with_alt(self, kwargs, tz_aware_fixture):
tz = tz_aware_fixture
i = pd.date_range('20130101', periods=5, freq='H', tz=tz)
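        # resolve the attribute-path strings from the parametrization
        # (e.g. 'dtype.tz') against the index constructed above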
kwargs = {key: attrgetter(val)(i) for key, val in kwargs.items()}
result = DatetimeIndex(i, **kwargs)
tm.assert_index_equal(i, result)
@pytest.mark.parametrize('kwargs', [
{'tz': 'dtype.tz'},
{'dtype': 'dtype'},
{'dtype': 'dtype', 'tz': 'dtype.tz'}])
def test_construction_with_alt_tz_localize(self, kwargs, tz_aware_fixture):
tz = tz_aware_fixture
i = pd.date_range('20130101', periods=5, freq='H', tz=tz)
kwargs = {key: attrgetter(val)(i) for key, val in kwargs.items()}
if str(tz) in ('UTC', 'tzutc()'):
warn = None
else:
warn = FutureWarning
with tm.assert_produces_warning(warn, check_stacklevel=False):
result = DatetimeIndex(i.tz_localize(None).asi8, **kwargs)
expected = DatetimeIndex(i, **kwargs)
tm.assert_index_equal(result, expected)
# localize into the provided tz
i2 = DatetimeIndex(i.tz_localize(None).asi8, tz='UTC')
expected = i.tz_localize(None).tz_localize('UTC')
tm.assert_index_equal(i2, expected)
# incompat tz/dtype
pytest.raises(ValueError, lambda: DatetimeIndex(
i.tz_localize(None).asi8, dtype=i.dtype, tz='US/Pacific'))
def test_construction_index_with_mixed_timezones(self):
# gh-11488: no tz results in DatetimeIndex
result = Index([Timestamp('2011-01-01'),
Timestamp('2011-01-02')], name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01'),
Timestamp('2011-01-02')], name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is None
# same tz results in DatetimeIndex
result = Index([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='Asia/Tokyo')],
name='idx')
exp = DatetimeIndex(
[Timestamp('2011-01-01 10:00'), Timestamp('2011-01-02 10:00')
], tz='Asia/Tokyo', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
# same tz results in DatetimeIndex (DST)
result = Index([Timestamp('2011-01-01 10:00', tz='US/Eastern'),
Timestamp('2011-08-01 10:00', tz='US/Eastern')],
name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00'),
Timestamp('2011-08-01 10:00')],
tz='US/Eastern', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
# Different tz results in Index(dtype=object)
result = Index([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
name='idx')
exp = Index([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert not isinstance(result, DatetimeIndex)
result = Index([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
name='idx')
exp = Index([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert not isinstance(result, DatetimeIndex)
# length = 1
result = Index([Timestamp('2011-01-01')], name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01')], name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is None
# length = 1 with tz
result = Index(
[Timestamp('2011-01-01 10:00', tz='Asia/Tokyo')], name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00')], tz='Asia/Tokyo',
name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
def test_construction_index_with_mixed_timezones_with_NaT(self):
# see gh-11488
result = Index([pd.NaT, Timestamp('2011-01-01'),
pd.NaT, Timestamp('2011-01-02')], name='idx')
exp = DatetimeIndex([pd.NaT, Timestamp('2011-01-01'),
pd.NaT, Timestamp('2011-01-02')], name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is None
# Same tz results in DatetimeIndex
result = Index([pd.NaT, Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
pd.NaT, Timestamp('2011-01-02 10:00',
tz='Asia/Tokyo')],
name='idx')
exp = DatetimeIndex([pd.NaT, Timestamp('2011-01-01 10:00'),
pd.NaT, Timestamp('2011-01-02 10:00')],
tz='Asia/Tokyo', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
# same tz results in DatetimeIndex (DST)
result = Index([Timestamp('2011-01-01 10:00', tz='US/Eastern'),
pd.NaT,
Timestamp('2011-08-01 10:00', tz='US/Eastern')],
name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00'), pd.NaT,
Timestamp('2011-08-01 10:00')],
tz='US/Eastern', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
# different tz results in Index(dtype=object)
result = Index([pd.NaT, Timestamp('2011-01-01 10:00'),
pd.NaT, Timestamp('2011-01-02 10:00',
tz='US/Eastern')],
name='idx')
exp = Index([pd.NaT, Timestamp('2011-01-01 10:00'),
pd.NaT, Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert not isinstance(result, DatetimeIndex)
result = Index([pd.NaT, Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
pd.NaT, Timestamp('2011-01-02 10:00',
tz='US/Eastern')], name='idx')
exp = Index([pd.NaT, Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
pd.NaT, Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert not isinstance(result, DatetimeIndex)
# all NaT
result = Index([pd.NaT, pd.NaT], name='idx')
exp = DatetimeIndex([pd.NaT, pd.NaT], name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is None
# all NaT with tz
result = Index([pd.NaT, pd.NaT], tz='Asia/Tokyo', name='idx')
exp = DatetimeIndex([pd.NaT, pd.NaT], tz='Asia/Tokyo', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
def test_construction_dti_with_mixed_timezones(self):
# GH 11488 (not changed, added explicit tests)
# no tz results in DatetimeIndex
result = DatetimeIndex(
[Timestamp('2011-01-01'), Timestamp('2011-01-02')], name='idx')
exp = DatetimeIndex(
[Timestamp('2011-01-01'), Timestamp('2011-01-02')], name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
# same tz results in DatetimeIndex
result = DatetimeIndex([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00',
tz='Asia/Tokyo')],
name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00')],
tz='Asia/Tokyo', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
# same tz results in DatetimeIndex (DST)
result = DatetimeIndex([Timestamp('2011-01-01 10:00', tz='US/Eastern'),
Timestamp('2011-08-01 10:00',
tz='US/Eastern')],
name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00'),
Timestamp('2011-08-01 10:00')],
tz='US/Eastern', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
        # a tz mismatch between tz-aware timestamps raises TypeError/ValueError
with pytest.raises(ValueError):
DatetimeIndex([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
name='idx')
msg = 'cannot be converted to datetime64'
with pytest.raises(ValueError, match=msg):
DatetimeIndex([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
tz='Asia/Tokyo', name='idx')
with pytest.raises(ValueError):
DatetimeIndex([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
tz='US/Eastern', name='idx')
with pytest.raises(ValueError, match=msg):
            # passing tz should result in a DatetimeIndex; the tz mismatch
            # then raises ValueError
Index([pd.NaT, Timestamp('2011-01-01 10:00'),
pd.NaT, Timestamp('2011-01-02 10:00', tz='US/Eastern')],
tz='Asia/Tokyo', name='idx')
def test_construction_base_constructor(self):
arr = [pd.Timestamp('2011-01-01'), pd.NaT, pd.Timestamp('2011-01-03')]
tm.assert_index_equal(pd.Index(arr), pd.DatetimeIndex(arr))
tm.assert_index_equal(pd.Index(np.array(arr)),
                              pd.DatetimeIndex(np.array(arr)))
import numpy as np
import numpy.linalg as la
import scipy.sparse as sp
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.sparsefuncs import mean_variance_axis0
from sklearn.preprocessing import Binarizer
from sklearn.preprocessing import KernelCenterer
from sklearn.preprocessing import LabelBinarizer
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import Normalizer
from sklearn.preprocessing import normalize
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import scale
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import add_dummy_feature
from sklearn import datasets
from sklearn.linear_model.stochastic_gradient import SGDClassifier
iris = datasets.load_iris()
def toarray(a):
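    """Return ``a``, densified via ``toarray()`` if it is a sparse matrix."""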
if hasattr(a, "toarray"):
a = a.toarray()
return a
def test_scaler_1d():
"""Test scaling of dataset along single axis"""
rng = np.random.RandomState(0)
X = rng.randn(5)
X_orig_copy = X.copy()
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_array_almost_equal(X_scaled_back, X_orig_copy)
# Test with 1D list
X = [0., 1., 2, 0.4, 1.]
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
X_scaled = scale(X)
assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
def test_scaler_2d_arrays():
"""Test scaling of 2d array along first axis"""
rng = np.random.RandomState(0)
X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has been copied
assert_true(X_scaled is not X)
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_scaled = scale(X, axis=1, with_std=False)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=1), 4 * [0.0])
X_scaled = scale(X, axis=1, with_std=True)
assert_false(np.any( | np.isnan(X_scaled) | numpy.isnan |
# -*- coding: utf-8 -*-
"""Routines for multiple scattering. The first half of the module contains functions to explicitly compute the
coupling matrix entries. The second half of the module contains functions for the preparation of lookup tables that
are used to approximate the coupling matrices by interpolation."""
from numba import complex128,int64,jit
from scipy.signal.filter_design import bessel
from tqdm import tqdm
import matplotlib.pyplot as plt
import numpy as np
import scipy.interpolate
import scipy.special
import smuthi.coordinates as coord
import smuthi.cuda_sources as cu
import smuthi.field_expansion as fldex
import smuthi.layers as lay
import smuthi.spherical_functions as sf
import smuthi.vector_wave_functions as vwf
import sys
try:
import pycuda.autoinit
import pycuda.driver as drv
from pycuda import gpuarray
from pycuda.compiler import SourceModule
import pycuda.cumath
except:
pass
@jit(complex128(complex128[:], complex128[:]),
nopython=True, cache=True, nogil=True)
def numba_trapz(y, x):
out = 0.0 + 0.0j
#TODO implement some (optional) advanced summation?
#e.g. https://github.com/nschloe/accupy/blob/master/accupy/sums.py
#or better Sum2 from https://doi.org/10.1137/030601818 (Algorithm 4.4)
#Note, that this may need to have exact summation for x and y, and exact product.
    for i in range(len(y) - 1):  # sum over all n-1 trapezoids
out += (x[i+1]-x[i]) * (y[i+1] + y[i])/2.0
return out
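# Purely illustrative sketch, not used elsewhere in this module: on a fine
# grid the compiled routine should agree with numpy's reference trapezoidal
# rule. The sample arrays below are arbitrary values chosen for the example.
def _numba_trapz_example():
    x = np.linspace(0.0, 1.0, 201).astype(np.complex128)
    y = np.exp(2j * np.pi * x.real)
    return numba_trapz(y, x), np.trapz(y, x)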
@jit((complex128[:], complex128[:,:,:],
complex128[:,:,:,:],complex128[:,:,:], int64),
nopython=True,cache=True
,nogil=True
# , parallel=True
)
def eval_BeLBe(BeLBe, BeL, B1, ejkz, n2):
for k in range(len(BeLBe)):
for iplmn2 in range(2):
for pol in range(2):
                BeLBe[k] += (BeL[pol, iplmn2, k] * B1[pol, iplmn2, n2, k]
                             * ejkz[1, 1 - iplmn2, k])
def layer_mediated_coupling_block(vacuum_wavelength, receiving_particle, emitting_particle, layer_system,
k_parallel='default', show_integrand=False):
"""Layer-system mediated particle coupling matrix :math:`W^R` for two particles. This routine is explicit, but slow.
Args:
vacuum_wavelength (float): Vacuum wavelength :math:`\lambda` (length unit)
receiving_particle (smuthi.particles.Particle): Particle that receives the scattered field
emitting_particle (smuthi.particles.Particle): Particle that emits the scattered field
layer_system (smuthi.layers.LayerSystem): Stratified medium in which the coupling takes place
k_parallel (numpy ndarray): In-plane wavenumbers for Sommerfeld integral
If 'default', use smuthi.coordinates.default_k_parallel
show_integrand (bool): If True, the norm of the integrand is plotted.
Returns:
Layer mediated coupling matrix block as numpy array.
"""
if type(k_parallel) == str and k_parallel == 'default':
k_parallel = coord.default_k_parallel
omega = coord.angular_frequency(vacuum_wavelength)
# index specs
lmax1 = receiving_particle.l_max
mmax1 = receiving_particle.m_max
lmax2 = emitting_particle.l_max
mmax2 = emitting_particle.m_max
blocksize1 = fldex.blocksize(lmax1, mmax1)
blocksize2 = fldex.blocksize(lmax2, mmax2)
# cylindrical coordinates of relative position vectors
rs1 = np.array(receiving_particle.position)
rs2 = np.array(emitting_particle.position)
rs2s1 = rs1 - rs2
rhos2s1 = np.linalg.norm(rs2s1[0:2])
phis2s1 = np.arctan2(rs2s1[1], rs2s1[0])
is1 = layer_system.layer_number(rs1[2])
ziss1 = rs1[2] - layer_system.reference_z(is1)
is2 = layer_system.layer_number(rs2[2])
ziss2 = rs2[2] - layer_system.reference_z(is2)
# wave numbers
kis1 = omega * layer_system.refractive_indices[is1]
kis2 = omega * layer_system.refractive_indices[is2]
kzis1 = coord.k_z(k_parallel=k_parallel, k=kis1)
kzis2 = coord.k_z(k_parallel=k_parallel, k=kis2)
# phase factors
ejkz = np.zeros((2, 2, len(k_parallel)), dtype=complex) # indices are: particle, plus/minus, kpar_idx
ejkz[0, 0, :] = np.exp(1j * kzis1 * ziss1)
ejkz[0, 1, :] = np.exp(- 1j * kzis1 * ziss1)
ejkz[1, 0, :] = np.exp(1j * kzis2 * ziss2)
ejkz[1, 1, :] = np.exp(- 1j * kzis2 * ziss2)
# layer response
L = np.zeros((2, 2, 2, len(k_parallel)), dtype=complex) # polarization, pl/mn1, pl/mn2, kpar_idx
for pol in range(2):
L[pol, :, :, :] = lay.layersystem_response_matrix(pol, layer_system.thicknesses,
layer_system.refractive_indices, k_parallel, omega, is2, is1)
# transformation coefficients
B = [np.zeros((2, 2, blocksize1, len(k_parallel)), dtype=complex),
np.zeros((2, 2, blocksize2, len(k_parallel)), dtype=complex)]
# list index: particle, np indices: pol, plus/minus, n, kpar_idx
m_vec = [np.zeros(blocksize1, dtype=int), np.zeros(blocksize2, dtype=int)]
# precompute spherical functions
ct = kzis1 / kis1
st = k_parallel / kis1
_, pilm_list_pl, taulm_list_pl = sf.legendre_normalized(ct, st, lmax1)
_, pilm_list_mn, taulm_list_mn = sf.legendre_normalized(-ct, st, lmax1)
pilm = (pilm_list_pl, pilm_list_mn)
taulm = (taulm_list_pl, taulm_list_mn)
for tau in range(2):
for m in range(-mmax1, mmax1 + 1):
for l in range(max(1, abs(m)), lmax1 + 1):
n = fldex.multi_to_single_index(tau, l, m, lmax1, mmax1)
m_vec[0][n] = m
for iplmn in range(2):
for pol in range(2):
B[0][pol, iplmn, n, :] = vwf.transformation_coefficients_vwf(tau, l, m, pol, pilm_list=pilm[iplmn],
taulm_list=taulm[iplmn], dagger=True)
ct = kzis2 / kis2
st = k_parallel / kis2
_, pilm_list_pl, taulm_list_pl = sf.legendre_normalized(ct, st, lmax2)
_, pilm_list_mn, taulm_list_mn = sf.legendre_normalized(-ct, st, lmax2)
pilm = (pilm_list_pl, pilm_list_mn)
taulm = (taulm_list_pl, taulm_list_mn)
for tau in range(2):
for m in range(-mmax2, mmax2 + 1):
for l in range(max(1, abs(m)), lmax2 + 1):
n = fldex.multi_to_single_index(tau, l, m, lmax2, mmax2)
m_vec[1][n] = m
for iplmn in range(2):
for pol in range(2):
B[1][pol, iplmn, n, :] = vwf.transformation_coefficients_vwf(tau, l, m, pol, pilm_list=pilm[iplmn],
taulm_list=taulm[iplmn], dagger=False)
# bessel function and jacobi factor
bessel_list = []
for dm in range(lmax1 + lmax2 + 1):
bessel_list.append(scipy.special.jv(dm, k_parallel * rhos2s1))
jacobi_vector = k_parallel / (kzis2 * kis2)
m2_minus_m1 = m_vec[1] - m_vec[0][np.newaxis].T
wr_const = 4 * (1j) ** abs(m2_minus_m1) * np.exp(1j * m2_minus_m1 * phis2s1)
integral = np.zeros((blocksize1, blocksize2), dtype=complex)
for n1 in range(blocksize1):
        BeL = np.zeros((2, 2, len(k_parallel)), dtype=complex)  # indices are: pol, plmn2, kpar_idx
for iplmn1 in range(2):
for pol in range(2):
BeL[pol, :, :] += (L[pol, iplmn1, :, :]
* B[0][pol, iplmn1, n1, :]
* ejkz[0, iplmn1, :])
for n2 in range(blocksize2):
bessel_full = bessel_list[abs(m_vec[0][n1] - m_vec[1][n2])]
BeLBe = np.zeros((len(k_parallel)), dtype=complex)
eval_BeLBe(BeLBe, BeL, B[1], ejkz, n2)
integrand = bessel_full * jacobi_vector * BeLBe
integral[n1,n2] = numba_trapz(integrand, k_parallel)
wr = wr_const * integral
return wr
def layer_mediated_coupling_matrix(vacuum_wavelength, particle_list, layer_system, k_parallel='default'):
"""Layer system mediated particle coupling matrix W^R for a particle collection in a layered medium.
Args:
vacuum_wavelength (float): Wavelength in length unit
        particle_list (list of smuthi.particles.Particle objects): Scattering particles
layer_system (smuthi.layers.LayerSystem): The stratified medium
k_parallel (numpy.ndarray or str): In-plane wavenumber for Sommerfeld integrals.
If 'default', smuthi.coordinates.default_k_parallel
Returns:
Ensemble coupling matrix as numpy array.
"""
# indices
blocksizes = [fldex.blocksize(particle.l_max, particle.m_max) for particle in particle_list]
# initialize result
wr = np.zeros((sum(blocksizes), sum(blocksizes)), dtype=complex)
for s1, particle1 in enumerate(particle_list):
idx1 = np.array(range(sum(blocksizes[:s1]), sum(blocksizes[:s1]) + blocksizes[s1]))
for s2, particle2 in enumerate(particle_list):
idx2 = range(sum(blocksizes[:s2]), sum(blocksizes[:s2]) + blocksizes[s2])
wr[idx1[:, None], idx2] = layer_mediated_coupling_block(vacuum_wavelength, particle1, particle2,
layer_system, k_parallel)
return wr
def direct_coupling_block(vacuum_wavelength, receiving_particle, emitting_particle, layer_system):
"""Direct particle coupling matrix :math:`W` for two particles. This routine is explicit, but slow.
Args:
vacuum_wavelength (float): Vacuum wavelength :math:`\lambda` (length unit)
receiving_particle (smuthi.particles.Particle): Particle that receives the scattered field
emitting_particle (smuthi.particles.Particle): Particle that emits the scattered field
layer_system (smuthi.layers.LayerSystem): Stratified medium in which the coupling takes place
Returns:
Direct coupling matrix block as numpy array.
"""
omega = coord.angular_frequency(vacuum_wavelength)
# index specs
lmax1 = receiving_particle.l_max
mmax1 = receiving_particle.m_max
lmax2 = emitting_particle.l_max
mmax2 = emitting_particle.m_max
blocksize1 = fldex.blocksize(lmax1, mmax1)
blocksize2 = fldex.blocksize(lmax2, mmax2)
# initialize result
w = np.zeros((blocksize1, blocksize2), dtype=complex)
# check if particles are in same layer
rS1 = receiving_particle.position
rS2 = emitting_particle.position
iS1 = layer_system.layer_number(rS1[2])
iS2 = layer_system.layer_number(rS2[2])
if iS1 == iS2 and not emitting_particle == receiving_particle:
k = omega * layer_system.refractive_indices[iS1]
dx = rS1[0] - rS2[0]
dy = rS1[1] - rS2[1]
dz = rS1[2] - rS2[2]
d = np.sqrt(dx**2 + dy**2 + dz**2)
cos_theta = dz / d
sin_theta = np.sqrt(dx**2 + dy**2) / d
phi = np.arctan2(dy, dx)
# spherical functions
bessel_h = [sf.spherical_hankel(n, k * d) for n in range(lmax1 + lmax2 + 1)]
legendre, _, _ = sf.legendre_normalized(cos_theta, sin_theta, lmax1 + lmax2)
# the particle coupling operator is the transpose of the SVWF translation operator
# therefore, (l1,m1) and (l2,m2) are interchanged:
for m1 in range(-mmax1, mmax1 + 1):
for m2 in range(-mmax2, mmax2 + 1):
eimph = np.exp(1j * (m2 - m1) * phi)
for l1 in range(max(1, abs(m1)), lmax1 + 1):
for l2 in range(max(1, abs(m2)), lmax2 + 1):
A, B = complex(0), complex(0)
for ld in range(max(abs(l1 - l2), abs(m1 - m2)), l1 + l2 + 1): # if ld<abs(m1-m2) then P=0
a5, b5 = vwf.ab5_coefficients(l2, m2, l1, m1, ld)
A += a5 * bessel_h[ld] * legendre[ld][abs(m1 - m2)]
B += b5 * bessel_h[ld] * legendre[ld][abs(m1 - m2)]
A, B = eimph * A, eimph * B
for tau1 in range(2):
n1 = fldex.multi_to_single_index(tau1, l1, m1, lmax1, mmax1)
for tau2 in range(2):
n2 = fldex.multi_to_single_index(tau2, l2, m2, lmax2, mmax2)
if tau1 == tau2:
w[n1, n2] = A
else:
w[n1, n2] = B
return w
def direct_coupling_matrix(vacuum_wavelength, particle_list, layer_system):
"""Return the direct particle coupling matrix W for a particle collection in a layered medium.
Args:
vacuum_wavelength (float): Wavelength in length unit
        particle_list (list of smuthi.particles.Particle objects): Scattering particles
layer_system (smuthi.layers.LayerSystem): The stratified medium
Returns:
Ensemble coupling matrix as numpy array.
"""
# indices
blocksizes = [fldex.blocksize(particle.l_max, particle.m_max)
for particle in particle_list]
# initialize result
w = np.zeros((sum(blocksizes), sum(blocksizes)), dtype=complex)
for s1, particle1 in enumerate(particle_list):
idx1 = np.array(range(sum(blocksizes[:s1]), sum(blocksizes[:s1+1])))
for s2, particle2 in enumerate(particle_list):
idx2 = range(sum(blocksizes[:s2]), sum(blocksizes[:s2+1]))
w[idx1[:, None], idx2] = direct_coupling_block(vacuum_wavelength, particle1, particle2, layer_system)
return w
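# The two explicit routines above are typically used together: the total
# particle coupling is the sum of the direct and the layer-mediated
# contributions, e.g. (illustrative only, variable names are placeholders)
#
#     w_total = (direct_coupling_matrix(wl, particles, layers)
#                + layer_mediated_coupling_matrix(wl, particles, layers))
#
# The lookup-table routines below serve the same purpose via interpolation
# when the number of particles is large.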
def volumetric_coupling_lookup_table(vacuum_wavelength, particle_list, layer_system, k_parallel='default',
resolution=None):
"""Prepare Sommerfeld integral lookup table to allow for a fast calculation of the coupling matrix by interpolation.
This function is called when not all particles are on the same z-position.
Args:
vacuum_wavelength (float): Vacuum wavelength in length units
particle_list (list): List of particle objects
layer_system (smuthi.layers.LayerSystem): Stratified medium
k_parallel (numpy.ndarray or str): In-plane wavenumber for Sommerfeld integrals.
If 'default', smuthi.coordinates.default_k_parallel
resolution (float): Spatial resolution of lookup table in length units. (default: vacuum_wavelength / 100)
Smaller means more accurate but higher memory footprint
Returns:
(tuple): tuple containing:
w_pl (ndarray): Coupling lookup for z1 + z2, indices are [rho, z, n1, n2]. Includes layer mediated coupling.
        w_mn (ndarray): Coupling lookup for z1 - z2, indices are [rho, z, n1, n2]. Includes layer mediated and
direct coupling.
rho_array (ndarray): Values for the radial distance considered for the lookup (starting from negative
numbers to allow for simpler cubic interpolation without distinction of cases
                             at the lookup edges)
sz_array (ndarray): Values for the sum of z-coordinates (z1 + z2) considered for the lookup
dz_array (ndarray): Values for the difference of z-coordinates (z1 - z2) considered for the lookup
"""
sys.stdout.write('Prepare 3D particle coupling lookup:\n')
sys.stdout.flush()
if resolution is None:
resolution = vacuum_wavelength / 100
sys.stdout.write('Setting lookup resolution to %f\n'%resolution)
sys.stdout.flush()
l_max = max([particle.l_max for particle in particle_list])
m_max = max([particle.m_max for particle in particle_list])
blocksize = fldex.blocksize(l_max, m_max)
particle_x_array = np.array([particle.position[0] for particle in particle_list])
particle_y_array = np.array([particle.position[1] for particle in particle_list])
particle_z_array = np.array([particle.position[2] for particle in particle_list])
particle_rho_array = np.sqrt((particle_x_array[:, None] - particle_x_array[None, :]) ** 2
+ (particle_y_array[:, None] - particle_y_array[None, :]) ** 2)
dz_min = particle_z_array.min() - particle_z_array.max()
dz_max = particle_z_array.max() - particle_z_array.min()
sz_min = 2 * particle_z_array.min()
sz_max = 2 * particle_z_array.max()
rho_array = np.arange(- 3 * resolution, particle_rho_array.max() + 3 * resolution, resolution)
sz_array = np.arange(sz_min - 3 * resolution, sz_max + 3 * resolution, resolution)
dz_array = np.arange(dz_min - 3 * resolution, dz_max + 3 * resolution, resolution)
len_rho = len(rho_array)
len_sz = len(sz_array)
len_dz = len(dz_array)
assert len_sz == len_dz
i_s = layer_system.layer_number(particle_list[0].position[2])
k_is = layer_system.wavenumber(i_s, vacuum_wavelength)
z_is = layer_system.reference_z(i_s)
# direct -----------------------------------------------------------------------------------------------------------
w = np.zeros((len_rho, len_dz, blocksize, blocksize), dtype=np.complex64)
sys.stdout.write('Lookup table memory footprint: ' + size_format(2 * w.nbytes) + '\n')
sys.stdout.flush()
r_array = np.sqrt(dz_array[None, :]**2 + rho_array[:, None]**2)
r_array[r_array==0] = 1e-20
ct = dz_array[None, :] / r_array
st = rho_array[:, None] / r_array
legendre, _, _ = sf.legendre_normalized(ct, st, 2 * l_max)
bessel_h = []
for dm in tqdm(range(2 * l_max + 1), desc='Spherical Hankel lookup ', file=sys.stdout,
bar_format='{l_bar}{bar}| elapsed: {elapsed} remaining: {remaining}'):
bessel_h.append(sf.spherical_hankel(dm, k_is * r_array))
pbar = tqdm(total=blocksize**2,
desc='Direct coupling ',
file=sys.stdout,
bar_format='{l_bar}{bar}| elapsed: {elapsed} remaining: {remaining}')
for m1 in range(-m_max, m_max+1):
for m2 in range(-m_max, m_max+1):
for l1 in range(max(1, abs(m1)), l_max + 1):
for l2 in range(max(1, abs(m2)), l_max + 1):
A = np.zeros((len_rho, len_dz), dtype=complex)
B = np.zeros((len_rho, len_dz), dtype=complex)
for ld in range(max(abs(l1 - l2), abs(m1 - m2)), l1 + l2 + 1): # if ld<abs(m1-m2) then P=0
a5, b5 = vwf.ab5_coefficients(l2, m2, l1, m1, ld) # remember that w = A.T
A += a5 * bessel_h[ld] * legendre[ld][abs(m1 - m2)] # remember that w = A.T
B += b5 * bessel_h[ld] * legendre[ld][abs(m1 - m2)] # remember that w = A.T
for tau1 in range(2):
n1 = fldex.multi_to_single_index(tau1, l1, m1, l_max, m_max)
for tau2 in range(2):
n2 = fldex.multi_to_single_index(tau2, l2, m2, l_max, m_max)
if tau1 == tau2:
w[:, :, n1, n2] = A
else:
w[:, :, n1, n2] = B
pbar.update()
pbar.close()
# switch off direct coupling contribution near rho=0:
w[rho_array < particle_rho_array[~np.eye(particle_rho_array.shape[0],dtype=bool)].min() / 2, :, :, :] = 0
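    # (the direct term is only ever queried at actual inter-particle
    # separations; zeroing the small-rho region discards the spurious,
    # near-singular Hankel values around r -> 0, i.e. the self terms)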
# layer mediated ---------------------------------------------------------------------------------------------------
sys.stdout.write('Layer mediated coupling : ...')
sys.stdout.flush()
if type(k_parallel) == str and k_parallel == 'default':
k_parallel = coord.default_k_parallel
kz_is = coord.k_z(k_parallel=k_parallel, k=k_is)
len_kp = len(k_parallel)
# phase factors
epljksz = np.exp(1j * kz_is[None, :] * (sz_array[:, None] - 2 * z_is)) # z, k
emnjksz = np.exp(- 1j * kz_is[None, :] * (sz_array[:, None] - 2 * z_is))
epljkdz = np.exp(1j * kz_is[None, :] * dz_array[:, None])
emnjkdz = np.exp(- 1j * kz_is[None, :] * dz_array[:, None])
# layer response
L = np.zeros((2, 2, 2, len_kp), dtype=complex) # pol, pl/mn1, pl/mn2, kp
for pol in range(2):
L[pol, :, :, :] = lay.layersystem_response_matrix(pol, layer_system.thicknesses,
layer_system.refractive_indices, k_parallel,
coord.angular_frequency(vacuum_wavelength), i_s, i_s)
# transformation coefficients
B_dag = np.zeros((2, 2, blocksize, len_kp), dtype=complex) # pol, pl/mn, n, kp
B = np.zeros((2, 2, blocksize, len_kp), dtype=complex) # pol, pl/mn, n, kp
ct_k = kz_is / k_is
st_k = k_parallel / k_is
_, pilm_pl, taulm_pl = sf.legendre_normalized(ct_k, st_k, l_max)
_, pilm_mn, taulm_mn = sf.legendre_normalized(-ct_k, st_k, l_max)
m_list = [None for i in range(blocksize)]
for tau in range(2):
for m in range(-m_max, m_max + 1):
for l in range(max(1, abs(m)), l_max + 1):
n = fldex.multi_to_single_index(tau, l, m, l_max, m_max)
m_list[n] = m
for pol in range(2):
B_dag[pol, 0, n, :] = vwf.transformation_coefficients_vwf(tau, l, m, pol, pilm_list=pilm_pl,
taulm_list=taulm_pl, dagger=True)
B_dag[pol, 1, n, :] = vwf.transformation_coefficients_vwf(tau, l, m, pol, pilm_list=pilm_mn,
taulm_list=taulm_mn, dagger=True)
B[pol, 0, n, :] = vwf.transformation_coefficients_vwf(tau, l, m, pol, pilm_list=pilm_pl,
taulm_list=taulm_pl, dagger=False)
B[pol, 1, n, :] = vwf.transformation_coefficients_vwf(tau, l, m, pol, pilm_list=pilm_mn,
taulm_list=taulm_mn, dagger=False)
# pairs of (n1, n2), listed by abs(m1-m2)
n1n2_combinations = [[] for dm in range(2*m_max+1)]
for n1 in range(blocksize):
m1 = m_list[n1]
for n2 in range(blocksize):
m2 = m_list[n2]
n1n2_combinations[abs(m1-m2)].append((n1,n2))
wr_pl = np.zeros((len_rho, len_dz, blocksize, blocksize), dtype=np.complex64)
wr_mn = np.zeros((len_rho, len_dz, blocksize, blocksize), dtype=np.complex64)
dkp = np.diff(k_parallel)
if cu.use_gpu:
re_dkp_d = gpuarray.to_gpu(np.float32(dkp.real))
im_dkp_d = gpuarray.to_gpu(np.float32(dkp.imag))
kernel_source_code = cu.volume_lookup_assembly_code %(blocksize, len_rho, len_sz, len_kp)
helper_function = SourceModule(kernel_source_code).get_function("helper")
cuda_blocksize = 128
cuda_gridsize = (len_rho * len_sz + cuda_blocksize - 1) // cuda_blocksize
re_dwr_d = gpuarray.to_gpu(np.zeros((len_rho, len_sz), dtype=np.float32))
        im_dwr_d = gpuarray.to_gpu(np.zeros((len_rho, len_sz), dtype=np.float32))
# -*- coding: utf-8 -*-
# Authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# License: BSD (3-clause)
from collections import Counter
from functools import partial
from math import factorial
from os import path as op
import numpy as np
from scipy import linalg
from .. import __version__
from ..annotations import _annotations_starts_stops
from ..bem import _check_origin
from ..transforms import (_str_to_frame, _get_trans, Transform, apply_trans,
_find_vector_rotation, _cart_to_sph, _get_n_moments,
_sph_to_cart_partials, _deg_ord_idx, _average_quats,
_sh_complex_to_real, _sh_real_to_complex, _sh_negate,
quat_to_rot, rot_to_quat)
from ..forward import _concatenate_coils, _prep_meg_channels, _create_meg_coils
from ..surface import _normalize_vectors
from ..io.constants import FIFF, FWD
from ..io.meas_info import _simplify_info
from ..io.proc_history import _read_ctc
from ..io.write import _generate_meas_id, DATE_NONE
from ..io import _loc_to_coil_trans, _coil_trans_to_loc, BaseRaw, RawArray
from ..io.pick import pick_types, pick_info
from ..utils import (verbose, logger, _clean_names, warn, _time_mask, _pl,
_check_option, _ensure_int, _validate_type)
from ..fixes import _get_args, _safe_svd, einsum, bincount
from ..channels.channels import _get_T1T2_mag_inds
# Note: MF uses single precision and some algorithms might use
# truncated versions of constants (e.g., μ0), which could lead to small
# differences between algorithms
# Changes to arguments here should also be made in find_bad_channels_maxwell
@verbose
def maxwell_filter(raw, origin='auto', int_order=8, ext_order=3,
calibration=None, cross_talk=None, st_duration=None,
st_correlation=0.98, coord_frame='head', destination=None,
regularize='in', ignore_ref=False, bad_condition='error',
head_pos=None, st_fixed=True, st_only=False, mag_scale=100.,
skip_by_annotation=('edge', 'bad_acq_skip'), verbose=None):
"""Maxwell filter data using multipole moments.
Parameters
----------
raw : instance of mne.io.Raw
Data to be filtered.
.. warning:: It is critical to mark bad channels in
``raw.info['bads']`` prior to processing in order to
prevent artifact spreading. Manual inspection and use
of :func:`~find_bad_channels_maxwell` is recommended.
%(maxwell_origin_int_ext_calibration_cross)s
st_duration : float | None
If not None, apply spatiotemporal SSS with specified buffer duration
(in seconds). MaxFilter™'s default is 10.0 seconds in v2.2.
        Spatiotemporal SSS acts implicitly as a high-pass filter where the
cut-off frequency is 1/st_duration Hz. For this (and other) reasons,
longer buffers are generally better as long as your system can handle
the higher memory usage. To ensure that each window is processed
identically, choose a buffer length that divides evenly into your data.
Any data at the trailing edge that doesn't fit evenly into a whole
buffer window will be lumped into the previous buffer.
st_correlation : float
Correlation limit between inner and outer subspaces used to reject
        overlapping inner/outer signals during spatiotemporal SSS.
%(maxwell_coord)s
destination : str | array-like, shape (3,) | None
The destination location for the head. Can be ``None``, which
will not change the head position, or a string path to a FIF file
containing a MEG device<->head transformation, or a 3-element array
giving the coordinates to translate to (with no rotations).
For example, ``destination=(0, 0, 0.04)`` would translate the bases
as ``--trans default`` would in MaxFilter™ (i.e., to the default
head location).
%(maxwell_reg_ref_cond_pos)s
.. versionadded:: 0.12
%(maxwell_st_fixed_only)s
%(maxwell_mag)s
.. versionadded:: 0.13
%(maxwell_skip)s
.. versionadded:: 0.17
%(verbose)s
Returns
-------
raw_sss : instance of mne.io.Raw
The raw data with Maxwell filtering applied.
See Also
--------
mne.preprocessing.mark_flat
mne.preprocessing.find_bad_channels_maxwell
mne.chpi.filter_chpi
mne.chpi.read_head_pos
mne.epochs.average_movements
Notes
-----
.. versionadded:: 0.11
Some of this code was adapted and relicensed (with BSD form) with
permission from <NAME>. These algorithms are based on work
from [1]_ and [2]_. It will likely use multiple CPU cores, see the
:ref:`FAQ <faq_cpu>` for more information.
.. warning:: Maxwell filtering in MNE is not designed or certified
for clinical use.
Compared to the MEGIN MaxFilter™ software, the MNE Maxwell filtering
routines currently provide the following features:
.. table::
:widths: auto
+-----------------------------------------------------------------------------+-----+-----------+
| Feature | MNE | MaxFilter |
+=============================================================================+=====+===========+
| Maxwell filtering software shielding | ✓ | ✓ |
+-----------------------------------------------------------------------------+-----+-----------+
| Bad channel reconstruction | ✓ | ✓ |
+-----------------------------------------------------------------------------+-----+-----------+
| Cross-talk cancellation | ✓ | ✓ |
+-----------------------------------------------------------------------------+-----+-----------+
| Fine calibration correction (1D) | ✓ | ✓ |
+-----------------------------------------------------------------------------+-----+-----------+
| Fine calibration correction (3D) | ✓ | |
+-----------------------------------------------------------------------------+-----+-----------+
| Spatio-temporal SSS (tSSS) | ✓ | ✓ |
+-----------------------------------------------------------------------------+-----+-----------+
| Coordinate frame translation | ✓ | ✓ |
+-----------------------------------------------------------------------------+-----+-----------+
| Regularization using information theory | ✓ | ✓ |
+-----------------------------------------------------------------------------+-----+-----------+
| Movement compensation (raw) | ✓ | ✓ |
+-----------------------------------------------------------------------------+-----+-----------+
| Movement compensation (:func:`epochs <mne.epochs.average_movements>`) | ✓ | |
+-----------------------------------------------------------------------------+-----+-----------+
| :func:`cHPI subtraction <mne.chpi.filter_chpi>` | ✓ | ✓ |
+-----------------------------------------------------------------------------+-----+-----------+
| Double floating point precision | ✓ | |
+-----------------------------------------------------------------------------+-----+-----------+
| Seamless processing of split (``-1.fif``) and concatenated files | ✓ | |
+-----------------------------------------------------------------------------+-----+-----------+
| Automatic bad channel detection (:func:`~find_bad_channels_maxwell`) | ✓ | ✓ |
+-----------------------------------------------------------------------------+-----+-----------+
| Head position estimation (:func:`~mne.chpi.compute_head_pos`) | ✓ | ✓ |
+-----------------------------------------------------------------------------+-----+-----------+
| Certified for clinical use | | ✓ |
+-----------------------------------------------------------------------------+-----+-----------+
Epoch-based movement compensation is described in [1]_.
Use of Maxwell filtering routines with non-Neuromag systems is currently
**experimental**. Worse results for non-Neuromag systems are expected due
to (at least):
* Missing fine-calibration and cross-talk cancellation data for
other systems.
* Processing with reference sensors has not been vetted.
* Regularization of components may not work well for all systems.
* Coil integration has not been optimized using Abramowitz/Stegun
definitions.
.. note:: Various Maxwell filtering algorithm components are covered by
patents owned by MEGIN. These patents include, but may not be
limited to:
- US2006031038 (Signal Space Separation)
- US6876196 (Head position determination)
- WO2005067789 (DC fields)
- WO2005078467 (MaxShield)
- WO2006114473 (Temporal Signal Space Separation)
These patents likely preclude the use of Maxwell filtering code
in commercial applications. Consult a lawyer if necessary.
Currently, in order to perform Maxwell filtering, the raw data must not
have any projectors applied. During Maxwell filtering, the spatial
structure of the data is modified, so projectors are discarded (unless
in ``st_only=True`` mode).
References
----------
.. [1] <NAME>. and <NAME>. "Presentation of electromagnetic
multichannel data: The signal space separation method,"
Journal of Applied Physics, vol. 97, pp. 124905 1-10, 2005.
https://doi.org/10.1063/1.1935742
.. [2] <NAME>. and <NAME>. "Spatiotemporal signal space separation
method for rejecting nearby interference in MEG measurements,"
Physics in Medicine and Biology, vol. 51, pp. 1759-1768, 2006.
https://doi.org/10.1088/0031-9155/51/7/008
""" # noqa: E501
logger.info('Maxwell filtering raw data')
params = _prep_maxwell_filter(
raw=raw, origin=origin, int_order=int_order, ext_order=ext_order,
calibration=calibration, cross_talk=cross_talk,
st_duration=st_duration, st_correlation=st_correlation,
coord_frame=coord_frame, destination=destination,
regularize=regularize, ignore_ref=ignore_ref,
bad_condition=bad_condition, head_pos=head_pos, st_fixed=st_fixed,
st_only=st_only, mag_scale=mag_scale,
skip_by_annotation=skip_by_annotation)
raw_sss = _run_maxwell_filter(raw, **params)
# Update info
_update_sss_info(raw_sss, **params['update_kwargs'])
logger.info('[done]')
return raw_sss
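def _maxwell_filter_usage_sketch(raw):
    """Illustrative sketch only, not part of the public API.

    Shows a typical tSSS call on an already loaded Raw object. The parameter
    values are assumptions chosen for demonstration, not recommendations;
    bad channels must already be marked in ``raw.info['bads']``.
    """
    return maxwell_filter(raw, st_duration=10., st_correlation=0.98,
                          coord_frame='head', regularize='in')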
@verbose
def _prep_maxwell_filter(
raw, origin='auto', int_order=8, ext_order=3,
calibration=None, cross_talk=None, st_duration=None,
st_correlation=0.98, coord_frame='head', destination=None,
regularize='in', ignore_ref=False, bad_condition='error',
head_pos=None, st_fixed=True, st_only=False,
mag_scale=100.,
skip_by_annotation=('edge', 'bad_acq_skip'),
reconstruct='in', verbose=None):
# There are an absurd number of different possible notations for spherical
# coordinates, which confounds the notation for spherical harmonics. Here,
# we purposefully stay away from shorthand notation in both and use
# explicit terms (like 'azimuth' and 'polar') to avoid confusion.
# See mathworld.wolfram.com/SphericalHarmonic.html for more discussion.
# Our code follows the same standard that ``scipy`` uses for ``sph_harm``.
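    # (for reference: ``scipy.special.sph_harm(m, n, theta, phi)`` takes the
    # azimuthal angle ``theta`` first and the polar angle ``phi`` second)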
# triage inputs ASAP to avoid late-thrown errors
_validate_type(raw, BaseRaw, 'raw')
_check_usable(raw)
_check_regularize(regularize)
st_correlation = float(st_correlation)
if st_correlation <= 0. or st_correlation > 1.:
raise ValueError('Need 0 < st_correlation <= 1., got %s'
% st_correlation)
_check_option('coord_frame', coord_frame, ['head', 'meg'])
head_frame = True if coord_frame == 'head' else False
recon_trans = _check_destination(destination, raw.info, head_frame)
if st_duration is not None:
st_duration = float(st_duration)
st_correlation = float(st_correlation)
st_duration = int(round(st_duration * raw.info['sfreq']))
if not 0. < st_correlation <= 1:
raise ValueError('st_correlation must be between 0. and 1.')
_check_option('bad_condition', bad_condition,
['error', 'warning', 'ignore', 'info'])
if raw.info['dev_head_t'] is None and coord_frame == 'head':
raise RuntimeError('coord_frame cannot be "head" because '
'info["dev_head_t"] is None; if this is an '
'empty room recording, consider using '
'coord_frame="meg"')
if st_only and st_duration is None:
raise ValueError('st_duration must not be None if st_only is True')
head_pos = _check_pos(head_pos, head_frame, raw, st_fixed,
raw.info['sfreq'])
_check_info(raw.info, sss=not st_only, tsss=st_duration is not None,
calibration=not st_only and calibration is not None,
ctc=not st_only and cross_talk is not None)
# Now we can actually get moving
info = raw.info.copy()
meg_picks, mag_picks, grad_picks, good_mask, mag_or_fine = \
_get_mf_picks(info, int_order, ext_order, ignore_ref)
# Magnetometers are scaled to improve numerical stability
coil_scale, mag_scale = _get_coil_scale(
meg_picks, mag_picks, grad_picks, mag_scale, info)
#
# Fine calibration processing (load fine cal and overwrite sensor geometry)
#
sss_cal = dict()
if calibration is not None:
calibration, sss_cal = _update_sensor_geometry(
info, calibration, ignore_ref)
mag_or_fine.fill(True) # all channels now have some mag-type data
# Determine/check the origin of the expansion
origin = _check_origin(origin, info, coord_frame, disp=True)
# Convert to the head frame
if coord_frame == 'meg' and info['dev_head_t'] is not None:
origin_head = apply_trans(info['dev_head_t'], origin)
else:
origin_head = origin
update_kwargs = dict(
origin=origin, coord_frame=coord_frame, sss_cal=sss_cal,
int_order=int_order, ext_order=ext_order)
del origin, coord_frame, sss_cal
origin_head.setflags(write=False)
#
# Cross-talk processing
#
sss_ctc = dict()
ctc = None
if cross_talk is not None:
sss_ctc = _read_ctc(cross_talk)
ctc_chs = sss_ctc['proj_items_chs']
meg_ch_names = [info['ch_names'][p] for p in meg_picks]
# checking for extra space ambiguity in channel names
# between old and new fif files
if meg_ch_names[0] not in ctc_chs:
ctc_chs = _clean_names(ctc_chs, remove_whitespace=True)
missing = sorted(list(set(meg_ch_names) - set(ctc_chs)))
if len(missing) != 0:
raise RuntimeError('Missing MEG channels in cross-talk matrix:\n%s'
% missing)
missing = sorted(list(set(ctc_chs) - set(meg_ch_names)))
if len(missing) > 0:
warn('Not all cross-talk channels in raw:\n%s' % missing)
ctc_picks = [ctc_chs.index(info['ch_names'][c]) for c in meg_picks]
ctc = sss_ctc['decoupler'][ctc_picks][:, ctc_picks]
# I have no idea why, but MF transposes this for storage..
sss_ctc['decoupler'] = sss_ctc['decoupler'].T.tocsc()
update_kwargs['sss_ctc'] = sss_ctc
del sss_ctc
#
# Translate to destination frame (always use non-fine-cal bases)
#
exp = dict(origin=origin_head, int_order=int_order, ext_order=0)
all_coils = _prep_mf_coils(info, ignore_ref)
S_recon = _trans_sss_basis(exp, all_coils, recon_trans, coil_scale)
exp['ext_order'] = ext_order
# Reconstruct data from internal space only (Eq. 38), and rescale S_recon
S_recon /= coil_scale
if recon_trans is not None:
# warn if we have translated too far
diff = 1000 * (info['dev_head_t']['trans'][:3, 3] -
recon_trans['trans'][:3, 3])
dist = np.sqrt(np.sum(_sq(diff)))
if dist > 25.:
warn('Head position change is over 25 mm (%s) = %0.1f mm'
% (', '.join('%0.1f' % x for x in diff), dist))
# Reconstruct raw file object with spatiotemporal processed data
max_st = dict()
if st_duration is not None:
if st_only:
job = FIFF.FIFFV_SSS_JOB_TPROJ
else:
job = FIFF.FIFFV_SSS_JOB_ST
max_st.update(job=job, subspcorr=st_correlation,
buflen=st_duration / info['sfreq'])
logger.info(' Processing data using tSSS with st_duration=%s'
% max_st['buflen'])
st_when = 'before' if st_fixed else 'after' # relative to movecomp
else:
# st_duration from here on will act like the chunk size
st_duration = min(max(int(round(10. * info['sfreq'])), 1),
len(raw.times))
st_correlation = None
st_when = 'never'
update_kwargs['max_st'] = max_st
del st_fixed, max_st
# Figure out which transforms we need for each tSSS block
# (and transform pos[1] to times)
head_pos[1] = raw.time_as_index(head_pos[1], use_rounding=True)
# Compute the first bit of pos_data for cHPI reporting
if info['dev_head_t'] is not None and head_pos[0] is not None:
this_pos_quat = np.concatenate([
rot_to_quat(info['dev_head_t']['trans'][:3, :3]),
info['dev_head_t']['trans'][:3, 3],
np.zeros(3)])
else:
this_pos_quat = None
_get_this_decomp_trans = partial(
_get_decomp, all_coils=all_coils,
cal=calibration, regularize=regularize,
exp=exp, ignore_ref=ignore_ref, coil_scale=coil_scale,
grad_picks=grad_picks, mag_picks=mag_picks, good_mask=good_mask,
mag_or_fine=mag_or_fine, bad_condition=bad_condition,
mag_scale=mag_scale)
update_kwargs.update(
nchan=good_mask.sum(), st_only=st_only, recon_trans=recon_trans)
params = dict(
skip_by_annotation=skip_by_annotation,
st_duration=st_duration, st_correlation=st_correlation,
st_only=st_only, st_when=st_when, ctc=ctc, coil_scale=coil_scale,
this_pos_quat=this_pos_quat, meg_picks=meg_picks,
good_mask=good_mask, grad_picks=grad_picks, head_pos=head_pos,
info=info, _get_this_decomp_trans=_get_this_decomp_trans,
S_recon=S_recon, update_kwargs=update_kwargs)
return params
def _run_maxwell_filter(
raw, skip_by_annotation, st_duration, st_correlation, st_only,
st_when, ctc, coil_scale, this_pos_quat, meg_picks, good_mask,
grad_picks, head_pos, info, _get_this_decomp_trans, S_recon,
update_kwargs,
reconstruct='in', count_msg=True, copy=True):
# Eventually find_bad_channels_maxwell could be sped up by moving this
# outside the loop (e.g., in the prep function) but regularization depends
# on which channels are being used, so easier just to include it here.
# The time it takes to recompute S and pS themselves is roughly on par
# with the np.dot with the data, so not a huge gain to be made there.
S_decomp, S_decomp_full, pS_decomp, reg_moments, n_use_in = \
_get_this_decomp_trans(info['dev_head_t'], t=0.)
update_kwargs.update(reg_moments=reg_moments.copy())
if ctc is not None:
ctc = ctc[good_mask][:, good_mask]
add_channels = (head_pos[0] is not None) and (not st_only) and copy
raw_sss, pos_picks = _copy_preload_add_channels(raw, add_channels, copy)
sfreq = info['sfreq']
del raw
if not st_only:
# remove MEG projectors, they won't apply now
_remove_meg_projs(raw_sss)
# Figure out which segments of data we can use
onsets, ends = _annotations_starts_stops(
raw_sss, skip_by_annotation, invert=True)
max_samps = (ends - onsets).max()
if not 0. < st_duration <= max_samps + 1.:
raise ValueError('st_duration (%0.1fs) must be between 0 and the '
'longest contiguous duration of the data '
'(%0.1fs).' % (st_duration / sfreq,
max_samps / sfreq))
# Generate time points to break up data into equal-length windows
starts, stops = list(), list()
for onset, end in zip(onsets, ends):
read_lims = np.arange(onset, end + 1, st_duration)
if len(read_lims) == 1:
read_lims = np.concatenate([read_lims, [end]])
if read_lims[-1] != end:
read_lims[-1] = end
# fold it into the previous buffer
n_last_buf = read_lims[-1] - read_lims[-2]
if st_correlation is not None and len(read_lims) > 2:
if n_last_buf >= st_duration:
logger.info(
                    ' Spatiotemporal window did not fit evenly into '
'contiguous data segment. %0.2f seconds were lumped '
'into the previous window.'
% ((n_last_buf - st_duration) / sfreq,))
else:
logger.info(
' Contiguous data segment of duration %0.2f '
'seconds is too short to be processed with tSSS '
'using duration %0.2f'
% (n_last_buf / sfreq, st_duration / sfreq))
assert len(read_lims) >= 2
assert read_lims[0] == onset and read_lims[-1] == end
starts.extend(read_lims[:-1])
stops.extend(read_lims[1:])
del read_lims
st_duration = min(max_samps, st_duration)
# Loop through buffer windows of data
    n_sig = int(np.floor(np.log10(max(len(starts), 1)))) + 1
if count_msg:
logger.info(
' Processing %s data chunk%s' % (len(starts), _pl(starts)))
for ii, (start, stop) in enumerate(zip(starts, stops)):
tsss_valid = (stop - start) >= st_duration
rel_times = raw_sss.times[start:stop]
t_str = '%8.3f - %8.3f sec' % tuple(rel_times[[0, -1]])
t_str += ('(#%d/%d)' % (ii + 1, len(starts))).rjust(2 * n_sig + 5)
# Get original data
orig_data = raw_sss._data[meg_picks[good_mask], start:stop]
# This could just be np.empty if not st_only, but shouldn't be slow
# this way so might as well just always take the original data
out_meg_data = raw_sss._data[meg_picks, start:stop]
# Apply cross-talk correction
if ctc is not None:
orig_data = ctc.dot(orig_data)
out_pos_data = np.empty((len(pos_picks), stop - start))
# Figure out which positions to use
t_s_s_q_a = _trans_starts_stops_quats(head_pos, start, stop,
this_pos_quat)
n_positions = len(t_s_s_q_a[0])
# Set up post-tSSS or do pre-tSSS
if st_correlation is not None:
# If doing tSSS before movecomp...
resid = orig_data.copy() # to be safe let's operate on a copy
if st_when == 'after':
orig_in_data = np.empty((len(meg_picks), stop - start))
else: # 'before'
avg_trans = t_s_s_q_a[-1]
if avg_trans is not None:
# if doing movecomp
S_decomp_st, _, pS_decomp_st, _, n_use_in_st = \
_get_this_decomp_trans(avg_trans, t=rel_times[0])
else:
S_decomp_st, pS_decomp_st = S_decomp, pS_decomp
n_use_in_st = n_use_in
orig_in_data = np.dot(np.dot(S_decomp_st[:, :n_use_in_st],
pS_decomp_st[:n_use_in_st]),
resid)
resid -= np.dot(np.dot(S_decomp_st[:, n_use_in_st:],
pS_decomp_st[n_use_in_st:]), resid)
resid -= orig_in_data
# Here we operate on our actual data
proc = out_meg_data if st_only else orig_data
_do_tSSS(proc, orig_in_data, resid, st_correlation,
n_positions, t_str, tsss_valid)
if not st_only or st_when == 'after':
# Do movement compensation on the data
for trans, rel_start, rel_stop, this_pos_quat in \
zip(*t_s_s_q_a[:4]):
# Recalculate bases if necessary (trans will be None iff the
# first position in this interval is the same as last of the
# previous interval)
if trans is not None:
S_decomp, S_decomp_full, pS_decomp, reg_moments, \
n_use_in = _get_this_decomp_trans(
trans, t=rel_times[rel_start])
# Determine multipole moments for this interval
mm_in = np.dot(pS_decomp[:n_use_in],
orig_data[:, rel_start:rel_stop])
# Our output data
if not st_only:
if reconstruct == 'in':
proj = S_recon.take(reg_moments[:n_use_in], axis=1)
mult = mm_in
else:
assert reconstruct == 'orig'
proj = S_decomp_full # already picked reg
mm_out = np.dot(pS_decomp[n_use_in:],
orig_data[:, rel_start:rel_stop])
mult = np.concatenate((mm_in, mm_out))
out_meg_data[:, rel_start:rel_stop] = \
np.dot(proj, mult)
if len(pos_picks) > 0:
out_pos_data[:, rel_start:rel_stop] = \
this_pos_quat[:, np.newaxis]
# Transform orig_data to store just the residual
if st_when == 'after':
# Reconstruct data using original location from external
# and internal spaces and compute residual
rel_resid_data = resid[:, rel_start:rel_stop]
orig_in_data[:, rel_start:rel_stop] = \
np.dot(S_decomp[:, :n_use_in], mm_in)
rel_resid_data -= np.dot(np.dot(S_decomp[:, n_use_in:],
pS_decomp[n_use_in:]),
rel_resid_data)
rel_resid_data -= orig_in_data[:, rel_start:rel_stop]
# If doing tSSS at the end
if st_when == 'after':
_do_tSSS(out_meg_data, orig_in_data, resid, st_correlation,
n_positions, t_str, tsss_valid)
elif st_when == 'never' and head_pos[0] is not None:
logger.info(' Used % 2d head position%s for %s'
% (n_positions, _pl(n_positions), t_str))
raw_sss._data[meg_picks, start:stop] = out_meg_data
raw_sss._data[pos_picks, start:stop] = out_pos_data
return raw_sss
def _get_coil_scale(meg_picks, mag_picks, grad_picks, mag_scale, info):
"""Get the magnetometer scale factor."""
if isinstance(mag_scale, str):
if mag_scale != 'auto':
raise ValueError('mag_scale must be a float or "auto", got "%s"'
% mag_scale)
if len(mag_picks) in (0, len(meg_picks)):
mag_scale = 100. # only one coil type, doesn't matter
logger.info(' Setting mag_scale=%0.2f because only one '
'coil type is present' % mag_scale)
else:
# Find our physical distance between gradiometer pickup loops
# ("base line")
coils = _create_meg_coils([info['chs'][pick]
for pick in meg_picks], 'accurate')
grad_base = {coils[pick]['base'] for pick in grad_picks}
if len(grad_base) != 1 or list(grad_base)[0] <= 0:
raise RuntimeError('Could not automatically determine '
'mag_scale, could not find one '
'proper gradiometer distance from: %s'
% list(grad_base))
grad_base = list(grad_base)[0]
mag_scale = 1. / grad_base
logger.info(' Setting mag_scale=%0.2f based on gradiometer '
'distance %0.2f mm' % (mag_scale, 1000 * grad_base))
mag_scale = float(mag_scale)
coil_scale = np.ones((len(meg_picks), 1))
coil_scale[mag_picks] = mag_scale
return coil_scale, mag_scale
def _remove_meg_projs(inst):
"""Remove inplace existing MEG projectors (assumes inactive)."""
meg_picks = pick_types(inst.info, meg=True, exclude=[])
meg_channels = [inst.ch_names[pi] for pi in meg_picks]
non_meg_proj = list()
for proj in inst.info['projs']:
if not any(c in meg_channels for c in proj['data']['col_names']):
non_meg_proj.append(proj)
inst.add_proj(non_meg_proj, remove_existing=True, verbose=False)
def _check_destination(destination, info, head_frame):
"""Triage our reconstruction trans."""
if destination is None:
return info['dev_head_t']
if not head_frame:
raise RuntimeError('destination can only be set if using the '
'head coordinate frame')
if isinstance(destination, str):
recon_trans = _get_trans(destination, 'meg', 'head')[0]
elif isinstance(destination, Transform):
recon_trans = destination
else:
destination = np.array(destination, float)
if destination.shape != (3,):
raise ValueError('destination must be a 3-element vector, '
'str, or None')
recon_trans = np.eye(4)
recon_trans[:3, 3] = destination
recon_trans = Transform('meg', 'head', recon_trans)
if recon_trans.to_str != 'head' or recon_trans.from_str != 'MEG device':
raise RuntimeError('Destination transform is not MEG device -> head, '
'got %s -> %s' % (recon_trans.from_str,
recon_trans.to_str))
return recon_trans
@verbose
def _prep_mf_coils(info, ignore_ref=True, verbose=None):
"""Get all coil integration information loaded and sorted."""
coils, comp_coils = _prep_meg_channels(
info, accurate=True, head_frame=False,
ignore_ref=ignore_ref, do_picking=False, verbose=False)[:2]
mag_mask = _get_mag_mask(coils)
if len(comp_coils) > 0:
meg_picks = pick_types(info, meg=True, ref_meg=False, exclude=[])
ref_picks = pick_types(info, meg=False, ref_meg=True, exclude=[])
inserts = np.searchsorted(meg_picks, ref_picks)
# len(inserts) == len(comp_coils)
for idx, comp_coil in zip(inserts[::-1], comp_coils[::-1]):
coils.insert(idx, comp_coil)
# Now we have:
# [c['chname'] for c in coils] ==
# [info['ch_names'][ii]
# for ii in pick_types(info, meg=True, ref_meg=True)]
# Now coils is a sorted list of coils. Time to do some vectorization.
n_coils = len(coils)
rmags = np.concatenate([coil['rmag'] for coil in coils])
cosmags = np.concatenate([coil['cosmag'] for coil in coils])
ws = np.concatenate([coil['w'] for coil in coils])
cosmags *= ws[:, np.newaxis]
del ws
n_int = np.array([len(coil['rmag']) for coil in coils])
bins = np.repeat(np.arange(len(n_int)), n_int)
bd = np.concatenate(([0], np.cumsum(n_int)))
slice_map = {ii: slice(start, stop)
for ii, (start, stop) in enumerate(zip(bd[:-1], bd[1:]))}
return rmags, cosmags, bins, n_coils, mag_mask, slice_map
def _trans_starts_stops_quats(pos, start, stop, this_pos_data):
"""Get all trans and limits we need."""
pos_idx = np.arange(*np.searchsorted(pos[1], [start, stop]))
used = np.zeros(stop - start, bool)
trans = list()
rel_starts = list()
rel_stops = list()
quats = list()
weights = list()
for ti in range(-1, len(pos_idx)):
# first iteration for this block of data
if ti < 0:
rel_start = 0
rel_stop = pos[1][pos_idx[0]] if len(pos_idx) > 0 else stop
rel_stop = rel_stop - start
if rel_start == rel_stop:
continue # our first pos occurs on first time sample
# Don't calculate S_decomp here, use the last one
trans.append(None) # meaning: use previous
quats.append(this_pos_data)
else:
rel_start = pos[1][pos_idx[ti]] - start
if ti == len(pos_idx) - 1:
rel_stop = stop - start
else:
rel_stop = pos[1][pos_idx[ti + 1]] - start
trans.append(pos[0][pos_idx[ti]])
quats.append(pos[2][pos_idx[ti]])
assert 0 <= rel_start
assert rel_start < rel_stop
assert rel_stop <= stop - start
assert not used[rel_start:rel_stop].any()
used[rel_start:rel_stop] = True
rel_starts.append(rel_start)
rel_stops.append(rel_stop)
weights.append(rel_stop - rel_start)
assert used.all()
# Use weighted average for average trans over the window
if this_pos_data is None:
avg_trans = None
else:
weights = np.array(weights)
quats = np.array(quats)
weights = weights / weights.sum().astype(float) # int -> float
avg_quat = _average_quats(quats[:, :3], weights)
avg_t = np.dot(weights, quats[:, 3:6])
avg_trans = np.vstack([
np.hstack([quat_to_rot(avg_quat), avg_t[:, np.newaxis]]),
[[0., 0., 0., 1.]]])
return trans, rel_starts, rel_stops, quats, avg_trans
def _do_tSSS(clean_data, orig_in_data, resid, st_correlation,
n_positions, t_str, tsss_valid):
"""Compute and apply SSP-like projection vectors based on min corr."""
if not tsss_valid:
t_proj = np.empty((clean_data.shape[1], 0))
else:
np.asarray_chkfinite(resid)
t_proj = _overlap_projector(orig_in_data, resid, st_correlation)
# Apply projector according to Eq. 12 in [2]_
msg = (' Projecting %2d intersecting tSSS component%s '
'for %s' % (t_proj.shape[1], _pl(t_proj.shape[1], ' '), t_str))
if n_positions > 1:
msg += ' (across %2d position%s)' % (n_positions,
_pl(n_positions, ' '))
logger.info(msg)
clean_data -= np.dot(np.dot(clean_data, t_proj), t_proj.T)
def _copy_preload_add_channels(raw, add_channels, copy):
"""Load data for processing and (maybe) add cHPI pos channels."""
if copy:
raw = raw.copy()
if add_channels:
kinds = [FIFF.FIFFV_QUAT_1, FIFF.FIFFV_QUAT_2, FIFF.FIFFV_QUAT_3,
FIFF.FIFFV_QUAT_4, FIFF.FIFFV_QUAT_5, FIFF.FIFFV_QUAT_6,
FIFF.FIFFV_HPI_G, FIFF.FIFFV_HPI_ERR, FIFF.FIFFV_HPI_MOV]
out_shape = (len(raw.ch_names) + len(kinds), len(raw.times))
out_data = np.zeros(out_shape, np.float64)
msg = ' Appending head position result channels and '
if raw.preload:
logger.info(msg + 'copying original raw data')
out_data[:len(raw.ch_names)] = raw._data
raw._data = out_data
else:
logger.info(msg + 'loading raw data from disk')
raw._preload_data(out_data[:len(raw.ch_names)], verbose=False)
raw._data = out_data
assert raw.preload is True
off = len(raw.ch_names)
chpi_chs = [
dict(ch_name='CHPI%03d' % (ii + 1), logno=ii + 1,
scanno=off + ii + 1, unit_mul=-1, range=1., unit=-1,
kind=kinds[ii], coord_frame=FIFF.FIFFV_COORD_UNKNOWN,
cal=1e-4, coil_type=FWD.COIL_UNKNOWN, loc=np.zeros(12))
for ii in range(len(kinds))]
raw.info['chs'].extend(chpi_chs)
raw.info._update_redundant()
raw.info._check_consistency()
assert raw._data.shape == (raw.info['nchan'], len(raw.times))
# Return the pos picks
pos_picks = np.arange(len(raw.ch_names) - len(chpi_chs),
len(raw.ch_names))
return raw, pos_picks
else:
if copy:
if not raw.preload:
logger.info(' Loading raw data from disk')
raw.load_data(verbose=False)
else:
logger.info(' Using loaded raw data')
return raw, np.array([], int)
def _check_pos(pos, head_frame, raw, st_fixed, sfreq):
"""Check for a valid pos array and transform it to a more usable form."""
_validate_type(pos, (np.ndarray, None), 'head_pos')
if pos is None:
return [None, np.array([-1])]
if not head_frame:
raise ValueError('positions can only be used if coord_frame="head"')
if not st_fixed:
warn('st_fixed=False is untested, use with caution!')
if not isinstance(pos, np.ndarray):
raise TypeError('pos must be an ndarray')
if pos.ndim != 2 or pos.shape[1] != 10:
raise ValueError('pos must be an array of shape (N, 10)')
t = pos[:, 0]
if not np.array_equal(t, np.unique(t)):
raise ValueError('Time points must be unique and in ascending order')
# We need an extra 1e-3 (1 ms) here because MaxFilter outputs values
# only out to 3 decimal places
if not _time_mask(t, tmin=raw._first_time - 1e-3, tmax=None,
sfreq=sfreq).all():
raise ValueError('Head position time points must be greater than '
'first sample offset, but found %0.4f < %0.4f'
% (t[0], raw._first_time))
max_dist = np.sqrt(np.sum(pos[:, 4:7] ** 2, axis=1)).max()
if max_dist > 1.:
warn('Found a distance greater than 1 m (%0.3g m) from the device '
'origin, positions may be invalid and Maxwell filtering could '
'fail' % (max_dist,))
dev_head_ts = np.zeros((len(t), 4, 4))
dev_head_ts[:, 3, 3] = 1.
dev_head_ts[:, :3, 3] = pos[:, 4:7]
dev_head_ts[:, :3, :3] = quat_to_rot(pos[:, 1:4])
pos = [dev_head_ts, t - raw._first_time, pos[:, 1:]]
return pos
def _get_decomp(trans, all_coils, cal, regularize, exp, ignore_ref,
coil_scale, grad_picks, mag_picks, good_mask, mag_or_fine,
bad_condition, t, mag_scale):
"""Get a decomposition matrix and pseudoinverse matrices."""
#
# Fine calibration processing (point-like magnetometers and calib. coeffs)
#
S_decomp_full = _get_s_decomp(
exp, all_coils, trans, coil_scale, cal, ignore_ref, grad_picks,
mag_picks, mag_scale)
S_decomp = S_decomp_full[good_mask]
#
# Regularization
#
S_decomp, pS_decomp, sing, reg_moments, n_use_in = _regularize(
regularize, exp, S_decomp, mag_or_fine, t=t)
S_decomp_full = S_decomp_full.take(reg_moments, axis=1)
# Pseudo-inverse of total multipolar moment basis set (Part of Eq. 37)
cond = sing[0] / sing[-1]
if bad_condition != 'ignore' and cond >= 1000.:
msg = 'Matrix is badly conditioned: %0.0f >= 1000' % cond
if bad_condition == 'error':
raise RuntimeError(msg)
elif bad_condition == 'warning':
warn(msg)
else: # condition == 'info'
logger.info(msg)
# Build in our data scaling here
pS_decomp *= coil_scale[good_mask].T
S_decomp /= coil_scale[good_mask]
S_decomp_full /= coil_scale
return S_decomp, S_decomp_full, pS_decomp, reg_moments, n_use_in
def _get_s_decomp(exp, all_coils, trans, coil_scale, cal, ignore_ref,
grad_picks, mag_picks, mag_scale):
"""Get S_decomp."""
S_decomp = _trans_sss_basis(exp, all_coils, trans, coil_scale)
if cal is not None:
# Compute point-like mags to incorporate gradiometer imbalance
grad_cals = _sss_basis_point(exp, trans, cal, ignore_ref, mag_scale)
# Add point like magnetometer data to bases.
S_decomp[grad_picks, :] += grad_cals
# Scale magnetometers by calibration coefficient
S_decomp[mag_picks, :] /= cal['mag_cals']
# We need to be careful about KIT gradiometers
return S_decomp
@verbose
def _regularize(regularize, exp, S_decomp, mag_or_fine, t, verbose=None):
"""Regularize a decomposition matrix."""
# ALWAYS regularize the out components according to norm, since
# gradiometer-only setups (e.g., KIT) can have zero first-order
# (homogeneous field) components
int_order, ext_order = exp['int_order'], exp['ext_order']
n_in, n_out = _get_n_moments([int_order, ext_order])
t_str = '%8.3f' % t
if regularize is not None: # regularize='in'
in_removes, out_removes = _regularize_in(
int_order, ext_order, S_decomp, mag_or_fine)
else:
in_removes = []
out_removes = _regularize_out(int_order, ext_order, mag_or_fine)
reg_in_moments = np.setdiff1d(np.arange(n_in), in_removes)
reg_out_moments = np.setdiff1d(np.arange(n_in, n_in + n_out),
out_removes)
n_use_in = len(reg_in_moments)
n_use_out = len(reg_out_moments)
reg_moments = np.concatenate((reg_in_moments, reg_out_moments))
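# --- Illustrative sketch (not part of mne-python): how the kept "in"/"out"
# moment indices above combine into a single selection array. The removal
# lists below are made up purely for demonstration.
import numpy as np
n_in_demo, n_out_demo = 8, 3
in_removes_demo = [2, 5]
out_removes_demo = [9]
reg_in_demo = np.setdiff1d(np.arange(n_in_demo), in_removes_demo)
reg_out_demo = np.setdiff1d(np.arange(n_in_demo, n_in_demo + n_out_demo), out_removes_demo)
print(np.concatenate((reg_in_demo, reg_out_demo)))  # [ 0  1  3  4  6  7  8 10]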
"""
Created on Mon Nov 05 03:52:36 2018
@author: Paul
"""
### Boiler-Plate ###
import matplotlib.pylab as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import scipy as sp
from numpy import random
import time
import csv
from Class1_Eq import *
from Func import *
""" Change this value when changed in restart .i files """
global t_final
t_final = 10000 # seconds
global ss_fail_penalty
ss_fail_penalty = 700
global cost_multiplier_for_nucl_safety_grade
cost_multiplier_for_nucl_safety_grade = 5.0
###########################################################################
""""""""" Tri-System Option Class """"""""" ###########################
###########################################################################
class Option:
"""
Inputs:
x1 = Zion core loop x-optimization parameters
x2 = PERCS loop x-optimization parameters
x3 = PCS superstructure x-optimization parameters
y = PCS superstructure y-optimization parameters
Parameters:
*Individual optimization parameters (explained in __init__() function)
Core Loop:
cards = Array of RELAP5 card numbers with core loop value changes
i_vals = Array of column numbers for core loop value changes
vals = Array of new values for core loop value changes
T_fuel_cent_max = Maximum fuel centerline temperature (constraint)
T_clad_surf_max = Maximum cladding surface temperature (constraint)
MDNBR = Minimum departure from nucleate boiling ratio (constraint)
T_f_over_max = [Boolean] Did fuel temperature go over the max?
T_c_over_max = [Boolean] Did cladding temperature go over the max?
MDNBR_below_1 = [Boolean] Did MDNBR go below 1.0?
penalized = [Boolean] Did the core loop receive a penalty?
failed = [Boolean] Did the RELAP5 core loop model fail early?
csvFileLocation = [String] Core's PyPost results file location
*Parameters for T, P, m_dot, H, & x_e core data from PyPost
k_eff = Effective multiplication factor per neutron cycle in core
rho_0 = Initial reactivity of the core
Bc = Cycle burn-up of the fuel [EFPD = effective full-power days]
nBc = Discharge burn-up of the fuel
cost_RCPs = Capital cost of RCPs
op_cost_RCPs = Operating cost of RCPs (40 yrs)
cost_total_fuel = Cost of UO2 fuel (40 yrs)
PERCS Loop:
list_card = Array of RELAP5 card numbers with PERCS value changes
list_i_change = Array of column numbers for PERCS value changes
list_change = Array of new values for PERCS value changes
len_diff_717 = Parameter used to calculate length of Pipe 717
n_tubes = Number of tubes w/in PERCS tank
m_MgCO3 = Mass of Magnesium Carbonate w/in PERCS tank
T_over_620 = [Boolean] Did the core outlet T go above 620K?
T_over_635 = [Boolean] Did the core outlet T go above 635K?
csvFileLocation2 = [String] PERCS's PyPost results file location
*Parameters for T & alpha PERCS data from PyPost
PERCS_failed = [Boolean] Did the PERCS RELAP5 model fail early?
PERCS_penalty = [Boolean] Did the PERCS receive a penalty?
cost_penalty = Multiplicative cost penalty if 'PERCS_failed' = TRUE
ss_fail = [Boolean] Redundant of Core's 'failed'
p716, p717 = Pipes 716 & 717 (for cost purposes)
support = Support structure for PERCS tank (for cost purposes)
hx = Fake heat exchanger (for cost purposes)
tank = PERCS tank (for cost purposes)
chemical = MgCO3 in tank (for cost purposes)
PCS Loop:
pinch_point = [Boolean]
s = Array of Stream instances for all 37 PCS superstructure streams
phx = PHX instance representing the Steam Generator
t1a, t1b, t1c, t2a, t2b = Turbines representing the diff. stages
t1, t2 = Actual turbines (for cost purposes)
t3, t4, t5 = Turbine instances for LPTs
ms1, ms2 = Moisture separator instances
rh1, rh2 = Reheater heat exchanger instances
cond = Condenser instance
fwh1, fwh2, fwh3, fwh4 = Feedwater heater instances
p1, p2, p3, p4, p5, p6 = Pump instances
Objective Functions:
W_rcp = Core Obj. 1 - Total work of RCPs
cost_1 = Core Obj. 2 - Total core loop costs
obj_1_1 = Normalized W_rcp
obj_1_2 = Normalized cost_1
fmm_1 = Maximin fitness value for core loop
cost_2 = PERCS Obj. 1 - Total PERCS equipment cost
dT_int = PERCS Obj. 2 - Integral of deviation of core outlet T
alpha = PERCS Obj. 3 - Consumption of MgCO3
obj_2_1 = Normalized cost_2
obj_2_2 = Normalized dT_int
obj_2_3 = Normalized alpha
fmm_2 = Maximin fitness value for PERCS loop
color = [String] PCS superstructure color/configuration
eff = PCS Obj. 1 - Thermodynamic Efficiency
cost_3 = PCS Obj. 2 - Total PCS equipment cost
obj_3_1 = Normalized eff
obj_3_2 = Normalized cost_3
fmm_3 = Maximin fitness value for PCS loop
obj_fmm_1 = Normalized fmm_1
obj_fmm_2 = Normalized fmm_2
obj_fmm_3 = Normalized fmm_3
fmm_o = Overall Maximin fitness value
Functions:
init_ZION_calcs() - Fills arrays to make core loop RELAP5 value changes
init_PERCS_calcs() - Fills arrays to make PERCS RELAP5 value changes
final_ZION_calcs() - Grabs PyPost data, Performs final core loop calcs
final_PERCS_calcs() - Grabs PyPost data, Performs final PERCS calcs
Alpha_calcs() - Grabs alpha PyPost data, Calcs overall Alpha
PCS_SS_calcs() - Calls solve_PCS(), Performs final PCS calcs
solve_PCS() - Fills out PCS superstructure & converges the cycle
"""
def __init__(self,x1_in,x2_in,x3_in,y_in):
self.opt_ID = 0
self.last_sec_penalty = False
# Define the x- and y-optimization parameter arrays
self.x1 = x1_in # ZION x-opt parameters
self.x2 = x2_in # PERCS x-opt parameters
self.x3 = x3_in # PCS x-opt parameters
self.y = y_in # PCS y-opt parameters
# Further define the ZION Core loop opt. parameters
self.R_f = self.x1[0] # ft (radius of fuel per pin)
self.H_fuel = self.x1[1] # ft (height of fuel pins)
self.Dh_00 = self.x1[2] # ft (hydraulic D of pipes _00)
self.Dh_12 = self.x1[3] # ft (hydraulic D of pipes _12)
self.Dh_14 = self.x1[4] # ft (hydraulic D of pipes _14)
# Further define the PERCS loop opt. parameters
self.R_tank = self.x2[0] # ft (radius of PERCS HX tank)
self.pitch = self.x2[1] # ft (pitch b/t tubes in PERCS)
self.D_h = self.x2[2] # ft (hydraulic D of tubes)
self.th = self.x2[3] # ft (thickness of tubes)
self.Len = self.x2[4] # ft (length of tubes / height of tank)
self.elev = self.x2[5] # ft (height diff. b/t core outlet & PERCS inlet)
# Further define the PCS superstructure x-opt. parameters
self.To_PHX = self.x3[0] # degC
self.Po_t1a = self.x3[1] # bar
self.mf_t1a = self.x3[2]
self.Po_t1b = self.x3[3] # bar
self.mf_t1b = self.x3[4]
self.Po_t1c = self.x3[5] # bar
self.Po_t2a = self.x3[6] # bar
self.mf_t2a = self.x3[7]
self.Po_t2b = self.x3[8] # bar
# Further define the PCS superstructure y-opt. parameters
self.y_ipt = self.y[0] # IPT
self.y_rh1 = self.y[1] # RH 1
self.y_rh2 = self.y[2] # RH 2
self.y_s14 = self.y[3] # s[14]
self.y_s4 = self.y[4] # s[4]
self.y_s5 = self.y[5] # s[5]
################################
""" Init stuff for ZION Core """
################################
# Initialize card, i_change, and change lists for ZION
self.cards = np.empty(119,dtype='<U32')
self.i_vals = np.zeros(119,dtype=int)
self.vals = np.zeros(119)
# Initiate the Booleans that tracks thermal design limit violations
self.T_fuel_cent_max = 2100 # degC
self.T_clad_surf_max = 348 # degC
self.MDNBR = 0
self.T_f_over_max = False
self.T_c_over_max = False
self.MDNBR_below_1 = False
self.penalized = False
self.failed = False
# Parameter data grabbed from .csv files using PyPost
self.csvFileLocation = 'None'
self.T_106 = 0.0 # degC
self.T_110 = 0.0 # degC
self.P_106 = 0.0 # bar
self.P_110 = 0.0 # bar
self.P_335 = np.zeros(6) # MPa
self.P_p_out = 0.0 # bar
self.m_dot_100 = 0.0 # kg/s
self.m_dot_335 = 0.0 # kg/s
self.m_dot_400 = 0.0 # kg/s
self.m_dot_600 = 0.0 # kg/s
self.m_dot_200 = 0.0 # kg/s
self.H_106 = 0.0 # kJ/kg
self.H_110 = 0.0 # kJ/kg
self.H_335_1 = 0.0 # kJ/kg
self.H_112_5 = 0.0 # kJ/kg
self.H_114 = 0.0 # kJ/kg
self.H_412_5 = 0.0 # kJ/kg
self.H_414 = 0.0 # kJ/kg
self.H_612_5 = 0.0 # kJ/kg
self.H_614 = 0.0 # kJ/kg
self.H_212_5 = 0.0 # kJ/kg
self.H_214 = 0.0 # kJ/kg
self.T_1336_1 = np.zeros(6) # K
self.T_1336_17 = np.zeros(6) # K
self.x_e_335 = np.zeros(6)
# Other parameters that should be reported in Excel
self.k_eff = 0.0
self.rho_0 = 0.0
self.Bc = 0.0 # EFPD
self.nBc = 0.0 # yr
# Three cost parameters that make up 'cost_1'
self.cost_RCPs = 0.0 # $
self.op_cost_RCPs = 0.0 # $
self.cost_total_fuel = 0.0 # $
############################
""" Init stuff for PERCS """
############################
# Initialize card, i_change, and change lists for PERCS
self.list_card = np.empty(39,dtype='<U32')
self.list_i_change = np.zeros(39,dtype=int)
self.list_change = np.empty(39)
# Needed to calc the elev of Pipe 717, calc'd in Init_ZION_Calcs()
self.len_diff_717 = 0.0 # ft
# Initialize some stuff
self.n_tubes = 0
self.m_MgCO3 = 0 # kg
# Initiate the Boolean that says whether T goes over 620 K and/or 635 K
self.T_over_620 = False
self.T_over_635 = False
# Initiate the arrays for t and T and the matrix for a (alpha)
self.csvFileLocation2 = 'None'
self.t = np.zeros(0)
self.T_335_6 = np.zeros(0)
self.dT_335_6 = np.zeros(0)
self.a_array = np.zeros(100)
self.a = np.zeros((10,10))
# Initiate the Boolean that says if there was a penalty for failing before t_final
self.PERCS_failed = False
self.PERCS_penalty = 1.0
self.cost_penalty = 1.0
self.ss_fail = False # Redundant
# Initialize PERCS system equipment
self.p716 = Pipe(self.elev)
self.p717 = Pipe(0.0)
self.support = Support(self.R_tank,self.Len,0.0)
self.hx = HX()
self.tank = Tank(self.R_tank,self.Len)
self.chemical = Chemical(0)
##########################
""" Init stuff for PCS """
##########################
self.pinch_point = False
# Initialize all Streams with zeros
self.s = np.array([0])
for i in range(1,37):
self.s = np.append(self.s,Stream(0.0,0.0,0.0,0.0))
# Create the PCS equipment w/ original opt. parameters
self.phx = PHX(self.To_PHX)
self.t1a = Turbine(0.0,0.0,0.0,0.0,self.Po_t1a)
self.t1b = Turbine(0.0,0.0,0.0,0.0,self.Po_t1b)
self.t1c = Turbine(0.0,0.0,0.0,0.0,self.Po_t1c)
self.t1 = Turbine(0.0,0.0,0.0,0.0,self.Po_t1c)
self.ms1 = MS(self.Po_t1c,0.0,0.0,0.0)
self.rh1 = Reheater(1,self.Po_t1a,0.0,0.0,0.0,self.Po_t1c,0.0,0.0,False)
self.t2a = Turbine(0.0,0.0,0.0,0.0,self.Po_t2a)
self.t2b = Turbine(0.0,0.0,0.0,0.0,self.Po_t2b)
self.t2 = Turbine(0.0,0.0,0.0,0.0,self.Po_t2b)
self.ms2 = MS(self.Po_t2b,0.0,0.0,0.0)
self.rh2 = Reheater(2,0.0,0.0,0.0,0.0,self.Po_t2b,0.0,0.0,False)
self.t3 = Turbine(0.0,0.0,0.0,0.0,0.086)
self.t4 = Turbine(0.0,0.0,0.0,0.0,0.086)
self.t5 = Turbine(0.0,0.0,0.0,0.0,0.086)
self.cond = Condenser(0.086,0.0,0.0,0.0)
self.fwh1 = FWH(0.0,0.0,0.0,0.0,0.0,0.0,0.0)
self.fwh2 = FWH(0.0,0.0,0.0,0.0,0.0,0.0,0.0)
self.fwh3 = FWH(0.0,0.0,0.0,0.0,0.0,0.0,0.0)
self.fwh4 = FWH(0.0,0.0,0.0,0.0,0.0,0.0,0.0)
self.p1 = Pump(0.0,0.0,0.0,self.phx.Pin)
self.p2 = Pump(0.0,0.0,0.0,self.Po_t1a)
self.p3 = Pump(0.0,0.0,0.0,0.0)
self.p4 = Pump(0.0,0.0,0.0,0.0)
self.p5 = Pump(0.0,0.0,0.0,0.0)
self.p6 = Pump(0.0,0.0,0.0,0.0)
##########################################################
""" Initiate all objective function and maximin values """
##########################################################
# For ZION Core
self.W_rcp = 0.0 # 1
self.cost_1 = 0.0 # 2
self.obj_1_1 = 0.0 # W_rcp
self.obj_1_2 = 0.0 # cost_1
self.fmm_1 = 0
# For PERCS
self.cost_2 = 0.0 # 1
self.dT_int = 0.0 # 2
self.alpha = 0.0 # 3
self.obj_2_1 = 0.0 # cost_2
self.obj_2_2 = 0.0 # dT_int
self.obj_2_3 = 0.0 # consumption(alpha)
self.fmm_2 = 0
# For Rankine PCS
self.color = 'black'
self.eff = 0.0
self.inv_eff = 0.0 # 1
self.cost_3 = 0.0 # 2
self.obj_3_1 = 0.0 # inv_eff
self.obj_3_2 = 0.0 # cost_3
self.fmm_3 = 0
# Overall fmm-value
self.obj_fmm_1 = 0.0 # normalized fmm_1
self.obj_fmm_2 = 0.0 # normalized fmm_2
self.obj_fmm_3 = 0.0 # normalized fmm_3
self.fmm_o = 0
#######################################################
""" Perform the initial calculations for the Option """
#######################################################
self.init_ZION_calcs()
self.init_PERCS_calcs()
"""
The initial calcs take place in the init_ZION_calcs(), init_PERCS_calcs()
functions below.
The RELAP5 and PyPost files are run from the Population.calc_Options() function.
The obj. function and constraints calcs are run from the
Population.final_Option_calcs() function.
"""
def init_ZION_calcs(self):
##############################################
""" Calcs corresponding to a change in R_f """
##############################################
#-----------------------------
""" Core Area Calculations """
#-----------------------------
## Constants and Ratios
ratio_f2m = 0.48374681 # Fuel to Moderator Ratio
th_g = 0.002 # ft
th_c = 0.0005 # ft
self.n_pins = 41958.0554 # ~42,000 (value derived from RELAP5 model)
ratio_p2D = 1.35532 # Fuel Pin Pitch to Diameter Ratio
## Calculations
self.R_g = np.round(self.R_f + th_g, 4) # Gap radius [ft]
self.R_c = np.round(self.R_f + th_g + th_c, 4) # Cladding radius [ft]
pitch = ratio_p2D * (2.0 * self.R_c) # ft
self.p = np.round(pitch, 4) # Fuel pin pitch [ft]
A_f = np.pi * self.R_f**2.0 # Fuel A_c [ft^2]
A_g = np.pi * (self.R_g**2.0 - self.R_f**2.0) # Gap A_c [ft^2]
A_c = np.pi * (self.R_c**2.0 - self.R_g**2.0) # Cladding A_c [ft^2]
A_p = A_f + A_g + A_c # Fuel pin A_c [ft^2]
self.A_fuel = self.n_pins * A_f # Total fuel pin A_c [ft^2]
self.A_gap = self.n_pins * A_g # Total gap A_c [ft^2]
self.A_clad = self.n_pins * A_c # Total cladding A_c [ft^2]
A_pins = self.n_pins * A_p # Total fuel pin A_c [ft^2]
self.A_H2O = self.A_fuel / ratio_f2m # Core coolant A_c [ft^2]
self.A_total = A_pins + self.A_H2O # Total core A_c [ft^2]
self.A_335 = np.round(self.A_H2O,5) # Rounded core A_c [ft^2]
A_jun_diff_335 = 2.207 # Total A_c of the baffle [ft^2]
# Junction A_c at end of core flow segment
self.A_jun_335 = np.round(self.A_H2O - A_jun_diff_335, 5) # ft^2
# Hydraulic diameter of core flow segment 335 [ft]
D_hyd = 4.0 * (pitch**2.0 - np.pi*self.R_c**2.0) / (2.0*np.pi*self.R_c)
# Rounded hydraulic diameter of core flow segment 335
self.Dh_335 = np.round(D_hyd,5) # ft
# A_c of branch 336 (core above baffle) [ft^2]
A_336 = np.round(0.272*(self.A_H2O-self.A_jun_335)+self.A_jun_335, 5)
## Fill the lists
self.cards[114:117] = ['13360101','13360102','13360103']
self.cards[78:80] = ['3350101','3350201']
self.cards[86:88] = ['3350801','3360101']
self.i_vals[114:117] = [3,3,3]
self.i_vals[78:80] = [2,2]
self.i_vals[86:88] = [3,4]
self.vals[114:117] = [self.R_f,self.R_g,self.R_c]
self.vals[78:80] = [self.A_335,self.A_jun_335]
self.vals[86:88] = [self.Dh_335,A_336]
#------------------------------------
""" Outer Area/R_eff Calculations """
#------------------------------------
## Constants and Ratios
R_in_barrel = 6.1667 # Inner radius of the barrel [ft]
th_baffle = 0.0937 # Thickness of the barrel [ft]
ratio_baffle_2_core = 1.2577045 # Ratio b/t core and effective baffle
## Calculations
self.R_core = np.sqrt(self.A_total/np.pi) # Radius of the entire core [ft]
# Effective inner radius of the baffle
Reff_in_baffle = self.R_core * ratio_baffle_2_core # ft
# Rounded effective inner radius of the baffle
left_bc_1335 = np.round(Reff_in_baffle, 4) # ft
# Effective outer radius of the the baffle
Reff_out_baffle = Reff_in_baffle + th_baffle # ft
# Rounded effective outer radius of the baffle
right_bc_1335 = np.round(Reff_out_baffle, 4) # ft
# A_c taken up by the baffle
A_baffle = np.pi * (Reff_out_baffle**2.0 - Reff_in_baffle**2.0) # ft^2
# Total A_c of core contents (calc'd from inside out)
A_total_plus_baffle = self.A_total + A_baffle # ft^2
# Total A_c of core (calc'd from outside in)
A_total_in_barrel = np.pi * R_in_barrel**2.0 # ft^2
self.A_320_bypass = 0.0
if (A_total_in_barrel - A_total_plus_baffle) > 18.6736:
self.A_320_bypass = 18.6736 # ft^2
else:
self.A_320_bypass = A_total_in_barrel - A_total_plus_baffle # ft^2
Dh_320 = 0.9591 # Hydraulic diameter of core bypass [ft]
## Fill the lists
self.cards[106:108],self.cards[70],self.cards[77] = ['13350000','13350101'],'3200101','3200801'
self.i_vals[106:108],self.i_vals[70],self.i_vals[77] = [6,3],2,3
self.vals[106:108],self.vals[70],self.vals[77] = [left_bc_1335,right_bc_1335],self.A_320_bypass,Dh_320
#################################################
""" Calcs corresponding to a change in H_fuel """
#################################################
#---------------------------
""" RPV len's and elev's """
#---------------------------
## Ratios and Percentages
# Height ratio b/t core flow segment (335) and actual fuel w/in pins
ratio_H335_2_Hfuel = 1.1145844358
# Length fractions per node along core flow segment (335)
L_frac_335 = np.array((0.187389,0.1632396,0.1632396,0.1632396,0.1632396,0.1596523))
# Length fractions per node along fuel in pins
L_frac_pin = np.array((0.1819444,0.1819444,0.1819444,0.1819444,0.1819444,0.090278))
## Calculations
# Height of core flow segment (335)
self.H_335 = self.H_fuel * ratio_H335_2_Hfuel # ft
# Lengths per node along core flow segment (335)
len_335 = np.round(self.H_335 * L_frac_335, 5) # ft
# Lengths of 'len_335' for upward-oriented RELAP5 flow segments
Lu = [len_335[0],len_335[3],len_335[5]] # ft
# Lengths of 'len_335' for downward-oriented RELAP5 flow segments
Ld = [len_335[5],len_335[3],len_335[0]] # ft
# Lengths of 'len_335' for downward-flowing RELAP5 flow segments
nLd = [-len_335[5],-len_335[3],-len_335[0]] # ft
len_pin = np.round(self.H_fuel * L_frac_pin, 5) # Rounded length of pin [ft]
C_pin = 2.0*np.pi * self.R_c # Circumference of fuel pin [ft]
# Total pin surface area on node 5
SA_1336_5R = np.round(self.n_pins * C_pin * len_pin[4], 4) # ft^2
# Total pin surface area on node 6
SA_1336_6R = np.round(self.n_pins * C_pin * len_pin[5], 4) # ft^2
## Fill the lists
self.cards[80:86] = ['3350301','3350302','3350303','3350701','3350702','3350703']
self.i_vals[80:86] = [2,2,2,2,2,2]
self.vals[80:86] = Lu+Lu
self.cards[71:77] = ['3200301','3200302','3200303','3200701','3200702','3200703']
self.i_vals[71:77] = [2,2,2,2,2,2]
self.vals[71:77] = Ld+nLd
self.cards[64:70] = ['3150301','3150302','3150303','3150701','3150702','3150703']
self.i_vals[64:70] = [2,2,2,2,2,2]
self.vals[64:70] = Ld+nLd
self.cards[88:94] = ['13150501','13150502','13150503','13150601','13150602','13150603']
self.i_vals[88:94] = [6,6,6,6,6,6]
self.vals[88:94] = Ld+Ld
self.cards[94:100] = ['13160501','13160502','13160503','13160601','13160602','13160603']
self.i_vals[94:100] = [6,6,6,6,6,6]
self.vals[94:100] = Ld+Ld
self.cards[100:106] = ['13200501','13200502','13200503','13200601','13200602','13200603']
self.i_vals[100:106] = [6,6,6,6,6,6]
self.vals[100:106] = Ld+Ld
self.cards[108:114] = ['13350501','13350502','13350503','13350601','13350602','13350603']
self.i_vals[108:114] = [6,6,6,6,6,6]
self.vals[108:114] = Lu+Lu
self.cards[117:119] = ['13360601','13360602']
self.i_vals[117:119] = [6,6]
self.vals[117:119] = [SA_1336_5R,SA_1336_6R]
#------------------------------
""" PERCS p717 len and elev """
#------------------------------
## Calculations
# Deviation from original height of the fuel (for PERCS pipe 717 calc)
self.len_diff_717 = ratio_H335_2_Hfuel * (self.H_fuel - 11.99971) # ft
##################################################
""" Calcs corresponding to changes in pipe D's """
##################################################
## Calculations
A_00 = np.round(np.pi/4.0*self.Dh_00**2.0, 3) # A_c of pipes _00 [ft^2]
A_12 = np.round(np.pi/4.0*self.Dh_12**2.0, 3) # A_c of pipes _12 [ft^2]
A_14 = np.round(np.pi/4.0*self.Dh_14**2.0, 3) # A_c of pipes _14 [ft^2]
## Fill the lists
self.cards[0:6] = ['1000101','1000801','1020101','1020101','1040101','1040801']
self.i_vals[0:6] = [2,3,2,9,2,3]
self.vals[0:6] = [A_00,self.Dh_00,A_00,self.Dh_00,A_00,self.Dh_00]
self.cards[6:10] = ['1120101','1120801','1130101','1130108']
self.i_vals[6:10] = [2,3,2,3]
self.vals[6:10] = [A_12,self.Dh_12,A_12,A_12]
self.cards[10:19] = ['1130109','1140101','1140801','1160101','1160101','1161101','1162101','1180101','1180801']
self.i_vals[10:19] = [3,2,3,2,9,4,4,2,3]
self.vals[10:19] = [A_14,A_14,self.Dh_14,A_14,self.Dh_14,A_14,A_14,A_14,self.Dh_14]
self.cards[19:25] = ['4000101','4000801','4120101','4120801','4130101','4130108']
self.i_vals[19:25] = [2,3,2,3,2,3]
self.vals[19:25] = [A_00,self.Dh_00,A_12,self.Dh_12,A_12,A_12]
self.cards[25:34] = ['4130109','4140101','4140801','4160101','4160101','4161101','4162101','4180101','4180801']
self.i_vals[25:34] = [3,2,3,2,9,4,4,2,3]
self.vals[25:34] = [A_14,A_14,self.Dh_14,A_14,self.Dh_14,A_14,A_14,A_14,self.Dh_14]
self.cards[34:40] = ['6000101','6000801','6120101','6120801','6130101','6130108']
self.i_vals[34:40] = [2,3,2,3,2,3]
self.vals[34:40] = [A_00,self.Dh_00,A_12,self.Dh_12,A_12,A_12]
self.cards[40:49] = ['6130109','6140101','6140801','6160101','6160101','6161101','6162101','6180101','6180801']
self.i_vals[40:49] = [3,2,3,2,9,4,4,2,3]
self.vals[40:49] = [A_14,A_14,self.Dh_14,A_14,self.Dh_14,A_14,A_14,A_14,self.Dh_14]
self.cards[49:55] = ['2000101','2000801','2120101','2120801','2130101','2130108']
self.i_vals[49:55] = [2,3,2,3,2,3]
self.vals[49:55] = [A_00,self.Dh_00,A_12,self.Dh_12,A_12,A_12]
self.cards[55:64] = ['2130109','2140101','2140801','2160101','2160101','2161101','2162101','2180101','2180801']
self.i_vals[55:64] = [3,2,3,2,9,4,4,2,3]
self.vals[55:64] = [A_14,A_14,self.Dh_14,A_14,self.Dh_14,A_14,A_14,A_14,self.Dh_14]
def init_PERCS_calcs(self):
# Calc the number of tubes in PERCS
Ac_tank = np.pi * self.R_tank**2.0 # A_c of entire tank ft^2
Ac_hex = np.sqrt(3)/2 * self.pitch**2.0 # A_c of hexagon around tube [ft^2]
self.n_tubes = np.round(Ac_tank / Ac_hex) # Number of PERCS tubes
self.hx.n = self.n_tubes
# Calc the heat transfer Surface Area in PERCS
OD_tube = self.D_h + 2.0*self.th # Outer D of tube [ft]
SA_tube = np.pi*OD_tube*self.Len # Surface area of tube [ft^2]
SA_tot = SA_tube * self.n_tubes # Total surface area of tubes [ft^2]
self.hx.A = SA_tot / 10.7639 # m^2
# Perform calcs for HX and Tank
self.hx.calc_HX()
self.tank.calc_Tank()
# Calc the total cross-sectional Area of all tubes
Ac_tube = np.pi*(self.D_h/2.0)**2 # ft^2
Ac_tubes = np.round(Ac_tube*self.n_tubes,5) # ft^2
# Calc the length of a single node along the tubes
len_node = np.round((self.Len / 10.0),5) # ft
# Calc the thickness of a single MgCO3 section (there being 10 across)
R_hex = np.sqrt(Ac_hex/np.pi) # ft
OR_tube = OD_tube / 2.0 # ft
th_MgCO3 = '%.5g'%((R_hex - OR_tube)/10.0) # ft
# Calc the heat transfer length between all tubes and MgCO3 per node
HT_len_per_node = np.round((len_node*self.n_tubes),5) # ft
# Calc the len and elev of Pipe 717
self.elev_717 = np.round(-(15.62469 + self.elev - self.Len + self.len_diff_717),5)
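# --- Stand-alone check of the tube-count geometry used above (values are
# assumed for illustration, not a validated PERCS design): the count is the
# tank cross-section divided by the hexagonal unit cell around one tube.
import numpy as np
R_tank_demo, pitch_demo = 5.0, 0.10                 # ft
Ac_tank_demo = np.pi * R_tank_demo ** 2.0           # tank A_c [ft^2]
Ac_hex_demo = np.sqrt(3) / 2 * pitch_demo ** 2.0    # hexagon per tube [ft^2]
print(np.round(Ac_tank_demo / Ac_hex_demo))         # ~9069 tubes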
from .openmolecularsystem import OpenMolecularSystem
import simtk.unit as unit
import numpy as np
import sympy as sy
import simtk.unit as unit
import simtk.openmm as mm
import simtk.openmm.app as app
class DoubleWell(OpenMolecularSystem):
"""Particles in an double well potential
Test system with particles in a quadratic double well potential.
.. math::
Eo\\left[\\left(\\frac{x}{a}\\right)^4-2\\left(\\frac{x}{a}\\right)^2\\right]-\\frac{b}{a}x + \\frac{1}{2} k \\left(y^2 + z^2\\right)
Attributes
----------
n_particles
Number of particles
mass
Mass of particles
system
Openmm system
potential_expression
External potential expression as a sympy function.
potential_parameters
Dictionary with the values of the parameters of the potential.
Methods
-------
potential
Potential evaluation at certain coordinates.
"""
def __init__(self, n_particles=1, mass=100*unit.amu, Eo=3.0*unit.kilocalories_per_mole,
a=0.5*unit.nanometers, b=0.5*unit.kilocalories_per_mole,
k=1.0*unit.kilocalories_per_mole/unit.angstroms**2,
coordinates= None):
"""Creating a new instance of DoubleWell
A new test system is returned with the openmm system of particles in an external double
well potential.
Parameters
----------
n_particles: int
Number of particles in the system
mass: unit.Quantity
Mass of the particles (in units of mass).
Eo: unit.Quantity
Parameter of the external potential with units of energy.
a: unit.Quantity
Parameter of the external potential with units of length.
b: unit.Quantity
Parameter of the external potential with units of energy.
k: unit.Quantity
Parameter of the external potential with units of energy/length^2.
Examples
--------
>>> from uibcdf_test_systems import DoubleWell
>>> from simtk import unit
>>> double_well = DoubleWell(n_particles = 1, mass = 64 * unit.amu, Eo=4.0 * unit.kilocalories_per_mole, a=1.0 * unit.nanometers, b=0.0 * unit.kilocalories_per_mole, k=1.0 * unit.kilocalories_per_mole/unit.angstroms**2)
Notes
-----
See `corresponding documentation in the user guide regarding this class
<../../systems/double_well_potential.html>`_.
"""
super().__init__()
# Parameters
self.parameters={}
self.parameters['n_particles']=n_particles
self.parameters['mass']=mass
self.parameters['Eo']=Eo
self.parameters['a']=a
self.parameters['b']=b
self.parameters['k']=k
# OpenMM topology
self.topology = app.Topology()
try:
dummy_element = app.element.get_by_symbol('DUM')
except:
dummy_element = app.Element(0, 'DUM', 'DUM', 0.0 * unit.amu)
dummy_element.mass._value = mass.value_in_unit(unit.amu)
chain = self.topology.addChain('A')
for _ in range(n_particles):
residue = self.topology.addResidue('DUM', chain)
atom = self.topology.addAtom(name='DUM', element= dummy_element, residue=residue)
# OpenMM system
self.system = mm.System()
for _ in range(n_particles):
self.system.addParticle(dummy_element.mass)
A = Eo/(a**4)
B = -2.0*Eo/(a**2)
C = -b/a
D = k/2.0
force = mm.CustomExternalForce('A*x^4+B*x^2+C*x + D*(y^2+z^2)')
force.addGlobalParameter('A', A)
force.addGlobalParameter('B', B)
force.addGlobalParameter('C', C)
force.addGlobalParameter('D', D)
for ii in range(n_particles):
force.addParticle(ii, [])
_ = self.system.addForce(force)
# Coordinates
if coordinates is None:
coordinates = np.zeros([self.parameters['n_particles'], 3], np.float32)
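# --- Quick numerical check (plain floats standing in for the simtk Quantities):
# the coefficients A, B, C, D built in __init__ reproduce the documented
# potential Eo[(x/a)^4 - 2(x/a)^2] - (b/a)x + (k/2)(y^2 + z^2).
import numpy as np
Eo_, a_, b_, k_ = 3.0, 0.5, 0.5, 1.0
A_, B_, C_, D_ = Eo_ / a_**4, -2.0 * Eo_ / a_**2, -b_ / a_, k_ / 2.0
x_, y_, z_ = 0.3, 0.1, -0.2
direct = Eo_ * ((x_ / a_)**4 - 2 * (x_ / a_)**2) - (b_ / a_) * x_ + 0.5 * k_ * (y_**2 + z_**2)
expanded = A_ * x_**4 + B_ * x_**2 + C_ * x_ + D_ * (y_**2 + z_**2)
assert np.isclose(direct, expanded)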
"""
class for angular quadrature in 2D geometry
"""
import numpy as np
from math import pi, cos, sin
class AQ(object):
def __init__(self, sn_ord):
'''@brief Constructor of aq class
@param sn_ord Sn angular quadrature order
'''
assert sn_ord%2==0, 'SN order must be even'
# int for sn order
self._sn_ord = sn_ord
# number of directions
self._n_dir = (sn_ord+2) * sn_ord // 2  # integer division keeps the direction count an int
# dictionary for containing angular quandrature directions and weights
self._aq_data = {'omega':{},'wt':{},'dir_prods':{},'wt_tensor':{},
'bd_angle':{},'bd_vec_n':{},'refl_dir':{},'n_dir':self._n_dir}
# make aq data
self._quad2d()
# store the outward normal vectors on boundaries
self._aq_data['bd_vec_n'] = {
'xmin':np.array([-1.,0]),'xmax':np.array([1.,0]),
'ymin':np.array([0,-1.]),'ymax':np.array([0,1.])}
# get incident and reflective directions
self._boundary_info()
def _quad2d(self):
'''@brief Internal function used to calculate aq data
@param self Reference to the class
@return aq_data dictionary
'''
# initialize dictionary data
ct, quad1d, solid_angle = 0, np.polynomial.legendre.leggauss(self._sn_ord), 4.0*pi  # third value truncated in source; 4*pi assumed for the total solid angle
import os
import numpy as np
import tensorflow as tf
from cuda import custom_ops
def _get_plugin():
loc = os.path.dirname(os.path.abspath(__file__))
cu_fn = 'upfirdn_2d.cu'
return custom_ops.get_plugin(os.path.join(loc, cu_fn))
def _setup_kernel(k):
k = np.asarray(k, dtype=np.float32)
if k.ndim == 1:
k = np.outer(k, k)
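# --- Small demonstration, independent of the CUDA plugin: a 1-D tap vector
# becomes a separable 2-D kernel through the outer product, which is what
# _setup_kernel does for 1-D inputs.
import numpy as np
k1d = np.array([1., 3., 3., 1.], dtype=np.float32)
k2d = np.outer(k1d, k1d)          # shape (4, 4)
print(k2d.shape, k2d[0])          # (4, 4) [1. 3. 3. 1.]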
import autograd.numpy as anp
import numpy as np
from pymoo.util.misc import stack
from pymoo.model.problem import Problem
from pymoo.factory import get_problem
from pymoo.algorithms.nsga2 import NSGA2
from pymoo.factory import get_sampling, get_crossover, get_mutation
from pymoo.factory import get_termination
from pymoo.optimize import minimize
from pymoo.visualization.scatter import Scatter
import matplotlib.pyplot as plt
from pymoo.performance_indicator.hv import Hypervolume
class MyProblem(Problem):
def __init__(self):
super().__init__(n_var=2,
n_obj=2,
n_constr=2,
xl=anp.array([-2,-2]),
xu=anp.array([2,2]))
def _evaluate(self, x, out, *args, **kwargs):
f1 = x[:,0]**2 + x[:,1]**2
f2 = (x[:,0]-1)**2 + x[:,1]**2
g1 = 2*(x[:, 0]-0.1) * (x[:, 0]-0.9) / 0.18
g2 = - 20*(x[:, 0]-0.4) * (x[:, 0]-0.6) / 4.8
out["F"] = anp.column_stack([f1, f2])
out["G"] = anp.column_stack([g1, g2])
# --------------------------------------------------
# Pareto-front - not necessary but used for plotting
# --------------------------------------------------
def _calc_pareto_front(self, flatten=True, **kwargs):
f1_a = np.linspace(0.1**2, 0.4**2, 100)
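# --- Hand evaluation of the objectives/constraints defined in MyProblem for a
# single design point (x chosen arbitrarily); in pymoo, g <= 0 means satisfied.
import numpy as np
x_demo = np.array([[0.5, -0.5]])
f1_demo = x_demo[:, 0]**2 + x_demo[:, 1]**2
f2_demo = (x_demo[:, 0] - 1)**2 + x_demo[:, 1]**2
g1_demo = 2 * (x_demo[:, 0] - 0.1) * (x_demo[:, 0] - 0.9) / 0.18
g2_demo = -20 * (x_demo[:, 0] - 0.4) * (x_demo[:, 0] - 0.6) / 4.8
print(f1_demo, f2_demo, g1_demo, g2_demo)   # [0.5] [0.5] [~-1.78] [~0.042]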
# -*- coding: utf-8 -*-
"""
Script for assessing how the number of nulls used to generate a p-value
influences the p-value
"""
from collections import defaultdict
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from netneurotools import stats as nnstats
from parspin import simnulls, utils as putils
from parspin.plotting import savefig
plt.rcParams['svg.fonttype'] = 'none'
plt.rcParams['font.sans-serif'] = ['Myriad Pro']
plt.rcParams['font.size'] = 20.0
ROIDIR = Path('./data/raw/rois').resolve()
SIMDIR = Path('./data/derivatives/simulated').resolve()
OUTDIR = Path('./data/derivatives/supplementary/comp_nnulls')
FIGDIR = Path('./figures/supplementary/comp_nnulls')
SEED = 1234 # reproducibility
SIM = 9999 # which simulation was used to generate 10000 nulls
N_PVALS = 1000 # how many repeated draws should be done to calculate pvals
PLOTS = (
('vertex', 'fsaverage5'),
('atl-cammoun2012', 'scale500'),
('atl-schaefer2018', '1000Parcels7Networks')
)
PARCS, SCALES = zip(*PLOTS)
def pval_from_perms(actual, null):
""" Calculates p-value of `actual` based on `null` permutations
"""
# expression tail reconstructed here as the standard two-sided permutation
# p-value; the original line was truncated after np.abs(null)
return (np.sum(np.abs(null) >= np.abs(actual)) + 1) / (len(null) + 1)
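# --- Sanity check on synthetic data (illustrative only; it assumes the
# reconstructed two-sided formula above): p for |z| = 1.96 against N(0, 1)
# draws should land near 0.05.
rng = np.random.default_rng(SEED)
null_demo = rng.standard_normal(10000)
print(pval_from_perms(actual=1.96, null=null_demo))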
import cv2
import numpy as np
import glob
from os.path import join, basename
import os
import soccer.calibration.utils as utils
import matplotlib.pyplot as plt
from skimage.morphology import medial_axis
import time
path_to_data = '/home/krematas/Mountpoints/grail/data/Singleview/Soccer/Russia2018'
dataset_list = [join(path_to_data, 'adnan-januzaj-goal-england-v-belgium-match-45'), join(path_to_data, 'ahmed-fathy-s-own-goal-russia-egypt'), join(path_to_data, 'ahmed-musa-1st-goal-nigeria-iceland'), join(path_to_data, 'ahmed-musa-2nd-goal-nigeria-iceland')]
goal_dirs = [ item for item in os.listdir(path_to_data) if os.path.isdir(os.path.join(path_to_data, item)) ]
goal_dirs.sort()
goal_id = 41
dataset = join(path_to_data, goal_dirs[goal_id])
image_files = glob.glob(join(dataset, 'images', '*.jpg'))
image_files.sort()
mask_files = glob.glob(join(dataset, 'detectron', '*.png'))
mask_files.sort()
cam_data = np.load(join(dataset, 'calib', '{0}.npy'.format(basename(image_files[0])[:-4]))).item()
h, w = 1080, 1920
A, R, T = cam_data['A'], cam_data['R'], cam_data['T']
soccer_field3d, _, _, _ = utils.read_ply('/home/krematas/Documents/field.ply')
soccer_field3d, _, _ = utils.ply_to_numpy(soccer_field3d)
for i in range(0, len(image_files), 10):
print('{0} ========================================================='.format(i))
frame = cv2.imread(image_files[i])[:, :, ::-1]
mask = cv2.imread(mask_files[i])
edge_sfactor = 1.0
edges = utils.robust_edge_detection(cv2.resize(frame[:, :, ::-1], None, fx=edge_sfactor, fy=edge_sfactor))
mask = cv2.dilate(mask[:, :, 0], np.ones((25, 25), dtype=np.uint8))/255
edges = edges * (1 - mask)
start = time.time()
skel = medial_axis(edges, return_distance=False)
end = time.time()
print('skimage: {0:.4f}'.format(end-start))
start = time.time()
dist_transf = cv2.distanceTransform((1 - edges).astype(np.uint8), cv2.DIST_L2, 0)
end = time.time()
print('distance transform: {0:.4f}'.format(end-start))
start = time.time()
template, field_mask = utils.draw_field(A, R, T, h, w)
end = time.time()
print('draw: {0:.4f}'.format(end - start))
start = time.time()
II, JJ = (template > 0).nonzero()
synth_field2d = np.array([[JJ, II]])
import numpy as np
from LabPy import Core, Constants
from time import time
__all__ = ["PlaneWave", \
"GaussianWave",
"GaussianWaveFFT",
"TukeyWaveFFT",
"TmmForWaves",
"SecondOrderNLTmmForWaves"]
#===============================================================================
# PlaneWave
#===============================================================================
class PlaneWave(Core.ParamsBaseClass):
def __init__(self, params = [], **kwargs):
paramsThis = ["pwr", "overrideE0", "w0", "n", "Ly"]
self.overrideE0 = None #E0 specified in vacuum, not in first layer
self.Ly = None
super(PlaneWave, self).__init__(params + paramsThis, **kwargs)
def Solve(self, wl, th0, **kwargs):
self.wl = wl
self.th0 = th0
self.SetParams(**kwargs)
self._Precalc()
self._Solve()
def _Precalc(self):
if self.overrideE0 is None:
self.I0 = self.pwr / (self.Ly * self.w0)
self.E0 = np.sqrt(2.0 * Constants.mu0 * Constants.c * self.I0)
# E0 in vacuum, not in 1st layer
print("E0 vacuum", self.E0)
else:
self.E0 = self.overrideE0
self.k0 = 2.0 * np.pi / self.wl
self.k = self.k0 * self.n(self.wl).real
print("_Precalc", self.E0)
def _Solve(self):
self.phis = np.array([0.0])
self.kxs = np.array([self.k * np.sin(self.th0)])
self.kzs = np.array([self.k * np.cos(self.th0)])
self.expansionCoefsPhi = np.array([self.E0])
self.expansionCoefsKx = np.array([self.E0])
self.betas = (self.kxs / self.k0).real
#===============================================================================
# GaussianWave
#===============================================================================
class GaussianWave(PlaneWave):
def __init__(self, params = [], **kwargs):
paramsThis = ["integCriteria", "nPointsInteg"]
self.nPointsInteg = 30
self.integCriteria = 1e-3
super(GaussianWave, self).__init__(params + paramsThis, **kwargs)
def _Solve(self):
self._phiLim = np.arcsin(2.0 / self.w0 * np.sqrt(-np.log(self.integCriteria)) / self.k)
self.phis = np.linspace(-self._phiLim, self._phiLim, self.nPointsInteg)
if np.max(abs(self.phis + self.th0)) > np.pi / 2.0:
raise ValueError("Gaussian wave requires backward propagating waves!")
kzPs, kxPs = np.cos(self.phis) * self.k, np.sin(self.phis) * self.k
self.kxs = kxPs * np.cos(self.th0) + kzPs * np.sin(self.th0)
self.kzs = -kxPs * np.sin(self.th0) + kzPs * np.cos(self.th0)
profileSpectrum = self.E0 * 1.0 / 2.0 / np.sqrt(np.pi) * self.w0 * \
np.exp(-(kxPs) ** 2.0 * self.w0 ** 2.0 / 4.0)
self.expansionCoefsPhi = profileSpectrum * np.cos(self.phis) * self.k
self.betas = (self.kxs / self.k0).real
#===============================================================================
# WaveFFT
#===============================================================================
class WaveFFT(PlaneWave):
def __init__(self, params = [], **kwargs):
paramsThis = ["nPointsInteg", "maxPhi", "integCriteria"]
self.nPointsInteg = 30
self.maxPhi = np.radians(0.5)
self.integCriteria = 1e-3
PlaneWave.__init__(self, params + paramsThis, **kwargs)
def _FieldProfile(self, xs):
raise NotImplementedError()
def _Solve(self, maxPhiForce = None):
maxKxp = np.sin(self.maxPhi if maxPhiForce is None else maxPhiForce) * self.k
dx = np.pi / maxKxp
xs = np.arange(-0.5 * dx * self.nPointsInteg, 0.5 * dx * self.nPointsInteg, dx)
fieldProfile = self._FieldProfile(xs)
fieldProfileSpectrum = (xs[1] - xs[0]) / (2.0 * np.pi) * np.fft.fftshift(np.fft.fft(np.fft.ifftshift(fieldProfile)))
kxPs = 2.0 * np.pi * np.fft.fftshift(np.fft.fftfreq(len(fieldProfile), xs[1] - xs[0]))
self.phis = np.arcsin(kxPs / self.k)
if maxPhiForce is None and self.integCriteria is not None:
# It is first iteration, need to check if it is possible to reduce the range
# by using the integCriteria
cumSpectra = np.cumsum(abs(fieldProfileSpectrum))
cumSpectra /= np.max(cumSpectra)
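# --- Minimal illustration (detached from the classes above) of the FFT
# bookkeeping used in _Solve: fftfreq/fftshift give the spatial frequencies
# kx of the sampled profile, from which propagation angles follow as
# arcsin(kx / k). Sample count and spacing are assumed values.
import numpy as np
n_demo, dx_demo = 64, 0.1e-6                       # samples, spacing [m]
profile_demo = np.exp(-np.linspace(-3, 3, n_demo) ** 2)
spectrum_demo = np.fft.fftshift(np.fft.fft(np.fft.ifftshift(profile_demo)))
kx_demo = 2.0 * np.pi * np.fft.fftshift(np.fft.fftfreq(n_demo, dx_demo))
print(kx_demo.shape, spectrum_demo.shape)          # (64,) (64,)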
#Plot the potential of the periodic surface with a dipole source
import numpy as np
import matplotlib.pyplot as plt
from Calc_power_cresc import Pow_abs_rad, \
Pow_abs_rad_r,\
Pow_abs_rad_hori,\
Pow_sca_rad, Pow_sca_r,\
Pow_sca_hori
def Absorption(R1_cyl, R2_cyl, inv, epos, sca, vel, orientation):
phi = np.arange(0,2*np.pi,0.001)
z_1 = sca / (R1_cyl*np.exp(1j*phi) - inv)
z_2 = sca / (R2_cyl*np.exp(1j*phi) - inv)
#Geometrical and physical constants in the cylinder frame
c = 3e8
Conv = 1.602e-19/6.626e-34*2*np.pi #Conversion from eV to SI-units
omega = np.arange(0.01, 8, 0.01)
c_e = vel*c #electron velocity
plt.figure(4)
plt.clf()
plt.subplot(111)
Q = np.zeros(np.size(omega))
if orientation==1:
x_e0 = min(np.real(z_2))
x_e = x_e0 + np.sign(x_e0)*epos
for m in range(0,np.size(omega)):
Q[m] = Pow_abs_rad(inv, x_e, c_e, sca, R1_cyl, R2_cyl, omega[m])
plt.plot(omega, Q/1.6e-19)
elif orientation==3:
x_e0 = max(np.real(z_2))
x_e = x_e0 + epos
for m in range(0,np.size(omega)):
Q[m] = Pow_abs_rad_r(inv, x_e, c_e, sca, R1_cyl, R2_cyl, omega[m])
plt.plot(omega, Q/1.6e-19)
else:
y_e0 = max(np.imag(z_1))
x_e = y_e0 + epos
for m in range(0,np.size(omega)):
Q[m] = Pow_abs_rad_hori(inv, x_e, c_e, sca, R1_cyl, R2_cyl, omega[m])
plt.plot(omega, Q/1.6e-19)
plt.yscale('log')
plt.xlabel('$\omega/eV$')
plt.ylabel('Q/eV')
plt.gcf().tight_layout()
plt.figure(4).canvas.draw()
def Scattering(R1_cyl, R2_cyl, inv, epos, sca, vel, orientation):
phi = np.arange(0,2*np.pi,0.001)
z_1 = sca / (R1_cyl*np.exp(1j*phi) - inv)
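# --- Geometry sketch with made-up parameters: the complex inversion map
# z = sca / (R*exp(i*phi) - inv) sends the two circles of radius R1_cyl and
# R2_cyl into the crescent cross-section traced by z_1 and z_2 above.
import numpy as np
phi_demo = np.arange(0, 2 * np.pi, 0.01)
R1_demo, R2_demo, inv_demo, sca_demo = 1.0, 0.6, 0.4, 1.0
z1_demo = sca_demo / (R1_demo * np.exp(1j * phi_demo) - inv_demo)
z2_demo = sca_demo / (R2_demo * np.exp(1j * phi_demo) - inv_demo)
print(z1_demo.real.min(), z2_demo.real.max())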
from __future__ import print_function, division, absolute_import
import functools
import sys
import warnings
# unittest only added in 3.4 self.subTest()
if sys.version_info[0] < 3 or sys.version_info[1] < 4:
import unittest2 as unittest
else:
import unittest
# unittest.mock is not available in 2.7 (though unittest2 might contain it?)
try:
import unittest.mock as mock
except ImportError:
import mock
import matplotlib
matplotlib.use('Agg') # fix execution of tests involving matplotlib on travis
import numpy as np
import six.moves as sm
import imgaug as ia
from imgaug import augmenters as iaa
from imgaug import parameters as iap
from imgaug import dtypes as iadt
from imgaug.testutils import (array_equal_lists, keypoints_equal, reseed,
runtest_pickleable_uint8_img)
import imgaug.augmenters.arithmetic as arithmetic_lib
import imgaug.augmenters.contrast as contrast_lib
class TestAdd(unittest.TestCase):
def setUp(self):
reseed()
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_ = iaa.Add(value="test")
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
_ = iaa.Add(value=1, per_channel="test")
except Exception:
got_exception = True
assert got_exception
def test_add_zero(self):
# no add, shouldn't change anything
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Add(value=0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
def test_add_one(self):
# add > 0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Add(value=1)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images + 1
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = [images_list[0] + 1]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images + 1
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [images_list[0] + 1]
assert array_equal_lists(observed, expected)
def test_minus_one(self):
# add < 0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Add(value=-1)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images - 1
assert np.array_equal(observed, expected)
observed = aug.augment_images(images_list)
expected = [images_list[0] - 1]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images - 1
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [images_list[0] - 1]
assert array_equal_lists(observed, expected)
def test_uint8_every_possible_value(self):
# uint8, every possible addition for base value 127
for value_type in [float, int]:
for per_channel in [False, True]:
for value in np.arange(-255, 255+1):
aug = iaa.Add(value=value_type(value), per_channel=per_channel)
expected = np.clip(127 + value_type(value), 0, 255)
img = np.full((1, 1), 127, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == expected
img = np.full((1, 1, 3), 127, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert np.all(img_aug == expected)
def test_add_floats(self):
# specific tests with floats
aug = iaa.Add(value=0.75)
img = np.full((1, 1), 1, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == 2
img = np.full((1, 1), 1, dtype=np.uint16)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == 2
aug = iaa.Add(value=0.45)
img = np.full((1, 1), 1, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == 1
img = np.full((1, 1), 1, dtype=np.uint16)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == 1
def test_stochastic_parameters_as_value(self):
# test other parameters
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.Add(value=iap.DiscreteUniform(1, 10))
observed = aug.augment_images(images)
assert 100 + 1 <= np.average(observed) <= 100 + 10
aug = iaa.Add(value=iap.Uniform(1, 10))
observed = aug.augment_images(images)
assert 100 + 1 <= np.average(observed) <= 100 + 10
aug = iaa.Add(value=iap.Clip(iap.Normal(1, 1), -3, 3))
observed = aug.augment_images(images)
assert 100 - 3 <= np.average(observed) <= 100 + 3
aug = iaa.Add(value=iap.Discretize(iap.Clip(iap.Normal(1, 1), -3, 3)))
observed = aug.augment_images(images)
assert 100 - 3 <= np.average(observed) <= 100 + 3
def test_keypoints_dont_change(self):
# keypoints shouldn't be changed
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
aug = iaa.Add(value=1)
aug_det = iaa.Add(value=1).to_deterministic()
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
def test_tuple_as_value(self):
# varying values
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.Add(value=(0, 10))
aug_det = aug.to_deterministic()
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.7)
assert nb_changed_aug_det == 0
def test_per_channel(self):
# test channelwise
aug = iaa.Add(value=iap.Choice([0, 1]), per_channel=True)
observed = aug.augment_image(np.zeros((1, 1, 100), dtype=np.uint8))
uq = np.unique(observed)
assert observed.shape == (1, 1, 100)
assert 0 in uq
assert 1 in uq
assert len(uq) == 2
def test_per_channel_with_probability(self):
# test channelwise with probability
aug = iaa.Add(value=iap.Choice([0, 1]), per_channel=0.5)
seen = [0, 0]
for _ in sm.xrange(400):
observed = aug.augment_image(np.zeros((1, 1, 20), dtype=np.uint8))
assert observed.shape == (1, 1, 20)
uq = np.unique(observed)
per_channel = (len(uq) == 2)
if per_channel:
seen[0] += 1
else:
seen[1] += 1
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.Add(1)
image_aug = aug(image=image)
assert np.all(image_aug == 1)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_unusual_channel_numbers(self):
shapes = [
(1, 1, 4),
(1, 1, 5),
(1, 1, 512),
(1, 1, 513)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.Add(1)
image_aug = aug(image=image)
assert np.all(image_aug == 1)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_get_parameters(self):
# test get_parameters()
aug = iaa.Add(value=1, per_channel=False)
params = aug.get_parameters()
assert isinstance(params[0], iap.Deterministic)
assert isinstance(params[1], iap.Deterministic)
assert params[0].value == 1
assert params[1].value == 0
def test_heatmaps(self):
# test heatmaps (not affected by augmenter)
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
aug = iaa.Add(value=10)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_other_dtypes_bool(self):
image = np.zeros((3, 3), dtype=bool)
aug = iaa.Add(value=1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.Add(value=1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.Add(value=-1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.Add(value=-2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
def test_other_dtypes_uint_int(self):
for dtype in [np.uint8, np.uint16, np.int8, np.int16]:
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
image = np.full((3, 3), min_value, dtype=dtype)
aug = iaa.Add(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value + 21)
image = np.full((3, 3), max_value - 2, dtype=dtype)
aug = iaa.Add(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value - 1)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.Add(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.Add(2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(-9)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(-10)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(-11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value)
for _ in sm.xrange(10):
image = np.full((1, 1, 3), 20, dtype=dtype)
aug = iaa.Add(iap.Uniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) == 1
image = np.full((1, 1, 100), 20, dtype=dtype)
aug = iaa.Add(iap.Uniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
image = np.full((1, 1, 3), 20, dtype=dtype)
aug = iaa.Add(iap.DiscreteUniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) == 1
image = np.full((1, 1, 100), 20, dtype=dtype)
aug = iaa.Add(iap.DiscreteUniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
def test_other_dtypes_float(self):
# float
for dtype in [np.float16, np.float32]:
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
if dtype == np.float16:
atol = 1e-3 * max_value
else:
atol = 1e-9 * max_value
_allclose = functools.partial(np.allclose, atol=atol, rtol=0)
image = np.full((3, 3), min_value, dtype=dtype)
aug = iaa.Add(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value + 21)
image = np.full((3, 3), max_value - 2, dtype=dtype)
aug = iaa.Add(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, max_value - 1)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.Add(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, max_value)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.Add(2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, max_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(-9)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(-10)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(-11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value)
for _ in sm.xrange(10):
image = np.full((50, 1, 3), 0, dtype=dtype)
aug = iaa.Add(iap.Uniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 0, dtype=dtype)
aug = iaa.Add(iap.Uniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
image = np.full((50, 1, 3), 0, dtype=dtype)
aug = iaa.Add(iap.DiscreteUniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 0, dtype=dtype)
aug = iaa.Add(iap.DiscreteUniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
def test_pickleable(self):
aug = iaa.Add((0, 50), per_channel=True, random_state=1)
runtest_pickleable_uint8_img(aug, iterations=10)
class TestAddElementwise(unittest.TestCase):
def setUp(self):
reseed()
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_aug = iaa.AddElementwise(value="test")
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
_aug = iaa.AddElementwise(value=1, per_channel="test")
except Exception:
got_exception = True
assert got_exception
def test_add_zero(self):
# no add, shouldn't change anything
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.AddElementwise(value=0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
def test_add_one(self):
# add > 0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.AddElementwise(value=1)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images + 1
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = [images_list[0] + 1]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images + 1
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [images_list[0] + 1]
assert array_equal_lists(observed, expected)
def test_add_minus_one(self):
# add < 0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.AddElementwise(value=-1)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images - 1
assert np.array_equal(observed, expected)
observed = aug.augment_images(images_list)
expected = [images_list[0] - 1]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images - 1
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [images_list[0] - 1]
assert array_equal_lists(observed, expected)
def test_uint8_every_possible_value(self):
# uint8, every possible addition for base value 127
for value_type in [int]:
for per_channel in [False, True]:
for value in np.arange(-255, 255+1):
aug = iaa.AddElementwise(value=value_type(value), per_channel=per_channel)
expected = np.clip(127 + value_type(value), 0, 255)
img = np.full((1, 1), 127, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == expected
img = np.full((1, 1, 3), 127, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert np.all(img_aug == expected)
def test_stochastic_parameters_as_value(self):
# test other parameters
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.AddElementwise(value=iap.DiscreteUniform(1, 10))
observed = aug.augment_images(images)
assert np.min(observed) >= 100 + 1
assert np.max(observed) <= 100 + 10
aug = iaa.AddElementwise(value=iap.Uniform(1, 10))
observed = aug.augment_images(images)
assert np.min(observed) >= 100 + 1
assert np.max(observed) <= 100 + 10
aug = iaa.AddElementwise(value=iap.Clip(iap.Normal(1, 1), -3, 3))
observed = aug.augment_images(images)
assert np.min(observed) >= 100 - 3
assert np.max(observed) <= 100 + 3
aug = iaa.AddElementwise(value=iap.Discretize(iap.Clip(iap.Normal(1, 1), -3, 3)))
observed = aug.augment_images(images)
assert np.min(observed) >= 100 - 3
assert np.max(observed) <= 100 + 3
def test_keypoints_dont_change(self):
# keypoints shouldn't be changed
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
aug = iaa.AddElementwise(value=1)
aug_det = iaa.AddElementwise(value=1).to_deterministic()
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
def test_tuple_as_value(self):
# varying values
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.AddElementwise(value=(0, 10))
aug_det = aug.to_deterministic()
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.7)
assert nb_changed_aug_det == 0
def test_samples_change_by_spatial_location(self):
# values should change between pixels
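# count, over the flattened image, how many pairs of consecutive pixels received
# (nearly) the same value; with per-pixel sampling, >90% of the pairs should differ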
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.AddElementwise(value=(-50, 50))
nb_same = 0
nb_different = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_flat = observed_aug.flatten()
last = None
for j in sm.xrange(observed_aug_flat.size):
if last is not None:
v = observed_aug_flat[j]
if v - 0.0001 <= last <= v + 0.0001:
nb_same += 1
else:
nb_different += 1
last = observed_aug_flat[j]
assert nb_different > 0.9 * (nb_different + nb_same)
def test_per_channel(self):
# test channelwise
aug = iaa.AddElementwise(value=iap.Choice([0, 1]), per_channel=True)
observed = aug.augment_image(np.zeros((100, 100, 3), dtype=np.uint8))
sums = np.sum(observed, axis=2)
values = np.unique(sums)
assert all([(value in values) for value in [0, 1, 2, 3]])
def test_per_channel_with_probability(self):
# test channelwise with probability
aug = iaa.AddElementwise(value=iap.Choice([0, 1]), per_channel=0.5)
seen = [0, 0]
for _ in sm.xrange(400):
observed = aug.augment_image(np.zeros((20, 20, 3), dtype=np.uint8))
sums = np.sum(observed, axis=2)
values = np.unique(sums)
all_values_found = all([(value in values) for value in [0, 1, 2, 3]])
if all_values_found:
seen[0] += 1
else:
seen[1] += 1
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.AddElementwise(1)
image_aug = aug(image=image)
assert np.all(image_aug == 1)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_unusual_channel_numbers(self):
shapes = [
(1, 1, 4),
(1, 1, 5),
(1, 1, 512),
(1, 1, 513)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.AddElementwise(1)
image_aug = aug(image=image)
assert np.all(image_aug == 1)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_get_parameters(self):
# test get_parameters()
aug = iaa.AddElementwise(value=1, per_channel=False)
params = aug.get_parameters()
assert isinstance(params[0], iap.Deterministic)
assert isinstance(params[1], iap.Deterministic)
assert params[0].value == 1
assert params[1].value == 0
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.AddElementwise(value=10)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_other_dtypes_bool(self):
# bool
image = np.zeros((3, 3), dtype=bool)
aug = iaa.AddElementwise(value=1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.AddElementwise(value=1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.AddElementwise(value=-1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.AddElementwise(value=-2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
def test_other_dtypes_uint_int(self):
# uint, int
for dtype in [np.uint8, np.uint16, np.int8, np.int16]:
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
image = np.full((3, 3), min_value, dtype=dtype)
aug = iaa.AddElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value + 21)
image = np.full((3, 3), max_value - 2, dtype=dtype)
aug = iaa.AddElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value - 1)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.AddElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.AddElementwise(2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(-9)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(-10)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(-11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value)
for _ in sm.xrange(10):
image = np.full((5, 5, 3), 20, dtype=dtype)
aug = iaa.AddElementwise(iap.Uniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
assert np.all(image_aug[..., 0] == image_aug[..., 1])
image = np.full((1, 1, 100), 20, dtype=dtype)
aug = iaa.AddElementwise(iap.Uniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
image = np.full((5, 5, 3), 20, dtype=dtype)
aug = iaa.AddElementwise(iap.DiscreteUniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
assert np.all(image_aug[..., 0] == image_aug[..., 1])
image = np.full((1, 1, 100), 20, dtype=dtype)
aug = iaa.AddElementwise(iap.DiscreteUniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
def test_other_dtypes_float(self):
# float
for dtype in [np.float16, np.float32]:
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
if dtype == np.float16:
atol = 1e-3 * max_value
else:
atol = 1e-9 * max_value
_allclose = functools.partial(np.allclose, atol=atol, rtol=0)
image = np.full((3, 3), min_value, dtype=dtype)
aug = iaa.AddElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value + 21)
image = np.full((3, 3), max_value - 2, dtype=dtype)
aug = iaa.AddElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, max_value - 1)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.AddElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, max_value)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.AddElementwise(2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, max_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(-9)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(-10)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(-11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value)
for _ in sm.xrange(10):
image = np.full((50, 1, 3), 0, dtype=dtype)
aug = iaa.AddElementwise(iap.Uniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert not np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 0, dtype=dtype)
aug = iaa.AddElementwise(iap.Uniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
image = np.full((50, 1, 3), 0, dtype=dtype)
aug = iaa.AddElementwise(iap.DiscreteUniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert not np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 0, dtype=dtype)
aug = iaa.AddElementwise(iap.DiscreteUniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
def test_pickleable(self):
aug = iaa.AddElementwise((0, 50), per_channel=True, random_state=1)
runtest_pickleable_uint8_img(aug, iterations=2)
class TestAdditiveGaussianNoise(unittest.TestCase):
def setUp(self):
reseed()
def test_loc_zero_scale_zero(self):
# no noise, shouldn't change anything
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
images = np.array([base_img])
aug = iaa.AdditiveGaussianNoise(loc=0, scale=0)
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
def test_loc_zero_scale_nonzero(self):
# zero-centered noise
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
images = np.array([base_img])
images_list = [base_img]
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
aug = iaa.AdditiveGaussianNoise(loc=0, scale=0.2 * 255)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
assert not np.array_equal(observed, images)
observed = aug_det.augment_images(images)
assert not np.array_equal(observed, images)
observed = aug.augment_images(images_list)
assert not array_equal_lists(observed, images_list)
observed = aug_det.augment_images(images_list)
assert not array_equal_lists(observed, images_list)
observed = aug.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints)
observed = aug_det.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints)
def test_std_dev_of_added_noise_matches_scale(self):
# std correct?
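# sample a single pixel 1000 times; the std of the results should roughly match
# scale=0.2*255 (the wide bounds allow for clipping to [0, 255])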
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=0, scale=0.2 * 255)
images = np.ones((1, 1, 1, 1), dtype=np.uint8) * 128
nb_iterations = 1000
values = []
for i in sm.xrange(nb_iterations):
images_aug = aug.augment_images(images)
values.append(images_aug[0, 0, 0, 0])
values = np.array(values)
assert np.min(values) == 0
assert 0.1 < np.std(values) / 255.0 < 0.4
def test_nonzero_loc(self):
# non-zero loc
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=0.25 * 255, scale=0.01 * 255)
images = np.ones((1, 1, 1, 1), dtype=np.uint8) * 128
nb_iterations = 1000
values = []
for i in sm.xrange(nb_iterations):
images_aug = aug.augment_images(images)
values.append(images_aug[0, 0, 0, 0] - 128)
values = np.array(values)
assert 54 < np.average(values) < 74 # loc=0.25 should be around 255*0.25=64 average
def test_tuple_as_loc(self):
# varying locs
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=(0, 0.5 * 255), scale=0.0001 * 255)
aug_det = aug.to_deterministic()
images = np.ones((1, 1, 1, 1), dtype=np.uint8) * 128
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.95)
assert nb_changed_aug_det == 0
def test_stochastic_parameter_as_loc(self):
# varying locs by stochastic param
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=iap.Choice([-20, 20]), scale=0.0001 * 255)
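# with a tiny scale, each image mean should land close to 128-20 or 128+20,
# each case in roughly half of the 200 runs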
images = np.ones((1, 1, 1, 1), dtype=np.uint8) * 128
seen = [0, 0]
for i in sm.xrange(200):
observed = aug.augment_images(images)
mean = np.mean(observed)
diff_m20 = abs(mean - (128-20))
diff_p20 = abs(mean - (128+20))
if diff_m20 <= 1:
seen[0] += 1
elif diff_p20 <= 1:
seen[1] += 1
else:
assert False
assert 75 < seen[0] < 125
assert 75 < seen[1] < 125
def test_tuple_as_scale(self):
# varying stds
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=0, scale=(0.01 * 255, 0.2 * 255))
aug_det = aug.to_deterministic()
images = np.ones((1, 1, 1, 1), dtype=np.uint8) * 128
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.95)
assert nb_changed_aug_det == 0
def test_stochastic_parameter_as_scale(self):
# varying stds by stochastic param
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=0, scale=iap.Choice([1, 20]))
images = np.ones((1, 20, 20, 1), dtype=np.uint8) * 128
seen = [0, 0, 0]
for i in sm.xrange(200):
observed = aug.augment_images(images)
std = np.std(observed.astype(np.int32) - 128)
diff_1 = abs(std - 1)
diff_20 = abs(std - 20)
if diff_1 <= 2:
seen[0] += 1
elif diff_20 <= 5:
seen[1] += 1
else:
seen[2] += 1
assert seen[2] <= 5
assert 75 < seen[0] < 125
assert 75 < seen[1] < 125
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_ = iaa.AdditiveGaussianNoise(loc="test")
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
_ = iaa.AdditiveGaussianNoise(scale="test")
except Exception:
got_exception = True
assert got_exception
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=0.5, scale=10)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_pickleable(self):
aug = iaa.AdditiveGaussianNoise(scale=(0.1, 10), per_channel=True,
random_state=1)
runtest_pickleable_uint8_img(aug, iterations=2)
class TestDropout(unittest.TestCase):
def setUp(self):
reseed()
def test_p_is_zero(self):
# no dropout, shouldn't change anything
base_img = np.ones((512, 512, 1), dtype=np.uint8) * 255
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Dropout(p=0)
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
def test_p_is_one(self):
# 100% dropout, should drop everything
base_img = np.ones((512, 512, 1), dtype=np.uint8) * 255
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Dropout(p=1.0)
observed = aug.augment_images(images)
expected = np.zeros((1, 512, 512, 1), dtype=np.uint8)
assert np.array_equal(observed, expected)
observed = aug.augment_images(images_list)
expected = [np.zeros((512, 512, 1), dtype=np.uint8)]
assert array_equal_lists(observed, expected)
def test_p_is_50_percent(self):
# 50% dropout
base_img = np.ones((512, 512, 1), dtype=np.uint8) * 255
images = np.array([base_img])
images_list = [base_img]
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
aug = iaa.Dropout(p=0.5)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
assert not np.array_equal(observed, images)
percent_nonzero = len(observed.flatten().nonzero()[0]) \
/ (base_img.shape[0] * base_img.shape[1] * base_img.shape[2])
assert 0.35 <= (1 - percent_nonzero) <= 0.65
observed = aug_det.augment_images(images)
assert not np.array_equal(observed, images)
percent_nonzero = len(observed.flatten().nonzero()[0]) \
/ (base_img.shape[0] * base_img.shape[1] * base_img.shape[2])
assert 0.35 <= (1 - percent_nonzero) <= 0.65
observed = aug.augment_images(images_list)
assert not array_equal_lists(observed, images_list)
percent_nonzero = len(observed[0].flatten().nonzero()[0]) \
/ (base_img.shape[0] * base_img.shape[1] * base_img.shape[2])
assert 0.35 <= (1 - percent_nonzero) <= 0.65
observed = aug_det.augment_images(images_list)
assert not array_equal_lists(observed, images_list)
percent_nonzero = len(observed[0].flatten().nonzero()[0]) \
/ (base_img.shape[0] * base_img.shape[1] * base_img.shape[2])
assert 0.35 <= (1 - percent_nonzero) <= 0.65
observed = aug.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints)
observed = aug_det.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints)
def test_tuple_as_p(self):
# varying p
aug = iaa.Dropout(p=(0.0, 1.0))
aug_det = aug.to_deterministic()
images = np.ones((1, 8, 8, 1), dtype=np.uint8) * 255
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.95)
assert nb_changed_aug_det == 0
def test_list_as_p(self):
aug = iaa.Dropout(p=[0.0, 0.5, 1.0])
images = np.ones((1, 20, 20, 1), dtype=np.uint8) * 255
nb_seen = [0, 0, 0, 0]
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
n_dropped = np.sum(observed_aug == 0)
p_observed = n_dropped / observed_aug.size
if 0 <= p_observed <= 0.01:
nb_seen[0] += 1
elif 0.5 - 0.05 <= p_observed <= 0.5 + 0.05:
nb_seen[1] += 1
elif 1.0-0.01 <= p_observed <= 1.0:
nb_seen[2] += 1
else:
nb_seen[3] += 1
assert np.allclose(nb_seen[0:3], nb_iterations*0.33, rtol=0, atol=75)
assert nb_seen[3] < 30
def test_stochastic_parameter_as_p(self):
# varying p by stochastic parameter
aug = iaa.Dropout(p=iap.Binomial(1-iap.Choice([0.0, 0.5])))
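# the sampled mask drops either ~0% or ~50% of the pixels per image (Choice([0.0, 0.5])),
# so each of the two bins should be hit in roughly half of the 400 runs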
images = np.ones((1, 20, 20, 1), dtype=np.uint8) * 255
seen = [0, 0, 0]
for i in sm.xrange(400):
observed = aug.augment_images(images)
p = np.mean(observed == 0)
if 0.4 < p < 0.6:
seen[0] += 1
elif p < 0.1:
seen[1] += 1
else:
seen[2] += 1
assert seen[2] <= 10
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
def test___init___bad_datatypes(self):
# test exception for wrong parameter datatype
got_exception = False
try:
_aug = iaa.Dropout(p="test")
except Exception:
got_exception = True
assert got_exception
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.Dropout(p=1.0)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_pickleable(self):
aug = iaa.Dropout(p=0.5, per_channel=True, random_state=1)
runtest_pickleable_uint8_img(aug, iterations=3)
class TestCoarseDropout(unittest.TestCase):
def setUp(self):
reseed()
def test_p_is_zero(self):
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 100
aug = iaa.CoarseDropout(p=0, size_px=4, size_percent=None, per_channel=False, min_size=4)
observed = aug.augment_image(base_img)
expected = base_img
assert np.array_equal(observed, expected)
def test_p_is_one(self):
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 100
aug = iaa.CoarseDropout(p=1.0, size_px=4, size_percent=None, per_channel=False, min_size=4)
observed = aug.augment_image(base_img)
expected = np.zeros_like(base_img)
assert np.array_equal(observed, expected)
def test_p_is_50_percent(self):
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 100
aug = iaa.CoarseDropout(p=0.5, size_px=1, size_percent=None, per_channel=False, min_size=1)
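# size_px=1: the dropout mask is sampled at 1x1 and upscaled, so each image is
# either fully dropped (average 0) or fully kept (average 100)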
averages = []
for _ in sm.xrange(50):
observed = aug.augment_image(base_img)
averages.append(np.average(observed))
assert all([v in [0, 100] for v in averages])
assert 50 - 20 < np.average(averages) < 50 + 20
def test_size_percent(self):
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 100
aug = iaa.CoarseDropout(p=0.5, size_px=None, size_percent=0.001, per_channel=False, min_size=1)
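# size_percent=0.001 of a 16px side falls below one pixel and is clamped to a 1x1
# mask (min_size=1), so again each image is fully dropped or fully kept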
averages = []
for _ in sm.xrange(50):
observed = aug.augment_image(base_img)
averages.append(np.average(observed))
assert all([v in [0, 100] for v in averages])
assert 50 - 20 < np.average(averages) < 50 + 20
def test_per_channel(self):
aug = iaa.CoarseDropout(p=0.5, size_px=1, size_percent=None, per_channel=True, min_size=1)
base_img = np.ones((4, 4, 3), dtype=np.uint8) * 100
found = False
for _ in sm.xrange(100):
observed = aug.augment_image(base_img)
avgs = np.average(observed, axis=(0, 1))
if len(set(avgs)) >= 2:
found = True
break
assert found
def test_stochastic_parameter_as_p(self):
# varying p by stochastic parameter
aug = iaa.CoarseDropout(p=iap.Binomial(1-iap.Choice([0.0, 0.5])), size_px=50)
images = np.ones((1, 100, 100, 1), dtype=np.uint8) * 255
seen = [0, 0, 0]
for i in sm.xrange(400):
observed = aug.augment_images(images)
p = np.mean(observed == 0)
if 0.4 < p < 0.6:
seen[0] += 1
elif p < 0.1:
seen[1] += 1
else:
seen[2] += 1
assert seen[2] <= 10
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
def test___init___bad_datatypes(self):
# test exception for bad parameters
got_exception = False
try:
_ = iaa.CoarseDropout(p="test")
except Exception:
got_exception = True
assert got_exception
def test___init___size_px_and_size_percent_both_none(self):
got_exception = False
try:
_ = iaa.CoarseDropout(p=0.5, size_px=None, size_percent=None)
except Exception:
got_exception = True
assert got_exception
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.CoarseDropout(p=1.0, size_px=2)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_pickleable(self):
aug = iaa.CoarseDropout(p=0.5, size_px=10, per_channel=True,
random_state=1)
runtest_pickleable_uint8_img(aug, iterations=10, shape=(40, 40, 3))
class TestDropout2d(unittest.TestCase):
def setUp(self):
reseed()
def test___init___defaults(self):
aug = iaa.Dropout2d(p=0)
assert isinstance(aug.p, iap.Binomial)
assert np.isclose(aug.p.p.value, 1.0)
assert aug.nb_keep_channels == 1
def test___init___p_is_float(self):
aug = iaa.Dropout2d(p=0.7)
assert isinstance(aug.p, iap.Binomial)
assert np.isclose(aug.p.p.value, 0.3)
assert aug.nb_keep_channels == 1
def test___init___nb_keep_channels_is_int(self):
aug = iaa.Dropout2d(p=0, nb_keep_channels=2)
assert isinstance(aug.p, iap.Binomial)
assert np.isclose(aug.p.p.value, 1.0)
assert aug.nb_keep_channels == 2
def test_no_images_in_batch(self):
aug = iaa.Dropout2d(p=0.0, nb_keep_channels=0)
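# the batch contains only heatmaps, no images; with p=0.0 the heatmaps should
# pass through unchanged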
heatmaps = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
heatmaps = ia.HeatmapsOnImage(heatmaps, shape=(2, 2, 3))
heatmaps_aug = aug(heatmaps=heatmaps)
assert np.allclose(heatmaps_aug.arr_0to1, heatmaps.arr_0to1)
def test_p_is_1(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=0)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == image.dtype.name
assert np.sum(image_aug) == 0
def test_p_is_1_heatmaps(self):
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=0)
arr = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
hm = ia.HeatmapsOnImage(arr, shape=(2, 2, 3))
heatmaps_aug = aug(heatmaps=hm)
assert np.allclose(heatmaps_aug.arr_0to1, 0.0)
def test_p_is_1_segmentation_maps(self):
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=0)
arr = np.int32([
[0, 1],
[0, 1]
])
segmaps = ia.SegmentationMapsOnImage(arr, shape=(2, 2, 3))
segmaps_aug = aug(segmentation_maps=segmaps)
assert np.allclose(segmaps_aug.arr, 0.0)
def test_p_is_1_cbaois(self):
cbaois = [
ia.KeypointsOnImage([ia.Keypoint(x=0, y=1)], shape=(2, 2, 3)),
ia.BoundingBoxesOnImage([ia.BoundingBox(x1=0, y1=1, x2=2, y2=3)],
shape=(2, 2, 3)),
ia.PolygonsOnImage([ia.Polygon([(0, 0), (1, 0), (1, 1)])],
shape=(2, 2, 3)),
ia.LineStringsOnImage([ia.LineString([(0, 0), (1, 0)])],
shape=(2, 2, 3))
]
cbaoi_names = ["keypoints", "bounding_boxes", "polygons",
"line_strings"]
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=0)
for name, cbaoi in zip(cbaoi_names, cbaois):
with self.subTest(datatype=name):
cbaoi_aug = aug(**{name: cbaoi})
assert cbaoi_aug.shape == (2, 2, 3)
assert cbaoi_aug.items == []
def test_p_is_1_heatmaps__keep_one_channel(self):
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=1)
arr = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
hm = ia.HeatmapsOnImage(arr, shape=(2, 2, 3))
heatmaps_aug = aug(heatmaps=hm)
assert np.allclose(heatmaps_aug.arr_0to1, hm.arr_0to1)
def test_p_is_1_segmentation_maps__keep_one_channel(self):
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=1)
arr = np.int32([
[0, 1],
[0, 1]
])
segmaps = ia.SegmentationMapsOnImage(arr, shape=(2, 2, 3))
segmaps_aug = aug(segmentation_maps=segmaps)
assert np.allclose(segmaps_aug.arr, segmaps.arr)
def test_p_is_1_cbaois__keep_one_channel(self):
cbaois = [
ia.KeypointsOnImage([ia.Keypoint(x=0, y=1)], shape=(2, 2, 3)),
ia.BoundingBoxesOnImage([ia.BoundingBox(x1=0, y1=1, x2=2, y2=3)],
shape=(2, 2, 3)),
ia.PolygonsOnImage([ia.Polygon([(0, 0), (1, 0), (1, 1)])],
shape=(2, 2, 3)),
ia.LineStringsOnImage([ia.LineString([(0, 0), (1, 0)])],
shape=(2, 2, 3))
]
cbaoi_names = ["keypoints", "bounding_boxes", "polygons",
"line_strings"]
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=1)
for name, cbaoi in zip(cbaoi_names, cbaois):
with self.subTest(datatype=name):
cbaoi_aug = aug(**{name: cbaoi})
assert cbaoi_aug.shape == (2, 2, 3)
assert np.allclose(
cbaoi_aug.items[0].coords,
cbaoi.items[0].coords
)
def test_p_is_0(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
aug = iaa.Dropout2d(p=0.0, nb_keep_channels=0)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == image.dtype.name
assert np.array_equal(image_aug, image)
def test_p_is_0_heatmaps(self):
aug = iaa.Dropout2d(p=0.0, nb_keep_channels=0)
arr = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
hm = ia.HeatmapsOnImage(arr, shape=(2, 2, 3))
heatmaps_aug = aug(heatmaps=hm)
assert np.allclose(heatmaps_aug.arr_0to1, hm.arr_0to1)
def test_p_is_0_segmentation_maps(self):
aug = iaa.Dropout2d(p=0.0, nb_keep_channels=0)
arr = np.int32([
[0, 1],
[0, 1]
])
segmaps = ia.SegmentationMapsOnImage(arr, shape=(2, 2, 3))
segmaps_aug = aug(segmentation_maps=segmaps)
assert np.allclose(segmaps_aug.arr, segmaps.arr)
def test_p_is_0_cbaois(self):
cbaois = [
ia.KeypointsOnImage([ia.Keypoint(x=0, y=1)], shape=(2, 2, 3)),
ia.BoundingBoxesOnImage([ia.BoundingBox(x1=0, y1=1, x2=2, y2=3)],
shape=(2, 2, 3)),
ia.PolygonsOnImage([ia.Polygon([(0, 0), (1, 0), (1, 1)])],
shape=(2, 2, 3)),
ia.LineStringsOnImage([ia.LineString([(0, 0), (1, 0)])],
shape=(2, 2, 3))
]
cbaoi_names = ["keypoints", "bounding_boxes", "polygons",
"line_strings"]
aug = iaa.Dropout2d(p=0.0, nb_keep_channels=0)
for name, cbaoi in zip(cbaoi_names, cbaois):
with self.subTest(datatype=name):
cbaoi_aug = aug(**{name: cbaoi})
assert cbaoi_aug.shape == (2, 2, 3)
assert np.allclose(
cbaoi_aug.items[0].coords,
cbaoi.items[0].coords
)
def test_p_is_075(self):
image = np.full((1, 1, 3000), 255, dtype=np.uint8)
aug = iaa.Dropout2d(p=0.75, nb_keep_channels=0)
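# roughly 75% of the 3000 channels should be dropped; atol=75 leaves a generous
# margin around the expected count of 2250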
image_aug = aug(image=image)
nb_kept = np.sum(image_aug == 255)
nb_dropped = image.shape[2] - nb_kept
assert image_aug.shape == image.shape
assert image_aug.dtype.name == image.dtype.name
assert np.isclose(nb_dropped, image.shape[2]*0.75, atol=75)
def test_force_nb_keep_channels(self):
image = np.full((1, 1, 3), 255, dtype=np.uint8)
images = np.array([image] * 1000)
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=1)
images_aug = aug(images=images)
ids_kept = [np.nonzero(image[0, 0, :]) for image in images_aug]
ids_kept_uq = np.unique(ids_kept)
nb_kept = np.sum(images_aug == 255)
nb_dropped = (len(images) * images.shape[3]) - nb_kept
assert images_aug.shape == images.shape
assert images_aug.dtype.name == images.dtype.name
# on average, keep 1 of 3 channels
# due to p=1.0 we expect to get exactly 2/3 dropped
assert np.isclose(nb_dropped,
(len(images)*images.shape[3])*(2/3), atol=1)
# every channel dropped at least once, i.e. which one is kept is random
assert sorted(ids_kept_uq.tolist()) == [0, 1, 2]
def test_some_images_below_nb_keep_channels(self):
image_2c = np.full((1, 1, 2), 255, dtype=np.uint8)
image_3c = np.full((1, 1, 3), 255, dtype=np.uint8)
images = [image_2c if i % 2 == 0 else image_3c
for i in sm.xrange(100)]
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=2)
images_aug = aug(images=images)
for i, image_aug in enumerate(images_aug):
assert np.sum(image_aug == 255) == 2
if i % 2 == 0:
assert np.sum(image_aug == 0) == 0
else:
assert np.sum(image_aug == 0) == 1
def test_all_images_below_nb_keep_channels(self):
image = np.full((1, 1, 2), 255, dtype=np.uint8)
images = np.array([image] * 100)
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=3)
images_aug = aug(images=images)
nb_kept = np.sum(images_aug == 255)
nb_dropped = (len(images) * images.shape[3]) - nb_kept
assert nb_dropped == 0
def test_get_parameters(self):
aug = iaa.Dropout2d(p=0.7, nb_keep_channels=2)
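# consistent with test___init___p_is_float: p=0.7 is converted to a Binomial over
# the keep-probability 0.3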
params = aug.get_parameters()
assert isinstance(params[0], iap.Binomial)
assert np.isclose(params[0].p.value, 0.3)
assert params[1] == 2
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.full(shape, 255, dtype=np.uint8)
aug = iaa.Dropout2d(1.0, nb_keep_channels=0)
image_aug = aug(image=image)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_other_dtypes_bool(self):
image = np.full((1, 1, 10), 1, dtype=bool)
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=3)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == "bool"
assert np.sum(image_aug == 1) == 3
assert np.sum(image_aug == 0) == 7
def test_other_dtypes_uint_int(self):
dts = ["uint8", "uint16", "uint32", "uint64",
"int8", "int16", "int32", "int64"]
for dt in dts:
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
values = [min_value, int(center_value), max_value]
for value in values:
with self.subTest(dtype=dt, value=value):
image = np.full((1, 1, 10), value, dtype=dt)
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=3)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == dt
if value == 0:
assert np.sum(image_aug == value) == 10
else:
assert np.sum(image_aug == value) == 3
assert np.sum(image_aug == 0) == 7
def test_other_dtypes_float(self):
dts = ["float16", "float32", "float64", "float128"]
for dt in dts:
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
values = [min_value, -10.0, center_value, 10.0, max_value]
atol = 1e-3*max_value if dt == "float16" else 1e-9 * max_value
_isclose = functools.partial(np.isclose, atol=atol, rtol=0)
for value in values:
with self.subTest(dtype=dt, value=value):
image = np.full((1, 1, 10), value, dtype=dt)
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=3)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == dt
if _isclose(value, 0.0):
assert np.sum(_isclose(image_aug, value)) == 10
else:
assert (
np.sum(_isclose(image_aug, np.float128(value)))
== 3)
assert np.sum(image_aug == 0) == 7
def test_pickleable(self):
aug = iaa.Dropout2d(p=0.5, random_state=1)
runtest_pickleable_uint8_img(aug, iterations=3, shape=(1, 1, 50))
class TestTotalDropout(unittest.TestCase):
def setUp(self):
reseed()
def test___init___p(self):
aug = iaa.TotalDropout(p=0)
assert isinstance(aug.p, iap.Binomial)
assert np.isclose(aug.p.p.value, 1.0)
def test_p_is_1(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
aug = iaa.TotalDropout(p=1.0)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == image.dtype.name
assert np.sum(image_aug) == 0
def test_p_is_1_multiple_images_list(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
images = [image, image, image]
aug = iaa.TotalDropout(p=1.0)
images_aug = aug(images=images)
for image_aug, image_ in zip(images_aug, images):
assert image_aug.shape == image_.shape
assert image_aug.dtype.name == image_.dtype.name
assert np.sum(image_aug) == 0
def test_p_is_1_multiple_images_array(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
images = np.array([image, image, image], dtype=np.uint8)
aug = iaa.TotalDropout(p=1.0)
images_aug = aug(images=images)
assert images_aug.shape == images.shape
assert images_aug.dtype.name == images.dtype.name
assert np.sum(images_aug) == 0
def test_p_is_1_heatmaps(self):
aug = iaa.TotalDropout(p=1.0)
arr = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
hm = ia.HeatmapsOnImage(arr, shape=(2, 2, 3))
heatmaps_aug = aug(heatmaps=hm)
assert np.allclose(heatmaps_aug.arr_0to1, 0.0)
def test_p_is_1_segmentation_maps(self):
aug = iaa.TotalDropout(p=1.0)
arr = np.int32([
[0, 1],
[0, 1]
])
segmaps = ia.SegmentationMapsOnImage(arr, shape=(2, 2, 3))
segmaps_aug = aug(segmentation_maps=segmaps)
assert np.allclose(segmaps_aug.arr, 0.0)
def test_p_is_1_cbaois(self):
cbaois = [
ia.KeypointsOnImage([ia.Keypoint(x=0, y=1)], shape=(2, 2, 3)),
ia.BoundingBoxesOnImage([ia.BoundingBox(x1=0, y1=1, x2=2, y2=3)],
shape=(2, 2, 3)),
ia.PolygonsOnImage([ia.Polygon([(0, 0), (1, 0), (1, 1)])],
shape=(2, 2, 3)),
ia.LineStringsOnImage([ia.LineString([(0, 0), (1, 0)])],
shape=(2, 2, 3))
]
cbaoi_names = ["keypoints", "bounding_boxes", "polygons",
"line_strings"]
aug = iaa.TotalDropout(p=1.0)
for name, cbaoi in zip(cbaoi_names, cbaois):
with self.subTest(datatype=name):
cbaoi_aug = aug(**{name: cbaoi})
assert cbaoi_aug.shape == (2, 2, 3)
assert cbaoi_aug.items == []
def test_p_is_0(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
aug = iaa.TotalDropout(p=0.0)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == image.dtype.name
assert np.array_equal(image_aug, image)
def test_p_is_0_multiple_images_list(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
images = [image, image, image]
aug = iaa.TotalDropout(p=0.0)
images_aug = aug(images=images)
for image_aug, image_ in zip(images_aug, images):
assert image_aug.shape == image_.shape
assert image_aug.dtype.name == image_.dtype.name
assert np.array_equal(image_aug, image_)
def test_p_is_0_multiple_images_array(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
images = np.array([image, image, image], dtype=np.uint8)
aug = iaa.TotalDropout(p=0.0)
images_aug = aug(images=images)
for image_aug, image_ in zip(images_aug, images):
assert image_aug.shape == image_.shape
assert image_aug.dtype.name == image_.dtype.name
assert np.array_equal(image_aug, image_)
def test_p_is_0_heatmaps(self):
aug = iaa.TotalDropout(p=0.0)
arr = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
hm = ia.HeatmapsOnImage(arr, shape=(2, 2, 3))
heatmaps_aug = aug(heatmaps=hm)
assert np.allclose(heatmaps_aug.arr_0to1, hm.arr_0to1)
def test_p_is_0_segmentation_maps(self):
aug = iaa.TotalDropout(p=0.0)
arr = np.int32([
[0, 1],
[0, 1]
])
segmaps = ia.SegmentationMapsOnImage(arr, shape=(2, 2, 3))
segmaps_aug = aug(segmentation_maps=segmaps)
assert np.allclose(segmaps_aug.arr, segmaps.arr)
def test_p_is_0_cbaois(self):
cbaois = [
ia.KeypointsOnImage([ia.Keypoint(x=0, y=1)], shape=(2, 2, 3)),
ia.BoundingBoxesOnImage([ia.BoundingBox(x1=0, y1=1, x2=2, y2=3)],
shape=(2, 2, 3)),
ia.PolygonsOnImage([ia.Polygon([(0, 0), (1, 0), (1, 1)])],
shape=(2, 2, 3)),
ia.LineStringsOnImage([ia.LineString([(0, 0), (1, 0)])],
shape=(2, 2, 3))
]
cbaoi_names = ["keypoints", "bounding_boxes", "polygons",
"line_strings"]
aug = iaa.TotalDropout(p=0.0)
for name, cbaoi in zip(cbaoi_names, cbaois):
with self.subTest(datatype=name):
cbaoi_aug = aug(**{name: cbaoi})
assert cbaoi_aug.shape == (2, 2, 3)
assert np.allclose(
cbaoi_aug.items[0].coords,
cbaoi.items[0].coords
)
def test_p_is_075_multiple_images_list(self):
images = [np.full((1, 1, 1), 255, dtype=np.uint8)] * 3000
aug = iaa.TotalDropout(p=0.75)
images_aug = aug(images=images)
nb_kept = np.sum([np.sum(image_aug == 255) for image_aug in images_aug])
nb_dropped = len(images) - nb_kept
for image_aug in images_aug:
assert image_aug.shape == images[0].shape
assert image_aug.dtype.name == images[0].dtype.name
assert np.isclose(nb_dropped, len(images)*0.75, atol=75)
def test_p_is_075_multiple_images_array(self):
images = np.full((3000, 1, 1, 1), 255, dtype=np.uint8)
aug = iaa.TotalDropout(p=0.75)
images_aug = aug(images=images)
nb_kept = np.sum(images_aug == 255)
nb_dropped = len(images) - nb_kept
assert images_aug.shape == images.shape
assert images_aug.dtype.name == images.dtype.name
assert np.isclose(nb_dropped, len(images)*0.75, atol=75)
def test_get_parameters(self):
aug = iaa.TotalDropout(p=0.0)
params = aug.get_parameters()
assert params[0] is aug.p
def test_unusual_channel_numbers(self):
shapes = [
(5, 1, 1, 4),
(5, 1, 1, 5),
(5, 1, 1, 512),
(5, 1, 1, 513)
]
for shape in shapes:
with self.subTest(shape=shape):
images = np.zeros(shape, dtype=np.uint8)
aug = iaa.TotalDropout(1.0)
images_aug = aug(images=images)
assert np.all(images_aug == 0)
assert images_aug.dtype.name == "uint8"
assert images_aug.shape == shape
def test_zero_sized_axes(self):
shapes = [
(5, 0, 0),
(5, 0, 1),
(5, 1, 0),
(5, 0, 1, 0),
(5, 1, 0, 0),
(5, 0, 1, 1),
(5, 1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
images = np.full(shape, 255, dtype=np.uint8)
aug = iaa.TotalDropout(1.0)
images_aug = aug(images=images)
assert images_aug.dtype.name == "uint8"
assert images_aug.shape == images.shape
def test_other_dtypes_bool(self):
image = np.full((1, 1, 10), 1, dtype=bool)
aug = iaa.TotalDropout(p=1.0)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == "bool"
assert np.sum(image_aug == 1) == 0
def test_other_dtypes_uint_int(self):
dts = ["uint8", "uint16", "uint32", "uint64",
"int8", "int16", "int32", "int64"]
for dt in dts:
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
values = [min_value, int(center_value), max_value]
for value in values:
for p in [1.0, 0.0]:
with self.subTest(dtype=dt, value=value, p=p):
images = np.full((5, 1, 1, 3), value, dtype=dt)
aug = iaa.TotalDropout(p=p)
images_aug = aug(images=images)
assert images_aug.shape == images.shape
assert images_aug.dtype.name == dt
if np.isclose(p, 1.0) or value == 0:
assert np.sum(images_aug == 0) == 5*3
else:
assert np.sum(images_aug == value) == 5*3
def test_other_dtypes_float(self):
dts = ["float16", "float32", "float64", "float128"]
for dt in dts:
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
values = [min_value, -10.0, center_value, 10.0, max_value]
atol = 1e-3*max_value if dt == "float16" else 1e-9 * max_value
_isclose = functools.partial(np.isclose, atol=atol, rtol=0)
for value in values:
for p in [1.0, 0.0]:
with self.subTest(dtype=dt, value=value, p=p):
images = np.full((5, 1, 1, 3), value, dtype=dt)
aug = iaa.TotalDropout(p=p)
images_aug = aug(images=images)
assert images_aug.shape == images.shape
assert images_aug.dtype.name == dt
if np.isclose(p, 1.0):
assert np.sum(_isclose(images_aug, 0.0)) == 5*3
else:
assert (
np.sum(_isclose(images_aug, np.float128(value)))
== 5*3)
def test_pickleable(self):
aug = iaa.TotalDropout(p=0.5, random_state=1)
runtest_pickleable_uint8_img(aug, iterations=30, shape=(4, 4, 2))
class TestMultiply(unittest.TestCase):
def setUp(self):
reseed()
def test_mul_is_one(self):
# no multiply, shouldn't change anything
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Multiply(mul=1.0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
def test_mul_is_above_one(self):
# multiply >1.0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Multiply(mul=1.2)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 120
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 120]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 120
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 120]
assert array_equal_lists(observed, expected)
def test_mul_is_below_one(self):
# multiply <1.0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Multiply(mul=0.8)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 80
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 80]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 80
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 80]
assert array_equal_lists(observed, expected)
def test_keypoints_dont_change(self):
# keypoints shouldn't be changed
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
aug = iaa.Multiply(mul=1.2)
aug_det = iaa.Multiply(mul=1.2).to_deterministic()
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
def test_tuple_as_mul(self):
# varying multiply factors
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.Multiply(mul=(0, 2.0))
aug_det = aug.to_deterministic()
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.95)
assert nb_changed_aug_det == 0
def test_per_channel(self):
aug = iaa.Multiply(mul=iap.Choice([0, 2]), per_channel=True)
observed = aug.augment_image(np.ones((1, 1, 100), dtype=np.uint8))
uq = np.unique(observed)
assert observed.shape == (1, 1, 100)
assert 0 in uq
assert 2 in uq
assert len(uq) == 2
def test_per_channel_with_probability(self):
# test channelwise with probability
aug = iaa.Multiply(mul=iap.Choice([0, 2]), per_channel=0.5)
seen = [0, 0]
for _ in sm.xrange(400):
observed = aug.augment_image(np.ones((1, 1, 20), dtype=np.uint8))
assert observed.shape == (1, 1, 20)
uq = np.unique(observed)
per_channel = (len(uq) == 2)
if per_channel:
seen[0] += 1
else:
seen[1] += 1
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_ = iaa.Multiply(mul="test")
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
_ = iaa.Multiply(mul=1, per_channel="test")
except Exception:
got_exception = True
assert got_exception
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.ones(shape, dtype=np.uint8)
aug = iaa.Multiply(2)
image_aug = aug(image=image)
assert np.all(image_aug == 2)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_unusual_channel_numbers(self):
shapes = [
(1, 1, 4),
(1, 1, 5),
(1, 1, 512),
(1, 1, 513)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.ones(shape, dtype=np.uint8)
aug = iaa.Multiply(2)
image_aug = aug(image=image)
assert np.all(image_aug == 2)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_get_parameters(self):
# test get_parameters()
aug = iaa.Multiply(mul=1, per_channel=False)
params = aug.get_parameters()
assert isinstance(params[0], iap.Deterministic)
assert isinstance(params[1], iap.Deterministic)
assert params[0].value == 1
assert params[1].value == 0
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.Multiply(mul=2)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_other_dtypes_bool(self):
# bool
image = np.zeros((3, 3), dtype=bool)
aug = iaa.Multiply(1.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.Multiply(1.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.Multiply(2.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.Multiply(0.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.Multiply(-1.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
def test_other_dtypes_uint_int(self):
# uint, int
for dtype in [np.uint8, np.uint16, np.int8, np.int16]:
dtype = np.dtype(dtype)
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.Multiply(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 10)
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.Multiply(10)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 100)
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.Multiply(0.5)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 5)
image = np.full((3, 3), 0, dtype=dtype)
aug = iaa.Multiply(0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 0)
if np.dtype(dtype).kind == "u":
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.Multiply(-1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 0)
else:
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.Multiply(-1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == -10)
image = np.full((3, 3), int(center_value), dtype=dtype)
aug = iaa.Multiply(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == int(center_value))
image = np.full((3, 3), int(center_value), dtype=dtype)
aug = iaa.Multiply(1.2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == int(1.2 * int(center_value)))
if np.dtype(dtype).kind == "u":
image = np.full((3, 3), int(center_value), dtype=dtype)
aug = iaa.Multiply(100)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.Multiply(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
# non-uint8 currently don't increase the itemsize
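# (i.e. values saturate at the dtype's value range limits, e.g. 0 and 255 for uint8, instead of being promoted to a larger dtype)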
if dtype.name == "uint8":
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.Multiply(10)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.Multiply(0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 0)
# non-uint8 currently don't increase the itemsize
if dtype.name == "uint8":
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.Multiply(-2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value)
# non-uint8 currently don't increase the itemsize
if dtype.name == "uint8":
for _ in sm.xrange(10):
image = np.full((1, 1, 3), 10, dtype=dtype)
aug = iaa.Multiply(iap.Uniform(0.5, 1.5))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(5 <= image_aug, image_aug <= 15))
assert len(np.unique(image_aug)) == 1
image = np.full((1, 1, 100), 10, dtype=dtype)
aug = iaa.Multiply(iap.Uniform(0.5, 1.5), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(5 <= image_aug, image_aug <= 15))
assert len(np.unique(image_aug)) > 1
image = np.full((1, 1, 3), 10, dtype=dtype)
aug = iaa.Multiply(iap.DiscreteUniform(1, 3))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) == 1
image = np.full((1, 1, 100), 10, dtype=dtype)
aug = iaa.Multiply(iap.DiscreteUniform(1, 3), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
def test_other_dtypes_float(self):
# float
for dtype in [np.float16, np.float32]:
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
if dtype == np.float16:
atol = 1e-3 * max_value
else:
atol = 1e-9 * max_value
_allclose = functools.partial(np.allclose, atol=atol, rtol=0)
image = np.full((3, 3), 10.0, dtype=dtype)
aug = iaa.Multiply(1.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, 10.0)
image = np.full((3, 3), 10.0, dtype=dtype)
aug = iaa.Multiply(2.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, 20.0)
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), max_value, dtype=dtype)
# aug = iaa.Multiply(-10)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert _allclose(image_aug, min_value)
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.Multiply(0.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, 0.0)
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.Multiply(0.5)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, 0.5*max_value)
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), min_value, dtype=dtype)
# aug = iaa.Multiply(-2.0)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert _allclose(image_aug, max_value)
image = np.full((3, 3), min_value, dtype=dtype)
aug = iaa.Multiply(0.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, 0.0)
# using tolerances of -100 - 1e-2 and 100 + 1e-2 is not enough for float16, had to be increased to -/+ 1e-1
# deactivated, because itemsize increase was deactivated
"""
for _ in sm.xrange(10):
image = np.full((1, 1, 3), 10.0, dtype=dtype)
aug = iaa.Multiply(iap.Uniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-100 - 1e-1 < image_aug, image_aug < 100 + 1e-1))
assert np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 10.0, dtype=dtype)
aug = iaa.Multiply(iap.Uniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-100 - 1e-1 < image_aug, image_aug < 100 + 1e-1))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
image = np.full((1, 1, 3), 10.0, dtype=dtype)
aug = iaa.Multiply(iap.DiscreteUniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-100 - 1e-1 < image_aug, image_aug < 100 + 1e-1))
assert np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 10.0, dtype=dtype)
aug = iaa.Multiply(iap.DiscreteUniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-100 - 1e-1 < image_aug, image_aug < 100 + 1e-1))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
"""
def test_pickleable(self):
aug = iaa.Multiply((0.5, 1.5), per_channel=True, random_state=1)
runtest_pickleable_uint8_img(aug, iterations=20)
class TestMultiplyElementwise(unittest.TestCase):
def setUp(self):
reseed()
def test_mul_is_one(self):
# no multiply, shouldn't change anything
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.MultiplyElementwise(mul=1.0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
def test_mul_is_above_one(self):
# multiply >1.0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.MultiplyElementwise(mul=1.2)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 120
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 120]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 120
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 120]
assert array_equal_lists(observed, expected)
def test_mul_is_below_one(self):
# multiply <1.0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.MultiplyElementwise(mul=0.8)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 80
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 80]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 80
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 80]
assert array_equal_lists(observed, expected)
def test_keypoints_dont_change(self):
# keypoints shouldn't be changed
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
aug = iaa.MultiplyElementwise(mul=1.2)
aug_det = iaa.MultiplyElementwise(mul=1.2).to_deterministic()
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
def test_tuple_as_mul(self):
# varying multiply factors
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.MultiplyElementwise(mul=(0, 2.0))
aug_det = aug.to_deterministic()
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.95)
assert nb_changed_aug_det == 0
def test_samples_change_by_spatial_location(self):
# values should change between pixels
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.MultiplyElementwise(mul=(0.5, 1.5))
nb_same = 0
nb_different = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_flat = observed_aug.flatten()
last = None
for j in sm.xrange(observed_aug_flat.size):
if last is not None:
v = observed_aug_flat[j]
if v - 0.0001 <= last <= v + 0.0001:
nb_same += 1
else:
nb_different += 1
last = observed_aug_flat[j]
assert nb_different > 0.95 * (nb_different + nb_same)
def test_per_channel(self):
# test channelwise
aug = iaa.MultiplyElementwise(mul=iap.Choice([0, 1]), per_channel=True)
observed = aug.augment_image(np.ones((100, 100, 3), dtype=np.uint8))
sums = np.sum(observed, axis=2)
values = np.unique(sums)
assert all([(value in values) for value in [0, 1, 2, 3]])
assert observed.shape == (100, 100, 3)
def test_per_channel_with_probability(self):
# test channelwise with probability
aug = iaa.MultiplyElementwise(mul=iap.Choice([0, 1]), per_channel=0.5)
seen = [0, 0]
for _ in sm.xrange(400):
observed = aug.augment_image(np.ones((20, 20, 3), dtype=np.uint8))
assert observed.shape == (20, 20, 3)
sums = np.sum(observed, axis=2)
values = np.unique(sums)
all_values_found = all([(value in values) for value in [0, 1, 2, 3]])
if all_values_found:
seen[0] += 1
else:
seen[1] += 1
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_aug = iaa.MultiplyElementwise(mul="test")
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
_aug = iaa.MultiplyElementwise(mul=1, per_channel="test")
except Exception:
got_exception = True
assert got_exception
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.ones(shape, dtype=np.uint8)
aug = iaa.MultiplyElementwise(2)
image_aug = aug(image=image)
assert np.all(image_aug == 2)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_unusual_channel_numbers(self):
shapes = [
(1, 1, 4),
(1, 1, 5),
(1, 1, 512),
(1, 1, 513)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.ones(shape, dtype=np.uint8)
aug = iaa.MultiplyElementwise(2)
image_aug = aug(image=image)
assert np.all(image_aug == 2)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_get_parameters(self):
# test get_parameters()
aug = iaa.MultiplyElementwise(mul=1, per_channel=False)
params = aug.get_parameters()
assert isinstance(params[0], iap.Deterministic)
assert isinstance(params[1], iap.Deterministic)
assert params[0].value == 1
assert params[1].value == 0
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.MultiplyElementwise(mul=2)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_other_dtypes_bool(self):
# bool
image = np.zeros((3, 3), dtype=bool)
aug = iaa.MultiplyElementwise(1.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.MultiplyElementwise(1.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.MultiplyElementwise(2.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.MultiplyElementwise(0.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.MultiplyElementwise(-1.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
def test_other_dtypes_uint_int(self):
# uint, int
for dtype in [np.uint8, np.uint16, np.int8, np.int16]:
dtype = np.dtype(dtype)
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 10)
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), 10, dtype=dtype)
# aug = iaa.MultiplyElementwise(10)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert np.all(image_aug == 100)
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(0.5)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 5)
image = np.full((3, 3), 0, dtype=dtype)
aug = iaa.MultiplyElementwise(0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 0)
# partially deactivated, because itemsize increase was deactivated
if dtype.name == "uint8":
if dtype.kind == "u":
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(-1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 0)
else:
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(-1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == -10)
image = np.full((3, 3), int(center_value), dtype=dtype)
aug = iaa.MultiplyElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == int(center_value))
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), int(center_value), dtype=dtype)
# aug = iaa.MultiplyElementwise(1.2)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert np.all(image_aug == int(1.2 * int(center_value)))
# deactivated, because itemsize increase was deactivated
if dtype.name == "uint8":
if dtype.kind == "u":
image = np.full((3, 3), int(center_value), dtype=dtype)
aug = iaa.MultiplyElementwise(100)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.MultiplyElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), max_value, dtype=dtype)
# aug = iaa.MultiplyElementwise(10)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert np.all(image_aug == max_value)
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.MultiplyElementwise(0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 0)
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), max_value, dtype=dtype)
# aug = iaa.MultiplyElementwise(-2)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert np.all(image_aug == min_value)
# partially deactivated, because itemsize increase was deactivated
if dtype.name == "uint8":
for _ in sm.xrange(10):
image = np.full((5, 5, 3), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(iap.Uniform(0.5, 1.5))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(5 <= image_aug, image_aug <= 15))
assert len(np.unique(image_aug)) > 1
assert np.all(image_aug[..., 0] == image_aug[..., 1])
image = np.full((1, 1, 100), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(iap.Uniform(0.5, 1.5), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(5 <= image_aug, image_aug <= 15))
assert len(np.unique(image_aug)) > 1
image = np.full((5, 5, 3), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(iap.DiscreteUniform(1, 3))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
assert np.all(image_aug[..., 0] == image_aug[..., 1])
image = np.full((1, 1, 100), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(iap.DiscreteUniform(1, 3), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
def test_other_dtypes_float(self):
# float
for dtype in [np.float16, np.float32]:
dtype = np.dtype(dtype)
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
if dtype == np.float16:
atol = 1e-3 * max_value
else:
atol = 1e-9 * max_value
_allclose = functools.partial(np.allclose, atol=atol, rtol=0)
image = np.full((3, 3), 10.0, dtype=dtype)
aug = iaa.MultiplyElementwise(1.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, 10.0)
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), 10.0, dtype=dtype)
# aug = iaa.MultiplyElementwise(2.0)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert _allclose(image_aug, 20.0)
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), max_value, dtype=dtype)
# aug = iaa.MultiplyElementwise(-10)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert _allclose(image_aug, min_value)
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.MultiplyElementwise(0.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, 0.0)
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.MultiplyElementwise(0.5)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, 0.5*max_value)
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), min_value, dtype=dtype)
# aug = iaa.MultiplyElementwise(-2.0)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert _allclose(image_aug, max_value)
image = np.full((3, 3), min_value, dtype=dtype)
aug = iaa.MultiplyElementwise(0.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, 0.0)
# using tolerances of -100 - 1e-2 and 100 + 1e-2 is not enough for float16, had to be increased to -/+ 1e-1
# deactivated, because itemsize increase was deactivated
"""
for _ in sm.xrange(10):
image = np.full((50, 1, 3), 10.0, dtype=dtype)
aug = iaa.MultiplyElementwise(iap.Uniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-100 - 1e-1 < image_aug, image_aug < 100 + 1e-1))
assert not np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 10.0, dtype=dtype)
aug = iaa.MultiplyElementwise(iap.Uniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-100 - 1e-1 < image_aug, image_aug < 100 + 1e-1))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
image = np.full((50, 1, 3), 10.0, dtype=dtype)
aug = iaa.MultiplyElementwise(iap.DiscreteUniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-100 - 1e-1 < image_aug, image_aug < 100 + 1e-1))
assert not np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(iap.DiscreteUniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-100 - 1e-1 < image_aug, image_aug < 100 + 1e-1))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
"""
def test_pickleable(self):
aug = iaa.MultiplyElementwise((0.5, 1.5), per_channel=True,
random_state=1)
runtest_pickleable_uint8_img(aug, iterations=3)
class TestReplaceElementwise(unittest.TestCase):
def setUp(self):
reseed()
def test_mask_is_always_zero(self):
# no replace, shouldn't change anything
base_img = np.ones((3, 3, 1), dtype=np.uint8) + 99
images = np.array([base_img])
images_list = [base_img]
aug = iaa.ReplaceElementwise(mask=0, replacement=0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
def test_mask_is_always_one(self):
# replace at 100 percent prob., should change everything
base_img = np.ones((3, 3, 1), dtype=np.uint8) + 99
images = np.array([base_img])
images_list = [base_img]
aug = iaa.ReplaceElementwise(mask=1, replacement=0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = np.zeros((1, 3, 3, 1), dtype=np.uint8)
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = [np.zeros((3, 3, 1), dtype=np.uint8)]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = np.zeros((1, 3, 3, 1), dtype=np.uint8)
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [np.zeros((3, 3, 1), dtype=np.uint8)]
assert array_equal_lists(observed, expected)
def test_mask_is_stochastic_parameter(self):
# replace half
aug = iaa.ReplaceElementwise(mask=iap.Binomial(p=0.5), replacement=0)
img = np.ones((100, 100, 1), dtype=np.uint8)
nb_iterations = 100
nb_diff_all = 0
for i in sm.xrange(nb_iterations):
observed = aug.augment_image(img)
nb_diff = np.sum(img != observed)
nb_diff_all += nb_diff
p = nb_diff_all / (nb_iterations * 100 * 100)
assert 0.45 <= p <= 0.55
def test_mask_is_list(self):
# mask is list
aug = iaa.ReplaceElementwise(mask=[0.2, 0.7], replacement=1)
img = np.zeros((20, 20, 1), dtype=np.uint8)
seen = [0, 0, 0]
for i in sm.xrange(400):
observed = aug.augment_image(img)
p = np.mean(observed)
if 0.1 < p < 0.3:
seen[0] += 1
elif 0.6 < p < 0.8:
seen[1] += 1
else:
seen[2] += 1
assert seen[2] <= 10
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
def test_keypoints_dont_change(self):
# keypoints shouldn't be changed
base_img = np.ones((3, 3, 1), dtype=np.uint8) + 99
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
aug = iaa.ReplaceElementwise(mask=iap.Binomial(p=0.5), replacement=0)
aug_det = iaa.ReplaceElementwise(mask=iap.Binomial(p=0.5), replacement=0).to_deterministic()
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
def test_replacement_is_stochastic_parameter(self):
# different replacements
aug = iaa.ReplaceElementwise(mask=1, replacement=iap.Choice([100, 200]))
img = np.zeros((1000, 1000, 1), dtype=np.uint8)
img100 = img + 100
img200 = img + 200
observed = aug.augment_image(img)
nb_diff_100 = np.sum(img100 != observed)
nb_diff_200 = np.sum(img200 != observed)
p100 = nb_diff_100 / (1000 * 1000)
p200 = nb_diff_200 / (1000 * 1000)
assert 0.45 <= p100 <= 0.55
assert 0.45 <= p200 <= 0.55
def test_per_channel(self):
# test channelwise
aug = iaa.ReplaceElementwise(mask=iap.Choice([0, 1]), replacement=1, per_channel=True)
observed = aug.augment_image(np.zeros((100, 100, 3), dtype=np.uint8))
sums = np.sum(observed, axis=2)
values = np.unique(sums)
assert all([(value in values) for value in [0, 1, 2, 3]])
assert observed.shape == (100, 100, 3)
def test_per_channel_with_probability(self):
# test channelwise with probability
aug = iaa.ReplaceElementwise(mask=iap.Choice([0, 1]), replacement=1, per_channel=0.5)
seen = [0, 0]
for _ in sm.xrange(400):
observed = aug.augment_image(np.zeros((20, 20, 3), dtype=np.uint8))
assert observed.shape == (20, 20, 3)
sums = np.sum(observed, axis=2)
values = np.unique(sums)
all_values_found = all([(value in values) for value in [0, 1, 2, 3]])
if all_values_found:
seen[0] += 1
else:
seen[1] += 1
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_aug = iaa.ReplaceElementwise(mask="test", replacement=1)
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
_aug = iaa.ReplaceElementwise(mask=1, replacement=1, per_channel="test")
except Exception:
got_exception = True
assert got_exception
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.ReplaceElementwise(1.0, 1)
image_aug = aug(image=image)
assert np.all(image_aug == 1)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_unusual_channel_numbers(self):
shapes = [
(1, 1, 4),
(1, 1, 5),
(1, 1, 512),
(1, 1, 513)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.ReplaceElementwise(1.0, 1)
image_aug = aug(image=image)
assert np.all(image_aug == 1)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_get_parameters(self):
# test get_parameters()
aug = iaa.ReplaceElementwise(mask=0.5, replacement=2, per_channel=False)
params = aug.get_parameters()
assert isinstance(params[0], iap.Binomial)
assert isinstance(params[0].p, iap.Deterministic)
assert isinstance(params[1], iap.Deterministic)
assert isinstance(params[2], iap.Deterministic)
assert 0.5 - 1e-6 < params[0].p.value < 0.5 + 1e-6
assert params[1].value == 2
assert params[2].value == 0
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.ReplaceElementwise(mask=1, replacement=0.5)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_other_dtypes_bool(self):
# bool
aug = iaa.ReplaceElementwise(mask=1, replacement=0)
image = np.full((3, 3), False, dtype=bool)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
aug = iaa.ReplaceElementwise(mask=1, replacement=1)
image = np.full((3, 3), False, dtype=bool)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
aug = iaa.ReplaceElementwise(mask=1, replacement=0)
image = np.full((3, 3), True, dtype=bool)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
aug = iaa.ReplaceElementwise(mask=1, replacement=1)
image = np.full((3, 3), True, dtype=bool)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
aug = iaa.ReplaceElementwise(mask=1, replacement=0.7)
image = np.full((3, 3), False, dtype=bool)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
aug = iaa.ReplaceElementwise(mask=1, replacement=0.2)
image = np.full((3, 3), False, dtype=bool)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
def test_other_dtypes_uint_int(self):
# uint, int
for dtype in [np.uint8, np.uint16, np.uint32, np.int8, np.int16, np.int32]:
dtype = np.dtype(dtype)
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
aug = iaa.ReplaceElementwise(mask=1, replacement=1)
image = np.full((3, 3), 0, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 1)
aug = iaa.ReplaceElementwise(mask=1, replacement=2)
image = np.full((3, 3), 1, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 2)
# deterministic stochastic parameters are by default int32 for
# any integer value and hence cannot cover the full uint32 value
# range
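# (e.g. np.iinfo(np.int32).max == 2**31 - 1, which is below np.iinfo(np.uint32).max == 2**32 - 1)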
if dtype.name != "uint32":
aug = iaa.ReplaceElementwise(mask=1, replacement=max_value)
image = np.full((3, 3), min_value, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
aug = iaa.ReplaceElementwise(mask=1, replacement=min_value)
image = np.full((3, 3), max_value, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value)
aug = iaa.ReplaceElementwise(mask=1, replacement=iap.Uniform(1.0, 10.0))
image = np.full((100, 1), 0, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(1 <= image_aug, image_aug <= 10))
assert len(np.unique(image_aug)) > 1
aug = iaa.ReplaceElementwise(mask=1, replacement=iap.DiscreteUniform(1, 10))
image = np.full((100, 1), 0, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(1 <= image_aug, image_aug <= 10))
assert len(np.unique(image_aug)) > 1
aug = iaa.ReplaceElementwise(mask=0.5, replacement=iap.DiscreteUniform(1, 10), per_channel=True)
image = np.full((1, 1, 100), 0, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(0 <= image_aug, image_aug <= 10))
assert len(np.unique(image_aug)) > 2
def test_other_dtypes_float(self):
# float
for dtype in [np.float16, np.float32, np.float64]:
dtype = np.dtype(dtype)
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
atol = 1e-3*max_value if dtype == np.float16 else 1e-9 * max_value
_allclose = functools.partial(np.allclose, atol=atol, rtol=0)
aug = iaa.ReplaceElementwise(mask=1, replacement=1.0)
image = np.full((3, 3), 0.0, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.allclose(image_aug, 1.0)
aug = iaa.ReplaceElementwise(mask=1, replacement=2.0)
image = np.full((3, 3), 1.0, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.allclose(image_aug, 2.0)
# deterministic stochastic parameters are by default float32 for
# any float value and hence cannot cover the full float64 value
# range
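# (e.g. np.finfo(np.float32).max is around 3.4e38, far below np.finfo(np.float64).max of around 1.8e308)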
if dtype.name != "float64":
aug = iaa.ReplaceElementwise(mask=1, replacement=max_value)
image = np.full((3, 3), min_value, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, max_value)
aug = iaa.ReplaceElementwise(mask=1, replacement=min_value)
image = | np.full((3, 3), max_value, dtype=dtype) | numpy.full |
#!/usr/bin/env python3
# numpy and scipy
import numpy as np
import scipy.fftpack
import scipy.misc
from scipy.special import erf
from scipy import signal
from scipy.ndimage.filters import gaussian_filter
# galsim and lmfit
import galsim
import lmfit
# astropy
# TODO: replace with afw equivalents
from astropy.convolution import Gaussian2DKernel
from astropy.convolution import Tophat2DKernel
# lsst
import lsst.afw.math
import lsst.afw.image
# auxiliary imports
import socket
import time
class ZernikeFitterPFS(object):
"""Create a model image for Prime Focus Spectrograph
"""
def __init__(self, image=np.ones((20, 20)), image_var=np.ones((20, 20)), image_mask=None,
pixelScale=20.76, wavelength=794,
diam_sic=139.5327e-3, npix=1536,
pupilExplicit=None, wf_full_Image=None,
dithering=None, save=None,
use_optPSF=None, use_wf_grid=None,
zmaxInit=None, extraZernike=None, simulation_00=None, verbosity=None,
double_sources=None, double_sources_positions_ratios=None,
explicit_psf_position=None, use_only_chi=False, use_center_of_flux=False,
PSF_DIRECTORY=None, *args):
"""
Parameters
----------
image: `np.array`, (N, N)
image that you wish to model
if you do not pass an image to compare against,
the algorithm will default to creating a 20x20 image that has
value of '1' everywhere
image_var: `np.array`, (N, N)
variance image
if you do not pass the variance image,
the algorithm will default to creating a 20x20 image that has
value of '1' everywhere
image_mask: `np.array`, (N, N)
mask image
pixelScale: `float`
pixel scale in arcseconds
This is the size of a pixel in arcsec for the PFS red arm in focus,
calculated with http://www.wilmslowastro.com/software/formulae.htm as
pixel size in microns / focal length in mm x 206.3
(pixel size = 15 microns, focal length = 149.2 mm,
i.e. 138 aperture x 1.1 f-number)
wavelength: `float`
wavelength of the psf [nm]
if you do not pass the value for wavelength it will default to 794 nm,
which is roughly in the middle of the red detector
diam_sic: `float`
size of the exit pupil [m]
Exit pupil size in focus, default is 139.5327e-3 meters
(taken from Zemax)
npix: `int`
size of the 2d array containing the exit pupil illumination
pupilExplicit: `np.array`, (Np, Np)
if available, uses this image for the pupil instead of
creating it from the supplied parameters
wf_full_Image: `np.array`, (Np, Np)
wavefront image
if available, uses this image for the wavefront instead of
creating it from the supplied parameters
dithering: `int`
dithering scale (most likely 1 or 2)
save: `int`
if 1, save various intermediate results, for testing purposes
requires PSF_DIRECTORY to be set as well
use_optPSF: `np.array`, (Np, Np)
if provided skip creation of optical psf, only do postprocessing
use_wf_grid: `np.array`, (Ny, Nx)
if provided, use this explicit wavefront map
zmaxInit: `int`
highest Zernike order (11 or 22)
extraZernike: `np.array`, (N)
if provided, simulated Zernike orders higher than 22
simulation_00: `np.array`, (2,)
places optical center at the center of the final image
verbosity: `int`
verbosity during evaluations
double_sources:
is there a second source present in the image
double_sources_positions_ratios: `np.array`, (2,)
initial guess for the position and strength of the second source
explicit_psf_position: `np.array`, (2,)
explicit position where to place optical psf
use_only_chi: `bool`
if True, fit to minimize np.abs(chi), and not chi**2
use_center_of_flux: `bool`
if True, fit to minimize the distance between the center of flux
for the model and the input image
PSF_DIRECTORY: `str`
where will intermediate outputs be saved for testing purposes
Notes
----------
Creates a model image that is fitted to the input science image
The model image is made by the convolution of
1. an OpticalPSF (constructed using FFT)
created with _getOptPsf_naturalResolution
The OpticalPSF part includes
1.1. description of pupil
created with get_Pupil
1.2. specification of an arbitrary number of
zernike wavefront aberrations,
which are input to galsim.phase_screens.OpticalScreen
2. an input fiber image and other convolutions such as
CCD charge diffusion created with _optPsf_postprocessing
This code uses lmfit to initialize the parameters.
Calls class Psf_position
Calls class PFSPupilFactory
Examples
----------
Simple example with initial parameters, changing only one parameter
>>> zmax = 22
>>> single_image_analysis = ZernikeFitterPFS(zmaxInit = zmax,
verbosity=1)
>>> single_image_analysis.initParams()
>>> single_image_analysis.params['detFrac'] =\
lmfit.Parameter(name='detFrac', value=0.70)
>>> resulting_image, psf_pos =\
single_image_analysis.constructModelImage_PFS_naturalResolution()
"""
self.image = image
self.image_var = image_var
if image_mask is None:
image_mask = np.zeros(image.shape)
self.image_mask = image_mask
self.wavelength = wavelength
self.pixelScale = pixelScale
self.diam_sic = diam_sic
self.npix = npix
if pupilExplicit is None:
pupilExplicit = False
self.pupilExplicit = pupilExplicit
# effective size of pixels, which can differ from the physical size of pixels due to dithering
if dithering is None:
dithering = 1
self.dithering = dithering
self.pixelScale_effective = self.pixelScale / dithering
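# e.g. dithering=2 halves the effective pixel scale: pixelScale_effective = pixelScale / 2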
if save in (None, 0):
save = None
else:
save = 1
assert PSF_DIRECTORY is not None
self.save = save
self.use_optPSF = use_optPSF
self.use_wf_grid = use_wf_grid
self.zmax = zmaxInit
self.simulation_00 = simulation_00
if self.simulation_00:
self.simulation_00 = 1
self.extraZernike = extraZernike
self.verbosity = verbosity
self.double_sources = double_sources
self.double_sources_positions_ratios = double_sources_positions_ratios
self.explicit_psf_position = explicit_psf_position
self.use_only_chi = use_only_chi
self.use_center_of_flux = use_center_of_flux
# flux = number of counts in the image
self.flux = float(np.sum(image))
try:
if not explicit_psf_position:
self.explicit_psf_position = None
except BaseException:
pass
self.PSF_DIRECTORY = PSF_DIRECTORY
if PSF_DIRECTORY is not None:
self.TESTING_FOLDER = PSF_DIRECTORY + 'Testing/'
self.TESTING_PUPIL_IMAGES_FOLDER = self.TESTING_FOLDER + 'Pupil_Images/'
self.TESTING_WAVEFRONT_IMAGES_FOLDER = self.TESTING_FOLDER + 'Wavefront_Images/'
self.TESTING_FINAL_IMAGES_FOLDER = self.TESTING_FOLDER + 'Final_Images/'
if self.verbosity == 1:
# check the versions of the most important libraries
print('np.__version__' + str(np.__version__))
print('scipy.__version__' + str(scipy.__version__))
def initParams(
self,
z4Init=None,
detFracInit=None,
strutFracInit=None,
focalPlanePositionInit=None,
slitFracInit=None,
slitFrac_dy_Init=None,
wide_0Init=None,
wide_23Init=None,
wide_43Init=None,
radiometricEffectInit=None,
radiometricExponentInit=None,
x_ilumInit=None,
y_ilumInit=None,
pixel_effectInit=None,
backgroundInit=None,
x_fiberInit=None,
y_fiberInit=None,
effective_ilum_radiusInit=None,
frd_sigmaInit=None,
frd_lorentz_factorInit=None,
misalignInit=None,
det_vertInit=None,
slitHolder_frac_dxInit=None,
grating_linesInit=None,
scattering_slopeInit=None,
scattering_amplitudeInit=None,
fiber_rInit=None,
fluxInit=None):
"""Initialize `lmfit.Parameters` object
Allows to set up all parameters describing the pupil and
Zernike parameter (up to z22) explicitly. If any value is not passed,
it will be substituted by a default value (specified below).
Parameters
----------
zmax: `int`
Total number of Zernike aberrations used (11 or 22)
Possible to add more with extra_zernike parameter
z4Init: `float`
Initial Z4 aberration value in waves (that is 2*np.pi*wavelengths)
# pupil parameters
detFracInit: `float`
Value determining how much of the exit pupil obscured by the
central obscuration(detector)
strutFracInit: `float`
Value determining how much of the exit pupil is obscured
by a single strut
focalPlanePositionInit: (`float`, `float`)
2-tuple for position of the central obscuration(detector)
in the focal plane
slitFracInit: `float`
Value determining how much of the exit pupil is obscured by slit
slitFrac_dy_Init: `float`
Value determining what is the vertical position of the slit
in the exit pupil
# parameters describing individual struts
wide_0Init: `float`
Parameter describing widening of the strut at 0 degrees
wide_23Init: `float`
Parameter describing widening of the top-left strut
wide_43Init: `float`
Parameter describing widening of the bottom-left strut
#non-uniform illumination
radiometricEffectInit: `float`
parameter describing non-uniform illumination of the pupil
(1-params['radiometricEffect']**2*r**2)**\
(params['radiometricExponent']) [DEPRECATED]
radiometricExponentInit: `float`
parameter describing non-uniform illumination of the pupil
(1-params['radiometricEffect']**2*r**2)\
**(params['radiometricExponent'])
x_ilumInit: `float`
x-position of the center of illumination
of the exit pupil [DEPRECATED]
y_ilumInit: `float`
y-position of the center of illumination
of the exit pupil [DEPRECATED]
# illumination due to fiber, parameters
x_fiberInit: `float`
position of the fiber misalignment in the x direction
y_fiberInit: `float`
position of the fiber misalignment in the y direction
effective_ilum_radiusInit: `float`
fraction of the maximal radius of the illumination
of the exit pupil that is actually illuminated
frd_sigma: `float`
sigma of Gaussian convolving only outer edge, mimicking FRD
frd_lorentz_factor: `float`
strength of the lorentzian factor describing wings
of the pupil illumination
misalign: `float`
amount of misalignment in the illumination
# further pupil parameters
det_vert: `float`
multiplicative factor determining vertical size
of the detector obscuration
slitHolder_frac_dx: `float`
dx position of slit holder
# convolving (postprocessing) parameters
grating_lines: `int`
number of effective lines in the grating
scattering_slopeInit: `float`
slope of scattering
scattering_amplitudeInit: `float`
amplitude of scattering compared to optical PSF
pixel_effectInit: `float`
sigma describing charge diffusion effect [in units of 15 microns]
fiber_rInit: `float`
radius of perfect tophat fiber, as seen on the detector
[in units of 15 microns]
fluxInit: `float`
total flux in generated image compared to input image
(needs to be 1 or very close to 1)
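Examples
----------
A minimal sketch, assuming the defaults listed above; only a single
pupil parameter is overridden at initialization
>>> single_image_analysis = ZernikeFitterPFS(zmaxInit=22, verbosity=0)
>>> single_image_analysis.initParams(detFracInit=0.70)
>>> single_image_analysis.params['detFrac'].value
0.7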
"""
if self.verbosity == 1:
print(' ')
print('Initializing ZernikeFitterPFS')
print('Verbosity parameter is: ' + str(self.verbosity))
print('Highest Zernike polynomial is (zmax): ' + str(self.zmax))
params = lmfit.Parameters()
# Zernike parameters
z_array = []
if z4Init is None:
params.add('z4', 0.0)
else:
params.add('z4', z4Init)
for i in range(5, self.zmax + 1):
params.add('z{}'.format(i), 0.0)
# pupil parameters
if detFracInit is None:
params.add('detFrac', 0.65)
else:
params.add('detFrac', detFracInit)
if strutFracInit is None:
params.add('strutFrac', 0.07)
else:
params.add('strutFrac', strutFracInit)
if focalPlanePositionInit is None:
params.add('dxFocal', 0.0)
params.add('dyFocal', 0.0)
else:
params.add('dxFocal', focalPlanePositionInit[0])
params.add('dyFocal', focalPlanePositionInit[1])
if slitFracInit is None:
params.add('slitFrac', 0.05)
else:
params.add('slitFrac', slitFracInit)
if slitFrac_dy_Init is None:
params.add('slitFrac_dy', 0)
else:
params.add('slitFrac_dy', slitFrac_dy_Init)
# parameters describing individual struts
if wide_0Init is None:
params.add('wide_0', 0)
else:
params.add('wide_0', wide_0Init)
if wide_23Init is None:
params.add('wide_23', 0)
else:
params.add('wide_23', wide_23Init)
if wide_43Init is None:
params.add('wide_43', 0)
else:
params.add('wide_43', wide_43Init)
# non-uniform illumination
if radiometricExponentInit is None:
params.add('radiometricExponent', 0.25)
else:
params.add('radiometricExponent', radiometricExponentInit)
if radiometricEffectInit is None:
params.add('radiometricEffect', 0)
else:
params.add('radiometricEffect', radiometricEffectInit)
if x_ilumInit is None:
params.add('x_ilum', 1)
else:
params.add('x_ilum', x_ilumInit)
if y_ilumInit is None:
params.add('y_ilum', 1)
else:
params.add('y_ilum', y_ilumInit)
# illumination due to fiber, parameters
if x_fiberInit is None:
params.add('x_fiber', 1)
else:
params.add('x_fiber', x_fiberInit)
if y_fiberInit is None:
params.add('y_fiber', 0)
else:
params.add('y_fiber', y_fiberInit)
if effective_ilum_radiusInit is None:
params.add('effective_ilum_radius', 0.9)
else:
params.add('effective_ilum_radius', effective_ilum_radiusInit)
if frd_sigmaInit is None:
params.add('frd_sigma', 0.02)
else:
params.add('frd_sigma', frd_sigmaInit)
if frd_lorentz_factorInit is None:
params.add('frd_lorentz_factor', 0.5)
else:
params.add('frd_lorentz_factor', frd_lorentz_factorInit)
if misalignInit is None:
params.add('misalign', 0)
else:
params.add('misalign', misalignInit)
# further pupil parameters
if det_vertInit is None:
params.add('det_vert', 1)
else:
params.add('det_vert', det_vertInit)
if slitHolder_frac_dxInit is None:
params.add('slitHolder_frac_dx', 0)
else:
params.add('slitHolder_frac_dx', slitHolder_frac_dxInit)
# convolving (postprocessing) parameters
if grating_linesInit is None:
params.add('grating_lines', 100000)
else:
params.add('grating_lines', grating_linesInit)
if scattering_slopeInit is None:
params.add('scattering_slope', 2)
else:
params.add('scattering_slope', scattering_slopeInit)
if scattering_amplitudeInit is None:
params.add('scattering_amplitude', 10**-2)
else:
params.add('scattering_amplitude', scattering_amplitudeInit)
if pixel_effectInit is None:
params.add('pixel_effect', 0.35)
else:
params.add('pixel_effect', pixel_effectInit)
if fiber_rInit is None:
params.add('fiber_r', 1.8)
else:
params.add('fiber_r', fiber_rInit)
if fluxInit is None:
params.add('flux', 1)
else:
params.add('flux', fluxInit)
self.params = params
self.optPsf = None
self.z_array = z_array
def constructModelImage_PFS_naturalResolution(
self,
params=None,
shape=None,
pixelScale=None,
use_optPSF=None,
extraZernike=None,
return_intermediate_images=False):
"""Construct model image given the set of parameters
Parameters
----------
params : `lmfit.Parameters` object or python dictionary
Parameters describing model; None to use self.params
shape : `(int, int)`
Shape for model image; None to use the shape of self.image
pixelScale : `float`
Pixel scale in arcseconds to use for model image;
None to use self.pixelScale.
use_optPSF : `bool`
If True, use previously generated optical PSF,
skip _getOptPsf_naturalResolution, and conduct only postprocessing
extraZernike : `np.array`, (N,)
Zernike parameters beyond z22
return_intermediate_images : `bool`
If True, return intermediate images created during the run
This is in order to help with debugging and inspect
the images created during the process
Return
----------
(if not return_intermediate_images)
optPsf_final : `np.array`, (N, N)
Final model image
psf_position : np.array, (2,)
Position where image is centered
(if return_intermediate_images)
optPsf_final : `np.array`, (N, N)
Final model image
ilum : `np.array`, (N, N)
Illumination array
wf_grid_rot : `np.array`, (N, N)
Wavefront array
psf_position : np.array, (2,)
Position where image is centered
Notes
----------
Calls _getOptPsf_naturalResolution and optPsf_postprocessing
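Examples
----------
A minimal sketch, assuming initParams has already been called
(see the example in the class constructor docstring)
>>> optPsf_final, psf_position =\
single_image_analysis.constructModelImage_PFS_naturalResolution()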
"""
if self.verbosity == 1:
print(' ')
print('Entering constructModelImage_PFS_naturalResolution')
if params is None:
params = self.params
if shape is None:
shape = self.image.shape
if pixelScale is None:
pixelScale = self.pixelScale
try:
parameter_values = params.valuesdict()
except AttributeError:
parameter_values = params
use_optPSF = self.use_optPSF
if extraZernike is None:
pass
else:
extraZernike = list(extraZernike)
self.extraZernike = extraZernike
# if you did not pass pure optical psf image, create one here
if use_optPSF is None:
# change outputs depending on if you want intermediate results
if not return_intermediate_images:
optPsf = self._getOptPsf_naturalResolution(
parameter_values, return_intermediate_images=return_intermediate_images)
else:
optPsf, ilum, wf_grid_rot = self._getOptPsf_naturalResolution(
parameter_values, return_intermediate_images=return_intermediate_images)
else:
# if you claimed to supply optical psf image, but none is provided
# still create one
if self.optPsf is None:
if not return_intermediate_images:
optPsf = self._getOptPsf_naturalResolution(
parameter_values, return_intermediate_images=return_intermediate_images)
else:
optPsf, ilum, wf_grid_rot = self._getOptPsf_naturalResolution(
parameter_values, return_intermediate_images=return_intermediate_images)
self.optPsf = optPsf
else:
optPsf = self.optPsf
# at the moment, no difference in optPsf_postprocessing depending on return_intermediate_images
optPsf_final, psf_position = self._optPsf_postprocessing(
optPsf, return_intermediate_images=return_intermediate_images)
if self.save == 1:
np.save(self.TESTING_FINAL_IMAGES_FOLDER + 'optPsf', optPsf)
np.save(self.TESTING_FINAL_IMAGES_FOLDER + 'optPsf_final',
optPsf_final)
else:
pass
if self.verbosity == 1:
print('Finished with constructModelImage_PFS_naturalResolution')
print(' ')
if not return_intermediate_images:
return optPsf_final, psf_position
if return_intermediate_images:
return optPsf_final, ilum, wf_grid_rot, psf_position
def _optPsf_postprocessing(self, optPsf, return_intermediate_images=False):
"""Apply postprocessing to the pure optical psf image
Parameters
----------
optPsf : `np.array`, (N, N)
Optical image, only psf
return_intermediate_images : `bool`
If True, return intermediate images created during the run
This is potentially in order to help with debugging and inspect
the images created during the process
Returns
----------
(At the moment, the output is the same no matter what
return_intermediate_images is, but there is a possibility
to add intermediate outputs)
optPsf_final : `np.array`, (N, N)
Final model image
psf_position : `np.array`, (2,)
Position where the image is centered
Notes
----------
Takes optical psf and ``postprocesses`` it to generate final image.
The algorithm first reduces the oversampling and cuts the central part
of the image. This is done to speed up the calculations.
Then we apply various effects that are separate from
the pure optical PSF considerations.
We then finish with the centering algorithm to move our created image
to fit the input science image, invoking the PsfPosition class.
The effects we apply are
1. scattered light
function apply_scattered_light
2. convolution with fiber
3. CCD diffusion
4. grating effects
5. centering
"""
time_start_single = time.time()
if self.verbosity == 1:
print(' ')
print('Entering optPsf_postprocessing')
params = self.params
shape = self.image.shape
# all of the parameters for the creation of the image
param_values = params.valuesdict()
# how much is my generated image oversampled compared to final image
oversampling_original = (self.pixelScale_effective) / self.scale_ModelImage_PFS_naturalResolution
if self.verbosity == 1:
print('optPsf.shape: ' + str(optPsf.shape))
print('oversampling_original: ' + str(oversampling_original))
# print('type(optPsf) '+str(type(optPsf[0][0])))
# determine the size of the central cut, so that from the huge generated
# image we can cut out only the central portion (1.4 times larger
# than the size of actual final image)
size_of_central_cut = int(oversampling_original * self.image.shape[0] * 1.4)
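# e.g. for a 20-pixel input stamp and oversampling_original of roughly 10, this gives int(10 * 20 * 1.4) = 280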
if size_of_central_cut > optPsf.shape[0]:
# if the requested cut is larger than the generated image, limit it to the image size
# fail below if there is not enough room to cut out the final image
size_of_central_cut = optPsf.shape[0]
if self.verbosity == 1:
print('size_of_central_cut modified to ' + str(size_of_central_cut))
assert int(oversampling_original * self.image.shape[0] * 1.0) < optPsf.shape[0]
assert size_of_central_cut <= optPsf.shape[0]
if self.verbosity == 1:
print('size_of_central_cut: ' + str(size_of_central_cut))
# cut part which you need to form the final image
# set oversampling to 1 so you are not resizing the image, and dx=0 and
# dy=0 so that you are not moving around, i.e., you are just cutting the
# central region
optPsf_cut = PsfPosition.cut_Centroid_of_natural_resolution_image(
image=optPsf, size_natural_resolution=size_of_central_cut + 1, oversampling=1, dx=0, dy=0)
if self.verbosity == 1:
print('optPsf_cut.shape' + str(optPsf_cut.shape))
# we want to reduce oversampling to be roughly around 10 to make things computationally easier
# if oversampling_original is smaller than 20 (in case of dithered images),
# make the resolution coarser by a factor of 2
# otherwise set it to 11
if oversampling_original < 20:
oversampling = np.round(oversampling_original / 2)
else:
oversampling = 11
if self.verbosity == 1:
print('oversampling:' + str(oversampling))
# what will be the size of the image after resizing it from the
# ``oversampling_original'' to the ``oversampling'' sampling
size_of_optPsf_cut_downsampled = int(
np.round(size_of_central_cut / (oversampling_original / oversampling)))
if self.verbosity == 1:
print('size_of_optPsf_cut_downsampled: ' + str(size_of_optPsf_cut_downsampled))
# make sure that optPsf_cut_downsampled is an array which has an odd size
# - increase size by 1 if needed
if (size_of_optPsf_cut_downsampled % 2) == 0:
im1 = galsim.Image(optPsf_cut, copy=True, scale=1)
interpolated_image = galsim._InterpolatedImage(im1, x_interpolant=galsim.Lanczos(5, True))
optPsf_cut_downsampled = interpolated_image.\
drawImage(nx=size_of_optPsf_cut_downsampled + 1, ny=size_of_optPsf_cut_downsampled + 1,
scale=(oversampling_original / oversampling), method='no_pixel').array
else:
im1 = galsim.Image(optPsf_cut, copy=True, scale=1)
interpolated_image = galsim._InterpolatedImage(im1, x_interpolant=galsim.Lanczos(5, True))
optPsf_cut_downsampled = interpolated_image.\
drawImage(nx=size_of_optPsf_cut_downsampled, ny=size_of_optPsf_cut_downsampled,
scale=(oversampling_original / oversampling), method='no_pixel').array
if self.verbosity == 1:
print('optPsf_cut_downsampled.shape: ' + str(optPsf_cut_downsampled.shape))
if self.verbosity == 1:
print('Postprocessing parameters are:')
print(str(['grating_lines', 'scattering_slope', 'scattering_amplitude',
'pixel_effect', 'fiber_r']))
print(str([param_values['grating_lines'], param_values['scattering_slope'],
param_values['scattering_amplitude'], param_values['pixel_effect'],
param_values['fiber_r']]))
##########################################
# 1. scattered light
optPsf_cut_downsampled_scattered = self.apply_scattered_light(optPsf_cut_downsampled,
oversampling,
param_values['scattering_slope'],
param_values['scattering_amplitude'],
dithering=self.dithering)
##########################################
# 2. convolution with fiber
optPsf_cut_fiber_convolved = self.convolve_with_fiber(optPsf_cut_downsampled_scattered,
oversampling,
param_values['fiber_r'],
dithering=self.dithering)
##########################################
# 3. CCD diffusion
optPsf_cut_pixel_response_convolved = self.convolve_with_CCD_diffusion(optPsf_cut_fiber_convolved,
oversampling,
param_values['pixel_effect'],
dithering=self.dithering)
##########################################
# 4. grating effects
optPsf_cut_grating_convolved = self.convolve_with_grating(optPsf_cut_pixel_response_convolved,
oversampling,
self.wavelength,
param_values['grating_lines'],
dithering=self.dithering)
##########################################
# 5. centering
# This is the part which creates the final image
# the algorithm finds the best downsampling combination automatically
if self.verbosity == 1:
print('Are we invoking double sources (1 or True if yes): ' + str(self.double_sources))
print('Double source position/ratio is:' + str(self.double_sources_positions_ratios))
# initialize the class which does the centering -
# TODO: the separation between the class and the main function in the class,
# ``find_single_realization_min_cut'', is a bit blurry and unsatisfactory
# this needs to be improved
single_Psf_position = PsfPosition(optPsf_cut_grating_convolved,
int(round(oversampling)),
shape[0],
simulation_00=self.simulation_00,
verbosity=self.verbosity,
save=self.save,
PSF_DIRECTORY=self.PSF_DIRECTORY)
time_end_single = time.time()
if self.verbosity == 1:
print('Time for postprocessing up to single_Psf_position protocol is ' +
str(time_end_single - time_start_single))
# run the code for centering
time_start_single = time.time()
optPsf_final, psf_position =\
single_Psf_position.find_single_realization_min_cut(optPsf_cut_grating_convolved,
int(round(oversampling)),
shape[0],
self.image,
self.image_var,
self.image_mask,
v_flux=param_values['flux'],
double_sources=self.double_sources,
double_sources_positions_ratios= #noqa: E251
self.double_sources_positions_ratios,
verbosity=self.verbosity,
explicit_psf_position= #noqa: E251
self.explicit_psf_position,
use_only_chi=self.use_only_chi,
use_center_of_flux=self.use_center_of_flux)
time_end_single = time.time()
if self.verbosity == 1:
print('Time for single_Psf_position protocol is ' + str(time_end_single - time_start_single))
if self.verbosity == 1:
print('Successfully created optPsf_final')
if self.save == 1:
np.save(self.TESTING_FINAL_IMAGES_FOLDER + 'optPsf_cut', optPsf_cut)
np.save(self.TESTING_FINAL_IMAGES_FOLDER + 'optPsf_cut_downsampled', optPsf_cut_downsampled)
np.save(self.TESTING_FINAL_IMAGES_FOLDER + 'optPsf_cut_downsampled_scattered',
optPsf_cut_downsampled_scattered)
np.save(self.TESTING_FINAL_IMAGES_FOLDER + 'optPsf_cut_fiber_convolved',
optPsf_cut_fiber_convolved)
np.save(self.TESTING_FINAL_IMAGES_FOLDER + 'optPsf_cut_pixel_response_convolved',
optPsf_cut_pixel_response_convolved)
np.save(self.TESTING_FINAL_IMAGES_FOLDER + 'optPsf_cut_grating_convolved',
optPsf_cut_grating_convolved)
if self.verbosity == 1:
print('Finished with optPsf_postprocessing')
print(' ')
# TODO: at the moment, the output is the same but there is a possibility to add intermediate outputs
if not return_intermediate_images:
return optPsf_final, psf_position
if return_intermediate_images:
return optPsf_final, psf_position
def apply_scattered_light(self, image, oversampling,
scattering_slope, scattering_amplitude, dithering):
"""Add scattered light to optical psf
Parameters
----------
image : `np.array`, (N, N)
input image
oversampling: `int`
how oversampled is `image`
scattering_slope: `float`
slope of the scattered light
scattering_amplitude: `float`
amplitude of the scattered light
dithering: `int`
dithering
Returns
----------
image_scattered : `np.array`, (N, N)
input image with the scattered light added
Notes
----------
Assumes that one physical pixel is 15 microns
so that effective size is 15 / dithering
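Examples
----------
A minimal standalone sketch of the kernel construction and convolution,
mirroring the body of this method (all values and sizes are illustrative only):
>>> import numpy as np
>>> from scipy import signal
>>> xs, ys = np.meshgrid(np.linspace(-50, 50, 101), np.linspace(-50, 50, 101))
>>> r0 = np.sqrt(xs**2 + ys**2) + .01
>>> kernel = r0**(-2.5)                      # power-law wings, slope 2.5
>>> kernel[r0 < 7.5] = 7.5**(-2.5)           # flat core
>>> kernel = kernel * 0.01 / (10 * np.max(kernel))
>>> img = np.zeros((101, 101)); img[50, 50] = 1.
>>> (img + signal.fftconvolve(img, kernel, mode='same')).shape
(101, 101)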
"""
size_of_pixels_in_image = (15 / self.dithering) / oversampling
# size of the created optical PSF images in microns
size_of_image_in_Microns = size_of_pixels_in_image * \
(image.shape[0])
if self.verbosity == 1:
print('image: ' + str(image))
##########################################
# 1. scattered light
# create grid to apply scattered light
pointsx = np.linspace(-(size_of_image_in_Microns - size_of_pixels_in_image) / 2,
(size_of_image_in_Microns - size_of_pixels_in_image) / 2,
num=image.shape[0],
dtype=np.float32)
pointsy = np.linspace(-(size_of_image_in_Microns - size_of_pixels_in_image) / 2,
(size_of_image_in_Microns - size_of_pixels_in_image) / 2,
num=image.shape[0]).astype(np.float32)
xs, ys = np.meshgrid(pointsx, pointsy)
r0 = np.sqrt((xs - 0) ** 2 + (ys - 0) ** 2) + .01
# creating scattered light
scattered_light_kernel = (r0**(-scattering_slope))
scattered_light_kernel[r0 < 7.5] = 7.5**(-scattering_slope)
scattered_light_kernel[scattered_light_kernel == np.inf] = 0
scattered_light_kernel = scattered_light_kernel * \
(scattering_amplitude) / (10 * np.max(scattered_light_kernel))
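# note: after the clipping above the kernel peaks at 7.5**(-scattering_slope)
# (for a positive slope), so this rescaling sets the kernel maximum to
# scattering_amplitude / 10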
# convolve the psf with the scattered light kernel to create scattered light component
scattered_light = signal.fftconvolve(image, scattered_light_kernel, mode='same')
# add back the scattering to the image
image_scattered = image + scattered_light
return image_scattered
def convolve_with_fiber(self, image, oversampling, fiber_r, dithering):
"""Convolve optical psf with a fiber
Parameters
----------
image : `np.array`, (N, N)
input image
oversampling: `int`
how oversampled is `image`
fiber_r: `float`
radius of the fiber in pixel units
dithering: `int`
dithering
Returns
----------
image_fiber_convolved : `np.array`, (N, N)
image convolved with the fiber image
Notes
----------
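The fiber is modelled as a circular top-hat kernel of radius
oversampling * fiber_r * dithering (in oversampled pixels), embedded in a
zero array of the image size before the FFT convolution.
Examples
----------
A rough standalone sketch of the kernel and the convolution
(values and names are illustrative only):
>>> import numpy as np
>>> from astropy.convolution import Tophat2DKernel
>>> from scipy import signal
>>> fiber = Tophat2DKernel(5, mode='oversample').array
>>> img = np.zeros((61, 61), dtype=np.float32); img[30, 30] = 1.
>>> signal.fftconvolve(img, fiber, mode='same').shape
(61, 61)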
"""
fiber = Tophat2DKernel(oversampling * fiber_r * dithering,
mode='oversample').array
# create array with zeros with size of the current image, which we will
# fill with fiber array in the middle
fiber_padded = np.zeros_like(image, dtype=np.float32)
mid_point_of_image = int(image.shape[0] / 2)
fiber_array_size = fiber.shape[0]
# fill the zeroes image with fiber here
fiber_padded[int(mid_point_of_image - fiber_array_size / 2) + 1:
int(mid_point_of_image + fiber_array_size / 2) + 1,
int(mid_point_of_image - fiber_array_size / 2) + 1:
int(mid_point_of_image + fiber_array_size / 2) + 1] = fiber
# convolve with the fiber
image_fiber_convolved = signal.fftconvolve(image, fiber_padded, mode='same')
return image_fiber_convolved
def convolve_with_CCD_diffusion(self, image, oversampling, pixel_effect, dithering):
"""Convolve optical psf with a ccd diffusion effect
Parameters
----------
image : `np.array`, (N, N)
input image
oversampling: `int`
how oversampled is `image`
pixel_effect: `float`
sigma of gaussian kernel convolving image
dithering: `int`
dithering
Returns
----------
image_pixel_response_convolved : `np.array`, (N, N)
image convolved with the ccd diffusion kernel
Notes
----------
Pixels are not perfect detectors.
Charge diffusion in our optical CCDs can be well described with a Gaussian
sigma that is around 7 microns (<NAME> - private communication).
This is controlled in our code by the 'pixel_effect' parameter
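Examples
----------
A ~7 micron diffusion sigma corresponds to pixel_effect of roughly
7 / 15 ~ 0.47 physical pixels. A rough standalone sketch of the kernel
(values are illustrative only):
>>> import numpy as np
>>> from astropy.convolution import Gaussian2DKernel
>>> from scipy import signal
>>> kernel = Gaussian2DKernel(11 * 0.47).array.astype(np.float32)
>>> img = np.zeros((101, 101), dtype=np.float32); img[50, 50] = 1.
>>> signal.fftconvolve(img, kernel, mode='same').shape
(101, 101)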
"""
pixel_gauss = Gaussian2DKernel(oversampling * pixel_effect * dithering).array.astype(np.float32)
pixel_gauss_padded = np.pad(pixel_gauss, int((len(image) - len(pixel_gauss)) / 2),
'constant', constant_values=0)
# assert that gauss_padded array did not produce empty array
assert np.sum(pixel_gauss_padded) > 0
image_pixel_response_convolved = signal.fftconvolve(image, pixel_gauss_padded, mode='same')
return image_pixel_response_convolved
def convolve_with_grating(self, image, oversampling, wavelength, grating_lines, dithering):
"""Convolve optical psf with a grating effect
Parameters
----------
image : `np.array`, (N, N)
input image
oversampling: `int`
how oversampled is `image`
wavelength: `float`
central wavelength of the spot
grating_lines: `int`
effective number of grating lines in the spectrograph
dithering: `int`
dithering
Returns
----------
image_grating_convolved : `np.array`, (N, N)
image convolved with the grating effect
Notes
----------
This code assumes that 15 microns covers a wavelength range of 0.07907 nm
(i.e., that 4300 pixels on the real detector uniformly cover 340 nm)
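Examples
----------
The assumed per-pixel wavelength step (illustrative arithmetic only):
>>> round(340 / 4300, 5)
0.07907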
"""
grating_kernel = np.ones((image.shape[0], 1), dtype=np.float32)
for i in range(len(grating_kernel)):
grating_kernel[i] = Ifun16Ne((i - int(image.shape[0] / 2)) * 0.07907 * 10**-9 /
(dithering * oversampling) + wavelength * 10**-9,
wavelength * 10**-9, grating_lines)
grating_kernel = grating_kernel / np.sum(grating_kernel)
image_grating_convolved = signal.fftconvolve(image, grating_kernel, mode='same')
return image_grating_convolved
def _get_Pupil(self):
"""Create an image of the pupil
Parameters
----------
(none; the pupil model is described by `self.params`, an
`lmfit.Parameters` object or python dictionary)
Returns
----------
pupil : `Pupil`
Instance of the `Pupil` class, as created by PFSPupilFactory.getPupil
Notes
----------
Calls PFSPupilFactory class
"""
if self.verbosity == 1:
print(' ')
print('Entering _get_Pupil (function inside ZernikeFitterPFS)')
if self.verbosity == 1:
print('Size of the pupil (npix): ' + str(self.npix))
Pupil_Image = PFSPupilFactory(
pupilSize=self.diam_sic,
npix=self.npix,
input_angle=np.pi / 2,
detFrac=self.params['detFrac'].value,
strutFrac=self.params['strutFrac'].value,
slitFrac=self.params['slitFrac'].value,
slitFrac_dy=self.params['slitFrac_dy'].value,
x_fiber=self.params['x_fiber'].value,
y_fiber=self.params['y_fiber'].value,
effective_ilum_radius=self.params['effective_ilum_radius'].value,
frd_sigma=self.params['frd_sigma'].value,#noqa: E
frd_lorentz_factor=self.params['frd_lorentz_factor'].value,
det_vert=self.params['det_vert'].value,
slitHolder_frac_dx=self.params['slitHolder_frac_dx'].value,
wide_0=self.params['wide_0'].value,
wide_23=self.params['wide_23'].value,
wide_43=self.params['wide_43'].value,
misalign=self.params['misalign'].value,
verbosity=self.verbosity)
point = [self.params['dxFocal'].value, self.params['dyFocal'].value]#noqa: E
pupil = Pupil_Image.getPupil(point)
if self.save == 1:
np.save(self.TESTING_PUPIL_IMAGES_FOLDER + 'pupil.illuminated',
pupil.illuminated.astype(np.float32))
if self.verbosity == 1:
print('Finished with _get_Pupil')
return pupil
def _getOptPsf_naturalResolution(self, params, return_intermediate_images=False):
"""Returns optical PSF, given the initialized parameters
called by constructModelImage_PFS_naturalResolution
Parameters
----------
params : `lmfit.Parameters` object or python dictionary
Parameters describing the model
return_intermediate_images : `bool`
If True, return intermediate images created during the run
This is intended to help with debugging and inspecting
the images created during the process
Returns
----------
(if not return_intermediate_images)
img_apod : `np.array`, (N, N)
Psf image, only optical components considered
(if return_intermediate_images)
# return the image, pupil, illumination applied to the pupil
img_apod : `np.array`, (N, N)
Psf image, only optical components considered
ilum : `np.array`, (N, N)
Image showing the illumination of the pupil
wf_grid_rot : `np.array`, (N, N)
Image showing the wavefront across the pupil
Notes
----------
called by constructModelImage_PFS_naturalResolution
"""
if self.verbosity == 1:
print(' ')
print('Entering _getOptPsf_naturalResolution')
################################################################################
# pupil and illumination of the pupil
################################################################################
time_start_single_1 = time.time()
diam_sic = self.diam_sic
if self.verbosity == 1:
print(['detFrac', 'strutFrac', 'dxFocal', 'dyFocal', 'slitFrac', 'slitFrac_dy'])
print(['x_fiber', 'y_fiber', 'effective_ilum_radius', 'frd_sigma',
'frd_lorentz_factor', 'det_vert', 'slitHolder_frac_dx'])
print(['wide_0', 'wide_23', 'wide_43', 'misalign'])
# print('set of pupil_parameters I. : ' + str(self.pupil_parameters[:6]))
# print('set of pupil_parameters II. : ' + str(self.pupil_parameters[6:6 + 7]))
# print('set of pupil_parameters III. : ' + str(self.pupil_parameters[13:]))
time_start_single_2 = time.time()
# initialize the galsim.Aperture class
# the output will be the size of pupil.illuminated
pupil = self._get_Pupil()
aper = galsim.Aperture(
diam=pupil.size,
pupil_plane_im=pupil.illuminated.astype(np.float32),
pupil_plane_scale=pupil.scale,
pupil_plane_size=None)
if self.verbosity == 1:
if self.pupilExplicit is None:
print('Requested pupil size is (pupil.size) [m]: ' + str(pupil.size))
print('One pixel has size of (pupil.scale) [m]: ' + str(pupil.scale))
print('Requested pupil has so many pixels (pupil_plane_im): ' +
str(pupil.illuminated.astype(np.int16).shape))
else:
print('Supplied pupil size is (diam_sic) [m]: ' + str(self.diam_sic))
print('One pixel has size of (diam_sic/npix) [m]: ' + str(self.diam_sic / self.npix))
print('Requested pupil has so many pixels (pupilExplicit): ' + str(self.pupilExplicit.shape))
time_end_single_2 = time.time()
if self.verbosity == 1:
print('Time for _get_Pupil function is ' + str(time_end_single_2 - time_start_single_2))
time_start_single_3 = time.time()
# create array with pixels=1 if the area is illuminated and 0 if it is obscured
ilum = np.array(aper.illuminated, dtype=np.float32)
assert np.sum(ilum) > 0, str(self.pupil_parameters)
# limits of the central (npix x npix) region within the illuminated image
lower_limit_of_ilum = int(ilum.shape[0] / 2 - self.npix / 2)
higher_limit_of_ilum = int(ilum.shape[0] / 2 + self.npix / 2)
if self.verbosity == 1:
print('lower_limit_of_ilum: ' + str(lower_limit_of_ilum))
print('higher_limit_of_ilum: ' + str(higher_limit_of_ilum))
if self.pupilExplicit is None:
ilum[lower_limit_of_ilum:higher_limit_of_ilum,
lower_limit_of_ilum:higher_limit_of_ilum] = ilum[lower_limit_of_ilum:higher_limit_of_ilum,
lower_limit_of_ilum:higher_limit_of_ilum] *\
pupil.illuminated
else:
ilum[lower_limit_of_ilum:higher_limit_of_ilum,
lower_limit_of_ilum:higher_limit_of_ilum] = ilum[lower_limit_of_ilum:higher_limit_of_ilum,
lower_limit_of_ilum:higher_limit_of_ilum] *\
self.pupilExplicit.astype(np.float32)
if self.verbosity == 1:
print('Size after padding zeros to 2x size and extra padding to get size suitable for FFT: ' +
str(ilum.shape))
# maximum extent of pupil image in units of radius of the pupil, needed for next step
size_of_ilum_in_units_of_radius = ilum.shape[0] / self.npix
if self.verbosity == 1:
print('size_of_ilum_in_units_of_radius: ' + str(size_of_ilum_in_units_of_radius))
# do not calculate the ``radiometric effect'' (difference between entrance and exit pupil)
# if the parameters are too small to make any difference
# if that is the case just declare ``ilum_radiometric'' to be the same as ilum
# i.e., the illumination of the exit pupil is the same as the illumination of the entrance pupil
if params['radiometricExponent'] < 0.01 or params['radiometricEffect'] < 0.01:
if self.verbosity == 1:
print('skipping ``radiometric effect\'\' ')
ilum_radiometric = ilum
else:
if self.verbosity == 1:
print('radiometric parameters are: ')
print('x_ilum,y_ilum,radiometricEffect,radiometricExponent' +
str([params['x_ilum'], params['y_ilum'],
params['radiometricEffect'], params['radiometricExponent']]))
# add the change of flux between the entrance and exit pupil
# end product is radiometricEffectArray
points = np.linspace(-size_of_ilum_in_units_of_radius,
size_of_ilum_in_units_of_radius, num=ilum.shape[0])
xs, ys = np.meshgrid(points, points)
_radius_coordinate = np.sqrt(
(xs - params['x_ilum'] * params['dxFocal']) ** 2 +
(ys - params['y_ilum'] * params['dyFocal']) ** 2)
# illumination to which the radiometric effect has been applied, describing
# the difference between the entrance and exit pupil
radiometricEffectArray = (1 + params['radiometricEffect'] *
_radius_coordinate**2)**(-params['radiometricExponent'])
ilum_radiometric = np.nan_to_num(radiometricEffectArray * ilum, 0)
# this is where you can introduce some apodization in the pupil image by using the line below
# for larger images, scale according to the size of the input image which is to be FFT-ed
# 0.75 is an arbitrary number
apodization_sigma = ((len(ilum_radiometric)) / 1158)**0.875 * 0.75
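# e.g., for a 1158 x 1158 input this evaluates to an apodization sigma of 0.75 pixels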
time_start_single_4 = time.time()
# cut out central region, apply Gaussian on the center region and return to the full size image
# this is done to speed up the calculation
# noqa: E128 in order to keep informative names
ilum_radiometric_center_region = ilum_radiometric[(lower_limit_of_ilum -
int(np.ceil(3 * apodization_sigma))):(higher_limit_of_ilum + # noqa: E128
int(np.ceil(3 * apodization_sigma))),
(lower_limit_of_ilum - int(np.ceil(3 * apodization_sigma))):
(higher_limit_of_ilum + int(np.ceil(3 * apodization_sigma)))]
ilum_radiometric_center_region_apodized = gaussian_filter(
ilum_radiometric_center_region, sigma=apodization_sigma)
ilum_radiometric_apodized = np.copy(ilum_radiometric)
ilum_radiometric_apodized[(lower_limit_of_ilum -
int(np.ceil(3 * apodization_sigma))):(higher_limit_of_ilum +
int(np.ceil(3 * apodization_sigma))), (lower_limit_of_ilum - # noqa: E128
int(np.ceil(3 * apodization_sigma))):(higher_limit_of_ilum +
int(np.ceil(3 * apodization_sigma)))] =\
ilum_radiometric_center_region_apodized
time_end_single_4 = time.time()
if self.verbosity == 1:
print('Time to apodize the pupil: ' + str(time_end_single_4 - time_start_single_4))
print('type(ilum_radiometric_apodized)' + str(type(ilum_radiometric_apodized[0][0])))
# create a boolean illumination mask: pixels with amplitude above 0.01 are kept (1), the rest set to 0
r_ilum_pre = np.copy(ilum_radiometric_apodized)
r_ilum_pre[ilum_radiometric_apodized > 0.01] = 1
r_ilum_pre[ilum_radiometric_apodized < 0.01] = 0
ilum_radiometric_apodized_bool = r_ilum_pre.astype(bool)
# manual creation of aper.u and aper.v (mimicking steps which were automatically done in galsim)
# this gives position information about each point in the exit pupil so we can apply wavefront to it
single_line_aperu_manual = np.linspace(-diam_sic * (size_of_ilum_in_units_of_radius / 2), diam_sic * (
size_of_ilum_in_units_of_radius / 2), len(ilum_radiometric_apodized_bool), endpoint=True)
aperu_manual = np.tile(
single_line_aperu_manual,
len(single_line_aperu_manual)).reshape(
len(single_line_aperu_manual),
len(single_line_aperu_manual))
# full grid
u_manual = aperu_manual
v_manual = np.transpose(aperu_manual)
# select only parts of the grid that are actually illuminated
u = u_manual[ilum_radiometric_apodized_bool]
v = v_manual[ilum_radiometric_apodized_bool]
time_end_single_3 = time.time()
if self.verbosity == 1:
print('Time for postprocessing pupil after _get_Pupil ' +
str(time_end_single_3 - time_start_single_3))
time_end_single_1 = time.time()
if self.verbosity == 1:
print('Time for pupil and illumination calculation is ' +
str(time_end_single_1 - time_start_single_1))
################################################################################
# wavefront
################################################################################
# create wavefront across the exit pupil
time_start_single = time.time()
if self.verbosity == 1:
print('')
print('Starting creation of wavefront')
aberrations_init = [0.0, 0, 0.0, 0.0]
aberrations = aberrations_init
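# note: the list follows the galsim convention in which entry i is the Zernike
# coefficient with Noll index i; the first four entries (unused/piston/tip/tilt)
# stay at zero and entries from Noll index 4 (defocus) upwards are filled below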
# list of aberrations where z4, z11, z22 etc. are set to zero
# This is only for testing purposes, to study the behaviour of non-focus terms
aberrations_0 = list(np.copy(aberrations_init))
for i in range(4, self.zmax + 1):
aberrations.append(params['z{}'.format(i)])
if i in [4, 11, 22]:
aberrations_0.append(0)
else:
aberrations_0.append(params['z{}'.format(i)])
# if you have passed aberrations above Zernike 22, join them with the lower
# order aberrations here
if self.extraZernike is None:
pass
else:
aberrations_extended = np.concatenate((aberrations, self.extraZernike), axis=0)
if self.verbosity == 1:
print('diam_sic [m]: ' + str(diam_sic))
print('aberrations: ' + str(aberrations))
print('aberrations moved to z4=0: ' + str(aberrations_0))
print('aberrations extra: ' + str(self.extraZernike))
print('wavelength [nm]: ' + str(self.wavelength))
if self.extraZernike is None:
optics_screen = galsim.phase_screens.OpticalScreen(
diam=diam_sic, aberrations=aberrations, lam_0=self.wavelength)
if self.save == 1:
# only create fake images with aberrations set to 0 if we are going to save
# i.e., if we are testing the results
optics_screen_fake_0 = galsim.phase_screens.OpticalScreen(
diam=diam_sic, aberrations=aberrations_0, lam_0=self.wavelength)
else:
optics_screen = galsim.phase_screens.OpticalScreen(
diam=diam_sic, aberrations=aberrations_extended, lam_0=self.wavelength)
if self.save == 1:
# only create fake images with aberrations set to 0 if we are going to save
# i.e., if we are testing the results
optics_screen_fake_0 = galsim.phase_screens.OpticalScreen(
diam=diam_sic, aberrations=aberrations_0, lam_0=self.wavelength)
screens = galsim.PhaseScreenList(optics_screen)
if self.save == 1:
# only create fake images with aberrations set to 0 if we are going to save
# i.e., if we are testing the results
screens_fake_0 = galsim.PhaseScreenList(optics_screen_fake_0)
time_end_single = time.time()
################################################################################
# combining the pupil illumination and the wavefront
################################################################################
# apply wavefront to the array describing illumination
if self.use_wf_grid is None:
wf = screens.wavefront(u, v, None, 0)
if self.save == 1:
wf_full = screens.wavefront(u_manual, v_manual, None, 0)
wf_grid = np.zeros_like(ilum_radiometric_apodized_bool, dtype=np.float32)
wf_grid[ilum_radiometric_apodized_bool] = (wf / self.wavelength)
wf_grid_rot = wf_grid
else:
# if you want to pass an explicit wavefront, it is applied here
wf_grid = self.use_wf_grid
wf_grid_rot = wf_grid
if self.save == 1:
# only create fake images with aberrations set to 0 if we are going to save
# i.e., if we are testing the results
if self.verbosity == 1:
print('creating wf_full_fake_0')
wf_full_fake_0 = screens_fake_0.wavefront(u_manual, v_manual, None, 0)
# exponential of the wavefront
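# i.e., build the complex pupil function P(u, v) = A(u, v) * exp(2 * pi * i * W(u, v) / lambda),
# where A is the apodized illumination and wf_grid_rot already stores W / lambda (in waves)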
expwf_grid = np.zeros_like(ilum_radiometric_apodized_bool, dtype=np.complex64)
expwf_grid[ilum_radiometric_apodized_bool] =\
ilum_radiometric_apodized[ilum_radiometric_apodized_bool] *\
np.exp(2j * np.pi * wf_grid_rot[ilum_radiometric_apodized_bool])
if self.verbosity == 1:
print('Time for wavefront and wavefront/pupil combining is ' +
str(time_end_single - time_start_single))
################################################################################
# execute the FFT
################################################################################
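# Fraunhofer propagation: the image-plane amplitude is the (shifted) Fourier
# transform of the complex pupil function, and the PSF is its squared modulus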
time_start_single = time.time()
ftexpwf = np.fft.fftshift(scipy.fftpack.fft2(np.fft.fftshift(expwf_grid)))
img_apod = np.abs(ftexpwf)**2
time_end_single = time.time()
if self.verbosity == 1:
print('Time for FFT is ' + str(time_end_single - time_start_single))
######################################################################
# size in arcseconds of the image generated by the code
scale_ModelImage_PFS_naturalResolution = sky_scale(
size_of_ilum_in_units_of_radius * self.diam_sic, self.wavelength)
self.scale_ModelImage_PFS_naturalResolution = scale_ModelImage_PFS_naturalResolution
if self.save == 1:
np.save(self.TESTING_PUPIL_IMAGES_FOLDER + 'aperilluminated', aper.illuminated)
np.save(self.TESTING_PUPIL_IMAGES_FOLDER + 'ilum', ilum)
np.save(self.TESTING_PUPIL_IMAGES_FOLDER + 'ilum_radiometric', ilum_radiometric)
np.save(self.TESTING_PUPIL_IMAGES_FOLDER + 'ilum_radiometric_apodized', ilum_radiometric_apodized)
np.save(self.TESTING_PUPIL_IMAGES_FOLDER +
'ilum_radiometric_apodized_bool', ilum_radiometric_apodized_bool)
np.save(self.TESTING_WAVEFRONT_IMAGES_FOLDER + 'u_manual', u_manual)
np.save(self.TESTING_WAVEFRONT_IMAGES_FOLDER + 'v_manual', v_manual)
np.save(self.TESTING_WAVEFRONT_IMAGES_FOLDER + 'u', u)
np.save(self.TESTING_WAVEFRONT_IMAGES_FOLDER + 'v', v)
np.save(self.TESTING_WAVEFRONT_IMAGES_FOLDER + 'wf_grid', wf_grid)
if self.use_wf_grid is None:
np.save(self.TESTING_WAVEFRONT_IMAGES_FOLDER + 'wf_full', wf_full)
np.save(self.TESTING_WAVEFRONT_IMAGES_FOLDER + 'wf_full_fake_0', wf_full_fake_0)
np.save(self.TESTING_WAVEFRONT_IMAGES_FOLDER + 'expwf_grid', expwf_grid)
if self.verbosity == 1:
print('Finished with _getOptPsf_naturalResolution')
print(' ')
if not return_intermediate_images:
return img_apod
if return_intermediate_images:
return img_apod, ilum[lower_limit_of_ilum:higher_limit_of_ilum,
lower_limit_of_ilum:higher_limit_of_ilum], wf_grid_rot
class PupilFactory(object):
"""!Pupil obscuration function factory for use with Fourier optics.
Based on the code by <NAME>, developed for HSC camera
Contains functions that can create various obscurations in the camera
"""
def __init__(
self,
pupilSize,
npix,
input_angle,
detFrac,
strutFrac,
slitFrac,
slitFrac_dy,
x_fiber,
y_fiber,
effective_ilum_radius,
frd_sigma,
frd_lorentz_factor,
det_vert,
wide_0,
wide_23,
wide_43,
misalign,
verbosity=0):
"""Construct a PupilFactory.
Parameters
----------
pupilSize: `float`
Size of the exit pupil [m]
npix: `int`
Constructed Pupils will be npix x npix
input_angle: `float`
Angle of the pupil (for all practical purposes fixed at np.pi/2)
detFrac: `float`
Value determining how much of the exit pupil is obscured by the
central obscuration (detector)
strutFrac: `float`
Value determining how much of the exit pupil is obscured
by a single strut
slitFrac: `float`
Value determining how much of the exit pupil is obscured by slit
slitFrac_dy: `float`
Value determining what is the vertical position of the slit
in the exit pupil
x_fiber: `float`
Position of the fiber misalignment in the x direction
y_fiber: `float`
Position of the fiber misalignment in the y direction
effective_ilum_radius: `float`
Fraction of the maximal radius of the illumination
of the exit pupil that is actually illuminated
frd_sigma: `float`
Sigma of Gaussian convolving only outer edge, mimicking FRD
frd_lorentz_factor: `float`
Strength of the lorentzian factor describing wings
det_vert: `float`
Multiplicative factor determining vertical size
of the detector obscuration
wide_0: `float`
Widening of the strut at 0 degrees
wide_23: `float`
Widening of the strut at the top-left corner
wide_43: `float`
Widening of the strut at the bottom-left corner
misalign: `float`
Describing the amount of misalignment
verbosity: `int`
How verbose during evaluation (1 = full verbosity)
"""
self.verbosity = verbosity
if self.verbosity == 1:
print('Entering PupilFactory class')
self.pupilSize = pupilSize
self.npix = npix
self.input_angle = input_angle
self.detFrac = detFrac
self.strutFrac = strutFrac
self.pupilScale = pupilSize / npix
self.slitFrac = slitFrac
self.slitFrac_dy = slitFrac_dy
self.effective_ilum_radius = effective_ilum_radius
self.frd_sigma = frd_sigma
self.frd_lorentz_factor = frd_lorentz_factor
self.det_vert = det_vert
self.wide_0 = wide_0
self.wide_23 = wide_23
self.wide_43 = wide_43
self.misalign = misalign
u = (np.arange(npix, dtype=np.float32) - (npix - 1) / 2) * self.pupilScale
self.u, self.v = np.meshgrid(u, u)
@staticmethod
def _pointLineDistance(p0, p1, p2):
"""Compute the right-angle distance between the points given by `p0`
and the line that passes through `p1` and `p2`.
@param[in] p0 2-tuple of numpy arrays (x,y coords)
@param[in] p1 2-tuple of scalars (x,y coords)
@param[in] p2 2-tuple of scalars (x,y coords)
@returns numpy array of distances; shape congruent to p0[0]
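Illustrative example - distance from the origin to the horizontal line y = 1:
>>> import numpy as np
>>> PupilFactory._pointLineDistance((np.array([0.]), np.array([0.])), (0., 1.), (1., 1.))
array([1.])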
"""
x0, y0 = p0
x1, y1 = p1
x2, y2 = p2
dy21 = y2 - y1
dx21 = x2 - x1
return np.abs(dy21 * x0 - dx21 * y0 + x2 * y1 - y2 * x1) / np.hypot(dy21, dx21)
def _fullPupil(self):
"""Make a fully-illuminated Pupil.
@returns Pupil
"""
illuminated = np.ones(self.u.shape, dtype=np.float32)
return Pupil(illuminated, self.pupilSize, self.pupilScale)
def _cutCircleInterior(self, pupil, p0, r):
"""Cut out the interior of a circular region from a Pupil.
@param[in,out] pupil Pupil to modify in place
@param[in] p0 2-tuple indicating region center
@param[in] r Circular region radius
"""
r2 = (self.u - p0[0])**2 + (self.v - p0[1])**2
pupil.illuminated[r2 < r**2] = False
def _cutCircleExterior(self, pupil, p0, r):
"""Cut out the exterior of a circular region from a Pupil.
@param[in,out] pupil Pupil to modify in place
@param[in] p0 2-tuple indicating region center
@param[in] r Circular region radius
"""
r2 = (self.u - p0[0])**2 + (self.v - p0[1])**2
pupil.illuminated[r2 > r**2] = False
def _cutEllipseExterior(self, pupil, p0, r, b, thetarot):
"""Cut out the exterior of a circular region from a Pupil.
@param[in,out] pupil Pupil to modify in place
@param[in] p0 2-tuple indicating region center
@param[in] r Ellipse region radius = major axis
@param[in] b Ellipse region radius = minor axis
@param[in] thetarot Ellipse region rotation
"""
r2 = (self.u - p0[0])**2 + (self.v - p0[1])**2
theta = np.arctan(self.u / self.v) + thetarot
pupil.illuminated[r2 > r**2 * b**2 / (b**2 * (np.cos(theta))**2 + r**2 * (np.sin(theta))**2)] = False
def _cutSquare(self, pupil, p0, r, angle, det_vert):
"""Cut out the interior of a circular region from a Pupil.
It is not necesarilly square, because of the det_vert parameter
@param[in,out] pupil Pupil to modify in place
@param[in] p0 2-tuple indicating region center
@param[in] r half length of the square side
@param[in] angle angle that the camera is rotated
@param[in] det_vert multiplicative factor that distorts the square into a rectangle
"""
pupil_illuminated_only1 = np.ones_like(pupil.illuminated, dtype=np.float32)
time_start_single_square = time.time()
###########################################################
# Central square
if det_vert is None:
det_vert = 1
x21 = -r / 2 * det_vert * 1
x22 = +r / 2 * det_vert * 1
y21 = -r / 2 * 1
y22 = +r / 2 * 1
i_max = self.npix / 2 - 0.5
i_min = -i_max
i_y_max = int(np.round((x22 + p0[1]) / self.pupilScale - (i_min)))
i_y_min = int(np.round((x21 + p0[1]) / self.pupilScale - (i_min)))
i_x_max = int(np.round((y22 + p0[0]) / self.pupilScale - (i_min)))
i_x_min = int(np.round((y21 + p0[0]) / self.pupilScale - (i_min)))
assert angle == np.pi / 2
camX_value_for_f_multiplier = p0[0]
camY_value_for_f_multiplier = p0[1]
camY_Max = 0.02
f_multiplier_factor = (-camX_value_for_f_multiplier * 100 / 3) * \
(np.abs(camY_value_for_f_multiplier) / camY_Max) + 1
if self.verbosity == 1:
print('f_multiplier_factor for size of detector triangle is: ' + str(f_multiplier_factor))
pupil_illuminated_only0_in_only1 = np.zeros((i_y_max - i_y_min, i_x_max - i_x_min))
u0 = self.u[i_y_min:i_y_max, i_x_min:i_x_max]
v0 = self.v[i_y_min:i_y_max, i_x_min:i_x_max]
# factors that control how big the triangle in the corner of the detector is
f = 0.2
f_multiplier = f_multiplier_factor / 1
###########################################################
# Lower right corner
x21 = -r / 2
x22 = +r / 2
y21 = -r / 2 * det_vert
y22 = +r / 2 * det_vert
f_lr = np.copy(f) * (1 / f_multiplier)
angleRad21 = -np.pi / 4
triangle21 = [[p0[0] + x22, p0[1] + y21], [p0[0] + x22, p0[1] +
y21 - y21 * f_lr], [p0[0] + x22 - x22 * f_lr, p0[1] + y21]]
p21 = triangle21[0]
y22 = (triangle21[1][1] - triangle21[0][1]) / np.sqrt(2)
y21 = 0
x21 = (triangle21[2][0] - triangle21[0][0]) / np.sqrt(2)
x22 = -(triangle21[2][0] - triangle21[0][0]) / np.sqrt(2)
pupil_illuminated_only0_in_only1[((v0 - p21[1]) * np.cos(-angleRad21) -
(u0 - p21[0]) * np.sin(-angleRad21) < y22)] = True
###########################################################
# Upper left corner
x21 = -r / 2 * 1
x22 = +r / 2 * 1
y21 = -r / 2 * det_vert
y22 = +r / 2 * det_vert
f_ul = np.copy(f) * (1 / f_multiplier)
triangle12 = [[p0[0] + x21, p0[1] + y22], [p0[0] + x21, p0[1] +
y22 - y22 * f_ul], [p0[0] + x21 - x21 * f_ul, p0[1] + y22]]
p21 = triangle12[0]
y22 = 0
y21 = (triangle12[1][1] - triangle12[0][1]) / np.sqrt(2)
x21 = -(triangle12[2][0] - triangle12[0][0]) / np.sqrt(2)
x22 = +(triangle12[2][0] - triangle12[0][0]) / np.sqrt(2)
pupil_illuminated_only0_in_only1[((v0 - p21[1]) * np.cos(-angleRad21) -
(u0 - p21[0]) * np.sin(-angleRad21) > y21)] = True
###########################################################
# Upper right corner
x21 = -r / 2 * 1
x22 = +r / 2 * 1
y21 = -r / 2 * det_vert
y22 = +r / 2 * det_vert
f_ur = np.copy(f) * f_multiplier
triangle22 = [[p0[0] + x22, p0[1] + y22], [p0[0] + x22, p0[1] +
y22 - y22 * f_ur], [p0[0] + x22 - x22 * f_ur, p0[1] + y22]]
p21 = triangle22[0]
y22 = -0
y21 = +(triangle22[1][1] - triangle22[0][1]) / np.sqrt(2)
x21 = +(triangle22[2][0] - triangle22[0][0]) / np.sqrt(2)
x22 = -(triangle22[2][0] - triangle22[0][0]) / np.sqrt(2)
pupil_illuminated_only0_in_only1[((u0 - p21[0]) * np.cos(-angleRad21) +
(v0 - p21[1]) * np.sin(-angleRad21) > x21)] = True
###########################################################
# Lower left corner
x21 = -r / 2 * 1
x22 = +r / 2 * 1
y21 = -r / 2 * det_vert
y22 = +r / 2 * det_vert
f_ll = np.copy(f) * f_multiplier
triangle11 = [[p0[0] + x21, p0[1] + y21], [p0[0] + x21, p0[1] +
y21 - y21 * f_ll], [p0[0] + x21 - x21 * f_ll, p0[1] + y21]]
p21 = triangle11[0]
y22 = -(triangle11[1][1] - triangle11[0][1]) / np.sqrt(2)
y21 = 0
x21 = +(triangle11[2][0] - triangle11[0][0]) / np.sqrt(2)
x22 = +(triangle11[2][0] - triangle11[0][0]) / np.sqrt(2)
pupil_illuminated_only0_in_only1[((u0 - p21[0]) * np.cos(-angleRad21) +
(v0 - p21[1]) * np.sin(-angleRad21) < x22)] = True
pupil_illuminated_only1[i_y_min:i_y_max, i_x_min:i_x_max] = pupil_illuminated_only0_in_only1
pupil.illuminated = pupil.illuminated * pupil_illuminated_only1
time_end_single_square = time.time()
if self.verbosity == 1:
print('Time for cutting out the central square is ' +
str(time_end_single_square - time_start_single_square))
def _cutRay(self, pupil, p0, angle, thickness, angleunit=None, wide=0):
"""Cut out a ray from a Pupil.
@param[in,out] pupil Pupil to modify in place
@param[in] p0 2-tuple indicating ray starting point
@param[in] angle Ray angle measured CCW from +x.
@param[in] thickness Thickness of cutout
@param[in] angleunit If None, `angle` is assumed to be an Angle object and is
converted via asRadians(); otherwise `angle` is taken to already be in radians
@param[in] wide Controls the widening of the strut as
a function of the distance from the origin
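Note: the effective thickness of the cut grows linearly with the distance r
from p0, as thickness * (1 + wide * 14.34 * r) (see the implementation below).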
"""
if angleunit is None:
angleRad = angle.asRadians()
else:
angleRad = angle
# the 1 is arbitrary, just need something to define another point on
# the line
p1 = (p0[0] + 1, p0[1] + np.tan(angleRad))
d = PupilFactory._pointLineDistance((self.u, self.v), p0, p1)
radial_distance = 14.34 * np.sqrt((self.u - p0[0])**2 + (self.v - p0[1])**2)
pupil.illuminated[(d < 0.5 * thickness * (1 + wide * radial_distance)) &
((self.u - p0[0]) * np.cos(angleRad) +
(self.v - p0[1]) * np.sin(angleRad) >= 0)] = False
def _addRay(self, pupil, p0, angle, thickness, angleunit=None):
"""Add a ray from a Pupil.
@param[in,out] pupil Pupil to modify in place
@param[in] p0 2-tuple indicating ray starting point
@param[in] angle Ray angle measured CCW from +x.
@param[in] thickness Thickness of cutout
"""
if angleunit is None:
angleRad = angle.asRadians()
else:
angleRad = angle
# the 1 is arbitrary, just need something to define another point on
# the line
p1 = (p0[0] + 1, p0[1] + np.tan(angleRad))
d = PupilFactory._pointLineDistance((self.u, self.v), p0, p1)
pupil.illuminated[(d < 0.5 * thickness) &
((self.u - p0[0]) * np.cos(angleRad) +
(self.v - p0[1]) * np.sin(angleRad) >= 0)] = True
class PFSPupilFactory(PupilFactory):
"""Pupil obscuration function factory for PFS
Based on the code by <NAME>, initially developed for HSC camera
Invokes PupilFactory to create obscurations of the camera
Adds various illumination effects which are specific to the spectrograph
"""
def __init__(
self,
pupilSize,
npix,
input_angle,
detFrac,
strutFrac,
slitFrac,
slitFrac_dy,
x_fiber,
y_fiber,
effective_ilum_radius,
frd_sigma,
frd_lorentz_factor,
det_vert,
slitHolder_frac_dx,
wide_0,
wide_23,
wide_43,
misalign,
verbosity=0):
"""Construct a PFS PupilFactory.
Parameters
----------
pupilSize: `float`
Size of the exit pupil [m]
npix: `int`
Constructed Pupils will be npix x npix
input_angle: `float`
Angle of the pupil (for all practical purposes fixed at np.pi/2)
detFrac: `float`
Value determining how much of the exit pupil is obscured by the
central obscuration (detector)
strutFrac: `float`
Value determining how much of the exit pupil is obscured
by a single strut
slitFrac: `float`
Value determining how much of the exit pupil is obscured by slit
slitFrac_dy: `float`
Value determining what is the vertical position of the slit
in the exit pupil
x_fiber: `float`
Position of the fiber misalignment in the x direction
y_fiber: `float`
Position of the fiber misalignment in the y direction
effective_ilum_radius: `float`
Fraction of the maximal radius of the illumination
of the exit pupil that is actually illuminated
frd_sigma: `float`
Sigma of Gaussian convolving only outer edge, mimicking FRD
frd_lorentz_factor: `float`
Strength of the lorentzian factor describing wings
det_vert: `float`
Multiplicative factor determining vertical size
of the detector obscuration
wide_0: `float`
Widening of the strut at 0 degrees
wide_23: `float`
Widening of the strut at the top-left corner
wide_43: `float`
Widening of the strut at the bottom-left corner
misalign: `float`
Describing the amount of misalignment
verbosity: `int`
How verbose during evaluation (1 = full verbosity)
"""
self.verbosity = verbosity
if self.verbosity == 1:
print('Entering PFSPupilFactory class')
PupilFactory.__init__(
self,
pupilSize,
npix,
input_angle,
detFrac,
strutFrac,
slitFrac,
slitFrac_dy,
x_fiber,
y_fiber,
effective_ilum_radius,
frd_sigma,
frd_lorentz_factor,
det_vert,
verbosity=self.verbosity,
wide_0=wide_0,
wide_23=wide_23,
wide_43=wide_43,
misalign=misalign)
self.x_fiber = x_fiber
self.y_fiber = y_fiber
self.slitHolder_frac_dx = slitHolder_frac_dx
self._spiderStartPos = [np.array([0., 0.]), np.array([0., 0.]), np.array([0., 0.])]
self._spiderAngles = [0, np.pi * 2 / 3, np.pi * 4 / 3]
self.effective_ilum_radius = effective_ilum_radius
self.wide_0 = wide_0
self.wide_23 = wide_23
self.wide_43 = wide_43
self.misalign = misalign
def getPupil(self, point):
"""Calculate a Pupil at a given point in the focal plane.
@param point Point2D indicating focal plane coordinates.
@returns Pupil
"""
if self.verbosity == 1:
print('Entering getPupil (function inside PFSPupilFactory)')
# called subaruRadius as it was taken from the code fitting pupil for HSC on Subaru
subaruRadius = (self.pupilSize / 2) * 1
detFrac = self.detFrac # linear fraction
hscRadius = detFrac * subaruRadius
slitFrac = self.slitFrac # linear fraction
subaruSlit = slitFrac * subaruRadius
strutFrac = self.strutFrac # linear fraction
subaruStrutThick = strutFrac * subaruRadius
# y-position of the slit
slitFrac_dy = self.slitFrac_dy
# relic from the HSC code
# See DM-8589 for more detailed description of following parameters
# d(lensCenter)/d(theta) in meters per degree
# lensRate = 0.0276 * 3600 / 128.9 * subaruRadius
# d(cameraCenter)/d(theta) in meters per degree
hscRate = 2.62 / 1000 * subaruRadius
hscPlateScale = 380
thetaX = point[0] * hscPlateScale
thetaY = point[1] * hscPlateScale
pupil = self._fullPupil()
camX = thetaX * hscRate
camY = thetaY * hscRate
# creating FRD effects
single_element = np.linspace(-1, 1, len(pupil.illuminated), endpoint=True, dtype=np.float32)
u_manual = np.tile(single_element, (len(single_element), 1))
v_manual = np.transpose(u_manual)
center_distance = np.sqrt((u_manual - self.x_fiber * hscRate * hscPlateScale * 12)
** 2 + (v_manual - self.y_fiber * hscRate * hscPlateScale * 12)**2)
frd_sigma = self.frd_sigma
sigma = 2 * frd_sigma
pupil_frd = (1 / 2 * (scipy.special.erf((-center_distance + self.effective_ilum_radius) / sigma) +
scipy.special.erf((center_distance + self.effective_ilum_radius) / sigma)))
################
# Adding misalignment in this section
time_misalign_start = time.time()
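# The block below estimates, in pixels, the radius at which the FRD-smoothed
# illumination drops below 0.5 (searching left/right/up/down from the illumination
# center), converts the radial distance map to that scale, and then adds the
# misalignment correction dif_due_to_mis evaluated as a function of radius.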
position_of_center_0 = np.where(center_distance == np.min(center_distance))
position_of_center = [position_of_center_0[1][0], position_of_center_0[0][0]]
position_of_center_0_x = position_of_center_0[0][0]
position_of_center_0_y = position_of_center_0[1][0]
distances_to_corners = np.array([np.sqrt(position_of_center[0]**2 + position_of_center[1]**2),
np.sqrt((len(pupil_frd) - position_of_center[0])**2 +
position_of_center[1]**2), np.sqrt((position_of_center[0])**2 +
(len(pupil_frd) - position_of_center[1])**2),
np.sqrt((len(pupil_frd) - position_of_center[0])**2 +
(len(pupil_frd) - position_of_center[1])**2)])
max_distance_to_corner = np.max(distances_to_corners)
threshold_value = 0.5
left_from_center = np.where(pupil_frd[position_of_center_0_x]
[0:position_of_center_0_y] < threshold_value)[0]
right_from_center = \
np.where(pupil_frd[position_of_center_0_x][position_of_center_0_y:] < threshold_value)[0] +\
position_of_center_0_y
up_from_center = \
np.where(pupil_frd[:, position_of_center_0_y][position_of_center_0_x:] < threshold_value)[0] +\
position_of_center_0_x
down_from_center = np.where(pupil_frd[:, position_of_center_0_y]
[:position_of_center_0_x] < threshold_value)[0]
if len(left_from_center) > 0:
size_of_05_left = position_of_center_0_y - np.max(left_from_center)
else:
size_of_05_left = 0
if len(right_from_center) > 0:
size_of_05_right = np.min(right_from_center) - position_of_center_0_y
else:
size_of_05_right = 0
if len(up_from_center) > 0:
size_of_05_up = np.min(up_from_center) - position_of_center_0_x
else:
size_of_05_up = 0
if len(down_from_center) > 0:
size_of_05_down = position_of_center_0_x - np.max(down_from_center)
else:
size_of_05_down = 0
sizes_4_directions = np.array([size_of_05_left, size_of_05_right, size_of_05_up, size_of_05_down])
max_size = np.max(sizes_4_directions)
imageradius = max_size
radiusvalues = np.linspace(0, int(np.ceil(max_distance_to_corner)),
int(np.ceil(max_distance_to_corner)) + 1)
sigtotp = sigma * 550
dif_due_to_mis_class = Pupil_misalign(radiusvalues, imageradius, sigtotp, self.misalign)
dif_due_to_mis = dif_due_to_mis_class()
scaling_factor_pixel_to_physical = max_distance_to_corner / np.max(center_distance)
distance_int = np.round(center_distance * scaling_factor_pixel_to_physical).astype(int)
pupil_frd_with_mis = pupil_frd + dif_due_to_mis[distance_int]
pupil_frd_with_mis[pupil_frd_with_mis > 1] = 1
time_misalign_end = time.time()
if self.verbosity == 1:
print('Time to execute illumination considerations due to misalignment ' +
str(time_misalign_end - time_misalign_start))
####
pupil_lorentz = (np.arctan(2 * (self.effective_ilum_radius - center_distance) / (4 * sigma)) +
np.arctan(2 * (self.effective_ilum_radius + center_distance) / (4 * sigma))) /\
(2 * np.arctan((2 * self.effective_ilum_radius) / (4 * sigma)))
# use the misalignment-corrected FRD profile and mix in the Lorentzian wings
pupil_frd = np.copy(pupil_frd_with_mis)
pupil.illuminated = (pupil_frd + 1 * self.frd_lorentz_factor *
pupil_lorentz) / (1 + self.frd_lorentz_factor)
# Cut out the acceptance angle of the camera
self._cutCircleExterior(pupil, (0.0, 0.0), subaruRadius)
# Cut out detector shadow
self._cutSquare(pupil, (camX, camY), hscRadius, self.input_angle, self.det_vert)
# No vignetting of this kind for the spectroscopic camera
# self._cutCircleExterior(pupil, (lensX, lensY), lensRadius)
# Cut out spider shadow
for pos, angle in zip(self._spiderStartPos, self._spiderAngles):
x = pos[0] + camX
y = pos[1] + camY
if angle == 0:
self._cutRay(pupil, (x, y), angle, subaruStrutThick, 'rad', self.wide_0)
if angle == np.pi * 2 / 3:
self._cutRay(pupil, (x, y), angle, subaruStrutThick, 'rad', self.wide_23)
if angle == np.pi * 4 / 3:
self._cutRay(pupil, (x, y), angle, subaruStrutThick, 'rad', self.wide_43)
# cut out slit shadow
self._cutRay(pupil, (2, slitFrac_dy / 18), -np.pi, subaruSlit * 1.05, 'rad')
# cut out slit holder shadow
# subaruSlit/3 is roughly the width of the holder
self._cutRay(pupil, (self.slitHolder_frac_dx / 18, 1), -np.pi / 2, subaruSlit * 0.3, 'rad')
if self.verbosity == 1:
print('Finished with getPupil')
return pupil
class Pupil(object):
"""Pupil obscuration function.
"""
def __init__(self, illuminated, size, scale):
"""!Construct a Pupil
@param[in] illuminated 2D numpy array indicating which parts of
the pupil plane are illuminated.
@param[in] size Size of pupil plane array in meters. Note
that this may be larger than the actual
diameter of the illuminated pupil to
accommodate zero-padding.
@param[in] scale Sampling interval of pupil plane array in
meters.
"""
self.illuminated = illuminated
self.size = size
self.scale = scale
class Pupil_misalign(object):
"""Apply misaligment correction to the illumination of the pupil
Developed by <NAME> (Caltech)
Copied here without modifications
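Examples
----------
Illustrative usage (shapes only; the numerical values are arbitrary):
>>> import numpy as np
>>> radii = np.linspace(0, 100, 101)
>>> Pupil_misalign(radii, imageradius=60, sigtotp=5, misalign=2)().shape
(101,)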
"""
def __init__(self, radiusvalues, imageradius, sigtotp, misalign):
self.radiusvalues = radiusvalues
self.imageradius = imageradius
self.sigtotp = sigtotp
self.misalign = misalign
def wapp(self, A):
# Approximation function by <NAME> to approximate and correct for the
# widening of width due to the angular misalignment convolution. This
# is used to basically scale the contribution of angular misalignment and FRD
# A = angmis/sigFRD
wappA = np.sqrt(1 + A * A * (1 + A * A) / (2 + 1.5 * A * A))
return wappA
def fcorr(self, x, A):
# The function scaled so that it keeps the same (approximate) width value
# after angular convolution
correctedfam = self.fcon(x * self.wapp(A), A)
return correctedfam
def fcon(self, x, A):
# For more detail about this method, see "Analyzing Radial Profiles for FRD
# and Angular Misalignment", by <NAME>, 16/06/13.
wt = [0.1864, 0.1469, 0.1134, 0.1066, 0.1134, 0.1469, 0.1864] # from Jim Gunn's white paper,
# wt contains the normalized integrals under the angular misalignment
# convolution kernel, i.e., C(1-(x/angmisp)^2)^{-1/2} for |x|<angmisp and 0
# elsewhere. Note that the edges' centers are at +/- a, so they are
# integrated over an effective half of the length of the others.
temp = np.zeros(np.size(x))
for index in range(7):
temp = temp + wt[index] * self.ndfc(x + (index - 3) / 3 * A)
angconvolved = temp
return angconvolved
def ndfc(self, x):
# Standard model dropoff from a Gaussian convolution, normalized to brightness 1,
# radius (rh) 0, and sigTOT 1
# print(len(x))
ndfcfun = 1 - (0.5 * erf(x / np.sqrt(2)) + 0.5)
return ndfcfun
def FA(self, r, rh, sigTOT, A):
# Function that takes all significant variables of the dropoff and
# normalizes the curve to be comparable to ndfc
# r = vector of radius values, in steps of pixels
# rh = radius of half-intensity. Effectively the size of the radius of the dropoff
# sigTOT = total width of the convolution kernel that recreates the width of the dropoff
# between 85% and 15% illumination. Effectively just think of this as sigma
# A = angmis/sigFRD, that is, the ratio between the angular misalignment
# and the sigma due to only FRD. Usually this is on the order of 1-3.
FitwithAngle = self.fcorr((r - rh) / sigTOT, A)
return FitwithAngle
def __call__(self):
no_mis = self.FA(self.radiusvalues, self.imageradius, self.sigtotp, 0)
with_mis = self.FA(self.radiusvalues, self.imageradius, self.sigtotp, self.misalign)
dif_due_to_mis = with_mis - no_mis
return dif_due_to_mis
class PsfPosition(object):
"""Class that deals with positioning of the PSF model
Function find_single_realization_min_cut enables the fit to the data
"""
def __init__(self, image, oversampling, size_natural_resolution, simulation_00=False,
verbosity=0, save=None, PSF_DIRECTORY=None):
"""
Parameters
-----------------
image: `np.array`, (N, N)
oversampled model image
oversampling: `int`
by how much the model image is oversampled
size_natural_resolution: `int`
size of the final image, in natural (detector pixel) resolution
simulation_00: `bool`
if True, put optical center of the model image in
the center of the final image
verbosity: `int`
how verbose the procedure is (1 for full verbosity)
save: `int`
save intermediate images on hard drive (1 for save)
PSF_DIRECTORY: `str`
path to the directory where intermediate and testing images are saved
"""
self.image = image
self.oversampling = oversampling
self.size_natural_resolution = size_natural_resolution
self.simulation_00 = simulation_00
self.verbosity = verbosity
if save is None:
save = 0
self.save = save
if PSF_DIRECTORY is not None:
self.PSF_DIRECTORY = PSF_DIRECTORY
self.TESTING_FOLDER = PSF_DIRECTORY + 'Testing/'
self.TESTING_PUPIL_IMAGES_FOLDER = self.TESTING_FOLDER + 'Pupil_Images/'
self.TESTING_WAVEFRONT_IMAGES_FOLDER = self.TESTING_FOLDER + 'Wavefront_Images/'
self.TESTING_FINAL_IMAGES_FOLDER = self.TESTING_FOLDER + 'Final_Images/'
@staticmethod
def cut_Centroid_of_natural_resolution_image(image, size_natural_resolution, oversampling, dx, dy):
"""Cut the central part from a larger oversampled image
@param image input image
@param size_natural_resolution size of new image in natural units
@param oversampling oversampling
@returns central part of the input image
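Illustrative example (cuts a 3 x 3 stamp out of a 7 x 7 array):
>>> import numpy as np
>>> img = np.arange(49).reshape(7, 7)
>>> PsfPosition.cut_Centroid_of_natural_resolution_image(img, 3, 1, 0, 0)
array([[24, 25, 26],
       [31, 32, 33],
       [38, 39, 40]])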
"""
positions_from_where_to_start_cut = [int(len(image) / 2 -
size_natural_resolution / 2 -
dx * oversampling + 1),
int(len(image) / 2 -
size_natural_resolution / 2 -
dy * oversampling + 1)]
res = image[positions_from_where_to_start_cut[1]:positions_from_where_to_start_cut[1] +
int(size_natural_resolution),
positions_from_where_to_start_cut[0]:positions_from_where_to_start_cut[0] +
int(size_natural_resolution)]
return res
def find_single_realization_min_cut(
self,
input_image,
oversampling,
size_natural_resolution,
sci_image,
var_image,
mask_image,
v_flux,
double_sources=None,
double_sources_positions_ratios=[0, 0],
verbosity=0,
explicit_psf_position=None,
use_only_chi=False,
use_center_of_flux=False):
"""Move the image to find best position to downsample
the oversampled image
Parameters
-----------------
input_image: `np.array`, (N, N)
model image to be analyzed
(in our case this will be image of the
optical psf convolved with fiber)
oversampling: `int`
oversampling
size_natural_resolution: `int`
size of final image (in the ``natural'' units, i.e., physical pixels
on the detector)
sci_image: `np.array`, (N, N)
science image
var_image: `np.array`, (N, N)
variance image
mask_image: `np.array`, (N, N)
mask image
v_flux: `float`
flux normalization
simulation_00: `bool`
if True, do not move the center, for making fair comparisons between
models - the optical center is placed in the center of the image
if use_center_of_flux==True the behaviour changes
and the result is the image with the center of flux
in the center of the image
double_sources: `bool`
if True, fit for two sources seen in the data
double_sources_positions_ratios: `np.array`, (2,)
2 values describing the initial guess for the relation between the
secondary and primary sources (offset, ratio)
verbosity: `int`
verbosity of the algorithm (1 for full verbosity)
explicit_psf_position: `np.array`, (2,)
x and y offset
use_only_chi: `bool`
quality of the centering is measured using chi, not chi**2
use_center_of_flux: `bool`
fit so that the center of flux of the model and
the science image is as similar as possible
Returns
----------
model_image: `np.array`, (N, N)
model image at the size of the science image and
centered on the science image
(unless simulation_00=True or
explicit_psf_position has been passed)
psf_position: `np.array`, (2,)
position at which the model image has been centered
Notes
----------
Called by create_optPSF_natural in ZernikeFitterPFS
Calls function create_complete_realization
(many times in order to fit the best solution)
"""
self.sci_image = sci_image
self.var_image = var_image
self.mask_image = mask_image
self.v_flux = v_flux
simulation_00 = self.simulation_00
# if you are just asking for simulated image at (0,0) there is no possibility to create double sources
if simulation_00 == 1:
double_sources = None
if double_sources is None or double_sources is False:
double_sources_positions_ratios = [0, 0]
shape_of_input_img = input_image.shape[0]
shape_of_sci_image = sci_image.shape[0]
self.shape_of_input_img = shape_of_input_img
self.shape_of_sci_image = shape_of_sci_image
if verbosity == 1:
print('parameter use_only_chi in PsfPosition is set to: ' + str(use_only_chi))
print('parameter use_center_of_flux in PsfPosition is set to: ' + str(use_center_of_flux))
print('parameter simulation_00 in PsfPosition is set to: ' + str(simulation_00))
# depending on whether there is a second source in the image, the algorithm splits here
# double_sources should always be None when creating centered images (simulation_00 = True)
if double_sources is None or bool(double_sources) is False:
# if simulation_00 AND using optical center just run the realization that is set at 0,0
if simulation_00 == 1 and use_center_of_flux is False:
if verbosity == 1:
print('simulation_00 is set to 1 and use_center_of_flux==False -\
I am just returning the image at (0,0) coordinates ')
# return the solution with x and y is zero, i.e., with optical center in
# the center of the image
mean_res, single_realization_primary_renormalized, single_realization_secondary_renormalized,\
complete_realization_renormalized \
= self.create_complete_realization([0, 0], return_full_result=True,
use_only_chi=use_only_chi,
use_center_of_light=use_center_of_flux,
simulation_00=simulation_00)
# if you are fitting an actual image go through the full process
else:
# if you did not pass an explicit position, search for the best position
if explicit_psf_position is None:
# if creating the model so that the model is centered so
# that center of light of the model matches the center of the light
# of the scientific image, manually change values for centroid_of_sci_image here
if simulation_00 == 1 and use_center_of_flux:
if self.verbosity == 1:
print('creating simulated image, center of light in center of the image')
shape_of_sci_image = 21
centroid_of_sci_image = [10.5, 10.5]
else:
# create one complete realization with default parameters - estimate the
# centroid and use that knowledge to put fitting limits in the next step
centroid_of_sci_image = find_centroid_of_flux(sci_image)
time_1 = time.time()
initial_complete_realization = self.create_complete_realization(
[0, 0, double_sources_positions_ratios[0] * self.oversampling,
double_sources_positions_ratios[1]],
return_full_result=True,
use_only_chi=use_only_chi,
use_center_of_light=use_center_of_flux,
simulation_00=simulation_00)[-1]
time_2 = time.time()
if self.verbosity == 1:
print('time_2-time_1 for initial_complete_realization: ' + str(time_2 - time_1))
# center of the light for the first realization, set at optical center
centroid_of_initial_complete_realization = find_centroid_of_flux(
initial_complete_realization)
# determine offset between the initial guess and the data
offset_initial_and_sci = - \
((np.array(find_centroid_of_flux(initial_complete_realization)) -
np.array(find_centroid_of_flux(sci_image))))
if verbosity == 1:
print('centroid_of_initial_complete_realization ' +
str(find_centroid_of_flux(initial_complete_realization)))
print('centroid_of_sci_image '+str(find_centroid_of_flux(sci_image)))
print('offset_initial_and_sci: ' + str(offset_initial_and_sci))
print('[x_primary, y_primary, y_secondary,ratio_secondary] / chi2 output')
if self.save == 1:
np.save(self.TESTING_FINAL_IMAGES_FOLDER + 'initial_complete_realization',
initial_complete_realization)
# search for the best center using scipy ``shgo'' algorithm
# set the limits for the fitting procedure
y_2sources_limits = [
(offset_initial_and_sci[1] - 2) * self.oversampling,
(offset_initial_and_sci[1] + 2) * self.oversampling]
x_2sources_limits = [
(offset_initial_and_sci[0] - 1) * self.oversampling,
(offset_initial_and_sci[0] + 1) * self.oversampling]
# search for best positioning
if use_center_of_flux:
for i in range(5):
if verbosity == 1:
print("###")
if i == 0:
x_i, y_i = offset_initial_and_sci * oversampling
x_offset, y_offset = 0, 0
x_offset = x_offset + x_i
y_offset = y_offset + y_i
else:
x_offset = x_offset + x_i
y_offset = y_offset + y_i
complete_realization = self.create_complete_realization(
x=[x_offset, y_offset, 0, 0, ],
return_full_result=True, use_only_chi=True,
use_center_of_light=True, simulation_00=simulation_00)[-1]
offset_initial_and_sci = -((np.array(find_centroid_of_flux(complete_realization))
- np.array(find_centroid_of_flux(sci_image))))
if verbosity == 1:
print('offset_initial_and_sci in step ' +
str(i) + ' ' + str(offset_initial_and_sci))
print("###")
x_i, y_i = offset_initial_and_sci * oversampling
primary_position_and_ratio_x = [x_offset, y_offset]
# if use_center_of_flux=False, we have to optimize to find the best solution
else:
# implement try syntax for secondary too
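                        # Note on the search below: this is a two-stage procedure - scipy's
                        # shgo global optimizer (Sobol sampling over the bounded region)
                        # provides a coarse minimum, which Nelder-Mead then refines locally
                        # starting from the shgo solution.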
try:
primary_position_and_ratio_shgo = scipy.optimize.shgo(
self.create_complete_realization,
args=(
False,
use_only_chi,
use_center_of_flux,
simulation_00),
bounds=[
(x_2sources_limits[0],
x_2sources_limits[1]),
(y_2sources_limits[0],
y_2sources_limits[1])],
n=10,
sampling_method='sobol',
options={
'ftol': 1e-3,
'maxev': 10})
primary_position_and_ratio = scipy.optimize.minimize(
self.create_complete_realization,
args=(
False,
use_only_chi,
use_center_of_flux,
simulation_00),
x0=primary_position_and_ratio_shgo.x,
method='Nelder-Mead',
options={
'xatol': 0.00001,
'fatol': 0.00001})
primary_position_and_ratio_x = primary_position_and_ratio.x
except BaseException as e:
print(e)
print('search for primary position failed')
primary_position_and_ratio_x = [0, 0]
# return the best result, based on the result of the conducted search
mean_res, single_realization_primary_renormalized,\
single_realization_secondary_renormalized, complete_realization_renormalized \
= self.create_complete_realization(primary_position_and_ratio_x,
return_full_result=True,
use_only_chi=use_only_chi,
use_center_of_light=use_center_of_flux,
simulation_00=simulation_00)
if self.save == 1:
np.save(
self.TESTING_FINAL_IMAGES_FOLDER +
'single_realization_primary_renormalized',
single_realization_primary_renormalized)
np.save(
self.TESTING_FINAL_IMAGES_FOLDER +
'single_realization_secondary_renormalized',
single_realization_secondary_renormalized)
np.save(
self.TESTING_FINAL_IMAGES_FOLDER +
'complete_realization_renormalized',
complete_realization_renormalized)
if self.verbosity == 1:
if simulation_00 != 1:
print('We are fitting for only one source')
print('One source fitting result is ' + str(primary_position_and_ratio_x))
print('type(complete_realization_renormalized)' +
str(type(complete_realization_renormalized[0][0])))
centroid_of_complete_realization_renormalized = find_centroid_of_flux(
complete_realization_renormalized)
# determine offset between the initial guess and the data
offset_final_and_sci = - \
(np.array(centroid_of_complete_realization_renormalized) -
np.array(centroid_of_sci_image))
print('offset_final_and_sci: ' + str(offset_final_and_sci))
return complete_realization_renormalized, primary_position_and_ratio_x
                # if you did pass an explicit_psf_position for the solution, evaluate the code here
else:
mean_res, single_realization_primary_renormalized,\
single_realization_secondary_renormalized, complete_realization_renormalized\
= self.create_complete_realization(explicit_psf_position,
return_full_result=True,
use_only_chi=use_only_chi,
use_center_of_light=use_center_of_flux)
if self.save == 1:
np.save(
self.TESTING_FINAL_IMAGES_FOLDER +
'single_realization_primary_renormalized',
single_realization_primary_renormalized)
np.save(
self.TESTING_FINAL_IMAGES_FOLDER +
'single_realization_secondary_renormalized',
single_realization_secondary_renormalized)
np.save(
self.TESTING_FINAL_IMAGES_FOLDER +
'complete_realization_renormalized',
complete_realization_renormalized)
if self.verbosity == 1:
if simulation_00 != 1:
print('We are passing value for only one source')
print('One source fitting result is ' + str(explicit_psf_position))
print('type(complete_realization_renormalized)' +
str(type(complete_realization_renormalized[0][0])))
return complete_realization_renormalized, explicit_psf_position
else:
# TODO: need to make possible that you can pass your own values for double source
# create one complete realization with default parameters - estimate
# centroids and use that knowledge to put fitting limits in the next step
centroid_of_sci_image = find_centroid_of_flux(sci_image)
initial_complete_realization = self.create_complete_realization([0,
0,
double_sources_positions_ratios[0] #noqa: E501
* self.oversampling,
double_sources_positions_ratios[1]], #noqa: E501
return_full_result=True,
use_only_chi=use_only_chi,
use_center_of_light= #noqa: E251
use_center_of_flux,
simulation_00=simulation_00)[-1]
centroid_of_initial_complete_realization = find_centroid_of_flux(initial_complete_realization)
# determine offset between the initial guess and the data
offset_initial_and_sci = - \
(np.array(centroid_of_initial_complete_realization) - np.array(centroid_of_sci_image))
if verbosity == 1:
                print('Evaluating double source psf positioning loop')
print('offset_initial_and_sci: ' + str(offset_initial_and_sci))
print('[x_primary, y_primary, y_secondary,ratio_secondary] / chi2 output')
if self.save == 1:
np.save(self.TESTING_FINAL_IMAGES_FOLDER + 'sci_image', sci_image)
np.save(self.TESTING_FINAL_IMAGES_FOLDER + 'initial_complete_realization',
initial_complete_realization)
            # implement that it does not search if the second object is far away while in focus
            # focus size is 20 - do not search if the second PSF is more than 15 pixels away
if shape_of_sci_image == 20 and np.abs(self.double_sources_positions_ratios[0]) > 15:
if verbosity == 1:
print('fitting second source, but assuming that second source is too far')
# if the second spot is more than 15 pixels away
# copying code from the non-double source part
# search for the best center using scipy ``shgo'' algorithm
# set the limits for the fitting procedure
y_2sources_limits = [
(offset_initial_and_sci[1] - 2) * self.oversampling,
(offset_initial_and_sci[1] + 2) * self.oversampling]
x_2sources_limits = [
(offset_initial_and_sci[0] - 1) * self.oversampling,
(offset_initial_and_sci[0] + 1) * self.oversampling]
# search for best positioning
# implement try for secondary too
try:
# print('(False,use_only_chi,use_center_of_flux)'+str((False,use_only_chi,use_center_of_flux)))
primary_position_and_ratio_shgo = scipy.optimize.shgo(
self.create_complete_realization,
args=(False, use_only_chi,
use_center_of_flux, simulation_00),
bounds=[(x_2sources_limits[0], x_2sources_limits[1]),
(y_2sources_limits[0], y_2sources_limits[1])],
n=10, sampling_method='sobol',
options={'ftol': 1e-3, 'maxev': 10})
if verbosity == 1:
print('starting finer positioning')
primary_position_and_ratio = scipy.optimize.minimize(
self.create_complete_realization,
args=(False, use_only_chi,
use_center_of_flux, simulation_00),
x0=primary_position_and_ratio_shgo.x,
method='Nelder-Mead',
options={'xatol': 0.00001, 'fatol': 0.00001})
primary_position_and_ratio_x = primary_position_and_ratio.x
except BaseException:
print('search for primary position failed')
primary_position_and_ratio_x = [0, 0]
primary_secondary_position_and_ratio_x = np.array([0., 0., 0., 0.])
primary_secondary_position_and_ratio_x[0] = primary_position_and_ratio_x[0]
primary_secondary_position_and_ratio_x[1] = primary_position_and_ratio_x[1]
else:
# set the limits for the fitting procedure
y_2sources_limits = [
(offset_initial_and_sci[1] - 2) * self.oversampling,
(offset_initial_and_sci[1] + 2) * self.oversampling]
x_2sources_limits = [
(offset_initial_and_sci[0] - 1) * self.oversampling,
(offset_initial_and_sci[0] + 1) * self.oversampling]
y_2sources_limits_second_source = [
(self.double_sources_positions_ratios[0] - 2) * oversampling,
(self.double_sources_positions_ratios[0] + 2) * oversampling]
primary_secondary_position_and_ratio = scipy.optimize.shgo(
self.create_complete_realization,
args=(False, use_only_chi,
use_center_of_flux, simulation_00),
bounds=[
(x_2sources_limits[0],
x_2sources_limits[1]),
(y_2sources_limits[0],
y_2sources_limits[1]),
(y_2sources_limits_second_source[0],
y_2sources_limits_second_source[1]),
(self.double_sources_positions_ratios[1] / 2,
2 * self.double_sources_positions_ratios[1])],
n=10, sampling_method='sobol',
options={'ftol': 1e-3, 'maxev': 10})
primary_secondary_position_and_ratio_x = primary_secondary_position_and_ratio.x
# return best result
            mean_res, single_realization_primary_renormalized,\
single_realization_secondary_renormalized, complete_realization_renormalized \
= self.create_complete_realization(primary_secondary_position_and_ratio_x,
return_full_result=True, use_only_chi=use_only_chi,
use_center_of_light=use_center_of_flux,
simulation_00=simulation_00)
if self.save == 1:
np.save(
self.TESTING_FINAL_IMAGES_FOLDER +
'single_realization_primary_renormalized',
single_realization_primary_renormalized)
np.save(
self.TESTING_FINAL_IMAGES_FOLDER +
'single_realization_secondary_renormalized',
single_realization_secondary_renormalized)
np.save(
self.TESTING_FINAL_IMAGES_FOLDER +
'complete_realization_renormalized',
complete_realization_renormalized)
if self.verbosity == 1:
print('We are fitting for two sources')
print('Two source fitting result is ' + str(primary_secondary_position_and_ratio_x))
print('type(complete_realization_renormalized)' +
str(type(complete_realization_renormalized[0][0])))
return complete_realization_renormalized, primary_secondary_position_and_ratio_x
def create_complete_realization(
self,
x,
return_full_result=False,
use_only_chi=False,
use_center_of_light=False,
simulation_00=False):
"""Create one complete downsampled realization of the image,
from the full oversampled image
Parameters
----------
x: `np.array`, (4,)
            array containing x_primary, y_primary,
            the offset in y to the secondary source,
            and the flux ratio of the secondary to the primary source;
            the units are oversampled pixels
return_full_result: `bool`
            if True, return the images themselves (not just chi**2)
use_only_chi: `bool`
if True, minimize chi; if False, minimize chi^2
use_center_of_light: `bool`
if True, minimize distance to center of light, in focus
simulation_00: `bool`
            if True, do not move the center; used for making fair comparisons
            between models - the optical center is placed in the center of the image.
            If use_center_of_light==True the behaviour changes and the result
            is an image with the center of flux in the center of the image
Returns
----------
chi_2_almost_multi_values: `float`
returns the measure of quality
(chi**2, chi, or distance of center
of light between science and model image)
distance of center of light between science
and model image is given in units of pixels
single_primary_realization_renormalized: `np.array`, (N, N)
            image containing the model corresponding
to the primary source in the science image
single_secondary_realization_renormalized: `np.array`, (N, N)
            image containing the model corresponding
to the secondary source in the science image
complete_realization_renormalized: `np.array`, (N, N)
image combining the primary
and secondary source (if secondary source is needed)
Notes
----------
TODO: implement that you are able to call outside find_single_realization_min_cut
Called by find_single_realization_min_cut
Calls create_chi_2_almost_Psf_position
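        Examples
        ----------
        A minimal, hypothetical sketch (assumes ``self`` is a fully initialized
        instance; the argument values are illustrative only). With the default
        ``return_full_result=False`` only the quality measure is returned, which
        is how the method is used as an objective by the optimizers above::
            chi2 = self.create_complete_realization([0, 0], return_full_result=False,
                                                    use_only_chi=False)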
"""
# oversampled input model image
image = self.image
sci_image = self.sci_image
var_image = self.var_image
mask_image = self.mask_image
shape_of_sci_image = self.size_natural_resolution
oversampling = self.oversampling
v_flux = self.v_flux
        # central position of the created oversampled image
center_position = int(np.floor(image.shape[0] / 2))
# to be applied on x-axis
primary_offset_axis_1 = x[0]
# to be applied on y-axis
primary_offset_axis_0 = x[1]
if simulation_00 == 1:
simulation_00 = True
# if you are only fitting for primary image
# add zero values for secondary image
if len(x) == 2:
ratio_secondary = 0
else:
ratio_secondary = x[3]
if len(x) == 2:
secondary_offset_axis_1 = 0
secondary_offset_axis_0 = 0
else:
secondary_offset_axis_1 = primary_offset_axis_1
secondary_offset_axis_0 = x[2] + primary_offset_axis_0
shape_of_oversampled_image = int(shape_of_sci_image * oversampling / 2)
# from https://github.com/Subaru-PFS/drp_stella/blob/\
# 6cceadfc8721fcb1c7eb1571cf4b9bc8472e983d/src/SpectralPsf.cc
# // Binning by an odd factor requires the centroid at the center of a pixel.
# // Binning by an even factor requires the centroid on the edge of a pixel.
# the definitions used in primary image
# we separate if the image shape is odd or even, but at the moment there is no difference
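        # Illustration of the convention used in the arrays below: an offset of
        # +2.3 oversampled pixels gives shift_mod = [0.3, -2.0], i.e. the fractional
        # remainder (applied later via Lanczos interpolation) and the negated integer
        # part (applied by slicing the oversampled image).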
if np.modf(shape_of_oversampled_image / 2)[0] == 0.0:
# 'shape is an even number'
shift_x_mod = np.array(
[-(np.round(primary_offset_axis_1) - primary_offset_axis_1),
-np.round(primary_offset_axis_1)])
shift_y_mod = np.array(
[-(np.round(primary_offset_axis_0) - primary_offset_axis_0),
-np.round(primary_offset_axis_0)])
else:
# 'shape is an odd number'
shift_x_mod = np.array(
[-(np.round(primary_offset_axis_1) - primary_offset_axis_1),
-np.round(primary_offset_axis_1)])
shift_y_mod = np.array(
[-(np.round(primary_offset_axis_0) - primary_offset_axis_0),
-np.round(primary_offset_axis_0)])
image_integer_offset = image[center_position +
int(shift_y_mod[1]) - 1 -
shape_of_oversampled_image:center_position +
int(shift_y_mod[1]) +
shape_of_oversampled_image + 1,
center_position +
int(shift_x_mod[1]) - 1 -
shape_of_oversampled_image: center_position +
int(shift_x_mod[1]) +
shape_of_oversampled_image + 1]
if simulation_00:
image_integer_offset = image[center_position +
int(shift_y_mod[1]) - 1 -
shape_of_oversampled_image:center_position +
int(shift_y_mod[1]) +
shape_of_oversampled_image + 1 + 1,
center_position +
int(shift_x_mod[1]) - 1 -
shape_of_oversampled_image: center_position +
int(shift_x_mod[1]) +
shape_of_oversampled_image + 1 + 1]
print('image_integer_offset shape: ' + str(image_integer_offset.shape))
image_integer_offset_lsst = lsst.afw.image.image.ImageD(image_integer_offset.astype('float64'))
oversampled_Image_LSST_apply_frac_offset = lsst.afw.math.offsetImage(
image_integer_offset_lsst, shift_x_mod[0], shift_y_mod[0], algorithmName='lanczos5', buffer=5)
single_primary_realization_oversampled = oversampled_Image_LSST_apply_frac_offset.array[1:-1, 1:-1]
assert single_primary_realization_oversampled.shape[0] == shape_of_sci_image * oversampling
single_primary_realization = resize(
single_primary_realization_oversampled, (shape_of_sci_image, shape_of_sci_image), ())
###################
# This part is skipped if there is only primary source in the image
# go through secondary loop if the flux ratio is not zero
# (TODO: if secondary too far outside the image, do not go through secondary)
if ratio_secondary != 0:
# overloading the definitions used in primary image
if np.modf(shape_of_oversampled_image / 2)[0] == 0.0:
# print('shape is an even number')
shift_x_mod = np.array(
[-(np.round(secondary_offset_axis_1) - secondary_offset_axis_1),
-np.round(secondary_offset_axis_1)])
shift_y_mod = np.array(
                [-(np.round(secondary_offset_axis_0) - secondary_offset_axis_0),
                 -np.round(secondary_offset_axis_0)])
"""
This module provides a function to evaluate potential outliers in the aseg.stats
values.
"""
# ------------------------------------------------------------------------------
# subfunctions
def readAsegStats(path_aseg_stats):
"""
A function to read aseg.stats files.
"""
# read file
with open(path_aseg_stats) as stats_file:
aseg_stats = stats_file.read().splitlines()
# initialize
aseg = dict()
# read measures
for line in aseg_stats:
if '# Measure BrainSeg,' in line:
aseg.update({'BrainSeg' : float(line.split(',')[3])})
elif '# Measure BrainSegNotVent,' in line:
aseg.update({'BrainSegNotVent' : float(line.split(',')[3])})
elif '# Measure BrainSegNotVentSurf,' in line:
aseg.update({'BrainSegNotVentSurf' : float(line.split(',')[3])})
elif '# Measure VentricleChoroidVol,' in line:
aseg.update({'VentricleChoroidVol' : float(line.split(',')[3])})
elif '# Measure lhCortex,' in line:
aseg.update({'lhCortex' : float(line.split(',')[3])})
elif '# Measure rhCortex,' in line:
aseg.update({'rhCortex' : float(line.split(',')[3])})
elif '# Measure Cortex,' in line:
aseg.update({'Cortex' : float(line.split(',')[3])})
elif '# Measure lhCerebralWhiteMatter,' in line:
aseg.update({'lhCerebralWhiteMatter' : float(line.split(',')[3])})
elif '# Measure rhCerebralWhiteMatter,' in line:
aseg.update({'rhCerebralWhiteMatter' : float(line.split(',')[3])})
elif '# Measure CerebralWhiteMatter,' in line:
aseg.update({'CerebralWhiteMatter' : float(line.split(',')[3])})
elif '# Measure SubCortGray,' in line:
aseg.update({'SubCortGray' : float(line.split(',')[3])})
elif '# Measure TotalGray,' in line:
aseg.update({'TotalGray' : float(line.split(',')[3])})
elif '# Measure SupraTentorial,' in line:
aseg.update({'SupraTentorial' : float(line.split(',')[3])})
elif '# Measure SupraTentorialNotVent,' in line:
aseg.update({'SupraTentorialNotVent' : float(line.split(',')[3])})
elif '# Measure SupraTentorialNotVentVox,' in line:
aseg.update({'SupraTentorialNotVentVox' : float(line.split(',')[3])})
elif '# Measure Mask,' in line:
aseg.update({'Mask' : float(line.split(',')[3])})
elif '# Measure BrainSegVol-to-eTIV,' in line:
aseg.update({'BrainSegVol_to_eTIV' : float(line.split(',')[3])})
elif '# Measure MaskVol-to-eTIV,' in line:
aseg.update({'MaskVol_to_eTIV' : float(line.split(',')[3])})
elif '# Measure lhSurfaceHoles,' in line:
aseg.update({'lhSurfaceHoles' : float(line.split(',')[3])})
elif '# Measure rhSurfaceHoles,' in line:
aseg.update({'rhSurfaceHoles' : float(line.split(',')[3])})
elif '# Measure SurfaceHoles,' in line:
aseg.update({'SurfaceHoles' : float(line.split(',')[3])})
elif '# Measure EstimatedTotalIntraCranialVol,' in line:
aseg.update({'EstimatedTotalIntraCranialVol' : float(line.split(',')[3])})
elif 'Left-Lateral-Ventricle' in line:
aseg.update({'Left-Lateral-Ventricle' : float(line.split()[3])})
elif 'Left-Inf-Lat-Vent' in line:
aseg.update({'Left-Inf-Lat-Vent' : float(line.split()[3])})
elif 'Left-Cerebellum-White-Matter' in line:
aseg.update({'Left-Cerebellum-White-Matter' : float(line.split()[3])})
elif 'Left-Cerebellum-Cortex' in line:
aseg.update({'Left-Cerebellum-Cortex' : float(line.split()[3])})
elif 'Left-Thalamus-Proper' in line:
aseg.update({'Left-Thalamus-Proper' : float(line.split()[3])})
elif 'Left-Caudate' in line:
aseg.update({'Left-Caudate' : float(line.split()[3])})
elif 'Left-Putamen' in line:
aseg.update({'Left-Putamen' : float(line.split()[3])})
elif 'Left-Pallidum' in line:
aseg.update({'Left-Pallidum' : float(line.split()[3])})
elif '3rd-Ventricle' in line:
aseg.update({'3rd-Ventricle' : float(line.split()[3])})
elif '4th-Ventricle' in line:
aseg.update({'4th-Ventricle' : float(line.split()[3])})
elif 'Brain-Stem' in line:
aseg.update({'Brain-Stem' : float(line.split()[3])})
elif 'Left-Hippocampus' in line:
aseg.update({'Left-Hippocampus' : float(line.split()[3])})
elif 'Left-Amygdala' in line:
aseg.update({'Left-Amygdala' : float(line.split()[3])})
elif 'CSF' in line:
aseg.update({'CSF' : float(line.split()[3])})
elif 'Left-Accumbens-area' in line:
aseg.update({'Left-Accumbens-area' : float(line.split()[3])})
elif 'Left-VentralDC' in line:
aseg.update({'Left-VentralDC' : float(line.split()[3])})
elif 'Left-vessel' in line:
aseg.update({'Left-vessel' : float(line.split()[3])})
elif 'Left-choroid-plexus' in line:
aseg.update({'Left-choroid-plexus' : float(line.split()[3])})
elif 'Right-Lateral-Ventricle' in line:
aseg.update({'Right-Lateral-Ventricle' : float(line.split()[3])})
elif 'Right-Inf-Lat-Vent' in line:
aseg.update({'Right-Inf-Lat-Vent' : float(line.split()[3])})
elif 'Right-Cerebellum-White-Matter' in line:
aseg.update({'Right-Cerebellum-White-Matter' : float(line.split()[3])})
elif 'Right-Cerebellum-Cortex' in line:
aseg.update({'Right-Cerebellum-Cortex' : float(line.split()[3])})
elif 'Right-Thalamus-Proper' in line:
aseg.update({'Right-Thalamus-Proper' : float(line.split()[3])})
elif 'Right-Caudate' in line:
aseg.update({'Right-Caudate' : float(line.split()[3])})
elif 'Right-Putamen' in line:
aseg.update({'Right-Putamen' : float(line.split()[3])})
elif 'Right-Pallidum' in line:
aseg.update({'Right-Pallidum' : float(line.split()[3])})
elif 'Right-Hippocampus' in line:
aseg.update({'Right-Hippocampus' : float(line.split()[3])})
elif 'Right-Amygdala' in line:
aseg.update({'Right-Amygdala' : float(line.split()[3])})
elif 'Right-Accumbens-area' in line:
aseg.update({'Right-Accumbens-area' : float(line.split()[3])})
elif 'Right-VentralDC' in line:
aseg.update({'Right-VentralDC' : float(line.split()[3])})
elif 'Right-vessel' in line:
aseg.update({'Right-vessel' : float(line.split()[3])})
elif 'Right-choroid-plexus' in line:
aseg.update({'Right-choroid-plexus' : float(line.split()[3])})
elif '5th-Ventricle' in line:
aseg.update({'5th-Ventricle' : float(line.split()[3])})
elif 'WM-hypointensities' in line:
aseg.update({'WM-hypointensities' : float(line.split()[3])})
elif 'Left-WM-hypointensities' in line:
aseg.update({'Left-WM-hypointensities' : float(line.split()[3])})
elif 'Right-WM-hypointensities' in line:
aseg.update({'Right-WM-hypointensities' : float(line.split()[3])})
elif 'non-WM-hypointensities' in line:
aseg.update({'non-WM-hypointensities' : float(line.split()[3])})
elif 'Left-non-WM-hypointensities' in line:
aseg.update({'Left-non-WM-hypointensities' : float(line.split()[3])})
elif 'Right-non-WM-hypointensities' in line:
aseg.update({'Right-non-WM-hypointensities' : float(line.split()[3])})
elif 'Optic-Chiasm' in line:
aseg.update({'Optic-Chiasm' : float(line.split()[3])})
elif 'CC_Posterior' in line:
aseg.update({'CC_Posterior' : float(line.split()[3])})
elif 'CC_Mid_Posterior' in line:
aseg.update({'CC_Mid_Posterior' : float(line.split()[3])})
elif 'CC_Central' in line:
aseg.update({'CC_Central' : float(line.split()[3])})
elif 'CC_Mid_Anterior' in line:
aseg.update({'CC_Mid_Anterior' : float(line.split()[3])})
elif 'CC_Anterior' in line:
aseg.update({'CC_Anterior' : float(line.split()[3])})
# return
return aseg
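# Hypothetical usage sketch (the path below is illustrative, not part of this module):
#   stats = readAsegStats('/path/to/subject/stats/aseg.stats')
#   print(stats['EstimatedTotalIntraCranialVol'])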
# ------------------------------------------------------------------------------
# outlier table
def outlierTable():
"""
A function to provide normative values for Freesurfer segmentations and
parcellations.
"""
# define
outlierDict = dict([
('Left-Accumbens-area', dict([('lower' , 210.87844594754), ('upper', 718.01022026916)])),
('Right-Accumbens-area', dict([('lower' , 304.86134907845), ('upper', 751.63838456345)])),
('Left-Amygdala', dict([('lower' , 1179.73655974083), ('upper', 1935.09415214717)])),
('Right-Amygdala', dict([('lower' , 1161.54746836742), ('upper', 2002.14187676668)])),
('Brain-Stem', dict([('lower' , 18048.54263155760), ('upper', 25300.51090318110)])),
('Left-Caudate', dict([('lower' , 2702.73311142764), ('upper', 4380.54479618196)])),
('Right-Caudate', dict([('lower' , 2569.61140834210), ('upper', 4412.61035536070)])),
('Left-Hippocampus', dict([('lower' , 3432.26483953083), ('upper', 4934.43236139507)])),
('Right-Hippocampus', dict([('lower' , 3580.74371035841), ('upper', 5067.49668145829)])),
('Left-Pallidum', dict([('lower' , 935.47686324176), ('upper', 1849.42861796994)])),
('Right-Pallidum', dict([('lower' , 1078.14975428593), ('upper', 1864.08951102817)])),
('Left-Putamen', dict([('lower' , 3956.23134409153), ('upper', 6561.97642872937)])),
('Right-Putamen', dict([('lower' , 3768.88684356957), ('upper', 6142.52870810603)])),
('Left-Thalamus-Proper', dict([('lower' , 6483.36121320953), ('upper', 9489.46749012527)])),
('Right-Thalamus-Proper', dict([('lower' , 6065.70220487045), ('upper', 8346.88382091555)])),
('Left-VentralDC', dict([('lower' , 3182.42264293449), ('upper', 4495.77412707751)])),
('Right-VentralDC', dict([('lower' , 3143.88280953869), ('upper', 4407.63641978371)]))
])
# return
return outlierDict
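# Hypothetical usage sketch: look up the normative range for a single structure.
#   bounds = outlierTable()
#   lower, upper = bounds['Left-Hippocampus']['lower'], bounds['Left-Hippocampus']['upper']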
# ------------------------------------------------------------------------------
# main function
def outlierDetection(subjects, subjects_dir, output_dir, outlierDict, min_no_subjects=10):
"""
A function to evaluate potential outliers in the aseg.stats values.
"""
# imports
import os
import csv
import numpy as np
import pandas as pd
from qatoolspython.outlierDetection import readAsegStats
# create a dictionary with all data from all subjects, and create a list of all available keys
aseg = dict()
all_aseg_keys = list()
for subject in subjects:
path_aseg_stats = os.path.join(subjects_dir, subject, "stats", "aseg.stats")
aseg_stats = readAsegStats(path_aseg_stats)
aseg.update({subject : aseg_stats})
all_aseg_keys.extend(list(aseg_stats.keys()))
all_aseg_keys = list(sorted(set(all_aseg_keys)))
# compare individual data against sample statistics (if more than min_no_subjects cases)
outlierSampleNonpar = dict()
outlierSampleParam = dict()
outlierSampleNonparNum = dict()
outlierSampleParamNum = dict()
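    # Assumption (inferred from the variable names): these dictionaries will hold the
    # per-subject outlier evaluation against sample statistics - a nonparametric check
    # based on quartiles/IQR and a parametric check based on the sample mean and
    # standard deviation - plus the corresponding outlier counts.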
if len(subjects) >= min_no_subjects:
# compute means, sd, medians, and quantiles based on sample
df = pd.DataFrame.from_dict(aseg).transpose()
        iqr = np.percentile(df, 75, axis=0) - np.percentile(df, 25, axis=0)
from PyQt5 import QtWidgets, uic, QtCore, Qt
from PyQt5.QtWidgets import QAction, QMessageBox, QFileDialog, QDesktopWidget, QColorDialog, QFontDialog, QDialog, QTableWidgetItem, QVBoxLayout, QSplashScreen, QProgressBar
from PyQt5.QtGui import QIcon, QPixmap
import sys, os, time
from webbrowser import open_new_tab
import xlwt
import subprocess as sp
from plotting import *
from mode import *
from recorder import *
from read_outfiles import *
from utilities import *
import matplotlib as mpl
from RS import*
import numpy as np
import pandas as pd
from pyvistaqt import QtInteractor
main=None
class FeViewMain(QtWidgets.QMainWindow):
def __init__(self, parent=None):
super(FeViewMain, self).__init__(parent)
# load MainWindows.ui from Qt Designer
uic.loadUi('UI\MainWindows.ui', self)
# add the pyvista interactor object
vlayout = QVBoxLayout()
self.p=self.plot_widget = QtInteractor(self.frame)
self.p.show_axes()
vlayout.addWidget(self.plot_widget.interactor)
self.frame.setLayout(vlayout)
self.setCentralWidget(self.frame)
vlayout.setContentsMargins(0, 0, 0, 0)
# add some tool bar
self.btn_tool_openTCL = QAction(QIcon('UI/icon/Open.png'),'Open TCL File', self)
self.btn_tool_editTCL = QAction(QIcon('UI/icon/edit.png'),'Edit TCL File with CypressEditor', self)
self.btn_tool_run_OS = QAction(QIcon('UI/icon/run.png'),'run TCL file with OpenSees', self)
self.btn_iso = QAction(QIcon('UI/icon/iso.png'),'View isometric', self)
self.btn_iso.setCheckable(True) # toolbar button checkable
self.btn_xy_zpluss = QAction(QIcon('UI/icon/xy_zpluss.png'), 'View xy_zpluss', self)
self.btn_xy_zpluss.setCheckable(True)
self.btn_xy_zminus = QAction(QIcon('UI/icon/xy_zminus.png'), 'View xy_zminus', self)
self.btn_xy_zminus.setCheckable(True)
self.btn_xz_ypluss = QAction(QIcon('UI/icon/xz_ypluss.png'), 'View xz_ypluss', self)
self.btn_xz_ypluss.setCheckable(True)
self.btn_xz_yminus = QAction(QIcon('UI/icon/xz_yminus.png'), 'View xz_yminus', self)
self.btn_xz_yminus.setCheckable(True)
self.btn_yz_xpluss = QAction(QIcon('UI/icon/yz_xpluss.png'), 'View yz_xpluss', self)
self.btn_yz_xpluss.setCheckable(True)
self.btn_yz_xminus = QAction(QIcon('UI/icon/yz_xminus.png'), 'View yz_xminus', self)
self.btn_yz_xminus.setCheckable(True)
self.btn_node_label = QAction(QIcon('UI/icon/nl.png'), 'View Node Label', self)
self.btn_node_label.setCheckable(True)
self.btn_node_cord = QAction(QIcon('UI/icon/nc.png'), 'View Node Co-ordinate', self)
self.btn_node_cord.setCheckable(True)
self.btn_load = QAction(QIcon('UI/icon/load.png'), 'View Point Load', self)
self.btn_load.setCheckable(True)
self.btn_color_plot_background= QAction(QIcon('UI/icon/color_plot_background.png'), 'Change Plot Background Color', self)
self.btn_color_gui = QAction(QIcon('UI/icon/color_gui.png'), 'Change Theme Color', self)
self.btn_font = QAction(QIcon('UI/icon/font.png'), 'Change Font Style', self)
self.btn_plot_image = QAction(QIcon('UI/icon/plot_image.png'), 'Save Plot as Image', self)
self.btn_plot_image_wb = QAction(QIcon('UI/icon/plot_image_wb.png'), 'Save Plot as Image with White Background', self)
self.btn_calc = QAction(QIcon('UI/icon/calculator.png'), 'Calculator', self)
self.btn_minumize = QAction(QIcon('UI/icon/minimize.png'), 'Mimimize the Window', self)
self.btn_maximize = QAction(QIcon('UI/icon/maximize.png'), 'Maximize the Window', self)
self.btn_full_s = QAction(QIcon('UI/icon/full_s.png'), 'Fullscreen', self)
self.btn_center = QAction(QIcon('UI/icon/center.png'), 'Center', self)
self.btn_min_s = QAction(QIcon('UI/icon/min.png'), 'Minimum Window Size', self)
self.btn_max_s = QAction(QIcon('UI/icon/max.png'), 'Maximum Window Size', self)
self.btn_restore = QAction(QIcon('UI/icon/rest_w.png'), 'Restore Window', self)
self.btn_help = QAction(QIcon('UI/icon/help.png'), 'Help', self)
self.btn_about = QAction(QIcon('UI/icon/info.png'), 'Info', self)
        self.btn_close = QAction(QIcon('UI/icon/close.png'), 'Exit', self)
toolbar = self.addToolBar('Exit')
toolbar.addAction(self.btn_tool_openTCL)
toolbar.addAction(self.btn_tool_editTCL)
toolbar.addAction(self.btn_tool_run_OS)
toolbar.addSeparator()
toolbar.addAction(self.btn_iso)
toolbar.addAction(self.btn_xy_zpluss)
toolbar.addAction(self.btn_xy_zminus)
toolbar.addAction(self.btn_xz_ypluss)
toolbar.addAction(self.btn_xz_yminus)
toolbar.addAction(self.btn_yz_xpluss)
toolbar.addAction(self.btn_yz_xminus)
toolbar.addSeparator()# add separator
toolbar.addAction(self.btn_node_label)
toolbar.addAction(self.btn_node_cord)
toolbar.addAction(self.btn_load)
toolbar.addSeparator()
toolbar.addAction(self.btn_color_plot_background)
toolbar.addAction(self.btn_color_gui)
toolbar.addAction(self.btn_font)
toolbar.addSeparator()
toolbar.addAction(self.btn_plot_image)
toolbar.addAction(self.btn_plot_image_wb)
toolbar.addAction(self.btn_calc)
toolbar.addSeparator()
toolbar.addAction(self.btn_minumize)
toolbar.addAction(self.btn_maximize)
toolbar.addAction(self.btn_full_s)
toolbar.addAction(self.btn_center)
toolbar.addAction(self.btn_min_s)
toolbar.addAction(self.btn_max_s)
toolbar.addAction(self.btn_restore)
toolbar.addSeparator()
toolbar.addAction(self.btn_help)
toolbar.addAction(self.btn_about)
toolbar.addAction(self.btn_close)
toolbar.addSeparator()
# margin & layout setting for toolbar
toolbar.setContentsMargins(0, 0, 0, 0)
toolbar.layout().setSpacing(0)
toolbar.layout().setContentsMargins(0, 0, 0, 0)
self.btn_tool_openTCL.triggered.connect(self.openTCL) # call function for 'Open TCL file' toolbar button
self.actionOpen.triggered.connect(self.openTCL) # call function for 'Open TCL file' main manu button
self.btn_apply_static.clicked.connect(self.DispStatic)
self.actionApply_Static.triggered.connect(self.DispStatic)
self.btn_apply_modal.clicked.connect(self.DispModal)
self.actionApply_Modal.triggered.connect(self.DispModal)
self.btn_apply_dynamic.clicked.connect(self.DispDynamic)
self.Apply_Dyanamic.triggered.connect(self.DispDynamic)
self.btn_response_static.clicked.connect(self.res_static)
self.actionShow_Response.triggered.connect(self.res_static)
self.btn_response_dynamic.clicked.connect(self.res_dynamic)
self.actionShow_Response_dynamic.triggered.connect(self.res_dynamic)
self.btn_tool_editTCL.triggered.connect(self.edit_TCL)
self.actionEdit.triggered.connect(self.edit_TCL)
self.btn_tool_run_OS.triggered.connect(self.runOS)
self.actionRun_OpenSees.triggered.connect(self.runOS)
self.btn_iso.triggered.connect(self.iso)
self.btn_xy_zpluss.triggered.connect(self.xy_zpluss)
self.btn_xy_zminus.triggered.connect(self.xy_zminus)
self.btn_xz_ypluss.triggered.connect(self.xz_ypluss)
self.btn_xz_yminus.triggered.connect(self.xz_yminus)
self.btn_yz_xpluss.triggered.connect(self.yz_xpluss)
self.btn_yz_xminus.triggered.connect(self.yz_xminus)
self.actionFeView.triggered.connect(self.about_feview)
self.btn_about.triggered.connect(self.about_feview)
self.actionPlot_Background_Color.triggered.connect(self.Plot_Background_Color)
self.btn_color_plot_background.triggered.connect(self.Plot_Background_Color)
self.actionGUI_Font.triggered.connect(self.GUI_Font)
self.btn_font.triggered.connect(self.GUI_Font)
self.actionTheme_Color.triggered.connect(self.gui_color)
self.btn_color_gui.triggered.connect(self.gui_color)
self.btn_plot_image.triggered.connect(self.savePlot)
self.actionWith_background.triggered.connect(self.savePlot)
self.btn_plot_image_wb.triggered.connect(self.savePlot_wb)
self.actionWhite_Background.triggered.connect(self.savePlot_wb)
self.btn_calc.triggered.connect(self.calculator)
self.actionMinimize.triggered.connect(lambda: self.showMinimized())
self.btn_minumize.triggered.connect(lambda: self.showMinimized())
self.actionMaximize.triggered.connect(lambda: self.showMaximized())
self.btn_maximize.triggered.connect(lambda: self.showMaximized())
self.actionFull_Screen.triggered.connect(lambda: self.showFullScreen())
self.btn_full_s.triggered.connect(lambda: self.showFullScreen())
self.actionCenter.triggered.connect(lambda: self.center())
self.btn_center.triggered.connect(lambda: self.center())
self.actionMinimum_Size.triggered.connect(lambda: self.resize(self.minimumSize()))
self.btn_min_s.triggered.connect(lambda: self.resize(self.minimumSize()))
self.actionMaximum_Size.triggered.connect(lambda: self.resize(self.maximumSize()))
self.btn_max_s.triggered.connect(lambda: self.resize(self.maximumSize()))
self.actionRestore.triggered.connect(lambda: self.showNormal())
self.btn_restore.triggered.connect(lambda: self.showNormal())
self.actionSSL.triggered.connect(lambda: open_new_tab('Help\FeView_Help.chm'))
self.btn_help.triggered.connect(lambda: open_new_tab('Help\FeView_Help.chm'))
self.actionOpenSees.triggered.connect(lambda: open_new_tab('https://opensees.berkeley.edu'))
self.actionSSL_Website.triggered.connect(lambda: open_new_tab('http://www.kim2kie.com/3_ach/SSL_Software.php'))
self.actionFeView_Website.triggered.connect(lambda: open_new_tab('http://www.kim2kie.com/3_ach/FeView/FeView.php'))
self.btn_node_label.triggered.connect(self.nodelebels)
self.actionNode_Label.triggered.connect(self.nodelebels)
self.btn_node_cord.triggered.connect(self.nodecoordinates)
self.actionNode_Coordinate.triggered.connect(self.nodecoordinates)
self.btn_load.triggered.connect(self.pointload_show)
self.actionLoad.triggered.connect(self.pointload_show)
self.actionExit.triggered.connect(self.close)
self.btn_close.triggered.connect(self.close)
self.actionMesh_Fiew.triggered.connect(self.mesh_view_model)
self.actionSmooth_View.triggered.connect(self.smoth_view_model)
self.actionWireframe.triggered.connect(self.wiremesh_model)
self.actionMesh_View_2.triggered.connect(self.mesh_view_model_deform)
self.actionSmooth_View_2.triggered.connect(self.smoth_view_model_deform)
self.actionMesh_View_Wiremesh_undeform.triggered.connect(self.mesh_wiremesh_model_deform)
self.actionSmooth_View_Wiremesh_undeform.triggered.connect(self.smooth_wiremesh_model_deform)
self.btn_datatable_static.clicked.connect(self.data_table_static)
self.actionData_Table.triggered.connect(self.data_table_static)
self.btn_datatable_modal.clicked.connect(self.data_table_modal)
self.actionData_Table_modal.triggered.connect(self.data_table_modal)
self.btn_datatable_dynamic.clicked.connect(self.data_table_dynamic)
self.actionData_Table_dynamic.triggered.connect(self.data_table_dynamic)
self.actionView_load.triggered.connect(self.load_setting_arrow)
self.reportEdit.keyReleaseEvent = self.handleKeyRelease
self.addInfoText("Opend tcl file")
self.prgb = QProgressBar(self)
self.statusBar().addPermanentWidget(self.prgb)
self.dialogs = list()
def progress(self, value, newLines):
#self.te.append('\n'.join(newLines))
self.prgb.setValue(value)
def addInfoText(self, text):
"""Adds info text"""
return self.reportEdit.insertPlainText("\n >>"+str(text))
def handleKeyRelease(self, event):
"""Handles key inputs to report box"""
if(event.key() == Qt.Key_Return or event.key() == Qt.Key_Enter):
self.interpretUserInput(self.reportEdit.toPlainText())
# function to unchecked another model display style setting except 'mesh view'
def mesh_view_model(self):
self.actionSmooth_View.setChecked(False)
self.actionWireframe.setChecked(False)
# function to unchecked another model display style setting except 'smooth view'
def smoth_view_model(self):
self.actionMesh_Fiew.setChecked(False)
self.actionWireframe.setChecked(False)
# function to unchecked another model display style setting except 'wiremesh view'
def wiremesh_model(self):
self.actionMesh_Fiew.setChecked(False)
self.actionSmooth_View.setChecked(False)
# function to unchecked another deform model display style setting except 'mesh view'
def mesh_view_model_deform(self):
self.actionSmooth_View_2.setChecked(False)
self.actionMesh_View_Wiremesh_undeform.setChecked(False)
self.actionSmooth_View_Wiremesh_undeform.setChecked(False)
# function to unchecked another deform model display style setting except 'smooth view'
def smoth_view_model_deform(self):
self.actionMesh_View_2.setChecked(False)
self.actionMesh_View_Wiremesh_undeform.setChecked(False)
self.actionSmooth_View_Wiremesh_undeform.setChecked(False)
# function to unchecked another deform model display style setting except 'mesh view+wiremesh'
def mesh_wiremesh_model_deform(self):
self.actionMesh_View_2.setChecked(False)
self.actionSmooth_View_2.setChecked(False)
self.actionSmooth_View_Wiremesh_undeform.setChecked(False)
# function to unchecked another deform model display style setting except 'smooth view+wiremesh'
def smooth_wiremesh_model_deform(self):
self.actionMesh_View_2.setChecked(False)
self.actionSmooth_View_2.setChecked(False)
self.actionMesh_View_Wiremesh_undeform.setChecked(False)
def openTCL(self):
try:
global numModes #set numModes as global variable
# create file dialog function to browse file path
options = QFileDialog.Options()
options |= QFileDialog.DontUseNativeDialog
self.fileName, _ = QFileDialog.getOpenFileName(self, "OpenSees File", "","OpenSees File (*.tcl)", options=options)
self.file_path, self.file_name = os.path.split(self.fileName)
[filename0, sep, ext] = self.file_name.partition('.')
# make path for output files
self.result_directory = os.path.join(self.file_path, r'out_files_%s' % filename0)
if not os.path.exists(self.result_directory):
# create directory for output files
os.mkdir(self.result_directory)
# clear all actors from plot interface
self.prgb.setMaximum(len(node(self.fileName)))
self.p.clear()
if self.actionSmooth_View.isChecked() == True:
# call plotter considering smooth view
plotter(self.p, self.fileName, 'smooth_view',NodeCoords(self.fileName), None, None)
elif self.actionWireframe.isChecked() == True:
# call plotter considering wiremesh view
plotter(self.p, self.fileName, 'wireframe', NodeCoords(self.fileName),None, None)
elif self.actionMesh_Fiew.isChecked() == True:
# call plotter considering mesh view
plotter(self.p, self.fileName, 'mesh_view',NodeCoords(self.fileName), None, None)
#plotter_rigiddiaphram(self.p, self.fileName, NodeCoords(self.fileName))
if (ndm_v(self.fileName))==2:
self.p.view_xy() # initial setting for 2d interface considering x-y axis view
else:
self.p.view_isometric() # initial setting for 3d interface considering isometric view
# read number of modes as "numModes"
numModes=modeNumber(self.fileName)
# clear previous item from "Mode Num." Combobox
self.cb_numNodes.clear()
if numModes.size>0:
for i in range(int(numModes)):
# add item to "Mode Num." combobox as Mode_1...
self.cb_numNodes.addItem('Mode_'+str(i+1))
self.recorder_disp, self.recorder_rot, self.recorder_force, self.recorder_moment, self.recorder_accel, self.recorder_vel = recorder_types(
self.fileName)
if self.recorder_disp==1:
# add item to "Component" combobox for displacement in static analysis result
self.cb_node_contour_static.addItem('Displacement, Ux',)
self.cb_node_contour_static.addItem('Displacement, Uy')
self.cb_node_contour_static.addItem('Displacement, Uz')
self.cb_node_contour_static.addItem('Displacement, Uxyz')
if self.recorder_rot==1:
# add item to "Component" combobox for rotation in static analysis result
self.cb_node_contour_static.addItem('Rotation, Rx')
self.cb_node_contour_static.addItem('Rotation, Ry')
self.cb_node_contour_static.addItem('Rotation, Rz')
self.cb_node_contour_static.addItem('Rotation, Rxyz')
if self.recorder_force==1:
# add item to "Component" combobox for force reaction in static analysis result
self.cb_node_contour_static.addItem('Force Reaction, RFx')
self.cb_node_contour_static.addItem('Force Reaction, RFy')
self.cb_node_contour_static.addItem('Force Reaction, RFz')
self.cb_node_contour_static.addItem('Force Reaction, RFxyz')
if self.recorder_moment==1:
# add item to "Component" combobox for moment reaction in static analysis result
self.cb_node_contour_static.addItem('Moment Reaction, RMx')
self.cb_node_contour_static.addItem('Moment Reaction, RMy')
self.cb_node_contour_static.addItem('Moment Reaction, RMz')
self.cb_node_contour_static.addItem('Moment Reaction, RMxyz')
if self.recorder_disp == 1:
# add item to "Component" combobox for displacement in dynamic analysis result
self.cb_node_contour_dynamic.addItem('Displacement, Ux')
self.cb_node_contour_dynamic.addItem('Displacement, Uy')
self.cb_node_contour_dynamic.addItem('Displacement, Uz')
self.cb_node_contour_dynamic.addItem('Displacement, Uxyz')
if self.recorder_rot == 1:
# add item to "Component" combobox for rotation in dynamic analysis result
self.cb_node_contour_dynamic.addItem('Rotation, Rx')
self.cb_node_contour_dynamic.addItem('Rotation, Ry')
self.cb_node_contour_dynamic.addItem('Rotation, Rz')
self.cb_node_contour_dynamic.addItem('Rotation, Rxyz')
if self.recorder_force == 1:
# add item to "Component" combobox for force reaction in dynamic analysis result
self.cb_node_contour_dynamic.addItem('Force Reaction, RFx')
self.cb_node_contour_dynamic.addItem('Force Reaction, RFy')
self.cb_node_contour_dynamic.addItem('Force Reaction, RFz')
self.cb_node_contour_dynamic.addItem('Force Reaction, RFxyz')
if self.recorder_moment == 1:
# add item to "Component" combobox for moment reaction in dynamic analysis result
self.cb_node_contour_dynamic.addItem('Moment Reaction, RMx')
self.cb_node_contour_dynamic.addItem('Moment Reaction, RMy')
self.cb_node_contour_dynamic.addItem('Moment Reaction, RMz')
self.cb_node_contour_dynamic.addItem('Moment Reaction, RMxyz')
if self.recorder_accel == 1:
# add item to "Component" combobox for acceleration in dynamic analysis result
self.cb_node_contour_dynamic.addItem('Acceleration, Ax')
self.cb_node_contour_dynamic.addItem('Acceleration, Ay')
self.cb_node_contour_dynamic.addItem('Acceleration, Az')
self.cb_node_contour_dynamic.addItem('Acceleration, Axyz')
if self.recorder_vel == 1:
# add item to "Component" combobox for velocity in dynamic analysis result
self.cb_node_contour_dynamic.addItem('Velocity, Vx')
self.cb_node_contour_dynamic.addItem('Velocity, Vy')
self.cb_node_contour_dynamic.addItem('Velocity, Vz')
self.cb_node_contour_dynamic.addItem('Velocity, Vxyz')
self.setWindowTitle(
# windows title to show file path and filename
"{}[*] - {}".format((self.fileName + ' ['+filename0)+']', 'FeView'))
try:
# show total node and element in status bar
self.statusBar().showMessage('Total Node : '+str(len(node(self.fileName)))+'; Total Element :'+total_element(self.fileName))
except:
QMessageBox.critical(self, "Error", "No node or element found")
if self.actionView_load.isChecked()==True:
# show point load
point_load(self.fileName,self.p,load_arrow_size, load_font_size,load_arrow_color,load_font_color)
if self.actionView_Support.isChecked() == True:
# show support
support(self.fileName,self.p)
self.addInfoText("Successfully loaded file \n" + self.fileName)
except:
QMessageBox.critical(self, "Error", "Please check TCL file")
def DispStatic(self):
try:
self.btn_apply_modal.setChecked(False)
self.btn_apply_dynamic.setChecked(False)
            scalefactor = float(self.tb_sef_scale_factor.text())  # scale factor for deformation (static, modal, and dynamic analysis)
if self.recorder_disp==1:
# read output files for displacement
self.outdispFile = OpenSeesOutputRead(os.path.join(self.result_directory,'Node_displacements.out'))
if step_static(self.fileName).size>0:
# number of steps for static (if dynamic/transient analysis also included)
self.step_statics=int(step_static(self.fileName))
else:
# number of steps for only static analysis
self.step_statics = len(self.outdispFile[:, 1])
self.step_dynamic = len(self.outdispFile[:, 1]) - self.step_statics # steps for transient analysis
if self.recorder_disp == 1:
# read output files for displacement
self.deformation=(out_response((os.path.join(self.result_directory,'Node_displacements.out')), self.step_statics, ndm_v(self.fileName),'all'))
self.dispNodeCoords = NodeCoords(self.fileName) + (scalefactor * self.deformation)
if self.recorder_rot == 1:
# read output files for rotation
self.rotation=(out_response((os.path.join(self.result_directory,'Node_rotations.out')), self.step_statics, ndm_v(self.fileName),'rotation_moment'))
self.outrotFile = OpenSeesOutputRead(os.path.join(self.result_directory, 'Node_rotations.out'))
if self.recorder_force == 1:
# read output files for force reaction
self.forcereaction=(out_response((os.path.join(self.result_directory,'Node_forceReactions.out')), self.step_statics, ndm_v(self.fileName),'all'))
self.outfreactFile = OpenSeesOutputRead(os.path.join(self.result_directory, 'Node_forceReactions.out'))
if self.recorder_moment == 1:
# read output files for moment reaction
self.momentreaction = (out_response((os.path.join(self.result_directory, 'Node_momentReactions.out')), self.step_statics,ndm_v(self.fileName),'rotation_moment'))
self.outmreactFile = OpenSeesOutputRead(os.path.join(self.result_directory, 'Node_momentReactions.out'))
self.p.clear()
node_contour_type = (self.cb_node_contour_static.currentText()) # get current text from "Component" combobox (Static result)
if self.actionMesh_View_2.isChecked() == True:
if node_contour_type=='Displacement, Ux':
scalars = self.deformation[:, 0]
d_max_x= np.max(np.abs(self.deformation[:, 0]))
stitle = 'Displacement, Ux (Max. = '+str(d_max_x)+')\n'
plotter(self.p, self.fileName, 'mesh_view',self.dispNodeCoords, scalars, stitle)
elif node_contour_type=='Displacement, Uy':
scalars = self.deformation[:, 1]
d_max_y = np.max(np.abs(self.deformation[:, 1]))
stitle = 'Displacement, Uy (Max. = ' + str(d_max_y) + ')\n'
plotter(self.p, self.fileName, 'mesh_view',self.dispNodeCoords, scalars, stitle)
elif node_contour_type=='Displacement, Uz':
scalars = self.deformation[:, 2]
d_max_z = np.max(np.abs(self.deformation[:, 2]))
stitle = 'Displacement, Uz (Max. = ' + str(d_max_z) + ')\n'
plotter(self.p, self.fileName, 'mesh_view',self.dispNodeCoords, scalars, stitle)
elif node_contour_type=='Displacement, Uxyz':
scalars = self.deformation[:, :3]
scalars = (scalars * scalars).sum(1) ** 0.5
d_max_xyz = np.max(np.abs(scalars))
stitle = 'Displacement, Uxyz (Max. = ' + str(d_max_xyz) + ')\n'
plotter(self.p, self.fileName, 'mesh_view', self.dispNodeCoords, scalars, stitle)
elif node_contour_type=='Rotation, Rx':
scalars = self.rotation[:, 0]
stitle = 'Rotation, Rx (rad) (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'mesh_view',self.dispNodeCoords, scalars, stitle)
elif node_contour_type=='Rotation, Ry':
scalars = self.rotation[:, 1]
stitle = 'Rotation, Ry (rad) (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'mesh_view',self.dispNodeCoords, scalars, stitle)
elif node_contour_type=='Rotation, Rz':
scalars = self.rotation[:, 2]
stitle = 'Rotation, Rz (rad) (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'mesh_view',self.dispNodeCoords, scalars, stitle)
elif node_contour_type=='Rotation, Rxyz':
scalars = self.rotation[:, :3]
scalars = (scalars * scalars).sum(1) ** 0.5
stitle = 'Rotation, Rxyz (rad) (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'mesh_view',self.dispNodeCoords, scalars, stitle)
elif node_contour_type=='Force Reaction, RFx':
scalars = self.forcereaction[:, 0]
stitle = 'Force Reaction, RFx (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'mesh_view',self.dispNodeCoords, scalars, stitle)
elif node_contour_type=='Force Reaction, RFy':
scalars = self.forcereaction[:, 1]
stitle = 'Force Reaction, RFy (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'mesh_view',self.dispNodeCoords, scalars, stitle)
elif node_contour_type=='Force Reaction, RFz':
scalars = self.forcereaction[:, 2]
                    stitle = 'Force Reaction, RFz (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
                    plotter(self.p, self.fileName, 'mesh_view', self.dispNodeCoords, scalars, stitle)
import numpy as np
import ipdb
import os,sys,re
from nltk import word_tokenize
from six.moves import cPickle as pickle
from sklearn.cross_validation import train_test_split
from collections import Counter
from MARCO.POMDP.MarkovLoc_Grid import getMapGrid
from MARCO.POMDP.MarkovLoc_Jelly import getMapJelly
from MARCO.POMDP.MarkovLoc_L import getMapL
from MARCO.Robot.Meanings import Wall,End,Empty
#######################################################################################################
data_dir = 'data/'
SEED = 42
np.random.seed(SEED)
path_patt = re.compile(r'\d+,\s*\d+,\s*[-]*\d+')
# actions
FW = 0
L = 1
R = 2
STOP = 3
PAD_decode = 4
actions_str = [
"FORWARD",
"LEFT",
"RIGHT",
"STOP",
"<PAD>",
]
num_actions = len(actions_str)
forward_step = 1 # 1 cell
rotation_step = 90 # degrees
# Special indicator for sequence padding
EOS = '<EOS>'
PAD = '<PAD>'
RARE = '<RARE>'
#######################################################################################################
class Sample(object):
def __init__(self,_inst=[],_actions=[],path=[],_id='',sP=-1,eP=-1,_map_name=''):
self._instructions = _inst
self._actions = _actions
self._id = _id
# path: sequence of position states (x,y,th)
self._path = path
# start and end position (id_localization) | global (multisentence) start and end
self._startPos = sP
self._endPos = eP
self._map_name = _map_name
def __repr__(self):
res = ("{ instructions:\n"+
" "+str(self._instructions) + '\n'
" actions:\n"+
" "+str(verbose_actions(self._actions)) + '\n'
" path:\n"+
" "+str(self._path)+' }')
return res
class MapData:
def __init__(self,_map_name,_map):
# can be Grid, Jelly or L
self.name = _map_name.lower()
# map object
self.map = _map
# format: [Sample(x,y,sample_id)]
self.samples = []
def add_sample(self,_instructions,_actions,_path,_id,sP,eP,map_name):
# add new sample (nav_instructions,actions,sample_id)
# input: instructions, actions, path, sample_id, start_pos, end_pos, map_name
self.samples.append( Sample(_instructions,_actions,_path,_id,sP,eP,map_name) )
def get_multi_sentence_samples(self):
# return: [[Sample], [Sample]]
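        # Consecutive samples that share the same _id form one multi-sentence
        # instruction; group them while preserving their original order.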
ms_sample_list = []
prev_id = self.samples[0]._id
n_sam = len(self.samples)
ms_sample = []
for i in xrange(n_sam):
if self.samples[i]._id != prev_id:
ms_sample_list.append(ms_sample)
ms_sample = [self.samples[i]]
else:
ms_sample.append(self.samples[i])
prev_id = self.samples[i]._id
# add last batch
ms_sample_list.append(ms_sample)
return ms_sample_list
def verbose_actions(actions):
#print string command for each action
return [actions_str[act_id] for act_id in actions]
def get_actions_and_path(path_text,_map):
"""
Extract action and path seq from raw string in data (FW(x,y,th);L(x,y,th)...)
"""
list_pre_act = path_text.split(';')
n_act = len(list_pre_act)
actions = []
path = []
for i in xrange(n_act):
x,y,th = -1,-1,-1
id_act = -1
if i==n_act-1:
str_action = list_pre_act[i].strip('(').strip(')').split(',')
x,y,th = [int(comp.strip()) for comp in str_action]
id_act = STOP
else:
prx = list_pre_act[i].find('(')
id_act = actions_str.index(list_pre_act[i][:prx])
x,y,th = [int(comp.strip()) for comp in list_pre_act[i][prx+1:-1].split(',')]
pose = _map.platdir2orient(th)
xg,yg = _map.locations[ _map.plat2place(x,y) ]
if xg < 1 or yg < 1:
print("Map: ",_map.name)
print(" xp,yp: ", x,y)
print(" xg,yg: ", xg,yg)
print("="*30)
ipdb.set_trace()
path.append( (xg,yg,pose) )
actions.append(id_act)
return actions,path
"""
Read single and multiple sentence instructions
return: {map_name : MapData object [with data in 'samples' attribute]}
"""
def get_data():
map_data = {
'grid' : MapData("grid" ,getMapGrid()),
'jelly' : MapData("jelly",getMapJelly()),
'l' : MapData("l",getMapL())
}
for map_name, data_obj in map_data.items():
filename = map_name + '.settrc'
sample_id = ''
flag_toggle = False
toggle = 0
actions = path = tokens = []
start_pos = end_pos = -1
for line in open( os.path.join(data_dir,filename) ):
line=line.strip("\n")
if line=='':
#ipdb.set_trace()
# reset variables
flag_toggle = False
toggle = 0
actions = path = tokens = []
start_pos = end_pos = -1
sample_id = ''
continue
if line.startswith("Cleaned-"):
prex = "Cleaned-"
sample_id = line[len(prex):]
if line.find('map=')!=-1:
                # line of the form: y=... map=... x=... ; extract global start and end positions
flag_toggle=True
temp = line.split('\t')
start_pos = int(temp[0][2:]) # y=...
end_pos = int(temp[-1][2:]) # x=...
continue
if flag_toggle:
if toggle==0:
# read instructions
tokens = word_tokenize(line)
else:
# read actions and path
actions,path = get_actions_and_path(line,data_obj.map)
# save new single-sentence sample
data_obj.add_sample(tokens, actions, path, sample_id, start_pos, end_pos, map_name)
# reset variables
actions = path = tokens = []
toggle = (toggle+1)%2
#END-IF-TOGGLE
#END-FOR-READ-FILE
#END-FOR-MAPS
return map_data
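# Hypothetical usage sketch:
#   map_data = get_data()
#   for name, md in map_data.items():
#       print(name, len(md.samples))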
##########################################################################################
##########################################################################################
class Fold:
def __init__(self,train_set,val_set,test_set,test_multi_set,vocab):
self.train_data = train_set
self.valid_data = val_set
self.test_single_data = test_set
self.test_multi_data = test_multi_set
self.vocabulary = vocab
self.vocabulary_size = len(vocab)
"""
Shuffles and splits data into train, val, and test sets for each fold configuration.
Each fold is a one-map-out configuration with (train: 0.9, val: 0.1).
return: [fold0, fold1, fold2]
"""
def get_folds_vDev(dir='data/', val=0.1, force=False):
pickle_file = 'folds_vDev.pickle'
filename = os.path.join(dir,pickle_file)
folds = []
if force or not os.path.exists(filename):
# Make pickle object
dataByMap = get_data()
map_names = dataByMap.keys()
n_names = len(map_names)
# Iteration over folds
for i in range(n_names):
# reset arrays
train_set = []
valid_set = []
complete_set = [] # for universal vocab
#
test_single_set = dataByMap[map_names[i]].samples
test_multi_set = dataByMap[map_names[i]].get_multi_sentence_samples()
for j in range(n_names):
if j != i:
# shuffle data before splitting
                    data = np.array(dataByMap[map_names[j]].samples)  # shuffle in a separate array; preserve order for multi-sentence building
                    np.random.shuffle(data)
import warnings
import pytest
import numpy as np
import os,sys,inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0,parentdir)
from ADPYNE.AutoDiff import AutoDiff, vectorize
import ADPYNE.elemFunctions as ef
# helper function tests
def test_convertNonArray_array():
AD = AutoDiff(np.array([[1,2]]),1)
assert np.all(np.equal(AD.val, np.array([[1,2]])))
def test_convertNonArray_num():
AD = AutoDiff(1,1)
assert np.all(np.equal(AD.val, np.array([[1]])))
def test_calcJacobian_array():
AD = AutoDiff(1,2)
assert np.all(np.equal(AD.jacobian, np.array([[1]])))
def test_calcJacobian_array_withJ():
AD = AutoDiff(1,1,1,0,np.array([[1]]))
assert np.all(np.equal(AD.jacobian, np.array([[1]])))
def test_calcJacobian_vector():
AD = AutoDiff(4, np.array([[2, 1]]).T, n=2, k=1)
assert np.all(np.equal(AD.jacobian, np.array([[1, 0]])))
AD = AutoDiff(3, np.array([[1, 2]]).T, n=2, k=2)
assert np.all(np.equal(AD.jacobian, np.array([[0, 1]])))
def test_calcDerivative():
AD = AutoDiff(4, 2, n=4, k=3)
assert np.all(np.equal(AD.der, np.array([[0, 0, 2, 0]])))
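# In the constructors above (a convention read off these tests): n is the total
# number of scalar inputs and k is the 1-based index of the input being seeded,
# so the Jacobian row is zero except for a 1 at position k, and the derivative
# row carries the seed value at that same position.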
# addition tests
def test_add_ad_results():
# single input cases
# positive numbers
x = AutoDiff(5, 2)
f = x + x
assert f.val == 10
assert f.der == 4
assert f.jacobian == 2
# negative numbers
y = AutoDiff(-5, 2)
f = y + y
assert f.val == -10
assert f.der == 4
assert f.jacobian == 2
def test_add_vector_results():
x = AutoDiff(np.array([[3],[1]]), np.array([[2, 1]]).T, 2, 1)
y = AutoDiff(np.array([[2],[-3]]), np.array([[3, 2]]).T, 2, 2)  # trailing arguments assumed by analogy with x above
#!/usr/bin/env python3
# Copyright 2019 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# title :data/timeseries/cognitive_tasks/cognitive_data.py
# author :be
# contact :<EMAIL>
# created :29/10/2019
# version :1.0
# python_version :3.7
"""
Set of cognitive tasks
^^^^^^^^^^^^^^^^^^^^^^
A data handler for cognitive tasks as implemented in Masse et al (PNAS). The
user can construct individual datasets with this data handler and use each of
these datasets to train a model in a continual learning setting.
"""
import numpy as np
from torch import from_numpy
# from Masse et al. code base, needed for task generation
import hypnettorch.data.timeseries.cognitive_tasks.stimulus as stim_masse
import hypnettorch.data.timeseries.cognitive_tasks.parameters as params_masse
from hypnettorch.data.dataset import Dataset
# TODO Use `SequentialDataset` as baseclass.
class CognitiveTasks(Dataset):
"""An instance of this class shall represent a one of the 20 cognitive
tasks.
"""
def __init__(self, task_id=0, num_train=80, num_test=20, num_val=None,
rstate=None):
"""Generate a new dataset.
We use the MultiStimulus class from Masse et al. to generate
the inputs and outputs of different cognitive tasks in accordance with
the data handling structures of the hnet code base.
Note that masks (part of the Masse et al. trial generator) will be
handled independently of this data handler.
Args:
task_id (int): The id of the cognitive task to generate.
num_train (int): Number of training samples.
num_test (int): Number of test samples.
num_val (optional): Number of validation samples.
rstate: If ``None``, the current random state of numpy is used to
generate the data.
"""
super().__init__()
# set random state
if rstate is not None:
self._rstate = rstate
else:
self._rstate = np.random
# TODO: generate task library and load train / test data instead of
# generating them for every call. Keeping this version as a quick fix
# for now.
# get train and test data
train_x, train_y = self._generate_trial_samples(num_train,task_id)
test_x, test_y = self._generate_trial_samples(num_test,task_id)
# Create validation data if requested.
if num_val is not None:
val_x, val_y = self._generate_trial_samples(num_val,task_id)
in_data = np.vstack([train_x, test_x, val_x])
out_data = np.vstack([train_y, test_y, val_y])
else:
in_data = np.vstack([train_x, test_x])
out_data = np.vstack([train_y, test_y])
# Specify internal data structure.
self._data['classification'] = True
self._data['sequence'] = True
self._data['in_shape'] = [68]
self._data['out_shape'] = [9]
self._data['is_one_hot'] = True
self._data['num_classes'] = 9
self._data['task_id'] = task_id
self._data['in_data'] = in_data
self._data['out_data'] = out_data
self._data['train_inds'] = np.arange(num_train)
self._data['test_inds'] = np.arange(num_train, num_train + num_test)
if num_val is not None:
n_start = num_train + num_test
self._data['val_inds'] = np.arange(n_start, n_start + num_val)
def _generate_trial_samples(self,n_samples,task_id):
"""Generate a certain number of trials
Args:
n_samples
task_id
Returns:
(tuple): Tuple containing:
- **x**: Matrix of trial inputs of shape
``[batch_size, in_size*time_steps]``.
- **y**: Matrix of trial targets of shape
``[batch_size, out_size*time_steps]``.
"""
# update batch_size in their parameter dict to get desired number of
# trials for training, then create stim object
params_masse.update_parameters({'batch_size': n_samples})
# create new stim object with the updated parameters
stim = stim_masse.MultiStimulus(self._rstate)
# generate trials and reshape
_, x, y, _, _ = stim.generate_trial(task_id)
x = self._flatten_tensor(x)
y = self._flatten_tensor(y)
return x, y
def _flatten_tensor(self,in_tensor):
"""Flattens the trial data tensors to the format expected by the
dataset class.
Args:
in_tensor: Numpy array of shape
``[time_steps, batch_size, in_size]``.
Returns:
out_mat: Numpy array of shape ``[batch_size, in_size*time_steps]``.
"""
(time_steps, batch_size, in_size) = in_tensor.shape
in_tensor = np.moveaxis(in_tensor,[0,1,2],[2,0,1])
out_mat = np.reshape(in_tensor,[batch_size, in_size*time_steps])
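# Worked shape example (illustrative): with time_steps=3, batch_size=2 and
# in_size=4, an input of shape (3, 2, 4) becomes (2, 4, 3) after np.moveaxis
# and (2, 12) after the reshape, i.e. [batch_size, in_size*time_steps] with the
# time series of each feature stored contiguously per sample.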
return out_mat
def input_to_torch_tensor(self, x, device, mode='inference',
force_no_preprocessing=False, sample_ids=None):
"""This method can be used to map the internal numpy arrays to PyTorch
tensors.
Args:
(....): See docstring of method
:meth:`data.dataset.Dataset.input_to_torch_tensor`.
Returns:
(torch.Tensor): The given input ``x`` as 3D PyTorch tensor. It has
dimensions ``[T, B, N]``, where ``T`` is the number of time steps
per stimulus, ``B`` is the batch size and ``N`` the number of input
units.
"""
assert(self._data['in_data'].shape[1] % np.prod(self.in_shape) == 0)
num_time_steps = self._data['in_data'].shape[1] // \
np.prod(self.in_shape)
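# A minimal sketch (assumed; the original method body is cut off here) of how
# the conversion presumably finishes, given the docstring above and the
# `from_numpy` import at the top of this module:
#   x = np.reshape(x, [x.shape[0], int(np.prod(self.in_shape)), num_time_steps])
#   x = np.moveaxis(x, [0, 1, 2], [1, 2, 0])   # back to [T, B, N]
#   return from_numpy(x).float().to(device)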
import unittest
import numpy as np
from gradient_descent import GradientDescent
class TestCase(unittest.TestCase):
def test_weights(self):
gd = GradientDescent(alpha=0.1)
X = np.array([[1, 2, 3, 4, 5],
[1, 2, 3, 4, 5],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]])
y = np.array([1, 1, 2, 2])
gd.fit(X, y)
self.assertEquals(5, len(gd.weights))
def test_fit(self):
gd = GradientDescent(alpha=0.1)
X = np.array([[1, 2, 3, 4, 5], [1, 2, 3, 4, 5]])
y = np.array([1, 2])
result = gd.fit(X, y)
self.assertNotEqual(0, len(result))
def test_predict(self):
gd = GradientDescent(alpha=0.1)
X = np.array([[1, 2, 3, 4, 5],
[1, 2, 3, 4, 5],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]])
y = np.array([1, 1, 2, 2])
gd.fit(X, y)
self.assertEquals(1, gd.predict(np.array([1, 2, 3, 4, 5])))
'''
File name: envs/bridge.py
Author: <NAME>
Date created: 11/26/2021
'''
import numpy as np
from collections import OrderedDict
from rlcard.envs import Env
from rlcard.games.bridge import Game
from rlcard.games.bridge.game import BridgeGame
from rlcard.games.bridge.utils.action_event import ActionEvent
from rlcard.games.bridge.utils.bridge_card import BridgeCard
from rlcard.games.bridge.utils.move import CallMove, PlayCardMove
# [] Why no_bid_action_id in bidding_rep ?
# It allows the bidding always to start with North.
# If North is not the dealer, then he must call 'no_bid'.
# Until the dealer is reached, 'no_bid' must be the call.
# I think this might help because it keeps a player's bid in a fixed 'column'.
# Note: the 'no_bid' is only inserted in the bidding_rep, not in the actual game.
#
# [] Why current_player_rep ?
# Explanation here.
#
# [] Note: hands_rep maintain the hands by N, E, S, W.
#
# [] Note: trick_rep maintains the trick cards by N, E, S, W.
# The trick leader can be deduced since play is in clockwise direction.
#
# [] Note: is_bidding_rep can be deduced from bidding_rep.
# I think I added is_bidding_rep before bidding_rep and thus it helped in early testing.
# My early testing had just the player's hand: I think the model conflated the bidding phase with the playing phase in this situation.
# Although is_bidding_rep is not needed, keeping it may improve learning.
#
# [] Note: bidding_rep uses the action_id instead of one hot encoding.
# I think one hot encoding would make the input dimension significantly larger.
#
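# Illustrative sizing of the action-id encoding discussed above (figures taken
# from the sizes declared in DefaultBridgeStateExtractor below): each of the 40
# bidding_rep slots stores a single action id drawn from 39 possible calls
# (1 no_bid + 35 bids + pass/dbl/rdbl), so the representation costs 40 integers,
# whereas a one-hot alternative would cost 40 * 39 = 1560 entries.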
class BridgeEnv(Env):
''' Bridge Environment
'''
def __init__(self, config):
self.name = 'bridge'
self.game = Game()
super().__init__(config=config)
self.bridgePayoffDelegate = DefaultBridgePayoffDelegate()
self.bridgeStateExtractor = DefaultBridgeStateExtractor()
state_shape_size = self.bridgeStateExtractor.get_state_shape_size()
self.state_shape = [[1, state_shape_size] for _ in range(self.num_players)]
self.action_shape = [None for _ in range(self.num_players)]
def get_payoffs(self):
''' Get the payoffs of players.
Returns:
(list): A list of payoffs for each player.
'''
return self.bridgePayoffDelegate.get_payoffs(game=self.game)
def get_perfect_information(self):
''' Get the perfect information of the current state
Returns:
(dict): A dictionary of all the perfect information of the current state
'''
return self.game.round.get_perfect_information()
def _extract_state(self, state): # wch: don't use state 211126
''' Extract useful information from state for RL.
Args:
state (dict): The raw state
Returns:
(numpy.array): The extracted state
'''
return self.bridgeStateExtractor.extract_state(game=self.game)
def _decode_action(self, action_id):
''' Decode Action id to the action in the game.
Args:
action_id (int): The id of the action
Returns:
(ActionEvent): The action that will be passed to the game engine.
'''
return ActionEvent.from_action_id(action_id=action_id)
def _get_legal_actions(self):
''' Get all legal actions for current state.
Returns:
(list): A list of legal actions' id.
'''
raise NotImplementedError # wch: not needed
class BridgePayoffDelegate(object):
def get_payoffs(self, game: BridgeGame):
''' Get the payoffs of players. Must be implemented in the child class.
Returns:
(list): A list of payoffs for each player.
Note: Must be implemented in the child class.
'''
raise NotImplementedError
class DefaultBridgePayoffDelegate(BridgePayoffDelegate):
def __init__(self):
self.make_bid_bonus = 2
def get_payoffs(self, game: BridgeGame):
''' Get the payoffs of players.
Returns:
(list): A list of payoffs for each player.
'''
contract_bid_move = game.round.contract_bid_move
if contract_bid_move:
declarer = contract_bid_move.player
bid_trick_count = contract_bid_move.action.bid_amount + 6
won_trick_counts = game.round.won_trick_counts
declarer_won_trick_count = won_trick_counts[declarer.player_id % 2]
defender_won_trick_count = won_trick_counts[(declarer.player_id + 1) % 2]
declarer_payoff = bid_trick_count + self.make_bid_bonus if bid_trick_count <= declarer_won_trick_count else declarer_won_trick_count - bid_trick_count
defender_payoff = defender_won_trick_count
payoffs = []
for player_id in range(4):
payoff = declarer_payoff if player_id % 2 == declarer.player_id % 2 else defender_payoff
payoffs.append(payoff)
else:
payoffs = [0, 0, 0, 0]
return np.array(payoffs)
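# Worked example (illustrative, assuming a standard 13-trick deal): with a
# contract bid_amount of 3, bid_trick_count = 3 + 6 = 9. If the declaring side
# takes 10 tricks and the defenders 3, the contract is made, so
# declarer_payoff = 9 + 2 = 11 and defender_payoff = 3, giving
# payoffs = [11, 3, 11, 3] when the declarer sits at position 0 or 2.
# Had the declaring side taken only 8 tricks, declarer_payoff = 8 - 9 = -1.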
class BridgeStateExtractor(object): # interface
def get_state_shape_size(self) -> int:
raise NotImplementedError
def extract_state(self, game: BridgeGame):
''' Extract useful information from state for RL. Must be implemented in the child class.
Args:
game (BridgeGame): The game
Returns:
(numpy.array): The extracted state
'''
raise NotImplementedError
@staticmethod
def get_legal_actions(game: BridgeGame):
''' Get all legal actions for current state.
Returns:
(OrderedDict): A OrderedDict of legal actions' id.
'''
legal_actions = game.judger.get_legal_actions()
legal_actions_ids = {action_event.action_id: None for action_event in legal_actions}
return OrderedDict(legal_actions_ids)
class DefaultBridgeStateExtractor(BridgeStateExtractor):
def __init__(self):
super().__init__()
self.max_bidding_rep_index = 40 # Note: max of 40 calls
self.last_bid_rep_size = 1 + 35 + 3 # no_bid, bid, pass, dbl, rdbl
def get_state_shape_size(self) -> int:
state_shape_size = 0
state_shape_size += 4 * 52 # hands_rep_size
state_shape_size += 4 * 52 # trick_rep_size
state_shape_size += 52 # hidden_cards_rep_size
state_shape_size += 4 # vul_rep_size
state_shape_size += 4 # dealer_rep_size
state_shape_size += 4 # current_player_rep_size
state_shape_size += 1 # is_bidding_rep_size
state_shape_size += self.max_bidding_rep_index # bidding_rep_size
state_shape_size += self.last_bid_rep_size # last_bid_rep_size
state_shape_size += 8 # bid_amount_rep_size
state_shape_size += 5 # trump_suit_rep_size
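# For reference, the sizes above total 4*52 + 4*52 + 52 + 4 + 4 + 4 + 1 + 40 + 39 + 8 + 5 = 573 features.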
return state_shape_size
def extract_state(self, game: BridgeGame):
''' Extract useful information from state for RL.
Args:
game (BridgeGame): The game
Returns:
(numpy.array): The extracted state
'''
extracted_state = {}
legal_actions: OrderedDict = self.get_legal_actions(game=game)
raw_legal_actions = list(legal_actions.keys())
current_player = game.round.get_current_player()
current_player_id = current_player.player_id
# construct hands_rep of hands of players
hands_rep = [np.zeros(52, dtype=int) for _ in range(4)]
if not game.is_over():
for card in game.round.players[current_player_id].hand:
hands_rep[current_player_id][card.card_id] = 1
if game.round.is_bidding_over():
dummy = game.round.get_dummy()
other_known_player = dummy if dummy.player_id != current_player_id else game.round.get_declarer()
for card in other_known_player.hand:
hands_rep[other_known_player.player_id][card.card_id] = 1
# construct trick_pile_rep
trick_pile_rep = [np.zeros(52, dtype=int) for _ in range(4)]
if game.round.is_bidding_over() and not game.is_over():
trick_moves = game.round.get_trick_moves()
for move in trick_moves:
player = move.player
card = move.card
trick_pile_rep[player.player_id][card.card_id] = 1
# construct hidden_card_rep (during trick taking phase)
hidden_cards_rep = np.zeros(52, dtype=int)
if not game.is_over():
if game.round.is_bidding_over():
declarer = game.round.get_declarer()
if current_player_id % 2 == declarer.player_id % 2:
hidden_player_ids = [(current_player_id + 1) % 4, (current_player_id + 3) % 4]
else:
hidden_player_ids = [declarer.player_id, (current_player_id + 2) % 4]
for hidden_player_id in hidden_player_ids:
for card in game.round.players[hidden_player_id].hand:
hidden_cards_rep[card.card_id] = 1
else:
for player in game.round.players:
if player.player_id != current_player_id:
for card in player.hand:
hidden_cards_rep[card.card_id] = 1
# construct vul_rep
vul_rep = np.array(game.round.tray.vul, dtype=int)
# construct dealer_rep
dealer_rep = np.zeros(4, dtype=int)
dealer_rep[game.round.tray.dealer_id] = 1
# construct current_player_rep
current_player_rep = np.zeros(4, dtype=int)
current_player_rep[current_player_id] = 1
# construct is_bidding_rep
is_bidding_rep = np.array([1] if game.round.is_bidding_over() else [0])
# construct bidding_rep
bidding_rep = np.zeros(self.max_bidding_rep_index, dtype=int)
bidding_rep_index = game.round.dealer_id # no_bid_action_ids allocated at start so that north always 'starts' the bidding
for move in game.round.move_sheet:
if bidding_rep_index >= self.max_bidding_rep_index:
break
elif isinstance(move, PlayCardMove):
break
elif isinstance(move, CallMove):
bidding_rep[bidding_rep_index] = move.action.action_id
bidding_rep_index += 1
# last_bid_rep
last_bid_rep = np.zeros(self.last_bid_rep_size, dtype=int)
last_move = game.round.move_sheet[-1]
if isinstance(last_move, CallMove):
last_bid_rep[last_move.action.action_id - ActionEvent.no_bid_action_id] = 1
# bid_amount_rep and trump_suit_rep
bid_amount_rep = np.zeros(8, dtype=int)
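# A minimal sketch (assumed; the original method is cut off here) of how the
# remaining pieces are presumably assembled, matching the sizes declared in
# get_state_shape_size above:
#   trump_suit_rep = np.zeros(5, dtype=int)
#   # ... set one-hot entries from game.round.contract_bid_move, if any ...
#   rep = np.concatenate(hands_rep + trick_pile_rep + [hidden_cards_rep,
#         vul_rep, dealer_rep, current_player_rep, is_bidding_rep, bidding_rep,
#         last_bid_rep, bid_amount_rep, trump_suit_rep])
#   extracted_state['obs'] = rep
#   return extracted_state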
"""Script for sampling COV, burstiness and memory coeficient, and
their uncertainties, on many faults and plotting them
<NAME>
University of Otago
2020
"""
import os, sys
import ast
from glob import glob
from operator import itemgetter
from re import finditer
import numpy as np
from scipy.optimize import curve_fit
from scipy.odr import Model, RealData, ODR
import scipy.odr.odrpack as odrpack
from scipy.stats import expon, gamma, weibull_min, ks_2samp, kstest
# !!! Dangerous hack to swap Weibull for gamma
#from scipy.stats import weibull_min as gamma #
# !!!
from matplotlib import pyplot
from matplotlib.patches import PathPatch
import matplotlib.gridspec as gridspec
from matplotlib.ticker import FormatStrFormatter
from scipy.stats import binom, kde
from adjustText import adjust_text
from QuakeRates.dataman.event_dates import EventSet
from QuakeRates.dataman.parse_oxcal import parse_oxcal
from QuakeRates.dataman.parse_age_sigma import parse_age_sigma
from QuakeRates.dataman.parse_params import parse_param_file, \
get_event_sets, file_len
from QuakeRates.utilities.bilinear import bilinear_reg_zero_slope, \
bilinear_reg_fix, bilinear_reg_fix_zero_slope
from QuakeRates.utilities.memory_coefficient import burstiness, memory_coefficient
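# For reference, minimal sketches of the two statistics imported above, as they
# are assumed to be defined (Goh & Barabasi burstiness and the lag-1 memory
# coefficient of interevent times). The imported QuakeRates implementations are
# the ones actually used below; these sketches are illustrative only.
def _burstiness_sketch(interevent_times):
    # B = (sigma - mu) / (sigma + mu), computed per chronology (column-wise)
    mu = np.mean(interevent_times, axis=0)
    sigma = np.std(interevent_times, axis=0)
    return (sigma - mu) / (sigma + mu)
def _memory_coefficient_sketch(interevent_times):
    # Pearson correlation between consecutive interevent times of one chronology
    return np.corrcoef(interevent_times[:-1], interevent_times[1:])[0, 1]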
filepath = '../params'
param_file_list = glob(os.path.join(filepath, '*.txt'))
param_file_list_NZ = ['Akatore_TaylorSilva_2019.txt',
'AlpineHokuriCk_Berryman_2012_simple.txt',
'AlpineSouthWestland_Cochran_2017_simple.txt',
'AwatereEast_Nicol_2016_simple.txt',
'ClarenceEast_Nicol_2016_simple.txt',
'CloudyFault_Nicol_2016_simple.txt',
'Dunstan_GNS_unpub_simple.txt',
'HopeConway_Hatem_2019_simple.txt',
'Hope_Khajavi_2016_simple.txt',
'Ihaia_Nicol_2016_simple.txt',
'Oaonui_Nicol_2016_simple.txt',
'Ohariu_Nicol_2016_simple.txt',
'Paeroa_Nicol_2016_simple.txt',
'Pihama_Nicol_2016_simple.txt',
'PortersPassEast_Nicol_2016_simple.txt',
'Ngakuru_Nicol_2016_simple.txt',
'Mangatete_Nicol_2016_simple.txt',
'Rangipo_Nicol_2016_simple.txt',
'Rotoitipakau_Nicol_2016_simple.txt',
'Rotohauhau_Nicol_2016_simple.txt',
'Snowden_Nicol_2016_simple.txt',
'Vernon_Nicol_2016_simple.txt',
'WairarapaSouth_Nicol_2016_simple.txt',
'Wairau_Nicol_2018_simple.txt',
'Waimana_Nicol_2016_simple.txt',
'Wellington_Langridge_2011_simple.txt',
'Waitangi_GNS_unpub_simple.txt',
'Whakatane_Nicol_2016_simple.txt',
'Whirinaki_Nicol_2016_simple.txt']
# List of faults in study by Williams et al 2019
# Note this is not entirely the same, as there are some records from
# that study that are not included in ours.
param_file_list_W = ['AlpineHokuriCk_Berryman_2012_simple.txt',
'HaywardTysons_Lienkaemper_2007_simple.txt',
'SanJacintoMysticLake_Onderdonk_2018_simple.txt',
'NorthAnatolianElmacik_Fraser_2010_simple.txt',
'SanAndreasWrightwood_Weldon_2004_simple.txt',
'SanAndreasCarizzo_Akciz_2010_simple.txt',
'SanJacintoHogLake_Rockwell_2015_simple.txt',
'SanAndreasMissionCk_Fumal_2002_simple.txt',
'SanAndreasPalletCk_Scharer_2011_simple.txt',
'Xorkoli_Altyn_Tagh_Yuan_2018.txt',
'NorthAnatolianYaylabeli_Kozaci_2011_simple.txt',
'ElsinoreTemecula_Vaughan_1999_simple.txt',
'DeadSeaJordan_Ferry_2011_simple.txt',
'SanAndreasBigBend_Scharer_2017_simple.txt',
'WasatchBrigham_McCalpin_1996_simple.txt',
'Irpinia_Pantosti_1993_simple.txt',
'WasatchWeber_Duross_2011_simple.txt',
'WasatchNilphi_Duross_2017_simple.txt',
'LomaBlanca_Williams_2017_simple.txt',
'AlaskaPWSCopper_Plafker_1994_simple.txt',
'NankaiTrough_Hori_2004_simple.txt',
'CascadiaNth_Adams_1994_simple.txt',
'CascadiaSth_Goldfinger_2003_simple.txt',
'JavonCanyon_SarnaWojicki_1987_simple.txt',
'NewGuinea_Ota_1996_simple.txt',
'ChileMargin_Moernaut_2018_simple.txt']
#param_file_list = []
#for f in param_file_list_NZ:
#for f in param_file_list_W:
# param_file_list.append(os.path.join(filepath, f))
n_samples = 10000 # Number of Monte Carlo samples of the eq chronologies
half_n = int(n_samples/2)
print(half_n)
annotate_plots = False # If True, lable each fault on the plot
plot_folder = './plots'
if not os.path.exists(plot_folder):
os.makedirs(plot_folder)
# Define subset to take
#faulting_styles = ['Reverse']
#faulting_styles = ['Normal']
#faulting_styles = ['Strike_slip']
faulting_styles = ['all']
tectonic_regions = ['all']
#tectonic_regions = ['Intraplate_noncratonic', 'Intraplate_cratonic', 'Near_plate_boundary']
#tectonic_regions = ['Plate_boundary_master', 'Plate_boundary_network']
#tectonic_regions = ['Plate_boundary_network', 'Near_plate_boundary']
#tectonic_regions = ['Plate_boundary_master']
#tectonic_regions = ['Subduction']
#tectonic_regions = ['Near_plate_boundary']
min_number_events = 5 # Use for all other calculations.
min_num_events_mem = 6 # Use for memory coefficient
#Summarise for comment to add to figure filename
fig_comment = ''
#fig_comment = 'NZ_examples_'
#fig_comment = 'Williams2019_'
for f in faulting_styles:
fig_comment += f
fig_comment += '_'
for t in tectonic_regions:
fig_comment += t
fig_comment += '_'
fig_comment += str(min_number_events)
#fig_comment += 'test_add_event_data'
def piecewise_linear(x, x0, y0, k1, k2):
return np.piecewise(x, [x < x0], [lambda x:k1*x + y0-k1*x0, lambda x:k2*x + y0-k2*x0])
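# For reference, a minimal sketch (an assumption, illustrative only) of the kind
# of model the imported bilinear_reg_fix_zero_slope is expected to implement for
# scipy.odr: log-linear below a fixed hinge and flat (zero slope) above it.
# In the fits below x is already log10(long-term rate), so the hinge is in
# log10 units. The imported QuakeRates version is what is actually fitted.
def _bilinear_fix_zero_slope_sketch(beta, x, hxfix=np.log10(2e-4)):
    a, b = beta
    return np.where(x <= hxfix, a * x + b, a * hxfix + b)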
def camel_case_split(identifier):
matches = finditer('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)', identifier)
return [m.group(0) for m in matches]
plot_colours = []
all_ie_times = []
added_events = [] # Store names of records where we've added an event due to
# exceptionally long current open interval
covs = []
cov_bounds = []
burstinesses = []
burstiness_bounds = []
burstiness_stds = []
burstinesses_expon = []
burstinesses_gamma = []
ie_gamma_alpha = []
memory_coefficients = []
memory_bounds = []
memory_stds = []
memory_spearman_coefficients = []
memory_spearman_bounds = []
memory_spearman_lag2_coef = []
memory_spearman_lag2_bounds = []
long_term_rates = []
long_term_rate_stds = []
slip_rates = []
slip_rate_stds = []
slip_rate_bounds = []
max_interevent_times = []
min_interevent_times = []
min_paired_interevent_times = []
std_min_paired_interevent_times = []
std_min_interevent_times = []
std_max_interevent_times = []
max_interevent_times_bounds = []
min_interevent_times_bounds = []
min_paired_interevent_times_bounds = []
ratio_min_pair_max = []
ratio_min_max = []
std_ratio_min_pair_max = []
std_ratio_min_max = []
ratio_min_pair_max_bounds =[]
ratio_min_max_bounds = []
names, event_sets, event_certainties, num_events, tect_regions, fault_styles = \
get_event_sets(param_file_list, tectonic_regions,
faulting_styles, min_number_events)
references = []
# Get citations for each dataset from filename
for s in param_file_list:
sp = s.split('_')
if sp[0].split('/')[2] in names:
references.append(sp[1] + ' ' + sp[2])
n_faults = len(names)
print('Number of faults', n_faults)
for i, event_set in enumerate(event_sets):
# Handle cases with uncertain number of events. Where events identification is
# unsure, event_certainty is given a value of 0, compared with 1 for certain
# events
# First generate chronologies assuming all events are certain
# event_set.name = names[i]
event_set.gen_chronologies(n_samples, observation_end=2020, min_separation=1)
event_set.calculate_cov()
event_set.cov_density()
event_set.memory_coefficient()
event_set.memory_spearman_rank_correlation()
# Store all inter-event times for global statistics
all_ie_times.append(event_set.interevent_times)
# Now calculate some statistics on the sampled chronologies
event_set.basic_chronology_stats()
# Plot histogram of interevent times
figfile = os.path.join(plot_folder, ('interevent_times_%s.png' % names[i]))
event_set.plot_interevent_time_hist(fig_filename=figfile)
# Fit gamma distribution to event set data
event_set.fit_gamma()
ie_gamma_alpha.append(event_set.mean_gamma_alpha_all) # Get mean estimate of alpha
min_paired_interevent_times.append(event_set.mean_minimum_pair_interevent_time)
max_interevent_times.append(event_set.mean_maximum_interevent_time)
min_interevent_times.append(event_set.mean_minimum_interevent_time)
std_min_paired_interevent_times.append(event_set.std_minimum_pair_interevent_time)
std_min_interevent_times.append(event_set.std_minimum_interevent_time)
std_max_interevent_times.append(event_set.std_maximum_interevent_time)
if event_set.std_maximum_interevent_time == 0:
print('Zero std_maximum_interevent_time for ', names[i])
slip_rates.append(event_set.slip_rates[0])
slip_rate_bounds.append([event_set.slip_rates[1], event_set.slip_rates[2]])
slip_rate_stds.append(abs(np.log10(event_set.slip_rates[2]) - \
np.log10(event_set.slip_rates[1]))/4) # Approx from 95% intervals
max_interevent_times_bounds.append([abs(event_set.mean_maximum_interevent_time -
event_set.maximum_interevent_time_lb),
abs(event_set.mean_maximum_interevent_time -
event_set.maximum_interevent_time_ub)])
min_interevent_times_bounds.append([abs(event_set.mean_minimum_interevent_time -
event_set.minimum_interevent_time_lb),
abs(event_set.mean_minimum_interevent_time -
event_set.minimum_interevent_time_ub)])
min_paired_interevent_times_bounds.append([abs(event_set.mean_minimum_pair_interevent_time -
event_set.minimum_pair_interevent_time_lb),
abs(event_set.mean_minimum_pair_interevent_time -
event_set.minimum_pair_interevent_time_ub)])
ratio_min_pair_max.append(event_set.mean_ratio_min_pair_max)
ratio_min_max.append(event_set.mean_ratio_min_max)
std_ratio_min_pair_max.append(event_set.std_ratio_min_pair_max)
std_ratio_min_max.append(event_set.std_ratio_min_max)
ratio_min_pair_max_bounds.append([abs(event_set.mean_ratio_min_pair_max -
event_set.ratio_min_pair_max_lb),
abs(event_set.mean_ratio_min_pair_max -
event_set.ratio_min_pair_max_ub)])
ratio_min_max_bounds.append([abs(event_set.mean_ratio_min_max -
event_set.ratio_min_max_lb),
abs(event_set.mean_ratio_min_max -
event_set.ratio_min_max_ub)])
# Generate random exponentially and gamma distributed samples of length num_events - 1
# i.e. the number of inter-event times in the chronology. These will be used
# later for testing
scale = 100 # Fix scale, as burstiness is independent of scale for the exponential distribution
ie_times_expon = expon(scale=scale).rvs(size=(n_samples*(event_set.num_events-1)))
ie_times_expon = np.reshape(np.array(ie_times_expon), (n_samples, (event_set.num_events-1)))
ie_times_expon_T = ie_times_expon.T
burst_expon = burstiness(ie_times_expon_T)
# Gamma
alpha_g = 2.3 #2.2 #1.6 ##2.35 #2.4 #2.0
ie_times_g = gamma(alpha_g, scale=scale).rvs(size=(n_samples*(event_set.num_events-1)))
ie_times_g = np.reshape(np.array(ie_times_g), (n_samples, (event_set.num_events-1)))
ie_times_g_T = ie_times_g.T
burst_g = burstiness(ie_times_g_T)
# Now generate chronologies assuming uncertain events did not occur
if sum(event_certainties[i]) < event_set.num_events:
indices = np.where(event_certainties[i] == 1)
indices = list(indices[0])
# print(indices[0], type(indices))
events_subset = list(itemgetter(*indices)(event_set.event_list))
event_set_certain = EventSet(events_subset)
event_set_certain.name = names[i]
event_set_certain.gen_chronologies(n_samples, observation_end=2019, min_separation=1)
event_set_certain.calculate_cov()
event_set_certain.cov_density()
event_set_certain.basic_chronology_stats()
event_set_certain.memory_coefficient()
event_set_certain.memory_spearman_rank_correlation()
# Generate random exponentially distributed samples of length num_events - 1
# i.e. the number of inter-event times in the chronology. These will be used
# later for testing
ie_times_expon_certain = expon(scale=scale).rvs(size=(n_samples*(len(indices)-1)))
ie_times_expon_certain = np.reshape(np.array(ie_times_expon_certain), (n_samples, (len(indices)-1)))
ie_times_expon_certain_T = ie_times_expon_certain.T
burst_expon_certain = burstiness(ie_times_expon_certain_T)
ie_times_g_certain = gamma(alpha_g, scale=scale).rvs(size=(n_samples*(event_set.num_events-1)))
ie_times_g_certain = np.reshape(np.array(ie_times_g_certain), (n_samples, (event_set.num_events-1)))
ie_times_g_certain_T = ie_times_g_certain.T
burst_g_certain = burstiness(ie_times_g_certain_T)
# Now combine results from certain chronologies with uncertain ones
combined_covs = np.concatenate([event_set.covs[:half_n],
event_set_certain.covs[:half_n]])
combined_burstiness = np.concatenate([event_set.burstiness[:half_n],
event_set_certain.burstiness[:half_n]])
combined_memory = np.concatenate([event_set.mem_coef[:half_n],
event_set_certain.mem_coef[:half_n]])
combined_memory_spearman = np.concatenate([event_set.rhos[:half_n],
event_set_certain.rhos[:half_n]])
combined_memory_spearman_lag2 = np.concatenate([event_set.rhos2[:half_n],
event_set_certain.rhos2[:half_n]])
combined_burst_expon = np.concatenate([burst_expon[:half_n],
burst_expon_certain[:half_n]])
combined_burst_g = np.concatenate([burst_g[:half_n],
burst_g_certain[:half_n]])
covs.append(combined_covs)
burstinesses.append(combined_burstiness)
memory_coefficients.append(combined_memory)
memory_stds.append(np.std(np.array(combined_memory)))
memory_spearman_coefficients.append(combined_memory_spearman)
memory_spearman_lag2_coef.append(combined_memory_spearman_lag2)
burstinesses_expon.append(combined_burst_expon)
burstinesses_gamma.append(combined_burst_g)
cov_bounds.append([abs(np.mean(combined_covs) - \
min(event_set.cov_lb, event_set_certain.cov_lb)),
abs(np.mean(combined_covs) - \
max(event_set.cov_ub, event_set_certain.cov_ub))])
burstiness_bounds.append([abs(np.mean(combined_burstiness) - \
min(event_set.burstiness_lb,
event_set_certain.burstiness_lb)),
abs(np.mean(combined_burstiness) - \
max(event_set.burstiness_ub,
event_set_certain.burstiness_ub))])
memory_bounds.append([abs(np.mean(combined_memory) - \
min(event_set.memory_lb,
event_set_certain.memory_lb)),
abs(np.mean(combined_memory) - \
max(event_set.memory_ub,
event_set_certain.memory_ub))])
memory_spearman_bounds.append([abs(np.mean(combined_memory_spearman) - \
min(event_set.rho_lb,
event_set_certain.rho_lb)),
abs(np.mean(combined_memory_spearman) - \
max(event_set.rho_ub,
event_set_certain.rho_ub))])
memory_spearman_lag2_bounds.append([abs(np.mean(combined_memory_spearman_lag2) - \
min(event_set.rho2_lb,
event_set_certain.rho2_lb)),
abs(np.mean(combined_memory_spearman_lag2) - \
max(event_set.rho2_ub,
event_set_certain.rho2_ub))])
# Combine, taking n/2 samples from each set
combined_ltrs = np.concatenate([event_set.long_term_rates[:half_n],
event_set_certain.long_term_rates[:half_n]])
burstiness_stds.append(np.std(combined_burstiness))
print(len(combined_ltrs))
long_term_rates.append(combined_ltrs)
long_term_rate_stds.append(np.std(combined_ltrs))
else:
covs.append(event_set.covs)
burstinesses.append(event_set.burstiness)
memory_coefficients.append(event_set.mem_coef)
memory_stds.append(np.std(np.array(event_set.mem_coef)))
memory_spearman_coefficients.append(event_set.rhos)
memory_spearman_lag2_coef.append(event_set.rhos2)
long_term_rates.append(event_set.long_term_rates)
burstinesses_expon.append(burst_expon)
burstinesses_gamma.append(burst_g)
cov_bounds.append([abs(event_set.mean_cov - event_set.cov_lb),
abs(event_set.mean_cov - event_set.cov_ub)])
burstiness_bounds.append([abs(event_set.mean_burstiness - event_set.burstiness_lb),
abs(event_set.mean_burstiness - event_set.burstiness_ub)])
memory_bounds.append([abs(event_set.mean_mem_coef - event_set.memory_lb),
abs(event_set.mean_mem_coef - event_set.memory_ub)])
memory_spearman_bounds.append([abs(event_set.mean_rho - event_set.rho_lb),
abs(event_set.mean_rho - event_set.rho_ub)])
memory_spearman_lag2_bounds.append([abs(event_set.mean_rho2 - event_set.rho2_lb),
abs(event_set.mean_rho2 - event_set.rho2_ub)])
burstiness_stds.append(event_set.std_burstiness)
long_term_rate_stds.append(np.std(event_set.long_term_rates))  # std of the sampled rates, matching the combined branch above
# Get colours for plotting later
if event_set.faulting_style == 'Normal':
plot_colours.append('r')
elif event_set.faulting_style == 'Reverse':
plot_colours.append('b')
elif event_set.faulting_style == 'Strike_slip':
plot_colours.append('g')
else:
plot_colours.append('k')
if event_set.add_events: # List of records where we model long open interval
added_events.append(event_set.name)
# Convert to numpy arrays and transpose where necessary
num_events = np.array(num_events)
all_ie_times = np.array(all_ie_times)
max_interevent_times = np.array(max_interevent_times)
min_interevent_times = np.array(min_interevent_times)
min_paired_interevent_times = np.array(min_paired_interevent_times)
std_max_interevent_times = np.array(std_max_interevent_times)
std_min_interevent_times = np.array(std_min_interevent_times)
std_min_paired_interevent_times = np.array(std_min_paired_interevent_times)
max_interevent_times_bounds = np.array(max_interevent_times_bounds).T
min_interevent_times_bounds = np.array(min_interevent_times_bounds).T
min_paired_interevent_times_bounds = np.array(min_paired_interevent_times_bounds).T
long_term_rates_T = np.array(long_term_rates).T
mean_ltr = np.mean(long_term_rates_T, axis = 0)
long_term_rate_stds = np.array(long_term_rate_stds)
slip_rates = np.array(slip_rates).T
slip_rate_bounds = np.array(slip_rate_bounds).T
slip_rate_stds = np.array(slip_rate_stds).T
print('Mean_ltr', mean_ltr)
std_ltr = np.std(long_term_rates_T, axis = 0)
ltr_bounds = np.array([abs(mean_ltr - (np.percentile(long_term_rates_T, 2.5, axis=0))),
abs(mean_ltr - (np.percentile(long_term_rates_T, 97.5, axis=0)))])
ratio_min_pair_max = np.array(ratio_min_pair_max)
ratio_min_max = np.array(ratio_min_max)
std_ratio_min_pair_max = np.array(std_ratio_min_pair_max)
std_ratio_min_max = np.array(std_ratio_min_max)
ratio_min_pair_max_bounds = np.array(ratio_min_pair_max_bounds).T
ratio_min_max_bounds = np.array(ratio_min_max_bounds).T
cov_bounds = np.array(cov_bounds).T
burstiness_bounds = np.array(burstiness_bounds).T
burstiness_stds = np.array(burstiness_stds)
burstiness_expon = np.array(burstinesses_expon)
burstiness_gamma = np.array(burstinesses_gamma)
inds = np.where(num_events >= min_num_events_mem) # Get memory coefficients for more than 6 events
memory_coefficients = np.array(memory_coefficients)
memory_coefficients_min = memory_coefficients[inds]
memory_stds = np.array(memory_stds)
memory_stds_min = memory_stds[inds]
memory_bounds_min = np.array(memory_bounds)[inds].T
memory_bounds = np.array(memory_bounds).T
memory_spearman_bounds = np.array(memory_spearman_bounds).T
memory_spearman_lag2_bounds = np.array(memory_spearman_lag2_bounds).T
ie_gamma_alpha = np.array(ie_gamma_alpha)
# Now plot the means and 95% error bars of COV
pyplot.clf()
ax = pyplot.subplot(111)
mean_covs = []
for i, cov_set in enumerate(covs):
mean_cov = np.mean(cov_set)
mean_covs.append(mean_cov)
colours = []
for mean_cov in mean_covs:
if mean_cov <= 0.9:
colours.append('b')
elif mean_cov > 0.9 and mean_cov <= 1.1:
colours.append('g')
else:
colours.append('r')
pyplot.errorbar(mean_ltr, mean_covs,
xerr = ltr_bounds,
ecolor = '0.3',
elinewidth=0.7,
linestyle="None",
zorder=1)
pyplot.errorbar(mean_ltr, mean_covs,
yerr = cov_bounds,
ecolor = '0.3',
elinewidth=0.7,
linestyle="None",
zorder=1)
pyplot.scatter(mean_ltr, mean_covs, marker = 's', c=plot_colours,
s=25, zorder=2)
for i, txt in enumerate(names):
if max_interevent_times[i] > 10 and annotate_plots:
ax.annotate(txt[:4],
(mean_ltr[i], mean_covs[i]),
fontsize=8)
ax.set_ylim([0, 2.5])
ax.set_xlim([1./1000000, 1./40])
ax.set_xscale('log')
ax.set_xlabel('Long-term rate (events per year)')
ax.set_ylabel('COV')
figname = 'mean_cov_vs_lt_rate_%s.png' % fig_comment
pyplot.savefig(figname)
################################
# Plot burstiness against mean ltr
pyplot.clf()
ax = pyplot.subplot(111)
mean_bs = []
for i, b_set in enumerate(burstinesses):
mean_b = np.mean(b_set)
mean_bs.append(mean_b)
colours = []
for mean_b in mean_bs:
if mean_b <= -0.05:
colours.append('b')
elif mean_b > -0.05 and mean_b <= 0.05:
colours.append('g')
else:
colours.append('r')
pyplot.errorbar(mean_ltr, mean_bs,
xerr = ltr_bounds,
ecolor = '0.3',
elinewidth=0.7,
linestyle="None",
zorder=1)
pyplot.errorbar(mean_ltr, mean_bs,
yerr = burstiness_bounds,
ecolor = '0.3',
elinewidth=0.7,
linestyle="None",
zorder=1)
pyplot.scatter(mean_ltr, mean_bs, marker = 's', c=plot_colours,
s=25, zorder=2)
for i, txt in enumerate(names):
if max_interevent_times[i] > 10 and annotate_plots:
ax.annotate(txt[:4],
(mean_ltr[i], mean_bs[i]),
fontsize=8)
# Add B=0 linear
pyplot.plot([1./1000000, 1./40], [0, 0], linestyle='dashed', linewidth=1, c='0.5')
ax.set_xscale('log')
ax.set_xlabel('Long-term rate (events per year)')
ax.set_ylabel('B')
# Now do a bi-linear fit to the data
mean_bs = np.array(mean_bs)
indices = np.flatnonzero(mean_ltr > 3e-4)
indices = indices.flatten()
indices_slow_faults = np.flatnonzero(mean_ltr <= 3e-4)
indices_slow_faults = indices_slow_faults.flatten()
# Fit fast rate faults
lf = np.polyfit(np.log10(mean_ltr[indices]),
mean_bs[indices], 1)
# Now force it to be a flat line
lf[0] = 0.
lf[1] = np.mean(mean_bs[indices])
std_lf = np.std(mean_bs[indices])
xvals_short = np.arange(1.5e-4, 2e-2, 1e-4)
yvals = lf[0]*np.log10(xvals_short) + lf[1]
pyplot.plot(xvals_short, yvals, c='0.2')
# Fit slow faults
if len(indices_slow_faults) > 1:
lf_slow = np.polyfit(np.log10(mean_ltr[indices_slow_faults]),
mean_bs[indices_slow_faults], 1)
xvals_short = np.arange(1e-6, 1.5e-4, 1e-6)
yvals = lf_slow[0]*np.log10(xvals_short) + lf_slow[1]
pyplot.plot(xvals_short, yvals, c='0.2')
# Add formula for linear fits of data
print('Fits for B vs LTR')
txt = 'Y = {:=+6.2f} +/- {:4.2f}'.format(lf[1], std_lf)
print(txt)
ax.annotate(txt, (2e-4, 0.2), fontsize=8)
try:
txt = 'Y = {:4.2f}Log(x) {:=+6.2f}'.format(lf_slow[0], lf_slow[1])
print(txt)
ax.annotate(txt, (1.5e-6, 0.75), fontsize=8)
except:
pass
# Now try bilinear ODR linear fit
data = odrpack.RealData(np.log10(mean_ltr), mean_bs,
sx=np.log10(long_term_rate_stds), sy=burstiness_stds)
bilin = odrpack.Model(bilinear_reg_zero_slope)
odr = odrpack.ODR(data, bilin, beta0=[-3, -1.0, -4]) # beta0 gives the starting values for the fit
odr.set_job(fit_type=0)
out = odr.run()
print(out.sum_square)
out.pprint()
a = out.beta[0]
b = out.beta[1]
hx = out.beta[2]
xvals = np.arange(1.e-6, 2e-2, 1e-6)
yrng = a*np.log10(xvals) + b #10**(b + a * xvals)
ylevel = a*hx + b #10**(b + a * hx)
print('ylevel', ylevel)
print(10**ylevel)
idx = xvals > 10**hx
yrng[idx] = (ylevel)
print('yrng', yrng)
print('hx', hx)
pyplot.plot(xvals, yrng, c='g')
# Bilinear fixed hinge
hxfix = np.log10(2e-4)
bilin_hxfix_cons_slope = odrpack.Model(bilinear_reg_fix_zero_slope)
odr = odrpack.ODR(data, bilin_hxfix_cons_slope, beta0=[-3, -1.0])
odr.set_job(fit_type=0)
out = odr.run()
print('bilinear hxfix_cons_slope')
print(out.sum_square)
out.pprint()
a = out.beta[0]
b = out.beta[1]
yrng = a*np.log10(xvals) + b
ylevel = a*hxfix + b
print('ylevel hxfix zero slope', ylevel)
print(10**ylevel)
idx = xvals > 10**hxfix
yrng[idx] = (ylevel)
print('yrng', yrng)
print('hx', hxfix)
pyplot.plot(xvals, yrng, c='r')
figname = 'burstiness_vs_lt_rate_%s.png' % fig_comment
pyplot.savefig(figname)
#########################
# Plot burstiness against slip rate
pyplot.clf()
ax = pyplot.subplot(111)
pyplot.errorbar(slip_rates, mean_bs,
xerr = slip_rate_bounds,
ecolor = '0.3',
elinewidth=0.7,
linestyle="None",
zorder=1)
pyplot.errorbar(slip_rates, mean_bs,
yerr = burstiness_bounds,
ecolor = '0.3',
elinewidth=0.7,
linestyle="None",
zorder=1)
pyplot.scatter(slip_rates, mean_bs, marker = 's', c=plot_colours,
s=25, zorder=2)
ax.set_ylim([-1, 1])
ax.set_xlim([1./1000, 100])
# Add B=0 linear
pyplot.plot([1./1000, 100], [0, 0], linestyle='dashed', linewidth=1, c='0.5')
ax.set_xscale('log')
ax.set_xlabel('Slip rate (mm/yr)')
ax.set_ylabel('B')
# Now try linear ODR linear fit
def f(B, x):
return B[0]*x + B[1]
print(slip_rates)
print(np.log10(slip_rates))
print(slip_rate_stds)
print(np.log10(slip_rate_stds))
print(burstiness_stds)
wd = 1./np.power(burstiness_stds, 2)
print(wd)
we = 1./np.power(slip_rate_stds, 2)
print(we)
# Std dev already in log-space
data = odrpack.RealData(np.log10(slip_rates), mean_bs,
sx=np.sqrt(slip_rate_stds), sy=np.sqrt(burstiness_stds))
linear = odrpack.Model(f)
odr = odrpack.ODR(data, linear, beta0=[-1, -1.0,])
odr.set_job(fit_type=0)
out = odr.run()
out.pprint()
a = out.beta[0]
b = out.beta[1]
xvals = np.arange(1.e-4, 1e2, 1e-2)
yrng = a*np.log10(xvals) + b #10**(b + a * xvals)
pyplot.plot(xvals, yrng, c='0.6')
txt = 'Y = {:4.2f}Log(x) {:=+6.2f}'.format(a, b)
print(txt)
ax.annotate(txt, (1e0, 0.9), color='0.6')
# Now try bilinear fixed hinge
bilin = odrpack.Model(bilinear_reg_fix_zero_slope)
odr = odrpack.ODR(data, bilin, beta0=[-1, -1.0, -1])
odr.set_job(fit_type=0)
out = odr.run()
out.pprint()
a = out.beta[0]
b = out.beta[1]
yrng = a*np.log10(xvals) + b
ylevel = a*hxfix + b
print('ylevel hxfix zero slope', ylevel)
print(10**ylevel)
idx = xvals > 10**hxfix
yrng[idx] = (ylevel)
print('yrng', yrng)
print('hx', hxfix)
pyplot.plot(xvals, yrng, c='0.2')
txt = 'Y = {:4.2f}Log(x) {:=+6.2f}, x < {:4.2f}'.format(a, b, np.power(10,hxfix))
print(txt)
ax.annotate(txt, (2e-3, 0.9), color='0.2')
txt = 'Y = {:4.2f}, x >= {:4.2f}'.format(ylevel, np.power(10,hxfix))
print(txt)
ax.annotate(txt, (1.2e-2, 0.8), color='0.2')
figname = 'burstiness_vs_slip_rate_%s.png' % fig_comment
pyplot.savefig(figname)
figname = 'burstiness_vs_slip_rate_%s.pdf' % fig_comment
pyplot.savefig(figname)
# Plot memory coefficients against long term rates
pyplot.clf()
ax = pyplot.subplot(111)
mean_mems = []
mean_ltr_mem = mean_ltr[inds]
ltr_bounds_mem = ltr_bounds.T[inds].T
for i, mem_set in enumerate(memory_coefficients):
mean_mem = np.mean(mem_set)
# print('Mean memory coefficient combined', mean_mem)
mean_mems.append(mean_mem)
mean_mems = np.array(mean_mems)
colours = []
plot_colours_mem = list(np.array(plot_colours)[inds])
for mean_mem in mean_mems:
if mean_mem <= -0.05:
colours.append('b')
elif mean_mem > -0.05 and mean_mem <= 0.05:
colours.append('g')
else:
colours.append('r')
pyplot.errorbar(mean_ltr_mem, mean_mems[inds],
xerr = ltr_bounds_mem,
ecolor = '0.3',
elinewidth=0.7,
linestyle="None",
zorder=1)
pyplot.errorbar(mean_ltr_mem, mean_mems[inds],
yerr = memory_bounds_min,
ecolor = '0.3',
elinewidth=0.7,
linestyle="None",
zorder=1)
pyplot.scatter(mean_ltr_mem, mean_mems[inds], marker = 's', c=plot_colours_mem,
s=25, zorder=2)
for i, txt in enumerate(names):
if max_interevent_times[i] > 10 and annotate_plots:
ax.annotate(txt[:4],
(mean_ltr[i], mean_mems[i]),
fontsize=8)
ax.set_xlim([1./1000000, 1./40])
ax.set_xscale('log')
ax.set_xlabel('Long-term rate (events per year)')
ax.set_ylabel('M')
figname = 'memory_coefficient_vs_lt_rate_%s.png' % fig_comment
pyplot.savefig(figname)
# Plot Spearman Rank coefficients against long term rates
pyplot.clf()
ax = pyplot.subplot(111)
mean_mems_L1 = []
for i, mem_set in enumerate(memory_spearman_coefficients):
mean_mem = np.mean(mem_set)
mean_mems_L1.append(mean_mem)
colours = []
for mean_mem in mean_mems_L1:
if mean_mem <= -0.05:
colours.append('b')
elif mean_mem > -0.05 and mean_mem <= 0.05:
colours.append('g')
else:
colours.append('r')
pyplot.errorbar(mean_ltr, mean_mems_L1,
xerr = ltr_bounds,
ecolor = '0.3',
elinewidth=0.7,
linestyle="None",
zorder=1)
pyplot.errorbar(mean_ltr, mean_mems_L1,
yerr = memory_spearman_bounds,
elinewidth=0.7,
ecolor = '0.3',
linestyle="None",
zorder=1)
pyplot.scatter(mean_ltr, mean_mems_L1, marker = 's', c=plot_colours,
s=25, zorder=2)
for i, txt in enumerate(names):
if max_interevent_times[i] > 10 and annotate_plots:
ax.annotate(txt[:4],
(mean_ltr[i], mean_mems_L1[i]),
fontsize=8)
ax.set_xlim([1./1000000, 1./40])
ax.set_xscale('log')
ax.set_xlabel('Long-term rate (events per year)')
ax.set_ylabel('M (Spearman Rank)')
figname = 'memory_coefficient_Spearman_vs_lt_rate_%s.png' % fig_comment
pyplot.savefig(figname)
# Plot Spearman Rank (Lag-2) coefficients against long term rates
pyplot.clf()
ax = pyplot.subplot(111)
mean_mems_L2 = []
for i, mem_set in enumerate(memory_spearman_lag2_coef):
mean_mem = np.mean(mem_set)
mean_mems_L2.append(mean_mem)
colours = []
for mean_mem in mean_mems_L2:
if mean_mem <= -0.05:
colours.append('b')
elif mean_mem > -0.05 and mean_mem <= 0.05:
colours.append('g')
else:
colours.append('r')
pyplot.errorbar(mean_ltr, mean_mems_L2,
xerr = ltr_bounds,
ecolor = '0.3',
elinewidth=0.7,
linestyle="None",
zorder=1)
pyplot.errorbar(mean_ltr, mean_mems_L2,
yerr = memory_spearman_lag2_bounds,
ecolor = '0.3',
elinewidth=0.7,
linestyle="None",
zorder=1)
pyplot.scatter(mean_ltr, mean_mems_L2, marker = 's', c=plot_colours,
s=25, zorder=2)
for i, txt in enumerate(names):
if max_interevent_times[i] > 10 and annotate_plots:
ax.annotate(txt[:4],
(mean_ltr[i], mean_mems_L2[i]),
fontsize=8)
ax.set_xlim([1./1000000, 1./40])
ax.set_xscale('log')
ax.set_xlabel('Long-term rate (events per year)')
ax.set_ylabel('M (Spearman Rank Lag-2)')
figname = 'memory_coefficient_Spearman_Lag2_vs_lt_rate_%s.png' % fig_comment
pyplot.savefig(figname)
# Plot Spearman rank Lag-1 against Lag-2
# Plot Spearman Rank coefficients against long term rates
pyplot.clf()
ax = pyplot.subplot(111)
colours = []
for mean_mem in mean_mems_L1:
if mean_mem <= -0.05:
colours.append('b')
elif mean_mem > -0.05 and mean_mem <= 0.05:
colours.append('g')
else:
colours.append('r')
pyplot.errorbar(mean_mems_L1, mean_mems_L2,
xerr = memory_spearman_bounds,
ecolor = '0.3',
elinewidth=0.7,
linestyle="None",
zorder=1)
pyplot.errorbar(mean_mems_L1, mean_mems_L2,
yerr = memory_spearman_lag2_bounds,
ecolor = '0.3',
elinewidth=0.7,
linestyle="None",
zorder=1)
pyplot.scatter(mean_mems_L1, mean_mems_L2, marker = 's', c=plot_colours,
s=25, zorder=2)
for i, txt in enumerate(names):
if max_interevent_times[i] > 10 and annotate_plots:
ax.annotate(txt[:4],
(mean_mems_L1[i], mean_mems_L2[i]),
fontsize=8)
ax.set_xlabel('M (Spearman Rank Lag-1)')
ax.set_ylabel('M (Spearman Rank Lag-2)')
figname = 'memory_coefficient_Spearman_L1_vs_L2_%s.png' % fig_comment
pyplot.savefig(figname)
# Plot COV against number of events to look at sampling biases
pyplot.clf()
ax = pyplot.subplot(111)
mean_covs = []
for i, cov_set in enumerate(covs):
mean_cov = np.mean(cov_set)
mean_covs.append(mean_cov)
colours = []
for mean_cov in mean_covs:
if mean_cov <= 0.9:
colours.append('b')
elif mean_cov > 0.9 and mean_cov <= 1.1:
colours.append('g')
else:
colours.append('r')
pyplot.errorbar(mean_covs, num_events,
xerr = cov_bounds,
ecolor = '0.6',
linestyle="None")
pyplot.scatter(mean_covs, num_events, marker = 's', c=plot_colours, s=25)
for i, txt in enumerate(names):
if max_interevent_times[i] > 10 and annotate_plots:
ax.annotate(txt[:4],
(mean_covs[i], num_events[i]),
fontsize=8)
ax.set_xlabel('COV')
ax.set_ylabel('Number of events in earthquake record')
figname = 'mean_cov_vs_number_events_%s.png' % fig_comment
pyplot.savefig(figname)
# Now plot basic statistics
pyplot.clf()
ax = pyplot.subplot(111)
pyplot.errorbar(max_interevent_times, min_interevent_times,
yerr = min_interevent_times_bounds,
ecolor = '0.3',
elinewidth=0.7,
linestyle="None",
zorder=1)
pyplot.errorbar(max_interevent_times, min_interevent_times,
xerr = max_interevent_times_bounds,
ecolor = '0.3',
elinewidth=0.7,
linestyle="None",
zorder=1)
pyplot.scatter(max_interevent_times, min_interevent_times,
marker = 's', c=colours, s=25, zorder=2)
ax.set_xlabel('Maximum interevent time')
ax.set_ylabel('Minimum interevent time')
ax.set_xscale('log')
ax.set_yscale('log')
# Label low-slip rate faults
for i, txt in enumerate(names):
if max_interevent_times[i] > 10 and annotate_plots:
ax.annotate(txt[:4],
(max_interevent_times[i], min_interevent_times[i]),
fontsize=8)
# Linear fit only bottom end of data
indices = np.argwhere(max_interevent_times < 10000).flatten()
indices_slow_faults = np.argwhere(max_interevent_times >= 10000).flatten()
lf = np.polyfit(np.log10(max_interevent_times[indices]),
np.log10(min_interevent_times[indices]), 1)
xvals_short = np.arange(100, 1e4, 100)
log_yvals = lf[0]*np.log10(xvals_short) + lf[1]
yvals = np.power(10, log_yvals)
pyplot.plot(xvals_short, yvals)
# Add formula for linear fit to low-end of data
txt = 'Log(Y) = %.2fLog(x) + %.2f' % (lf[0], lf[1])
print(txt)
ax.annotate(txt, (800, 10000))
figname = 'min_vs_max_interevent_time_%s.png' % fig_comment
pyplot.savefig(figname)
# Plot minimum pairs
pyplot.clf()
ax = pyplot.subplot(111)
pyplot.errorbar(max_interevent_times, min_paired_interevent_times,
yerr = min_paired_interevent_times_bounds,
ecolor = '0.3',
elinewidth=0.7,
linestyle="None",
zorder=1)
pyplot.errorbar(max_interevent_times, min_paired_interevent_times,
xerr = max_interevent_times_bounds,
ecolor = '0.3',
elinewidth=0.7,
linestyle="None",
zorder=1)
pyplot.scatter(max_interevent_times, min_paired_interevent_times,
marker = 's', c=colours, s=25, zorder=2)
ax.set_xlabel('Maximum interevent time')
ax.set_ylabel('Minimum interevent time \n(mean of two shortest consecutive interevent times)')
ax.set_xscale('log')
ax.set_yscale('log')
# Label low-slip rate faults
for i, txt in enumerate(names):
if max_interevent_times[i] > 10 and annotate_plots:
ax.annotate(txt[:4],
(max_interevent_times[i], min_paired_interevent_times[i]),
fontsize=8)
# Now fit with a regression in log-log space
xvals = np.arange(100, 2e6, 100) # For plotting
# Linear fit
lf = np.polyfit(np.log10(max_interevent_times),
np.log10(min_paired_interevent_times), 1)
log_yvals = lf[0]*np.log10(xvals) + lf[1]
yvals = np.power(10, log_yvals)
pyplot.plot(xvals, yvals)
# Linear fit only bottom end of data
indices = np.argwhere(max_interevent_times < 10000).flatten()
lf = np.polyfit(np.log10(max_interevent_times[indices]),
np.log10(min_paired_interevent_times[indices]), 1)
xvals_short = np.arange(100, 1e4, 100)
log_yvals = lf[0]*np.log10(xvals_short) + lf[1]
yvals = np.power(10, log_yvals)
pyplot.plot(xvals_short, yvals)
# Add formula for linear fit to low-end of data
txt = 'Log(Y) = %.2fLog(x) + %.2f' % (lf[0], lf[1])
print(txt)
ax.annotate(txt, (100, 10000))
# Quadratic fit
qf = np.polyfit(np.log10(max_interevent_times),
np.log10(min_paired_interevent_times), 2)
print(qf)
log_yvals = qf[0]*np.log10(xvals)**2 + qf[1]*np.log10(xvals) + qf[2]
yvals = np.power(10, log_yvals)
pyplot.plot(xvals, yvals)
figname = 'min_pair_vs_max_interevent_time_%s.png' % fig_comment
pyplot.savefig(figname)
# Similar plots, against long term rates
pyplot.clf()
ax = pyplot.subplot(111)
pyplot.errorbar(mean_ltr, min_interevent_times,
yerr = min_interevent_times_bounds,
ecolor = '0.3',
elinewidth=0.7,
linestyle="None",
zorder=1)
pyplot.errorbar(mean_ltr, min_interevent_times,
xerr = ltr_bounds,
ecolor = '0.3',
elinewidth=0.7,
linestyle="None",
zorder=1)
pyplot.scatter(mean_ltr, min_interevent_times,
marker='s', c=colours, s=25, zorder=2)
ax.set_xlabel('Long-term rate')
ax.set_ylabel('Minimum interevent time')
ax.set_xscale('log')
ax.set_yscale('log')
# Label low-slip rate faults
for i, txt in enumerate(names):
if max_interevent_times[i] > 10 and annotate_plots:
ax.annotate(txt[:4],
(mean_ltr[i], min_interevent_times[i]),
fontsize=8)
# Linear fit only bottom end of data
indices = np.argwhere(mean_ltr > 2e-4).flatten()
lf = np.polyfit(np.log10(mean_ltr[indices]),
np.log10(min_interevent_times[indices]), 1)
xvals_short = np.arange(5e-4, 1e-2, 1e-4)
log_yvals = lf[0]*np.log10(xvals_short) + lf[1]
yvals = np.power(10, log_yvals)
pyplot.plot(xvals_short, yvals)
# Add formula for linear fit to low-end of data
txt = 'Log(Y) = %.2fLog(x) + %.2f' % (lf[0], lf[1])
ax.annotate(txt, (1e-4, 10000))
figname = 'min_interevent_time_vs_ltr_%s.png' % fig_comment
pyplot.savefig(figname)
# Plot long term rate against minimum pair
pyplot.clf()
ax = pyplot.subplot(111)
pyplot.errorbar(mean_ltr, min_paired_interevent_times,
yerr = min_paired_interevent_times_bounds,
ecolor = '0.3',
elinewidth=0.7,
linestyle="None",
zorder=1)
pyplot.errorbar(mean_ltr, min_paired_interevent_times,
xerr = ltr_bounds,
ecolor = '0.3',
elinewidth=0.7,
linestyle="None",
zorder=1)
pyplot.scatter(mean_ltr, min_paired_interevent_times,
marker='s', c=colours, s=25, zorder=2)
ax.set_xlabel('Long-term rate')
ax.set_ylabel('Minimum interevent time \n(mean of two shortest consecutive interevent times)')
ax.set_xscale('log')
ax.set_yscale('log')
# Label low-slip rate faults
for i, txt in enumerate(names):
if max_interevent_times[i] > 10 and annotate_plots:
ax.annotate(txt[:4],
(mean_ltr[i], min_paired_interevent_times[i]),
fontsize=8)
# Linear fit only bottom end of data
indices = np.argwhere(mean_ltr > 2e-4).flatten()
lf = np.polyfit(np.log10(mean_ltr[indices]),
np.log10(min_paired_interevent_times[indices]), 1)
xvals_short = np.arange(5e-4, 1e-2, 1e-4)
log_yvals = lf[0]*np.log10(xvals_short) + lf[1]
yvals = np.power(10, log_yvals)
pyplot.plot(xvals_short, yvals)
# Add formula for linear fit to low-end of data
txt = 'Log(Y) = %.2fLog(x) + %.2f' % (lf[0], lf[1])
print(txt)
ax.annotate(txt, (1e-4, 10000))
figname = 'min_pair_vs_ltr_%s.png' % fig_comment
pyplot.savefig(figname)
# Plot long term rate against maximum interevent time
pyplot.clf()
ax = pyplot.subplot(111)
pyplot.errorbar(mean_ltr, max_interevent_times,
yerr = max_interevent_times_bounds,
ecolor = '0.3',
elinewidth=0.7,
linestyle="None",
zorder=1)
pyplot.errorbar(mean_ltr, max_interevent_times,
xerr = ltr_bounds,
ecolor = '0.3',
elinewidth=0.7,
linestyle="None",
zorder=1)
pyplot.scatter(mean_ltr, max_interevent_times,
marker='s', c=plot_colours, s=25, zorder=2)
ax.set_xlabel('Long-term rate')
ax.set_ylabel('Maximum interevent time')
ax.set_xscale('log')
ax.set_yscale('log')
# Label low-slip rate faults
for i, txt in enumerate(names):
if max_interevent_times[i] > 10 and annotate_plots:
ax.annotate(txt[:4],
(mean_ltr[i], max_interevent_times[i]),
fontsize=8)
# Linear fit only bottom end of data
indices = np.argwhere(mean_ltr > 2e-10).flatten() # All data for now
lf = np.polyfit(np.log10(mean_ltr[indices]),
np.log10(max_interevent_times[indices]), 1)
xvals_short = np.arange(2e-6, 1e-2, 1e-6)
log_yvals = lf[0]*np.log10(xvals_short) + lf[1]
yvals = np.power(10, log_yvals)
pyplot.plot(xvals_short, yvals)
# Add formula for linear fit to low-end of data
txt = 'Log(Y) = %.2fLog(x) + %.2f' % (lf[0], lf[1])
print(txt)
ax.annotate(txt, (1e-4, 100000))
figname = 'max_interevent_time_vs_ltr_%s.png' % fig_comment
pyplot.savefig(figname)
# Now plot ratios against long term rates
pyplot.clf()
ax = pyplot.subplot(111)
pyplot.errorbar(mean_ltr, ratio_min_pair_max,
yerr = ratio_min_pair_max_bounds,
ecolor = '0.3',
elinewidth=0.7,
linestyle="None",
zorder=1)
pyplot.errorbar(mean_ltr, ratio_min_pair_max,
xerr = ltr_bounds,
ecolor = '0.3',
elinewidth=0.7,
linestyle="None",
zorder=1)
pyplot.scatter(mean_ltr, ratio_min_pair_max,
marker='s', c=plot_colours, s=25, zorder=2)
ax.set_xlabel('Long-term rate')
ax.set_ylabel('Minimum pair interevent time: maximum interevent time')
ax.set_xscale('log')
ax.set_yscale('log')
# Label low-slip rate faults
for i, txt in enumerate(names):
if max_interevent_times[i] > 10 and annotate_plots:
ax.annotate(txt[:4],
(mean_ltr[i], ratio_min_pair_max[i]),
fontsize=8)
# Linear fit high and low long term rate data separately
indices = np.argwhere(mean_ltr > 4e-4).flatten()
indices_slow_faults = np.argwhere(mean_ltr <= 4e-4).flatten()
lf = np.polyfit(np.log10(mean_ltr[indices]),
np.log10(ratio_min_pair_max[indices]), 1)
xvals_short = np.arange(2e-4, 5e-2, 1e-4)
log_yvals = lf[0]*np.log10(xvals_short) + lf[1]
yvals = np.power(10, log_yvals)
pyplot.plot(xvals_short, yvals, c='k')
# Add formula for linear fit to low-end of data
txt = 'Log(Y) = %.2fLog(x) + %.2f' % (lf[0], lf[1])
print(txt)
ax.annotate(txt, (5e-4, 1e-2))
# Slow long-term rates
print('At if statement')
if len(indices_slow_faults) > 0:
print('Plotting slow faults')
lf = np.polyfit(np.log10(mean_ltr[indices_slow_faults]),
np.log10(ratio_min_pair_max[indices_slow_faults]), 1)
xvals_short = np.arange(2e-6, 4e-4, 1e-6)
log_yvals = lf[0]*np.log10(xvals_short) + lf[1]
yvals = np.power(10, log_yvals)
pyplot.plot(xvals_short, yvals, c='k')
# Add formula for linear fit to low-end of data
txt = 'Log(Y) = %.2fLog(x) + %.2f' % (lf[0], lf[1])
print(txt)
ax.annotate(txt, (1e-5, 5e-3))
figname = 'min_pair_max_ratio_vs_ltr_%s.png' % fig_comment
pyplot.savefig(figname)
# Now plot ratios against long term rates
pyplot.clf()
ax = pyplot.subplot(111)
pyplot.errorbar(mean_ltr, ratio_min_max,
yerr = ratio_min_max_bounds,
ecolor = '0.3',
elinewidth=0.7,
linestyle="None",
zorder=1)
pyplot.errorbar(mean_ltr, ratio_min_max,
xerr = ltr_bounds,
ecolor = '0.3',
elinewidth=0.7,
linestyle="None", zorder=1)
pyplot.scatter(mean_ltr, ratio_min_max,
marker = 's', c=plot_colours, s=25, zorder=2)
ax.set_xlabel('Long-term rate')
ax.set_ylabel('Minimum interevent time: maximum interevent time')
ax.set_xscale('log')
ax.set_yscale('log')
# Label low-slip rate faults
for i, txt in enumerate(names):
if max_interevent_times[i] > 10 and annotate_plots:
ax.annotate(txt[:4],
(mean_ltr[i], ratio_min_max[i]),
fontsize=8)
# Linear fit only bottom end of data
indices = np.argwhere(mean_ltr > 9e-5).flatten()
indices_slow_faults = np.argwhere(mean_ltr <= 9e-5).flatten()
lf = np.polyfit(np.log10(mean_ltr[indices]),
np.log10(ratio_min_max[indices]), 1)
# Now just plot as constant mean value
lf[0] = 0
lf[1] = np.mean(np.log10(ratio_min_max[indices]))
xvals_short = np.arange(3.46e-5, 1e-2, 1e-4)
log_yvals = lf[0]*np.log10(xvals_short) + lf[1]
yvals = np.power(10, log_yvals)
pyplot.plot(xvals_short, yvals, c='k')
# Add formula for linear fit to low-end of data
txt = 'Log(Y) = {:4.2f}Log(x) {:=+6.2f}'.format(lf[0], lf[1])
print(txt)
ax.annotate(txt, (1e-4, 1e-3))
# Slow long-term rates
if len(indices_slow_faults) > 0:
lf = np.polyfit(np.log10(mean_ltr[indices_slow_faults]),
np.log10(ratio_min_max[indices_slow_faults]), 1)
xvals_short = np.arange(2e-6, 3.47e-5, 1e-6)
log_yvals = lf[0]*np.log10(xvals_short) + lf[1]
yvals = np.power(10, log_yvals)
pyplot.plot(xvals_short, yvals, c='k')
# Add formula for linear fit to low-end of data
# txt = 'Log(Y) = %.2fLog(x) %+.2f' % (lf[0], lf[1])
txt = 'Log(Y) = {:4.2f}Log(x) {:=+6.2f}'.format(lf[0], lf[1])
print(txt)
ax.annotate(txt, (3e-6, 8e-1))
figname = 'min_max_ratio_vs_ltr_%s.png' % fig_comment
pyplot.savefig(figname)
#############################################
# Make multipanel figure plot
pyplot.clf()
fig = pyplot.figure(1)
# set up subplot grid
gridspec.GridSpec(3, 2)
#First plot
pyplot.subplot2grid((3, 2), (0,0), colspan=1, rowspan=1)
ax = pyplot.gca()
# Plot burstiness against mean ltr
pyplot.errorbar(mean_ltr, mean_bs,
xerr = ltr_bounds,
ecolor = '0.3',
elinewidth=0.5,
linestyle="None",
zorder=1)
pyplot.errorbar(mean_ltr, mean_bs,
yerr = burstiness_bounds,
ecolor = '0.3',
elinewidth=0.5,
linestyle="None",
zorder=1)
pyplot.scatter(mean_ltr, mean_bs, marker = 's', c=plot_colours,
s=18, zorder=2)
ax.set_ylim([-1, 1])
ax.set_xlim([1./1000000, 1./40])
pyplot.plot([1./1000000, 1./40], [0, 0], linestyle='dashed', linewidth=1, c='0.5')
ax.set_xscale('log')
ax.set_xlabel('Long-term rate (events per year)', fontsize=10)
ax.set_ylabel('B', fontsize=10)
# Add a legend using some dummy data
line1 = ax.scatter([1], [100], marker = 's', c = 'r', s=18)
line2 = ax.scatter([1], [100], marker = 's', c = 'g', s=18)
line3 = ax.scatter([1], [100], marker = 's', c = 'b', s=18)
pyplot.legend((line1, line2, line3), ('Normal', 'Strike slip', 'Reverse'))
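    # Note: bilinear_reg_fix_zero_slope and bilinear_reg_zero_slope are defined elsewhere
    # in this script. A minimal sketch of the assumed fixed-hinge model (zero slope above
    # the hinge hxfix, given in log10 units; beta = [slope, intercept]):
    #   def bilinear_reg_fix_zero_slope(beta, x):
    #       yhinge = beta[0]*hxfix + beta[1]
    #       return np.where(x < hxfix, beta[0]*x + beta[1], yhinge)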
# Bilinear fixed hinge and constant slope ODR
hxfix = np.log10(2e-4)
bilin_hxfix_cons_slope = odrpack.Model(bilinear_reg_fix_zero_slope)
data = odrpack.RealData(np.log10(mean_ltr), mean_bs,
sx=np.log10(np.sqrt(long_term_rate_stds)), sy=np.sqrt(burstiness_stds))
odr = odrpack.ODR(data, bilin_hxfix_cons_slope, beta0=[-3, -1.0])
odr.set_job(fit_type=0)
out = odr.run()
out.pprint()
a = out.beta[0]
b = out.beta[1]
xvals = np.arange(1e-6, 2e-2, 1e-5)
yrng = a*np.log10(xvals) + b
ylevel = a*hxfix + b
print('ylevel hxfix zero slope', ylevel)
print(10**ylevel)
idx = xvals > 10**hxfix
yrng[idx] = (ylevel)
print('yrng', yrng)
print('hx', hxfix)
pyplot.plot(xvals, yrng, c='0.4')
txt = 'y = {:4.2f}Log(x) {:=+6.2f}, x < {:3.1E}'.format(a, b, np.power(10, hxfix))
ax.annotate(txt, (1.5e-6, -0.85), fontsize=8)
txt = 'y = {:=+4.2f}, x >= {:3.1E}'.format(ylevel, np.power(10, hxfix))
ax.annotate(txt, (1.5e-6, -0.95), fontsize=8)
ax.annotate('a)', (-0.23, 0.98), xycoords = 'axes fraction', fontsize=10)
# Add second plot (Memory vs LTR)
pyplot.subplot2grid((3, 2), (0,1), colspan=1, rowspan=1)
ax = pyplot.gca()
pyplot.errorbar(mean_ltr_mem, mean_mems[inds],
xerr = ltr_bounds_mem,
ecolor = '0.3',
elinewidth=0.5,
linestyle="None",
zorder=1)
pyplot.errorbar(mean_ltr_mem, mean_mems[inds],
yerr = memory_bounds_min,
ecolor = '0.3',
elinewidth=0.5,
linestyle="None",
zorder=1)
pyplot.scatter(mean_ltr_mem, mean_mems[inds], marker = 's', c=plot_colours_mem,
s=18, zorder=2)
for i, txt in enumerate(names):
if max_interevent_times[i] > 10 and annotate_plots:
ax.annotate(txt[:4],
(mean_ltr[i], mean_mems[i]),
fontsize=8)
#ax.set_xlim([-1, 1])
ax.set_xlim([1./1000000, 1./40])
ax.set_ylim([-1, 1])
pyplot.plot([1./1000000, 1./40], [0, 0], linestyle='dashed', linewidth=1, c='0.5')
ax.set_xscale('log')
ax.set_xlabel('Long-term rate (events per year)', fontsize=10)
ax.set_ylabel('M', fontsize=10)
def linear_func(B, x):
return B[0]*x + B[1]
# Bilinear fixed hinge and constant slope ODR
hxfix = np.log10(2e-4)
bilin_hxfix_cons_slope = odrpack.Model(bilinear_reg_fix_zero_slope)
long_term_rate_stds_mem = long_term_rate_stds[inds]
data = odrpack.RealData(np.log10(mean_ltr_mem), mean_mems[inds],
sx=np.log10(np.sqrt(long_term_rate_stds_mem)), sy=np.sqrt(memory_stds_min))
odr = odrpack.ODR(data, bilin_hxfix_cons_slope, beta0=[-3, -1.0])
odr.set_job(fit_type=0)
out = odr.run()
out.pprint()
a = out.beta[0]
b = out.beta[1]
yrng = a*np.log10(xvals) + b
ylevel = a*hxfix + b
print('ylevel hxfix zero slope', ylevel)
print(ylevel)
idx = xvals > 10**hxfix
yrng[idx] = (ylevel)
print('yrng', yrng)
print('hx', hxfix)
pyplot.plot(xvals, yrng, c='0.4', linestyle = '--')
txt = 'y = {:4.2f}Log(x) {:=+6.2f}, x < {:3.1E}'.format(a, b, np.power(10, hxfix))
ax.annotate(txt, (1.5e-6, -0.85), fontsize=8)
txt = 'y = {:4.2f}, x >= {:3.1E}'.format(ylevel, np.power(10, hxfix))
ax.annotate(txt, (1.5e-6, -0.95), fontsize=8)
# Linear ODR fit
linear = odrpack.Model(linear_func)
odr = odrpack.ODR(data, linear, beta0=[-1, -1.0,])
odr.set_job(fit_type=0)
out = odr.run()
out.pprint()
a = out.beta[0]
b = out.beta[1]
xvals = np.arange(1.e-4, 1e2, 1e-2)
yrng = a*np.log10(xvals) + b #10**(b + a * xvals)
ax.annotate('b)', (-0.23, 0.98), xycoords = 'axes fraction', fontsize=10)
# Add third plot
pyplot.subplot2grid((3, 2), (1,0), colspan=1, rowspan=1)
ax = pyplot.gca()
mean_bs_mem = mean_bs[inds]
burstiness_bounds_mem = burstiness_bounds.T[inds].T
pyplot.errorbar(mean_mems[inds], mean_bs_mem,
xerr = memory_bounds_min,
ecolor = '0.3',
elinewidth=0.5,
linestyle="None",
zorder=1)
pyplot.errorbar(mean_mems[inds], mean_bs_mem,
yerr = burstiness_bounds_mem,
ecolor = '0.3',
elinewidth=0.5,
linestyle="None",
zorder=1)
pyplot.scatter(mean_mems[inds], mean_bs_mem, marker = 's', c=plot_colours_mem,
s=18, zorder=2)
for i, txt in enumerate(names):
if max_interevent_times[i] > 10 and annotate_plots:
ax.annotate(txt[:7],
(mean_mems[i], mean_bs[i]),
fontsize=8)
ax.set_xlim([-1, 1])
ax.set_ylim([-1, 1])
# Add y = 0, x=0 lines
pyplot.plot([0,0],[-1, 1], linestyle='dashed', linewidth=1, c='0.5')
pyplot.plot([-1,1],[0, 0], linestyle='dashed', linewidth=1, c='0.5')
#Orthogonal linear fit
def linear_func(B, x):
return B[0]*x + B[1]
linear_model = Model(linear_func)
burstiness_stds_mem = burstiness_stds[inds]
data = RealData(np.array(mean_mems[inds]).flatten(),
np.array(mean_bs_mem).flatten(),
sx = np.sqrt(memory_stds.flatten()),
sy = np.sqrt(burstiness_stds_mem.flatten()))
# Set up ODR with the model and data
odr = ODR(data, linear_model, beta0=[1., -1.])
out = odr.run()
out.pprint()
xvals = np.arange(-0.75, 0.75, 0.01)
yvals = linear_func(out.beta, xvals)
pyplot.plot(xvals, yvals, c='0.4')
ax.set_ylabel('B', fontsize=10)
ax.set_xlabel('M', fontsize=10)
    # Add formula for linear fit
    txt = 'y = {:4.2f}x {:=+6.2f}'.format(out.beta[0], out.beta[1])
print(txt)
ax.annotate(txt, (-0.95, 0.8), fontsize=8)
ax.annotate('c)', (-0.23, 0.98), xycoords = 'axes fraction', fontsize=10)
# Add fourth plot
pyplot.subplot2grid((3, 2), (1,1), colspan=1, rowspan=1)
ax = pyplot.gca()
pyplot.errorbar(mean_ltr, max_interevent_times,
yerr = max_interevent_times_bounds,
ecolor = '0.3',
elinewidth=0.5,
linestyle="None",
zorder=1)
pyplot.errorbar(mean_ltr, max_interevent_times,
xerr = ltr_bounds,
ecolor = '0.3',
elinewidth=0.5,
linestyle="None",
zorder=1)
pyplot.scatter(mean_ltr, max_interevent_times,
marker='s', c=plot_colours, s=18, zorder=2)
ax.set_xlabel('Long-term rate (events per year)', fontsize=10)
ax.set_ylabel(r'$\tau_{max}$', fontsize=10)
ax.set_xscale('log')
ax.set_yscale('log')
# Label low-slip rate faults
for i, txt in enumerate(names):
if max_interevent_times[i] > 10 and annotate_plots:
ax.annotate(txt[:4],
(mean_ltr[i], max_interevent_times[i]),
fontsize=8)
indices = np.argwhere(mean_ltr > 2e-10).flatten() # All data for now
lf = np.polyfit(np.log10(mean_ltr[indices]),
np.log10(max_interevent_times[indices]), 1)
xvals_short = np.arange(2e-6, 2e-2, 1e-6)
log_yvals = lf[0]*np.log10(xvals_short) + lf[1]
yvals = np.power(10, log_yvals)
pyplot.plot(xvals_short, yvals, c='0.4')
    # Add formula for linear fit to all data
txt = 'Log(y) = %.2fLog(x) + %.2f' % (lf[0], lf[1])
print(txt)
ax.annotate(txt, (1e-5, 2000000), fontsize=8)
ax.annotate('d)', (-0.23, 0.98), xycoords = 'axes fraction', fontsize=10)
# Add fifth plot
pyplot.subplot2grid((3, 2), (2,0), colspan=1, rowspan=1)
ax = pyplot.gca()
pyplot.errorbar(mean_ltr, ratio_min_max,
yerr = ratio_min_max_bounds,
ecolor = '0.3',
elinewidth=0.5,
linestyle="None",
zorder=1)
pyplot.errorbar(mean_ltr, ratio_min_max,
xerr = ltr_bounds,
ecolor = '0.3',
elinewidth=0.5,
linestyle="None",
zorder=1)
pyplot.scatter(mean_ltr, ratio_min_max,
marker='s', c=plot_colours, s=18, zorder=2)
ax.set_xlabel('Long-term rate (events per year)', fontsize=10)
ax.set_ylabel(r'$\tau_{min}$ / $\tau_{max}$', fontsize=10)
ax.set_xscale('log')
ax.set_yscale('log')
ax.set_xlim([1e-6, 2e-2])
ax.set_ylim([5e-4, 2])
# Bilinear fixed hinge and constant slope ODR
hxfix = np.log10(2e-4)
bilin_hxfix_cons_slope = odrpack.Model(bilinear_reg_fix_zero_slope)
data = odrpack.RealData(np.log10(mean_ltr), np.log10(ratio_min_max),
sx=np.log10(np.sqrt(long_term_rate_stds)), sy=np.log10(np.sqrt(std_ratio_min_max)))
odr = odrpack.ODR(data, bilin_hxfix_cons_slope, beta0=[-3, -1.0])
odr.set_job(fit_type=0)
out = odr.run()
out.pprint()
a = out.beta[0]
b = out.beta[1]
log_y = a*np.log10(xvals) + b
yrng = np.power(10, log_y)
ylevel = np.power(10, (a*hxfix + b))
print('ylevel hxfix zero slope', ylevel)
print(10**ylevel)
idx = xvals > 10**hxfix
yrng[idx] = (ylevel)
print('yrng', yrng)
print('hx', hxfix)
    # Now try inverting for the hinge point
bilin = odrpack.Model(bilinear_reg_zero_slope)
    odr = odrpack.ODR(data, bilin, beta0=[-3, -1.0, -4]) # beta0 entries are starting values
odr.set_job(fit_type=0)
out = odr.run()
out.pprint()
a = out.beta[0]
b = out.beta[1]
hx = out.beta[2]
xvals = np.arange(1.e-6, 2e-2, 1e-6)
log_y = a*np.log10(xvals) + b
yrng = np.power(10, log_y)
ylevel = np.power(10, a*hx + b) #10**(b + a * hx)
print('ylevel', ylevel)
print(10**ylevel)
idx = xvals > 10**hx
yrng[idx] = (ylevel)
print('yrng', yrng)
print('hx', hx)
pyplot.plot(xvals, yrng, c='0.4')
txt = 'Log(y) = {:4.2f}Log(x) {:=+6.2f}, x < {:3.1E}'.format(a, b, np.power(10, hx))
ax.annotate(txt, (1.5e-6, 1.08), fontsize=8)
txt = 'y = {:4.2f}, x >= {:3.1E}'.format(ylevel, np.power(10, hx))
ax.annotate(txt, (1.5e-6, 0.6), fontsize=8)
ax.annotate('e)', (-0.23, 0.98), xycoords = 'axes fraction', fontsize=10)
# Add sixth plot
pyplot.subplot2grid((3, 2), (2,1), colspan=1, rowspan=1)
ax = pyplot.gca()
pyplot.errorbar(mean_ltr, ratio_min_pair_max,
yerr = ratio_min_pair_max_bounds,
ecolor = '0.3',
elinewidth=0.5,
linestyle="None",
zorder=1)
pyplot.errorbar(mean_ltr, ratio_min_pair_max,
xerr = ltr_bounds,
ecolor = '0.3',
elinewidth=0.5,
linestyle="None",
zorder=1)
pyplot.scatter(mean_ltr, ratio_min_pair_max,
marker='s', c=plot_colours, s=18, zorder=2)
ax.set_xlabel('Long-term rate (events per year)', fontsize=10)
ax.set_ylabel(r'$\bar{\tau}_{min(p)}$ / $\tau_{max}$', fontsize=10)
ax.set_xscale('log')
ax.set_yscale('log')
ax.set_xlim([1e-6, 2e-2])
ax.set_ylim([5e-4, 2])
# Bilinear fixed hinge and constant slope ODR
hxfix = np.log10(2e-4)
bilin_hxfix_cons_slope = odrpack.Model(bilinear_reg_fix_zero_slope)
data = odrpack.RealData(np.log10(mean_ltr), np.log10(ratio_min_pair_max),
sx=np.log10(np.sqrt(long_term_rate_stds)),
sy=np.log10(np.sqrt(std_ratio_min_pair_max)))
odr = odrpack.ODR(data, bilin_hxfix_cons_slope, beta0=[-3, -1.0])
odr.set_job(fit_type=0)
out = odr.run()
out.pprint()
a = out.beta[0]
b = out.beta[1]
log_y = a*np.log10(xvals) + b
yrng = np.power(10, log_y)
ylevel = np.power(10, (a*hxfix + b))
print('ylevel hxfix zero slope', ylevel)
print(10**ylevel)
idx = xvals > 10**hxfix
yrng[idx] = (ylevel)
print('yrng', yrng)
print('hx', hxfix)
    # Now try inverting for the hinge point
bilin = odrpack.Model(bilinear_reg_zero_slope)
    odr = odrpack.ODR(data, bilin, beta0=[-3, -1.0, -4]) # beta0 entries are starting values
odr.set_job(fit_type=0)
out = odr.run()
out.pprint()
a = out.beta[0]
b = out.beta[1]
hx = out.beta[2]
xvals = np.arange(1.e-6, 2e-2, 1e-6)
    log_y = a*np.log10(xvals) + b
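    # Assumed continuation, mirroring panel e) above: level the fitted curve off beyond the
    # inverted hinge, plot it, and label the panel (the formula annotations and the
    # figure-saving call would follow the same pattern as the earlier panels).
    yrng = np.power(10, log_y)
    ylevel = np.power(10, a*hx + b)
    idx = xvals > 10**hx
    yrng[idx] = (ylevel)
    pyplot.plot(xvals, yrng, c='0.4')
    ax.annotate('f)', (-0.23, 0.98), xycoords = 'axes fraction', fontsize=10)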
import cv2
import numpy as np
import argparse
import glob
def jacobian(x_shape, y_shape):
x = np.array(range(x_shape))
y = np.array(range(y_shape))
    x, y = np.meshgrid(x, y)
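    # Assumed completion: per-pixel 2x6 Jacobian of an affine warp w.r.t. its six
    # parameters, dW/dp = [[x, 0, y, 0, 1, 0], [0, x, 0, y, 0, 1]], stacked over the grid.
    ones, zeros = np.ones_like(x, dtype=float), np.zeros_like(x, dtype=float)
    return np.stack((np.stack((x, zeros, y, zeros, ones, zeros), axis=2),
                     np.stack((zeros, x, zeros, y, zeros, ones), axis=2)), axis=2)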
import numpy as np
import scipy.stats
from ..base_parameters import (
ParamHelper, PriorHelper, PrecondHelper,
get_value_func, get_hyperparam_func, get_dim_func,
set_value_func, set_hyperparam_func,
)
from .._utils import (
normal_logpdf,
matrix_normal_logpdf,
pos_def_mat_inv,
varp_stability_projection,
tril_vector_to_mat,
)
import logging
logger = logging.getLogger(name=__name__)
## Implementations of Vector, Square, Rectangular Parameters
# Single Square
class VectorParamHelper(ParamHelper):
def __init__(self, name='mu', dim_names=None):
self.name = name
self.dim_names = ['n'] if dim_names is None else dim_names
return
def set_var(self, param, **kwargs):
if self.name in kwargs:
            if np.ndim(kwargs[self.name]) != 1:
                raise ValueError("{} must be vector".format(self.name))
            n, = np.shape(kwargs[self.name])
param.var_dict[self.name] = np.array(kwargs[self.name]).astype(float)
param._set_check_dim(**{self.dim_names[0]: n})
else:
raise ValueError("{} not provided".format(self.name))
return
def project_parameters(self, param, **kwargs):
name_kwargs = kwargs.get(self.name, {})
if name_kwargs.get('fixed') is not None:
param.var_dict[self.name] = name_kwargs['fixed'].copy()
return
def from_dict_to_vector(self, vector_list, var_dict, **kwargs):
vector_list.append(var_dict[self.name].flatten())
return
def from_vector_to_dict(self, var_dict, vector, vector_index, **kwargs):
n = kwargs[self.dim_names[0]]
mu = np.reshape(vector[vector_index:vector_index+n], (n))
var_dict[self.name] = mu
return vector_index+n
def get_properties(self):
properties = {}
properties[self.name] = property(
fget=get_value_func(self.name),
fset=set_value_func(self.name),
doc="{0} is a {1} vector".format(
self.name, self.dim_names[0]),
)
for dim_name in self.dim_names:
properties[dim_name] = property(
fget=get_dim_func(dim_name),
)
return properties
class VectorPriorHelper(PriorHelper):
def __init__(self, name='mu', dim_names=None, var_row_name=None):
self.name = name
self._mean_name = 'mean_{0}'.format(name)
self._var_col_name = 'var_col_{0}'.format(name)
self._var_row_name = var_row_name
self._lt_vec_name = 'L{0}inv_vec'.format(var_row_name)
self.dim_names = ['n'] if dim_names is None else dim_names
return
def set_hyperparams(self, prior, **kwargs):
if self._mean_name in kwargs:
            n, = np.shape(kwargs[self._mean_name])
else:
raise ValueError("{} must be provided".format(self._mean_name))
if self._var_col_name in kwargs:
if not np.isscalar(kwargs[self._var_col_name]):
raise ValueError("{} must be scalar".format(self._var_col_name))
else:
raise ValueError("{} must be provided".format(self._var_col_name))
prior._set_check_dim(**{self.dim_names[0]: n})
prior.hyperparams[self._mean_name] = kwargs[self._mean_name]
prior.hyperparams[self._var_col_name] = kwargs[self._var_col_name]
return
def sample_prior(self, prior, var_dict, **kwargs):
mean_mu = prior.hyperparams[self._mean_name]
var_col_mu = prior.hyperparams[self._var_col_name]
if self._var_row_name is not None:
if self._lt_vec_name in var_dict:
LQinv = tril_vector_to_mat(var_dict[self._lt_vec_name])
Qinv = LQinv.dot(LQinv.T) + \
1e-9*np.eye(prior.dim[self.dim_names[0]])
else:
raise ValueError("Missing {}\n".format(self._lt_vec_name) +
"Perhaps {} must be earlier in _prior_helper_list".format(
self._var_row_name)
)
else:
Qinv = np.eye(prior.dim[self.dim_names[0]])
var_dict[self.name] = np.random.multivariate_normal(
mean=mean_mu,
cov=var_col_mu*pos_def_mat_inv(Qinv),
)
return
def sample_posterior(self, prior, var_dict, sufficient_stat, **kwargs):
mean_mu = prior.hyperparams[self._mean_name]
var_col_mu = prior.hyperparams[self._var_col_name]
if self._var_row_name is not None:
if self._lt_vec_name in var_dict:
LQinv = tril_vector_to_mat(var_dict[self._lt_vec_name])
Qinv = LQinv.dot(LQinv.T) + \
1e-9*np.eye(prior.dim[self.dim_names[0]])
else:
raise ValueError("Missing {}\n".format(self._lt_vec_name) +
"Perhaps {} must be earlier in _prior_helper_list".format(
self._var_row_name)
)
else:
            Qinv = np.eye(prior.dim[self.dim_names[0]])
S_prevprev = var_col_mu**-1 + \
sufficient_stat[self.name]['S_prevprev']
S_curprev = mean_mu * var_col_mu**-1 + \
sufficient_stat[self.name]['S_curprev']
post_mean_mu = S_curprev/S_prevprev
var_dict[self.name] = np.random.multivariate_normal(
mean=post_mean_mu,
cov=pos_def_mat_inv(Qinv)/S_prevprev,
)
return
def logprior(self, prior, logprior, parameters, **kwargs):
mean_mu = prior.hyperparams[self._mean_name]
var_col_mu = prior.hyperparams[self._var_col_name]
if self._var_row_name is not None:
LQinv = tril_vector_to_mat(parameters.var_dict[self._lt_vec_name])
else:
LQinv = np.eye(prior.dim[self.dim_names[0]])
logprior += normal_logpdf(parameters.var_dict[self.name],
mean=mean_mu,
                                  Lprec=var_col_mu**-0.5 * LQinv,
)
return logprior
def grad_logprior(self, prior, grad, parameters, **kwargs):
mean_mu = prior.hyperparams[self._mean_name]
var_col_mu = prior.hyperparams[self._var_col_name]
mu = getattr(parameters, self.name)
if self._var_row_name is not None:
Qinv = getattr(parameters, '{}inv'.format(self._var_row_name))
else:
Qinv = np.eye(prior.dim[self.dim_names[0]])
grad[self.name] = -1.0 * np.dot(var_col_mu**-1 * Qinv, mu - mean_mu)
return
def get_prior_kwargs(self, prior_kwargs, parameters, **kwargs):
var = kwargs['var']
mu = getattr(parameters, self.name)
if kwargs.get('from_mean', False):
mean_mu = mu.copy()
else:
mean_mu = np.zeros_like(mu)
var_col_mu = var
prior_kwargs[self._mean_name] = mean_mu
prior_kwargs[self._var_col_name] = var_col_mu
return
def get_default_kwargs(self, default_kwargs, **kwargs):
n = kwargs[self.dim_names[0]]
var = kwargs['var']
mean_mu = np.zeros((n))
var_col_mu = var
default_kwargs[self._mean_name] = mean_mu
default_kwargs[self._var_col_name] = var_col_mu
return
class VectorPrecondHelper(PrecondHelper):
def __init__(self, name='mu', dim_names=None, var_row_name='Q'):
self.name = name
self._var_row_name = var_row_name
self.dim_names = ['n'] if dim_names is None else dim_names
return
def precondition(self, preconditioner,
precond_grad, grad, parameters, **kwargs):
Q = getattr(parameters, self._var_row_name)
precond_grad[self.name] = np.dot(Q, grad[self.name])
return
def precondition_noise(self, preconditioner,
noise, parameters, **kwargs):
LQinv = getattr(parameters, "L{}inv".format(self._var_row_name))
noise[self.name] = np.linalg.solve(LQinv.T,
np.random.normal(loc=0, size=(LQinv.shape[0]))
)
return
def correction_term(self, preconditioner, correction, parameters, **kwargs):
correction[self.name] = np.zeros_like(getattr(parameters, self.name),
dtype=float)
return
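# Note on the preconditioning above (reading inferred from the code): the gradient is
# rescaled by Q = (LQinv LQinv^T)^{-1}, and the injected noise solves LQinv^T z = eps with
# eps ~ N(0, I), i.e. z ~ N(0, Q), so the noise covariance matches the gradient scaling.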
# Multiple Square
class VectorsParamHelper(ParamHelper):
def __init__(self, name='mu', dim_names=None):
self.name = name
self.dim_names = ['n', 'num_states'] if dim_names is None else dim_names
return
def set_var(self, param, **kwargs):
if self.name in kwargs:
num_states, n = np.shape(kwargs[self.name])
param.var_dict[self.name] = np.array(kwargs[self.name]).astype(float)
param._set_check_dim(**{self.dim_names[0]: n,
self.dim_names[1]: num_states})
else:
raise ValueError("{} not provided".format(self.name))
return
def project_parameters(self, param, **kwargs):
name_kwargs = kwargs.get(self.name, {})
if name_kwargs.get('fixed') is not None:
param.var_dict[self.name] = name_kwargs['fixed'].copy()
return
def from_dict_to_vector(self, vector_list, var_dict, **kwargs):
vector_list.append(var_dict[self.name].flatten())
return
def from_vector_to_dict(self, var_dict, vector, vector_index, **kwargs):
n = kwargs[self.dim_names[0]]
num_states = kwargs[self.dim_names[1]]
mu = np.reshape(vector[vector_index:vector_index+num_states*n],
(num_states, n))
var_dict[self.name] = mu
return vector_index+num_states*n
def get_properties(self):
properties = {}
properties[self.name] = property(
fget=get_value_func(self.name),
fset=set_value_func(self.name),
doc="{0} is {2} {1} vectors".format(
self.name, self.dim_names[0], self.dim_names[1]),
)
for dim_name in self.dim_names:
properties[dim_name] = property(
fget=get_dim_func(dim_name),
)
return properties
class VectorsPriorHelper(PriorHelper):
def __init__(self, name='mu', dim_names=None, var_row_name=None):
self.name = name
self._mean_name = 'mean_{0}'.format(name) # num_states by n
self._var_col_name = 'var_col_{0}'.format(name) # num_states by n
self._var_row_name = var_row_name
self._lt_vec_name = 'L{0}inv_vec'.format(var_row_name)
self.dim_names = ['n', 'num_states'] if dim_names is None else dim_names
return
def set_hyperparams(self, prior, **kwargs):
if self._mean_name in kwargs:
num_states, n = np.shape(kwargs[self._mean_name])
else:
raise ValueError("{} must be provided".format(self._mean_name))
if self._var_col_name in kwargs:
num_states2 = np.size(kwargs[self._var_col_name])
else:
raise ValueError("{} must be provided".format(self._var_col_name))
if (num_states != num_states2):
raise ValueError("prior dimensions don't match")
prior._set_check_dim(**{self.dim_names[0]: n,
self.dim_names[1]: num_states})
prior.hyperparams[self._mean_name] = kwargs[self._mean_name]
prior.hyperparams[self._var_col_name] = kwargs[self._var_col_name]
return
def sample_prior(self, prior, var_dict, **kwargs):
mean_mu = prior.hyperparams[self._mean_name]
var_col_mu = prior.hyperparams[self._var_col_name]
if self._var_row_name is not None:
if self._lt_vec_name in var_dict:
LQinvs = np.array([tril_vector_to_mat(LQinv_vec_k)
for LQinv_vec_k in var_dict[self._lt_vec_name]])
Qinvs = np.array([LQinv_k.dot(LQinv_k.T) + \
1e-9*np.eye(prior.dim[self.dim_names[0]])
for LQinv_k in LQinvs])
else:
raise ValueError("Missing {}\n".format(self._lt_vec_name) +
"Perhaps {} must be earlier in _prior_helper_list".format(
self._var_row_name)
)
else:
Qinvs = np.array([np.eye(prior.dim[self.dim_names[0]])
                    for _ in range(prior.dim[self.dim_names[1]])])
mus = [None for k in range(prior.dim[self.dim_names[1]])]
for k in range(len(mus)):
mus[k] = np.random.multivariate_normal(
mean=mean_mu[k],
cov=var_col_mu[k]*pos_def_mat_inv(Qinvs[k]),
)
var_dict[self.name] = np.array(mus)
return
def sample_posterior(self, prior, var_dict, sufficient_stat, **kwargs):
mean_mu = prior.hyperparams[self._mean_name]
var_col_mu = prior.hyperparams[self._var_col_name]
num_states, n = np.shape(mean_mu)
if self._var_row_name is not None:
if self._lt_vec_name in var_dict:
LQinvs = np.array([tril_vector_to_mat(LQinv_vec_k)
for LQinv_vec_k in var_dict[self._lt_vec_name]])
Qinvs = np.array([LQinv_k.dot(LQinv_k.T) + 1e-9*np.eye(n)
for LQinv_k in LQinvs])
else:
raise ValueError("Missing {}\n".format(self._lt_vec_name) +
"Perhaps {} must be earlier in _prior_helper_list".format(
self._var_row_name)
)
else:
Qinvs = np.array([np.eye(n) for _ in range(num_states)])
mus = [None for k in range(num_states)]
for k in range(len(mus)):
S_prevprev = var_col_mu[k]**-1 + \
sufficient_stat[self.name]['S_prevprev'][k]
S_curprev = mean_mu[k] * var_col_mu[k]**-1 + \
sufficient_stat[self.name]['S_curprev'][k]
post_mean_mu_k = S_curprev/S_prevprev
mus[k] = np.random.multivariate_normal(
mean=post_mean_mu_k,
cov=pos_def_mat_inv(Qinvs[k])/S_prevprev,
)
var_dict[self.name] = np.array(mus)
return
def logprior(self, prior, logprior, parameters, **kwargs):
mean_mu = prior.hyperparams[self._mean_name]
var_col_mu = prior.hyperparams[self._var_col_name]
num_states, n = np.shape(mean_mu)
if self._var_row_name is not None:
LQinvs = np.array([tril_vector_to_mat(LQinv_vec_k)
for LQinv_vec_k in parameters.var_dict[self._lt_vec_name]])
else:
LQinvs = np.array([np.eye(n)
for _ in range(num_states)])
for mu_k, mean_mu_k, var_col_mu_k, LQinv_k in zip(
parameters.var_dict[self.name], mean_mu, var_col_mu, LQinvs):
logprior += normal_logpdf(mu_k,
mean=mean_mu_k,
Lprec=var_col_mu_k**-0.5 * LQinv_k,
)
return logprior
def grad_logprior(self, prior, grad, parameters, **kwargs):
mu = parameters.var_dict[self.name]
mean_mu = prior.hyperparams[self._mean_name]
var_col_mu = prior.hyperparams[self._var_col_name]
num_states, n = np.shape(mean_mu)
if self._var_row_name is not None:
Qinvs = getattr(parameters, '{}inv'.format(self._var_row_name))
else:
Qinvs = np.array([np.eye(n)
for _ in range(num_states)])
grad[self.name] = np.array([
-1.0 * np.dot(var_col_mu[k]**-1 * Qinvs[k], mu[k] - mean_mu[k])
for k in range(num_states)])
return
def get_prior_kwargs(self, prior_kwargs, parameters, **kwargs):
var = kwargs['var']
mu = getattr(parameters, self.name)
if kwargs.get('from_mean', False):
mean_mu = mu.copy()
else:
mean_mu = np.zeros_like(mu)
var_col_mu = np.array([
            var for _ in range(mu.shape[0])
])
prior_kwargs[self._mean_name] = mean_mu
prior_kwargs[self._var_col_name] = var_col_mu
return
def get_default_kwargs(self, default_kwargs, **kwargs):
n = kwargs[self.dim_names[0]]
num_states = kwargs[self.dim_names[1]]
var = kwargs['var']
mean_mu = np.zeros((num_states, n))
var_col_mu = np.ones((num_states))*var
default_kwargs[self._mean_name] = mean_mu
default_kwargs[self._var_col_name] = var_col_mu
return
class VectorsPrecondHelper(PrecondHelper):
def __init__(self, name='mu', dim_names=None, var_row_name='Q'):
self.name = name
self._var_row_name = var_row_name
self.dim_names = ['n', 'num_states'] if dim_names is None else dim_names
return
def precondition(self, preconditioner,
precond_grad, grad, parameters, **kwargs):
Q = getattr(parameters, self._var_row_name)
precond_grad[self.name] = np.array([
np.dot(Q[k], grad[self.name][k])
for k in range(Q.shape[0])
])
return
def precondition_noise(self, preconditioner,
noise, parameters, **kwargs):
LQinv = getattr(parameters, "L{}inv".format(self._var_row_name))
noise[self.name] = np.array([
np.linalg.solve(LQinv[k].T,
np.random.normal(loc=0, size=LQinv.shape[-1])
)
for k in range(LQinv.shape[0])
])
return
def correction_term(self, preconditioner, correction, parameters, **kwargs):
correction[self.name] = np.zeros_like(getattr(parameters, self.name),
dtype=float)
return
# Single Square
class SquareMatrixParamHelper(ParamHelper):
def __init__(self, name='A', dim_names=None):
self.name = name
self.dim_names = ['n'] if dim_names is None else dim_names
return
def set_var(self, param, **kwargs):
if self.name in kwargs:
n, n2 = np.shape(kwargs[self.name])
if n != n2:
raise ValueError("{} must be square matrices".format(self.name))
param.var_dict[self.name] = np.array(kwargs[self.name]).astype(float)
param._set_check_dim(**{self.dim_names[0]: n})
else:
raise ValueError("{} not provided".format(self.name))
return
def project_parameters(self, param, **kwargs):
name_kwargs = kwargs.get(self.name, {})
if name_kwargs.get('thresh', True):
A = param.var_dict[self.name]
A = varp_stability_projection(A,
eigenvalue_cutoff=name_kwargs.get(
'eigenvalue_cutoff', 0.9999),
var_name=self.name,
logger=logger)
param.var_dict[self.name] = A
if name_kwargs.get('fixed') is not None:
param.var_dict[self.name] = name_kwargs['fixed'].copy()
return
def from_dict_to_vector(self, vector_list, var_dict, **kwargs):
vector_list.append(var_dict[self.name].flatten())
return
def from_vector_to_dict(self, var_dict, vector, vector_index, **kwargs):
n = kwargs[self.dim_names[0]]
A = np.reshape(vector[vector_index:vector_index+n**2], (n, n))
var_dict[self.name] = A
return vector_index+n**2
def get_properties(self):
properties = {}
properties[self.name] = property(
fget=get_value_func(self.name),
fset=set_value_func(self.name),
doc="{0} is a {1} by {1} matrix".format(
self.name, self.dim_names[0]),
)
for dim_name in self.dim_names:
properties[dim_name] = property(
fget=get_dim_func(dim_name),
)
return properties
class SquareMatrixPriorHelper(PriorHelper):
def __init__(self, name='A', dim_names=None, var_row_name=None):
self.name = name
self._mean_name = 'mean_{0}'.format(name)
self._var_col_name = 'var_col_{0}'.format(name)
self._var_row_name = var_row_name
self._lt_vec_name = 'L{0}inv_vec'.format(var_row_name)
self.dim_names = ['n'] if dim_names is None else dim_names
return
def set_hyperparams(self, prior, **kwargs):
if self._mean_name in kwargs:
n, n2 = np.shape(kwargs[self._mean_name])
else:
raise ValueError("{} must be provided".format(self._mean_name))
if self._var_col_name in kwargs:
n3 = np.size(kwargs[self._var_col_name])
else:
raise ValueError("{} must be provided".format(self._var_col_name))
if n != n2:
raise ValueError("{} must be square".format(self._mean_name))
if n != n3:
raise ValueError("prior dimensions don't match")
prior._set_check_dim(**{self.dim_names[0]: n})
prior.hyperparams[self._mean_name] = kwargs[self._mean_name]
prior.hyperparams[self._var_col_name] = kwargs[self._var_col_name]
return
def sample_prior(self, prior, var_dict, **kwargs):
mean_A = prior.hyperparams[self._mean_name]
var_col_A = prior.hyperparams[self._var_col_name]
if self._var_row_name is not None:
if self._lt_vec_name in var_dict:
LQinv = tril_vector_to_mat(var_dict[self._lt_vec_name])
Qinv = LQinv.dot(LQinv.T) + \
1e-9*np.eye(prior.dim[self.dim_names[0]])
else:
raise ValueError("Missing {}\n".format(self._lt_vec_name) +
"Perhaps {} must be earlier in _prior_helper_list".format(
self._var_row_name)
)
else:
Qinv = np.eye(prior.dim[self.dim_names[0]])
var_dict[self.name] = scipy.stats.matrix_normal(
mean=mean_A,
rowcov=pos_def_mat_inv(Qinv),
colcov=np.diag(var_col_A),
).rvs()
return
def sample_posterior(self, prior, var_dict, sufficient_stat, **kwargs):
mean_A = prior.hyperparams[self._mean_name]
var_col_A = prior.hyperparams[self._var_col_name]
if self._var_row_name is not None:
if self._lt_vec_name in var_dict:
LQinv = tril_vector_to_mat(var_dict[self._lt_vec_name])
Qinv = LQinv.dot(LQinv.T) + \
1e-9*np.eye(prior.dim[self.dim_names[0]])
else:
raise ValueError("Missing {}\n".format(self._lt_vec_name) +
"Perhaps {} must be earlier in _prior_helper_list".format(
self._var_row_name)
)
else:
            Qinv = np.eye(prior.dim[self.dim_names[0]])
S_prevprev = np.diag(var_col_A**-1) + \
sufficient_stat[self.name]['S_prevprev']
S_curprev = mean_A * var_col_A**-1 + \
sufficient_stat[self.name]['S_curprev']
var_dict[self.name] = scipy.stats.matrix_normal(
mean=np.linalg.solve(S_prevprev, S_curprev.T).T,
rowcov=pos_def_mat_inv(Qinv),
colcov=pos_def_mat_inv(S_prevprev),
).rvs()
return
def logprior(self, prior, logprior, parameters, **kwargs):
mean_A = prior.hyperparams[self._mean_name]
var_col_A = prior.hyperparams[self._var_col_name]
if self._var_row_name is not None:
LQinv = tril_vector_to_mat(parameters.var_dict[self._lt_vec_name])
else:
LQinv = np.eye(prior.dim[self.dim_names[0]])
logprior += matrix_normal_logpdf(parameters.var_dict[self.name],
mean=mean_A,
Lrowprec=LQinv,
Lcolprec=np.diag(var_col_A**-0.5),
)
return logprior
def grad_logprior(self, prior, grad, parameters, **kwargs):
mean_A = prior.hyperparams[self._mean_name]
var_col_A = prior.hyperparams[self._var_col_name]
A = getattr(parameters, self.name)
if self._var_row_name is not None:
Qinv = getattr(parameters, '{}inv'.format(self._var_row_name))
else:
Qinv = np.eye(prior.dim[self.dim_names[0]])
grad[self.name] = -1.0 * np.dot(Qinv, A - mean_A) * var_col_A**-1
return
def get_prior_kwargs(self, prior_kwargs, parameters, **kwargs):
var = kwargs['var']
A = getattr(parameters, self.name)
if kwargs.get('from_mean', False):
mean_A = A.copy()
else:
mean_A = np.zeros_like(A)
var_col_A = np.ones(A.shape[0])*var
prior_kwargs[self._mean_name] = mean_A
prior_kwargs[self._var_col_name] = var_col_A
return
def get_default_kwargs(self, default_kwargs, **kwargs):
n = kwargs[self.dim_names[0]]
var = kwargs['var']
mean_A = np.zeros((n,n))
var_col_A = np.ones(n)*var
default_kwargs[self._mean_name] = mean_A
default_kwargs[self._var_col_name] = var_col_A
return
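# Note on the prior above (reading inferred from the code): A is given a matrix-normal
# prior, A ~ MN(mean_A, rowcov=Q, colcov=diag(var_col_A)), where the row covariance
# Q = (LQinv LQinv^T)^{-1} is tied to the noise parameter named by var_row_name.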
class SquareMatrixPrecondHelper(PrecondHelper):
def __init__(self, name='A', dim_names=None, var_row_name='Q'):
self.name = name
self._var_row_name = var_row_name
self.dim_names = ['n'] if dim_names is None else dim_names
return
def precondition(self, preconditioner,
precond_grad, grad, parameters, **kwargs):
Q = getattr(parameters, self._var_row_name)
precond_grad[self.name] = np.dot(Q, grad[self.name])
return
def precondition_noise(self, preconditioner,
noise, parameters, **kwargs):
LQinv = getattr(parameters, "L{}inv".format(self._var_row_name))
noise[self.name] = np.linalg.solve(LQinv.T,
np.random.normal(loc=0, size=LQinv.shape)
)
return
def correction_term(self, preconditioner, correction, parameters, **kwargs):
correction[self.name] = np.zeros_like(getattr(parameters, self.name),
dtype=float)
return
# Multiple Square
class SquareMatricesParamHelper(ParamHelper):
def __init__(self, name='A', dim_names=None):
self.name = name
self.dim_names = ['n', 'num_states'] if dim_names is None else dim_names
return
def set_var(self, param, **kwargs):
if self.name in kwargs:
num_states, n, n2 = np.shape(kwargs[self.name])
if n != n2:
raise ValueError("{} must be square matrices".format(self.name))
param.var_dict[self.name] = np.array(kwargs[self.name]).astype(float)
param._set_check_dim(**{self.dim_names[0]: n,
self.dim_names[1]: num_states})
else:
raise ValueError("{} not provided".format(self.name))
return
def project_parameters(self, param, **kwargs):
name_kwargs = kwargs.get(self.name, {})
if name_kwargs.get('thresh', True):
A = param.var_dict[self.name]
for k, A_k in enumerate(A):
A_k = varp_stability_projection(A_k,
eigenvalue_cutoff=name_kwargs.get(
'eigenvalue_cutoff', 0.9999),
var_name=self.name,
logger=logger)
A[k] = A_k
param.var_dict[self.name] = A
if name_kwargs.get('fixed') is not None:
param.var_dict[self.name] = name_kwargs['fixed'].copy()
return
def from_dict_to_vector(self, vector_list, var_dict, **kwargs):
vector_list.append(var_dict[self.name].flatten())
return
def from_vector_to_dict(self, var_dict, vector, vector_index, **kwargs):
n = kwargs[self.dim_names[0]]
num_states = kwargs[self.dim_names[1]]
A = np.reshape(vector[vector_index:vector_index+num_states*n**2],
(num_states, n, n))
var_dict[self.name] = A
return vector_index+num_states*n**2
def get_properties(self):
properties = {}
properties[self.name] = property(
fget=get_value_func(self.name),
fset=set_value_func(self.name),
doc="{0} is a {2} of {1} by {1} matrices".format(
self.name, self.dim_names[0], self.dim_names[1]),
)
for dim_name in self.dim_names:
properties[dim_name] = property(
fget=get_dim_func(dim_name),
)
return properties
class SquareMatricesPriorHelper(PriorHelper):
def __init__(self, name='A', dim_names=None, var_row_name=None):
self.name = name
self._mean_name = 'mean_{0}'.format(name)
self._var_col_name = 'var_col_{0}'.format(name)
self._var_row_name = var_row_name
self._lt_vec_name = 'L{0}inv_vec'.format(var_row_name)
self.dim_names = ['n', 'num_states'] if dim_names is None else dim_names
return
def set_hyperparams(self, prior, **kwargs):
if self._mean_name in kwargs:
num_states, n, n2 = np.shape(kwargs[self._mean_name])
else:
raise ValueError("{} must be provided".format(self._mean_name))
if self._var_col_name in kwargs:
num_states2, n3 = np.shape(kwargs[self._var_col_name])
else:
raise ValueError("{} must be provided".format(self._var_col_name))
if n != n2:
raise ValueError("{} must be square".format(self._mean_name))
if (n != n3) or (num_states != num_states2):
raise ValueError("prior dimensions don't match")
prior._set_check_dim(**{self.dim_names[0]: n,
self.dim_names[1]: num_states})
prior.hyperparams[self._mean_name] = kwargs[self._mean_name]
prior.hyperparams[self._var_col_name] = kwargs[self._var_col_name]
return
def sample_prior(self, prior, var_dict, **kwargs):
n = prior.dim[self.dim_names[0]]
num_states = prior.dim[self.dim_names[1]]
mean_A = prior.hyperparams[self._mean_name]
var_col_A = prior.hyperparams[self._var_col_name]
if self._var_row_name is not None:
if self._lt_vec_name in var_dict:
LQinvs = np.array([tril_vector_to_mat(LQinv_vec_k)
for LQinv_vec_k in var_dict[self._lt_vec_name]])
Qinvs = np.array([LQinv_k.dot(LQinv_k.T) + 1e-9*np.eye(n)
for LQinv_k in LQinvs])
else:
raise ValueError("Missing {}\n".format(self._lt_vec_name) +
"Perhaps {} must be earlier in _prior_helper_list".format(
self._var_row_name)
)
else:
Qinvs = np.array([np.eye(n) for _ in range(num_states)])
As = [None for k in range(num_states)]
for k in range(len(As)):
As[k] = scipy.stats.matrix_normal(
mean=mean_A[k],
rowcov=pos_def_mat_inv(Qinvs[k]),
colcov=np.diag(var_col_A[k]),
).rvs()
var_dict[self.name] = np.array(As)
return
def sample_posterior(self, prior, var_dict, sufficient_stat, **kwargs):
n = prior.dim[self.dim_names[0]]
num_states = prior.dim[self.dim_names[1]]
mean_A = prior.hyperparams[self._mean_name]
var_col_A = prior.hyperparams[self._var_col_name]
if self._var_row_name is not None:
if self._lt_vec_name in var_dict:
LQinvs = np.array([tril_vector_to_mat(LQinv_vec_k)
for LQinv_vec_k in var_dict[self._lt_vec_name]])
Qinvs = np.array([LQinv_k.dot(LQinv_k.T) + 1e-9*np.eye(n)
for LQinv_k in LQinvs])
else:
raise ValueError("Missing {}\n".format(self._lt_vec_name) +
"Perhaps {} must be earlier in _prior_helper_list".format(
self._var_row_name)
)
else:
Qinvs = np.array([np.eye(n) for _ in range(num_states)])
As = [None for k in range(num_states)]
for k in range(len(As)):
S_prevprev = np.diag(var_col_A[k]**-1) + \
sufficient_stat[self.name]['S_prevprev'][k]
S_curprev = mean_A[k] * var_col_A[k]**-1 + \
sufficient_stat[self.name]['S_curprev'][k]
As[k] = scipy.stats.matrix_normal(
mean=np.linalg.solve(S_prevprev, S_curprev.T).T,
rowcov=pos_def_mat_inv(Qinvs[k]),
colcov=pos_def_mat_inv(S_prevprev),
).rvs()
var_dict[self.name] = np.array(As)
return
def logprior(self, prior, logprior, parameters, **kwargs):
n = prior.dim[self.dim_names[0]]
num_states = prior.dim[self.dim_names[1]]
mean_A = prior.hyperparams[self._mean_name]
var_col_A = prior.hyperparams[self._var_col_name]
if self._var_row_name is not None:
LQinv_vec = getattr(parameters, self._lt_vec_name)
LQinvs = np.array([tril_vector_to_mat(LQinv_vec_k)
for LQinv_vec_k in LQinv_vec])
else:
LQinvs = np.array([np.eye(n) for _ in range(num_states)])
for A_k, mean_A_k, var_col_A_k, LQinv_k in zip(
parameters.var_dict[self.name], mean_A, var_col_A, LQinvs):
logprior += matrix_normal_logpdf(A_k,
mean=mean_A_k,
Lrowprec=LQinv_k,
Lcolprec=np.diag(var_col_A_k**-0.5),
)
return logprior
def grad_logprior(self, prior, grad, parameters, **kwargs):
mean_A = prior.hyperparams[self._mean_name]
var_col_A = prior.hyperparams[self._var_col_name]
A = getattr(parameters, self.name)
if self._var_row_name is not None:
Qinvs = getattr(parameters, '{}inv'.format(self._var_row_name))
else:
Qinvs = np.array([np.eye(prior.dim[self.dim_names[0]])
                              for _ in range(prior.dim[self.dim_names[1]])])
grad[self.name] = np.array([
-1.0 * np.dot(Qinvs[k], A[k] - mean_A[k]) * var_col_A[k]**-1
for k in range(prior.dim[self.dim_names[1]])
])
return
def get_prior_kwargs(self, prior_kwargs, parameters, **kwargs):
var = kwargs['var']
A = getattr(parameters, self.name)
if kwargs.get('from_mean', False):
mean_A = A.copy()
else:
mean_A = np.zeros_like(A)
var_col_A = np.array([
            np.ones(A.shape[1])*var for _ in range(A.shape[0])
])
prior_kwargs[self._mean_name] = mean_A
prior_kwargs[self._var_col_name] = var_col_A
return
def get_default_kwargs(self, default_kwargs, **kwargs):
n = kwargs[self.dim_names[0]]
num_states = kwargs[self.dim_names[1]]
var = kwargs['var']
mean_A = np.zeros((num_states, n,n))
var_col_A = np.ones((num_states,n))*var
default_kwargs[self._mean_name] = mean_A
default_kwargs[self._var_col_name] = var_col_A
return
class SquareMatricesPrecondHelper(PrecondHelper):
def __init__(self, name='A', dim_names=None, var_row_name='Q'):
self.name = name
self._var_row_name = var_row_name
self.dim_names = ['n', 'num_states'] if dim_names is None else dim_names
return
def precondition(self, preconditioner,
precond_grad, grad, parameters, **kwargs):
Q = getattr(parameters, self._var_row_name)
precond_grad[self.name] = np.array([
np.dot(Q[k], grad[self.name][k])
for k in range(Q.shape[0])
])
return
def precondition_noise(self, preconditioner,
noise, parameters, **kwargs):
LQinv = getattr(parameters, "L{}inv".format(self._var_row_name))
noise[self.name] = np.array([
np.linalg.solve(LQinv[k].T,
np.random.normal(loc=0, size=LQinv[k].shape)
)
for k in range(LQinv.shape[0])
])
return
def correction_term(self, preconditioner, correction, parameters, **kwargs):
correction[self.name] = np.zeros_like(getattr(parameters, self.name),
dtype=float)
return
# Single Rectangular (m by n)
class RectMatrixParamHelper(ParamHelper):
def __init__(self, name='A', dim_names=None):
self.name = name
self.dim_names = ['m','n'] if dim_names is None else dim_names
return
def set_var(self, param, **kwargs):
if self.name in kwargs:
m, n = np.shape(kwargs[self.name])
param.var_dict[self.name] = np.array(kwargs[self.name]).astype(float)
param._set_check_dim(**{
self.dim_names[0]: m,
self.dim_names[1]: n,
})
else:
raise ValueError("{} not provided".format(self.name))
return
def project_parameters(self, param, **kwargs):
name_kwargs = kwargs.get(self.name, {})
if name_kwargs.get('thresh', False):
A = param.var_dict[self.name]
A = varp_stability_projection(A,
eigenvalue_cutoff=name_kwargs.get(
'eigenvalue_cutoff', 0.9999),
var_name=self.name,
logger=logger)
param.var_dict[self.name] = A
if name_kwargs.get('fixed') is not None:
param.var_dict[self.name] = name_kwargs['fixed'].copy()
if name_kwargs.get('fixed_eye', False):
k = min(param.dim[self.dim_names[0]], param.dim[self.dim_names[1]])
A = param.var_dict[self.name]
A[0:k, 0:k] = np.eye(k)
param.var_dict[self.name] = A
return
def from_dict_to_vector(self, vector_list, var_dict, **kwargs):
vector_list.append(var_dict[self.name].flatten())
return
def from_vector_to_dict(self, var_dict, vector, vector_index, **kwargs):
m = kwargs[self.dim_names[0]]
n = kwargs[self.dim_names[1]]
A = np.reshape(vector[vector_index:vector_index+m*n], (m, n))
var_dict[self.name] = A
return vector_index+m*n
def get_properties(self):
properties = {}
properties[self.name] = property(
fget=get_value_func(self.name),
fset=set_value_func(self.name),
doc="{0} is a {1} by {2} matrix".format(
self.name, self.dim_names[0], self.dim_names[1]),
)
for dim_name in self.dim_names:
properties[dim_name] = property(
fget=get_dim_func(dim_name),
)
return properties
class RectMatrixPriorHelper(PriorHelper):
def __init__(self, name='A', dim_names=None, var_row_name=None):
self.name = name
self._mean_name = 'mean_{0}'.format(name) # m by n ndarray
self._var_col_name = 'var_col_{0}'.format(name) # n ndarray
self._var_row_name = var_row_name
self._lt_vec_name = 'L{0}inv_vec'.format(var_row_name) # m by m ndarray
self.dim_names = ['m', 'n'] if dim_names is None else dim_names
return
def set_hyperparams(self, prior, **kwargs):
if self._mean_name in kwargs:
m, n = np.shape(kwargs[self._mean_name])
else:
raise ValueError("{} must be provided".format(self._mean_name))
if self._var_col_name in kwargs:
n2 = np.size(kwargs[self._var_col_name])
else:
raise ValueError("{} must be provided".format(self._var_col_name))
if n != n2:
raise ValueError("prior dimensions don't match")
prior._set_check_dim(**{
self.dim_names[0]: m,
self.dim_names[1]: n,
})
prior.hyperparams[self._mean_name] = kwargs[self._mean_name]
prior.hyperparams[self._var_col_name] = kwargs[self._var_col_name]
return
def sample_prior(self, prior, var_dict, **kwargs):
mean_A = prior.hyperparams[self._mean_name]
var_col_A = prior.hyperparams[self._var_col_name]
if self._var_row_name is not None:
if self._lt_vec_name in var_dict:
LQinv = tril_vector_to_mat(var_dict[self._lt_vec_name])
Qinv = LQinv.dot(LQinv.T) + \
1e-9*np.eye(prior.dim[self.dim_names[0]])
else:
raise ValueError("Missing {}\n".format(self._lt_vec_name) +
"Perhaps {} must be earlier in _prior_helper_list".format(
self._var_row_name)
)
else:
Qinv = np.eye(prior.dim[self.dim_names[0]])
var_dict[self.name] = scipy.stats.matrix_normal(
mean=mean_A,
rowcov=pos_def_mat_inv(Qinv),
colcov=np.diag(var_col_A),
).rvs()
return
def sample_posterior(self, prior, var_dict, sufficient_stat, **kwargs):
mean_A = prior.hyperparams[self._mean_name]
var_col_A = prior.hyperparams[self._var_col_name]
if self._var_row_name is not None:
if self._lt_vec_name in var_dict:
LQinv = tril_vector_to_mat(var_dict[self._lt_vec_name])
Qinv = LQinv.dot(LQinv.T) + \
1e-9*np.eye(prior.dim[self.dim_names[0]])
else:
raise ValueError("Missing {}\n".format(self._lt_vec_name) +
"Perhaps {} must be earlier in _prior_helper_list".format(
self._var_row_name)
)
else:
            Qinv = np.eye(prior.dim[self.dim_names[0]])
S_prevprev = np.diag(var_col_A**-1) + \
sufficient_stat[self.name]['S_prevprev']
S_curprev = mean_A * var_col_A**-1 + \
sufficient_stat[self.name]['S_curprev']
var_dict[self.name] = scipy.stats.matrix_normal(
mean=np.linalg.solve(S_prevprev, S_curprev.T).T,
rowcov=pos_def_mat_inv(Qinv),
colcov=pos_def_mat_inv(S_prevprev),
).rvs()
return
def logprior(self, prior, logprior, parameters, **kwargs):
mean_A = prior.hyperparams[self._mean_name]
var_col_A = prior.hyperparams[self._var_col_name]
if self._var_row_name is not None:
LQinv = tril_vector_to_mat(parameters.var_dict[self._lt_vec_name])
else:
LQinv = np.eye(prior.dim[self.dim_names[0]])
logprior += matrix_normal_logpdf(parameters.var_dict[self.name],
mean=mean_A,
Lrowprec=LQinv,
Lcolprec=np.diag(var_col_A**-0.5),
)
return logprior
def grad_logprior(self, prior, grad, parameters, **kwargs):
mean_A = prior.hyperparams[self._mean_name]
var_col_A = prior.hyperparams[self._var_col_name]
A = getattr(parameters, self.name)
if self._var_row_name is not None:
Qinv = getattr(parameters, '{}inv'.format(self._var_row_name))
else:
Qinv = np.eye(prior.dim[self.dim_names[0]])
grad[self.name] = -1.0 * np.dot(Qinv, A - mean_A) * var_col_A**-1
return
def get_prior_kwargs(self, prior_kwargs, parameters, **kwargs):
var = kwargs['var']
A = getattr(parameters, self.name)
if kwargs.get('from_mean', False):
mean_A = A.copy()
else:
mean_A = np.zeros_like(A)
var_col_A = np.ones(A.shape[1])*var
prior_kwargs[self._mean_name] = mean_A
prior_kwargs[self._var_col_name] = var_col_A
return
def get_default_kwargs(self, default_kwargs, **kwargs):
m = kwargs[self.dim_names[0]]
n = kwargs[self.dim_names[1]]
var = kwargs['var']
mean_A = np.zeros((m,n))
var_col_A = np.ones(n)*var
default_kwargs[self._mean_name] = mean_A
default_kwargs[self._var_col_name] = var_col_A
return
class RectMatrixPrecondHelper(PrecondHelper):
def __init__(self, name='A', dim_names=None, var_row_name='Q'):
self.name = name
self._var_row_name = var_row_name
self.dim_names = ['m', 'n'] if dim_names is None else dim_names
return
def precondition(self, preconditioner,
precond_grad, grad, parameters, **kwargs):
Q = getattr(parameters, self._var_row_name)
precond_grad[self.name] = np.dot(Q, grad[self.name])
return
def precondition_noise(self, preconditioner,
noise, parameters, **kwargs):
m = parameters.dim[self.dim_names[0]]
n = parameters.dim[self.dim_names[1]]
LQinv = getattr(parameters, "L{}inv".format(self._var_row_name))
noise[self.name] = np.linalg.solve(LQinv.T,
np.random.normal(loc=0, size=(m, n))
)
return
def correction_term(self, preconditioner, correction, parameters, **kwargs):
correction[self.name] = np.zeros_like(getattr(parameters, self.name),
dtype=float)
return
# Multiple Rectangular
class RectMatricesParamHelper(ParamHelper):
def __init__(self, name='A', dim_names=None):
self.name = name
self.dim_names = ['m', 'n', 'num_states'] \
if dim_names is None else dim_names
return
def set_var(self, param, **kwargs):
if self.name in kwargs:
num_states, m, n = np.shape(kwargs[self.name])
param.var_dict[self.name] = np.array(kwargs[self.name]).astype(float)
param._set_check_dim(**{
self.dim_names[0]: m,
self.dim_names[1]: n,
self.dim_names[2]: num_states,
})
else:
raise ValueError("{} not provided".format(self.name))
return
def project_parameters(self, param, **kwargs):
name_kwargs = kwargs.get(self.name, {})
if name_kwargs.get('thresh', False):
A = param.var_dict[self.name]
for k, A_k in enumerate(A):
A_k = varp_stability_projection(A_k,
eigenvalue_cutoff=name_kwargs.get(
'eigenvalue_cutoff', 0.9999),
var_name=self.name,
logger=logger)
A[k] = A_k
param.var_dict[self.name] = A
if name_kwargs.get('fixed') is not None:
param.var_dict[self.name] = name_kwargs['fixed'].copy()
if name_kwargs.get('fixed_eye', False):
k = min(param.dim[self.dim_names[0]], param.dim[self.dim_names[1]])
A = param.var_dict[self.name]
            for kk in range(A.shape[0]):
A[kk, 0:k, 0:k] = np.eye(k)
param.var_dict[self.name] = A
return
def from_dict_to_vector(self, vector_list, var_dict, **kwargs):
vector_list.append(var_dict[self.name].flatten())
return
def from_vector_to_dict(self, var_dict, vector, vector_index, **kwargs):
m = kwargs[self.dim_names[0]]
n = kwargs[self.dim_names[1]]
num_states = kwargs[self.dim_names[2]]
A = np.reshape(vector[vector_index:vector_index+num_states*m*n],
(num_states, m, n))
var_dict[self.name] = A
return vector_index+num_states*m*n
def get_properties(self):
properties = {}
properties[self.name] = property(
fget=get_value_func(self.name),
fset=set_value_func(self.name),
doc="{0} is a {3} by {1} by {2} matrices".format(
self.name, self.dim_names[0],
self.dim_names[1], self.dim_names[2]),
)
for dim_name in self.dim_names:
properties[dim_name] = property(
fget=get_dim_func(dim_name),
)
return properties
class RectMatricesPriorHelper(PriorHelper):
def __init__(self, name='A', dim_names=None, var_row_name=None):
self.name = name
self._mean_name = 'mean_{0}'.format(name) # num_states x m x n
self._var_col_name = 'var_col_{0}'.format(name) # num_states x n
self._var_row_name = var_row_name # num_states x m x m
self._lt_vec_name = 'L{0}inv_vec'.format(var_row_name)
self.dim_names = ['m', 'n', 'num_states'] \
if dim_names is None else dim_names
return
def set_hyperparams(self, prior, **kwargs):
if self._mean_name in kwargs:
num_states, m, n = np.shape(kwargs[self._mean_name])
else:
raise ValueError("{} must be provided".format(self._mean_name))
if self._var_col_name in kwargs:
num_states2, n2 = np.shape(kwargs[self._var_col_name])
else:
raise ValueError("{} must be provided".format(self._var_col_name))
if (n != n2) or (num_states != num_states2):
raise ValueError("prior dimensions don't match")
prior._set_check_dim(**{
self.dim_names[0]: m,
self.dim_names[1]: n,
self.dim_names[2]: num_states})
prior.hyperparams[self._mean_name] = kwargs[self._mean_name]
prior.hyperparams[self._var_col_name] = kwargs[self._var_col_name]
return
def sample_prior(self, prior, var_dict, **kwargs):
mean_A = prior.hyperparams[self._mean_name]
var_col_A = prior.hyperparams[self._var_col_name]
num_states, m, n = np.shape(mean_A)
if self._var_row_name is not None:
if self._lt_vec_name in var_dict:
LQinvs = np.array([tril_vector_to_mat(LQinv_vec_k)
for LQinv_vec_k in var_dict[self._lt_vec_name]])
Qinvs = np.array([LQinv_k.dot(LQinv_k.T) + 1e-9*np.eye(m)
for LQinv_k in LQinvs])
else:
raise ValueError("Missing {}\n".format(self._lt_vec_name) +
"Perhaps {} must be earlier in _prior_helper_list".format(
self._var_row_name)
)
else:
Qinvs = np.array([np.eye(m) for _ in range(num_states)])
As = [None for k in range(num_states)]
for k in range(len(As)):
As[k] = scipy.stats.matrix_normal(
mean=mean_A[k],
rowcov=pos_def_mat_inv(Qinvs[k]),
colcov=np.diag(var_col_A[k]),
).rvs()
var_dict[self.name] = np.array(As)
return
def sample_posterior(self, prior, var_dict, sufficient_stat, **kwargs):
mean_A = prior.hyperparams[self._mean_name]
var_col_A = prior.hyperparams[self._var_col_name]
num_states, m, n = np.shape(mean_A)
if self._var_row_name is not None:
if self._lt_vec_name in var_dict:
LQinvs = np.array([tril_vector_to_mat(LQinv_vec_k)
for LQinv_vec_k in var_dict[self._lt_vec_name]])
Qinvs = np.array([LQinv_k.dot(LQinv_k.T) + 1e-9*np.eye(m)
for LQinv_k in LQinvs])
else:
raise ValueError("Missing {}\n".format(self._lt_vec_name) +
"Perhaps {} must be earlier in _prior_helper_list".format(
self._var_row_name)
)
else:
Qinvs = np.array([np.eye(m) for _ in range(num_states)])
As = [None for k in range(num_states)]
for k in range(len(As)):
S_prevprev = np.diag(var_col_A[k]**-1) + \
sufficient_stat[self.name]['S_prevprev'][k]
S_curprev = mean_A[k] * var_col_A[k]**-1 + \
sufficient_stat[self.name]['S_curprev'][k]
As[k] = scipy.stats.matrix_normal(
mean=np.linalg.solve(S_prevprev, S_curprev.T).T,
rowcov=pos_def_mat_inv(Qinvs[k]),
colcov=pos_def_mat_inv(S_prevprev),
).rvs()
var_dict[self.name] = np.array(As)
return
def logprior(self, prior, logprior, parameters, **kwargs):
mean_A = prior.hyperparams[self._mean_name]
var_col_A = prior.hyperparams[self._var_col_name]
num_states, m, n = np.shape(mean_A)
if self._var_row_name is not None:
LQinvs = np.array([tril_vector_to_mat(LQinv_vec_k)
for LQinv_vec_k in parameters.var_dict[self._lt_vec_name]])
else:
LQinvs = np.array([np.eye(m) for _ in range(num_states)])
for A_k, mean_A_k, var_col_A_k, LQinv_k in zip(
parameters.var_dict[self.name], mean_A, var_col_A, LQinvs):
logprior += matrix_normal_logpdf(A_k,
mean=mean_A_k,
Lrowprec=LQinv_k,
Lcolprec=np.diag(var_col_A_k**-0.5),
)
return logprior
def grad_logprior(self, prior, grad, parameters, **kwargs):
mean_A = prior.hyperparams[self._mean_name]
var_col_A = prior.hyperparams[self._var_col_name]
A = getattr(parameters, self.name)
if self._var_row_name is not None:
Qinvs = getattr(parameters, '{}inv'.format(self._var_row_name))
else:
Qinvs = np.array([np.eye(prior.dim[self.dim_names[0]])
                              for _ in range(prior.dim[self.dim_names[2]])])
grad[self.name] = np.array([
-1.0 * np.dot(Qinvs[k], A[k] - mean_A[k]) * var_col_A[k]**-1
for k in range(prior.dim[self.dim_names[2]])
])
return
def get_prior_kwargs(self, prior_kwargs, parameters, **kwargs):
var = kwargs['var']
A = getattr(parameters, self.name)
if kwargs.get('from_mean', False):
mean_A = A.copy()
else:
mean_A = np.zeros_like(A)
var_col_A = np.array([
np.ones(A.shape[2])*var for _ in range(A.shape[0])
])
prior_kwargs[self._mean_name] = mean_A
prior_kwargs[self._var_col_name] = var_col_A
return
def get_default_kwargs(self, default_kwargs, **kwargs):
m = kwargs[self.dim_names[0]]
n = kwargs[self.dim_names[1]]
num_states = kwargs[self.dim_names[2]]
var = kwargs['var']
        mean_A = np.zeros((num_states, m, n))
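        # Remaining defaults mirror the other get_default_kwargs implementations above.
        var_col_A = np.ones((num_states, n))*var
        default_kwargs[self._mean_name] = mean_A
        default_kwargs[self._var_col_name] = var_col_A
        return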
import math
import numbers
import random
import warnings
from PIL import Image, ImageOps
from torchvision.transforms import functional as F
from torchvision import transforms
import numpy as np
"""
Most of the code here is from
https://github.com/zijundeng/pytorch-semantic-segmentation/blob/master/utils/joint_transforms.py
"""
def pad_to_target(img, target_height, target_width, label=0):
    # Pad the image to the specified height and width if needed, filling new pixels with `label`.
    # This op does nothing if the image is already at least target_height by target_width.
w, h = img.size
left = top = right = bottom = 0
doit = False
if target_width > w:
delta = target_width - w
left = delta // 2
right = delta - left
doit = True
if target_height > h:
delta = target_height - h
top = delta // 2
bottom = delta - top
doit = True
if doit:
img = ImageOps.expand(img, border=(left, top, right, bottom), fill=label)
assert img.size[0] >= target_width
assert img.size[1] >= target_height
return img
class Compose(object):
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, img, mask):
assert img.size == mask.size
for t in self.transforms:
img, mask = t(img, mask)
return img, mask
class Safe32Padding(object):
def __call__(self, img, mask=None):
width, height = img.size
if (height % 32) != 0: height += 32 - (height % 32)
if (width % 32) != 0: width += 32 - (width % 32)
if mask:
return pad_to_target(img, height, width), pad_to_target(mask, height, width)
else:
return pad_to_target(img, height, width)
class Resize(object):
def __init__(self, size):
self.w = 0
self.h = 0
if isinstance(size, int):
self.w = size
self.h = size
elif isinstance(size, tuple) and len(size) == 2:
if isinstance(size[0], int) and isinstance(size[1], int):
self.w = size[0]
self.h = size[1]
else:
raise ValueError
else:
raise ValueError
def __call__(self, img, mask):
        return (img.resize((self.w, self.h), Image.BILINEAR),
                mask.resize((self.w, self.h), Image.NEAREST))
class RandomCrop(object):
def __init__(self, size, padding=0):
if isinstance(size, numbers.Number):
self.size = (int(size), int(size))
else:
self.size = size
self.padding = padding
def __call__(self, img, mask):
if self.padding > 0:
img = ImageOps.expand(img, border=self.padding, fill=0)
mask = ImageOps.expand(mask, border=self.padding, fill=0)
assert img.size == mask.size
w, h = img.size
th, tw = self.size
if w == tw and h == th:
return img, mask
if w < tw or h < th:
return img.resize((tw, th), Image.BILINEAR), mask.resize((tw, th), Image.NEAREST)
x1 = random.randint(0, w - tw)
y1 = random.randint(0, h - th)
return img.crop((x1, y1, x1 + tw, y1 + th)), mask.crop((x1, y1, x1 + tw, y1 + th))
class CenterCrop(object):
def __init__(self, size):
if isinstance(size, numbers.Number):
self.size = (int(size), int(size))
else:
self.size = size
def __call__(self, img, mask):
assert img.size == mask.size
w, h = img.size
th, tw = self.size
x1 = int(round((w - tw) / 2.))
y1 = int(round((h - th) / 2.))
return img.crop((x1, y1, x1 + tw, y1 + th)), mask.crop((x1, y1, x1 + tw, y1 + th))
class RandomHorizontallyFlip(object):
def __call__(self, img, mask):
if random.random() < 0.5:
return img.transpose(Image.FLIP_LEFT_RIGHT), mask.transpose(Image.FLIP_LEFT_RIGHT)
return img, mask
class FreeScale(object):
def __init__(self, size):
self.size = tuple(reversed(size)) # size: (h, w)
def __call__(self, img, mask):
assert img.size == mask.size
return img.resize(self.size, Image.BILINEAR), mask.resize(self.size, Image.NEAREST)
class RandomSizedCrop(object):
def __init__(self, size):
self.size = size
def __call__(self, img, mask):
assert img.size == mask.size
for attempt in range(10):
area = img.size[0] * img.size[1]
target_area = random.uniform(0.45, 1.0) * area
aspect_ratio = random.uniform(0.5, 2)
w = int(round(math.sqrt(target_area * aspect_ratio)))
h = int(round(math.sqrt(target_area / aspect_ratio)))
if random.random() < 0.5:
w, h = h, w
if w <= img.size[0] and h <= img.size[1]:
x1 = random.randint(0, img.size[0] - w)
y1 = random.randint(0, img.size[1] - h)
img = img.crop((x1, y1, x1 + w, y1 + h))
mask = mask.crop((x1, y1, x1 + w, y1 + h))
assert (img.size == (w, h))
return img.resize((self.size, self.size), Image.BILINEAR), mask.resize((self.size, self.size),
Image.NEAREST)
# Fallback
resize = Resize(self.size)
crop = CenterCrop(self.size)
return crop(*resize(img, mask))
class RandomRotate(object):
def __init__(self, degree):
self.degree = degree
def __call__(self, img, mask):
rotate_degree = random.random() * 2 * self.degree - self.degree
return img.rotate(rotate_degree, Image.BILINEAR), mask.rotate(rotate_degree, Image.NEAREST)
class JointRandomAffine(transforms.RandomAffine):
def __call__(self, img, mask):
ret = self.get_params(self.degrees, self.translate, self.scale, self.shear, img.size)
return (
F.affine(img, *ret, resample=self.resample, fillcolor=self.fillcolor),
F.affine(mask, *ret, resample=self.resample, fillcolor=self.fillcolor)
)
class RandomResizedCrop(object):
"""Crop the given PIL Image to random size and aspect ratio.
A crop of random size (default: of 0.08 to 1.0) of the original size and a random
aspect ratio (default: of 3/4 to 4/3) of the original aspect ratio is made. This crop
is finally resized to given size.
This is popularly used to train the Inception networks.
Args:
size: expected output size of each edge
scale: range of size of the origin size cropped
ratio: range of aspect ratio of the origin aspect ratio cropped
interpolation: Default: PIL.Image.BILINEAR
"""
def __init__(self, size, scale=(0.08, 1.0), ratio=(3. / 4., 4. / 3.), interpolation=Image.BILINEAR):
if isinstance(size, tuple):
self.size = size
else:
self.size = (size, size)
if (scale[0] > scale[1]) or (ratio[0] > ratio[1]):
warnings.warn("range should be of kind (min, max)")
self.interpolation = interpolation
self.scale = scale
self.ratio = ratio
@staticmethod
def get_params(img, scale, ratio):
"""Get parameters for ``crop`` for a random sized crop.
Args:
img (PIL Image): Image to be cropped.
scale (tuple): range of size of the origin size cropped
ratio (tuple): range of aspect ratio of the origin aspect ratio cropped
Returns:
tuple: params (i, j, h, w) to be passed to ``crop`` for a random
sized crop.
"""
area = img.size[0] * img.size[1]
for attempt in range(10):
target_area = random.uniform(*scale) * area
log_ratio = (math.log(ratio[0]), math.log(ratio[1]))
aspect_ratio = math.exp(random.uniform(*log_ratio))
w = int(round(math.sqrt(target_area * aspect_ratio)))
h = int(round(math.sqrt(target_area / aspect_ratio)))
if w <= img.size[0] and h <= img.size[1]:
i = random.randint(0, img.size[1] - h)
j = random.randint(0, img.size[0] - w)
return i, j, h, w
# Fallback to central crop
in_ratio = img.size[0] / img.size[1]
if (in_ratio < min(ratio)):
w = img.size[0]
h = w / min(ratio)
elif (in_ratio > max(ratio)):
h = img.size[1]
w = h * max(ratio)
else: # whole image
w = img.size[0]
h = img.size[1]
i = (img.size[1] - h) // 2
j = (img.size[0] - w) // 2
return i, j, h, w
def __call__(self, img, mask):
"""
Args:
img (PIL Image): Image to be cropped and resized.
Returns:
PIL Image: Randomly cropped and resized image.
"""
i, j, h, w = self.get_params(img, self.scale, self.ratio)
return (F.resized_crop(img, i, j, h, w, self.size, self.interpolation), F.resized_crop(mask, i, j, h, w, self.size, self.interpolation))
def __repr__(self):
interpolate_str = _pil_interpolation_to_str[self.interpolation]
format_string = self.__class__.__name__ + '(size={0}'.format(self.size)
format_string += ', scale={0}'.format(tuple(round(s, 4) for s in self.scale))
format_string += ', ratio={0}'.format(tuple(round(r, 4) for r in self.ratio))
format_string += ', interpolation={0})'.format(interpolate_str)
return format_string
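# Illustrative sketch (not part of the original file): the joint transforms above are meant to be
# chained with Compose so that the image and its mask always receive the same spatial operation.
# The file names below are hypothetical placeholders and are assumed to have identical sizes.
#   >>> joint = Compose([RandomResizedCrop(256), RandomHorizontallyFlip()])
#   >>> img, mask = joint(Image.open('img.png'), Image.open('mask.png'))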
class RandomSized(object):
def __init__(self, size):
self.size = size
        self.scale = Resize(self.size)  # 'Scale' is not defined in this file; Resize is the closest equivalent here
self.crop = RandomCrop(self.size)
def __call__(self, img, mask):
assert img.size == mask.size
w = int(random.uniform(0.5, 2) * img.size[0])
h = int(random.uniform(0.5, 2) * img.size[1])
img, mask = img.resize((w, h), Image.BILINEAR), mask.resize((w, h), Image.NEAREST)
return self.crop(*self.scale(img, mask))
class SlidingCropOld(object):
def __init__(self, crop_size, stride_rate, ignore_label):
self.crop_size = crop_size
self.stride_rate = stride_rate
self.ignore_label = ignore_label
def _pad(self, img, mask):
h, w = img.shape[: 2]
pad_h = max(self.crop_size - h, 0)
pad_w = max(self.crop_size - w, 0)
img = np.pad(img, ((0, pad_h), (0, pad_w), (0, 0)), 'constant')
mask = np.pad(mask, ((0, pad_h), (0, pad_w)), 'constant', constant_values=self.ignore_label)
return img, mask
def __call__(self, img, mask):
assert img.size == mask.size
w, h = img.size
long_size = max(h, w)
img = np.array(img)
mask = np.array(mask)
if long_size > self.crop_size:
stride = int(math.ceil(self.crop_size * self.stride_rate))
h_step_num = int(math.ceil((h - self.crop_size) / float(stride))) + 1
w_step_num = int(math.ceil((w - self.crop_size) / float(stride))) + 1
img_sublist, mask_sublist = [], []
            for yy in range(h_step_num):
                for xx in range(w_step_num):
sy, sx = yy * stride, xx * stride
ey, ex = sy + self.crop_size, sx + self.crop_size
img_sub = img[sy: ey, sx: ex, :]
mask_sub = mask[sy: ey, sx: ex]
img_sub, mask_sub = self._pad(img_sub, mask_sub)
img_sublist.append(Image.fromarray(img_sub.astype(np.uint8)).convert('RGB'))
mask_sublist.append(Image.fromarray(mask_sub.astype(np.uint8)).convert('P'))
return img_sublist, mask_sublist
else:
img, mask = self._pad(img, mask)
img = Image.fromarray(img.astype(np.uint8)).convert('RGB')
mask = Image.fromarray(mask.astype(np.uint8)).convert('P')
return img, mask
class SlidingCrop(object):
def __init__(self, crop_size, stride_rate, ignore_label):
self.crop_size = crop_size
self.stride_rate = stride_rate
self.ignore_label = ignore_label
def _pad(self, img, mask):
h, w = img.shape[: 2]
pad_h = max(self.crop_size - h, 0)
pad_w = max(self.crop_size - w, 0)
img = np.pad(img, ((0, pad_h), (0, pad_w), (0, 0)), 'constant')
mask = np.pad(mask, ((0, pad_h), (0, pad_w)), 'constant', constant_values=self.ignore_label)
return img, mask, h, w
def __call__(self, img, mask):
assert img.size == mask.size
w, h = img.size
long_size = max(h, w)
        img = np.array(img)
import sys
sys.path.append('../')
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import matplotlib.gridspec as gridspec
from mpl_toolkits.axes_grid1 import make_axes_locatable
import seaborn as sns
from network import Protocol, NetworkManager, BCPNNPerfect, TimedInput
from connectivity_functions import create_orthogonal_canonical_representation, build_network_representation
from connectivity_functions import get_weights_from_probabilities, get_probabilities_from_network_representation
from analysis_functions import calculate_recall_time_quantities, get_weights
from analysis_functions import get_weights_collections
def generate_plot_for_variable(filename, x_values, xlabel):
format = '.pdf'
folder = './plot_producers/off_line_rule_learning_'
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(111)
ax.plot(x_values, w_self_vector, 'o-', markersize=markersize, linewidth=linewidth, label=r'$w_{self}$')
ax.plot(x_values, w_next_vector, 'o-', markersize=markersize, linewidth=linewidth, label=r'$w_{next}$')
ax.plot(x_values, w_rest_vector, 'o-', markersize=markersize, linewidth=linewidth, label=r'$w_{rest}$')
ax.axhline(0, ls='--', color='gray')
ax.axvline(0, ls='--', color='gray')
ax.set_ylabel(r'$w$')
ax.set_xlabel(xlabel)
ax.legend()
type = 'w'
aux_filename = folder + filename + type + format
fig.savefig(aux_filename, frameon=False, dpi=110, bbox_inches='tight')
fig.clear()
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(111)
ax.plot(x_values, factor * Pij_self_vector, 'o-', markersize=markersize, linewidth=linewidth,
label=r'$P_{self}$')
ax.plot(x_values, factor * Pij_next_vector, 'o-', markersize=markersize, linewidth=linewidth,
label=r'$P_{next}$')
ax.plot(x_values, factor * Pij_rest_vector, 'o-', markersize=markersize, linewidth=linewidth,
label=r'$P_{rest}$')
    ax.plot(x_values, factor * pi_self_vector, 'o-', markersize=markersize, linewidth=linewidth,
            label=r'$p_i * p_j$', color='black')
ax.axhline(0, ls='--', color='gray')
ax.axvline(0, ls='--', color='gray')
ax.set_ylabel(r'Probabilities')
ax.set_xlabel(xlabel)
ax.legend()
type = 'p'
aux_filename = folder + filename + type + format
fig.savefig(aux_filename, frameon=False, dpi=110, bbox_inches='tight')
fig.clear()
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(111)
ax.plot(x_values, persistence_time_vector, 'o-', markersize=markersize,
linewidth=linewidth, label=r'$T_{persistence}$')
ax.plot(x_values, success_vector / 100.0, 'o-', markersize=markersize,
linewidth=linewidth, label=r'Success')
ax.axhline(0, ls='--', color='gray')
ax.axvline(0, ls='--', color='gray')
ax.set_ylabel(r'$T_{persistence} (s)$')
ax.set_xlabel(xlabel)
ax.legend()
type = 'time'
aux_filename = folder + filename + type + format
fig.savefig(aux_filename, frameon=False, dpi=110, bbox_inches='tight')
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(111)
ax.plot(x_values[:-1], np.diff(Pij_self_vector) / np.abs(Pij_self_vector[:-1]),
'o-', markersize=markersize, linewidth=linewidth, label=r'$P_{self}$', alpha=alpha)
ax.plot(x_values[:-1], np.diff(Pij_next_vector) / np.abs(Pij_next_vector[:-1]), 'o-',
markersize=markersize, linewidth=linewidth, label=r'$P_{next}$', alpha=alpha)
ax.plot(x_values[:-1], np.diff(Pij_rest_vector) / np.abs(Pij_rest_vector[:-1]),
'o-', markersize=markersize, linewidth=linewidth, label=r'$P_{rest}$', alpha=alpha)
ax.plot(x_values[:-1], np.diff(pi_self_vector) / np.abs(pi_self_vector[:-1]), 'o-', alpha=alpha,
markersize=markersize, linewidth=linewidth, color='black', label=r'$p_i * p_j$')
ax.axhline(0, ls='--', color='gray')
ax.axvline(0, ls='--', color='gray')
ax.set_ylabel(r'$\Delta $ Probabilities')
ax.set_xlabel(xlabel)
ax.legend()
type = 'diff'
aux_filename = folder + filename + type + format
fig.savefig(aux_filename, frameon=False, dpi=110, bbox_inches='tight')
plt.close()
sns.set(font_scale=2.8)
sns.set_style(style='white')
epsilon = 10e-10
from_pattern = 2
to_pattern = 3
figsize = (16, 12)
markersize = 25
linewidth = 10
factor = 1.0
alpha = 0.8
normal_palette = sns.color_palette()
plot_training_time = False
plot_tau_z = False
plot_resting_time = False
plot_epochs = False
plot_inter_sequence_time = False
plot_inter_pulse_interval = False
plot_minicolumns_fixed = True
plot_minicolumns_var = True
plot_hypercolumns = False
#####################
# General parameters
#####################
always_learning = False
strict_maximum = True
perfect = False
z_transfer = False
k_perfect = True
diagonal_zero = False
normalized_currents = True
g_w_ampa = 2.0
g_w = 0.0
g_a = 10.0
tau_a = 0.250
G = 1.0
sigma = 0.0
tau_m = 0.020
tau_z_pre_ampa = 0.025
tau_z_post_ampa = 0.025
tau_p = 10.0
hypercolumns = 1
minicolumns = 10
n_patterns = 10
# Manager properties
dt = 0.001
values_to_save = ['o']
# Protocol
training_time = 0.100
inter_sequence_interval = 1.0
inter_pulse_interval = 0.0
epochs = 3
resting_time = 3.0
# Recall
T_recall = 3.0
n = 1
T_cue = 0.050
##############################
# Training time
##############################
if plot_training_time:
epsilon_ = epsilon
num = 20
training_times = np.linspace(0.050, 1.0, num=num)
success_vector = np.zeros(num)
persistence_time_vector = np.zeros(num)
w_self_vector = np.zeros(num)
w_next_vector = np.zeros(num)
w_rest_vector = np.zeros(num)
pi_self_vector = np.zeros(num)
Pij_self_vector = np.zeros(num)
pi_next_vector = np.zeros(num)
Pij_next_vector = np.zeros(num)
pi_rest_vector = np.zeros(num)
Pij_rest_vector = np.zeros(num)
for index, training_time_ in enumerate(training_times):
matrix = create_orthogonal_canonical_representation(minicolumns, hypercolumns)[:n_patterns]
network_representation = build_network_representation(matrix, minicolumns, hypercolumns)
timed_input = TimedInput(network_representation, dt, training_time_,
inter_pulse_interval=inter_pulse_interval,
inter_sequence_interval=inter_sequence_interval,
epochs=epochs, resting_time=resting_time)
S = timed_input.build_timed_input()
z_pre = timed_input.build_filtered_input_pre(tau_z_pre_ampa)
z_post = timed_input.build_filtered_input_post(tau_z_pre_ampa)
pi, pj, P = timed_input.calculate_probabilities_from_time_signal(filtered=True)
w_timed, beta_timed = get_weights_from_probabilities(pi, pj, P, minicolumns, hypercolumns, epsilon_)
nn = BCPNNPerfect(hypercolumns, minicolumns, g_w_ampa=g_w_ampa, g_w=g_w, g_a=g_a, tau_a=tau_a, tau_m=tau_m,
sigma=sigma, G=G, tau_z_pre_ampa=tau_z_pre_ampa, tau_z_post_ampa=tau_z_post_ampa, tau_p=tau_p,
z_transfer=z_transfer, diagonal_zero=diagonal_zero, strict_maximum=strict_maximum,
perfect=perfect, k_perfect=k_perfect, always_learning=always_learning,
normalized_currents=normalized_currents)
# Build the manager
manager = NetworkManager(nn=nn, dt=dt, values_to_save=values_to_save)
# Build the protocol for training
nn.w_ampa = w_timed
# Recall
patterns_indexes = [i for i in range(n_patterns)]
sequences = [patterns_indexes]
# manager.run_network_recall(T_recall=1.0, T_cue=0.100, I_cue=0, reset=True, empty_history=True)
aux = calculate_recall_time_quantities(manager, T_recall, T_cue, n, sequences)
total_sequence_time, mean, std, success, timings = aux
w_self, w_next, w_rest = get_weights(manager, from_pattern, to_pattern, mean=False)
success_vector[index] = success
persistence_time_vector[index] = mean
w_self_vector[index] = w_self
w_next_vector[index] = w_next
w_rest_vector[index] = w_rest
pi_self_vector[index] = pi[from_pattern] * pj[from_pattern]
Pij_self_vector[index] = P[from_pattern, from_pattern]
pi_next_vector[index] = pi[from_pattern] * pj[to_pattern]
Pij_next_vector[index] = P[to_pattern, from_pattern]
pi_rest_vector[index] = pi[from_pattern] * pj[to_pattern + 1]
Pij_rest_vector[index] = P[to_pattern + 1, from_pattern]
# Plot
filename = 'training_time_'
x_values = training_times
xlabel = r'Training Time (s)'
generate_plot_for_variable(filename, x_values, xlabel)
##############################
# tau_z
###############################
if plot_tau_z:
num = 15
tau_z_vector = np.linspace(0.025, 0.250, num=num)
success_vector = np.zeros(num)
persistence_time_vector = np.zeros(num)
    w_self_vector = np.zeros(num)
# -*- coding: utf-8 -*-
"""Test cases for the Square Exponential covariance function and its spatial gradient.
Testing is sparse at the moment. The C++ implementations are tested thoroughly (gpp_covariance_test.hpp/cpp) and
we rely more on :mod:`moe.tests.optimal_learning.python.cpp_wrappers.covariance_test`'s comparison
with C++ for verification of the Python code.
TODO(GH-175): Ping testing for spatial gradients and hyperparameter gradients/hessian.
TODO(GH-176): Make test structure general enough to support other covariance functions automatically.
"""
import numpy
import testify as T
from moe.optimal_learning.python.geometry_utils import ClosedInterval
from moe.optimal_learning.python.python_version.covariance import SquareExponential
from moe.optimal_learning.python.python_version.domain import TensorProductDomain
import moe.tests.optimal_learning.python.gaussian_process_test_utils as gp_utils
from moe.tests.optimal_learning.python.optimal_learning_test_case import OptimalLearningTestCase
class SquareExponentialTest(OptimalLearningTestCase):
"""Tests for the computation of the SquareExponential covariance and spatial gradient of covariance.
Tests cases are against manually verified results in various spatial dimensions and some ping tests.
"""
@T.class_setup
def base_setup(self):
"""Set up parameters for test cases."""
self.epsilon = 2.0 * numpy.finfo(numpy.float64).eps
self.CovarianceClass = SquareExponential
self.one_dim_test_sets = numpy.array([
[1.0, 0.1],
[2.0, 0.1],
[1.0, 1.0],
[0.1, 10.0],
[1.0, 1.0],
[0.1, 10.0],
])
self.three_dim_test_sets = numpy.array([
[1.0, 0.1, 0.1, 0.1],
[1.0, 0.1, 0.2, 0.1],
[1.0, 0.1, 0.2, 0.3],
[2.0, 0.1, 0.1, 0.1],
[2.0, 0.1, 0.2, 0.1],
[2.0, 0.1, 0.2, 0.3],
[0.1, 10.0, 1.0, 0.1],
[1.0, 10.0, 1.0, 0.1],
[10.0, 10.0, 1.0, 0.1],
[0.1, 10.0, 1.0, 0.1],
[1.0, 10.0, 1.0, 0.1],
[10.0, 10.0, 1.0, 0.1],
])
def test_square_exponential_covariance_one_dim(self):
"""Test the SquareExponential covariance function against correct values for different sets of hyperparameters in 1D."""
for hyperparameters in self.one_dim_test_sets:
signal_variance = hyperparameters[0]
length = hyperparameters[1]
covariance = self.CovarianceClass(hyperparameters)
# One length away
truth = signal_variance * numpy.exp(-0.5)
self.assert_scalar_within_relative(
covariance.covariance(numpy.array([0.0]), numpy.array(length)),
truth,
self.epsilon,
)
# Sym
self.assert_scalar_within_relative(
covariance.covariance(numpy.array(length), numpy.array([0.0])),
truth,
self.epsilon,
)
# One length * sqrt 2 away
truth = signal_variance * numpy.exp(-1.0)
self.assert_scalar_within_relative(
covariance.covariance(numpy.array([0.0]), numpy.array([length * numpy.sqrt(2)])),
truth,
self.epsilon,
)
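    # The expected values above are just the square exponential kernel written out:
    #   cov(x, x') = sigma_f^2 * exp(-0.5 * sum_d ((x_d - x'_d) / l_d)^2)
    # with sigma_f^2 = hyperparameters[0] (signal variance) and l_d = hyperparameters[1:]
    # (per-dimension length scales). Two points exactly one length scale apart therefore give
    # sigma_f^2 * exp(-0.5), and points sqrt(2) length scales apart give sigma_f^2 * exp(-1.0).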
def test_square_exponential_covariance_three_dim(self):
"""Test the SquareExponential covariance function against correct values for different sets of hyperparameters in 3D."""
for hyperparameters in self.three_dim_test_sets:
signal_variance = hyperparameters[0]
length = hyperparameters[1:]
covariance = self.CovarianceClass(hyperparameters)
self.assert_scalar_within_relative(
covariance.covariance(numpy.array([0.0, 0.0, 0.0]), numpy.array([0.0, 0.0, length[2]])),
signal_variance * numpy.exp(-0.5),
self.epsilon,
)
self.assert_scalar_within_relative(
covariance.covariance(numpy.array([0.0, 0.0, 0.0]), numpy.array([0.0, length[1], 0.0])),
signal_variance * numpy.exp(-0.5),
self.epsilon,
)
self.assert_scalar_within_relative(
covariance.covariance(numpy.array([0.0, 0.0, 0.0]), numpy.array([length[0], 0.0, 0.0])),
signal_variance * numpy.exp(-0.5),
self.epsilon,
)
self.assert_scalar_within_relative(
covariance.covariance(
numpy.array([0.0, 0.0, 0.0]),
numpy.array([
numpy.sqrt(3) / 3.0 * length[0],
numpy.sqrt(3) / 3.0 * length[1],
numpy.sqrt(3) / 3.0 * length[2],
]),
),
signal_variance * numpy.exp(-0.5),
self.epsilon,
)
# Sym
self.assert_scalar_within_relative(
covariance.covariance(
numpy.array([
numpy.sqrt(3) / 3.0 * length[0],
numpy.sqrt(3) / 3.0 * length[1],
numpy.sqrt(3) / 3.0 * length[2],
]),
numpy.array([0.0, 0.0, 0.0]),
),
signal_variance * numpy.exp(-0.5),
self.epsilon,
)
def test_square_exponential_grad_covariance_three_dim(self):
"""Test the SquareExponential grad_covariance function against correct values for different sets of hyperparameters in 3D."""
for hyperparameters in self.three_dim_test_sets:
length = hyperparameters[1:]
covariance = self.CovarianceClass(hyperparameters)
# Same point
truth = numpy.array([0.0, 0.0, 0.0])
grad_cov = covariance.grad_covariance(numpy.array([0.0, 0.0, 0.0]), numpy.array([0.0, 0.0, 0.0]))
self.assert_vector_within_relative(grad_cov, truth, 0.0)
# One length away
truth1 = numpy.array([
0.0,
0.0,
1.0 / length[2] * covariance.covariance(numpy.array([0.0, 0.0, 0.0]), numpy.array([0.0, 0.0, length[2]])),
])
grad_cov1 = covariance.grad_covariance(numpy.array([0.0, 0.0, 0.0]), numpy.array([0.0, 0.0, length[2]]))
self.assert_vector_within_relative(grad_cov1, truth1, self.epsilon)
# Sym is opposite
truth2 = truth1.copy()
truth2[2] *= -1.0
grad_cov2 = covariance.grad_covariance(numpy.array([0.0, 0.0, length[2]]), numpy.array([0.0, 0.0, 0.0]))
self.assert_vector_within_relative(grad_cov2, truth2, self.epsilon)
T.assert_equal(grad_cov1[2], -grad_cov2[2])
def test_hyperparameter_gradient_pings(self):
"""Ping test (compare analytic result to finite difference) the gradient wrt hyperparameters."""
h = 2.0e-3
tolerance = 4.0e-5
num_tests = 10
dim = 3
num_hyperparameters = dim + 1
hyperparameter_interval = ClosedInterval(3.0, 5.0)
domain = TensorProductDomain(ClosedInterval.build_closed_intervals_from_list([[-1.0, 1.0], [-1.0, 1.0], [-1.0, 1.0]]))
points1 = domain.generate_uniform_random_points_in_domain(num_tests)
points2 = domain.generate_uniform_random_points_in_domain(num_tests)
for i in xrange(num_tests):
point_one = points1[i, ...]
point_two = points2[i, ...]
covariance = gp_utils.fill_random_covariance_hyperparameters(
hyperparameter_interval,
num_hyperparameters,
covariance_type=self.CovarianceClass,
)
analytic_grad = covariance.hyperparameter_grad_covariance(point_one, point_two)
for k in xrange(covariance.num_hyperparameters):
hyperparameters_old = covariance.hyperparameters
# hyperparamter + h
hyperparameters_p = numpy.copy(hyperparameters_old)
hyperparameters_p[k] += h
covariance.hyperparameters = hyperparameters_p
cov_p = covariance.covariance(point_one, point_two)
covariance.hyperparameters = hyperparameters_old
# hyperparamter - h
                hyperparameters_m = numpy.copy(hyperparameters_old)
__author__ = 'third'
import numpy as np
class Flock(object):
def __init__(self,
flock_size=50,
formation_flying_distance=100,
formation_flying_strength=0.125,
alert_distance=10,
attraction_strength=0.01,
axes_min=-500,
axes_max=1500,
lower_position_limit=np.array([-450, 300]),
upper_position_limit=np.array([50, 600]),
lower_velocity_limit=np.array([0, -20]),
                 upper_velocity_limit=np.array([10, 20])
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Enable Python3 code in Python2 - Must be first in file!
from __future__ import print_function # print("text")
from __future__ import division # 2/3 == 0.666; 2//3 == 0
from __future__ import (
absolute_import,
) # 'import submodule2' turns into 'from . import submodule2'
from builtins import range # replaces range with xrange
from loguru import logger
# import logging
#
# logger = logging.getLogger(__name__)
import io, os
import json
import copy
import re
import numpy as np
import scipy
import scipy.ndimage
import skimage.transform
import skimage.morphology
import skimage.measure
import skimage.segmentation
import io3d
# dont display some anoying warnings
import warnings
warnings.filterwarnings("ignore", ".* scipy .* output shape of zoom.*")
###########################################
# Crop/Pad/Fraction
###########################################
def getDataPadding(data):
"""
Returns counts of zeros at the end and start of each axis of N-dim array
Output for 3D data: [ [pad_start,pad_end], [pad_start,pad_end], [pad_start,pad_end] ]
"""
ret_l = []
for dim in range(len(data.shape)):
widths = []
s = []
for dim_s in range(len(data.shape)):
s.append(slice(0, data.shape[dim_s]))
for i in range(data.shape[dim]):
s[dim] = i
widths.append(np.sum(data[tuple(s)]))
widths = np.asarray(widths).astype(np.bool)
pad = [np.argmax(widths), np.argmax(widths[::-1])] # [pad_before, pad_after]
ret_l.append(pad)
return ret_l
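# Illustrative sketch (not part of the original file); the mask below is hypothetical.
# One empty slice before and two empty slices after the object along axis 0:
#   >>> m = np.zeros((6, 4, 4)); m[1:4, :, :] = 1
#   >>> getDataPadding(m)
#   [[1, 2], [0, 0], [0, 0]]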
def cropArray(
data, pads, padding_value=0
): # TODO - skimage.util.crop; convergent programming is funny
"""
Removes/Adds specified number of values at start and end of every axis of N-dim array
Input: [ [pad_start,pad_end], [pad_start,pad_end], [pad_start,pad_end] ]
Positive values crop, Negative values pad.
"""
pads = [[-p[0], -p[1]] for p in pads]
return padArray(data, pads, padding_value=padding_value)
def padArray(data, pads, padding_value=0): # TODO - skimage.util.pad
"""
Removes/Adds specified number of values at start and end of every axis of N-dim array
Input: [ [pad_start,pad_end], [pad_start,pad_end], [pad_start,pad_end] ]
Positive values pad, Negative values crop.
"""
crops = [[-min(0, p[0]), -min(0, p[1])] for p in pads]
pads = [[max(0, p[0]), max(0, p[1])] for p in pads]
# cropping
s = []
for dim in range(len(data.shape)):
s.append(slice(crops[dim][0], data.shape[dim] - crops[dim][1]))
data = data[tuple(s)]
# padding
full_shape = np.asarray(data.shape) + np.asarray(
[np.sum(pads[dim]) for dim in range(len(pads))]
)
out = (np.zeros(full_shape, dtype=data.dtype) + padding_value).astype(data.dtype)
s = []
for dim in range(len(data.shape)):
s.append(slice(pads[dim][0], out.shape[dim] - pads[dim][1]))
out[tuple(s)] = data
return out
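# Illustrative sketch (not part of the original file); values are hypothetical.
# Positive entries pad with zeros: one row at the start of axis 0, two columns at the end of axis 1.
#   >>> padArray(np.ones((2, 2)), [[1, 0], [0, 2]]).shape
#   (3, 4)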
def getDataFractions(data2d, fraction_defs=[], mask=None, return_slices=False):
"""
Returns views (in tuple) on 2D array defined by percentages of width and height
fraction_defs - [{"h":(3/4,1),"w":(0,1)},...]
mask - used for calculation of width and height based on segmented data
return_slices - if True returns slice() tuples instead of views into array
"""
if mask is None:
height = data2d.shape[0]
height_offset = 0
width = data2d.shape[1]
width_offset = 0
elif np.sum(mask) == 0:
height = 0
height_offset = 0
width = 0
width_offset = 0
else:
pads = getDataPadding(mask)
height = data2d.shape[0] - pads[0][1] - pads[0][0]
height_offset = pads[0][0]
width = data2d.shape[1] - pads[1][1] - pads[1][0]
width_offset = pads[1][0]
def get_index(length, offset, percent):
return offset + int(np.round(length * percent))
fractions = []
slices = []
for fd in fraction_defs:
h_s = slice(
get_index(height, height_offset, fd["h"][0]),
get_index(height, height_offset, fd["h"][1]) + 1,
)
w_s = slice(
get_index(width, width_offset, fd["w"][0]),
get_index(width, width_offset, fd["w"][1]) + 1,
)
slices.append((h_s, w_s))
fractions.append(data2d[(h_s, w_s)])
r = slices if return_slices else fractions
if len(r) == 1:
return r[0]
else:
return tuple(r)
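# Illustrative sketch (not part of the original file); the array shape is hypothetical.
# Selecting the bottom quarter and left half of a 100x200 image (no mask, so the full
# extent is used for the width/height computation):
#   >>> frac = getDataFractions(np.zeros((100, 200)), fraction_defs=[{"h": (0.75, 1), "w": (0, 0.5)}])
#   >>> frac.shape
#   (25, 101)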
###########################################
# Resize
###########################################
def resizeScipy(data, toshape, order=1, mode="reflect", cval=0):
"""
Resize array to shape with scipy.ndimage.zoom
Use this on big data, because skimage.transform.resize consumes absurd amount of RAM memory
(many times size of input array), while scipy.ndimage.zoom consumes none.
scipy.ndimage.zoom also keeps correct dtype of output array.
    Output can be a bit (or VERY) wrong, and there are a lot of minor bugs:
https://github.com/scipy/scipy/issues/7324
https://github.com/scipy/scipy/issues?utf8=%E2%9C%93&q=is%3Aopen%20is%3Aissue%20label%3Ascipy.ndimage%20zoom
"""
order = 0 if (data.dtype == np.bool) else order # for masks
zoom = np.asarray(toshape, dtype=np.float) / np.asarray(data.shape, dtype=np.float)
data = scipy.ndimage.zoom(data, zoom=zoom, order=order, mode=mode, cval=cval)
if np.any(data.shape != toshape):
logger.error(
"Wrong output shape of zoom: %s != %s" % (str(data.shape), str(toshape))
)
return data
def resizeSkimage(data, toshape, order=1, mode="reflect", cval=0):
"""
Resize array to shape with skimage.transform.resize
Eats memory like crazy (many times size of input array), but very good results.
"""
dtype = data.dtype # remember correct dtype
data = skimage.transform.resize(
data, toshape, order=order, mode=mode, cval=cval, clip=True, preserve_range=True
)
# fix dtype after skimage.transform.resize
if (data.dtype != dtype) and (dtype in [np.bool, np.integer]):
data = np.round(data).astype(dtype)
elif data.dtype != dtype:
data = data.astype(dtype)
return data
# TODO - test resize version with RegularGridInterpolator, (only linear and nn order)
# https://scipy.github.io/devdocs/generated/scipy.interpolate.RegularGridInterpolator.html
# https://stackoverflow.com/questions/30056577/correct-usage-of-scipy-interpolate-regulargridinterpolator
def resize(data, toshape, order=1, mode="reflect", cval=0):
return resizeScipy(data, toshape, order=order, mode=mode, cval=cval)
def resizeWithUpscaleNN(data, toshape, order=1, mode="reflect", cval=0):
"""
All upscaling is done with 0 order interpolation (Nearest-neighbor) to prevent ghosting effect.
(Examples of ghosting effect can be seen for example in 3Dircadb1.19)
Any downscaling is done with given interpolation order.
If input is binary mask (np.bool) order=0 is forced.
"""
# calc both resize shapes
scale = np.asarray(data.shape, dtype=np.float) / np.asarray(toshape, dtype=np.float)
downscale_shape = np.asarray(toshape, dtype=np.int).copy()
    # keep the original size (for now) on axes that need upscaling, so that the
    # upscale itself is done with nearest-neighbor in the second resize below
    if scale[0] < 1.0:
        downscale_shape[0] = data.shape[0]
    if scale[1] < 1.0:
        downscale_shape[1] = data.shape[1]
    if scale[2] < 1.0:
        downscale_shape[2] = data.shape[2]
upscale_shape = np.asarray(toshape, dtype=np.int).copy()
# downscale with given interpolation order
data = resize(data, downscale_shape, order=order, mode=mode, cval=cval)
# upscale with 0 order interpolation
if not np.all(downscale_shape == upscale_shape):
data = resize(data, upscale_shape, order=0, mode=mode, cval=cval)
return data
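# Illustrative sketch (not part of the original file); shapes are hypothetical.
# Here the z axis is shrunk with the requested interpolation order in the first pass,
# while the in-plane upscaling is deferred to the nearest-neighbor pass to avoid ghosting:
#   >>> vol = np.random.rand(50, 100, 100)
#   >>> resizeWithUpscaleNN(vol, (25, 200, 200), order=1).shape
#   (25, 200, 200)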
###########################################
# Segmentation
###########################################
def getSphericalMask(size=5, spacing=[1, 1, 1]):
""" Size is in mm """
shape = (
np.asarray([size] * 3, dtype=np.float) / np.asarray(spacing, dtype=np.float)
).astype(np.int)
shape[shape < 1] = 1
mask = skimage.morphology.ball(51, dtype=np.float)
mask = resizeSkimage(mask, shape, order=1, mode="edge", cval=0) > 0.001
return mask
def getDiskMask(size=5, spacing=[1, 1, 1]):
""" Size is in mm """
shape = (
np.asarray([size] * 3, dtype=np.float) / np.asarray(spacing, dtype=np.float)
).astype(np.int)
shape[shape < 1] = 1
shape[0] = 1
mask = np.expand_dims(skimage.morphology.disk(51, dtype=np.bool), axis=0)
mask = resizeSkimage(mask, shape, order=1, mode="edge", cval=0) > 0.001
return mask
def binaryClosing(data, structure, cval=0):
"""
Does scipy.ndimage.morphology.binary_closing() without losing data near borders
Big sized structures can make this take a long time
"""
padding = np.max(structure.shape)
tmp = (
np.zeros(np.asarray(data.shape) + padding * 2, dtype=data.dtype) + cval
).astype(np.bool)
tmp[padding:-padding, padding:-padding, padding:-padding] = data
tmp = scipy.ndimage.morphology.binary_closing(tmp, structure=structure)
return tmp[padding:-padding, padding:-padding, padding:-padding]
def binaryFillHoles(data, z_axis=False, y_axis=False, x_axis=False):
"""
Does scipy.ndimage.morphology.binary_fill_holes() as if at the start and end of [z/y/x]-axis is solid wall
"""
if not (z_axis or x_axis or y_axis):
return scipy.ndimage.morphology.binary_fill_holes(data)
# fill holes on z-axis
if z_axis:
tmp = np.ones((data.shape[0] + 2, data.shape[1], data.shape[2]))
tmp[1:-1, :, :] = data
tmp = scipy.ndimage.morphology.binary_fill_holes(tmp)
data = tmp[1:-1, :, :]
# fill holes on y-axis
if y_axis:
tmp = np.ones((data.shape[0], data.shape[1] + 2, data.shape[2]))
tmp[:, 1:-1, :] = data
tmp = scipy.ndimage.morphology.binary_fill_holes(tmp)
data = tmp[:, 1:-1, :]
# fill holes on x-axis
if x_axis:
tmp = np.ones((data.shape[0], data.shape[1], data.shape[2] + 2))
tmp[:, :, 1:-1] = data
tmp = scipy.ndimage.morphology.binary_fill_holes(tmp)
data = tmp[:, :, 1:-1]
return data
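# Illustrative sketch (not part of the original file); the array below is hypothetical.
# A hollow tube open at both z ends is not filled by a plain fill-holes call (its core is
# reachable from the volume border), but with z_axis=True the ends act as solid walls:
#   >>> tube = np.zeros((4, 5, 5), dtype=bool)
#   >>> tube[:, 1:4, 1:4] = True
#   >>> tube[:, 2, 2] = False                          # 1-voxel core running along z
#   >>> binaryFillHoles(tube)[0, 2, 2]                 # False - core stays open
#   >>> binaryFillHoles(tube, z_axis=True)[0, 2, 2]    # True - core gets filled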
def regionGrowing(data3d, seeds, mask, spacing=None, max_dist=-1, mode="watershed"):
"""
Does not ignore 'geography' of data when calculating 'distances' growing regions.
Has 2 modes, 'random_walker' and 'watershed'.
data3d - data3d or binary mask to be segmented
seeds - seeds, are converted to np.int8
mask - extremely important for 'random_walker', accidentally processing whole data eats 10s of GB.
spacing - voxel spacing, if None cube spacing is assumed.
max_dist - tries to limit maximal growth distance from seeds (ignores 'geography of data')
mode - 'random_walker'/'watershed'
'random_walker' mode is based on diffusion of probability.
Should not ignore brightness of pixels (I think?) - different brightness == harder diffusion
A lot more memory required then 'watershed'. (1.7GB vs 4.2GB MAXMEM used)
'watershed' mode based on filling hypothetical basins in data with liquid.
In problem of segmentation in CT data, is only useful in very specific situations.
(grayscale data3d doesnt work in very useful way with this).
If used together with spacing parameter, a lot more memory is required (1.7GB vs 4.3GB MAXMEM used).
Lowest possible used memory is when mode='watershed' and spacing=None
"""
# note - large areas that are covered by seeds do not increase memory requirements
# (works almost as if they had mask == 0)
seeds = seeds.astype(np.int8).copy()
mask = mask.copy()
# limit max segmentation distance
if max_dist > 0:
mask[
scipy.ndimage.morphology.distance_transform_edt(
seeds == 0, sampling=spacing
)
> max_dist
] = 0
# remove sections in mask that are not connected to any seeds # TODO - test if this lowers memory requirements
mask = skimage.measure.label(mask, background=0)
tmp = mask.copy()
tmp[seeds == 0] = 0
for l in np.unique(tmp)[1:]:
mask[mask == l] = -1
mask = mask == -1
del tmp
# if only one seed, return everything connected to it (done in last step).
unique = np.unique(seeds)[1:]
if len(unique) == 1:
return mask.astype(np.int8) * unique[0]
# segmentation
if mode not in ["random_walker", "watershed"]:
logger.warning(
"Invalid region growing mode '%s', defaulting to 'random_walker'"
% str(mode)
)
mode = "random_walker"
if mode == "random_walker":
seeds[mask == 0] = -1
seeds = skimage.segmentation.random_walker(
data3d, seeds, mode="cg_mg", copy=False, spacing=spacing
)
seeds[seeds == -1] = 0
elif (
mode == "watershed"
): # TODO - maybe more useful if edge filter is done first, when using grayscale data??
# resize data to cube spacing
if spacing is not None:
shape_orig = data3d.shape
shape_cube = np.asarray(data3d.shape, dtype=np.float) * np.asarray(
spacing, dtype=np.float
) # 1x1x1mm
shape_cube = (shape_cube / np.min(spacing)).astype(
np.int
) # upscale target size, so there is no loss in quality
order = 0 if (data3d.dtype == np.bool) else 1 # for masks
data3d = resize(data3d, shape_cube, order=order, mode="reflect")
mask = resize(mask, shape_cube, order=0, mode="reflect")
tmp = seeds
seeds = np.zeros(shape_cube, dtype=seeds.dtype)
for s in np.unique(tmp)[1:]:
seeds[resize(tmp == s, shape_cube, order=0, mode="reflect")] = s
del tmp
seeds = skimage.morphology.watershed(data3d, seeds, mask=mask)
# resize back to original spacing/shape
if spacing is not None:
tmp = seeds
seeds = np.zeros(shape_orig, dtype=seeds.dtype)
for s in np.unique(tmp)[1:]:
seeds[resize(tmp == s, shape_orig, order=0, mode="reflect")] = s
return seeds
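# Illustrative sketch (not part of the original file); data3d, seeds and mask are hypothetical
# arrays of identical shape, with seeds holding integer labels (0 = unlabeled).
#   >>> seg = regionGrowing(data3d, seeds, mask, spacing=[3.0, 0.8, 0.8], max_dist=30, mode="watershed")
#   >>> np.unique(seg)    # 0 = outside mask, otherwise the seed label that claimed each voxel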
###################
# Memory saving
###################
def compressArray(mask):
""" Compresses numpy array from RAM to RAM """
mask_comp = io.BytesIO()
    np.savez_compressed(mask_comp, mask)
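    # Reading such a buffer back is a standard numpy round-trip (sketch, assuming the caller
    # keeps or is handed back the BytesIO object):
    #   >>> mask_comp.seek(0)
    #   >>> restored = np.load(mask_comp)['arr_0']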
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 17 13:23:03 2018
@author: amaity
This module is primarily used to validate/compare
the estimated PDF with the actual PDF for each
phase as well as for the total execution
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import ptss_utils as ptsl
import timeit
from pylab import meshgrid
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
from scipy import stats
def compute_pdf_distance_v1():
"""
    An improved version of compute_mean_diff1.
    Computes the difference in mean (and std) between the estimated
    subframe execution time distribution and the actual
    subframe execution time distribution.
    The PDFs of the subframe execution time are computed
    using method-1.
"""
shape = (ptsl.W,ptsl.M,2) # Workload, Cores and Mean estimated, Mean actual and Number of Cores
ret_mean = np.full(shape,-1.7) # for storing the mean
ret_std = np.full(shape,-1.7) # for storing the std
# The pdfs are already computed, load them from the pdf db file
ph1_table = np.load("pdf-risk-db3/ph1db.npy")
ph2_table = np.load("pdf-risk-db3/ph2db.npy")
ph3_table = np.load("pdf-risk-db3/ph3db.npy")
ph4_table = np.load("pdf-risk-db3/ph4db.npy")
i1_table = np.load("pdf-risk-db3/i1db.npy")
i2_table = np.load("pdf-risk-db3/i2db.npy")
crc_table = np.load("pdf-risk-db3/crcdb.npy")
for w in range(89,100):
i1 = ptsl.etPDF(i1_table[w,:],ptsl.BINS)
i2 = ptsl.etPDF(i2_table[w,:],ptsl.BINS)
crc = ptsl.etPDF(crc_table[w,:],ptsl.BINS)
#print(pd.DataFrame(i2_table[w,:]))
#return
for m in range(1,ptsl.M):
start_time = timeit.default_timer()
# Retrieve the PDFs of all the phases
pdf1 = ptsl.etPDF(ph1_table[w,m,:],ptsl.BINS)
pdf2 = ptsl.etPDF(ph2_table[w,m,:],ptsl.BINS)
pdf3 = ptsl.etPDF(ph3_table[w,m,:],ptsl.BINS)
pdf4 = ptsl.etPDF(ph4_table[w,m,:],ptsl.BINS)
# Compose the execution time distribution
sfet = pdf1 + pdf1 + pdf2 + pdf2 + i1 + i1 + pdf3 + pdf3 + i2 + pdf4 + crc
ret_mean[w,m,0] = sfet.mean()
ret_std[w,m,0] = sfet.std()
print(sfet)
# (Actual Distribution)
tmp = pd.read_csv("/home/amaity/Desktop/Datasets/ptss-raw-execution-data/ecolab-knl-2018-10-28/alloc_prbs-"+str(w+1)+"_cores-"+str(m+1)+"/dataset_sf.csv")
c2 = (tmp['ExecutionTime'].values) * 1000.0
# Median Filtering
m2 = np.median(c2)
c4 = list(filter((lambda x : abs(x-m2) < 5*m2),c2))
ret_mean[w,m,1] = np.mean(c4)
ret_std[w,m,1] = np.std(c4)
print("Actual Distribution Mean : %f, std %f" % (np.mean(c4),np.std(c4)))
# Also compute the error
err_mean = abs(ret_mean[w,m,0] - ret_mean[w,m,1])*100/ret_mean[w,m,0]
err_std = abs(ret_std[w,m,0] - ret_std[w,m,1])*100/ret_std[w,m,0]
elapsed = timeit.default_timer() - start_time
print("Error mean : %.2f, std : %.2f"%(err_mean,err_std))
print("Computed discrepancy for %d prbs on %d cores in %f seconds\n\n"%(w+1,m,elapsed))
np.save("pdf-discrepancy-mean.npy",ret_mean)
np.save("pdf-discrepancy-std.npy",ret_std)
#return ret
def compute_pdf_distance_v2():
"""
    The PDFs of the subframe execution time are composed
    from each phase using method-2.
"""
shape = (ptsl.W,ptsl.M,2) # Workload, Cores and Mean estimated, Mean actual and Number of Cores
ret_mean = np.full(shape,-1.7) # for storing the mean
ret_std = np.full(shape,-1.7) # for storing the std
# The pdfs are already computed, load them from the pdf db file
ph1s1_table = np.load("pdf-db3-v2/ph1s1db.npy")
ph2s1_table = np.load("pdf-db3-v2/ph2s1db.npy")
i1s1_table = np.load("pdf-db3-v2/i1s1db.npy")
ph3s1_table = np.load("pdf-db3-v2/ph3s1db.npy")
ph1s2_table = np.load("pdf-db3-v2/ph1s2db.npy")
ph2s2_table = np.load("pdf-db3-v2/ph2s2db.npy")
ph3s2_table = np.load("pdf-db3-v2/ph3s2db.npy")
i1s2_table = np.load("pdf-db3-v2/i1s2db.npy")
i2_table = np.load("pdf-db3-v2/i2db.npy")
ph4_table = np.load("pdf-db3-v2/ph4db.npy")
crc_table = np.load("pdf-db3-v2/crcdb.npy")
for w in range(89,100):
for m in range(1,ptsl.M):
start_time = timeit.default_timer()
# Retrieve the PDFs of all the phases
pdf1s1 = ptsl.etPDF(ph1s1_table[w,m,:],ptsl.BINS)
pdf2s1 = ptsl.etPDF(ph2s1_table[w,m,:],ptsl.BINS)
i1s1 = ptsl.etPDF(i1s1_table[w,m,:],ptsl.BINS)
pdf3s1 = ptsl.etPDF(ph3s1_table[w,m,:],ptsl.BINS)
pdf1s2 = ptsl.etPDF(ph1s2_table[w,m,:],ptsl.BINS)
pdf2s2 = ptsl.etPDF(ph2s2_table[w,m,:],ptsl.BINS)
i1s2 = ptsl.etPDF(i1s2_table[w,m,:],ptsl.BINS)
pdf3s2 = ptsl.etPDF(ph3s2_table[w,m,:],ptsl.BINS)
i2 = ptsl.etPDF(i2_table[w,m,:],ptsl.BINS)
pdf4 = ptsl.etPDF(ph4_table[w,m,:],ptsl.BINS)
crc = ptsl.etPDF(crc_table[w,m,:],ptsl.BINS)
# Compose the execution time distribution
sfet = pdf1s1 + pdf1s2 + pdf2s1 + pdf2s2 + i1s1 + i1s2 + pdf3s1 + pdf3s2 + i2 + pdf4 + crc
ret_mean[w,m,0] = sfet.mean()
ret_std[w,m,0] = sfet.std()
#print(sfet)
# (Actual Distribution)
tmp = pd.read_csv("/home/amaity/Desktop/Datasets/ptss-raw-execution-data/ecolab-knl-2018-10-28/alloc_prbs-"+str(w+1)+"_cores-"+str(m+1)+"/dataset_sf.csv")
c2 = (tmp['ExecutionTime'].values) * 1000.0
# Median Filtering
m2 = np.median(c2)
c4 = list(filter((lambda x : abs(x-m2) < 5*m2),c2))
ret_mean[w,m,1] = np.mean(c4)
ret_std[w,m,1] = np.std(c4)
#print("Actual Distribution Mean : %f, std %f" % (np.mean(c4),np.std(c4)))
# # Plot the pdf
# fig,axes = plt.subplots(nrows=1, ncols=2, figsize=(10, 5))
# # Actual (PDF)
# axes[0].set_title("Actual PDF")
# axes[0].hist(c4,bins=ptsl.BINS,color='blue')
# axes[0].set_ylabel("Probabilty Distribution")
# axes[0].set_xlabel("Execution Time (us)")
# axes[0].set_xlim(500,5000)
# # Estimated (PDF)
# axes[1].set_title("Estimated PDF")
# axes[1].plot(sfet.xp,sfet.pdf(sfet.xp),color='black')
# axes[1].set_ylabel("Probabilty Distribution")
# axes[1].set_xlabel("Execution Time (us)")
# axes[1].set_xlim(500,5000)
# #axes[1].set_xlim(0.7,3)
# Also compute the error
err_mean = abs(ret_mean[w,m,0] - ret_mean[w,m,1])*100/ret_mean[w,m,0]
err_std = abs(ret_std[w,m,0] - ret_std[w,m,1])*100/ret_std[w,m,0]
elapsed = timeit.default_timer() - start_time
print("Error mean : %.2f, std : %.2f"%(err_mean,err_std))
print("Computed discrepancy for %d prbs on %d cores in %f seconds\n\n"%(w+1,m,elapsed))
np.save("pdf-discrepancy-mean.npy",ret_mean)
np.save("pdf-discrepancy-std.npy",ret_std)
#return ret
def plot_err(file2,ext):
"""
    Plot the error between the actual
    and estimated subframe execution times
"""
prb = np.array(range(1,ptsl.W+1)) # 1-100 (100 values)
alloc = np.array(range(2,ptsl.M+1)) # 2-26 (25 values)
X,Y = meshgrid(prb,alloc)
    stat = np.load(file2)
import math
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import pylab
########################################################################
# 2017 - <NAME>
# Purpose of this module is to visualise a list of images from the CIFAR dataset
# How many columns to show in a grid
MAX_COLS = 5
# PlotImages takes a list of images and their respective labels in the second parameter,
# then renders them using matplotlib's imshow method in a 5-column grid
def PlotImages(arrayImages,arrayClassLabels,reShapeRequired=False):
totalImages=len(arrayImages)
if(reShapeRequired==True):
        arrayImages = np.reshape(arrayImages, (totalImages,32,32,3))
# -*- coding: utf-8 -*-
# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
# holder of all proprietary rights on this computer program.
# You can only use this computer program if you have closed
# a license agreement with MPG or you get the right to use the computer
# program from someone who is authorized to grant you that right.
# Any use of the computer program without a valid license is prohibited and
# liable to prosecution.
#
# Copyright©2019 Max-Planck-Gesellschaft zur Förderung
# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
# for Intelligent Systems. All rights reserved.
#
# Contact: <EMAIL>
import cv2
import math
import time
import torch
import trimesh
import pyrender
import numpy as np
from matplotlib import pyplot as plt
from lib.data_utils import kp_utils
from lib.models.smpl import SMPL, SMPL_MODEL_DIR, get_smpl_faces
from lib.data_utils.img_utils import torch2numpy, torch_vid2numpy, normalize_2d_kp
class WeakPerspectiveCamera(pyrender.Camera):
def __init__(self,
scale,
translation,
znear=pyrender.camera.DEFAULT_Z_NEAR,
zfar=None,
name=None):
super(WeakPerspectiveCamera, self).__init__(
znear=znear,
zfar=zfar,
name=name,
)
self.scale = scale
self.translation = translation
def get_projection_matrix(self, width=None, height=None):
P = np.eye(4)
P[0, 0] = self.scale
P[1, 1] = self.scale
P[0, 3] = self.translation[0] * self.scale
P[1, 3] = -self.translation[1] * self.scale
P[2, 2] = -1
return P
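# Note on the projection above: this is a weak-perspective model. A vertex (X, Y, Z) is mapped
# to roughly (s * (X + t_x), s * (Y - t_y)) on the image plane, i.e. a single uniform scale s
# plus a 2D translation, with no division by depth; Z is only kept (sign-flipped via P[2, 2] = -1)
# so the renderer can still do depth testing.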
def get_colors():
colors = {
'pink': np.array([197, 27, 125]), # L lower leg
'light_pink': np.array([233, 163, 201]), # L upper leg
'light_green': np.array([161, 215, 106]), # L lower arm
'green': np.array([77, 146, 33]), # L upper arm
'red': np.array([215, 48, 39]), # head
'light_red': np.array([252, 146, 114]), # head
'light_orange': np.array([252, 141, 89]), # chest
'purple': np.array([118, 42, 131]), # R lower leg
'light_purple': np.array([175, 141, 195]), # R upper
'light_blue': np.array([145, 191, 219]), # R lower arm
'blue': np.array([69, 117, 180]), # R upper arm
'gray': np.array([130, 130, 130]), #
'white': np.array([255, 255, 255]), #
}
return colors
def render_image(img, verts, cam, faces=None, angle=None, axis=None, resolution=224, output_fn=None):
if faces is None:
faces = get_smpl_faces()
mesh = trimesh.Trimesh(vertices=verts, faces=faces)
Rx = trimesh.transformations.rotation_matrix(math.radians(180), [1, 0, 0])
mesh.apply_transform(Rx)
if angle and axis:
R = trimesh.transformations.rotation_matrix(math.radians(angle), axis)
mesh.apply_transform(R)
if output_fn:
mesh.export(output_fn)
camera_translation = np.array([-cam[1], cam[2], 2 * 5000. / (img.shape[0] * cam[0] + 1e-9)])
np.save(output_fn.replace('.obj', '.npy'), camera_translation)
# Save the rotated mesh
# R = trimesh.transformations.rotation_matrix(math.radians(270), [0,1,0])
# rotated_mesh = mesh.copy()
# rotated_mesh.apply_transform(R)
# rotated_mesh.export(output_fn.replace('.obj', '_rot.obj'))
scene = pyrender.Scene(bg_color=[0.0, 0.0, 0.0, 0.0],
ambient_light=(0.3, 0.3, 0.3)
)
material = pyrender.MetallicRoughnessMaterial(
metallicFactor=0.0,
alphaMode='OPAQUE',
baseColorFactor=(1.0, 1.0, 0.9, 1.0)
)
mesh = pyrender.Mesh.from_trimesh(mesh, material=material)
scene.add(mesh, 'mesh')
camera_pose = np.eye(4)
camera = WeakPerspectiveCamera(
scale=cam[0],
translation=cam[1:],
zfar=1000.
)
scene.add(camera, pose=camera_pose)
light = pyrender.PointLight(color=[1.0, 1.0, 1.0], intensity=1)
light_pose = np.eye(4)
light_pose[:3, 3] = [0, -1, 1]
scene.add(light, pose=light_pose)
light_pose[:3, 3] = [0, 1, 1]
scene.add(light, pose=light_pose)
light_pose[:3, 3] = [1, 1, 2]
scene.add(light, pose=light_pose)
r = pyrender.OffscreenRenderer(viewport_width=resolution,
viewport_height=resolution,
point_size=1.0)
color, _ = r.render(scene, flags=pyrender.RenderFlags.RGBA)
# color = color[:, ::-1, :]
valid_mask = (color[:, :, -1] > 0)[:, :, np.newaxis]
output_img = color[:, :, :-1] * valid_mask + (1 - valid_mask) * img
image = output_img.astype(np.uint8)
text = f's: {cam[0]:.2f}, tx: {cam[1]:.2f}, ty: {cam[2]:.2f}'
cv2.putText(image, text, (5, 10), 0, 0.4, color=(0,255,0))
return image
def draw_SMPL_joints2D(joints2D, image, kintree_table=None, color='red'):
rcolor = get_colors()['red'].tolist()
lcolor = get_colors()['blue'].tolist()
# color = get_colors()[color].tolist()
for i in range(1, kintree_table.shape[1]):
j1 = kintree_table[0][i]
j2 = kintree_table[1][i]
color = lcolor if i % 2 == 0 else rcolor
pt1, pt2 = (joints2D[j1, 0], joints2D[j1, 1]), (joints2D[j2, 0], joints2D[j2, 1])
cv2.line(image, pt1=pt1, pt2=pt2, color=color, thickness=2)
cv2.circle(image, pt1, 4, color, -1)
cv2.circle(image, pt2, 4, color, -1)
# for i in range(joints2D.shape[0]):
# color = lcolor if i % 2 == 0 else rcolor
# pt1 = (joints2D[i, 0], joints2D[i, 1])
# cv2.circle(image, pt1, 4, color, -1)
return image
def show3Dpose(channels, ax, radius=40, lcolor='#ff0000', rcolor='#0000ff'):
vals = channels
connections = [[0, 1], [1, 2], [2, 3], [0, 4], [4, 5],
[5, 6], [0, 7], [7, 8], [8, 9], [9, 10],
[8, 11], [11, 12], [12, 13], [8, 14], [14, 15], [15, 16]]
LR = np.array([0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0], dtype=bool)
for ind, (i,j) in enumerate(connections):
x, y, z = [np.array([vals[i, c], vals[j, c]]) for c in range(3)]
ax.plot(x, y, z, lw=2, c=lcolor if LR[ind] else rcolor)
RADIUS = radius # space around the subject
xroot, yroot, zroot = vals[0, 0], vals[0, 1], vals[0, 2]
ax.set_xlim3d([-RADIUS + xroot, RADIUS + xroot])
ax.set_zlim3d([-RADIUS + zroot, RADIUS + zroot])
ax.set_ylim3d([-RADIUS + yroot, RADIUS + yroot])
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_zlabel("z")
def visualize_sequence(sequence):
seqlen, size = sequence.shape
sequence = sequence.reshape((seqlen, -1, 3))
fig = plt.figure(figsize=(12, 7))
for i in range(seqlen):
ax = fig.add_subplot('111', projection='3d', aspect=1)
show3Dpose(sequence[i], ax, radius=0.6)
ax.view_init(-75, -90)
plt.draw()
plt.pause(0.01)
plt.cla()
plt.close()
def visualize_preds(image, preds, target=None, target_exists=True, dataset='common', vis_hmr=False):
with torch.no_grad():
if isinstance(image, torch.Tensor):
image = torch2numpy(image)
# import random
# image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# cv2.imwrite(f'sample_images/{random.randint(0,100)}.jpg', image)
pred_theta = preds['theta']
pred_camera = pred_theta[:3]
# pred_pose = pred_theta[3:75]
# pred_shape = pred_theta[75:]
pred_kp_2d = preds['kp_2d']
pred_verts = preds['verts']
if target_exists:
target_kp_2d = target['kp_2d']
pred_kp_2d = np.concatenate([pred_kp_2d, np.ones((pred_kp_2d.shape[0], 1))], axis=-1)
faces = get_smpl_faces()
pred_image = draw_skeleton(image.copy(), pred_kp_2d, dataset=dataset)
if target_exists:
if vis_hmr:
target_verts = target['verts']
target_cam = target['cam']
target_image = render_image(
img=image.copy(),
verts=target_verts,
faces=faces,
cam=target_cam
)
else:
target_image = draw_skeleton(image.copy(), target_kp_2d, dataset=dataset)
render = render_image(
img=image.copy(),
verts=pred_verts,
faces=faces,
cam=pred_camera
)
white_img = np.zeros_like(image)
render_side = render_image(
img=white_img.copy(),
verts=pred_verts,
faces=faces,
cam=pred_camera,
angle=90,
axis=[0,1,0]
)
if target_exists:
result_image = np.hstack([image, pred_image, target_image, render, render_side])
else:
result_image = np.hstack([image, pred_image, render, render_side])
return result_image
def batch_visualize_preds(images, preds, target=None, max_images=16, idxs=None,
target_exists=True, dataset='common'):
if max_images is None or images.shape[0] < max_images:
max_images = images.shape[0]
# preds = preds[-1] # get the final output
with torch.no_grad():
for k, v in preds.items():
if isinstance(preds[k], torch.Tensor):
preds[k] = v.detach().cpu().numpy()
if target_exists:
for k, v in target.items():
if isinstance(target[k], torch.Tensor):
target[k] = v.cpu().numpy()
result_images = []
indexes = range(max_images) if idxs is None else idxs
for idx in indexes:
single_pred = {}
for k, v in preds.items():
single_pred[k] = v[idx]
if target_exists:
single_target = {}
for k, v in target.items():
single_target[k] = v[idx]
else:
single_target = None
img = visualize_preds(images[idx], single_pred, single_target, target_exists,
dataset=dataset)
result_images.append(img)
result_image = np.vstack(result_images)
return result_image
def batch_visualize_vid_preds(video, preds, target, max_video=4, vis_hmr=False, dataset='common'):
with torch.no_grad():
if isinstance(video, torch.Tensor):
video = torch_vid2numpy(video) # NTCHW
video = np.transpose(video, (0, 1, 3, 4, 2))[:max_video] # NTCHW->NTHWC
batch_size, tsize = video.shape[:2]
if vis_hmr:
features = target['features']
target_verts, target_cam = get_regressor_output(features)
target['verts'] = target_verts
target['cam'] = target_cam
with torch.no_grad():
for k, v in preds.items():
if isinstance(preds[k], torch.Tensor):
preds[k] = v.cpu().numpy()[:max_video]
for k, v in target.items():
if isinstance(target[k], torch.Tensor):
target[k] = v.cpu().numpy()[:max_video]
batch_videos = [] # NTCHW*4
for batch_id in range(batch_size):
result_video = [] #TCHW*4
for t_id in range(tsize):
image = video[batch_id, t_id]
single_pred = {}
single_target = {}
for k, v in preds.items():
single_pred[k] = v[batch_id, t_id]
for k, v in target.items():
single_target[k] = v[batch_id, t_id]
img = visualize_preds(image, single_pred, single_target,
vis_hmr=vis_hmr, dataset=dataset)
result_video.append(img[np.newaxis, ...])
result_video = np.concatenate(result_video)
batch_videos.append(result_video[np.newaxis, ...])
    final_video = np.concatenate(batch_videos)
import gm
from math import sqrt
from networkx.algorithms.cluster import average_clustering, triangles
import numpy as np
from numpy.core.fromnumeric import sort
import pandas as pd
import matplotlib.pyplot as plt
import math
import networkx as nx
import matplotlib.animation as animation
import networkx.algorithms.community as nx_comm
import warnings
from matplotlib.patches import Rectangle
warnings.filterwarnings('ignore')
####################################################################
####################################################################
#-----------------------PARAMETERS BEGIN---------------------------#
####################################################################
####################################################################
# ------------------------ALGORITHM SETTINGS--------------------------#
N = 100 # Number of sensor nodes
M = 2 # Space dimensions
D = 10 # Desired distance among nodes,i.e. algebraic constraints
K = 1.2 # Scaling factor
R = K*D # Interaction range
A = 5
B = 5
C = np.abs(A-B)/np.sqrt(4*A*B)
D_prime = D * 3 # desired distance between obstacles and node
R_prime = K * D_prime # Interaction range with obstacles
EPSILON = 0.1
H_alpha = 0.2
H_beta = 0.9
C1_ALPHA = 10
C2_ALPHA = 2 * np.sqrt(C1_ALPHA)
C1_BETA = 150
C2_BETA = 2*np.sqrt(C1_BETA)
C1_GAMMA = 120
C2_GAMMA = 2 * np.sqrt(C1_GAMMA)
DELTA_T = 0.01 # time interval for calculating speed
ITERATION = 5000 # total step number
# ---------------------STATISTICAL SETTINGS---------------------------#
# whole process parameters recording
POSITION_X = np.zeros([N, ITERATION]) # X position of each agent
POSITION_Y = np.zeros([N, ITERATION]) # Y position of each agent
AVERAGE_VELOCITY = np.zeros([1,ITERATION]) # the average speed for each iter
MAX_V = np.zeros([1,ITERATION])# the max speed of points for each iter
AVERAGE_X_POSITION = np.zeros([1,ITERATION]) # average X
MAX_VELOCITY_X = np.zeros([1,ITERATION]) # max speed X
VELOCITY_MAGNITUDES = np.zeros([N, ITERATION]) # velocity of each agent
# acceleration
ACCELERACTION_X = np.zeros([N, ITERATION])
AVERAGE_X_ACC = np.zeros([1, ITERATION])
AVERAGE_Y_ACC = np.zeros([1, ITERATION])
ACCELERACTION_Y = np.zeros([N, ITERATION])
ACCELERACTION_MAGNITUDES = np.zeros([N, ITERATION])
AVERAGE_ACC_MAGNITUDES = np.zeros([1, ITERATION])
CONNECTIVITY = np.zeros([ITERATION, 1]) # connectivity for the network
ORIENTATION = np.random.rand(N,ITERATION)*360 # direction of each agent while flying
DEGREE_CENTRALITY = np.zeros([N, ITERATION])
DEGREES = np.zeros([N, ITERATION])
MAX_DEGREE = np.zeros([1, ITERATION])
MAX_DEGREE_NODE = np.zeros([1, ITERATION])
MAX_DEGREE_CENTRALITY = np.zeros([1, ITERATION])
MAX_DEGREE_CENTRALITY_NODE = np.zeros([1, ITERATION])
CLUS_COEF = np.zeros([N, ITERATION])
AVERAGE_CLUSTERING = np.zeros([1, ITERATION])
MAX_CLUSTERING = np.zeros([1, ITERATION])
MAX_CLUSTERING_NODE = np.zeros([1, ITERATION])
TRANGLES = np.zeros([N, ITERATION])
NUM_TRIANGLES = np.zeros([1, ITERATION])
MAX_TRANGLE_NUM = np.zeros([1, ITERATION])
MAX_TRANGLE_NODE = np.zeros([1, ITERATION])
DISTANCE = np.zeros([N, ITERATION])
#---------------------AGENT/OBSTACLES SETTINGS------------------------#
# target position
target_position = np.array([650, 50])
# position of obstacles, could be adding more obstacles
obstacles = np.array([[400, 90],[400,20]] )
# Obstacle radius
Rk = np.array([[1],[1]])
num_obstacles = obstacles.shape[0]
# target_points = np.zeros([ITERATION, M])
center_of_mass = np.zeros([ITERATION, M])
# initial position
formation_range = 200
nodes = np.random.rand(N, M) * formation_range # nodes initial position,and instant position
nodes_velocity = np.zeros([N, M])
initial_x = 0
initial_y = 0
for i in range(0,N):
nodes[i][0] -= initial_x
nodes[i][1] -= initial_y
#-------------------------GRAPH SETTINGS -----------------------------#
G = nx.Graph()
nodes_list = [i for i in range(len(nodes))] # must build the graph from nodes rather than edges
G.add_nodes_from(nodes_list)
# adding pos to each node
for i in range(0,N):
G.nodes[i]['pos'] = (nodes[i][0],nodes[i][1])
# adjacency_matrix = np.zeros([N, N])
a_ij_matrix = np.zeros([N, N])
#---------------------------UTILITIES--------------------------------#
SNAPSHOT_INTERVAL = 100 # screenshot interval
markersize=8 # node marker size
fig_counter = 0
img_path,flocking_path,attributes_path,properties_path,speed_path = gm.get_file_path()
####################################################################
####################################################################
#-----------------------PARAMETERS END-----------------------------#
####################################################################
####################################################################
def update_orientation(i,t):
if t == 0:
theata = ORIENTATION[i,t]
else:
delta_x = POSITION_X[i,t]-POSITION_X[i,t-1]
delta_y = POSITION_Y[i,t]-POSITION_Y[i,t-1]
theata = math.atan2(delta_y,delta_x)*360/(2*np.pi)
return theata
def sigma_norm(z):
norm = np.linalg.norm(z)
val = EPSILON*(norm**2)
val = np.sqrt(1 + val) - 1
val = val/EPSILON
return val
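# sigma_norm implements the sigma-norm commonly used in flocking control,
#   ||z||_sigma = (sqrt(1 + EPSILON * ||z||^2) - 1) / EPSILON,
# a smooth surrogate for the Euclidean norm that is differentiable everywhere
# (including at z = 0), which keeps the gradient-based control terms built on it
# well defined.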
def create_adjacency_matrix():
adjacency_matrix = np.zeros([N, N])
for i in range(0, N):
for j in range(0, N):
if i != j:
# val = nodes[i] - nodes[j]
distance = np.linalg.norm(nodes[i] - nodes[j])
if distance <= R:
adjacency_matrix[i, j] = 1
return adjacency_matrix
def get_edge_list():
nodes_edge_list =[]
for i in range(0, N):
for j in range(i+1, N):
distance = np.linalg.norm(nodes[j] - nodes[i])
if distance <= R:
nodes_edge_list.append((i,j,round(distance))) # adding tuple edges
return nodes_edge_list
def get_triangles_properties():
# triangles
trangles = nx.triangles(G)
    num_triangles = sum(trangles.values()) # each triangle is counted once per vertex
max_trangle_num = max(trangles.values())
max_trangle_node = max(trangles, key = lambda k : trangles.get(k))
return trangles,num_triangles,max_trangle_num,max_trangle_node
def get_clustering_property():
# clustering coefficient
clus_coef = nx.clustering(G)
average_clustering = nx.average_clustering(G)
max_clustering = max(clus_coef.values())
max_clustering_node =max(clus_coef, key = lambda k : clus_coef.get(k))
return clus_coef,average_clustering, max_clustering, max_clustering_node
def get_degree_property():
# max node degree
degree = dict(G.degree())
max_degree = max(degree.values())
max_degree_node = max(degree, key = lambda k : degree.get(k))
# degree centrality
degree_centrality = nx.degree_centrality(G)
max_degree_centrality = max(degree_centrality.values())
max_degree_centrality_node = max(degree_centrality, key = lambda k : degree_centrality.get(k))
return degree_centrality,max_degree, max_degree_node, max_degree_centrality, max_degree_centrality_node
def record_graph_properties(t):
#-------------- get graph properties --------------#
# triangles
trangles,num_triangles,max_trangle_num,max_trangle_node = get_triangles_properties()
TRANGLES[:,t] = list(trangles.values())
NUM_TRIANGLES[:,t] = num_triangles
MAX_TRANGLE_NUM[:,t] = max_trangle_num
MAX_TRANGLE_NODE[:,t] = max_trangle_node
# clustering coefficient
clus_coef,average_clustering, max_clustering, max_clustering_node = get_clustering_property()
CLUS_COEF[:,t] = list(clus_coef.values())
AVERAGE_CLUSTERING[:,t] = average_clustering
MAX_CLUSTERING[:,t] = max_clustering
MAX_CLUSTERING_NODE[:,t] = max_clustering_node
# max node degree
degree_centrality,max_degree, max_degree_node, max_degree_centrality, max_degree_centrality_node = get_degree_property()
DEGREE_CENTRALITY[:,t] = list(degree_centrality.values())
DEGREES[:,t] = np.array(list(dict(G.degree()).values()))
MAX_DEGREE[:,t] = max_degree
MAX_DEGREE_NODE[:,t] = max_degree_node
MAX_DEGREE_CENTRALITY[:,t] = max_degree_centrality
MAX_DEGREE_CENTRALITY_NODE[:,t] =max_degree_centrality_node
AVERAGE_X_POSITION[:,t] = np.average(POSITION_X[:,t])
AVERAGE_X_ACC[:,t] =np.average(ACCELERACTION_X[:,t])
AVERAGE_Y_ACC[:,t] =np.average(ACCELERACTION_Y[:,t])
# draw max velocity node
max_v_index = VELOCITY_MAGNITUDES[:, t].argmax() # return max velocity node index
MAX_VELOCITY_X[:,t] = POSITION_X[max_v_index,t]
MAX_V[:,t] = max(VELOCITY_MAGNITUDES[:, t])
AVERAGE_VELOCITY[:,t] = np.average(VELOCITY_MAGNITUDES[:, t])
# CONNECTIVITY[t] = nx.average_node_connectivity(G)
# distance for each node to destination
for i in range(0,N):
distance = np.linalg.norm(nodes[i] - target_position)
        DISTANCE[i, t] = distance
return [trangles, num_triangles, max_trangle_num, max_trangle_node,\
clus_coef, average_clustering, max_clustering, max_clustering_node,\
degree_centrality, max_degree, max_degree_node, max_degree_centrality,\
max_degree_centrality_node]
# plot statistical properties
def plot_properties():
fig = plt.figure('Properties',figsize=(20,20))
# trajectory
traject_plot = fig.add_subplot(title ='Trajectory',xlabel='Position X',ylabel='Position Y')
for i in range(0, N):
traject_plot.plot(POSITION_X[i, :], POSITION_Y[i, :])
fig_name = properties_path + '/Trajectory.png'
fig.savefig(fig_name, dpi=fig.dpi)
fig.clf()
velocity_plot = fig.add_subplot(title='Velocity',xlabel='Iteration',ylabel = 'Velocity')
for i in range(0, N):
velocity_plot.plot(VELOCITY_MAGNITUDES[i, :])
fig_name = properties_path + '/Velocity.png'
fig.savefig(fig_name, dpi=fig.dpi)
fig.clf()
orientation_plot = fig.add_subplot(121,title='Orientation vs Position',xlabel = 'Position X',ylabel='Orientation')
orientation2_plot = fig.add_subplot(122,title='Orientation vs Iteration',xlabel = 'Iteration',ylabel='Orientation')
for i in range(0, N):
orientation_plot.plot(POSITION_X[i, :], ORIENTATION[i, :])
orientation2_plot.plot(ORIENTATION[i, :])
fig_name = properties_path + '/Orientation.png'
fig.savefig(fig_name, dpi=fig.dpi)
fig.clf()
# triangles_plot = fig.add_subplot(223)
# triangles_plot.title.set_text('triangles_plot')
# clus_coef_plot = fig.add_subplot(224)
# clus_coef_plot.title.set_text('clus_coef_plot')
# for i in range(0, N):
# traject_plot.plot(POSITION_X[i, :], POSITION_Y[i, :])
# triangles_plot.plot(TRANGLES[i,:])
# clus_coef_plot.plot(CLUS_COEF[i,:])
# degree_centrality_plot.plot(DEGREE_CENTRALITY[i,:])
# acceleraction_plot.plot(ACCELERACTION_MAGNITUDES[i,:])
# degree_centrality_plot = fig.add_subplot(235)
# degree_centrality_plot.title.set_text('degree_centrality_plot')
# acceleraction_plot = fig.add_subplot(236)
# acceleraction_plot.title.set_text('acceleraction_plot')
# acceleraction_plot.plot(MAX_DEGREE)
# acceleraction_plot.plot(MAX_CLUSTERING)
# acceleraction_plot.plot(MAX_TRANGLE_NUM)
# acceleraction_plot.plot(MAX_V)
# acceleraction_plot.plot(AVERAGE_VELOCITY)
#save property image
# fig_name = properties_path + '/proterties.png'
# fig.savefig(fig_name, dpi=fig.dpi)
def plot_deployment():
fig = plt.figure('initial deployment')
ax = fig.add_subplot()
for i in range(0,N):
theata_i = ORIENTATION[i,0]
marker,scale = gm.gen_arrow_head_marker(theata_i)
ax.plot(nodes[i, 0], nodes[i, 1], marker = marker,ms = markersize)
# plot neighbors and edges
def plot_neighbors(t,f,nodes_edge_list):
ax = f.add_subplot()
ax.set_xlim(0,1000)
ax.set_ylim(-500,500)
ax.title.set_text('time {} s'.format(t*DELTA_T))
    ax.plot(target_position[0], target_position[1], 'o', color='green')
ax.plot(center_of_mass[0:t, 0], center_of_mass[0:t, 1], color='black')
# ax.add_patch(Rectangle((450,-50),100,100,fc='none',lw = 1,ec ='g' ))
ax.add_patch(Rectangle((300,-200),400,400,fc='none',lw = 1,ec ='g' ))
    for i in range(0, num_obstacles):
        # ax.add_artist(plt.Circle((obstacles[i, 0],obstacles[i, 1]), Rk[i], color='red'))
        ax.scatter(obstacles[i][0], obstacles[i][1], color='red', s=D_prime)
# plot agents
for i in range(0, N):
theata_i = ORIENTATION[i,t]
marker,scale = gm.gen_arrow_head_marker(theata_i)
ax.plot(nodes[i, 0], nodes[i, 1], marker = marker,ms = markersize)
# plot edges
for e in nodes_edge_list:
start_node = e[0]
end_node = e[1]
ax.plot([nodes[start_node, 0], nodes[end_node,0]],
[nodes[start_node, 1], nodes[end_node,1]],'b-',lw=0.5)
def plot_position_speed(fig):
    vel_plot = fig.add_subplot(211,title='maximum speed point in the flock',xlabel='the flock average position',ylabel='speed')
vel_plot.set_xlim(-100,2000)
vel_plot.set_ylim(-100,2000)
vel_plot.plot(AVERAGE_X_POSITION[0,:],MAX_V[0,:],'bo',label = 'max speed')
vel_plot.plot(AVERAGE_X_POSITION[0,:],AVERAGE_VELOCITY[0,:],'rx',label = 'average speed')
    acc_x_plot = fig.add_subplot(223,title='accelerations',xlabel='the flock average position',ylabel='acceleration')
    acc_x_plot.plot(AVERAGE_X_POSITION[0,:],AVERAGE_X_ACC[0,:],'gx',label = 'average x acceleration')
    acc_y_plot = fig.add_subplot(224,title='accelerations',xlabel='the flock average position',ylabel='acceleration')
acc_y_plot.plot(AVERAGE_X_POSITION[0,:],AVERAGE_Y_ACC[0,:],'gx',label = 'average y acceleration')
vel_plot.legend(loc='upper right')
acc_x_plot.legend(loc='upper right')
acc_y_plot.legend(loc='upper right')
def draw_network(G,data,fig,t):
triangles = data[0]
num_triangles = data[1]
max_trangle_node = data[3]
clus_coef = data[4]
average_clustering = data[5]
max_clustering = data[6]
max_clustering_node = data[7]
max_degree = data[9]
max_degree_node = data[10]
max_degree_centrality = data[11]
max_degree_centrality_node = data[12]
#--------------Drawing----------------------------#
network_plot = fig.add_subplot(221)
network_plot.title.set_text('network shape')
pos=nx.get_node_attributes(G,'pos')
nx.draw_networkx(G,pos = pos,ax=network_plot,node_size = 100,font_size = 6 )
fig.suptitle('This is the time {} s network'.format(t * DELTA_T))
degrees_plot =fig.add_subplot(222)
degrees_plot.title.set_text('degrees')
node_degrees = dict(G.degree())
plt.bar(node_degrees.keys(),node_degrees.values(),color='r')
# ------ Draw labels of the weight -----------------#
# labels = nx.get_edge_attributes(G,'weight')
# nx.draw_networkx_edge_labels(G,pos,edge_labels=labels)
# -------------draw text box------------------------
# textstr = 'shape'
# props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
# ax1.text(0.05, 0.95, textstr, transform=ax1.transAxes,fontsize=14,
# verticalalignment='top', bbox=props)
attributes_plot = fig.add_subplot(223)
attributes_plot.title.set_text('network attributes and connectivity')
plt.xlabel('node number')
    plt.ylabel('number of triangles passing through')
plt.bar(list(triangles.keys()),triangles.values(),color='g')
attributes_plot.get_xaxis().set_visible(False)
# column labels
col_labels =['attributes','value','node num']
# attributes data
data = [num_triangles,
average_clustering,
max_degree_centrality
]
# the data of the table
celltext=np.array([['number of triangles', data[0], max_trangle_node],
['average clustering coefficient', round(data[1],2), max_clustering_node],
['max_degree_centrality', round(data[2],2),max_degree_centrality_node]
])
# utilize pandas DataFrame
df = pd.DataFrame(celltext, columns=col_labels)
# draw the table
table = attributes_plot.table(cellText=df.values, colLabels=df.columns, loc='bottom')
table.auto_set_font_size(False)
table.set_fontsize(12)
table.auto_set_column_width(list(range(len(col_labels))))
# plot attributes
clu_coef_plot = fig.add_subplot(224)
#------- details of clustering coefficient--------------#
    clu_coef_plot.title.set_text('clustering coefficient for each node')
plt.xlabel('node number')
plt.ylabel('clustering coefficient for each node')
plt.bar(list(clus_coef.keys()),clus_coef.values(),color='b')
#-----------------triangles for each node----------------------#
# ax3.title.set_text('triangles for each node')
# plt.xlabel('node number')
# plt.ylabel('number of traingles pass through')
# plt.bar(list(trangles.keys()),trangles.values(),color='g')
# update the graph, extract the properties of the flock
def update_graph(G,edge_list):
# extract the pure edges relationship
edges = [(e[0], e[1] ) for e in edge_list]
edges_old = list(G.edges())
edges_remove= [ e for e in edges_old if e not in edges] #edges to be removed
G.update(edges = edges,nodes =nodes_list) # update graph
G.remove_edges_from(edges_remove)
# update nodes position
for i in range(0,N):
G.nodes[i]['pos'] = tuple(nodes[i])
# update edge weight, must update edge first then weight, update will overwrite the edge attributes
for e in edge_list:
x = e[0] # source_node
        y = e[1] # target_node
weight = e[2]
G[x][y]['weight'] = weight
return G
def bump_function(z,H):
if 0 <= z < H:
return 1
elif H <= z < 1:
val = (z-H)/(1-H)
val = np.cos(np.pi*val)
val = (1+val)/2
return val
else:
return 0
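# Worked example (illustrative): bump_function is the smooth cutoff rho_h(z) that
# equals 1 on [0, h), decays with a half-cosine on [h, 1) and is 0 beyond. Assuming
# h = 0.2 (the actual H_alpha/H_beta values come from the parameter block above):
#   bump_function(0.1, 0.2)  # -> 1
#   bump_function(0.5, 0.2)  # -> (1 + cos(pi*0.375))/2 ~ 0.691
#   bump_function(1.5, 0.2)  # -> 0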
def sigma_1(z):
val = 1 + z **2
val = np.sqrt(val)
val = z/val
return val
def sigma_1_gamma(z):
val = 1 + np.linalg.norm(z)**2
val = np.sqrt(val)
val = z/val
return val
def phi(z):
val_1 = A + B
val_2 = sigma_1(z + C)
val_3 = A - B
val = val_1 * val_2 + val_3
val = val / 2
return val
def phi_alpha(z):
input_1 = z/sigma_norm(R) # Sigma norm of R is R_alpha
input_2 = z - sigma_norm(D) # Sigma norm of D is D_alpha
val_1 = bump_function(input_1,H_alpha)
val_2 = phi(input_2)
val = val_1 * val_2
return val
def phi_beta(z):
val1 = bump_function(z/D_BETA,H_beta)
val2 = sigma_1(z-D_BETA) - 1
return val1 * val2
def get_a_ij(i, j):
val_1 = nodes[j] - nodes[i]
val_2 = sigma_norm(val_1)/sigma_norm(R)
val = bump_function(val_2,H_alpha)
return val
def get_n_ij(i, j):
val_1 = nodes[j] - nodes[i]
norm = np.linalg.norm(val_1)
val_2 = 1 + EPSILON * norm**2
val = val_1/np.sqrt(val_2)
return val
# get ui_beta from lemma 4
def get_ui_beta(i,q_i, p_i):
sum_1 = np.array([0.0, 0.0])
sum_2 = np.array([0.0, 0.0])
ui_beta = 0
# for each obstacles
for k in range(num_obstacles):
yk = obstacles[k]
a_k = (q_i - yk) / np.linalg.norm(q_i-yk)
mu = Rk[k] / np.linalg.norm(q_i-yk)
        P = np.identity(2) - np.outer(a_k, a_k)  # projection matrix I - a_k a_k^T from Lemma 4
        q_i_k = mu*q_i + (1-mu) * yk
        p_i_k = mu * np.matmul(P, p_i)
distance = np.linalg.norm(q_i_k - q_i)
if distance < R_prime:
n_i_k = (q_i_k - q_i) /(np.sqrt(1 + EPSILON * (np.linalg.norm(q_i_k-q_i))**2))
b_i_k = bump_function(sigma_norm(q_i_k-q_i) / D_BETA, H_beta)
sum_1 += phi_beta(sigma_norm(q_i_k-q_i)) * n_i_k
sum_2 += b_i_k * (p_i_k-p_i)
ui_beta = C1_BETA * sum_1 + C2_BETA * sum_2
return ui_beta
def get_ui_beta_hyper(i,q_i, p_i):
sum_1 = np.array([0.0, 0.0])
sum_2 = np.array([0.0, 0.0])
ui_beta = 0
# for each obstacles
for k in range(num_obstacles):
yk = obstacles[k]
a_k = (q_i - yk) / np.linalg.norm(q_i-yk)
        P = np.identity(2) - np.outer(a_k, a_k)  # projection onto the obstacle surface
        q_i_k = np.matmul(P, q_i) + np.matmul(np.identity(2) - P, yk)
        p_i_k = np.matmul(P, p_i)
distance = np.linalg.norm(q_i_k - q_i)
if distance < R_prime:
n_i_k = (q_i_k - q_i) /(np.sqrt(1 + EPSILON * (np.linalg.norm(q_i_k-q_i))**2))
b_i_k = bump_function(sigma_norm(q_i_k-q_i) / D_BETA, H_beta)
sum_1 += C1_BETA * phi_beta(sigma_norm(q_i_k-q_i)) * n_i_k
sum_2 += C2_BETA * b_i_k * (p_i_k-p_i)
ui_beta = sum_1 + sum_2
return ui_beta
def get_u_i(i,q_i,p_i,target_pos):
sum_1 = np.array([0.0, 0.0])
sum_2 = np.array([0.0, 0.0])
for j in range(0, N):
distance = np.linalg.norm(nodes[j] - nodes[i])
if distance <= R:
phi_alpha_val = phi_alpha(sigma_norm(nodes[j] - nodes[i]))
sum_1 += phi_alpha_val * get_n_ij(i, j)
sum_2 += get_a_ij(i, j) * (nodes_velocity[j] - nodes_velocity[i])
ui_alpha = C1_ALPHA * sum_1 + C2_ALPHA * sum_2
ui_gamma = - C1_GAMMA * sigma_1_gamma(nodes[i] - target_pos) - C2_GAMMA * (p_i - 0)
    ui_beta = get_ui_beta(i,q_i,p_i) # ui_beta does not depend on the j loop above
ui = ui_alpha + ui_beta + ui_gamma
return ui
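# Summary (illustrative): get_u_i assembles Olfati-Saber's combined control law,
#   u_i = u_i^alpha (inter-agent cohesion/alignment)
#       + u_i^beta  (obstacle avoidance)
#       + u_i^gamma (navigational feedback toward the target),
# which is why ui_beta is computed once per agent, outside the neighbour loop.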
def get_positions_static(G):
counter = 0
graph_data = []
gm.clear_img_path(flocking_path,attributes_path,properties_path,speed_path)
for t in range(0, ITERATION):
# print(np.linalg.matrix_rank(adjacency_matrix))
adjacency_matrix = create_adjacency_matrix()
# print(np.linalg.matrix_rank(adjacency_matrix))
# CONNECTIVITY[t] = (1 / N) * np.linalg.matrix_rank(adjacency_matrix)
center_of_mass[t] = np.array([np.mean(nodes[:, 0]), np.mean(nodes[:, 1])])
nodes_edge_list= get_edge_list()
G = update_graph(G,nodes_edge_list)
if t == 0:
for i in range(0, N):
POSITION_X[i, t] = nodes[i, 0]
POSITION_Y[i, t] = nodes[i, 1]
else:
for i in range(0, N):
# p_i == old_velocity in the paper
# q_i === old_position
old_velocity = nodes_velocity[i]
old_position = np.array([POSITION_X[i, t-1],
POSITION_Y[i, t-1]])
# TODO add target_pos
u_i = get_u_i(i, old_position, old_velocity,target_position)
ACCELERACTION_X[i,t] = u_i[0]
ACCELERACTION_Y[i,t] = u_i[1]
#update position
new_position = old_position + DELTA_T * old_velocity + (DELTA_T ** 2 / 2) * u_i
[POSITION_X[i, t], POSITION_Y[i, t]] = new_position
ACCELERACTION_MAGNITUDES[i,t] = np.linalg.norm(u_i)
nodes[i, :] = new_position
# update velocity
new_velocity = (new_position - old_position) / DELTA_T
nodes_velocity[i] = new_velocity
VELOCITY_MAGNITUDES[i, t] = np.linalg.norm(new_velocity)
#update orientation
ORIENTATION[i,t] = update_orientation(i,t)
graph_data = record_graph_properties(t)
if (t) % SNAPSHOT_INTERVAL == 0:
fig_flock = plt.figure('flocking',figsize=(12,10))
plot_neighbors(t,fig_flock,nodes_edge_list)
f_path = flocking_path + '/step {} _flock.png'.format(counter)
plt.savefig(f_path)
fig_flock.clf()
fig_main = plt.figure('attributes',figsize=(12,12))
draw_network(G,graph_data,fig_main,t)
a_path = attributes_path + '/step {} _attribute.png'.format(counter)
plt.savefig(a_path)
fig_main.clf()
            fig_speed = plt.figure('Speed and position',figsize=(12,12))
plot_position_speed(fig_speed)
s_path = speed_path + '/step {} _speed.png'.format(counter)
plt.savefig(s_path)
fig_speed.clf()
counter += 1
# plt.show()
def save_parameter():
path = img_path + '/parameter.txt'
f = open(path,'w')
    a = ('C1_ALPHA: {}\nC2_ALPHA: {}\n'
         'C1_BETA: {}\nC2_BETA: {}\n'
         'C1_GAMMA: {}\nC2_GAMMA: {}\n'
         'N: {}\nM: {}\nD: {}\n'
         'K: {}\nR: {}\nA: {}\n'
         'B: {}\nC: {}\n'
         'D_prime: {}\nR_prime: {}\n'
         'EPSILON: {}\nH_alpha: {}\n'
         'DELTA_T: {}\nITERATION: {}\n'
         ).format(C1_ALPHA, C2_ALPHA, C1_BETA, C2_BETA, C1_GAMMA, C2_GAMMA,
                  N, M, D, K, R, A, B, C, D_prime, R_prime,
                  EPSILON, H_alpha, DELTA_T, ITERATION)
f.write(a)
f.close()
def build_square_obstacles(x1,y1,x2,y2,x_n,y_n):
dl_y = np.linspace(y1,y2,y_n)
dl_x = np.linspace(x1,x2,x_n)
left = np.zeros((y_n,2))
right = np.zeros((y_n,2))
top = np.zeros((x_n,2))
bottom = np.zeros((x_n,2))
left[:,0] = x1
left[:,1] = dl_y
right[:,0] = x2
right[:,1] = dl_y
top[:,0] = dl_x
top[:,1] =y2
bottom[:,0] = dl_x
bottom[:,1] = y1
return np.concatenate((left,right,top,bottom))
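# Usage sketch (illustrative, not called in the original flow): sampling the boundary
# of the unit square with 3 points per edge yields 4 edges * 3 points:
#   pts = build_square_obstacles(0, 0, 1, 1, 3, 3)
#   pts.shape  # -> (12, 2); rows are left, right, top, bottom edge samples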
def parameter_changer(i,itr_num,obst=True):
global N
global M
global D
global K
global R
global A
global B
global C
global D_prime
global R_prime
global R_BETA
global D_BETA
global EPSILON
global H_alpha,H_beta
global C1_ALPHA
global C2_ALPHA
global C1_BETA
global C2_BETA
global C1_GAMMA
global C2_GAMMA
global ITERATION
global POSITION_X
global POSITION_Y
global AVERAGE_VELOCITY
global MAX_V
global AVERAGE_X_POSITION
global MAX_VELOCITY_X
global VELOCITY_MAGNITUDES
global ACCELERACTION_X
global AVERAGE_X_ACC
global AVERAGE_Y_ACC
global ACCELERACTION_Y
global ACCELERACTION_MAGNITUDES
global AVERAGE_ACC_MAGNITUDES
global CONNECTIVITY
global ORIENTATION
global DEGREE_CENTRALITY
global DEGREES
global MAX_DEGREE
global MAX_DEGREE_NODE
global MAX_DEGREE_CENTRALITY
global MAX_DEGREE_CENTRALITY_NODE
global CLUS_COEF
global AVERAGE_CLUSTERING
global MAX_CLUSTERING
global MAX_CLUSTERING_NODE
global TRANGLES
global NUM_TRIANGLES
global MAX_TRANGLE_NUM
global MAX_TRANGLE_NODE
global DISTANCE
global target_position,obstacles,Rk,num_obstacles
global center_of_mass,formation_range
global nodes,nodes_velocity,nodes_list
global fig_counter
global a_ij_matrix
global img_path,flocking_path,attributes_path,properties_path,speed_path
N = 100 # Number of sensor nodes
M = 2 # Space dimensions
D = 10 # Desired distance among nodes,i.e. algebraic constraints
K = 1.2 # Scaling factor
R = K*D # Interaction range
A = 5
B = 5
C = np.abs(A-B)/np.sqrt(4*A*B)
ITERATION = itr_num
POSITION_X = np.zeros([N, ITERATION]) # X position of each agent
POSITION_Y = np.zeros([N, ITERATION]) # Y position of each agent
    AVERAGE_VELOCITY = np.zeros([1, ITERATION]) # average speed of the flock
import numpy as np
"""
备注:本示例中所有的matrix都使用的是array来体现
实际上python中存在专门的matrix
# 主要注意matrix和array的区别
# matrix 使用更加接近matlab的使用方式
简单示例如下:
B = np.matrix('1,2;3,4')
print(B*B)
# 注意:若使用matrix,则*直接表示矩阵乘法
# 而元素对应乘积则使用 multiply() 函数
"""
def vector_dot():
a = np.array([1, 3, 5, 7, 9])
    b = np.array([1, 2, 3, 4, 5])
    return np.dot(a, b)  # 1*1 + 3*2 + 5*3 + 7*4 + 9*5 = 95
import argparse
import numpy as np
from PIL import Image
from pathlib import Path
from openvino.inference_engine import IECore
from semseg.utils.visualize import generate_palette
from semseg.utils.utils import timer
class Inference:
def __init__(self, model: str) -> None:
files = Path(model).iterdir()
for file in files:
if file.suffix == '.xml':
model = str(file)
elif file.suffix == '.bin':
weights = str(file)
ie = IECore()
model = ie.read_network(model=model, weights=weights)
self.input_info = next(iter(model.input_info))
self.output_info = next(iter(model.outputs))
self.img_size = model.input_info['input'].input_data.shape[-2:]
self.palette = generate_palette(11, background=True)
self.engine = ie.load_network(network=model, device_name='CPU')
self.mean = np.array([0.485, 0.456, 0.406]).reshape(-1, 1, 1)
self.std = np.array([0.229, 0.224, 0.225]).reshape(-1, 1, 1)
def preprocess(self, image: Image.Image) -> np.ndarray:
image = image.resize(self.img_size)
image = np.array(image, dtype=np.float32).transpose(2, 0, 1)
image /= 255
image -= self.mean
image /= self.std
image = image[np.newaxis, ...]
return image
def postprocess(self, seg_map: np.ndarray) -> np.ndarray:
        seg_map = np.argmax(seg_map, axis=1)
# ------------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# Written by <NAME> (<EMAIL>)
# Modified by <NAME> (<EMAIL>) and <NAME> (<EMAIL>)
# ------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import logging
import os
import numpy as np
import torch
from core.evaluate import accuracy
from core.inference import get_final_preds
from utils.transforms import flip_back
from utils.vis import save_debug_images
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
from torch.utils.tensorboard import SummaryWriter
from torchvision.utils import save_image
logger = logging.getLogger(__name__)
# def plot_grad_flow(named_parameters):
# ave_grads = []
# layers = []
# for n, p in named_parameters:
# if (p.requires_grad) and ("bias" not in n):
# layers.append(n)
# ave_grads.append(p.grad.abs().mean().detach().cpu().numpy())
# plt.plot(ave_grads, alpha=0.3, color="b")
# plt.hlines(0, 0, len(ave_grads) + 1, linewidth=1, color="k")
# plt.xticks(range(0, len(ave_grads), 1), layers, rotation="vertical")
# plt.xlim(xmin=0, xmax=len(ave_grads))
# plt.xlabel("Layers")
# plt.ylabel("average gradient")
# plt.title("Gradient flow")
# plt.grid(True)
# plt.savefig('F:/Projects/Transpose/TransPose/model_weights')
# # plt.show(block=True)
def plot_grad_flow(named_parameters):
'''Plots the gradients flowing through different layers in the net during training.
Can be used for checking for possible gradient vanishing / exploding problems.
Usage: Plug this function in Trainer class after loss.backwards() as
"plot_grad_flow(self.model.named_parameters())" to visualize the gradient flow'''
ave_grads = []
max_grads = []
layers = []
for n, p in named_parameters:
if (p.requires_grad) and ("bias" not in n):
layers.append(n)
ave_grads.append(p.grad.abs().mean().detach().cpu().numpy())
max_grads.append(p.grad.abs().max().detach().cpu().numpy())
plt.bar(np.arange(len(max_grads)), max_grads, alpha=0.1, lw=1, color="c")
plt.bar(np.arange(len(max_grads)), ave_grads, alpha=0.1, lw=1, color="b")
plt.hlines(0, 0, len(ave_grads) + 1, lw=2, color="k")
plt.xticks(range(0, len(ave_grads), 1), layers, rotation="vertical")
plt.xlim(left=0, right=len(ave_grads))
plt.ylim(bottom=-0.001, top=2) # zoom in on the lower gradient regions
plt.xlabel("Layers")
plt.ylabel("average gradient")
plt.title("Gradient flow")
plt.grid(True)
plt.legend([Line2D([0], [0], color="c", lw=4),
Line2D([0], [0], color="b", lw=4),
Line2D([0], [0], color="k", lw=4)], ['max-gradient', 'mean-gradient', 'zero-gradient'])
    plt.savefig('/content/drive/MyDrive/transaction_output/stanford/model_weights.jpg')
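# Usage sketch (mirrors how train_transaction below calls it): invoke right after the
# backward pass and before the optimizer step, while gradients are still populated:
#   loss.backward()
#   plot_grad_flow(model.named_parameters())
#   optimizer.step()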
def accuracy_classification(output, target):
num_batch = output.shape[0]
if not num_batch == target.shape[0]:
        raise ValueError('output and target must have the same batch size')
pred = np.argmax(output, axis=1)
true_ = (pred == target).sum()
return true_ / num_batch
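# Worked example (illustrative): with three samples and two classes,
#   output = np.array([[0.9, 0.1], [0.2, 0.8], [0.6, 0.4]])
#   target = np.array([0, 1, 1])
# the argmax predictions are [0, 1, 0]; two match the targets, so the function
# returns 2/3 ~ 0.667.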
def train(config, train_loader, model, criterion, optimizer, epoch,
output_dir, tb_log_dir, writer_dict):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
acc = AverageMeter()
# switch to train mode
model.train()
end = time.time()
for i, (input, target, target_weight, meta) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
# compute output
outputs = model(input)
target = target.cuda(non_blocking=True)
target_weight = target_weight.cuda(non_blocking=True)
if isinstance(outputs, list):
loss = criterion(outputs[0], target, target_weight)
for output in outputs[1:]:
loss += criterion(output, target, target_weight)
else:
output = outputs
loss = criterion(output, target, target_weight)
# loss = criterion(output, target, target_weight)
# compute gradient and do update step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure accuracy and record loss
losses.update(loss.item(), input.size(0))
_, avg_acc, cnt, pred = accuracy(output.detach().cpu().numpy(),
target.detach().cpu().numpy())
acc.update(avg_acc, cnt)
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % config.PRINT_FREQ == 0:
msg = 'Epoch: [{0}][{1}/{2}]\t' \
'Time {batch_time.val:.3f}s ({batch_time.avg:.3f}s)\t' \
'Speed {speed:.1f} samples/s\t' \
'Data {data_time.val:.3f}s ({data_time.avg:.3f}s)\t' \
'Loss {loss.val:.5f} ({loss.avg:.5f})\t' \
'Accuracy {acc.val:.3f} ({acc.avg:.3f})'.format(
epoch, i, len(train_loader), batch_time=batch_time,
speed=input.size(0) / batch_time.val,
data_time=data_time, loss=losses, acc=acc)
logger.info(msg)
writer = writer_dict['writer']
global_steps = writer_dict['train_global_steps']
writer.add_scalar('train_loss', losses.val, global_steps)
writer.add_scalar('train_acc', acc.val, global_steps)
writer_dict['train_global_steps'] = global_steps + 1
prefix = '{}_{}'.format(os.path.join(output_dir, 'train'), i)
# save_debug_images(config, input, meta, target, pred*4, output,
# prefix)
def train_resnet(config, train_loader, model, criterion, optimizer, epoch,
output_dir, tb_log_dir, writer_dict):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
acc = AverageMeter()
# switch to train mode
model.train()
end = time.time()
for i, (input, meta) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
# compute output
outputs, _ = model(input)
target = meta['target'].cuda(non_blocking=True)
if isinstance(outputs, list):
loss = criterion(outputs[0], target)
for output in outputs[1:]:
loss += criterion(output, target)
else:
output = outputs
loss = criterion(output, target)
# loss = criterion(output, target, target_weight)
# compute gradient and do update step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure accuracy and record loss
losses.update(loss.item(), input.size(0))
avg_acc = accuracy_classification(output.detach().cpu().numpy(),
target.detach().cpu().numpy())
num_imgs = input.shape[0]
acc.update(avg_acc, num_imgs)
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % config.PRINT_FREQ == 0:
msg = 'Epoch: [{0}][{1}/{2}]\t' \
'Time {batch_time.val:.3f}s ({batch_time.avg:.3f}s)\t' \
'Speed {speed:.1f} samples/s\t' \
'Data {data_time.val:.3f}s ({data_time.avg:.3f}s)\t' \
'Loss {loss.val:.5f} ({loss.avg:.5f})\t' \
'Accuracy {acc.val:.3f} ({acc.avg:.3f})'.format(
epoch, i, len(train_loader), batch_time=batch_time,
speed=input.size(0) / batch_time.val,
data_time=data_time, loss=losses, acc=acc)
logger.info(msg)
writer = writer_dict['writer']
global_steps = writer_dict['train_global_steps']
writer.add_scalar('train_loss', losses.val, global_steps)
writer.add_scalar('train_acc', acc.val, global_steps)
writer_dict['train_global_steps'] = global_steps + 1
prefix = '{}_{}'.format(os.path.join(output_dir, 'train'), i)
# save_debug_images(config, input, meta, target, pred*4, output,
# prefix)
def train_transaction(config, train_loader, model, criterion, optimizer, epoch,
output_dir, tb_log_dir, writer_dict):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
acc1 = AverageMeter()
acc2 = AverageMeter()
# switch to train mode
model.train()
end = time.time()
for i, (input, meta) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
# compute output
# outputs, action_outputs_trans, action_outputs_linear = model(input)
# a = list()
# for x in range(len(list(model.parameters()))):
# a.append(list(model.parameters())[x].clone())
outputs, action_outputs_trans = model(input)
target = meta['target'].cuda(non_blocking=True)
if isinstance(action_outputs_trans, list):
loss = criterion(action_outputs_trans[0], target)
for output in action_outputs_trans[1:]:
loss += criterion(output, target)
else:
output1 = action_outputs_trans
# output2 = action_outputs_linear
# loss_trans = criterion(output1, target)
# loss_linear = criterion(output2, target)
# loss = a1 * loss_trans + a2 * loss_linear
loss = criterion(output1, target)
# loss = criterion(output, target, target_weight)
# compute gradient and do update step
optimizer.zero_grad()
loss.backward()
plot_grad_flow(model.named_parameters())
optimizer.step()
# b = list()
# for x in range(len(list(model.parameters()))):
# mine = list(model.parameters())[x].grad
# b.append(list(model.parameters())[x].clone())
# c = list()
# d = list()
# for idx in range(len(a)):
# if a[idx].requires_grad:
# c.append(a[idx] == b[idx])
# d.append(b[idx].grad)
# measure accuracy and record loss
losses.update(loss.item(), input.size(0))
avg_acc1 = accuracy_classification(output1.detach().cpu().numpy(),
target.detach().cpu().numpy())
# avg_acc2 = accuracy_classification(output2.detach().cpu().numpy(),
# target.detach().cpu().numpy())
num_imgs = input.shape[0]
acc1.update(avg_acc1, num_imgs)
# acc2.update(avg_acc2, num_imgs)
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % config.PRINT_FREQ == 0:
msg = 'Epoch: [{0}][{1}/{2}]\t' \
'Time {batch_time.val:.3f}s ({batch_time.avg:.3f}s)\t' \
'Speed {speed:.1f} samples/s\t' \
'Data {data_time.val:.3f}s ({data_time.avg:.3f}s)\t' \
'Loss {loss.val:.5f} ({loss.avg:.5f})\n' \
'Accuracy of First Branch {acc1.val:.3f} ({acc1.avg:.3f})\n'.format(
epoch, i, len(train_loader), batch_time=batch_time,
speed=input.size(0) / batch_time.val,
data_time=data_time, loss=losses, acc1=acc1)
logger.info(msg)
writer = writer_dict['writer']
global_steps = writer_dict['train_global_steps']
writer.add_scalar('train_loss', losses.val, global_steps)
writer.add_scalar('train_acc_first_branch', acc1.val, global_steps)
# writer.add_scalar('train_acc_second_branch', acc2.val, global_steps)
writer_dict['train_global_steps'] = global_steps + 1
prefix = '{}_{}'.format(os.path.join(output_dir, 'train'), i)
# save_debug_images(config, input, meta, target, pred*4, output,
# prefix)
def validate(config, val_loader, val_dataset, model, criterion, output_dir,
tb_log_dir, writer_dict=None):
batch_time = AverageMeter()
losses = AverageMeter()
acc = AverageMeter()
# switch to evaluate mode
model.eval()
num_samples = len(val_dataset)
all_preds = np.zeros(
(num_samples, config.MODEL.NUM_JOINTS, 3),
dtype=np.float32
)
all_boxes = np.zeros((num_samples, 6))
image_path = []
filenames = []
imgnums = []
idx = 0
with torch.no_grad():
end = time.time()
for i, (input, target, target_weight, meta) in enumerate(val_loader):
# compute output
'''with att maps'''
# outputs, atten_maps = model(input)
'''without att maps'''
outputs = model(input)
# inspect_atten_map_by_locations(input, model, query_locations,
# model_name="transposer", mode='dependency',
# save_img=True, threshold=0.0)
if isinstance(outputs, list):
output = outputs[-1]
else:
output = outputs
if config.TEST.FLIP_TEST:
# this part is ugly, because pytorch has not supported negative index
# input_flipped = model(input[:, :, :, ::-1])
input_flipped = np.flip(input.cpu().numpy(), 3).copy()
input_flipped = torch.from_numpy(input_flipped).cuda()
'''with att maps'''
# outputs_flipped, atten_maps_flip = model(input_flipped)
'''without att maps'''
outputs_flipped = model(input_flipped)
if isinstance(outputs_flipped, list):
output_flipped = outputs_flipped[-1]
else:
output_flipped = outputs_flipped
output_flipped = flip_back(output_flipped.cpu().numpy(),
val_dataset.flip_pairs)
output_flipped = torch.from_numpy(output_flipped.copy()).cuda()
# atten_maps_flip = atten_maps_flip[:, :, :, ::-1]
output = (output + output_flipped) * 0.5
target = target.cuda(non_blocking=True)
target_weight = target_weight.cuda(non_blocking=True)
loss = criterion(output, target, target_weight)
num_images = input.size(0)
# measure accuracy and record loss
losses.update(loss.item(), num_images)
_, avg_acc, cnt, pred = accuracy(output.cpu().numpy(),
target.cpu().numpy())
acc.update(avg_acc, cnt)
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
c = meta['center'].numpy()
s = meta['scale'].numpy()
score = meta['score'].numpy()
preds, maxvals = get_final_preds(
config, output.clone().cpu().numpy(), c, s)
all_preds[idx:idx + num_images, :, 0:2] = preds[:, :, 0:2]
all_preds[idx:idx + num_images, :, 2:3] = maxvals
# double check this all_boxes parts
all_boxes[idx:idx + num_images, 0:2] = c[:, 0:2]
all_boxes[idx:idx + num_images, 2:4] = s[:, 0:2]
            all_boxes[idx:idx + num_images, 4] = np.prod(s * 200, 1)
import numpy as np
import matplotlib.pyplot as plt
from lightkurve.lightcurve import LightCurve
import matplotlib as mpl
from astropy.time import Time
from astropy import time, coordinates as coord, units as u
import astropy.constants as c
from mpl_toolkits.axes_grid1 import make_axes_locatable
import time as timer
from matplotlib.gridspec import GridSpec
from lightkurve.lightcurve import LightCurve as LC
from lightkurve.search import search_lightcurve
from mpl_toolkits.axes_grid1 import make_axes_locatable
import batman
from astropy.table import Table
COLOR = 'k'#'#FFFAF1'
plt.rcParams['font.size'] = 18
plt.rcParams['text.color'] = COLOR
plt.rcParams['axes.labelcolor'] = COLOR
plt.rcParams['xtick.color'] = COLOR
plt.rcParams['ytick.color'] = COLOR
plt.rcParams['xtick.major.width'] = 3
plt.rcParams['ytick.major.width'] = 3
plt.rcParams['xtick.major.size'] = 8 #12
plt.rcParams['ytick.major.size'] = 8 #12
plt.rcParams['xtick.minor.width'] = 1
plt.rcParams['ytick.minor.width'] = 1
plt.rcParams['xtick.minor.size'] = 6
plt.rcParams['ytick.minor.size'] = 6
plt.rcParams['axes.linewidth'] = 3
lw = 5
plt.rcParams['text.color'] = COLOR
plt.rcParams['xtick.color'] = COLOR
plt.rcParams['ytick.color'] = COLOR
plt.rcParams['axes.labelcolor'] = COLOR
#plt.rcParams['axes.spines.top'] = False
#plt.rcParams['axes.spines.right'] = False
plt.rcParams['axes.labelcolor'] = COLOR
plt.rcParams['axes.edgecolor'] = COLOR
plt.rcParams['figure.facecolor'] = 'none'
plt.rcParams['legend.facecolor'] = 'none'
edgecolor = '#05021f'
k2_ec = '#b8b4b2'
parula = np.load('/Users/arcticfox/parula_colors.npy')[np.linspace(0,160,4,dtype=int)]
parula = ['#eb9c3b', '#74BB43', '#1A48A0', '#742C64', '#74BB43', '#eb9c3b',
'#eb9c3b', '#74BB43', '#1A48A0', '#eb9c3b']
#parula = ['#B3240B', '#74BB43', '#0494EC', '#BC84DC']
gp_mod = np.load('gp_loose.npy', allow_pickle=True).tolist()
map_soln=np.load('map_soln_loose.npy', allow_pickle=True).tolist()
extras = np.load('extras_loose.npy', allow_pickle=True).tolist()
planets=['c','d','b','e']
periods = map_soln['period']#np.array([8.249147, 12.401369, 24.141445, 36.695032307689445])
t0s = map_soln['t0']
t0s = np.append(t0s, [t0s[1]+periods[1], t0s[0]+periods[0], t0s[0]+periods[0]*2,
                      t0s[1]+periods[1]*2, t0s[2]+periods[2], t0s[0]+periods[0]*3])
import numpy as np
def cone_search(ra_center,dec_center,ra_list,dec_list,angular_scale):
#--------------------------------------------------------
# Function to do a cone search around any (ra,dec) pointing with respect
# to a list of ra,dec entries.
#
# Input:- ra_center = RA of center (Degrees)
# dec_center = DEC of center (Degrees)
# ra_list = RA list of input catalogue (Degrees)
# dec_list = DEC list of input catalogue (Degrees)
# angular_scale = angular search radius in arcsec
#
#
    # Output:- out:- structure containing RA,DEC and logical operator
# identifying the objects
#
# Written by R.B. Mar 11 2013
#---------------------------------------------------------
deg2rad=np.pi/180
rad2deg=180./np.pi
ra_center=deg2rad*np.array(ra_center)
dec_center=deg2rad*np.array(dec_center)
ra_list=deg2rad*np.array(ra_list)
dec_list=deg2rad*np.array(dec_list)
    # angular separation from the spherical law of cosines
    theta = np.arccos(np.sin(dec_center)*np.sin(dec_list)
                      + np.cos(dec_center)*np.cos(dec_list)*np.cos(ra_center - ra_list))
# Copyright 2020-21 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""MaskRcnn Rcnn for mask network."""
import numpy as np
import mindspore.common.dtype as mstype
import mindspore.nn as nn
from mindspore.ops import operations as P
from mindspore.common.tensor import Tensor
from mindspore.common.initializer import initializer
from mindspore import context
def _conv(in_channels, out_channels, kernel_size=1, stride=1, padding=0, pad_mode='pad'):
"""Conv2D wrapper."""
shape = (out_channels, in_channels, kernel_size, kernel_size)
weights = initializer("XavierUniform", shape=shape, dtype=mstype.float32)
shape_bias = (out_channels,)
    bias = Tensor(np.array(np.zeros(shape_bias), dtype=np.float32))
# coding=UTF-8
from manimlib.imports import *
import numpy as np
numbers = [21, 99, 49, 11, 66, 5, 78, 86]
class Sort(Scene):
def construct(self):
        # show the intro text
text1 = Text("归并排序\n\n采用分治法\n先二分成无数个子序列\n再对每个子序列排序\n最后合并为有序序列", color=WHITE, font="黑体")
text1.scale(1.5)
text1.move_to(np.array([0.0, 0.0, 0.0]))
self.play(ShowCreation(text1))
self.wait(2)
self.play(Uncreate(text1))
        # level 1: the full array
group1 = VGroup()
for i in range(8):
group1.add(Square(side_length=1))
if i > 0: group1[i].next_to(group1[i-1], RIGHT, 0)
group1.move_to(np.array([0.0, 3.0, 0.0]))
self.play(FadeIn(group1))
        # the numbers
elements = []
for i in range(len(numbers)):
elements.append(Integer(numbers[i]))
elements[i].move_to(np.array([-3.5 + i * 1.0, 3.0, 0.0]))
self.play(ShowCreation(elements[i]))
        # level 2
        arrow1to2_1 = Arrow(start=np.array([-0.5, 2.5, 0.0]), end=np.array([-3.0, 1.5, 0.0]))
#!/usr/bin/env python
"""
Written by <NAME> <<EMAIL>>. Copyright 2015
Based on original code by <NAME>. Copyright 2007
This code is licensed under the GNU GPL version 2, see COPYING for details.
"""
from __future__ import print_function, division
from collections import OrderedDict
try:
import pickle
except ImportError:
import cPickle as pickle # pylint: disable=import-error
from pkg_resources import parse_version
from PIL import Image
import numpy as np
assert parse_version(np.__version__) >= parse_version('1.9.0'), \
"numpy >= 1.9.0 is required for daltonize"
try:
import matplotlib as mpl
_NO_MPL = False
except ImportError:
_NO_MPL = True
from flask_restx import reqparse
def transform_colorspace(img, mat):
"""Transform image to a different color space.
Arguments:
----------
img : array of shape (M, N, 3)
mat : array of shape (3, 3)
conversion matrix to different color space
Returns:
--------
out : array of shape (M, N, 3)
"""
# Fast element (=pixel) wise matrix multiplication
    return np.einsum("ij, ...j", mat, img)
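# The einsum above applies `mat` to every pixel's colour vector; an equivalent
# (but much slower) reference loop would be, as a sketch:
#   out = np.empty_like(img)
#   for r in range(img.shape[0]):
#       for c in range(img.shape[1]):
#           out[r, c] = mat @ img[r, c]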
"""Contains functions pertaining to the design of physical and chemical unit
processes of AguaClara water treatment plants.
"""
from aguaclara.core.units import u
import aguaclara.core.constants as con
import aguaclara.core.utility as ut
import aguaclara.core.pipes as pipe
import numpy as np
from scipy import interpolate, integrate
import warnings
############################ Gas ##############################
@ut.list_handler()
def density_air(Pressure, MolarMass, Temperature):
"""
.. deprecated::
`density_air` is deprecated; use `density_gas` instead.
"""
warnings.warn('density_air is deprecated; use density_gas instead.',
UserWarning)
return density_gas(Pressure, MolarMass, Temperature)
@ut.list_handler()
def density_gas(Pressure, MolarMass, Temperature):
"""Return the density of air at the given pressure, molar mass, and
temperature.
:param Pressure: pressure of air in the system
:type Pressure: u.pascal
:param MolarMass: molar mass of air in the system
:type MolarMass: u.gram/u.mol
:param Temperature: Temperature of air in the system
:type Temperature: u.degK
:return: density of air in the system
:rtype: u.kg/u.m**3
"""
return (Pressure * MolarMass / (u.R * Temperature)).to(u.kg/u.m**3)
########################## Geometry ###########################
@ut.list_handler()
def area_circle(DiamCircle):
"""Return the area of a circle given its diameter.
:param DiamCircle: diameter of circle
:type DiamCircle: u.m
:return: area of circle
:rtype: u.m**2
"""
ut.check_range([DiamCircle.magnitude, ">0", "DiamCircle"])
return (np.pi / 4 * DiamCircle**2)
@ut.list_handler()
def diam_circle(AreaCircle):
"""Return the diameter of a circle given its area.
:param AreaCircle: area of circle
:type AreaCircle: u.m**2
:return: diameter of circle
:rtype: u.m
"""
ut.check_range([AreaCircle.magnitude, ">0", "AreaCircle"])
return np.sqrt(4 * AreaCircle / np.pi)
####################### Water Properties #######################
#:
RE_TRANSITION_PIPE = 2100
#: Table of temperatures and the corresponding water density.
#:
#: WATER_DENSITY_TABLE[0] is a list of water temperatures, in Kelvin.
#: WATER_DENSITY_TABLE[1] is the corresponding densities, in kg/m³.
WATER_DENSITY_TABLE = [(273.15, 278.15, 283.15, 293.15, 303.15, 313.15,
323.15, 333.15, 343.15, 353.15, 363.15, 373.15
), (999.9, 1000, 999.7, 998.2, 995.7, 992.2,
988.1, 983.2, 977.8, 971.8, 965.3, 958.4
)
]
@ut.list_handler()
def viscosity_dynamic(temp):
"""
.. deprecated::
`viscosity_dynamic` is deprecated; use `viscosity_dynamic_water`
instead.
"""
warnings.warn('viscosity_dynamic is deprecated; use '
'viscosity_dynamic_water instead.', UserWarning)
return viscosity_dynamic_water(temp)
@ut.list_handler()
def viscosity_dynamic_water(Temperature):
"""Return the dynamic viscosity of water at a given temperature.
:param Temperature: temperature of water
:type Temperature: u.degK
:return: dynamic viscosity of water
:rtype: u.kg/(u.m*u.s)
"""
ut.check_range([Temperature.magnitude, ">=0", "Temperature in Kelvin"])
return 2.414 * (10**-5) * u.kg/(u.m*u.s) * 10**(247.8*u.degK /
(Temperature - 140*u.degK))
@ut.list_handler()
def density_water(Temperature=None, *, temp=None):
"""Return the density of water at a given temperature.
:param Temperature: temperature of water
:type Temperature: u.degK
:param temp: deprecated; use Temperature instead
:return: density of water
:rtype: u.kg/u.m**3
"""
if Temperature is not None and temp is not None:
raise TypeError("density_water received both Temperature and temp")
elif Temperature is None and temp is None:
raise TypeError("density_water missing Temperature argument")
elif temp is not None:
warnings.warn("temp is deprecated; use Temperature instead.",
UserWarning)
Temperature = temp
ut.check_range([Temperature.magnitude, ">=0", "Temperature in Kelvin"])
rhointerpolated = interpolate.CubicSpline(WATER_DENSITY_TABLE[0],
WATER_DENSITY_TABLE[1])
Temperature = Temperature.to(u.degK).magnitude
return rhointerpolated(Temperature).item() * u.kg/u.m**3
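# Example (illustrative): at a knot of WATER_DENSITY_TABLE the cubic spline returns
# the tabulated value exactly, e.g.
#   density_water(293.15 * u.degK)  # -> 998.2 kg/m**3 (water at 20 deg C)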
@ut.list_handler()
def viscosity_kinematic(temp):
"""
.. deprecated::
`viscosity_kinematic` is deprecated; use `viscosity_kinematic_water`
instead.
"""
warnings.warn('viscosity_kinematic is deprecated; use '
'viscosity_kinematic_water instead.', UserWarning)
return viscosity_kinematic_water(temp)
@ut.list_handler()
def viscosity_kinematic_water(Temperature):
"""Return the kinematic viscosity of water at a given temperature.
:param Temperature: temperature of water
:type Temperature: u.degK
:return: kinematic viscosity of water
:rtype: u.m**2/u.s
"""
ut.check_range([Temperature.magnitude, ">=0", "Temperature in Kelvin"])
return (viscosity_dynamic_water(Temperature) / density_water(Temperature))
####################### Hydraulic Radius #######################
@ut.list_handler()
def radius_hydraulic(Width, Depth, openchannel):
"""
.. deprecated::
`radius_hydraulic` is deprecated; use `radius_hydraulic_rect` instead.
"""
warnings.warn('radius_hydraulic is deprecated; use radius_hydraulic_rect '
'instead.', UserWarning)
return radius_hydraulic_rect(Width, Depth, openchannel)
@ut.list_handler()
def radius_hydraulic_rect(Width, Depth, OpenChannel):
"""Return the hydraulic radius of a rectangular channel given width and
depth of water.
:param Width: width of channel
:type Width: u.m
:param Depth: depth of water in channel
:type Depth: u.m
:param OpenChannel: true if channel is open, false if closed
:type OpenChannel: boolean
:return: hydraulic radius of rectangular channel
:rtype: u.m
"""
ut.check_range([Width.magnitude, ">0", "Width"],
[Depth.magnitude, ">0", "Depth"],
[OpenChannel, "boolean", "OpenChannel"])
if OpenChannel:
return ((Width*Depth) / (Width + 2*Depth))
else:
return ((Width*Depth) / (2 * (Width+Depth)))
@ut.list_handler()
def radius_hydraulic_general(Area, PerimWetted):
"""
.. deprecated::
`radius_hydraulic_general` is deprecated; use
`radius_hydraulic_channel` instead.
"""
warnings.warn('radius_hydraulic_general is deprecated; use '
'radius_hydraulic_channel instead.', UserWarning)
return radius_hydraulic_channel(Area, PerimWetted)
@ut.list_handler()
def radius_hydraulic_channel(Area, PerimWetted):
"""Return the hydraulic radius of a general channel given cross sectional
area and wetted perimeter.
:param Area: cross sectional area of channel
:type Area: u.m**2
:param PerimWetted: wetted perimeter of channel
:type PerimWetted: u.m
:return: hydraulic radius of general channel
:rtype: u.m
"""
ut.check_range([Area.magnitude, ">0", "Area"],
[PerimWetted.magnitude, ">0", "Wetted perimeter"])
return (Area / PerimWetted)
####################### Reynolds Number #######################
@ut.list_handler()
def re_pipe(FlowRate, Diam, Nu):
"""Return the Reynolds number of flow through a pipe.
:param FlowRate: flow rate through pipe
:type FlowRate: u.m**3/u.s
:param Diam: diameter of pipe
:type Diam: u.m
:param Nu: kinematic viscosity of fluid
:type Nu: u.m**2/u.s
:return: Reynolds number of flow through pipe
:rtype: u.dimensionless
"""
ut.check_range([FlowRate.magnitude, ">0", "Flow rate"],
[Diam.magnitude, ">0", "Diameter"],
[Nu.magnitude, ">0", "Nu"])
return ((4 * FlowRate) / (np.pi * Diam * Nu)).to(u.dimensionless)
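# Worked example (illustrative): Re = 4*Q / (pi * D * Nu). For Q = 0.01 m**3/s,
# D = 0.1 m and Nu = 1e-6 m**2/s (roughly water at 20 deg C),
#   re_pipe(0.01 * u.m**3/u.s, 0.1 * u.m, 1e-6 * u.m**2/u.s)
# gives about 1.27e5, well above RE_TRANSITION_PIPE, i.e. turbulent flow.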
@ut.list_handler()
def re_rect(FlowRate, Width, Depth, Nu, OpenChannel=None, *, openchannel=None):
"""Return the Reynolds number of flow through a rectangular channel.
:param FlowRate: flow rate through channel
:type FlowRate: u.m**3/u.s
:param Width: width of channel
:type Width: u.m
:param Depth: depth of water in channel
:type Depth: u.m
:param Nu: kinematic viscosity of fluid
:type Nu: u.m**2/u.s
:param OpenChannel: true if channel is open, false if closed
:type OpenChannel: boolean
:param openchannel: deprecated; use OpenChannel instead
:return: Reynolds number of flow through rectangular channel
:rtype: u.dimensionless
"""
ut.check_range([FlowRate.magnitude, ">0", "Flow rate"],
[Nu.magnitude, ">0", "Nu"])
if OpenChannel is not None and openchannel is not None:
raise TypeError("re_rect received both OpenChannel and openchannel")
elif OpenChannel is None and openchannel is None:
raise TypeError("re_rect missing OpenChannel argument")
elif openchannel is not None:
warnings.warn("openchannel is deprecated; use OpenChannel instead.",
UserWarning)
OpenChannel = openchannel
return (4 * FlowRate * radius_hydraulic_rect(Width, Depth, OpenChannel)
/ (Width * Depth * Nu)).to(u.dimensionless)
@ut.list_handler()
def re_general(Vel, Area, PerimWetted, Nu):
"""
.. deprecated::
`re_general` is deprecated; use `re_channel` instead.
"""
warnings.warn('re_general is deprecated; use re_channel instead.',
UserWarning)
return re_channel(Vel, Area, PerimWetted, Nu)
@ut.list_handler()
def re_channel(Vel, Area, PerimWetted, Nu):
"""Return the Reynolds number of flow through a general cross section.
:param Vel: velocity of fluid
:type Vel: u.m/u.s
:param Area: cross sectional area of channel
:type Area: u.m**2
:param PerimWetted: wetted perimeter of channel
:type PerimWetted: u.m
:param Nu: kinematic viscosity of fluid
:type Nu: u.m**2/u.s
:return: Reynolds number of flow through general cross section
:rtype: u.dimensionless
"""
ut.check_range([Vel.magnitude, ">=0", "Velocity"],
[Nu.magnitude, ">0", "Nu"])
return (4 * radius_hydraulic_channel(Area, PerimWetted) * Vel / Nu).to(u.dimensionless)
########################### Friction ###########################
@ut.list_handler()
def fric(FlowRate, Diam, Nu, PipeRough):
"""
.. deprecated::
`fric` is deprecated; use `fric_pipe` instead.
"""
warnings.warn('fric is deprecated; use fric_pipe instead', UserWarning)
return fric_pipe(FlowRate, Diam, Nu, PipeRough)
@ut.list_handler()
def fric_pipe(FlowRate, Diam, Nu, Roughness):
"""Return the friction factor for pipe flow.
For laminar flow, the friction factor is 64 is divided the Reynolds number.
For turbulent flows, friction factor is calculated using the Swamee-Jain
equation, which works best for Re > 3000 and ε/Diam < 0.02.
:param FlowRate: flow rate through pipe
:type FlowRate: u.m**3/u.s
:param Diam: diameter of pipe
:type Diam: u.m
:param Nu: kinematic viscosity of fluid
:type Nu: u.m**2/u.s
:param Roughness: roughness of pipe
:type Roughness: u.m
:return: friction factor of flow through pipe
:rtype: u.dimensionless
"""
ut.check_range([Roughness.magnitude, ">=0", "Pipe roughness"])
if re_pipe(FlowRate, Diam, Nu) >= RE_TRANSITION_PIPE:
f = (0.25 / (np.log10(Roughness / (3.7 * Diam)
+ 5.74 / re_pipe(FlowRate, Diam, Nu) ** 0.9
)
) ** 2
)
else:
f = 64 / re_pipe(FlowRate, Diam, Nu)
return f * u.dimensionless
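# Illustrative check: for the turbulent case from the re_pipe example above with a
# hydraulically smooth pipe (Roughness = 0 m), the Swamee-Jain expression reduces to
#   f = 0.25 / (log10(5.74 / Re**0.9))**2 ~ 0.017 for Re ~ 1.27e5,
# while a laminar case would instead return 64/Re.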
@ut.list_handler()
def fric_rect(FlowRate, Width, Depth, Nu, Roughness=None, OpenChannel=None, *,
PipeRough=None, openchannel=None):
"""Return the friction factor of a rectangular channel.
The Swamee-Jain equation is adapted for a rectangular channel.
:param FlowRate: flow rate through channel
:type FlowRate: u.m**3/u.s
:param Width: width of channel
:type Width: u.m
:param Depth: depth of water in channel
:type Depth: u.m
:param Nu: kinematic viscosity of fluid
:type Nu: u.m**2/u.s
:param Roughness: roughness of channel
:type Roughness: u.m
:param OpenChannel: true if channel is open, false if closed
:type OpenChannel: boolean
:param PipeRough: deprecated; use Roughness instead
:param openchannel: deprecated; use OpenChannel instead
:return: friction factor of flow through rectangular channel
:rtype: u.dimensionless
"""
if Roughness is not None and PipeRough is not None:
raise TypeError("fric_rect received both Roughness and PipeRough")
elif Roughness is None and PipeRough is None:
raise TypeError("fric_rect missing Roughness argument")
elif OpenChannel is not None and openchannel is not None:
raise TypeError("fric_rect received both OpenChannel and openchannel")
elif OpenChannel is None and openchannel is None:
raise TypeError("fric_rect missing OpenChannel argument")
else:
if PipeRough is not None:
warnings.warn("PipeRough is deprecated; use Roughness instead.",
UserWarning)
Roughness = PipeRough
if openchannel is not None:
warnings.warn("openchannel is deprecated; use OpenChannel instead.",
UserWarning)
OpenChannel = openchannel
ut.check_range([Roughness.magnitude, ">=0", "Pipe roughness"])
if re_rect(FlowRate, Width, Depth, Nu, OpenChannel) >= RE_TRANSITION_PIPE:
# Diam = 4*R_h in adapted Swamee-Jain equation
return (0.25 * u.dimensionless
/ (np.log10((Roughness
/ (3.7 * 4
* radius_hydraulic_rect(Width, Depth,
OpenChannel)
)
)
+ (5.74 / (re_rect(FlowRate, Width, Depth,
Nu, OpenChannel) ** 0.9)
)
)
) ** 2
)
else:
return 64 * u.dimensionless / re_rect(FlowRate, Width, Depth, Nu,
OpenChannel)
@ut.list_handler()
def fric_general(Area, PerimWetted, Vel, Nu, PipeRough):
"""
.. deprecated::
`fric_general` is deprecated; use `fric_channel` instead.
"""
warnings.warn('fric_general is deprecated; use fric_channel instead.',
UserWarning)
return fric_channel(Area, PerimWetted, Vel, Nu, PipeRough)
@ut.list_handler()
def fric_channel(Area, PerimWetted, Vel, Nu, Roughness):
"""Return the friction factor for a general channel.
The Swamee-Jain equation is adapted for a general cross-section.
:param Area: cross sectional area of channel
:type Area: u.m**2
:param PerimWetted: wetted perimeter of channel
:type PerimWetted: u.m
:param Vel: velocity of fluid
:type Vel: u.m/u.s
:param Nu: kinematic viscosity of fluid
:type Nu: u.m**2/u.s
    :param Roughness: roughness of channel
    :type Roughness: u.m
:return: friction factor for flow through general channel
:rtype: u.dimensionless
"""
ut.check_range([Roughness.magnitude, ">=0", "Pipe roughness"])
if re_channel(Vel, Area, PerimWetted, Nu) >= RE_TRANSITION_PIPE:
# Diam = 4*R_h in adapted Swamee-Jain equation
f = (0.25 /
(np.log10((Roughness
/ (3.7 * 4
* radius_hydraulic_channel(Area, PerimWetted)
)
)
+ (5.74
/ re_channel(Vel, Area, PerimWetted, Nu) ** 0.9
)
)
) ** 2
)
else:
f = 64 / re_channel(Vel, Area, PerimWetted, Nu)
return f * u.dimensionless
######################### Head Loss #########################
@ut.list_handler()
def headloss_fric(FlowRate, Diam, Length, Nu, PipeRough):
"""
.. deprecated::
`headloss_fric` is deprecated; use `headloss_major_pipe` instead.
"""
warnings.warn('headloss_fric is deprecated; use headloss_major_pipe instead',
UserWarning)
return headloss_major_pipe(FlowRate, Diam, Length, Nu, PipeRough)
@ut.list_handler()
def headloss_major_pipe(FlowRate, Diam, Length, Nu, Roughness):
"""Return the major head loss (due to wall shear) in a pipe.
This function applies to both laminar and turbulent flows.
:param FlowRate: flow rate through pipe
:type FlowRate: u.m**3/u.s
:param Diam: diameter of pipe
:type Diam: u.m
:param Length: length of pipe
:type Length: u.m
:param Nu: kinematic viscosity of fluid
:type Nu: u.m**2/u.s
:param Roughness: roughness of pipe
:type Roughness: u.m
:return: major head loss in pipe
:rtype: u.m
"""
ut.check_range([Length.magnitude, ">0", "Length"])
return (fric_pipe(FlowRate, Diam, Nu, Roughness)
* 8 / (u.gravity * np.pi**2)
* (Length * FlowRate**2) / Diam**5
).to(u.m)
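# Worked example (illustrative): the return value is Darcy-Weisbach written in terms
# of flow rate, hf = f * 8/(g*pi**2) * L * Q**2 / D**5. Continuing the numbers from
# the re_pipe/fric_pipe examples (f ~ 0.017, L = 10 m, Q = 0.01 m**3/s, D = 0.1 m):
#   hf ~ 0.017 * 8/(9.81*pi**2) * 10 * 0.01**2 / 0.1**5 ~ 0.14 m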
@ut.list_handler()
def headloss_exp(FlowRate, Diam, KMinor):
"""
.. deprecated::
`headloss_exp` is deprecated; use `headloss_minor_pipe` instead.
"""
warnings.warn('headloss_exp is deprecated; use headloss_minor_pipe instead',
UserWarning)
return headloss_minor_pipe(FlowRate, Diam, KMinor)
@ut.list_handler()
def headloss_minor_pipe(FlowRate, Diam, KMinor):
"""Return the minor head loss (due to changes in geometry) in a pipe.
This function applies to both laminar and turbulent flows.
:param FlowRate: flow rate through pipe
:type FlowRate: u.m**3/u.s
:param Diam: diameter of pipe
:type Diam: u.m
:param KMinor: minor loss coefficient
:type KMinor: u.dimensionless or unitless
:return: minor head loss in pipe
:rtype: u.m
"""
ut.check_range([FlowRate.magnitude, ">0", "Flow rate"],
[Diam.magnitude, ">0", "Diameter"],
[KMinor, ">=0", "K minor"])
return (KMinor * 8 / (u.gravity * np.pi**2) * FlowRate**2 / Diam**4).to(u.m)
@ut.list_handler()
def headloss(FlowRate, Diam, Length, Nu, PipeRough, KMinor):
"""
.. deprecated::
`headloss` is deprecated; use `headloss_pipe` instead.
"""
warnings.warn('headloss is deprecated; use headloss_pipe instead',
UserWarning)
return headloss_pipe(FlowRate, Diam, Length, Nu, PipeRough, KMinor)
@ut.list_handler()
def headloss_pipe(FlowRate, Diam, Length, Nu, Roughness, KMinor):
"""Return the total head loss from major and minor losses in a pipe.
This function applies to both laminar and turbulent flows.
:param FlowRate: flow rate through pipe
:type FlowRate: u.m**3/u.s
:param Diam: diameter of pipe
:type Diam: u.m
:param Length: length of pipe
:type Length: u.m
:param Nu: kinematic viscosity of fluid
:type Nu: u.m**2/u.s
:param Roughness: roughness of pipe
:type Roughness: u.m
:param KMinor: minor loss coefficient
:type KMinor: u.dimensionless or unitless
:return: total head loss in pipe
:rtype: u.m
"""
return (headloss_major_pipe(FlowRate, Diam, Length, Nu, Roughness)
+ headloss_minor_pipe(FlowRate, Diam, KMinor))
@ut.list_handler()
def headloss_fric_rect(FlowRate, Width, Depth, Length, Nu, PipeRough, openchannel):
"""
.. deprecated::
`headloss_fric_rect` is deprecated; use `headloss_major_rect` instead.
"""
warnings.warn('headloss_fric_rect is deprecated; use headloss_major_rect instead',
UserWarning)
return headloss_major_rect(FlowRate, Width, Depth, Length, Nu, PipeRough, openchannel)
@ut.list_handler()
def headloss_major_rect(FlowRate, Width, Depth, Length, Nu, Roughness, OpenChannel):
"""Return the major head loss due to wall shear in a rectangular channel.
This equation applies to both laminar and turbulent flows.
:param FlowRate: flow rate through channel
:type FlowRate: u.m**3/u.s
:param Width: width of channel
:type Width: u.m
:param Depth: depth of water in channel
:type Depth: u.m
:param Length: length of channel
:type Length: u.m
:param Nu: kinematic viscosity of fluid
:type Nu: u.m**2/u.s
:param Roughness: roughness of channel
:type Roughness: u.m
:param OpenChannel: true if channel is open, false if closed
:type OpenChannel: boolean
:return: major head loss in rectangular channel
:rtype: u.m
"""
ut.check_range([Length.magnitude, ">0", "Length"])
return (fric_rect(FlowRate, Width, Depth, Nu,
Roughness, OpenChannel)
* Length
/ (4 * radius_hydraulic_rect(Width, Depth, OpenChannel))
* FlowRate**2
/ (2 * u.gravity * (Width*Depth)**2)
).to(u.m)
@ut.list_handler()
def headloss_exp_rect(FlowRate, Width, Depth, KMinor):
"""
.. deprecated::
`headloss_exp_rect` is deprecated; use `headloss_minor_rect` instead.
"""
warnings.warn('headloss_exp_rect is deprecated; use headloss_minor_rect instead',
UserWarning)
return headloss_minor_rect(FlowRate, Width, Depth, KMinor)
@ut.list_handler()
def headloss_minor_rect(FlowRate, Width, Depth, KMinor):
"""Return the minor head loss due to expansion in a rectangular channel.
This equation applies to both laminar and turbulent flows.
:param FlowRate: flow rate through channel
:type FlowRate: u.m**3/u.s
:param Width: width of channel
:type Width: u.m
:param Depth: depth of water in channel
:type Depth: u.m
:param KMinor: minor loss coefficient
:type KMinor: u.dimensionless or unitless
:return: minor head loss in rectangular channel
:rtype: u.m
"""
ut.check_range([FlowRate.magnitude, ">0", "Flow rate"],
[Width.magnitude, ">0", "Width"],
[Depth.magnitude, ">0", "Depth"],
[KMinor, ">=0", "K minor"])
return (KMinor * FlowRate**2
/ (2 * u.gravity * (Width*Depth)**2)
).to(u.m)
@ut.list_handler()
def headloss_rect(FlowRate, Width, Depth, Length, KMinor, Nu, Roughness=None,
OpenChannel=None, *, PipeRough=None, openchannel=None):
"""Return the total head loss from major and minor losses in a rectangular
channel.
This equation applies to both laminar and turbulent flows.
:param FlowRate: flow rate through channel
:type FlowRate: u.m**3/u.s
:param Width: width of channel
:type Width: u.m
:param Depth: depth of water in channel
:type Depth: u.m
:param Length: length of channel
:type Length: u.m
:param KMinor: minor loss coefficient
:type KMinor: u.dimensionless or unitless
:param Nu: kinematic viscosity of fluid
:type Nu: u.m**2/u.s
:param Roughness: roughness of channel
:type Roughness: u.m
:param OpenChannel: true if channel is open, false if closed
:type OpenChannel: boolean
:param PipeRough: deprecated; use Roughness instead
    :param openchannel: deprecated; use OpenChannel instead
:return: total head loss in rectangular channel
:rtype: u.m
"""
if Roughness is not None and PipeRough is not None:
raise TypeError("headloss_rect received both Roughness and PipeRough")
elif Roughness is None and PipeRough is None:
raise TypeError("headloss_rect missing Roughness argument")
elif OpenChannel is not None and openchannel is not None:
raise TypeError("headloss_rect received both OpenChannel and openchannel")
elif OpenChannel is None and openchannel is None:
raise TypeError("headloss_rect missing OpenChannel argument")
else:
if PipeRough is not None:
warnings.warn("PipeRough is deprecated; use Roughness instead.",
UserWarning)
Roughness = PipeRough
if openchannel is not None:
warnings.warn("openchannel is deprecated; use OpenChannel instead.",
UserWarning)
OpenChannel = openchannel
return (headloss_minor_rect(FlowRate, Width, Depth, KMinor)
+ headloss_major_rect(FlowRate, Width, Depth, Length,
Nu, Roughness, OpenChannel))
@ut.list_handler()
def headloss_fric_general(Area, PerimWetted, Vel, Length, Nu, PipeRough):
"""
.. deprecated::
`headloss_fric_general` is deprecated; use `headloss_major_channel` instead.
"""
    warnings.warn('headloss_fric_general is deprecated; use headloss_major_channel instead',
UserWarning)
return headloss_major_channel(Area, PerimWetted, Vel, Length, Nu, PipeRough)
@ut.list_handler()
def headloss_major_channel(Area, PerimWetted, Vel, Length, Nu, Roughness):
"""Return the major head loss due to wall shear in a general channel.
This equation applies to both laminar and turbulent flows.
:param Area: cross sectional area of channel
:type Area: u.m**2
:param PerimWetted: wetted perimeter of channel
:type PerimWetted: u.m
:param Vel: velocity of fluid
:type Vel: u.m/u.s
:param Length: length of channel
:type Length: u.m
:param Nu: kinematic viscosity of fluid
:type Nu: u.m**2/u.s
:param Roughness: roughness of channel
:type Roughness: u.m
:return: major head loss in general channel
:rtype: u.m
"""
ut.check_range([Length.magnitude, ">0", "Length"])
return (fric_channel(Area, PerimWetted, Vel, Nu, Roughness) * Length
/ (4 * radius_hydraulic_channel(Area, PerimWetted))
* Vel**2 / (2*u.gravity)
).to(u.m)
@ut.list_handler()
def headloss_exp_general(Vel, KMinor):
"""
.. deprecated::
`headloss_exp_general` is deprecated; use `headloss_minor_channel` instead.
"""
    warnings.warn('headloss_exp_general is deprecated; use headloss_minor_channel instead',
UserWarning)
return headloss_minor_channel(Vel, KMinor)
@ut.list_handler()
def headloss_minor_channel(Vel, KMinor):
"""Return the minor head loss due to expansion in a general channel.
This equation applies to both laminar and turbulent flows.
:param Vel: velocity of fluid
:type Vel: u.m/u.s
:param KMinor: minor loss coefficient
:type KMinor: u.dimensionless or unitless
:return: minor head loss in general channel
:rtype: u.m
"""
ut.check_range([Vel.magnitude, ">0", "Velocity"],
[KMinor, '>=0', 'K minor'])
return (KMinor * Vel**2 / (2*u.gravity)).to(u.m)
@ut.list_handler()
def headloss_gen(Area, Vel, PerimWetted, Length, KMinor, Nu, PipeRough):
"""
.. deprecated::
`headloss_gen` is deprecated; use `headloss_channel` instead.
"""
    warnings.warn('headloss_gen is deprecated; use headloss_channel instead',
UserWarning)
return headloss_channel(Area, Vel, PerimWetted, Length, KMinor, Nu, PipeRough)
@ut.list_handler()
def headloss_channel(Area, Vel, PerimWetted, Length, KMinor, Nu, Roughness):
"""Return the total head loss from major and minor losses in a general
channel.
This equation applies to both laminar and turbulent flows.
:param Area: cross sectional area of channel
:type Area: u.m**2
:param Vel: velocity of fluid
:type Vel: u.m/u.s
:param PerimWetted: wetted perimeter of channel
:type PerimWetted: u.m
:param Length: length of channel
:type Length: u.m
:param KMinor: minor loss coefficient
:type KMinor: u.dimensionless or unitless
:param Nu: kinematic viscosity of fluid
:type Nu: u.m**2/u.s
:param Roughness: roughness of channel
:type Roughness: u.m
:return: total head loss in general channel
:rtype: u.m
"""
return (headloss_minor_channel(Vel, KMinor)
+ headloss_major_channel(Area, PerimWetted, Vel,
Length, Nu, Roughness)).to(u.m)
@ut.list_handler()
def headloss_manifold(FlowRate, Diam, Length, KMinor, Nu, Roughness=None, NumOutlets=None, *, PipeRough=None):
"""Return the total head loss through the manifold.
:param FlowRate: flow rate through manifold
:type FlowRate: u.m**3/u.s
:param Diam: diameter of manifold
:type Diam: u.m
:param Length: length of manifold
:type Length: u.m
:param KMinor: minor loss coefficient
:type KMinor: u.dimensionless or unitless
:param Nu: kinematic viscosity of fluid
:type Nu: u.m**2/u.s
:param Roughness: roughness of manifold
:type Roughness: u.m
:param NumOutlets: number of outlets from manifold
:type NumOutlets: u.dimensionless or unitless
:param PipeRough: deprecated; use Roughness instead
:return: total headloss through manifold
:rtype: u.m
"""
    if Roughness is not None and PipeRough is not None:
        raise TypeError("headloss_manifold received both Roughness and PipeRough")
    elif Roughness is None and PipeRough is None:
        raise TypeError("headloss_manifold missing Roughness argument")
    elif NumOutlets is None:
        raise TypeError("headloss_manifold missing NumOutlets argument")
    elif PipeRough is not None:
        warnings.warn("PipeRough is deprecated; use Roughness instead.",
                      UserWarning)
        Roughness = PipeRough
    ut.check_range([NumOutlets, ">0, int", 'Number of outlets'])
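    # The factor (1/3 + 1/(2*NumOutlets) + 1/(6*NumOutlets**2)) scales the
    # head loss of a pipe carrying the full flow down to that of a manifold
    # whose flow is reduced at each of the NumOutlets equally spaced ports.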
return (headloss_pipe(FlowRate, Diam, Length, Nu, Roughness, KMinor)
* ((1/3)
+ (1 / (2*NumOutlets))
+ (1 / (6*NumOutlets**2))
)
).to(u.m)
@ut.list_handler()
def elbow_minor_loss(q, id_, k):
"""
.. deprecated::
`elbow_minor_loss` is deprecated; use `headloss_minor_elbow` instead.
"""
warnings.warn('elbow_minor_loss is deprecated; use headloss_minor_elbow instead',
UserWarning)
return headloss_minor_elbow(q, id_, k)
@ut.list_handler()
def headloss_minor_elbow(FlowRate, Diam, KMinor):
"""Return the minor head loss (due to changes in geometry) in an elbow.
:param FlowRate: flow rate through pipe
:type FlowRate: u.m**3/u.s
:param Diam: diameter of pipe
:type Diam: u.m
:param KMinor: minor loss coefficient
:type KMinor: u.dimensionless or unitless
:return: minor head loss in pipe
:rtype: u.m
"""
vel = FlowRate / area_circle(Diam)
minor_loss = KMinor * vel ** 2 / (2 * u.gravity)
return minor_loss.to(u.m)
######################### Orifices #########################
@ut.list_handler()
def flow_orifice(Diam, Height, RatioVCOrifice):
"""Return the flow rate of the orifice.
:param Diam: diameter of orifice
:type Diam: u.m
:param Height: piezometric height of orifice
:type Height: u.m
:param RatioVCOrifice: vena contracta ratio of orifice
:type RatioVCOrifice: u.dimensionless or unitless
:return: flow rate of orifice
:rtype: u.m**3/u.s
"""
ut.check_range([Diam.magnitude, ">0", "Diameter"],
[RatioVCOrifice, "0-1", "VC orifice ratio"])
if Height.magnitude > 0:
return (RatioVCOrifice * area_circle(Diam)
* np.sqrt(2 * u.gravity * Height)).to(u.m**3/u.s)
else:
return 0 * u.m**3/u.s
@ut.list_handler()
def flow_orifice_vert(Diam, Height, RatioVCOrifice):
"""Return the vertical flow rate of the orifice.
:param Diam: diameter of orifice
:type Diam: u.m
:param Height: piezometric height of orifice
:type Height: u.m
:param RatioVCOrifice: vena contracta ratio of orifice
:type RatioVCOrifice: u.dimensionless or unitless
:return: vertical flow rate of orifice
:rtype: u.m**3/u.s
"""
ut.check_range([RatioVCOrifice, "0-1", "VC orifice ratio"])
Diam = Diam.to(u.m)
Height = Height.to(u.m)
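    # Integrate over horizontal strips of the circular orifice: the chord
    # width Diam*sin(arccos(z/(Diam/2))) times sqrt(Height - z) is summed from
    # the bottom of the opening up to the water surface (or the orifice top),
    # then scaled by the vena contracta ratio and sqrt(2*g).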
if Height > -Diam / 2:
flow_vert = integrate.quad(lambda z: (Diam*np.sin(np.arccos(z*u.m/(Diam/2)))
* np.sqrt(Height - z*u.m)
).magnitude,
- Diam.magnitude / 2,
min(Diam/2, Height).magnitude)
return (flow_vert[0] * u.m**2.5 * RatioVCOrifice *
np.sqrt(2 * u.gravity)).to(u.m**3/u.s)
else:
return 0 * u.m**3/u.s
@ut.list_handler()
def head_orifice(Diam, RatioVCOrifice, FlowRate):
"""Return the piezometric head of the orifice.
:param Diam: diameter of orifice
:type Diam: u.m
:param RatioVCOrifice: vena contracta ratio of orifice
:type RatioVCOrifice: u.dimensionless or unitless
:param FlowRate: flow rate of orifice
:type FlowRate: u.m**3/u.s
:return: head of orifice
:rtype: u.m
"""
ut.check_range([Diam.magnitude, ">0", "Diameter"],
[FlowRate.magnitude, ">0", "Flow rate"],
[RatioVCOrifice, "0-1", "VC orifice ratio"])
return ((FlowRate
/ (RatioVCOrifice * area_circle(Diam))
)**2
/ (2*u.gravity)
).to(u.m)
@ut.list_handler()
def area_orifice(Height, RatioVCOrifice, FlowRate):
"""Return the area of the orifice.
:param Height: piezometric height of orifice
:type Height: u.m
:param RatioVCOrifice: vena contracta ratio of orifice
:type RatioVCOrifice: u.dimensionless or unitless
:param FlowRate: flow rate of orifice
:type FlowRate: u.m**3/u.s
:return: area of orifice
:rtype: u.m**2
"""
ut.check_range([Height.magnitude, ">0", "Height"],
[FlowRate.magnitude, ">0", "Flow rate"],
[RatioVCOrifice, "0-1, >0", "VC orifice ratio"])
return (FlowRate / (RatioVCOrifice * np.sqrt(2 * u.gravity *
Height))).to(u.m**2)
@ut.list_handler()
def num_orifices(FlowRate, RatioVCOrifice, HeadLossOrifice, DiamOrifice):
"""Return the number of orifices.
:param FlowRate: flow rate of orifice
:type FlowRate: u.m**3/u.s
:param RatioVCOrifice: vena contracta ratio of orifice
:type RatioVCOrifice: u.dimensionless or unitless
:param HeadLossOrifice: head loss of orifice
:type HeadLossOrifice: u.m
:param DiamOrifice: diameter of orifice
:type DiamOrifice: u.m
:return: number of orifices
:rtype: u.dimensionless
"""
return np.ceil(area_orifice(HeadLossOrifice, RatioVCOrifice, FlowRate)
/ area_circle(DiamOrifice)).to(u.dimensionless)
########################### Flows ###########################
@ut.list_handler()
def flow_transition(Diam, Nu):
"""Return the flow rate for the laminar/turbulent transition.
:param Diam: diameter of pipe
:type Diam: u.m
:param Nu: kinematic viscosity of fluid
:type Nu: u.m**2/u.s
:return: flow rate for laminar/turbulent transition
:rtype: u.m**3/u.s
"""
ut.check_range([Diam.magnitude, ">0", "Diameter"],
[Nu.magnitude, ">0", "Nu"])
return (np.pi * Diam * RE_TRANSITION_PIPE * Nu / 4).to(u.m**3/u.s)
@ut.list_handler()
def flow_hagen(Diam, HeadLossMajor=None, Length=None, Nu=None, *, HeadLossFric=None):
"""Return the flow rate for laminar flow with only major losses.
:param Diam: diameter of pipe
:type Diam: u.m
:param HeadLossMajor: head loss due to friction
:type HeadLossMajor: u.m
:param Length: length of pipe
:type Length: u.m
:param Nu: kinematic viscosity of fluid
:type Nu: u.m**2/u.s
:param HeadLossFric: deprecated; use HeadLossMajor instead
:return: flow rate for laminar flow with only major losses
:rtype: u.m**3/u.s
"""
if HeadLossMajor is not None and HeadLossFric is not None:
raise TypeError("flow_hagen received both HeadLossMajor and HeadLossFric")
elif HeadLossMajor is None and HeadLossFric is None:
raise TypeError("flow_hagen missing HeadLossMajor argument")
elif Length is None:
raise TypeError("flow_hagen missing Length argument")
elif Nu is None:
raise TypeError("flow_hagen missing Nu argument")
elif HeadLossFric is not None:
warnings.warn("HeadLossFric is deprecated; use HeadLossMajor instead.",
UserWarning)
HeadLossMajor = HeadLossFric
ut.check_range([Diam.magnitude, ">0", "Diameter"],
[Length.magnitude, ">0", "Length"],
[HeadLossMajor.magnitude, ">=0", "Headloss due to friction"],
[Nu.magnitude, ">0", "Nu"])
return ((np.pi*Diam**4) / (128*Nu) * u.gravity * HeadLossMajor
/ Length).to(u.m**3/u.s)
@ut.list_handler()
def flow_swamee(Diam, HeadLossMajor=None, Length=None, Nu=None, Roughness=None, *, HeadLossFric=None, PipeRough=None):
"""Return the flow rate for turbulent flow with only major losses.
:param Diam: diameter of pipe
:type Diam: u.m
:param HeadLossMajor: head loss due to friction
:type HeadLossMajor: u.m
:param Length: length of pipe
:type Length: u.m
:param Nu: kinematic viscosity of fluid
:type Nu: u.m**2/u.s
:param Roughness: roughness of pipe
:type Roughness: u.m
:param HeadLossFric: deprecated; use HeadLossMajor instead
:param PipeRough: deprecated; use Roughness instead
:return: flow rate for turbulent flow with only major losses
:rtype: u.m**3/u.s
"""
if HeadLossMajor is not None and HeadLossFric is not None:
raise TypeError("flow_swamee received both HeadLossMajor and HeadLossFric")
elif HeadLossMajor is None and HeadLossFric is None:
raise TypeError("flow_swamee missing HeadLossMajor argument")
elif Length is None:
raise TypeError("flow_swamee missing Length argument")
elif Nu is None:
raise TypeError("flow_swamee missing Nu argument")
elif Roughness is not None and PipeRough is not None:
raise TypeError("flow_swamee received both Roughness and PipeRough")
elif Roughness is None and PipeRough is None:
raise TypeError("flow_swamee missing Roughness argument")
else:
if HeadLossFric is not None:
warnings.warn("HeadLossFric is deprecated; use HeadLossMajor instead.",
UserWarning)
HeadLossMajor = HeadLossFric
if PipeRough is not None:
warnings.warn("PipeRough is deprecated; use Roughness instead.",
UserWarning)
Roughness = PipeRough
ut.check_range([Diam.magnitude, ">0", "Diameter"],
[Length.magnitude, ">0", "Length"],
[HeadLossMajor.magnitude, ">0", "Headloss due to friction"],
[Nu.magnitude, ">0", "Nu"],
[Roughness.magnitude, ">=0", "Pipe roughness"])
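    # Swamee-Jain explicit solution for turbulent flow:
    # Q = -(pi/sqrt(2)) * D**2.5 * sqrt(g*hf/L)
    #     * log10(eps/(3.7*D) + 2.51*Nu*sqrt(L/(2*g*hf*D**3)))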
logterm = np.log10(Roughness / (3.7 * Diam)
+ 2.51 * Nu * np.sqrt(Length / (2 * u.gravity
* HeadLossMajor
* Diam**3)
)
)
return ((-np.pi / np.sqrt(2)) * Diam**(5/2) * logterm
* np.sqrt(u.gravity * HeadLossMajor / Length)
).to(u.m**3/u.s)
@ut.list_handler()
def flow_pipemajor(Diam, HeadLossFric, Length, Nu, PipeRough):
"""
.. deprecated::
`flow_pipemajor` is deprecated; use `flow_major_pipe` instead.
"""
warnings.warn('flow_pipemajor is deprecated; use '
'flow_major_pipe instead.', UserWarning)
return flow_major_pipe(Diam, HeadLossFric, Length, Nu, PipeRough)
@ut.list_handler()
def flow_major_pipe(Diam, HeadLossMajor, Length, Nu, Roughness):
"""Return the flow rate with only major losses.
This function applies to both laminar and turbulent flows.
:param Diam: diameter of pipe
:type Diam: u.m
:param HeadLossMajor: head loss due to friction
:type HeadLossMajor: u.m
:param Length: length of pipe
:type Length: u.m
:param Nu: kinematic viscosity of fluid
:type Nu: u.m**2/u.s
:param Roughness: roughness of pipe
:type Roughness: u.m
:return: flow rate with only major losses
:rtype: u.m**3/u.s
"""
FlowHagen = flow_hagen(Diam, HeadLossMajor, Length, Nu)
if FlowHagen < flow_transition(Diam, Nu):
return FlowHagen
else:
return flow_swamee(Diam, HeadLossMajor, Length, Nu, Roughness)
@ut.list_handler()
def flow_pipeminor(Diam, HeadLossExpans, KMinor):
"""
.. deprecated::
`flow_pipeminor` is deprecated; use `flow_minor_pipe` instead.
"""
warnings.warn('flow_pipeminor is deprecated; use '
'flow_minor_pipe instead.', UserWarning)
return flow_minor_pipe(Diam, HeadLossExpans, KMinor)
@ut.list_handler()
def flow_minor_pipe(Diam, HeadLossMinor, KMinor):
"""Return the flow rate with only minor losses.
This function applies to both laminar and turbulent flows.
:param Diam: diameter of pipe
:type Diam: u.m
    :param HeadLossMinor: head loss due to expansion
    :type HeadLossMinor: u.m
:param KMinor: minor loss coefficient
:type KMinor: u.dimensionless or unitless
:return: flow rate with only minor losses
:rtype: u.m**3/u.s
"""
ut.check_range([HeadLossMinor.magnitude, ">=0",
"Headloss due to expansion"],
[KMinor, ">0", "K minor"])
return (area_circle(Diam) * np.sqrt(2 * u.gravity * HeadLossMinor
/ KMinor)
).to(u.m**3/u.s)
@ut.list_handler()
def flow_pipe(Diam, HeadLoss, Length, Nu, Roughness=None, KMinor=None, *, PipeRough=None):
"""Return the flow rate in a pipe.
This function works for both major and minor losses as well as
both laminar and turbulent flows.
:param Diam: diameter of pipe
:type Diam: u.m
:param HeadLoss: total head loss from major and minor losses
:type HeadLoss: u.m
:param Length: length of pipe
:type Length: u.m
:param Nu: kinematic viscosity of fluid
:type Nu: u.m**2/u.s
:param Roughness: roughness of pipe
:type Roughness: u.m
:param KMinor: minor loss coefficient
:type KMinor: u.dimensionless or unitless
:param PipeRough: deprecated; use Roughness instead
:return: flow rate in pipe
:rtype: u.m**3/u.s
"""
if Roughness is not None and PipeRough is not None:
raise TypeError("flow_pipe received both Roughness and PipeRough")
elif Roughness is None and PipeRough is None:
raise TypeError("flow_pipe missing Roughness argument")
elif KMinor is None:
raise TypeError("flow_pipe missing KMinor argument")
elif PipeRough is not None:
warnings.warn("PipeRough is deprecated; use Roughness instead.",
UserWarning)
Roughness = PipeRough
if KMinor == 0:
FlowRate = flow_major_pipe(Diam, HeadLoss, Length, Nu,
Roughness)
else:
FlowRatePrev = 0
err = 1.0
FlowRate = min(flow_major_pipe(Diam, HeadLoss, Length,
Nu, Roughness),
flow_minor_pipe(Diam, HeadLoss, KMinor)
)
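        # Fixed-point iteration: split the total head loss between major and
        # minor losses at the current flow estimate, recompute the flow from
        # the major-loss share, and stop when the relative change in flow
        # rate falls below 1%.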
while err > 0.01:
FlowRatePrev = FlowRate
HLFricNew = (HeadLoss * headloss_major_pipe(FlowRate, Diam, Length,
Nu, Roughness)
/ (headloss_major_pipe(FlowRate, Diam, Length,
Nu, Roughness)
+ headloss_minor_pipe(FlowRate, Diam, KMinor)
)
)
FlowRate = flow_major_pipe(Diam, HLFricNew, Length,
Nu, Roughness)
if FlowRate == 0:
err = 0.0
else:
err = (abs(FlowRate - FlowRatePrev)
/ ((FlowRate + FlowRatePrev) / 2)
)
return FlowRate.to(u.m**3/u.s)
########################## Diameters ##########################
@ut.list_handler()
def diam_hagen(FlowRate, HeadLossMajor=None, Length=None, Nu=None, *, HeadLossFric=None):
"""Return the inner diameter of a pipe with laminar flow and no minor losses.
The Hagen Poiseuille equation is dimensionally correct and returns the
inner diameter of a pipe given the flow rate and the head loss due
to shear on the pipe walls. The Hagen Poiseuille equation does NOT take
minor losses into account. This equation ONLY applies to laminar flow.
:param FlowRate: flow rate of pipe
:type FlowRate: u.m**3/u.s
    :param HeadLossMajor: head loss due to friction
    :type HeadLossMajor: u.m
:param Length: length of pipe
:type Length: u.m
:param Nu: kinematic viscosity of fluid
:type Nu: u.m**2/u.s
:param HeadLossFric: deprecated; use HeadLossMajor instead
:return: inner diameter of pipe
:rtype: u.m
"""
if HeadLossMajor is not None and HeadLossFric is not None:
raise TypeError("diam_hagen received both HeadLossMajor and HeadLossFric")
elif HeadLossMajor is None and HeadLossFric is None:
raise TypeError("diam_hagen missing HeadLossMajor argument")
elif Length is None:
raise TypeError("diam_hagen missing Length argument")
elif Nu is None:
raise TypeError("diam_hagen missing Nu argument")
elif HeadLossFric is not None:
warnings.warn("HeadLossFric is deprecated; use HeadLossMajor instead.",
UserWarning)
HeadLossMajor = HeadLossFric
ut.check_range([FlowRate.magnitude, ">0", "Flow rate"],
[Length.magnitude, ">0", "Length"],
[HeadLossMajor.magnitude, ">0", "Headloss due to friction"],
[Nu.magnitude, ">0", "Nu"])
return (((128 * Nu * FlowRate * Length)
/ (u.gravity * HeadLossMajor * np.pi)
) ** (1/4)).to(u.m)
@ut.list_handler()
def diam_swamee(FlowRate, HeadLossMajor=None, Length=None, Nu=None, Roughness=None, *, HeadLossFric=None, PipeRough=None):
"""Return the inner diameter of a pipe with turbulent flow and no minor losses.
The Swamee Jain equation is dimensionally correct and returns the
inner diameter of a pipe given the flow rate and the head loss due
to shear on the pipe walls. The Swamee Jain equation does NOT take
minor losses into account. This equation ONLY applies to turbulent
flow.
:param FlowRate: flow rate of pipe
:type FlowRate: u.m**3/u.s
    :param HeadLossMajor: head loss due to friction
    :type HeadLossMajor: u.m
:param Length: length of pipe
:type Length: u.m
:param Nu: kinematic viscosity of fluid
:type Nu: u.m**2/u.s
    :param Roughness: roughness of pipe
    :type Roughness: u.m
:param HeadLossFric: deprecated; use HeadLossMajor instead
:param PipeRough: deprecated; use Roughness instead
:return: inner diameter of pipe
:rtype: u.m
"""
if HeadLossMajor is not None and HeadLossFric is not None:
raise TypeError("diam_swamee received both HeadLossMajor and HeadLossFric")
elif HeadLossMajor is None and HeadLossFric is None:
raise TypeError("diam_swamee missing HeadLossMajor argument")
elif Length is None:
raise TypeError("diam_swamee missing Length argument")
elif Nu is None:
raise TypeError("diam_swamee missing Nu argument")
elif Roughness is not None and PipeRough is not None:
raise TypeError("diam_swamee received both Roughness and PipeRough")
elif Roughness is None and PipeRough is None:
raise TypeError("diam_swamee missing Roughness argument")
else:
if HeadLossFric is not None:
warnings.warn("HeadLossFric is deprecated; use HeadLossMajor instead.",
UserWarning)
HeadLossMajor = HeadLossFric
if PipeRough is not None:
warnings.warn("PipeRough is deprecated; use Roughness instead.",
UserWarning)
Roughness = PipeRough
ut.check_range([FlowRate.magnitude, ">0", "Flow rate"],
[Length.magnitude, ">0", "Length"],
[HeadLossMajor.magnitude, ">0", "Headloss due to friction"],
[Nu.magnitude, ">0", "Nu"],
[Roughness.magnitude, ">=0", "Pipe roughness"])
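    # Swamee-Jain explicit diameter formula,
    # D = 0.66*[eps**1.25 * (L*Q**2/(g*hf))**4.75 + Nu*Q**9.4 * (L/(g*hf))**5.2]**0.04,
    # with the second term computed below as b = (Nu**5 * Q**47 * (L/(g*hf))**26)**0.2.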
a = ((Roughness ** 1.25)
* ((Length * FlowRate**2)
/ (u.gravity * HeadLossMajor)
)**4.75
).to_base_units()
b = (Nu**5 * FlowRate**47
* (Length / (u.gravity * HeadLossMajor)) ** 26
).to_base_units()**0.2
return (0.66 * (a+b)**0.04).to(u.m)
@ut.list_handler()
def diam_pipemajor(FlowRate, HeadLossFric, Length, Nu, PipeRough):
"""
.. deprecated::
`diam_pipemajor` is deprecated; use `diam_major_pipe` instead.
"""
warnings.warn('diam_pipemajor is deprecated; use '
'diam_major_pipe instead.', UserWarning)
return diam_major_pipe(FlowRate, HeadLossFric, Length, Nu, PipeRough)
@ut.list_handler()
def diam_major_pipe(FlowRate, HeadLossMajor, Length, Nu, Roughness):
"""Return the pipe inner diameter that would result in given major losses.
This function applies to both laminar and turbulent flow.
:param FlowRate: flow rate of pipe
:type FlowRate: u.m**3/u.s
:param HeadLossMajor: head loss due to friction
:type HeadLossMajor: u.m
:param Length: length of pipe
:type Length: u.m
:param Nu: kinematic viscosity of fluid
:type Nu: u.m**2/u.s
:param Roughness: roughness of pipe
:type Roughness: u.m
:return: inner diameter of pipe
:rtype: u.m
"""
DiamLaminar = diam_hagen(FlowRate, HeadLossMajor, Length, Nu)
if re_pipe(FlowRate, DiamLaminar, Nu) <= RE_TRANSITION_PIPE:
return DiamLaminar
else:
return diam_swamee(FlowRate, HeadLossMajor, Length,
Nu, Roughness)
@ut.list_handler()
def diam_pipeminor(FlowRate, HeadLossExpans, KMinor):
"""
.. deprecated::
`diam_pipeminor` is deprecated; use `diam_minor_pipe` instead.
"""
warnings.warn('diam_pipeminor is deprecated; use '
'diam_minor_pipe instead.', UserWarning)
return diam_minor_pipe(FlowRate, HeadLossExpans, KMinor)
@ut.list_handler()
def diam_minor_pipe(FlowRate, HeadLossMinor, KMinor):
"""Return the pipe inner diameter that would result in the given minor losses.
This function applies to both laminar and turbulent flow.
:param FlowRate: flow rate of pipe
:type FlowRate: u.m**3/u.s
:param HeadLossMinor: head loss due to expansion
:type HeadLossMinor: u.m
:param KMinor: minor loss coefficient
:type KMinor: u.dimensionless or unitless
:return: inner diameter of pipe
:rtype: u.m
"""
ut.check_range([FlowRate.magnitude, ">0", "Flow rate"],
[KMinor, ">=0", "K minor"],
[HeadLossMinor.magnitude, ">0", "Headloss due to expansion"])
return (np.sqrt(4 * FlowRate / np.pi)
* (KMinor / (2 * u.gravity * HeadLossMinor)) ** (1/4)
).to(u.m)
@ut.list_handler()
def diam_pipe(FlowRate, HeadLoss, Length, Nu, PipeRough, KMinor):
"""Return the pipe inner diameter that would result in the given total head
loss.
This function applies to both laminar and turbulent flow and
incorporates both minor and major losses.
:param FlowRate: flow rate of pipe
:type FlowRate: u.m**3/u.s
:param HeadLoss: total head loss from major and minor losses
:type HeadLoss: u.m
:param Length: length of pipe
:type Length: u.m
:param Nu: kinematic viscosity of fluid
:type Nu: u.m**2/u.s
:param PipeRough: roughness of pipe
:type PipeRough: u.m
:param KMinor: minor loss coefficient
:type KMinor: u.dimensionless or unitless
:return: inner diameter of pipe
:rtype: u.m
"""
if KMinor == 0:
Diam = diam_major_pipe(FlowRate, HeadLoss, Length, Nu,
PipeRough)
else:
Diam = max(diam_major_pipe(FlowRate, HeadLoss,
Length, Nu, PipeRough),
diam_minor_pipe(FlowRate, HeadLoss, KMinor))
err = 1.00
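        # Fixed-point iteration: split the total head loss between major and
        # minor losses for the current diameter, re-solve for the diameter
        # that produces the major-loss share, and stop when the relative
        # change in diameter falls below 0.1%.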
while err > 0.001:
DiamPrev = Diam
HLFricNew = (HeadLoss * headloss_major_pipe(FlowRate, Diam, Length,
Nu, PipeRough
)
/ (headloss_major_pipe(FlowRate, Diam, Length,
Nu, PipeRough
)
+ headloss_minor_pipe(FlowRate, Diam, KMinor
)
)
)
Diam = diam_major_pipe(FlowRate, HLFricNew, Length, Nu, PipeRough
)
err = abs(Diam - DiamPrev) / ((Diam + DiamPrev) / 2)
return Diam.to(u.m)
@ut.list_handler()
def pipe_ID(FlowRate, Pressure):
"""Return the inner diameter of a pipe for a given pressure
recovery constraint.
:param FlowRate: flow rate of pipe
:type FlowRate: u.m**3/u.s
:param Pressure: pressure recovery constraint
:type Pressure: u.m
:return: inner diameter of pipe
:rtype: u.m
"""
ut.check_range([FlowRate.magnitude, ">0", "Flow rate"],
[Pressure.magnitude, ">0", "Pressure"])
return np.sqrt(FlowRate/((np.pi/4)*np.sqrt(2*u.gravity*Pressure))).to(u.m)
############################ Weirs ############################
@ut.list_handler()
def width_rect_weir(FlowRate, Height):
"""
.. deprecated::
`width_rect_weir` is deprecated; use `width_weir_rect` instead.
"""
warnings.warn('width_rect_weir is deprecated; use '
'width_weir_rect instead.', UserWarning)
return width_weir_rect(FlowRate, Height)
@ut.list_handler()
def width_weir_rect(FlowRate, Height):
"""Return the width of a rectangular weir given its flow rate and the
height of the water above the weir. For a weir that is a vertical pipe,
this value is the circumference.
:param FlowRate: flow rate over weir
:type FlowRate: u.m**3/u.s
:param Height: height of water above weir
:type Height: u.m
:return: width of weir
    :rtype: u.m
"""
ut.check_range([FlowRate.magnitude, ">0", "Flow rate"],
[Height.magnitude, ">0", "Height"])
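    # Rectangular weir equation Q = (2/3)*Cv*sqrt(2*g)*Width*Height**(3/2),
    # rearranged to solve for the weir width, with the vena contracta ratio
    # used as the discharge coefficient.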
return ((3 / 2) * FlowRate / (con.VC_ORIFICE_RATIO
* np.sqrt(2 * u.gravity) * Height ** (3 / 2))
).to(u.m)
@ut.list_handler()
def headloss_weir(FlowRate, Width):
"""
.. deprecated::
`headloss_weir` is deprecated; use `headloss_weir_rect` instead.
"""
warnings.warn('headloss_weir is deprecated; use '
'headloss_weir_rect instead.', UserWarning)
return headloss_weir_rect(FlowRate, Width)
@ut.list_handler()
def headloss_weir_rect(FlowRate, Width):
"""Return the head loss of a rectangular or vertical pipe weir.
Head loss for a weir is the difference in height between the water
upstream of the weir and the top of the weir.
:param FlowRate: flow rate over weir
:type FlowRate: u.m**3/u.s
:param Width: width of weir (circumference for a vertical pipe)
:type Width: u.m
:return: head loss of weir
    :rtype: u.m
"""
ut.check_range([FlowRate.magnitude, ">0", "Flow rate"],
[Width.magnitude, ">0", "Width"])
return ((((3/2) * FlowRate
/ (con.VC_ORIFICE_RATIO * np.sqrt(2 * u.gravity) * Width)
) ** 2).to(u.m**3)) ** (1/3)
@ut.list_handler()
def flow_rect_weir(Height, Width):
"""
.. deprecated::
`flow_rect_weir` is deprecated; use `flow_weir_rect` instead.
"""
warnings.warn('flow_rect_weir is deprecated; use '
'flow_weir_rect instead.', UserWarning)
return flow_weir_rect(Height, Width)
@ut.list_handler()
def flow_weir_rect(Height, Width):
"""Return the flow rate of a rectangular or vertical pipe weir.
:param Height: height of water above weir
:type Height: u.m
:param Width: width of weir (circumference for a vertical pipe)
:type Width: u.m
    :return: flow rate over weir
:rtype: u.m**3/u.s
"""
ut.check_range([Height.magnitude, ">0", "Height"],
[Width.magnitude, ">0", "Width"])
return ((2/3) * con.VC_ORIFICE_RATIO
* (np.sqrt(2*u.gravity) * Height**(3/2))
* Width).to(u.m**3/u.s)
######################## Porous Media ########################
class DeprecatedFunctionError(Exception):
def __init__(self, message):
self.message = message
@ut.list_handler()
def headloss_kozeny(Length, DiamMedia=None, ApproachVel=None, Porosity=None, Nu=None, *, Diam=None, Vel=None):
"""
.. deprecated::
`headloss_kozeny` is deprecated; use `headloss_ergun` instead.
"""
raise DeprecatedFunctionError("This function is deprecated. Please use headloss_ergun.")
@ut.list_handler()
def re_ergun(ApproachVel, DiamMedia, Temperature, Porosity):
"""Return the Reynolds number for flow through porous media.
:param ApproachVel: approach velocity or superficial fluid velocity
:type ApproachVel: u.m/u.s
:param DiamMedia: particle diameter
:type DiamMedia: u.m
:param Temperature: temperature of porous medium
:type Temperature: u.degK
:param Porosity: porosity of porous medium
:type Porosity: u.dimensionless or unitless
:return: Reynolds number for flow through porous media
:rtype: u.dimensionless
"""
ut.check_range([ApproachVel.magnitude, ">0", "ApproachVel"],
[DiamMedia.magnitude, ">0", "DiamMedia"],
[Porosity, "0-1", "Porosity"])
if Porosity == 1:
raise ValueError("Porosity is " + str(Porosity) + " must be great than\
or equal to 0 and less than 1")
return (ApproachVel * DiamMedia /
(viscosity_kinematic_water(Temperature)
* (1 - Porosity))).to(u.dimensionless)
@ut.list_handler()
def fric_ergun(ApproachVel, DiamMedia, Temperature, Porosity):
"""Return the friction factor for flow through porous media.
    :param ApproachVel: approach velocity or superficial fluid velocity
:type ApproachVel: u.m/u.s
:param DiamMedia: particle diameter
:type DiamMedia: u.m
:param Temperature: temperature of porous medium
:type Temperature: u.degK
:param Porosity: porosity of porous medium
:type Porosity: u.dimensionless or unitless
:return: friction factor for flow through porous media
:rtype: u.dimensionless
"""
return (300 / re_ergun(ApproachVel, DiamMedia, Temperature, Porosity)
+ 3.5 * u.dimensionless)
@ut.list_handler()
def headloss_ergun(ApproachVel, DiamMedia, Temperature, Porosity, Length):
"""Return the frictional head loss for flow through porous media.
    :param ApproachVel: approach velocity or superficial fluid velocity
:type ApproachVel: u.m/u.s
:param DiamMedia: particle diameter
:type DiamMedia: u.m
:param Temperature: temperature of porous medium
:type Temperature: u.degK
:param Porosity: porosity of porous medium
:type Porosity: u.dimensionless or unitless
:param Length: length of pipe or duct
:type Length: u.m
:return: frictional head loss for flow through porous media
:rtype: u.m
"""
return (fric_ergun(ApproachVel, DiamMedia, Temperature, Porosity)
* Length / DiamMedia * ApproachVel**2 / (2*u.gravity) * (1-Porosity)
/ Porosity**3).to(u.m)
@ut.list_handler()
def g_cs_ergun(ApproachVel, DiamMedia, Temperature, Porosity):
"""Camp Stein velocity gradient for flow through porous media.
:param ApproachVel: superficial fluid velocity (VelSuperficial?)
:type ApproachVel: u.m/u.s
:param DiamMedia: particle diameter
:type DiamMedia: u.m
:param Temperature: temperature of porous medium
:type Temperature: u.degK
:param Porosity: porosity of porous medium
:type Porosity: u.dimensionless or unitless
:return: Camp Stein velocity gradient for flow through porous media
:rtype: u.Hz
"""
return np.sqrt(fric_ergun(ApproachVel, DiamMedia, Temperature, Porosity)
* ApproachVel**3 * (1-Porosity)
/ (2 * viscosity_kinematic_water(Temperature) * DiamMedia
* Porosity**4)).to(u.Hz)
######################## Miscellaneous ########################
@ut.list_handler()
def height_water_critical(FlowRate, Width):
"""Return the critical local water height.
:param FlowRate: flow rate of water
:type FlowRate: u.m**3/u.s
    :param Width: width of channel
:type Width: u.m
:return: critical water height
:rtype: u.m
"""
ut.check_range([FlowRate.magnitude, ">0", "Flow rate"],
[Width.magnitude, ">0", "Width"])
return ((FlowRate / (Width * np.sqrt(1*u.gravity))) ** (2/3)).to(u.m)
@ut.list_handler()
def vel_horizontal(HeightWaterCritical):
"""Return the horizontal velocity. (at the critical water depth??????)
:param HeightWaterCritical: critical water height
:type HeightWaterCritical: u.m
:return: horizontal velocity
:rtype: u.m/u.s
"""
ut.check_range([HeightWaterCritical.magnitude, ">0", "Critical height of water"])
return np.sqrt(u.gravity * HeightWaterCritical).to(u.m/u.s)
@ut.list_handler()
def manifold_id_alt(q, pr_max):
"""Return the inner diameter of a manifold when major losses are
negligible.
"""
manifold_id_alt = np.sqrt(
4 * q / (
np.pi * np.sqrt(
2 * u.gravity * pr_max
)
)
)
return manifold_id_alt
@ut.list_handler()
def manifold_id(q, h, l, q_ratio, nu, eps, k, n):
id_new = 2 * u.inch
id_old = 0 * u.inch
error = 1
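    # Fixed-point iteration on the manifold inner diameter: each pass
    # re-evaluates the friction factor at the previous diameter and solves the
    # head-loss budget (including the outlet-distribution factor
    # 1/3 + 1/(2n) + 1/(6n**2)) for a new diameter, stopping once the relative
    # change is below 1%.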
while error > 0.01:
id_old = id_new
id_new = (
((8 * q ** 2) / (u.gravity * np.pi ** 2 * h)) *
(
(
1 + fric_pipe(q, id_old, nu, eps) *
(1 / 3 + 1 / (2 * n) + 1 / (6 * n ** 2))
) /
(1 - q_ratio ** 2)
)
) ** (1 / 4)
error = np.abs(id_old - id_new) / id_new
return id_new
@ut.list_handler()
def manifold_nd(q, h, l, q_ratio, nu, eps, k, n, sdr):
manifold_nd = pipe.ND_SDR_available(
manifold_id(q, h, l, q_ratio, nu, eps, k, n),
sdr
)
return manifold_nd
@ut.list_handler()
def horiz_chan_w(q, depth, hl, l, nu, eps, manifold, k):
hl = min(hl, depth / 3)
horiz_chan_w_new = q / ((depth - hl) * np.sqrt(2 * u.gravity * hl))
error = 1
i = 0
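    # Iterate on the channel width: each pass re-evaluates the friction factor
    # and hydraulic radius at the previous width and solves the energy
    # equation (minor losses k plus wall friction) for the width that passes
    # flow q with head loss hl, stopping after 20 iterations or when the
    # relative change falls below 0.1%.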
while error > 0.001 and i < 20:
w = horiz_chan_w_new
i = i + 1
horiz_chan_w_new = np.sqrt(
(
1 + k +
fric_rect(q, w, depth - hl, nu, eps, True) *
(l / (4 * radius_hydraulic_rect(w, depth - hl, True))) *
(1 - (2 * (int(manifold) / 3)))
) / (2 * u.gravity * hl)
) * (q / (depth - hl))
error = np.abs(horiz_chan_w_new - w) / (horiz_chan_w_new + w)
return horiz_chan_w_new.to(u.m)
@ut.list_handler()
def horiz_chan_h(q, w, hl, l, nu, eps, manifold):
h_new = (q / (w * np.sqrt(2 * u.gravity * hl))) + hl
error = 1
i = 0
while error > 0.001 and i < 200:
h = h_new
hl_local = min(hl, h / 3)
i = i + 1
        h_new = (q / w) * np.sqrt(
            (1 + fric_rect(q, w, h - hl_local, nu, eps, True)
             * (l / (4 * radius_hydraulic_rect(w, h - hl_local, True)))
             * (1 - 2 * (int(manifold) / 3))
             ) / (2 * u.gravity * hl_local)) + hl_local
error = | np.abs(h_new - h) | numpy.abs |
"""
created on Sep 22, 2017
@author: <NAME>, jajcay(at)cs.cas.cz
"""
import numpy as np
def cross_correlation(a, b, max_lag):
"""
Cross correlation with lag.
When computing cross-correlation, the first parameter, a, is
in 'future' with positive lag and in 'past' with negative lag.
"""
a = (a - np.mean(a)) / (np.std(a, ddof = 1) * (len(a) - 1))
b = (b - np.mean(b)) / np.std(b, ddof = 1)
cor = np.correlate(a, b, 'full')
return cor[len(cor)//2 - max_lag : len(cor)//2 + max_lag+1]
def kdensity_estimate(a, kernel = 'gaussian', bandwidth = 1.0):
"""
Estimates kernel density. Uses sklearn.
kernels: 'gaussian', 'tophat', 'epanechnikov', 'exponential', 'linear', 'cosine'
"""
from sklearn.neighbors import KernelDensity
a = a[:, None]
x = np.linspace(a.min(), a.max(), 100)[:, None]
kde = KernelDensity(kernel = kernel, bandwidth = bandwidth).fit(a)
logkde = kde.score_samples(x)
return np.squeeze(x), np.exp(logkde)
def detrend_with_return(arr, axis = 0):
"""
Removes the linear trend along the axis, ignoring Nans.
"""
a = arr.copy()
rnk = len(a.shape)
# determine axis
if axis < 0:
axis += rnk # axis -1 means along last dimension
# reshape that axis is 1. dimension and other dimensions are enrolled into 2. dimensions
newdims = np.r_[axis, 0:axis, axis + 1:rnk]
newdata = np.reshape(np.transpose(a, tuple(newdims)), (a.shape[axis], np.prod(a.shape, axis = 0) // a.shape[axis]))
newdata = newdata.copy()
# compute linear fit as least squared residuals
x = | np.arange(0, a.shape[axis], 1) | numpy.arange |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
# CODE NAME HERE
# CODE DESCRIPTION HERE
Created on 2019-08-21 at 12:28
@author: cook
"""
from astropy.table import Table
from astropy import constants as cc
from astropy import units as uu
import numpy as np
import os
from scipy.optimize import curve_fit
import warnings
from apero import core
from apero import lang
from apero.core import constants
from apero.core import math as mp
from apero.core.core import drs_log
from apero.core.core import drs_file
from apero.io import drs_data
# =============================================================================
# Define variables
# =============================================================================
__NAME__ = 'science.rv.general.py'
__INSTRUMENT__ = 'None'
# Get constants
Constants = constants.load(__INSTRUMENT__)
# Get version and author
__version__ = Constants['DRS_VERSION']
__author__ = Constants['AUTHORS']
__date__ = Constants['DRS_DATE']
__release__ = Constants['DRS_RELEASE']
# get param dict
ParamDict = constants.ParamDict
DrsFitsFile = drs_file.DrsFitsFile
# Get function string
display_func = drs_log.display_func
# Get Logging function
WLOG = drs_log.wlog
# Get the text types
TextEntry = lang.drs_text.TextEntry
TextDict = lang.drs_text.TextDict
# alias pcheck
pcheck = core.pcheck
# Speed of light
# noinspection PyUnresolvedReferences
speed_of_light_ms = cc.c.to(uu.m / uu.s).value
# noinspection PyUnresolvedReferences
speed_of_light = cc.c.to(uu.km / uu.s).value
# =============================================================================
# Define functions
# =============================================================================
def measure_fp_peaks(params, props, limit, normpercent):
"""
Measure the positions of the FP peaks
Returns the pixels positions and Nth order of each FP peak
    :param params: parameter dictionary, ParamDict containing constants
Must contain at least:
drift_peak_border_size: int, the border size (edges in
x-direction) for the FP fitting
algorithm
drift_peak_fpbox_size: int, the box half-size (in pixels) to
fit an individual FP peak to - a
gaussian will be fit to +/- this size
from the center of the FP peak
drift_peak_peak_sig_lim: dictionary, the sigma above the median
that a peak must have to be recognised
as a valid peak (before fitting a
gaussian) dictionary must have keys
equal to the lamp types (hc, fp)
drift_peak_inter_peak_spacing: int, the minimum spacing between
peaks in order to be recognised
as a valid peak (before fitting
a gaussian)
log_opt: string, log option, normally the program name
    :param props: parameter dictionary, ParamDict containing data
Must contain at least:
speref: numpy array (2D), the reference spectrum
wave: numpy array (2D), the wave solution image
lamp: string, the lamp type (either 'hc' or 'fp')
    :return props: parameter dictionary, the updated parameter dictionary
Adds/updates the following:
ordpeak: numpy array (1D), the order number for each valid FP
peak
                xpeak: numpy array (1D), the central position of each gaussian fit
to valid FP peak
                ewpeak: numpy array (1D), the FWHM of each gaussian fit
to valid FP peak
vrpeak: numpy array (1D), the radial velocity drift for each
valid FP peak
llpeak: numpy array (1D), the delta wavelength for each valid
FP peak
amppeak: numpy array (1D), the amplitude for each valid FP peak
"""
func_name = __NAME__ + '.create_drift_file()'
# get the reference data and the wave data
speref = np.array(props['SPEREF'])
wave = props['WAVE']
# storage for order of peaks
allpeaksize = []
allordpeak = []
allxpeak = []
allewpeak = []
allvrpeak = []
allllpeak = []
allamppeak = []
alldcpeak = []
allshapepeak = []
# loop through the orders
for order_num in range(speref.shape[0]):
# storage for order of peaks
ordpeak = []
xpeak = []
ewpeak = []
vrpeak = []
llpeak = []
amppeak = []
dcpeak = []
shapepeak = []
# storage of warnings
warn_dict = dict()
# set number of peaks rejected to zero
nreject = 0
# set a counter for total number of peaks
ipeak = 0
# get the pixels for this order
tmp = np.array(speref[order_num, :])
# define indices
index = np.arange(len(tmp))
# ------------------------------------------------------------------
# normalize the spectrum
tmp = tmp / np.nanpercentile(tmp, normpercent)
# ------------------------------------------------------------------
# find the peaks
with warnings.catch_warnings(record=True) as w:
peakmask = (tmp[1:-1] > tmp[2:]) & (tmp[1:-1] > tmp[:-2])
peakpos = np.where(peakmask)[0]
# work out the FP width for this order
size = int(np.nanmedian(peakpos[1:] - peakpos[:-1]))
# ------------------------------------------------------------------
# mask for finding maximum peak
mask = np.ones_like(tmp)
# mask out the edges
mask[:size + 1] = 0
mask[-(size + 1):] = 0
# ------------------------------------------------------------------
# loop for peaks that are above a value of limit
while mp.nanmax(mask * tmp) > limit:
# --------------------------------------------------------------
# find peak along the order
maxpos = np.nanargmax(mask * tmp)
maxtmp = tmp[maxpos]
# --------------------------------------------------------------
# get the values around the max position
index_peak = index[maxpos - size: maxpos + size]
tmp_peak = tmp[maxpos - size: maxpos + size]
# --------------------------------------------------------------
# mask out this peak for next iteration of while loop
mask[maxpos - (size // 2):maxpos + (size // 2) + 1] = 0
# --------------------------------------------------------------
# return the initial guess and the best fit
p0, gg, _, warns = fit_fp_peaks(index_peak, tmp_peak, size)
# --------------------------------------------------------------
# only keep peaks within +/- 1 pixel of original peak
# (gaussian fit is to find sub-pixel value)
cond = np.abs(maxpos - gg[1]) < 1
if cond:
# work out the radial velocity of the peak
lambefore = wave[order_num, maxpos - 1]
lamafter = wave[order_num, maxpos + 1]
deltalam = lamafter - lambefore
# get the radial velocity
waveomax = wave[order_num, maxpos]
radvel = speed_of_light_ms * deltalam / (2.0 * waveomax)
# add to storage
ordpeak.append(order_num)
xpeak.append(gg[1])
ewpeak.append(gg[2])
vrpeak.append(radvel)
llpeak.append(deltalam)
amppeak.append(maxtmp)
shapepeak.append(gg[3])
dcpeak.append(gg[4])
else:
# add to rejected
nreject += 1
# iterator
ipeak += 1
# --------------------------------------------------------------
# deal with warnings
if warns is not None:
if warns in warn_dict:
warn_dict[warns] += 1
else:
warn_dict[warns] = 1
# --------------------------------------------------------------
# log how many FPs were found and how many rejected
wargs = [order_num, ipeak, nreject]
WLOG(params, '', TextEntry('40-018-00001', args=wargs))
# ------------------------------------------------------------------
# print warnings
for key in list(warn_dict.keys()):
wargs = [warn_dict[key], key]
WLOG(params, 'warning', TextEntry('00-018-00001', args=wargs))
# ------------------------------------------------------------------
# add values to all storage (and sort by xpeak)
indsort = np.argsort(xpeak)
allordpeak.append(np.array(ordpeak)[indsort])
allxpeak.append(np.array(xpeak)[indsort])
allewpeak.append(np.array(ewpeak)[indsort])
allvrpeak.append(np.array(vrpeak)[indsort])
allllpeak.append(np.array(llpeak)[indsort])
allamppeak.append(np.array(amppeak)[indsort])
allshapepeak.append(np.array(shapepeak)[indsort])
alldcpeak.append(np.array(dcpeak)[indsort])
allpeaksize.append(size)
# store values in loc
props['ORDPEAK'] = np.concatenate(allordpeak).astype(int)
props['XPEAK'] = np.concatenate(allxpeak)
props['PEAK2PEAK'] = np.concatenate(allewpeak)
props['VRPEAK'] = np.concatenate(allvrpeak)
props['LLPEAK'] = np.concatenate(allllpeak)
props['AMPPEAK'] = np.concatenate(allamppeak)
props['DCPEAK'] = np.concatenate(alldcpeak)
props['SHAPEPEAK'] = np.concatenate(allshapepeak)
props['PEAKSIZE'] = np.array(allpeaksize)
# set source
keys = ['ORDPEAK', 'XPEAK', 'PEAK2PEAK', 'VRPEAK', 'LLPEAK', 'AMPPEAK',
'DCPEAK', 'SHAPEPEAK', 'PEAKSIZE']
props.set_sources(keys, func_name)
# Log the total number of FP lines found
wargs = [len(props['XPEAK'])]
WLOG(params, 'info', TextEntry('40-018-00002', args=wargs))
# return the property parameter dictionary
return props
def fit_fp_peaks(x, y, size, return_model=False):
# storage of warnings
warns = None
# get gauss function
ea_airy = mp.ea_airy_function
# set up initial guess
pnames = ['amp', 'pos', 'period', 'shape', 'dc']
# [amp, position, period, exponent, zero point]
p0 = [np.max(y) - np.min(y), np.median(x), size, 1.5,
np.max([0, np.min(y)])]
# set up the bounds
lowerbounds = [0.5 * p0[0], p0[1] - 2, 0.7 * p0[2], 1.0, 0.0]
upperbounds = [2.0 * p0[0], p0[1] + 2, 1.3 * p0[2], 10.0, 0.5 * p0[0]]
bounds = [lowerbounds, upperbounds]
# test bounds make sense
for p_it in range(len(lowerbounds)):
if lowerbounds[p_it] >= upperbounds[p_it]:
if warns is None:
warns = ''
warns += ('\nBoundError: Lower bound {0} incorrect (lower={1} '
'upper={2})'.format(pnames[p_it], lowerbounds[p_it],
upperbounds[p_it]))
if p0[p_it] < lowerbounds[p_it] or p0[p_it] > upperbounds[p_it]:
if warns is None:
warns = ''
            warns += ('\nBoundError: Initial guess for {0} out of bounds '
'(guess={1} lower={2} upper={3})'
''.format(pnames[p_it], p0[p_it],
lowerbounds[p_it], upperbounds[p_it]))
# deal with bad bounds
if warns is not None:
popt = [np.nan, np.nan, np.nan, np.nan, np.nan]
pcov = None
model = np.repeat([np.nan], len(x))
else:
        # try to fit Etienne's Airy function (ea_airy)
try:
with warnings.catch_warnings(record=True) as _:
popt, pcov = curve_fit(ea_airy, x, y, p0=p0, bounds=bounds)
model = ea_airy(x, *popt)
except ValueError as e:
# log that ydata or xdata contains NaNs
popt = [np.nan, np.nan, np.nan, np.nan, np.nan]
pcov = None
warns = '{0}: {1}'.format(type(e), e)
model = np.repeat([np.nan], len(x))
except RuntimeError as e:
popt = [np.nan, np.nan, np.nan, np.nan, np.nan]
pcov = None
warns = '{0}: {1}'.format(type(e), e)
model = np.repeat([np.nan], len(x))
# deal with returning model
if return_model:
return p0, popt, pcov, warns, model
else:
# return the guess and the best fit
return p0, popt, pcov, warns
def remove_wide_peaks(params, props, cutwidth):
"""
Remove peaks that are too wide
    :param params: parameter dictionary, ParamDict containing constants
    :param props: parameter dictionary, ParamDict containing data
Must contain at least:
ordpeak: numpy array (1D), the order number for each valid FP
peak
                xpeak: numpy array (1D), the central position of each gaussian fit
to valid FP peak
                ewpeak: numpy array (1D), the FWHM of each gaussian fit
to valid FP peak
vrpeak: numpy array (1D), the radial velocity drift for each
valid FP peak
llpeak: numpy array (1D), the delta wavelength for each valid
FP peak
amppeak: numpy array (1D), the amplitude for each valid FP peak
:param expwidth: float or None, the expected width of FP peaks - used to
"normalise" peaks (which are then subsequently removed
if > "cutwidth") if expwidth is None taken from
p['DRIFT_PEAK_EXP_WIDTH']
    :param cutwidth: float or None, the normalised width (FP FWHM - expwidth)
                     above which an FP peak is considered too wide; the cut is
                     essentially FP FWHM < (expwidth + cutwidth). If cutwidth
                     is None it is taken from p['DRIFT_PEAK_NORM_WIDTH_CUT']
    :return props: parameter dictionary, the updated parameter dictionary
Adds/updates the following:
ordpeak: numpy array (1D), the order number for each valid FP
peak (masked to remove wide peaks)
                xpeak: numpy array (1D), the central position of each gaussian fit
to valid FP peak (masked to remove wide peaks)
                ewpeak: numpy array (1D), the FWHM of each gaussian fit
to valid FP peak (masked to remove wide peaks)
vrpeak: numpy array (1D), the radial velocity drift for each
valid FP peak (masked to remove wide peaks)
llpeak: numpy array (1D), the delta wavelength for each valid
FP peak (masked to remove wide peaks)
amppeak: numpy array (1D), the amplitude for each valid FP peak
(masked to remove wide peaks)
"""
func_name = __NAME__ + '.remove_wide_peaks()'
# define a mask to cut out wide peaks
mask = np.array(props['PEAK2PEAK']) < cutwidth
# apply mask
props['ORDPEAK'] = props['ORDPEAK'][mask]
props['XPEAK'] = props['XPEAK'][mask]
props['PEAK2PEAK'] = props['PEAK2PEAK'][mask]
props['VRPEAK'] = props['VRPEAK'][mask]
props['LLPEAK'] = props['LLPEAK'][mask]
props['AMPPEAK'] = props['AMPPEAK'][mask]
# check for and remove double-fitted lines
# save old position
props['XPEAK_OLD'] = | np.copy(props['XPEAK']) | numpy.copy |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun May 19 15:28:32 2019
@author: shuoz
"""
import os
import numpy as np
import pandas as pd
from sklearn.metrics import accuracy_score#, roc_auc_score
from sklearn.model_selection import StratifiedKFold, StratifiedShuffleSplit
from sklearn.preprocessing import StandardScaler
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.feature_selection import SelectPercentile, f_classif
import matplotlib.pyplot as plt
from matplotlib import rc
from sider import SIDeRSVM
from utils.load_data import load_data
import nibabel as nib
from nilearn import plotting
from sklearn.svm import SVC
def info2onehot(y):
n_sample = y.shape[0]
label_unique = np.unique(y)
n_unique = label_unique.shape[0]
A = np.zeros((n_sample, n_unique))
for i in range(len(label_unique)):
A[np.where(y == label_unique[i]), i] = 1
return A
def cat_onehot(X1, X2):
n_row1 = X1.shape[0]
n_col1 = X1.shape[1]
n_row2 = X2.shape[0]
n_col2 = X2.shape[1]
X = np.zeros((n_row1+n_row2, n_col1+n_col2))
X[:n_row1, :n_col1] = X1
X[n_row1:, n_col1:] = X2
return X
def get_hsic(X, Y, kernel_x='linear', kernel_y='linear', **kwargs):
n = X.shape[0]
I = np.eye(n)
H = I - 1. / n * np.ones((n, n))
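    # Biased HSIC estimator: HSIC(X, Y) = (n - 1)**-2 * tr(Kx @ H @ Ky @ H),
    # where H is the centering matrix.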
Kx = pairwise_kernels(X, metric=kernel_x, **kwargs)
Ky = pairwise_kernels(Y, metric=kernel_y, **kwargs)
return 1/np.square(n-1) * np.trace(np.linalg.multi_dot([Kx, H, Ky, H]))
def plot_coef(coef, img_name, maskimg, maskvox, thre_rate=0.05):
coef = coef.reshape(-1)
# selection = SelectPercentile(f_classif, percentile=thre_rate)
n_voxel_th = int(coef.shape[0] * thre_rate)
top_voxel_idx = (abs(coef)).argsort()[::-1][:n_voxel_th]
thre = coef[top_voxel_idx[-1]]
# coef_to_plot = np.zeros(coef.shape[0])
# coef_to_plot[top_voxel_idx] = coef[top_voxel_idx]
    # thre = np.amax(abs(coef)) * thre_rate # high absolute value times threshold rate
coef_array = | np.zeros((91, 109, 91)) | numpy.zeros |
from contextlib import suppress
from collections import OrderedDict, namedtuple
import json
import os
import numpy as np
from PyQt5.QtCore import pyqtSignal, QObject
from sscanss.config import settings, INSTRUMENTS_PATH
from sscanss.core.instrument import read_instrument_description_file, Sequence, Simulation
from sscanss.core.io import (write_project_hdf, read_project_hdf, read_3d_model, read_points, read_vectors,
write_binary_stl, write_points, validate_vector_length)
from sscanss.core.scene import validate_instrument_scene_size
from sscanss.core.util import PointType, LoadVector, Attributes, POINT_DTYPE
IDF = namedtuple('IDF', ['name', 'path', 'version'])
class MainWindowModel(QObject):
"""Manages project data and communicates to view via signals"""
sample_model_updated = pyqtSignal(object)
instrument_model_updated = pyqtSignal(object)
simulation_created = pyqtSignal()
sample_changed = pyqtSignal()
fiducials_changed = pyqtSignal()
measurement_points_changed = pyqtSignal()
measurement_vectors_changed = pyqtSignal()
instrument_controlled = pyqtSignal(int)
def __init__(self):
super().__init__()
self.project_data = None
self.save_path = ''
self.all_sample_key = 'All Samples'
self.simulation = None
self.instruments = {}
self.updateInstrumentList()
@property
def instrument(self):
"""Gets the diffraction instrument associated with the project
:return: diffraction instrument
:rtype: Instrument
"""
return self.project_data['instrument']
@instrument.setter
def instrument(self, value):
"""Sets the instrument
:param value: diffraction instrument
:type value: Instrument
"""
self.project_data['instrument'] = value
self.notifyChange(Attributes.Instrument)
def updateInstrumentList(self):
"""Updates the list of instrument description files found in the instrument directories"""
self.instruments.clear()
custom_path = settings.value(settings.Key.Custom_Instruments_Path)
directories = [path for path in (custom_path, INSTRUMENTS_PATH) if os.path.isdir(path)]
if not directories:
return
for path in directories:
for name in os.listdir(path):
idf = os.path.join(path, name, 'instrument.json')
if not os.path.isfile(idf):
continue
data = {}
with suppress(OSError, ValueError):
with open(idf) as json_file:
data = json.load(json_file)
instrument_data = data.get('instrument', None)
if instrument_data is None:
continue
name = instrument_data.get('name', '').strip().upper()
version = instrument_data.get('version', '').strip()
if name and version:
self.instruments[name] = IDF(name, idf, version)
def createProjectData(self, name, instrument=None):
"""Creates a new project
:param name: name of project
:type name: str
:param instrument: name of instrument
:type instrument: Union[str, None]
"""
self.project_data = {'name': name,
'instrument': None,
'instrument_version': None,
'sample': OrderedDict(),
'fiducials': np.recarray((0, ), dtype=POINT_DTYPE),
'measurement_points': np.recarray((0,), dtype=POINT_DTYPE),
'measurement_vectors': np.empty((0, 3, 1), dtype=np.float32),
'alignment': None}
if instrument is not None:
self.changeInstrument(instrument)
def checkInstrumentVersion(self):
"""Checks the project instrument version is the same as in the instrument list
:return: indicates if instrument version is the same
:rtype: bool
"""
if self.instrument.name not in self.instruments:
return False
if self.project_data['instrument_version'] != self.instruments[self.instrument.name].version:
return False
return True
def saveProjectData(self, filename):
"""Saves the project data to a HDF file
:param filename: filename
:type filename: str
"""
write_project_hdf(self.project_data, filename)
def changeInstrument(self, name):
"""Changes the current instrument to instrument with given name
:param name: name of instrument
:type name: str
"""
instrument = read_instrument_description_file(self.instruments[name].path)
if not validate_instrument_scene_size(instrument):
            raise ValueError('The scene is too big: the distance from the origin exceeds the max extent')
self.instrument = instrument
self.project_data['instrument_version'] = self.instruments[name].version
self.correctVectorDetectorSize()
def correctVectorDetectorSize(self):
"""Folds or expands the measurement vectors to match size required by current instrument"""
vectors = self.measurement_vectors
new_size = 3 * len(self.instrument.detectors)
if vectors.size == 0:
self.measurement_vectors = np.empty((0, new_size, 1), dtype=np.float32)
elif vectors.shape[1] > new_size:
fold = vectors.shape[1] // 3
temp = np.zeros((vectors.shape[0], new_size, vectors.shape[2] * fold), dtype=np.float32)
temp[:, 0:3, :] = np.dstack( | np.hsplit(vectors, fold) | numpy.hsplit |
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import os
import unittest
import numpy as np
from pymatgen.io.pwscf import PWInput, PWInputError, PWOutput
from pymatgen.util.testing import PymatgenTest
from pymatgen.core import Lattice, Structure
class PWInputTest(PymatgenTest):
def test_init(self):
s = self.get_structure("Li2O")
self.assertRaises(
PWInputError,
PWInput,
s,
control={"calculation": "scf", "pseudo_dir": "./"},
pseudo={"Li": "Li.pbe-n-kjpaw_psl.0.1.UPF"},
)
def test_str_mixed_oxidation(self):
s = self.get_structure("Li2O")
s.remove_oxidation_states()
s[1] = "Li1"
pw = PWInput(
s,
control={"calculation": "scf", "pseudo_dir": "./"},
pseudo={
"Li": "Li.pbe-n-kjpaw_psl.0.1.UPF",
"Li+": "Li.pbe-n-kjpaw_psl.0.1.UPF",
"O": "O.pbe-n-kjpaw_psl.0.1.UPF",
},
system={"ecutwfc": 50},
)
ans = """&CONTROL
calculation = 'scf',
pseudo_dir = './',
/
&SYSTEM
ecutwfc = 50,
ibrav = 0,
nat = 3,
ntyp = 3,
/
&ELECTRONS
/
&IONS
/
&CELL
/
ATOMIC_SPECIES
Li 6.9410 Li.pbe-n-kjpaw_psl.0.1.UPF
Li+ 6.9410 Li.pbe-n-kjpaw_psl.0.1.UPF
O 15.9994 O.pbe-n-kjpaw_psl.0.1.UPF
ATOMIC_POSITIONS crystal
O 0.000000 0.000000 0.000000
Li+ 0.750178 0.750178 0.750178
Li 0.249822 0.249822 0.249822
K_POINTS automatic
1 1 1 0 0 0
CELL_PARAMETERS angstrom
2.917389 0.097894 1.520005
0.964634 2.755036 1.520005
0.133206 0.097894 3.286918
"""
self.assertEqual(pw.__str__().strip(), ans.strip())
def test_str_without_oxidation(self):
s = self.get_structure("Li2O")
s.remove_oxidation_states()
pw = PWInput(
s,
control={"calculation": "scf", "pseudo_dir": "./"},
pseudo={
"Li": "Li.pbe-n-kjpaw_psl.0.1.UPF",
"O": "O.pbe-n-kjpaw_psl.0.1.UPF",
},
system={"ecutwfc": 50},
)
ans = """&CONTROL
calculation = 'scf',
pseudo_dir = './',
/
&SYSTEM
ecutwfc = 50,
ibrav = 0,
nat = 3,
ntyp = 2,
/
&ELECTRONS
/
&IONS
/
&CELL
/
ATOMIC_SPECIES
Li 6.9410 Li.pbe-n-kjpaw_psl.0.1.UPF
O 15.9994 O.pbe-n-kjpaw_psl.0.1.UPF
ATOMIC_POSITIONS crystal
O 0.000000 0.000000 0.000000
Li 0.750178 0.750178 0.750178
Li 0.249822 0.249822 0.249822
K_POINTS automatic
1 1 1 0 0 0
CELL_PARAMETERS angstrom
2.917389 0.097894 1.520005
0.964634 2.755036 1.520005
0.133206 0.097894 3.286918
"""
self.assertEqual(pw.__str__().strip(), ans.strip())
def test_str_with_oxidation(self):
s = self.get_structure("Li2O")
pw = PWInput(
s,
control={"calculation": "scf", "pseudo_dir": "./"},
pseudo={
"Li+": "Li.pbe-n-kjpaw_psl.0.1.UPF",
"O2-": "O.pbe-n-kjpaw_psl.0.1.UPF",
},
system={"ecutwfc": 50},
)
ans = """&CONTROL
calculation = 'scf',
pseudo_dir = './',
/
&SYSTEM
ecutwfc = 50,
ibrav = 0,
nat = 3,
ntyp = 2,
/
&ELECTRONS
/
&IONS
/
&CELL
/
ATOMIC_SPECIES
Li+ 6.9410 Li.pbe-n-kjpaw_psl.0.1.UPF
O2- 15.9994 O.pbe-n-kjpaw_psl.0.1.UPF
ATOMIC_POSITIONS crystal
O2- 0.000000 0.000000 0.000000
Li+ 0.750178 0.750178 0.750178
Li+ 0.249822 0.249822 0.249822
K_POINTS automatic
1 1 1 0 0 0
CELL_PARAMETERS angstrom
2.917389 0.097894 1.520005
0.964634 2.755036 1.520005
0.133206 0.097894 3.286918
"""
self.assertEqual(pw.__str__().strip(), ans.strip())
def test_read_str(self):
string = """
&CONTROL
calculation = 'scf'
pseudo_dir = './'
wf_collect = .TRUE.
/
&SYSTEM
ibrav = 0,
nat = 53
ntyp = 2
input_dft = 'PBE'
ecutwfc = 80
nspin = 1
nbnd = 280
/
&ELECTRONS
/
&IONS
/
&CELL
/
ATOMIC_SPECIES
Mg 24.3050 Mg_ONCV_PBE-1.2.upf
O 15.9994 O_ONCV_PBE-1.2.upf
ATOMIC_POSITIONS crystal
Mg -0.000000000 0.000000000 -0.000000000
Mg 0.000000000 1.000000000 0.333134366
Mg 0.000000000 1.000000000 0.666865634
Mg -0.000000000 0.333134366 1.000000000
Mg 0.000037606 0.333320465 0.333320465
Mg 0.000000000 0.333134366 0.666865634
Mg 0.000000000 0.666865634 1.000000000
Mg 0.000000000 0.666865634 0.333134366
Mg -0.000037606 0.666679535 0.666679535
Mg 0.333134366 0.000000000 0.000000000
Mg 0.333320465 0.000037606 0.333320465
Mg 0.333134366 1.000000000 0.666865634
Mg 0.333320465 0.333320465 0.000037606
Mg 0.333320465 0.333320465 0.333320465
Mg 0.331436170 0.331436170 0.668563830
Mg 0.333134366 0.666865634 -0.000000000
Mg 0.331436170 0.668563830 0.331436170
Mg 0.331436170 0.668563830 0.668563830
Mg 0.666865634 0.000000000 0.000000000
Mg 0.666865634 0.000000000 0.333134366
Mg 0.666679535 -0.000037606 0.666679535
Mg 0.666865634 0.333134366 -0.000000000
Mg 0.668563830 0.331436170 0.331436170
Mg 0.668563830 0.331436170 0.668563830
Mg 0.666679535 0.666679535 -0.000037606
Mg 0.668563830 0.668563830 0.331436170
Mg 0.666679535 0.666679535 0.666679535
O 0.166588534 0.166588534 0.166588534
O 0.166588534 0.166588534 0.500235399
O 0.166465543 0.166465543 0.833534457
O 0.166588534 0.500235399 0.166588534
O 0.166169242 0.500000000 0.500000000
O 0.166169242 0.500000000 0.833830758
O 0.166465543 0.833534457 0.166465543
O 0.166169242 0.833830758 0.500000000
O 0.166465543 0.833534457 0.833534457
O 0.500235399 0.166588534 0.166588534
O 0.500000000 0.166169242 0.500000000
O 0.500000000 0.166169242 0.833830758
O 0.500000000 0.500000000 0.166169242
O 0.500000000 0.500000000 0.833830758
O 0.500000000 0.833830758 0.166169242
O 0.500000000 0.833830758 0.500000000
O 0.499764601 0.833411466 0.833411466
O 0.833534457 0.166465543 0.166465543
O 0.833830758 0.166169242 0.500000000
O 0.833534457 0.166465543 0.833534457
O 0.833830758 0.500000000 0.166169242
O 0.833830758 0.500000000 0.500000000
O 0.833411466 0.499764601 0.833411466
O 0.833534457 0.833534457 0.166465543
O 0.833411466 0.833411466 0.499764601
O 0.833411466 0.833411466 0.833411466
K_POINTS gamma
CELL_PARAMETERS angstrom
0.000000 6.373854 6.373854
6.373854 0.000000 6.373854
6.373854 6.373854 0.000000
"""
        lattice = np.array([[0.0, 6.373854, 6.373854], [6.373854, 0.0, 6.373854], [6.373854, 6.373854, 0.0]])
from builtins import zip
from builtins import range
import numpy as np
from .baseStacker import BaseStacker
import warnings
__all__ = ['setupDitherStackers', 'wrapRADec', 'wrapRA', 'inHexagon', 'polygonCoords',
'BaseDitherStacker',
'RandomDitherFieldPerVisitStacker', 'RandomDitherFieldPerNightStacker',
'RandomDitherPerNightStacker',
'SpiralDitherFieldPerVisitStacker', 'SpiralDitherFieldPerNightStacker',
'SpiralDitherPerNightStacker',
'HexDitherFieldPerVisitStacker', 'HexDitherFieldPerNightStacker',
'HexDitherPerNightStacker',
'RandomRotDitherPerFilterChangeStacker']
# Stacker naming scheme:
# [Pattern]Dither[Field]Per[Timescale].
# Timescale indicates how often the dither offset is changed.
# The presence of 'Field' indicates that a new offset is chosen per field, on the indicated timescale.
# The absence of 'Field' indicates that all visits within the indicated timescale use the same dither offset.
# Original dither stackers (Random, Spiral, Hex) written by <NAME> (<EMAIL>)
# Additional dither stackers written by <NAME> (<EMAIL>), with addition of
# constraining dither offsets to be within an inscribed hexagon (code modifications for use here by LJ).
def setupDitherStackers(raCol, decCol, degrees, **kwargs):
b = BaseStacker()
stackerList = []
if raCol in b.sourceDict:
stackerList.append(b.sourceDict[raCol](degrees=degrees, **kwargs))
if decCol in b.sourceDict:
if b.sourceDict[raCol] != b.sourceDict[decCol]:
stackerList.append(b.sourceDict[decCol](degrees=degrees, **kwargs))
return stackerList
def wrapRADec(ra, dec):
"""
    Wrap RA into 0-2pi and Dec into +/- pi/2.
Parameters
----------
ra : numpy.ndarray
RA in radians
dec : numpy.ndarray
Dec in radians
Returns
-------
numpy.ndarray, numpy.ndarray
Wrapped RA/Dec values, in radians.
"""
# Wrap dec.
low = np.where(dec < -np.pi / 2.0)[0]
dec[low] = -1 * (np.pi + dec[low])
ra[low] = ra[low] - np.pi
high = np.where(dec > np.pi / 2.0)[0]
dec[high] = np.pi - dec[high]
ra[high] = ra[high] - np.pi
# Wrap RA.
ra = ra % (2.0 * np.pi)
return ra, dec
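# Example (sketch): a declination beyond +pi/2 is reflected back into range and the
# matching RA is rotated by pi before being wrapped into [0, 2pi), e.g.
#
#   ra, dec = wrapRADec(np.array([0.5]), np.array([2.0]))
#   # dec -> pi - 2.0 ~= 1.14 rad, ra -> (0.5 - pi) % (2 * pi) ~= 3.64 rad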
def wrapRA(ra):
"""
Wrap only RA values into 0-2pi (using mod).
Parameters
----------
ra : numpy.ndarray
RA in radians
Returns
-------
numpy.ndarray
Wrapped RA values, in radians.
"""
ra = ra % (2.0 * np.pi)
return ra
def inHexagon(xOff, yOff, maxDither):
"""
Identify dither offsets which fall within the inscribed hexagon.
Parameters
----------
xOff : numpy.ndarray
The x values of the dither offsets.
    yOff : numpy.ndarray
The y values of the dither offsets.
maxDither : float
The maximum dither offset.
Returns
-------
numpy.ndarray
Indexes of the offsets which are within the hexagon inscribed inside the 'maxDither' radius circle.
"""
# Set up the hexagon limits.
# y = mx + b, 2h is the height.
m = np.sqrt(3.0)
b = m * maxDither
h = m / 2.0 * maxDither
# Identify offsets inside hexagon.
inside = np.where((yOff < m * xOff + b) &
(yOff > m * xOff - b) &
(yOff < -m * xOff + b) &
(yOff > -m * xOff - b) &
(yOff < h) & (yOff > -h))[0]
return inside
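# Example (sketch): typical use is to reject candidate offsets that fall outside the
# hexagon inscribed in the maxDither circle, e.g.
#
#   rng = np.random.RandomState(42)
#   xOff = rng.uniform(-1.75, 1.75, 1000)
#   yOff = rng.uniform(-1.75, 1.75, 1000)
#   keep = inHexagon(xOff, yOff, maxDither=1.75)
#   xOff, yOff = xOff[keep], yOff[keep]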
def polygonCoords(nside, radius, rotationAngle):
"""
Find the x,y coords of a polygon.
This is useful for plotting dither points and showing they lie within
a given shape.
Parameters
----------
nside : int
The number of sides of the polygon
radius : float
The radius within which to plot the polygon
rotationAngle : float
The angle to rotate the polygon to.
Returns
-------
[float, float]
List of x/y coordinates of the points describing the polygon.
"""
eachAngle = 2 * np.pi / float(nside)
xCoords = np.zeros(nside, float)
yCoords = np.zeros(nside, float)
for i in range(0, nside):
xCoords[i] = np.sin(eachAngle * i + rotationAngle) * radius
yCoords[i] = np.cos(eachAngle * i + rotationAngle) * radius
return list(zip(xCoords, yCoords))
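# Example (sketch): the hexagon corresponding to a 1.75 degree dither radius can be
# drawn from the returned vertex list, e.g.
#
#   import matplotlib.pyplot as plt
#   corners = polygonCoords(nside=6, radius=1.75, rotationAngle=0.0)
#   xs, ys = zip(*corners)
#   plt.plot(list(xs) + [xs[0]], list(ys) + [ys[0]])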
class BaseDitherStacker(BaseStacker):
"""Base class for dither stackers.
The base class just adds an easy way to define a stacker as one of the 'dither' types of stackers.
These run first, before any other stackers.
Parameters
----------
raCol : str, optional
The name of the RA column in the data.
Default 'fieldRA'.
decCol : str, optional
The name of the Dec column in the data.
Default 'fieldDec'.
degrees : bool, optional
Flag whether RA/Dec should be treated as (and kept as) degrees.
maxDither : float, optional
The radius of the maximum dither offset, in degrees.
Default 1.75 degrees.
inHex : bool, optional
If True, offsets are constrained to lie within a hexagon inscribed within the maxDither circle.
If False, offsets can lie anywhere out to the edges of the maxDither circle.
Default True.
"""
colsAdded = []
def __init__(self, raCol='fieldRA', decCol='fieldDec', degrees=True,
maxDither=1.75, inHex=True):
# Instantiate the RandomDither object and set internal variables.
self.raCol = raCol
self.decCol = decCol
self.degrees = degrees
# Convert maxDither to radians for internal use.
self.maxDither = np.radians(maxDither)
self.inHex = inHex
# self.units used for plot labels
if self.degrees:
self.units = ['deg', 'deg']
else:
self.units = ['rad', 'rad']
# Values required for framework operation: this specifies the data columns required from the database.
self.colsReq = [self.raCol, self.decCol]
class RandomDitherFieldPerVisitStacker(BaseDitherStacker):
"""
Randomly dither the RA and Dec pointings up to maxDither degrees from center,
with a different offset for each field, for each visit.
Parameters
----------
raCol : str, optional
The name of the RA column in the data.
Default 'fieldRA'.
decCol : str, optional
The name of the Dec column in the data.
Default 'fieldDec'.
degrees : bool, optional
Flag whether RA/Dec should be treated as (and kept as) degrees.
maxDither : float, optional
The radius of the maximum dither offset, in degrees.
Default 1.75 degrees.
inHex : bool, optional
If True, offsets are constrained to lie within a hexagon inscribed within the maxDither circle.
If False, offsets can lie anywhere out to the edges of the maxDither circle.
Default True.
randomSeed : int or None, optional
If set, then used as the random seed for the numpy random number generation for the dither offsets.
Default None.
"""
# Values required for framework operation: this specifies the name of the new columns.
colsAdded = ['randomDitherFieldPerVisitRa', 'randomDitherFieldPerVisitDec']
def __init__(self, raCol='fieldRA', decCol='fieldDec', degrees=True, maxDither=1.75,
inHex=True, randomSeed=None):
"""
@ MaxDither in degrees
"""
super().__init__(raCol=raCol, decCol=decCol, degrees=degrees, maxDither=maxDither, inHex=inHex)
self.randomSeed = randomSeed
def _generateRandomOffsets(self, noffsets):
xOut = np.array([], float)
yOut = np.array([], float)
maxTries = 100
tries = 0
while (len(xOut) < noffsets) and (tries < maxTries):
dithersRad = np.sqrt(self._rng.rand(noffsets * 2)) * self.maxDither
dithersTheta = self._rng.rand(noffsets * 2) * np.pi * 2.0
xOff = dithersRad * np.cos(dithersTheta)
yOff = dithersRad * np.sin(dithersTheta)
if self.inHex:
# Constrain dither offsets to be within hexagon.
idx = inHexagon(xOff, yOff, self.maxDither)
xOff = xOff[idx]
yOff = yOff[idx]
xOut = np.concatenate([xOut, xOff])
yOut = np.concatenate([yOut, yOff])
tries += 1
if len(xOut) < noffsets:
raise ValueError('Could not find enough random points within the hexagon in %d tries. '
'Try another random seed?' % (maxTries))
self.xOff = xOut[0:noffsets]
self.yOff = yOut[0:noffsets]
def _run(self, simData, cols_present=False):
if cols_present:
# Column already present in data; assume it is correct and does not need recalculating.
return simData
# Generate random numbers for dither, using defined seed value if desired.
if not hasattr(self, '_rng'):
if self.randomSeed is not None:
self._rng = np.random.RandomState(self.randomSeed)
else:
self._rng = np.random.RandomState(2178813)
# Generate the random dither values.
noffsets = len(simData[self.raCol])
self._generateRandomOffsets(noffsets)
# Add to RA and dec values.
if self.degrees:
ra = np.radians(simData[self.raCol])
dec = np.radians(simData[self.decCol])
else:
ra = simData[self.raCol]
dec = simData[self.decCol]
simData['randomDitherFieldPerVisitRa'] = (ra + self.xOff / np.cos(dec))
simData['randomDitherFieldPerVisitDec'] = dec + self.yOff
# Wrap back into expected range.
simData['randomDitherFieldPerVisitRa'], simData['randomDitherFieldPerVisitDec'] = \
wrapRADec(simData['randomDitherFieldPerVisitRa'], simData['randomDitherFieldPerVisitDec'])
# Convert to degrees
if self.degrees:
for col in self.colsAdded:
simData[col] = np.degrees(simData[col])
return simData
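# Usage sketch (hedged: assumes the `run` entry point provided by BaseStacker, which is
# not shown in this file):
#
#   sim = np.zeros(3, dtype=[('fieldRA', float), ('fieldDec', float)])
#   sim['fieldRA'] = [30.0, 30.0, 120.0]
#   sim['fieldDec'] = [-10.0, -10.0, 5.0]
#   stacker = RandomDitherFieldPerVisitStacker(randomSeed=42)
#   sim = stacker.run(sim)   # adds 'randomDitherFieldPerVisitRa'/'Dec' columns, in degrees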
class RandomDitherFieldPerNightStacker(RandomDitherFieldPerVisitStacker):
"""
Randomly dither the RA and Dec pointings up to maxDither degrees from center,
one dither offset per new night of observation of a field.
e.g. visits within the same night, to the same field, have the same offset.
Parameters
----------
raCol : str, optional
The name of the RA column in the data.
Default 'fieldRA'.
decCol : str, optional
The name of the Dec column in the data.
Default 'fieldDec'.
degrees : bool, optional
Flag whether RA/Dec should be treated as (and kept as) degrees.
fieldIdCol : str, optional
The name of the fieldId column in the data.
Used to identify fields which should be identified as the 'same'.
Default 'fieldId'.
nightCol : str, optional
The name of the night column in the data.
Default 'night'.
maxDither : float, optional
The radius of the maximum dither offset, in degrees.
Default 1.75 degrees.
inHex : bool, optional
If True, offsets are constrained to lie within a hexagon inscribed within the maxDither circle.
If False, offsets can lie anywhere out to the edges of the maxDither circle.
Default True.
randomSeed : int or None, optional
If set, then used as the random seed for the numpy random number generation for the dither offsets.
Default None.
"""
# Values required for framework operation: this specifies the names of the new columns.
colsAdded = ['randomDitherFieldPerNightRa', 'randomDitherFieldPerNightDec']
def __init__(self, raCol='fieldRA', decCol='fieldDec', degrees=True, fieldIdCol='fieldId',
nightCol='night', maxDither=1.75, inHex=True, randomSeed=None):
"""
@ MaxDither in degrees
"""
# Instantiate the RandomDither object and set internal variables.
super().__init__(raCol=raCol, decCol=decCol, degrees=degrees,
maxDither=maxDither, inHex=inHex, randomSeed=randomSeed)
self.nightCol = nightCol
self.fieldIdCol = fieldIdCol
# Values required for framework operation: this specifies the data columns required from the database.
self.colsReq = [self.raCol, self.decCol, self.nightCol, self.fieldIdCol]
def _run(self, simData, cols_present=False):
if cols_present:
return simData
# Generate random numbers for dither, using defined seed value if desired.
if not hasattr(self, '_rng'):
if self.randomSeed is not None:
self._rng = np.random.RandomState(self.randomSeed)
else:
self._rng = np.random.RandomState(872453)
# Generate the random dither values, one per night per field.
fields = np.unique(simData[self.fieldIdCol])
nights = np.unique(simData[self.nightCol])
self._generateRandomOffsets(len(fields) * len(nights))
if self.degrees:
ra = np.radians(simData[self.raCol])
dec = np.radians(simData[self.decCol])
else:
ra = simData[self.raCol]
dec = simData[self.decCol]
# counter to ensure new random numbers are chosen every time
delta = 0
for fieldid in np.unique(simData[self.fieldIdCol]):
# Identify observations of this field.
match = np.where(simData[self.fieldIdCol] == fieldid)[0]
# Apply dithers, increasing each night.
nights = simData[self.nightCol][match]
vertexIdxs = np.searchsorted(np.unique(nights), nights)
vertexIdxs = vertexIdxs % len(self.xOff)
# ensure that the same xOff/yOff entries are not chosen
delta = delta + len(vertexIdxs)
simData['randomDitherFieldPerNightRa'][match] = (ra[match] +
self.xOff[vertexIdxs] /
np.cos(dec[match]))
simData['randomDitherFieldPerNightDec'][match] = (dec[match] +
self.yOff[vertexIdxs])
# Wrap into expected range.
simData['randomDitherFieldPerNightRa'], simData['randomDitherFieldPerNightDec'] = \
wrapRADec(simData['randomDitherFieldPerNightRa'], simData['randomDitherFieldPerNightDec'])
if self.degrees:
for col in self.colsAdded:
simData[col] = np.degrees(simData[col])
return simData
class RandomDitherPerNightStacker(RandomDitherFieldPerVisitStacker):
"""
Randomly dither the RA and Dec pointings up to maxDither degrees from center,
one dither offset per night.
All fields observed within the same night get the same offset.
Parameters
----------
raCol : str, optional
The name of the RA column in the data.
Default 'fieldRA'.
decCol : str, optional
The name of the Dec column in the data.
Default 'fieldDec'.
degrees : bool, optional
Flag whether RA/Dec should be treated as (and kept as) degrees.
nightCol : str, optional
The name of the night column in the data.
Default 'night'.
maxDither : float, optional
The radius of the maximum dither offset, in degrees.
Default 1.75 degrees.
inHex : bool, optional
If True, offsets are constrained to lie within a hexagon inscribed within the maxDither circle.
If False, offsets can lie anywhere out to the edges of the maxDither circle.
Default True.
randomSeed : int or None, optional
If set, then used as the random seed for the numpy random number generation for the dither offsets.
Default None.
"""
# Values required for framework operation: this specifies the names of the new columns.
colsAdded = ['randomDitherPerNightRa', 'randomDitherPerNightDec']
def __init__(self, raCol='fieldRA', decCol='fieldDec', degrees=True, nightCol='night',
maxDither=1.75, inHex=True, randomSeed=None):
"""
@ MaxDither in degrees
"""
# Instantiate the RandomDither object and set internal variables.
super().__init__(raCol=raCol, decCol=decCol, degrees=degrees,
maxDither=maxDither, inHex=inHex, randomSeed=randomSeed)
self.nightCol = nightCol
# Values required for framework operation: this specifies the data columns required from the database.
self.colsReq = [self.raCol, self.decCol, self.nightCol]
def _run(self, simData, cols_present=False):
if cols_present:
return simData
# Generate random numbers for dither, using defined seed value if desired.
if not hasattr(self, '_rng'):
if self.randomSeed is not None:
self._rng = np.random.RandomState(self.randomSeed)
else:
self._rng = np.random.RandomState(66334)
# Generate the random dither values, one per night.
nights = np.unique(simData[self.nightCol])
self._generateRandomOffsets(len(nights))
if self.degrees:
ra = np.radians(simData[self.raCol])
dec = np.radians(simData[self.decCol])
else:
ra = simData[self.raCol]
dec = simData[self.decCol]
# Add to RA and dec values.
for n, x, y in zip(nights, self.xOff, self.yOff):
match = np.where(simData[self.nightCol] == n)[0]
simData['randomDitherPerNightRa'][match] = (ra[match] +
x / np.cos(dec[match]))
simData['randomDitherPerNightDec'][match] = dec[match] + y
# Wrap RA/Dec into expected range.
simData['randomDitherPerNightRa'], simData['randomDitherPerNightDec'] = \
wrapRADec(simData['randomDitherPerNightRa'], simData['randomDitherPerNightDec'])
if self.degrees:
for col in self.colsAdded:
simData[col] = np.degrees(simData[col])
return simData
class SpiralDitherFieldPerVisitStacker(BaseDitherStacker):
"""
Offset along an equidistant spiral with numPoints, out to a maximum radius of maxDither.
Each visit to a field receives a new, sequential offset.
Parameters
----------
raCol : str, optional
The name of the RA column in the data.
Default 'fieldRA'.
decCol : str, optional
The name of the Dec column in the data.
Default 'fieldDec'.
degrees : bool, optional
Flag whether RA/Dec should be treated as (and kept as) degrees.
fieldIdCol : str, optional
The name of the fieldId column in the data.
Used to identify fields which should be identified as the 'same'.
Default 'fieldId'.
numPoints : int, optional
The number of points in the spiral.
Default 60.
maxDither : float, optional
The radius of the maximum dither offset, in degrees.
Default 1.75 degrees.
nCoils : int, optional
The number of coils the spiral should have.
Default 5.
inHex : bool, optional
If True, offsets are constrained to lie within a hexagon inscribed within the maxDither circle.
If False, offsets can lie anywhere out to the edges of the maxDither circle.
Default True.
"""
# Values required for framework operation: this specifies the names of the new columns.
colsAdded = ['spiralDitherFieldPerVisitRa', 'spiralDitherFieldPerVisitDec']
def __init__(self, raCol='fieldRA', decCol='fieldDec', degrees=True, fieldIdCol='fieldId',
numPoints=60, maxDither=1.75, nCoils=5, inHex=True):
"""
@ MaxDither in degrees
"""
super().__init__(raCol=raCol, decCol=decCol, degrees=degrees, maxDither=maxDither, inHex=inHex)
self.fieldIdCol = fieldIdCol
# Convert maxDither from degrees (internal units for ra/dec are radians)
self.numPoints = numPoints
self.nCoils = nCoils
# Values required for framework operation: this specifies the data columns required from the database.
self.colsReq = [self.raCol, self.decCol, self.fieldIdCol]
def _generateSpiralOffsets(self):
# First generate a full archimedean spiral ..
theta = np.arange(0.0001, self.nCoils * np.pi * 2., 0.001)
a = self.maxDither/theta.max()
if self.inHex:
a = 0.85 * a
r = theta * a
# Then pick out equidistant points along the spiral.
arc = a / 2.0 * (theta * np.sqrt(1 + theta**2) + np.log(theta + np.sqrt(1 + theta**2)))
stepsize = arc.max()/float(self.numPoints)
arcpts = np.arange(0, arc.max(), stepsize)
arcpts = arcpts[0:self.numPoints]
rpts = np.zeros(self.numPoints, float)
thetapts = np.zeros(self.numPoints, float)
for i, ap in enumerate(arcpts):
diff = np.abs(arc - ap)
match = np.where(diff == diff.min())[0]
rpts[i] = r[match]
thetapts[i] = theta[match]
# Translate these r/theta points into x/y (ra/dec) offsets.
self.xOff = rpts * np.cos(thetapts)
self.yOff = rpts * np.sin(thetapts)
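    # Illustrative sketch (assumes only the defaults defined above): the offsets are
    # sampled at (approximately) equal arc-length intervals along the spiral rather than
    # at equal angles, so consecutive dither positions move a roughly constant distance.
    #
    #   stacker = SpiralDitherFieldPerVisitStacker()
    #   stacker._generateSpiralOffsets()
    #   steps = np.hypot(np.diff(stacker.xOff), np.diff(stacker.yOff))
    #   # steps is close to constant, apart from the discretisation of the theta grid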
def _run(self, simData, cols_present=False):
if cols_present:
return simData
# Generate the spiral offset vertices.
self._generateSpiralOffsets()
# Now apply to observations.
if self.degrees:
ra = np.radians(simData[self.raCol])
dec = np.radians(simData[self.decCol])
else:
ra = simData[self.raCol]
dec = simData[self.decCol]
for fieldid in np.unique(simData[self.fieldIdCol]):
match = np.where(simData[self.fieldIdCol] == fieldid)[0]
# Apply sequential dithers, increasing with each visit.
vertexIdxs = np.arange(0, len(match), 1)
vertexIdxs = vertexIdxs % self.numPoints
simData['spiralDitherFieldPerVisitRa'][match] = (ra[match] +
self.xOff[vertexIdxs] /
np.cos(dec[match]))
simData['spiralDitherFieldPerVisitDec'][match] = (dec[match] +
self.yOff[vertexIdxs])
# Wrap into expected range.
simData['spiralDitherFieldPerVisitRa'], simData['spiralDitherFieldPerVisitDec'] = \
wrapRADec(simData['spiralDitherFieldPerVisitRa'], simData['spiralDitherFieldPerVisitDec'])
if self.degrees:
for col in self.colsAdded:
simData[col] = np.degrees(simData[col])
return simData
class SpiralDitherFieldPerNightStacker(SpiralDitherFieldPerVisitStacker):
"""
Offset along an equidistant spiral with numPoints, out to a maximum radius of maxDither.
Each field steps along a sequential series of offsets, each night it is observed.
Parameters
----------
raCol : str, optional
The name of the RA column in the data.
Default 'fieldRA'.
decCol : str, optional
The name of the Dec column in the data.
Default 'fieldDec'.
degrees : bool, optional
Flag whether RA/Dec should be treated as (and kept as) degrees.
fieldIdCol : str, optional
The name of the fieldId column in the data.
Used to identify fields which should be identified as the 'same'.
Default 'fieldId'.
nightCol : str, optional
The name of the night column in the data.
Default 'night'.
numPoints : int, optional
The number of points in the spiral.
Default 60.
maxDither : float, optional
The radius of the maximum dither offset, in degrees.
Default 1.75 degrees.
nCoils : int, optional
The number of coils the spiral should have.
Default 5.
inHex : bool, optional
If True, offsets are constrained to lie within a hexagon inscribed within the maxDither circle.
If False, offsets can lie anywhere out to the edges of the maxDither circle.
Default True.
"""
# Values required for framework operation: this specifies the names of the new columns.
colsAdded = ['spiralDitherFieldPerNightRa', 'spiralDitherFieldPerNightDec']
def __init__(self, raCol='fieldRA', decCol='fieldDec', degrees=True, fieldIdCol='fieldId',
nightCol='night', numPoints=60, maxDither=1.75, nCoils=5, inHex=True):
"""
@ MaxDither in degrees
"""
super().__init__(raCol=raCol, decCol=decCol, degrees=degrees, fieldIdCol=fieldIdCol,
numPoints=numPoints, maxDither=maxDither, nCoils=nCoils, inHex=inHex)
self.nightCol = nightCol
# Values required for framework operation: this specifies the data columns required from the database.
self.colsReq.append(self.nightCol)
def _run(self, simData, cols_present=False):
if cols_present:
return simData
self._generateSpiralOffsets()
if self.degrees:
ra = np.radians(simData[self.raCol])
dec = np.radians(simData[self.decCol])
else:
ra = simData[self.raCol]
dec = simData[self.decCol]
for fieldid in np.unique(simData[self.fieldIdCol]):
# Identify observations of this field.
match = np.where(simData[self.fieldIdCol] == fieldid)[0]
# Apply a sequential dither, increasing each night.
nights = simData[self.nightCol][match]
vertexIdxs = np.searchsorted(np.unique(nights), nights)
vertexIdxs = vertexIdxs % self.numPoints
simData['spiralDitherFieldPerNightRa'][match] = (ra[match] +
self.xOff[vertexIdxs] /
np.cos(dec[match]))
simData['spiralDitherFieldPerNightDec'][match] = (dec[match] +
self.yOff[vertexIdxs])
# Wrap into expected range.
simData['spiralDitherFieldPerNightRa'], simData['spiralDitherFieldPerNightDec'] = \
wrapRADec(simData['spiralDitherFieldPerNightRa'], simData['spiralDitherFieldPerNightDec'])
if self.degrees:
for col in self.colsAdded:
simData[col] = np.degrees(simData[col])
return simData
class SpiralDitherPerNightStacker(SpiralDitherFieldPerVisitStacker):
"""
Offset along an equidistant spiral with numPoints, out to a maximum radius of maxDither.
All fields observed in the same night receive the same sequential offset, changing per night.
Parameters
----------
raCol : str, optional
The name of the RA column in the data.
Default 'fieldRA'.
decCol : str, optional
The name of the Dec column in the data.
Default 'fieldDec'.
degrees : bool, optional
Flag whether RA/Dec should be treated as (and kept as) degrees.
fieldIdCol : str, optional
The name of the fieldId column in the data.
Used to identify fields which should be identified as the 'same'.
Default 'fieldId'.
nightCol : str, optional
The name of the night column in the data.
Default 'night'.
numPoints : int, optional
The number of points in the spiral.
Default 60.
maxDither : float, optional
The radius of the maximum dither offset, in degrees.
Default 1.75 degrees.
nCoils : int, optional
The number of coils the spiral should have.
Default 5.
inHex : bool, optional
If True, offsets are constrained to lie within a hexagon inscribed within the maxDither circle.
If False, offsets can lie anywhere out to the edges of the maxDither circle.
Default True.
"""
# Values required for framework operation: this specifies the names of the new columns.
colsAdded = ['spiralDitherPerNightRa', 'spiralDitherPerNightDec']
def __init__(self, raCol='fieldRA', decCol='fieldDec', degrees=True, fieldIdCol='fieldId',
nightCol='night', numPoints=60, maxDither=1.75, nCoils=5, inHex=True):
"""
@ MaxDither in degrees
"""
super().__init__(raCol=raCol, decCol=decCol, degrees=degrees, fieldIdCol=fieldIdCol,
numPoints=numPoints, maxDither=maxDither, nCoils=nCoils, inHex=inHex)
self.nightCol = nightCol
# Values required for framework operation: this specifies the data columns required from the database.
self.colsReq.append(self.nightCol)
def _run(self, simData, cols_present=False):
if cols_present:
return simData
self._generateSpiralOffsets()
nights = np.unique(simData[self.nightCol])
if self.degrees:
ra = np.radians(simData[self.raCol])
dec = np.radians(simData[self.decCol])
else:
ra = simData[self.raCol]
dec = simData[self.decCol]
# Add to RA and dec values.
        vertexIdxs = np.searchsorted(nights, simData[self.nightCol])
# -*- coding: utf-8 -*-
import functools
import sys
from argparse import ArgumentParser
from contextlib import contextmanager
import tensorflow as tf
from pprint import pformat
from matplotlib import pyplot
from tensorflow.contrib.framework import arg_scope, add_arg_scope
import tfsnippet as spt
from tfsnippet import DiscretizedLogistic
from tfsnippet.examples.utils import (MLResults,
save_images_collection,
bernoulli_as_pixel,
                                      bernoulli_flow,
print_with_title)
import numpy as np
from tfsnippet.layers import pixelcnn_2d_output
from tfsnippet.preprocessing import UniformNoiseSampler
from ood_regularizer.experiment.datasets.overall import load_overall
from ood_regularizer.experiment.datasets.svhn import load_svhn
from ood_regularizer.experiment.models.real_nvp import make_real_nvp, RealNVPConfig
from ood_regularizer.experiment.models.utils import get_mixed_array
from ood_regularizer.experiment.utils import make_diagram, plot_fig
import os
class ExpConfig(spt.Config):
# model parameters
z_dim = 256
act_norm = False
weight_norm = False
batch_norm = False
l2_reg = 0.0002
kernel_size = 3
shortcut_kernel_size = 1
nf_layers = 20
pixelcnn_level = 5
# training parameters
result_dir = None
write_summary = True
max_epoch = 40
warm_up_start = 40
initial_beta = -3.0
uniform_scale = False
use_transductive = True
mixed_train = False
mixed_ratio1 = 0.1
mixed_ratio2 = 0.9
self_ood = False
in_dataset_test_ratio = 1.0
in_dataset = 'cifar10'
out_dataset = 'svhn'
max_step = None
batch_size = 64
smallest_step = 5e-5
initial_lr = 0.0002
lr_anneal_factor = 0.5
lr_anneal_epoch_freq = []
lr_anneal_step_freq = None
n_critical = 5
# evaluation parameters
train_n_qz = 1
test_n_qz = 10
test_batch_size = 64
test_epoch_freq = 200
plot_epoch_freq = 20
epsilon = -20.0
min_logstd_of_q = -3.0
sample_n_z = 100
x_shape = (32, 32, 3)
x_shape_multiple = 3072
extra_stride = 2
count_experiment = False
config = ExpConfig()
@add_arg_scope
def batch_norm(inputs, training=False, scope=None):
return tf.layers.batch_normalization(inputs, training=training, name=scope)
@add_arg_scope
def dropout(inputs, training=False, scope=None):
print(inputs, training)
return spt.layers.dropout(inputs, rate=0.2, training=training, name=scope)
@add_arg_scope
@spt.global_reuse
def p_net(input):
input = tf.to_float(input)
# prepare for the convolution stack
output = spt.layers.pixelcnn_2d_input(input)
# apply the PixelCNN 2D layers.
for i in range(config.pixelcnn_level):
output = spt.layers.pixelcnn_conv2d_resnet(
output,
out_channels=64,
vertical_kernel_size=(2, 3),
horizontal_kernel_size=(2, 2),
activation_fn=tf.nn.leaky_relu,
normalizer_fn=batch_norm,
dropout_fn=dropout
)
output_list = [spt.layers.pixelcnn_conv2d_resnet(
output,
out_channels=256,
vertical_kernel_size=(2, 3),
horizontal_kernel_size=(2, 2),
activation_fn=tf.nn.leaky_relu,
normalizer_fn=batch_norm,
dropout_fn=dropout
) for i in range(config.x_shape[-1])]
# get the final output of the PixelCNN 2D network.
output_list = [pixelcnn_2d_output(output) for output in output_list]
output = tf.stack(output_list, axis=-2)
print(output)
output = tf.reshape(output, (-1,) + config.x_shape + (256,)) # [batch, height, weight, channel, 256]
return output
class MyIterator(object):
def __init__(self, iterator):
self._iterator = iter(iterator)
self._next = None
self._has_next = True
self.next()
@property
def has_next(self):
return self._has_next
def next(self):
if not self._has_next:
raise StopIteration()
ret = self._next
try:
self._next = next(self._iterator)
except StopIteration:
self._next = None
self._has_next = False
else:
self._has_next = True
return ret
def __iter__(self):
return self
def __next__(self):
return self.next()
def limited(iterator, n):
i = 0
try:
while i < n:
yield next(iterator)
i += 1
except StopIteration:
pass
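# Example (sketch): `limited` caps how many items are drawn from an iterator while
# leaving the remainder available for later consumption, e.g.
#
#   it = MyIterator(range(100))
#   first_ten = list(limited(it, 10))   # [0, 1, ..., 9]; `it` continues from 10 afterwards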
def get_var(name):
pfx = name.rsplit('/', 1)
if len(pfx) == 2:
vars = tf.global_variables(pfx[0] + '/')
else:
vars = tf.global_variables()
for var in vars:
if var.name.split(':', 1)[0] == name:
return var
raise NameError('Variable {} not exist.'.format(name))
def main():
# parse the arguments
arg_parser = ArgumentParser()
spt.register_config_arguments(config, arg_parser, title='Model options')
spt.register_config_arguments(spt.settings, arg_parser, prefix='tfsnippet',
title='TFSnippet options')
arg_parser.parse_args(sys.argv[1:])
# print the config
print_with_title('Configurations', pformat(config.to_dict()), after='\n')
# open the result object and prepare for result directories
results = MLResults(config.result_dir)
results.save_config(config) # save experiment settings for review
while True:
try:
results.make_dirs('plotting/sample', exist_ok=True)
results.make_dirs('plotting/z_plot', exist_ok=True)
results.make_dirs('plotting/train.reconstruct', exist_ok=True)
results.make_dirs('plotting/test.reconstruct', exist_ok=True)
results.make_dirs('train_summary', exist_ok=True)
results.make_dirs('checkpoint/checkpoint', exist_ok=True)
break
except Exception:
pass
if config.count_experiment:
with open('/home/cwx17/research/ml-workspace/projects/wasserstein-ood-regularizer/count_experiments', 'a') as f:
f.write(results.system_path("") + '\n')
f.close()
# prepare for training and testing data
# It is important: the `x_shape` must have channel dimension, even it is 1! (i.e. (28, 28, 1) for MNIST)
# And the value of images should not be normalized, ranged from 0 to 255.
# prepare for training and testing data
(x_train, y_train, x_test, y_test) = load_overall(config.in_dataset)
(svhn_train, svhn_train_y, svhn_test, svhn_test_y) = load_overall(config.out_dataset)
config.x_shape = x_train.shape[1:]
config.x_shape_multiple = 1
for x in config.x_shape:
config.x_shape_multiple *= x
# input placeholders
input_x = tf.placeholder(
dtype=tf.int32, shape=(None,) + config.x_shape, name='input_x')
learning_rate = spt.AnnealingVariable(
'learning_rate', config.initial_lr, config.lr_anneal_factor)
# derive the loss and lower-bound for training
with tf.name_scope('training'), \
arg_scope([batch_norm, dropout], training=True):
train_p_net = p_net(input_x)
theta_loss = tf.reduce_sum(
tf.nn.sparse_softmax_cross_entropy_with_logits(labels=input_x, logits=train_p_net),
axis=np.arange(-len(config.x_shape), 0)
)
theta_loss = tf.reduce_mean(theta_loss)
theta_loss += tf.losses.get_regularization_loss()
# derive the nll and logits output for testing
with tf.name_scope('testing'), \
arg_scope([batch_norm], training=True):
test_p_net = p_net(input_x)
ele_test_ll = -tf.reduce_sum(
tf.nn.sparse_softmax_cross_entropy_with_logits(labels=input_x, logits=test_p_net),
axis=np.arange(-len(config.x_shape), 0)
) / config.x_shape_multiple / np.log(2)
# derive the nll and logits output for testing
with tf.name_scope('evaluating'), \
arg_scope([batch_norm], training=False):
eval_p_net = p_net(input_x)
ele_eval_ll = -tf.reduce_sum(
tf.nn.sparse_softmax_cross_entropy_with_logits(labels=input_x, logits=eval_p_net),
axis=np.arange(-len(config.x_shape), 0)
) / config.x_shape_multiple / np.log(2)
eval_nll = -tf.reduce_mean(
ele_eval_ll
)
# derive the optimizer
with tf.name_scope('optimizing'):
theta_params = tf.trainable_variables('p_net')
with tf.variable_scope('theta_optimizer'):
theta_optimizer = tf.train.AdamOptimizer(learning_rate)
theta_grads = theta_optimizer.compute_gradients(theta_loss, theta_params)
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
theta_train_op = theta_optimizer.apply_gradients(theta_grads)
cifar_train_flow = spt.DataFlow.arrays([x_train], config.test_batch_size)
cifar_test_flow = spt.DataFlow.arrays([x_test], config.test_batch_size)
svhn_train_flow = spt.DataFlow.arrays([svhn_train], config.test_batch_size)
svhn_test_flow = spt.DataFlow.arrays([svhn_test], config.test_batch_size)
train_flow = spt.DataFlow.arrays([x_train], config.batch_size, shuffle=True, skip_incomplete=True)
tmp_train_flow = spt.DataFlow.arrays([x_train], config.test_batch_size, shuffle=True, skip_incomplete=True)
mixed_array = np.concatenate([x_test, svhn_test])
mixed_test_flow = spt.DataFlow.arrays([mixed_array], config.batch_size, shuffle=True, skip_incomplete=True)
reconstruct_test_flow = spt.DataFlow.arrays([x_test], 100, shuffle=True, skip_incomplete=True)
reconstruct_train_flow = spt.DataFlow.arrays([x_train], 100, shuffle=True, skip_incomplete=True)
with spt.utils.create_session().as_default() as session, \
train_flow.threaded(5) as train_flow:
spt.utils.ensure_variables_initialized()
experiment_dict = {
'cifar10': '/mnt/mfs/mlstorage-experiments/cwx17/51/e5/02279d802d3aa91641f5',
'cifar100': '/mnt/mfs/mlstorage-experiments/cwx17/41/e5/02279d802d3a3b1541f5',
'tinyimagenet': '/mnt/mfs/mlstorage-experiments/cwx17/39/d5/02812baa4f70601441f5',
'svhn': '/mnt/mfs/mlstorage-experiments/cwx17/ef/d5/02c52d867e43601441f5',
'celeba': '/mnt/mfs/mlstorage-experiments/cwx17/11/e5/02279d802d3a601441f5',
'constant': '/mnt/mfs/mlstorage-experiments/cwx17/d8/e5/02c52d867e439b8f72f5',
'noise': '/mnt/mfs/mlstorage-experiments/cwx17/01/e5/02732c28dc8d9b8f72f5',
'fashion_mnist28': '/mnt/mfs/mlstorage-experiments/cwx17/21/e5/02279d802d3afd9441f5',
'kmnist28': '/mnt/mfs/mlstorage-experiments/cwx17/29/d5/02812baa4f70601441f5',
'mnist28': '/mnt/mfs/mlstorage-experiments/cwx17/df/d5/02c52d867e43601441f5',
'not_mnist28': '/mnt/mfs/mlstorage-experiments/cwx17/01/e5/02279d802d3a601441f5',
'omniglot28': '/mnt/mfs/mlstorage-experiments/cwx17/f0/e5/02279d802d3a601441f5',
'noise28': '/mnt/mfs/mlstorage-experiments/cwx17/50/f5/02c52d867e4358f6e2f5',
'constant28': '/mnt/mfs/mlstorage-experiments/cwx17/40/f5/02c52d867e4391f6e2f5'
}
print(experiment_dict)
if config.in_dataset in experiment_dict:
restore_dir = experiment_dict[config.in_dataset] + '/checkpoint'
restore_checkpoint = os.path.join(
restore_dir, 'checkpoint', 'checkpoint.dat-{}'.format(config.max_epoch))
else:
restore_dir = results.system_path('checkpoint')
restore_checkpoint = None
# train the network
with spt.TrainLoop(tf.trainable_variables(),
var_groups=['q_net', 'p_net', 'posterior_flow', 'G_theta', 'D_psi', 'G_omega', 'D_kappa'],
max_epoch=config.max_epoch + 1,
max_step=config.max_step,
summary_dir=(results.system_path('train_summary')
if config.write_summary else None),
summary_graph=tf.get_default_graph(),
early_stopping=False,
checkpoint_dir=results.system_path('checkpoint'),
checkpoint_epoch_freq=100,
restore_checkpoint=restore_checkpoint
) as loop:
loop.print_training_summary()
spt.utils.ensure_variables_initialized()
epoch_iterator = loop.iter_epochs()
# adversarial training
for epoch in epoch_iterator:
if epoch == config.max_epoch + 1:
def permutation_test(flow, ratio):
R = min(max(1, int(ratio * config.test_batch_size - 1)), config.test_batch_size - 1)
print('R={}'.format(R))
packs = []
for [batch_x] in flow:
for i in range(len(batch_x)):
for [batch_y] in mixed_test_flow:
for [batch_z] in tmp_train_flow:
batch = np.concatenate(
[batch_x[i:i + 1], batch_y[:R], batch_z[:config.test_batch_size - R - 1]],
axis=0)
pack = session.run(
ele_test_ll, feed_dict={
input_x: batch,
}) # [batch_size]
                                        pack = np.asarray(pack)
"""
@brief test log(time=3s)
"""
import unittest
import pickle
from io import BytesIO
import numpy
import scipy.sparse
import pandas
from sklearn import __version__ as sklver
from sklearn.datasets import make_blobs
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score
from sklearn.pipeline import make_pipeline
from sklearn.model_selection import GridSearchCV
from sklearn.exceptions import ConvergenceWarning
try:
from sklearn.utils._testing import ignore_warnings
except ImportError:
from sklearn.utils.testing import ignore_warnings
from pyquickhelper.pycode import ExtTestCase
from pyquickhelper.loghelper import BufferedPrint
from pyquickhelper.texthelper import compare_module_version
from mlinsights.mlmodel._kmeans_constraint_ import (
linearize_matrix, _compute_strategy_coefficient,
_constraint_association_gain)
from mlinsights.mlmodel import ConstraintKMeans
class TestSklearnConstraintKMeans(ExtTestCase):
def test_mat_lin(self):
mat = numpy.identity(3)
lin = linearize_matrix(mat)
exp = numpy.array([[1., 0., 0.],
[0., 0., 1.],
[0., 0., 2.],
[0., 1., 0.],
[1., 1., 1.],
[0., 1., 2.],
[0., 2., 0.],
[0., 2., 1.],
[1., 2., 2.]])
self.assertEqual(exp, lin)
def test_mat_lin_add(self):
mat = numpy.identity(3)
mat2 = numpy.identity(3) * 3
lin = linearize_matrix(mat, mat2)
exp = numpy.array([[1., 0., 0., 3.],
[0., 0., 1., 0.],
[0., 0., 2., 0.],
[0., 1., 0., 0.],
[1., 1., 1., 3.],
[0., 1., 2., 0.],
[0., 2., 0., 0.],
[0., 2., 1., 0.],
[1., 2., 2., 3.]])
self.assertEqual(exp, lin)
def test_mat_lin_sparse(self):
mat = numpy.identity(3)
mat[0, 2] = 8
mat[1, 2] = 5
mat[2, 1] = 7
mat = scipy.sparse.csr_matrix(mat)
lin = linearize_matrix(mat)
exp = numpy.array([[1., 0., 0.],
[8., 0., 2.],
[1., 1., 1.],
[5., 1., 2.],
[7., 2., 1.],
[1., 2., 2.]])
self.assertEqual(exp, lin)
def test_mat_lin_sparse_add(self):
mat = numpy.identity(3)
mat[0, 2] = 8
mat[1, 2] = 5
mat[2, 1] = 7
mat2 = numpy.identity(3) * 3
mat = scipy.sparse.csr_matrix(mat)
mat2 = scipy.sparse.csr_matrix(mat2)
lin = linearize_matrix(mat, mat2)
exp = numpy.array([[1., 0., 0., 3.],
[8., 0., 2., 0.],
[1., 1., 1., 3.],
[5., 1., 2., 0.],
[7., 2., 1., 0.],
[1., 2., 2., 3.]])
self.assertEqual(exp, lin)
def test_mat_lin_sparse2(self):
mat = numpy.identity(3)
mat[0, 1] = 8
mat[1, 1] = 0
mat[2, 1] = 7
mat = scipy.sparse.csr_matrix(mat)
lin = linearize_matrix(mat)
exp = numpy.array([[1., 0., 0.],
[8., 0., 1.],
[7., 2., 1.],
[1., 2., 2.]])
self.assertEqual(exp, lin)
def test_mat_lin_sparse3(self):
mat = numpy.identity(3)
mat[0, 1] = 8
mat[2, 1] = 7
mat = scipy.sparse.csr_matrix(mat)
lin = linearize_matrix(mat)
exp = numpy.array([[1., 0., 0.],
[8., 0., 1.],
[1., 1., 1.],
[7., 2., 1.],
[1., 2., 2.]])
self.assertEqual(exp, lin)
def test_mat_sort(self):
mat = numpy.identity(3)
mat[2, 0] = 0.3
mat[1, 0] = 0.2
mat[0, 0] = 0.1
exp = numpy.array([[0.1, 0., 0.], [0.2, 1., 0.], [0.3, 0., 1.]])
sort = mat[mat[:, 0].argsort()]
self.assertEqual(exp, sort)
mat.sort(axis=0)
self.assertNotEqual(exp, mat)
mat.sort(axis=1)
self.assertNotEqual(exp, mat)
@ignore_warnings(category=ConvergenceWarning)
def test_kmeans_constraint(self):
mat = numpy.array([[0, 0], [0.2, 0.2], [-0.1, -0.1], [1, 1]])
km = ConstraintKMeans(n_clusters=2, verbose=0, strategy='distance',
balanced_predictions=True)
km.fit(mat)
self.assertEqual(km.cluster_centers_.shape, (2, 2))
self.assertEqualFloat(km.inertia_, 0.455)
if km.labels_[0] == 0:
self.assertEqual(km.labels_, numpy.array([0, 1, 0, 1]))
self.assertEqual(km.cluster_centers_, numpy.array(
[[-0.05, -0.05], [0.6, 0.6]]))
else:
self.assertEqual(km.labels_, numpy.array([1, 0, 1, 0]))
self.assertEqual(km.cluster_centers_, numpy.array(
[[0.6, 0.6], [-0.05, -0.05]]))
pred = km.predict(mat)
if km.labels_[0] == 0:
self.assertEqual(pred, numpy.array([0, 1, 0, 1]))
else:
self.assertEqual(pred, numpy.array([1, 0, 1, 0]))
def test_kmeans_constraint_constraint(self):
mat = numpy.array([[0, 0], [0.2, 0.2], [-0.1, -0.1], [1, 1]])
km = ConstraintKMeans(n_clusters=2, verbose=0, strategy='distance',
balanced_predictions=True)
km.fit(mat)
self.assertEqual(km.cluster_centers_.shape, (2, 2))
self.assertEqualFloat(km.inertia_, 0.455)
if km.labels_[0] == 0:
self.assertEqual(km.labels_, numpy.array([0, 1, 0, 1]))
self.assertEqual(km.cluster_centers_, numpy.array(
[[-0.05, -0.05], [0.6, 0.6]]))
else:
self.assertEqual(km.labels_, numpy.array([1, 0, 1, 0]))
self.assertEqual(km.cluster_centers_, numpy.array(
[[0.6, 0.6], [-0.05, -0.05]]))
pred = km.predict(mat)
if km.labels_[0] == 0:
self.assertEqual(pred, numpy.array([0, 1, 0, 1]))
else:
self.assertEqual(pred, numpy.array([1, 0, 1, 0]))
@ignore_warnings(category=ConvergenceWarning)
def test_kmeans_constraint_sparse(self):
mat = numpy.array([[0, 0], [0.2, 0.2], [-0.1, -0.1], [1, 1]])
mat = scipy.sparse.csr_matrix(mat)
km = ConstraintKMeans(n_clusters=2, verbose=0, strategy='distance')
km.fit(mat)
self.assertEqual(km.cluster_centers_.shape, (2, 2))
self.assertEqualFloat(km.inertia_, 0.455)
if km.labels_[0] == 0:
self.assertEqual(km.labels_, numpy.array([0, 1, 0, 1]))
self.assertEqual(km.cluster_centers_, numpy.array(
[[-0.05, -0.05], [0.6, 0.6]]))
else:
self.assertEqual(km.labels_, numpy.array([1, 0, 1, 0]))
self.assertEqual(km.cluster_centers_, numpy.array(
[[0.6, 0.6], [-0.05, -0.05]]))
pred = km.predict(mat)
if km.labels_[0] == 0:
self.assertEqual(pred, numpy.array([0, 0, 0, 1]))
else:
self.assertEqual(pred, numpy.array([1, 1, 1, 0]))
def test_kmeans_constraint_pipeline(self):
data = load_iris()
X, y = data.data, data.target
X_train, X_test, y_train, y_test = train_test_split(X, y)
km = ConstraintKMeans(strategy='distance')
pipe = make_pipeline(km, LogisticRegression())
try:
pipe.fit(X_train, y_train)
except AttributeError as e:
if compare_module_version(sklver, "0.24") < 0:
return
raise e
pred = pipe.predict(X_test)
score = accuracy_score(y_test, pred)
self.assertGreater(score, 0.8)
score2 = pipe.score(X_test, y_test)
self.assertEqual(score, score2)
rp = repr(km)
self.assertStartsWith("ConstraintKMeans(", rp)
def test_kmeans_constraint_grid(self):
df = pandas.DataFrame(dict(y=[0, 1, 0, 1, 0, 1, 0, 1],
X1=[0.5, 0.6, 0.52, 0.62,
0.5, 0.6, 0.51, 0.61],
X2=[0.5, 0.6, 0.7, 0.5,
1.5, 1.6, 1.7, 1.8]))
X = df.drop('y', axis=1)
y = df['y']
model = make_pipeline(ConstraintKMeans(random_state=0, strategy='distance'),
DecisionTreeClassifier())
res = model.get_params(True)
self.assertNotEmpty(res)
parameters = {
'constraintkmeans__n_clusters': [2, 3, 4],
'constraintkmeans__balanced_predictions': [False, True],
}
clf = GridSearchCV(model, parameters, cv=3)
clf.fit(X, y)
pred = clf.predict(X)
self.assertEqual(pred.shape, (8,))
def test_kmeans_constraint_pickle(self):
df = pandas.DataFrame(dict(y=[0, 1, 0, 1, 0, 1, 0, 1],
X1=[0.5, 0.6, 0.52, 0.62,
0.5, 0.6, 0.51, 0.61],
X2=[0.5, 0.6, 0.7, 0.5, 1.5, 1.6, 1.7, 1.8]))
X = df.drop('y', axis=1)
y = df['y']
model = ConstraintKMeans(n_clusters=2, strategy='distance')
model.fit(X, y)
pred = model.transform(X)
st = BytesIO()
pickle.dump(model, st)
st = BytesIO(st.getvalue())
rec = pickle.load(st)
pred2 = rec.transform(X)
self.assertEqualArray(pred, pred2)
def test__compute_sortby_coefficient(self):
        m1 = numpy.array([[1., 2.], [4., 5.]])
"""Flipboard domain."""
from rlpy.tools import FONTSIZE, id2vec, plt
from .domain import Domain
import numpy as np
__copyright__ = "Copyright 2013, RLPy http://acl.mit.edu/RLPy"
__credits__ = [
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
]
__license__ = "BSD 3-Clause"
__author__ = "<NAME>"
class FlipBoard(Domain):
"""
A domain based on the last puzzle of Doors and Rooms Game stage 5-3.
The goal of the game is to get all elements of a 4x4 board
to have value 1.
The initial state is the following::
1 0 0 0
0 0 0 0
0 1 0 0
0 0 1 0
**STATE:** a 4x4 array of binary values. \n
**ACTION:** Invert the value of a given [Row, Col] (from 0->1 or 1->0).\n
**TRANSITION:** Determinisically flip all elements of the board on the same
row OR col of the action. \n
**REWARD:** -1 per step. 0 when the board is solved [all ones]
**REFERENCE:**
.. seealso::
`gameday inc. Doors and Rooms game <http://bit.ly/SYqdZI>`_
"""
BOARD_SIZE = 4
STEP_REWARD = -1
# Visual Stuff
domain_fig = None
move_fig = None
def __init__(self):
boards_num = self.BOARD_SIZE ** 2
super().__init__(
num_actions=boards_num,
statespace_limits=np.tile([0, 1], (boards_num, 1)),
discount_factor=1.0,
episode_cap=min(100, boards_num),
)
def show_domain(self, a=0):
s = self.state
# Draw the environment
if self.domain_fig is None:
self.move_fig = plt.subplot(111)
s = s.reshape((self.BOARD_SIZE, self.BOARD_SIZE))
self.domain_fig = plt.imshow(
s, cmap="FlipBoard", interpolation="nearest", vmin=0, vmax=1
)
plt.xticks(np.arange(self.BOARD_SIZE), fontsize=FONTSIZE)
plt.yticks(np.arange(self.BOARD_SIZE), fontsize=FONTSIZE)
# pl.tight_layout()
a_row, a_col = id2vec(a, [self.BOARD_SIZE, self.BOARD_SIZE])
self.move_fig = self.move_fig.plot(a_col, a_row, "kx", markersize=30.0)
plt.show()
a_row, a_col = id2vec(a, [self.BOARD_SIZE, self.BOARD_SIZE])
self.move_fig.pop(0).remove()
# print a_row,a_col
# Instead of '>' you can use 'D', 'o'
self.move_fig = plt.plot(a_col, a_row, "kx", markersize=30.0)
s = s.reshape((self.BOARD_SIZE, self.BOARD_SIZE))
self.domain_fig.set_data(s)
plt.draw()
def step(self, a):
ns = self.state.copy()
ns = np.reshape(ns, (self.BOARD_SIZE, -1))
a_row, a_col = id2vec(a, [self.BOARD_SIZE, self.BOARD_SIZE])
ns[a_row, :] = np.logical_not(ns[a_row, :])
ns[:, a_col] = np.logical_not(ns[:, a_col])
ns[a_row, a_col] = not ns[a_row, a_col]
if self.is_terminal():
terminal = True
r = 0
else:
terminal = False
r = self.STEP_REWARD
ns = ns.flatten()
self.state = ns.copy()
return r, ns, terminal, self.possible_actions()
def s0(self):
self.state = np.array(
[[1, 0, 0, 0], [0, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0]], dtype="bool"
).flatten()
return self.state, self.is_terminal(), self.possible_actions()
def is_terminal(self):
        return np.count_nonzero(self.state) == self.BOARD_SIZE ** 2
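# Usage sketch (assumes the standard rlpy Domain interface for s0/step/possible_actions):
#
#   domain = FlipBoard()
#   s, terminal, actions = domain.s0()
#   r, s, terminal, actions = domain.step(5)   # flips one cell together with its row and column
#   # each step costs -1 until every cell of the 4x4 board is 1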
#!/usr/bin/python3
import numpy as np
from numpy import matlib
from numpy import random
import sys
import copy
import scipy.signal
import scipy.stats.stats
from matplotlib import pyplot as plt
import unittest
def Norm(t):
while t > np.pi:
t -= 2 * np.pi
while t < -np.pi:
t += 2 * np.pi
return t
def Sign(n):
return 1.0 if n >= 0.0 else -1.0
class Airfoil(object):
def __init__(self, A, rho, lifting=5.0, cmax=1.2):
self.A = A # Cross-sectional area, m^2
self.rho = rho # Density of medium, kg / m^2
self.lifting = lifting
self.Cmax = cmax
def ClipAlpha(self, alpha):
return np.clip(Norm(alpha), -np.pi / 2, np.pi / 2)
def atanClCd(self, alpha):
"""
Based on playing around with some common profiles,
assuming a linear relationship to calculate
atan2(Cl(alpha), Cd(alpha)) w.r.t. alpha
seems reasonable.
"""
clipalpha = self.ClipAlpha(alpha)
deltaatan = -Sign(alpha) if abs(alpha) < np.pi / 2.0 else 0.0
return (np.pi / 2.0 - abs(clipalpha)) * np.sign(clipalpha), deltaatan
def normClCd(self, alpha):
"""
Calculates sqrt(Cl^2 + Cd^2). This
doesn't seem to capture typical profiles
at particularly high angles of attack, but
it seems a fair approximation. This may
cause us to be more incliuned to sail
straight downwind than we really should be.
True profiles have a dip ~70-80 deg angle of attack.
Returns norm, deltanorm/deltaalpha
"""
alpha = self.ClipAlpha(alpha)
exp = np.exp(-self.lifting * abs(alpha))
norm = self.Cmax * (1.0 - exp)
deltanorm = self.Cmax * self.lifting * exp * Sign(alpha)
return norm, deltanorm
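    # Illustrative values (sketch, using the defaults lifting=5.0 and cmax=1.2):
    #
    #   foil = Airfoil(A=2.0, rho=1.225)
    #   foil.normClCd(np.radians(5.0))[0]    # ~0.42
    #   foil.normClCd(np.radians(20.0))[0]   # ~0.99
    #   foil.normClCd(np.radians(60.0))[0]   # ~1.19  (saturating towards cmax)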
def F(self, alpha, v):
"""
Arguments:
alpha: Airfoil angle of attack
v: Relative speed in medium
Returns:
F, deltaF/deltaalpha: Note: deltaF does not account for heel
"""
clipalpha = self.ClipAlpha(alpha)
S = 0.5 * self.rho * self.A * v ** 2
norm, deltanorm = self.normClCd(clipalpha)
F = S * norm
deltaF = S * deltanorm
# Account for stupid angles of attack
deltaF *= -1.0 if abs(alpha) > np.pi / 2.0 else 1.0
return F, deltaF
class DebugForces(object):
def __init__(self):
self.taunet = []
self.Flon = []
self.Flat = []
self.Fs = []
self.Fk = []
self.Fr = []
self.gammas = []
self.gammak = []
self.gammar = []
self.FBlon = []
self.FBlat = []
self.taus = []
self.tauk = []
self.taur = []
self.tauB = []
def UpdateZero(self):
self.Update(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0)
def Update(self, taunet, Flon, Flat, Fs, Fk, Fr, gammas,
gammak, gammar, FBlon, FBlat, taus, tauk, taur, tauB):
self.taunet.append(taunet)
self.Flon.append(Flon)
self.Flat.append(Flat)
self.Fs.append(Fs)
self.Fk.append(Fk)
self.Fr.append(Fr)
self.gammas.append(gammas)
self.gammak.append(gammak)
self.gammar.append(gammar)
self.FBlon.append(FBlon)
self.FBlat.append(FBlat)
self.taus.append(taus)
self.tauk.append(tauk)
self.taur.append(taur)
self.tauB.append(tauB)
def Flonlat(self, F, gamma):
lon = [f * np.cos(g) for f, g in zip(F, gamma)]
lat = [f * np.sin(g) for f, g in zip(F, gamma)]
return lon, lat
def Fslonlat(self):
return self.Flonlat(self.Fs, self.gammas)
def Fklonlat(self):
return self.Flonlat(self.Fk, self.gammak)
def Frlonlat(self):
return self.Flonlat(self.Fr, self.gammar)
class Physics(object):
def __init__(self):
self.hs = 1.5 # Height of sail CoE above origin, m
self.hk = -0.7 # Height of keel CoE above origin, m
self.hr = 0.0 # Height of rudder CoE above origin, m
# Positions longitudinally on the boat relative
# to the origin, in m:
self.rs = 0.1
self.rk = 0.0
self.rr = -0.9
# Distance of the CoE from the rotational point (i.e.,
# 0 would be a rudder that required no force to turn)
self.ls = 0.25
self.lr = 0.0
rhowater = 1000.0 # Density of water, kg / m^3
rhoair = 1.225 # Density of air, kg / m^3
As = 2.0 # Sail Area, m^2
Ak = .3 # Keel Area, m^2
Ar = .04 # Rudder Area, m^2
self.sail = Airfoil(As, rhoair, 5.0, 1.4)
self.keel = Airfoil(Ak, rhowater, 8.0, 1.4)
self.rudder = Airfoil(Ar, rhowater, 4.0, 1.7)
self.Blon = 15.0 # Damping term, N / (m / s)
self.Blat = 25.0 # Lateral damping term, bigger b/c hull long/thin)
# self.Bv = 10.0
self.Bomega = 500 # Damping term, N * m / (rad / sec)
self.hb = -1.0 # Height of CoM of boat ballast, m
self.wb = 14.0 * 9.8 # Weight of boat ballast, N
self.J = 10.0 # Boat Moment of Inertia about yaw, kg * m^2
self.m = 25.0 # Boat mass, kg
def SailForces(self, thetaw, vw, deltas):
"""
Calculates and returns forces from the sail.
Arguments:
thetaw: Wind, 0 = running downwind, +pi / 2 = wind from port
vw: Wind speed, m / s
deltas: Sail angle, 0 = all in, +pi / 2 = sail on starboard
heel: Boat heel, 0 = upright
Returns:
Fs: Magnitude of force from sail (N)
gammas: Angle of force from sail (rad, 0 = forwards, +pi / 2 = pushing to port)
deltaFs: Derivative of Fs w.r.t. deltas
deltagamma: Derivative of gamma w.r.t. deltas
"""
alphas = -Norm(thetaw + deltas + np.pi)
atanC, deltaatan = self.sail.atanClCd(alphas)
Fs, deltaFs = self.sail.F(alphas, vw)
#Fs = Fs if abs(alphas) > 0.08 else 0.0
gammas = Norm(atanC - thetaw)
deltaFs = deltaFs * -1.0 # -1 = dalpha / ddeltas
deltagamma = deltaatan * -1.0 # -1 = dalpha / ddeltas
return Fs, gammas, deltaFs, deltagamma
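    # Worked example (sketch): running dead downwind (thetaw = 0) with the sail eased to
    # 90 degrees gives an angle of attack of pi/2, so atanClCd returns ~0 and the sail
    # force points essentially straight forward (gammas ~= 0):
    #
    #   phys = Physics()
    #   Fs, gammas, dFs, dgamma = phys.SailForces(thetaw=0.0, vw=5.0, deltas=np.pi / 2)
    #   # alphas = -Norm(0 + pi/2 + pi) = pi/2  ->  gammas = Norm(0 - 0) = 0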
def KeelForces(self, thetac, vc):
"""
Calculates and returns forces from the sail.
Arguments:
thetac: Current, 0 = Boat going straight, +pi / 2 = Boat drifting to starboard
vc: Speed in water, m / s
heel: Boat heel, 0 = upright
Returns:
Fk: Magnitude of force from keel (N)
gammak: Angle of force from keel (rad, 0 = forwards, +pi / 2 = pushing to port)
"""
alphak = -Norm(thetac)
atanC, _ = self.keel.atanClCd(alphak)
atanC = (np.pi / 2.0 - 0.05) * np.sign(alphak)
Fk, deltaFk = self.keel.F(alphak, vc)
gammak = Norm(atanC - thetac + np.pi)
return Fk, gammak
def RudderForces(self, thetac, vc, deltar):
"""
Calculates and returns forces from the sail.
Arguments:
thetac: Current, 0 = Boat going straight, +pi / 2 = Boat drifting to starboard
vc: Speed in water, m / s
deltar: Rudder angle, 0 = straight, + pi / 2 = rudder on starboard
heel: Boat heel, 0 = upright
Returns:
Fr: Magnitude of force from rudder (N)
gammar: Angle of force from rudder (rad, 0 = forwards, +pi / 2 = pushing to port)
deltaFr: dFr / ddeltar
deltagamma: dgammar / ddeltar
"""
alphar = -Norm(thetac + deltar)
alphar = np.clip(alphar, -.25, .25)
atanC = (np.pi / 2.0 - 0.05) * Sign(alphar)
Fr = 0.5 * self.rudder.A * self.rudder.rho * vc ** 2 * 5.0 * abs(alphar)
gammar = Norm(atanC - thetac + np.pi)
deltaFr = 0.5 * self.rudder.A * self.rudder.rho * vc ** 2 * 5.0 * -Sign(alphar)
deltagamma = 0.0
return Fr, gammar, deltaFr, deltagamma
def SailTorque(self, Fs, gammas, deltas, heel, deltaFs,
deltagammas, deltaheel):
"""
Calculate yaw torque from sail, using output from SailForces
Returns the torque and the derivative of the torque
w.r.t. deltas.
"""
sheel = np.sin(heel)
cheel = np.cos(heel)
cdeltas = np.cos(deltas)
sdeltas = np.sin(deltas)
return Fs * ((self.rs - self.ls * cdeltas) * np.sin(gammas) * cheel
+ self.hk * np.cos(gammas) * sheel), 0.0
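# NOTE: the unconditional return above makes the rest of this method unreachable;
# the lines below look like an earlier moment-arm formulation of the sail torque
# (and its derivative w.r.t. deltas) kept for reference. Also note the return uses
# self.hk here, which may be intended as self.hs by analogy with KeelTorque (assumption).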
r = np.sqrt((self.rs - self.ls * cdeltas) ** 2 + (self.hs * sheel) ** 2)
drds = ((self.rs - self.ls * cdeltas) * (self.ls * sdeltas) \
+ (self.hs * sheel) * (self.hs * cheel) * deltaheel) \
/ r
atany = -self.hs * sheel
atanx = self.rs - self.ls * cdeltas
theta = gammas - np.arctan2(atany, atanx)
stheta = np.sin(theta)
dsthetads = np.cos(theta) * \
(deltagammas -
(atanx * (-self.hs * cheel * deltaheel) -
atany * (self.ls * cdeltas))
/ (atanx ** 2 + atany ** 2))
dcheelds = -sheel * deltaheel
tau = r * Fs * stheta * cheel
dtauds = r * Fs * stheta * dcheelds \
+ r * Fs * dsthetads * cheel \
+ r * deltaFs * stheta * cheel \
+ drds * Fs * stheta * cheel
return tau, dtauds
def KeelTorque(self, Fk, gammak, heel):
"""
Calculate yaw torque from keel, using output from KeelForces
"""
return Fk * (self.rk * np.sin(gammak) * np.cos(heel)
+ self.hk * np.cos(gammak) * np.sin(heel))
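# NOTE: unreachable after the return above; an earlier moment-arm version of the
# keel torque kept for reference.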
r = np.sqrt(self.rk ** 2 + (self.hk * np.sin(heel)) ** 2)
theta = gammak - np.arctan2(-self.hk * np.sin(heel), self.rk)
return r * Fk * np.sin(theta) * np.cos(heel)
def RudderTorque(self, Fr, gammar, heel, deltaFr, deltaheel):
"""
Calculate yaw torque from rudder, using output from RudderForces
Assumes self.hr is negligible.
"""
tau = self.rr * Fr * np.sin(gammar) * np.cos(heel)
dtaudr = self.rr * np.cos(heel) * deltaFr * np.sin(gammar)
dtauds = -self.rr * Fr * np.sin(gammar) * np.sin(heel) * deltaheel
dtauds = 0.0 # Not sure if above dtauds is still good.
return tau, dtaudr, dtauds
def ApproxHeel(self, Fs, gammas, Fk, gammak, deltaFs, deltagammas):
"""
Returns equilibrium heel angle for a given Fs, Fk,
as well as the derivative of the heel with respect
to deltas
"""
tanheel = (Fs * self.hs * np.sin(gammas) + Fk * self.hk * np.sin(gammak)) / (self.hb * self.wb)
heel = np.arctan(tanheel)
dheel = self.hs * (deltaFs * np.sin(gammas) + Fs * np.cos(gammas) * deltagammas) \
/ ((1.0 + tanheel ** 2) * self.hb * self.wb)
return heel, dheel
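# Sketch of where the expression above comes from (assumed small-angle roll balance):
# the heeling moment from sail and keel, Fs*hs*sin(gammas) + Fk*hk*sin(gammak),
# is balanced by the ballast righting moment hb*wb*tan(heel), giving
#   tan(heel) = (Fs*hs*sin(gammas) + Fk*hk*sin(gammak)) / (hb*wb)
# dheel then follows from differentiating arctan(tanheel) with respect to deltas.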
def NetForce(self, thetaw, vw, thetac, vc, deltas, deltar, heel, omega, debugf=None):
"""
Sum up all the forces and return net longitudinal and lateral forces, and net torque
Arguments:
thetaw: Wind dir
vw: wind speed
thetac: Water dir
vc: Water speed
deltas: sail angle
deltar: rudder angle
heel: Duh.
omega: boat rotational velocity, rad / s
debugf: DebugForces instance for... debugging
Returns: Flon, Flat, taunet, newheel
"""
Fs, gammas, dFsds, dgsds = self.SailForces(thetaw, vw, deltas)
Fk, gammak = self.KeelForces(thetac, vc)
heel, dheelds = self.ApproxHeel(Fs, gammas, Fk, gammak, dFsds, dgsds)
Fr, gammar, dFrdr, dgrdr = self.RudderForces(thetac, vc, deltar)
taus, dtausds = self.SailTorque(Fs, gammas, deltas, heel, dFsds, dgsds, dheelds)
tauk = self.KeelTorque(Fk, gammak, heel)
taur, dtaurdr, dtaurds = self.RudderTorque(Fr, gammar, heel, dFrdr, dheelds)
tauB = -self.Bomega * omega * abs(omega)
FBlon = -self.Blon * vc * abs(vc) * np.cos(thetac)
FBlat = self.Blat * vc * np.sin(thetac)
Flon = Fs * np.cos(gammas) + Fk * np.cos(gammak) + Fr * np.cos(gammar) + FBlon
Flat = (Fs * np.sin(gammas) + Fk * np.sin(gammak) + Fr * np.sin(gammar)) * np.cos(heel) + FBlat
taunet = taus + tauk + taur + tauB
newheel, _ = self.ApproxHeel(Fs, gammas, Fk, gammak, 0, 0)
#print("Flon: ", Flon, " Flat: ", Flat, " Blon: ", -self.Blon * vc * np.cos(thetac),
# " Fs ", Fs, " gammas ", gammas, " Fk ", Fk, " gammak ", gammak, " Fr ", Fr,
# " gammar ", gammar)
#print("taunet ", taunet, " taus ", taus, " tauk ", tauk, " taur ", taur, " Btau",
# -self.Bomega * omega)
if debugf is not None:
debugf.Update(taunet, Flon, Flat, Fs, Fk, Fr, gammas,
gammak, gammar, FBlon, FBlat, taus, tauk, taur, tauB)
return Flon, Flat, taunet, newheel
def Yadaptive(self, thetaw, vw, thetac, vc, yaw, omega, deltas, deltar):
"""
Using: u = {F_lon, tau_net}
beta = {Blon, Bomega, Ar, rs, taubias, 1}
"""
YFlonBlon = -vc * abs(vc) * np.cos(thetac)
Fr, gammar, _, _ = self.RudderForces(thetac, vc, deltar)
YFlonAr = Fr * np.cos(gammar) / self.rudder.A
Fs, gammas, _, _ = self.SailForces(thetaw, vw, deltas)
Fk, gammak = self.KeelForces(thetac, vc)
YFlonconst = Fs * np.cos(gammas) + Fk * np.cos(gammak)
YFlon = np.matrix([[YFlonBlon, 0.0, YFlonAr, 0.0, 0.0, YFlonconst]])
heel, _ = self.ApproxHeel(Fs, gammas, Fk, gammak, 0.0, 0.0)
taur, _, _ = self.RudderTorque(Fr, gammar, heel, 0.0, 0.0)
tauk = self.KeelTorque(Fk, gammak, heel)
taus, _ = self.SailTorque(Fs, gammas, deltas, heel, 0.0, 0.0, 0.0)
YtauBomega = -omega * abs(omega)
YtauAr = taur / self.rudder.A
Ytaurs = Fs * np.sin(gammas) * np.cos(heel)
Ytauconst = tauk + (taus - Ytaurs * self.rs)
Ytau = np.matrix([[0.0, YtauBomega, YtauAr, Ytaurs, 1.0, Ytauconst]])
#print("Ytau: ", Ytau)
#print("YFlon: ", YFlon)
return np.concatenate((YFlon, Ytau), axis=0)
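# Usage sketch (assumption, following the docstring): with the estimate
# beta = [Blon, Bomega, Ar, rs, taubias, 1]^T, the predicted wrench is
#   u = Y @ beta = [F_lon, tau_net]^T
# e.g. the first row expands to
#   F_lon ~= Blon*(-vc*|vc|*cos(thetac)) + (Ar/A_rudder)*Fr*cos(gammar)
#            + Fs*cos(gammas) + Fk*cos(gammak)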
def Update(self, truewx, truewy, x, y, vx, vy, yaw, omega, deltas, deltar,
heel, dt, flopsail=False, debugf=None):
thetac = -Norm(np.arctan2(vy, vx) - yaw)
vc = np.sqrt(vx ** 2 + vy ** 2)
appwx = truewx - vx
appwy = truewy - vy
thetaw = Norm(-np.arctan2(appwy, appwx) + yaw)
vw = np.sqrt(appwx ** 2 + appwy ** 2) * 1.6 # For wind gradient
if flopsail:
deltas = abs(deltas) if thetaw > 0 else -abs(deltas)
#print("thetac ", thetac, " vc ", vc, " thetaw ", thetaw, " vw ", vw)
Flon, Flat, tau, newheel = self.NetForce(
thetaw, vw, thetac, vc, deltas, deltar, heel, omega, debugf)
if False:
# For approximating damping force, with overall force as input,
# state as [pos, vel]
Ac = np.matrix([[0.0, 1.0],
[0.0, -self.Bv / self.m]])
Bc = np.matrix([[0.0], [1.0 / self.m]])
(Ad, Bd, _, _, _) = scipy.signal.cont2discrete((Ac, Bc, Ac, Bc), dt)
statex = np.matrix([[x], [vx]])
forcex = Flon * np.cos(yaw) - Flat * np.sin(yaw)
statex = Ad * statex + Bd * forcex
statey = np.matrix([[y], [vy]])
forcey = Flon * np.sin(yaw) + Flat * np.cos(yaw)
statey = Ad * statey + Bd * forcey
x = statex[0, 0]
y = statey[0, 0]
vx = statex[1, 0]
vy = statey[1, 0]
else:
ax = (Flon * np.cos(yaw) - Flat * np.sin(yaw)) / self.m
ay = (Flon * np.sin(yaw) + Flat * np.cos(yaw)) / self.m
x += vx * dt + 0.5 * ax * dt ** 2
y += vy * dt + 0.5 * ay * dt ** 2
vx += ax * dt
vy += ay * dt
alpha = tau / self.J
yaw += omega * dt + 0.5 * alpha * dt ** 2
yaw = Norm(yaw)
omega += alpha * dt
kHeel = 0.3
heel = heel + (1.0 - np.exp(-kHeel * dt)) * (newheel - heel)
# heel = newheel
thetac = -Norm(np.arctan2(vy, vx) - yaw)
vc = np.sqrt(vx ** 2 + vy ** 2)
return x, y, vx, vy, yaw, omega, heel, thetac, vc, thetaw, vw
def RunBase(self, ts, winds, x0, v0, yaw0, omega0, heel0, control,
flopsail=False, debugf=None):
"""
ts: Times to simulate over, e.g. [0, .1, .2, .3, .4]
to simulate 4 steps of 0.1sec each
winds: list of two lists, where each sublist
is of length ts and contains the true wind
at that time
x0: list of length 2 = (x, y) initial position
v0: list of length 2 = (x, y) initial velocity
yaw0: float, initial yaw
omega0: float, initial time derivative of yaw
heel0: float, initial heel
control: Function, of the form:
Params:
i: current index from ts/winds that we are at
t: ts[i]
thetaw: Apparent wind dir
vw: Apparent wind vel
thetac: Apparent current
vc: Apparent water speed
Returns: deltas, deltar
"""
xs = [x0[0]]
ys = [x0[1]]
vxs = [v0[0]]
vys = [v0[1]]
yaws = [yaw0]
omegas = [omega0]
heels = [heel0]
vcs = [np.hypot(v0[0], v0[1])]
thetacs = [Norm(np.arctan2(v0[1], v0[0]) + yaws[0])]
vws = [0.0]
thetaws = [0.0]
deltass = []
deltars = []
for i in range(1, len(ts)):
dt = np.clip(ts[i] - ts[i - 1], 0.001, 0.2)
wx = winds[0][i]
wy = winds[1][i]
deltas, deltar = control(
i, ts[i], thetaws[-1], vws[-1], thetacs[-1], vcs[-1], yaws[-1], omegas[-1])
deltass.append(deltas)
deltars.append(deltar)
x, y, vx, vy, yaw, omega, heel, thetac, vc, thetaw, vw = self.Update(
wx, wy, xs[-1], ys[-1], vxs[-1], vys[-1], yaws[-1], omegas[-1],
deltas, deltar, heels[-1], dt, flopsail, debugf)
if abs(vx) > 100:
vx = 0
vy = 0
omega = 0
heel = 0
xs.append(x)
ys.append(y)
vxs.append(vx)
vys.append(vy)
yaws.append(yaw)
omegas.append(omega)
heels.append(heel)
thetacs.append(thetac)
vcs.append(vc)
thetaws.append(thetaw)
vws.append(vw)
deltass.append(0.0)
deltars.append(0.0)
return xs, ys, vxs, vys, yaws, omegas, heels, thetacs, vcs,\
thetaws, vws, deltass, deltars
def Run(self, wind, v0, omega0, heel0, control, dt=0.01, niter=200, flopsail=True, debugf=None):
winds = wind
if not isinstance(wind[0], list):
wx = [wind[0]] * niter
wy = [wind[1]] * niter
winds = [wx, wy]
ts = [i * dt for i in range(niter)]
return self.RunBase(ts, winds, [0.0, 0.0], v0, 0.0, omega0, heel0,
control, flopsail=flopsail, debugf=debugf)
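# NOTE: everything below this return is unreachable; it appears to be an older
# fixed-wind implementation of Run() superseded by RunBase() and kept for reference.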
xs = [0]
ys = [0]
vxs = [v0[0]]
vys = [v0[1]]
yaws = [0]
omegas = [omega0]
heels = [heel0]
vcs = [np.hypot(v0[0], v0[1])]
thetacs = [Norm(np.arctan2(v0[1], v0[0]) + yaws[0])]
for i in range(niter):
#print(i * dt)
x, y, vx, vy, yaw, omega, heel, thetac, vc = self.Update(
wx, wy, xs[-1], ys[-1], vxs[-1], vys[-1], yaws[-1], omegas[-1],
deltas, deltar, heels[-1], dt)
if abs(vx) > 100:
vx = 0
vy = 0
omega = 0
heel = 0
xs.append(x)
ys.append(y)
vxs.append(vx)
vys.append(vy)
yaws.append(yaw)
omegas.append(omega)
heels.append(heel)
thetacs.append(thetac)
vcs.append(vc)
return xs, ys, vxs, vys, yaws, omegas, heels, thetacs, vcs
class Controller(object):
def __init__(self, physics):
self.physics = physics
self.maxrud = 0.25
self.Qtau = 0.01
self.Qf = 1.0
self.goalyaw = -np.pi / 2.0
self.maxyawrefacc = 0.2
self.maxyawrefvel = 0.2
self.yawref = 0.0
self.omegaref = 0.0
self.Kbeta = np.diag([0.0, 0.0, 0.01, 0.05, 0.01, 0.0])
self.beta = np.matrix([[physics.Blon],
[physics.Bomega],
[physics.rudder.A],
[physics.rs],
[0.0],
[1.0]])
self.betamin = np.matrix([[0.0],
[0.0],
[0.01],
[-1.0],
[-10.0],
[1.0]])
self.betamax = np.matrix([[1000.0],
[10000.0],
[1.0],
[1.0],
[10.0],
[1.0]])
self.Lambda = np.diag([1.0, 1.0])
self.lastt = float("nan")
self.Kref = 0.95
self.betas = []
self.torques = []
self.yawrefs = []
def Clear(self):
self.betas = []
self.torques = []
self.yawrefs = []
def ClipSail(self, deltas, thetaw):
maxsail = abs(Norm(np.pi - thetaw))
return np.clip(deltas, 0.0 if thetaw > 0.0 else -maxsail,
maxsail if thetaw > 0.0 else 0.0)
def ClipRudder(self, deltar, thetac):
return np.clip(deltar, -self.maxrud - thetac, self.maxrud - thetac)
def Adapt(self, thetaw, vw, thetac, vc, yaw, omega, deltas, deltar,
goalyaw, goalomega):
# u = Y beta
# u = u_r + diff = Y beta
Y = self.physics.Yadaptive(
thetaw, vw, thetac, vc, yaw, omega, deltas, deltar)
yawdiff = Norm(goalyaw - yaw)
omegadiff = goalomega - omega
vcgoal = vc
diff = np.matrix([[0.0], [omegadiff]]) +\
self.Lambda * np.matrix([[vcgoal - vc], [yawdiff]])
#print("diff: ", diff)
#print("dot: ", (Y.T * diff).T)
betadot = -self.Kbeta * Y.T * diff
return betadot
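# Minimal sketch of how this gradient-style update is consumed (see
# ControlMaxForce below): the estimate is integrated and clipped each step,
#   betadot = self.Adapt(...)
#   self.beta += betadot * dt
#   self.beta = np.clip(self.beta, self.betamin, self.betamax)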
def ControlMaxForce(self, i, t, thetaw, vw, thetac, vc, yaw, omega):
dt = t - self.lastt
if np.isnan(self.lastt):
dt = 0.0
self.lastt = t
# self.Qtau = 1.0
goalomega = 0.0
taue = 20.0 * Norm(self.goalyaw - yaw) + (goalomega - omega) * 15.0\
- self.beta[4, 0]
#taue = 0.0
constraint = 0.0
_, _, _, taues, mini, deltas, deltar = control.GlobalMaxForceTorque(
thetaw, vw, thetac, vc, taue, constraint, 20)
if mini >= 0:
self.torques.append(taues[mini])
else:
self.torques.append(float("nan"))
self.yawrefs.append(self.yawref)
if np.isnan(deltas) and constraint == 0.0:
self.betas.append(self.beta)
return 0.0, 0.0
betadot = self.Adapt(
thetaw, vw, thetac, vc, yaw, omega, deltas, deltar,
self.yawref, self.omegaref)
if vc < 0.5:
betadot *= 0
self.beta += betadot * dt
self.beta = np.clip(self.beta, self.betamin, self.betamax)
self.betas.append(self.beta)
#print(self.beta.T)
self.physics.rudder.A = self.beta[2, 0]
self.physics.rs = self.beta[3, 0]
cur_yaw = self.yawref
cur_omega = self.omegaref
if i % 1 == 0:
K = self.Kref
cur_yaw = K * self.yawref + (1 - K) * yaw
cur_omega = K * self.omegaref + (1 - K) * omega
max_acc = self.maxyawrefacc
max_vel = self.maxyawrefvel
exp_vel = np.clip(Norm(self.goalyaw - cur_yaw), -max_vel, max_vel)
exp_acc = np.clip(exp_vel - cur_omega, -max_acc, max_acc)
if self.maxyawrefvel < 0.0:
self.yawref = self.goalyaw
elif self.maxyawrefacc < 0.0:
self.yawref = cur_yaw + exp_vel * dt
else:
self.omegaref = cur_omega + exp_acc * dt
self.yawref = cur_yaw + cur_omega * dt + exp_acc * 0.5 * dt * dt
self.yawref = Norm(self.yawref)
if np.isnan(deltas):
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os, sys
# add python path of PadleDetection to sys.path
parent_path = os.path.abspath(os.path.join(__file__, *(['..'] * 4)))
if parent_path not in sys.path:
sys.path.append(parent_path)
import unittest
import numpy as np
import paddle
import paddle.fluid as fluid
from paddle.fluid.dygraph import base
import ppdet.modeling.ops as ops
from ppdet.modeling.tests.test_base import LayerTest
def make_rois(h, w, rois_num, output_size):
rois = np.zeros((0, 4))
# -*- coding: utf-8 -*-
# ncon.py
import jax.numpy as np
import numpy as onp
def jncon(tensor_list, connect_list_in, cont_order=None, check_network=True):
"""
------------------------
by <NAME> (c) for www.tensors.net, (v1.31) - last modified 30/8/2019
------------------------
Network CONtractor. Input is an array of tensors 'tensor_list' and an array \
of vectors 'connect_list_in', with each vector labelling the indices of the \
corresponding tensor. Labels should be positive integers for contracted \
indices and negative integers for free indices. Optional input 'cont_order' \
can be used to specify order of index contractions (otherwise defaults to \
ascending order of the positive indices). Checking of the consistency of the \
input network can be disabled for slightly faster operation.
Further information can be found at: https://arxiv.org/abs/1402.0939
"""
# put inputs into a list if necessary
if type(tensor_list) is not list:
tensor_list = [tensor_list]
if type(connect_list_in[0]) is not list:
connect_list_in = [connect_list_in]
connect_list = [0 for x in range(len(connect_list_in))]
for ele in range(len(connect_list_in)):
connect_list[ele] = onp.array(connect_list_in[ele])
# generate contraction order if necessary
flat_connect = onp.array([item for sublist in connect_list for item in sublist])
if cont_order is None:
cont_order = onp.unique(flat_connect[flat_connect > 0])
else:
cont_order = onp.array(cont_order)
# check inputs if enabled
if check_network:
dims_list = [list(tensor.shape) for tensor in tensor_list]
check_inputs(connect_list, flat_connect, dims_list, cont_order)
# do all partial traces
for ele in range(len(tensor_list)):
num_cont = len(connect_list[ele]) - len(onp.unique(connect_list[ele]))
if num_cont > 0:
tensor_list[ele], connect_list[ele], cont_ind = partial_trace(tensor_list[ele], connect_list[ele])
cont_order = onp.delete(cont_order, onp.intersect1d(cont_order,cont_ind,return_indices=True)[1])
# do all binary contractions
while len(cont_order) > 0:
# identify tensors to be contracted
cont_ind = cont_order[0]
locs = [ele for ele in range(len(connect_list)) if sum(connect_list[ele] == cont_ind) > 0]
# do binary contraction
cont_many, A_cont, B_cont = onp.intersect1d(connect_list[locs[0]], connect_list[locs[1]], assume_unique=True, return_indices=True)
tensor_list.append(np.tensordot(tensor_list[locs[0]], tensor_list[locs[1]], axes=( list(A_cont), list(B_cont) ) ) )
connect_list.append(onp.append(onp.delete(connect_list[locs[0]], A_cont), onp.delete(connect_list[locs[1]], B_cont)))
# remove contracted tensors from list and update cont_order
del tensor_list[locs[1]]
del tensor_list[locs[0]]
del connect_list[locs[1]]
del connect_list[locs[0]]
cont_order = onp.delete(cont_order,onp.intersect1d(cont_order,cont_many, assume_unique=True, return_indices=True)[1])
# do all outer products
while len(tensor_list) > 1:
s1 = tensor_list[-2].shape
s2 = tensor_list[-1].shape
tensor_list[-2] = np.outer(tensor_list[-2].reshape(onp.prod(s1)),
tensor_list[-1].reshape(onp.prod(s2))).reshape(onp.append(s1,s2))
connect_list[-2] = onp.append(connect_list[-2],connect_list[-1])
del tensor_list[-1]
del connect_list[-1]
# do final permutation
if len(connect_list[0]) > 0:
return np.transpose(tensor_list[0],onp.argsort(-connect_list[0]))
else:
return tensor_list[0]
#-----------------------------------------------------------------------------
def partial_trace(A, A_label):
""" Partial trace on tensor A over repeated labels in A_label """
num_cont = len(A_label) - len(onp.unique(A_label))
if num_cont > 0:
dup_list = []
for ele in onp.unique(A_label):
if sum(A_label == ele) > 1:
dup_list.append([onp.where(A_label == ele)[0]])
cont_ind = onp.array(dup_list).reshape(2*num_cont,order='F')
free_ind = onp.delete(onp.arange(len(A_label)),cont_ind)
cont_dim = onp.prod(onp.array(A.shape)[cont_ind[:num_cont]])
free_dim = onp.array(A.shape)
from time import time
import os
import math
import random
import heapq
import numpy as np
from tensorflow import keras
from tensorflow.keras import layers,models
from tensorflow.keras import initializers,regularizers,optimizers
from Dataset import Dataset
from Client import Client
from train_model import *
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
class Single():
def __init__(self, epochs = 100, verbose = 5,topK = 10,data_name = 'ml-1m', model_name = 'gmf'):
self.epochs = epochs
self.verbose = verbose
self.topK = topK
#dataset
t1 = time()
dataset = Dataset("./Data/" + data_name)
self.num_users, self.num_items = dataset.get_train_data_shape()
self.test_datas = dataset.load_test_file()
self.test_negatives = dataset.load_negative_file()
self.train_datas = dataset.load_train_file()
print("Server Load data done [%.1f s]. #user=%d, #item=%d, #test=%d"
% (time()-t1, self.num_users, self.num_items, len(self.test_datas)))
#model
if model_name == "gmf":
self.model = get_compiled_gmf_model(self.num_users,self.num_items)
elif model_name == "mlp":
self.model = get_compiled_mlp_model(self.num_users,self.num_items)
elif model_name == "neumf":
self.model = get_compiled_neumf_model(self.num_users,self.num_items)
def evaluate_model(self):
"""
Evaluate the performance (Hit_Ratio, NDCG) of top-K recommendation
Return: score of each test rating.
"""
hits, ndcgs = [], []
for idx in range(len(self.test_datas)):
rating = self.test_datas[idx]
items = self.test_negatives[idx]
user_id = rating[0]
gtItem = rating[1]
items.append(gtItem)
# Get prediction scores
map_item_score = {}
users = np.full(len(items), user_id, dtype='int32')
predictions = self.model.predict([users, np.array(items)])
from __future__ import absolute_import
from yt.mods import *
import matplotlib
import pylab
from .output_tests import SingleOutputTest, YTDatasetTest, create_test
from yt.analysis_modules.halo_finding.api import *
import hashlib
import numpy as np
# Tests the number of halos returned by the HOP halo finder on a dataset
class TestHaloCountHOP(YTDatasetTest):
threshold = 80.0
def run(self):
# Find the halos using vanilla HOP.
halos = HaloFinder(self.ds, threshold=self.threshold, dm_only=False)
# We only care about the number of halos.
self.result = len(halos)
def compare(self, old_result):
# The new value should be identical to the old one.
self.compare_value_delta(self.result, old_result, 0)
def plot(self):
return []
# Tests the number of halos returned by the FOF halo finder on a dataset
class TestHaloCountFOF(YTDatasetTest):
link = 0.2
padding = 0.02
def run(self):
# Find the halos using FOF.
halos = FOFHaloFinder(self.ds, link=self.link, dm_only=False,
padding=self.padding)
# We only care about the number of halos.
self.result = len(halos)
def compare(self, old_result):
# The new value should be identical to the old one.
self.compare_value_delta(self.result, old_result, 0)
def plot(self):
return []
# Tests the number of halos returned by the Parallel HOP halo finder on a
# dataset
class TestHaloCountPHOP(YTDatasetTest):
threshold = 80.0
def run(self):
# Find the halos using parallel HOP.
halos = parallelHF(self.ds, threshold=self.threshold, dm_only=False)
# We only care about the number of halos.
self.result = len(halos)
def compare(self, old_result):
# The new value should be identical to the old one.
self.compare_value_delta(self.result, old_result, 0)
def plot(self):
return []
class TestHaloComposition(YTDatasetTest):
threshold=80.0
def run(self):
# Find the halos using vanilla HOP.
halos = HaloFinder(self.ds, threshold=self.threshold, dm_only=False)
# The result is a list of the particle IDs, stored
# as sets for easy comparison.
IDs = []
for halo in halos:
IDs.append(set(halo["particle_index"]))
self.result = IDs
def compare(self, old_result):
# All the sets should be identical.
pairs = zip(self.result, old_result)
for pair in pairs:
if len(pair[0] - pair[1]) != 0:
return False
return True
# Tests the content of the halos returned by the HOP halo finder on a dataset
# by comparing the hash of the arrays of all the particles contained in each
# halo. Evidently breaks on parallel runtime. DO NOT USE.
class TestHaloCompositionHashHOP(YTDatasetTest):
threshold=80.0
def run(self):
# Find the halos using vanilla HOP.
halos = HaloFinder(self.ds, threshold=self.threshold, dm_only=False)
# The result is a flattened array of the arrays of the particle IDs for
# each halo
IDs = []
for halo in halos:
IDs.append(halo["particle_index"])
IDs = np.concatenate(IDs)
import random
from collections import defaultdict
import torch
from torch.utils.data import Dataset
from data.base_dataset import BaseDataset
from utils import load_json, compute_rel
import pickle
import networkx
import numpy as np
g_use_heuristic_relation_matrix = False
g_add_in_room_relation = False
g_add_random_parent_link = False
g_prepend_room = False
g_shuffle_subject_object = False
def suncg_collate_fn(batch):
"""
Collate function to be used when wrapping SuncgDataset in a
DataLoader. Returns a tuple of the following:
- objs: LongTensor of shape (O,) giving object categories
- boxes: FloatTensor of shape (O, 4)
- triples: LongTensor of shape (T, 3) giving triples
- obj_to_img: LongTensor of shape (O,) mapping objects to room
- triple_to_img: LongTensor of shape (T,) mapping triples to room
"""
all_ids, all_objs, all_boxes, all_triples, all_angles, all_attributes = [], [], [], [], [], []
all_obj_to_room, all_triple_to_room = [], []
obj_offset = 0
for i, (room_id, objs, boxes, triples, angles, attributes) in enumerate(batch):
if objs.dim() == 0 or triples.dim() == 0:
continue
O, T = objs.size(0), triples.size(0)
all_objs.append(objs)
all_angles.append(angles)
all_attributes.append(attributes)
all_boxes.append(boxes)
all_ids.append(room_id)
triples = triples.clone()
triples[:, 0] += obj_offset
triples[:, 2] += obj_offset
all_triples.append(triples)
all_obj_to_room.append(torch.LongTensor(O).fill_(i))
all_triple_to_room.append(torch.LongTensor(T).fill_(i))
obj_offset += O
all_ids = torch.LongTensor(all_ids)
all_objs = torch.cat(all_objs)
all_boxes = torch.cat(all_boxes)
all_triples = torch.cat(all_triples)
all_angles = torch.cat(all_angles)
all_attributes = torch.cat(all_attributes)
all_obj_to_room = torch.cat(all_obj_to_room)
all_triple_to_room = torch.cat(all_triple_to_room)
out = (all_ids, all_objs, all_boxes, all_triples, all_angles, all_attributes, all_obj_to_room, all_triple_to_room)
return out
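# Usage sketch (assumption, not part of the original file): pass this as the
# collate_fn of a DataLoader so rooms with different object counts can be batched,
#   loader = torch.utils.data.DataLoader(dataset, batch_size=32, collate_fn=suncg_collate_fn)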
class SuncgDataset(BaseDataset):
def __init__(self, data_dir, valid_types_dir = "metadata/valid_types.json", train_3d=True, touching_relations=True, use_attr_30=False):
super(Dataset, self).__init__()
self.train_3d = train_3d
assert self.train_3d
# Do we train using 3D coors? You want True.
self.use_attr_30 = use_attr_30
# Do we want to train on object attributes? Split by 70:30? Tall/Short & Large/Small & None?
print("Starting to read the json file for SUNCG")
self.data = load_json(data_dir)
# Json file for cleaned & normalized data
self.room_ids = [int(i) for i in list(self.data)]
self.touching_relations = touching_relations
# Do objects touch? Works either way
# Construction dict
# obj_name is object type (chair/table/sofa etc. etc.)
# pred_name is relation type (left/right etc.)
# idx_to_name maps respective index back to object type or relation name
valid_types = load_json(valid_types_dir)
self.vocab = {'object_idx_to_name': ['__room__'] + valid_types}
# map obj type to idx
self.vocab['object_name_to_idx'] = {}
for i, name in enumerate(self.vocab['object_idx_to_name']):
self.vocab['object_name_to_idx'][name] = i
# map idx to relation type
self.vocab['pred_idx_to_name'] = [
'__in_room__',
'left of',
'right of',
'behind',
'in front of',
'inside',
'surrounding',
'left touching',
'right touching',
'front touching',
'behind touching',
'front left',
'front right',
'back left',
'back right',
'on',
]
# We don't actually use the front left, front right, back left, back right
# map relation type to idx
self.vocab['pred_name_to_idx'] = {}
for idx, name in enumerate(self.vocab['pred_idx_to_name']):
self.vocab['pred_name_to_idx'][name] = idx
self.vocab['attrib_idx_to_name'] = [
'none',
'tall',
'short',
'large',
'small',
]
self.vocab['attrib_name_to_idx'] = {}
for idx, name in enumerate(self.vocab['attrib_idx_to_name']):
self.vocab['attrib_name_to_idx'][name] = idx
self.image_id_to_objects = defaultdict(list)
self.room_bboxes = {}
for room_id in self.data:
room = self.data[room_id]
room_id = int(room_id)
self.image_id_to_objects[room_id] = room["valid_objects"]
self.room_bboxes[room_id] = room["bbox"]
self.size_data = load_json(
"metadata/size_info_many.json")
self.size_data_30 = load_json(
"metadata/30_size_info_many.json")
if g_use_heuristic_relation_matrix:
self.relation_score_matrix = self.get_relation_score_matrix()
def total_objects(self):
total_objs = 0
for i, room_id in enumerate(self.room_ids):
num_objs = len(self.image_id_to_objects[room_id])
total_objs += num_objs
return total_objs
def __len__(self):
return len(self.room_ids)
def return_room_ids(self):
return self.room_ids
def get_by_room_id(self, room_id):
try:
idx = self.room_ids.index(int(room_id))
except:
print("Get by room id failed! Defaulting to 0.")
idx = 0
return self.__getitem__(idx)
# -------------------new------------------
def get_relation_score_matrix(self, path = "new/relation_graph_v1.p"):
vocab = self.vocab
print("loading relation score matrix from: ", path)
R_G = pickle.load(open(path,"rb"))
relation_score_matrix = np.zeros((len(vocab['object_idx_to_name']), len(vocab['object_idx_to_name']))) + 0.6
for i in range(len(vocab['object_idx_to_name'])):
obj1 = vocab['object_idx_to_name'][i]
if obj1 == "shower_curtain":
continue
if obj1 == "floor_mat":
obj1 = "floor"
if obj1 == "night_stand":
obj1 = "stand"
if obj1 not in R_G.nodes:
continue
max_count_obj = max([R_G.edges[edge]['count'] for edge in R_G.edges(obj1)])
for j in range(len(vocab['object_idx_to_name'])):
obj2 = vocab['object_idx_to_name'][j]
if obj2 == "shower_curtain":
continue
if obj2 == "floor_mat":
obj2 = "floor"
if obj2 == "night_stand":
obj2 = "stand"
if obj2 not in R_G.nodes:
continue
if (obj1, obj2) not in R_G.edges:
continue
relation_score_matrix[i][j] += np.log(R_G.edges[(obj1, obj2)]["count"]) / np.log(max_count_obj)
return relation_score_matrix
def __getitem__(self, index, shuffle_obj = True):
room_id = self.room_ids[index]
objs, boxes, angles = [], [], []
if g_prepend_room:
objs.append(self.vocab['object_name_to_idx']['__room__'])
room_bbox = self.room_bboxes[room_id]
x0 = 0.0
y0 = 0.0
z0 = 0.0
x1 = room_bbox[0]
y1 = room_bbox[1]
z1 = room_bbox[2]
if self.train_3d:
boxes.append(torch.FloatTensor([x0, y0, z0, x1, y1, z1]))
else:
boxes.append(torch.FloatTensor([x0, z0, x1, z1]))
angles.append(0)
obj_data_list = self.image_id_to_objects[room_id]
if shuffle_obj:
random.shuffle(obj_data_list)
for object_data in obj_data_list:
obj_type = object_data["type"]
objs.append(self.vocab['object_name_to_idx'][obj_type])
bbox = object_data['new_bbox']
# Get min/max of the bbox
x0 = bbox[0][0]
y0 = bbox[0][1]
z0 = bbox[0][2]
x1 = bbox[1][0]
y1 = bbox[1][1]
z1 = bbox[1][2]
if self.train_3d:
boxes.append(torch.FloatTensor([x0, y0, z0, x1, y1, z1]))
else:
boxes.append(torch.FloatTensor([x0, z0, x1, z1]))
theta = object_data['rotation']
angles.append(theta)
if not g_prepend_room:
objs.append(self.vocab['object_name_to_idx']['__room__'])
room_bbox = self.room_bboxes[room_id]
x0 = 0.0
y0 = 0.0
z0 = 0.0
x1 = room_bbox[0]
y1 = room_bbox[1]
z1 = room_bbox[2]
if self.train_3d:
boxes.append(torch.FloatTensor([x0, y0, z0, x1, y1, z1]))
else:
boxes.append(torch.FloatTensor([x0, z0, x1, z1]))
angles.append(0)
objs = torch.LongTensor(objs)
boxes = torch.stack(boxes, dim=0)
# Angles are discrete, so make it a long tensor
angles = torch.LongTensor(angles)
# # Compute centers of all objects
# obj_centers = []
# if self.train_3d:
# for i, obj_idx in enumerate(objs):
# x0, y0, z0, x1, y1, z1 = boxes[i]
# mean_x = 0.5 * (x0 + x1)
# mean_y = 0.5 * (y0 + y1)
# mean_z = 0.5 * (z0 + z1)
# obj_centers.append([mean_x, mean_y, mean_z])
# else:
# for i, obj_idx in enumerate(objs):
# x0, z0, x1, z1 = boxes[i]
# mean_x = 0.5 * (x0 + x1)
# mean_z = 0.5 * (z0 + z1)
# obj_centers.append([mean_x, mean_z])
# obj_centers = torch.FloatTensor(obj_centers)
# Compute scene graphs
triples = []
num_objs = objs.size(0)
__room__ = self.vocab['object_name_to_idx']['__room__']
real_objs = []
if num_objs > 1:
# get non-room object indices
real_objs = (objs != __room__).nonzero().squeeze(1)
if self.train_3d:
# special: "on" relationships
on_rels = defaultdict(list)
for cur in real_objs:
choices = [obj for obj in real_objs if obj != cur]
for other in choices:
cur_box = boxes[cur]
other_box = boxes[other]
p = compute_rel(cur_box, other_box, None, None)
if p == "on":
p = self.vocab['pred_name_to_idx']['on']
triples.append([cur, p, other])
on_rels[cur].append(other)
# new: add random parent link
if g_add_random_parent_link:
for cur in real_objs:
if cur in on_rels.keys():
# "on" relation is an absolute parent link
choices = on_rels[cur]
other = random.choice(choices)
p = len(self.vocab['pred_name_to_idx']) # 16: parent link
triples.append([cur, p, other])
else:
# random choose a parent
choices = [obj for obj in real_objs if obj != cur]
if g_prepend_room:
choices.append(0)
else:
choices.append(objs.size(0)- 1)
other = random.choice(choices)
if (g_prepend_room and other == 0) or (not g_prepend_room and other == objs.size(0)- 1):
p = self.vocab['pred_name_to_idx']["__in_room__"]
triples.append([cur, p, other])
else:
# real relation
p = compute_rel(boxes[cur], boxes[other], None, None)
p = self.vocab['pred_name_to_idx'][p]
triples.append([cur, p, other])
# add parent link
triples.append([cur, len(self.vocab['pred_name_to_idx']), other])
else:
# add random relationships
for cur in real_objs:
choices = [obj for obj in real_objs if obj != cur]
# ---------- new ---------------
if g_use_heuristic_relation_matrix:
prob = [self.relation_score_matrix[objs[cur], objs[otr]] for otr in real_objs if otr != cur]
prob = np.asarray(prob)
"""
AiiDA-abinit output parser.
"""
import abipy.abilab as abilab
import netCDF4 as nc
import numpy as np
from abipy.dynamics.hist import HistFile
from abipy.flowtk import events
from aiida.common import exceptions
from aiida.engine import ExitCode
from aiida.orm import Dict, TrajectoryData, StructureData
from aiida.parsers.parser import Parser
from aiida.plugins import DataFactory
from pymatgen import Element
from pymatgen.core import units
units_suffix = '_units'
default_charge_units = 'e'
default_dipole_units = 'Debye'
default_energy_units = 'eV'
default_force_units = 'ev / angstrom'
default_k_points_units = '1 / angstrom'
default_length_units = 'Angstrom'
default_magnetization_units = 'Bohrmag / cell'
default_polarization_units = 'C / m^2'
default_stress_units = 'GPascal'
class AbinitParser(Parser):
"""
Basic AiiDA parser for the output of an Abinit calculation.
"""
def parse(self, **kwargs):
"""
Parse outputs, store results in database.
Receives in input a dictionary of retrieved nodes.
"""
ionmov = self.node.inputs['parameters'].get_dict().get('ionmov', 0)
optcell = self.node.inputs['parameters'].get_dict().get('optcell', 0)
if ionmov == 0 and optcell == 0:
is_relaxation = False
else:
is_relaxation = True
try:
_ = self.retrieved
except exceptions.NotExistent:
return self.exit_codes.ERROR_NO_RETRIEVED_TEMPORARY_FOLDER
exit_code = self._parse_GSR()
if exit_code is not None:
return exit_code
if is_relaxation:
exit_code = self._parse_trajectory() # pylint: disable=assignment-from-none
if exit_code is not None:
return exit_code
return ExitCode(0)
def _parse_GSR(self):
"""Parser for the Abnit GSR file that contains most information"""
## STDOUT ##
# Output file - aiida.out
fname = self.node.get_attribute('output_filename')
# Absolute path of the folder in which files are stored
path = self.node.get_remote_workdir()
if fname not in self.retrieved.list_object_names():
return self.exit_codes.ERROR_MISSING_OUTPUT_FILES
# Read the output log file for potential errors.
parser = events.EventsParser()
report = parser.parse(path + '/' + fname)
# Did the run have ERRORS:
if len(report.errors) > 0:
for e in report.errors:
self.logger.error(e.message)
return self.exit_codes.ERROR_OUTPUT_CONTAINS_ABORT
# Did the run contain WARNINGS:
if len(report.warnings) > 0:
for w in report.warnings:
self.logger.warning(w.message)
# Did the run complete
if not report.run_completed:
return self.exit_codes.ERROR_OUTPUT_CONTAINS_ABORT
## GSR ##
# Output GSR Abinit NetCDF file - Default name is aiidao_GSR.nc
fname = self.node.get_attribute('output_gsr')
# Absolute path of the folder in which aiidao_GSR.nc is stored
path = self.node.get_remote_workdir()
if fname not in self.retrieved.list_object_names():
return self.exit_codes.ERROR_MISSING_OUTPUT_FILES
with abilab.abiopen(path + '/' + fname) as gsr:
gsr_data = {
'abinit_version':
gsr.abinit_version,
'cart_stress_tensor':
gsr.cart_stress_tensor.tolist(),
'cart_stress_tensor' + units_suffix:
default_stress_units,
'is_scf_run':
bool(gsr.is_scf_run),
# 'cart_forces': gsr.cart_forces.tolist(),
# 'cart_forces' + units_suffix: default_force_units,
'forces':
gsr.cart_forces.tolist(), # backwards compatibility
'forces' + units_suffix:
default_force_units,
'energy':
float(gsr.energy),
'energy' + units_suffix:
default_energy_units,
'e_localpsp':
float(gsr.energy_terms.e_localpsp),
'e_localpsp' + units_suffix:
default_energy_units,
'e_eigenvalues':
float(gsr.energy_terms.e_eigenvalues),
'e_eigenvalues' + units_suffix:
default_energy_units,
'e_ewald':
float(gsr.energy_terms.e_ewald),
'e_ewald' + units_suffix:
default_energy_units,
'e_hartree':
float(gsr.energy_terms.e_hartree),
'e_hartree' + units_suffix:
default_energy_units,
'e_corepsp':
float(gsr.energy_terms.e_corepsp),
'e_corepsp' + units_suffix:
default_energy_units,
'e_corepspdc':
float(gsr.energy_terms.e_corepspdc),
'e_corepspdc' + units_suffix:
default_energy_units,
'e_kinetic':
float(gsr.energy_terms.e_kinetic),
'e_kinetic' + units_suffix:
default_energy_units,
'e_nonlocalpsp':
float(gsr.energy_terms.e_nonlocalpsp),
'e_nonlocalpsp' + units_suffix:
default_energy_units,
'e_entropy':
float(gsr.energy_terms.e_entropy),
'e_entropy' + units_suffix:
default_energy_units,
'entropy':
float(gsr.energy_terms.entropy),
'entropy' + units_suffix:
default_energy_units,
'e_xc':
float(gsr.energy_terms.e_xc),
'e_xc' + units_suffix:
default_energy_units,
'e_xcdc':
float(gsr.energy_terms.e_xcdc),
'e_xcdc' + units_suffix:
default_energy_units,
'e_paw':
float(gsr.energy_terms.e_paw),
'e_paw' + units_suffix:
default_energy_units,
'e_pawdc':
float(gsr.energy_terms.e_pawdc),
'e_pawdc' + units_suffix:
default_energy_units,
'e_elecfield':
float(gsr.energy_terms.e_elecfield),
'e_elecfield' + units_suffix:
default_energy_units,
'e_magfield':
float(gsr.energy_terms.e_magfield),
'e_magfield' + units_suffix:
default_energy_units,
'e_fermie':
float(gsr.energy_terms.e_fermie),
'e_fermie' + units_suffix:
default_energy_units,
'e_sicdc':
float(gsr.energy_terms.e_sicdc),
'e_sicdc' + units_suffix:
default_energy_units,
'e_exactX':
float(gsr.energy_terms.e_exactX),
'e_exactX' + units_suffix:
default_energy_units,
'h0':
float(gsr.energy_terms.h0),
'h0' + units_suffix:
default_energy_units,
'e_electronpositron':
float(gsr.energy_terms.e_electronpositron),
'e_electronpositron' + units_suffix:
default_energy_units,
'edc_electronpositron':
float(gsr.energy_terms.edc_electronpositron),
'edc_electronpositron' + units_suffix:
default_energy_units,
'e0_electronpositron':
float(gsr.energy_terms.e0_electronpositron),
'e0_electronpositron' + units_suffix:
default_energy_units,
'e_monopole':
float(gsr.energy_terms.e_monopole),
'e_monopole' + units_suffix:
default_energy_units,
'pressure':
float(gsr.pressure),
'pressure' + units_suffix:
default_stress_units
}
try:
# will return an integer 0 if non-magnetic calculation is run; convert it to a float
total_magnetization = float(gsr.ebands.get_collinear_mag())
gsr_data['total_magnetization'] = total_magnetization
gsr_data['total_magnetization' + units_suffix] = default_magnetization_units
except ValueError as valerr:
# get_collinear_mag will raise ValueError if it doesn't know what to do
if 'Cannot calculate collinear magnetization' in valerr.args[0]:
pass
else:
raise valerr
self.out("output_parameters", Dict(dict=gsr_data))
def _parse_trajectory(self):
"""Abinit trajectory parser."""
def _voigt_to_tensor(voigt):
tensor = np.zeros((3, 3))
tensor[0, 0] = voigt[0]
tensor[1, 1] = voigt[1]
tensor[2, 2] = voigt[2]
tensor[1, 2] = voigt[3]
tensor[0, 2] = voigt[4]
tensor[0, 1] = voigt[5]
tensor[2, 1] = tensor[1, 2]
tensor[2, 0] = tensor[0, 2]
tensor[1, 0] = tensor[0, 1]
return tensor
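# Voigt ordering assumed by the mapping above: voigt = [xx, yy, zz, yz, xz, xy],
# e.g. voigt[3] fills tensor[1, 2] (the yz component); this appears to match
# Abinit's 'strten' convention read below.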
# Absolute path of the folder in which aiidao_GSR.nc is stored
path = self.node.get_remote_workdir()
# HIST Abinit NetCDF file - Default name is aiidao_HIST.nc
fname = self.node.get_attribute('output_hist')
if fname not in self.retrieved.list_object_names():
return self.exit_codes.ERROR_MISSING_OUTPUT_FILES
with HistFile(path + '/' + fname) as hf:
structures = hf.structures
output_structure = StructureData(pymatgen=structures[-1])
with nc.Dataset(path + '/' + fname, 'r') as ds: # pylint: disable=no-member
n_steps = ds.dimensions['time'].size
energy_ha = ds.variables['etotal'][:].data # Ha
energy_kin_ha = ds.variables['ekin'][:].data # Ha
forces_cart_ha_bohr = ds.variables['fcart'][:, :, :].data # Ha/bohr
positions_cart_bohr = ds.variables['xcart'][:, :, :].data # bohr
stress_voigt = ds.variables['strten'][:, :].data # Ha/bohr^3
stepids = np.arange(n_steps)
symbols = np.array([specie.symbol for specie in structures[0].species],
dtype='<U2')
cells = np.array(
[structure.lattice.matrix for structure in structures]).reshape(
(n_steps, 3, 3))
energy = energy_ha * units.Ha_to_eV
energy_kin = energy_kin_ha * units.Ha_to_eV
forces = forces_cart_ha_bohr * units.Ha_to_eV / units.bohr_to_ang
positions = positions_cart_bohr * units.bohr_to_ang
stress = np.array([_voigt_to_tensor(sv) for sv in stress_voigt
]) * units.Ha_to_eV / units.bohr_to_ang**3
total_force = np.array([np.sum(f)
#!/usr/bin/python
import itertools
import numpy as np
import pytest
from scipy import stats
from sklearn.model_selection import GridSearchCV, KFold
from sklearn.svm import SVC
GAMMA = 1.
COEF0 = 1.
def main():
# polynomial()
cross_validation()
# rbf()
def read_dataset(dataset_type):
dataset = np.loadtxt('features.' + dataset_type)
return dataset[:, 1:], dataset[:, 0] # X, y
def polynomial():
clf = SVC(C=.01, kernel='poly', degree=2, gamma=GAMMA, coef0=COEF0)
for offset in [0, 1]:
num_supports, E_ins = [], []
digits = np.array([0, 2, 4, 6, 8], dtype=float) + offset
for digit in digits:
X_training, y_training = read_dataset('train')
y_training[~np.isclose(y_training, digit)] = -1.
clf.fit(X_training, y_training)
E_ins.append(1 - clf.score(X_training, y_training))
num_supports.append(clf.n_support_.sum())
chosen_idx = np.argmax(E_ins) if offset == 0 else np.argmin(E_ins)
print('digit={}: E_in={}, num_supports={}'.format(digits[chosen_idx],
E_ins[chosen_idx], num_supports[chosen_idx]))
print('\n--------------------\n')
X_training, y_training = read_dataset('train')
one_or_five = np.isclose(y_training, 1.) | np.isclose(y_training, 5.)
X_training, y_training = X_training[one_or_five], y_training[one_or_five]
X_test, y_test = read_dataset('test')
one_or_five = np.isclose(y_test, 1.) | np.isclose(y_test, 5.)
X_test, y_test = X_test[one_or_five], y_test[one_or_five]
Cs = [.001, .01, .1, 1.]
clfs = [SVC(C=C, kernel='poly', degree=2, gamma=GAMMA, coef0=COEF0)
for C in Cs]
[clf.fit(X_training, y_training) for clf in clfs]
num_supports = [clf.n_support_.sum() for clf in clfs]
E_ins = [1 - clf.score(X_training, y_training) for clf in clfs]
E_outs = [1 - clf.score(X_test, y_test) for clf in clfs]
print('num_supports={}'.format(num_supports))
print('E_ins={}'.format(E_ins))
print('diff E_ins={}'.format(np.diff(E_ins, 1)))
print('E_outs={}'.format(E_outs))
print('diff E_outs={}'.format(np.diff(E_outs, 1)))
print('\n--------------------\n')
Cs = [.0001, .001, .01, 1]
degrees = [2, 5]
clfs = {C: {degree: SVC(C=C, kernel='poly', degree=degree, gamma=GAMMA,
coef0=COEF0).fit(X_training, y_training)
for degree in degrees}
for C in Cs}
E_ins = [1 - clf.score(X_training, y_training)
for clf in clfs[.0001].values()]
print('C=0.0001: E_ins={}'.format(E_ins))
num_supports = [clf.n_support_.sum() for clf in clfs[.001].values()]
print('C=0.001: num_supports={}'.format(num_supports))
E_ins = [1 - clf.score(X_training, y_training)
for clf in clfs[.01].values()]
print('C=0.01: E_ins={}'.format(E_ins))
E_outs = [1 - clf.score(X_test, y_test)
for clf in clfs[1].values()]
print('C=1: E_outs={}'.format(E_outs))
def cross_validation():
X_training, y_training = read_dataset('train')
one_or_five = np.isclose(y_training, 1.) | np.isclose(y_training, 5.)
X_training, y_training = X_training[one_or_five], y_training[one_or_five]
Cs = [.0001, .001, .01, .1, 1.]
clfs = [GridSearchCV(SVC(kernel='poly', degree=2, gamma=GAMMA, coef0=COEF0),
param_grid=dict(C=Cs),
cv=KFold(n_splits=10, shuffle=True),
n_jobs=8).fit(X_training, y_training)
for _ in range(100)]
chosen_Cs = [clf.best_params_['C'] for clf in clfs]
E_cvs = [1 - clf.best_score_ for clf in clfs]
print(stats.mode(chosen_Cs))
print(np.mean(E_cvs))
def rbf():
X_training, y_training = read_dataset('train')
one_or_five = np.isclose(y_training, 1.) | np.isclose(y_training, 5.)
X_training, y_training = X_training[one_or_five], y_training[one_or_five]
X_test, y_test = read_dataset('test')
one_or_five = np.isclose(y_test, 1.) | np.isclose(y_test, 5.)
X_test, y_test = X_test[one_or_five], y_test[one_or_five]
Cs = [.01, 1, 100, 1e4, 1e6]
clfs = [SVC(C=C, kernel='rbf', gamma=GAMMA).fit(X_training, y_training)
for C in Cs]
E_ins = [1 - clf.score(X_training, y_training) for clf in clfs]
print('E_ins={}'.format(E_ins))
print('argmin E_ins={}'.format(np.argmin(E_ins)))
E_outs = [1 - clf.score(X_test, y_test) for clf in clfs]
print('E_outs={}'.format(E_outs))
print('argmin E_outs={}'.format(np.argmin(E_outs)))
"""
Script from <NAME>, used for the SHREC17 competion
"""
import os
import subprocess
from joblib import Parallel, delayed
from pathlib import Path
import numpy as np
from scipy.spatial.distance import pdist, squareform
from sklearn.metrics import precision_recall_curve, precision_score
from spherical_cnn import models, util
def make_shrec17_output_thresh(descriptors, scores, fnames, outdir,
distance='cosine', dists=None, thresh=None):
if dists is None:
dists = squareform(pdist(descriptors, distance))
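# pdist returns the condensed (pairwise, upper-triangular) distance vector;
# squareform expands it to the full symmetric N x N matrix used for ranking below.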
fnames = [os.path.splitext(f)[0] for f in fnames]
os.makedirs(outdir, exist_ok=True)
if not isinstance(thresh, dict):
thresh = {i: thresh for i in range(scores.shape[1])}
predclass = scores.argmax(axis=1)
lens = Parallel(n_jobs=-1)(delayed(make_shrec17_output_thresh_loop)
(d, f, s, c, thresh, fnames, predclass, outdir)
for d, f, s, c in zip(dists, fnames, scores, predclass))
print('avg # of elements returned {:2f} {:2f}'.format(np.mean(lens), np.std(lens)))
def make_shrec17_output_thresh_loop(d, f, s, c, thresh, fnames, predclass, outdir, max_retrieved=1000):
t = thresh[c]
fd = [(ff, dd)
for dd, ff, cc in zip(d, fnames, predclass)
# chose whether to include same class or not
if (dd < t) or (cc == c)]
# if (dd < t)]
fi = [ff[0] for ff in fd]
di = [ff[1] for ff in fd]
ranking = []
for i in np.argsort(di):
if fi[i] not in ranking:
ranking.append(fi[i])
ranking = ranking[:max_retrieved]
with open(os.path.join(outdir, f), 'w') as fout:
[print(r, file=fout) for r in ranking]
return len(ranking)
def make_shrec17_output(descriptors, scores, fnames, outdir,
distance='cosine', dists=None,
max_retrieved=1000):
if dists is None:
dists = squareform(pdist(descriptors, distance))
fnames = [os.path.splitext(f)[0] for f in fnames]
os.makedirs(outdir, exist_ok=True)
predclass = scores.argmax(axis=1)
for d, f, s in zip(dists, fnames, scores):
# return elements from top nc classes
nc = 1
cs = np.argsort(s)[::-1][:nc]
# list elements of the selected classes and its distances
fi, di = [], []
for c in cs:
fi += [ff for ff, cc in zip(fnames, predclass) if cc == c]
di += [dd for dd, cc in zip(d, predclass) if cc == c]
# also include elements with distance less than the median
median = np.median(di)
'''
Contains kinematic model to generate joint angles from orientation, position, feet location.
'''
import numpy as np
from .matrix_transforms import RpToTrans, TransToRp, TransInv, RPY, TransformVector
from collections import OrderedDict
class Kinematics:
def __init__(self, frame_parameters, linked_leg_parameters):
self.linked_leg_parameters = linked_leg_parameters
self.com_offset = frame_parameters['com_offset']
# Leg Parameters
self.shoulder_length = frame_parameters['shoulder_length']
self.upper_leg_length = frame_parameters['upper_leg_length']
self.lower_leg_length = frame_parameters['lower_leg_length']
# Leg Vector desired_positions
# Distance Between Hips
# Length
self.hip_x = frame_parameters['hip_x']
# Width
self.hip_y = frame_parameters['hip_y']
# Distance Between Feet
# Length
self.foot_x = frame_parameters['foot_x']
# Width
self.foot_y = frame_parameters['foot_y']
# Body Height
self.height = frame_parameters['height']
# Dictionary to store Hip and Foot Transforms
# Transform of Hip relative to world frame
# With Body Centroid also in world frame
Rwb = np.eye(3)
self.WorldToHip = OrderedDict()
self.ph_FL = np.array([self.hip_x / 2.0, self.hip_y / 2.0, 0])
self.WorldToHip["FL"] = RpToTrans(Rwb, self.ph_FL)
self.ph_FR = np.array([self.hip_x / 2.0, -self.hip_y / 2.0, 0])
self.WorldToHip["FR"] = RpToTrans(Rwb, self.ph_FR)
self.ph_BL = np.array([-self.hip_x / 2.0, self.hip_y / 2.0, 0])
self.WorldToHip["BL"] = RpToTrans(Rwb, self.ph_BL)
self.ph_BR = np.array([-self.hip_x / 2.0, -self.hip_y / 2.0, 0])
self.WorldToHip["BR"] = RpToTrans(Rwb, self.ph_BR)
# Transform of Foot relative to world frame
# With Body Centroid also in world frame
self.WorldToFoot = OrderedDict()
self.pf_FL = np.array(
[self.foot_x / 2.0, self.foot_y / 2.0, -self.height])
self.WorldToFoot["FL"] = RpToTrans(Rwb, self.pf_FL)
self.pf_FR = np.array(
[self.foot_x / 2.0, -self.foot_y / 2.0, -self.height])
self.WorldToFoot["FR"] = RpToTrans(Rwb, self.pf_FR)
self.pf_BL = np.array(
[-self.foot_x / 2.0, self.foot_y / 2.0, -self.height])
self.WorldToFoot["BL"] = RpToTrans(Rwb, self.pf_BL)
self.pf_BR = np.array(
[-self.foot_x / 2.0, -self.foot_y / 2.0, -self.height])
self.WorldToFoot["BR"] = RpToTrans(Rwb, self.pf_BR)
def _hip_to_foot(self, orn, pos, T_bf):
"""
Converts a desired position and orientation from
home position, with a desired body-to-foot Transform
into a body-to-hip Transform, which is used to extract
and return the Hip To Foot Vector.
:param orn: A 3x1 np.array([]) of Roll, Pitch, Yaw angles
:param pos: A 3x1 np.array([]) of X, Y, Z coordinates
:param T_bf: Dictionary of desired body-to-foot Transforms.
:return: Hip To Foot Vector for each of Spot's Legs.
"""
# only get rotation component
rotation_matrix, _ = TransToRp(RPY(orn[0], orn[1], orn[2]))
position_vector = pos
T_wb = RpToTrans(rotation_matrix, position_vector)
# Dictionary to store vectors
HipToFoot_List = OrderedDict()
for i, (key, T_wh) in enumerate(self.WorldToHip.items()):
# ORDER: FL, FR, BL, BR
# Extract vector component
_, p_bf = TransToRp(T_bf[key])
# Step 1, get T_bh for each leg
T_bh = np.dot(TransInv(T_wb), T_wh)
# Step 2, get T_hf for each leg
# VECTOR ADDITION METHOD
_, p_bh = TransToRp(T_bh)
p_hf0 = p_bf - p_bh
# TRANSFORM METHOD
T_hf = np.dot(TransInv(T_bh), T_bf[key])
_, p_hf1 = TransToRp(T_hf)
# They should yield the same result
if p_hf1.all() != p_hf0.all():
print("NOT EQUAL")
p_hf = p_hf1
HipToFoot_List[key] = p_hf
return HipToFoot_List
def _get_domain(self, x, y, z):
"""
Calculates the leg's Domain and caps it in case of a breach
:param x,y,z: hip-to-foot distances in each dimension
:return: Leg Domain D
"""
D = (y**2 + (-z)**2 - self.shoulder_length**2 +
(-x)**2 - self.upper_leg_length**2 - self.lower_leg_length**2) / (
2 * self.lower_leg_length * self.upper_leg_length)
#if D > 1 or D < -1:
# print("---------DOMAIN BREACH---------")
return np.clip(D, -1.0, 1.0)
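# Geometry sketch (assumption): with the shoulder-corrected hip-to-foot distance
#   L^2 = x^2 + y^2 + z^2 - shoulder_length^2,
# the law of cosines gives D = (L^2 - L_upper^2 - L_lower^2) / (2*L_upper*L_lower),
# i.e. the cosine of the knee angle, which is why D is clipped to [-1, 1] before
# the arctan2/arccos-style solve in _solve_joint_angles.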
def _solve_joint_angles(self, xyz_coord, legType):
"""
Leg Inverse Kinematics Solver
:param xyz_coord: hip-to-foot distances in each dimension
:param legType: leg type to determine orientation
:return: Joint Angles required for desired position
"""
x = xyz_coord[0]
y = xyz_coord[1]
z = xyz_coord[2]
D = self._get_domain(x, y, z)
if legType == "FR" or legType == "BR":
shoulder_direction_offset = -1
elif legType == "FL" or legType == "BL":
shoulder_direction_offset = 1
lower_leg_angle = np.arctan2(-np.sqrt(1 - D**2), D)
sqrt_component = y**2 + (-z)**2 - self.shoulder_length**2
if sqrt_component < 0.0:
sqrt_component = 0.0
shoulder_angle = -np.arctan2(z, y) - np.arctan2(
np.sqrt(sqrt_component), shoulder_direction_offset * self.shoulder_length)
upper_leg_angle = np.arctan2(-x, np.sqrt(sqrt_component)) - np.arctan2(
self.lower_leg_length * np.sin(lower_leg_angle),
self.upper_leg_length + self.lower_leg_length * np.cos(lower_leg_angle))
joint_angles = np.array(
[-shoulder_angle, upper_leg_angle, lower_leg_angle])
return joint_angles
def inverse_kinematics(self, orn, pos, T_bf):
"""
Uses HipToFoot() to convert a desired position
and orientation wrt Spot's home position into a
Hip To Foot Vector, which is fed into the LegIK solver.
Finally, the resultant joint angles are returned
from the LegIK solver for each leg.
:param orn: A 3x1 np.array([]) with Spot's Roll, Pitch, Yaw angles
:param pos: A 3x1 np.array([]) with Spot's X, Y, Z coordinates
:param T_bf: Dictionary of desired body-to-foot Transforms.
:return: Joint angles for each joint.
"""
# Modify x by com offset
pos[0] += self.com_offset
# 4 legs, 3 joints per leg
joint_angles = np.zeros((4, 3))
# Steps 1 and 2 of pipeline here
HipToFoot = self._hip_to_foot(orn, pos, T_bf)
for i, (key, p_hf) in enumerate(HipToFoot.items()):
# Step 3, compute joint angles from T_hf for each leg
joint_angles[i, :] = self._solve_joint_angles(p_hf, key)
return joint_angles.flatten()
def get_joint_angles_linked_legs(self, joint_angles):
joint_angles_linked_leg = np.empty(12)
# Convert joint angles into joint angles for linked legs
for i in range(12):
if i % 3 == 0: # Hip
joint_angles_linked_leg[i] = joint_angles[i]
if i % 3 == 1: # Upper leg
joint_angles_linked_leg[i] = joint_angles[i]
if i % 3 == 2: # Lower leg
# convert joint angles into linked leg kinematics orientation
upper_leg_angle = joint_angles[i - 1] - np.pi/2
lower_leg_angle = np.pi - joint_angles[i]
"""
# Lower leg servo origin.
Ax = -21
Ay = -20
# Upper leg servo crank arm origin.
Dx = 0
Dy = 0
# Link lengths
L2 = 23
L3 = 31
L4 = 24
L5 = 28
L6 = 29
L7 = 105
L8 = 100
L9 = 23
"""
Ax = self.linked_leg_parameters['A']['x']
Ay = self.linked_leg_parameters['A']['y']
Dx = self.linked_leg_parameters['D']['x']
Dy = self.linked_leg_parameters['D']['y']
L2 = self.linked_leg_parameters['L2']
L3 = self.linked_leg_parameters['L3']
L4 = self.linked_leg_parameters['L4']
L5 = self.linked_leg_parameters['L5']
L6 = self.linked_leg_parameters['L6']
L7 = self.linked_leg_parameters['L7']
L8 = self.linked_leg_parameters['L8']
L9 = self.linked_leg_parameters['L9']
L1 = np.sqrt(np.square(Dx - Ax) + np.square(Dy - Ay))
theta1 = np.arcsin((Dy - Ay) / L1)
beta2 = lower_leg_angle
theta4 = upper_leg_angle
beta3 = np.pi - beta2
DF = np.sqrt(np.square(L8) + np.square(L9) -
2 * L8 * L9 * np.cos(beta3))
beta5 = np.arccos(
(np.square(DF) + np.square(L8) - np.square(L9)) / (2 * DF * L8))
# TODO: add this check in the main kinematics calculations
# to set angle limits that reflect in the simulation.
beta6_vars = (np.square(L6) + np.square(DF) -
np.square(L7)) / (2 * L6 * DF)
beta6 = np.arccos(np.clip(beta6_vars, -1.0, 1.0))
theta5 = beta6 + beta5 + theta4
beta4 = np.arccos(
(np.square(L4) + np.square(L6) - np.square(L5)) / (2 * L4 * L6))
theta3 = beta4 + theta5
beta9 = np.pi - theta3 + theta1
AC = np.sqrt(np.square(L1) + np.square(L4) -
2 * L1 * L4 * np.cos(beta9))
import os
import time
import warnings
import multiprocessing as mp
from typing import List
import pandas as pd
import numpy as np
import scipy
import scipy.stats as stats
import matplotlib.pyplot as plt
from dateutil.relativedelta import relativedelta
from datetime import datetime
from tqdm import tqdm
from pvrpm.core.enums import ConfigKeys as ck
from pvrpm.core.case import SamCase
from pvrpm.core.components import Components
from pvrpm.core.utils import summarize_dc_energy, component_degradation
from pvrpm.core.logger import logger
def cf_interval(alpha: float, std: float, num_samples: int) -> float:
"""
Calculates the two-tailed margin of error given the desired input. The margin of error is the value added to and subtracted from the sample mean to obtain the confidence interval.
Sample sizes less than or equal to 30 use the t score; greater than 30 use the z score.
Args:
alpha (float): The significance level for the interval
std (float): The standard deviation of the data
num_samples (int): The number of samples in the data
Returns:
float: The margin of error
"""
# two tails
alpha = alpha / 2
if num_samples > 30:
score = stats.norm.ppf(1 - alpha)  # upper-tail quantile so the margin is positive, matching the t branch
else:
score = stats.t.ppf(1 - alpha, num_samples - 1)
return score * std / np.sqrt(num_samples)
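# Usage sketch (assumption): for a 95% two-sided interval on a sample mean,
#   moe = cf_interval(alpha=0.05, std=data.std(), num_samples=len(data))
#   ci = (data.mean() - moe, data.mean() + moe)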
def simulate_day(case: SamCase, comp: Components, day: int):
"""
Updates and increments the simulation by a day, performing all neccesary component updates.
Args:
case (:obj:`SamCase`): The current Sam Case of the simulation
comp (:obj:`Components`): The components class containing all the outputs for this simulation
day (int): Current day in the simulation
"""
# static monitoring starts the day, if available. This is updated independently of component levels
comp.update_indep_monitor(day)
for c in ck.component_keys:
if not case.config.get(c, None):
continue
df = comp.comps[c]
# if component can't fail, just continue
if case.config[c][ck.CAN_FAIL]:
# decrement time to failures for operational modules
# fail components when their time has come
comp.update_fails(c, day)
# update monitoring
comp.update_monitor(c, day)
if case.config[c][ck.CAN_REPAIR]:
# repair components when they are done and can be repaired
comp.update_repairs(c, day)
if case.config[c].get(ck.WARRANTY, None):
df["time_left_on_warranty"] -= 1
# availability
if c == ck.GRID:
# for the grid only, the availability is based on the full 24-hour day.
df.loc[df["state"] == 0, "avail_downtime"] += 24
else:
# else, use the sun hours for this day
df.loc[df["state"] == 0, "avail_downtime"] += case.daylight_hours[day % 365]
# module can still degrade even if it can't fail
if case.config[c].get(ck.DEGRADE, None):
df["days_of_degradation"] += 1
df["degradation_factor"] = [
component_degradation(case.config[c][ck.DEGRADE] / 365, d) for d in df["days_of_degradation"]
]
def run_system_realization(
case: SamCase, seed: bool = False, realization_num: int = 0, progress_bar: bool = False, debug: int = 0,
) -> Components:
"""
Run a full realization for calculating costs
Args:
case (:obj:`SamCase`): The loaded and verified case to use with the simulation
seed (bool, Optional): Whether to seed the random number generator, for multiprocessing
realization_num (int, Optional): Current realization number, used for multiprocessing
progress_bar (bool, Optional): Whether to display progress bar during the realization
debug (int, Optional): Whether to save simulation state every `debug` days (0 to turn off)
Returns:
:obj:`Components`: The components object which contains all the data for this realization
"""
if seed:
np.random.seed()
# data storage
comp = Components(case)
lifetime = case.config[ck.LIFETIME_YRS]
if case.config[ck.TRACKING]:
comp.tracker_power_loss_factor[0] = 1
comp.tracker_availability[0] = 1
# initial timestep
comp.module_degradation_factor[0] = comp.current_degradation()
comp.dc_power_availability[0] = comp.dc_availability()
comp.ac_power_availability[0] = comp.ac_availability()
if progress_bar:
iterator = tqdm(
range(1, lifetime * 365),
ascii=True,
desc=f"Running realization {realization_num}",
unit="day",
position=mp.current_process()._identity[0],
leave=False,
)
else:
logger.info(f"Running realization {realization_num}...")
iterator = range(1, lifetime * 365)
for i in iterator:
# calculate new labor rate each year
if i == 1 or i % 365 == 0:
year = np.floor(i / 365)
inflation = np.power(1 + case.config[ck.INFLATION] / 100, year)
comp.update_labor_rates(case.config[ck.LABOR_RATE] * inflation)
# Decided to remove since it doesn't make sense for only trackers to rise with inflation and not
# all other failures. Plus, this was broken.
# need to store the original cost of tracker failures for each failure and increase based on that cost
# also need to take into account concurrent failures
# if case.config[ck.TRACKING]:
# for fail in case.config[ck.TRACKER][ck.FAILURE].keys():
# case.config[ck.TRACKER][ck.FAILURE][fail][ck.COST] *= inflation
# save state if debugging
if debug > 0 and i % debug == 0:
state_dict = comp.snapshot()
folder = f"debug_day_{i}"
save_path = os.path.join(case.config[ck.RESULTS_FOLDER], folder)
os.makedirs(save_path, exist_ok=True)
for key, val in state_dict.items():
val.to_csv(os.path.join(save_path, f"{key}_state.csv"), index=True)
# timestep is applied each day
simulate_day(case, comp, i)
if case.config[ck.TRACKING]:
comp.tracker_availability[i], comp.tracker_power_loss_factor[i] = comp.tracker_power_loss(i)
comp.module_degradation_factor[i] = comp.current_degradation()
comp.dc_power_availability[i] = comp.dc_availability()
comp.ac_power_availability[i] = comp.ac_availability()
# create SAM performance adjustment tables for avail, degradation, tracker losses
if case.config[ck.TRACKING]:
daily_dc_loss = 100 * (
1 - (comp.dc_power_availability * comp.module_degradation_factor * comp.tracker_power_loss_factor)
)
else:
daily_dc_loss = 100 * (1 - (comp.dc_power_availability * comp.module_degradation_factor))
daily_ac_loss = 100 * (1 - comp.ac_power_availability)
case.value("en_dc_lifetime_losses", 1)
case.value("dc_lifetime_losses", list(daily_dc_loss))
case.value("en_ac_lifetime_losses", 1)
case.value("ac_lifetime_losses", list(daily_ac_loss))
o_m_yearly_costs = np.zeros(lifetime)
for c in ck.component_keys:
if not case.config.get(c, None):
continue
comp_yearly_cost = np.sum(np.reshape(comp.costs[c], (lifetime, 365)), axis=1)
o_m_yearly_costs += comp_yearly_cost
case.value("om_fixed", list(o_m_yearly_costs))
case.simulate()
# add the results of the simulation to the components class and return
comp.timeseries_dc_power = case.output("dc_net")
comp.timeseries_ac_power = case.value("gen")
comp.lcoe = case.output("lcoe_real")
comp.npv = case.get_npv()
# remove the first element from cf_energy_net because it is always 0, representing year 0
comp.annual_energy = np.array(case.output("cf_energy_net")[1:])
# more results, for graphing and what not
try:
comp.tax_cash_flow = case.output("cf_after_tax_cash_flow")
except AttributeError:
comp.tax_cash_flow = case.output("cf_pretax_cashflow")
for loss in ck.losses:
try:
comp.losses[loss] = case.output(loss)
except Exception:
comp.losses[loss] = 0
return comp
def gen_results(case: SamCase, results: List[Components]) -> List[pd.DataFrame]:
"""
Generates results for the given SAM case and list of component objects containing the results of each realization.
Args:
case (:obj:`SamCase`): The loaded and verified case to use with the simulation
results (:obj:`list(Components)`): List of component objects that contain the results for each realization
Returns:
:obj:`list(pd.DataFrame)`: List of dataframes containing the results.
Note:
The order of the returned dataframes is:
- Summary Results
- Degradation Results
- DC Power
- AC Power
- Yearly Costs
"""
summary_index = ["Base Case"]
summary_data = {"lcoe": [case.base_lcoe], "npv": [case.base_npv]}
lifetime = case.config[ck.LIFETIME_YRS]
p_vals = [99, 95, 90, 75, 50, 10]
# ac energy
cumulative_ac_energy = np.cumsum(case.base_annual_energy)
for i in range(int(lifetime)):
summary_data[f"annual_ac_energy_{i+1}"] = [case.base_annual_energy[i]]
# split up so the order of columns is nicer
for i in range(int(lifetime)):
summary_data[f"cumulative_ac_energy_{i+1}"] = [cumulative_ac_energy[i]]
# dc energy
for i in range(len(case.base_dc_energy)):
summary_data[f"dc_energy_{i+1}"] = [case.base_dc_energy[i]]
# TODO: also, need to clean this up. I just use dictionaries and fill in blanks for the base case, but this can be much cleaner
# per realization results
day_index = np.arange(lifetime * 365) + 1
timeseries_index = np.arange(len(results[0].timeseries_dc_power))
year_index = np.arange(lifetime) + 1
yearly_cost_index = []
degradation_data = {}
timeseries_dc_data = {}
timeseries_ac_data = {}
yearly_cost_data = {}
yearly_fail_data = {}
for i, comp in enumerate(results):
# daily degradation
degradation_data[f"Realization {i+1}"] = comp.module_degradation_factor
# power
timeseries_dc_data[f"Realization {i+1}"] = comp.timeseries_dc_power
timeseries_ac_data[f"Realization {i+1}"] = comp.timeseries_ac_power
# yearly cost and total fails for each component
yearly_cost_index.append(f"Realization {i+1}")
for c in ck.component_keys:
if not case.config.get(c, None):
continue
if c not in yearly_cost_data:
yearly_cost_data[c] = []
if c not in yearly_fail_data:
yearly_fail_data[c] = []
yearly_cost_data[c] += list(np.sum(np.reshape(comp.costs[c], (lifetime, 365)), axis=1))
# add total fails per year for each failure mode for this component level
total_fails = np.zeros(lifetime * 365)
for f in comp.summarize_failures(c).values():
total_fails += f
yearly_fail_data[c] += list(np.sum(np.reshape(total_fails, (lifetime, 365)), axis=1))
# summary
summary_index.append(f"Realization {i+1}")
summary_data["lcoe"] += [comp.lcoe]
summary_data["npv"] += [comp.npv]
# ac energy
# remove the first element from cf_energy_net because it is always 0, representing year 0
cumulative_ac_energy = np.cumsum(comp.annual_energy)
for i in range(int(lifetime)):
summary_data[f"annual_ac_energy_{i+1}"] += [comp.annual_energy[i]]
summary_data[f"cumulative_ac_energy_{i+1}"] += [cumulative_ac_energy[i]]
# dc energy
dc_energy = summarize_dc_energy(comp.timeseries_dc_power, lifetime)
for i in range(len(dc_energy)):
summary_data[f"dc_energy_{i+1}"] += [dc_energy[i]]
# calculate total failures, availability, mttr, mtbf, etc
for c in ck.component_keys:
if not case.config.get(c, None):
continue
if f"{c}_total_failures" not in summary_data:
summary_data[f"{c}_total_failures"] = [None] # no failures for base case
if f"{c}_mtbf" not in summary_data:
summary_data[f"{c}_mtbf"] = [None]
if f"{c}_mttr" not in summary_data:
summary_data[f"{c}_mttr"] = [None]
if f"{c}_mttd" not in summary_data:
summary_data[f"{c}_mttd"] = [None]
if case.config[c][ck.CAN_FAIL]:
sum_fails = comp.comps[c]["cumulative_failures"].sum()
summary_data[f"{c}_total_failures"] += [sum_fails]
for fail in case.config[c].get(ck.FAILURE, {}).keys():
if f"{c}_failures_by_type_{fail}" not in summary_data:
summary_data[f"{c}_failures_by_type_{fail}"] = [None]
summary_data[f"{c}_failures_by_type_{fail}"] += [comp.comps[c][f"failure_by_type_{fail}"].sum()]
# partial failures
for fail in case.config[c].get(ck.PARTIAL_FAIL, {}).keys():
if f"{c}_failures_by_type_{fail}" not in summary_data:
summary_data[f"{c}_failures_by_type_{fail}"] = [None]
summary_data[f"{c}_failures_by_type_{fail}"] += [comp.comps[c][f"failure_by_type_{fail}"].sum()]
# if the component had no failures, set everything here and continue
if sum_fails == 0:
summary_data[f"{c}_mtbf"] += [lifetime * 365]
summary_data[f"{c}_mttr"] += [0]
summary_data[f"{c}_mttd"] += [0]
else:
# mean time between failure
summary_data[f"{c}_mtbf"] += [lifetime * 365 * case.config[c][ck.NUM_COMPONENT] / sum_fails]
# mean time to repair
if case.config[c][ck.CAN_REPAIR]:
# take the number of fails minus whatever components have not been repaired by the end of the simulation to get the number of repairs
sum_repairs = sum_fails - len(comp.comps[c].loc[(comp.comps[c]["state"] == 0)])
if sum_repairs > 0:
summary_data[f"{c}_mttr"] += [comp.total_repair_time[c] / sum_repairs]
else:
summary_data[f"{c}_mttr"] += [0]
else:
summary_data[f"{c}_mttr"] += [0]
# mean time to detection (mean time to acknowledge)
if (
case.config[c][ck.CAN_MONITOR]
or case.config[c].get(ck.COMP_MONITOR, None)
or case.config[c].get(ck.INDEP_MONITOR, None)
):
# take the number of fails minus the components that have not been repaired and also have not been detected by monitoring
mask = (comp.comps[c]["state"] == 0) & (comp.comps[c]["time_to_detection"] > 1)
sum_monitor = sum_fails - len(comp.comps[c].loc[mask])
if sum_monitor > 0:
summary_data[f"{c}_mttd"] += [comp.total_monitor_time[c] / sum_monitor]
else:
summary_data[f"{c}_mttd"] += [0]
else:
summary_data[f"{c}_mttd"] += [0]
else:
# mean time between failure
summary_data[f"{c}_total_failures"] += [0]
summary_data[f"{c}_mtbf"] += [lifetime * 365]
summary_data[f"{c}_mttr"] += [0]
summary_data[f"{c}_mttd"] += [0]
# availability
if f"{c}_availability" not in summary_data:
summary_data[f"{c}_availability"] = [None]
summary_data[f"{c}_availability"] += [
(
1
- (comp.comps[c]["avail_downtime"].sum() / (lifetime * case.annual_daylight_hours))
/ case.config[c][ck.NUM_COMPONENT]
)
]
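# Worked example (hypothetical numbers): a component level with 2 components and a combined
# 876 hours of avail_downtime over a 10-year lifetime with 4380 daylight hours per year gives
#   1 - (876 / 43800) / 2 = 0.99
# i.e. 99% availability for that component level.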
# generate dataframes
summary_results = pd.DataFrame(index=summary_index, data=summary_data)
summary_results.index.name = "Realization"
# reorder columns for summary results
reorder = list(summary_results.columns[0:2]) # lcoe and npv
reorder += list(summary_results.columns[lifetime * 3 + 2 :]) # failures and avail
reorder += list(summary_results.columns[2 : lifetime * 3 + 2]) # energy
summary_results = summary_results[reorder]
degradation_results = pd.DataFrame(index=day_index, data=degradation_data)
dc_power_results = pd.DataFrame(index=timeseries_index, data=timeseries_dc_data)
ac_power_results = pd.DataFrame(index=timeseries_index, data=timeseries_ac_data)
dc_power_results.index.name = "Hour"
ac_power_results.index.name = "Hour"
degradation_results.index.name = "Day"
cost_index = pd.MultiIndex.from_product([yearly_cost_index, year_index], names=["Realization", "Year"])
yearly_cost_results = pd.DataFrame(index=cost_index, data=yearly_cost_data)
yearly_cost_results["total"] = yearly_cost_results.sum(axis=1)
# fails per year, same multi index as cost
yearly_fail_results = pd.DataFrame(index=cost_index, data=yearly_fail_data)
yearly_fail_results["total"] = yearly_fail_results.sum(axis=1)
stats_append = []
summary_no_base = summary_results.iloc[1:]
min = summary_no_base.min()
min.name = "min"
stats_append.append(min)
max = summary_no_base.max()
max.name = "max"
stats_append.append(max)
mean = summary_no_base.mean()
mean.name = "mean"
stats_append.append(mean)
median = summary_no_base.median()
median.name = "median"
stats_append.append(median)
std = summary_no_base.std()
std.name = "stddev"
stats_append.append(std)
conf_interval = case.config[ck.CONF_INTERVAL]
conf_int = cf_interval(1 - (conf_interval / 100), std, case.config[ck.NUM_REALIZATION])
lower_conf = mean - conf_int
lower_conf.name = f"{conf_interval}% lower confidence interval of mean"
stats_append.append(lower_conf)
upper_conf = mean + conf_int
upper_conf.name = f"{conf_interval}% upper confidence interval of mean"
stats_append.append(upper_conf)
# P values, computed using the ppf of the normal distribution with our calculated mean and std. We use scipy's functions for this
# see https://help.helioscope.com/article/141-creating-a-p50-and-p90-with-helioscope
for p in p_vals:
values = []
# calculate the p value for every column
for m, s in zip(mean, std):
if s != 0: # for columns with no STDDEV
values.append(stats.norm.ppf((1 - p / 100), loc=m, scale=s))
else:
values.append(None)
# save results
values = pd.Series(values, index=mean.index)
values.name = f"P{p}"
stats_append.append(values)
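# Illustrative example (hypothetical numbers): for P90 with a column mean of 100 and a
# standard deviation of 10, stats.norm.ppf(0.10, loc=100, scale=10) ~= 87.2, i.e. 90% of
# realizations are expected to exceed roughly 87.2 for that metric.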
# since pandas wants to deprecate append, convert each series into a dataframe and use concat
summary_results = pd.concat([summary_results, *[s.to_frame().transpose() for s in stats_append]])
return [
summary_results,
degradation_results,
dc_power_results,
ac_power_results,
yearly_cost_results,
yearly_fail_results,
]
def graph_results(case: SamCase, results: List[Components], save_path: str = None) -> None:
"""
Generate graphs from a list of Component objects from each realization
Args:
case (:obj:`SamCase`): The loaded and verified case to use with the simulation
results (:obj:`list(Components)`): List of component objects that contain the results for each realization
save_path (str, Optional): Path to save graphs to, if provided
"""
lifetime = case.config[ck.LIFETIME_YRS]
colors = [
"r",
"g",
"b",
"c",
"m",
"y",
"k",
"tab:orange",
"tab:brown",
"lime",
"tab:gray",
"indigo",
"navy",
"pink",
"coral",
"yellow",
"teal",
"fuchsia",
"palegoldenrod",
"darkgreen",
]
# base case data to compare to
base_losses = case.base_losses
base_load = np.array(case.base_load) if case.base_load is not None else None
base_ac_energy = np.array(case.base_ac_energy)
base_annual_energy = np.array(case.base_annual_energy)
base_tax_cash_flow = np.array(case.base_tax_cash_flow)
# parse data
avg_ac_energy = np.zeros(len(case.base_ac_energy)) # since length is variable based on frequency of weather file
avg_annual_energy = np.zeros(lifetime)
avg_losses = np.zeros(len(ck.losses))
avg_tax_cash_flow = np.zeros(lifetime + 1) # add 1 for year 0
avg_failures = np.zeros((len(ck.component_keys), lifetime * 365)) # 7 types of components
# computing the average across every realization
for comp in results:
avg_ac_energy += np.array(comp.timeseries_ac_power)
avg_annual_energy += np.array(comp.annual_energy)
avg_losses += np.array(list(comp.losses.values()))
avg_tax_cash_flow += np.array(comp.tax_cash_flow)
for i, c in enumerate(ck.component_keys):
if not case.config.get(c, None):
continue
for f in comp.summarize_failures(c).values():
avg_failures[i] += f
# monthly and annual energy
avg_ac_energy /= len(results)
avg_annual_energy /= len(results)
avg_losses /= len(results)
avg_tax_cash_flow /= len(results)
avg_failures /= len(results)
# sum up failures to be per year
avg_failures = np.sum(np.reshape(avg_failures, (len(ck.component_keys), lifetime, 365)), axis=2)
# determine the frequency of the data, same as the frequency of the supplied weather file
total = int(len(avg_ac_energy) / lifetime)
if total == 8760:
freq = 1
else:
freq = 0
while total > 8760:
freq += 1
total /= freq
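# Example of the inference above (assuming typical weather-file resolutions): an hourly file
# gives len(avg_ac_energy) == lifetime * 8760, so freq == 1 and every sample is kept; a
# 30-minute file gives twice that, so freq == 2 and the slicing below keeps every other sample.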
avg_ac_energy = np.reshape(avg_ac_energy[0::freq], (lifetime, 8760)) # yearly energy by hour
avg_ac_energy = np.sum(avg_ac_energy, axis=0) / lifetime # yearly energy average
avg_ac_energy = np.reshape(avg_ac_energy, (365, 24)) # day energy by hour
avg_day_energy_by_hour = avg_ac_energy.copy() # copy for heatmap yearly energy generation
avg_ac_energy = np.sum(avg_ac_energy, axis=1) # energy per day
base_ac_energy = np.reshape(base_ac_energy[0::freq], (lifetime, 8760))
base_ac_energy = np.sum(base_ac_energy, axis=0) / lifetime
base_ac_energy = np.reshape(base_ac_energy, (365, 24))
base_day_energy_by_hour = base_ac_energy.copy() # copy for heatmap yearly energy generation
base_ac_energy = np.sum(base_ac_energy, axis=1)
# daily load, load is the same between realizations and base
if base_load is not None:
base_load = np.reshape(base_load, (365, 24))
base_load = np.sum(base_load, axis=1)
avg_losses = {k: v for k, v in zip(ck.losses, avg_losses)} # create losses dictionary
# calculate per month energy averaged across every year on every realization
current_month = datetime(datetime.utcnow().year, 1, 1)
# relative deltas allow dynamic month lengths such that each month has the proper number of days
delta = relativedelta(months=1)
start = 0
monthly_energy = {}
monthly_load = {}
base_monthly_energy = {}
for _ in range(12):
month = current_month.strftime("%b")
num_days = ((current_month + delta) - current_month).days # number of days in this month
monthly_energy[month] = np.sum(avg_ac_energy[start : start + num_days])
base_monthly_energy[month] = np.sum(base_ac_energy[start : start + num_days])
if base_load is not None:
monthly_load[month] = np.sum(base_load[start : start + num_days])
current_month += delta
start += num_days
fig, (ax1, ax2) = plt.subplots(1, 2, sharey=True)
fig.set_figheight(5)
fig.set_figwidth(10)
ax1.bar(list(monthly_energy.keys()), list(monthly_energy.values()))
ax1.set_title("Realization Average")
ax1.set_xlabel("Month")
ax1.set_ylabel("kWh")
ax2.bar(list(monthly_energy.keys()), list(base_monthly_energy.values()))
ax2.set_title("Base Case")
ax2.set_xlabel("Month")
ax2.set_ylabel("kWh")
fig.suptitle("Monthly Energy Production")
fig.tight_layout()
if save_path:
plt.savefig(os.path.join(save_path, "Average Monthly Energy Production.png"), bbox_inches="tight", dpi=200)
else:
plt.show()
plt.close() # clear plot
# graph the monthly energy against the monthly load
if base_load is not None:
fig, (ax1, ax2) = plt.subplots(1, 2, sharey=True)
fig.set_figheight(5)
fig.set_figwidth(10)
ind = np.arange(len(monthly_energy))
ax1.bar(ind - 0.2, list(monthly_energy.values()), width=0.4, label="AC Energy")
ax1.bar(ind + 0.2, list(monthly_load.values()), width=0.4, color="tab:gray", label="Electricity Load")
ax1.set_title("Realization Average")
ax1.set_xlabel("Month")
ax1.set_xticks(ind)
ax1.set_xticklabels(labels=list(monthly_energy.keys()))
ax1.set_ylabel("kWh")
ax2.bar(ind - 0.2, list(base_monthly_energy.values()), width=0.4)
ax2.bar(ind + 0.2, list(monthly_load.values()), width=0.4, color="tab:gray")
ax2.set_title("Base Case")
ax2.set_xlabel("Month")
ax2.set_xticks(ind)
ax2.set_xticklabels(labels=list(monthly_energy.keys()))
ax2.set_ylabel("kWh")
fig.legend()
fig.suptitle("Monthly Energy and Load")
fig.tight_layout()
if save_path:
plt.savefig(os.path.join(save_path, "Average Monthly Energy and Load.png"), bbox_inches="tight", dpi=200)
else:
plt.show()
plt.close() # clear plot
fig, (ax1, ax2) = plt.subplots(1, 2, sharey=True)
fig.set_figheight(5)
fig.set_figwidth(10)
# add 1 to have years 1->25
ax1.bar(np.arange(lifetime)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from scipy.optimize import curve_fit
def linear(x, m, b):
model = m*x + b
return model
root_dir = '../data/'
par = np.genfromtxt(root_dir+'final_parameters.csv', delimiter=',', dtype=None, names=True, encoding=None)
ages = np.genfromtxt(root_dir+'final_ages_combination.csv', delimiter=',', dtype=None, names=True, encoding=None)
#
# Tests for the Unary Operator classes
#
import pybamm
import unittest
import numpy as np
from scipy.sparse import diags
class TestUnaryOperators(unittest.TestCase):
def test_unary_operator(self):
a = pybamm.Symbol("a", domain=["test"])
un = pybamm.UnaryOperator("unary test", a)
self.assertEqual(un.children[0].name, a.name)
self.assertEqual(un.domain, a.domain)
# with number
a = pybamm.InputParameter("a")
absval = pybamm.AbsoluteValue(-a)
self.assertEqual(absval.evaluate(inputs={"a": 10}), 10)
self.assertEqual(absval.evaluate(inputs={"a": 10}, known_evals={})[0], 10)
def test_negation(self):
a = pybamm.Symbol("a")
nega = pybamm.Negate(a)
self.assertEqual(nega.name, "-")
self.assertEqual(nega.children[0].name, a.name)
b = pybamm.Scalar(4)
negb = pybamm.Negate(b)
self.assertEqual(negb.evaluate(), -4)
def test_absolute(self):
a = pybamm.Symbol("a")
absa = pybamm.AbsoluteValue(a)
self.assertEqual(absa.name, "abs")
self.assertEqual(absa.children[0].name, a.name)
b = pybamm.Scalar(-4)
absb = pybamm.AbsoluteValue(b)
self.assertEqual(absb.evaluate(), 4)
def test_smooth_absolute_value(self):
a = pybamm.StateVector(slice(0, 1))
expr = pybamm.smooth_absolute_value(a, 10)
self.assertAlmostEqual(expr.evaluate(y=np.array([1]))[0, 0], 1)
self.assertEqual(expr.evaluate(y=np.array([0])), 0)
self.assertAlmostEqual(expr.evaluate(y=np.array([-1]))[0, 0], 1)
import os
import subprocess
from Chaos import Chaos
import random
from numba.core.decorators import jit
import numpy as np
from anim import *
from numba import njit
from functools import partial
OUTPUT_DIR = "gifs"
def randomCoef():
c = round(random.random() * 3, 6)
return random.choice([ -c, c, 0, 0, 0 ])
def genCoefs(numCoefs):
coefs = [ randomCoef() for _ in range(numCoefs) ]
while np.sum(coefs)
# TDA Image Analysis Project : core.standard_analysis
#
# Copyright (C) 2016-2017 TDA Image Analysis Project
# Authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
"""
Standard numerical techniques for working with image data.
"""
import scipy.ndimage
import numpy as np
import math
import pandas as pd
def orientation_field(bmp, sigma=3):
# Author: <NAME>, 2016
# Based on algorithm by <NAME> Gerez from "Systematic methods for the
# computation of the directional fields and singular points of fingerprints," 2002.
"""
Computes orientation field (result everywhere between -pi/2 and pi/2)
from the gradient field of the given image.
"""
u = bmp.astype(float)
du = np.gradient(u)
[ux, uy] = du
Y = scipy.ndimage.filters.gaussian_filter(2.0*ux*uy, sigma=sigma)
X = scipy.ndimage.filters.gaussian_filter(ux**2.0 - uy**2.0, sigma=sigma)
return .5 * np.arctan2(Y, X)
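# Illustrative usage (hypothetical array, not part of this module): a pattern of horizontal
# stripes such as
#   img = np.sin(np.linspace(0, 20 * np.pi, 256))[:, None].repeat(256, axis=1)
# varies only along axis 0, so orientation_field(img) is roughly 0 everywhere, while the
# transposed (vertical-stripe) image gives values near +/- pi/2.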
def topological_defect_array(orientation_field):
"""
Returns a matrix of topological defects for the given orientation field.
Each entry in the matrix is the charge of the defect.
"""
JX = np.diff(orientation_field, axis=0)
JY = np.diff(orientation_field, axis=1)
JX += math.pi * (JX < -math.pi/2.0 ) - math.pi * (JX > math.pi/2.0)
JY += math.pi * (JY < -math.pi/2.0 ) - math.pi * (JY > math.pi/2.0)
return np.rint((np.diff(JY, axis=0) - np.diff(JX, axis=1))/math.pi)
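# Illustrative reading of the output (not from the original source): an entry of +1 (or -1)
# marks a 2x2 plaquette of grid points around which the orientation, defined modulo pi,
# winds by +pi (or -pi); entries of 0 mean no topological defect at that plaquette.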
def topological_defect_array_to_dataframe(td_array):
"""
Converts an array of topological defects to a dataframe containing
a list of defect locations and their charges.
"""
td = np.asarray([ (i,j,td_array[i,j]) for (i,j) in np.argwhere(td_array)])
td = td.astype(int)
td = pd.DataFrame(td, columns=['row', 'col', 'type'])
return td
def fourier_diff(u, order=1):
[N, M] = u.shape
[kx, ky] = np.mgrid[0:N,0:M]
kx = kx - float(N) * ( kx > float(N)/2.0 )
ky = ky - float(M) * ( ky > float(M)/2.0 )
if order % 2 == 1 and N % 2 == 0: kx[N//2,:] = 0.0
if order % 2 == 1 and M % 2 == 0: ky[:,M//2] = 0.0
kx = (kx * 2.0 * math.pi * 1j / float(N)) ** order
ky = (ky * 2.0 * math.pi * 1j / float(M)) ** order
u_fft = np.fft.fft2(u)
ux_fft = u_fft * kx
uy_fft = u_fft * ky
ux = np.real(np.fft.ifft2(ux_fft))
uy = np.real(np.fft.ifft2(uy_fft))
return [ux, uy]
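# Quick sanity check (illustrative, hypothetical values): the wavenumbers above are scaled by
# 2*pi/N, so fourier_diff differentiates with respect to the array index. For
#   i = np.arange(64)
#   u = np.sin(2 * np.pi * i / 64)[:, None].repeat(64, axis=1)
# fourier_diff(u, order=1)[0] is close to (2 * np.pi / 64) * np.cos(2 * np.pi * i / 64)
# broadcast along axis 1, up to floating-point error.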
def emb_wavenumber(u, method="difference", smoothing=10):
# Author: <NAME>, 2016
# Implementation of the EMB algorithm, from "A new fast method for determining local
# properties of striped patterns," 1997.
u = scipy.ndimage.filters.gaussian_filter(u, sigma=2)
u = u - np.sum(u)/np.size(u)
u = u / np.max(np.absolute(u))
if method == "difference":
[ux, uy] = np.gradient(u)
[uxx, uxy] = np.gradient(ux)
[uxy, uyy] = np.gradient(uy)
uxxx = np.gradient(uxx)[0]
uyyy = np.gradient(uyy)[1]
elif method == "fourier":
[ux, uy] = fourier_diff(u, order=1)
[uxx, uyy] = fourier_diff(u, order=2)
[uxxx, uyyy] = fourier_diff(u, order=3)
uxy = fourier_diff(ux)[1]
else:
raise ValueError('EMB_Wavenumber: Unrecognized method "' + method + '"')
Test1 = np.absolute(u) > np.maximum(np.absolute(ux),np.absolute(uy))
Test2 = np.absolute(uxx) > np.absolute(uyy)
"""
"""
import numpy as np
from scipy.spatial import cKDTree
from ..buffered_subvolume_calculations import points_in_buffered_rectangle
from ..buffered_subvolume_calculations import calculate_subvolume_id
def generate_3d_regular_mesh(npts_per_dim, dmin, dmax):
"""
Function returns a regular 3d grid of npts_per_dim**3 points.
The spacing of the grid is defined by delta = (dmax-dmin)/npts_per_dim.
In each dimension, the first point has coordinate delta/2.,
and the last point has coordinate dmax - delta/2.
For example, generate_3d_regular_mesh(5, 0, 1) will occupy the 3d grid spanned by
{0.1, 0.3, 0.5, 0.7, 0.9}.
Parameters
-----------
npts_per_dim : int
Number of desired points per dimension.
dmin, dmax : float
Min/max coordinate value of the box enclosing the grid.
Returns
---------
x, y, z : array_like
Three ndarrays of length npts_per_dim**3
"""
x = np.linspace(dmin, dmax, npts_per_dim + 1)
y = np.linspace(dmin, dmax, npts_per_dim + 1)
z = np.linspace(dmin, dmax, npts_per_dim + 1)
delta = np.diff(x)[0] / 2.0
x, y, z = np.array(np.meshgrid(x[:-1], y[:-1], z[:-1]))
return x.flatten() + delta, y.flatten() + delta, z.flatten() + delta
def test1():
"""Enforce that parallel pair counts with thechopper agree exactly
with serial pair counts for a set of randomly distributed points.
"""
rng = np.random.RandomState(43)
logrbins = np.linspace(-1, np.log10(250), 25)
rbins = 10 ** logrbins
subvol_lengths_xyz = np.array((1250, 1250, 1250)).astype("f4")
rmax_xyz = np.repeat(np.max(rbins), 3).astype("f4")
period_xyz = np.array((1500, 1500, 1500)).astype("f4")
xyz_mins = np.array((0, 0, 0)).astype("f4")
xyz_maxs = xyz_mins + subvol_lengths_xyz
npts = int(2e4)
x = rng.uniform(0, period_xyz[0], npts)
y = rng.uniform(0, period_xyz[1], npts)
z = rng.uniform(0, period_xyz[2], npts)
_w = points_in_buffered_rectangle(x, y, z, xyz_mins, xyz_maxs, rmax_xyz, period_xyz)
xout, yout, zout, indx_out, in_subvol_out = _w
explicit_mask = np.ones(npts).astype(bool)
explicit_mask &= x >= xyz_mins[0]
explicit_mask &= y >= xyz_mins[1]
explicit_mask &= z >= xyz_mins[2]
explicit_mask &= x < xyz_maxs[0]
explicit_mask &= y < xyz_maxs[1]
explicit_mask &= z < xyz_maxs[2]
sample1 = [x[explicit_mask], y[explicit_mask], z[explicit_mask]]
sample1_tree = cKDTree(np.vstack(sample1).T, boxsize=period_xyz)
sample2 = [x, y, z]
sample2_tree = cKDTree(np.vstack(sample2).T, boxsize=period_xyz)
counts_scipy = sample1_tree.count_neighbors(sample2_tree, rbins)
sample3 = [xout[in_subvol_out], yout[in_subvol_out], zout[in_subvol_out]]
sample3_tree = cKDTree(np.vstack(sample3).T)
sample4 = [xout, yout, zout]
sample4_tree = cKDTree(np.vstack(sample4).T)
counts_aph = sample3_tree.count_neighbors(sample4_tree, rbins)
assert np.allclose(counts_scipy, counts_aph, rtol=0.0001), "Wrong pair counts!"
def test2():
"""Require that the calculate_subvolume_id function
returns a cellnum array with all points lying within [0, nx*ny*nz)
"""
rng = np.random.RandomState(43)
npts = int(1e2)
period = [200, 300, 800]
x = rng.uniform(0, period[0], npts)
y = rng.uniform(0, period[1], npts)
z = rng.uniform(0, period[2], npts)
nx, ny, nz = 5, 6, 7
_result = calculate_subvolume_id(x, y, z, nx, ny, nz, period)
x2, y2, z2, ix, iy, iz, cellnum = _result
assert np.all(cellnum >= 0)
assert np.all(cellnum < nx * ny * nz)
assert np.all(x == x2)
assert np.all(y == y2)
assert np.all(z == z2)
def test3():
"""Require that calculate_subvolume_id function wraps xyz points
lying outside the box back into the box.
"""
Lbox = 1.0
npts_per_dim = 5
x, y, z = generate_3d_regular_mesh(npts_per_dim, 0, Lbox)
x[0] = -0.5
y[0] = -0.5
z[0] = -0.5
nx, ny, nz = npts_per_dim, npts_per_dim, npts_per_dim
_result = calculate_subvolume_id(x, y, z, nx, ny, nz, Lbox)
x2, y2, z2, ix, iy, iz, cellnum = _result
assert np.all(cellnum >= 0)
assert np.all(cellnum < nx * ny * nz)
assert np.all(x2 >= 0)
assert np.all(y2 >= 0)
assert np.all(z2 >= 0)
assert np.any(x < 0)
assert np.any(y < 0)
# This is automatically-generated code.
# Uses the jinja2 library for templating.
import cvxpy as cp
import numpy as np
import scipy as sp
# setup
problemID = "least_abs_dev_0"
prob = None
opt_val = None
# Variable declarations
import scipy.sparse as sps
np.random.seed(0)
m = 5000
n = 200
A = np.random.randn(m,n);
A = A*sps.diags([1 / np.sqrt(np.sum(A**2, 0))], [0])
b = A.dot(10*np.random.randn(n))
# -*- mode: python; coding: utf-8 -*
# Copyright (c) 2019 Radio Astronomy Software Group
# Licensed under the 3-clause BSD License
import os
import fileinput
import re
import h5py
import pytest
import numpy as np
import warnings
from astropy import units
from astropy.units import Quantity
from astropy.coordinates import (
SkyCoord,
EarthLocation,
Angle,
AltAz,
Longitude,
Latitude,
)
from astropy.time import Time, TimeDelta
import scipy.io
import pyuvdata.tests as uvtest
from pyradiosky.data import DATA_PATH as SKY_DATA_PATH
from pyradiosky import utils as skyutils
from pyradiosky import skymodel, SkyModel
GLEAM_vot = os.path.join(SKY_DATA_PATH, "gleam_50srcs.vot")
# ignore new numpy 1.20 warning emitted from h5py
pytestmark = pytest.mark.filterwarnings("ignore:Passing None into shape arguments")
@pytest.fixture
def time_location():
array_location = EarthLocation(lat="-30d43m17.5s", lon="21d25m41.9s", height=1073.0)
time = Time("2015-03-01 00:00:00", scale="utc", location=array_location)
return time, array_location
@pytest.fixture
def zenith_skycoord(time_location):
time, array_location = time_location
source_coord = SkyCoord(
alt=Angle(90, unit=units.deg),
az=Angle(0, unit=units.deg),
obstime=time,
frame="altaz",
location=array_location,
)
return source_coord.transform_to("icrs")
@pytest.fixture
def zenith_skymodel(zenith_skycoord):
icrs_coord = zenith_skycoord
ra = icrs_coord.ra
dec = icrs_coord.dec
names = "zen_source"
stokes = [1.0, 0, 0, 0] * units.Jy
return SkyModel(name=names, ra=ra, dec=dec, stokes=stokes, spectral_type="flat")
@pytest.fixture
def moonsky():
pytest.importorskip("lunarsky")
from lunarsky import MoonLocation, SkyCoord as SkyC
# Tranquility base
array_location = MoonLocation(lat="00d41m15s", lon="23d26m00s", height=0.0)
time = Time.now()
zen_coord = SkyC(
alt=Angle(90, unit=units.deg),
az=Angle(0, unit=units.deg),
obstime=time,
frame="lunartopo",
location=array_location,
)
icrs_coord = zen_coord.transform_to("icrs")
ra = icrs_coord.ra
dec = icrs_coord.dec
names = "zen_source"
stokes = [1.0, 0, 0, 0] * units.Jy
zenith_source = SkyModel(
name=names, ra=ra, dec=dec, stokes=stokes, spectral_type="flat"
)
zenith_source.update_positions(time, array_location)
yield zenith_source
@pytest.fixture
def healpix_data():
pytest.importorskip("astropy_healpix")
import astropy_healpix
nside = 32
npix = astropy_healpix.nside_to_npix(nside)
hp_obj = astropy_healpix.HEALPix(nside=nside)
frequencies = np.linspace(100, 110, 10)
pixel_area = astropy_healpix.nside_to_pixel_area(nside)
# Note that the cone search includes any pixels that overlap with the search
# region. With such a low resolution, this returns some slightly different
# results from the equivalent healpy search. Subtracting (0.75 * pixres) from
# the search radius resolves this discrepancy for the test.
pixres = hp_obj.pixel_resolution.to("deg").value
ipix_disc = hp_obj.cone_search_lonlat(
135 * units.deg, 0 * units.deg, radius=(10 - pixres * 0.75) * units.deg
)
return {
"nside": nside,
"npix": npix,
"frequencies": frequencies,
"pixel_area": pixel_area,
"ipix_disc": ipix_disc,
}
@pytest.fixture
def mock_point_skies():
# Provides a function that makes equivalent models of different spectral types.
Ncomp = 10
Nfreqs = 30
names = np.arange(Ncomp).astype(str)
ras = Longitude(np.linspace(0, 2 * np.pi, Ncomp), "rad")
decs = Latitude(np.linspace(-np.pi / 2, np.pi / 2, Ncomp), "rad")
freq_arr = np.linspace(100e6, 130e6, Nfreqs) * units.Hz
# Spectrum = Power law
alpha = -0.5
spectrum = ((freq_arr / freq_arr[0]) ** (alpha))[None, :, None] * units.Jy
def _func(stype):
stokes = spectrum.repeat(4, 0).repeat(Ncomp, 2)
if stype in ["full", "subband"]:
stokes = spectrum.repeat(4, 0).repeat(Ncomp, 2)
stokes[1:, :, :] = 0.0 # Set unpolarized
return SkyModel(
name=names,
ra=ras,
dec=decs,
stokes=stokes,
spectral_type=stype,
freq_array=freq_arr,
)
elif stype == "spectral_index":
stokes = stokes[:, :1, :]
spectral_index = np.ones(Ncomp) * alpha
return SkyModel(
name=names,
ra=ras,
dec=decs,
stokes=stokes,
spectral_type=stype,
spectral_index=spectral_index,
reference_frequency=np.repeat(freq_arr[0], Ncomp),
)
elif stype == "flat":
stokes = stokes[:, :1, :]
return SkyModel(
name=names,
ra=ras,
dec=decs,
stokes=stokes,
spectral_type=stype,
)
yield _func
@pytest.fixture(scope="function")
def healpix_disk_old():
pytest.importorskip("astropy_healpix")
return SkyModel.from_healpix_hdf5(os.path.join(SKY_DATA_PATH, "healpix_disk.hdf5"))
@pytest.fixture(scope="function")
def healpix_disk_new():
pytest.importorskip("astropy_healpix")
return SkyModel.from_skyh5(os.path.join(SKY_DATA_PATH, "healpix_disk.skyh5"))
def test_set_spectral_params(zenith_skymodel):
with pytest.warns(
DeprecationWarning,
match="This function is deprecated, use `_set_spectral_type_params` instead.",
):
zenith_skymodel.set_spectral_type_params(zenith_skymodel.spectral_type)
def test_init_error(zenith_skycoord):
with pytest.raises(ValueError, match="If initializing with values, all of"):
SkyModel(
ra=zenith_skycoord.ra,
dec=zenith_skycoord.dec,
stokes=[1.0, 0, 0, 0] * units.Jy,
spectral_type="flat",
)
with pytest.raises(ValueError, match="component_type must be one of:"):
SkyModel(
name="zenith_source",
ra=zenith_skycoord.ra,
dec=zenith_skycoord.dec,
stokes=[1.0, 0, 0, 0],
spectral_type="flat",
component_type="foo",
)
@pytest.mark.parametrize("spec_type", ["spectral_index", "full", "subband"])
def test_init_error_freqparams(zenith_skycoord, spec_type):
with pytest.raises(ValueError, match="If initializing with values, all of"):
SkyModel(
name="zenith_source",
ra=zenith_skycoord.ra,
dec=zenith_skycoord.dec,
stokes=[1.0, 0, 0, 0],
spectral_type=spec_type,
)
def test_source_zenith_from_icrs(time_location):
"""Test single source position at zenith constructed using icrs."""
time, array_location = time_location
lst = time.sidereal_time("apparent")
tee_ra = lst
cirs_ra = skyutils._tee_to_cirs_ra(tee_ra, time)
cirs_source_coord = SkyCoord(
ra=cirs_ra,
dec=array_location.lat,
obstime=time,
frame="cirs",
location=array_location,
)
icrs_coord = cirs_source_coord.transform_to("icrs")
ra = icrs_coord.ra
dec = icrs_coord.dec
zenith_source = SkyModel(
name="icrs_zen",
ra=ra,
dec=dec,
stokes=[1.0, 0, 0, 0] * units.Jy,
spectral_type="flat",
)
zenith_source.update_positions(time, array_location)
zenith_source_lmn = zenith_source.pos_lmn.squeeze()
assert np.allclose(zenith_source_lmn, np.array([0, 0, 1]), atol=1e-5)
def test_source_zenith(time_location, zenith_skymodel):
"""Test single source position at zenith."""
time, array_location = time_location
zenith_skymodel.update_positions(time, array_location)
zenith_source_lmn = zenith_skymodel.pos_lmn.squeeze()
assert np.allclose(zenith_source_lmn, np.array([0, 0, 1]))
@pytest.mark.parametrize(
"spec_type, param",
[
("flat", "ra"),
("flat", "dec"),
("spectral_index", "reference_frequency"),
("subband", "freq_array"),
],
)
def test_init_lists(spec_type, param, zenith_skycoord):
icrs_coord = zenith_skycoord
ras = Longitude(
[zenith_skycoord.ra + Longitude(0.5 * ind * units.deg) for ind in range(5)]
)
decs = Latitude(np.zeros(5, dtype=np.float64) + icrs_coord.dec.value * units.deg)
names = ["src_" + str(ind) for ind in range(5)]
if spec_type in ["subband", "full"]:
n_freqs = 3
freq_array = [100e6, 120e6, 140e6] * units.Hz
else:
n_freqs = 1
freq_array = None
stokes = np.zeros((4, n_freqs, 5), dtype=np.float64) * units.Jy
stokes[0, :, :] = 1 * units.Jy
if spec_type == "spectral_index":
ref_freqs = np.zeros(5, dtype=np.float64) + 150e6 * units.Hz
spec_index = np.zeros(5, dtype=np.float64) - 0.8
else:
ref_freqs = None
spec_index = None
ref_model = SkyModel(
name=names,
ra=ras,
dec=decs,
stokes=stokes,
reference_frequency=ref_freqs,
spectral_index=spec_index,
freq_array=freq_array,
spectral_type=spec_type,
)
list_warning = None
if param == "ra":
ras = list(ras)
elif param == "dec":
decs = list(decs)
elif param == "reference_frequency":
ref_freqs = list(ref_freqs)
list_warning = (
"reference_frequency is a list. Attempting to convert to a Quantity."
)
warn_type = UserWarning
elif param == "freq_array":
freq_array = list(freq_array)
list_warning = "freq_array is a list. Attempting to convert to a Quantity."
warn_type = UserWarning
if list_warning is not None:
with uvtest.check_warnings(warn_type, match=list_warning):
list_model = SkyModel(
name=names,
ra=ras,
dec=decs,
stokes=stokes,
reference_frequency=ref_freqs,
spectral_index=spec_index,
freq_array=freq_array,
spectral_type=spec_type,
)
else:
list_model = SkyModel(
name=names,
ra=ras,
dec=decs,
stokes=stokes,
reference_frequency=ref_freqs,
spectral_index=spec_index,
freq_array=freq_array,
spectral_type=spec_type,
)
assert ref_model == list_model
@pytest.mark.parametrize(
"spec_type, param, msg",
[
("flat", "ra", "All values in ra must be Longitude objects"),
("flat", "ra_lat", "All values in ra must be Longitude objects"),
("flat", "dec", "All values in dec must be Latitude objects"),
("flat", "dec_lon", "All values in dec must be Latitude objects"),
(
"flat",
"stokes",
"Stokes should be passed as an astropy Quantity array not a list",
),
(
"flat",
"stokes_obj",
"Stokes should be passed as an astropy Quantity array.",
),
(
"spectral_index",
"reference_frequency",
"If reference_frequency is supplied as a list, all the elements must be Quantity objects with compatible units.",
),
(
"spectral_index",
"reference_frequency_jy",
re.escape(
"'Jy' (spectral flux density) and 'Hz' (frequency) are not convertible"
),
),
(
"spectral_index",
"reference_frequency_obj",
"If reference_frequency is supplied as a list, all the elements must be Quantity objects with compatible units.",
),
(
"subband",
"freq_array",
"If freq_array is supplied as a list, all the elements must be Quantity "
"objects with compatible units.",
),
(
"subband",
"freq_array_ang",
re.escape("'deg' (angle) and 'Hz' (frequency) are not convertible"),
),
(
"subband",
"freq_array_obj",
"If freq_array is supplied as a list, all the elements must be Quantity "
"objects with compatible units.",
),
],
)
def test_init_lists_errors(spec_type, param, msg, zenith_skycoord):
icrs_coord = zenith_skycoord
ras = Longitude(
[zenith_skycoord.ra + Longitude(0.5 * ind * units.deg) for ind in range(5)]
)
decs = Latitude(np.zeros(5, dtype=np.float64) + icrs_coord.dec.value * units.deg)
names = ["src_" + str(ind) for ind in range(5)]
if spec_type in ["subband", "full"]:
n_freqs = 3
freq_array = [100e6, 120e6, 140e6] * units.Hz
else:
n_freqs = 1
freq_array = None
stokes = np.zeros((4, n_freqs, 5), dtype=np.float64) * units.Jy
stokes[0, :, :] = 1.0 * units.Jy
if spec_type == "spectral_index":
ref_freqs = np.zeros(5, dtype=np.float64) + 150e6 * units.Hz
spec_index = np.zeros(5, dtype=np.float64) - 0.8
else:
ref_freqs = None
spec_index = None
list_warning = None
if "freq_array" in param:
list_warning = "freq_array is a list. Attempting to convert to a Quantity."
warn_type = UserWarning
elif "reference_frequency" in param:
list_warning = (
"reference_frequency is a list. Attempting to convert to a Quantity."
)
warn_type = UserWarning
if param == "ra":
ras = list(ras)
ras[1] = ras[1].value
elif param == "ra_lat":
ras = list(ras)
ras[1] = decs[1]
elif param == "dec":
decs = list(decs)
decs[1] = decs[1].value
elif param == "dec_lon":
decs = list(decs)
decs[1] = ras[1]
elif param == "reference_frequency":
ref_freqs = list(ref_freqs)
ref_freqs[1] = ref_freqs[1].value
elif param == "reference_frequency_jy":
ref_freqs = list(ref_freqs)
ref_freqs[1] = ref_freqs[1].value * units.Jy
elif param == "reference_frequency_obj":
ref_freqs = list(ref_freqs)
ref_freqs[1] = icrs_coord
elif param == "freq_array":
freq_array = list(freq_array)
freq_array[1] = freq_array[1].value
elif param == "freq_array_ang":
freq_array = list(freq_array)
freq_array[1] = ras[1]
elif param == "freq_array_obj":
freq_array = list(freq_array)
freq_array[1] = icrs_coord
elif param == "stokes":
stokes = list(stokes)
stokes[1] = stokes[1].value.tolist()
elif param == "stokes_hz":
stokes = stokes.value * units.Hz
elif param == "stokes_obj":
stokes = icrs_coord
with pytest.raises(ValueError, match=msg):
if list_warning is not None:
with uvtest.check_warnings(warn_type, match=list_warning):
SkyModel(
name=names,
ra=ras,
dec=decs,
stokes=stokes,
reference_frequency=ref_freqs,
spectral_index=spec_index,
freq_array=freq_array,
spectral_type=spec_type,
)
else:
SkyModel(
name=names,
ra=ras,
dec=decs,
stokes=stokes,
reference_frequency=ref_freqs,
spectral_index=spec_index,
freq_array=freq_array,
spectral_type=spec_type,
)
def test_skymodel_init_errors(zenith_skycoord):
icrs_coord = zenith_skycoord
ra = icrs_coord.ra
dec = icrs_coord.dec
# Check error cases
with pytest.raises(
ValueError,
match=("UVParameter _ra is not the appropriate type."),
):
SkyModel(
name="icrs_zen",
ra=ra.rad,
dec=dec,
stokes=[1.0, 0, 0, 0] * units.Jy,
spectral_type="flat",
)
with pytest.raises(
ValueError,
match=("UVParameter _dec is not the appropriate type."),
):
SkyModel(
name="icrs_zen",
ra=ra,
dec=dec.rad,
stokes=[1.0, 0, 0, 0] * units.Jy,
spectral_type="flat",
)
with pytest.raises(
ValueError,
match=(
"Only one of freq_array and reference_frequency can be specified, not both."
),
):
SkyModel(
name="icrs_zen",
ra=ra,
dec=dec,
stokes=[1.0, 0, 0, 0] * units.Jy,
spectral_type="flat",
reference_frequency=[1e8] * units.Hz,
freq_array=[1e8] * units.Hz,
)
with pytest.raises(
ValueError, match=("freq_array must have a unit that can be converted to Hz.")
):
SkyModel(
name="icrs_zen",
ra=ra,
dec=dec,
stokes=[1.0, 0, 0, 0] * units.Jy,
spectral_type="flat",
freq_array=[1e8] * units.m,
)
with pytest.raises(ValueError, match=("For point component types, the stokes")):
SkyModel(
name="icrs_zen",
ra=ra,
dec=dec,
stokes=[1.0, 0, 0, 0] * units.m,
spectral_type="flat",
freq_array=[1e8] * units.Hz,
)
with pytest.raises(
ValueError, match=("For point component types, the coherency_radec")
):
sky = SkyModel(
name="icrs_zen",
ra=ra,
dec=dec,
stokes=[1.0, 0, 0, 0] * units.Jy,
spectral_type="flat",
freq_array=[1e8] * units.Hz,
)
sky.coherency_radec = sky.coherency_radec.value * units.m
sky.check()
with pytest.raises(
ValueError,
match=("reference_frequency must have a unit that can be converted to Hz."),
):
SkyModel(
name="icrs_zen",
ra=ra,
dec=dec,
stokes=[1.0, 0, 0, 0] * units.Jy,
spectral_type="flat",
reference_frequency=[1e8] * units.m,
)
def test_skymodel_deprecated(time_location):
"""Test that old init works with deprecation."""
source_new = SkyModel(
name="Test",
ra=Longitude(12.0 * units.hr),
dec=Latitude(-30.0 * units.deg),
stokes=[1.0, 0.0, 0.0, 0.0] * units.Jy,
spectral_type="flat",
reference_frequency=np.array([1e8]) * units.Hz,
)
with pytest.warns(
DeprecationWarning,
match="The input parameters to SkyModel.__init__ have changed",
):
source_old = SkyModel(
"Test",
Longitude(12.0 * units.hr),
Latitude(-30.0 * units.deg),
[1.0, 0.0, 0.0, 0.0] * units.Jy,
np.array([1e8]) * units.Hz,
"flat",
)
assert source_new == source_old
# test numpy array for reference_frequency
with pytest.warns(
DeprecationWarning,
match="In version 0.2.0, the reference_frequency will be required to be an astropy Quantity",
):
source_old = SkyModel(
name="Test",
ra=Longitude(12.0 * units.hr),
dec=Latitude(-30.0 * units.deg),
stokes=[1.0, 0.0, 0.0, 0.0] * units.Jy,
spectral_type="flat",
reference_frequency=np.array([1e8]),
)
assert source_new == source_old
# test list of floats for reference_frequency
with pytest.warns(
DeprecationWarning,
match="In version 0.2.0, the reference_frequency will be required to be an astropy Quantity",
):
source_old = SkyModel(
name="Test",
ra=Longitude(12.0 * units.hr),
dec=Latitude(-30.0 * units.deg),
stokes=[1.0, 0.0, 0.0, 0.0] * units.Jy,
spectral_type="flat",
reference_frequency=[1e8],
)
assert source_new == source_old
with pytest.warns(
DeprecationWarning,
match="In version 0.2.0, stokes will be required to be an astropy "
"Quantity with units that are convertable to one of",
):
source_old = SkyModel(
name="Test",
ra=Longitude(12.0 * units.hr),
dec=Latitude(-30.0 * units.deg),
stokes=np.asarray([1.0, 0.0, 0.0, 0.0]),
spectral_type="flat",
reference_frequency=np.array([1e8]) * units.Hz,
)
assert source_new == source_old
source_old = SkyModel(
name="Test",
ra=Longitude(12.0 * units.hr),
dec=Latitude(-30.0 * units.deg),
stokes=[1.0, 0.0, 0.0, 0.0] * units.Jy,
spectral_type="flat",
reference_frequency=np.array([1.5e8]) * units.Hz,
)
with pytest.warns(
DeprecationWarning,
match=(
"Future equality does not pass, probably because the frequencies "
"were not checked"
),
):
assert source_new == source_old
source_old = SkyModel(
name="Test",
ra=Longitude(12.0 * units.hr),
dec=Latitude(-30.0 * units.deg + 2e-3 * units.arcsec),
stokes=[1.0, 0.0, 0.0, 0.0] * units.Jy,
spectral_type="flat",
reference_frequency=np.array([1e8]) * units.Hz,
)
with pytest.warns(
DeprecationWarning,
match=("The _dec parameters are not within the future tolerance"),
):
assert source_new == source_old
source_old = SkyModel(
name="Test",
ra=Longitude(Longitude(12.0 * units.hr) + Longitude(2e-3 * units.arcsec)),
dec=Latitude(-30.0 * units.deg),
stokes=[1.0, 0.0, 0.0, 0.0] * units.Jy,
spectral_type="flat",
reference_frequency=np.array([1e8]) * units.Hz,
)
with pytest.warns(
DeprecationWarning,
match=("The _ra parameters are not within the future tolerance"),
):
assert source_new == source_old
stokes = np.zeros((4, 2, 1)) * units.Jy
stokes[0, :, :] = 1.0 * units.Jy
source_new = SkyModel(
name="Test",
ra=Longitude(12.0 * units.hr),
dec=Latitude(-30.0 * units.deg),
stokes=stokes,
spectral_type="subband",
freq_array=np.array([1e8, 1.5e8]) * units.Hz,
)
with pytest.warns(
DeprecationWarning,
match="The input parameters to SkyModel.__init__ have changed",
):
source_old = SkyModel(
"Test",
Longitude(12.0 * units.hr),
Latitude(-30.0 * units.deg),
stokes,
np.array([1e8, 1.5e8]) * units.Hz,
"subband",
)
assert source_new == source_old
# test numpy array for freq_array
with pytest.warns(
DeprecationWarning,
match="In version 0.2.0, the freq_array will be required to be an astropy Quantity",
):
source_old = SkyModel(
name="Test",
ra=Longitude(12.0 * units.hr),
dec=Latitude(-30.0 * units.deg),
stokes=stokes,
spectral_type="subband",
freq_array=np.array([1e8, 1.5e8]),
)
assert source_new == source_old
# test list of floats for freq_array
with pytest.warns(
DeprecationWarning,
match="In version 0.2.0, the freq_array will be required to be an astropy Quantity",
):
source_old = SkyModel(
name="Test",
ra=Longitude(12.0 * units.hr),
dec=Latitude(-30.0 * units.deg),
stokes=stokes,
spectral_type="subband",
freq_array=[1e8, 1.5e8],
)
assert source_new == source_old
time, telescope_location = time_location
with pytest.warns(
DeprecationWarning,
match="Passing telescope_location to SkyModel.coherency_calc is deprecated",
):
source_new.update_positions(time, telescope_location)
source_new.coherency_calc(telescope_location)
@pytest.mark.parametrize("spec_type", ["flat", "subband", "spectral_index"])
def test_jansky_to_kelvin_loop(spec_type):
skyobj = SkyModel.from_gleam_catalog(GLEAM_vot, spectral_type=spec_type)
stokes_expected = np.zeros_like(skyobj.stokes.value) * units.K * units.sr
if spec_type == "subband":
brightness_temperature_conv = units.brightness_temperature(skyobj.freq_array)
for compi in range(skyobj.Ncomponents):
stokes_expected[:, :, compi] = (skyobj.stokes[:, :, compi] / units.sr).to(
units.K, brightness_temperature_conv
) * units.sr
else:
brightness_temperature_conv = units.brightness_temperature(
skyobj.reference_frequency
)
stokes_expected = (skyobj.stokes / units.sr).to(
units.K, brightness_temperature_conv
) * units.sr
skyobj2 = skyobj.copy()
skyobj2.jansky_to_kelvin()
assert units.quantity.allclose(skyobj2.stokes, stokes_expected, equal_nan=True)
# check no change if already in K
skyobj3 = skyobj2.copy()
skyobj3.jansky_to_kelvin()
assert skyobj3 == skyobj2
skyobj2.kelvin_to_jansky()
assert skyobj == skyobj2
# check no change if already in Jy
skyobj3 = skyobj2.copy()
skyobj3.kelvin_to_jansky()
assert skyobj3 == skyobj2
def test_jansky_to_kelvin_loop_healpix(healpix_data, healpix_disk_new):
skyobj = healpix_disk_new
stokes_expected = np.zeros_like(skyobj.stokes.value) * units.Jy / units.sr
brightness_temperature_conv = units.brightness_temperature(skyobj.freq_array)
for compi in range(skyobj.Ncomponents):
stokes_expected[:, :, compi] = (skyobj.stokes[:, :, compi]).to(
units.Jy / units.sr, brightness_temperature_conv
)
skyobj2 = skyobj.copy()
skyobj2.kelvin_to_jansky()
assert units.quantity.allclose(skyobj2.stokes, stokes_expected, equal_nan=True)
# check no change if already in Jy
skyobj3 = skyobj2.copy()
skyobj3.kelvin_to_jansky()
assert skyobj3 == skyobj2
skyobj2.jansky_to_kelvin()
assert skyobj == skyobj2
# check no change if already in K
skyobj3 = skyobj2.copy()
skyobj3.jansky_to_kelvin()
assert skyobj3 == skyobj2
def test_jansky_to_kelvin_errors(zenith_skymodel):
with pytest.raises(
ValueError,
match="Either reference_frequency or freq_array must be set to convert to K.",
):
zenith_skymodel.jansky_to_kelvin()
with pytest.raises(
ValueError,
match="Either reference_frequency or freq_array must be set to convert to Jy.",
):
zenith_skymodel.stokes = zenith_skymodel.stokes.value * units.K * units.sr
zenith_skymodel.kelvin_to_jansky()
def test_healpix_to_point_loop(healpix_data, healpix_disk_new):
skyobj = healpix_disk_new
skyobj2 = skyobj.copy()
skyobj2.healpix_to_point()
skyobj2.point_to_healpix()
assert skyobj == skyobj2
def test_healpix_to_point_errors(zenith_skymodel):
with pytest.raises(
ValueError,
match="This method can only be called if component_type is 'healpix'.",
):
zenith_skymodel.healpix_to_point()
with pytest.raises(
ValueError,
match="This method can only be called if component_type is 'point' and "
"the nside and hpx_inds parameters are set.",
):
zenith_skymodel.point_to_healpix()
def test_update_position_errors(zenith_skymodel, time_location):
time, array_location = time_location
with pytest.raises(ValueError, match=("time must be an astropy Time object.")):
zenith_skymodel.update_positions("2018-03-01 00:00:00", array_location)
with pytest.raises(ValueError, match=("telescope_location must be a.")):
zenith_skymodel.update_positions(time, time)
def test_coherency_calc_errors():
"""Test that correct errors are raised when providing invalid location object."""
coord = SkyCoord(ra=30.0 * units.deg, dec=40 * units.deg, frame="icrs")
stokes_radec = [1, -0.2, 0.3, 0.1] * units.Jy
source = SkyModel(
name="test",
ra=coord.ra,
dec=coord.dec,
stokes=stokes_radec,
spectral_type="flat",
)
with pytest.warns(UserWarning, match="Horizon cutoff undefined"):
with pytest.raises(ValueError, match="telescope_location must be an"):
source.coherency_calc().squeeze()
def test_calc_basis_rotation_matrix(time_location):
"""
This tests whether the 3-D rotation matrix from RA/Dec to Alt/Az is
actually a rotation matrix (R R^T = R^T R = I)
"""
time, telescope_location = time_location
source = SkyModel(
name="Test",
ra=Longitude(12.0 * units.hr),
dec=Latitude(-30.0 * units.deg),
stokes=[1.0, 0.0, 0.0, 0.0] * units.Jy,
spectral_type="flat",
)
source.update_positions(time, telescope_location)
basis_rot_matrix = source._calc_average_rotation_matrix()
assert np.allclose(np.matmul(basis_rot_matrix, basis_rot_matrix.T), np.eye(3))
assert np.allclose(np.matmul(basis_rot_matrix.T, basis_rot_matrix), np.eye(3))
def test_calc_vector_rotation(time_location):
"""
This checks that the 2-D coherency rotation matrix is unit determinant.
I suppose we could also have checked (R R^T = R^T R = I)
"""
time, telescope_location = time_location
source = SkyModel(
name="Test",
ra=Longitude(12.0 * units.hr),
dec=Latitude(-30.0 * units.deg),
stokes=[1.0, 0.0, 0.0, 0.0] * units.Jy,
spectral_type="flat",
)
source.update_positions(time, telescope_location)
coherency_rotation = np.squeeze(source._calc_coherency_rotation())
assert np.isclose(np.linalg.det(coherency_rotation), 1)
@pytest.mark.parametrize("spectral_type", ["flat", "full"])
def test_pol_rotator(time_location, spectral_type):
"""Test that when above_horizon is unset, the coherency rotation is done for all polarized sources."""
time, telescope_location = time_location
Nsrcs = 50
ras = Longitude(np.linspace(0, 24, Nsrcs) * units.hr)
decs = Latitude(np.linspace(-90, 90, Nsrcs) * units.deg)
names = np.arange(Nsrcs).astype("str")
fluxes = np.array([[[5.5, 0.7, 0.3, 0.0]]] * Nsrcs).T * units.Jy
# Make the last source non-polarized
fluxes[..., -1] = [[1.0], [0], [0], [0]] * units.Jy
extra = {}
# Add frequencies if "full" freq:
if spectral_type == "full":
Nfreqs = 10
freq_array = np.linspace(100e6, 110e6, Nfreqs) * units.Hz
fluxes = fluxes.repeat(Nfreqs, axis=1)
extra = {"freq_array": freq_array}
assert isinstance(fluxes, Quantity)
source = SkyModel(
name=names,
ra=ras,
dec=decs,
stokes=fluxes,
spectral_type=spectral_type,
**extra,
)
assert source._n_polarized == Nsrcs - 1
source.update_positions(time, telescope_location)
# Check the default of inds for _calc_rotation_matrix()
rots1 = source._calc_rotation_matrix()
inds = np.array([25, 45, 16])
rots2 = source._calc_rotation_matrix(inds)
assert np.allclose(rots1[..., inds], rots2)
# Unset the horizon mask and confirm that all rotation matrices are calculated.
source.above_horizon = None
with pytest.warns(UserWarning, match="Horizon cutoff undefined"):
local_coherency = source.coherency_calc()
assert local_coherency.unit == units.Jy
# Check that all polarized sources are rotated.
assert not np.all(
units.quantity.isclose(
local_coherency[..., :-1], source.coherency_radec[..., :-1]
)
)
assert units.quantity.allclose(
local_coherency[..., -1], source.coherency_radec[..., -1]
)
def analytic_beam_jones(za, az, sigma=0.3):
"""
Analytic beam with sensible polarization response.
Required for testing polarized sources.
"""
# B = np.exp(-np.tan(za/2.)**2. / 2. / sigma**2.)
B = 1
# J alone gives you the dipole beam.
# B can be used to add another envelope in addition.
J = np.array(
[[np.cos(za) * np.sin(az), np.cos(az)], [np.cos(az) * np.cos(za), -np.sin(az)]]
)
return B * J
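# Worked example (illustrative): at zenith (za = 0, az = 0) the Jones matrix reduces to
#   [[0, 1],
#    [1, 0]]
# i.e. each ideal-dipole feed responds with unit gain to one of the two orthogonal sky
# polarization components, which is why this beam is convenient for testing polarized sources.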
def test_polarized_source_visibilities(time_location):
"""Test that visibilities of a polarized source match prior calculations."""
time0, array_location = time_location
ha_off = 1 / 6.0
ha_delta = 0.1
time_offsets = np.arange(-ha_off, ha_off + ha_delta, ha_delta)
zero_indx = np.argmin(np.abs(time_offsets))
# make sure we get a true zenith time
time_offsets[zero_indx] = 0.0
times = time0 + time_offsets * units.hr
ntimes = times.size
zenith = SkyCoord(
alt=90.0 * units.deg,
az=0 * units.deg,
frame="altaz",
obstime=time0,
location=array_location,
)
zenith_icrs = zenith.transform_to("icrs")
src_astropy = SkyCoord(
ra=zenith_icrs.ra, dec=zenith_icrs.dec, obstime=times, location=array_location
)
src_astropy_altaz = src_astropy.transform_to("altaz")
assert np.isclose(src_astropy_altaz.alt.rad[zero_indx], np.pi / 2)
stokes_radec = [1, -0.2, 0.3, 0.1] * units.Jy
decoff = 0.0 * units.arcmin # -0.17 * units.arcsec
raoff = 0.0 * units.arcsec
source = SkyModel(
name="icrs_zen",
ra=Longitude(zenith_icrs.ra + raoff),
dec=Latitude(zenith_icrs.dec + decoff),
stokes=stokes_radec,
spectral_type="flat",
)
coherency_matrix_local = np.zeros([2, 2, ntimes], dtype="complex128") * units.Jy
alts = np.zeros(ntimes)
azs = np.zeros(ntimes)
for ti, time in enumerate(times):
source.update_positions(time, telescope_location=array_location)
alt, az = source.alt_az
assert alt == src_astropy_altaz[ti].alt.radian
assert az == src_astropy_altaz[ti].az.radian
alts[ti] = alt
azs[ti] = az
coherency_tmp = source.coherency_calc().squeeze()
coherency_matrix_local[:, :, ti] = coherency_tmp
zas = np.pi / 2.0 - alts
Jbeam = analytic_beam_jones(zas, azs)
coherency_instr_local = np.einsum(
"ab...,bc...,dc...->ad...", Jbeam, coherency_matrix_local, np.conj(Jbeam)
)
expected_instr_local = (
np.array(
[
[
[
0.60572311 - 1.08420217e-19j,
0.60250361 + 5.42106496e-20j,
0.5999734 + 0.00000000e00j,
0.59400581 + 0.00000000e00j,
0.58875092 + 0.00000000e00j,
],
[
0.14530468 + 4.99646383e-02j,
0.14818987 + 4.99943414e-02j,
0.15001773 + 5.00000000e-02j,
0.15342311 + 4.99773672e-02j,
0.15574023 + 4.99307016e-02j,
],
],
[
[
0.14530468 - 4.99646383e-02j,
0.14818987 - 4.99943414e-02j,
0.15001773 - 5.00000000e-02j,
0.15342311 - 4.99773672e-02j,
0.15574023 - 4.99307016e-02j,
],
[
0.39342384 - 1.08420217e-19j,
0.39736029 + 2.71045133e-20j,
0.4000266 + 0.00000000e00j,
0.40545359 + 0.00000000e00j,
0.40960028 + 0.00000000e00j,
],
],
]
)
* units.Jy
)
assert units.quantity.allclose(coherency_instr_local, expected_instr_local)
def test_polarized_source_smooth_visibilities():
"""Test that visibilities change smoothly as a polarized source transits."""
array_location = EarthLocation(lat="-30d43m17.5s", lon="21d25m41.9s", height=1073.0)
time0 = Time("2015-03-01 18:00:00", scale="utc", location=array_location)
ha_off = 1
ha_delta = 0.01
time_offsets = np.arange(-ha_off, ha_off + ha_delta, ha_delta)
zero_indx = np.argmin(np.abs(time_offsets))
# make sure we get a true zenith time
time_offsets[zero_indx] = 0.0
times = time0 + time_offsets * units.hr
ntimes = times.size
zenith = SkyCoord(
alt=90.0 * units.deg,
az=0 * units.deg,
frame="altaz",
obstime=time0,
location=array_location,
)
zenith_icrs = zenith.transform_to("icrs")
src_astropy = SkyCoord(
ra=zenith_icrs.ra, dec=zenith_icrs.dec, obstime=times, location=array_location
)
src_astropy_altaz = src_astropy.transform_to("altaz")
assert np.isclose(src_astropy_altaz.alt.rad[zero_indx], np.pi / 2)
stokes_radec = [1, -0.2, 0.3, 0.1] * units.Jy
source = SkyModel(
name="icrs_zen",
ra=zenith_icrs.ra,
dec=zenith_icrs.dec,
stokes=stokes_radec,
spectral_type="flat",
)
coherency_matrix_local = np.zeros([2, 2, ntimes], dtype="complex128") * units.Jy
alts = np.zeros(ntimes)
azs = np.zeros(ntimes)
for ti, time in enumerate(times):
source.update_positions(time, telescope_location=array_location)
alt, az = source.alt_az
assert alt == src_astropy_altaz[ti].alt.radian
assert az == src_astropy_altaz[ti].az.radian
alts[ti] = alt
azs[ti] = az
coherency_tmp = source.coherency_calc().squeeze()
coherency_matrix_local[:, :, ti] = coherency_tmp
zas = np.pi / 2.0 - alts
Jbeam = analytic_beam_jones(zas, azs)
coherency_instr_local = np.einsum(
"ab...,bc...,dc...->ad...", Jbeam, coherency_matrix_local, np.conj(Jbeam)
)
# test that all the instrumental coherencies are smooth
t_diff_sec = np.diff(times.jd) * 24 * 3600
for pol_i in [0, 1]:
for pol_j in [0, 1]:
real_coherency = coherency_instr_local[pol_i, pol_j, :].real.value
real_derivative = np.diff(real_coherency) / t_diff_sec
real_derivative_diff = np.diff(real_derivative)
assert np.max(np.abs(real_derivative_diff)) < 1e-6
imag_coherency = coherency_instr_local[pol_i, pol_j, :].imag.value
imag_derivative = np.diff(imag_coherency) / t_diff_sec
imag_derivative_diff = np.diff(imag_derivative)
assert np.max(np.abs(imag_derivative_diff)) < 1e-6
# test that the stokes coherencies are smooth
stokes_instr_local = skyutils.coherency_to_stokes(coherency_instr_local)
for pol_i in range(4):
real_stokes = stokes_instr_local[pol_i, :].real.value
real_derivative = np.diff(real_stokes) / t_diff_sec
real_derivative_diff = np.diff(real_derivative)
assert np.max(np.abs(real_derivative_diff)) < 1e-6
imag_stokes = stokes_instr_local[pol_i, :].imag.value
assert np.all(imag_stokes == 0)
@pytest.mark.filterwarnings("ignore:This method reads an old 'healvis' style healpix")
def test_read_healpix_hdf5_old(healpix_data):
m = np.arange(healpix_data["npix"])
m[healpix_data["ipix_disc"]] = healpix_data["npix"] - 1
indices = np.arange(healpix_data["npix"])
with pytest.warns(
DeprecationWarning,
match="This function is deprecated, use `SkyModel.read_skyh5` or `SkyModel.read_healpix_hdf5` instead.",
):
hpmap, inds, freqs = skymodel.read_healpix_hdf5(
os.path.join(SKY_DATA_PATH, "healpix_disk.hdf5")
)
assert np.allclose(hpmap[0, :], m)
assert np.allclose(inds, indices)
assert np.allclose(freqs, healpix_data["frequencies"])
@pytest.mark.filterwarnings("ignore:This method reads an old 'healvis' style healpix")
def test_healpix_to_sky(healpix_data, healpix_disk_old):
healpix_filename = os.path.join(SKY_DATA_PATH, "healpix_disk.hdf5")
with h5py.File(healpix_filename, "r") as fileobj:
hpmap = fileobj["data"][0, ...] # Remove Nskies axis.
indices = fileobj["indices"][()]
freqs = fileobj["freqs"][()]
history = np.string_(fileobj["history"][()]).tobytes().decode("utf8")
hmap_orig = np.arange(healpix_data["npix"])
hmap_orig[healpix_data["ipix_disc"]] = healpix_data["npix"] - 1
hmap_orig = np.repeat(hmap_orig[None, :], 10, axis=0)
hmap_orig = hmap_orig * units.K
with pytest.warns(
DeprecationWarning,
match="This function is deprecated, use `SkyModel.read_skyh5` or `SkyModel.read_healpix_hdf5` instead.",
):
sky = skymodel.healpix_to_sky(hpmap, indices, freqs)
assert isinstance(sky.stokes, Quantity)
sky.history = history + sky.pyradiosky_version_str
assert healpix_disk_old == sky
assert units.quantity.allclose(healpix_disk_old.stokes[0], hmap_orig)
@pytest.mark.filterwarnings("ignore:This method reads an old 'healvis' style healpix")
def test_units_healpix_to_sky(healpix_data, healpix_disk_old):
healpix_filename = os.path.join(SKY_DATA_PATH, "healpix_disk.hdf5")
with h5py.File(healpix_filename, "r") as fileobj:
hpmap = fileobj["data"][0, ...] # Remove Nskies axis.
freqs = fileobj["freqs"][()]
freqs = freqs * units.Hz
brightness_temperature_conv = units.brightness_temperature(
freqs, beam_area=healpix_data["pixel_area"]
)
stokes = (hpmap.T * units.K).to(units.Jy, brightness_temperature_conv).T
sky = healpix_disk_old
sky.healpix_to_point()
assert units.quantity.allclose(sky.stokes[0, 0], stokes[0])
@pytest.mark.filterwarnings("ignore:recarray flux columns will no longer be labeled")
def test_healpix_recarray_loop(healpix_data, healpix_disk_new):
skyobj = healpix_disk_new
skyarr = skyobj.to_recarray()
skyobj2 = SkyModel.from_recarray(skyarr, history=skyobj.history)
assert skyobj.component_type == "healpix"
assert skyobj2.component_type == "healpix"
assert skyobj == skyobj2
@pytest.mark.filterwarnings("ignore:This method reads an old 'healvis' style healpix")
@pytest.mark.filterwarnings("ignore:This method writes an old 'healvis' style healpix")
def test_read_write_healpix_oldfunction(tmp_path, healpix_data):
healpix_filename = os.path.join(SKY_DATA_PATH, "healpix_disk.hdf5")
with h5py.File(healpix_filename, "r") as fileobj:
hpmap = fileobj["data"][0, ...] # Remove Nskies axis.
indices = fileobj["indices"][()]
freqs = fileobj["freqs"][()]
freqs = freqs * units.Hz
filename = os.path.join(tmp_path, "tempfile.hdf5")
with pytest.warns(
DeprecationWarning,
match="This function is deprecated, use `SkyModel.write_skyh5` instead.",
):
with pytest.raises(
ValueError, match="Need to provide nside if giving a subset of the map."
):
skymodel.write_healpix_hdf5(
filename, hpmap, indices[:10], freqs.to("Hz").value
)
with pytest.warns(
DeprecationWarning,
match="This function is deprecated, use `SkyModel.write_skyh5` instead.",
):
with pytest.raises(ValueError, match="Invalid map shape"):
skymodel.write_healpix_hdf5(
filename,
hpmap,
indices[:10],
freqs.to("Hz").value,
nside=healpix_data["nside"],
)
with pytest.warns(
DeprecationWarning,
match="This function is deprecated, use `SkyModel.write_skyh5` instead.",
):
skymodel.write_healpix_hdf5(filename, hpmap, indices, freqs.to("Hz").value)
with pytest.warns(
DeprecationWarning,
match="This function is deprecated, use `SkyModel.read_skyh5` or `SkyModel.read_healpix_hdf5` instead.",
):
hpmap_new, inds_new, freqs_new = skymodel.read_healpix_hdf5(filename)
assert np.allclose(hpmap_new, hpmap)
assert np.allclose(inds_new, indices)
assert np.allclose(freqs_new, freqs.to("Hz").value)
@pytest.mark.filterwarnings("ignore:This method reads an old 'healvis' style healpix")
@pytest.mark.filterwarnings("ignore:This method writes an old 'healvis' style healpix")
def test_read_write_healpix_old(tmp_path, healpix_data, healpix_disk_old):
test_filename = os.path.join(tmp_path, "tempfile.hdf5")
sky = healpix_disk_old
with pytest.warns(
DeprecationWarning,
match="This method writes an old 'healvis' style healpix HDF5 file. Support for "
"this file format is deprecated and will be removed in version 0.3.0.",
):
sky.write_healpix_hdf5(test_filename)
with pytest.warns(
DeprecationWarning,
match="This method reads an old 'healvis' style healpix HDF5 file. Support for "
"this file format is deprecated and will be removed in version 0.3.0.",
):
sky2 = SkyModel.from_healpix_hdf5(test_filename)
assert sky == sky2
@pytest.mark.filterwarnings("ignore:This method reads an old 'healvis' style healpix")
def test_read_write_healpix_old_cut_sky(tmp_path, healpix_data, healpix_disk_old):
test_filename = os.path.join(tmp_path, "tempfile.hdf5")
sky = healpix_disk_old
sky.select(component_inds=np.arange(10))
sky.check()
with pytest.warns(
DeprecationWarning,
match="This method writes an old 'healvis' style healpix HDF5 file. Support for "
"this file format is deprecated and will be removed in version 0.3.0.",
):
sky.write_healpix_hdf5(test_filename)
with pytest.warns(
DeprecationWarning,
match="This method reads an old 'healvis' style healpix HDF5 file. Support for "
"this file format is deprecated and will be removed in version 0.3.0.",
):
sky2 = SkyModel.from_healpix_hdf5(test_filename)
assert sky == sky2
@pytest.mark.filterwarnings("ignore:This method reads an old 'healvis' style healpix")
def test_read_write_healpix_old_nover_history(tmp_path, healpix_data, healpix_disk_old):
test_filename = os.path.join(tmp_path, "tempfile.hdf5")
sky = healpix_disk_old
sky.history = sky.pyradiosky_version_str
with pytest.warns(
DeprecationWarning,
match="This method writes an old 'healvis' style healpix HDF5 file. Support for "
"this file format is deprecated and will be removed in version 0.3.0.",
):
sky.write_healpix_hdf5(test_filename)
with pytest.warns(
DeprecationWarning,
match="This method reads an old 'healvis' style healpix HDF5 file. Support for "
"this file format is deprecated and will be removed in version 0.3.0.",
):
sky2 = SkyModel.from_healpix_hdf5(test_filename)
assert sky == sky2
@pytest.mark.filterwarnings("ignore:This method writes an old 'healvis' style healpix")
def test_write_healpix_error(tmp_path):
skyobj = SkyModel.from_gleam_catalog(GLEAM_vot)
test_filename = os.path.join(tmp_path, "tempfile.hdf5")
with pytest.raises(
ValueError,
match="component_type must be 'healpix' to use this method.",
):
skyobj.write_healpix_hdf5(test_filename)
def test_healpix_import_err(zenith_skymodel):
try:
import astropy_healpix
astropy_healpix.nside_to_npix(2 ** 3)
except ImportError:
errstr = "The astropy-healpix module must be installed to use HEALPix methods"
Npix = 12
hpmap = np.arange(Npix)
inds = hpmap
freqs = np.zeros(1)
with pytest.raises(ImportError, match=errstr):
skymodel.healpix_to_sky(hpmap, inds, freqs)
with pytest.raises(ImportError, match=errstr):
SkyModel(
nside=8, hpx_inds=[0], stokes=[1.0, 0.0, 0.0, 0.0], spectral_type="flat"
)
with pytest.raises(ImportError, match=errstr):
SkyModel.from_healpix_hdf5(os.path.join(SKY_DATA_PATH, "healpix_disk.hdf5"))
with pytest.raises(ImportError, match=errstr):
skymodel.write_healpix_hdf5("filename.hdf5", hpmap, inds, freqs)
zenith_skymodel.nside = 32
zenith_skymodel.hpx_inds = 0
with pytest.raises(ImportError, match=errstr):
zenith_skymodel.point_to_healpix()
zenith_skymodel._set_component_type_params("healpix")
with pytest.raises(ImportError, match=errstr):
zenith_skymodel.healpix_to_point()
def test_healpix_positions(tmp_path, time_location):
pytest.importorskip("astropy_healpix")
import astropy_healpix
# write out a healpix file, read it back in and check that it is as expected
nside = 8
Npix = astropy_healpix.nside_to_npix(nside)
freqs = np.arange(100, 100.5, 0.1) * 1e6
Nfreqs = len(freqs)
hpx_map = np.zeros((Nfreqs, Npix))
ipix = 357
# Want 1 [Jy] converted to [K sr]
hpx_map[:, ipix] = skyutils.jy_to_ksr(freqs)
stokes = np.zeros((4, Nfreqs, Npix))
stokes[0] = hpx_map
with pytest.raises(
ValueError,
match="For healpix component types, the stokes parameter must have a "
"unit that can be converted to",
):
SkyModel(
nside=nside,
hpx_inds=range(Npix),
stokes=stokes * units.m,
freq_array=freqs * units.Hz,
spectral_type="full",
)
with pytest.raises(
ValueError,
match="For healpix component types, the coherency_radec parameter must have a "
"unit that can be converted to",
):
skyobj = SkyModel(
nside=nside,
hpx_inds=range(Npix),
stokes=stokes * units.K,
freq_array=freqs * units.Hz,
spectral_type="full",
)
skyobj.coherency_radec = skyobj.coherency_radec.value * units.m
skyobj.check()
with pytest.warns(
DeprecationWarning,
match="In version 0.2.0, stokes will be required to be an astropy "
"Quantity with units that are convertable to one of",
):
skyobj = SkyModel(
nside=nside,
hpx_inds=range(Npix),
stokes=stokes,
freq_array=freqs * units.Hz,
spectral_type="full",
)
filename = os.path.join(tmp_path, "healpix_single.hdf5")
with pytest.warns(
DeprecationWarning,
match="This method writes an old 'healvis' style healpix HDF5 file. Support "
"for this file format is deprecated and will be removed in version 0.3.0.",
):
skyobj.write_healpix_hdf5(filename)
time, array_location = time_location
ra, dec = astropy_healpix.healpix_to_lonlat(ipix, nside)
skycoord_use = SkyCoord(ra, dec, frame="icrs")
source_altaz = skycoord_use.transform_to(
AltAz(obstime=time, location=array_location)
)
alt_az = np.array([source_altaz.alt.value, source_altaz.az.value])
src_az = Angle(alt_az[1], unit="deg")
src_alt = Angle(alt_az[0], unit="deg")
src_za = Angle("90.d") - src_alt
src_l = np.sin(src_az.rad) * np.sin(src_za.rad)
src_m = np.cos(src_az.rad) * np.sin(src_za.rad)
src_n = np.cos(src_za.rad)
with pytest.warns(
DeprecationWarning,
match="This method reads an old 'healvis' style healpix HDF5 file. Support for "
"this file format is deprecated and will be removed in version 0.3.0.",
):
sky2 = SkyModel.from_healpix_hdf5(filename)
time.location = array_location
sky2.update_positions(time, array_location)
src_alt_az = sky2.alt_az
assert np.isclose(src_alt_az[0][ipix], src_alt.rad)
assert np.isclose(src_alt_az[1][ipix], src_az.rad)
src_lmn = sky2.pos_lmn
assert np.isclose(src_lmn[0][ipix], src_l)
assert np.isclose(src_lmn[1][ipix], src_m)
assert np.isclose(src_lmn[2][ipix], src_n)
@pytest.mark.filterwarnings("ignore:recarray flux columns will no longer be labeled")
@pytest.mark.filterwarnings("ignore:The reference_frequency is aliased as `frequency`")
@pytest.mark.parametrize("spec_type", ["flat", "subband", "spectral_index", "full"])
def test_array_to_skymodel_loop(spec_type):
spectral_type = "subband" if spec_type == "full" else spec_type
sky = SkyModel.from_gleam_catalog(GLEAM_vot, spectral_type=spectral_type)
if spec_type == "full":
sky.spectral_type = "full"
arr = sky.to_recarray()
sky2 = SkyModel.from_recarray(arr)
assert sky == sky2
if spec_type == "flat":
# again with no reference_frequency field
reference_frequency = sky.reference_frequency
sky.reference_frequency = None
arr = sky.to_recarray()
sky2 = SkyModel.from_recarray(arr)
assert sky == sky2
# again with flat & freq_array
sky.freq_array = np.atleast_1d(np.unique(reference_frequency))
sky2 = SkyModel.from_recarray(sky.to_recarray())
assert sky == sky2
def test_param_flux_cuts():
# Check that min/max flux limits in test params work.
skyobj = SkyModel.from_gleam_catalog(GLEAM_vot)
skyobj2 = skyobj.source_cuts(
min_flux=0.2 * units.Jy, max_flux=1.5 * units.Jy, inplace=False
)
for sI in skyobj2.stokes[0, 0, :]:
assert np.all(0.2 * units.Jy < sI < 1.5 * units.Jy)
components_to_keep = np.where(
(np.min(skyobj.stokes[0, :, :], axis=0) > 0.2 * units.Jy)
& (np.max(skyobj.stokes[0, :, :], axis=0) < 1.5 * units.Jy)
)[0]
skyobj3 = skyobj.select(component_inds=components_to_keep, inplace=False)
assert skyobj2 == skyobj3
@pytest.mark.parametrize("spec_type", ["flat", "subband", "spectral_index", "full"])
def test_select(spec_type, time_location):
time, array_location = time_location
skyobj = SkyModel.from_gleam_catalog(GLEAM_vot)
skyobj.beam_amp = np.ones((4, skyobj.Nfreqs, skyobj.Ncomponents))
skyobj.extended_model_group = np.empty(skyobj.Ncomponents, dtype=str)
skyobj.update_positions(time, array_location)
skyobj2 = skyobj.select(component_inds=np.arange(10), inplace=False)
skyobj.select(component_inds=np.arange(10))
assert skyobj == skyobj2
def test_select_none():
skyobj = SkyModel.from_gleam_catalog(GLEAM_vot)
skyobj2 = skyobj.select(component_inds=None, inplace=False)
assert skyobj2 == skyobj
skyobj.select(component_inds=None)
assert skyobj2 == skyobj
@pytest.mark.filterwarnings("ignore:recarray flux columns will no longer be labeled")
@pytest.mark.filterwarnings("ignore:The reference_frequency is aliased as `frequency`")
@pytest.mark.parametrize(
"spec_type, init_kwargs, cut_kwargs",
[
("flat", {}, {}),
("flat", {"reference_frequency": np.ones(20) * 200e6 * units.Hz}, {}),
("full", {"freq_array": np.array([1e8, 1.5e8]) * units.Hz}, {}),
(
"subband",
{"freq_array": np.array([1e8, 1.5e8]) * units.Hz},
{"freq_range": np.array([0.9e8, 2e8]) * units.Hz},
),
(
"subband",
{"freq_array": np.array([1e8, 1.5e8]) * units.Hz},
{"freq_range": np.array([1.1e8, 2e8]) * units.Hz},
),
(
"flat",
{"freq_array": np.array([1e8]) * units.Hz},
{"freq_range": np.array([0.9e8, 2e8]) * units.Hz},
),
],
)
def test_flux_cuts(spec_type, init_kwargs, cut_kwargs):
Nsrcs = 20
minflux = 0.5
maxflux = 3.0
ids = ["src{}".format(i) for i in range(Nsrcs)]
ras = Longitude(np.random.uniform(0, 360.0, Nsrcs), units.deg)
decs = Latitude(np.linspace(-90, 90, Nsrcs), units.deg)
stokes = np.zeros((4, 1, Nsrcs)) * units.Jy
if spec_type == "flat":
stokes[0, :, :] = np.linspace(minflux, maxflux, Nsrcs) * units.Jy
else:
stokes = np.zeros((4, 2, Nsrcs)) * units.Jy
stokes[0, 0, :] = np.linspace(minflux, maxflux / 2.0, Nsrcs) * units.Jy
stokes[0, 1, :] = np.linspace(minflux * 2.0, maxflux, Nsrcs) * units.Jy
# Add a nonzero polarization.
Ucomp = maxflux + 1.3
stokes[2, :, :] = Ucomp * units.Jy # Should not be affected by cuts.
skyobj = SkyModel(
name=ids,
ra=ras,
dec=decs,
stokes=stokes,
spectral_type=spec_type,
**init_kwargs,
)
minI_cut = 1.0
maxI_cut = 2.3
skyobj.source_cuts(
latitude_deg=30.0,
min_flux=minI_cut,
max_flux=maxI_cut,
**cut_kwargs,
)
cut_sourcelist = skyobj.to_recarray()
if "freq_range" in cut_kwargs and np.min(
cut_kwargs["freq_range"] > np.min(init_kwargs["freq_array"])
):
assert np.all(cut_sourcelist["I"] < maxI_cut)
else:
assert np.all(cut_sourcelist["I"] > minI_cut)
assert np.all(cut_sourcelist["I"] < maxI_cut)
assert np.all(cut_sourcelist["U"] == Ucomp)
@pytest.mark.parametrize(
"spec_type, init_kwargs, cut_kwargs, error_category, error_message",
[
(
"spectral_index",
{
"reference_frequency": np.ones(20) * 200e6 * units.Hz,
"spectral_index": np.ones(20) * 0.8,
},
{},
NotImplementedError,
"Flux cuts with spectral index type objects is not supported yet.",
),
(
"full",
{"freq_array": np.array([1e8, 1.5e8]) * units.Hz},
{"freq_range": [0.9e8, 2e8]},
ValueError,
"freq_range must be an astropy Quantity.",
),
(
"subband",
{"freq_array": np.array([1e8, 1.5e8]) * units.Hz},
{"freq_range": 0.9e8 * units.Hz},
ValueError,
"freq_range must have 2 elements.",
),
(
"subband",
{"freq_array": np.array([1e8, 1.5e8]) * units.Hz},
{"freq_range": np.array([1.1e8, 1.4e8]) * units.Hz},
ValueError,
"No frequencies in freq_range.",
),
],
)
def test_source_cut_error(
spec_type, init_kwargs, cut_kwargs, error_category, error_message
):
Nsrcs = 20
minflux = 0.5
maxflux = 3.0
ids = ["src{}".format(i) for i in range(Nsrcs)]
ras = Longitude(np.random.uniform(0, 360.0, Nsrcs), units.deg)
decs = Latitude(np.linspace(-90, 90, Nsrcs), units.deg)
stokes = np.zeros((4, 1, Nsrcs)) * units.Jy
if spec_type in ["flat", "spectral_index"]:
stokes[0, :, :] = np.linspace(minflux, maxflux, Nsrcs) * units.Jy
else:
stokes = np.zeros((4, 2, Nsrcs)) * units.Jy
stokes[0, 0, :] = np.linspace(minflux, maxflux / 2.0, Nsrcs) * units.Jy
stokes[0, 1, :] = np.linspace(minflux * 2.0, maxflux, Nsrcs) * units.Jy
skyobj = SkyModel(
name=ids,
ra=ras,
dec=decs,
stokes=stokes,
spectral_type=spec_type,
**init_kwargs,
)
with pytest.raises(error_category, match=error_message):
minI_cut = 1.0
maxI_cut = 2.3
skyobj.source_cuts(
latitude_deg=30.0,
min_flux=minI_cut,
max_flux=maxI_cut,
**cut_kwargs,
)
@pytest.mark.filterwarnings("ignore:recarray flux columns will no longer be labeled")
def test_circumpolar_nonrising(time_location):
# Check that the source_cut function correctly identifies sources that are circumpolar or
# won't rise.
# Working with an observatory at the HERA latitude.
time, location = time_location
Ntimes = 100
Nras = 20
Ndecs = 20
Nsrcs = Nras * Ndecs
times = time + TimeDelta(np.linspace(0, 1.0, Ntimes), format="jd")
lon = location.lon.deg
ra = np.linspace(lon - 90, lon + 90, Nras)
dec = np.linspace(-90, 90, Ndecs)
#Standard python libraries
import copy
import time
import os
#Dependencies
import numpy as np
import warnings
import matplotlib.pyplot as plt
from pyfftw.interfaces.numpy_fft import fft, fftshift, ifft, ifftshift, fftfreq
#UF2
from ultrafastultrafast.RK_core import RK_Wavepackets
class RK_TransientAbsorption(RK_Wavepackets):
"""This class uses WavepacketBuilder to calculate the perturbative
wavepackets needed to calculate the frequency-resolved pump-probe spectrum """
def __init__(self,parameter_file_path,*, num_conv_points=138,
initial_state=0,dt=0.1,total_num_time_points = 2000):
super().__init__(parameter_file_path, num_conv_points=num_conv_points,
initial_state=initial_state, dt=dt,
total_num_time_points = total_num_time_points)
def set_pulse_times(self,delay_time):
"""Sets a list of pulse times for the pump-probe calculation assuming
that the lowest-order, 4-wave mixing signals will be calculated, and so 4
interactions will be considered
"""
self.pulse_times = [0,0,delay_time,delay_time]
def set_pulse_shapes(self,pump_field,probe_field,*,plot_fields = True):
"""Sets a list of 4 pulse amplitudes, given an input pump shape and probe
shape. Assumes 4-wave mixing signals, and so 4 interactions
"""
self.efields = [pump_field,pump_field,probe_field,probe_field]
pump_tail = np.max(np.abs([pump_field[0],pump_field[-1]]))
probe_tail = np.max(np.abs([probe_field[0],probe_field[-1]]))
if pump_field.size != self.efield_t.size:
warnings.warn('Pump must be evaluated on efield_t, the grid defined by dt and num_conv_points')
if probe_field.size != self.efield_t.size:
warnings.warn('Probe must be evaluated on efield_t, the grid defined by dt and num_conv_points')
if pump_tail > np.max(np.abs(pump_field))/100:
warnings.warn('Consider using larger num_conv_points, pump does not decay to less than 1% of maximum value in time domain')
if probe_tail > np.max(np.abs(probe_field))/100:
warnings.warn('Consider using larger num_conv_points, probe does not decay to less than 1% of maximum value in time domain')
pump_fft = fftshift(fft(ifftshift(pump_field)))*self.dt
probe_fft = fftshift(fft(ifftshift(probe_field)))*self.dt
pump_fft_tail = np.max(np.abs([pump_fft[0],pump_fft[-1]]))
probe_fft_tail = np.max(np.abs([probe_fft[0],probe_fft[-1]]))
if pump_fft_tail > np.max(np.abs(pump_fft))/100:
warnings.warn('''Consider using smaller value of dt, pump does not decay to less than 1% of maximum value in frequency domain''')
if probe_fft_tail > np.max(np.abs(probe_fft))/100:
warnings.warn('''Consider using smaller value of dt, probe does not decay to less than 1% of maximum value in frequency domain''')
if plot_fields:
fig, axes = plt.subplots(2,2)
l1,l2, = axes[0,0].plot(self.efield_t,np.real(pump_field),self.efield_t,np.imag(pump_field))
plt.legend([l1,l2],['Real','Imag'])
axes[0,1].plot(self.efield_w,np.real(pump_fft),self.efield_w,np.imag(pump_fft))
axes[1,0].plot(self.efield_t,np.real(probe_field),self.efield_t,np.imag(probe_field))
axes[1,1].plot(self.efield_w,np.real(probe_fft),self.efield_w,np.imag(probe_fft))
axes[0,0].set_ylabel('Pump Amp')
axes[1,0].set_ylabel('Probe Amp')
axes[1,0].set_xlabel('Time')
axes[1,1].set_xlabel('Frequency')
fig.suptitle('Check that pump and probe are well-resolved in time and frequency')
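# Illustrative usage sketch (names and values assumed, not from the original
# file): with an instance `ta = RK_TransientAbsorption(param_file)`, one would
# typically build complex pump/probe envelopes on the ta.efield_t grid, e.g.
#   pump = np.exp(-ta.efield_t**2 / (2 * 1.0**2)) + 0j
#   probe = pump.copy()
#   ta.set_pulse_shapes(pump, probe, plot_fields=False)
# before computing spectra with the delay-scan method further below.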
def calculate_pump_wavepackets(self):
"""Calculates the wavepackets that involve only the pump, and therefore
do not need to be recalculated for different delay times
"""
# First order
self.psi1_a = self.up(self.psi0, pulse_number = 0)
self.psi1_b = self.up(self.psi0, pulse_number = 1)
# Second order
self.psi2_ab = self.down(self.psi1_a, pulse_number = 1)
def calculate_probe_wavepackets(self):
# First order
self.psi1_c = self.up(self.psi0, pulse_number = 2,gamma=self.gamma)
# Second order
self.psi2_ac = self.down(self.psi1_a, pulse_number = 2,gamma=self.gamma)
if 'DEM' in self.manifolds:
self.psi2_bc = self.up(self.psi1_b, pulse_number = 2,gamma=self.gamma)
# Third order
self.psi3_abc = self.up(self.psi2_ab, pulse_number = 2,gamma=self.gamma,
new_manifold_mask=self.psi1_c['bool_mask'].copy())
def calculate_overlap_wavepackets(self):
"""These diagrams only contribute when the two pulses either overlap,
or when the probe comes before the pump
"""
self.psi2_ca = self.down(self.psi1_c, pulse_number = 0)
self.psi3_cab = self.up(self.psi2_ca, pulse_number = 1,
new_manifold_mask=self.psi1_b['bool_mask'].copy())
if 'DEM' in self.manifolds:
self.psi2_cb = self.up(self.psi1_c, pulse_number = 1)
self.psi3_cba = self.down(self.psi2_cb, pulse_number = 0,
new_manifold_mask=self.psi1_a['bool_mask'].copy())
self.psi3_bca = self.down(self.psi2_bc, pulse_number = 0,
new_manifold_mask=self.psi1_a['bool_mask'].copy())
### Normal Diagrams
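# GSB = ground-state bleach, SE = stimulated emission, ESA = excited-state
# absorption (the standard pump-probe signal contributions; names match the
# methods below).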
def GSB1(self):
return self.dipole_expectation(self.psi2_ab, self.psi1_c)
def GSB2(self):
return self.dipole_expectation(self.psi0,self.psi3_abc)
def SE(self):
return self.dipole_expectation(self.psi2_ac,self.psi1_b)
def ESA(self):
return self.dipole_expectation(self.psi1_a,self.psi2_bc)
### Overlap Diagrams
def GSB3(self):
return self.dipole_expectation(self.psi0,self.psi3_cab)
def extra4(self):
return self.dipole_expectation(self.psi0,self.psi3_cba)
def extra5(self):
return self.dipole_expectation(self.psi0,self.psi3_bca)
def extra6(self):
return self.dipole_expectation(self.psi1_a,self.psi2_cb)
### Calculating Spectra
def calculate_normal_signals(self):
tot_sig = self.SE() + self.GSB1() + self.GSB2()
if 'DEM' in self.manifolds:
tot_sig += self.ESA()
return tot_sig
def calculate_overlap_signals(self):
overlap_sig = self.GSB3()
if 'DEM' in self.manifolds:
additional_sig = self.extra4() + self.extra5() + self.extra6()
overlap_sig += additional_sig
return overlap_sig
def calculate_pump_probe_spectrum(self,delay_time,*,
recalculate_pump_wavepackets=True,
local_oscillator_number = -1):
"""Calculates the pump-probe spectrum for the delay_time specified.
Boolean arguments:
recalculate_pump_wavepackets - must be set to True if any aspect of the electric
field has changed since the previous calculation. Otherwise they can be re-used.
"""
delay_index, delay_time = self.get_closest_index_and_value(delay_time,
self.t)
self.set_pulse_times(delay_time)
if recalculate_pump_wavepackets:
self.calculate_pump_wavepackets()
self.calculate_probe_wavepackets()
signal_field = self.calculate_normal_signals()
if delay_index < self.efield_t.size*3/2:
# The pump and probe are still considered to be over-lapping
self.calculate_overlap_wavepackets()
signal_field += self.calculate_overlap_signals()
signal = self.polarization_to_signal(signal_field, local_oscillator_number = local_oscillator_number)
return signal
def calculate_pump_probe_spectra_vs_delay_time(self,delay_times):
"""
"""
self.delay_times = delay_times
min_sig_decay_time = self.t[-1] - (delay_times[-1])
if min_sig_decay_time < 5/self.gamma:
if min_sig_decay_time < 0:
warnings.warn("""Time mesh is not long enough to support requested
number of delay time points""")
else:
warnings.warn("""Spectra may not be well-resolved for all delay times.
For the final delay time, the signal decays to {:.7f} of its original value.
Consider selecting larger gamma value or a longer time
mesh""".format(np.exp(-min_sig_decay_time*self.gamma)))
t0 = time.time()
self.set_pulse_times(0)
t_pump = time.time()
self.calculate_pump_wavepackets()
signal = np.zeros((self.w.size, delay_times.size))
import numpy as np
from scipy import interpolate
import pdb
import tqdm
def _estim_dist_old(quantiles, percentiles, y_min, y_max, smooth_tails, tau):
""" Estimate CDF from list of quantiles, with smoothing """
noise = np.random.uniform(low=0.0, high=1e-8, size=((len(quantiles),)))
noise_monotone = np.sort(noise)
quantiles = quantiles + noise_monotone
# Smooth tails
def interp1d(x, y, a, b):
return interpolate.interp1d(x, y, bounds_error=False, fill_value=(a, b), assume_sorted=True)
cdf = interp1d(quantiles, percentiles, 0.0, 1.0)
inv_cdf = interp1d(percentiles, quantiles, y_min, y_max)
if smooth_tails:
# Uniform smoothing of tails
quantiles_smooth = quantiles
tau_lo = tau
tau_hi = 1-tau
q_lo = inv_cdf(tau_lo)
q_hi = inv_cdf(tau_hi)
idx_lo = np.where(percentiles < tau_lo)[0]
idx_hi = np.where(percentiles > tau_hi)[0]
if len(idx_lo) > 0:
quantiles_smooth[idx_lo] = np.linspace(quantiles[0], q_lo, num=len(idx_lo))
if len(idx_hi) > 0:
quantiles_smooth[idx_hi] = np.linspace(q_hi, quantiles[-1], num=len(idx_hi))
cdf = interp1d(quantiles_smooth, percentiles, 0.0, 1.0)
inv_cdf = interp1d(percentiles, quantiles_smooth, y_min, y_max)
return cdf, inv_cdf
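# Minimal usage sketch (illustrative; the quantile values are made up): build a
# CDF / inverse-CDF pair from a small set of estimated quantiles and query it.
def _example_estim_dist():
    percentiles = np.linspace(0.05, 0.95, 19)
    quantiles = np.sort(np.random.normal(size=19))
    cdf, inv_cdf = _estim_dist_old(
        quantiles, percentiles, y_min=-5.0, y_max=5.0, smooth_tails=True, tau=0.1
    )
    return cdf(0.0), inv_cdf(0.5)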
def _estim_dist(quantiles, percentiles, y_min, y_max, smooth_tails, tau):
""" Estimate CDF from list of quantiles, with smoothing """
noise = np.random.uniform(low=0.0, high=1e-5, size=((len(quantiles),)))
noise_monotone = np.sort(noise)
quantiles = quantiles + noise_monotone
# Smooth tails
def interp1d(x, y, a, b):
return interpolate.interp1d(x, y, bounds_error=False, fill_value=(a, b), assume_sorted=True)
cdf = interp1d(quantiles, percentiles, 0.0, 1.0)
inv_cdf = interp1d(percentiles, quantiles, y_min, y_max)
if smooth_tails:
# Uniform smoothing of tails
quantiles_smooth = quantiles
tau_lo = tau
tau_hi = 1-tau
q_lo = inv_cdf(tau_lo)
q_hi = inv_cdf(tau_hi)
idx_lo = np.where(percentiles < tau_lo)[0]
idx_hi = np.where(percentiles > tau_hi)[0]
if len(idx_lo) > 0:
quantiles_smooth[idx_lo] = np.linspace(quantiles[0], q_lo, num=len(idx_lo))
if len(idx_hi) > 0:
quantiles_smooth[idx_hi] = np.linspace(q_hi, quantiles[-1], num=len(idx_hi))
cdf = interp1d(quantiles_smooth, percentiles, 0.0, 1.0)
inv_cdf = interp1d(percentiles, quantiles_smooth, y_min, y_max)
# Standardize
breaks = np.linspace(y_min, y_max, num=1000, endpoint=True)
import os
import numpy as np
from PIL import Image
import pandas as pd
import torch
import torch.backends.cudnn as cudnn
from torchvision.transforms import transforms
import torchvision.transforms.functional as TF
from utils import img_utils
from gazefollowmodel.models.gazenet import GazeNet
class Infer_engine(object):
def __init__(self,rgb_transform=None,depth_transform=None,input_size=224,device="cuda",checkpoint=""):
super(Infer_engine, self).__init__()
# define torch transform
if rgb_transform is None:
transform_list = []
transform_list.append(transforms.Resize((input_size, input_size)))
transform_list.append(transforms.ToTensor())
transform_list.append(transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]))
self.rgb_transform=transforms.Compose(transform_list)
else:
self.rgb_transform=rgb_transform
if depth_transform is None:
self.depth_transform = transforms.Compose([
transforms.Resize((input_size, input_size)),
transforms.ToTensor(),
])
else:
self.depth_transform=depth_transform
# define input size
self.input_size=input_size
self.device=device
self.checkpoint=checkpoint
self.format_input=dict()
self.model_output=dict()
self.infer_output=dict()
self.eval=dict()
self.ground_truth=dict()
self.model=self.model_init()
# init the model
def model_init(self):
cudnn.deterministic = True
model=GazeNet(pretrained=False)
model=model.to(self.device)
model.eval()
checkpoint = torch.load(self.checkpoint)
model.load_state_dict(checkpoint)
return model
def format_model_input(self,rgb_path,depth_path,head_bbox,campara,eye_coord):
# load the rgbimg and depthimg
rgbimg=Image.open(rgb_path)
rgbimg = rgbimg.convert('RGB')
width, height = rgbimg.size
depthimg = Image.open(depth_path)
depthvalue=np.array(depthimg.copy())
depthvalue.flags.writeable=True
depthvalue=depthvalue/1000.0
# expand the head bounding box (in pixel coordinate )
x_min, y_min, x_max, y_max = map(float, img_utils.expand_head_box(head_bbox, [width, height]))
self.img_para = [width, height]
head = rgbimg.crop((int(x_min), int(y_min), int(x_max), int(y_max)))
self.head_loc=[]
head_channel = img_utils.get_head_box_channel(x_min, y_min, x_max, y_max, width, height,
resolution=self.input_size, coordconv=False).unsqueeze(0)
rgbimg=self.rgb_transform(rgbimg)
headimg=self.rgb_transform(head)
depthimg=self.depth_transform(depthimg)
# obtain the transformation matrix from the camera coordinate system to the eye coordinate system
T_c2e=self.getTransmatrix(eye_coord)
# obatain the gazevector in eye coordinate system
depthmap=depthimg.numpy()/1000.0
depthmap=depthmap[0]
Gaze_Vector_Space=self.getGazevectorspace(depthmap,campara,T_c2e)
self.format_input['rgbimg']=rgbimg
self.format_input['headimg']=headimg
self.format_input['headchannel']=head_channel
self.format_input['gvs']=Gaze_Vector_Space
self.format_input['depthmap']=depthvalue
# self.format_input=[rgbimg,headimg,head_channel,Gaze_Vector_Space]
def run_3DGazeFollowModel(self):
rgb_img= self.format_input['rgbimg']
h_img=self.format_input['headimg']
h_channel=self.format_input['headchannel']
gvs=self.format_input['gvs']
x_rgbimg=rgb_img.to(self.device).unsqueeze(0)
x_himg=h_img.to(self.device).unsqueeze(0)
x_hc=h_channel.to(self.device).unsqueeze(0)
x_gvs=gvs.to(self.device).unsqueeze(0)
pred_heatmap, pred_gazevector = self.model(x_rgbimg, x_gvs, x_himg, x_hc)
pred_heatmap=pred_heatmap.squeeze()
pred_heatmap = pred_heatmap.data.cpu().numpy()
pred_gazevector=pred_gazevector.data.cpu().numpy()
self.model_output["pred_heatmap"]=pred_heatmap
self.model_output["pred_gazevector"]=pred_gazevector
def inference(self,eye_3dim,campara,ratio=0.1):
#
img_W,img_H=self.img_para
#
pred_heatmap=self.model_output["pred_heatmap"]
pred_gazevector=self.model_output["pred_gazevector"]
depthmap=self.format_input["depthmap"]
img_H,img_W=depthmap.shape
# get the center of 2d proposal area
output_h,output_w=pred_heatmap.shape
pred_center = list(img_utils.argmax_pts(pred_heatmap))
pred_center[0]=pred_center[0]*img_W/output_w
pred_center[1]=pred_center[1]*img_H/output_h
pu_min=pred_center[0]-img_W*ratio/2
pu_max=pred_center[0]+img_W*ratio/2
pv_min=pred_center[1]-img_H*ratio/2
pv_max=pred_center[1]+img_H*ratio/2
if pu_min < 0:
pu_min, pu_max = 0, img_W * ratio
elif pu_max > img_W:
pu_max, pu_min = img_W, img_W - img_W * ratio
if pv_min < 0:
pv_min, pv_max = 0, img_H * ratio
elif pv_max > img_H:
pv_max, pv_min = img_H, img_H - img_H * ratio
pu_min,pu_max,pv_min,pv_max=map(int,[pu_min,pu_max,pv_min,pv_max])
self.rectangle=[pu_min,pu_max,pv_min,pv_max]
# unproject to 3d proposal area
range_depthmap=depthmap[pv_min:pv_max,pu_min:pu_max]
cx,cy,fx,fy,rm=campara
range_space_DW = np.linspace(pu_min, pu_max - 1, pu_max - pu_min)
range_space_DH = np.linspace(pv_min, pv_max - 1, pv_max - pv_min)
[range_space_xx, range_space_yy] = np.meshgrid(range_space_DW, range_space_DH)
range_space_X = (range_space_xx - cx) * range_depthmap / fx
range_space_Y = (range_space_yy - cy) * range_depthmap / fy
range_space_Z = range_depthmap
proposal_3d=np.dstack([range_space_X,range_space_Y,range_space_Z])
proposal_3d=proposal_3d.reshape([-1,3])
proposal_3d=np.dot(rm,proposal_3d.T)
proposal_3d=proposal_3d.T
proposal_3d=proposal_3d.reshape([pv_max-pv_min,pu_max-pu_min,3])
# trans from camera coordinate system to eye system
T_c2e=self.getTransmatrix(eye_3dim)
ones_np=np.ones((pv_max-pv_min,pu_max-pu_min,1))
proposal_3d=np.concatenate([proposal_3d,ones_np],axis=2)
proposal_3d=proposal_3d.reshape(-1,4)
proposal_3d=proposal_3d.T
proposal_3d=np.dot(T_c2e,proposal_3d)
proposal_3d=proposal_3d.T
proposal_3d=proposal_3d.reshape(pv_max-pv_min,pu_max-pu_min,4)
gaze_vector_set=proposal_3d[:,:,:3]-0
norm_value = np.linalg.norm(gaze_vector_set, axis=2, keepdims=True)
norm_value[norm_value <= 0] = 1
gaze_vector_set=gaze_vector_set/norm_value
gaze_vector_set[range_depthmap==0]=0
gaze_vector_similar_set=np.dot(gaze_vector_set,pred_gazevector)
max_index_u,max_index_v=img_utils.argmax_pts(gaze_vector_similar_set)
pred_gazetarget_eye=proposal_3d[int(max_index_v),int(max_index_u)]
# in eye coordinate system
self.infer_output["pred_gavetarget_e"]=pred_gazetarget_eye[:3]
self.infer_output["pred_gazevector_e"]=gaze_vector_set[int(max_index_v),int(max_index_u)]-0
# in camera coordinate system
# obtain the inverse transformation matrix
T_e2c=T_c2e.copy()
T_e2c[:3,:3]=T_e2c[:3,:3].T
T_e2c[:,3]=np.append(eye_3dim,1)
pred_gazetarget_camera=np.dot(T_e2c,pred_gazetarget_eye)[:3]
pred_gazevector_camera=pred_gazetarget_camera-eye_3dim
pred_gazevector_camera = pred_gazevector_camera / (np.linalg.norm(pred_gazevector_camera) + 1e-6)
self.infer_output["pred_gavetarget_c"]=pred_gazetarget_camera
self.infer_output["pred_gazevector_c"]=pred_gazevector_camera
def evaluation(self):
#
gt_gazevector_eye=self.ground_truth["gaze_vector_e"]
gt_gazetarget_eye=self.ground_truth["gaze_target3d_e"]
pred_gazevector_eye=self.infer_output["pred_gazevector_e"]
pred_gazetarget_eye=self.infer_output["pred_gavetarget_e"]
# angle error
pred_cosine_similarity=np.sum(gt_gazevector_eye*pred_gazevector_eye)
angle_error=np.arccos(pred_cosine_similarity)
angle_error=np.rad2deg(angle_error)
# dist error
dist=np.linalg.norm(pred_gazetarget_eye-gt_gazetarget_eye)
self.eval["angle_error"]=angle_error
self.eval["dist_error"]=dist
def getGroundTruth(self,aux_info):
width,height=self.img_para
# in camera coordinate system
eye_3dim, gaze_3dim,gaze_2dim = aux_info
gaze_vector=gaze_3dim-eye_3dim
gaze_u,gaze_v=gaze_2dim
gaze_2dim=np.array([gaze_u,gaze_v])
T_c2e=self.getTransmatrix(eye_3dim)
gaze_vector_eye=np.dot(T_c2e[:3,:3],gaze_vector)
gaze_3dim_eye = np.append(gaze_3dim, 1)
import unittest
import pytest
import numpy as np
from geopyspark.geotrellis import SpatialKey, Tile
from geopyspark.geotrellis.constants import LayerType
from geopyspark.geotrellis.layer import TiledRasterLayer
from geopyspark.tests.base_test_class import BaseTestClass
class LocalOpertaionsTest(BaseTestClass):
extent = {'xmin': 0.0, 'ymin': 0.0, 'xmax': 33.0, 'ymax': 33.0}
layout = {'layoutCols': 1, 'layoutRows': 1, 'tileCols': 4, 'tileRows': 4}
metadata = {'cellType': 'float32ud-1.0',
'extent': extent,
'crs': '+proj=longlat +datum=WGS84 +no_defs ',
'bounds': {
'minKey': {'col': 0, 'row': 0},
'maxKey': {'col': 0, 'row': 0}},
'layoutDefinition': {
'extent': extent,
'tileLayout': {'tileCols': 4, 'tileRows': 4, 'layoutCols': 1, 'layoutRows': 1}}}
spatial_key = SpatialKey(0, 0)
@pytest.fixture(autouse=True)
def tearDown(self):
yield
BaseTestClass.pysc._gateway.close()
def test_add_int(self):
arr = np.zeros((1, 4, 4))
tile = Tile(arr, 'FLOAT', -500)
rdd = BaseTestClass.pysc.parallelize([(self.spatial_key, tile)])
tiled = TiledRasterLayer.from_numpy_rdd(LayerType.SPATIAL, rdd, self.metadata)
result = tiled + 1
actual = result.to_numpy_rdd().first()[1].cells
self.assertTrue((actual == 1).all())
def test_subtract_double(self):
arr = np.array([[[1.0, 1.0, 1.0, 1.0],
[2.0, 2.0, 2.0, 2.0],
[3.0, 3.0, 3.0, 3.0],
[4.0, 4.0, 4.0, 4.0]]], dtype=float)
tile = Tile(arr, 'FLOAT', -500)
rdd = BaseTestClass.pysc.parallelize([(self.spatial_key, tile)])
tiled = TiledRasterLayer.from_numpy_rdd(LayerType.SPATIAL, rdd, self.metadata)
result = 5.0 - tiled
actual = result.to_numpy_rdd().first()[1].cells
expected = np.array([[[4.0, 4.0, 4.0, 4.0],
[3.0, 3.0, 3.0, 3.0],
[2.0, 2.0, 2.0, 2.0],
[1.0, 1.0, 1.0, 1.0]]], dtype=float)
self.assertTrue((actual == expected).all())
def test_multiply_double(self):
arr = np.array([[[1.0, 1.0, 1.0, 1.0],
[2.0, 2.0, 2.0, 2.0],
[3.0, 3.0, 3.0, 3.0],
[4.0, 4.0, 4.0, 4.0]]], dtype=float)
tile = Tile(arr, 'FLOAT', float('nan'))
rdd = BaseTestClass.pysc.parallelize([(self.spatial_key, tile)])
tiled = TiledRasterLayer.from_numpy_rdd(LayerType.SPATIAL, rdd, self.metadata)
result = 5.0 * tiled
actual = result.to_numpy_rdd().first()[1].cells
expected = np.array([[[5.0, 5.0, 5.0, 5.0],
[10.0, 10.0, 10.0, 10.0],
[15.0, 15.0, 15.0, 15.0],
[20.0, 20.0, 20.0, 20.0]]], dtype=float)
self.assertTrue((actual == expected).all())
def test_divide_tiled_rdd(self):
arr = np.array([[[5.0, 5.0, 5.0, 5.0],
[5.0, 5.0, 5.0, 5.0],
[5.0, 5.0, 5.0, 5.0],
[5.0, 5.0, 5.0, 5.0]]], dtype=float)
divider = np.array([[[1.0, 1.0, 1.0, 1.0],
[1.0, 1.0, 1.0, 1.0],
[1.0, 1.0, 1.0, 1.0],
[1.0, 1.0, 1.0, 1.0]]], dtype=float)
tile = Tile(arr, 'FLOAT', float('nan'))
tile2 = Tile(divider, 'FLOAT', float('nan'))
rdd = BaseTestClass.pysc.parallelize([(self.spatial_key, tile)])
rdd2 = BaseTestClass.pysc.parallelize([(self.spatial_key, tile2)])
tiled = TiledRasterLayer.from_numpy_rdd(LayerType.SPATIAL, rdd, self.metadata)
tiled2 = TiledRasterLayer.from_numpy_rdd(LayerType.SPATIAL, rdd2, self.metadata)
result = tiled / tiled2
actual = result.to_numpy_rdd().first()[1].cells
self.assertTrue((actual == 5.0).all())
def test_combined_operations(self):
arr = np.array([[[10, 10, 10, 10],
[20, 20, 20, 20],
[10, 10, 10, 10],
[20, 20, 20, 20]]], dtype=int)
tile = Tile(arr, 'INT', -500)
rdd = BaseTestClass.pysc.parallelize([(self.spatial_key, tile)])
tiled = TiledRasterLayer.from_numpy_rdd(LayerType.SPATIAL, rdd, self.metadata)
result = (tiled + tiled) / 2
actual = result.to_numpy_rdd().first()[1].cells
self.assertTrue((actual == arr).all())
def test_abs_operations(self):
arr = np.array([[[-10, -10, -10, 10],
[-20, -20, -20, 20],
[-10, -10, 10, 10],
[-20, -20, 20, 20]]], dtype=int)
expected = np.array([[[10, 10, 10, 10],
[20, 20, 20, 20],
[10, 10, 10, 10],
[20, 20, 20, 20]]], dtype=int)
tile = Tile(arr, 'INT', -500)
rdd = BaseTestClass.pysc.parallelize([(self.spatial_key, tile)])
tiled = TiledRasterLayer.from_numpy_rdd(LayerType.SPATIAL, rdd, self.metadata)
result = abs(tiled)
actual = result.to_numpy_rdd().first()[1].cells
self.assertTrue((actual == expected).all())
def test_pow_int(self):
arr = np.zeros((1, 4, 4))
tile = Tile(arr, 'FLOAT', -500)
rdd = BaseTestClass.pysc.parallelize([(self.spatial_key, tile)])
tiled = TiledRasterLayer.from_numpy_rdd(LayerType.SPATIAL, rdd, self.metadata)
result = tiled ** 5
actual = result.to_numpy_rdd().first()[1].cells
self.assertTrue((actual == 0).all())
def test_rpow_int(self):
arr = np.full((1, 4, 4), 2, dtype='int16')
'''
This is a implementation of Quantum State Tomography for Qutrits,
using techniques of following papars.
'Iterative algorithm for reconstruction of entangled states(10.1103/PhysRevA.63.040303)'
'Diluted maximum-likelihood algorithm for quantum tomography(10.1103/PhysRevA.75.042108)'
'Qudit Quantum State Tomography(10.1103/PhysRevA.66.012303)'
'On-chip generation of high-dimensional entangled quantum states and their coherent control(Nature volume 546, pages622-626(2017))'
'''
import numpy as np
from numpy import array, kron, trace, identity, sqrt, zeros, exp, pi, conjugate, random
from scipy.linalg import sqrtm
from datetime import datetime
from concurrent import futures
import os
from pathlib import Path
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import pickle
"""
Definition of Three Frequency Bases:
fb1 = array([1, 0, 0])
fb2 = array([0, 1, 0])
fb3 = array([0, 0, 1])
"""
zero_base_array1 = zeros((1,3))
zero_base_array1[0][0] = 1
fb1 = zero_base_array1
zero_base_array2 = zeros((1,3))
zero_base_array2[0][1] = 1
fb2 = zero_base_array2
zero_base_array3 = zeros((1,3))
zero_base_array3[0][2] = 1
fb3 = zero_base_array3
""" Make Measurement Bases """
mb1 = (conjugate((fb1 + fb2).T) @ (fb1 + fb2)) / 2
mb2 = (conjugate((fb1 + fb3).T) @ (fb1 + fb3)) / 2
mb3 = (conjugate((fb2 + fb3).T) @ (fb2 + fb3)) / 2
mb4 = (conjugate((exp( 2*pi*1j/3) * fb1 + (exp(-2*pi*1j/3)) * fb2).T) @ (exp( 2*pi*1j/3) * fb1 + (exp(-2*pi*1j/3) * fb2))) / 2
mb5 = (conjugate((exp(-2*pi*1j/3) * fb1 + (exp( 2*pi*1j/3)) * fb2).T) @ (exp(-2*pi*1j/3) * fb1 + (exp( 2*pi*1j/3) * fb2))) / 2
mb6 = (conjugate((exp( 2*pi*1j/3) * fb1 + (exp(-2*pi*1j/3)) * fb3).T) @ (exp( 2*pi*1j/3) * fb1 + (exp(-2*pi*1j/3) * fb3))) / 2
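# Quick sanity check (an illustrative addition, not part of the original
# script): each measurement operator above should be a rank-1 projector that
# is Hermitian with unit trace.
def _check_measurement_bases(bases=(mb1, mb2, mb3, mb4, mb5, mb6)):
    for mb in bases:
        assert np.allclose(mb, conjugate(mb).T)  # Hermitian
        assert np.isclose(trace(mb).real, 1.0)   # unit trace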
# -*- coding: utf-8 -*-
import numpy as np
import pytest
import glob
from satlomasproc.model import *
from satlomasproc.configuration import LSTMTrainingScriptConfig
from keras.models import load_model
__author__ = "<NAME>"
__copyright__ = "Dymaxion Labs"
__license__ = "apache-2.0"
# to run this test python -m pytest tests/test_model.py
def get_model_package_name():
model_package_name = glob.glob("models/*_model_package_*.model")[-1]
print("Using {} packaged model to test".format(model_package_name))
return model_package_name
def get_model_hyperopt_package_name():
model_package_name = glob.glob("models/*_model_hyperopt_package_*.model")[-1]
print("Using {} hyperopt packaged model to test".format(model_package_name))
return model_package_name
# Test that predictions are returned independently of shape of input datapoint
def test_predict_with_model_is_not_none():
model_package_name = get_model_package_name()
n_past_steps = 3
# datapoint as array
# datapoint = np.ones(model.input_shape[1])
datapoint = np.ones(n_past_steps)
"""
@brief test log(time=2s)
"""
import unittest
import numpy
from pyquickhelper.pycode import ExtTestCase
from mlinsights.timeseries import build_ts_X_y
from mlinsights.timeseries.base import BaseTimeSeries
class TestBaseTimeSeries(ExtTestCase):
def test_base_parameters_split0(self):
X = None
y = numpy.arange(5) * 100
weights = numpy.arange(5) * 1000
bs = BaseTimeSeries(past=2)
nx, ny, nw = build_ts_X_y(bs, X, y, weights)
self.assertEqualArray(y[0:-2], nx[:, 0])
self.assertEqualArray(y[1:-1], nx[:, 1])
self.assertEqualArray(y[2:].reshape((3, 1)), ny)
self.assertEqualArray(weights[1:-1], nw)
def test_base_parameters_split0_all(self):
X = None
y = numpy.arange(5).astype(numpy.float64) * 100
weights = numpy.arange(5).astype(numpy.float64) * 1000
bs = BaseTimeSeries(past=2)
nx, ny, nw = build_ts_X_y(bs, X, y, weights, same_rows=True)
self.assertEqualArray(y[0:-2], nx[2:, 0])
self.assertEqualArray(y[1:-1], nx[2:, 1])
self.assertEqualArray(y[2:].reshape((3, 1)), ny[2:])
self.assertEqualArray(weights, nw)
def test_base_parameters_split0_1(self):
X = None
y = numpy.arange(5) * 100
weights = numpy.arange(5) + 1000
bs = BaseTimeSeries(past=1)
nx, ny, nw = build_ts_X_y(bs, X, y, weights)
self.assertEqual(nx.shape, (4, 1))
self.assertEqualArray(y[0:-1], nx[:, 0])
self.assertEqualArray(y[1:].reshape((4, 1)), ny)
self.assertEqualArray(weights[:-1], nw)
def test_base_parameters_split1(self):
X = numpy.arange(10).reshape(5, 2)
y = numpy.arange(5) * 100
weights = numpy.arange(5) * 1000
bs = BaseTimeSeries(past=2)
nx, ny, nw = build_ts_X_y(bs, X, y, weights)
self.assertEqualArray(X[1:-1], nx[:, :2])
self.assertEqualArray(y[0:-2], nx[:, 2])
self.assertEqualArray(y[1:-1], nx[:, 3])
self.assertEqualArray(y[2:].reshape((3, 1)), ny)
self.assertEqualArray(weights[1:-1], nw)
def test_base_parameters_split2(self):
X = numpy.arange(10).reshape(5, 2)
y = numpy.arange(5) * 100
weights = numpy.arange(5) * 1000
bs = BaseTimeSeries(past=2, delay2=3)
nx, ny, nw = build_ts_X_y(bs, X, y, weights)
self.assertEqualArray(X[1:-2], nx[:, :2])
self.assertEqualArray(y[0:-3], nx[:, 2])
self.assertEqualArray(y[1:-2], nx[:, 3])
self.assertEqualArray(numpy.array([[200, 300], [300, 400]]), ny)
self.assertEqualArray(weights[1:-2], nw)
def test_base_parameters_split_all_0(self):
X = None
y = numpy.arange(5) * 100
weights = numpy.arange(5) * 1000
bs = BaseTimeSeries(past=2, use_all_past=True)
nx, ny, nw = build_ts_X_y(bs, X, y, weights)
self.assertEqualArray(y[0:-2], nx[:, 0])
self.assertEqualArray(y[1:-1], nx[:, 1])
self.assertEqualArray(y[2:].reshape((3, 1)), ny)
self.assertEqualArray(weights[1:-1], nw)
def test_base_parameters_split_all_0_same(self):
X = None
y = | numpy.arange(5) | numpy.arange |
import numpy as np
import matplotlib.pyplot as plt
import torch
from torchvision.utils import make_grid
"""
Creates an object to sample and visualize the effect of the LSFs
by sampling from the conditional latent distributions.
"""
class vis_LatentSpace:
def __init__(self, model, mu, sd, latent_dim=10, latent_range=3, input_dim=28):
self.model = model
self.model.eval()
self.latent_dim = latent_dim
self.latent_range = latent_range
self.input_dim = input_dim
self.mu = mu
self.sd = sd
def to_img(self, x):
x = x.clamp(0, 1)
return x
def show_image(self, img):
img = self.to_img(img)
npimg = img.numpy()
plt.imshow(np.transpose(npimg, (1, 2, 0)))
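# Illustrative usage sketch (assumed names; the original file is truncated
# here): given a trained decoder `model` and per-dimension latent statistics
# `mu`, `sd`, one could do
#   vis = vis_LatentSpace(model, mu, sd, latent_dim=10)
#   vis.show_image(make_grid(model.decode(torch.randn(16, 10) * sd + mu).cpu()))
# to inspect samples drawn around the conditional latent means
# (`model.decode` is an assumed method name).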
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++#
# This python file is a library of python functions which provides modular
# common set-up commands for solving a problem in OpenCMISS.
# Each function has a range of input options and calls the appropriate
# OpenCMISS linked commands to set up the problem. This is a high
# level library that will allow shorter scripting for solving cardiac mechanics
# simulations and also make it easier to debug.
# Author: <NAME>
# Start Date: 20th October 2014
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++#
from opencmiss.iron import iron
import numpy
import math
import os
# =================================================================================#
def BasicSetUp(regionUserNumber, coordinateSystemUserNumber):
# This function sets up the world region, 3D CS, parallel computing nodes, and
# diagnostics.
# Set up diagnostics/debug
#iron.DiagnosticsSetOn(iron.DiagnosticTypes.IN,[1,2,3,4,5],
#"Diagnostics",["DOMAIN_MAPPINGS_LOCAL_FROM_GLOBAL_CALCULATE"])
# Get computational node information for parallel computing
numberOfComputationalNodes = iron.ComputationalNumberOfNodesGet()
computationalNodeNumber = iron.ComputationalNodeNumberGet()
# Set up 3D RC coordinate system
coordinateSystem = iron.CoordinateSystem()
coordinateSystem.CreateStart(coordinateSystemUserNumber)
coordinateSystem.dimension = 3
coordinateSystem.CreateFinish()
# Create world region
region = iron.Region()
region.CreateStart(regionUserNumber, iron.WorldRegion)
region.label = "Region"
region.coordinateSystem = coordinateSystem
region.CreateFinish()
# Output for diagnostics
print("----> Set up coordinate system and world region <----\n")
return numberOfComputationalNodes, computationalNodeNumber, coordinateSystem, region
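# Illustrative call (the user numbers are arbitrary; requires a working
# OpenCMISS-Iron installation):
#   numNodes, nodeId, coordSys, region = BasicSetUp(regionUserNumber=1,
#                                                   coordinateSystemUserNumber=2)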
# =================================================================================#
#=================================================================================#
def BasisFunction(basisUserNumber, numOfXi, option, collapsed):
# This function sets up the basis function depending on the option given.
if option[0] == 1:
# Trilinear basis function for interpolation of geometry.
basis = iron.Basis()
basis.CreateStart(basisUserNumber)
basis.numberOfXi = numOfXi
basis.type = iron.BasisTypes.LAGRANGE_HERMITE_TP
basis.interpolationXi = [iron.BasisInterpolationSpecifications.LINEAR_LAGRANGE] * numOfXi
basis.QuadratureLocalFaceGaussEvaluateSet(True)
basis.quadratureNumberOfGaussXi = [2,2,2]
basis.CreateFinish()
# Output for diagnostics
print("----> Set up trilinear basis functions for geometry, use element based interpolation for pressure <----\n")
if collapsed:
basisCol = iron.Basis()
basisCol.CreateStart(basisUserNumber+1)
basisCol.numberOfXi = numOfXi
basisCol.type = iron.BasisTypes.LAGRANGE_HERMITE_TP
basisCol.interpolationXi = [iron.BasisInterpolationSpecifications.LINEAR_LAGRANGE] * numOfXi
basisCol.QuadratureLocalFaceGaussEvaluateSet(True)
basisCol.quadratureNumberOfGaussXi = [2,2,2]
basisCol.CollapsedXiSet([iron.BasisXiCollapse.XI_COLLAPSED, iron.BasisXiCollapse.COLLAPSED_AT_XI0, iron.BasisXiCollapse.NOT_COLLAPSED])
print("---> Set up collapsed basis functions for apical elements")
basisCol.CreateFinish()
return basis, basisCol
return basis
elif option[0] == 2:
quadBasis = iron.Basis()
quadBasis.CreateStart(basisUserNumber[0])
quadBasis.InterpolationXiSet([iron.BasisInterpolationSpecifications.QUADRATIC_LAGRANGE]*numOfXi)
quadBasis.QuadratureNumberOfGaussXiSet([4]*numOfXi)
quadBasis.QuadratureLocalFaceGaussEvaluateSet(True)
quadBasis.CreateFinish()
# Tricubic Hermite basis function for interpolation of geometry.
cubicBasis = iron.Basis() # For geometry.
cubicBasis.CreateStart(basisUserNumber[1])
cubicBasis.InterpolationXiSet([iron.BasisInterpolationSpecifications.CUBIC_HERMITE] * numOfXi)
cubicBasis.QuadratureNumberOfGaussXiSet([4] * numOfXi)
cubicBasis.QuadratureLocalFaceGaussEvaluateSet(True)
cubicBasis.CreateFinish()
# Output for diagnostics
print("----> Set up tricubic hermite basis function for geometry and trilinear for hydrostatic pressure <----\n")
return quadBasis, cubicBasis
#=================================================================================#
#=================================================================================#
def GeneratedMesh(generatedMeshUserNumber, meshUserNumber, region, bases, dimensions, elements):
# This function sets up a generated mesh using user specified dimensions.
generatedMesh = iron.GeneratedMesh()
generatedMesh.CreateStart(generatedMeshUserNumber, region)
generatedMesh.TypeSet(iron.GeneratedMeshTypes.REGULAR)
generatedMesh.BasisSet(bases)
generatedMesh.ExtentSet(dimensions)
generatedMesh.NumberOfElementsSet(elements)
mesh = iron.Mesh()
generatedMesh.CreateFinish(meshUserNumber, mesh)
return generatedMesh, mesh
#=================================================================================#
#=================================================================================#
def DecompositionSetUp(decompositionUserNumber, mesh, numberOfComputationalNodes):
# This function sets up the decomposition of the mesh.
decomposition = iron.Decomposition()
decomposition.CreateStart(decompositionUserNumber, mesh)
decomposition.type = iron.DecompositionTypes.CALCULATED
decomposition.NumberOfDomainsSet(numberOfComputationalNodes)
decomposition.CalculateFacesSet(True)
decomposition.CreateFinish()
# Output for diagnostics
print("----> Set up decomposition <----\n")
return decomposition
#=================================================================================#
#=================================================================================#
def GeometricFieldSetUp(geometricFieldUserNumber, region, decomposition, option):
# Set up geometry field
geometricField = iron.Field() # Initialise
geometricField.CreateStart(geometricFieldUserNumber, region)
geometricField.MeshDecompositionSet(decomposition)
geometricField.VariableLabelSet(iron.FieldVariableTypes.U, "Geometry")
if option[0] == 2:
# Tricubic Hermite
if option[1] == 1:
geometricField.ScalingTypeSet(iron.FieldScalingTypes.UNIT)
# Output for diagnostics
print("----> Set up tricubic Hermite geometric field with unit scaling <----\n")
elif option[1] == 2:
geometricField.ScalingTypeSet(iron.FieldScalingTypes.ARITHMETIC_MEAN)
# Output for diagnostics
print("----> Set up tricubic Hermite geometric field with arithmetic mean scaling <----\n")
geometricField.CreateFinish()
return geometricField
#=================================================================================#
#=================================================================================#
def GeometricFieldInitialise(xNodes, yNodes, zNodes, geometricField, numNodes, option):
# This function initialises the geometric field with user specified coordinates.
# Initialise nodal values.
for node, value in enumerate(xNodes, 1):
geometricField.ParameterSetUpdateNodeDP(iron.FieldVariableTypes.U, iron.FieldParameterSetTypes.VALUES, 1,
iron.GlobalDerivativeConstants.NO_GLOBAL_DERIV, node, 1, value)
for node, value in enumerate(yNodes, 1):
geometricField.ParameterSetUpdateNodeDP(iron.FieldVariableTypes.U, iron.FieldParameterSetTypes.VALUES, 1,
iron.GlobalDerivativeConstants.NO_GLOBAL_DERIV, node, 2, value)
for node, value in enumerate(zNodes, 1):
geometricField.ParameterSetUpdateNodeDP(iron.FieldVariableTypes.U, iron.FieldParameterSetTypes.VALUES, 1,
iron.GlobalDerivativeConstants.NO_GLOBAL_DERIV, node, 3, value)
# Initialise first derivatives.
if option[0] == 2:
# Tricubic Hermite basis.
for node in range(numNodes):
geometricField.ParameterSetUpdateNodeDP(iron.FieldVariableTypes.U, iron.FieldParameterSetTypes.VALUES, 1,
iron.GlobalDerivativeConstants.GLOBAL_DERIV_S1, node + 1, 1, max(xNodes))
geometricField.ParameterSetUpdateNodeDP(iron.FieldVariableTypes.U, iron.FieldParameterSetTypes.VALUES, 1,
iron.GlobalDerivativeConstants.GLOBAL_DERIV_S2, node + 1, 2, max(yNodes))
geometricField.ParameterSetUpdateNodeDP(iron.FieldVariableTypes.U, iron.FieldParameterSetTypes.VALUES, 1,
iron.GlobalDerivativeConstants.GLOBAL_DERIV_S3, node + 1, 3, max(zNodes))
# Output
print("----> Initialised geometric nodal values <----\n")
return geometricField
#=================================================================================#
#=================================================================================#
def GeometricFieldExport(region, filename):
# This function exports the undeformed geometric field.
if not os.path.exists("./results"):
os.makedirs("./results")
exportField = iron.Fields()
exportField.CreateRegion(region)
exportField.NodesExport("./results/" + filename, "FORTRAN")
exportField.ElementsExport("./results/" + filename, "FORTRAN")
exportField.Finalise()
# Output
print("----> Export undeformed geometry <----\n")
#=================================================================================#
#=================================================================================#
def ExtractNodesElements(filename):
# This function extracts nodes and element connectivity information from
# exnode and exelem files.
try:
fid_node = open(filename+'.exnode', 'r')
except IOError:
print('ERROR: Unable to open '+filename+'.exnode')
return
try:
fid_elem = open(filename+'.exelem', 'r')
except IOError:
print('ERROR: Unable to open '+filename+'.exelem')
return
for i in range(1,86):
junk = fid_elem.readline()
nodesX = []
nodesY = []
nodesZ = []
elements = []
for i in [1,2,3,4,5,6]:
junk = fid_node.readline()
# Read nodal information.
i = 0
temp = fid_node.readline()
while temp != '':
currentNode = temp.split()[1]
temp = fid_node.readline()
nodesX.append(temp.split())
temp = fid_node.readline()
nodesY.append(temp.split())
temp = fid_node.readline()
nodesZ.append(temp.split())
i = i+1
temp = fid_node.readline()
nodesX = numpy.array(nodesX)
    nodesY = numpy.array(nodesY)
# coding: utf8
########################################################################
# #
# Control law : tau = P(q*-q^) + D(v*-v^) + tau_ff #
# #
########################################################################
from matplotlib import pyplot as plt
import pinocchio as pin
import numpy as np
import numpy.matlib as matlib
import tsid
import FootTrajectoryGenerator as ftg
import FootstepPlanner
import pybullet as pyb
import utils
import time
pin.switchToNumpyMatrix()
########################################################################
# Class for a PD with feed-forward Controller #
########################################################################
class controller:
""" Inverse Dynamics controller that take into account the dynamics of the quadruped to generate
actuator torques to apply on the ground the contact forces computed by the MPC (for feet in stance
phase) and to perform the desired footsteps (for feet in swing phase)
Args:
N_similation (int): maximum number of Inverse Dynamics iterations for the simulation
"""
def __init__(self, N_simulation, k_mpc, n_periods):
self.q_ref = np.array([[0.0, 0.0, 0.2027682, 0.0, 0.0, 0.0, 1.0,
0.0, 0.8, -1.6, 0, 0.8, -1.6,
0, -0.8, 1.6, 0, -0.8, 1.6]]).transpose()
self.qtsid = self.q_ref.copy()
self.vtsid = np.zeros((18, 1))
self.ades = np.zeros((18, 1))
self.error = False
self.verbose = True
# List with the names of all feet frames
self.foot_frames = ['FL_FOOT', 'FR_FOOT', 'HL_FOOT', 'HR_FOOT']
# Constraining the contacts
mu = 0.9 # friction coefficient
fMin = 1.0 # minimum normal force
fMax = 25.0 # maximum normal force
contactNormal = np.matrix([0., 0., 1.]).T # direction of the normal to the contact surface
# Coefficients of the posture task
        kp_posture = 10.0 # proportional gain of the posture task
w_posture = 1.0 # weight of the posture task
# Coefficients of the contact tasks
        kp_contact = 100.0 # proportional gain for the contacts
self.w_forceRef = 50.0 # weight of the forces regularization
self.w_reg_f = 50.0
# Coefficients of the foot tracking task
        kp_foot = 100.0 # proportional gain for the tracking task
self.w_foot = 500.0 # weight of the tracking task
# Arrays to store logs
k_max_loop = N_simulation
self.f_pos = np.zeros((4, k_max_loop, 3))
self.f_vel = np.zeros((4, k_max_loop, 3))
self.f_acc = np.zeros((4, k_max_loop, 3))
self.f_pos_ref = np.zeros((4, k_max_loop, 3))
self.f_vel_ref = np.zeros((4, k_max_loop, 3))
self.f_acc_ref = np.zeros((4, k_max_loop, 3))
self.b_pos = np.zeros((k_max_loop, 6))
self.b_vel = np.zeros((k_max_loop, 6))
self.com_pos = np.zeros((k_max_loop, 3))
self.com_pos_ref = np.zeros((k_max_loop, 3))
self.c_forces = np.zeros((4, k_max_loop, 3))
self.h_ref_feet = np.zeros((k_max_loop, ))
self.goals = np.zeros((3, 4))
self.vgoals = np.zeros((3, 4))
self.agoals = np.zeros((3, 4))
self.mgoals = np.zeros((6, 4))
# Position of the shoulders in local frame
self.shoulders = np.array([[0.19, 0.19, -0.19, -0.19], [0.15005, -0.15005, 0.15005, -0.15005]])
self.footsteps = self.shoulders.copy()
self.memory_contacts = self.shoulders.copy()
# Foot trajectory generator
max_height_feet = 0.05
t_lock_before_touchdown = 0.05
self.ftgs = [ftg.Foot_trajectory_generator(max_height_feet, t_lock_before_touchdown) for i in range(4)]
# Which pair of feet is active (0 for [1, 2] and 1 for [0, 3])
self.pair = -1
# Number of TSID steps for 1 step of the MPC
self.k_mpc = k_mpc
# For update_feet_tasks function
self.dt = 0.001 # [s], time step
self.t1 = 0.14 # [s], duration of swing phase
# Rotation matrix
self.R = np.eye(3)
# Feedforward torques
self.tau_ff = np.zeros((12, 1))
# Torques sent to the robot
self.torques12 = np.zeros((12, 1))
self.tau = np.zeros((12, ))
self.ID_base = None # ID of base link
self.ID_feet = [None] * 4 # ID of feet links
# Footstep planner object
# self.fstep_planner = FootstepPlanner.FootstepPlanner(0.001, 32)
self.vu_m = np.zeros((6, 1))
self.t_stance = 0.16
self.T_gait = 0.32
self.n_periods = n_periods
self.h_ref = 0.235 - 0.01205385
self.t_swing = np.zeros((4, )) # Total duration of current swing phase for each foot
self.contacts_order = [0, 1, 2, 3]
# Parameter to enable/disable hybrid control
self.enable_hybrid_control = False
# Time since the start of the simulation
self.t = 0.0
########################################################################
# Definition of the Model and TSID problem #
########################################################################
# Set the paths where the urdf and srdf file of the robot are registered
modelPath = "/opt/openrobots/share/example-robot-data/robots"
urdf = modelPath + "/solo_description/robots/solo12.urdf"
srdf = modelPath + "/solo_description/srdf/solo.srdf"
vector = pin.StdVec_StdString()
vector.extend(item for item in modelPath)
# Create the robot wrapper from the urdf model (which has no free flyer) and add a free flyer
self.robot = tsid.RobotWrapper(urdf, vector, pin.JointModelFreeFlyer(), False)
self.model = self.robot.model()
        # Creation of the Inverse Dynamics HQP problem using the robot
# accelerations (base + joints) and the contact forces
self.invdyn = tsid.InverseDynamicsFormulationAccForce("tsid", self.robot, False)
# Compute the problem data with a solver based on EiQuadProg
t = 0.0
self.invdyn.computeProblemData(t, self.qtsid, self.vtsid)
# Saving IDs for later
self.ID_base = self.model.getFrameId("base_link")
for i, name in enumerate(self.foot_frames):
self.ID_feet[i] = self.model.getFrameId(name)
# Store a frame object to avoid creating one each time
self.pos_foot = self.robot.framePosition(self.invdyn.data(), self.ID_feet[0])
#####################
# LEGS POSTURE TASK #
#####################
# Task definition (creating the task object)
self.postureTask = tsid.TaskJointPosture("task-posture", self.robot)
self.postureTask.setKp(kp_posture * matlib.ones(self.robot.nv-6).T) # Proportional gain
self.postureTask.setKd(2.0 * np.sqrt(kp_posture) * matlib.ones(self.robot.nv-6).T) # Derivative gain
# Add the task to the HQP with weight = w_posture, priority level = 1 (not real constraint)
# and a transition duration = 0.0
self.invdyn.addMotionTask(self.postureTask, w_posture, 1, 0.0)
# TSID Trajectory (creating the trajectory object and linking it to the task)
pin.loadReferenceConfigurations(self.model, srdf, False)
self.trajPosture = tsid.TrajectoryEuclidianConstant("traj_joint", self.q_ref[7:])
self.samplePosture = self.trajPosture.computeNext()
self.postureTask.setReference(self.samplePosture)
############
# CONTACTS #
############
self.contacts = 4*[None] # List to store the rigid contact tasks
for i, name in enumerate(self.foot_frames):
# Contact definition (creating the contact object)
self.contacts[i] = tsid.ContactPoint(name, self.robot, name, contactNormal, mu, fMin, fMax)
self.contacts[i].setKp((kp_contact * matlib.ones(3).T))
self.contacts[i].setKd((2.0 * np.sqrt(kp_contact) * matlib.ones(3).T))
self.contacts[i].useLocalFrame(False)
# Set the contact reference position
H_ref = self.robot.framePosition(self.invdyn.data(), self.ID_feet[i])
H_ref.translation = np.matrix(
[H_ref.translation[0, 0],
H_ref.translation[1, 0],
0.0]).T
self.contacts[i].setReference(H_ref)
# Regularization weight for the force tracking subtask
self.contacts[i].setRegularizationTaskWeightVector(
np.matrix([self.w_reg_f, self.w_reg_f, self.w_reg_f]).T)
# Adding the rigid contact after the reference contact force has been set
self.invdyn.addRigidContact(self.contacts[i], self.w_forceRef)
#######################
# FOOT TRACKING TASKS #
#######################
self.feetTask = 4*[None] # List to store the foot tracking tasks
mask = np.matrix([1.0, 1.0, 1.0, 0.0, 0.0, 0.0]).T
# Task definition (creating the task object)
for i_foot in range(4):
self.feetTask[i_foot] = tsid.TaskSE3Equality(
"foot_track_" + str(i_foot), self.robot, self.foot_frames[i_foot])
self.feetTask[i_foot].setKp(kp_foot * mask)
self.feetTask[i_foot].setKd(2.0 * np.sqrt(kp_foot) * mask)
self.feetTask[i_foot].setMask(mask)
self.feetTask[i_foot].useLocalFrame(False)
# The reference will be set later when the task is enabled
##########
# SOLVER #
##########
# Use EiquadprogFast solver
self.solver = tsid.SolverHQuadProgFast("qp solver")
# Resize the solver to fit the number of variables, equality and inequality constraints
self.solver.resize(self.invdyn.nVar, self.invdyn.nEq, self.invdyn.nIn)
def update_feet_tasks(self, k_loop, gait, looping, interface, ftps_Ids_deb):
"""Update the 3D desired position for feet in swing phase by using a 5-th order polynomial that lead them
to the desired position on the ground (computed by the footstep planner)
Args:
k_loop (int): number of time steps since the start of the current gait cycle
pair (int): the current pair of feet in swing phase, for a walking trot gait
looping (int): total number of time steps in one gait cycle
interface (Interface object): interface between the simulator and the MPC/InvDyn
ftps_Ids_deb (list): IDs of debug spheres in PyBullet
"""
# Indexes of feet in swing phase
feet = np.where(gait[0, 1:] == 0)[0]
if len(feet) == 0: # If no foot in swing phase
return 0
t0s = []
for i in feet: # For each foot in swing phase get remaining duration of the swing phase
# Index of the line containing the next stance phase
index = next((idx for idx, val in np.ndenumerate(gait[:, 1+i]) if (((val==1)))), [-1])[0]
remaining_iterations = np.cumsum(gait[:index, 0])[-1] * self.k_mpc - (k_loop % self.k_mpc)
# Compute total duration of current swing phase
i_iter = 1
self.t_swing[i] = gait[0, 0]
while gait[i_iter, 1+i] == 0:
self.t_swing[i] += gait[i_iter, 0]
i_iter += 1
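            # The swing phase may wrap around the end of the cyclic gait matrix, so also
            # accumulate the matching rows counted backwards from the end of the matrix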
i_iter = -1
while gait[i_iter, 1+i] == 0:
self.t_swing[i] += gait[i_iter, 0]
i_iter -= 1
self.t_swing[i] *= self.dt * self.k_mpc
t0s.append(np.round(self.t_swing[i] - remaining_iterations * self.dt, decimals=3))
# self.footsteps contains the target (x, y) positions for both feet in swing phase
for i in range(len(feet)):
i_foot = feet[i]
# Get desired 3D position, velocity and acceleration
if t0s[i] == 0.000:
[x0, dx0, ddx0, y0, dy0, ddy0, z0, dz0, ddz0, gx1, gy1] = (self.ftgs[i_foot]).get_next_foot(
interface.o_feet[0, i_foot], interface.ov_feet[0, i_foot], interface.oa_feet[0, i_foot],
interface.o_feet[1, i_foot], interface.ov_feet[1, i_foot], interface.oa_feet[1, i_foot],
self.footsteps[0, i_foot], self.footsteps[1, i_foot], t0s[i], self.t_swing[i_foot], self.dt)
self.mgoals[:, i_foot] = np.array([x0, dx0, ddx0, y0, dy0, ddy0])
else:
[x0, dx0, ddx0, y0, dy0, ddy0, z0, dz0, ddz0, gx1, gy1] = (self.ftgs[i_foot]).get_next_foot(
self.mgoals[0, i_foot], self.mgoals[1, i_foot], self.mgoals[2, i_foot],
self.mgoals[3, i_foot], self.mgoals[4, i_foot], self.mgoals[5, i_foot],
self.footsteps[0, i_foot], self.footsteps[1, i_foot], t0s[i], self.t_swing[i_foot], self.dt)
self.mgoals[:, i_foot] = np.array([x0, dx0, ddx0, y0, dy0, ddy0])
# Take into account vertical offset of Pybullet
z0 += interface.mean_feet_z
# Store desired position, velocity and acceleration for later call to this function
self.goals[:, i_foot] = np.array([x0, y0, z0])
self.vgoals[:, i_foot] = np.array([dx0, dy0, dz0])
self.agoals[:, i_foot] = np.array([ddx0, ddy0, ddz0])
# Update desired pos, vel, acc
self.sampleFeet[i_foot].pos(np.matrix([x0, y0, z0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]).T)
self.sampleFeet[i_foot].vel(np.matrix([dx0, dy0, dz0, 0.0, 0.0, 0.0]).T)
self.sampleFeet[i_foot].acc(np.matrix([ddx0, ddy0, ddz0, 0.0, 0.0, 0.0]).T)
# Set reference
self.feetTask[i_foot].setReference(self.sampleFeet[i_foot])
# Update footgoal for display purpose
self.feetGoal[i_foot].translation = np.matrix([x0, y0, z0]).T
# Display the goal position of the feet as green sphere in PyBullet
pyb.resetBasePositionAndOrientation(ftps_Ids_deb[i_foot],
posObj=np.array([gx1, gy1, 0.0]),
ornObj=np.array([0.0, 0.0, 0.0, 1.0]))
return 0
####################################################################
# Torque Control method #
####################################################################
def control(self, qtsid, vtsid, k_simu, solo, interface, f_applied, fsteps, gait,
ftps_Ids_deb, enable_hybrid_control=False, qmes=None, vmes=None, qmpc=None, vmpc=None):
"""Update the 3D desired position for feet in swing phase by using a 5-th order polynomial that lead them
to the desired position on the ground (computed by the footstep planner)
Args:
qtsid (19x1 array): the position/orientation of the trunk and angular position of actuators
vtsid (18x1 array): the linear/angular velocity of the trunk and angular velocity of actuators
t (float): time elapsed since the start of the simulation
k_simu (int): number of time steps since the start of the simulation
solo (object): Pinocchio wrapper for the quadruped
interface (Interface object): interface between the simulator and the MPC/InvDyn
f_applied (12 array): desired contact forces for all feet (0s for feet in swing phase)
fsteps (Xx13 array): contains the remaining number of steps of each phase of the gait (first column) and
the [x, y, z]^T desired position of each foot for each phase of the gait (12 other
columns). For feet currently touching the ground the desired position is where they
currently are.
enable_hybrid_control (bool): whether hybrid control is enabled or not
qmes (19x1 array): the position/orientation of the trunk and angular position of actuators of the real
robot (for hybrid control)
vmes (18x1 array): the linear/angular velocity of the trunk and angular velocity of actuators of the real
robot (for hybrid control)
"""
self.f_applied = f_applied
# If hybrid control is turned on/off the CoM task needs to be turned on/off too
"""if self.enable_hybrid_control != enable_hybrid_control:
if enable_hybrid_control:
# Turn on CoM task
self.invdyn.addMotionTask(self.comTask, self.w_com, 1, 0.0)
else:
# Turn off CoM task
self.invdyn.removeTask(self.comTask, 0.0)"""
# Update hybrid control parameters
self.enable_hybrid_control = enable_hybrid_control
if self.enable_hybrid_control:
self.qmes = qmes
self.vmes = vmes
# Update state of TSID
if k_simu == 0: # Some initialization during the first iteration
self.qtsid = qtsid
self.qtsid[:3] = np.zeros((3, 1)) # Discard x and y drift and height position
self.qtsid[2, 0] = 0.235 - 0.01205385
self.feetGoal = 4*[None]
self.sampleFeet = 4*[None]
self.pos_contact = 4*[None]
for i_foot in range(4):
self.feetGoal[i_foot] = self.robot.framePosition(
self.invdyn.data(), self.ID_feet[i_foot])
footTraj = tsid.TrajectorySE3Constant("foot_traj", self.feetGoal[i_foot])
self.sampleFeet[i_foot] = footTraj.computeNext()
self.pos_contact[i_foot] = np.matrix([self.footsteps[0, i_foot], self.footsteps[1, i_foot], 0.0])
else:
# Here is where we will merge the data from the state estimator and the internal state of TSID
"""# Encoders (position of joints)
self.qtsid[7:] = qtsid[7:]
# Gyroscopes (angular velocity of trunk)
self.vtsid[3:6] = vtsid[3:6]
# IMU estimation of orientation of the trunk
self.qtsid[3:7] = qtsid[3:7]"""
"""self.qtsid = qtsid.copy()
# self.qtsid[2] -= 0.015 # 0.01205385
self.vtsid = vtsid.copy()
self.qtsid[2, 0] += mpc.q_noise[0]
self.qtsid[3:7] = utils.getQuaternion(utils.quaternionToRPY(
qtsid[3:7, 0]) + np.vstack((np.array([mpc.q_noise[1:]]).transpose(), 0.0)))
self.vtsid[:6, 0] += mpc.v_noise"""
# Update internal state of TSID for the current interation
self.update_state(qtsid, vtsid)
#####################
# FOOTSTEPS PLANNER #
#####################
looping = int(self.n_periods*self.T_gait/dt) # Number of TSID iterations in one gait cycle
k_loop = (k_simu - 0) % looping # Current number of iterations since the start of the current gait cycle
# Update the desired position of footholds thanks to the footstep planner
self.update_footsteps(interface, fsteps)
######################################
# UPDATE REFERENCE OF CONTACT FORCES #
######################################
# Update the contact force tracking tasks to follow the forces computed by the MPC
self.update_ref_forces(interface)
################
# UPDATE TASKS #
################
# Enable/disable contact and 3D tracking tasks depending on the state of the feet (swing or stance phase)
self.update_tasks(k_simu, k_loop, looping, interface, gait, ftps_Ids_deb)
###############
# HQP PROBLEM #
###############
# Solve the inverse dynamics problem with TSID
self.solve_HQP_problem(self.t)
# Time incrementation
self.t += dt
###########
# DISPLAY #
###########
# Refresh Gepetto Viewer
solo.display(self.qtsid)
return self.tau
def update_state(self, qtsid, vtsid):
"""Update TSID's internal state.
Currently we directly use the state of the simulator to perform the inverse dynamics
Args:
qtsid (19x1 array): the position/orientation of the trunk and angular position of actuators
vtsid (18x1 array): the linear/angular velocity of the trunk and angular velocity of actuators
"""
self.qtsid = qtsid.copy()
self.vtsid = vtsid.copy()
return 0
def update_footsteps(self, interface, fsteps):
""" Update desired location of footsteps using information coming from the footsteps planner
Args:
interface (object): Interface object of the control loop
fsteps (20x13): duration of each phase of the gait sequence (first column)
and desired location of footsteps for these phases (other columns)
"""
self.footsteps = np.zeros((2, 4))
for i in range(4):
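            # Index of the first gait phase with a valid (non-zero, non-NaN) desired footstep for this foot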
index = next((idx for idx, val in np.ndenumerate(fsteps[:, 3*i+1]) if ((not (val==0)) and (not np.isnan(val)))), [-1])[0]
pos_tmp = np.array(interface.oMl * (np.array([fsteps[index, (1+i*3):(4+i*3)]]).transpose()))
self.footsteps[:, i] = pos_tmp[0:2, 0]
return 0
def update_ref_forces(self, interface):
""" Update the reference contact forces that TSID should try to apply on the ground
Args:
interface (object): Interface object of the control loop
"""
for j, i_foot in enumerate([0, 1, 2, 3]):
self.contacts[i_foot].setForceReference((self.w_reg_f * interface.oMl.rotation @ self.f_applied[3*j:3*(j+1)]).T)
return 0
def update_tasks(self, k_simu, k_loop, looping, interface, gait, ftps_Ids_deb):
""" Update TSID tasks (feet tracking, contacts, force tracking)
Args:
k_simu (int): number of TSID time steps since the start of the simulation
k_loop (int): number of TSID time steps since the start of the current gait period
looping (int): number of TSID time steps in one period of gait
interface (object): Interface object of the control loop
gait (20x5 array): contains information about the contact sequence with 1s and 0s
fsteps (20x13): duration of each phase of the gait sequence (first column)
and desired location of footsteps for these phases (other columns)
"""
# Update the foot tracking tasks
self.update_feet_tasks(k_loop, gait, looping, interface, ftps_Ids_deb)
# Index of the first blank line in the gait matrix
index = next((idx for idx, val in np.ndenumerate(gait[:, 0]) if (((val==0)))), [-1])[0]
# Check status of each foot
for i_foot in range(4):
# If foot entered swing phase
if (k_loop % self.k_mpc == 0) and (gait[0, i_foot+1] == 0) and (gait[index-1, i_foot+1] == 1):
# Disable contact
self.invdyn.removeRigidContact(self.foot_frames[i_foot], 0.0)
self.contacts_order.remove(i_foot)
# Enable foot tracking task
self.invdyn.addMotionTask(self.feetTask[i_foot], self.w_foot, 1, 0.0)
# If foot entered stance phase
if (k_loop % self.k_mpc == 0) and (gait[0, i_foot+1] == 1) and (gait[index-1, i_foot+1] == 0):
# Update the position of contacts
self.pos_foot.translation = interface.o_feet[:, i_foot]
self.pos_contact[i_foot] = self.pos_foot.translation.transpose()
self.memory_contacts[:, i_foot] = interface.o_feet[0:2, i_foot]
self.feetGoal[i_foot].translation = interface.o_feet[:, i_foot].transpose()
self.contacts[i_foot].setReference(self.pos_foot)
self.goals[:, i_foot] = interface.o_feet[:, i_foot].transpose()
if not ((k_loop == 0) and (k_simu < looping)): # If it is not the first gait period
# Enable contact
self.invdyn.addRigidContact(self.contacts[i_foot], self.w_forceRef)
self.contacts_order.append(i_foot)
# Disable foot tracking task
self.invdyn.removeTask("foot_track_" + str(i_foot), 0.0)
return 0
def solve_HQP_problem(self, t):
""" Solve the QP problem by calling TSID's solver
Args:
t (float): time elapsed since the start of the simulation
"""
# Resolution of the HQP problem
HQPData = self.invdyn.computeProblemData(t, self.qtsid, self.vtsid)
self.sol = self.solver.solve(HQPData)
# Torques, accelerations, velocities and configuration computation
self.tau_ff = self.invdyn.getActuatorForces(self.sol)
self.fc = self.invdyn.getContactForces(self.sol)
self.ades = self.invdyn.getAccelerations(self.sol)
if self.enable_hybrid_control:
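            # With hybrid control, TSID integrates its own desired accelerations to propagate an
            # internal reference state instead of relying only on the measured state of the robot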
self.vtsid += self.ades * dt
self.qtsid = pin.integrate(self.model, self.qtsid, self.vtsid * dt)
# Check for NaN value in the output torques (means error during solving process)
if np.any(np.isnan(self.tau_ff)):
self.error = True
self.tau = np.zeros((12, 1))
else:
# Torque PD controller
P = 3.0
D = 0.3
if self.enable_hybrid_control:
torques12 = self.tau_ff + P * (self.qtsid[7:] - self.qmes[7:]) + D * (self.vtsid[6:] - self.vmes[6:])
else:
torques12 = self.tau_ff
# Saturation to limit the maximal torque
t_max = 2.5
# clip is faster than np.maximum(a_min, np.minimum(a, a_max))
            self.tau = np.clip(torques12, -t_max, t_max)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------
# Copyright 2019-2020 Airinnova AB and the FramAT authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------
# Author: <NAME>
"""
Assembly
"""
import numpy as np
import scipy.sparse as sparse
from ._element import Element
from ._log import logger
from ._util import enumerate_with_step
def create_system_matrices(m):
"""
Assemble global tensors
* :K: global stiffness matrix
* :M: global mass matrix
* :F: global load vector
* :B: constraint matrix
"""
create_main_tensors(m)
create_bc_matrices(m)
def create_main_tensors(m):
"""
Create system tensors K, M and F
The stiffness matrix K and the mass matrix M are created as sparse matrices
"""
r = m.results
abm = m.results.get('mesh').get('abm')
ndof_total = abm.ndofs()
rows = empty(np.uint16)
cols = empty(np.uint16)
data_K = empty(np.float_)
data_M = empty(np.float_)
F = np.zeros((ndof_total, 1), dtype=np.float_)
idx_start_beam = 0
for i, mbeam in enumerate(m.iter('beam')):
for k, abstr_elem in enumerate_with_step(abm.beams[i].values(), start=idx_start_beam, step=6):
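            # idxs spans the 12 dofs of the current element (2 nodes x 6 dofs); repeating/tiling them
            # yields the row/column (COO) indices of its dense 12x12 stiffness and mass blocks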
idxs = np.arange(k, k+12, 1, dtype=np.uint16)
rows = np.append(rows, np.repeat(idxs, 12))
cols = np.append(cols, np.tile(idxs, 12))
phys_elem = Element.from_abstract_element(abstr_elem)
data_K = np.append(data_K, phys_elem.stiffness_matrix_glob.flatten())
data_M = np.append(data_M, phys_elem.mass_matrix_glob.flatten())
F[k:k+12] += phys_elem.load_vector_glob
idx_start_beam += abm.ndofs_beam(i)
K = sparse_matrix(data_K, rows, cols)
M = sparse_matrix(data_M, rows, cols)
logger.info(f"System matrix size: {K.size} elements ({K.size/ndof_total**2:.2%} density)")
rtensors = r.set_feature('tensors')
rtensors.set('K', K)
rtensors.set('M', M)
rtensors.set('F', F)
def create_bc_matrices(m):
"""Assemble the constraint matrix B"""
r = m.results
abm = r.get('mesh').get('abm')
ndofs = abm.ndofs()
mbc = m.get('bc')
B_tot = np.array([])
# ----- Fix DOFs -----
for fix in mbc.iter('fix'):
num_node = abm.glob_nums[fix['node']]
B = fix_dof(num_node, ndofs, fix['fix'])
B_tot = np.vstack((B_tot, B)) if B_tot.size else B
# ----- Multipoint constraints (MPC) -----
for con in mbc.iter('connect'):
uid1, uid2 = con['node1'], con['node2']
num_node1 = abm.glob_nums[uid1]
num_node2 = abm.glob_nums[uid2]
x1 = abm.get_point_by_uid(uid1)
x2 = abm.get_point_by_uid(uid2)
B = connect(x1, x2, num_node1, num_node2, ndofs, con['fix'])
B_tot = np.vstack((B_tot, B)) if B_tot.size else B
m.results.get('tensors').set('B', B_tot)
def fix_dof(node_number, total_ndof, dof_constraints):
"""
Return part of constraint matrix B for fixed degrees of freedom
Note:
* Only non-zero rows are returned. If, say, three dof are fixed, then
B will have size 3xndof
Args:
:node_number: node_number
:total_ndof: total number of degrees of freedom
:dof_constraints: list with dofs to be fixed
"""
B = np.array([])
pos_dict = {'ux': 0, 'uy': 1, 'uz': 2, 'tx': 3, 'ty': 4, 'tz': 5}
for constraint in dof_constraints:
if constraint == 'all':
B = np.zeros((6, total_ndof))
B[0:6, 6*node_number:6*node_number+6] = np.eye(6)
break
else:
pos = pos_dict[constraint]
B_row = np.zeros((1, total_ndof))
B_row[0, 6*node_number+pos] = 1
            B = np.vstack((B, B_row))
#!/usr/bin/env python3
"""
SCRIPT: plot_semivariogram.py
Script for plotting empirical and fitted semivariograms based on data from
procOBA_NWP or procOBA_Sat, plus fit_semivariogram.py. For interactive
use.
REQUIREMENTS:
* Python 3
* Matplotlib
* Numpy
REVISION HISTORY:
20 Nov 2020: <NAME>. Initial specification.
"""
# Standard library
import configparser
import os
import sys
# Other libraries
import matplotlib.pyplot as plt
import numpy as np
import semivar
#------------------------------------------------------------------------------
def usage():
"""Print usage statement to standard out."""
print("Usage: %s CONFIGFILE PARAMFILE" %(sys.argv[0]))
print(" CONFIG is config file for this script")
print(" PARAMFILE contains best-fit parameters from fit_semivariogram.py")
#------------------------------------------------------------------------------
def read_param_file(paramfile):
"""Reads fitted parameters for semivariogram for plotting."""
lines = open(paramfile, "r").readlines()
sigma2_gage = None
sigma2_back = None
L_back = None
for line in lines:
key, value = line.split(":")
value = float(value)
if key == "SIGMA2_obs":
sigma2_gage = value
elif key == "SIGMA2_back":
sigma2_back = value
elif key == "L_back":
L_back = value
return sigma2_gage, sigma2_back, L_back
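# A PARAMFILE produced by fit_semivariogram.py is assumed to contain "key: value" lines such as
# (values below are purely illustrative):
#   SIGMA2_obs: 0.25
#   SIGMA2_back: 1.10
#   L_back: 150.0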
#------------------------------------------------------------------------------
# Check command line
if len(sys.argv) != 3:
print("[ERR] Bad command line arguments!")
usage()
sys.exit(1)
# Read config file
cfgfile = sys.argv[1]
if not os.path.exists(cfgfile):
print("[ERR] Config file %s does not exist!" %(cfgfile))
sys.exit(1)
config = configparser.ConfigParser()
config.read(cfgfile)
vario_filename, max_distance = semivar.read_input_section_cfg(config)
function_type = semivar.read_fit_section_cfg(config)
title, xlabel, ylabel, oblabel, bglabel = \
semivar.read_plot_section_cfg(config)
# Get the param file
paramfile = sys.argv[2]
if not os.path.exists(paramfile):
print("[ERR] Paramfile %s does not exist!" %(paramfile))
sys.exit(1)
# Read the datafile
distvector, variovector, samplesize = \
semivar.readdata(vario_filename, max_distance)
# Read the paramfile
sigma2_gage, sigma2_back, L_back = read_param_file(paramfile)
popt = [sigma2_gage, sigma2_back, L_back]
# Plot the semivariogram
distvector_tmp = np.array([0])
distvector = np.concatenate((distvector_tmp, distvector))
variovector_tmp = np.array([np.nan])
# -*- coding: utf-8 -*-
"""
TODO: Please check readme.txt file first!
--
This Python2.7 program is to reproduce Table 2, 3, 4, and 5.
"""
import os
import sys
import pickle
import numpy as np
import multiprocessing
from itertools import product
from numpy.random import randint
try:
import sparse_module
try:
from sparse_module import wrap_head_tail_bisearch
except ImportError:
print('cannot find wrap_head_tail_bisearch method in sparse_module')
sparse_module = None
exit(0)
except ImportError:
print('\n'.join([
'cannot find the module: sparse_module',
'try run: \'python setup.py build_ext --inplace\' first! ']))
def expit(x):
"""
expit function. 1 /(1+exp(-x)). quote from Scipy:
The expit function, also known as the logistic function,
is defined as expit(x) = 1/(1+exp(-x)).
It is the inverse of the logit function.
expit is also known as logistic. Please see logistic
:param x: np.ndarray
:return: 1/(1+exp(-x)).
"""
out = np.zeros_like(x)
posi = np.where(x > 0.0)
nega = np.where(x <= 0.0)
out[posi] = 1. / (1. + np.exp(-x[posi]))
exp_x = np.exp(x[nega])
out[nega] = exp_x / (1. + exp_x)
return out
def logistic_predict(x, wt):
"""
To predict the probability for sample xi. {+1,-1}
:param x: (n,p) dimension, where p is the number of features.
:param wt: (p+1,) dimension, where wt[p] is the intercept.
:return: (n,1) dimension of predict probability of positive class
and labels.
"""
n, p = x.shape
pred_prob = expit(np.dot(x, wt[:p]) + wt[p])
pred_y = np.ones(n)
pred_y[pred_prob < 0.5] = -1.
return pred_prob, pred_y
def log_logistic(x):
""" return log( 1/(1+exp(-x)) )"""
out = np.zeros_like(x)
posi = np.where(x > 0.0)
nega = np.where(x <= 0.0)
out[posi] = -np.log(1. + np.exp(-x[posi]))
out[nega] = x[nega] - np.log(1. + np.exp(x[nega]))
return out
def logit_loss_grad_bl(x_tr, y_tr, wt, l2_reg, cp, cn):
"""
Calculate the balanced loss and gradient of the logistic function.
:param x_tr: (n,p), where p is the number of features.
:param y_tr: (n,), where n is the number of labels.
:param wt: current model. wt[-1] is the intercept.
:param l2_reg: regularization to avoid overfitting.
:param cp:
:param cn:
:return: {+1,-1} Logistic (val,grad) on training samples.
"""
assert len(wt) == (x_tr.shape[1] + 1)
c, n, p = wt[-1], x_tr.shape[0], x_tr.shape[1]
posi_idx = np.where(y_tr > 0) # corresponding to positive labels.
nega_idx = np.where(y_tr < 0) # corresponding to negative labels.
grad = np.zeros_like(wt)
wt = wt[:p]
yz = y_tr * (np.dot(x_tr, wt) + c)
z = expit(yz)
loss = -cp * np.sum(log_logistic(yz[posi_idx]))
loss += -cn * np.sum(log_logistic(yz[nega_idx]))
loss = loss / n + .5 * l2_reg * np.dot(wt, wt)
bl_y_tr = np.zeros_like(y_tr)
bl_y_tr[posi_idx] = cp * np.asarray(y_tr[posi_idx], dtype=float)
bl_y_tr[nega_idx] = cn * np.asarray(y_tr[nega_idx], dtype=float)
z0 = (z - 1) * bl_y_tr # z0 = (z - 1) * y_tr
grad[:p] = np.dot(x_tr.T, z0) / n + l2_reg * wt
grad[-1] = z0.sum() # do not need to regularize the intercept.
return loss, grad
def logit_loss_bl(x_tr, y_tr, wt, l2_reg, cp, cn):
"""
    Calculate the balanced loss of the logistic function.
:param x_tr: (n,p), where p is the number of features.
:param y_tr: (n,), where n is the number of labels.
:param wt: current model. wt[-1] is the intercept.
:param l2_reg: regularization to avoid overfitting.
:param cp:
:param cn:
    :return: the {+1,-1} balanced logistic loss on the training samples.
"""
assert len(wt) == (x_tr.shape[1] + 1)
c, n, p = wt[-1], x_tr.shape[0], x_tr.shape[1]
posi_idx = np.where(y_tr > 0) # corresponding to positive labels.
nega_idx = np.where(y_tr < 0) # corresponding to negative labels.
wt = wt[:p]
yz = y_tr * (np.dot(x_tr, wt) + c)
loss = -cp * np.sum(log_logistic(yz[posi_idx]))
loss += -cn * np.sum(log_logistic(yz[nega_idx]))
loss = loss / n + .5 * l2_reg * np.dot(wt, wt)
return loss
def algo_head_tail_bisearch(
edges, x, costs, g, root, s_low, s_high, max_num_iter, verbose):
""" This is the wrapper of head/tail-projection proposed in [2].
:param edges: edges in the graph.
:param x: projection vector x.
:param costs: edge costs in the graph.
:param g: the number of connected components.
:param root: root of subgraph. Usually, set to -1: no root.
:param s_low: the lower bound of the sparsity.
:param s_high: the upper bound of the sparsity.
:param max_num_iter: the maximum number of iterations used in
binary search procedure.
:param verbose: print out some information.
:return: 1. the support of the projected vector
2. the projected vector
"""
prizes = x * x
# to avoid too large upper bound problem.
if s_high >= len(prizes) - 1:
s_high = len(prizes) - 1
re_nodes = wrap_head_tail_bisearch(
edges, prizes, costs, g, root, s_low, s_high, max_num_iter, verbose)
proj_w = np.zeros_like(x)
proj_w[re_nodes[0]] = x[re_nodes[0]]
return re_nodes[0], proj_w
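# Minimal usage sketch (hypothetical 3-node chain graph; edges/costs use the same format as the
# arrays built in get_single_data below):
#   edges = np.asarray([[0, 1], [1, 2]], dtype=int)
#   costs = np.asarray([1., 1.], dtype=np.float64)
#   nodes, x_proj = algo_head_tail_bisearch(edges, x, costs, 1, -1, 2, 3, 50, 0)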
def algo_graph_sto_iht_backtracking(
x_tr, y_tr, w0, max_epochs, s, edges, costs, num_blocks, lambda_,
g=1, root=-1, gamma=0.1, proj_max_num_iter=50, verbose=0):
np.random.seed() # do not forget it.
w_hat = np.copy(w0)
(m, p) = x_tr.shape
# if the block size is too large. just use single block
b = int(m) / int(num_blocks)
np_ = np.sum(y_tr == 1)
nn_ = np.sum(y_tr == -1)
cp = float(nn_) / float(len(y_tr))
cn = float(np_) / float(len(y_tr))
# graph projection para
h_low = int((len(w_hat) - 1) / 2)
h_high = int(h_low * (1. + gamma))
t_low = int(s)
t_high = int(s * (1. + gamma))
for epoch_i in range(max_epochs):
for ind, _ in enumerate(range(num_blocks)):
ii = randint(0, num_blocks)
block = range(b * ii, b * (ii + 1))
x_tr_b, y_tr_b = x_tr[block, :], y_tr[block]
loss_sto, grad_sto = logit_loss_grad_bl(
x_tr=x_tr_b, y_tr=y_tr_b, wt=w_hat,
l2_reg=lambda_, cp=cp, cn=cn)
# edges, x, costs, g, root, s_low, s_high, max_num_iter, verbose
h_nodes, p_grad = algo_head_tail_bisearch(
edges, grad_sto[:p], costs, g, root, h_low, h_high,
proj_max_num_iter, verbose)
p_grad = np.append(p_grad, grad_sto[-1])
fun_val_right = loss_sto
tmp_num_iter, ad_step, beta = 0, 1.0, 0.8
reg_term = np.linalg.norm(p_grad) ** 2.
while tmp_num_iter < 20:
x_tmp = w_hat - ad_step * p_grad
fun_val_left = logit_loss_bl(
x_tr=x_tr_b, y_tr=y_tr_b, wt=x_tmp,
l2_reg=lambda_, cp=cp, cn=cn)
if fun_val_left > fun_val_right - ad_step / 2. * reg_term:
ad_step *= beta
else:
break
tmp_num_iter += 1
bt_sto = np.zeros_like(w_hat)
bt_sto[:p] = w_hat[:p] - ad_step * p_grad[:p]
t_nodes, proj_bt = algo_head_tail_bisearch(
edges, bt_sto[:p], costs, g, root, t_low, t_high,
proj_max_num_iter, verbose)
w_hat[:p] = proj_bt[:p]
w_hat[p] = w_hat[p] - ad_step * grad_sto[p] # intercept.
return w_hat
def algo_sto_iht_backtracking(
x_tr, y_tr, w0, max_epochs, s, num_blocks, lambda_):
np.random.seed() # do not forget it.
w_hat = w0
(m, p) = x_tr.shape
b = int(m) / int(num_blocks)
np_ = np.sum(y_tr == 1)
nn_ = np.sum(y_tr == -1)
cp = float(nn_) / float(len(y_tr))
cn = float(np_) / float(len(y_tr))
for epoch_i in range(max_epochs):
for ind, _ in enumerate(range(num_blocks)):
ii = randint(0, num_blocks)
block = range(b * ii, b * (ii + 1))
x_tr_b, y_tr_b = x_tr[block, :], y_tr[block]
loss_sto, grad_sto = logit_loss_grad_bl(
x_tr=x_tr_b, y_tr=y_tr_b, wt=w_hat,
l2_reg=lambda_, cp=cp, cn=cn)
fun_val_right = loss_sto
tmp_num_iter, ad_step, beta = 0, 1.0, 0.8
reg_term = np.linalg.norm(grad_sto) ** 2.
while tmp_num_iter < 20:
x_tmp = w_hat - ad_step * grad_sto
fun_val_left = logit_loss_bl(
x_tr=x_tr_b, y_tr=y_tr_b, wt=x_tmp,
l2_reg=lambda_, cp=cp, cn=cn)
if fun_val_left > fun_val_right - ad_step / 2. * reg_term:
ad_step *= beta
else:
break
tmp_num_iter += 1
bt_sto = w_hat - ad_step * grad_sto
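            # Hard-thresholding step: zero out the (p - s) smallest-magnitude entries of the
            # gradient step, keeping roughly the s largest weights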
bt_sto[np.argsort(np.abs(bt_sto))[:p - s]] = 0.
w_hat = bt_sto
return w_hat
def run_single_test(para):
data, method_list, tr_idx, te_idx, s, num_blocks, lambda_, \
max_epochs, fold_i, subfold_i = para
from sklearn.metrics import roc_auc_score
from sklearn.metrics import accuracy_score
res = {_: dict() for _ in method_list}
tr_data = dict()
tr_data['x'] = data['x'][tr_idx, :]
tr_data['y'] = data['y'][tr_idx]
te_data = dict()
te_data['x'] = data['x'][te_idx, :]
te_data['y'] = data['y'][te_idx]
x_tr, y_tr = tr_data['x'], tr_data['y']
w0 = np.zeros(np.shape(x_tr)[1] + 1)
# --------------------------------
    # with a single block (b=1) this corresponds to plain IHT
w_hat = algo_sto_iht_backtracking(
x_tr, y_tr, w0, max_epochs, s, 1, lambda_)
x_te, y_te = te_data['x'], te_data['y']
pred_prob, pred_y = logistic_predict(x_te, w_hat)
posi_idx = np.nonzero(y_te == 1)[0]
nega_idx = np.nonzero(y_te == -1)[0]
print('-' * 80)
print('number of positive: %02d, missed: %02d '
'number of negative: %02d, missed: %02d ' %
(len(posi_idx), float(np.sum(pred_y[posi_idx] != 1)),
len(nega_idx), float(np.sum(pred_y[nega_idx] != -1))))
v1 = np.sum(pred_y[posi_idx] != 1) / float(len(posi_idx))
v2 = np.sum(pred_y[nega_idx] != -1) / float(len(nega_idx))
res['iht']['bacc'] = (v1 + v2) / 2.
res['iht']['acc'] = accuracy_score(y_true=y_te, y_pred=pred_y)
res['iht']['auc'] = roc_auc_score(y_true=y_te, y_score=pred_prob)
res['iht']['perf'] = res['iht']['bacc']
res['iht']['w_hat'] = w_hat
print('iht -- sparsity: %02d intercept: %.4f bacc: %.4f '
'non-zero: %.2f' %
(s, w_hat[-1], res['iht']['bacc'],
len(np.nonzero(w_hat)[0]) - 1))
# --------------------------------
w_hat = algo_sto_iht_backtracking(
x_tr, y_tr, w0, max_epochs, s, num_blocks, lambda_)
x_te, y_te = te_data['x'], te_data['y']
pred_prob, pred_y = logistic_predict(x_te, w_hat)
posi_idx = np.nonzero(y_te == 1)[0]
nega_idx = np.nonzero(y_te == -1)[0]
v1 = np.sum(pred_y[posi_idx] != 1) / float(len(posi_idx))
v2 = np.sum(pred_y[nega_idx] != -1) / float(len(nega_idx))
res['sto-iht']['bacc'] = (v1 + v2) / 2.
res['sto-iht']['acc'] = accuracy_score(y_true=y_te, y_pred=pred_y)
res['sto-iht']['auc'] = roc_auc_score(y_true=y_te, y_score=pred_prob)
res['sto-iht']['perf'] = res['sto-iht']['bacc']
res['sto-iht']['w_hat'] = w_hat
print('sto-iht -- sparsity: %02d intercept: %.4f bacc: %.4f '
'non-zero: %.2f' % (s, w_hat[-1], res['sto-iht']['bacc'],
len(np.nonzero(w_hat)[0]) - 1))
tr_data = dict()
tr_data['x'] = data['x'][tr_idx, :]
tr_data['y'] = data['y'][tr_idx]
te_data = dict()
te_data['x'] = data['x'][te_idx, :]
te_data['y'] = data['y'][te_idx]
x_tr, y_tr = tr_data['x'], tr_data['y']
w0 = np.zeros(np.shape(x_tr)[1] + 1)
# --------------------------------
    # with a single block (b=1) this corresponds to GraphIHT
w_hat = algo_graph_sto_iht_backtracking(
x_tr, y_tr, w0, max_epochs, s,
data['edges'], data['costs'], 1, lambda_)
x_te, y_te = te_data['x'], te_data['y']
pred_prob, pred_y = logistic_predict(x_te, w_hat)
posi_idx = np.nonzero(y_te == 1)[0]
nega_idx = np.nonzero(y_te == -1)[0]
v1 = np.sum(pred_y[posi_idx] != 1) / float(len(posi_idx))
v2 = np.sum(pred_y[nega_idx] != -1) / float(len(nega_idx))
res['graph-iht']['bacc'] = (v1 + v2) / 2.
res['graph-iht']['acc'] = accuracy_score(y_true=y_te, y_pred=pred_y)
res['graph-iht']['auc'] = roc_auc_score(y_true=y_te, y_score=pred_prob)
res['graph-iht']['perf'] = res['graph-iht']['bacc']
res['graph-iht']['w_hat'] = w_hat
print('graph-iht -- sparsity: %02d intercept: %.4f bacc: %.4f '
'non-zero: %.2f' % (s, w_hat[-1], res['graph-iht']['bacc'],
len(np.nonzero(w_hat)[0]) - 1))
# --------------------------------
w_hat = algo_graph_sto_iht_backtracking(
x_tr, y_tr, w0, max_epochs, s,
data['edges'], data['costs'], num_blocks, lambda_)
x_te, y_te = te_data['x'], te_data['y']
pred_prob, pred_y = logistic_predict(x_te, w_hat)
posi_idx = np.nonzero(y_te == 1)[0]
nega_idx = np.nonzero(y_te == -1)[0]
v1 = np.sum(pred_y[posi_idx] != 1) / float(len(posi_idx))
v2 = np.sum(pred_y[nega_idx] != -1) / float(len(nega_idx))
res['graph-sto-iht']['bacc'] = (v1 + v2) / 2.
res['graph-sto-iht']['acc'] = accuracy_score(y_true=y_te, y_pred=pred_y)
res['graph-sto-iht']['auc'] = roc_auc_score(y_true=y_te, y_score=pred_prob)
res['graph-sto-iht']['perf'] = res['graph-sto-iht']['bacc']
res['graph-sto-iht']['w_hat'] = w_hat
print('graph-sto-iht -- sparsity: %02d intercept: %.4f bacc: %.4f '
'non-zero: %.2f' % (s, w_hat[-1], res['graph-sto-iht']['bacc'],
len(np.nonzero(w_hat)[0]) - 1))
return s, num_blocks, lambda_, res, fold_i, subfold_i
def run_parallel_tr(
data, method_list, s_list, b_list, lambda_list, max_epochs, num_cpus,
fold_i):
# 5-fold cross validation
s_auc = {_: {(s, num_blocks, lambda_): 0.0
for (s, num_blocks, lambda_) in
product(s_list, b_list, lambda_list)} for _ in method_list}
s_acc = {_: {(s, num_blocks, lambda_): 0.0
for (s, num_blocks, lambda_) in
product(s_list, b_list, lambda_list)} for _ in method_list}
s_bacc = {_: {(s, num_blocks, lambda_): 0.0
for (s, num_blocks, lambda_) in
product(s_list, b_list, lambda_list)} for _ in method_list}
input_paras = []
for sf_ii in range(len(data['data_subsplits'][fold_i])):
s_tr = data['data_subsplits'][fold_i][sf_ii]['train']
s_te = data['data_subsplits'][fold_i][sf_ii]['test']
for s, num_block, lambda_ in product(s_list, b_list, lambda_list):
input_paras.append(
(data, method_list, s_tr, s_te, s, num_block, lambda_,
max_epochs, fold_i, sf_ii))
pool = multiprocessing.Pool(processes=num_cpus)
results_pool = pool.map(run_single_test, input_paras)
pool.close()
pool.join()
sub_res = dict()
for item in results_pool:
s, num_blocks, lambda_, re, fold_i, subfold_i = item
if subfold_i not in sub_res:
sub_res[subfold_i] = []
sub_res[subfold_i].append((s, num_blocks, lambda_, re))
for sf_ii in sub_res:
res = {_: dict() for _ in method_list}
for _ in method_list:
res[_]['s_list'] = s_list
res[_]['b_list'] = b_list
res[_]['lambda_list'] = lambda_list
res[_]['auc'] = dict()
res[_]['acc'] = dict()
res[_]['bacc'] = dict()
res[_]['perf'] = dict()
res[_]['w_hat'] = {(s, num_blocks, lambda_): None
for (s, num_blocks, lambda_) in
product(s_list, b_list, lambda_list)}
for s, num_blocks, lambda_, re in sub_res[sf_ii]:
for _ in method_list:
res[_]['auc'][(s, num_blocks, lambda_)] = re[_]['auc']
res[_]['acc'][(s, num_blocks, lambda_)] = re[_]['acc']
res[_]['bacc'][(s, num_blocks, lambda_)] = re[_]['bacc']
res[_]['perf'][(s, num_blocks, lambda_)] = re[_]['perf']
res[_]['w_hat'][(s, num_blocks, lambda_)] = re[_]['w_hat']
for _ in method_list:
for (s, num_blocks, lambda_) in \
product(s_list, b_list, lambda_list):
key_para = (s, num_blocks, lambda_)
s_auc[_][key_para] += res[_]['auc'][key_para]
s_acc[_][key_para] += res[_]['acc'][key_para]
s_bacc[_][key_para] += res[_]['bacc'][key_para]
# tune by balanced accuracy
s_star = dict()
for _ in method_list:
s_star[_] = min(s_bacc[_], key=s_bacc[_].get)
best_para = s_star[_]
print('tr %15s fold_%2d s: %02d b: %03d lambda: %.4f bacc: %.4f' %
(_, fold_i, best_para[0], best_para[1], best_para[2],
s_bacc[_][best_para] / 5.0))
return s_star, s_bacc
def run_parallel_te(
data, method_list, tr_idx, te_idx, s_list, b_list,
lambda_list, max_epochs, num_cpus):
res = {_: dict() for _ in method_list}
for _ in method_list:
res[_]['s_list'] = s_list
res[_]['b_list'] = b_list
res[_]['lambda_list'] = lambda_list
res[_]['auc'] = dict()
res[_]['acc'] = dict()
res[_]['bacc'] = dict()
res[_]['perf'] = dict()
res[_]['w_hat'] = {(s, num_blocks, lambda_): None
for (s, num_blocks, lambda_) in
product(s_list, b_list, lambda_list)}
input_paras = [(data, method_list, tr_idx, te_idx, s, num_block,
lambda_, max_epochs, '', '') for s, num_block, lambda_ in
product(s_list, b_list, lambda_list)]
pool = multiprocessing.Pool(processes=num_cpus)
results_pool = pool.map(run_single_test, input_paras)
pool.close()
pool.join()
for s, num_blocks, lambda_, re, fold_i, subfold_i in results_pool:
for _ in method_list:
res[_]['auc'][(s, num_blocks, lambda_)] = re[_]['auc']
res[_]['acc'][(s, num_blocks, lambda_)] = re[_]['acc']
res[_]['bacc'][(s, num_blocks, lambda_)] = re[_]['bacc']
res[_]['perf'][(s, num_blocks, lambda_)] = re[_]['perf']
res[_]['w_hat'][(s, num_blocks, lambda_)] = re[_]['w_hat']
return res
def get_single_data(trial_i, root_input):
import scipy.io as sio
cancer_related_genes = {
4288: 'MKI67', 1026: 'CDKN1A', 472: 'ATM', 7033: 'TFF3', 2203: 'FBP1',
7494: 'XBP1', 1824: 'DSC2', 1001: 'CDH3', 11200: 'CHEK2',
7153: 'TOP2A', 672: 'BRCA1', 675: 'BRCA2', 580: 'BARD1', 9: 'NAT1',
771: 'CA12', 367: 'AR', 7084: 'TK2', 5892: 'RAD51D', 2625: 'GATA3',
7155: 'TOP2B', 896: 'CCND3', 894: 'CCND2', 10551: 'AGR2',
3169: 'FOXA1', 2296: 'FOXC1'}
data = dict()
f_name = 'overlap_data_%02d.mat' % trial_i
re = sio.loadmat(root_input + f_name)['save_data'][0][0]
data['data_X'] = np.asarray(re['data_X'], dtype=np.float64)
data_y = [_[0] for _ in re['data_Y']]
data['data_Y'] = np.asarray(data_y, dtype=np.float64)
data_edges = [[_[0] - 1, _[1] - 1] for _ in re['data_edges']]
data['data_edges'] = np.asarray(data_edges, dtype=int)
data_pathways = [[_[0], _[1]] for _ in re['data_pathways']]
data['data_pathways'] = np.asarray(data_pathways, dtype=int)
data_entrez = [_[0] for _ in re['data_entrez']]
data['data_entrez'] = np.asarray(data_entrez, dtype=int)
data['data_splits'] = {i: dict() for i in range(5)}
data['data_subsplits'] = {i: {j: dict() for j in range(5)}
for i in range(5)}
for i in range(5):
xx = re['data_splits'][0][i][0][0]['train']
data['data_splits'][i]['train'] = [_ - 1 for _ in xx[0]]
xx = re['data_splits'][0][i][0][0]['test']
data['data_splits'][i]['test'] = [_ - 1 for _ in xx[0]]
for j in range(5):
xx = re['data_subsplits'][0][i][0][j]['train'][0][0]
data['data_subsplits'][i][j]['train'] = [_ - 1 for _ in xx[0]]
xx = re['data_subsplits'][0][i][0][j]['test'][0][0]
data['data_subsplits'][i][j]['test'] = [_ - 1 for _ in xx[0]]
re_path = [_[0] for _ in re['re_path_varInPath']]
data['re_path_varInPath'] = np.asarray(re_path)
re_path_entrez = [_[0] for _ in re['re_path_entrez']]
data['re_path_entrez'] = np.asarray(re_path_entrez)
re_path_ids = [_[0] for _ in re['re_path_ids']]
data['re_path_ids'] = np.asarray(re_path_ids)
re_path_lambdas = [_ for _ in re['re_path_lambdas'][0]]
data['re_path_lambdas'] = np.asarray(re_path_lambdas)
re_path_groups = [_[0][0] for _ in re['re_path_groups_lasso'][0]]
data['re_path_groups_lasso'] = np.asarray(re_path_groups)
re_path_groups_overlap = [_[0][0] for _ in re['re_path_groups_overlap'][0]]
data['re_path_groups_overlap'] = np.asarray(re_path_groups_overlap)
re_edge = [_[0] for _ in re['re_edge_varInGraph']]
data['re_edge_varInGraph'] = np.asarray(re_edge)
re_edge_entrez = [_[0] for _ in re['re_edge_entrez']]
data['re_edge_entrez'] = np.asarray(re_edge_entrez)
data['re_edge_groups_lasso'] = np.asarray(re['re_edge_groups_lasso'])
data['re_edge_groups_overlap'] = np.asarray(re['re_edge_groups_overlap'])
for method in ['re_path_re_lasso', 're_path_re_overlap',
're_edge_re_lasso', 're_edge_re_overlap']:
res = {fold_i: dict() for fold_i in range(5)}
for fold_ind, fold_i in enumerate(range(5)):
res[fold_i]['lambdas'] = re[method][0][fold_i]['lambdas'][0][0][0]
res[fold_i]['kidx'] = re[method][0][fold_i]['kidx'][0][0][0]
res[fold_i]['kgroups'] = re[method][0][fold_i]['kgroups'][0][0][0]
res[fold_i]['kgroupidx'] = re[method][0][fold_i]['kgroupidx'][0][0]
res[fold_i]['groups'] = re[method][0][fold_i]['groups'][0]
res[fold_i]['sbacc'] = re[method][0][fold_i]['sbacc'][0]
res[fold_i]['AS'] = re[method][0][fold_i]['AS'][0]
res[fold_i]['completeAS'] = re[method][0][fold_i]['completeAS'][0]
res[fold_i]['lstar'] = re[method][0][fold_i]['lstar'][0][0][0][0]
res[fold_i]['auc'] = re[method][0][fold_i]['auc'][0]
res[fold_i]['acc'] = re[method][0][fold_i]['acc'][0]
res[fold_i]['bacc'] = re[method][0][fold_i]['bacc'][0]
res[fold_i]['perf'] = re[method][0][fold_i]['perf'][0][0]
res[fold_i]['pred'] = re[method][0][fold_i]['pred']
res[fold_i]['Ws'] = re[method][0][fold_i]['Ws'][0][0]
res[fold_i]['oWs'] = re[method][0][fold_i]['oWs'][0][0]
res[fold_i]['nextGrad'] = re[method][0][fold_i]['nextGrad'][0]
data[method] = res
import networkx as nx
g = nx.Graph()
ind_pathways = {_: i for i, _ in enumerate(data['data_entrez'])}
all_nodes = {ind_pathways[_]: '' for _ in data['re_path_entrez']}
maximum_nodes, maximum_list_edges = set(), []
for edge in data['data_edges']:
if edge[0] in all_nodes and edge[1] in all_nodes:
g.add_edge(edge[0], edge[1])
isolated_genes = set()
maximum_genes = set()
for cc in nx.connected_component_subgraphs(g):
if len(cc) <= 5:
for item in list(cc):
isolated_genes.add(data['data_entrez'][item])
else:
for item in list(cc):
maximum_nodes = set(list(cc))
maximum_genes.add(data['data_entrez'][item])
maximum_nodes = np.asarray(list(maximum_nodes))
subgraph = nx.Graph()
for edge in data['data_edges']:
if edge[0] in maximum_nodes and edge[1] in maximum_nodes:
if edge[0] != edge[1]: # remove some self-loops
maximum_list_edges.append(edge)
subgraph.add_edge(edge[0], edge[1])
data['map_entrez'] = np.asarray([data['data_entrez'][_]
for _ in maximum_nodes])
data['edges'] = np.asarray(maximum_list_edges, dtype=int)
data['costs'] = np.asarray([1.] * len(maximum_list_edges),
dtype=np.float64)
data['x'] = data['data_X'][:, maximum_nodes]
data['y'] = data['data_Y']
data['nodes'] = np.asarray(range(len(maximum_nodes)), dtype=int)
data['cancer_related_genes'] = cancer_related_genes
for edge_ind, edge in enumerate(data['edges']):
uu = list(maximum_nodes).index(edge[0])
vv = list(maximum_nodes).index(edge[1])
data['edges'][edge_ind][0] = uu
data['edges'][edge_ind][1] = vv
method_list = ['re_path_re_lasso', 're_path_re_overlap',
're_edge_re_lasso', 're_edge_re_overlap']
found_set = {method: set() for method in method_list}
for method in method_list:
for fold_i in range(5):
best_lambda = data[method][fold_i]['lstar']
kidx = data[method][fold_i]['kidx']
re = list(data[method][fold_i]['lambdas']).index(best_lambda)
ws = data[method][fold_i]['oWs'][:, re]
for item in [kidx[_] for _ in np.nonzero(ws[1:])[0]]:
if item in cancer_related_genes:
found_set[method].add(cancer_related_genes[item])
data['found_related_genes'] = found_set
return data
def run_test(method_list, n_folds, max_epochs, s_list, b_list, lambda_list,
folding_i, num_cpus, root_input, root_output):
cv_res = {_: dict() for _ in range(n_folds)}
for fold_i in range(n_folds):
data = get_single_data(folding_i, root_input)
tr_idx = data['data_splits'][fold_i]['train']
te_idx = data['data_splits'][fold_i]['test']
f_data = data.copy()
tr_data = dict()
tr_data['x'] = f_data['x'][tr_idx, :]
tr_data['y'] = f_data['y'][tr_idx]
tr_data['data_entrez'] = f_data['data_entrez']
f_data['x'] = data['x']
# data normalization
x_mean = np.tile(np.mean(f_data['x'], axis=0), (len(f_data['x']), 1))
x_std = np.tile(np.std(f_data['x'], axis=0), (len(f_data['x']), 1))
f_data['x'] = np.nan_to_num(np.divide(f_data['x'] - x_mean, x_std))
f_data['edges'] = data['edges']
f_data['costs'] = data['costs']
s_star, s_bacc = run_parallel_tr(
f_data, method_list, s_list, b_list, lambda_list, max_epochs,
num_cpus, fold_i)
cv_res[fold_i]['s_list'] = s_list
cv_res[fold_i]['b_list'] = b_list
cv_res[fold_i]['lambda_list'] = lambda_list
for _ in method_list:
cv_res[fold_i][_] = dict()
cv_res[fold_i][_]['s_bacc'] = s_bacc[_]
cv_res[fold_i][_]['s_star'] = s_star[_]
cv_res[fold_i][_]['map_entrez'] = data['map_entrez']
res = run_parallel_te(
f_data, method_list, tr_idx, te_idx, s_list, b_list, lambda_list,
max_epochs, num_cpus)
for _ in method_list:
best_para = s_star[_]
print('%15s fold_%2d s: %02d b: %03d lambda: %.4f bacc: %.4f' %
(_, fold_i, best_para[0], best_para[1], best_para[2],
res[_]['bacc'][best_para]))
cv_res[fold_i][_]['auc'] = res[_]['auc'][best_para]
cv_res[fold_i][_]['acc'] = res[_]['acc'][best_para]
cv_res[fold_i][_]['bacc'] = res[_]['bacc'][best_para]
cv_res[fold_i][_]['perf'] = res[_]['bacc'][best_para]
cv_res[fold_i][_]['w_hat'] = res[_]['w_hat'][best_para]
for _ in method_list:
re = [cv_res[fold_i][_]['bacc'] for fold_i in range(5)]
print('%15s %.4f %.4f %.4f %.4f %.4f' %
(_, re[0], re[1], re[2], re[3], re[4]))
f_name = 'results_exp_bc_%02d_%02d.pkl' % (folding_i, max_epochs)
pickle.dump(cv_res, open(root_output + f_name, 'wb'))
def summarize_data(method_list, folding_list, num_iterations, root_output):
sum_data = dict()
cancer_related_genes = {
4288: 'MKI67', 1026: 'CDKN1A', 472: 'ATM', 7033: 'TFF3', 2203: 'FBP1',
7494: 'XBP1', 1824: 'DSC2', 1001: 'CDH3', 11200: 'CHEK2',
7153: 'TOP2A', 672: 'BRCA1', 675: 'BRCA2', 580: 'BARD1', 9: 'NAT1',
771: 'CA12', 367: 'AR', 7084: 'TK2', 5892: 'RAD51D', 2625: 'GATA3',
7155: 'TOP2B', 896: 'CCND3', 894: 'CCND2', 10551: 'AGR2',
3169: 'FOXA1', 2296: 'FOXC1'}
for trial_i in folding_list:
sum_data[trial_i] = dict()
f_name = root_output + 'results_exp_bc_%02d_%02d.pkl' % \
(trial_i, num_iterations)
data = pickle.load(open(f_name))
for method in method_list:
sum_data[trial_i][method] = dict()
auc, bacc, non_zeros_list, found_genes = [], [], [], []
for fold_i in data:
auc.append(data[fold_i][method]['auc'])
bacc.append(data[fold_i][method]['bacc'])
wt = data[fold_i][method]['w_hat']
non_zeros_list.append(len(np.nonzero(wt[:len(wt) - 1])[0]))
sum_data[trial_i][method]['w_hat_%d' % fold_i] = \
wt[:len(wt) - 1]
for element in np.nonzero(wt[:len(wt) - 1])[0]:
found_genes.append(
data[fold_i][method]['map_entrez'][element])
found_genes = [cancer_related_genes[_]
for _ in found_genes
if _ in cancer_related_genes]
sum_data[trial_i][method]['auc'] = auc
sum_data[trial_i][method]['bacc'] = bacc
sum_data[trial_i][method]['num_nonzeros'] = non_zeros_list
sum_data[trial_i][method]['found_genes'] = found_genes
return sum_data
def show_test(nonconvex_method_list, folding_list, max_epochs,
root_input, root_output, latex_flag=True):
sum_data = summarize_data(nonconvex_method_list,
folding_list, max_epochs, root_output)
all_data = pickle.load(open(root_input + 'overlap_data_summarized.pkl'))
for trial_i in sum_data:
for method in nonconvex_method_list:
all_data[trial_i]['re_%s' % method] = sum_data[trial_i][method]
for method in ['re_%s' % _ for _ in nonconvex_method_list]:
re = all_data[trial_i][method]['found_genes']
all_data[trial_i]['found_related_genes'][method] = set(re)
method_list = ['re_path_re_lasso', 're_path_re_overlap',
're_edge_re_lasso', 're_edge_re_overlap',
're_iht', 're_sto-iht', 're_graph-iht', 're_graph-sto-iht']
all_involved_genes = {method: set() for method in method_list}
for trial_i in sum_data:
for method in nonconvex_method_list:
all_data[trial_i]['re_%s' % method] = sum_data[trial_i][method]
for method in ['re_%s' % _ for _ in nonconvex_method_list]:
re = all_data[trial_i][method]['found_genes']
all_data[trial_i]['found_related_genes'][method] = set(re)
for method in ['re_path_re_lasso', 're_path_re_overlap',
're_edge_re_lasso', 're_edge_re_overlap']:
for fold_i in range(5):
                re = np.nonzero(all_data[trial_i][method]['ws_%d' % fold_i])
'''
Generate a webpage containing a table of sampled results
The first column is ground truth, and the rest of columns
are different methods
'''
import json, socket, pickle, random
from os.path import join, basename, isfile
from glob import glob
import numpy as np
from html4vision import Col, imagetable
# the line below is for running both in the current directory
# and the repo's root directory
import sys; sys.path.insert(0, '..'); sys.path.insert(0, '.')
from util import mkdir2
data_dir = '/data2/mengtial'
split = 'val'
annot_file = join(data_dir, 'Argoverse-HD/annotations', split + '.json')
vis_cfg = 'vis-th0.5'
out_dir = mkdir2(join(data_dir, 'Exp', 'Argoverse-HD', 'vis'))
out_name = 'single-vs-inf-gpus.html'
title = 'Single vs Infinite GPUs'
metric = 'AP'
link_video = True
n_show = 10
n_consec = None
align = True # align to the stride in each sequence
stride = 30
random.seed(0)
names = [
'Annotation',
'Single GPU',
'Infinite GPUs',
]
dirs = [
join(data_dir, 'Argoverse-HD', 'vis', split),
join(data_dir, 'Exp', 'Argoverse-HD', vis_cfg, 'rt_mrcnn50_nm_s0.5', split),
join(data_dir, 'Exp', 'Argoverse-HD', vis_cfg, 'srt_mrcnn50_nm_inf_s0.5', split),
]
for d in dirs:
print(f'python vis/make_videos_numbered.py "{d}" --fps 30')
srv_dir = data_dir
srv_port = 1234
host_name = socket.gethostname()
##
db = json.load(open(annot_file))
imgs = db['images']
seqs = db['sequences']
n_img = len(imgs)
if n_consec is None:
# naive random sampling
sel = random.choices(list(range(n_img)), k=n_show)
elif align:
# sample multiple sets of consecutive frames
start_idx = []
last_sid = None
for i, img in enumerate(imgs):
if img['sid'] != last_sid:
start_idx.append(i)
last_sid = img['sid']
start_idx = np.array(start_idx)
sel = random.choices(list(range(n_img//n_consec)), k=n_show//n_consec)
sel = np.array(sel)
sel *= n_consec
for i in range(len(sel)):
diff = sel[i] - start_idx
diff[diff < 0] = n_img
nearest = np.argmin(diff)
sel[i] -= (sel[i] - start_idx[nearest]) % stride
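        # illustrative values: with stride 30, if sel[i] == 47 and its sequence starts at index 30,
        # then (47 - 30) % 30 == 17 and sel[i] becomes 30, i.e. the sample is snapped back to the
        # nearest stride boundary within that sequence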
# it is possible to have duplicated sel, but ignore for now
consecs = np.arange(n_consec)
sel = [i + consecs for i in sel]
sel = np.array(sel).flatten().tolist()
else:
sel = random.choices(list(range(n_img//n_consec)), k=n_show//n_consec)
    consecs = np.arange(n_consec)
# loader for MNIST
import copy
import random
import numpy as np
import torch
import torch.utils.data as data_utils
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import Dataset
def select_dataset(dataset,batch_size,nsamples_train,nsamples_test,device,binary):
if dataset == 'mnist':
dl_train, dl_test = Load_MNIST(batch_size=batch_size, nsamples_train=nsamples_train,nsamples_test=nsamples_test,binary=binary)
elif dataset == 'circles':
dl_train, dl_test = Load_circles(batch_size=batch_size, nsamples=nsamples_train)
elif dataset == 'cifar10':
dl_train, dl_test = Load_CIFAR10(batch_size=batch_size, nsamples_train=nsamples_train,nsamples_test=nsamples_test)
else:
raise NotImplementedError("Selected dataset: {}, has not been implemented yet.".format(dataset))
return dl_train,dl_test
class Dataset_preload_with_label_prob(Dataset):
"""
    This one preloads all the images, and it always calculates the label probabilities as well;
    it is then up to the user whether to use one or the other.
    This is of course slower than just loading one of the two, but should not be that much slower.
    zero_center_label_probabilities: makes the label probabilities obey the constraint y.e = 0
    (i.e. the centered probabilities sum to zero), where y is the vector of label probabilities
    and e is the all-ones vector.
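    For illustration (assuming centering subtracts the per-class mean, which is one plausible
    reading of the constraint): a one-hot label over 10 classes [0, ..., 1, ..., 0] becomes
    [-0.1, ..., 0.9, ..., -0.1], which sums to zero.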
"""
def __init__(self, dataset, nsamples=-1,zero_center_label_probabilities=True,name='',binary=[]):
imgs = []
self.labels = []
self.islabeled = []
        self.zero_center_label_probabilities = zero_center_label_probabilities
self.name = name
for (img,label) in dataset:
imgs.append(img)
self.labels.append(label)
if len(imgs) == nsamples and (binary == []):
break
self.imgs = torch.stack(imgs)
if binary != []: #This is used if we only want to use a subset of the images in a binary classification
idx_used = []
assert len(binary) == 2
labels = copy.deepcopy(self.labels)
for i,group_i in enumerate(binary):
for class_i in group_i:
                    idxs = np.where(labels == np.float32(class_i))
# Copyright 2019 <NAME>
#
# This file is part of RfPy.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Functions to calculate piercing points from a velocity model and slowness
values, bin them, and produce CCP stacks using receiver functions.
"""
import os
import sys
import pickle
import numpy as np
import scipy as sp
from scipy.signal import hilbert
from rfpy import binning
import matplotlib.pyplot as plt
from matplotlib import cm
class CCPimage(object):
"""
A CCPimage object contains attributes and methods to produce
    Common Conversion Point (CCP) stacks for each of the
    three main phases (Ps, Pps and Pss) using radial-component
receiver functions. The object is used to project the stacks along
a linear profile, specified by start and end geographical coordinate
locations, which are subsequently averaged to produce a final
CCP image. The averaging can be done using a linear weighted sum,
or a phase-weighted sum. Methods should be used in the appropriate
sequence (see ``rfpy_ccp.py`` for details).
Note
----
By default, the object initializes with un-defined coordinate locations for
the profile. If not specified during initialization, make sure
they are specified later, before the other methods are used, e.g.
``ccpimage.xs_lat1 = 10.; ccpimage.xs_lon1 = 110.``, etc. Note also that the
default 1D velocity model may not be applicable to your region of
interest and a different model can be implemented during initialization
or later during processing.
Parameters
----------
coord_start : list
List of two floats corresponding to the (latitude, longitude)
pair for the start point of the profile
coord_end : list
List of two floats corresponding to the (latitude, longitude)
pair for the end point of the profile
weights : list
List of three floats with corresponding weights for the Ps, Pps
and Pss phases used during linear, weighted averaging
dep : :class:`~numpy.ndarray`
Array of depth values defining the 1D background seismic velocity model.
Note that the maximum depth defined here sets the maximum depth
in each of the CCP stacks and the final CCP image.
vp : :class:`~numpy.ndarray`
Array of Vp values defining the 1D background seismic velocity model
vpvs : float
Constant Vp/Vs ratio for the 1D model.
Other Parameters
----------------
radialRF : list
List of :class:`~obspy.core.Stream` objects containing the radial
receiver functions along the line. Each item in the list contains the
streams for one single station.
vs : :class:`~numpy.ndarray`
        Array of Vs values defining the 1D background seismic velocity model
xs_lat1 : float
Latitude of start point defining the linear profile.
xs_lon1 : float
Longitude of start point defining the linear profile.
xs_lat2 : float
Latitude of end point defining the linear profile.
xs_lon2 : float
Longitude of end point defining the linear profile.
is_ready_for_prep : boolean
Whether or not the object is ready for the method ``prep_data``
is_ready_for_prestack : boolean
Whether or not the object is ready for the method ``prestack``
is_ready_for_ccp : boolean
Whether or not the object is ready for the method ``ccp``
is_ready_for_gccp : boolean
Whether or not the object is ready for the method ``gccp``
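    Example
    -------
    Illustrative construction only (the coordinates below are placeholder values, not taken
    from the package documentation)::
        ccpimg = CCPimage(coord_start=[45.0, -75.0], coord_end=[46.0, -73.0])
        ccpimg.weights = [1., 3., -3.]   # default Ps/Pps/Pss weights, shown for clarity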
"""
def __init__(self, coord_start=[None, None], coord_end=[None, None],
weights=[1., 3., -3.],
dep=np.array([0., 4., 8., 14., 30., 35., 45., 110.]),
                 vp=np.array([4.0, 5.9, 6.2, 6.3, 6.8, 7.2, 8.0, 8.1])
# The dataset code has been adapted from:
# https://pytorch.org/tutorials/intermediate/torchvision_tutorial.html
# from https://github.com/pytorch/tutorials
# which has been distributed under the following license:
################################################################################
# BSD 3-Clause License
#
# Copyright (c) 2017, Pytorch contributors
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
################################################################################
# For the Avalanche data loader adaptation:
################################################################################
# Copyright (c) 2022 ContinualAI #
# Copyrights licensed under the MIT License. #
# See the accompanying LICENSE file for terms. #
# #
# Date: 21-03-2022 #
# Author: <NAME> #
# #
# E-mail: <EMAIL> #
# Website: www.continualai.org #
################################################################################
from pathlib import Path
from typing import Union
import numpy as np
import torch
from PIL import Image
from torchvision.datasets.folder import default_loader
from avalanche.benchmarks.datasets import (
SimpleDownloadableDataset,
default_dataset_location,
)
from avalanche.benchmarks.datasets.penn_fudan.penn_fudan_data import (
penn_fudan_data,
)
def default_mask_loader(mask_path):
return Image.open(mask_path)
class PennFudanDataset(SimpleDownloadableDataset):
"""
The Penn-Fudan Pedestrian detection and segmentation dataset
Adapted from the "TorchVision Object Detection Finetuning Tutorial":
https://pytorch.org/tutorials/intermediate/torchvision_tutorial.html
"""
def __init__(
self,
root: Union[str, Path] = None,
*,
transform=None,
loader=default_loader,
mask_loader=default_mask_loader,
download=True
):
"""
Creates an instance of the Penn-Fudan dataset.
:param root: The directory where the dataset can be found or downloaded.
Defaults to None, which means that the default location for
"pennfudanped" will be used.
:param transform: The transformation to apply to (img, annotations)
values.
:param loader: The image loader to use.
:param mask_loader: The mask image loader to use.
:param download: If True, the dataset will be downloaded if needed.
"""
if root is None:
root = default_dataset_location("pennfudanped")
self.imgs = None
self.masks = None
self.targets = None
self.transform = transform
self.loader = loader
self.mask_loader = mask_loader
super().__init__(
root,
penn_fudan_data[0],
penn_fudan_data[1],
download=download,
verbose=True,
)
self._load_dataset()
def _load_metadata(self):
# load all image files, sorting them to
# ensure that they are aligned
self.imgs = (self.root / "PennFudanPed" / "PNGImages").iterdir()
self.masks = (self.root / "PennFudanPed" / "PedMasks").iterdir()
self.imgs = list(sorted(self.imgs))
self.masks = list(sorted(self.masks))
self.targets = [self.make_targets(i) for i in range(len(self.imgs))]
return Path(self.imgs[0]).exists() and Path(self.masks[0]).exists()
def make_targets(self, idx):
# load images and masks
mask_path = self.masks[idx]
# note that we haven't converted the mask to RGB,
# because each color corresponds to a different instance
# with 0 being background
mask = self.mask_loader(mask_path)
# convert the PIL Image into a numpy array
mask = np.array(mask)
# instances are encoded as different colors
obj_ids = np.unique(mask)
# first id is the background, so remove it
obj_ids = obj_ids[1:]
# split the color-encoded mask into a set
# of binary masks
masks = mask == obj_ids[:, None, None]
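        # e.g. (illustrative) a mask whose pixels take the values {0, 1, 2} yields
        # obj_ids == [1, 2] and `masks` of shape (2, H, W), one binary mask per instance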
# get bounding box coordinates for each mask
num_objs = len(obj_ids)
boxes = []
for i in range(num_objs):
pos = np.where(masks[i])
            xmin = np.min(pos[1])
import numpy as np
import math
import copy
class MultipleLayer():
def __init__(self, lr=0.001, nb_eboch=100, hidden_layer_size=2, batch_size=-1):
self.batch_size = batch_size
self.lr = lr
self.nb_eboch = nb_eboch
self.W = []
self.V = []
self.hidden_layer_size = hidden_layer_size
def phi(self, x):
return 2.0 / (1.0 + np.exp(-x)) - 1.0
def phiPrime(self,x):
return np.multiply((1.0 + x),(1.0 - x)) / 2.0
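        # Note: phiPrime is applied to the activation output phi(x), not to the pre-activation;
        # for phi(x) = 2/(1+exp(-x)) - 1, dphi/dx = (1 + phi)(1 - phi)/2, which is the line above.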
def posneg(self, WX):
output = []
for t in WX.T:
out = []
for x in t:
if (x < 0):
out.append(-1)
else:
out.append(1)
output.append(out)
return output
def fit(self, X, Y): # X = (len(X[0]) + 1, n)
X = np.vstack([X, [1.0] * len(X[0])])
print(Y[:10].shape)
WHistory = []
eHistory = []
self.W = np.reshape(np.random.normal(0, 1, self.hidden_layer_size * len(X)), (self.hidden_layer_size, len(X)))
self.V = np.reshape(np.random.normal(0, 1, (self.hidden_layer_size+1) * Y.shape[1]), (Y.shape[1], self.hidden_layer_size+1))
for step in range(self.nb_eboch):
p = np.random.permutation(len(X[0]))
X = X[:,p]
Y = Y[p]
batchIndex_list = []
if (self.batch_size == -1):
batchIndex_list.append([0, len(X[0])])
else:
for i in range(int((len(X[0]) * 1.0) / self.batch_size)):
batchIndex_list.append([i * self.batch_size, (i + 1) * self.batch_size])
for batchIndex in batchIndex_list:
start, end = batchIndex
batch = X.T[start: end].T
Hstar = np.dot(self.W, batch) # size: (hls, len(X)) * (len(X), n) =(hls, n)
H = self.phi(Hstar) # size: (hls, n)
H = np.vstack([H, [1.0] * len(H[0])]) # Add bias
Ostar = np.dot(self.V, H) # size: (size Output, hls+bias) * (hls+bias, n) =(size Output, n)
O = self.phi(Ostar) # size: (hls, len(X)) * (len(X), n) =(1, n)
print(O.shape)
e = self.posneg(O) - Y[start:end]
#print(np.mean(O - Y.T[start:end].T))
deltaO = np.multiply((O - Y[start:end].T),self.phiPrime(O))
deltaH = np.multiply(np.dot(self.V.T, deltaO),self.phiPrime(H))
deltaH = deltaH[:-1,:]# Remove Bias row
deltaW = - self.lr * np.dot(deltaH, batch.T)
deltaV = - self.lr * np.dot(deltaO, H.T)
self.V += deltaV
self.W += deltaW
WHistory.append(copy.copy(self.W))
eHistory.append(np.mean(abs(e/2.0)))
return WHistory, eHistory
def predict(self, X):
X = np.vstack([X, [1] * len(X[0])])
Hstar = np.dot(self.W, X) # size: (hls, len(X)) * (len(X), n) =(hls, n)
H = self.phi(Hstar) # size: (hls, n)
H = np.vstack([H, [1] * len(H[0])]) # Add bias
Ostar = np.dot(self.V, H) # size: (1, hls) * (hls, n) =(1, n)
O = self.phi(Ostar) # size: (hls, len(X)) * (len(X), n) =(1, n)
return self.posneg(O)
class MultipleLayerG():
def __init__(self, lr=0.0001, nb_eboch=5, hidden_layer_size=2, batch_size=200):
self.batch_size = batch_size
self.lr = lr
self.nb_eboch = nb_eboch
self.W = []
self.V = []
self.hidden_layer_size = hidden_layer_size
def phi(self, x):
return 2.0 / (1.0 + np.exp(-x)) - 1.0
def phiPrime(self,x):
return np.multiply((1.0 + x),(1.0 - x)) / 2.0
def posneg(self, WX):
output = []
for t in WX.T:
out = []
for x in t:
if (x < 0):
out.append(-1)
else:
out.append(1)
output.append(out)
return output
def fit(self, X, Y): # X = (len(X[0]) + 1, n)
X = np.vstack([X, [1.0] * len(X[0])])
WHistory = []
eHistory = []
pHistory = []
self.W = np.reshape(np.random.normal(0, 1, self.hidden_layer_size * len(X)), (self.hidden_layer_size, len(X)))
self.V = np.reshape(np.random.normal(0, 1, (self.hidden_layer_size+1) * Y.shape[0]), (Y.shape[0], self.hidden_layer_size+1))
for step in range(self.nb_eboch):
# p = np.random.permutation(len(X[0]))
# X = X.T[p].T
# Y = Y[p]
batchIndex_list = []
if (self.batch_size == -1):
batchIndex_list.append([0, len(X[0])])
else:
for i in range(int((len(X[0]) * 1.0) / self.batch_size)):
batchIndex_list.append([i * self.batch_size, (i + 1) * self.batch_size])
for batchIndex in batchIndex_list:
start, end = batchIndex
batch = X.T[start: end].T
Hstar = np.dot(self.W, batch) # size: (hls, len(X)) * (len(X), n) =(hls, n)
H = self.phi(Hstar) # size: (hls, n)
H = np.vstack([H, [1.0] * len(H[0])]) # Add bias
Ostar = np.dot(self.V, H) # size: (size Output, hls+bias) * (hls+bias, n) =(size Output, n)
O = self.phi(Ostar) # size: (hls, len(X)) * (len(X), n) =(1, n)
e = O - Y.T[start:end].T
pHistory.append(O)
deltaO = np.multiply((O - Y.T[start:end].T),self.phiPrime(O))
deltaH = np.multiply(np.dot(self.V.T, deltaO),self.phiPrime(H))
deltaH = deltaH[:-1,:]# Remove Bias row
deltaW = - self.lr * np.dot(deltaH, batch.T)
deltaV = - self.lr * np.dot(deltaO, H.T)
self.V += deltaV
self.W += deltaW
WHistory.append(copy.copy(self.W))
eHistory.append(np.mean(abs(e/2.0)))
return WHistory, eHistory, pHistory
def predict(self, X):
X = np.vstack([X, [1] * len(X[0])])
Hstar = np.dot(self.W, X) # size: (hls, len(X)) * (len(X), n) =(hls, n)
H = self.phi(Hstar) # size: (hls, n)
H = np.vstack([H, [1] * len(H[0])]) # Add bias
Ostar = np.dot(self.V, H) # size: (1, hls) * (hls, n) =(1, n)
O = self.phi(Ostar) # size: (hls, len(X)) * (len(X), n) =(1, n)
return O
class MultipleLayerGN():
def __init__(self, lr=0.0001, nb_eboch=5, hidden_layer_size=2, batch_size=200):
self.batch_size = batch_size
self.lr = lr
self.nb_eboch = nb_eboch
self.W = []
self.V = []
self.hidden_layer_size = hidden_layer_size
def phi(self, x):
return 2.0 / (1.0 + np.exp(-x)) - 1.0
def phiPrime(self,x):
return np.multiply((1.0 + x),(1.0 - x)) / 2.0
def posneg(self, WX):
output = []
for t in WX.T:
out = []
for x in t:
if (x < 0):
out.append(-1)
else:
out.append(1)
output.append(out)
return output
def fit(self, X, Y, n): # X = (len(X[0]) + 1, n)
X1 = X[:]
X = np.vstack([X, [1.0] * len(X[0])])
WHistory = []
eHistory = []
pHistory = []
self.W = np.reshape(np.random.normal(0, 1, self.hidden_layer_size * len(X)), (self.hidden_layer_size, len(X)))
self.V = np.reshape(np.random.normal(0, 1, (self.hidden_layer_size+1) * Y.shape[0]), (Y.shape[0], self.hidden_layer_size+1))
for step in range(self.nb_eboch):
p = np.random.permutation(len(X[0]))
X1 = X1[:,p]
X = X.T[p].T
Y = Y[:,p]
batchIndex_list = []
if (self.batch_size == -1):
batchIndex_list.append([0, len(X[0])])
else:
for i in range(int((len(X[0]) * 1.0) / self.batch_size)):
batchIndex_list.append([i * self.batch_size, (i + 1) * self.batch_size])
for batchIndex in batchIndex_list:
start, end = batchIndex
batch = X[:,start: end]
batch = batch[:,0:n]
batchY = Y[:,start: end]
batchY = batchY[:, 0:n]
Hstar = np.dot(self.W, batch) # size: (hls, len(X)) * (len(X), n) =(hls, n)
H = self.phi(Hstar) # size: (hls, n)
H = np.vstack([H, [1.0] * len(H[0])]) # Add bias
Ostar = np.dot(self.V, H) # size: (size Output, hls+bias) * (hls+bias, n) =(size Output, n)
O = self.phi(Ostar) # size: (hls, len(X)) * (len(X), n) =(1, n)
deltaO = np.multiply((O - batchY),self.phiPrime(O))
deltaH = np.multiply(np.dot(self.V.T, deltaO),self.phiPrime(H))
deltaH = deltaH[:-1,:]# Remove Bias row
deltaW = - self.lr * np.dot(deltaH, batch.T)
                deltaV = - self.lr * np.dot(deltaO, H.T)
#!/usr/bin/env python3
"""
"""
import math
import numpy as np
import numpy.ma as ma
from astropy import units as u
from astropy.coordinates import SkyCoord, AltAz
from iminuit import Minuit
from scipy.optimize import minimize, least_squares
from scipy.stats import norm
from ctapipe.coordinates import (
NominalFrame,
TiltedGroundFrame,
GroundFrame,
project_to_ground,
)
from ctapipe.image import neg_log_likelihood, mean_poisson_likelihood_gaussian
from ctapipe.instrument import get_atmosphere_profile_functions
from ctapipe.containers import (
ReconstructedGeometryContainer,
ReconstructedEnergyContainer,
)
from ctapipe.reco.reco_algorithms import Reconstructor
from ctapipe.utils.template_network_interpolator import (
TemplateNetworkInterpolator,
TimeGradientInterpolator,
)
__all__ = ["ImPACTReconstructor", "energy_prior", "xmax_prior", "guess_shower_depth"]
def guess_shower_depth(energy):
"""
Simple estimation of depth of shower max based on the expected gamma-ray elongation
rate.
Parameters
----------
energy: float
Energy of the shower in TeV
Returns
-------
float: Expected depth of shower maximum
"""
x_max_exp = 300 + 93 * np.log10(energy)
return x_max_exp
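# Quick sanity check of the elongation-rate formula above (illustrative values):
# guess_shower_depth(1.0) == 300.0 and guess_shower_depth(10.0) == 393.0
# (units follow the X_max convention used elsewhere in this module, presumably g/cm^2).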
def energy_prior(energy, index=-1):
return -2 * np.log(energy ** index)
def xmax_prior(energy, xmax, width=100):
x_max_exp = guess_shower_depth(energy)
diff = xmax - x_max_exp
return -2 * np.log(norm.pdf(diff / width))
class ImPACTReconstructor(Reconstructor):
    """This class is an implementation of the impact_reco Monte Carlo
    Template based image fitting method from parsons14. This method uses a
    comparison of the predicted image from a library of image
templates to perform a maximum likelihood fit for the shower axis,
energy and height of maximum.
Because this application is computationally intensive the usual
advice to use astropy units for all quantities is ignored (as
these slow down some computations), instead units within the class
are fixed:
- Angular units in radians
- Distance units in metres
- Energy units in TeV
References
----------
.. [parsons14] <NAME>, Astroparticle Physics 56 (2014), pp. 26-34
"""
# For likelihood calculation we need the with of the
# pedestal distribution for each pixel
# currently this is not availible from the calibration,
# so for now lets hard code it in a dict
ped_table = {
"LSTCam": 2.8,
"NectarCam": 2.3,
"FlashCam": 2.3,
"CHEC": 0.5,
"DUMMY": 0,
}
spe = 0.5 # Also hard code single p.e. distribution width
def __init__(
self,
root_dir=".",
minimiser="minuit",
prior="",
template_scale=1.0,
xmax_offset=0,
use_time_gradient=False,
):
"""
Create a new instance of ImPACTReconstructor
"""
# First we create a dictionary of image template interpolators
# for each telescope type
self.root_dir = root_dir
self.priors = prior
self.minimiser_name = minimiser
self.file_names = {
"CHEC": ["GCT_05deg_ada.template.gz", "GCT_05deg_time.template.gz"],
"LSTCam": ["LST_05deg.template.gz", "LST_05deg_time.template.gz"],
"NectarCam": ["MST_05deg.template.gz", "MST_05deg_time.template.gz"],
"FlashCam": ["MST_xm_full.fits"],
}
# We also need a conversion function from height above ground to
# depth of maximum To do this we need the conversion table from CORSIKA
(
self.thickness_profile,
self.altitude_profile,
) = get_atmosphere_profile_functions("paranal", with_units=False)
# Next we need the position, area and amplitude from each pixel in the event
# making this a class member makes passing them around much easier
self.pixel_x, self.pixel_y = None, None
self.image, self.time = None, None
self.tel_types, self.tel_id = None, None
# We also need telescope positions
self.tel_pos_x, self.tel_pos_y = None, None
# And the peak of the images
self.peak_x, self.peak_y, self.peak_amp = None, None, None
self.hillas_parameters, self.ped = None, None
self.prediction = dict()
self.time_prediction = dict()
self.array_direction = None
self.array_return = False
self.nominal_frame = None
# For now these factors are required to fix problems in templates
self.template_scale = template_scale
self.xmax_offset = xmax_offset
self.use_time_gradient = use_time_gradient
def initialise_templates(self, tel_type):
"""Check if templates for a given telescope type has been initialised
and if not do it and add to the dictionary
Parameters
----------
tel_type: dictionary
Dictionary of telescope types in event
Returns
-------
boolean: Confirm initialisation
"""
for t in tel_type:
if tel_type[t] in self.prediction.keys() or tel_type[t] == "DUMMY":
continue
self.prediction[tel_type[t]] = TemplateNetworkInterpolator(
self.root_dir + "/" + self.file_names[tel_type[t]][0]
)
if self.use_time_gradient:
self.time_prediction[tel_type[t]] = TimeGradientInterpolator(
self.root_dir + "/" + self.file_names[tel_type[t]][1]
)
return True
def get_hillas_mean(self):
"""This is a simple function to find the peak position of each image
in an event which will be used later in the Xmax calculation. Peak is
found by taking the average position of the n hottest pixels in the
image.
"""
peak_x = np.zeros([len(self.pixel_x)]) # Create blank arrays for peaks
# rather than a dict (faster)
peak_y = np.zeros(peak_x.shape)
peak_amp = np.zeros(peak_x.shape)
# Loop over all tels to take weighted average of pixel
# positions This loop could maybe be replaced by an array
# operation by a numpy wizard
# Maybe a vectorize?
tel_num = 0
for hillas in self.hillas_parameters:
peak_x[tel_num] = hillas.x.to(u.rad).value # Fill up array
peak_y[tel_num] = hillas.y.to(u.rad).value
peak_amp[tel_num] = hillas.intensity
tel_num += 1
self.peak_x = peak_x # * unit # Add to class member
self.peak_y = peak_y # * unit
self.peak_amp = peak_amp
# This function would be useful elsewhere so probably be implemented in a
# more general form
def get_shower_max(self, source_x, source_y, core_x, core_y, zen):
"""Function to calculate the depth of shower maximum geometrically
under the assumption that the shower maximum lies at the
brightest point of the camera image.
Parameters
----------
source_x: float
Event source position in nominal frame
source_y: float
Event source position in nominal frame
core_x: float
Event core position in telescope tilted frame
core_y: float
Event core position in telescope tilted frame
zen: float
Zenith angle of event
Returns
-------
float: Depth of maximum of air shower
"""
# Calculate displacement of image centroid from source position (in
# rad)
disp = np.sqrt((self.peak_x - source_x) ** 2 + (self.peak_y - source_y) ** 2)
# Calculate impact parameter of the shower
impact = np.sqrt(
(self.tel_pos_x - core_x) ** 2 + (self.tel_pos_y - core_y) ** 2
)
# Distance above telescope is ratio of these two (small angle)
height = impact / disp
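        # illustrative numbers: an impact distance of 120 m and a centroid offset of 0.02 rad
        # give a height of ~6000 m above the telescope (small-angle approximation)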
        weight = np.power(self.peak_amp, 0.0) # exponent 0.0 means the average is currently unweighted; 0.5 would weight by sqrt amplitude
# sqrt may not be the best option...
# Take weighted mean of estimates
mean_height = np.sum(height * weight) / np.sum(weight)
# This value is height above telescope in the tilted system,
# we should convert to height above ground
mean_height *= np.cos(zen)
# Add on the height of the detector above sea level
mean_height += 2150
if mean_height > 100000 or np.isnan(mean_height):
mean_height = 100000
        # Look up this height in the depth tables, then convert Hmax to Xmax
x_max = self.thickness_profile(mean_height)
# Convert to slant depth
x_max /= np.cos(zen)
return x_max + self.xmax_offset
@staticmethod
def rotate_translate(pixel_pos_x, pixel_pos_y, x_trans, y_trans, phi):
"""
Function to perform rotation and translation of pixel lists
Parameters
----------
pixel_pos_x: ndarray
Array of pixel x positions
pixel_pos_y: ndarray
            Array of pixel y positions
x_trans: float
Translation of position in x coordinates
y_trans: float
Translation of position in y coordinates
phi: float
Rotation angle of pixels
Returns
-------
ndarray,ndarray: Transformed pixel x and y coordinates
"""
cosine_angle = np.cos(phi[..., np.newaxis])
sin_angle = np.sin(phi[..., np.newaxis])
pixel_pos_trans_x = (x_trans - pixel_pos_x) * cosine_angle - (
y_trans - pixel_pos_y
) * sin_angle
pixel_pos_trans_y = (pixel_pos_x - x_trans) * sin_angle + (
pixel_pos_y - y_trans
) * cosine_angle
return pixel_pos_trans_x, pixel_pos_trans_y
def image_prediction(self, tel_type, energy, impact, x_max, pix_x, pix_y):
"""Creates predicted image for the specified pixels, interpolated
from the template library.
Parameters
----------
tel_type: string
Telescope type specifier
energy: float
Event energy (TeV)
impact: float
            Impact distance of shower (metres)
x_max: float
Depth of shower maximum (num bins from expectation)
pix_x: ndarray
X coordinate of pixels
pix_y: ndarray
Y coordinate of pixels
Returns
-------
ndarray: predicted amplitude for all pixels
"""
return self.prediction[tel_type](energy, impact, x_max, pix_x, pix_y)
    def predict_time(self, tel_type, energy, impact, x_max):
        """Creates the predicted time-gradient response for a telescope type,
        interpolated from the time-gradient template library.
Parameters
----------
tel_type: string
Telescope type specifier
energy: float
Event energy (TeV)
impact: float
            Impact distance of shower (metres)
x_max: float
Depth of shower maximum (num bins from expectation)
Returns
-------
        ndarray: predicted time-gradient parameters
"""
return self.time_prediction[tel_type](energy, impact, x_max)
def get_likelihood(
self,
source_x,
source_y,
core_x,
core_y,
energy,
x_max_scale,
goodness_of_fit=False,
):
"""Get the likelihood that the image predicted at the given test
position matches the camera image.
Parameters
----------
source_x: float
Source position of shower in the nominal system (in deg)
source_y: float
Source position of shower in the nominal system (in deg)
core_x: float
Core position of shower in tilted telescope system (in m)
core_y: float
Core position of shower in tilted telescope system (in m)
energy: float
Shower energy (in TeV)
x_max_scale: float
Scaling factor applied to geometrically calculated Xmax
goodness_of_fit: boolean
Determines whether expected likelihood should be subtracted from result
Returns
-------
float: Likelihood the model represents the camera image at this position
"""
# First we add units back onto everything. Currently not
# handled very well, maybe in future we could just put
# everything in the correct units when loading in the class
# and ignore them from then on
zenith = (np.pi / 2) - self.array_direction.alt.to(u.rad).value
# Geometrically calculate the depth of maximum given this test position
x_max = self.get_shower_max(source_x, source_y, core_x, core_y, zenith)
x_max *= x_max_scale
# Calculate expected Xmax given this energy
x_max_exp = guess_shower_depth(energy) # / np.cos(20*u.deg)
# Convert to binning of Xmax
x_max_bin = x_max - x_max_exp
# Check for range
if x_max_bin > 200:
x_max_bin = 200
if x_max_bin < -100:
x_max_bin = -100
# Calculate impact distance for all telescopes
impact = np.sqrt(
(self.tel_pos_x - core_x) ** 2 + (self.tel_pos_y - core_y) ** 2
)
# And the expected rotation angle
phi = np.arctan2((self.tel_pos_x - core_x), (self.tel_pos_y - core_y)) * u.rad
# Rotate and translate all pixels such that they match the
# template orientation
pix_y_rot, pix_x_rot = self.rotate_translate(
self.pixel_x, self.pixel_y, source_x, source_y, phi
)
# In the interpolator class we can gain speed advantages by using masked arrays
# so we need to make sure here everything is masked
prediction = ma.zeros(self.image.shape)
prediction.mask = ma.getmask(self.image)
time_gradients = np.zeros((self.image.shape[0], 2))
# Loop over all telescope types and get prediction
        for tel_type in np.unique(self.tel_types):
import argparse
from statistics import median_high, median_low
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from qpputils import dataparser as dt
# Define the Font for the plots
# plt.rcParams.update({'font.size': 35, 'font.family': 'serif', 'font.weight': 'normal'})
# Define the Font for the plots
plt.rcParams.update({'font.size': 40, 'font.family': 'Hind Guntur', 'font.weight': 'normal'})
"""The next three lines are used to force matplotlib to use font-Type-1 """
# plt.rcParams['ps.useafm'] = True
# plt.rcParams['pdf.use14corefonts'] = True
# plt.rcParams['text.usetex'] = True
# TODO: add logging and qrels file generation for UQV
QUERY_GROUPS = {'top': 'MaxAP', 'low': 'MinAP', 'medh': 'MedHiAP', 'medl': 'MedLoAP'}
QUANTILES = {'med': 'Med', 'top': 'Top', 'low': 'Low'}
parser = argparse.ArgumentParser(description='Script for query files pre-processing',
epilog='Use this script with Caution')
parser.add_argument('-t', '--queries', default=None, metavar='queries.txt', help='path to UQV queries txt file')
parser.add_argument('--remove', default=None, metavar='queries.txt',
help='path to queries txt file that will be removed from the final file NON UQV ONLY')
parser.add_argument('--group', default='title', choices=['low', 'top', 'medh', 'medl', 'cref'],
help='Return only the <> performing queries of each topic')
parser.add_argument('--quant', default=None, choices=['low', 'high'],
help='Return a quantile of the variants for each topic')
parser.add_argument('--ap', default=None, metavar='QLmap1000', help='path to queries AP results file')
parser.add_argument('--stats', action='store_true', help='Print statistics')
parser.add_argument('--plot_vars', action='store_true', help='Print vars AP graph')
def create_overlap_ref_queries(*queries):
df = dt.QueriesTextParser(queries[0], 'uqv').queries_df
for query_file in queries[1:]:
_df = dt.QueriesTextParser(query_file, 'uqv').queries_df
df = df.merge(_df, how='inner')
print(df)
return df
def add_original_queries(uqv_obj: dt.QueriesTextParser):
"""Don't use this function ! not tested"""
original_obj = dt.QueriesTextParser('QppUqvProj/data/ROBUST/queries.txt')
uqv_df = uqv_obj.queries_df.set_index('qid')
original_df = original_obj.queries_df.set_index('qid')
for topic, vars in uqv_obj.query_vars.items():
uqv_df.loc[vars, 'topic'] = topic
missing_list = []
for topic, topic_df in uqv_df.groupby('topic'):
if original_df.loc[original_df['text'].isin(topic_df['text'])].empty:
missing_list.append(topic)
missing_df = pd.DataFrame({'qid': '341-9-1', 'text': original_obj.queries_dict['341'], 'topic': '341'}, index=[0])
uqv_df = uqv_df.append(missing_df.set_index('qid'))
return uqv_df.sort_index().drop(columns='topic').reset_index()
def convert_vid_to_qid(df: pd.DataFrame):
_df = df.set_index('qid')
_df.rename(index=lambda x: f'{x.split("-")[0]}', inplace=True)
return _df.reset_index()
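# e.g. a variant qid such as '341-9-1' (see add_original_queries above) is collapsed to its topic qid '341'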
def filter_quant_variants(qdf: pd.DataFrame, apdb: dt.ResultsReader, q):
"""This function returns a df with QID: TEXT of the queries inside a quantile"""
_apdf = apdb.data_df
_list = []
for topic, q_vars in apdb.query_vars.items():
_df = _apdf.loc[q_vars]
# if 0 in q:
# # For the low quantile, 0 AP variants are removed
# _df = _df[_df['ap'] > 0]
q_vals = _df.quantile(q=q)
_qvars = _df.loc[(_df['ap'] > q_vals['ap'].min()) & (_df['ap'] <= q_vals['ap'].max())]
_list.extend(_qvars.index.tolist())
_res_df = qdf.loc[qdf['qid'].isin(_list)]
return _res_df
def filter_top_queries(qdf: pd.DataFrame, apdb: dt.ResultsReader):
_apdf = apdb.data_df
_list = []
for topic, q_vars in apdb.query_vars.items():
top_var = _apdf.loc[q_vars].idxmax()
_list.append(top_var[0])
_df = qdf.loc[qdf['qid'].isin(_list)]
return _df
def add_topic_to_qdf_from_apdb(qdf, apdb):
    """This function adds a topic column to the queries DF using apdb"""
if 'topic' not in qdf.columns:
for topic, q_vars in apdb.query_vars.items():
qdf.loc[qdf['qid'].isin(q_vars), 'topic'] = topic
def add_topic_to_qdf(qdf: pd.DataFrame):
    """This function adds a topic column to the queries DF"""
if 'topic' not in qdf.columns:
if 'qid' in qdf.columns:
qdf = qdf.assign(topic=lambda x: x.qid.apply(lambda y: y.split('-')[0]))
else:
qdf = qdf.reset_index().assign(topic=lambda x: x.qid.apply(lambda y: y.split('-')[0]))
return qdf
def filter_n_top_queries(qdf: pd.DataFrame, apdb: dt.ResultsReader, n):
"""This function returns a DF with top n queries per topic"""
add_topic_to_qdf_from_apdb(qdf, apdb)
_ap_vars_df = pd.merge(qdf, apdb.data_df, left_on='qid', right_index=True)
_df = _ap_vars_df.sort_values('ap', ascending=False).groupby('topic').head(n)
return _df.sort_values('qid')
def filter_n_low_queries(qdf: pd.DataFrame, apdb: dt.ResultsReader, n):
"""This function returns a DF with n lowest queries per topic"""
add_topic_to_qdf_from_apdb(qdf, apdb)
_ap_vars_df = pd.merge(qdf, apdb.data_df, left_on='qid', right_index=True)
_df = _ap_vars_df.sort_values('ap', ascending=True).groupby('topic').head(n)
return _df.sort_values('qid')
def filter_low_queries(qdf: pd.DataFrame, apdb: dt.ResultsReader):
_apdf = apdb.data_df
_list = []
for topic, q_vars in apdb.query_vars.items():
_df = _apdf.loc[q_vars]
# remove 0 ap variants
_df = _df[_df['ap'] > 0]
low_var = _df.idxmin()
_list.append(low_var[0])
_df = qdf.loc[qdf['qid'].isin(_list)]
return _df
def filter_medh_queries(qdf: pd.DataFrame, apdb: dt.ResultsReader):
_apdf = apdb.data_df
_list = []
for topic, q_vars in apdb.query_vars.items():
_df = _apdf.loc[q_vars]
_med = median_high(_df['ap'])
med_var = _df.loc[_df['ap'] == _med]
_list.append(med_var.index[0])
_df = qdf.loc[qdf['qid'].isin(_list)]
return _df
def filter_medl_queries(qdf: pd.DataFrame, apdb: dt.ResultsReader):
_apdf = apdb.data_df
_list = []
for topic, q_vars in apdb.query_vars.items():
_df = _apdf.loc[q_vars]
_med = median_low(_df['ap'])
med_var = _df.loc[_df['ap'] == _med]
_list.append(med_var.index[0])
_df = qdf.loc[qdf['qid'].isin(_list)]
return _df
def remove_duplicates(qdb: dt.QueriesTextParser):
_list = []
for topic, q_vars in qdb.query_vars.items():
_list.append(qdb.queries_df.loc[qdb.queries_df['qid'].isin(q_vars)].drop_duplicates('text'))
return pd.concat(_list)
def alternate_remove_duplicates(qdb: dt.QueriesTextParser):
"""Different commands, same result"""
_dup_list = []
for topic, q_vars in qdb.query_vars.items():
_dup_list.extend(qdb.queries_df.loc[qdb.queries_df['qid'].isin(q_vars)].duplicated('text'))
return qdb.queries_df[~qdb.queries_df['qid'].isin(qdb.queries_df.loc[_dup_list]['qid'])]
def remove_q1_from_q2(rm_df: pd.DataFrame, qdb: dt.QueriesTextParser):
"""This function will remove from queries_df in qdb the queries that exist in rm_df """
_dup_list = []
full_df = qdb.queries_df.set_index('qid')
queries_to_remove = convert_vid_to_qid(rm_df).set_index('qid').to_dict(orient='index')
for topic, q_vars in qdb.query_vars.items():
# _dup_list.extend(full_df.loc[full_df['text'] == query_text]['qid'])
topic_df = full_df.loc[q_vars]
_dup_list.extend(topic_df.loc[topic_df['text'] == queries_to_remove[topic]['text']].index.tolist())
return full_df.drop(index=_dup_list).reset_index()
def write_queries_to_files(q_df: pd.DataFrame, corpus, queries_group='title', quantile=None, remove=None):
if quantile:
file_name = f'queries_{corpus}_UQV_{quantile}_variants'
elif remove:
title = input('What queries were removed? \n')
file_name = f'queries_{corpus}_UQV_wo_{title}'
else:
file_name = f'queries_{corpus}_{queries_group}'
q_df.to_csv(f'{file_name}.txt', sep=":", header=False, index=False)
query_xml = dt.QueriesXMLWriter(q_df)
query_xml.print_queries_xml_file(f'{file_name}.xml')
def add_format(s):
s = '${:.4f}$'.format(s)
return s
def plot_robust_histograms(quant_variants_dict):
for quant, vars_df in quant_variants_dict.items():
if quant == 'all':
            bins = np.arange(4, 60)
""" Diamond solver for logistic regression """
import logging
import time
import numpy as np
from diamond.solvers.utils import dot
from diamond.solvers.utils import l2_logistic_fixed_hessian
LOGGER = logging.getLogger(__name__)
LOGGER.setLevel(logging.INFO)
class FixedHessianSolverMulti(object):
""" This class wraps the fit method"""
def __init__(self):
pass
@staticmethod
def fit(Y, designs, H_invs, sparse_inv_covs, **kwargs):
""" Fit the model. Outer iterations loop over main effects and each grouping factor
Args:
Y : array_like. Vector of binary responses in {0, 1}
designs : dict. Design matrices for main effects and grouping factors
H_invs: dict. dictionary of inverse Hessian matrix for each grouping factor
sparse_inv_covs : dict. dictionary of sparse regularization matrices,\
one for each grouping factor
Keyword Args:
min_its : int. Minimum number of outer iterations
max_its : int. Maximum number of outer iterations
tol : float. If parameters change by less than `tol`, convergence has been achieved.
inner_tol : float. Tolerance for inner loops
            offset : array_like. Offset vector. Defaults to 0
fit_order : list. Order in which to fit main and random effects
permute_fit_order : boolean. Change the fit order at each iteration
verbose : boolean. Display updates at every iteration
Returns:
dict: estimated intercepts, main effects, and random effects
"""
min_its = kwargs.get('min_its', 20)
max_its = kwargs.get('max_its', 5000)
tol = kwargs.get('tol', 1E-5)
inner_tol = kwargs.get('inner_tol', 1E-2)
fit_order = kwargs.get('fit_order', None)
permute_fit_order = kwargs.get('permute_fit_order', False)
initial_offset = kwargs.get('offset', 0.)
verbose = kwargs.get('verbose', False)
if not verbose:
LOGGER.setLevel(logging.WARNING)
# Cycle through fitting different groupings
start_time = time.time()
effects = {k: np.zeros(designs[k].shape[1]) for k in designs.keys()}
old_effects = {k: np.zeros(designs[k].shape[1]) for k in designs.keys()}
if fit_order is None:
fit_order = designs.keys()
for i in range(max_its):
# periodically recompute the offset
if i % 10 == 0:
offset = initial_offset
for g in designs.keys():
offset += dot(designs[g], effects[g])
change = 0.0
for grouping in fit_order:
if i > 0:
g_change = np.linalg.norm(effects[grouping] - old_effects[grouping]) / \
np.linalg.norm(effects[grouping])
else:
g_change = np.inf
# if g_change < tol and i >= min_its:
# # no need to continue converging this group
# # cutoff
# continue
old_effects[grouping] = 1.0 * effects[grouping]
offset += -dot(designs[grouping], effects[grouping])
# fit group effects
effects[grouping] = 1.0 * l2_logistic_fixed_hessian(designs[grouping],
Y,
H_invs[grouping],
sparse_inv_covs[grouping],
offset=offset,
beta=effects[grouping],
tol=inner_tol)
offset += dot(designs[grouping], effects[grouping])
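                # the offset once again includes this grouping's freshly fitted contribution,
                # so the next grouping is updated against the full current linear predictor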
change = max(change,
np.linalg.norm(effects[grouping] - old_effects[grouping]) /
np.linalg.norm(effects[grouping]))
xbeta = 0
penalty = 0
for g in designs.keys():
xbeta += dot(designs[g], effects[g])
if g != 'main':
penalty += dot(effects[g], dot(sparse_inv_covs[g], effects[g]))
            loss = -1 * np.sum(dot(Y, xbeta) - np.log(1 + np.exp(-xbeta)))
"""Non-negative matrix and tensor factorization basic functions
"""
# Author: <NAME>
# License: MIT
# Jan 4, '20
# Initialize progressbar
import pandas as pd
import math
import numpy as np
from scipy.sparse.linalg import svds
from tqdm import tqdm
from scipy.stats import hypergeom
from scipy.optimize import nnls
from .nmtf_core import *
from .nmtf_utils import *
import sys
if not hasattr(sys, 'argv'):
sys.argv = ['']
EPSILON = np.finfo(np.float32).eps
def NMFInit(M, Mmis, Mt0, Mw0, nc, tolerance, LogIter, myStatusBox):
"""Initialize NMF components using NNSVD
Input:
M: Input matrix
Mmis: Define missing values (0 = missing cell, 1 = real cell)
Mt0: Initial left hand matrix (may be empty)
Mw0: Initial right hand matrix (may be empty)
nc: NMF rank
Output:
Mt: Left hand matrix
Mw: Right hand matrix
Reference
---------
<NAME>, <NAME> (2008) SVD based initialization: A head start for nonnegative matrix factorization
Pattern Recognition Pattern Recognition Volume 41, Issue 4, April 2008, Pages 1350-1362
"""
n, p = M.shape
Mmis = Mmis.astype(np.int)
n_Mmis = Mmis.shape[0]
if n_Mmis == 0:
ID = np.where(np.isnan(M) == True)
n_Mmis = ID[0].size
if n_Mmis > 0:
Mmis = (np.isnan(M) == False)
Mmis = Mmis.astype(np.int)
M[Mmis == 0] = 0
nc = int(nc)
Mt = np.copy(Mt0)
Mw = np.copy(Mw0)
if (Mt.shape[0] == 0) or (Mw.shape[0] == 0):
if n_Mmis == 0:
if nc >= min(n,p):
                # arpack cannot factorize at full rank -> duplicate the matrix in both dimensions to make it work
t, d, w = svds(np.concatenate((np.concatenate((M, M), axis=1),np.concatenate((M, M), axis=1)), axis=0), k=nc)
t *= np.sqrt(2)
w *= np.sqrt(2)
d /= 2
# svd causes mem allocation problem with large matrices
# t, d, w = np.linalg.svd(M)
# Mt = t
# Mw = w.T
else:
t, d, w = svds(M, k=nc)
Mt = t[:n,:]
Mw = w[:,:p].T
#svds returns singular vectors in reverse order
Mt = Mt[:,::-1]
Mw = Mw[:,::-1]
d = d[::-1]
else:
Mt, d, Mw, Mmis, Mmsr, Mmsr2, AddMessage, ErrMessage, cancel_pressed = rSVDSolve(
M, Mmis, nc, tolerance, LogIter, 0, "", 200,
1, 1, 1, myStatusBox)
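        # NNSVD step (per the reference cited in the docstring above): each singular-vector pair
        # is split into its nonnegative parts (u+, v+) and (u-, v-), and the pair whose rank-one
        # outer product has the larger norm is kept for that component.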
for k in range(0, nc):
U1 = Mt[:, k]
U2 = -Mt[:, k]
U1[U1 < 0] = 0
U2[U2 < 0] = 0
V1 = Mw[:, k]
V2 = -Mw[:, k]
V1[V1 < 0] = 0
V2[V2 < 0] = 0
U1 = np.reshape(U1, (n, 1))
V1 = np.reshape(V1, (1, p))
U2 = np.reshape(U2, (n, 1))
V2 = np.reshape(V2, (1, p))
if np.linalg.norm(U1 @ V1) > np.linalg.norm(U2 @ V2):
Mt[:, k] = np.reshape(U1, n)
Mw[:, k] = np.reshape(V1, p)
else:
Mt[:, k] = np.reshape(U2, n)
Mw[:, k] = np.reshape(V2, p)
return [Mt, Mw]
def rNMFSolve(
M, Mmis, Mt0, Mw0, nc, tolerance, precision, LogIter, MaxIterations, NMFAlgo, NMFFixUserLHE,
NMFFixUserRHE, NMFMaxInterm,
NMFSparseLevel, NMFRobustResampleColumns, NMFRobustNRuns, NMFCalculateLeverage, NMFUseRobustLeverage,
NMFFindParts, NMFFindCentroids, NMFKernel, NMFReweighColumns, NMFPriors, myStatusBox):
"""Estimate left and right hand matrices (robust version)
Input:
M: Input matrix
Mmis: Define missing values (0 = missing cell, 1 = real cell)
Mt0: Initial left hand matrix
Mw0: Initial right hand matrix
nc: NMF rank
tolerance: Convergence threshold
precision: Replace 0-values in multiplication rules
LogIter: Log results through iterations
MaxIterations: Max iterations
NMFAlgo: =1,3: Divergence; =2,4: Least squares;
NMFFixUserLHE: = 1 => fixed left hand matrix columns
NMFFixUserRHE: = 1 => fixed right hand matrix columns
NMFMaxInterm: Max iterations for warmup multiplication rules
NMFSparseLevel: Requested sparsity in terms of relative number of rows with 0 values in right hand matrix
NMFRobustResampleColumns: Resample columns during bootstrap
NMFRobustNRuns: Number of bootstrap runs
NMFCalculateLeverage: Calculate leverages
NMFUseRobustLeverage: Calculate leverages based on robust max across factoring columns
NMFFindParts: Enforce convexity on left hand matrix
NMFFindCentroids: Enforce convexity on right hand matrix
        NMFKernel: Type of kernel used; 1: linear; 2: quadratic; 3: radial
NMFReweighColumns: Reweigh columns in 2nd step of parts-based NMF
NMFPriors: Priors on right hand matrix
Output:
Mt: Left hand matrix
Mw: Right hand matrix
MtPct: Percent robust clustered rows
MwPct: Percent robust clustered columns
diff: Objective minimum achieved
Mh: Convexity matrix
flagNonconvex: Updated non-convexity flag on left hand matrix
"""
# Check parameter consistency (and correct if needed)
AddMessage = []
ErrMessage =''
cancel_pressed = 0
nc = int(nc)
if NMFFixUserLHE*NMFFixUserRHE == 1:
return Mt0, Mw0, np.array([]), np.array([]), 0, np.array([]), 0, AddMessage, ErrMessage, cancel_pressed
if (nc == 1) & (NMFAlgo > 2):
NMFAlgo -= 2
if NMFAlgo <= 2:
NMFRobustNRuns = 0
Mmis = Mmis.astype(np.int)
n_Mmis = Mmis.shape[0]
if n_Mmis == 0:
ID = np.where(np.isnan(M) == True)
n_Mmis = ID[0].size
if n_Mmis > 0:
Mmis = (np.isnan(M) == False)
Mmis = Mmis.astype(np.int)
M[Mmis == 0] = 0
else:
M[Mmis == 0] = 0
if NMFRobustResampleColumns > 0:
M = np.copy(M).T
if n_Mmis > 0:
Mmis = np.copy(Mmis).T
Mtemp = np.copy(Mw0)
Mw0 = np.copy(Mt0)
Mt0 = Mtemp
NMFFixUserLHEtemp = NMFFixUserLHE
NMFFixUserLHE = NMFFixUserRHE
NMFFixUserRHE = NMFFixUserLHEtemp
n, p = M.shape
try:
n_NMFPriors, nc = NMFPriors.shape
except:
n_NMFPriors = 0
NMFRobustNRuns = int(NMFRobustNRuns)
MtPct = np.nan
MwPct = np.nan
flagNonconvex = 0
# Step 1: NMF
Status = "Step 1 - NMF Ncomp=" + str(nc) + ": "
Mt, Mw, diffsup, Mhsup, NMFPriors, flagNonconvex, AddMessage, ErrMessage, cancel_pressed = NMFSolve(
M, Mmis, Mt0, Mw0, nc, tolerance, precision, LogIter, Status, MaxIterations, NMFAlgo,
NMFFixUserLHE, NMFFixUserRHE, NMFMaxInterm, 100, NMFSparseLevel,
NMFFindParts, NMFFindCentroids, NMFKernel, NMFReweighColumns, NMFPriors, flagNonconvex, AddMessage, myStatusBox)
Mtsup = np.copy(Mt)
Mwsup = np.copy(Mw)
if (n_NMFPriors > 0) & (NMFReweighColumns > 0):
# Run again with fixed LHE & no priors
Status = "Step 1bis - NMF (fixed LHE) Ncomp=" + str(nc) + ": "
Mw = np.ones((p, nc)) / math.sqrt(p)
Mt, Mw, diffsup, Mh, NMFPriors, flagNonconvex, AddMessage, ErrMessage, cancel_pressed = NMFSolve(
M, Mmis, Mtsup, Mw, nc, tolerance, precision, LogIter, Status, MaxIterations, NMFAlgo, nc, 0, NMFMaxInterm, 100,
NMFSparseLevel, NMFFindParts, NMFFindCentroids, NMFKernel, 0, NMFPriors, flagNonconvex, AddMessage,
myStatusBox)
Mtsup = np.copy(Mt)
Mwsup = np.copy(Mw)
# Bootstrap to assess robust clustering
if NMFRobustNRuns > 1:
# Update Mwsup
MwPct = np.zeros((p, nc))
MwBlk = np.zeros((p, NMFRobustNRuns * nc))
for iBootstrap in range(0, NMFRobustNRuns):
Boot = np.random.randint(n, size=n)
Status = "Step 2 - " + \
"Boot " + str(iBootstrap + 1) + "/" + str(NMFRobustNRuns) + " NMF Ncomp=" + str(nc) + ": "
if n_Mmis > 0:
Mt, Mw, diff, Mh, NMFPriors, flagNonconvex, AddMessage, ErrMessage, cancel_pressed = NMFSolve(
M[Boot, :], Mmis[Boot, :], Mtsup[Boot, :], Mwsup, nc, 1.e-3, precision, LogIter, Status, MaxIterations, NMFAlgo, nc, 0,
NMFMaxInterm, 20, NMFSparseLevel, NMFFindParts, NMFFindCentroids, NMFKernel, NMFReweighColumns,
NMFPriors, flagNonconvex, AddMessage, myStatusBox)
else:
Mt, Mw, diff, Mh, NMFPriors, flagNonconvex, AddMessage, ErrMessage, cancel_pressed = NMFSolve(
M[Boot, :], Mmis, Mtsup[Boot, :], Mwsup, nc, 1.e-3, precision, LogIter, Status, MaxIterations, NMFAlgo, nc, 0,
NMFMaxInterm, 20, NMFSparseLevel, NMFFindParts, NMFFindCentroids, NMFKernel, NMFReweighColumns,
NMFPriors, flagNonconvex, AddMessage, myStatusBox)
for k in range(0, nc):
MwBlk[:, k * NMFRobustNRuns + iBootstrap] = Mw[:, k]
Mwn = np.zeros((p, nc))
for k in range(0, nc):
if (NMFAlgo == 2) | (NMFAlgo == 4):
ScaleMw = np.linalg.norm(MwBlk[:, k * NMFRobustNRuns + iBootstrap])
else:
ScaleMw = np.sum(MwBlk[:, k * NMFRobustNRuns + iBootstrap])
if ScaleMw > 0:
MwBlk[:, k * NMFRobustNRuns + iBootstrap] = \
MwBlk[:, k * NMFRobustNRuns + iBootstrap] / ScaleMw
Mwn[:, k] = MwBlk[:, k * NMFRobustNRuns + iBootstrap]
ColClust = np.zeros(p, dtype=int)
if NMFCalculateLeverage > 0:
Mwn, AddMessage, ErrMessage, cancel_pressed = Leverage(Mwn, NMFUseRobustLeverage, AddMessage,
myStatusBox)
for j in range(0, p):
ColClust[j] = np.argmax(np.array(Mwn[j, :]))
MwPct[j, ColClust[j]] = MwPct[j, ColClust[j]] + 1
MwPct = MwPct / NMFRobustNRuns
# Update Mtsup
MtPct = np.zeros((n, nc))
for iBootstrap in range(0, NMFRobustNRuns):
Status = "Step 3 - " + \
"Boot " + str(iBootstrap + 1) + "/" + str(NMFRobustNRuns) + " NMF Ncomp=" + str(nc) + ": "
Mw = np.zeros((p, nc))
for k in range(0, nc):
Mw[:, k] = MwBlk[:, k * NMFRobustNRuns + iBootstrap]
Mt, Mw, diff, Mh, NMFPriors, flagNonconvex, AddMessage, ErrMessage, cancel_pressed = NMFSolve(
M, Mmis, Mtsup, Mw, nc, 1.e-3, precision, LogIter, Status, MaxIterations, NMFAlgo, 0, nc, NMFMaxInterm, 20,
NMFSparseLevel, NMFFindParts, NMFFindCentroids, NMFKernel, NMFReweighColumns, NMFPriors, flagNonconvex,
AddMessage, myStatusBox)
RowClust = np.zeros(n, dtype=int)
if NMFCalculateLeverage > 0:
Mtn, AddMessage, ErrMessage, cancel_pressed = Leverage(Mt, NMFUseRobustLeverage, AddMessage,
myStatusBox)
else:
Mtn = Mt
for i in range(0, n):
RowClust[i] = np.argmax(Mtn[i, :])
MtPct[i, RowClust[i]] = MtPct[i, RowClust[i]] + 1
MtPct = MtPct / NMFRobustNRuns
Mt = Mtsup
Mw = Mwsup
Mh = Mhsup
diff = diffsup
if NMFRobustResampleColumns > 0:
Mtemp = np.copy(Mt)
Mt = np.copy(Mw)
Mw = Mtemp
Mtemp = np.copy(MtPct)
MtPct = np.copy(MwPct)
MwPct = Mtemp
return Mt, Mw, MtPct, MwPct, diff, Mh, flagNonconvex, AddMessage, ErrMessage, cancel_pressed
def NTFInit(M, Mmis, Mt_nmf, Mw_nmf, nc, tolerance, precision, LogIter, NTFUnimodal,
NTFLeftComponents, NTFRightComponents, NTFBlockComponents, NBlocks, init_type, myStatusBox):
"""Initialize NTF components for HALS
Input:
M: Input tensor
Mmis: Define missing values (0 = missing cell, 1 = real cell)
Mt_nmf: initialization of LHM in NMF(unstacked tensor), may be empty
Mw_nmf: initialization of RHM of NMF(unstacked tensor), may be empty
nc: NTF rank
tolerance: Convergence threshold
precision: Replace 0-values in multiplication rules
LogIter: Log results through iterations
NTFUnimodal: Apply Unimodal constraint on factoring vectors
NTFLeftComponents: Apply Unimodal/Smooth constraint on left hand matrix
NTFRightComponents: Apply Unimodal/Smooth constraint on right hand matrix
NTFBlockComponents: Apply Unimodal/Smooth constraint on block hand matrix
NBlocks: Number of NTF blocks
init_type : integer, default 0
init_type = 0 : NMF initialization applied on the reshaped matrix [1st dim x vectorized (2nd & 3rd dim)]
init_type = 1 : NMF initialization applied on the reshaped matrix [vectorized (1st & 2nd dim) x 3rd dim]
Output:
Mt: Left hand matrix
Mw: Right hand matrix
Mb: Block hand matrix
"""
AddMessage = []
n, p = M.shape
Mmis = Mmis.astype(np.int)
n_Mmis = Mmis.shape[0]
if n_Mmis == 0:
ID = np.where(np.isnan(M) == True)
n_Mmis = ID[0].size
if n_Mmis > 0:
Mmis = (np.isnan(M) == False)
Mmis = Mmis.astype(np.int)
M[Mmis == 0] = 0
nc = int(nc)
NBlocks = int(NBlocks)
init_type = int(init_type)
Status0 = "Step 1 - Quick NMF Ncomp=" + str(nc) + ": "
if init_type == 1:
#Init legacy
Mstacked, Mmis_stacked = NTFStack(M, Mmis, NBlocks)
nc2 = min(nc, NBlocks) # factorization rank can't be > number of blocks
if (Mt_nmf.shape[0] == 0) or (Mw_nmf.shape[0] == 0):
Mt_nmf, Mw_nmf = NMFInit(Mstacked, Mmis_stacked, np.array([]), np.array([]), nc2, tolerance, LogIter, myStatusBox)
else:
Mt_nmf, Mw_nmf = NMFInit(Mstacked, Mmis_stacked, Mt_nmf, Mw_nmf, nc2, tolerance, LogIter, myStatusBox)
# Quick NMF
Mt_nmf, Mw_nmf, diff, Mh, dummy1, dummy2, AddMessage, ErrMessage, cancel_pressed = NMFSolve(
Mstacked, Mmis_stacked, Mt_nmf, Mw_nmf, nc2, tolerance, precision, LogIter, Status0,
10, 2, 0, 0, 1, 1, 0, 0, 0, 1, 0, np.array([]), 0, AddMessage, myStatusBox)
# Factorize Left vectors and distribute multiple factors if nc2 < nc
Mt = np.zeros((n, nc))
Mw = np.zeros((int(p / NBlocks), nc))
Mb = np.zeros((NBlocks, nc))
NFact = int(np.ceil(nc / NBlocks))
for k in range(0, nc2):
myStatusBox.update_status(delay=1, status="Start SVD...")
U, d, V = svds(np.reshape(Mt_nmf[:, k], (int(p / NBlocks), n)).T, k=NFact)
V = V.T
#svds returns singular vectors in reverse order
U = U[:,::-1]
V = V[:,::-1]
d = d[::-1]
myStatusBox.update_status(delay=1, status="SVD completed")
for iFact in range(0, NFact):
ind = iFact * NBlocks + k
if ind < nc:
U1 = U[:, iFact]
U2 = -U[:, iFact]
U1[U1 < 0] = 0
U2[U2 < 0] = 0
V1 = V[:, iFact]
V2 = -V[:, iFact]
V1[V1 < 0] = 0
V2[V2 < 0] = 0
U1 = np.reshape(U1, (n, 1))
V1 = np.reshape(V1, (1, int(p / NBlocks)))
U2 = np.reshape(U2, (n, 1))
V2 = np.reshape(V2, ((1, int(p / NBlocks))))
if np.linalg.norm(U1 @ V1) > np.linalg.norm(U2 @ V2):
Mt[:, ind] = np.reshape(U1, n)
Mw[:, ind] = d[iFact] * np.reshape(V1, int(p / NBlocks))
else:
Mt[:, ind] = np.reshape(U2, n)
Mw[:, ind] = d[iFact] * np.reshape(V2, int(p / NBlocks))
Mb[:, ind] = Mw_nmf[:, k]
else:
#Init default
if (Mt_nmf.shape[0] == 0) or (Mw_nmf.shape[0] == 0):
Mt_nmf, Mw_nmf = NMFInit(M, Mmis, np.array([]), np.array([]), nc, tolerance, LogIter, myStatusBox)
else:
Mt_nmf, Mw_nmf = NMFInit(M, Mmis, Mt_nmf, Mw_nmf, nc, tolerance, LogIter, myStatusBox)
# Quick NMF
Mt_nmf, Mw_nmf, diff, Mh, dummy1, dummy2, AddMessage, ErrMessage, cancel_pressed = NMFSolve(
M, Mmis, Mt_nmf, Mw_nmf, nc, tolerance, precision, LogIter, Status0,
10, 2, 0, 0, 1, 1, 0, 0, 0, 1, 0, np.array([]), 0, AddMessage, myStatusBox)
#Factorize Left vectors
Mt = np.zeros((n, nc))
Mw = np.zeros((int(p / NBlocks), nc))
Mb = np.zeros((NBlocks, nc))
for k in range(0, nc):
myStatusBox.update_status(delay=1, status="Start SVD...")
U, d, V = svds(np.reshape(Mw_nmf[:, k], (int(p / NBlocks), NBlocks)), k=1)
V = V.T
U = np.abs(U)
V = np.abs(V)
myStatusBox.update_status(delay=1, status="SVD completed")
Mt[:, k] = Mt_nmf[:, k]
Mw[:, k] = d[0] * np.reshape(U, int(p / NBlocks))
Mb[:, k] = np.reshape(V, NBlocks)
for k in range(0, nc):
if (NTFUnimodal > 0) & (NTFLeftComponents > 0):
# Enforce unimodal distribution
tmax = np.argmax(Mt[:, k])
for i in range(tmax + 1, n):
Mt[i, k] = min(Mt[i - 1, k], Mt[i, k])
for i in range(tmax - 1, -1, -1):
Mt[i, k] = min(Mt[i + 1, k], Mt[i, k])
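            # e.g. (illustrative) a column [1, 3, 2, 4] with its maximum at the last position becomes
            # [1, 2, 2, 4]: values are clamped to be non-decreasing towards the peak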
if (NTFUnimodal > 0) & (NTFRightComponents > 0):
# Enforce unimodal distribution
wmax = np.argmax(Mw[:, k])
for j in range(wmax + 1, int(p / NBlocks)):
Mw[j, k] = min(Mw[j - 1, k], Mw[j, k])
for j in range(wmax - 1, -1, -1):
Mw[j, k] = min(Mw[j + 1, k], Mw[j, k])
if (NTFUnimodal > 0) & (NTFBlockComponents > 0):
# Enforce unimodal distribution
bmax = np.argmax(Mb[:, k])
for iBlock in range(bmax + 1, NBlocks):
Mb[iBlock, k] = min(Mb[iBlock - 1, k], Mb[iBlock, k])
for iBlock in range(bmax - 1, -1, -1):
Mb[iBlock, k] = min(Mb[iBlock + 1, k], Mb[iBlock, k])
return [Mt, Mw, Mb, AddMessage, ErrMessage, cancel_pressed]
def rNTFSolve(M, Mmis, Mt0, Mw0, Mb0, nc, tolerance, precision, LogIter, MaxIterations, NMFFixUserLHE, NMFFixUserRHE,
NMFFixUserBHE, NMFAlgo, NMFRobustNRuns, NMFCalculateLeverage, NMFUseRobustLeverage, NTFFastHALS, NTFNIterations,
NMFSparseLevel, NTFUnimodal, NTFSmooth, NTFLeftComponents, NTFRightComponents, NTFBlockComponents, NBlocks, NTFNConv,
NMFPriors, myStatusBox):
"""Estimate NTF matrices (robust version)
Input:
M: Input matrix
Mmis: Define missing values (0 = missing cell, 1 = real cell)
Mt0: Initial left hand matrix
Mw0: Initial right hand matrix
Mb0: Initial block hand matrix
nc: NTF rank
tolerance: Convergence threshold
precision: Replace 0-values in multiplication rules
LogIter: Log results through iterations
MaxIterations: Max iterations
NMFFixUserLHE: fix left hand matrix columns: = 1, else = 0
NMFFixUserRHE: fix right hand matrix columns: = 1, else = 0
NMFFixUserBHE: fix block hand matrix columns: = 1, else = 0
NMFAlgo: =5: Non-robust version, =6: Robust version
NMFRobustNRuns: Number of bootstrap runs
NMFCalculateLeverage: Calculate leverages
NMFUseRobustLeverage: Calculate leverages based on robust max across factoring columns
NTFFastHALS: Use Fast HALS (does not handle missing values or convolution)
NTFNIterations: Warmup iterations for fast HALS
NMFSparseLevel: sparsity level (as defined by Hoyer); +/- = make RHE/LHE sparse
NTFUnimodal: Apply Unimodal constraint on factoring vectors
NTFSmooth: Apply Smooth constraint on factoring vectors
NTFLeftComponents: Apply Unimodal/Smooth constraint on left hand matrix
NTFRightComponents: Apply Unimodal/Smooth constraint on right hand matrix
NTFBlockComponents: Apply Unimodal/Smooth constraint on block hand matrix
NBlocks: Number of NTF blocks
NTFNConv: Half-Size of the convolution window on 3rd-dimension of the tensor
NMFPriors: Elements in Mw that should be updated (others remain 0)
Output:
Mt_conv: Convolutional Left hand matrix
Mt: Left hand matrix
Mw: Right hand matrix
Mb: Block hand matrix
MtPct: Percent robust clustered rows
MwPct: Percent robust clustered columns
diff : Objective minimum achieved
"""
AddMessage = []
ErrMessage = ''
cancel_pressed = 0
n, p0 = M.shape
nc = int(nc)
NBlocks = int(NBlocks)
p = int(p0 / NBlocks)
NTFNConv = int(NTFNConv)
if NMFFixUserLHE*NMFFixUserRHE*NMFFixUserBHE == 1:
return np.zeros((n, nc*(2*NTFNConv+1))), Mt0, Mw0, Mb0, np.zeros((n, p0)), np.ones((n, nc)), np.ones((p, nc)), AddMessage, ErrMessage, cancel_pressed
Mmis = Mmis.astype(int)
n_Mmis = Mmis.shape[0]
if n_Mmis == 0:
ID = np.where(np.isnan(M) == True)
n_Mmis = ID[0].size
if n_Mmis > 0:
Mmis = (np.isnan(M) == False)
Mmis = Mmis.astype(int)
M[Mmis == 0] = 0
else:
M[Mmis == 0] = 0
NTFNIterations = int(NTFNIterations)
NMFRobustNRuns = int(NMFRobustNRuns)
Mt = np.copy(Mt0)
Mw = np.copy(Mw0)
Mb = np.copy(Mb0)
Mt_conv = np.array([])
# Check parameter consistency (and correct if needed)
if (nc == 1) | (NMFAlgo == 5):
NMFRobustNRuns = 0
if NMFRobustNRuns == 0:
MtPct = np.nan
MwPct = np.nan
if (n_Mmis > 0 or NTFNConv > 0 or NMFSparseLevel != 0) and NTFFastHALS > 0:
NTFFastHALS = 0
reverse2HALS = 1
else:
reverse2HALS = 0
# Step 1: NTF
Status0 = "Step 1 - NTF Ncomp=" + str(nc) + ": "
if NTFFastHALS > 0:
if NTFNIterations > 0:
Mt_conv, Mt, Mw, Mb, diff, cancel_pressed = NTFSolve(
M, Mmis, Mt, Mw, Mb, nc, tolerance, LogIter, Status0,
NTFNIterations, NMFFixUserLHE, NMFFixUserRHE, NMFFixUserBHE, 0, NTFUnimodal, NTFSmooth,
NTFLeftComponents, NTFRightComponents, NTFBlockComponents, NBlocks, NTFNConv, NMFPriors, myStatusBox)
Mt, Mw, Mb, diff, cancel_pressed = NTFSolveFast(
M, Mmis, Mt, Mw, Mb, nc, tolerance, precision, LogIter, Status0,
MaxIterations, NMFFixUserLHE, NMFFixUserRHE, NMFFixUserBHE, NTFUnimodal, NTFSmooth,
NTFLeftComponents, NTFRightComponents, NTFBlockComponents, NBlocks, myStatusBox)
else:
Mt_conv, Mt, Mw, Mb, diff, cancel_pressed = NTFSolve(
M, Mmis, Mt, Mw, Mb, nc, tolerance, LogIter, Status0,
MaxIterations, NMFFixUserLHE, NMFFixUserRHE, NMFFixUserBHE, NMFSparseLevel, NTFUnimodal, NTFSmooth,
NTFLeftComponents, NTFRightComponents, NTFBlockComponents, NBlocks, NTFNConv, NMFPriors, myStatusBox)
Mtsup = np.copy(Mt)
Mwsup = np.copy(Mw)
Mbsup = np.copy(Mb)
diff_sup = diff
# Bootstrap to assess robust clustering
if NMFRobustNRuns > 1:
# Update Mwsup
MwPct = np.zeros((p, nc))
MwBlk = np.zeros((p, NMFRobustNRuns * nc))
for iBootstrap in range(0, NMFRobustNRuns):
Boot = np.random.randint(n, size=n)
Status0 = "Step 2 - " + \
"Boot " + str(iBootstrap + 1) + "/" + str(NMFRobustNRuns) + " NTF Ncomp=" + str(nc) + ": "
if NTFFastHALS > 0:
if n_Mmis > 0:
Mt, Mw, Mb, diff, cancel_pressed = NTFSolveFast(
M[Boot, :], Mmis[Boot, :], Mtsup[Boot, :], Mwsup, Mb, nc, 1.e-3, precision, LogIter, Status0,
MaxIterations, 1, 0, NMFFixUserBHE, NTFUnimodal, NTFSmooth,
NTFLeftComponents, NTFRightComponents, NTFBlockComponents, NBlocks, myStatusBox)
else:
Mt, Mw, Mb, diff, cancel_pressed = NTFSolveFast(
M[Boot, :], np.array([]), Mtsup[Boot, :], Mwsup, Mb, nc, 1.e-3, precision, LogIter, Status0,
MaxIterations, 1, 0, NMFFixUserBHE, NTFUnimodal, NTFSmooth,
NTFLeftComponents, NTFRightComponents, NTFBlockComponents, NBlocks, myStatusBox)
else:
if n_Mmis > 0:
Mt_conv, Mt, Mw, Mb, diff, cancel_pressed = NTFSolve(
M[Boot, :], Mmis[Boot, :], Mtsup[Boot, :], Mwsup, Mb, nc, 1.e-3, LogIter, Status0,
MaxIterations, 1, 0, NMFFixUserBHE, NMFSparseLevel, NTFUnimodal, NTFSmooth,
NTFLeftComponents, NTFRightComponents, NTFBlockComponents, NBlocks, NTFNConv, NMFPriors, myStatusBox)
else:
Mt_conv, Mt, Mw, Mb, diff, cancel_pressed = NTFSolve(
M[Boot, :], np.array([]), Mtsup[Boot, :], Mwsup, Mb, nc, 1.e-3, LogIter, Status0,
MaxIterations, 1, 0, NMFFixUserBHE, NMFSparseLevel, NTFUnimodal, NTFSmooth,
NTFLeftComponents, NTFRightComponents, NTFBlockComponents, NBlocks, NTFNConv, NMFPriors, myStatusBox)
for k in range(0, nc):
MwBlk[:, k * NMFRobustNRuns + iBootstrap] = Mw[:, k]
Mwn = np.zeros((p, nc))
for k in range(0, nc):
ScaleMw = np.linalg.norm(MwBlk[:, k * NMFRobustNRuns + iBootstrap])
if ScaleMw > 0:
MwBlk[:, k * NMFRobustNRuns + iBootstrap] = \
MwBlk[:, k * NMFRobustNRuns + iBootstrap] / ScaleMw
Mwn[:, k] = MwBlk[:, k * NMFRobustNRuns + iBootstrap]
ColClust = np.zeros(p, dtype=int)
if NMFCalculateLeverage > 0:
Mwn, AddMessage, ErrMessage, cancel_pressed = Leverage(Mwn, NMFUseRobustLeverage, AddMessage,
myStatusBox)
for j in range(0, p):
ColClust[j] = np.argmax(np.array(Mwn[j, :]))
MwPct[j, ColClust[j]] = MwPct[j, ColClust[j]] + 1
MwPct = MwPct / NMFRobustNRuns
# Update Mtsup
MtPct = np.zeros((n, nc))
for iBootstrap in range(0, NMFRobustNRuns):
Status0 = "Step 3 - " + \
"Boot " + str(iBootstrap + 1) + "/" + str(NMFRobustNRuns) + " NTF Ncomp=" + str(nc) + ": "
Mw = np.zeros((p, nc))
for k in range(0, nc):
Mw[:, k] = MwBlk[:, k * NMFRobustNRuns + iBootstrap]
if NTFFastHALS > 0:
Mt, Mw, Mb, diff, cancel_pressed = NTFSolveFast(
M, Mmis, Mtsup, Mw, Mb, nc, 1.e-3, precision, LogIter, Status0, MaxIterations, 0, 1, NMFFixUserBHE,
NTFUnimodal, NTFSmooth,
NTFLeftComponents, NTFRightComponents, NTFBlockComponents, NBlocks, myStatusBox)
else:
Mt_conv, Mt, Mw, Mb, diff, cancel_pressed = NTFSolve(
M, Mmis, Mtsup, Mw, Mb, nc, 1.e-3, LogIter, Status0, MaxIterations, 0, 1, NMFFixUserBHE,
NMFSparseLevel, NTFUnimodal, NTFSmooth,
NTFLeftComponents, NTFRightComponents, NTFBlockComponents, NBlocks, NTFNConv, NMFPriors, myStatusBox)
RowClust = np.zeros(n, dtype=int)
if NMFCalculateLeverage > 0:
Mtn, AddMessage, ErrMessage, cancel_pressed = Leverage(Mt, NMFUseRobustLeverage, AddMessage,
myStatusBox)
else:
Mtn = Mt
for i in range(0, n):
RowClust[i] = np.argmax(Mtn[i, :])
MtPct[i, RowClust[i]] = MtPct[i, RowClust[i]] + 1
MtPct = MtPct / NMFRobustNRuns
Mt = Mtsup
Mw = Mwsup
Mb = Mbsup
diff = diff_sup
if reverse2HALS > 0:
AddMessage.insert(len(AddMessage), 'Fast HALS cannot currently be applied with missing data or a convolution window; reverted to simple HALS.')
return Mt_conv, Mt, Mw, Mb, MtPct, MwPct, diff, AddMessage, ErrMessage, cancel_pressed
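# Illustrative usage sketch (added note, not part of the original module): mirrors the
# single-block call made in non_negative_factorization() further below, where a plain matrix
# M of shape (n, p) is treated as a tensor with NBlocks = 1, so Mb0 can stay empty while
# NMFFixUserBHE = 1. The helper names (NMFInit, StatusBoxTqdm) are reused from elsewhere in
# this module and all numeric settings are assumptions chosen only for illustration.
#
# status_box = StatusBoxTqdm(verbose=0)
# Mt0, Mw0 = NMFInit(M, np.array([]), np.array([]), np.array([]), 4, 1.e-6, 0, status_box)
# Mt_conv, Mt, Mw, Mb, MtPct, MwPct, diff, msgs, err, cancelled = rNTFSolve(
#     M, np.array([]), Mt0, Mw0, np.array([]), 4, 1.e-6, 1.e-10, 0, 100,
#     0, 0, 1, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, np.array([]), status_box)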
def rSVDSolve(M, Mmis, nc, tolerance, LogIter, LogTrials, Status0, MaxIterations,
SVDAlgo, SVDCoverage, SVDNTrials, myStatusBox):
"""Estimate SVD matrices (robust version)
Input:
M: Input matrix
Mmis: Define missing values (0 = missing cell, 1 = real cell)
nc: SVD rank
tolerance: Convergence threshold
LogIter: Log results through iterations
LogTrials: Log results through trials
Status0: Initial displayed status to be updated during iterations
MaxIterations: Max iterations
SVDAlgo: =1: Non-robust version, =2: Robust version
SVDCoverage: Coverage non-outliers (robust version)
SVDNTrials: Number of trials (robust version)
Output:
Mt: Left hand matrix
Mev: Scaling factors
Mw: Right hand matrix
Mmis: Matrix of missing/flagged outliers
Mmsr: Vector of Residual SSQ
Mmsr2: Vector of Residual variance
Reference
---------
L. Liu et al (2003) Robust singular value decomposition analysis of microarray data
PNAS November 11, 2003 vol. 100 no. 23 13167–13172
"""
AddMessage = []
ErrMessage = ''
cancel_pressed = 0
# M0 is the running matrix (to be factorized, initialized from M)
M0 = np.copy(M)
n, p = M0.shape
Mmis = Mmis.astype(np.bool_)
n_Mmis = Mmis.shape[0]
if n_Mmis > 0:
M0[Mmis == False] = np.nan
else:
Mmis = (np.isnan(M0) == False)
Mmis = Mmis.astype(np.bool_)
n_Mmis = Mmis.shape[0]
trace0 = np.sum(M0[Mmis] ** 2)
nc = int(nc)
SVDNTrials = int(SVDNTrials)
nxp = n * p
nxpcov = int(round(nxp * SVDCoverage, 0))
Mmsr = np.zeros(nc)
Mmsr2 = np.zeros(nc)
Mev = np.zeros(nc)
if SVDAlgo == 2:
MaxTrial = SVDNTrials
else:
MaxTrial = 1
Mw = np.zeros((p, nc))
Mt = np.zeros((n, nc))
Mdiff = np.zeros((n, p))
w = np.zeros(p)
t = np.zeros(n)
wTrial = np.zeros(p)
tTrial = np.zeros(n)
MmisTrial = np.zeros((n, p), dtype=bool)
# Outer-reference M becomes local reference M, which is the running matrix within ALS/LTS loop.
M = np.zeros((n, p))
wnorm = np.zeros((p, n))
tnorm = np.zeros((n, p))
denomw = np.zeros(n)
denomt = np.zeros(p)
StepIter = math.ceil(MaxIterations / 100)
pbar_step = 100 * StepIter / MaxIterations
if (n_Mmis == 0) & (SVDAlgo == 1):
FastCode = 1
else:
FastCode = 0
if (FastCode == 0) and (SVDAlgo == 1):
denomw[np.count_nonzero(Mmis, axis=1) < 2] = np.nan
denomt[np.count_nonzero(Mmis, axis=0) < 2] = np.nan
for k in range(0, nc):
for iTrial in range(0, MaxTrial):
myStatusBox.init_bar(delay=1)
# Copy values of M0 into M
M[:, :] = M0
Status1 = Status0 + "Ncomp " + str(k + 1) + " Trial " + str(iTrial + 1) + ": "
if SVDAlgo == 2:
# Select a random subset
M = np.reshape(M, (nxp, 1))
M[np.argsort(np.random.rand(nxp))[nxpcov:nxp]] = np.nan
M = np.reshape(M, (n, p))
Mmis[:, :] = (np.isnan(M) == False)
# Initialize w
for j in range(0, p):
w[j] = np.median(M[Mmis[:, j], j])
if np.where(w > 0)[0].size == 0:
w[:] = 1
w /= np.linalg.norm(w)
# Replace missing values by 0's before regression
M[Mmis == False] = 0
# initialize t (LTS =stochastic)
if FastCode == 0:
wnorm[:, :] = np.repeat(w[:, np.newaxis]**2, n, axis=1) * Mmis.T
denomw[:] = np.sum(wnorm, axis=0)
# Request at least 2 non-missing values to perform row regression
if SVDAlgo == 2:
denomw[np.count_nonzero(Mmis, axis=1) < 2] = np.nan
t[:] = M @ w / denomw
else:
t[:] = M @ w / np.linalg.norm(w) ** 2
t[np.isnan(t) == True] = np.median(t[np.isnan(t) == False])
if SVDAlgo == 2:
Mdiff[:, :] = np.abs(M0 - np.reshape(t, (n, 1)) @ np.reshape(w, (1, p)))
# Restore missing values instead of 0's
M[Mmis == False] = M0[Mmis == False]
M = np.reshape(M, (nxp, 1))
M[np.argsort(np.reshape(Mdiff, nxp))[nxpcov:nxp]] = np.nan
M = np.reshape(M, (n, p))
Mmis[:, :] = (np.isnan(M) == False)
# Replace missing values by 0's before regression
M[Mmis == False] = 0
iIter = 0
cont = 1
while (cont > 0) & (iIter < MaxIterations):
# build w
if FastCode == 0:
tnorm[:, :] = np.repeat(t[:, np.newaxis]**2, p, axis=1) * Mmis
denomt[:] = np.sum(tnorm, axis=0)
#Request at least 2 non-missing values to perform column regression
if SVDAlgo == 2:
denomt[np.count_nonzero(Mmis, axis=0) < 2] = np.nan
w[:] = M.T @ t / denomt
else:
w[:] = M.T @ t / np.linalg.norm(t) ** 2
w[np.isnan(w) == True] = np.median(w[np.isnan(w) == False])
# normalize w
w /= np.linalg.norm(w)
if SVDAlgo == 2:
Mdiff[:, :] = np.abs(M0 - np.reshape(t, (n, 1)) @ np.reshape(w, (1, p)))
# Restore missing values instead of 0's
M[Mmis == False] = M0[Mmis == False]
M = np.reshape(M, (nxp, 1))
# Outliers are reset to missing values
M[np.argsort(np.reshape(Mdiff, nxp))[nxpcov:nxp]] = np.nan
M = np.reshape(M, (n, p))
Mmis[:, :] = (np.isnan(M) == False)
# Replace missing values by 0's before regression
M[Mmis == False] = 0
# build t
if FastCode == 0:
wnorm[:, :] = np.repeat(w[:, np.newaxis] ** 2, n, axis=1) * Mmis.T
denomw[:] = np.sum(wnorm, axis=0)
# Request at least 2 non-missing values to perform row regression
if SVDAlgo == 2:
denomw[np.count_nonzero(Mmis, axis=1) < 2] = np.nan
t[:] = M @ w / denomw
else:
t[:] = M @ w / np.linalg.norm(w) ** 2
t[np.isnan(t) == True] = np.median(t[np.isnan(t) == False])
# note: only w is normalized within loop, t is normalized after convergence
if SVDAlgo == 2:
Mdiff[:, :] = np.abs(M0 - np.reshape(t, (n, 1)) @ np.reshape(w, (1, p)))
# Restore missing values instead of 0's
M[Mmis == False] = M0[Mmis == False]
M = np.reshape(M, (nxp, 1))
# Outliers are reset to missing values
M[np.argsort(np.reshape(Mdiff, nxp))[nxpcov:nxp]] = np.nan
M = np.reshape(M, (n, p))
Mmis[:, :] = (np.isnan(M) == False)
# Replace missing values by 0's before regression
M[Mmis == False] = 0
if iIter % StepIter == 0:
if SVDAlgo == 1:
Mdiff[:, :] = np.abs(M0 - np.reshape(t, (n, 1)) @ np.reshape(w, (1, p)))
Status = Status1 + 'Iteration: %s' % int(iIter)
myStatusBox.update_status(delay=1, status=Status)
myStatusBox.update_bar(delay=1, step=pbar_step)
if myStatusBox.cancel_pressed:
cancel_pressed = 1
return [Mt, Mev, Mw, Mmis, Mmsr, Mmsr2, AddMessage, ErrMessage, cancel_pressed]
diff = np.linalg.norm(Mdiff[Mmis]) ** 2 / np.where(Mmis)[0].size
if LogIter == 1:
if SVDAlgo == 2:
myStatusBox.myPrint("Ncomp: " + str(k) + " Trial: " + str(iTrial) + " Iter: " + str(
iIter) + " MSR: " + str(diff))
else:
myStatusBox.myPrint("Ncomp: " + str(k) + " Iter: " + str(iIter) + " MSR: " + str(diff))
if iIter > 0:
if abs(diff - diff0) / diff0 < tolerance:
cont = 0
diff0 = diff
iIter += 1
# save trial
if iTrial == 0:
BestTrial = iTrial
DiffTrial = diff
tTrial[:] = t
wTrial[:] = w
MmisTrial[:, :] = Mmis
elif diff < DiffTrial:
BestTrial = iTrial
DiffTrial = diff
tTrial[:] = t
wTrial[:] = w
MmisTrial[:, :] = Mmis
if LogTrials == 1:
myStatusBox.myPrint("Ncomp: " + str(k) + " Trial: " + str(iTrial) + " MSR: " + str(diff))
if LogTrials:
myStatusBox.myPrint("Ncomp: " + str(k) + " Best trial: " + str(BestTrial) + " MSR: " + str(DiffTrial))
t[:] = tTrial
w[:] = wTrial
Mw[:, k] = w
# compute eigen value
if SVDAlgo == 2:
# Robust regression of M0 on the rank-one product t @ w
Mdiff[:, :] = np.abs(M0 - np.reshape(t, (n, 1)) @ np.reshape(w, (1, p)))
RMdiff = np.argsort(np.reshape(Mdiff, nxp))
t /= np.linalg.norm(t) # Normalize t
Mt[:, k] = t
Mmis = np.reshape(Mmis, nxp)
Mmis[RMdiff[nxpcov:nxp]] = False
Ycells = np.reshape(M0, (nxp, 1))[Mmis]
Xcells = np.reshape(np.reshape(t, (n, 1)) @ np.reshape(w, (1, p)), (nxp, 1))[Mmis]
Mev[k] = Ycells.T @ Xcells / np.linalg.norm(Xcells) ** 2
Mmis = np.reshape(Mmis, (n, p))
else:
Mev[k] = np.linalg.norm(t)
Mt[:, k] = t / Mev[k] # normalize t
if k == 0:
Mmsr[k] = Mev[k] ** 2
else:
Mmsr[k] = Mmsr[k - 1] + Mev[k] ** 2
Mmsr2[k] = Mmsr[k] - Mev[0] ** 2
# M0 is deflated before calculating next component
M0 = M0 - Mev[k] * np.reshape(Mt[:, k], (n, 1)) @ np.reshape(Mw[:, k].T, (1, p))
trace02 = trace0 - Mev[0] ** 2
Mmsr = 1 - Mmsr / trace0
Mmsr[Mmsr > 1] = 1
Mmsr[Mmsr < 0] = 0
Mmsr2 = 1 - Mmsr2 / trace02
Mmsr2[Mmsr2 > 1] = 1
Mmsr2[Mmsr2 < 0] = 0
if nc > 1:
RMev = np.argsort(-Mev)
Mev = Mev[RMev]
Mw0 = Mw
Mt0 = Mt
for k in range(0, nc):
Mw[:, k] = Mw0[:, RMev[k]]
Mt[:, k] = Mt0[:, RMev[k]]
Mmis[:, :] = True
Mmis[MmisTrial == False] = False
#Mmis.astype(dtype=int)
return [Mt, Mev, Mw, Mmis, Mmsr, Mmsr2, AddMessage, ErrMessage, cancel_pressed]
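# Illustrative usage sketch (added note, not part of the original module): a robust rank-2 SVD
# with NaNs treated as missing cells and 90% of the cells kept per LTS trial. The numeric
# settings and the reuse of the StatusBoxTqdm helper from this module are assumptions made
# only for illustration.
#
# Mt, Mev, Mw, Mmis_out, Mmsr, Mmsr2, msgs, err, cancelled = rSVDSolve(
#     M, np.array([]), 2, 1.e-6, 0, 0, "robust SVD: ", 200,
#     2, 0.9, 10, StatusBoxTqdm(verbose=0))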
def non_negative_factorization(X, W=None, H=None, n_components=None,
update_W=True,
update_H=True,
beta_loss='frobenius',
use_hals=False,
n_bootstrap=None,
tol=1e-6,
max_iter=150, max_iter_mult=20,
regularization=None, sparsity=0,
leverage='standard',
convex=None, kernel='linear',
skewness=False,
null_priors=False,
random_state=None,
verbose=0):
"""Compute Non-negative Matrix Factorization (NMF)
Find two non-negative matrices (W, H) such that X = W @ H.T + Error.
This factorization can be used for example for
dimensionality reduction, source separation or topic extraction.
The objective function is minimized with an alternating minimization of W
and H.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Constant matrix.
W : array-like, shape (n_samples, n_components)
prior W
If update_W is False, it is used as a constant, to solve for H only.
H : array-like, shape (n_features, n_components)
prior H
If update_H is False, it is used as a constant, to solve for W only.
n_components : integer
Number of components, if n_components is not set : n_components = min(n_samples, n_features)
update_W : boolean, default: True
Update or keep W fixed
update_H : boolean, default: True
Update or keep H fixed
beta_loss : string, default 'frobenius'
String must be in {'frobenius', 'kullback-leibler'}.
Beta divergence to be minimized, measuring the distance between X
and the dot product WH. Note that values different from 'frobenius'
(or 2) and 'kullback-leibler' (or 1) lead to significantly slower
fits. Note that for beta_loss == 'kullback-leibler', the input
matrix X cannot contain zeros.
use_hals : boolean
True -> HALS algorithm (note that convex and kullback-leibler loss options are not supported)
False -> Projected gradient
n_bootstrap : integer, default: 0
Number of bootstrap runs.
tol : float, default: 1e-6
Tolerance of the stopping condition.
max_iter : integer, default: 150
Maximum number of iterations.
max_iter_mult : integer, default: 20
Maximum number of iterations in multiplicative warm-up to projected gradient (beta_loss = 'frobenius' only).
regularization : None | 'components' | 'transformation'
Select whether the regularization affects the components (H), the
transformation (W) or none of them.
sparsity : float, default: 0
Sparsity target with 0 <= sparsity <= 1 representing either:
- the % rows in W or H set to 0 (when use_hals = False)
- the mean % rows per column in W or H set to 0 (when use_hals = True)
leverage : None | 'standard' | 'robust', default 'standard'
Calculate leverage of W and H rows on each component.
convex : None | 'components' | 'transformation', default None
Apply convex constraint on W or H.
kernel : 'linear', 'quadratic', 'radial', default 'linear'
Can be set if convex = 'transformation'.
null_priors : boolean, default False
Cells of H with prior cells = 0 will not be updated.
Can be set only if prior H has been defined.
skewness : boolean, default False
When solving mixture problems, columns of X at the extremities of the convex hull will be given largest weights.
The column weight is a function of the skewness and its sign.
The expected sign of the skewness is based on the skewness of W components, as returned by the first pass
of a 2-steps convex NMF. Thus, during the first pass, skewness must be set to False.
Can be set only if convex = 'transformation' and prior W and H have been defined.
random_state : int, RandomState instance or None, optional, default: None
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : integer, default: 0
The verbosity level (0/1).
Returns
-------
Estimator (dictionary) with following entries
W : array-like, shape (n_samples, n_components)
Solution to the non-negative least squares problem.
H : array-like, shape (n_features, n_components)
Solution to the non-negative least squares problem.
volume : scalar, volume occupied by W and H
WB : array-like, shape (n_samples, n_components)
Percent consistently clustered rows for each component.
only if n_bootstrap > 0.
HB : array-like, shape (n_features, n_components)
Percent consistently clustered columns for each component.
only if n_bootstrap > 0.
B : array-like, shape (n_observations, n_components) or (n_features, n_components)
only if active convex variant, H = B.T @ X or W = X @ B
diff : Objective minimum achieved
"""
if use_hals:
#convex and kullback-leibler loss options are not supported
beta_loss='frobenius'
convex=None
M = X
n, p = M.shape
if n_components is None:
nc = min(n, p)
else:
nc = n_components
if beta_loss == 'frobenius':
NMFAlgo = 2
else:
NMFAlgo = 1
LogIter = verbose
myStatusBox = StatusBoxTqdm(verbose=LogIter)
tolerance = tol
precision = EPSILON
if (W is None) & (H is None):
Mt, Mw = NMFInit(M, np.array([]), np.array([]), np.array([]), nc, tolerance, LogIter, myStatusBox)
init = 'nndsvd'
else:
if H is None:
Mw = np.ones((p, nc))
init = 'custom_W'
elif W is None:
Mt = np.ones((n, nc))
init = 'custom_H'
else:
init = 'custom'
for k in range(0, nc):
if NMFAlgo == 2:
Mt[:, k] = Mt[:, k] / np.linalg.norm(Mt[:, k])
Mw[:, k] = Mw[:, k] / np.linalg.norm(Mw[:, k])
else:
Mt[:, k] = Mt[:, k] / np.sum(Mt[:, k])
Mw[:, k] = Mw[:, k] / np.sum(Mw[:, k])
if n_bootstrap is None:
NMFRobustNRuns = 0
else:
NMFRobustNRuns = n_bootstrap
if NMFRobustNRuns > 1:
NMFAlgo += 2
if update_W is True:
NMFFixUserLHE = 0
else:
NMFFixUserLHE = 1
if update_H is True:
NMFFixUserRHE = 0
else:
NMFFixUserRHE = 1
MaxIterations = max_iter
NMFMaxInterm = max_iter_mult
if regularization is None:
NMFSparseLevel = 0
else:
if regularization == 'components':
NMFSparseLevel = sparsity
elif regularization == 'transformation':
NMFSparseLevel = -sparsity
else:
NMFSparseLevel = 0
NMFRobustResampleColumns = 0
if leverage == 'standard':
NMFCalculateLeverage = 1
NMFUseRobustLeverage = 0
elif leverage == 'robust':
NMFCalculateLeverage = 1
NMFUseRobustLeverage = 1
else:
NMFCalculateLeverage = 0
NMFUseRobustLeverage = 0
if convex is None:
NMFFindParts = 0
NMFFindCentroids = 0
NMFKernel = 1
elif convex == 'transformation':
NMFFindParts = 1
NMFFindCentroids = 0
NMFKernel = 1
elif convex == 'components':
NMFFindParts = 0
NMFFindCentroids = 1
if kernel == 'linear':
NMFKernel = 1
elif kernel == 'quadratic':
NMFKernel = 2
elif kernel == 'radial':
NMFKernel = 3
else:
NMFKernel = 1
if (null_priors is True) & ((init == 'custom') | (init == 'custom_H')):
NMFPriors = H
else:
NMFPriors = np.array([])
if convex is None:
NMFReweighColumns = 0
else:
if (convex == 'transformation') & (init == 'custom'):
if skewness is True:
NMFReweighColumns = 1
else:
NMFReweighColumns = 0
else:
NMFReweighColumns = 0
if random_state is not None:
RandomSeed = random_state
np.random.seed(RandomSeed)
if use_hals:
if NMFAlgo <=2:
NTFAlgo = 5
else:
NTFAlgo = 6
Mt_conv, Mt, Mw, Mb, MtPct, MwPct, diff, AddMessage, ErrMessage, cancel_pressed = rNTFSolve(
M, np.array([]), Mt, Mw, np.array([]), nc, tolerance, precision, LogIter, MaxIterations, NMFFixUserLHE, NMFFixUserRHE,
1, NTFAlgo, NMFRobustNRuns, NMFCalculateLeverage, NMFUseRobustLeverage,
0, 0, NMFSparseLevel, 0, 0, 0, 0, 0, 1, 0, np.array([]), myStatusBox)
Mev = np.ones(nc)
if (NMFFixUserLHE == 0) & (NMFFixUserRHE == 0):
# Scale
for k in range(0, nc):
ScaleMt = np.linalg.norm(Mt[:, k])
ScaleMw = np.linalg.norm(Mw[:, k])
Mev[k] = ScaleMt * ScaleMw
if Mev[k] > 0:
Mt[:, k] = Mt[:, k] / ScaleMt
Mw[:, k] = Mw[:, k] / ScaleMw
else:
Mt, Mw, MtPct, MwPct, diff, Mh, flagNonconvex, AddMessage, ErrMessage, cancel_pressed = rNMFSolve(
M, np.array([]), Mt, Mw, nc, tolerance, precision, LogIter, MaxIterations, NMFAlgo, NMFFixUserLHE,
NMFFixUserRHE, NMFMaxInterm,
NMFSparseLevel, NMFRobustResampleColumns, NMFRobustNRuns, NMFCalculateLeverage, NMFUseRobustLeverage,
NMFFindParts, NMFFindCentroids, NMFKernel, NMFReweighColumns, NMFPriors, myStatusBox)
Mev = np.ones(nc)
if (NMFFindParts == 0) & (NMFFindCentroids == 0) & (NMFFixUserLHE == 0) & (NMFFixUserRHE == 0):
# Scale
for k in range(0, nc):
if (NMFAlgo == 2) | (NMFAlgo == 4):
ScaleMt = np.linalg.norm(Mt[:, k])
ScaleMw = np.linalg.norm(Mw[:, k])
else:
ScaleMt = np.sum(Mt[:, k])
ScaleMw = np.sum(Mw[:, k])
from numpy.testing import assert_, assert_array_almost_equal, assert_equal, \
assert_almost_equal, assert_array_equal, \
run_module_suite, TestCase
import numpy as np
import scipy.ndimage as ndimage
types = [np.int8, np.uint8, np.int16,
np.uint16, np.int32, np.uint32,
np.int64, np.uint64,
np.float32, np.float64]
np.mod(1., 1) # Silence fmod bug on win-amd64. See #1408 and #1238.
class Test_measurements_stats(TestCase):
"""ndimage.measurements._stats() is a utility function used by other functions."""
def test_a(self):
x = [0,1,2,6]
labels = [0,0,1,1]
index = [0,1]
for shp in [(4,), (2,2)]:
x = np.array(x).reshape(shp)
labels = np.array(labels).reshape(shp)
counts, sums = ndimage.measurements._stats(x, labels=labels, index=index)
assert_array_equal(counts, [2, 2])
assert_array_equal(sums, [1.0, 8.0])
def test_b(self):
# Same data as test_a, but different labels. The label 9 exceeds the
# length of 'labels', so this test will follow a different code path.
x = [0,1,2,6]
labels = [0,0,9,9]
index = [0,9]
for shp in [(4,), (2,2)]:
x = np.array(x).reshape(shp)
labels = np.array(labels).reshape(shp)
counts, sums = ndimage.measurements._stats(x, labels=labels, index=index)
assert_array_equal(counts, [2, 2])
assert_array_equal(sums, [1.0, 8.0])
def test_a_centered(self):
x = [0,1,2,6]
labels = [0,0,1,1]
index = [0,1]
for shp in [(4,), (2,2)]:
x = np.array(x).reshape(shp)
labels = np.array(labels).reshape(shp)
counts, sums, centers = ndimage.measurements._stats(x, labels=labels,
index=index, centered=True)
assert_array_equal(counts, [2, 2])
assert_array_equal(sums, [1.0, 8.0])
assert_array_equal(centers, [0.5, 8.0])
def test_b_centered(self):
x = [0,1,2,6]
labels = [0,0,9,9]
index = [0,9]
for shp in [(4,), (2,2)]:
x = np.array(x).reshape(shp)
labels = np.array(labels).reshape(shp)
counts, sums, centers = ndimage.measurements._stats(x, labels=labels,
index=index, centered=True)
assert_array_equal(counts, [2, 2])
assert_array_equal(sums, [1.0, 8.0])
assert_array_equal(centers, [0.5, 8.0])
def test_nonint_labels(self):
x = [0,1,2,6]
labels = [0.0, 0.0, 9.0, 9.0]
index = [0.0, 9.0]
for shp in [(4,), (2,2)]:
x = np.array(x).reshape(shp)
labels = np.array(labels).reshape(shp)
counts, sums, centers = ndimage.measurements._stats(x, labels=labels,
index=index, centered=True)
assert_array_equal(counts, [2, 2])
assert_array_equal(sums, [1.0, 8.0])
assert_array_equal(centers, [0.5, 8.0])
class Test_measurements_select(TestCase):
"""ndimage.measurements._select() is a utility function used by other functions."""
def test_basic(self):
x = [0,1,6,2]
cases = [
([0,0,1,1], [0,1]), # "Small" integer labels
([0,0,9,9], [0,9]), # A label larger than len(labels)
([0.0,0.0,7.0,7.0], [0.0, 7.0]), # Non-integer labels
]
for labels, index in cases:
result = ndimage.measurements._select(x, labels=labels, index=index)
assert_(len(result) == 0)
result = ndimage.measurements._select(x, labels=labels, index=index, find_max=True)
assert_(len(result) == 1)
assert_array_equal(result[0], [1, 6])
result = ndimage.measurements._select(x, labels=labels, index=index, find_min=True)
assert_(len(result) == 1)
assert_array_equal(result[0], [0, 2])
result = ndimage.measurements._select(x, labels=labels, index=index,
find_min=True, find_min_positions=True)
assert_(len(result) == 2)
assert_array_equal(result[0], [0, 2])
assert_array_equal(result[1], [0, 3])
result = ndimage.measurements._select(x, labels=labels, index=index,
find_max=True, find_max_positions=True)
assert_(len(result) == 2)
assert_array_equal(result[0], [1, 6])
assert_array_equal(result[1], [1, 2])
def test_label01():
"label 1"
data = np.ones([])
out, n = ndimage.label(data)
assert_array_almost_equal(out, 1)
def test_label02():
"label 2"
data = np.zeros([])
out, n = ndimage.label(data)
assert_array_almost_equal(out, 0)
def test_label03():
"label 3"
data = np.ones([1])
out, n = ndimage.label(data)
assert_array_almost_equal(out, [1])
def test_label04():
"label 4"
data = np.zeros([1])
out, n = ndimage.label(data)
assert_array_almost_equal(out, [0])
def test_label05():
"label 5"
data = np.ones([5])
out, n = ndimage.label(data)
assert_array_almost_equal(out, [1, 1, 1, 1, 1])
def test_label06():
"label 6"
data = np.array([1, 0, 1, 1, 0, 1])
out, n = ndimage.label(data)
assert_array_almost_equal(out, [1, 0, 2, 2, 0, 3])
def test_label07():
"label 7"
data = np.array([[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0]])
out, n = ndimage.label(data)
assert_array_almost_equal(out, [[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0]])
def test_label08():
"label 8"
data = np.array([[1, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 0],
[1, 1, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 0]])
out, n = ndimage.label(data)
assert_array_almost_equal(out, [[1, 0, 0, 0, 0, 0],
[0, 0, 2, 2, 0, 0],
[0, 0, 2, 2, 2, 0],
[3, 3, 0, 0, 0, 0],
[3, 3, 0, 0, 0, 0],
[0, 0, 0, 4, 4, 0]])
def test_label09():
"label 9"
data = np.array([[1, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 0],
[1, 1, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 0]])
struct = ndimage.generate_binary_structure(2, 2)
out, n = ndimage.label(data, struct)
assert_array_almost_equal(out, [[1, 0, 0, 0, 0, 0],
[0, 0, 2, 2, 0, 0],
[0, 0, 2, 2, 2, 0],
[2, 2, 0, 0, 0, 0],
[2, 2, 0, 0, 0, 0],
[0, 0, 0, 3, 3, 0]])
def test_label10():
"label 10"
data = np.array([[0, 0, 0, 0, 0, 0],
[0, 1, 1, 0, 1, 0],
[0, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0]])
struct = ndimage.generate_binary_structure(2, 2)
out, n = ndimage.label(data, struct)
assert_array_almost_equal(out, [[0, 0, 0, 0, 0, 0],
[0, 1, 1, 0, 1, 0],
[0, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0]])
def test_label11():
"label 11"
for type in types:
data = np.array([[1, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 0],
[1, 1, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 0]], type)
out, n = ndimage.label(data)
expected = [[1, 0, 0, 0, 0, 0],
[0, 0, 2, 2, 0, 0],
[0, 0, 2, 2, 2, 0],
[3, 3, 0, 0, 0, 0],
[3, 3, 0, 0, 0, 0],
[0, 0, 0, 4, 4, 0]]
assert_array_almost_equal(out, expected)
assert_equal(n, 4)
def test_label12():
"label 12"
for type in types:
data = np.array([[0, 0, 0, 0, 1, 1],
[0, 0, 0, 0, 0, 1],
[0, 0, 1, 0, 1, 1],
[0, 0, 1, 1, 1, 1],
[0, 0, 0, 1, 1, 0]], type)
out, n = ndimage.label(data)
expected = [[0, 0, 0, 0, 1, 1],
[0, 0, 0, 0, 0, 1],
[0, 0, 1, 0, 1, 1],
[0, 0, 1, 1, 1, 1],
[0, 0, 0, 1, 1, 0]]
assert_array_almost_equal(out, expected)
assert_equal(n, 1)
def test_label13():
"label 13"
for type in types:
data = np.array([[1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1],
[1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]],
type)
out, n = ndimage.label(data)
expected = [[1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1],
[1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
assert_array_almost_equal(out, expected)
assert_equal(n, 1)
def test_find_objects01():
"find_objects 1"
data = np.ones([], dtype=int)
out = ndimage.find_objects(data)
assert_(out == [()])
def test_find_objects02():
"find_objects 2"
data = np.zeros([], dtype=int)
out = ndimage.find_objects(data)
assert_(out == [])
def test_find_objects03():
"find_objects 3"
data = np.ones([1], dtype=int)
out = ndimage.find_objects(data)
assert_equal(out, [(slice(0, 1, None),)])
def test_find_objects04():
"find_objects 4"
data = np.zeros([1], dtype=int)
out = ndimage.find_objects(data)
assert_equal(out, [])
def test_find_objects05():
"find_objects 5"
data = np.ones([5], dtype=int)
out = ndimage.find_objects(data)
assert_equal(out, [(slice(0, 5, None),)])
def test_find_objects06():
"find_objects 6"
data = np.array([1, 0, 2, 2, 0, 3])
out = ndimage.find_objects(data)
assert_equal(out, [(slice(0, 1, None),),
(slice(2, 4, None),),
(slice(5, 6, None),)])
def test_find_objects07():
"find_objects 7"
data = np.array([[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0]])
out = ndimage.find_objects(data)
assert_equal(out, [])
def test_find_objects08():
"find_objects 8"
data = np.array([[1, 0, 0, 0, 0, 0],
[0, 0, 2, 2, 0, 0],
[0, 0, 2, 2, 2, 0],
[3, 3, 0, 0, 0, 0],
[3, 3, 0, 0, 0, 0],
[0, 0, 0, 4, 4, 0]])
out = ndimage.find_objects(data)
assert_equal(out, [(slice(0, 1, None), slice(0, 1, None)),
(slice(1, 3, None), slice(2, 5, None)),
(slice(3, 5, None), slice(0, 2, None)),
(slice(5, 6, None), slice(3, 5, None))])
def test_find_objects09():
"find_objects 9"
data = np.array([[1, 0, 0, 0, 0, 0],
[0, 0, 2, 2, 0, 0],
[0, 0, 2, 2, 2, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 4, 4, 0]])
out = ndimage.find_objects(data)
assert_equal(out, [(slice(0, 1, None), slice(0, 1, None)),
(slice(1, 3, None), slice(2, 5, None)),
None,
(slice(5, 6, None), slice(3, 5, None))])
def test_sum01():
"sum 1"
for type in types:
input = np.array([], type)
output = ndimage.sum(input)
assert_equal(output, 0.0)
def test_sum02():
"sum 2"
for type in types:
input = np.zeros([0, 4], type)
output = ndimage.sum(input)
assert_equal(output, 0.0)
def test_sum03():
"sum 3"
for type in types:
input = np.ones([], type)
output = ndimage.sum(input)
assert_almost_equal(output, 1.0)
def test_sum04():
"sum 4"
for type in types:
input = np.array([1, 2], type)
output = ndimage.sum(input)
assert_almost_equal(output, 3.0)
def test_sum05():
"sum 5"
for type in types:
input = np.array([[1, 2], [3, 4]], type)
output = ndimage.sum(input)
assert_almost_equal(output, 10.0)
def test_sum06():
"sum 6"
labels = np.array([], bool)
for type in types:
input = np.array([], type)
output = ndimage.sum(input, labels=labels)
assert_equal(output, 0.0)
def test_sum07():
"sum 7"
labels = np.ones([0, 4], bool)
for type in types:
input = np.zeros([0, 4], type)
output = ndimage.sum(input, labels=labels)
assert_equal(output, 0.0)
def test_sum08():
"sum 8"
labels = np.array([1, 0], bool)
for type in types:
input = np.array([1, 2], type)
output = ndimage.sum(input, labels=labels)
assert_equal(output, 1.0)
def test_sum09():
"sum 9"
labels = np.array([1, 0], bool)
for type in types:
input = np.array([[1, 2], [3, 4]], type)
output = ndimage.sum(input, labels=labels)
assert_almost_equal(output, 4.0)
def test_sum10():
"sum 10"
labels = np.array([1, 0], bool)
input = np.array([[1, 2], [3, 4]], bool)
output = ndimage.sum(input, labels=labels)
assert_almost_equal(output, 2.0)
def test_sum11():
"sum 11"
labels = np.array([1, 2], np.int8)
for type in types:
input = np.array([[1, 2], [3, 4]], type)
output = ndimage.sum(input, labels=labels,
index=2)
assert_almost_equal(output, 6.0)
def test_sum12():
"sum 12"
labels = np.array([[1, 2], [2, 4]], np.int8)
for type in types:
input = np.array([[1, 2], [3, 4]], type)
output = ndimage.sum(input, labels=labels,
index=[4, 8, 2])
assert_array_almost_equal(output, [4.0, 0.0, 5.0])
def test_mean01():
"mean 1"
labels = np.array([1, 0], bool)
for type in types:
input = np.array([[1, 2], [3, 4]], type)
output = ndimage.mean(input, labels=labels)
assert_almost_equal(output, 2.0)
def test_mean02():
"mean 2"
labels = np.array([1, 0], bool)
input = np.array([[1, 2], [3, 4]], bool)
output = ndimage.mean(input, labels=labels)
assert_almost_equal(output, 1.0)
def test_mean03():
"mean 3"
labels = np.array([1, 2])
for type in types:
input = np.array([[1, 2], [3, 4]], type)
output = ndimage.mean(input, labels=labels,
index=2)
assert_almost_equal(output, 3.0)
def test_mean04():
"mean 4"
labels = np.array([[1, 2], [2, 4]], np.int8)
olderr = np.seterr(all='ignore')
try:
for type in types:
input = np.array([[1, 2], [3, 4]], type)
output = ndimage.mean(input, labels=labels,
index=[4, 8, 2])
assert_array_almost_equal(output[[0,2]], [4.0, 2.5])
assert_(np.isnan(output[1]))
finally:
np.seterr(**olderr)
def test_minimum01():
"minimum 1"
labels = np.array([1, 0], bool)
for type in types:
input = np.array([[1, 2], [3, 4]], type)
output = ndimage.minimum(input, labels=labels)
assert_almost_equal(output, 1.0)
def test_minimum02():
"minimum 2"
labels = np.array([1, 0], bool)
import sys
import pickle
import numpy as np
from PIL import Image
from scipy.optimize import curve_fit
from sklearn.metrics import r2_score
from datetime import datetime
import matplotlib.pyplot as plt
from joblib import Parallel, delayed
startTime = datetime.now()
np.set_printoptions(threshold=sys.maxsize)
def load_obj(name):
with open(name + '.pkl', 'rb') as f:
return pickle.load(f)
def read_data(img1):
''' helper function to make reading in DEMs easier '''
# this is the original DEM
if img1 == "original":
# img1 = Image.open('D:/01_anaktuvuk_river_fire/00_working/01_processed-data/00_study-area'
# '/li-dem_1m_sa3_fill.tif')
img1 = Image.open('D:/01_anaktuvuk_river_fire/00_working/01_processed-data/00_study-area/bens_data'
'/ben_2009_DTM_1m_small-sa.tif')
img1 = np.array(img1)
# this is the microtopo image:
if img1 == "detrended":
# img1 = Image.open('D:/01_anaktuvuk_river_fire/00_working/01_processed-data/02_microtopography'
# '/awi_2019_DTM_1m_reproj_300x300_02_microtopo_16m.tif')
img1 = Image.open("D:/01_anaktuvuk_river_fire/00_working/01_processed-data/02_microtopography/"
"ben_2009_DTM_1m_small-sa_detrended_16m.tif")
img1 = np.array(img1)
return img1
def inner(key, val, out_key):
''' fits a gaussian to every transect
height profile and adds transect parameters
to the dictionary.
:param key: coords of trough pixel
(determines center of transect)
:param val: list of transect heights,
coords, and directionality/type
:param out_key: current edge with (s, e)
:return val: updated val with:
- val[5] = fwhm_gauss --> transect width
- val[6] = mean_gauss --> transect depth
- val[7] = cod_gauss --> r2 of fit
'''
# implement the gaussian function
def my_gaus(x, a, mu, sigma):
return a * np.exp(-(x - mu) ** 2 / (2 * sigma ** 2))
# check if there's a transect to fit in the first place
# (some transects at the image edge/corner might be empty) --> but there are none
if len(val[0]) != 0:
# flip the transect along x-axis to be able to fit the Gaussian
data = val[0] * (-1) + np.max(val[0])
N = len(data) # number of data points (corresponds to width*2 + 1)
# diagonal transects are sqrt(2) times longer than straight transects
if val[2] == "diagonal":
t = np.linspace(0, (len(data)) * np.sqrt(2), N)
else:
t = np.linspace(0, len(data) - 1, N)
# provide initial guesses for the mean and sigma for fitting
mean = np.argmax(data) # mean is estimated to be at the maximum point of the flipped transect
# (lowest point within the trough)
sigma = np.sqrt(sum(data * (t - mean) ** 2) / N) + 1 # estimate for sigma is determined via the underlying data
# + 1 to avoid division by 0 for flat transects
# now fit the Gaussian & raise error for those that can't be fitted
try:
gauss_fit = curve_fit(my_gaus, t, data, p0=[1, mean, sigma], maxfev=500000,
bounds=[(-np.inf, -np.inf, 0.01), (np.inf, np.inf, 8.5)])
except RuntimeError:
print('RuntimeError is raised with edge: {0} coords {1} and elevations: {2}'.format(out_key, key, val))
# pass
try:
# recreate the fitted curve using the optimized parameters
data_gauss_fit = my_gaus(t, *gauss_fit[0])
# and finally get depth and width and r2 of fit for adding to original dictionary (val)
max_gauss = np.max(data_gauss_fit)
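# full width at half maximum of a Gaussian: FWHM = 2 * sqrt(2 * ln 2) * sigma ~ 2.355 * sigma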
fwhm_gauss = 2 * np.sqrt(2 * np.log(2)) * abs(gauss_fit[0][2])
cod_gauss = r2_score(data, data_gauss_fit)
# append the parameters to val
val.append(fwhm_gauss)
val.append(max_gauss)
val.append(cod_gauss)
plotting=True
if key[0]==15 and key[1]==610:
plt.plot(t, data, '+:', label='DTM elevation', color='darkslategrey')
plt.plot(t, data_gauss_fit, color='lightseagreen',
label='fitted Gaussian')
# , d={0}, w={1}, r2={2}'.format(round(max_gauss, 2),
# round(fwhm_gauss, 2),
# round(cod_gauss, 2)
plt.legend(frameon=False)
plt.ylabel("depth below ground [m]")
plt.xlabel("transect length [m]")
plt.xticks(np.arange(9), np.arange(1, 10))
plt.text(0, 0.25, f'trough width: {round(fwhm_gauss, 2)} m', fontsize=8)
plt.text(0, 0.235, f'trough depth: {round(max_gauss, 2)} m', fontsize=8)
plt.text(0, 0.22, f'$r^2$ of fit: {round(cod_gauss, 2)}', fontsize=8)
# plt.title("direction: {0}, category: {1}".format(val[2], val[3]))
plt.savefig('./figures/fitted_to_coords_{0}_{1}.png'.format(key[0], key[1]), dpi=300)
plt.close()
except:
# bad error handling:
if val[4]:
print("a water-filled trough can't be fitted: edge: {}".format(out_key))
else:
print("something seriously wrong")
else:
print(val)
def outer(out_key, inner_dict):
''' iterate through all transects of a
single trough and send to inner()
where gaussian will be fitted.
:param out_key: current edge with (s, e)
:param inner_dict: dict of transects with:
- inner_keys: pixel-coords of trough pixels (x, y)
inbetween (s, e).
- inner_values: list with transect coordinates
and info on directionality/type
:return inner_dict: updated inner_dict with old
inner_values + transect width, height, r2 in val
'''
all_keys = []
all_vals_upd = []
# iterate through all transects of a trough
for key, val in inner_dict.items():
try:
# fit gaussian to all transects
val_upd = inner(key, val, out_key)
all_keys.append(key)
all_vals_upd.append(val_upd)
except ValueError as err:
print('{0} -- {1}'.format(out_key, err))
# recombine keys and vals to return the updated dict
inner_dict = dict(zip(all_keys, all_vals_upd))
return inner_dict
def fit_gaussian_parallel(dict_soil):
'''iterate through edges of the graph (in dict
form) and send each trough to a free CPU core
--> prepare fitting a Gaussian function
to the extracted transects in dict_soil
for parallel processing: each trough will
be handled by a single CPU core, but different
troughs can be distributed to multiple cores.
:param dict_soil: a dictionary with
- outer_keys: edge (s, e) and
- outer_values: dict of transects
with:
- inner_keys: pixel-coords of trough pixels (x, y)
inbetween (s, e).
- inner_values: list with transect coordinates and info:
- [0]: height information of transect at loc (xi, yi)
- [1]: pixel coordinates of transect (xi, yi)
--> len[1] == width*2 + 1
- [2]: directionality of transect
- [3]: transect scenario (see publication)
- [4]: presence of water
:return dict_soil2: updated dict soil
same as dict_soil with added:
- inner_values:
- val[5] = fwhm_gauss --> transect width
- val[6] = mean_gauss --> transect depth
- val[7] = cod_gauss --> r2 of fit
'''
all_outer_keys = []
# parallelize into n_jobs different jobs/CPU cores
out = Parallel(n_jobs=20)(delayed(outer)(out_key, inner_dict) for out_key, inner_dict in dict_soil.items())
# get all the outer_keys
for out_key, inner_dict in dict_soil.items():
all_outer_keys.append(out_key)
# and recombine them with the updated inner_dict
dict_soil2 = dict(zip(all_outer_keys, out))
return dict_soil2
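# Illustrative usage (added note, not part of the original script): dict_soil is normally built
# by an upstream transect-extraction step; the hand-made entry below only shows the expected
# nesting {edge: {pixel: [heights, coords, direction, scenario, water]}} with hypothetical values.
#
# dict_soil = {((0, 0), (5, 5)): {(2, 2): [np.array([0.10, 0.05, 0.00, 0.04, 0.09]),
#                                          [(2, 0), (2, 1), (2, 2), (2, 3), (2, 4)],
#                                          "straight", 1, False]}}
# dict_soil_fitted = fit_gaussian_parallel(dict_soil)
# mean_trough_params = get_trough_avgs_gauss(dict_soil_fitted)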
def save_obj(obj, name):
with open(name + '.pkl', 'wb') as f:
pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
def get_trough_avgs_gauss(transect_dict_fitted):
''' gather all width/depth/r2 parameters of
each transect and compute mean/median
parameter per trough. Add mean/median per
trough to the dict.
this part is mainly preparation for the
later network_analysis(.py).
:param transect_dict_fitted:
:return mean_trough_params: a copy of the
transect_dict_fitted with added mean trough
parameters to the outer dict as values.
'''
mean_trough_params = {}
empty_edges = []
# iterate through all edges/troughs
for edge, trough in transect_dict_fitted.items():
num_trans_tot = len(trough) # get the total number of transects in one edge/trough
gaus_width_sum = []
gaus_depth_sum = []
gaus_r2_sum = []
num_trans_cons = 0
water = 0
# check if an edge/trough is empty
if trough != {}:
# then iterate through all transects of the current edge/trough
for coords, trans in trough.items():
# filter out all transects that:
# a) are not between 0 m and 15 m in width (unrealistic values)
# b) have been fitted with r2 <= 0.8
# c) likely have water present
if not isinstance(trans, list):
pass
# now count number of water-filled transects per trough
elif trans[4]:
water += 1
# pass
elif len(trans[0]) != 0 and 0 < trans[5] < 15 and trans[7] > 0.8 and not trans[4]:
# append the parameters from "good" transects to the lists
gaus_width_sum.append(trans[5])
gaus_depth_sum.append(trans[6])
gaus_r2_sum.append(trans[7])
num_trans_cons += 1
# to then calculate the mean/median for each parameter
gaus_mean_width = np.mean(gaus_width_sum)
gaus_median_width = np.median(gaus_width_sum)
gaus_mean_depth = np.mean(gaus_depth_sum)
gaus_median_depth = np.median(gaus_depth_sum)
gaus_mean_r2 = np.mean(gaus_r2_sum)
gaus_median_r2 = np.median(gaus_r2_sum)
# ratio of "good" transects considered for mean/median params compared to all transects available
perc_trans_cons = np.round(num_trans_cons/num_trans_tot, 2)
perc_water_fill = np.round(water/len(trough), 2)
# add all the mean/median parameters to the inner_dict
mean_trough_params[edge] = [gaus_mean_width, gaus_median_width,
gaus_mean_depth, gaus_median_depth,
gaus_mean_r2, gaus_median_r2,
perc_trans_cons, perc_water_fill]
# and if the trough is empty, append the edge to the list of empty edges
else:
empty_edges.append(edge)
# print(transect_dict_fitted[edge])
# print('empty edges ({0} in total): {1}'.format(len(empty_edges), empty_edges))
return mean_trough_params
def plot_param_hists_box_width(transect_dict_orig_fitted_09, transect_dict_orig_fitted_19):
''' plot and save histogram and boxplot
of all transect widths distribution for
two points in time and for all vs.
filtered results.
:param transect_dict_orig_fitted_09:
dictionary of 2009 situation
:param transect_dict_orig_fitted_19:
dictionary of 2019 situation
:return: plot with hist and boxplot
'''
all_widths_09 = []
hi_widths_09 = []
for edge, inner_dic in transect_dict_orig_fitted_09.items():
for skel_pix, trans_info in inner_dic.items():
# print(trans_info)
if -30 < trans_info[5] < 30:
all_widths_09.append(np.abs(trans_info[5]))
if trans_info[7] > 0.8:
hi_widths_09.append(np.abs(trans_info[5]))
all_widths_19 = []
hi_widths_19 = []
for edge, inner_dic in transect_dict_orig_fitted_19.items():
for skel_pix, trans_info in inner_dic.items():
# print(trans_info)
if -30 < trans_info[5] < 30:
all_widths_19.append(np.abs(trans_info[5]))
if trans_info[7] > 0.8:
hi_widths_19.append(np.abs(trans_info[5]))
# print(f'all widths: \t 2009: {len(all_widths_09)} \t 2019: {len(all_widths_19)}')
# print(f'hi widths: \t 2009: {len(hi_widths_09)} \t 2019: {len(hi_widths_19)}')
print("WIDTH")
print("r2 > 0.8")
print(f'median width: \t 2009: {np.median(hi_widths_09)} \t 2019: {np.median(hi_widths_19)}')
print(f'mean width: \t 2009: {np.mean(hi_widths_09)} \t 2019: {np.mean(hi_widths_19)}')
print(f'min width: \t 2009: {np.min(hi_widths_09)} \t 2019: {np.min(hi_widths_19)}')
print(f'max width: \t 2009: {np.max(hi_widths_09)} \t 2019: {np.max(hi_widths_19)}')
print(f'std width: \t 2009: {np.std(hi_widths_09)} \t 2019: {np.std(hi_widths_19)}')
print("all r2")
print(f'median width: \t 2009: {np.median(all_widths_09)} \t 2019: {np.median(all_widths_19)}')
print(f'mean width: \t 2009: {np.mean(all_widths_09)} \t 2019: {np.mean(all_widths_19)}')
print(f'min width: \t 2009: {np.min(all_widths_09)} \t 2019: {np.min(all_widths_19)}')
print(f'max width: \t 2009: {np.max(all_widths_09)} \t 2019: {np.max(all_widths_19)}')
import torch
import torch.nn as nn
import torch.nn.functional as F
import src.training_utils as training_utils
import numpy as np
from flair.training_utils import store_embeddings
from .data import TagSequence
import src.utils as utils
START_TAG = '<START>'
STOP_TAG = '<STOP>'
class BaseModel(nn.Module):
def __init__(self, embeddings, hidden_size, tag_dictionary, args):
super(BaseModel, self).__init__()
self.word_dropout_rate = args.get('word_dropout', 0.05)
self.locked_dropout_rate = args.get('locked_dropout', 0.5)
self.relearn_embeddings = args.get('relearn_embeddings', True)
self.device = args.get('device', torch.device('cuda') if torch.cuda.is_available() else 'cpu')
self.use_crf = args.get('use_crf', True)
self.cell_type = args.get('cell_type', 'LSTM') # in (RNN, LSTM, GRU)
self.tag_type_mode = args.get('tag_name', 'ner')
self.bidirectional = args.get('bidirectional', True)
self.embeddings = embeddings
self.hidden_size = hidden_size
self.tag_dictionary = tag_dictionary
self.tagset_size = len(tag_dictionary)
self.word_dropout = WordDropout(self.word_dropout_rate)
self.locked_dropout = LockedDropout(self.locked_dropout_rate)
if self.relearn_embeddings:
self.embedding2nn = torch.nn.Linear(embeddings.embedding_length, embeddings.embedding_length)
self.rnn = getattr(torch.nn, self.cell_type)(embeddings.embedding_length, hidden_size,
batch_first=True, bidirectional=self.bidirectional)
self.linear = nn.Linear((2 if self.bidirectional else 1) * self.hidden_size, self.tagset_size)
if self.use_crf:
self.transitions = nn.Parameter(torch.randn(self.tagset_size, self.tagset_size))
self.transitions.detach()[self.tag_dictionary.get_index(START_TAG), :] = -10000
self.transitions.detach()[:, self.tag_dictionary.get_index(STOP_TAG)] = -10000
self.to(self.device)
def _forward(self, sentences):
self.embeddings.embed(sentences)
lens = [len(sentence.tokens) for sentence in sentences]
embeddings_list = []
for sentence in sentences:
embeddings_list.append(torch.cat([token.get_embedding().unsqueeze(0).to(self.device) for token in sentence.tokens], 0))
x = training_utils.pad_sequence(embeddings_list, batch_first=True, padding_value=0, padding='post')
x = self.word_dropout(x)
x = self.locked_dropout(x)
if self.relearn_embeddings:
x = self.embedding2nn(x)
packed = torch.nn.utils.rnn.pack_padded_sequence(x, lens, enforce_sorted=False, batch_first=True)
rnn_output, hidden = self.rnn(packed)
x, _ = torch.nn.utils.rnn.pad_packed_sequence(rnn_output, batch_first=True)
x = self.locked_dropout(x)
x = self.linear(x)
return x
def _scores(self, x, tag_sequences, reduce_to_batch=False):
tags_list = []
for tag_sequence in tag_sequences:
tags_list.append(torch.tensor([self.tag_dictionary.get_index(tag) for tag in tag_sequence], dtype=torch.long, device=self.device))
lens = [len(tag_sequence) for tag_sequence in tag_sequences]
if self.use_crf:
y = training_utils.pad_sequence(tags_list, batch_first=True, padding_value=0, padding='post')
forward_score = self._forward_alg(x, lens)
gold_score = self._score_sentence(x, y, lens)
return gold_score, forward_score # (batch_size,), (batch_size,)
else:
score = torch.zeros(x.shape[0], x.shape[1]).to(self.device)
for i, feats, tags, length in zip(range(x.shape[0]), x, tags_list, lens):
feats = feats[:length]
for j in range(feats.shape[0]):
score[i][j] = torch.nn.functional.cross_entropy(feats[j:j+1], tags[j:j+1])
if reduce_to_batch:
reduced_score = torch.zeros(score.shape[0]).to(self.device)
for i in range(reduced_score.shape[0]):
reduced_score[i] = score[i, :lens[i]].sum()
score = reduced_score
return score # (batch_size, max_time) or (batch_size)
def _loss(self, x, tag_sequences):
if self.use_crf:
gold_score, forward_score = self._scores(x, tag_sequences)
score = forward_score - gold_score
return score.mean()
else:
entropy_score = self._scores(x, tag_sequences, reduce_to_batch=True)
return entropy_score.mean()
def evaluate(self, data_loader, embedding_storage_mode):
with torch.no_grad():
eval_loss = 0
if self.use_crf:
transitions = self.transitions.detach().cpu().numpy()
else:
transitions = None
cm = utils.ConfusionMatrix()
for batch in data_loader:
sentences, tag_sequences = batch
x = self._forward(sentences)
loss = self._loss(x, tag_sequences)
predicted_tag_sequences, confs = self._obtain_labels(x, sentences, transitions, self.tag_type_mode)
for i in range(len(sentences)):
gold = tag_sequences[i].get_span()
pred = predicted_tag_sequences[i].get_span()
for pred_span in pred:
if pred_span in gold:
cm.add_tp(pred_span[0])
else:
cm.add_fp(pred_span[0])
for gold_span in gold:
if gold_span not in pred:
cm.add_fn(gold_span[0])
eval_loss += loss.item()
store_embeddings(sentences, embedding_storage_mode)
eval_loss /= len(data_loader)
if self.tag_type_mode == 'ner':
res = utils.EvaluationResult(cm.micro_f_measure())
else:
res = utils.EvaluationResult(cm.micro_accuracy)
res.add_metric('Confusion Matrix', cm)
return eval_loss, res
def _forward_alg(self, feats, lens_):
init_alphas = torch.FloatTensor(self.tagset_size).fill_(-10000.0)
init_alphas[self.tag_dictionary.get_index(START_TAG)] = 0.
forward_var = torch.zeros(
feats.shape[0],
feats.shape[1] + 1,
feats.shape[2],
dtype=torch.float,
device=self.device,
)
forward_var[:, 0, :] = init_alphas[None, :].repeat(feats.shape[0], 1)
transitions = self.transitions.view(
1, self.transitions.shape[0], self.transitions.shape[1]
).repeat(feats.shape[0], 1, 1)
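# forward recursion in log space: for each target tag, add its emission score and
# log-sum-exp over all previous tags of (previous forward score + transition score);
# subtracting max_tag_var before exponentiating keeps the log-sum-exp numerically stable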
for i in range(feats.shape[1]):
emit_score = feats[:, i, :]
tag_var = (
emit_score[:, :, None].repeat(1, 1, transitions.shape[2])
+ transitions
+ forward_var[:, i, :][:, :, None]
.repeat(1, 1, transitions.shape[2])
.transpose(2, 1)
)
max_tag_var, _ = torch.max(tag_var, dim=2)
tag_var = tag_var - max_tag_var[:, :, None].repeat(
1, 1, transitions.shape[2]
)
agg_ = torch.log(torch.sum(torch.exp(tag_var), dim=2))
cloned = forward_var.clone()
cloned[:, i + 1, :] = max_tag_var + agg_
forward_var = cloned
forward_var = forward_var[range(forward_var.shape[0]), lens_, :]
terminal_var = forward_var + self.transitions[
self.tag_dictionary.get_index(STOP_TAG)
][None, :].repeat(forward_var.shape[0], 1)
alpha = log_sum_exp_batch(terminal_var)
return alpha
def _score_sentence(self, feats, tags_idx, lens_):
start = torch.tensor(
[self.tag_dictionary.get_index(START_TAG)], device=self.device
)
start = start[None, :].repeat(tags_idx.shape[0], 1)
stop = torch.tensor(
[self.tag_dictionary.get_index(STOP_TAG)], device=self.device
)
stop = stop[None, :].repeat(tags_idx.shape[0], 1)
pad_start_tags = torch.cat([start, tags_idx], 1)
pad_stop_tags = torch.cat([tags_idx, stop], 1)
for i in range(len(lens_)):
pad_stop_tags[i, lens_[i] :] = self.tag_dictionary.get_index(
STOP_TAG
)
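# gold-path score per sequence: sum of transition scores along the START-...-STOP padded
# tag path plus the sum of emission scores at the gold tags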
score = torch.FloatTensor(feats.shape[0]).to(self.device)
for i in range(feats.shape[0]):
r = torch.LongTensor(range(lens_[i])).to(self.device)
score[i] = torch.sum(
self.transitions[
pad_stop_tags[i, : lens_[i] + 1], pad_start_tags[i, : lens_[i] + 1]
]
) + torch.sum(feats[i, r, tags_idx[i, : lens_[i]]])
# for mixup -> emission scores --> sum over j <= lens_[i] ( lambda[j] * f[i, j, tags_idx[i, j]] )
# transition scores --> t[tags_idx[i, j], tags_idx[i, j+1]] * (lambda[j] + lambda[j+1]) / 2.0
return score
def _viterbi_decode(self, feats, transitions):
id_start = self.tag_dictionary.get_index(START_TAG)
id_stop = self.tag_dictionary.get_index(STOP_TAG)
backpointers = np.empty(shape=(feats.shape[0], self.tagset_size), dtype=np.int_)
backscores = np.empty(shape=(feats.shape[0], self.tagset_size), dtype=np.float32)
init_vvars = np.expand_dims(np.repeat(-10000.0, self.tagset_size), 0)
import headers.linear_algebra_pack as lag
import headers.pyplot_vision_pack as pvp
import headers.auxiliary_functions_pack as aux
from cycler import cycler
import matplotlib.pyplot as plt
import numpy as np
from math import *
def main():
plt.rcParams['figure.figsize'] = (12,8)
plt.rcParams['font.size'] = 16
plt.rcParams['image.cmap'] = 'gist_heat'
plt.rcParams['axes.linewidth'] = 1
plt.rcParams['axes.prop_cycle'] = cycler(color=plt.get_cmap('tab20').colors)
test_sample_size = 400
x = np.arange(test_sample_size)
trend = (np.sqrt(x + 4 * np.sqrt(x + 4)) - 10) ** 2
period_1 = 2 * (np.sin(0.1 * np.pi * x))
import numpy as np
from load_csv import tpm2ival
from .name2idx import C, V
from .set_model import param_values, initial_values
class SearchParam(object):
""" Specify model parameters and/or initial values to optimize
"""
# parameters
idx_params = [
C.VmaxPY,
C.KmPY,
C.kdeg,
C.kf47,
C.Vmaxr47,
C.Kmf47,
C.Kmr47,
C.kf48,
C.Kmf48,
C.Kmr48,
#C.PTEN,
C.kf49,
C.kr49,
C.Kmf49,
C.Kmr49,
C.Kmr49b,
C.kr49b,
C.kf51,
C.Vmaxr51,
C.Kmf51,
C.Kmr51,
C.Kmrb51,
C.kf52,
C.Vmaxr52,
C.Kmf52,
C.Kmr52,
C.kf54,
C.Vmaxr54,
C.Kmf54,
C.Kmr54,
C.kf55,
C.Vmaxr55,
C.Kmf55,
C.Kmr55,
C.kf38,
C.kf39,
C.kf50,
C.a98,
C.b98,
C.koff46,
C.EGF_off,
C.HRGoff_3,
C.HRGoff_4,
C.koff4,
C.koff5,
C.koff6,
C.koff7,
C.koff8,
C.koff9,
C.koff61,
C.koff62,
C.koff16,
C.koff17,
C.koff18,
C.koff19,
C.koff20,
C.koff21,
C.koff22,
C.koff23,
C.koff24,
C.koff25,
C.koff26,
C.koff27,
C.koff28,
C.koff29,
C.koff30,
C.koff31,
C.koff32,
C.koff33,
C.koff34,
C.koff35,
C.koff36,
C.koff37,
C.koff65,
C.koff66,
C.koff67,
C.koff68,
C.koff69,
C.koff70,
C.koff71,
C.koff72,
C.koff40,
C.koff41,
C.koff42,
C.koff43,
C.koff44,
C.koff45,
C.koff57,
C.koff58,
C.koff59,
C.koff60,
C.kPTP10,
C.kPTP11,
C.kPTP12,
C.kPTP13,
C.kPTP14,
C.kPTP15,
C.kPTP63,
C.kPTP64,
C.koff73,
C.koff74,
C.koff75,
C.koff76,
C.koff77,
C.koff78,
C.koff79,
C.koff80,
C.kPTP38,
C.kPTP39,
C.koff88,
C.kPTP50,
C.kf81,
C.Vmaxr81,
C.Kmf81,
C.Kmr81,
C.kf82,
C.Vmaxr82,
C.Kmf82,
C.Kmr82,
C.kf83,
C.Vmaxr83,
C.Kmf83,
C.Kmr83,
C.kf84,
C.Vmaxr84,
C.Kmf84,
C.Kmr84,
C.kf85,
C.Vmaxr85,
C.Kmf85,
C.Kmr85,
C.kcon49,
C.kon1,
C.kon86,
C.kon2,
C.kon3,
C.kon87,
C.kon4,
C.kon5,
C.kon6,
C.kon7,
C.kon8,
C.kon9,
C.kon61,
C.kon62,
C.kf10,
C.kf11,
C.kf12,
C.kf13,
C.kf14,
C.kf15,
C.kf63,
C.kf64,
C.kon16,
C.kon17,
C.kon18,
C.kon73,
C.kon19,
C.kon20,
C.kon21,
C.kon74,
C.kon22,
C.kon23,
C.kon24,
C.kon25,
C.kon75,
C.kon26,
C.kon27,
C.kon28,
C.kon29,
C.kon76,
C.kon30,
C.kon31,
C.kon32,
C.kon33,
C.kon77,
C.kon34,
C.kon35,
C.kon36,
C.kon37,
C.kon78,
C.kon65,
C.kon66,
C.kon67,
C.kon68,
C.kon79,
C.kon69,
C.kon70,
C.kon71,
C.kon72,
C.kon80,
C.kon40,
C.kon41,
C.kon42,
C.kon43,
C.kon44,
C.kon45,
C.kon88,
C.kon46,
C.kon57,
C.kon58,
C.kon59,
C.kon60,
# Nakakuki et al., Cell (2010)
C.V1,
C.Km1,
C.V5,
C.Km5,
C.V10,
C.Km10,
#C.n10,
C.p11,
C.p12,
C.p13,
C.V14,
C.Km14,
C.V15,
C.Km15,
C.KimDUSP,
C.KexDUSP,
C.V20,
C.Km20,
C.V21,
C.Km21,
C.V24,
C.Km24,
C.V25,
C.Km25,
C.KimRSK,
C.KexRSK,
C.V27,
C.Km27,
C.V28,
C.Km28,
C.V29,
C.Km29,
C.V30,
C.Km30,
C.V31,
C.Km31,
#C.n31,
C.p32,
C.p33,
C.p34,
C.V35,
C.Km35,
C.V36,
C.Km36,
C.V37,
C.Km37,
C.KimFOS,
C.KexFOS,
C.V42,
C.Km42,
C.V43,
C.Km43,
C.V44,
C.Km44,
C.p47,
C.m47,
C.p48,
C.p49,
C.m49,
C.p50,
C.p51,
C.m51,
C.V57,
C.Km57,
#C.n57,
C.p58,
C.p59,
C.p60,
C.p61,
C.KimF,
C.KexF,
C.p63,
C.KF31,
#C.nF31,
#
C.w_E1,
C.w_E2,
C.w_E3,
C.w_E4,
C.w_G,
#C.w_S,
C.w_SHC1,
C.w_SHC2,
C.w_SHC3,
C.w_SHC4,
#C.w_I,
C.w_PIK3CA,
C.w_PIK3CB,
C.w_PIK3CD,
C.w_PIK3CG,
# ver. 5
C.w_PTEN,
#C.w_R,
C.w_RASA1,
C.w_RASA2,
C.w_RASA3,
#C.w_O,
C.w_SOS1,
C.w_SOS2,
C.w_A,
#C.w_Akt,
C.w_AKT1,
C.w_AKT2,
#C.w_RsD,
C.w_HRAS,
C.w_KRAS,
C.w_NRAS,
#C.w_Raf,
C.w_ARAF,
C.w_BRAF,
C.w_RAF1,
#C.w_MEK,
C.w_MAP2K1,
C.w_MAP2K2,
C.w_T,
C.w_CREBn,
#C.w_ERKc,
C.w_MAPK1,
C.w_MAPK3,
C.w_Elk1n,
#C.w_RSKc,
C.w_RPS6KA1,
C.w_RPS6KA2,
C.w_RPS6KA3,
C.w_RPS6KA6,
]
# initial values
idx_initials = [
V.P2,
]
def get_region(self):
x = param_values()
y0 = initial_values()
search_param = self._init_search_param(x, y0)
search_rgn = np.zeros((2, len(x)+len(y0)))
# Default: 0.1 ~ 10
for i, j in enumerate(self.idx_params):
search_rgn[0, j] = search_param[i] * 0.1 # lower bound
search_rgn[1, j] = search_param[i] * 10. # upper bound
# Default: 0.5 ~ 2
for i, j in enumerate(self.idx_initials):
search_rgn[0, j+len(x)] = \
search_param[i+len(self.idx_params)] * 0.5 # lower bound
search_rgn[1, j+len(x)] = \
search_param[i+len(self.idx_params)] * 2.0 # upper bound
# search_rgn[:, C.parameter] = [lower_bound,upper_bound]
# search_rgn[:, V.specie+len(x)] = [lower_bound,upper_bound]
search_rgn[:, V.P2+len(x)] = [1.0, 1000.0]
search_rgn[:, C.w_E1] = [0.1, 100.0]
search_rgn[:, C.w_E2] = [0.1, 100.0]
search_rgn[:, C.w_E3] = [0.1, 100.0]
search_rgn[:, C.w_E4] = [0.1, 100.0]
search_rgn[:, C.w_G] = [0.1, 100.0]
#search_rgn[:, C.w_S] = [0.1, 100.0]
search_rgn[:, C.w_SHC1] = [0.1, 100.0]
search_rgn[:, C.w_SHC2] = [0.1, 100.0]
search_rgn[:, C.w_SHC3] = [0.1, 100.0]
search_rgn[:, C.w_SHC4] = [0.1, 100.0]
#search_rgn[:, C.w_I] = [0.1, 100.0]
search_rgn[:, C.w_PIK3CA] = [0.1, 100.0]
search_rgn[:, C.w_PIK3CB] = [0.1, 100.0]
search_rgn[:, C.w_PIK3CD] = [0.1, 100.0]
search_rgn[:, C.w_PIK3CG] = [0.1, 100.0]
# ver.5
search_rgn[:, C.w_PTEN] = [0.1, 100.0]
#search_rgn[:, C.w_R] = [0.1, 100.0]
search_rgn[:, C.w_RASA1] = [0.1, 100.0]
search_rgn[:, C.w_RASA2] = [0.1, 100.0]
search_rgn[:, C.w_RASA3] = [0.1, 100.0]
#search_rgn[:, C.w_O] = [0.1, 100.0]
search_rgn[:, C.w_SOS1] = [0.1, 100.0]
search_rgn[:, C.w_SOS2] = [0.1, 100.0]
search_rgn[:, C.w_A] = [0.1, 100.0]
#search_rgn[:, C.w_Akt] = [0.1, 100.0]
search_rgn[:, C.w_AKT1] = [0.1, 100.0]
search_rgn[:, C.w_AKT2] = [0.1, 100.0]
#search_rgn[:, C.w_RsD] = [0.1, 100.0]
search_rgn[:, C.w_HRAS] = [0.1, 100.0]
search_rgn[:, C.w_KRAS] = [0.1, 100.0]
search_rgn[:, C.w_NRAS] = [0.1, 100.0]
#search_rgn[:, C.w_Raf] = [0.1, 100.0]
search_rgn[:, C.w_ARAF] = [0.1, 100.0]
search_rgn[:, C.w_BRAF] = [0.1, 100.0]
search_rgn[:, C.w_RAF1] = [0.1, 100.0]
#search_rgn[:, C.w_MEK] = [0.1, 100.0]
search_rgn[:, C.w_MAP2K1] = [0.1, 100.0]
search_rgn[:, C.w_MAP2K2] = [0.1, 100.0]
search_rgn[:, C.w_T] = [0.1, 100.0]
search_rgn[:, C.w_CREBn] = [0.1, 100.0]
#search_rgn[:, C.w_ERKc] = [0.1, 100.0]
search_rgn[:, C.w_MAPK1] = [0.1, 100.0]
search_rgn[:, C.w_MAPK3] = [0.1, 100.0]
search_rgn[:, C.w_Elk1n] = [0.1, 100.0]
#search_rgn[:, C.w_RSKc] = [0.1, 100.0]
search_rgn[:, C.w_RPS6KA1] = [0.1, 100.0]
search_rgn[:, C.w_RPS6KA2] = [0.1, 100.0]
search_rgn[:, C.w_RPS6KA3] = [0.1, 100.0]
search_rgn[:, C.w_RPS6KA6] = [0.1, 100.0]
search_rgn = self._conv_lin2log(search_rgn)
return search_rgn
def update(self, indiv):
x = param_values()
y0 = initial_values()
for i, j in enumerate(self.idx_params):
x[j] = indiv[i]
for i, j in enumerate(self.idx_initials):
y0[j] = indiv[i+len(self.idx_params)]
x, y0 = tpm2ival(x, y0, 'SK-BR-3')
# constraints --------------------------------------------------------------
x[C.V6] = x[C.V5]
x[C.Km6] = x[C.Km5]
x[C.KimpDUSP] = x[C.KimDUSP]
x[C.KexpDUSP] = x[C.KexDUSP]
x[C.KimpcFOS] = x[C.KimFOS]
x[C.KexpcFOS] = x[C.KexFOS]
x[C.p52] = x[C.p47]
x[C.m52] = x[C.m47]
x[C.p53] = x[C.p48]
x[C.p54] = x[C.p49]
x[C.m54] = x[C.m49]
x[C.p55] = x[C.p50]
x[C.p56] = x[C.p51]
x[C.m56] = x[C.m51]
# --------------------------------------------------------------------------
return x, y0
def gene2val(self, indiv_gene):
search_rgn = self.get_region()
indiv = 10**(
indiv_gene * (
search_rgn[1, :] - search_rgn[0, :]
) + search_rgn[0, :]
)
return indiv
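# Usage sketch (not in the original; assumes, as in _conv_lin2log below, that
# get_region() returns log10 lower/upper bounds per column):
#   sp = SearchParam()
#   rgn = sp.get_region()
#   sp.gene2val(np.zeros(rgn.shape[1]))  # -> 10 ** rgn[0, :], i.e. every lower bound
#   sp.gene2val(np.ones(rgn.shape[1]))   # -> 10 ** rgn[1, :], i.e. every upper bound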
def _init_search_param(self, x, y0):
"""Initialize search_param
"""
if len(self.idx_params) != len(set(self.idx_params)):
raise ValueError('Duplicate parameters.')
elif len(self.idx_initials) != len(set(self.idx_initials)):
raise ValueError('Duplicate species.')
search_param = np.empty(
len(self.idx_params) + len(self.idx_initials)
)
for i, j in enumerate(self.idx_params):
search_param[i] = x[j]
for i, j in enumerate(self.idx_initials):
search_param[i+len(self.idx_params)] = y0[j]
if np.any(search_param == 0.):
message = 'search_param must not contain zero.'
for idx in self.idx_params:
if x[int(idx)] == 0.:
raise ValueError(
'"C.{}" in idx_params: '.format(
C.NAMES[int(idx)]
) + message
)
for idx in self.idx_initials:
if y0[int(idx)] == 0.:
raise ValueError(
'"V.{}" in idx_initials: '.format(
V.NAMES[int(idx)]
) + message
)
return search_param
def _conv_lin2log(self, search_rgn):
"""Convert Linear scale to Logarithmic scale
"""
for i in range(search_rgn.shape[1]):
if np.min(search_rgn[:, i]) < 0.0:
message = 'search_rgn[lb,ub] must be positive.'
if i <= C.NUM:
raise ValueError(
'"C.{}": '.format(C.NAMES[i]) + message
)
else:
raise ValueError(
'"V.{}": '.format(V.NAMES[i-C.NUM]) + message
)
elif np.min(search_rgn[:, i]) == 0 and np.max(search_rgn[:, i]) != 0:
import cv2 as cv
import numpy as np
from matplotlib import pyplot as plt
from main_model import URD
# Load the model
model, ae, encoder = URD('models/ae_0112.h5', 'DEC_model_final.h5').get_model()
im = cv.imread('../test_img/4.png', cv.IMREAD_GRAYSCALE)
im = cv.resize(im, (1024, 1024))
lst1 = []
lst1_resize = []
# First image segmentation pass
for i in range(16):
for j in range(16):
block = im[i*64:(i+1)*64 , j*64:(j+1)*64]
lst1.append(block)
lst1_resize.append(cv.resize(block, (16, 16)))
lst1_resize = np.asarray(lst1_resize)
lst1_resize = np.reshape(lst1_resize, (256, 16, 16, 1))
ret = model.predict(lst1_resize)
ret = ret.argmax(1)
idx_1 = np.where(ret == 0)[0]
lst2_origin = []
for x in idx_1:
for i in range(4):
for j in range(4):
block = lst1[x][i*16:(i+1)*16 , j*16 : (j+1)*16]
lst2_origin.append(block)
lst2 = np.asarray(lst2_origin)
lst2 = np.reshape(lst2, (len(idx_1) * 16 ,16,16,1))
ret = model.predict(lst2)
ret = ret.argmax(1)
idx = np.where(ret == 0)
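# Aside (not part of the original script): the nested-loop tiling above can also be
# written with reshape/swapaxes. `split_blocks` is a hypothetical helper name; the
# resulting block order matches the row-major loops used above.
def split_blocks(img, grid):
    """Split a square (H, H) image into grid*grid equal square blocks."""
    h = img.shape[0] // grid
    return img.reshape(grid, h, grid, h).swapaxes(1, 2).reshape(-1, h, h)
# e.g. split_blocks(im, 16) yields 256 blocks of 64x64, matching lst1 above.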
from .Partitioner import Partitioner
from itertools import product
import numpy as np
from nn_partition.utils.object_boundary import getboundary
class BoundarySimGuidedPartitioner(Partitioner):
def __init__(self, N=20, tolerance_eps=0.01):
Partitioner.__init__(self)
self.N= N
self.tolerance_eps= tolerance_eps
def get_output_range(self, input_range, propagator):
input_shape = input_range.shape[:-1]
num_propagator_calls = 0
interior_M = []
M=[]
M_inside=[]
info = {}
boundary_shape="convex"
sect_method = 'max'
num_inputs = input_range.shape[0]
num_partitions = (self.N)*np.ones((num_inputs,), dtype=int)
slope = np.divide((input_range[:,1] - input_range[:,0]), num_partitions)
sampling_cell = np.empty(((self.N)**2,num_inputs,2))
output_range_ = np.empty(((self.N)**2,num_inputs,2))
sampled_output_ = np.empty(((self.N)**2,num_inputs))
sampled_output_boundary_ = np.empty(((self.N)**2))
input_range_ = np.empty((self.N**2,num_inputs,2))
element=-1
for idx in product(*[range(num) for num in num_partitions]):
element = element+1
input_range_[element,:,0] = input_range[:,0]+np.multiply(idx, slope)
input_range_[element,:,1] = input_range[:,0]+np.multiply(np.array(idx)+1, slope)
sampled_input_= np.random.uniform(input_range_[element,:,0], input_range_[element,:,1], (1,num_inputs,))
sampled_output_[element,:] = propagator.forward_pass( propagator.forward_pass(sampled_input_))
sampled_output_boundary_[element] = 0
# output_range_[element,:,:],_= propagator.get_output_range(input_range_[element,:,:])
# num_propagator_calls += 1
if boundary_shape=="convex":
boundary_points= getboundary(sampled_output_,0.0)
else:
boundary_points= getboundary(sampled_output_,0.4)
for i in range(self.N**2):
if sampled_output_[i,:] in boundary_points:
sampled_output_boundary_[i]=1
propagator.get_output_range(input_range_[i,:,:])
num_propagator_calls += 1
M.append((input_range_[i,:,:], output_range_[i,:,:], sampled_output_boundary_[i]))# (Line 4)
else:
M_inside.append((input_range_[i,:,:]))
output_range_sim = np.empty_like(output_range_[0,:,:])
output_range_sim[:,0] = np.min(sampled_output_, axis=0)
output_range_sim[:,1] = np.max(sampled_output_, axis=0)
u_e = np.empty_like(output_range_sim)
u_e[:,0] = np.inf
u_e[:,1] = -np.inf
M_e = np.empty_like(output_range_sim)
M_e[:,0] = np.inf
M_e[:,1] = -np.inf
while len(M) != 0:
input_range_, output_range_,sampled_output_boundary_ = M.pop(0) # Line 9
if np.all((output_range_sim[:,0] - output_range_[:,0]) <= 0) and \
np.all((output_range_sim[:,1] - output_range_[:,1]) >= 0) or sampled_output_boundary_==0:
# Line 11
tmp = np.dstack([u_e, output_range_])
u_e[:,1] = np.max(tmp[:,1,:], axis=1)
u_e[:,0] = np.min(tmp[:,0,:], axis=1)
interior_M.append((input_range_, output_range_))
else:
# Line 14
if np.max(input_range_[:,1] - input_range_[:,0]) > self.tolerance_eps:
# Line 15
input_ranges_ = sect(input_range_, 2, select=sect_method)
# Lines 16-17
for input_range_ in input_ranges_:
output_range_,_= propagator.get_output_range(input_range_)
num_propagator_calls += 1
M.append((input_range_, output_range_,sampled_output_boundary_)) # Line 18
else: # Lines 19-20
interior_M.append((input_range_, output_range_))
break
if len(M) > 0:
M_numpy = np.dstack([output_range_ for (_, output_range_,_) in M])
M_range = np.empty_like(u_e)
M_range[:,1] = np.max(M_numpy[:,1,:], axis=1)
M_range[:,0] = np.min(M_numpy[:,0,:], axis=1)
tmp1 = np.dstack([u_e, M_range])
u_e[:,1] = np.max(tmp1[:,1,:], axis=1)
u_e[:,0] = np.min(tmp1[:,0,:], axis=1)
M1=[]
if len(M_inside) > 0:
for input_range in M_inside:
output_range_, _ = propagator.get_output_range(input_range)
num_propagator_calls += 1
M2 = np.dstack([M_e, output_range_])
M_e[:,1] = np.max(M2[:,1,:], axis=1)
M_e[:,0] = np.min(M2[:,0,:], axis=1)
tmp2 = np.dstack([u_e, M_e])
u_e[:,1] = np.max(tmp2[:,1,:], axis=1)
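# Aside (not part of the original class): the repeated np.dstack + min/max pattern
# above is just an axis-aligned union of hyperrectangles. `box_union` is a
# hypothetical helper; ranges are assumed to be (num_outputs, 2) arrays of [lower, upper].
def box_union(a, b):
    out = np.empty_like(a)
    out[:, 0] = np.minimum(a[:, 0], b[:, 0])  # union lower bound: elementwise min
    out[:, 1] = np.maximum(a[:, 1], b[:, 1])  # union upper bound: elementwise max
    return out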
import patsy
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
pal = sns.diverging_palette(10, 220, sep=80, n=5,l=40,center='light')
pal2 = sns.diverging_palette(10, 220, sep=80, n=5,l=40,center='dark')
pal[2] = pal2[2]
def ilogit(x):
return 1/(1+np.exp(-x))
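# Quick sanity checks (not in the original): the inverse logit maps log-odds to
# probabilities, e.g. 0 -> 0.5 and log(3) -> 0.75.
assert np.isclose(ilogit(0.0), 0.5)
assert np.isclose(ilogit(np.log(3.0)), 0.75)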