prompt (stringlengths 15–655k) | completion (stringlengths 3–32.4k) | api (stringlengths 8–52)
---|---|---|
""" Test functions for stats module
"""
from __future__ import division, print_function, absolute_import
import warnings
import re
import sys
import pickle
from numpy.testing import (TestCase, run_module_suite, assert_equal,
assert_array_equal, assert_almost_equal, assert_array_almost_equal,
assert_allclose, assert_, assert_raises, assert_warns, dec)
from nose import SkipTest
import numpy
import numpy as np
from numpy import typecodes, array
from scipy._lib._version import NumpyVersion
from scipy import special
import scipy.stats as stats
from scipy.stats._distn_infrastructure import argsreduce
import scipy.stats.distributions
from scipy.special import xlogy
# python -OO strips docstrings
DOCSTRINGS_STRIPPED = sys.flags.optimize > 1
# Generate test cases to test cdf and distribution consistency.
# Note that this list does not include all distributions.
dists = ['uniform', 'norm', 'lognorm', 'expon', 'beta',
'powerlaw', 'bradford', 'burr', 'fisk', 'cauchy', 'halfcauchy',
'foldcauchy', 'gamma', 'gengamma', 'loggamma',
'alpha', 'anglit', 'arcsine', 'betaprime', 'dgamma',
'exponnorm', 'exponweib', 'exponpow', 'frechet_l', 'frechet_r',
'gilbrat', 'f', 'ncf', 'chi2', 'chi', 'nakagami', 'genpareto',
'genextreme', 'genhalflogistic', 'pareto', 'lomax', 'halfnorm',
'halflogistic', 'fatiguelife', 'foldnorm', 'ncx2', 't', 'nct',
'weibull_min', 'weibull_max', 'dweibull', 'maxwell', 'rayleigh',
'genlogistic', 'logistic', 'gumbel_l', 'gumbel_r', 'gompertz',
'hypsecant', 'laplace', 'reciprocal', 'triang', 'tukeylambda',
'vonmises', 'vonmises_line', 'pearson3', 'gennorm', 'halfgennorm',
'rice']
def _assert_hasattr(a, b, msg=None):
if msg is None:
msg = '%s does not have attribute %s' % (a, b)
assert_(hasattr(a, b), msg=msg)
def test_api_regression():
# https://github.com/scipy/scipy/issues/3802
_assert_hasattr(scipy.stats.distributions, 'f_gen')
# check function for test generator
def check_distribution(dist, args, alpha):
D, pval = stats.kstest(dist, '', args=args, N=1000)
if (pval < alpha):
D, pval = stats.kstest(dist, '', args=args, N=1000)
# if (pval < alpha):
# D,pval = stats.kstest(dist,'',args=args, N=1000)
assert_(pval > alpha, msg="D = " + str(D) + "; pval = " + str(pval) +
"; alpha = " + str(alpha) + "\nargs = " + str(args))
# nose test generator
def test_all_distributions():
for dist in dists:
distfunc = getattr(stats, dist)
nargs = distfunc.numargs
alpha = 0.01
if dist == 'fatiguelife':
alpha = 0.001
if dist == 'frechet':
args = tuple(2*np.random.random(1)) + (0,) + tuple(2*np.random.random(2))
elif dist == 'triang':
args = tuple(np.random.random(nargs))
elif dist == 'reciprocal':
vals = np.random.random(nargs)
vals[1] = vals[0] + 1.0
args = tuple(vals)
elif dist == 'vonmises':
yield check_distribution, dist, (10,), alpha
yield check_distribution, dist, (101,), alpha
args = tuple(1.0 + np.random.random(nargs))
else:
args = tuple(1.0 + np.random.random(nargs))
yield check_distribution, dist, args, alpha
def check_vonmises_pdf_periodic(k, l, s, x):
vm = stats.vonmises(k, loc=l, scale=s)
assert_almost_equal(vm.pdf(x), vm.pdf(x % (2*numpy.pi*s)))
def check_vonmises_cdf_periodic(k, l, s, x):
vm = stats.vonmises(k, loc=l, scale=s)
assert_almost_equal(vm.cdf(x) % 1, vm.cdf(x % (2*numpy.pi*s)) % 1)
def test_vonmises_pdf_periodic():
for k in [0.1, 1, 101]:
for x in [0, 1, numpy.pi, 10, 100]:
yield check_vonmises_pdf_periodic, k, 0, 1, x
yield check_vonmises_pdf_periodic, k, 1, 1, x
yield check_vonmises_pdf_periodic, k, 0, 10, x
yield check_vonmises_cdf_periodic, k, 0, 1, x
yield check_vonmises_cdf_periodic, k, 1, 1, x
yield check_vonmises_cdf_periodic, k, 0, 10, x
def test_vonmises_line_support():
assert_equal(stats.vonmises_line.a, -np.pi)
assert_equal(stats.vonmises_line.b, np.pi)
def test_vonmises_numerical():
vm = stats.vonmises(800)
assert_almost_equal(vm.cdf(0), 0.5)
class TestRandInt(TestCase):
def test_rvs(self):
vals = stats.randint.rvs(5, 30, size=100)
assert_(numpy.all(vals < 30) & numpy.all(vals >= 5))
assert_(len(vals) == 100)
vals = stats.randint.rvs(5, 30, size=(2, 50))
assert_(numpy.shape(vals) == (2, 50))
assert_(vals.dtype.char in typecodes['AllInteger'])
val = stats.randint.rvs(15, 46)
assert_((val >= 15) & (val < 46))
assert_(isinstance(val, numpy.ScalarType), msg=repr(type(val)))
val = stats.randint(15, 46).rvs(3)
assert_(val.dtype.char in typecodes['AllInteger'])
def test_pdf(self):
k = numpy.r_[0:36]
out = numpy.where((k >= 5) & (k < 30), 1.0/(30-5), 0)
vals = stats.randint.pmf(k, 5, 30)
assert_array_almost_equal(vals, out)
def test_cdf(self):
x = numpy.r_[0:36:100j]
k = numpy.floor(x)
out = numpy.select([k >= 30, k >= 5], [1.0, (k-5.0+1)/(30-5.0)], 0)
vals = stats.randint.cdf(x, 5, 30)
assert_array_almost_equal(vals, out, decimal=12)
class TestBinom(TestCase):
def test_rvs(self):
vals = stats.binom.rvs(10, 0.75, size=(2, 50))
assert_(numpy.all(vals >= 0) & numpy.all(vals <= 10))
assert_(numpy.shape(vals) == (2, 50))
assert_(vals.dtype.char in typecodes['AllInteger'])
val = stats.binom.rvs(10, 0.75)
assert_(isinstance(val, int))
val = stats.binom(10, 0.75).rvs(3)
assert_(isinstance(val, numpy.ndarray))
assert_(val.dtype.char in typecodes['AllInteger'])
def test_pmf(self):
# regression test for Ticket #1842
vals1 = stats.binom.pmf(100, 100, 1)
vals2 = stats.binom.pmf(0, 100, 0)
assert_allclose(vals1, 1.0, rtol=1e-15, atol=0)
assert_allclose(vals2, 1.0, rtol=1e-15, atol=0)
def test_entropy(self):
# Basic entropy tests.
b = stats.binom(2, 0.5)
expected_p = np.array([0.25, 0.5, 0.25])
expected_h = -sum(xlogy(expected_p, expected_p))
h = b.entropy()
assert_allclose(h, expected_h)
b = stats.binom(2, 0.0)
h = b.entropy()
assert_equal(h, 0.0)
b = stats.binom(2, 1.0)
h = b.entropy()
assert_equal(h, 0.0)
def test_warns_p0(self):
# no spurious warnings are generated for p=0; gh-3817
with warnings.catch_warnings():
warnings.simplefilter("error", RuntimeWarning)
assert_equal(stats.binom(n=2, p=0).mean(), 0)
assert_equal(stats.binom(n=2, p=0).std(), 0)
class TestBernoulli(TestCase):
def test_rvs(self):
vals = stats.bernoulli.rvs(0.75, size=(2, 50))
assert_(numpy.all(vals >= 0) & numpy.all(vals <= 1))
assert_(numpy.shape(vals) == (2, 50))
assert_(vals.dtype.char in typecodes['AllInteger'])
val = stats.bernoulli.rvs(0.75)
assert_(isinstance(val, int))
val = stats.bernoulli(0.75).rvs(3)
assert_(isinstance(val, numpy.ndarray))
assert_(val.dtype.char in typecodes['AllInteger'])
def test_entropy(self):
# Simple tests of entropy.
b = stats.bernoulli(0.25)
expected_h = -0.25*np.log(0.25) - 0.75*np.log(0.75)
h = b.entropy()
assert_allclose(h, expected_h)
b = stats.bernoulli(0.0)
h = b.entropy()
assert_equal(h, 0.0)
b = stats.bernoulli(1.0)
h = b.entropy()
assert_equal(h, 0.0)
class TestNBinom(TestCase):
def test_rvs(self):
vals = stats.nbinom.rvs(10, 0.75, size=(2, 50))
assert_(numpy.all(vals >= 0))
assert_(numpy.shape(vals) == (2, 50))
assert_(vals.dtype.char in typecodes['AllInteger'])
val = stats.nbinom.rvs(10, 0.75)
assert_(isinstance(val, int))
val = stats.nbinom(10, 0.75).rvs(3)
assert_(isinstance(val, numpy.ndarray))
assert_(val.dtype.char in typecodes['AllInteger'])
def test_pmf(self):
# regression test for ticket 1779
assert_allclose(np.exp(stats.nbinom.logpmf(700, 721, 0.52)),
stats.nbinom.pmf(700, 721, 0.52))
# logpmf(0,1,1) shouldn't return nan (regression test for gh-4029)
val = scipy.stats.nbinom.logpmf(0, 1, 1)
assert_equal(val, 0)
class TestGeom(TestCase):
def test_rvs(self):
vals = stats.geom.rvs(0.75, size=(2, 50))
assert_(numpy.all(vals >= 0))
assert_(numpy.shape(vals) == (2, 50))
assert_(vals.dtype.char in typecodes['AllInteger'])
val = stats.geom.rvs(0.75)
assert_(isinstance(val, int))
val = stats.geom(0.75).rvs(3)
assert_(isinstance(val, numpy.ndarray))
assert_(val.dtype.char in typecodes['AllInteger'])
def test_pmf(self):
vals = stats.geom.pmf([1, 2, 3], 0.5)
assert_array_almost_equal(vals, [0.5, 0.25, 0.125])
def test_logpmf(self):
# regression test for ticket 1793
vals1 = np.log(stats.geom.pmf([1, 2, 3], 0.5))
vals2 = stats.geom.logpmf([1, 2, 3], 0.5)
assert_allclose(vals1, vals2, rtol=1e-15, atol=0)
# regression test for gh-4028
val = stats.geom.logpmf(1, 1)
assert_equal(val, 0.0)
def test_cdf_sf(self):
vals = stats.geom.cdf([1, 2, 3], 0.5)
vals_sf = stats.geom.sf([1, 2, 3], 0.5)
expected = array([0.5, 0.75, 0.875])
assert_array_almost_equal(vals, expected)
assert_array_almost_equal(vals_sf, 1-expected)
def test_logcdf_logsf(self):
vals = stats.geom.logcdf([1, 2, 3], 0.5)
vals_sf = stats.geom.logsf([1, 2, 3], 0.5)
expected = array([0.5, 0.75, 0.875])
assert_array_almost_equal(vals, np.log(expected))
assert_array_almost_equal(vals_sf, np.log1p(-expected))
def test_ppf(self):
vals = stats.geom.ppf([0.5, 0.75, 0.875], 0.5)
expected = array([1.0, 2.0, 3.0])
assert_array_almost_equal(vals, expected)
class TestGennorm(TestCase):
def test_laplace(self):
# test against Laplace (special case for beta=1)
points = [1, 2, 3]
pdf1 = stats.gennorm.pdf(points, 1)
pdf2 = stats.laplace.pdf(points)
assert_almost_equal(pdf1, pdf2)
def test_norm(self):
# test against normal (special case for beta=2)
points = [1, 2, 3]
pdf1 = stats.gennorm.pdf(points, 2)
pdf2 = stats.norm.pdf(points, scale=2**-.5)
assert_almost_equal(pdf1, pdf2)
class TestHalfgennorm(TestCase):
def test_expon(self):
# test against exponential (special case for beta=1)
points = [1, 2, 3]
pdf1 = stats.halfgennorm.pdf(points, 1)
pdf2 = stats.expon.pdf(points)
assert_almost_equal(pdf1, pdf2)
def test_halfnorm(self):
# test against half normal (special case for beta=2)
points = [1, 2, 3]
pdf1 = stats.halfgennorm.pdf(points, 2)
pdf2 = stats.halfnorm.pdf(points, scale=2**-.5)
assert_almost_equal(pdf1, pdf2)
def test_gennorm(self):
# test against generalized normal
points = [1, 2, 3]
pdf1 = stats.halfgennorm.pdf(points, .497324)
pdf2 = stats.gennorm.pdf(points, .497324)
assert_almost_equal(pdf1, 2*pdf2)
class TestTruncnorm(TestCase):
def test_ppf_ticket1131(self):
vals = stats.truncnorm.ppf([-0.5, 0, 1e-4, 0.5, 1-1e-4, 1, 2], -1., 1.,
loc=[3]*7, scale=2)
expected = np.array([np.nan, 1, 1.00056419, 3, 4.99943581, 5, np.nan])
assert_array_almost_equal(vals, expected)
def test_isf_ticket1131(self):
vals = stats.truncnorm.isf([-0.5, 0, 1e-4, 0.5, 1-1e-4, 1, 2], -1., 1.,
loc=[3]*7, scale=2)
expected = np.array([np.nan, 5, 4.99943581, 3, 1.00056419, 1, np.nan])
assert_array_almost_equal(vals, expected)
def test_gh_2477_small_values(self):
# Check a case that worked in the original issue.
low, high = -11, -10
x = stats.truncnorm.rvs(low, high, 0, 1, size=10)
assert_(low < x.min() < x.max() < high)
# Check a case that failed in the original issue.
low, high = 10, 11
x = stats.truncnorm.rvs(low, high, 0, 1, size=10)
assert_(low < x.min() < x.max() < high)
def test_gh_2477_large_values(self):
# Check a case that fails because of the extreme tails.
raise SkipTest('truncnorm rvs is known to fail at extreme tails')
low, high = 100, 101
x = stats.truncnorm.rvs(low, high, 0, 1, size=10)
assert_(low < x.min() < x.max() < high)
def test_gh_1489_trac_962_rvs(self):
# Check the original example.
low, high = 10, 15
x = stats.truncnorm.rvs(low, high, 0, 1, size=10)
assert_(low < x.min() < x.max() < high)
class TestHypergeom(TestCase):
def test_rvs(self):
vals = stats.hypergeom.rvs(20, 10, 3, size=(2, 50))
assert_(numpy.all(vals >= 0) &
numpy.all(vals <= 3))
assert_(numpy.shape(vals) == (2, 50))
assert_(vals.dtype.char in typecodes['AllInteger'])
val = stats.hypergeom.rvs(20, 3, 10)
assert_(isinstance(val, int))
val = stats.hypergeom(20, 3, 10).rvs(3)
assert_(isinstance(val, numpy.ndarray))
assert_(val.dtype.char in typecodes['AllInteger'])
def test_precision(self):
# comparison number from mpmath
M = 2500
n = 50
N = 500
tot = M
good = n
hgpmf = stats.hypergeom.pmf(2, tot, good, N)
assert_almost_equal(hgpmf, 0.0010114963068932233, 11)
def test_args(self):
# test correct output for corner cases of arguments
# see gh-2325
assert_almost_equal(stats.hypergeom.pmf(0, 2, 1, 0), 1.0, 11)
assert_almost_equal(stats.hypergeom.pmf(1, 2, 1, 0), 0.0, 11)
assert_almost_equal(stats.hypergeom.pmf(0, 2, 0, 2), 1.0, 11)
assert_almost_equal(stats.hypergeom.pmf(1, 2, 1, 0), 0.0, 11)
def test_cdf_above_one(self):
# for some values of parameters, hypergeom cdf was >1, see gh-2238
assert_(0 <= stats.hypergeom.cdf(30, 13397950, 4363, 12390) <= 1.0)
def test_precision2(self):
# Test hypergeom precision for large numbers. See #1218.
# Results compared with those from R.
oranges = 9.9e4
pears = 1.1e5
fruits_eaten = np.array([3, 3.8, 3.9, 4, 4.1, 4.2, 5]) * 1e4
quantile = 2e4
res = []
for eaten in fruits_eaten:
res.append(stats.hypergeom.sf(quantile, oranges + pears, oranges,
eaten))
expected = np.array([0, 1.904153e-114, 2.752693e-66, 4.931217e-32,
8.265601e-11, 0.1237904, 1])
assert_allclose(res, expected, atol=0, rtol=5e-7)
# Test with array_like first argument
quantiles = [1.9e4, 2e4, 2.1e4, 2.15e4]
res2 = stats.hypergeom.sf(quantiles, oranges + pears, oranges, 4.2e4)
expected2 = [1, 0.1237904, 6.511452e-34, 3.277667e-69]
assert_allclose(res2, expected2, atol=0, rtol=5e-7)
def test_entropy(self):
# Simple tests of entropy.
hg = stats.hypergeom(4, 1, 1)
h = hg.entropy()
expected_p = np.array([0.75, 0.25])
expected_h = -np.sum(xlogy(expected_p, expected_p))
assert_allclose(h, expected_h)
hg = stats.hypergeom(1, 1, 1)
h = hg.entropy()
assert_equal(h, 0.0)
def test_logsf(self):
# Test logsf for very large numbers. See issue #4982
# Results compare with those from R (v3.2.0):
# phyper(k, n, M-n, N, lower.tail=FALSE, log.p=TRUE)
# -2239.771
k = 1e4
M = 1e7
n = 1e6
N = 5e4
result = stats.hypergeom.logsf(k, M, n, N)
expected = -2239.771 # From R
assert_almost_equal(result, expected, decimal=3)
class TestLoggamma(TestCase):
def test_stats(self):
# The following precomputed values are from the table in section 2.2
# of "A Statistical Study of Log-Gamma Distribution", by <NAME>
# Chan (thesis, McMaster University, 1993).
table = np.array([
# c, mean, var, skew, exc. kurt.
0.5, -1.9635, 4.9348, -1.5351, 4.0000,
1.0, -0.5772, 1.6449, -1.1395, 2.4000,
12.0, 2.4427, 0.0869, -0.2946, 0.1735,
]).reshape(-1, 5)
for c, mean, var, skew, kurt in table:
computed = stats.loggamma.stats(c, moments='msvk')
assert_array_almost_equal(computed, [mean, var, skew, kurt],
decimal=4)
class TestLogser(TestCase):
def test_rvs(self):
vals = stats.logser.rvs(0.75, size=(2, 50))
assert_(numpy.all(vals >= 1))
assert_(numpy.shape(vals) == (2, 50))
assert_(vals.dtype.char in typecodes['AllInteger'])
val = stats.logser.rvs(0.75)
assert_(isinstance(val, int))
val = stats.logser(0.75).rvs(3)
assert_(isinstance(val, numpy.ndarray))
assert_(val.dtype.char in typecodes['AllInteger'])
class TestPareto(TestCase):
def test_stats(self):
# Check the stats() method with some simple values. Also check
# that the calculations do not trigger RuntimeWarnings.
with warnings.catch_warnings():
warnings.simplefilter("error", RuntimeWarning)
m, v, s, k = stats.pareto.stats(0.5, moments='mvsk')
assert_equal(m, np.inf)
assert_equal(v, np.inf)
assert_equal(s, np.nan)
assert_equal(k, np.nan)
m, v, s, k = stats.pareto.stats(1.0, moments='mvsk')
assert_equal(m, np.inf)
assert_equal(v, np.inf)
assert_equal(s, np.nan)
assert_equal(k, np.nan)
m, v, s, k = stats.pareto.stats(1.5, moments='mvsk')
assert_equal(m, 3.0)
assert_equal(v, np.inf)
assert_equal(s, np.nan)
assert_equal(k, np.nan)
m, v, s, k = stats.pareto.stats(2.0, moments='mvsk')
assert_equal(m, 2.0)
assert_equal(v, np.inf)
assert_equal(s, np.nan)
assert_equal(k, np.nan)
m, v, s, k = stats.pareto.stats(2.5, moments='mvsk')
assert_allclose(m, 2.5 / 1.5)
assert_allclose(v, 2.5 / (1.5*1.5*0.5))
assert_equal(s, np.nan)
assert_equal(k, np.nan)
m, v, s, k = stats.pareto.stats(3.0, moments='mvsk')
assert_allclose(m, 1.5)
assert_allclose(v, 0.75)
assert_equal(s, np.nan)
assert_equal(k, np.nan)
m, v, s, k = stats.pareto.stats(3.5, moments='mvsk')
assert_allclose(m, 3.5 / 2.5)
assert_allclose(v, 3.5 / (2.5*2.5*1.5))
assert_allclose(s, (2*4.5/0.5)*np.sqrt(1.5/3.5))
assert_equal(k, np.nan)
m, v, s, k = stats.pareto.stats(4.0, moments='mvsk')
assert_allclose(m, 4.0 / 3.0)
assert_allclose(v, 4.0 / 18.0)
assert_allclose(s, 2*(1+4.0)/(4.0-3) * np.sqrt((4.0-2)/4.0))
assert_equal(k, np.nan)
m, v, s, k = stats.pareto.stats(4.5, moments='mvsk')
assert_allclose(m, 4.5 / 3.5)
assert_allclose(v, 4.5 / (3.5*3.5*2.5))
assert_allclose(s, (2*5.5/1.5) * np.sqrt(2.5/4.5))
assert_allclose(k, 6*(4.5**3 + 4.5**2 - 6*4.5 - 2)/(4.5*1.5*0.5))
class TestGenpareto(TestCase):
def test_ab(self):
# c >= 0: a, b = [0, inf]
for c in [1., 0.]:
c = np.asarray(c)
stats.genpareto._argcheck(c) # ugh
assert_equal(stats.genpareto.a, 0.)
assert_(np.isposinf(stats.genpareto.b))
# c < 0: a=0, b=1/|c|
c = np.asarray(-2.)
stats.genpareto._argcheck(c)
assert_allclose([stats.genpareto.a, stats.genpareto.b], [0., 0.5])
def test_c0(self):
# with c=0, genpareto reduces to the exponential distribution
rv = stats.genpareto(c=0.)
x = np.linspace(0, 10., 30)
assert_allclose(rv.pdf(x), stats.expon.pdf(x))
assert_allclose(rv.cdf(x), stats.expon.cdf(x))
assert_allclose(rv.sf(x), stats.expon.sf(x))
q = np.linspace(0., 1., 10)
assert_allclose(rv.ppf(q), stats.expon.ppf(q))
def test_cm1(self):
# with c=-1, genpareto reduces to the uniform distr on [0, 1]
rv = stats.genpareto(c=-1.)
x = np.linspace(0, 10., 30)
assert_allclose(rv.pdf(x), stats.uniform.pdf(x))
assert_allclose(rv.cdf(x), stats.uniform.cdf(x))
assert_allclose(rv.sf(x), stats.uniform.sf(x))
q = np.linspace(0., 1., 10)
assert_allclose(rv.ppf(q), stats.uniform.ppf(q))
# logpdf(1., c=-1) should be zero
assert_allclose(rv.logpdf(1), 0)
def test_x_inf(self):
# make sure x=inf is handled gracefully
rv = stats.genpareto(c=0.1)
assert_allclose([rv.pdf(np.inf), rv.cdf(np.inf)], [0., 1.])
assert_(np.isneginf(rv.logpdf(np.inf)))
rv = stats.genpareto(c=0.)
assert_allclose([rv.pdf(np.inf), rv.cdf(np.inf)], [0., 1.])
assert_(np.isneginf(rv.logpdf(np.inf)))
rv = stats.genpareto(c=-1.)
assert_allclose([rv.pdf(np.inf), rv.cdf(np.inf)], [0., 1.])
assert_(np.isneginf(rv.logpdf(np.inf)))
def test_c_continuity(self):
# pdf is continuous at c=0, -1
x = np.linspace(0, 10, 30)
for c in [0, -1]:
pdf0 = stats.genpareto.pdf(x, c)
for dc in [1e-14, -1e-14]:
pdfc = stats.genpareto.pdf(x, c + dc)
assert_allclose(pdf0, pdfc, atol=1e-12)
cdf0 = stats.genpareto.cdf(x, c)
for dc in [1e-14, -1e-14]:
cdfc = stats.genpareto.cdf(x, c + dc)
assert_allclose(cdf0, cdfc, atol=1e-12)
def test_c_continuity_ppf(self):
q = np.r_[np.logspace(1e-12, 0.01, base=0.1),
np.linspace(0.01, 1, 30, endpoint=False),
1. - np.logspace(1e-12, 0.01, base=0.1)]
for c in [0., -1.]:
ppf0 = stats.genpareto.ppf(q, c)
for dc in [1e-14, -1e-14]:
ppfc = stats.genpareto.ppf(q, c + dc)
assert_allclose(ppf0, ppfc, atol=1e-12)
def test_c_continuity_isf(self):
q = np.r_[np.logspace(1e-12, 0.01, base=0.1),
np.linspace(0.01, 1, 30, endpoint=False),
1. - np.logspace(1e-12, 0.01, base=0.1)]
for c in [0., -1.]:
isf0 = stats.genpareto.isf(q, c)
for dc in [1e-14, -1e-14]:
isfc = stats.genpareto.isf(q, c + dc)
assert_allclose(isf0, isfc, atol=1e-12)
def test_cdf_ppf_roundtrip(self):
# this should pass with machine precision. hat tip @pbrod
q = np.r_[np.logspace(1e-12, 0.01, base=0.1),
np.linspace(0.01, 1, 30, endpoint=False),
1. - np.logspace(1e-12, 0.01, base=0.1)]
for c in [1e-8, -1e-18, 1e-15, -1e-15]:
assert_allclose(stats.genpareto.cdf(stats.genpareto.ppf(q, c), c),
q, atol=1e-15)
def test_logsf(self):
logp = stats.genpareto.logsf(1e10, .01, 0, 1)
assert_allclose(logp, -1842.0680753952365)
class TestPearson3(TestCase):
def test_rvs(self):
vals = stats.pearson3.rvs(0.1, size=(2, 50))
assert_(numpy.shape(vals) == (2, 50))
assert_(vals.dtype.char in typecodes['AllFloat'])
val = stats.pearson3.rvs(0.5)
assert_(isinstance(val, float))
val = stats.pearson3(0.5).rvs(3)
assert_(isinstance(val, numpy.ndarray))
assert_(val.dtype.char in typecodes['AllFloat'])
assert_(len(val) == 3)
def test_pdf(self):
vals = stats.pearson3.pdf(2, [0.0, 0.1, 0.2])
assert_allclose(vals, np.array([0.05399097, 0.05555481, 0.05670246]),
atol=1e-6)
vals = stats.pearson3.pdf(-3, 0.1)
assert_allclose(vals, np.array([0.00313791]), atol=1e-6)
vals = stats.pearson3.pdf([-3, -2, -1, 0, 1], 0.1)
assert_allclose(vals, np.array([0.00313791, 0.05192304, 0.25028092,
0.39885918, 0.23413173]), atol=1e-6)
def test_cdf(self):
vals = stats.pearson3.cdf(2, [0.0, 0.1, 0.2])
assert_allclose(vals, np.array([0.97724987, 0.97462004, 0.97213626]),
atol=1e-6)
vals = stats.pearson3.cdf(-3, 0.1)
assert_allclose(vals, [0.00082256], atol=1e-6)
vals = stats.pearson3.cdf([-3, -2, -1, 0, 1], 0.1)
assert_allclose(vals, [8.22563821e-04, 1.99860448e-02, 1.58550710e-01,
5.06649130e-01, 8.41442111e-01], atol=1e-6)
class TestPoisson(TestCase):
def test_pmf_basic(self):
# Basic case
ln2 = np.log(2)
vals = stats.poisson.pmf([0, 1, 2], ln2)
expected = [0.5, ln2/2, ln2**2/4]
assert_allclose(vals, expected)
def test_mu0(self):
# Edge case: mu=0
vals = stats.poisson.pmf([0, 1, 2], 0)
expected = [1, 0, 0]
assert_array_equal(vals, expected)
interval = stats.poisson.interval(0.95, 0)
assert_equal(interval, (0, 0))
def test_rvs(self):
vals = stats.poisson.rvs(0.5, size=(2, 50))
assert_(numpy.all(vals >= 0))
assert_(numpy.shape(vals) == (2, 50))
assert_(vals.dtype.char in typecodes['AllInteger'])
val = stats.poisson.rvs(0.5)
assert_(isinstance(val, int))
val = stats.poisson(0.5).rvs(3)
assert_(isinstance(val, numpy.ndarray))
assert_(val.dtype.char in typecodes['AllInteger'])
def test_stats(self):
mu = 16.0
result = stats.poisson.stats(mu, moments='mvsk')
assert_allclose(result, [mu, mu, np.sqrt(1.0/mu), 1.0/mu])
mu = np.array([0.0, 1.0, 2.0])
result = stats.poisson.stats(mu, moments='mvsk')
expected = (mu, mu, [np.inf, 1, 1/np.sqrt(2)], [np.inf, 1, 0.5])
assert_allclose(result, expected)
class TestZipf(TestCase):
def test_rvs(self):
vals = stats.zipf.rvs(1.5, size=(2, 50))
assert_(numpy.all(vals >= 1))
assert_(numpy.shape(vals) == (2, 50))
assert_(vals.dtype.char in typecodes['AllInteger'])
val = stats.zipf.rvs(1.5)
assert_(isinstance(val, int))
val = stats.zipf(1.5).rvs(3)
assert_(isinstance(val, numpy.ndarray))
assert_(val.dtype.char in typecodes['AllInteger'])
def test_moments(self):
# n-th moment is finite iff a > n + 1
m, v = stats.zipf.stats(a=2.8)
assert_(np.isfinite(m))
assert_equal(v, np.inf)
s, k = stats.zipf.stats(a=4.8, moments='sk')
assert_(not np.isfinite([s, k]).all())
class TestDLaplace(TestCase):
def test_rvs(self):
vals = stats.dlaplace.rvs(1.5, size=(2, 50))
assert_(numpy.shape(vals) == (2, 50))
assert_(vals.dtype.char in typecodes['AllInteger'])
val = stats.dlaplace.rvs(1.5)
assert_(isinstance(val, int))
val = stats.dlaplace(1.5).rvs(3)
assert_(isinstance(val, numpy.ndarray))
assert_(val.dtype.char in typecodes['AllInteger'])
assert_(stats.dlaplace.rvs(0.8) is not None)
def test_stats(self):
# compare the explicit formulas w/ direct summation using pmf
a = 1.
dl = stats.dlaplace(a)
m, v, s, k = dl.stats('mvsk')
N = 37
xx = np.arange(-N, N+1)
pp = dl.pmf(xx)
m2, m4 = np.sum(pp*xx**2), np.sum(pp*xx**4)
assert_equal((m, s), (0, 0))
assert_allclose((v, k), (m2, m4/m2**2 - 3.), atol=1e-14, rtol=1e-8)
def test_stats2(self):
a = np.log(2.)
dl = stats.dlaplace(a)
m, v, s, k = dl.stats('mvsk')
assert_equal((m, s), (0., 0.))
assert_allclose((v, k), (4., 3.25))
class TestInvGamma(TestCase):
@dec.skipif(NumpyVersion(np.__version__) < '1.7.0',
"assert_* funcs broken with inf/nan")
def test_invgamma_inf_gh_1866(self):
# invgamma's moments are only finite for a>n
# specific numbers checked w/ boost 1.54
with warnings.catch_warnings():
warnings.simplefilter('error', RuntimeWarning)
mvsk = stats.invgamma.stats(a=19.31, moments='mvsk')
expected = [0.05461496450, 0.0001723162534, 1.020362676,
2.055616582]
|
assert_allclose(mvsk, expected)
|
numpy.testing.assert_allclose
|
import os
import zipfile
import pandas as pd
import numpy as np
from glob import glob
from tqdm import tqdm
from PIL import Image
import shutil
import pickle
labels_path = 'celeba/list_attr_celeba.txt'
image_path = 'celeba/img_align_celeba/'
split_path = 'celeba/list_eval_partition.txt'
labels_df = pd.read_csv(labels_path)
label_dict = {}
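# each line of list_attr_celeba.txt: image file name followed by the per-attribute flags (+1/-1)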
for i in range(1, len(labels_df)):
label_dict[labels_df['202599'][i].split()[0]] = [x for x in labels_df['202599'][i].split()[1:]]
label_df = pd.DataFrame(label_dict).T
label_df.columns = (labels_df['202599'][0]).split()
label_df.replace(['-1'], ['0'], inplace = True)
# generate train/val/test
files = glob(image_path + '*.jpg')
split_file = open(split_path, "r")
lines = split_file.readlines()
os.mkdir('celeba/tmp/')
for i in ['train', 'val', 'test']:
os.mkdir(os.path.join('celeba/tmp/', i))
train_file_names = []
train_dict = {}
valid_file_names = []
valid_dict = {}
test_file_names = []
test_dict = {}
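# list_eval_partition.txt codes: 0 = train, 1 = val, 2 = test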
for i in tqdm(range(len(lines))):
file_name, sp = lines[i].split()
sp = sp.split('\n')[0]
if sp == '0':
labels = np.array(label_df[label_df.index==file_name])
train_dict[file_name] = labels
train_file_names.append(file_name)
source_path = image_path + file_name
shutil.copy2(source_path, os.path.join('celeba/tmp/train', file_name))
elif sp == '1':
labels =
|
np.array(label_df[label_df.index==file_name])
|
numpy.array
|
"""
Run CGLE example using specified config file.
"""
import int.cgle as cint
import tests
import lpde
import os
import pickle
import shutil
import configparser
import numpy as np
import matplotlib.pyplot as plt
import tqdm
import torch
from torch.utils.tensorboard import SummaryWriter
import utils_cgle
from scipy.spatial.distance import cdist
torch.set_default_dtype(torch.float32)
POINTS_W = 397.48499
plt.set_cmap('plasma')
def integrate_system(config, n, path, verbose=False, n_min=0):
"""Integrate complex Ginzburg-Landau equation."""
pars = {}
pars["c1"] = float(config["c1"])
pars["c2"] = float(config["c2"])
pars["c3"] = float(config["c3"])
pars["mu"] = float(config["mu"])
pars["L"] = float(config["L"])
data_dict = cint.integrate(pars=pars,
dt=float(config["dt"]), N=int(config["N_int"]), T=int(config["T"]),
tmin=float(config["tmin"]), tmax=float(config["tmax"]),
append_init=True)
if verbose:
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(data_dict["xx"], data_dict["data"][-1].real, label='real')
ax.plot(data_dict["xx"], data_dict["data"][-1].imag, label='imag')
ax.set_xlabel(r'$\omega$')
plt.title('snapshot')
plt.legend()
plt.show()
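# for every snapshot index, integrate the unperturbed (p=0) and the +/- eps perturbed
# initial conditions, subsample to N grid points and pickle each run separately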
for i in range(n_min, n):
for p in [0, -1, 1]:
data_perturbed = cint.integrate(pars=pars,
dt=data_dict["dt"], N=data_dict["N"], T=data_dict["T"],
tmin=0, tmax=data_dict["tmax"]-data_dict["tmin"],
ic='manual',
Ainit=data_dict["data"][int(i*int(config["T_off"]))] +
p*float(config["eps"]) *
data_dict["data"][int(i*int(config["T_off"]))],
append_init=True)
data_perturbed["data"] = data_perturbed["data"][:, ::int(
int(config["N_int"])/int(config["N"]))]
data_perturbed["xx"] = data_perturbed["xx"][::int(
int(config["N_int"])/int(config["N"]))]
data_perturbed["N"] = int(config["N"])
output = open(path + 'run'+str(i)+'_p_'+str(p)+'.pkl', 'wb')
pickle.dump(data_perturbed, output)
output.close()
def make_plot_paper(config):
"""Plot CGLE simulation results."""
pkl_file = open(config["GENERAL"]["save_dir"]+'/dat/run' +
config["TRAINING"]["n_train"]+'_p_'+str(0)+'.pkl', 'rb')
data_dict = pickle.load(pkl_file)
pkl_file.close()
# t_off = 2000
t_off = 0
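# random permutation of the grid indices, used to replot the same data with scrambled spatial ordering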
idxs = np.arange(data_dict["N"])
np.random.shuffle(idxs)
fig = plt.figure(figsize=(POINTS_W/72, 0.9*POINTS_W/72))
ax1 = fig.add_subplot(321)
pl1 = ax1.pcolor(data_dict["xx"], data_dict["tt"][::10]+t_off,
data_dict["data_org"][1::10].real, vmin=-1, vmax=1,
rasterized=True, cmap='plasma')
ax1.set_xlabel('$x$', labelpad=-2)
ax1.set_ylabel('$t$', labelpad=0)
ax1.set_xlim((0, data_dict["L"]))
ax1.set_ylim((data_dict["tmin"]+t_off, data_dict["tmax"]+t_off))
cbar1 = plt.colorbar(pl1)
cbar1.set_label('Re $W$', labelpad=-3)
ax2 = fig.add_subplot(322)
pl2 = ax2.pcolor(np.arange(data_dict["N"]), data_dict["tt"][::10]+t_off,
data_dict["data_org"][1::10, idxs].real, vmin=-1, vmax=1,
rasterized=True, cmap='plasma')
ax2.set_xlabel('$i$', labelpad=-2)
ax2.set_ylabel('$t$', labelpad=0)
ax2.set_xlim((0, data_dict["N"]))
ax2.set_ylim((data_dict["tmin"]+t_off, data_dict["tmax"]+t_off))
cbar2 = plt.colorbar(pl2)
cbar2.set_label('Re $W$', labelpad=-3)
ax3 = fig.add_subplot(323)
v_scaled =
|
np.load(config["GENERAL"]["save_dir"]+'/v_scaled.npy')
|
numpy.load
|
import numpy as np
import vrep
import cv2
import time
import sys
import torch
from torch.utils.data import Dataset, DataLoader
from torchvision.transforms import transforms
from PIL import Image
import torch.nn as nn
from skimage.measure import compare_ssim as ssim
#import sim
from tensorboardX import SummaryWriter
import torchvision
## globals
SRV_PORT = 19999
CAMERA = "Vision_sensor"
IMAGE_PLANE = "Plane0"
DIR_LIGHT0="light"
N_BASE_IMGS=50
CAPTURED_IMGS_PATH="./capture/"
testTarget1="testTarget1"
objects_names = [CAMERA, IMAGE_PLANE, testTarget1]
label_root = 'lable.txt'
image_root = 'imgs_name.txt'
batchsize = 16
torch.set_printoptions(precision=6)
writer = SummaryWriter(log_dir='./run/')
#root = os.getcwd() + '/capture/'  # path to the dataset
#-----------prepare the dataset--------------
def default_loader(path):
return Image.open(path).convert('RGB')
class MyDataset(Dataset):
###---- constructor with default parameters
def __init__(self,image_root,label_root,transform=None,target_transform=None,loader=default_loader):
super(MyDataset,self).__init__()
all_img_name= []
all_label = []
fi = open(image_root, 'r')
for name_img_line in fi:
name_img_line = name_img_line.strip('\n')
name_img_line = name_img_line.rstrip('\n')
all_img_name.append(name_img_line)
fl = open(label_root, 'r')
for label_line in fl:
label_line = label_line.strip('\n')
label_line = label_line.rstrip('\n')
label_line = label_line.split()
all_label.append(label_line)
self.all_img_name=all_img_name
self.all_label = all_label
self.transform = transform
self.target_transform = target_transform
#self.label = []
#self.data = []
self.loader = loader
def __getitem__(self, index):
label = self.all_label[index]
#print('index is:',index)
label = np.array([i for i in label], dtype=np.float16)
label = torch.Tensor(label)
#label = transforms.Normalize([],[])
#print('label is :',label)
fn =self.all_img_name[index]
image = self.loader(fn)
if self.transform is not None:
image = self.transform(image)
#if self.target_transform is not None:
#label = self.target_transform(label)
#label = Variable(label)
#label = array.array(label)
#label=torch.Tensor(label)
return image,label
def __len__(self):
return len(self.all_img_name)
train_data = MyDataset(image_root=image_root, label_root=label_root, transform=transforms.Compose([
transforms.Resize(size=256,interpolation=2),
transforms.ToTensor(),
#transforms.Normalize(mean =(0.5, 0.5, 0.5), std = (0.5, 0.5, 0.5))
transforms.RandomErasing(p=1,scale=(0.01,0.05),ratio=(0.2,0.6),value=(100,100,100))
]))
train_loader = DataLoader(dataset=train_data, batch_size=batchsize, shuffle=True, num_workers=8,pin_memory=True)
'''
label_validation_root = 'lable_validation.txt'
image_validation_root = 'imgs_name_validation.txt'
test_data = MyDataset(image_root=image_validation_root, label_root=label_validation_root, transform=transforms.Compose([
transforms.Resize(size=256,interpolation=2),
transforms.ToTensor(),
#transforms.Normalize(mean =(0.5, 0.5, 0.5), std = (0.5, 0.5, 0.5))
]))
test_loader = DataLoader(dataset=test_data, batch_size=batchsize, shuffle=False, num_workers=8,pin_memory=True)
'''
def connect(port, message):
# connect to server
vrep.simxFinish(-1) # just in case, close all opened connections
clientID = vrep.simxStart('127.0.0.1', port, True, True, 5000, 5) # start a connection on the requested port
if clientID != -1:
print("Connected to remote API server")
print(message)
else:
print("Not connected to remote API server")
sys.exit("Could not connect")
return clientID
def getObjectsHandles(clientID, objects):
handles = {}
for obj_idx in range(len(objects)):
err_code, handles[objects[obj_idx]] = vrep.simxGetObjectHandle(clientID, objects[obj_idx], vrep.simx_opmode_blocking)
print('err_code is :',err_code)
if err_code:
print("Failed to get a handle for object: {}, got error code: {}".format( objects[obj_idx], err_code))
break;
return handles
def setCameraRandomPose(clientID, obj, newPose):
errPos= vrep.simxSetObjectPosition(clientID, obj, -1, newPose[0,:], vrep.simx_opmode_oneshot_wait)
errOrient= vrep.simxSetObjectOrientation(clientID, obj, -1, newPose[1,:], vrep.simx_opmode_oneshot_wait)
if errPos :
print("Failed to set position for object: {}, got error code: {}".format(obj, errPos))
elif errOrient:
print("Failed to set orientation for object: {}, got error code: {}".format(obj, errOrient))
else:pass
def renderSensorImage(clientID, camera,sleep_time):
#errRender, resolution, image = vrep.simxGetVisionSensorImage(clientID, camera, 0, vrep.simx_opmode_streaming)
errRender, resolution, image = vrep.simxGetVisionSensorImage(clientID, camera, 0, vrep.simx_opmode_blocking)
#print('errRender1:', errRender)
time.sleep(sleep_time)
#errRender, resolution, image = vrep.simxGetVisionSensorImage(clientID, camera, 0, vrep.simx_opmode_buffer)
errRender, resolution, image = vrep.simxGetVisionSensorImage(clientID, camera, 0, vrep.simx_opmode_blocking)
#print('errRender:',errRender)
#print('vrep.simx_return_ok is :',vrep.simx_return_ok)
if errRender == vrep.simx_return_ok:
img = np.array(image, dtype=np.uint8)
img.resize([resolution[0], resolution[1], 3])
img = cv2.flip(img, 0)
img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
img = cv2.resize(img,(256,256))
return img
# Device configuration
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print('torch.cuda.is_available() is:', torch.cuda.is_available())
###-----------model define-------resnet-152
model = torchvision.models.resnet152(pretrained=False)
model.load_state_dict(torch.load('resnet152-b121ed2d.pth'))
print(model)
fc_inputs = model.fc.in_features
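# replace the pretrained ResNet-152 classifier head with a 6-output MLP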
model.fc = nn.Sequential(
nn.Linear(fc_inputs, 2048),
nn.LeakyReLU(),
nn.Linear(2048,1024),
nn.LeakyReLU(),
nn.Linear(1024,512),
nn.LeakyReLU(),
nn.Linear(512, 6)
)
model = model.to(device)
#model = torch.load('model_152.kpl')
#model = model.to(device)
print('model is:',model)
def image_switch(images):
np_images = images.cuda().data.cpu().numpy()
np_images *= 255
np_images = np_images.astype(np.uint8)#3x512x512
# cv2.namedWindow("image", cv2.WINDOW_AUTOSIZE )
np_images1 = np.swapaxes(np_images, 0, 1) # 512x3x512
np_images2 =
|
np.swapaxes(np_images1, 1, 2)
|
numpy.swapaxes
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
x =
|
np.arange(0.0, 2, 0.01)
|
numpy.arange
|
# Copyright (c) 2020-2022, NVIDIA CORPORATION.
import datetime
import operator
import re
import cupy as cp
import numpy as np
import pandas as pd
import pytest
import cudf
from cudf.core._compat import PANDAS_GE_120
from cudf.testing import _utils as utils
from cudf.testing._utils import assert_eq, assert_exceptions_equal
_TIMEDELTA_DATA = [
[1000000, 200000, 3000000],
[1000000, 200000, None],
[],
[None],
[None, None, None, None, None],
[12, 12, 22, 343, 4353534, 435342],
np.array([10, 20, 30, None, 100]),
cp.asarray([10, 20, 30, 100]),
[1000000, 200000, 3000000],
[1000000, 200000, None],
[1],
[12, 11, 232, 223432411, 2343241, 234324, 23234],
[12, 11, 2.32, 2234.32411, 2343.241, 23432.4, 23234],
[1.321, 1132.324, 23223231.11, 233.41, 0.2434, 332, 323],
[
136457654736252,
134736784364431,
245345345545332,
223432411,
2343241,
3634548734,
23234,
],
[12, 11, 2.32, 2234.32411, 2343.241, 23432.4, 23234],
]
_TIMEDELTA_DATA_NON_OVERFLOW = [
[1000000, 200000, 3000000],
[1000000, 200000, None],
[],
[None],
[None, None, None, None, None],
[12, 12, 22, 343, 4353534, 435342],
np.array([10, 20, 30, None, 100]),
cp.asarray([10, 20, 30, 100]),
[1000000, 200000, 3000000],
[1000000, 200000, None],
[1],
[12, 11, 232, 223432411, 2343241, 234324, 23234],
[12, 11, 2.32, 2234.32411, 2343.241, 23432.4, 23234],
[1.321, 1132.324, 23223231.11, 233.41, 0.2434, 332, 323],
[12, 11, 2.32, 2234.32411, 2343.241, 23432.4, 23234],
]
@pytest.mark.parametrize(
"data",
[
[1000000, 200000, 3000000],
[1000000, 200000, None],
[],
[None],
[None, None, None, None, None],
[12, 12, 22, 343, 4353534, 435342],
[0.3534, 12, 22, 343, 43.53534, 4353.42],
np.array([10, 20, 30, None, 100]),
cp.asarray([10, 20, 30, 100]),
],
)
@pytest.mark.parametrize("dtype", utils.TIMEDELTA_TYPES)
def test_timedelta_series_create(data, dtype):
if dtype not in ("timedelta64[ns]",):
pytest.skip(
"Bug in pandas" "https://github.com/pandas-dev/pandas/issues/35465"
)
psr = pd.Series(
cp.asnumpy(data) if isinstance(data, cp.ndarray) else data, dtype=dtype
)
gsr = cudf.Series(data, dtype=dtype)
assert_eq(psr, gsr)
@pytest.mark.parametrize(
"data",
[
[1000000, 200000, 3000000],
[12, 12, 22, 343, 4353534, 435342],
[0.3534, 12, 22, 343, 43.53534, 4353.42],
cp.asarray([10, 20, 30, 100]),
],
)
@pytest.mark.parametrize("dtype", utils.TIMEDELTA_TYPES)
@pytest.mark.parametrize("cast_dtype", ["int64", "category"])
def test_timedelta_from_typecast(data, dtype, cast_dtype):
if dtype not in ("timedelta64[ns]",):
pytest.skip(
"Bug in pandas" "https://github.com/pandas-dev/pandas/issues/35465"
)
psr = pd.Series(
cp.asnumpy(data) if isinstance(data, cp.ndarray) else data, dtype=dtype
)
gsr = cudf.Series(data, dtype=dtype)
if cast_dtype == "int64":
assert_eq(psr.values.view(cast_dtype), gsr.astype(cast_dtype).values)
else:
assert_eq(psr.astype(cast_dtype), gsr.astype(cast_dtype))
@pytest.mark.parametrize(
"data",
[
[1000000, 200000, 3000000],
[12, 12, 22, 343, 4353534, 435342],
[0.3534, 12, 22, 343, 43.53534, 4353.42],
cp.asarray([10, 20, 30, 100]),
],
)
@pytest.mark.parametrize("cast_dtype", utils.TIMEDELTA_TYPES)
def test_timedelta_to_typecast(data, cast_dtype):
psr = pd.Series(cp.asnumpy(data) if isinstance(data, cp.ndarray) else data)
gsr = cudf.Series(data)
assert_eq(psr.astype(cast_dtype), gsr.astype(cast_dtype))
@pytest.mark.parametrize(
"data",
[
[1000000, 200000, 3000000],
[1000000, 200000, None],
[],
[None],
[None, None, None, None, None],
[12, 12, 22, 343, 4353534, 435342],
[0.3534, 12, 22, 343, 43.53534, 4353.42],
np.array([10, 20, 30, None, 100]),
cp.asarray([10, 20, 30, 100]),
],
)
@pytest.mark.parametrize("dtype", utils.TIMEDELTA_TYPES)
def test_timedelta_from_pandas(data, dtype):
psr = pd.Series(
cp.asnumpy(data) if isinstance(data, cp.ndarray) else data, dtype=dtype
)
gsr = cudf.from_pandas(psr)
assert_eq(psr, gsr)
@pytest.mark.parametrize(
"data",
[
[1000000, 200000, 3000000],
[1000000, 200000, None],
[],
[None],
[None, None, None, None, None],
[12, 12, 22, 343, 4353534, 435342],
np.array([10, 20, 30, None, 100]),
cp.asarray([10, 20, 30, 100]),
],
)
@pytest.mark.parametrize("dtype", utils.TIMEDELTA_TYPES)
def test_timedelta_series_to_numpy(data, dtype):
gsr = cudf.Series(data, dtype=dtype)
expected = np.array(
cp.asnumpy(data) if isinstance(data, cp.ndarray) else data, dtype=dtype
)
expected = expected[~np.isnan(expected)]
actual = gsr.dropna().to_numpy()
np.testing.assert_array_equal(expected, actual)
@pytest.mark.parametrize(
"data",
[
[1000000, 200000, 3000000],
[1000000, 200000, None],
[],
[None],
[None, None, None, None, None],
[12, 12, 22, 343, 4353534, 435342],
np.array([10, 20, 30, None, 100]),
cp.asarray([10, 20, 30, 100]),
],
)
@pytest.mark.parametrize("dtype", utils.TIMEDELTA_TYPES)
def test_timedelta_series_to_pandas(data, dtype):
gsr = cudf.Series(data, dtype=dtype)
expected = np.array(
cp.asnumpy(data) if isinstance(data, cp.ndarray) else data, dtype=dtype
)
expected = pd.Series(expected)
actual = gsr.to_pandas()
assert_eq(expected, actual)
@pytest.mark.parametrize(
"data,other",
[
([1000000, 200000, 3000000], [1000000, 200000, 3000000]),
([1000000, 200000, None], [1000000, 200000, None]),
([], []),
([None], [None]),
([None, None, None, None, None], [None, None, None, None, None]),
(
[12, 12, 22, 343, 4353534, 435342],
[12, 12, 22, 343, 4353534, 435342],
),
(np.array([10, 20, 30, None, 100]), np.array([10, 20, 30, None, 100])),
(cp.asarray([10, 20, 30, 100]), cp.asarray([10, 20, 30, 100])),
([1000000, 200000, 3000000], [200000, 34543, 3000000]),
([1000000, 200000, None], [1000000, 200000, 3000000]),
([None], [1]),
(
[12, 12, 22, 343, 4353534, 435342],
[None, 1, 220, 3, 34, 4353423287],
),
(np.array([10, 20, 30, None, 100]), np.array([10, 20, 30, None, 100])),
(cp.asarray([10, 20, 30, 100]), cp.asarray([10, 20, 30, 100])),
],
)
@pytest.mark.parametrize("dtype", utils.TIMEDELTA_TYPES)
@pytest.mark.parametrize(
"ops",
[
"eq",
"ne",
"lt",
"gt",
"le",
"ge",
"add",
"radd",
"sub",
"rsub",
"floordiv",
"truediv",
"mod",
],
)
def test_timedelta_ops_misc_inputs(data, other, dtype, ops):
gsr = cudf.Series(data, dtype=dtype)
other_gsr = cudf.Series(other, dtype=dtype)
psr = gsr.to_pandas()
other_psr = other_gsr.to_pandas()
expected = getattr(psr, ops)(other_psr)
actual = getattr(gsr, ops)(other_gsr)
if ops in ("eq", "lt", "gt", "le", "ge"):
actual = actual.fillna(False)
elif ops == "ne":
actual = actual.fillna(True)
if ops == "floordiv":
expected[actual.isna().to_pandas()] = np.nan
assert_eq(expected, actual)
@pytest.mark.parametrize(
"datetime_data,timedelta_data",
[
([1000000, 200000, 3000000], [1000000, 200000, 3000000]),
([1000000, 200000, None], [1000000, 200000, None]),
([], []),
([None], [None]),
([None, None, None, None, None], [None, None, None, None, None]),
(
[12, 12, 22, 343, 4353534, 435342],
[12, 12, 22, 343, 4353534, 435342],
),
(np.array([10, 20, 30, None, 100]), np.array([10, 20, 30, None, 100])),
(cp.asarray([10, 20, 30, 100]), cp.asarray([10, 20, 30, 100])),
([1000000, 200000, 3000000], [200000, 34543, 3000000]),
([1000000, 200000, None], [1000000, 200000, 3000000]),
([None], [1]),
(
[12, 12, 22, 343, 4353534, 435342],
[None, 1, 220, 3, 34, 4353423287],
),
(np.array([10, 20, 30, None, 100]), np.array([10, 20, 30, None, 100])),
(cp.asarray([10, 20, 30, 100]), cp.asarray([10, 20, 30, 100])),
(
[12, 11, 232, 223432411, 2343241, 234324, 23234],
[11, 1132324, 2322323111, 23341, 2434, 332, 323],
),
(
[12, 11, 2.32, 2234.32411, 2343.241, 23432.4, 23234],
[11, 1132324, 2322323111, 23341, 2434, 332, 323],
),
(
[11, 1132324, 2322323111, 23341, 2434, 332, 323],
[12, 11, 2.32, 2234.32411, 2343.241, 23432.4, 23234],
),
(
[1.321, 1132.324, 23223231.11, 233.41, 0.2434, 332, 323],
[12, 11, 2.32, 2234.32411, 2343.241, 23432.4, 23234],
),
],
)
@pytest.mark.parametrize("datetime_dtype", utils.DATETIME_TYPES)
@pytest.mark.parametrize("timedelta_dtype", utils.TIMEDELTA_TYPES)
@pytest.mark.parametrize(
"ops",
["add", "sub"],
)
def test_timedelta_ops_datetime_inputs(
datetime_data, timedelta_data, datetime_dtype, timedelta_dtype, ops
):
gsr_datetime = cudf.Series(datetime_data, dtype=datetime_dtype)
gsr_timedelta = cudf.Series(timedelta_data, dtype=timedelta_dtype)
psr_datetime = gsr_datetime.to_pandas()
psr_timedelta = gsr_timedelta.to_pandas()
expected = getattr(psr_datetime, ops)(psr_timedelta)
actual = getattr(gsr_datetime, ops)(gsr_timedelta)
assert_eq(expected, actual)
if ops == "add":
expected = getattr(psr_timedelta, ops)(psr_datetime)
actual = getattr(gsr_timedelta, ops)(gsr_datetime)
assert_eq(expected, actual)
elif ops == "sub":
assert_exceptions_equal(
lfunc=operator.sub,
rfunc=operator.sub,
lfunc_args_and_kwargs=([psr_timedelta, psr_datetime],),
rfunc_args_and_kwargs=([gsr_timedelta, gsr_datetime],),
compare_error_message=False,
)
@pytest.mark.parametrize(
"df",
[
pd.DataFrame(
{
"A": pd.Series(pd.date_range("2012-1-1", periods=3, freq="D")),
"B": pd.Series([pd.Timedelta(days=i) for i in range(3)]),
}
),
pd.DataFrame(
{
"A": pd.Series(
pd.date_range("1994-1-1", periods=50, freq="D")
),
"B": pd.Series([pd.Timedelta(days=i) for i in range(50)]),
}
),
],
)
@pytest.mark.parametrize("op", ["add", "sub"])
def test_timedelta_dataframe_ops(df, op):
pdf = df
gdf = cudf.from_pandas(pdf)
if op == "add":
pdf["C"] = pdf["A"] + pdf["B"]
gdf["C"] = gdf["A"] + gdf["B"]
elif op == "sub":
pdf["C"] = pdf["A"] - pdf["B"]
gdf["C"] = gdf["A"] - gdf["B"]
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"data",
[
[1000000, 200000, 3000000],
[1000000, 200000, None],
[],
[None],
[None, None, None, None, None],
[12, 12, 22, 343, 4353534, 435342],
np.array([10, 20, 30, None, 100]),
cp.asarray([10, 20, 30, 100]),
[1000000, 200000, 3000000],
[1000000, 200000, None],
[1],
[12, 11, 232, 223432411, 2343241, 234324, 23234],
[12, 11, 2.32, 2234.32411, 2343.241, 23432.4, 23234],
pytest.param(
[1.321, 1132.324, 23223231.11, 233.41, 0.2434, 332, 323],
marks=pytest.mark.xfail(
reason="https://github.com/rapidsai/cudf/issues/5938"
),
),
[12, 11, 2.32, 2234.32411, 2343.241, 23432.4, 23234],
],
)
@pytest.mark.parametrize(
"other_scalars",
[
datetime.timedelta(days=768),
datetime.timedelta(seconds=768),
datetime.timedelta(microseconds=7),
datetime.timedelta(minutes=447),
datetime.timedelta(hours=447),
datetime.timedelta(weeks=734),
np.timedelta64(4, "s"),
np.timedelta64(456, "D"),
np.timedelta64(46, "h"),
pytest.param(
np.timedelta64("nat"),
marks=pytest.mark.xfail(
condition=not PANDAS_GE_120,
reason="https://github.com/pandas-dev/pandas/issues/35529",
),
),
np.timedelta64(1, "s"),
np.timedelta64(1, "ms"),
np.timedelta64(1, "us"),
np.timedelta64(1, "ns"),
],
)
@pytest.mark.parametrize("dtype", utils.TIMEDELTA_TYPES)
@pytest.mark.parametrize(
"op",
[
"add",
"sub",
"truediv",
"mod",
pytest.param(
"floordiv",
marks=pytest.mark.xfail(
condition=not PANDAS_GE_120,
reason="https://github.com/pandas-dev/pandas/issues/35529",
),
),
],
)
def test_timedelta_series_ops_with_scalars(data, other_scalars, dtype, op):
gsr = cudf.Series(data=data, dtype=dtype)
psr = gsr.to_pandas()
if op == "add":
expected = psr + other_scalars
actual = gsr + other_scalars
elif op == "sub":
expected = psr - other_scalars
actual = gsr - other_scalars
elif op == "truediv":
expected = psr / other_scalars
actual = gsr / other_scalars
elif op == "floordiv":
expected = psr // other_scalars
actual = gsr // other_scalars
elif op == "mod":
expected = psr % other_scalars
actual = gsr % other_scalars
assert_eq(expected, actual)
if op == "add":
expected = other_scalars + psr
actual = other_scalars + gsr
elif op == "sub":
expected = other_scalars - psr
actual = other_scalars - gsr
elif op == "truediv":
expected = other_scalars / psr
actual = other_scalars / gsr
elif op == "floordiv":
expected = other_scalars // psr
actual = other_scalars // gsr
elif op == "mod":
expected = other_scalars % psr
actual = other_scalars % gsr
assert_eq(expected, actual)
@pytest.mark.parametrize(
"data",
[
[1000000, 200000, 3000000],
[1000000, 200000, None],
[],
[None],
[None, None, None, None, None],
[12, 12, 22, 343, 4353534, 435342],
np.array([10, 20, 30, None, 100]),
cp.asarray([10, 20, 30, 100]),
[1000000, 200000, 3000000],
[1000000, 200000, None],
[1],
[12, 11, 232, 223432411, 2343241, 234324, 23234],
[12, 11, 2.32, 2234.32411, 2343.241, 23432.4, 23234],
pytest.param(
[1.321, 1132.324, 23223231.11, 233.41, 0.2434, 332, 323],
marks=pytest.mark.xfail(
reason="https://github.com/rapidsai/cudf/issues/5938"
),
),
[12, 11, 2.32, 2234.32411, 2343.241, 23432.4, 23234],
],
)
@pytest.mark.parametrize(
"cpu_scalar",
[
datetime.timedelta(seconds=768),
datetime.timedelta(microseconds=7),
np.timedelta64(4, "s"),
pytest.param(
np.timedelta64("nat", "s"),
marks=pytest.mark.xfail(
condition=not PANDAS_GE_120,
reason="https://github.com/pandas-dev/pandas/issues/35529",
),
),
np.timedelta64(1, "s"),
np.timedelta64(1, "ms"),
np.timedelta64(1, "us"),
|
np.timedelta64("nat", "ns")
|
numpy.timedelta64
|
import numpy
import scipy.stats
import patsy
import re
import pandas
class difference_test(object):
def __init__(self, formula_like, data = {}, conf_level = 0.95,
equal_variances = True, independent_samples = True,
wilcox_parameters = {"zero_method" : "wilcox", "correction" : False, "mode" : "auto"}, **keywords):
# Determining which test to conduct
if equal_variances == True and independent_samples == True:
name = "Independent samples t-test"
if equal_variances == True and independent_samples == False:
name = "Paired samples t-test"
if equal_variances == False and independent_samples == True:
name = "Welch's t-test"
if equal_variances == False and independent_samples == False:
name = "Wilcoxon signed-rank test"
parameters = {"zero_method" : "wilcox", "correction" : False, "mode" : "auto"}
parameters.update(wilcox_parameters)
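# '- 1' removes the intercept so patsy expands the IV into one indicator column per group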
self.DV, self.IV = patsy.dmatrices(formula_like + "- 1", data, 1)
# Checking number of groups in IV, if > 2 stop function
if len(self.IV.design_info.column_names) > 2:
return print(" ",
"ERROR",
"The independent variables has more than 2 groups.",
"This method is not appropriate for this many groups.",
" ",
sep = "\n"*2)
# Cleaning up category names from Patsy output
categories = [re.findall(r"\[(.*)\]", name)[0] for name in self.IV.design_info.column_names]
if name == "Wilcoxon signed-rank test":
self.parameters = {"Test name" : name,
"Formula" : formula_like,
"Conf. Level": conf_level,
"Categories" : categories,
"Equal variances" : equal_variances,
"Independent samples" : independent_samples,
"Wilcox parameters" : parameters}
else:
self.parameters = {"Test name" : name,
"Formula" : formula_like,
"Conf. Level": conf_level,
"Categories" : categories,
"Equal variances" : equal_variances,
"Independent samples" : independent_samples}
def conduct(self, return_type = "Dataframe", effect_size = None):
# Parameter check
if return_type.upper() not in ["DATAFRAME", "DICTIONARY"]:
return print(" ",
"Not a supported return type. Only 'Dataframe' and 'Dictionary' are supported at this time.",
" ",
sep = "\n"*2)
if effect_size != None:
if type(effect_size) == str and effect_size != "all":
effect_size = [effect_size]  # wrap the single effect size name in a list
elif type(effect_size) == str and effect_size == "all":
effect_size = ["Cohen's D", "Hedge's G", "Glass's delta1", "Glass's delta2", "r"]
for es in effect_size:
if es not in [None, "Cohen's D", "Hedge's G", "Glass's delta1", "Glass's delta2", "r", "all"]:
return print(" ",
"Not a supported effect size. Either enter None or one of the following: 'Cohen's D', 'Hedge's G', 'Glass's delta1', 'Glass's delta2', 'r', and 'all'.",
" ",
sep = "\n"*2)
# Splitting into separate arrays and getting descriptive statistics
group1, group2 = numpy.hsplit((self.DV * self.IV), 2)
group1 = numpy.trim_zeros(group1)
group2 = numpy.trim_zeros(group2)
# Getting the summary table ready - part 1
group1_info = summarize(group1, stats = ["N", "Mean", "SD", "SE", "Variance", "CI"], name = self.parameters["Categories"][0], ci_level = self.parameters["Conf. Level"], return_type = "Dictionary")
group2_info = summarize(group2, stats = ["N", "Mean", "SD", "SE", "Variance", "CI"], name = self.parameters["Categories"][1], ci_level = self.parameters["Conf. Level"], return_type = "Dictionary")
combined = summarize(numpy.vstack((group1, group2)),
stats = ["N", "Mean", "SD", "SE", "Variance", "CI"], name = "combined", ci_level = self.parameters["Conf. Level"], return_type = "Dictionary")
diff = {}
diff["Name"] = "diff"
diff["Mean"] = group1_info["Mean"] - group2_info["Mean"]
# Performing the statistical test
if self.parameters["Test name"] == "Independent samples t-test":
stat, pval = scipy.stats.ttest_ind(group1, group2, nan_policy = 'omit')
stat_name = "t"
dof = group1_info["N"] + group2_info["N"] - 2
if self.parameters["Test name"] == "Paired samples t-test":
stat, pval = scipy.stats.ttest_rel(group1, group2, nan_policy = 'omit')
stat_name = "t"
dof = group1_info["N"] - 1
if self.parameters["Test name"] == "Welch's t-test":
stat, pval = scipy.stats.ttest_ind(group1, group2, equal_var = False, nan_policy = 'omit')
stat_name = "t"
## Welch-Satterthwaite Degrees of Freedom ##
dof = -2 + (((group1_info["Variance"]/group1_info["N"]) + (group2_info["Variance"]/group2_info["N"]))**2 / ((group1_info["Variance"]/group1_info["N"])**2 / (group1_info["N"]+1) + (group2_info["Variance"]/group2_info["N"])**2 / (group2_info["N"]+1)))
if self.parameters["Test name"] == "Wilcoxon signed-rank test":
d = group1 - group2
d = numpy.reshape(d, (d.shape[0], ))
stat, pval = scipy.stats.wilcoxon(d, zero_method = self.parameters["Wilcox parameters"]['zero_method'], correction = self.parameters["Wilcox parameters"]['correction'], mode = self.parameters["Wilcox parameters"]['mode'])
stat_name = "W"
dof = group1_info["N"] - 1
# P value tails
pval_lt = scipy.stats.t.cdf(stat, dof)
pval_rt = 1 - scipy.stats.t.cdf(stat, dof)
# Creating testing information table
if self.parameters["Test name"] == "Wilcoxon signed-rank test":
result_table = {self.parameters["Test name"] : [f"({self.parameters['Categories'][0]} = {self.parameters['Categories'][1]})",
f"{stat_name} =",
"Two sided p-value ="],
"Results" : ['',
float(stat),
float(pval)]}
else:
result_table = {self.parameters["Test name"] : [f"Difference ({self.parameters['Categories'][0]} - {self.parameters['Categories'][1]})",
"Degrees of freedom =",
f"{stat_name} =",
"Two sided test p-value =",
"Difference < 0 p-value =",
"Difference > 0 p-value ="
],
"Results" : [float(diff["Mean"]),
float(dof),
float(stat),
float(pval),
float(pval_lt),
float(pval_rt),
]}
# Creating effect size table
if effect_size != None:
if self.parameters["Test name"] == "Wilcoxon signed-rank test":
if effect_size != "r":
print(" ",
f"Only Point-Biserial r will be calculated for the {self.parameters['Test name']}.",
" ",
sep = "\n"*2)
r = stat / scipy.stats.rankdata(d).sum()
result_table[self.parameters["Test name"]].append("Point-Biserial r")
result_table["Results"].append(float(r))
else:
for es in effect_size:
if es == "Cohen's D":
if self.parameters["Test name"] == "Paired samples t-test":
#### DECIDE IF YOU WANT TO SUPPORT THIS VERSION - USED IN RESEARCHPY TTEST()
# Cohen's Dz (within-subjects design)
#d = stat / numpy.sqrt(group1_info["N"])
#result_table[self.parameters["Test name"]].append("Cohen's Dz")
#result_table["Results"].append(float(d))
# Cohen's Dav (within-subjects design)
d = (group1_info["Mean"] - group2_info["Mean"]) / ((group1_info["SD"] + group2_info["SD"]) / 2)
result_table[self.parameters["Test name"]].append("Cohen's Dav")
result_table["Results"].append(float(d))
else:
# Cohen's d (between-subjects design)
d = (group1_info['Mean'] - group2_info["Mean"]) / numpy.sqrt((((group1_info["N"] - 1)*group1_info["Variance"] + (group2_info["N"] - 1)*group2_info["Variance"]) / (group1_info["N"] + group2_info["N"] - 2)))
result_table[self.parameters["Test name"]].append("Cohen's Ds")
result_table["Results"].append(float(d))
if es == "Hedge's G":
if self.parameters["Test name"] == "Paired samples t-test":
# Cohen's Dz (within-subjects design)
#d = stat / numpy.sqrt(group1_info["N"])
# Cohen's Dav (within-subjects design)
d = (group1_info["Mean"] - group2_info["Mean"]) / ((group1_info["SD"] + group2_info["SD"]) / 2)
g = d * (1 - (3 / ((4*(group1_info["N"] + group2_info["N"])) - 9)))
result_table[self.parameters["Test name"]].append("Hedge's Gav")
result_table["Results"].append(float(g))
else:
# Cohen's d (between-subjects design)
d = (group1_info['Mean'] - group2_info["Mean"]) /
|
numpy.sqrt((((group1_info["N"] - 1)*group1_info["Variance"] + (group2_info["N"] - 1)*group2_info["Variance"]) / (group1_info["N"] + group2_info["N"] - 2)))
|
numpy.sqrt
|
# Copyright 2016 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, absolute_import
from functools import reduce
import numpy as np
import gpflow
import tensorflow as tf
from .spectral_covariance import make_Kuu, make_Kuf
from .kronecker_ops import kvs_dot_vec
class GPMC_1d(gpflow.models.GPModel):
def __init__(self, X, Y, ms, a, b, kern, likelihood,
mean_function=gpflow.mean_functions.Zero()):
"""
Here we assume the interval is [a,b]
"""
assert X.shape[1] == 1
assert isinstance(kern, (gpflow.kernels.Matern12,
gpflow.kernels.Matern32,
gpflow.kernels.Matern52))
kern = kern
gpflow.models.GPModel.__init__(self, X, Y, kern,
likelihood, mean_function)
self.num_data = X.shape[0]
self.num_latent = Y.shape[1]
self.a = a
self.b = b
self.ms = ms
# initialize variational parameters
Ncos = self.ms.size
Nsin = self.ms.size - 1
if isinstance(self.kern, gpflow.kernels.Matern12):
Ncos += 1
elif isinstance(self.kern, gpflow.kernels.Matern32):
Ncos += 1
Nsin += 1
else:
raise NotImplementedError
self.V = gpflow.param.Param(np.zeros((Ncos + Nsin, 1)))
self.V.prior = gpflow.priors.Gaussian(0., 1.)
#@<EMAIL>.<EMAIL>Flow()
def mats(self):
Kuf = make_Kuf(self.X, self.a, self.b, self.ms)
Kuu = make_Kuu(self.kern, self.a, self.b, self.ms)
KiKuf = Kuu.solve(Kuf)
var = self.kern.K(self.X)
return var, KiKuf, Kuf
def build_predict(self, X, full_cov=False):
# given self.V, compute q(f)
Kuf = make_Kuf(X, self.a, self.b, self.ms)
Kuu = make_Kuu(self.kern, self.a, self.b, self.ms)
KiKuf = Kuu.solve(Kuf)
RKiKuf = Kuu.matmul_sqrt(KiKuf)
mu = tf.matmul(tf.transpose(RKiKuf), self.V)
if full_cov:
# Kff
var = self.kern.K(X)
# Qff
var = var - tf.matmul(tf.transpose(Kuf), KiKuf)
var = tf.expand_dims(var, 2)
else:
# Kff:
var = self.kern.Kdiag(X)
# Qff
var = var - tf.reduce_sum(Kuf * KiKuf, 0)
var = tf.reshape(var, (-1, 1))
return mu, var
def build_likelihood(self):
# compute the mean and variance of the latent function
f_mu, f_var = self.build_predict(self.X, full_cov=False)
E_lik = self.likelihood.variational_expectations(f_mu, f_var, self.Y)
return tf.reduce_sum(E_lik)
def kron_vec_sqrt_transpose(K, vec):
"""
K is a list of objects to be kroneckered
vec is a N x 1 tf_array
"""
N_by_1 = tf.stack([-1, 1])
def f(v, k):
v = tf.reshape(v, tf.stack([k.sqrt_dims, -1]))
v = k.matmul_sqrt_transpose(v)
return tf.reshape(tf.transpose(v), N_by_1) # transposing first flattens the vector in column order
return reduce(f, K, vec)
class GPMC_kron(gpflow.models.GPModel):
def __init__(self, X, Y, ms, a, b, kerns, likelihood, mean_function=None):
"""
X is a np array of stimuli
Y is a np array of responses
ms is a np integer array defining the frequencies (usually np.arange(M))
a is a np array of the lower limits
b is a np array of the upper limits
kerns is a list of (Matern) kernels, one for each column of X
likelihood is a gpflow likelihood
# Note: we use the same frequencies for each dimension in this code for simplicity.
"""
assert a.size == b.size == len(kerns) == X.shape[1]
for kern in kerns:
assert isinstance(kern, (gpflow.kernels.Matern12,
gpflow.kernels.Matern32,
gpflow.kernels.Matern52))
if mean_function is None:
mean_function = gpflow.mean_functions.Zero()
gpflow.models.GPModel.__init__(self, X, Y, kern=None,
likelihood=likelihood, mean_function=mean_function)
self.num_data = X.shape[0]
self.num_latent = 1 # multiple columns not supported in this version
self.a = a
self.b = b
self.ms = ms
# initialize variational parameters
self.Ms = []
for kern in kerns:
Ncos_d = self.ms.size
Nsin_d = self.ms.size - 1
if isinstance(kern, gpflow.kernels.Matern12):
Ncos_d += 1
elif isinstance(kern, gpflow.kernels.Matern32):
Ncos_d += 1
Nsin_d += 1
elif isinstance(kern, gpflow.kernels.Matern52):
Ncos_d += 2
Nsin_d += 1
else:
raise NotImplementedError
self.Ms.append(Ncos_d + Nsin_d)
self.kerns = gpflow.param.ParamList(kerns)
self.V = gpflow.param.Param(np.zeros((np.prod(self.Ms), 1)))
self.V.prior = gpflow.priors.Gaussian(0., 1.)
def build_predict(self, X, full_cov=False):
Kuf = [make_Kuf(k, X[:, i:i+1], a, b, self.ms) for i, (k, a, b) in enumerate(zip(self.kerns, self.a, self.b))]
Kuu = [make_Kuu(k, a, b, self.ms) for k, a, b, in zip(self.kerns, self.a, self.b)]
KiKuf = [Kuu_d.solve(Kuf_d) for Kuu_d, Kuf_d in zip(Kuu, Kuf)]
RV = kron_vec_sqrt_transpose(Kuu, self.V) # M x 1
mu = kvs_dot_vec([tf.transpose(KiKuf_d) for KiKuf_d in KiKuf], RV) # N x 1
if full_cov:
raise NotImplementedError
else:
# Kff:
var = reduce(tf.mul, [k.Kdiag(X[:, i:i+1]) for i, k in enumerate(self.kerns)])
# Qff
var = var - reduce(tf.mul, [tf.reduce_sum(Kuf_d * KiKuf_d, 0) for Kuf_d, KiKuf_d in zip(Kuf, KiKuf)])
var = tf.reshape(var, (-1, 1))
return mu + self.mean_function(self.X), var
def build_likelihood(self):
Kuf = [make_Kuf(k, self.X[:, i:i+1], a, b, self.ms)
for i, (k, a, b) in enumerate(zip(self.kerns, self.a, self.b))]
Kuu = [make_Kuu(k, a, b, self.ms) for k, a, b, in zip(self.kerns, self.a, self.b)]
# get mu and var of F
KiKuf = [Kuu_d.solve(Kuf_d) for Kuu_d, Kuf_d in zip(Kuu, Kuf)]
RV = kron_vec_sqrt_transpose(Kuu, self.V) # M x 1
mu = kvs_dot_vec([tf.transpose(KiKuf_d) for KiKuf_d in KiKuf], RV) # N x 1
mu += self.mean_function(self.X)
var = reduce(tf.mul, [k.Kdiag(self.X[:, i:i+1]) for i, k in enumerate(self.kerns)])
var = var - reduce(tf.mul, [tf.reduce_sum(Kuf_d * KiKuf_d, 0) for Kuf_d, KiKuf_d in zip(Kuf, KiKuf)])
var = tf.reshape(var, (-1, 1))
E_lik = self.likelihood.variational_expectations(mu, var, self.Y)
return tf.reduce_sum(E_lik)
if __name__ == '__main__':
from matplotlib import pyplot as plt
np.random.seed(0)
X = np.random.rand(80, 1)*10 - 5
X = np.sort(X, axis=0)
Y = np.cos(3*X) + 2*np.sin(5*X) + np.random.randn(*X.shape)*0.8
Y = np.exp(Y)
# =================================================
# Course : legged robots
# Student : <NAME>
# Info : useful functions for robotics labs
# =================================================
# ======================
# required libraries
# ======================
import os
import numpy as np
import pinocchio as pin
from copy import copy
from numpy.linalg import inv
from numpy import multiply as mul
from numpy import matmul as mx
from numpy import transpose as tr
import pandas as pd
# =============
# functions
# =============
def sinusoidal_reference_generator(q0, a, f, t_change, t):
"""
@info: generates a sine signal for "t_change" seconds then change to constant reference.
@inputs:
------
- q0: initial joint/cartesian position
- a: amplitude
- f: frequency [Hz]
- t_change: change from sinusoidal to constant reference [sec]
- t: simulation time [sec]
@outputs:
-------
- q, dq, ddq: joint/cartesian position, velocity and acceleration
"""
w = 2*np.pi*f # [rad/s]
if t<=t_change:
q = q0 + a*np.sin(w*t) # [rad]
dq = a*w*np.cos(w*t) # [rad/s]
ddq = -a*w*w*np.sin(w*t) # [rad/s^2]
else:
q = q0 + a*np.sin(w*t_change) # [rad]
dq = 0 # [rad/s]
ddq = 0 # [rad/s^2]
return q, dq, ddq
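# Illustrative usage (added example, not part of the original lab code): sample
# the reference during the sinusoidal phase and after the switch to a constant
# reference; all numeric values below are arbitrary.
def _demo_sinusoidal_reference():
    q0, a, f, t_change = 0.0, 0.5, 0.2, 5.0
    q_sin, dq_sin, ddq_sin = sinusoidal_reference_generator(q0, a, f, t_change, t=1.0)
    q_const, dq_const, ddq_const = sinusoidal_reference_generator(q0, a, f, t_change, t=10.0)
    # the second triple has dq = ddq = 0 because the reference is held constant
    return (q_sin, dq_sin, ddq_sin), (q_const, dq_const, ddq_const)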
def step_reference_generator(q0, a, t_step, t):
"""
@info: generate a constant reference.
@inputs:
------
- q0: initial joint/cartesian position
- a: constant reference
- t_step: start step [sec]
- t: simulation time [sec]
@outputs:
-------
- q, dq, ddq: joint/cartesian position, velocity and acceleration
"""
if t>=t_step:
q = q0 + a # [rad]
dq = 0 # [rad/s]
ddq = 0 # [rad/s^2]
else:
q = copy(q0) # [rad]
dq = 0 # [rad/s]
ddq = 0 # [rad/s^2]
return q, dq, ddq
def circular_trayectory_generator(t,radius=0.05, z_amp=0.02, rpy_amp=np.zeros(3), freq_xyz=0.1, freq_rpy=0.1):
"""
@info generate points of a circular trajectory.
@inputs:
-------
- t : simulation time [s]
- radius: radius of circular trajectory on xy-plane [m]
- z_amp: amplitude of sinusoidal trajectory on z-plane [m]
- freq_xyz, freq_rpy: frequencies of the xyz and rpy trajectories [Hz]
- rpy_amp: amplitude of the sinusoidal rpy trajectory [rad]
Outputs:
-------
- pose: end-effector position (xyz) and orientation (rpy)
- dpose: end-effector velocity (xyz) and dorientation (rpy)
"""
# Parameters of circular trajectory
w_xyz = 2*np.pi*freq_xyz # angular velocity [rad/s]
w_rpy = 2*np.pi*freq_rpy # angular velocity [rad/s]
pos0 = np.array([0.5, 0.0, 0.0]) # initial states
# xyz position
pos = np.array([pos0[0]+radius*np.cos(w_xyz*(t)), pos0[1]+radius*np.sin(w_xyz*(t)), pos0[2]+z_amp*np.sin(w_xyz*t)])
# xyz velocity
vel = np.array([radius*(-w_xyz)*np.sin(w_xyz*(t)), radius*(+w_xyz)*np.cos(w_xyz*(t)), z_amp*w_xyz*np.cos(w_xyz*t)])
# rpy orientation
R = np.array([[1, 0, 0], [0, -1, 0], [0, 0, -1]])
rpy = rot2rpy(R) + rpy_amp*np.sin(w_rpy*t)
drpy = rpy_amp*w_rpy*np.cos(w_rpy*t)
# return end-effector pose and its time-derivative
return np.concatenate((pos, rpy), axis=0), np.concatenate((vel, drpy), axis=0)
def reference_trajectory(x_des, dx_des, x_ref0, dx_ref0, dt):
"""
Info: Generates a reference trajectory based on a desired trajectory.
Inputs:
------
- x_des: desired trajectory
- dx_des: desired trajectory velocity
- x_ref0: initial conditions of x_ref
- dx_ref0: initial conditions of dx_ref
- dt: sampling time
"""
psi = 1 # damping factor
wn = 4 # natural frequency
k0 = wn*wn
k1 = 2*psi*wn
# compute ddx_ref
ddx_ref = np.multiply(dx_des,k1) + np.multiply(x_des,k0) - np.multiply(dx_ref0,k1) - np.multiply(x_ref0,k0)
# double integration
dx_ref = dx_ref0 + dt*ddx_ref
x_ref = x_ref0 + dt*dx_ref
return x_ref, dx_ref, ddx_ref
def update_learning_rate(x, x_min=0.1, x_max=0.7, y_min=0.01, y_max=1):
"""
@info function to update learning rate
@inputs:
--------
- x: input signal
- x_min: below this value the output is y_max
- x_max: above this value the output is y_min
"""
#x = np.linspace(0, 1.2, 100)
y = np.piecewise(x, [x < x_min, (x >=x_min)* (x< x_max), x >= x_max], \
[y_max, lambda x: (x_min-x)/(x_max-x_min)+1, y_min ])
return y
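# Illustrative usage (added example): with the default parameters the learning
# rate is y_max below x_min, decays linearly between x_min and x_max, and
# saturates at y_min above x_max.
def _demo_update_learning_rate():
    xs = np.array([0.05, 0.1, 0.4, 0.7, 1.0])
    return update_learning_rate(xs)  # approximately [1, 1, 0.5, 0.01, 0.01]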
def tl(array):
"""
@info: convert a numpy array to a list
"""
return array.tolist()
def rot2axisangle(R):
"""
@info: computes axis/angle values from rotation matrix
@inputs:
--------
- R: rotation matrix
@outputs:
--------
- angle: angle of rotation
- axis: axis of rotation
"""
R32 = R[2,1]
R23 = R[1,2]
R13 = R[0,2]
R31 = R[2,0]
R21 = R[1,0]
R12 = R[0,1]
tr = np.diag(R).sum()
# angle
angle = np.arctan2(0.5*np.sqrt( np.power(R21-R12,2)+np.power(R31-R13,2)+np.power(R32-R23,2)), 0.5*(tr-1))
# axis
if angle!=0:
rx = (R32-R23)/(2*np.sin(angle))
ry = (R13-R31)/(2*np.sin(angle))
rz = (R21-R12)/(2*np.sin(angle))
axis = np.array([rx, ry, rz])
else:
axis = np.zeros(3)
return angle, axis
def angleaxis2rot(w):
"""
@info: computes rotation matrix from angle/axis representation
@inputs:
------
-
"""
print("development...")
def rot2quat(R):
"""
@info: computes quaternion from rotation matrix
@input:
------
- R: Rotation matrix
@output:
-------
- Q: Quaternion [w, ex, ey, ez]
"""
dEpsilon = 1e-6
Q = np.zeros(4)
Q[0] = 0.5*np.sqrt(R[0,0]+R[1,1]+R[2,2]+1.0)
if ( np.fabs(R[0,0]-R[1,1]-R[2,2]+1.0) < dEpsilon ):
Q[1] = 0.0
else:
Q[1] = 0.5*np.sign(R[2,1]-R[1,2])*np.sqrt(R[0,0]-R[1,1]-R[2,2]+1.0)
if ( np.fabs(R[1,1]-R[2,2]-R[0,0]+1.0) < dEpsilon ):
Q[2] = 0.0
else:
Q[2] = 0.5*np.sign(R[0,2]-R[2,0])*np.sqrt(R[1,1]-R[2,2]-R[0,0]+1.0)
if ( np.fabs(R[2,2]-R[0,0]-R[1,1]+1.0) < dEpsilon ):
Q[3] = 0.0
else:
Q[3] = 0.5*np.sign(R[1,0]-R[0,1])*np.sqrt(R[2,2]-R[0,0]-R[1,1]+1.0)
return Q
def quatError(Qdes, Qmed):
"""
@info: computes quaterion error (Q_e = Q_d . Q_m*).
@inputs:
------
- Qdes: desired quaternion
- Q : measured quaternion
@output:
-------
- Qe : quaternion error
"""
we = Qdes[0]*Qmed[0] + np.dot(Qdes[1:4].T,Qmed[1:4]) - 1
e = -Qdes[0]*Qmed[1:4] + Qmed[0]*Qdes[1:4] - np.cross(Qdes[1:4], Qmed[1:4])
Qe = np.array([ we, e[0], e[1], e[2] ])
return Qe
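# Illustrative check (added example): the quaternion error between a unit
# quaternion and itself is the zero quaternion.
def _demo_quat_error():
    Q = rot2quat(np.eye(3))   # identity rotation -> [1, 0, 0, 0]
    return quatError(Q, Q)    # approximately [0, 0, 0, 0]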
def axisangle_error(R_des, R_med):
"""
@info: computes orientation error and represent with angle/axis.
@inputs:
------
- R_d: desired orientation
- R_m: measured orientation
@outputs:
--------
- e_o: orientation error
"""
R_e = R_med.T.dot(R_des)
angle_e, axis_e = rot2axisangle(R_e)
e_o = R_med.dot(angle_e*axis_e) # w.r.t world frame
return e_o
def rpy2rot(rpy):
"""
@info: computes rotation matrix from roll, pitch, yaw (ZYX euler angles) representation
@inputs:
-------
- rpy[0]: rotation in z-axis (roll)
- rpy[1]: rotation in y-axis (pitch)
- rpy[2]: rotation in x-axis (yaw)
@outputs:
--------
- R: rotation matrix
"""
Rz = np.array([[ np.cos(rpy[0]) , -np.sin(rpy[0]) , 0],
[ np.sin(rpy[0]) , np.cos(rpy[0]) , 0],
[ 0 , 0 , 1]])
Ry = np.array([[np.cos(rpy[1]) , 0 , np.sin(rpy[1])],
[ 0 , 1 , 0],
[ -np.sin(rpy[1]) , 0 , np.cos(rpy[1])]])
Rx = np.array([ [ 1 , 0 , 0],
[0 , np.cos(rpy[2]) , -np.sin(rpy[2])],
[0 , np.sin(rpy[2]) , np.cos(rpy[2])]])
R = np.dot(np.dot(Rz, Ry), Rx)
return R
def rot2rpy(R):
"""
@info: computes roll, pitch, yaw (ZYX euler angles) from rotation matrix
@inputs:
-------
- R: rotation matrix
@outputs:
--------
- rpy[0]: rotation in z-axis (roll)
- rpy[1]: rotation in y-axis (pitch)
- rpy[2]: rotation in x-axis (yaw)
"""
R32 = R[2,1]
R31 = R[2,0]
R33 = R[2,2]
R21 = R[1,0]
R11 = R[0,0]
rpy = np.zeros(3)
rpy[1] = np.arctan2(-R31, np.sqrt(R32*R32 + R33*R33))
rpy[0] = np.arctan2(R21/np.cos(rpy[1]), R11/np.cos(rpy[1]))
rpy[2] = np.arctan2(R32/np.cos(rpy[1]), R33/np.cos(rpy[1]))
return rpy
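# Illustrative round-trip check (added example): converting ZYX euler angles to
# a rotation matrix and back recovers the same angles, away from the pitch
# singularity at +/- pi/2.
def _demo_rpy_roundtrip():
    rpy_in = np.array([0.3, -0.2, 0.1])
    return rot2rpy(rpy2rot(rpy_in))  # approximately equal to rpy_in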
def rot2rpy_unwrapping(R, rpy_old):
"""
@info: computes roll, pitch, yaw (ZYX euler angles) from rotation matrix
@inputs:
-------
- R: rotation matrix
@outputs:
--------
- rpy[0]: rotation in z-axis (roll)
- rpy[1]: rotation in y-axis (pitch)
- rpy[2]: rotation in x-axis (yaw)
"""
R32 = R[2,1]
R31 = R[2,0]
R33 = R[2,2]
R21 = R[1,0]
R11 = R[0,0]
rpy = np.zeros(3)
rpy[1] = np.arctan2(-R31, np.sqrt(R32*R32 + R33*R33))
rpy[0] = np.arctan2(R21/np.cos(rpy[1]), R11/np.cos(rpy[1]))
rpy[2] = np.arctan2(R32/np.cos(rpy[1]), R33/np.cos(rpy[1]))
for i in range(3):
if(rpy[i]<=(rpy_old[i]-np.pi)):
rpy[i] +=2*np.pi
elif(rpy[i]>=(rpy_old[i]+np.pi)):
rpy[i] -=2*np.pi
return rpy
def rpy2angularVel(rpy, drpy):
"""
@info: compute angular velocity (w) from euler angles (roll, pitch and yaw) and their derivatives
@inputs:
-------
- rpy[0]: rotation in z-axis (roll)
- rpy[1]: rotation in y-axis (pitch)
- rpy[2]: rotation in x-axis (yaw)
- drpy[0]: rotation rate in z-axis
- drpy[1]: rotation rate in y-axis
- drpy[2]: rotation rate in x-axis
@outputs:
--------
- w: angular velocity
"""
E0 = np.array( [[0, -np.sin(rpy[0]), np.cos(rpy[0])*np.cos(rpy[1])], \
[0, np.cos(rpy[0]), np.sin(rpy[0])*np.cos(rpy[1])], \
[1, 0, -np.sin(rpy[1]) ]])
w = np.dot(E0, drpy)
return w
def angularVel2rpy(w, rpy):
"""
@info: compute euler angle rates (droll, dpitch, dyaw) from the angular velocity (w)
@inputs:
-------
- rpy[0]: rotation in z-axis (roll)
- rpy[1]: rotation in y-axis (pitch)
- rpy[2]: rotation in x-axis (yaw)
- w: angular velocity
@outputs:
--------
- drpy[0]: rotation rate in z-axis
- drpy[1]: rotation rate in y-axis
- drpy[2]: rotation rate in x-axis
"""
E0 = np.array( [[0, -np.sin(rpy[0]), np.cos(rpy[0])*np.cos(rpy[1])], \
[0, np.cos(rpy[0]), np.sin(rpy[0])*np.cos(rpy[1])], \
[1, 0, -np.sin(rpy[1]) ]])
drpy = np.dot(inv(E0), w)
return drpy
def rpy2angularAccel(rpy, drpy, ddrpy):
"""
@info: compute angular acceleration (dw) from euler angles (roll, pitch and yaw) and their first and second derivatives
@inputs:
-------
- rpy[0]: rotation in z-axis (roll)
- rpy[1]: rotation in y-axis (pitch)
- rpy[2]: rotation in x-axis (yaw)
- drpy[0]: rotation speed in z-axis
- drpy[1]: rotation speed in y-axis
- drpy[2]: rotation speed in x-axis
- ddrpy[0]: rotation acceleration in z-axis
- ddrpy[1]: rotation acceleration in y-axis
- ddrpy[2]: rotation acceleration in x-axis
@outputs:
--------
- dw: angular acceleration
"""
E0 = np.array( [[0, -np.sin(rpy[0]), np.cos(rpy[0])*np.cos(rpy[1])], \
[0, np.cos(rpy[0]), np.sin(rpy[0])*np.cos(rpy[1])], \
[1, 0, -np.sin(rpy[1]) ]])
E1 = np.array( [[0, -np.cos(rpy[0])*drpy[0], -np.sin(rpy[0])*drpy[0]*np.cos(rpy[1])-np.cos(rpy[0])*np.sin(rpy[1])*drpy[1]], \
[0, -np.sin(rpy[0])*drpy[0], np.cos(rpy[0])*drpy[0]*np.cos(rpy[1])-np.sin(rpy[0])*np.sin(rpy[1])*drpy[1]], \
[0, 0, -np.cos(rpy[1])*drpy[1]]])
dw = np.dot(E0, ddrpy) + np.dot(E1, drpy)
return dw
#Code based on Andres code from November 2017
import numpy as np
from six.moves import xrange
def part2dens3d(part_pos, box_l, bin_x=128):
"""
Calculate 3D matter density using numpy histograms
:param part_pos: particle positions in the shape (N, D), where N is particle number and D is dimension
:param box_l: box length in comoving Mpc/h
:param bin_x: desired bins per axis for the histogram
:return: density field
"""
hist, _edges = np.histogramdd(np.vstack((part_pos[:, 0], part_pos[:, 1], part_pos[:, 2])).T,
bins=bin_x, range=[[0, box_l], [0, box_l], [0, box_l]])
del _edges
return hist
def part2dens2d(part_pos, box_l, bin_x=128):
"""
Calculate 2D matter density using numpy histograms
:param part_pos: particle positions in the shape (N, D), where N is particle number and D is dimension
:param box_l: box length in comoving Mpc/h
:param bin_x: desired bins per axis for the histogram
:return: density field
"""
hist, _edgex, _edgey = np.histogram2d(part_pos[:, 0], part_pos[:, 1], bins=bin_x, range=[[0, box_l], [0, box_l]])
del _edgex, _edgey
return hist
def dens2overdens(density, mean_density=None):
"""
Calculate the overdensity corresponding to a density field
:param density: input density field
:param mean_density: if defined normalisation is calculated according to (density - mean(density)) / mean_density
:return: overdensity field
"""
#assert np.ndim(density) == 3, 'density is not 3D'
if mean_density:
delta = (density - np.mean(density)) / mean_density
else:
mean_density = np.mean(density)
if mean_density == 0.:
delta = np.zeros(shape=density.shape)
else:
delta = density / mean_density - 1.
return delta
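# Illustrative usage (added example): a random particle catalogue binned into a
# coarse density field and converted to an overdensity field, whose mean is
# approximately zero by construction; the box size and particle count below are
# arbitrary.
def _demo_overdensity(box_l=100.0, n_part=10000, bin_x=16):
    np.random.seed(0)
    part_pos = np.random.rand(n_part, 3) * box_l
    density = part2dens3d(part_pos, box_l, bin_x=bin_x)
    delta = dens2overdens(density)
    return density.sum(), delta.mean()  # (n_part, ~0.0)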
def power_spectrum(field_x, box_l, bin_k, field_y=None, log_sampling=True):
"""
Measures the mass power spectrum of a 2D or 3D input field for a given number of bins in Fourier space.
:param field_x: 3D input field to compute the power spectrum of (typically the overdensity field), dimensionless
:param box_l: box length of image/cube/box or whatever, units of Mpc or Mpc/h
:param bin_k: number of bins in Fourier space
:return: power_k, k: 1D mass power spectrum of field_x, same units as [box_l]**3 and corresponding k values
"""
# assert np.ndim(field_x) == 3, 'field_x is not 3D'
box_pix = np.size(field_x, axis=0) # pixel number per axis
box_dim = np.ndim(field_x) # dimension
# This first 'paragraph' is to create masks of indices corresponding to one Fourier bin each
_freq = np.fft.fftfreq(n=box_pix, d=box_l / box_pix) * 2 * np.pi
_rfreq = np.fft.rfftfreq(n=box_pix, d=box_l / box_pix) * 2 * np.pi
if box_dim == 2:
_kx, _ky = np.meshgrid(_freq, _rfreq, indexing='ij')
_k_abs = np.sqrt(_kx ** 2. + _ky ** 2.)
elif box_dim == 3:
_kx, _ky, _kz = np.meshgrid(_freq, _freq, _rfreq, indexing='ij')
_k_abs = np.sqrt(_kx ** 2. + _ky ** 2. + _kz ** 2.)
else:
raise ValueError('field_x is not 2D or 3D')
# The following complicated line is actually only creating a 1D array spanning k-space logarithmically from minimum _k_abs to maximum.
# To start slightly below the minimum and finish slightly above the maximum I use ceil and floor.
# To ceil and floor not to the next integer but to the next 15th digit, I multiply by 1e15 before flooring and divide afterwards.
# Since the ceiled/floored value is actually the exponent used for the logspace, going to the next integer would be way too much.
if log_sampling:
_k_log = np.logspace(np.floor(np.log10(np.min(_k_abs[1:])) * 1e15) / 1e15, np.ceil(np.log10(np.max(_k_abs)) * 1e15) / 1e15, bin_k + 1)
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for davidson.py."""
import logging
import unittest
import numpy
import numpy.linalg
import scipy.linalg
import scipy.sparse
import scipy.sparse.linalg
from openfermion.ops.operators import QubitOperator
from openfermion.linalg.davidson import (Davidson, DavidsonOptions,
QubitDavidson, SparseDavidson,
append_random_vectors, orthonormalize)
def generate_matrix(dimension):
"""Generates matrix with shape (dimension, dimension)."""
numpy.random.seed(dimension)
rand = numpy.array(numpy.random.rand(dimension, dimension))
numpy.random.seed(dimension * 2)
diag = numpy.array(range(dimension)) + numpy.random.rand(dimension)
# Makes sure matrix is hermitian, which is symmetric when real.
matrix = rand + rand.conj().T + numpy.diag(diag)
return matrix
def generate_sparse_matrix(dimension, diagonal_factor=30):
"""Generates a hermitian sparse matrix with specified dimension."""
numpy.random.seed(dimension)
diagonal = sorted(numpy.array(numpy.random.rand(dimension)))
numpy.random.seed(dimension - 1)
off_diagonal = numpy.array(numpy.random.rand(dimension - 1))
# Makes sure matrix is hermitian, which is symmetric when real.
matrix = numpy.diag(diagonal) * diagonal_factor
for row in range(dimension - 2):
col = row + 1
matrix[row, col] = off_diagonal[row]
matrix[col, row] = off_diagonal[row]
return matrix
def get_difference(linear_operator, eigen_values, eigen_vectors):
"""Get difference of M * v - lambda v."""
return numpy.max(
numpy.abs(
linear_operator.dot(eigen_vectors) - eigen_vectors * eigen_values))
class DavidsonOptionsTest(unittest.TestCase):
""""Tests for DavidsonOptions class."""
def setUp(self):
"""Sets up all variables needed for DavidsonOptions class."""
self.max_subspace = 10
self.max_iterations = 100
self.eps = 1e-7
self.davidson_options = DavidsonOptions(self.max_subspace,
self.max_iterations, self.eps)
def test_init(self):
"""Tests vars in __init__()."""
self.assertEqual(self.davidson_options.max_subspace, self.max_subspace)
self.assertEqual(self.davidson_options.max_iterations,
self.max_iterations)
self.assertAlmostEqual(self.davidson_options.eps, self.eps, places=8)
self.assertFalse(self.davidson_options.real_only)
def test_set_dimension_small(self):
"""Tests set_dimension() with a small dimension."""
dimension = 6
self.davidson_options.set_dimension(dimension)
self.assertEqual(self.davidson_options.max_subspace, dimension + 1)
def test_set_dimension_large(self):
"""Tests set_dimension() with a large dimension not affecting
max_subspace."""
self.davidson_options.set_dimension(60)
self.assertEqual(self.davidson_options.max_subspace, self.max_subspace)
def test_invalid_max_subspace(self):
"""Test for invalid max_subspace."""
with self.assertRaises(ValueError):
DavidsonOptions(max_subspace=1)
def test_invalid_max_iterations(self):
"""Test for invalid max_iterations."""
with self.assertRaises(ValueError):
DavidsonOptions(max_iterations=0)
def test_invalid_eps(self):
"""Test for invalid eps."""
with self.assertRaises(ValueError):
DavidsonOptions(eps=-1e-6)
def test_invalid_dimension(self):
"""Test for invalid dimension."""
with self.assertRaises(ValueError):
self.davidson_options.set_dimension(0)
class DavidsonTest(unittest.TestCase):
""""Tests for Davidson class with a real matrix."""
def setUp(self):
"""Sets up all variables needed for Davidson class."""
dimension = 10
matrix = generate_matrix(dimension)
def mat_vec(vec):
"""Trivial matvec with a numpy matrix."""
return numpy.dot(matrix, vec)
self.linear_operator = scipy.sparse.linalg.LinearOperator(
(dimension, dimension), matvec=mat_vec)
self.diagonal = numpy.diag(matrix)
self.davidson = Davidson(linear_operator=self.linear_operator,
linear_operator_diagonal=self.diagonal)
self.matrix = matrix
self.initial_guess = numpy.eye(self.matrix.shape[0], 10)
self.eigen_values = numpy.array([
1.15675714,
1.59132505,
2.62268014,
4.44533793,
5.3722743,
5.54393114,
7.73652405,
8.50089897,
9.4229309,
15.54405993,
])
def test_init(self):
"""Test for __init__()."""
davidson = self.davidson
self.assertAlmostEqual(
numpy.max(numpy.abs(self.matrix - self.matrix.T)), 0)
self.assertTrue(davidson.linear_operator)
self.assertTrue(
numpy.allclose(davidson.linear_operator_diagonal, self.diagonal))
# Options default values except max_subspace.
self.assertEqual(davidson.options.max_subspace, 11)
self.assertAlmostEqual(davidson.options.eps, 1e-6, places=8)
self.assertFalse(davidson.options.real_only)
def test_with_built_in(self):
"""Compare with eigenvalues from built-in functions."""
eigen_values, _ = numpy.linalg.eig(self.matrix)
eigen_values = sorted(eigen_values)
self.assertTrue(numpy.allclose(eigen_values, self.eigen_values))
# Checks for eigh() function.
eigen_values, eigen_vectors = numpy.linalg.eigh(self.matrix)
self.assertAlmostEqual(
get_difference(self.davidson.linear_operator, eigen_values,
eigen_vectors), 0)
def test_lowest_invalid_operator(self):
"""Test for get_lowest_n() with invalid linear operator."""
with self.assertRaises(ValueError):
Davidson(None, numpy.eye(self.matrix.shape[0], 8))
def test_lowest_zero_n(self):
"""Test for get_lowest_n() with invalid n_lowest."""
with self.assertRaises(ValueError):
self.davidson.get_lowest_n(0)
def test_lowest_invalid_shape(self):
"""Test for get_lowest_n() with invalid dimension for initial guess."""
with self.assertRaises(ValueError):
self.davidson.get_lowest_n(
1, numpy.ones((self.matrix.shape[0] * 2, 1), dtype=complex))
def test_get_lowest_n_trivial_guess(self):
"""Test for get_lowest_n() with trivial initial guess."""
with self.assertRaises(ValueError):
self.davidson.get_lowest_n(
1, numpy.zeros((self.matrix.shape[0], 1), dtype=complex))
def test_get_lowest_fail(self):
"""Test for get_lowest_n() with n_lowest = 1."""
n_lowest = 1
initial_guess = self.initial_guess[:, :n_lowest]
success, eigen_values, _ = self.davidson.get_lowest_n(n_lowest,
initial_guess,
max_iterations=2)
self.assertTrue(not success)
self.assertTrue(numpy.allclose(eigen_values, numpy.array([1.41556103])))
def test_get_lowest_with_default(self):
"""Test for get_lowest_n() with default n_lowest = 1."""
numpy.random.seed(len(self.eigen_values))
success, eigen_values, _ = self.davidson.get_lowest_n()
self.assertTrue(success)
self.assertTrue(numpy.allclose(eigen_values, self.eigen_values[:1]))
def test_get_lowest_one(self):
"""Test for get_lowest_n() with n_lowest = 1."""
n_lowest = 1
initial_guess = self.initial_guess[:, :n_lowest]
success, eigen_values, _ = self.davidson.get_lowest_n(n_lowest,
initial_guess,
max_iterations=10)
self.assertTrue(success)
self.assertTrue(
numpy.allclose(eigen_values, self.eigen_values[:n_lowest]))
def test_get_lowest_two(self):
"""Test for get_lowest_n() with n_lowest = 2.
See the iteration results (eigenvalues and max error) below:
[1.87267714 4.06259537] 3.8646520980719212
[1.28812931 2.50316266] 1.548676934730246
[1.16659255 1.82600658] 0.584638880856119
[1.15840263 1.65254981] 0.4016803134102507
[1.15675714 1.59132505] 0
"""
n_lowest = 2
initial_guess = self.initial_guess[:, :n_lowest]
success, eigen_values, eigen_vectors = self.davidson.get_lowest_n(
n_lowest, initial_guess, max_iterations=5)
self.assertTrue(success)
self.assertTrue(
numpy.allclose(eigen_values, self.eigen_values[:n_lowest]))
self.assertTrue(
numpy.allclose(self.davidson.linear_operator * eigen_vectors,
eigen_vectors * eigen_values))
def test_get_lowest_two_subspace(self):
"""Test for get_lowest_n() with n_lowest = 2.
See the iteration results (eigenvalues and max error) below:
[1.87267714 4.06259537] 3.8646520980719212
[1.28812931 2.50316266] 1.548676934730246
[1.16659255 1.82600658] 0.584638880856119
[1.15947254 1.69773006] 0.5077687725257688
[1.1572995 1.61393264] 0.3318982487563453
"""
self.davidson.options.max_subspace = 8
expected_eigen_values = numpy.array([1.1572995, 1.61393264])
n_lowest = 2
initial_guess = self.initial_guess[:, :n_lowest]
success, eigen_values, eigen_vectors = self.davidson.get_lowest_n(
n_lowest, initial_guess, max_iterations=5)
self.assertTrue(not success)
self.assertTrue(numpy.allclose(eigen_values, expected_eigen_values))
self.assertFalse(
numpy.allclose(self.davidson.linear_operator * eigen_vectors,
eigen_vectors * eigen_values))
def test_get_lowest_six(self):
"""Test for get_lowest_n() with n_lowest = 6."""
n_lowest = 6
initial_guess = self.initial_guess[:, :n_lowest]
success, eigen_values, _ = self.davidson.get_lowest_n(n_lowest,
initial_guess,
max_iterations=2)
self.assertTrue(success)
self.assertTrue(
numpy.allclose(eigen_values, self.eigen_values[:n_lowest]))
def test_get_lowest_all(self):
"""Test for get_lowest_n() with n_lowest = 10."""
n_lowest = 10
initial_guess = self.initial_guess[:, :n_lowest]
success, eigen_values, _ = self.davidson.get_lowest_n(n_lowest,
initial_guess,
max_iterations=1)
self.assertTrue(success)
self.assertTrue(
numpy.allclose(eigen_values, self.eigen_values[:n_lowest]))
class QubitDavidsonTest(unittest.TestCase):
""""Tests for QubitDavidson class with a QubitOperator."""
def setUp(self):
"""Sets up all variables needed for QubitDavidson class."""
self.coefficient = 2
self.n_qubits = 12
def test_get_lowest_n(self):
"""Test for get_lowest_n()."""
dimension = 2**self.n_qubits
qubit_operator = QubitOperator.zero()
for i in range(min(self.n_qubits, 4)):
numpy.random.seed(dimension + i)
qubit_operator += QubitOperator(((i, 'Z'),),
numpy.random.rand(1)[0])
qubit_operator *= self.coefficient
davidson = QubitDavidson(qubit_operator, self.n_qubits)
n_lowest = 6
numpy.random.seed(dimension)
initial_guess = numpy.random.rand(dimension, n_lowest)
success, eigen_values, eigen_vectors = davidson.get_lowest_n(
n_lowest, initial_guess, max_iterations=20)
expected_eigen_values = -3.80376934 * numpy.ones(n_lowest)
import os
import datetime
import time
import cv2
import numpy as np
import json
import logging
import argparse
from scipy.ndimage.filters import rank_filter
import utils
def low_pass_filter(img, low_pass_fraction = 0.3, **kwargs):
rows, cols = img.shape
crow, ccol = rows // 2 , cols // 2 # center point
row_lp = int(crow * low_pass_fraction)
col_lp = int(ccol * low_pass_fraction)
# Transform to Fourier space
f = np.fft.fft2(img)
# -*- coding: utf-8 -*-
from datetime import timedelta
from distutils.version import LooseVersion
import numpy as np
import pytest
import pandas as pd
import pandas.util.testing as tm
from pandas import (
DatetimeIndex, Int64Index, Series, Timedelta, TimedeltaIndex, Timestamp,
date_range, timedelta_range
)
from pandas.errors import NullFrequencyError
@pytest.fixture(params=[pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)],
ids=str)
def delta(request):
# Several ways of representing two hours
return request.param
@pytest.fixture(params=['B', 'D'])
def freq(request):
return request.param
class TestTimedeltaIndexArithmetic(object):
# Addition and Subtraction Operations
# -------------------------------------------------------------
# TimedeltaIndex.shift is used by __add__/__sub__
def test_tdi_shift_empty(self):
# GH#9903
idx = pd.TimedeltaIndex([], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
tm.assert_index_equal(idx.shift(3, freq='H'), idx)
def test_tdi_shift_hours(self):
# GH#9903
idx = pd.TimedeltaIndex(['5 hours', '6 hours', '9 hours'], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
exp = pd.TimedeltaIndex(['8 hours', '9 hours', '12 hours'], name='xxx')
tm.assert_index_equal(idx.shift(3, freq='H'), exp)
exp = pd.TimedeltaIndex(['2 hours', '3 hours', '6 hours'], name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='H'), exp)
def test_tdi_shift_minutes(self):
# GH#9903
idx = pd.TimedeltaIndex(['5 hours', '6 hours', '9 hours'], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='T'), idx)
exp = pd.TimedeltaIndex(['05:03:00', '06:03:00', '9:03:00'],
name='xxx')
tm.assert_index_equal(idx.shift(3, freq='T'), exp)
exp = pd.TimedeltaIndex(['04:57:00', '05:57:00', '8:57:00'],
name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='T'), exp)
def test_tdi_shift_int(self):
# GH#8083
trange = pd.to_timedelta(range(5), unit='d') + pd.offsets.Hour(1)
result = trange.shift(1)
expected = TimedeltaIndex(['1 days 01:00:00', '2 days 01:00:00',
'3 days 01:00:00',
'4 days 01:00:00', '5 days 01:00:00'],
freq='D')
tm.assert_index_equal(result, expected)
def test_tdi_shift_nonstandard_freq(self):
# GH#8083
trange = pd.to_timedelta(range(5), unit='d') + pd.offsets.Hour(1)
result = trange.shift(3, freq='2D 1s')
expected = TimedeltaIndex(['6 days 01:00:03', '7 days 01:00:03',
'8 days 01:00:03', '9 days 01:00:03',
'10 days 01:00:03'], freq='D')
tm.assert_index_equal(result, expected)
def test_shift_no_freq(self):
# GH#19147
tdi = TimedeltaIndex(['1 days 01:00:00', '2 days 01:00:00'], freq=None)
with pytest.raises(NullFrequencyError):
tdi.shift(2)
# -------------------------------------------------------------
def test_ufunc_coercions(self):
# normal ops are also tested in tseries/test_timedeltas.py
idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
freq='2H', name='x')
for result in [idx * 2, np.multiply(idx, 2)]:
    assert isinstance(result, TimedeltaIndex)
    exp = TimedeltaIndex(['4H', '8H', '12H', '16H', '20H'], freq='4H', name='x')
    tm.assert_index_equal(result, exp)
    assert result.freq == '4H'
"""
This module provides dictionaries for generating
`~matplotlib.colors.LinearSegmentedColormap`, and a dictionary of these
dictionaries.
"""
import pathlib
import matplotlib.colors as colors
import numpy as np
import astropy.units as u
__all__ = [
'aia_color_table', 'sswidl_lasco_color_table', 'eit_color_table',
'sxt_color_table', 'xrt_color_table', 'trace_color_table',
'sot_color_table', 'hmi_mag_color_table', 'suvi_color_table',
'rhessi_color_table', 'std_gamma_2', 'euvi_color_table', 'solohri_lya1216_color_table',
]
CMAP_DATA_DIR = pathlib.Path(__file__).parent.absolute() / 'data'
def create_cdict(r, g, b):
"""
Create the color tuples in the correct format.
"""
i = np.linspace(0, 1, r.size)
cdict = {name: list(zip(i, el / 255.0, el / 255.0))
for el, name in [(r, 'red'), (g, 'green'), (b, 'blue')]}
return cdict
def _cmap_from_rgb(r, g, b, name):
cdict = create_cdict(r, g, b)
return colors.LinearSegmentedColormap(name, cdict)
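# Illustrative usage (added example, not part of the sunpy API): build a simple
# grayscale colormap from three identical 0-255 ramps using the helpers above.
def _demo_gray_cmap():
    ramp = np.arange(256)
    return _cmap_from_rgb(ramp, ramp, ramp, 'demo gray')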
def cmap_from_rgb_file(name, fname):
"""
Create a colormap from a RGB .csv file.
The .csv file must have 3 equal-length columns of integer data, with values
between 0 and 255, which are the red, green, and blue values for the colormap.
Parameters
----------
name : str
Name of the colormap.
fname : str
Filename of data file. Relative to the sunpy colormap data directory.
Returns
-------
matplotlib.colors.LinearSegmentedColormap
"""
data = np.loadtxt(CMAP_DATA_DIR / fname, delimiter=',')
if data.shape[1] != 3:
raise RuntimeError(f'RGB data files must have 3 columns (got {data.shape[1]})')
return _cmap_from_rgb(data[:, 0], data[:, 1], data[:, 2], name)
def get_idl3():
# The following values describe color table 3 for IDL (Red Temperature)
return np.loadtxt(CMAP_DATA_DIR / 'idl_3.csv', delimiter=',')
def solohri_lya1216_color_table():
solohri_lya1216 = get_idl3()
solohri_lya1216[:, 2] = solohri_lya1216[:, 0] * np.linspace(0, 1, 256)
return _cmap_from_rgb(*solohri_lya1216.T, 'SolO EUI HRI Lyman Alpha')
def create_aia_wave_dict():
idl_3 = get_idl3()
r0, g0, b0 = idl_3[:, 0], idl_3[:, 1], idl_3[:, 2]
c0 = np.arange(256, dtype='f')
c1 = (np.sqrt(c0) * np.sqrt(255.0)).astype('f')
c2 = (np.arange(256)**2 / 255.0).astype('f')
c3 = ((c1 + c2 / 2.0) * 255.0 / (c1.max() + c2.max() / 2.0)).astype('f')
aia_wave_dict = {
1600*u.angstrom: (c3, c3, c2),
1700*u.angstrom: (c1, c0, c0),
4500*u.angstrom: (c0, c0, b0 / 2.0),
94*u.angstrom: (c2, c3, c0),
131*u.angstrom: (g0, r0, r0),
171*u.angstrom: (r0, c0, b0),
193*u.angstrom: (c1, c0, c2),
211*u.angstrom: (c1, c0, c3),
304*u.angstrom: (r0, g0, b0),
335*u.angstrom: (c2, c0, c1)
}
return aia_wave_dict
@u.quantity_input
def aia_color_table(wavelength: u.angstrom):
"""
Returns one of the fundamental color tables for SDO AIA images.
Based on aia_lct.pro part of SDO/AIA on SSWIDL written by <NAME> (2010/04/12).
Parameters
----------
wavelength : `~astropy.units.quantity`
Wavelength for the desired AIA color table.
"""
aia_wave_dict = create_aia_wave_dict()
try:
r, g, b = aia_wave_dict[wavelength]
except KeyError:
raise ValueError("Invalid AIA wavelength. Valid values are "
"1600,1700,4500,94,131,171,193,211,304,335.")
return _cmap_from_rgb(r, g, b, 'SDO AIA {:s}'.format(str(wavelength)))
@u.quantity_input
def eit_color_table(wavelength: u.angstrom):
"""
Returns one of the fundamental color tables for SOHO EIT images.
"""
# SOHO EIT Color tables
# EIT 171 IDL Name EIT Dark Bot Blue
# EIT 195 IDL Name EIT Dark Bot Green
# EIT 284 IDL Name EIT Dark Bot Yellow
# EIT 304 IDL Name EIT Dark Bot Red
try:
color = {171*u.angstrom: 'dark_blue', 195*u.angstrom: 'dark_green',
284*u.angstrom: 'yellow', 304*u.angstrom: 'dark_red',
}[wavelength]
except KeyError:
raise ValueError("Invalid EIT wavelength. Valid values are "
"171, 195, 284, 304.")
return cmap_from_rgb_file('SOHO EIT {:s}'.format(str(wavelength)), f'eit_{color}.csv')
def sswidl_lasco_color_table(number):
"""
Returns one of the SSWIDL-defined color tables for SOHO LASCO images.
This function is included to allow users to access the SSWIDL-
defined LASCO color tables provided by SunPy. It is recommended to
use the function 'lasco_color_table' to obtain color tables for use
with LASCO data and Helioviewer JP2 images.
"""
try:
return cmap_from_rgb_file(f'SOHO LASCO C{number}', f'lasco_c{number}.csv')
except OSError:
raise ValueError("Invalid LASCO number. Valid values are 2, 3.")
# Translated from the JP2Gen IDL SXT code lct_yla_gold.pro. Might be better
# to explicitly copy the numbers from the IDL calculation. This is a little
# more compact.
sxt_gold_r = np.concatenate((np.linspace(0, 255, num=185,
endpoint=False), 255 * np.ones(71)))
sxt_gold_g = 255 * (np.arange(256)**1.25) / (255.0**1.25)
sxt_gold_b = np.concatenate((np.zeros(185), 255.0 * np.arange(71) / 71.0))
grayscale = np.arange(256)
grayscale.setflags(write=False)
def sxt_color_table(sxt_filter):
"""
Returns one of the fundamental color tables for Yohkoh SXT images.
"""
try:
r, g, b = {
'al': (sxt_gold_r, sxt_gold_g, sxt_gold_b),
'wh': (grayscale, grayscale, grayscale)
}[sxt_filter]
except KeyError:
raise ValueError("Invalid SXT filter type number. Valid values are "
"'al', 'wh'.")
return _cmap_from_rgb(r, g, b, 'Yohkoh SXT {:s}'.format(sxt_filter.title()))
def xrt_color_table():
"""
Returns the color table used for all Hinode XRT images.
"""
idl_3 = get_idl3()
r0, g0, b0 = idl_3[:, 0], idl_3[:, 1], idl_3[:, 2]
return _cmap_from_rgb(r0, g0, b0, 'Hinode XRT')
def cor_color_table(number):
"""
Returns one of the fundamental color tables for STEREO coronagraph images.
"""
# STEREO COR Color tables
if number not in [1, 2]:
raise ValueError("Invalid COR number. Valid values are " "1, 2.")
return cmap_from_rgb_file(f'STEREO COR{number}', f'stereo_cor{number}.csv')
def trace_color_table(measurement):
"""
Returns one of the standard color tables for TRACE JP2 files.
"""
if measurement == 'WL':
return cmap_from_rgb_file(f'TRACE {measurement}', 'grayscale.csv')
try:
return cmap_from_rgb_file(f'TRACE {measurement}', f'trace_{measurement}.csv')
except OSError:
raise ValueError(
"Invalid TRACE filter waveband passed. Valid values are "
"171, 195, 284, 1216, 1550, 1600, 1700, WL")
def sot_color_table(measurement):
"""
Returns one of the standard color tables for SOT files (following osdc
convention).
The relations between observation and color have been defined in
hinode.py
"""
idl_3 = get_idl3()
r0, g0, b0 = idl_3[:, 0], idl_3[:, 1], idl_3[:, 2]
try:
r, g, b = {
'intensity': (r0, g0, b0),
}[measurement]
except KeyError:
raise ValueError(
"Invalid (or not supported) SOT type. Valid values are: "
"intensity")
return _cmap_from_rgb(r, g, b, f'Hinode SOT {measurement:s}')
def iris_sji_color_table(measurement, aialike=False):
"""
Return the standard color table for IRIS SJI files.
"""
# base vectors for IRIS SJI color tables
c0 = np.arange(0, 256)
c1 = (np.sqrt(c0) * np.sqrt(255)).astype(np.uint8)
c2 = (c0**2 / 255.).astype(np.uint8)
c3 = ((c1 + c2 / 2.) * 255. / (np.max(c1) + np.max(c2) / 2.)).astype(np.uint8)
from satpy import Scene, find_files_and_readers
import sys
import os
import subprocess as sp
import multiprocessing as mp
import numpy as np
import matplotlib.pyplot as plt
import cartopy.crs as ccrs
from pyhdf.SD import SD, SDC
from itertools import repeat
import pandas as pd
import pickle
import datetime
from time import time
from pyorbital.orbital import get_observer_look
from pyorbital.astronomy import get_alt_az
sys.path.append(
os.path.dirname(os.path.realpath(__file__))
)
from him8analysis import read_h8_folder, halve_res, quarter_res, generate_band_arrays
from caliop_tools import number_to_bit, custom_feature_conversion, calipso_to_datetime
### Global Variables ###
band_dict={
'1': 0.4703, # all channels are in microns
'2': 0.5105,
'3': 0.6399,
'4': 0.8563,
'5': 1.6098,
'6': 2.257,
'7': 3.8848,
'8': 6.2383,
'9': 6.9395,
'10': 7.3471,
'11': 8.5905,
'12': 9.6347,
'13': 10.4029,
'14': 11.2432,
'15': 12.3828,
'16': 13.2844
}
### General Tools ###
def write_list(input_list, list_filename, list_dir):
"""
Writes a list to a .txt file. Specify the directory it is stored in.
:param input_list: list type.
:param list_filename: str type. The filename of the .txt file to be written.
:param list_dir: str type. Full path to the directory the file is stored in.
:return: None. Writes the list to a .txt file in the given directory.
"""
if list_filename[-4:] != '.txt': # Ensure the filename has a .txt extension
list_filename += '.txt'
full_filename = os.path.join(list_dir, list_filename)
print(full_filename)
with open(full_filename, 'w') as f:
f.writelines('%s\n' % item for item in input_list)
print('List stored')
def read_list(list_filename, list_dir):
"""
Reads a list from a .txt file. Specify the directory it is stored in.
:param list_filename: str type. The filename of the .txt file to be read.
:param list_dir: str type. Full path to the directory the file is stored in.
:return: list type. List of strings from the .txt file.
"""
if list_filename[-4:] != '.txt': # Ensure the filename has a .txt extension
list_filename += '.txt'
full_name = os.path.join(list_dir, list_filename)
with open(full_name, 'r') as f: # Open and read the .txt file
list_of_lines = [line.rstrip() for line in f.readlines()] # For each line, remove newline character and store in a list
return list_of_lines
### Processing tools ###
def find_possible_collocated_him_folders(caliop_overpass):
"""
Will find Himawari folders that fall within the time range of the given CALIOP profile.
:param caliop_overpass: Loaded CALIOP .hdf file to collocate with Himawari data
:return: list of str type. Names of the folders that should collocate with the
given CALIOP profile
"""
cal_time = caliop_overpass.select('Profile_UTC_Time').get()
cal_time = calipso_to_datetime(cal_time)
start = cal_time[0][0]
end = cal_time[-1][-1]
print('Raw Start: %s' % datetime.datetime.strftime(start, '%Y%m%d_%H%M'))
print('Raw End: %s' % datetime.datetime.strftime(end, '%Y%m%d_%H%M'))
cal_lats = caliop_overpass.select('Latitude').get()
cal_lons = caliop_overpass.select('Longitude').get()
hemisphere_mask = (cal_lats <= 81.1) & (cal_lats >= -81.1) & \
(((cal_lons >= 60.6) & (cal_lons <= 180.)) | \
((cal_lons >= -180.) & (cal_lons <= -138.0))) # Due to looking at Eastern Hemisphere
cal_time = cal_time[hemisphere_mask]
print('MASKED START LAT/LON: (%s, %s)' % (cal_lats[hemisphere_mask][0], cal_lons[hemisphere_mask][0]))
print('MASKED END LAT/LON: (%s, %s)' % (cal_lats[hemisphere_mask][-1], cal_lons[hemisphere_mask][-1]))
if len(cal_time) == 0:
return None
print(len(cal_time))
start = cal_time[0]
end = cal_time[-1]
print('Masked Start: %s' % datetime.datetime.strftime(start, '%Y%m%d_%H%M'))
print('Masked End: %s' % datetime.datetime.strftime(end, '%Y%m%d_%H%M'))
start -= datetime.timedelta(minutes=start.minute % 10,
seconds=start.second,
microseconds=start.microsecond)
end -= datetime.timedelta(minutes=end.minute % 10,
seconds=end.second,
microseconds=end.microsecond)
print('First Folder: %s' % start)
print('Last Folder: %s' % end)
folder_names = []
while start <= end:
folder_name = datetime.datetime.strftime(start, '%Y%m%d_%H%M')
folder_names.append(folder_name)
start += datetime.timedelta(minutes=10)
return folder_names
def get_him_folders(him_names, data_dir):
"""
Finds the Himawari folders given in the list in the mdss on NCI.
:param him_names: list of str types of Himawari folder names.
:param data_dir: str type. Full path to the directory where the
data will be stored.
:return: Saves and un-tars Himawari data from mdss into a readable
folder system for further analysis
"""
for name in him_names:
year = name[:4]
month = name[4:6]
day = name[6:8]
filename = 'HS_H08_%s_FLDK.tar' % name
path = os.path.join('satellite/raw/ahi/FLDK', year, month, day, filename)
if sp.getoutput('mdss -P rr5 ls %s' % path) == path:
print('%s available' % name)
destination = os.path.join(data_dir, name)
if not os.path.isdir(destination):
os.mkdir(destination)
os.system('mdss -P rr5 get %s %s' % (path, destination))
else:
print('%s unavailable' % name)
def clear_him_folders(him_names, data_dir):
"""
Finds the Himawari folders given in the list in the mdss on NCI.
:param him_names: list of str types of Himawari folder names.
:param data_dir: str type. Full path to the directory where the
data will be stored.
:return: Removes the Himawari data folders w/n the him_name list
from /g/data/k10/dr1709/ahi/ directory.
"""
for name in him_names:
destination = os.path.join(data_dir, name)
if os.path.isdir(destination):
os.system('rm -r %s' % destination)
def define_collocation_area(geo_lons, geo_lats, central_geo_lon,
lidar_lons, lidar_lats, spatial_tolerance):
### Shift meridian to be defined by geostationary satellite ###
shifted_geo_lons = geo_lons - central_geo_lon # For geostationary satellite coordinates
shifted_geo_lons[shifted_geo_lons < -180.] += 360.
shifted_geo_lons[shifted_geo_lons > 180.] -= 360.
shifted_lidar_lons = lidar_lons - central_geo_lon # For active satellite coordinates
shifted_lidar_lons[shifted_lidar_lons < -180.] += 360.
shifted_lidar_lons[shifted_lidar_lons > 180.] -= 360.
### Find limits defined by active satellite ###
min_lidar_lat, max_lidar_lat = np.nanmin(lidar_lats), np.nanmax(lidar_lats)
min_lidar_lon, max_lidar_lon = np.nanmin(shifted_lidar_lons), np.nanmax(shifted_lidar_lons)
### Find area of geostationary satellite defined by limits ###
loc_mask = (geo_lats > (min_lidar_lat - spatial_tolerance)) & \
(geo_lats < (max_lidar_lat + spatial_tolerance)) & \
(shifted_geo_lons > (min_lidar_lon - spatial_tolerance)) & \
(shifted_geo_lons < (max_lidar_lon + spatial_tolerance))
### Return spatial mask for the geostationary data ###
return loc_mask
def small_angle_region(latitudes, longitudes, central_geo_lon, small_angle_value):
### Shift meridian to be defined by geostationary satellite ###
shifted_lons = longitudes - central_geo_lon # For geostationary satellite coordinates
shifted_lons[shifted_lons < -180.] += 360.
shifted_lons[shifted_lons > 180.] -= 360.
region = (shifted_lons < small_angle_value) & (shifted_lons > -small_angle_value) & \
(latitudes < small_angle_value) & (latitudes > -small_angle_value)
return region
def load_obs_angles(root_dir):
"""
Loads satellite surface observation angles from the root directory provided.
:param root_dir: str type. Full path to the root directory where
the observation angle arrays are stored.
:return: Two np.ndarrays of angles --> (azimuth, elevation)
"""
sat_azimuths = np.load(os.path.join(root_dir, 'him8-sat_azimuth_angle.npy'))
sat_elevations = np.load(os.path.join(root_dir, 'him8-sat_elevation_angle.npy'))
return sat_azimuths, sat_elevations
def load_era_dataset(him_name, var_name='t', multilevel=True):
"""
Finds and loads the corresponding era5 data for the Himawari-8 scene.
:param him_name: str type. Himawari-8 scene name.
:return:
"""
from glob import glob
from netCDF4 import Dataset
if multilevel:
level_dir = 'pressure-levels'
level_marker = 'pl'
else:
level_dir = 'single-levels'
level_marker = 'sfc'
path_to_data = os.path.join(
f'/g/data/rt52/era5/{level_dir}/monthly-averaged-by-hour/{var_name}',
him_name[:4],
f'{var_name}_era5_mnth_{level_marker}_{him_name[:6]}01-{him_name[:6]}??.nc'
)
print(path_to_data)
fname = glob(path_to_data)[0]
print(fname)
dst = Dataset(fname)
print(dst)
time_stamp = int(him_name[-4:-2]) - 1
if var_name == '2t':
var_name_mod = 't2m'
elif var_name == 'ci':
var_name_mod = 'siconc'
else:
var_name_mod = var_name
if multilevel:
data_arr_l = dst[var_name_mod][time_stamp, :, 35:686, 958:]
data_arr_r = dst[var_name_mod][time_stamp, :, 35:686, :168]
data_arr = np.dstack((data_arr_l, data_arr_r))
data_arr = np.dstack(tuple([data_arr[n, :, :] for n in range(37)]))
else:
data_arr_l = dst[var_name_mod][time_stamp, 35:686, 958:]
data_arr_r = dst[var_name_mod][time_stamp, 35:686, :168]
data_arr = np.hstack((data_arr_l, data_arr_r))
data_arr[data_arr < 0] = np.nan
lons, lats = np.meshgrid(
np.concatenate((dst['longitude'][958:], dst['longitude'][:168])),
dst['latitude'][35:686],
)
return data_arr, lats, lons
def get_closest_era_profile_mask(him_lat, him_lon, era_lats, era_lons):
"""
Generate a mask for era5 data to locate the closest matching profile to
the input Himawari-8 coordinate.
:param him_lat:
:param him_lon:
:param era_lats:
:param era_lons:
:return:
"""
shifted_him_lon = him_lon - 140.7
if shifted_him_lon < -180:
shifted_him_lon += 360.
shifted_era_lons = era_lons - 140.7
shifted_era_lons[shifted_era_lons < -180.] += 360.
comp_lats = np.abs(era_lats - him_lat)
comp_lons = np.abs(shifted_era_lons - shifted_him_lon)
total_comp = comp_lons + comp_lats
return total_comp == np.nanmin(total_comp)
def parallax_collocation(caliop_overpass, him_scn, caliop_name, him_name):
"""
Collocates a Himawari scene w/ a given CALIOP overpass and returns the
collocated data as a pandas dataframe. The dataframe from this function will
contain repeats and therefore needs further processing to clean the data.
:param caliop_overpass:
:param him_scn:
:param caliop_name:
:param him_name:
:return:
"""
### Load Himawari variables from the scene ###
him_lon = him_scn['B16'].attrs['satellite_longitude'] # Himawari central longitude
him_lat = him_scn['B16'].attrs['satellite_latitude'] # Himawari central latitude
him_alt = him_scn['B16'].attrs['satellite_altitude'] / 1000. # Himawari altitude in km (taken from Earth's CoM)
start_time = him_scn.start_time # Himawari scene scan start time
end_time = him_scn.end_time # Himawari scene scan end time
avg_time = start_time + (end_time - start_time) / 2. # Himawari scene scan middle time
him_lons, him_lats = him_scn['B16'].area.get_lonlats() # Himawari surface lats & lons
him_lons[him_lons == np.inf] = np.nan # Set np.inf values to NaNs
him_lats[him_lats == np.inf] = np.nan # Set np.inf values to NaNs
### Load and hold CALIOP data ###
caliop_data = {}
caliop_triplet_datasets = ['Profile_UTC_Time', # CALIOP pixel scan times
'Latitude', # CALIOP pixel latitudes
'Longitude'] # CALIOP pixel longitudes
caliop_fifteen_datasets = ['Feature_Classification_Flags', # CALIOP pixel vertical feature flags
'Feature_Optical_Depth_532', # CALIOP pixel features' optical depths (532nm)
'Feature_Optical_Depth_1064', # CALIOP pixel features' optical depths (1064nm)
'Layer_IAB_QA_Factor', # CALIOP pixel features' quality assurance values
'CAD_Score', # CALIOP pixel features' cloud vs aerosol score
'Layer_Top_Altitude', # CALIOP pixel features' top altitudes
'Layer_Base_Altitude'] # CALIOP pixel features' base altitudes
caliop_singlet_datasets = ['Tropopause_Height', # CALIOP pixel tropopause heights
'IGBP_Surface_Type', # CALIOP pixel surface types
'DEM_Surface_Elevation'] # CALIOP pixel surface elevation values
all_datasets = caliop_triplet_datasets + \
caliop_fifteen_datasets + \
caliop_singlet_datasets # Full list of ordered dataset names
special_case_datasets = ['Profile_UTC_Time', # Easy-to-reference special case storage
'DEM_Surface_Elevation']
for dataset in caliop_triplet_datasets: # Expand pixel data with 3 sub-values to standard format
dst = caliop_overpass.select(dataset).get().flatten()
if dataset in special_case_datasets:
dst = calipso_to_datetime(dst) # Ensure times are datetime objects
dst = np.repeat(dst, np.full((dst.shape[0]), 15), axis=0).reshape(dst.shape[0], 15)
caliop_data[dataset] = dst
for dataset in caliop_fifteen_datasets: # Expand pixel data with 15 sub-values to standard format
dst = caliop_overpass.select(dataset).get()
dst = np.hstack((dst, dst, dst)).reshape(dst.shape[0]*3, 15)
if dataset == 'Layer_Top_Altitude': # Set fill values to NaNs
dst[dst < -1000.] = np.nan
all_nans = np.all(np.isnan(dst), axis=1) # If a row is all fill values, it implies clear air
dst[all_nans, 0] = 0. # Set clear air height to 0 for calculating observation angles
caliop_data[dataset] = dst
for dataset in caliop_singlet_datasets: # Expand pixel data with single sub-value to standard format
dst = caliop_overpass.select(dataset).get()
if dataset in special_case_datasets: # Special case; sub-value is array, not single float or int
dst = np.repeat(dst, np.full((dst.shape[0]), 3), axis=0)
dst = np.repeat(dst, np.full((dst.shape[0]), 15), axis=0).reshape(dst.shape[0], 15, 4)
if dataset == 'Profile_UTC_Time':
dst = dst.astype('str')
else:
dst = np.repeat(dst, 3)
import numpy as np
import pandas as pd
import pymaid
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
import navis
volume_names = ["PS_Neuropil_manual"]
def rgb2hex(r, g, b):
r = int(255 * r)
g = int(255 * g)
b = int(255 * b)
return "#{:02x}{:02x}{:02x}".format(r, g, b)
def simple_plot_neurons(
neurons,
azim=-90,
elev=-90,
dist=5,
use_x=True,
use_y=True,
use_z=False,
palette=None,
volume_names=volume_names,
ax=None,
autoscale=False,
axes_equal=True,
force_bounds=True,
axis_off=True,
**kwargs,
):
if isinstance(neurons, (list, np.ndarray, pd.Series, pd.Index)):
try:
neuron_ids = [int(n.id) for n in neurons]
except AttributeError:
neuron_ids = neurons
neurons = []
for n in neuron_ids:
try:
neuron = pymaid.get_neuron(n)
neurons.append(neuron)
except:
print(f"Error when retreiving neuron skeleton {n}")
elif isinstance(neurons, navis.NeuronList):
neuron_ids = neurons.id
neuron_ids = [int(n) for n in neuron_ids]
for key, value in palette.items():
if isinstance(value, tuple):
palette[key] = rgb2hex(*value)
# neurons = [pymaid.get_neuron(n) for n in neuron_ids]
# volumes = [pymaid.get_volume(v) for v in volume_names]
colors = np.vectorize(palette.get)(neuron_ids)
plot_mode = "3d"
navis.plot2d(
neurons,
color=colors,
ax=ax,
connectors=False,
method="3d",
autoscale=autoscale,
soma=False,
**kwargs,
)
# plot_volumes(volumes, ax)
if plot_mode == "3d":
ax.azim = azim
ax.elev = elev
ax.dist = dist
if axes_equal:
set_axes_equal(ax, use_y=use_y, use_x=use_x, use_z=use_z)
if axis_off:
ax.axis("off")
if force_bounds:
ax.set_xlim3d((-4500, 110000))
ax.set_ylim3d((-4500, 110000))
return ax
def plot_volumes(volumes, ax):
navis.plot2d(volumes, ax=ax, method="3d", autoscale=False)
for c in ax.collections:
if isinstance(c, Poly3DCollection):
c.set_alpha(0.02)
def set_axes_equal(ax, use_x=True, use_y=True, use_z=True):
"""Make axes of 3D plot have equal scale so that spheres appear as spheres,
cubes as cubes, etc.. This is one possible solution to Matplotlib's
ax.set_aspect('equal') and ax.axis('equal') not working for 3D.
Input
ax: a matplotlib axis, e.g., as output from plt.gca().
"""
# REF: https://stackoverflow.com/questions/13685386/matplotlib-equal-unit-length-with-equal-aspect-ratio-z-axis-is-not-equal-to
x_limits = ax.get_xlim3d()
y_limits = ax.get_ylim3d()
z_limits = ax.get_zlim3d()
x_range = abs(x_limits[1] - x_limits[0])
x_middle = np.mean(x_limits)
y_range = abs(y_limits[1] - y_limits[0])
y_middle = np.mean(y_limits)
z_range = abs(z_limits[1] - z_limits[0])
z_middle = np.mean(z_limits)
# half the maximum range serves as the common plot radius for all enabled axes
plot_radius = 0.5 * max([x_range, y_range, z_range])
if use_x:
    ax.set_xlim3d([x_middle - plot_radius, x_middle + plot_radius])
if use_y:
    ax.set_ylim3d([y_middle - plot_radius, y_middle + plot_radius])
if use_z:
    ax.set_zlim3d([z_middle - plot_radius, z_middle + plot_radius])
"unit tests for log-sum-exp functions"
import unittest
import numpy as np
from numpy import array, arange
from gpfit.maths.logsumexp import lse_implicit, lse_scaled
class TestLSEimplicit1D(unittest.TestCase):
"tests with one-dimensional input"
x = arange(1.0, 31.0).reshape(15, 2)
alpha = arange(1.0, 3.0)
y, dydx, dydalpha = lse_implicit(x, alpha)
def test_y_ndim(self):
self.assertEqual(self.y.ndim, 1)
def test_y_size(self):
self.assertEqual(self.y.size, self.x.shape[0])
def test_dydx_ndim(self):
self.assertEqual(self.dydx.ndim, 2)
def test_dydx_shape_0(self):
self.assertEqual(self.dydx.shape[0], self.x.shape[0])
def test_dydalpha_ndim(self):
self.assertEqual(self.dydalpha.ndim, 2)
def test_dydalpha_size(self):
self.assertEqual(self.dydalpha.shape[0], self.x.shape[0])
class TestLSEimplicit2D(unittest.TestCase):
"tests with 2D input"
K = 4
x = np.random.rand(1000, K)
alpha = array([1.499, 13.703, 3.219, 4.148])
y, dydx, dydalpha = lse_implicit(x, alpha)
def test_dydx_shape(self):
self.assertEqual(self.dydx.shape, self.x.shape)
def test_dydalpha_shape(self):
self.assertEqual(self.dydalpha.shape, self.x.shape)
class TestLSEScaled(unittest.TestCase):
"Test lse_implicit"
x =
|
arange(1.0, 31.0)
|
numpy.arange
|
import random as rand
import numpy as np
import multiprocessing as mp
from scipy.spatial import HalfspaceIntersection, ConvexHull
from scipy.spatial.qhull import QhullError
from dataclasses import dataclass
from operator import mul, add
from functools import reduce
from collections import namedtuple
from kaa.lputil import minLinProg, maxLinProg, LPUtil
from settings import KaaSettings
from kaa.trajectory import Traj, TrajCollection
from settings import KaaSettings
from kaa.log import Output
@dataclass
class VolumeData:
ConvHullVol: float
EnvelopBoxVol: float
class ChebyCenter:
def __init__(self, center, radius):
self.center = center
self.radius = radius
class LinearSystem:
def __init__(self, model, A, b, constr_mat=None):
self.A = A
self.b = b
self.model = model
self.vars = model.vars
self.dim = model.dim
self.constr_mat = constr_mat # Pointer to total constraint mat for LPUtil purposes.
self.randgen = rand.Random(KaaSettings.RandSeed)
"""
Computes and returns the Chebyshev center of parallelotope.
@returns self.dim point marking the Chebyshev center.
"""
@property
def chebyshev_center(self):
'Initialize objective function for Chebyshev intersection LP routine.'
c = [0 for _ in range(self.dim + 1)]
c[-1] = 1
row_norm = np.reshape(np.linalg.norm(self.A, axis=1), (self.A.shape[0], 1))
center_A = np.hstack((self.A, row_norm))
center_pt = maxLinProg(self.model, c, center_A, self.b).x
return ChebyCenter(center_pt[:-1], center_pt[-1])
"""
Volume estimation of system by sampling points and taking ratio.
@params samples: number of samples used to estimate volume
@returns estimated volume of linear system stored in VolDataTuple
"""
@property
def volume(self):
envelop_box_vol = self.calc_vol_envelop_box()
conv_hull_vol = self.calc_vol_conv_hull() if self.dim < 4 else None
return VolumeData(conv_hull_vol, envelop_box_vol)
"""
Find vertices of this linear system.
"""
@property
def vertices(self):
phase_intersect = np.hstack((self.A, -np.asarray([self.b])
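# The source is truncated above. A plausible way to finish the vertex
# computation with the scipy classes imported in this module (an assumption,
# not the original code): stack the halfspaces of {x : A x <= b} as [A | -b]
# and intersect them around an interior point such as the Chebyshev center.
def _vertices_sketch(A, b, interior_pt):
    """Hedged sketch: vertices of the polytope {x : A x <= b}."""
    halfspaces = np.hstack((A, -np.asarray([b]).T))
    try:
        return HalfspaceIntersection(halfspaces, interior_pt).intersections
    except QhullError:
        return None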
"""
Author: <NAME>
Last modified: 10/31/2020
Description:
This module consists of simulations of the spread of multiple
contagions on a single network under the threshold model.
"""
#--------------------------- Imports ------------------------------#
import numpy as np
import mms.utility as mu
from scipy import sparse
#----------------------- Function Definitions ----------------------#
def isolate_threshold_count(A, B, T, k, r = 0):
"""
Description
-----------
This function simulates the spread of multiple contagions on a single network
where each contagion has 2 states (0 or 1).
Contagions are not interrelated.
Parameters
----------
A: scipy array, int {0, 1}
The adjacency matrix of G.
A is sparse
B: scipy array, int {0, 1}
The initial configuration matrix where $B_{vj}$ is the state value of
vertex v for contagion j.
B is sparse
T: numpy array, int
The threshold matrix where $T_{vj}$ is the threshold of vertex v for
contagion j.
k: int
The number of system iterations
r: float, optional
The recovery probability. In each iteration, each vertex has a probability
r of changing its state to 0 for each contagion.
Returns
-------
B: numpy array
The final configuration
"""
# Make all 1s along the diagonal of A (since we are considering the closed neighborhood)
#np.fill_diagonal(A, 1)
# The recovery probability
recovery = False
if r != 0:
recovery = True
# The main loop
for i in range(k):
# matrix operation
B_last = B
B = A @ B - T #B = np.matmul(A, B_last) - T
# update states
B[B >= 0] = 1
B[B < 0] = 0
# If a recovery probability is set
if recovery:
B[np.random.rand(*B.shape) < r] = 0
# if fixed point
if np.array_equal(B, B_last):
print("A fixed point is reached at iteration {}".format(i))
return B
print("Max number of iterations reached")
return B
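# Hypothetical usage sketch (not part of the original module). Dense numpy
# arrays are used here because the boolean-mask updates above require
# ndarrays, even though the docstring mentions sparse inputs.
def _demo_isolate_threshold_count():
    A_demo = np.array([[0, 1, 1],
                       [1, 0, 1],
                       [1, 1, 0]])       # triangle graph
    B_demo = np.array([[1, 0],
                       [0, 0],
                       [0, 1]])          # two contagions, one seed vertex each
    T_demo = np.ones((3, 2), dtype=int)  # threshold of one active neighbor
    return isolate_threshold_count(A_demo, B_demo, T_demo, k=5)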
###########################################################################################
def correlate_threshold_weight(A, B, T, W, k, r = 0):
"""
Description
-----------
This function simulates the spread of multiple contagions on a single network
where each contagion has 2 states (0 or 1).
Contagions are interrelated as described by the third model.
Parameters
----------
A: numpy array, int {0, 1}
The adjacency matrix of G.
A is sparse
B: numpy array, int {0, 1}
The initial configuration matrix where $B_{vj}$ is the state value of
vertex v for contagion j.
B is sparse
T: numpy array, int
The threshold matrix where $T_{vj}$ is the threshold of vertex v for
contagion j.
W: numpy array, float [0, 1]
The weight matrix where $W_{ij}$ is the weight of contagion j w.r.t
contagion i
k: int
The number of system iterations
r: float, optional
The recovery probability. In each iteration, each vertex has a probability
r of changing its state to 0 for each contagion.
Returns
-------
B: numpy array
The final configuration
"""
# Make all 1s along the diagonal of A (since we are considering the closed neighborhood)
#A.setdiag(1)
# The recovery probability
recovery = False
if r != 0:
recovery = True
# Take the transpose of the weight matrix
W = np.transpose(W)
# The main loop
for i in range(k):
# matrix operation
B_last = B
#B = np.linalg.multi_dot([A, B_last, W]) - T
B = A @ B_last @ W - T
# update states
B[B >= 0] = 1
B[B < 0] = 0
# If a recovery probability is set
if recovery:
B[np.random.rand(*B.shape) < r] = 0
# if fixed point
if np.array_equal(B, B_last):
print("A fixed point is reached at iteration {}".format(i))
return B
#h = hpy()
#print(h.heap())
print("Max number of iterations reached")
return B
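# Hypothetical usage sketch (not part of the original module); dense inputs
# again, with W coupling the two contagions.
def _demo_correlate_threshold_weight():
    A_demo = np.array([[0, 1], [1, 0]])  # two connected vertices
    B_demo = np.array([[1, 0], [0, 1]])  # one contagion seeded on each vertex
    W_demo = np.array([[1.0, 0.5],
                       [0.0, 1.0]])      # contagion 0 also counts half of contagion 1
    T_demo = np.ones((2, 2))
    return correlate_threshold_weight(A_demo, B_demo, T_demo, W_demo, k=3)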
def correlate_threshold_density(A, B, T, d, k):
"""
Description
-----------
This function simulates the spread of multiple contagions on a single network
where each contagion has 2 states (0 or 1).
Contagions are interrelated as described by the second model.
Parameters
----------
A: numpy array, int {0, 1}
The adjacency matrix of G.
A is sparse
B: numpy array, int {0, 1}
The initial configuration matrix where $B_{vj}$ is the state value of
vertex v for contagion j.
B is sparse
T: numpy array, int
The threshold matrix where $T_{vj}$ is the threshold of vertex v for
contagion j.
d: numpy array, int
The density vector
k: int
The number of system iterations
Returns
-------
B: numpy array
The final configuration
"""
# Compute the reciprocal
d_bar = np.transpose( np.reciprocal(d.astype(float)) ) # Make sure that d is a column vector
# The number of contagions
c = np.shape(T)[1]
# k * 1 ones
one = np.ones((c, 1), dtype = 'float')
# The main loop
for i in range(k):
B_last = B
# Compute M
M = B @ one @ d_bar #M = np.linalg.multi_dot([B, one, d_bar])
M[M >= 1.0] = 1.0
M[M < 1.0] = 0.0
#B = np.matmul(A, M) - T
B = A @ M - T
# update states
B[B >= 0.0] = 1.0
B[B < 0.0] = 0.0
# if fixed point
if np.array_equal(B, B_last):
print("A fixed point is reached at iteration {}".format(i))
return B
print("Max number of iterations reached")
return B
def covid_mask(A_1, A_2, D_inverse, a_1, t_2, t_3, b_1, b_2, p, alpha, beta, r, k):
"""
Description
-----------
This function simulates the spread of two contagions on two different
networks, where contagions are correlated as described in the project
report.
Parameters
----------
A_1: n x n scipy sparse matrix, int {0, 1}
The adjacency matrix of the social layer
A_2: n x n scipy sparse matrix, int {0, 1}
The adjacency matrix of the disease layer
D_inverse: n x n scipy sparse matrix, float [0, 1]
The inverse of the diagonal degree matrix of the social layer.
a_1: n x 1 scipy sparse matrix, int {0, 1}
(a_1)_i = 1 if the person i is prosocial, and (a_1)_i = 0 otherwise.
t_2: n x 1 numpy array, float [0, 1]
(t_2)_i is the threshold percentage of neighbors who wear masks for person
i to wear a mask in the next iteration.
t_3: n x 1 numpy array, float [0, 1]
(t_3)_i is the threshold percentage of the overall infection of the population
for person i to wear a mask in the next iteration.
b_1: n x 1 scipy sparse matrix, int {0, 1}
(b_1)_i = 1 if the person i wears a mask at the current iteration.
b_2: n x 1 scipy sparse matrix, int {0, 1}
(b_2)_i = 1 if person i is infected by the disease at the current iteration
p: float [0, 1]
Transmission probability of the disease
alpha: The damping factor on p when the person himself wears a mask.
beta: The damping factor on p when a neighbor of a person wears a mask.
r: Recovery probability.
k: The maximum number of time-steps.
"""
# Keep track of the dynamic: {time: [# of masks, # of infections]}
# dynamic = {}
# Compute the degree fraction matrix
F = D_inverse @ A_1
F = sparse.csr_matrix(F)
# The number of vertices
n = np.shape(A_1)[0]
# The one and zero vectors
one = np.ones((n, 1), dtype = 'float')
zero = np.zeros((n, 1), dtype = 'float')
# The recovery vector: b_3
b_3 = np.zeros((n, 1), dtype = 'float') # Initially, no one has recovered.
# The susceptible vector: b_4
b_4 = -b_2 - b_3
b_4[b_4 == 0.0] = 1.0
b_4[b_4 < 0.0] = 0.0
# The largest fraction of infection reached throughout the time
max_frac = 0.0
# The number of days the infection lasts
days = 0
# total number of mask wearings (sum over all days)
total_mask = 0
# mask vector throughout time
mask_vector = [] #new
# infection vector
infection_vector = [] #new
# The main loop
for i in range(k):
days += 1
mask_vector.append(float(np.count_nonzero(b_1) / n)) #new
infection_vector.append(float(np.count_nonzero(b_2) / n)) #new
# dynamic[i] = [np.count_nonzero(b_1), np.count_nonzero(b_2), np.count_nonzero(b_3), np.count_nonzero(b_4)]
# need b_1_last to update the state of the second contagion
b_1_last = b_1
b_4_last = b_4
b_2_last = b_2
# The fraction of total number of infections
a_3 = np.count_nonzero(b_2) / float(n)
# Update the max_frac
if a_3 > max_frac:
max_frac = a_3
# determine if the overall fraction of infection exceeds the threshold
l_3 = -(t_3 - a_3) # Note that I cannot do a_3 - t_3 since a_3 is not a vector
l_3[l_3 >= 0.0] = 1.0
l_3[l_3 < 0.0] = 0.0
# l3 = t_3 <= a_3
# Determine if the fraction of neighbors who wear face masks exceeds a threshold
l_2 = F @ b_1_last - t_2 # sparse?
l_2[l_2 >= 0.0] = 1.0
l_2[l_2 < 0.0] = 0.0
# l_2 = (F @ b_1_last) >= t_2 WORTH TRYING!
# Update the mask state b_1
b_1 = a_1 + l_2 + l_3 # logical operation?
b_1[b_1 >= 1.0] = 1.0 # sparse?
#b_1 = np.logical_or(np.logical_or(a_1, l_2), l_3) WORTH TRYING
total_mask += np.count_nonzero(b_1)
# The # of infected neighbors of each v
d = A_2 @ b_2_last
# The # of infected neighbors with mask
d_2 = A_2 @ np.multiply(b_1_last, b_2_last) # Very important to pass b_1_last here
# The # of infected neighbors without mask
d_1 = d - d_2
# Only susceptibles (b_4) can be infected
#--------------------------------------------------#
# h1 : the probability of not getting infected from neighbors who do not wear masks (1 - p or 1 - alpha p)
temp = one - (b_1 * (1.0 - alpha)) # IMPORTANT: b_1_last vs b_1 (syn vs asyn)
h_1 = one - (temp * p)
# h2: contains the probability of not getting infected from neighbors who wear masks (1 - beta p or 1 - alpha beta p)
h_2 = one - (temp * beta * p)
temp = np.multiply(np.power(h_1, d_1), np.power(h_2, d_2))
q = np.multiply(b_4, one - temp)
#--------------------------------------------------#
# Need to flatten q to pass it to the binomial function
q_f = q.flatten()
# Compute newly infected nodes
newly_infected = np.reshape(np.random.binomial(1, q_f), (-1 ,1))
# Compute R0 (do this before recovery)
# R_0 = np.count_nonzero(newly_infected) / np.count_nonzero(b_2)
# Recovery
rr = np.random.choice([0, 1], size = (n, 1), p=[1.0 - r, r])
b_3 = np.logical_and(b_2, rr) + b_3 # update b_3
b_2 = b_2 - rr
b_2[b_2 == -1] = 0.0
# Update b_2
b_2 = newly_infected + b_2
# Update the susceptible vector
b_4 = -b_2 - b_3
b_4[b_4 == 0.0] = 1.0
b_4[b_4 < 0.0] = 0.0
# A fixed point is reached under zero infection
if np.array_equal(b_2, zero):
# print("A fixed point is reached at iteration {}".format(i))
average_mask = float(total_mask / days)
return round(float(np.count_nonzero(b_3) / n), 4), round(max_frac, 4), days, round(float(average_mask / n), 4)
#return infection_vector, mask_vector
average_mask = float(total_mask / days)
return round(float(np.count_nonzero(b_3) / n), 4), round(max_frac, 4), days, round(float(average_mask / n), 4)
#return infection_vector, mask_vector
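# Hypothetical numeric check of the escape-probability expressions used in the
# loop above (not part of the original module): q = 1 - h_1**d_1 * h_2**d_2.
def _demo_mask_infection_probability():
    p, alpha, beta = 0.2, 0.5, 0.5
    # Unmasked susceptible person (temp = 1) with one unmasked and one masked
    # infected neighbor:
    q_no_mask = 1.0 - (1.0 - p) * (1.0 - beta * p)               # = 0.28
    # Same neighborhood, but the susceptible person wears a mask (temp = alpha):
    q_mask = 1.0 - (1.0 - alpha * p) * (1.0 - alpha * beta * p)  # = 0.145
    return q_no_mask, q_mask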
def covid_mask_sym_fear(A_1, A_2, D_inverse, a_1, t_2, t_3, b_1, b_2, p, alpha, beta, r, k, sym_ratio):
"""
Description
-----------
This function simulates the spread of two contagions on two different
networks, where contagions are correlated as described in the project
report.
Parameters
----------
A_1: n x n scipy sparse matrix, int {0, 1}
The adjacency matrix of the social layer
A_2: n x n scipy sparse matrix, int {0, 1}
The adjacency matrix of the disease layer
D_inverse: n x n scipy sparse matrix, float [0, 1]
The inverse of the diagonal degree matrix of the social layer.
a_1: n x 1 scipy sparse matrix, int {0, 1}
(a_1)_i = 1 if the person i is prosocial, and (a_1)_i = 0 otherwise.
t_2: n x 1 numpy array, float [0, 1]
(t_2)_i is the threshold percentage of neighbors who wear masks for person
i to wear a mask in the next iteration.
t_3: n x 1 numpy array, float [0, 1]
(t_3)_i is the threshold percentage of the overall infection of the population
for person i to wear a mask in the next iteration.
b_1: n x 1 scipy sparse matrix, int {0, 1}
(b_1)_i = 1 if the person i wears a mask at the current iteration.
b_2: n x 1 scipy sparse matrix, int {0, 1}
(b_2)_i = 1 if person i is infected by the disease at the current iteration
p: float [0, 1]
Transmission probability of the disease
alpha: The damping factor on p when the person himself wears a mask.
beta: The damping factor on p when a neighbor of a person wears a mask.
r: Recovery probability.
k: The maximum number of time-steps.
"""
# Keep track of the dynamic: {time: [# of masks, # of infections]}
# dynamic = {}
# Compute the degree fraction matrix
F = D_inverse @ A_1
F = sparse.csr_matrix(F)
# The number of vertices
n = np.shape(A_1)[0]
# The one and zero vectors
one = np.ones((n, 1), dtype = 'float')
zero = np.zeros((n, 1), dtype = 'float')
# The recovery vector: b_3
b_3 = np.zeros((n, 1), dtype = 'float') # Initially, no one has recovered.
# The susceptible vector: b_4
b_4 = -b_2 - b_3
b_4[b_4 == 0.0] = 1.0
b_4[b_4 < 0.0] = 0.0
# The largest fraction of infection reached throughout the time
max_frac = 0.0
# The number of days the infection lasts
days = 0
# total number of mask wearings (sum over all days)
total_mask = 0
# mask vector throughout time
mask_vector = [] #new
# infection vector
infection_vector = [] #new
# The main loop
for i in range(k):
days += 1
mask_vector.append(float(np.count_nonzero(b_1) / n)) #new
infection_vector.append(float(np.count_nonzero(b_2) / n)) #new
# dynamic[i] = [np.count_nonzero(b_1), np.count_nonzero(b_2), np.count_nonzero(b_3), np.count_nonzero(b_4)]
# need b_1_last to update the state of the second contagion
b_1_last = b_1
b_4_last = b_4
b_2_last = b_2
# The fraction of total number of infections
a_3 = np.count_nonzero(b_2) / float(n)
# Update the max_frac
if a_3 > max_frac:
max_frac = a_3
# determine if the overall fraction of infection exceeds the threshold
l_3 = -(t_3 - sym_ratio * a_3) # Note that I cannot do a_3 - t_3 since a_3 is not a vector
l_3[l_3 >= 0.0] = 1.0
l_3[l_3 < 0.0] = 0.0
# l3 = t_3 <= a_3
# Determine if the fraction of neighbors who wear face masks exceeds a threshold
l_2 = F @ b_1_last - t_2 # sparse?
l_2[l_2 >= 0.0] = 1.0
l_2[l_2 < 0.0] = 0.0
# l_2 = (F @ b_1_last) >= t_2 WORTH TRYING!
# Update the mask state b_1
b_1 = a_1 + l_2 + l_3 # logical operation?
b_1[b_1 >= 1.0] = 1.0 # sparse?
#b_1 = np.logical_or(np.logical_or(a_1, l_2), l_3) WORTH TRYING
total_mask += np.count_nonzero(b_1)
# The # of infected neighbors of each v
d = A_2 @ b_2_last
# The # of infected neighbors with mask
d_2 = A_2 @ np.multiply(b_1_last, b_2_last) # Very important to pass b_1_last here
# The # of infected neighbors without mask
d_1 = d - d_2
# Only susceptibles (b_4) can be infected
#--------------------------------------------------#
# h1 : the probability of not getting infected from neighbors who do not wear masks (1 - p or 1 - alpha p)
temp = one - (b_1 * (1.0 - alpha)) # IMPORTANT: b_1_last vs b_1 (syn vs asyn)
h_1 = one - (temp * p)
# h2: contains the probability of not getting infected from neighbors who wear masks (1 - beta p or 1 - alpha beta p)
h_2 = one - (temp * beta * p)
temp = np.multiply(np.power(h_1, d_1), np.power(h_2, d_2))
q = np.multiply(b_4, one - temp)
#--------------------------------------------------#
# Need to flatten q to pass it to the binomial function
q_f = q.flatten()
# Compute newly infected nodes
newly_infected = np.reshape(np.random.binomial(1, q_f), (-1 ,1))
# Compute R0 (do this before recovery)
# R_0 = np.count_nonzero(newly_infected) / np.count_nonzero(b_2)
# Recovery
rr = np.random.choice([0, 1], size = (n, 1), p=[1.0 - r, r])
b_3 = np.logical_and(b_2, rr) + b_3 # update b_3
b_2 = b_2 - rr
b_2[b_2 == -1] = 0.0
# Update b_2
b_2 = newly_infected + b_2
# Update the susceptible vector
b_4 = -b_2 - b_3
b_4[b_4 == 0.0] = 1.0
b_4[b_4 < 0.0] = 0.0
# A fixed point is reached under zero infection
if np.array_equal(b_2, zero):
# print("A fixed point is reached at iteration {}".format(i))
average_mask = float(total_mask / days)
return round(float(np.count_nonzero(b_3) / n), 4), round(max_frac, 4), days, round(float(average_mask / n), 4)
#return infection_vector, mask_vector
average_mask = float(total_mask / days)
return round(float(np.count_nonzero(b_3) / n), 4), round(max_frac, 4), days, round(float(average_mask / n), 4)
#return infection_vector, mask_vector
def covid_mask_peak_diff(A_1, A_2, D_inverse, a_1, t_2, t_3, b_1, b_2, p, alpha, beta, r, k):
"""
Description
-----------
This function simulates the spread of two contagions on two different
networks, where contagions are correlated as described in the project
report.
Parameters
----------
A_1: n x n scipy sparse matrix, int {0, 1}
The adjacency matrix of the social layer
A_2: n x n scipy sparse matrix, int {0, 1}
The adjacency matrix of the disease layer
D_inverse: n x n scipy sparse matrix, float [0, 1]
The inverse of the diagonal degree matrix of the social layer.
a_1: n x 1 scipy sparse matrix, int {0, 1}
(a_1)_i = 1 if the person i is prosocial, and (a_1)_i = 0 otherwise.
t_2: n x 1 numpy array, float [0, 1]
(t_2)_i is the threshold percentage of neighbors who wear masks for person
i to wear a mask in the next iteration.
t_3: n x 1 numpy array, float [0, 1]
(t_3)_i is the threshold percentage of the overall infection of the population
for person i to wear a mask in the next iteration.
b_1: n x 1 scipy sparse matrix, int {0, 1}
(b_1)_i = 1 if the person i wears a mask at the current iteration.
b_2: n x 1 scipy sparse matrix, int {0, 1}
(b_2)_i = 1 if person i is infected by the disease at the current iteration
p: float [0, 1]
Transmission probability of the disease
alpha: The damping factor on p when the person himself wears a mask.
beta: The damping factor on p when a neighbor of a person wears a mask.
r: Recovery probability.
k: The maximum number of time-steps.
"""
# Keep track of the dynamic: {time: [# of masks, # of infections]}
# dynamic = {}
# Compute the degree fraction matrix
F = D_inverse @ A_1
F = sparse.csr_matrix(F)
# The number of vertices
n = np.shape(A_1)[0]
# The one and zero vectors
one = np.ones((n, 1), dtype = 'float')
zero = np.zeros((n, 1), dtype = 'float')
# The recovery vector: b_3
b_3 = np.zeros((n, 1), dtype = 'float') # Initially, no one has recovered.
# The susceptible vector: b_4
b_4 = -b_2 - b_3
b_4[b_4 == 0.0] = 1.0
b_4[b_4 < 0.0] = 0.0
# The largest fraction of infection reached throughout the time
max_frac_1 = 0.0
# The second largest fraction of infection reached throughout the time
max_frac_2 = 0.0
# The time where the largest infection occurs
peak_time_1 = 0
# The time where the second largest infection occurs
peak_time_2 = 0
# The number of days the infection lasts
days = 0
# total number of mask wearings (sum over all days)
total_mask = 0
# mask vector throughout time
mask_vector = [] #new
# infection vector
infection_vector = [] #new
# The main loop
for i in range(k):
days += 1
mask_vector.append(float(np.count_nonzero(b_1) / n)) #new
infection_vector.append(float(np.count_nonzero(b_2) / n)) #new
# dynamic[i] = [np.count_nonzero(b_1), np.count_nonzero(b_2), np.count_nonzero(b_3), np.count_nonzero(b_4)]
# need b_1_last to update the state of the second contagion
b_1_last = b_1
b_4_last = b_4
b_2_last = b_2
# The fraction of total number of infections
a_3 = np.count_nonzero(b_2) / float(n)
# Update the max_frac
if a_3 > max_frac_1:
max_frac_2 = max_frac_1
peak_time_2 = peak_time_1
max_frac_1 = a_3
peak_time_1 = i
# determine if the overall fraction of infection exceeds the threshold
l_3 = -(t_3 - a_3) # Note that I cannot do a_3 - t_3 since a_3 is not a vector
l_3[l_3 >= 0.0] = 1.0
l_3[l_3 < 0.0] = 0.0
# l3 = t_3 <= a_3
# Determine if the fraction of neighbors who wear face masks exceeds a threshold
l_2 = F @ b_1_last - t_2 # sparse?
l_2[l_2 >= 0.0] = 1.0
l_2[l_2 < 0.0] = 0.0
# l_2 = (F @ b_1_last) >= t_2 WORTH TRYING!
# Update the mask state b_1
b_1 = a_1 + l_2 + l_3 # logical operation?
b_1[b_1 >= 1.0] = 1.0 # sparse?
#b_1 = np.logical_or(np.logical_or(a_1, l_2), l_3) WORTH TRYING
total_mask += np.count_nonzero(b_1)
# The # of infected neighbors of each v
d = A_2 @ b_2_last
# The # of infected neighbors with mask
d_2 = A_2 @ np.multiply(b_1_last, b_2_last) # Very important to pass b_1_last here
# The # of infected neighbors without mask
d_1 = d - d_2
# Only susceptibles (b_4) can be infected
#--------------------------------------------------#
# h1 : the probability of not getting infected from neighbors who do not wear masks (1 - p or 1 - alpha p)
temp = one - (b_1 * (1.0 - alpha)) # IMPORTANT: b_1_last vs b_1 (syn vs asyn)
h_1 = one - (temp * p)
# h2: contains the probability of not getting infected from neighbors who wear masks (1 - beta p or 1 - alpha beta p)
h_2 = one - (temp * beta * p)
temp = np.multiply(np.power(h_1, d_1), np.power(h_2, d_2))
q = np.multiply(b_4, one - temp)
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
import numpy as np
from cleverhans.devtools.checks import CleverHansTest
from cleverhans.attacks import Attack
from cleverhans.attacks import FastGradientMethod
from cleverhans.attacks import BasicIterativeMethod
from cleverhans.attacks import MomentumIterativeMethod
from cleverhans.attacks import VirtualAdversarialMethod
from cleverhans.attacks import SaliencyMapMethod
from cleverhans.attacks import CarliniWagnerL2
from cleverhans.attacks import ElasticNetMethod
from cleverhans.attacks import DeepFool
from cleverhans.attacks import MadryEtAl
from cleverhans.attacks import FastFeatureAdversaries
class TestAttackClassInitArguments(CleverHansTest):
def test_model(self):
import tensorflow as tf
sess = tf.Session()
# Exception is thrown when model does not have __call__ attribute
with self.assertRaises(Exception) as context:
model = tf.placeholder(tf.float32, shape=(None, 10))
Attack(model, back='tf', sess=sess)
self.assertTrue(context.exception)
def test_back(self):
# Define empty model
def model():
return True
# Exception is thrown when back is not tf or th
with self.assertRaises(Exception) as context:
Attack(model, back='test', sess=None)
self.assertTrue(context.exception)
def test_sess(self):
# Define empty model
def model():
return True
# Test that it is permitted to provide no session
Attack(model, back='tf', sess=None)
def test_sess_generate_np(self):
def model(x):
return True
class DummyAttack(Attack):
def generate(self, x, **kwargs):
return x
attack = DummyAttack(model, back='tf', sess=None)
with self.assertRaises(Exception) as context:
attack.generate_np(0.)
self.assertTrue(context.exception)
class TestParseParams(CleverHansTest):
def test_parse(self):
def model():
return True
import tensorflow as tf
sess = tf.Session()
test_attack = Attack(model, back='tf', sess=sess)
self.assertTrue(test_attack.parse_params({}))
class TestVirtualAdversarialMethod(CleverHansTest):
def setUp(self):
super(TestVirtualAdversarialMethod, self).setUp()
import tensorflow as tf
import tensorflow.contrib.slim as slim
def dummy_model(x):
net = slim.fully_connected(x, 60)
return slim.fully_connected(net, 10, activation_fn=None)
self.sess = tf.Session()
self.sess.as_default()
self.model = tf.make_template('dummy_model', dummy_model)
self.attack = VirtualAdversarialMethod(self.model, sess=self.sess)
# initialize model
with tf.name_scope('dummy_model'):
self.model(tf.placeholder(tf.float32, shape=(None, 1000)))
self.sess.run(tf.global_variables_initializer())
def test_parse_params(self):
self.attack.parse_params()
# test default values
self.assertEqual(self.attack.eps, 2.0)
self.assertEqual(self.attack.num_iterations, 1)
self.assertEqual(self.attack.xi, 1e-6)
self.assertEqual(self.attack.clip_min, None)
self.assertEqual(self.attack.clip_max, None)
def test_generate_np(self):
x_val = np.random.rand(100, 1000)
perturbation = self.attack.generate_np(x_val) - x_val
perturbation_norm = np.sqrt(np.sum(perturbation**2, axis=1))
# test perturbation norm
self.assertClose(perturbation_norm, self.attack.eps)
class TestFastGradientMethod(CleverHansTest):
def setUp(self):
super(TestFastGradientMethod, self).setUp()
import tensorflow as tf
# The world's simplest neural network
def my_model(x):
W1 = tf.constant([[1.5, .3], [-2, 0.3]], dtype=tf.float32)
h1 = tf.nn.sigmoid(tf.matmul(x, W1))
W2 = tf.constant([[-2.4, 1.2], [0.5, -2.3]], dtype=tf.float32)
res = tf.nn.softmax(tf.matmul(h1, W2))
return res
self.sess = tf.Session()
self.model = my_model
self.attack = FastGradientMethod(self.model, sess=self.sess)
def help_generate_np_gives_adversarial_example(self, ord):
x_val = np.random.rand(100, 2)
x_val = np.array(x_val, dtype=np.float32)
x_adv = self.attack.generate_np(x_val, eps=.5, ord=ord,
clip_min=-5, clip_max=5)
if ord == np.inf:
delta = np.max(np.abs(x_adv - x_val), axis=1)
elif ord == 1:
delta = np.sum(np.abs(x_adv - x_val), axis=1)
elif ord == 2:
delta = np.sum(np.square(x_adv - x_val), axis=1)**.5
self.assertClose(delta, 0.5)
orig_labs = np.argmax(self.sess.run(self.model(x_val)), axis=1)
new_labs = np.argmax(self.sess.run(self.model(x_adv)), axis=1)
self.assertTrue(np.mean(orig_labs == new_labs) < 0.5)
def test_generate_np_gives_adversarial_example_linfinity(self):
self.help_generate_np_gives_adversarial_example(np.infty)
def test_generate_np_gives_adversarial_example_l1(self):
self.help_generate_np_gives_adversarial_example(1)
def test_generate_np_gives_adversarial_example_l2(self):
self.help_generate_np_gives_adversarial_example(2)
def test_targeted_generate_np_gives_adversarial_example(self):
x_val = np.random.rand(100, 2)
x_val = np.array(x_val, dtype=np.float32)
random_labs = np.random.random_integers(0, 1, 100)
random_labs_one_hot = np.zeros((100, 2))
random_labs_one_hot[np.arange(100), random_labs] = 1
x_adv = self.attack.generate_np(x_val, eps=.5, ord=np.inf,
clip_min=-5, clip_max=5,
y_target=random_labs_one_hot)
delta = np.max(np.abs(x_adv - x_val), axis=1)
self.assertClose(delta, 0.5)
new_labs = np.argmax(self.sess.run(self.model(x_adv)), axis=1)
self.assertTrue(np.mean(random_labs == new_labs) > 0.7)
def test_generate_np_can_be_called_with_different_eps(self):
x_val = np.random.rand(100, 2)
x_val = np.array(x_val, dtype=np.float32)
for eps in [0.1, 0.2, 0.3, 0.4]:
x_adv = self.attack.generate_np(x_val, eps=eps, ord=np.inf,
clip_min=-5.0, clip_max=5.0)
delta = np.max(np.abs(x_adv - x_val), axis=1)
self.assertClose(delta, eps)
def test_generate_np_clip_works_as_expected(self):
x_val = np.random.rand(100, 2)
x_val = np.array(x_val, dtype=np.float32)
x_adv = self.attack.generate_np(x_val, eps=0.5, ord=np.inf,
clip_min=-0.2, clip_max=0.1)
self.assertClose(np.min(x_adv), -0.2)
self.assertClose(np.max(x_adv), 0.1)
def test_generate_np_caches_graph_computation_for_eps_clip_or_xi(self):
import tensorflow as tf
x_val = np.random.rand(1, 2)
x_val = np.array(x_val, dtype=np.float32)
self.attack.generate_np(x_val, eps=.3, num_iterations=10,
clip_max=-5.0, clip_min=-5.0,
xi=1e-6)
old_grads = tf.gradients
def fn(*x, **y):
raise RuntimeError()
tf.gradients = fn
self.attack.generate_np(x_val, eps=.2, num_iterations=10,
clip_max=-4.0, clip_min=-4.0,
xi=1e-5)
tf.gradients = old_grads
class TestBasicIterativeMethod(TestFastGradientMethod):
def setUp(self):
super(TestBasicIterativeMethod, self).setUp()
import tensorflow as tf
# The world's simplest neural network
def my_model(x):
W1 = tf.constant([[1.5, .3], [-2, 0.3]], dtype=tf.float32)
h1 = tf.nn.sigmoid(tf.matmul(x, W1))
W2 = tf.constant([[-2.4, 1.2], [0.5, -2.3]], dtype=tf.float32)
res = tf.nn.softmax(tf.matmul(h1, W2))
return res
self.sess = tf.Session()
self.model = my_model
self.attack = BasicIterativeMethod(self.model, sess=self.sess)
def test_attack_strength(self):
"""
If clipping is not done at each iteration (not passing clip_min and
clip_max to fgm), this attack fails by
np.mean(orig_labels == new_labels) == .39.
"""
x_val = np.random.rand(100, 2)
x_val = np.array(x_val, dtype=np.float32)
x_adv = self.attack.generate_np(x_val, eps=1.0, ord=np.inf,
clip_min=0.5, clip_max=0.7,
nb_iter=5)
orig_labs = np.argmax(self.sess.run(self.model(x_val)), axis=1)
new_labs = np.argmax(self.sess.run(self.model(x_adv)), axis=1)
self.assertTrue(np.mean(orig_labs == new_labs) < 0.1)
def test_generate_np_does_not_cache_graph_computation_for_nb_iter(self):
import tensorflow as tf
x_val = np.random.rand(100, 2)
x_val = np.array(x_val, dtype=np.float32)
x_adv = self.attack.generate_np(x_val, eps=1.0, ord=np.inf,
clip_min=-5.0, clip_max=5.0,
nb_iter=10)
orig_labs = np.argmax(self.sess.run(self.model(x_val)), axis=1)
new_labs = np.argmax(self.sess.run(self.model(x_adv)), axis=1)
self.assertTrue(np.mean(orig_labs == new_labs) < 0.1)
ok = [False]
old_grads = tf.gradients
def fn(*x, **y):
ok[0] = True
return old_grads(*x, **y)
tf.gradients = fn
x_adv = self.attack.generate_np(x_val, eps=1.0, ord=np.inf,
clip_min=-5.0, clip_max=5.0,
nb_iter=11)
orig_labs = np.argmax(self.sess.run(self.model(x_val)), axis=1)
new_labs = np.argmax(self.sess.run(self.model(x_adv)), axis=1)
self.assertTrue(np.mean(orig_labs == new_labs) < 0.1)
tf.gradients = old_grads
self.assertTrue(ok[0])
class TestMomentumIterativeMethod(TestBasicIterativeMethod):
def setUp(self):
super(TestMomentumIterativeMethod, self).setUp()
import tensorflow as tf
# The world's simplest neural network
def my_model(x):
W1 = tf.constant([[1.5, .3], [-2, 0.3]], dtype=tf.float32)
h1 = tf.nn.sigmoid(tf.matmul(x, W1))
W2 = tf.constant([[-2.4, 1.2], [0.5, -2.3]], dtype=tf.float32)
res = tf.nn.softmax(tf.matmul(h1, W2))
return res
self.sess = tf.Session()
self.model = my_model
self.attack = MomentumIterativeMethod(self.model, sess=self.sess)
def test_generate_np_can_be_called_with_different_decay_factor(self):
x_val = np.random.rand(100, 2)
x_val = np.array(x_val, dtype=np.float32)
for decay_factor in [0.0, 0.5, 1.0]:
x_adv = self.attack.generate_np(x_val, eps=0.5, ord=np.inf,
decay_factor=decay_factor,
clip_min=-5.0, clip_max=5.0)
delta = np.max(np.abs(x_adv - x_val), axis=1)
self.assertClose(delta, 0.5)
class TestCarliniWagnerL2(CleverHansTest):
def setUp(self):
super(TestCarliniWagnerL2, self).setUp()
import tensorflow as tf
# The world's simplest neural network
def my_model(x):
W1 = tf.constant([[1.5, .3], [-2, 0.3]], dtype=tf.float32)
h1 = tf.nn.sigmoid(tf.matmul(x, W1))
W2 = tf.constant([[-2.4, 1.2], [0.5, -2.3]], dtype=tf.float32)
res = tf.matmul(h1, W2)
return res
self.sess = tf.Session()
self.model = my_model
self.attack = CarliniWagnerL2(self.model, sess=self.sess)
def test_generate_np_untargeted_gives_adversarial_example(self):
x_val = np.random.rand(100, 2)
x_val = np.array(x_val, dtype=np.float32)
x_adv = self.attack.generate_np(x_val, max_iterations=100,
binary_search_steps=3,
initial_const=1,
clip_min=-5, clip_max=5,
batch_size=10)
orig_labs = np.argmax(self.sess.run(self.model(x_val)), axis=1)
new_labs = np.argmax(self.sess.run(self.model(x_adv)), axis=1)
self.assertTrue(np.mean(orig_labs == new_labs) < 0.1)
def test_generate_np_targeted_gives_adversarial_example(self):
x_val = np.random.rand(100, 2)
x_val = np.array(x_val, dtype=np.float32)
feed_labs = np.zeros((100, 2))
feed_labs[np.arange(100)
import numpy as np
from gym import Env
from gym.spaces import Discrete, Box
import logging
logger = logging.getLogger(__name__)
class InvalidMoveException(Exception):
pass
class SechsNimmtEnv(Env):
""" OpenAI gym environment for the card game 6 Nimmt! """
def __init__(self, num_players, num_rows=4, num_cards=104, threshold=6, include_summaries=True, player_names=None, verbose=True):
super().__init__()
assert num_players > 0
assert num_rows > 0
assert num_cards >= 10 * num_players + num_rows
self._num_players = num_players
self._num_rows = num_rows
self._num_cards = num_cards
self._threshold = threshold
self._include_summaries = include_summaries
self._player_names = player_names
self._board = [[] for _ in range(self._num_rows)]
self._hands = [[] for _ in range(self._num_players)]
self._scores = np.zeros(self._num_players, dtype=np.int)
self.action_space = Discrete(self._num_cards)
self.reward_range = (-float("inf"), 0)
self.metadata = {"render.modes": ["human"]}
state_shape = (10 + 1 + int(self._include_summaries) * 3 * self._num_rows + self._num_rows * self._threshold,)
self.observation_space = Box(low=-1.0, high=2.0, shape=state_shape, dtype=np.float)
self.spec = None
self.verbose = verbose
def reset(self):
""" Resets the state of the environment and returns an initial observation. """
self._deal()
self._scores = np.zeros(self._num_players, dtype=np.int)
states = self._create_states()
return states
def reset_to(self, board, hands):
""" Initializes a game for given board and hands """
self._board = board
self._hands = hands
self._scores = np.zeros(self._num_players, dtype=np.int)
states = self._create_states()
return states
def step(self, action):
""" Environment step. action is actually a list of actions (one for each player). """
assert len(action) == self._num_players
for player, card in enumerate(action):
self._check_move(player, card)
rewards = self._play_cards(action)
states = self._create_states()
info = dict()
done = self._is_done()
return states, rewards, done, info
def render(self, mode="human"):
""" Report game progress somehow """
logger.info("-" * 120)
logger.info("Board:")
for row, cards in enumerate(self._board):
logger.info(f" " + " ".join([self._format_card(card) for card in cards]) + " _ " * (self._threshold - len(cards) - 1) + " * ")
logger.info("Players:")
for player, (score, hand) in enumerate(zip(self._scores, self._hands)):
self._player_name(player)
logger.info(
f" {self._player_name(player)}: {score:>3d} Hornochsen, "
+ ("no cards " if len(hand) == 0 else "cards " + " ".join([self._format_card(card) for card in hand]))
)
if self._is_done():
winning_player = np.argmin(self._scores)
losing_player = np.argmax(self._scores)
logger.info(f"The game is over! {self._player_name(winning_player)} wins, {self._player_name(losing_player)} loses. Congratulations!")
logger.info("-" * 120)
def _deal(self):
""" Deals random cards to all players and initiates the game board """
if self.verbose: logger.debug("Dealing cards")
cards = np.arange(0, self._num_cards, 1, dtype=np.int)
np.random.shuffle(cards)
cards = list(cards)
for player in range(self._num_players):
self._hands[player] = sorted(cards[:10]) # pop() does not support multiple indices, does it?
del cards[:10]
for row in range(self._num_rows):
self._board[row] = [cards.pop()]
def _check_move(self, player, card):
""" Check legality of a move and raise an exception otherwise"""
if card not in self._hands[player]:
raise InvalidMoveException(f"Player {player + 1} tried to play card {card + 1}, but their hand is {self._hands[player]}")
def _play_cards(self, cards):
""" Given one played card per player, play the cards, score points, and update the game """
rewards = np.zeros(self._num_players, dtype=np.int)
actions = [(card, player) for player, card in enumerate(cards)]
actions = sorted(actions, key=lambda x: x[0])
for card, player in actions:
if self.verbose: logger.debug(f"{self._player_name(player)} plays card {card + 1}")
row, replaced = self._find_row(card)
self._board[row].append(card)
self._hands[player].remove(card)
if replaced or len(self._board[row]) >= self._threshold:
rewards += self._score_row(row, player)
return rewards
def _find_row(self, card):
""" Find which row a card has to go in """
thresholds = [(row, cards[-1]) for row, cards in enumerate(self._board)]
thresholds = sorted(thresholds, key=lambda x: -x[1]) # Sort by card threshold
if card < thresholds[-1][1]:
row = self._pick_row_to_replace()
if self.verbose: logger.debug(f" ...chooses to replace row {row + 1}")
return row, True
for row, threshold in thresholds:
if card > threshold:
return row, False
raise ValueError(f"Cannot fit card {card} into thresholds {thresholds}")
def _pick_row_to_replace(self):
""" Picks which row should be replaces when a player undercuts the smalles open row """
# TODO: In the long term this should be up to the agents.
row_values = [self._row_value(cards, include_last=True) for cards in self._board]
return np.argmin(row_values)
def _score_row(self, row, player):
""" Assigns points from a full row, and resets that row """
cards = self._board[row]
penalty = self._row_value(cards)
if self.verbose: logger.debug(f" ...and gains {penalty} Hornochsen")
self._scores[player] += penalty
rewards = np.zeros(self._num_players, dtype=np.int)
rewards[player] -= penalty
self._board[row] = [cards[-1]]
return rewards
def _create_states(self):
""" Creates state tuple """
game_state = self._create_game_state()
player_states = []
legal_actions = []
for player in range(self._num_players):
player_state, legal_action = self._create_agent_state(player)
player_states.append(np.hstack((player_state, game_state)))
legal_actions.append(legal_action)
return player_states, legal_actions
def _create_game_state(self):
""" Builds game state """
board_array = -np.ones((self._num_rows, self._threshold), dtype=np.int)
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import logging
from PIL import Image
import cv2
import numpy as np
import torch
from torch.utils.data import Dataset
import torchvision.transforms.functional as TF
from torchvision import transforms
import random
import os
class DisasterDataset(Dataset):
def __init__(self, data_dir, i_shard, set_name, data_mean_stddev, transform:bool, normalize:bool):
self.data_dir = data_dir
self.transform = transform
self.normalize = normalize
self.data_mean_stddev = data_mean_stddev
shard_path = os.path.join(data_dir, f'{set_name}_pre_image_chips_{i_shard}.npy')
self.pre_image_chip_shard = np.load(shard_path)
logging.info(f'pre_image_chips loaded{self.pre_image_chip_shard.shape}')
shard_path = os.path.join(data_dir, f'{set_name}_post_image_chips_{i_shard}.npy')
self.post_image_chip_shard = np.load(shard_path)
logging.info(f'post_image_chips loaded{self.post_image_chip_shard.shape}')
shard_path = os.path.join(data_dir, f'{set_name}_bld_mask_chips_{i_shard}.npy')
self.bld_mask_chip_shard = np.load(shard_path)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs, make_circles, make_moons
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
from scipy.misc import imresize
from scipy.spatial.distance import pdist, squareform
# Set tolerances
tol = 0.01 # error tolerance
eps = 0.01 # alpha tolerance
class SMOModel:
"""Container object for the model used for sequential minimal optimization."""
def __init__(self, X, y, C, kernel, alphas, b, errors):
self.X = X # training data vector
self.y = y # class label vector
self.C = C # regularization parameter
self.kernel = kernel # kernel function
self.alphas = alphas # lagrange multiplier vector
self.b = b # scalar bias term
self.errors = errors # error cache
self._obj = [] # record of objective function value
self.m = len(self.X) # store size of training set
def linear(x, y, b=1):
"""
Computes the linear kernel between x and y
Args:
b: Bias (a scalar)
x: array
y: array
Returns:
Linear kernel between x and y
"""
result = None
#######################################################################
# TODO: #
# Compute the linear kernel between x and y #
#######################################################################
result = np.matmul(x, y.T) + b
#######################################################################
# END OF YOUR CODE #
#######################################################################
return result
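# Hypothetical quick check (not part of the original code): the linear kernel
# of two row vectors is just their dot product plus the bias b.
def _demo_linear_kernel():
    x_demo = np.array([[1.0, 2.0]])
    y_demo = np.array([[3.0, 4.0]])
    return linear(x_demo, y_demo)  # [[12.]] = 1*3 + 2*4 + 1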
def gaussian(x, y, sigma=1):
"""
Computes the gaussian kernel between x and y
Args:
x: array
y: array
sigma: scalar
Returns:
Gaussian similarity
"""
result = None
#######################################################################
# TODO: #
# Compute the Gaussian kernel between x and y #
#######################################################################
#result = np.exp(-np.linalg.norm(x-y)**2/(2 * sigma**2))
if np.ndim(x) == 1 and np.ndim(y) == 1:
result = np.exp(- np.linalg.norm(x - y) ** 2 / (2 * sigma ** 2))
elif (np.ndim(x) > 1 and np.ndim(y) == 1) or (np.ndim(x) == 1 and np.ndim(y) > 1):
result = np.exp(- np.linalg.norm(x - y, axis=1) ** 2 / (2 * sigma ** 2))
elif np.ndim(x) > 1 and np.ndim(y) > 1:
result = np.exp(- np.linalg.norm(x[:, np.newaxis] - y[np.newaxis, :], axis=2) ** 2 / (2 * sigma ** 2))
#######################################################################
# END OF YOUR CODE #
#######################################################################
return result
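# Hypothetical consistency check (not part of the original code): the Gaussian
# kernel matrix should match the one built from scipy's pairwise squared
# distances, which are already imported above.
def _demo_gaussian_kernel():
    X_demo = np.random.rand(5, 3)
    sigma = 1.5
    K_direct = gaussian(X_demo, X_demo, sigma)
    K_pdist = np.exp(-squareform(pdist(X_demo, 'sqeuclidean')) / (2 * sigma ** 2))
    return np.allclose(K_direct, K_pdist)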
def objective_function(alphas, y, kernel, X):
"""
Computes the objective function
Args:
alphas: Lagrangian multipliers
y: class labels -1 or 1
X: training data
Returns:
Value of the objective function
"""
result = None
#######################################################################
# TODO: #
# Compute the objective function #
#######################################################################
result = np.sum(alphas) - 0.5 * np.sum(np.outer(y, y) * kernel(X, X) * np.outer(alphas, alphas))
#######################################################################
# END OF YOUR CODE #
#######################################################################
return result
# Decision function
def decision_function(alphas, target, kernel, X_train, x_test, b):
"""
Compute the decision function
Args:
alphas: Lagrangian multipliers
y: class labels -1 or 1
X: training/test data
Returns:
Output of decision function
"""
result = None
#######################################################################
# TODO: #
# Compute the decision function #
#######################################################################
result = np.matmul((alphas * target), kernel(X_train, x_test)) - b
#######################################################################
# END OF YOUR CODE #
#######################################################################
return result
def plot_decision_boundary(model, ax, resolution=100, colors=('b', 'k', 'r')):
"""Plots the model's decision boundary on the input axes object.
Range of decision boundary grid is determined by the training data.
Returns decision boundary grid and axes object (`grid`, `ax`)."""
# Generate coordinate grid of shape [resolution x resolution]
# and evaluate the model over the entire space
xrange = np.linspace(model.X[:,0].min(), model.X[:,0].max(), resolution)
yrange = np.linspace(model.X[:,1].min(), model.X[:,1].max(), resolution)
grid = [[decision_function(model.alphas, model.y,
model.kernel, model.X,
np.array([xr, yr]), model.b) for yr in yrange] for xr in xrange]
grid = np.array(grid).reshape(len(xrange), len(yrange))
# Plot decision contours using grid and
# make a scatter plot of training data
ax.contour(xrange, yrange, grid, (-1, 0, 1), linewidths=(1, 1, 1),
linestyles=('--', '-', '--'), colors=colors)
ax.scatter(model.X[:,0], model.X[:,1],
c=model.y, cmap=plt.cm.viridis, lw=0, alpha=0.5)
# Plot support vectors (non-zero alphas)
# as circled points (linewidth > 0)
mask = model.alphas != 0.0
ax.scatter(model.X[:,0][mask], model.X[:,1][mask],
c=model.y[mask], cmap=plt.cm.viridis)
return grid, ax
def take_step(i1, i2, model):
# Skip if chosen alphas are the same
if i1 == i2:
return 0, model
alph1 = model.alphas[i1]
alph2 = model.alphas[i2]
y1 = model.y[i1]
y2 = model.y[i2]
E1 = model.errors[i1]
E2 = model.errors[i2]
s = y1 * y2
# Compute L & H, the bounds on new possible alpha values
if (y1 != y2):
L = max(0, alph2 - alph1)
H = min(model.C, model.C + alph2 - alph1)
elif (y1 == y2):
L = max(0, alph1 + alph2 - model.C)
H = min(model.C, alph1 + alph2)
if (L == H):
return 0, model
# Compute kernel & 2nd derivative eta
k11 = model.kernel(model.X[i1], model.X[i1])
k12 = model.kernel(model.X[i1], model.X[i2])
k22 = model.kernel(model.X[i2], model.X[i2])
eta = 2 * k12 - k11 - k22
# Compute new alpha 2 (a2) if eta is negative
if (eta < 0):
a2 = alph2 - y2 * (E1 - E2) / eta
# Clip a2 based on bounds L & H
#######################################################################
# TODO: #
# Clip a2 based on the last equation in the notes #
#######################################################################
if L < a2 < H:
a2 = a2
elif (a2 <= L):
a2 = L
elif (a2 >= H):
a2 = H
#######################################################################
# END OF YOUR CODE #
#######################################################################
# If eta is non-negative, move new a2 to bound with greater objective function value
else:
alphas_adj = model.alphas.copy()
alphas_adj[i2] = L
# objective function output with a2 = L
Lobj = objective_function(alphas_adj, model.y, model.kernel, model.X)
alphas_adj[i2] = H
# objective function output with a2 = H
Hobj = objective_function(alphas_adj, model.y, model.kernel, model.X)
if Lobj > (Hobj + eps):
a2 = L
elif Lobj < (Hobj - eps):
a2 = H
else:
a2 = alph2
# Push a2 to 0 or C if very close
if a2 < 1e-8:
a2 = 0.0
elif a2 > (model.C - 1e-8):
a2 = model.C
# If examples can't be optimized within epsilon (eps), skip this pair
if (np.abs(a2 - alph2) < eps * (a2 + alph2 + eps)):
return 0, model
# Calculate new alpha 1 (a1)
a1 = alph1 + s * (alph2 - a2)
# Update threshold b to reflect newly calculated alphas
# Calculate both possible thresholds
b1 = E1 + y1 * (a1 - alph1) * k11 + y2 * (a2 - alph2) * k12 + model.b
b2 = E2 + y1 * (a1 - alph1) * k12 + y2 * (a2 - alph2) * k22 + model.b
# Set new threshold based on if a1 or a2 is bound by L and/or H
if 0 < a1 and a1 < model.C:
b_new = b1
elif 0 < a2 and a2 < model.C:
b_new = b2
# Average thresholds if both are bound
else:
b_new = (b1 + b2) * 0.5
# Update model object with new alphas & threshold
model.alphas[i1] = a1
model.alphas[i2] = a2
# Update error cache
# Error cache for optimized alphas is set to 0 if they're unbound
for index, alph in zip([i1, i2], [a1, a2]):
if 0.0 < alph < model.C:
model.errors[index] = 0.0
# Set non-optimized errors based on equation 12.11 in Platt's book
non_opt = [n for n in range(model.m) if (n != i1 and n != i2)]
model.errors[non_opt] = model.errors[non_opt] + \
y1*(a1 - alph1)*model.kernel(model.X[i1], model.X[non_opt]) + \
y2*(a2 - alph2)*model.kernel(model.X[i2], model.X[non_opt]) + model.b - b_new
# Update model threshold
model.b = b_new
return 1, model
def examine_example(i2, model):
y2 = model.y[i2]
alph2 = model.alphas[i2]
E2 = model.errors[i2]
r2 = E2 * y2
# Proceed if error is within specified tolerance (tol)
if ((r2 < -tol and alph2 < model.C) or (r2 > tol and alph2 > 0)):
if len(model.alphas[(model.alphas != 0) & (model.alphas != model.C)]) > 1:
# Use 2nd choice heuristic: choose the alpha with the maximum difference in error
if model.errors[i2] > 0:
i1 = np.argmin(model.errors)
elif model.errors[i2] <= 0:
i1 = np.argmax(model.errors)
step_result, model = take_step(i1, i2, model)
if step_result:
return 1, model
# Loop through non-zero and non-C alphas, starting at a random point
for i1 in np.roll(np.where((model.alphas != 0) & (model.alphas != model.C))
"""Classes for normalising imput data prior to running a model."""
# Copyright 2019 CSIRO (Data61)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import Optional, Tuple
import numpy as np
from tqdm import tqdm
from landshark import iteration
from landshark.basetypes import ContinuousArraySource, ContinuousType, Worker
from landshark.util import to_masked
log = logging.getLogger(__name__)
class StatCounter:
"""Class that computes online mean and variance."""
def __init__(self, n_features: int) -> None:
"""Initialise the counters."""
self._mean = np.zeros(n_features)
self._m2 = np.zeros(n_features)
self._n = np.zeros(n_features, dtype=int)
def update(self, array: np.ma.MaskedArray) -> None:
"""Update calclulations with new data."""
assert array.ndim == 2
assert array.shape[0] > 1
new_n = np.ma.count(array, axis=0)
new_mean = (np.ma.mean(array, axis=0)).data
new_mean[new_n == 0] = 0.0 # enforce this condition
new_m2 = (np.ma.var(array, axis=0, ddof=0) * new_n).data
add_n = new_n + self._n
if any(add_n == 0): # catch any totally masked images
add_n[add_n == 0] = 1
delta = new_mean - self._mean
delta_mean = delta * (new_n / add_n)
self._mean += delta_mean
self._m2 += new_m2 + (delta * self._n * delta_mean)
self._n += new_n
@property
def mean(self) -> np.ndarray:
"""Get the current estimate of the mean."""
assert np.all(self._n > 1)
return self._mean
@property
def sd(self) -> np.ndarray:
"""Get the current estimate of the standard deviation."""
assert np.all(self._n > 1)
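# Hypothetical usage sketch (not part of the original module): the online
# estimates should agree with batch statistics computed over the same data.
def _demo_stat_counter() -> bool:
    data = np.ma.masked_array(np.random.randn(100, 3))
    counter = StatCounter(n_features=3)
    for start in range(0, 100, 20):
        counter.update(data[start:start + 20])
    return bool(np.allclose(counter.mean, data.mean(axis=0)))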
import numpy as np
from sklearn import svm
#from sklearn import linear_model
#from statsmodels.sandbox.regression.predstd import wls_prediction_std
from sklearn.linear_model import LogisticRegression
from sklearn import preprocessing
from pygam import GAM, LinearGAM, s, l
#from patsy import dmatrix
#import pandas as pd
import statsmodels.api as sm
import statsmodels.formula.api as smf
########################################################################################################################
########################################################################################################################
## calculate propensity score
class propensityScore:
def __init__(self, Xall, Aall, uniqueIndex, dataLabel):
self.Xall = Xall
self.Aall = Aall
self.uniqueIndex = uniqueIndex
self.dataLabel = dataLabel
def p(self, obsSetting = 'trial'):
if obsSetting == 'trial':
pall = np.full(self.Aall.shape[0], 0.5)
# -*- coding: utf-8 -*-
# pylint: disable=invalid-name, too-many-arguments, too-many-branches,
# pylint: disable=too-many-locals, too-many-instance-attributes, too-many-lines
"""
This module implements the linear Kalman filter in both an object
oriented and procedural form. The KalmanFilter class implements
the filter by storing the various matrices in instance variables,
minimizing the amount of bookkeeping you have to do.
All Kalman filters operate with a predict->update cycle. The
predict step, implemented with the method or function predict(),
uses the state transition matrix F to predict the state in the next
time period (epoch). The state is stored as a gaussian (x, P), where
x is the state (column) vector, and P is its covariance. Covariance
matrix Q specifies the process covariance. In Bayesian terms, this
prediction is called the *prior*, which you can think of colloquially
as the estimate prior to incorporating the measurement.
The update step, implemented with the method or function `update()`,
incorporates the measurement z with covariance R, into the state
estimate (x, P). The class stores the system uncertainty in S,
the innovation (residual between prediction and measurement in
measurement space) in y, and the Kalman gain in k. The procedural
form returns these variables to you. In Bayesian terms this computes
the *posterior* - the estimate after the information from the
measurement is incorporated.
Whether you use the OO form or procedural form is up to you. If
matrices such as H, R, and F are changing each epoch, you'll probably
opt to use the procedural form. If they are unchanging, the OO
form is perhaps easier to use since you won't need to keep track
of these matrices. This is especially useful if you are implementing
banks of filters or comparing various KF designs for performance;
a trivial coding bug could lead to using the wrong sets of matrices.
This module also offers an implementation of the RTS smoother, and
other helper functions, such as log likelihood computations.
The Saver class allows you to easily save the state of the
KalmanFilter class after every update
This module expects NumPy arrays for all values that expect
arrays, although in a few cases, particularly method parameters,
it will accept types that convert to NumPy arrays, such as lists
of lists. These exceptions are documented in the method or function.
Examples
--------
The following example constructs a constant velocity kinematic
filter, filters noisy data, and plots the results. It also demonstrates
using the Saver class to save the state of the filter at each epoch.
.. code-block:: Python
import matplotlib.pyplot as plt
import numpy as np
from filterpy.kalman import KalmanFilter
from filterpy.common import Q_discrete_white_noise, Saver
r_std, q_std = 2., 0.003
cv = KalmanFilter(dim_x=2, dim_z=1)
cv.x = np.array([[0., 1.]]) # position, velocity
cv.F = np.array([[1, dt], [0, 1]])
cv.R = np.array([[r_std**2]])
cv.H = np.array([[1., 0.]])
cv.P = np.diag([.1**2, .03**2])
cv.Q = Q_discrete_white_noise(2, dt, q_std**2)
saver = Saver(cv)
for z in range(100):
cv.predict()
cv.update([z + randn() * r_std])
saver.save() # save the filter's state
saver.to_array()
plt.plot(saver.x[:, 0])
# plot all of the priors
plt.plot(saver.x_prior[:, 0])
# plot mahalanobis distance
plt.figure()
plt.plot(saver.mahalanobis)
This code implements the same filter using the procedural form
x = np.array([[0., 1.]]) # position, velocity
F = np.array([[1, dt], [0, 1]])
R = np.array([[r_std**2]])
H = np.array([[1., 0.]])
P = np.diag([.1**2, .03**2])
Q = Q_discrete_white_noise(2, dt, q_std**2)
xs = []
for z in range(100):
x, P = predict(x, P, F=F, Q=Q)
x, P = update(x, P, z=[z + randn() * r_std], R=R, H=H)
xs.append(x[0, 0])
plt.plot(xs)
For more examples see the test subdirectory, or refer to the
book cited below. In it I both teach Kalman filtering from basic
principles, and teach the use of this library in great detail.
FilterPy library.
http://github.com/rlabbe/filterpy
Documentation at:
https://filterpy.readthedocs.org
Supporting book at:
https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python
This is licensed under an MIT license. See the readme.MD file
for more information.
Copyright 2014-2018 <NAME>.
"""
from __future__ import absolute_import, division
from copy import deepcopy
from math import log, exp, sqrt
import sys
import warnings
import numpy as np
from numpy import dot, zeros, eye, isscalar, shape
import numpy.linalg as linalg
from filterpy.stats import logpdf
from filterpy.common import pretty_str, reshape_z
class KalmanFilter(object):
r""" Implements a Kalman filter. You are responsible for setting the
various state variables to reasonable values; the defaults will
not give you a functional filter.
For now the best documentation is my free book Kalman and Bayesian
Filters in Python [2]_. The test files in this directory also give you a
basic idea of use, albeit without much description.
In brief, you will first construct this object, specifying the size of
the state vector with dim_x and the size of the measurement vector that
you will be using with dim_z. These are mostly used to perform size checks
when you assign values to the various matrices. For example, if you
specified dim_z=2 and then try to assign a 3x3 matrix to R (the
measurement noise matrix) you will get an assert exception because R
should be 2x2. (If for whatever reason you need to alter the size of
things midstream just use the underscore version of the matrices to
assign directly: your_filter._R = a_3x3_matrix.)
After construction the filter will have default matrices created for you,
but you must specify the values for each. It’s usually easiest to just
overwrite them rather than assign to each element yourself. This will be
clearer in the example below. All are of type numpy.array.
Examples
--------
Here is a filter that tracks position and velocity using a sensor that only
reads position.
First construct the object with the required dimensionality.
.. code::
from filterpy.kalman import KalmanFilter
f = KalmanFilter (dim_x=2, dim_z=1)
Assign the initial value for the state (position and velocity). You can do this
with a two dimensional array like so:
.. code::
f.x = np.array([[2.], # position
[0.]]) # velocity
or just use a one dimensional array, which I prefer doing.
.. code::
f.x = np.array([2., 0.])
Define the state transition matrix:
.. code::
f.F = np.array([[1.,1.],
[0.,1.]])
Define the measurement function:
.. code::
f.H = np.array([[1.,0.]])
Define the covariance matrix. Here I take advantage of the fact that
P already contains np.eye(dim_x), and just multiply by the uncertainty:
.. code::
f.P *= 1000.
I could have written:
.. code::
f.P = np.array([[1000., 0.],
[ 0., 1000.] ])
You decide which is more readable and understandable.
Now assign the measurement noise. Here the dimension is 1x1, so I can
use a scalar
.. code::
f.R = 5
I could have done this instead:
.. code::
f.R = np.array([[5.]])
Note that this must be a 2 dimensional array, as must all the matrices.
Finally, I will assign the process noise. Here I will take advantage of
another FilterPy library function:
.. code::
from filterpy.common import Q_discrete_white_noise
f.Q = Q_discrete_white_noise(dim=2, dt=0.1, var=0.13)
Now just perform the standard predict/update loop:
.. code::
while some_condition_is_true:
z = get_sensor_reading()
f.predict()
f.update(z)
do_something_with_estimate (f.x)
**Procedural Form**
This module also contains stand alone functions to perform Kalman filtering.
Use these if you are not a fan of objects.
**Example**
.. code::
while True:
z, R = read_sensor()
x, P = predict(x, P, F, Q)
x, P = update(x, P, z, R, H)
See my book Kalman and Bayesian Filters in Python [2]_.
You will have to set the following attributes after constructing this
object for the filter to perform properly. Please note that there are
various checks in place to ensure that you have made everything the
'correct' size. However, it is possible to provide incorrectly sized
arrays such that the linear algebra can not perform an operation.
It can also fail silently - you can end up with matrices of a size that
allows the linear algebra to work, but are the wrong shape for the problem
you are trying to solve.
Parameters
----------
dim_x : int
Number of state variables for the Kalman filter. For example, if
you are tracking the position and velocity of an object in two
dimensions, dim_x would be 4.
This is used to set the default size of P, Q, and u
dim_z : int
Number of measurement inputs. For example, if the sensor
provides you with position in (x,y), dim_z would be 2.
dim_u : int (optional)
size of the control input, if it is being used.
Default value of 0 indicates it is not used.
compute_log_likelihood : bool (default = True)
Computes log likelihood by default, but this can be a slow
computation, so if you never use it you can turn this computation
off.
Attributes
----------
x : numpy.array(dim_x, 1)
Current state estimate. Any call to update() or predict() updates
this variable.
P : numpy.array(dim_x, dim_x)
Current state covariance matrix. Any call to update() or predict()
updates this variable.
x_prior : numpy.array(dim_x, 1)
Prior (predicted) state estimate. The *_prior and *_post attributes
are for convenience; they store the prior and posterior of the
current epoch. Read Only.
P_prior : numpy.array(dim_x, dim_x)
Prior (predicted) state covariance matrix. Read Only.
x_post : numpy.array(dim_x, 1)
Posterior (updated) state estimate. Read Only.
P_post : numpy.array(dim_x, dim_x)
Posterior (updated) state covariance matrix. Read Only.
z : numpy.array
Last measurement used in update(). Read only.
R : numpy.array(dim_z, dim_z)
Measurement noise matrix
Q : numpy.array(dim_x, dim_x)
Process noise matrix
F : numpy.array()
State Transition matrix
H : numpy.array(dim_z, dim_x)
Measurement function
y : numpy.array
Residual of the update step. Read only.
K : numpy.array(dim_x, dim_z)
Kalman gain of the update step. Read only.
S : numpy.array
System uncertainty (P projected to measurement space). Read only.
SI : numpy.array
Inverse system uncertainty. Read only.
log_likelihood : float
log-likelihood of the last measurement. Read only.
likelihood : float
likelihood of last measurement. Read only.
Computed from the log-likelihood. The log-likelihood can be very
small, meaning a large negative value such as -28000. Taking the
exp() of that results in 0.0, which can break typical algorithms
which multiply by this value, so by default we always return a
number >= sys.float_info.min.
mahalanobis : float
mahalanobis distance of the innovation. Read only.
inv : function, default numpy.linalg.inv
If you prefer another inverse function, such as the Moore-Penrose
pseudo inverse, set it to that instead: kf.inv = np.linalg.pinv
This is only used to invert self.S. If you know it is diagonal, you
might choose to set it to filterpy.common.inv_diagonal, which is
several times faster than numpy.linalg.inv for diagonal matrices.
alpha : float
Fading memory setting. 1.0 gives the normal Kalman filter, and
values slightly larger than 1.0 (such as 1.02) give a fading
memory effect - previous measurements have less influence on the
filter's estimates. This formulation of the Fading memory filter
(there are many) is due to <NAME> [1]_.
References
----------
.. [1] <NAME>. "Optimal State Estimation." <NAME>.
p. 208-212. (2006)
.. [2] <NAME>. "Kalman and Bayesian Filters in Python"
https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python
"""
def __init__(self, dim_x, dim_z, dim_u=0):
if dim_x < 1:
raise ValueError('dim_x must be 1 or greater')
if dim_z < 1:
raise ValueError('dim_z must be 1 or greater')
if dim_u < 0:
raise ValueError('dim_u must be 0 or greater')
self.dim_x = dim_x
self.dim_z = dim_z
self.dim_u = dim_u
self.x = zeros((dim_x, 1)) # state
self.P = eye(dim_x) # uncertainty covariance
self.Q = eye(dim_x) # process uncertainty
self.B = None # control transition matrix
self.F = eye(dim_x) # state transition matrix
self.H = zeros((dim_z, dim_x)) # Measurement function
self.R = eye(dim_z) # state uncertainty
self._alpha_sq = 1. # fading memory control
self.M = np.zeros((dim_x, dim_z)) # process-measurement cross correlation
self.z = np.array([[None]*self.dim_z]).T
# gain and residual are computed during the innovation step. We
# save them so that in case you want to inspect them for various
# purposes
self.K = np.zeros((dim_x, dim_z)) # kalman gain
self.y = zeros((dim_z, 1))
self.S = np.zeros((dim_z, dim_z)) # system uncertainty
self.SI = np.zeros((dim_z, dim_z)) # inverse system uncertainty
# identity matrix. Do not alter this.
self._I = np.eye(dim_x)
# these will always be a copy of x,P after predict() is called
self.x_prior = self.x.copy()
self.P_prior = self.P.copy()
# these will always be a copy of x,P after update() is called
self.x_post = self.x.copy()
self.P_post = self.P.copy()
# Only computed if requested via property
self._log_likelihood = log(sys.float_info.min)
self._likelihood = sys.float_info.min
self._mahalanobis = None
self.inv = np.linalg.inv
def predict(self, u=None, B=None, F=None, Q=None):
"""
Predict next state (prior) using the Kalman filter state propagation
equations.
Parameters
----------
u : np.array
Optional control vector. If not `None`, it is multiplied by B
to create the control input into the system.
B : np.array(dim_x, dim_z), or None
Optional control transition matrix; a value of None
will cause the filter to use `self.B`.
F : np.array(dim_x, dim_x), or None
Optional state transition matrix; a value of None
will cause the filter to use `self.F`.
Q : np.array(dim_x, dim_x), scalar, or None
Optional process noise matrix; a value of None will cause the
filter to use `self.Q`.
"""
if B is None:
B = self.B
if F is None:
F = self.F
if Q is None:
Q = self.Q
elif isscalar(Q):
Q = eye(self.dim_x) * Q
# x = Fx + Bu
if B is not None and u is not None:
self.x = dot(F, self.x) + dot(B, u)
else:
self.x = dot(F, self.x)
# P = FPF' + Q
self.P = self._alpha_sq * dot(dot(F, self.P), F.T) + Q
# save prior
self.x_prior = self.x.copy()
self.P_prior = self.P.copy()
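A quick sanity check of the prediction equations above (a hedged sketch, assuming FilterPy is installed): with alpha at its default of 1 and no control input, predict() should agree with the closed-form x' = Fx and P' = FPF' + Q.

import numpy as np
from filterpy.kalman import KalmanFilter

kf = KalmanFilter(dim_x=2, dim_z=1)
kf.x = np.array([[1.], [0.5]])            # position and velocity
kf.F = np.array([[1., 1.], [0., 1.]])
kf.Q = 0.01 * np.eye(2)

x_expected = kf.F @ kf.x
P_expected = kf.F @ kf.P @ kf.F.T + kf.Q
kf.predict()
assert np.allclose(kf.x, x_expected)
assert np.allclose(kf.P, P_expected)
assert np.allclose(kf.x_prior, kf.x)      # predict() saves a copy as the prior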
def update(self, z, R=None, H=None):
"""
Add a new measurement (z) to the Kalman filter.
If z is None, nothing is computed. However, x_post and P_post are
updated with the prior (x_prior, P_prior), and self.z is set to None.
Parameters
----------
z : (dim_z, 1): array_like
measurement for this update. z can be a scalar if dim_z is 1,
otherwise it must be convertible to a column vector.
R : np.array, scalar, or None
Optionally provide R to override the measurement noise for this
one call, otherwise self.R will be used.
H : np.array, or None
Optionally provide H to override the measurement function for this
one call, otherwise self.H will be used.
"""
# set to None to force recompute
self._log_likelihood = None
self._likelihood = None
self._mahalanobis = None
if z is None:
self.z = np.array([[None]*self.dim_z]).T
self.x_post = self.x.copy()
self.P_post = self.P.copy()
self.y = zeros((self.dim_z, 1))
return
z = reshape_z(z, self.dim_z, self.x.ndim)
if R is None:
R = self.R
elif isscalar(R):
R = eye(self.dim_z) * R
if H is None:
H = self.H
# y = z - Hx
# error (residual) between measurement and prediction
self.y = z - dot(H, self.x)
# common subexpression for speed
PHT = dot(self.P, H.T)
# S = HPH' + R
# project system uncertainty into measurement space
self.S = dot(H, PHT) + R
self.SI = self.inv(self.S)
# K = PH'inv(S)
# map system uncertainty into kalman gain
self.K = dot(PHT, self.SI)
# x = x + Ky
# predict new x with residual scaled by the kalman gain
self.x = self.x + dot(self.K, self.y)
# P = (I-KH)P(I-KH)' + KRK'
# This is more numerically stable
# and works for non-optimal K vs the equation
# P = (I-KH)P usually seen in the literature.
I_KH = self._I - dot(self.K, H)
self.P = dot(dot(I_KH, self.P), I_KH.T) + dot(dot(self.K, R), self.K.T)
# save measurement and posterior state
self.z = deepcopy(z)
self.x_post = self.x.copy()
self.P_post = self.P.copy()
def predict_steadystate(self, u=0, B=None):
"""
Predict state (prior) using the Kalman filter state propagation
equations. Only x is updated, P is left unchanged. See
update_steadystate() for a longer explanation of when to use this
method.
Parameters
----------
u : np.array
Optional control vector. If non-zero, it is multiplied by B
to create the control input into the system.
B : np.array(dim_x, dim_z), or None
Optional control transition matrix; a value of None
will cause the filter to use `self.B`.
"""
if B is None:
B = self.B
# x = Fx + Bu
if B is not None:
self.x = dot(self.F, self.x) + dot(B, u)
else:
self.x = dot(self.F, self.x)
# save prior
self.x_prior = self.x.copy()
self.P_prior = self.P.copy()
def update_steadystate(self, z):
"""
Add a new measurement (z) to the Kalman filter without recomputing
the Kalman gain K, the state covariance P, or the system
uncertainty S.
You can use this for LTI systems since the Kalman gain and covariance
converge to a fixed value. Precompute these and assign them explicitly,
or run the Kalman filter using the normal predict()/update() cycle
until they converge.
The main advantage of this call is speed. We do significantly less
computation, notably avoiding a costly matrix inversion.
Use in conjunction with predict_steadystate(), otherwise P will grow
without bound.
Parameters
----------
z : (dim_z, 1): array_like
measurement for this update. z can be a scalar if dim_z is 1,
otherwise it must be convertible to a column vector.
Examples
--------
>>> cv = kinematic_kf(dim=3, order=2) # 3D const velocity filter
>>> # let filter converge on representative data, then save k and P
>>> for i in range(100):
>>> cv.predict()
>>> cv.update([i, i, i])
>>> saved_K = np.copy(cv.K)
>>> saved_P = np.copy(cv.P)
later on:
>>> cv = kinematic_kf(dim=3, order=2) # 3D const velocity filter
>>> cv.K = np.copy(saved_K)
>>> cv.P = np.copy(saved_P)
>>> for i in range(100):
>>> cv.predict_steadystate()
>>> cv.update_steadystate([i, i, i])
"""
# set to None to force recompute
self._log_likelihood = None
self._likelihood = None
self._mahalanobis = None
if z is None:
self.z = np.array([[None]*self.dim_z]).T
self.x_post = self.x.copy()
self.P_post = self.P.copy()
self.y = zeros((self.dim_z, 1))
return
z = reshape_z(z, self.dim_z, self.x.ndim)
# y = z - Hx
# error (residual) between measurement and prediction
self.y = z - dot(self.H, self.x)
# x = x + Ky
# predict new x with residual scaled by the kalman gain
self.x = self.x + dot(self.K, self.y)
self.z = deepcopy(z)
self.x_post = self.x.copy()
self.P_post = self.P.copy()
# set to None to force recompute
self._log_likelihood = None
self._likelihood = None
self._mahalanobis = None
def update_correlated(self, z, R=None, H=None):
""" Add a new measurement (z) to the Kalman filter assuming that
process noise and measurement noise are correlated as defined in
the `self.M` matrix.
If z is None, nothing is changed.
Parameters
----------
z : (dim_z, 1): array_like
measurement for this update. z can be a scalar if dim_z is 1,
otherwise it must be convertible to a column vector.
R : np.array, scalar, or None
Optionally provide R to override the measurement noise for this
one call, otherwise self.R will be used.
H : np.array, or None
Optionally provide H to override the measurement function for this
one call, otherwise self.H will be used.
"""
# set to None to force recompute
self._log_likelihood = None
self._likelihood = None
self._mahalanobis = None
if z is None:
self.z = np.array([[None]*self.dim_z]).T
self.x_post = self.x.copy()
self.P_post = self.P.copy()
self.y = zeros((self.dim_z, 1))
return
z = reshape_z(z, self.dim_z, self.x.ndim)
if R is None:
R = self.R
elif isscalar(R):
R = eye(self.dim_z) * R
# rename for readability and a tiny extra bit of speed
if H is None:
H = self.H
# handle special case: if z is in form [[z]] but x is not a column
# vector dimensions will not match
if self.x.ndim == 1 and shape(z) == (1, 1):
z = z[0]
if shape(z) == (): # is it scalar, e.g. z=3 or z=np.array(3)
z = np.asarray([z])
# y = z - Hx
# error (residual) between measurement and prediction
self.y = z - dot(H, self.x)
# common subexpression for speed
PHT = dot(self.P, H.T)
# project system uncertainty into measurement space
self.S = dot(H, PHT) + dot(H, self.M) + dot(self.M.T, H.T) + R
self.SI = self.inv(self.S)
# K = PH'inv(S)
# map system uncertainty into kalman gain
self.K = dot(PHT + self.M, self.SI)
# x = x + Ky
# predict new x with residual scaled by the kalman gain
self.x = self.x + dot(self.K, self.y)
self.P = self.P - dot(self.K, dot(H, self.P) + self.M.T)
self.z = deepcopy(z)
self.x_post = self.x.copy()
self.P_post = self.P.copy()
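A minimal sketch of the correlated-noise update (hedged; assumes FilterPy is installed and uses illustrative numbers). M is assigned explicitly with shape (dim_x, dim_z) so that the HM and PH' + M terms above have compatible shapes.

import numpy as np
from filterpy.kalman import KalmanFilter

kf = KalmanFilter(dim_x=2, dim_z=1)
kf.x = np.array([[0.], [0.]])
kf.F = np.array([[1., 1.], [0., 1.]])
kf.H = np.array([[1., 0.]])
kf.R = np.array([[0.5]])
kf.M = np.array([[0.05], [0.01]])    # process-measurement cross correlation (illustrative)

kf.predict()
kf.update_correlated(z=[1.2])
print(kf.x_post)                     # posterior state after the correlated update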
def batch_filter(self, zs, Fs=None, Qs=None, Hs=None,
Rs=None, Bs=None, us=None, update_first=False,
saver=None):
""" Batch processes a sequences of measurements.
Parameters
----------
zs : list-like
list of measurements at each time step `self.dt`. Missing
measurements must be represented by `None`.
Fs : None, list-like, default=None
optional value or list of values to use for the state transition
matrix F.
If Fs is None then self.F is used for all epochs.
Otherwise it must contain a list-like list of F's, one for
each epoch. This allows you to have varying F per epoch.
Qs : None, np.array or list-like, default=None
optional value or list of values to use for the process error
covariance Q.
If Qs is None then self.Q is used for all epochs.
Otherwise it must contain a list-like list of Q's, one for
each epoch. This allows you to have varying Q per epoch.
Hs : None, np.array or list-like, default=None
optional list of values to use for the measurement matrix H.
If Hs is None then self.H is used for all epochs.
If Hs contains a single matrix, then it is used as H for all
epochs.
Otherwise it must contain a list-like list of H's, one for
each epoch. This allows you to have varying H per epoch.
Rs : None, np.array or list-like, default=None
optional list of values to use for the measurement error
covariance R.
If Rs is None then self.R is used for all epochs.
Otherwise it must contain a list-like list of R's, one for
each epoch. This allows you to have varying R per epoch.
Bs : None, np.array or list-like, default=None
optional list of values to use for the control transition matrix B.
If Bs is None then self.B is used for all epochs.
Otherwise it must contain a list-like list of B's, one for
each epoch. This allows you to have varying B per epoch.
us : None, np.array or list-like, default=None
optional list of values to use for the control input vector;
If us is None then None is used for all epochs (equivalent to 0,
or no control input).
Otherwise it must contain a list-like list of u's, one for
each epoch.
update_first : bool, optional, default=False
controls whether the order of operations is update followed by
predict, or predict followed by update. Default is predict->update.
saver : filterpy.common.Saver, optional
filterpy.common.Saver object. If provided, saver.save() will be
called after every epoch
Returns
-------
means : np.array((n,dim_x,1))
array of the state for each time step after the update. Each entry
is an np.array. In other words `means[k,:]` is the state at step
`k`.
covariance : np.array((n,dim_x,dim_x))
array of the covariances for each time step after the update.
In other words `covariance[k,:,:]` is the covariance at step `k`.
means_predictions : np.array((n,dim_x,1))
array of the state for each time step after the predictions. Each
entry is an np.array. In other words `means[k,:]` is the state at
step `k`.
covariance_predictions : np.array((n,dim_x,dim_x))
array of the covariances for each time step after the prediction.
In other words `covariance[k,:,:]` is the covariance at step `k`.
Examples
--------
.. code-block:: Python
# this example demonstrates tracking a measurement where the time
# between measurement varies, as stored in dts. This requires
# that F be recomputed for each epoch. The output is then smoothed
# with an RTS smoother.
zs = [t + random.randn()*4 for t in range (40)]
Fs = [np.array([[1., dt], [0, 1]]) for dt in dts]
(mu, cov, _, _) = kf.batch_filter(zs, Fs=Fs)
(xs, Ps, Ks) = kf.rts_smoother(mu, cov, Fs=Fs)
"""
#pylint: disable=too-many-statements
n = np.size(zs, 0)
if Fs is None:
Fs = [self.F] * n
if Qs is None:
Qs = [self.Q] * n
if Hs is None:
Hs = [self.H] * n
if Rs is None:
Rs = [self.R] * n
if Bs is None:
Bs = [self.B] * n
if us is None:
us = [0] * n
# mean estimates from Kalman Filter
if self.x.ndim == 1:
means = zeros((n, self.dim_x))
means_p = zeros((n, self.dim_x))
else:
means = zeros((n, self.dim_x, 1))
means_p = zeros((n, self.dim_x, 1))
# state covariances from Kalman Filter
covariances = zeros((n, self.dim_x, self.dim_x))
covariances_p = zeros((n, self.dim_x, self.dim_x))
if update_first:
for i, (z, F, Q, H, R, B, u) in enumerate(zip(zs, Fs, Qs, Hs, Rs, Bs, us)):
self.update(z, R=R, H=H)
means[i, :] = self.x
covariances[i, :, :] = self.P
self.predict(u=u, B=B, F=F, Q=Q)
means_p[i, :] = self.x
covariances_p[i, :, :] = self.P
if saver is not None:
saver.save()
else:
for i, (z, F, Q, H, R, B, u) in enumerate(zip(zs, Fs, Qs, Hs, Rs, Bs, us)):
self.predict(u=u, B=B, F=F, Q=Q)
means_p[i, :] = self.x
covariances_p[i, :, :] = self.P
self.update(z, R=R, H=H)
means[i, :] = self.x
covariances[i, :, :] = self.P
if saver is not None:
saver.save()
return (means, covariances, means_p, covariances_p)
def rts_smoother(self, Xs, Ps, Fs=None, Qs=None, inv=np.linalg.inv):
"""
Runs the Rauch-Tung-Striebel Kalman smoother on a set of
means and covariances computed by a Kalman filter. The usual input
would come from the output of `KalmanFilter.batch_filter()`.
Parameters
----------
Xs : numpy.array
array of the means (state variable x) of the output of a Kalman
filter.
Ps : numpy.array
array of the covariances of the output of a kalman filter.
Fs : list-like collection of numpy.array, optional
State transition matrix of the Kalman filter at each time step.
Optional, if not provided the filter's self.F will be used
Qs : list-like collection of numpy.array, optional
Process noise of the Kalman filter at each time step. Optional,
if not provided the filter's self.Q will be used
inv : function, default numpy.linalg.inv
If you prefer another inverse function, such as the Moore-Penrose
pseudo inverse, set it to that instead: kf.inv = np.linalg.pinv
Returns
-------
x : numpy.ndarray
smoothed means
P : numpy.ndarray
smoothed state covariances
K : numpy.ndarray
smoother gain at each step
Pp : numpy.ndarray
Predicted state covariances
Examples
--------
.. code-block:: Python
zs = [t + random.randn()*4 for t in range (40)]
(mu, cov, _, _) = kalman.batch_filter(zs)
(x, P, K, Pp) = rts_smoother(mu, cov, kf.F, kf.Q)
"""
if len(Xs) != len(Ps):
raise ValueError('length of Xs and Ps must be the same')
n = Xs.shape[0]
dim_x = Xs.shape[1]
if Fs is None:
Fs = [self.F] * n
if Qs is None:
Qs = [self.Q] * n
# smoother gain
K = zeros((n, dim_x, dim_x))
x, P, Pp = Xs.copy(), Ps.copy(), Ps.copy()
for k in range(n-2, -1, -1):
Pp[k] = dot(dot(Fs[k+1], P[k]), Fs[k+1].T) + Qs[k+1]
#pylint: disable=bad-whitespace
K[k] = dot(dot(P[k], Fs[k+1].T), inv(Pp[k]))
x[k] += dot(K[k], x[k+1] - dot(Fs[k+1], x[k]))
P[k] += dot(dot(K[k], P[k+1] - Pp[k]), K[k].T)
return (x, P, K, Pp)
def get_prediction(self, u=0):
"""
Predicts the next state of the filter and returns it without
altering the state of the filter.
Parameters
----------
u : np.array
optional control input
Returns
-------
(x, P) : tuple
State vector and covariance array of the prediction.
"""
x = dot(self.F, self.x) + dot(self.B, u)
P = self._alpha_sq * dot(dot(self.F, self.P), self.F.T) + self.Q
return (x, P)
def get_update(self, z=None):
"""
Computes the new estimate based on measurement `z` and returns it
without altering the state of the filter.
Parameters
----------
z : (dim_z, 1): array_like
measurement for this update. z can be a scalar if dim_z is 1,
otherwise it must be convertible to a column vector.
Returns
-------
(x, P) : tuple
State vector and covariance array of the update.
"""
if z is None:
return self.x, self.P
z = reshape_z(z, self.dim_z, self.x.ndim)
R = self.R
H = self.H
P = self.P
x = self.x
# error (residual) between measurement and prediction
y = z - dot(H, x)
# common subexpression for speed
PHT = dot(P, H.T)
# project system uncertainty into measurement space
S = dot(H, PHT) + R
# map system uncertainty into kalman gain
K = dot(PHT, self.inv(S))
# predict new x with residual scaled by the kalman gain
x = x + dot(K, y)
# P = (I-KH)P(I-KH)' + KRK'
I_KH = self._I - dot(K, H)
P = dot(dot(I_KH, P), I_KH.T) + dot(dot(K, R), K.T)
return x, P
def residual_of(self, z):
"""
Returns the residual for the given measurement (z). Does not alter
the state of the filter.
"""
return z - dot(self.H, self.x_prior)
def measurement_of_state(self, x):
"""
Helper function that converts a state into a measurement.
Parameters
----------
x : np.array
kalman state vector
Returns
-------
z : (dim_z, 1): array_like
measurement for this update. z can be a scalar if dim_z is 1,
otherwise it must be convertible to a column vector.
"""
return dot(self.H, x)
@property
def log_likelihood(self):
"""
log-likelihood of the last measurement.
"""
if self._log_likelihood is None:
self._log_likelihood = logpdf(x=self.y, cov=self.S)
return self._log_likelihood
@property
def likelihood(self):
"""
Computed from the log-likelihood. The log-likelihood can be very
small, meaning a large negative value such as -28000. Taking the
exp() of that results in 0.0, which can break typical algorithms
which multiply by this value, so by default we always return a
number >= sys.float_info.min.
"""
if self._likelihood is None:
self._likelihood = exp(self.log_likelihood)
if self._likelihood == 0:
self._likelihood = sys.float_info.min
return self._likelihood
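The clamping described in the docstring can be reproduced in isolation; this small sketch mirrors the property's logic:

import sys
from math import exp

log_likelihood = -28000.0                  # a very unlikely measurement
raw = exp(log_likelihood)                  # underflows to exactly 0.0
likelihood = raw if raw != 0 else sys.float_info.min
print(raw, likelihood)                     # 0.0  2.2250738585072014e-308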
@property
def mahalanobis(self):
"""
Mahalanobis distance of measurement. E.g. 3 means measurement
was 3 standard deviations away from the predicted value.
Returns
-------
mahalanobis : float
"""
if self._mahalanobis is None:
self._mahalanobis = sqrt(float(dot(dot(self.y.T, self.SI), self.y)))
return self._mahalanobis
@property
def alpha(self):
"""
Fading memory setting. 1.0 gives the normal Kalman filter, and
values slightly larger than 1.0 (such as 1.02) give a fading
memory effect - previous measurements have less influence on the
filter's estimates. This formulation of the Fading memory filter
(there are many) is due to <NAME> [1]_.
"""
return self._alpha_sq**.5
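For example, a slight fading-memory effect can be enabled like this (hedged sketch; 1.02 is just the illustrative value mentioned above):

from filterpy.kalman import KalmanFilter

kf = KalmanFilter(dim_x=2, dim_z=1)
kf.alpha = 1.02       # stored internally as alpha**2; must be >= 1
print(kf.alpha)       # ~1.02; older measurements now decay in influence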
def log_likelihood_of(self, z):
"""
log likelihood of the measurement `z`. This should only be called
after a call to update(). Calling after predict() will yield an
incorrect result."""
if z is None:
return log(sys.float_info.min)
return logpdf(z, dot(self.H, self.x), self.S)
@alpha.setter
def alpha(self, value):
if not np.isscalar(value) or value < 1:
raise ValueError('alpha must be a scalar greater than or equal to 1')
self._alpha_sq = value**2
def __repr__(self):
return '\n'.join([
'KalmanFilter object',
pretty_str('dim_x', self.dim_x),
pretty_str('dim_z', self.dim_z),
pretty_str('dim_u', self.dim_u),
pretty_str('x', self.x),
pretty_str('P', self.P),
pretty_str('x_prior', self.x_prior),
pretty_str('P_prior', self.P_prior),
pretty_str('x_post', self.x_post),
pretty_str('P_post', self.P_post),
pretty_str('F', self.F),
pretty_str('Q', self.Q),
pretty_str('R', self.R),
pretty_str('H', self.H),
pretty_str('K', self.K),
pretty_str('y', self.y),
pretty_str('S', self.S),
pretty_str('SI', self.SI),
pretty_str('M', self.M),
pretty_str('B', self.B),
pretty_str('z', self.z),
pretty_str('log-likelihood', self.log_likelihood),
pretty_str('likelihood', self.likelihood),
pretty_str('mahalanobis', self.mahalanobis),
pretty_str('alpha', self.alpha),
pretty_str('inv', self.inv)
])
def test_matrix_dimensions(self, z=None, H=None, R=None, F=None, Q=None):
"""
Performs a series of asserts to check that the size of everything
is what it should be. This can help you debug problems in your design.
If you pass in H, R, F, Q those will be used instead of this object's
value for those matrices.
Testing `z` (the measurement) is problematic. x is a vector, and can be
implemented as either a 1D array or as a nx1 column vector. Thus Hx
can be of different shapes. Then, if Hx is a single value, it can
be either a 1D array or 2D vector. If either is true, z can reasonably
be a scalar (either '3' or np.array('3') are scalars under this
definition), a 1D, 1 element array, or a 2D, 1 element array. You are
allowed to pass in any combination that works.
"""
if H is None:
H = self.H
if R is None:
R = self.R
if F is None:
F = self.F
if Q is None:
Q = self.Q
x = self.x
P = self.P
assert x.ndim == 1 or x.ndim == 2, \
"x must have one or two dimensions, but has {}".format(x.ndim)
if x.ndim == 1:
assert x.shape[0] == self.dim_x, \
"Shape of x must be ({},{}), but is {}".format(
self.dim_x, 1, x.shape)
else:
assert x.shape == (self.dim_x, 1), \
"Shape of x must be ({},{}), but is {}".format(
self.dim_x, 1, x.shape)
assert P.shape == (self.dim_x, self.dim_x), \
"Shape of P must be ({},{}), but is {}".format(
self.dim_x, self.dim_x, P.shape)
assert Q.shape == (self.dim_x, self.dim_x), \
"Shape of Q must be ({},{}), but is {}".format(
self.dim_x, self.dim_x, Q.shape)
assert F.shape == (self.dim_x, self.dim_x), \
"Shape of F must be ({},{}), but is {}".format(
self.dim_x, self.dim_x, F.shape)
assert np.ndim(H) == 2, \
"Shape of H must be (dim_z, {}), but is {}".format(
P.shape[0], shape(H))
assert H.shape[1] == P.shape[0], \
"Shape of H must be (dim_z, {}), but is {}".format(
P.shape[0], H.shape)
# shape of R must be the same as HPH'
hph_shape = (H.shape[0], H.shape[0])
r_shape = shape(R)
if H.shape[0] == 1:
# r can be scalar, 1D, or 2D in this case
assert r_shape == () or r_shape == (1,) or r_shape == (1, 1), \
"R must be scalar or one element array, but is shaped {}".format(
r_shape)
else:
assert r_shape == hph_shape, \
"shape of R should be {} but it is {}".format(hph_shape, r_shape)
if z is not None:
z_shape = shape(z)
else:
z_shape = (self.dim_z, 1)
# H@x must have shape of z
Hx = dot(H, x)  # API: numpy.dot
'''Visualize tsne on samples that pass through specific nodes.'''
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
from nbdt import data
import torchvision
import torchvision.transforms as transforms
import os
import argparse
import json
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import models
from PIL import Image, ImageOps
from PIL.ImageColor import getcolor
from numpy import linalg as LA
from nbdt.utils import (
generate_fname, populate_kwargs, Colors, get_saved_word2vec, DATASET_TO_FOLDER_NAME, get_word_embedding, get_transform_from_name
)
datasets = ('CIFAR10', 'CIFAR100') + data.imagenet.names + data.custom.names + data.awa2.names
parser = argparse.ArgumentParser(description='T-SNE vis generation')
parser.add_argument('--batch-size', default=512, type=int,
help='Batch size used for training')
parser.add_argument('--dataset', default='CIFAR10', choices=datasets)
parser.add_argument('--model', default='ResNet18', choices=list(models.get_model_choices()))
parser.add_argument('--resume', '-r', action='store_true', help='resume from checkpoint')
# extra general options for main script
parser.add_argument('--checkpoint-fname', default='',
help='Fname to save new model to')
parser.add_argument('--path-resume', default='',
help='Overrides checkpoint path generation')
parser.add_argument('--name', default='',
help='Name of experiment. Used for checkpoint filename')
parser.add_argument('--pretrained', action='store_true',
help='Download pretrained model. Not all models support this.')
parser.add_argument('--new-classes', nargs='*',
help='New class names used for zero-shot.')
parser.add_argument('--new-labels', nargs='*', type=int,
help='New class indices used for zero-shot.')
parser.add_argument('--input-size', type=int,
help='Set transform train and val. Samples are resized to '
'input-size + 32.')
parser.add_argument('--experiment-name', type=str, help='name of experiment in wandb')
parser.add_argument('--wandb', action='store_true', help='log using wandb')
parser.add_argument('--word2vec', action='store_true')
parser.add_argument('--dimension', type=int, default=300, help='dimension of word2vec embeddings')
parser.add_argument('--num-samples', type=int, default=1)
parser.add_argument('--replace', action='store_true', help='replace the fc rows')
data.custom.add_arguments(parser)
args = parser.parse_args()
device = 'cuda' if torch.cuda.is_available() else 'cpu'
best_acc = 0 # best test accuracy
start_epoch = 0 # start from epoch 0 or last checkpoint epoch
# Data
print('==> Preparing data..')
dataset = getattr(data, args.dataset)
transform_train, transform_test = get_transform_from_name(args.dataset, dataset, args.input_size)
dataset_kwargs = {}
populate_kwargs(args, dataset_kwargs, dataset, name=f'Dataset {args.dataset}',
keys=data.custom.keys, globals=globals())
if args.dataset == 'MiniImagenet':
trainset = dataset(**dataset_kwargs, root='./data',
zeroshot=True, train=True, download=True, transform=transform_train)
testset = dataset(**dataset_kwargs, root='./data',
zeroshot=True, train=False, download=True, transform=transform_test)
else:
trainset = dataset(**dataset_kwargs, root='./data', train=True, download=True, transform=transform_train)
testset = dataset(**dataset_kwargs, root='./data', train=False, download=True, transform=transform_test)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=min(args.batch_size, 100), shuffle=False, num_workers=0)
testloader = torch.utils.data.DataLoader(testset, batch_size=min(args.batch_size, 100), shuffle=True, num_workers=0)
# Model
print('==> Building model..')
model = getattr(models, args.model)
Colors.cyan(f'Testing with dataset {args.dataset} and {len(testset.classes)} classes')
if args.replace:
model_kwargs = {'num_classes': len(testset.classes)}
else:
if args.dataset == 'MiniImagenet':
n_new_classes = 20
elif args.new_classes is not None:
n_new_classes = len(args.new_classes)
else:
n_new_classes = len(args.new_labels)
model_kwargs = {'num_classes': len(testset.classes) - n_new_classes}
if args.pretrained:
try:
print('==> Loading pretrained model..')
# net = model(pretrained=True, **model_kwargs)
net = model(pretrained=True)
# TODO: this is hardcoded
if int(args.model[6:]) <= 34:
net.fc = nn.Linear(512, model_kwargs['num_classes'])
else:
net.fc = nn.Linear(512*4, model_kwargs['num_classes'])
except Exception as e:
Colors.red(f'Fatal error: {e}')
exit()
else:
net = model(**model_kwargs)
if device == 'cuda':
net = torch.nn.DataParallel(net)
cudnn.benchmark = True
net = net.to(device)
checkpoint_fname = args.checkpoint_fname or \
'{}-placeholder-{}'.format(args.path_resume.replace('.pth','').replace('./checkpoint/',''), args.num_samples)
resume_path = args.path_resume or './checkpoint/{}.pth'.format(checkpoint_fname)
if args.resume:
# Load checkpoint.
print('==> Resuming from checkpoint..')
assert os.path.isdir('checkpoint'), 'Error: no checkpoint directory found!'
if not os.path.exists(resume_path):
print('==> No checkpoint found. Skipping...')
else:
checkpoint = torch.load(resume_path)
if 'net' in checkpoint:
net.load_state_dict(checkpoint['net'])
best_acc = checkpoint['acc']
start_epoch = checkpoint['epoch']
Colors.cyan(f'==> Checkpoint found for epoch {start_epoch} with accuracy '
f'{best_acc} at {resume_path}')
else:
net.load_state_dict(checkpoint)
Colors.cyan(f'==> Checkpoint found at {resume_path}')
# get one sample of each zeroshot class, and get its output at linear layer
if args.dataset == 'MiniImagenet':
cls_to_vec = {trainset.classes[cls]:[] for i, cls in enumerate(list(range(64, 84)))}
elif args.new_classes is None:
cls_to_vec = {cls: [] for i, cls in enumerate(testset.classes) if i in args.new_labels}
else:
cls_to_vec = {cls: [] for cls in args.new_classes}
print(cls_to_vec)
hooked_inputs = None
def testhook(self, input, output):
global hooked_inputs
hooked_inputs = input[0].cpu().numpy()
keys = ['fc', 'linear']
for key in keys:
fc = getattr(net.module, key, None)
if fc is not None:
break
fc.register_forward_hook(testhook)
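The hook registered above captures whatever activations flow into the final fully connected layer on each forward pass. A standalone illustration of the same PyTorch mechanism, using a hypothetical toy layer rather than this script's model:

import torch
import torch.nn as nn

captured = {}
def grab_inputs(module, inputs, output):
    captured['fc_in'] = inputs[0].detach().cpu().numpy()

layer = nn.Linear(4, 2)
handle = layer.register_forward_hook(grab_inputs)
layer(torch.randn(3, 4))
print(captured['fc_in'].shape)   # (3, 4): the tensor that entered the layer
handle.remove()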
net.eval()
# load projection matrix
if args.word2vec:
word2vec_path = os.path.join(os.path.join(trainset.root, DATASET_TO_FOLDER_NAME[args.dataset]), "word2vec/")
projection_matrix = np.load('data/projection.npy')
num_samples = 0
with torch.no_grad():
for i, (inputs, labels) in enumerate(testloader):
if args.dataset in ("AnimalsWithAttributes2",):
inputs, predicates = inputs
net(inputs)
for vec, label in zip(hooked_inputs, labels):
num_samples = min([len(cls_to_vec[c]) for c in cls_to_vec])
if num_samples >= args.num_samples:
print("found and breaking")
break
cls_name = trainset.classes[label]
if cls_name in cls_to_vec and len(cls_to_vec[cls_name]) < args.num_samples:
if args.word2vec:
word_vec = get_saved_word2vec(word2vec_path + cls_name + '.npy', args.dimension, projection_matrix)
cls_to_vec[cls_name] = word_vec
else:
cls_to_vec[cls_name].append(vec)
num_samples = min([len(cls_to_vec[c]) for c in cls_to_vec])
for cls in cls_to_vec:
cls_to_vec[cls] = np.average(np.array(cls_to_vec[cls]), axis=0)
cls_to_vec[cls] -= np.mean(cls_to_vec[cls])
cls_to_vec[cls] /= LA.norm(cls_to_vec[cls])  # API: numpy.linalg.norm
# Author: <NAME>
import numpy as np
import matplotlib.pyplot as pplot
def matproj(im, dim, method='max', slice_index=0):
if method == 'max':
im = np.max(im, dim)
elif method == 'mean':
im = np.mean(im, dim)
elif method == 'sum':
im = np.sum(im, dim)
elif method == 'slice':
im = im[slice_index]
else:
raise ValueError("Invalid projection method")
return im
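A quick illustration of matproj on a tiny volume (hedged usage sketch of the function defined above):

import numpy as np

vol = np.arange(24).reshape(2, 3, 4)                    # a small ZYX volume
print(matproj(vol, 0, method='max').shape)              # (3, 4): max over Z
print(matproj(vol, 0, method='slice', slice_index=1))   # just the second Z slice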
def imgtoprojection(im1, proj_all=False, proj_method='max', colors=lambda i: [1, 1, 1], global_adjust=False, local_adjust=False):
"""
Outputs projections of a 4d CZYX numpy array into a CYX numpy array, allowing for color masks for each input channel
as well as adjustment options
:param im1: Either a 4d numpy array or a list of 3D or 2D numpy arrays. The input that will be projected
:param proj_all: boolean. True outputs XY, YZ, and XZ projections in a grid, False just outputs XY. False by default
:param proj_method: string. Method by which to do projections. 'Max' by default
:param colors: Can be either a string which corresponds to a cmap function in matplotlib, a function that
takes in the channel index and returns a list of numbers, or a list of lists containing the color multipliers.
:param global_adjust: boolean. If true, scales each color channel to set its max to be 255
after combining all channels. False by default
:param local_adjust: boolean. If true, performs contrast adjustment on each channel individually. False by default
:return: a CYX numpy array containing the requested projections
"""
# turn list of 2d or 3d arrays into single 4d array if needed
try:
if isinstance(im1, (list, tuple)):
# if only YX, add a single Z dimension
if im1[0].ndim == 2:
im1 = [np.expand_dims(c, axis=0) for c in im1]  # API: numpy.expand_dims
#!/usr/bin/env python
"""Resegment the dataset block boundaries.
"""
import sys
import argparse
import os
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from skimage.color import label2rgb
from scipy import ndimage as ndi
from skimage.segmentation import relabel_sequential, watershed
from stapl3d.segmentation import features
from stapl3d import Image, MaskImage, LabelImage, wmeMPI
from stapl3d.reporting import (
gen_orthoplot,
load_parameters,
get_paths,
get_centreslice,
get_centreslices,
get_zyx_medians,
get_cslc,
)
def main(argv):
"""Resegment the dataset block boundaries."""
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument(
'-i', '--images_in',
required=True,
nargs='*',
help="""paths to hdf5 datasets <filepath>.h5/<...>/<dataset>:
datasets to stitch together""",
)
parser.add_argument(
'-s', '--blocksize',
required=True,
nargs=3,
type=int,
default=[],
help='size of the datablock',
)
parser.add_argument(
'-m', '--blockmargin',
nargs=3,
type=int,
default=[0, 64, 64],
help='the datablock overlap used',
)
parser.add_argument(
'-A', '--axis',
type=int,
default=2,
help='',
)
parser.add_argument(
'-L', '--seamnumbers',
nargs='*',
type=int,
default=[-1, -1, -1],
help='',
)
parser.add_argument(
'-a', '--mask_dataset',
help='use this mask h5 dataset to mask the labelvolume',
)
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument(
'-r', '--relabel',
action='store_true',
help='apply incremental labeling to each block'
)
group.add_argument(
'-l', '--maxlabel',
help='maximum labelvalue in the full dataset'
)
parser.add_argument(
'-p', '--in_place',
action='store_true',
help='write the resegmentation back to the input datasets'
)
parser.add_argument(
'-o', '--outputstem',
help='template for output',
)
parser.add_argument(
'-S', '--save_steps',
action='store_true',
help='save intermediate results'
)
args = parser.parse_args()
resegment_block_boundaries(
args.images_in,
args.blocksize,
args.blockmargin,
args.axis,
args.seamnumbers,
args.mask_dataset,
args.relabel,
args.maxlabel,
args.in_place,
args.outputstem,
args.save_steps,
)
def resegment_block_boundaries(
images_in,
blocksize,
blockmargin=[0, 64, 64],
axis=2,
seamnumbers=[-1, -1, -1],
mask_dataset='',
relabel=False,
maxlabel='',
in_place=False,
outputstem='',
save_steps=False,
):
"""Resegment the dataset block boundaries."""
# NB: images_in are sorted in xyz-order while most processing will map to zyx
# TODO: may want to switch to xyz here; or perhaps sort according to zyx for consistency
images_in.sort()
step = 'resegment'
# paths = get_paths(images_in[0], -1, 0, outputstem, step, save_steps)
paths = {}
paths['out_base'] = outputstem
paths['out_h5'] = '{}.h5/{}'.format(paths['out_base'], '{}')
paths['main'] = paths['steps'] = paths['out_h5']
paths['params'] = '{}-{}-params.pickle'.format(paths['out_base'], step)
report = {
'parameters': locals(),
'paths': paths,
'medians': {},
'centreslices': {}
}
info, filelist, ids = get_block_info(images_in, blocksize, blockmargin)
blockmap = get_blockmap(info)
# FIXME: assuming call with single axis for now
seamnumber = seamgrid_ravel_multi_index(blockmap, seamnumbers, axis)
maxlabel = prep_maxlabel(maxlabel, seamnumber, filelist, ids)
print('starting with maxlabel = {:12d}\n'.format(maxlabel))
report['j'] = 0
report['seam'] = seamnumber
pairs = get_seam_pairs(blockmap, seamnumbers, axis)
for pair in pairs:
print('{:03d}: pair {} over axis {}'.format(report['j'], pair, axis))
margin = blockmargin[2] if axis == 0 else blockmargin[axis]
info_ims = tuple(info[idx] for idx in pair)
n_max = find_nmax(info_ims, axis, margin)
n_max = min(10, n_max)
if n_max < 2:
print('n_max', n_max)
continue
n = min(n_max, 3)
report['axis'] = axis
report['margin'] = margin
maxlabel, report = process_pair(info_ims, ids, margin, axis, maxlabel, n, n_max, report)
report['j'] += 1
print('maxlabel = {:08d}\n'.format(maxlabel))
def get_block_info(images_in, blocksize, margins=[0, 64, 64]):
"""Get info on the blocks of a dataset."""
inputfiles = []
for image_in in images_in:
fstem, ids = image_in.split('.h5')
inputfiles.append('{}.h5'.format(fstem))
info = {}
for i, inputfile in enumerate(inputfiles):
# NOTE: after inputfiles.sort(), i is a linear index
# into image info incrementing first z then y then x
info[i] = features.split_filename(inputfile)[0]
info[i]['inputfile'] = inputfile
zyx = [info[i][dim] + margins[j] if info[i][dim] > 0 else 0
for j, dim in enumerate('zyx')]
info[i]['blockcoords'] = [
int(zyx[0] / blocksize[0]),
int(zyx[1] / blocksize[1]),
int(zyx[2] / blocksize[2]),
]
return info, inputfiles, ids[1:]
def get_blockmap(info):
"""Get a map of block indices."""
ds = np.amax(np.array([v['blockcoords'] for k, v in info.items()]), axis=0) + 1
blockmap = np.zeros(ds, dtype='uint16')
for k, v in info.items():
bc = v['blockcoords']
blockmap[bc[0], bc[1], bc[2]] = k
return blockmap
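For instance, two blocks stacked along x produce a 1x1x2 map (hedged toy input; only the 'blockcoords' key is consulted by get_blockmap):

toy_info = {
    0: {'blockcoords': [0, 0, 0]},
    1: {'blockcoords': [0, 0, 1]},
}
bm = get_blockmap(toy_info)
print(bm.shape)   # (1, 1, 2)
print(bm)         # [[[0 1]]]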
def get_seam_pairs(blockmap, seamnumbers, axis):
ad = {0: {'ax': [1, 2], 'tp': [0, 1], 'sh': (1,4)},
1: {'ax': [axis], 'tp': [1, 0], 'sh': (-1, 2)},
2: {'ax': [axis], 'tp': [0, 1], 'sh': (-1, 2)}}
slcs = [slice(seamnumbers[d], seamnumbers[d] + 2)
if d in ad[axis]['ax'] else slice(None)
for d in range(3)]
pairs = np.squeeze(blockmap[tuple(slcs)])
pairs = np.reshape(np.transpose(pairs, ad[axis]['tp']), ad[axis]['sh'])
return pairs
def prep_maxlabel(maxlabel, seamnumber, filelist='', ids='', maxlabel_margin=100000):
if maxlabel == 'attrs':
maxlabels = get_maxlabels_from_attribute(filelist, ids, '')
maxlabel = max(maxlabels)
src = 'attributes'
try:
maxlabel = int(maxlabel)
src = 'integer argument'
except ValueError:
maxlabels = np.loadtxt(maxlabel, dtype=np.uint32)
maxlabel = max(maxlabels)
src = 'textfile'
print('read maxlabel {:12d} from {}'.format(maxlabel, src))
maxlabel += seamnumber * maxlabel_margin
return maxlabel
def seamgrid_ravel_multi_index(blockmap, seamnumbers, axis):
if axis == 0:
seamgrid_shape = [blockmap.shape[1] - 1, blockmap.shape[2] - 1]
seamnumber = np.ravel_multi_index(seamnumbers[1:], seamgrid_shape)
else:
seamnumber = seamnumbers[axis]
print('linear seamindex = {}'.format(seamnumber))
return seamnumber
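Concretely, for a 2x4x5 block grid there are 3 y-seams and 4 x-seams, and the quad-seam at (y=1, x=2) gets linear index 6 (hedged sketch using the function above):

import numpy as np

blockmap = np.zeros((2, 4, 5), dtype='uint16')              # toy block grid
print(seamgrid_ravel_multi_index(blockmap, [-1, 1, 2], 0))  # 1*4 + 2 = 6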
def find_nmax(info_ims, axis=2, margin=64):
"""Calculate how many margin-blocks fit into the dataset."""
sizes = []
if axis == 2:
sizes += [info_im['X'] - info_im['x'] for info_im in info_ims]
elif axis == 1:
sizes += [info_im['Y'] - info_im['y'] for info_im in info_ims]
elif axis == 0:
sizes += [info_im['X'] - info_im['x'] for info_im in info_ims]
sizes += [info_im['Y'] - info_im['y'] for info_im in info_ims]
n_max = int(np.amin(np.array(sizes)) / margin)
return n_max
def write_output(outpath, out, props, imtype='Label'):
"""Write data to an image on disk."""
props['dtype'] = out.dtype
if imtype == 'Label':
mo = LabelImage(outpath, **props)
elif imtype == 'Mask':
mo = MaskImage(outpath, **props)
else:
mo = Image(outpath, **props)
mo.create()
mo.write(out)
return mo
def read_image(im_info, ids='segm/labels_memb_del', imtype='Label'):
"""Read an h5 dataset as an Image object."""
fname = '{}_{}'.format(im_info['base'], im_info['postfix'])
fstem = os.path.join(im_info['datadir'], fname)
if imtype == 'Label':
im = LabelImage('{}.h5/{}'.format(fstem, ids))
elif imtype == 'Mask':
im = MaskImage('{}.h5/{}'.format(fstem, ids))
else:
im = Image('{}.h5/{}'.format(fstem, ids))
im.load(load_data=False)
if imtype == 'Label':
im.set_maxlabel()
return im
def read_images(info_ims, ids='segm/labels_memb_del', imtype='Label',
axis=2, margin=64, n=2, include_margin=False, concat=False):
"""Read a set of block and slice along the block margins."""
segs = tuple(read_image(info_im, ids=ids, imtype=imtype) for info_im in info_ims)
set_to_margin_slices(segs, axis, margin, n, include_margin)
segs_marg = tuple(seg.slice_dataset() for seg in segs)
if concat:
segs_marg = concat_images(segs_marg, axis)
return segs, segs_marg
def set_to_margin_slices(segs, axis=2, margin=64, n=2, include_margin=False):
"""Set slices for selecting margins."""
def slice_ll(margin, margin_n):
return slice(margin, margin_n, 1)
def slice_ur(seg, axis, margin, margin_n):
start = seg.dims[axis] - margin_n
stop = seg.dims[axis] - margin
return slice(start, stop, 1)
mn = margin * n
if include_margin: # select data including the full margin strip
m = 0
else: # select only the part within the block-proper (i.e. minus margins)
m = margin
if axis > 0:
segs[0].slices[axis] = slice_ur(segs[0], axis, m, mn) # left block
segs[1].slices[axis] = slice_ll(m, mn) # right block
elif axis == 0: # NOTE: axis=0 hijacked for quads
# left-bottom block
segs[0].slices[2] = slice_ur(segs[0], 2, m, mn)
segs[0].slices[1] = slice_ur(segs[0], 1, m, mn)
# right-bottom block
segs[1].slices[2] = slice_ll(m, mn)
segs[1].slices[1] = slice_ur(segs[1], 1, m, mn)
# left-top block
segs[2].slices[2] = slice_ur(segs[2], 2, m, mn)
segs[2].slices[1] = slice_ll(m, mn)
# right-top block
segs[3].slices[2] = slice_ll(m, mn)
segs[3].slices[1] = slice_ll(m, mn)
def get_labels(segs_marg, axis=2, margin=64, include_margin=False, bg=set([0])):
"""Find the labels on the boundary of blocks."""
# NOTE: if include_margin: <touching the boundary and into the margin>
b = margin if include_margin else 1
if axis == 2:
seg1_labels = set(np.unique(segs_marg[0][:, :, -b:]))
seg1_labels -= bg
seg2_labels = set(np.unique(segs_marg[1][:, :, :b]))
seg2_labels -= bg
return seg1_labels, seg2_labels
elif axis == 1:
seg1_labels = set(np.unique(segs_marg[0][:, -b:, :]))
seg1_labels -= bg
seg2_labels = set(np.unique(segs_marg[1][:, :b, :]))
seg2_labels -= bg
return seg1_labels, seg2_labels
elif axis == 0: # NOTE: axis=0 hijacked for quads
seg1_labels = set(np.unique(segs_marg[0][:, -margin:, -b:]))
seg1_labels |= set(np.unique(segs_marg[0][:, -b:, -margin:]))
seg1_labels -= bg
seg2_labels = set(np.unique(segs_marg[1][:, -margin:, :b]))
seg2_labels |= set(np.unique(segs_marg[1][:, -b:, :margin]))
seg2_labels -= bg
seg3_labels = set(np.unique(segs_marg[2][:, :margin, -b:]))
seg3_labels |= set(np.unique(segs_marg[2][:, :b, -margin:]))
seg3_labels -= bg
seg4_labels = set(np.unique(segs_marg[3][:, :margin, :b]))
seg4_labels |= set(np.unique(segs_marg[3][:, :b, :margin]))
seg4_labels -= bg
return seg1_labels, seg2_labels, seg3_labels, seg4_labels
def check_margin(mask, axis):
"""Check if all voxels marked for resegmentation are within margin."""
msum = False
if axis == 1 or axis == 0: # NOTE: axis=0 hijacked for quads
m1sum = np.sum(mask[:, 0, :])
m2sum = np.sum(mask[:, -1, :])
msum = msum | bool(m1sum) | bool(m2sum)
if axis == 2 or axis == 0: # NOTE: axis=0 hijacked for quads
m1sum = np.sum(mask[:, :, 0])
m2sum = np.sum(mask[:, :, -1])
msum = msum | bool(m1sum) | bool(m2sum)
return msum
def write_margin(ims, data, axis, margin, n):
"""Write margin datablocks back to file."""
def update_vol(im, d):
if isinstance(im, LabelImage):
# NOTE: is it even possible that im.ds.attrs['maxlabel'] > np.amax(d)?
# new maxlabel of the block is the max of the old and the max of the newly written subblock
im.ds.attrs['maxlabel'] = max(im.ds.attrs['maxlabel'], np.amax(d))  # API: numpy.amax
# -*- coding: utf-8 -*-
import numpy as np
class moon_data_class(object):
def __init__(self,N,d,r,w):
self.N=N
self.w=w
self.d=d
self.r=r
def sgn(self, x):
if x > 0:
return 1
else:
return -1
def sig(self, x):
return 1.0 / (1 + np.exp(x))
def dbmoon(self):
N1 = 10*self.N
N = self.N
r = self.r
w2 = self.w/2
d = self.d
done = True
data = np.empty(0)  # API: numpy.empty
"""
Streaming DMD
A Python implementation of the streaming dynamic mode decomposition algorithm
described in the paper Liew, J. et al. "Streaming dynamic mode decomposition for
short-term forecasting in wind farms"
The algorithm performs a continuously updating dynamic mode decomposition as new
data is made available.
The equations referred to in this paper correspond to the equations in:
Liew, J. et al. "Streaming dynamic mode decomposition for
short-term forecasting in wind farms"
Author: <NAME>
License: MIT (see LICENSE.txt)
Version: 1.0
Email: <EMAIL>
"""
import numpy as np
def hankel_transform(X, s):
"""
stacks the snapshots, X, so that each new snapshot contains the previous
s snapshots.
args:
X (2D array): n by m matrix.
s (int): stack size
returns:
Xout (2D array): n*s by m - (s-1) matrix.
"""
if X.ndim == 1:
X = X.reshape(1, -1)
if s == 1:
return X
l, m = X.shape
w = m - (s - 1)
out = np.zeros([l * s, w])
for i in range(s):
row = X[:, m - i - w : m - i]
out[i * l : (i + 1) * l, :] = row
return out
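A concrete check of the stacking (hedged sketch calling the function defined above): with one channel and four snapshots, a stack size of 2 pairs each snapshot with its predecessor.

import numpy as np

X = np.arange(4).reshape(1, 4)     # [[0 1 2 3]]
print(hankel_transform(X, s=2))
# [[1. 2. 3.]
#  [0. 1. 2.]]   -> column j holds snapshot j+1 on top of snapshot j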
def truncatedSVD(X, r):
"""
Computes the truncated singular value decomposition (SVD)
args:
X (2d array): Matrix to perform SVD on.
r (int or float): rank parameter of the svd. If a positive integer,
truncates to the largest r singular values. If a float such that 0 < r < 1,
the rank is the number of singular values needed to reach the energy
specified in r. If -1, no truncation is performed.
"""
U, S, V = np.linalg.svd(X, full_matrices=False)
V = V.conj().T
if r >= 1:
rank = min(r, U.shape[1])
elif 0 < r < 1:
cumulative_energy = np.cumsum(S ** 2 / np.sum(S ** 2))
rank = np.searchsorted(cumulative_energy, r) + 1
else:
rank = S.size  # no truncation (e.g. r == -1), as the docstring describes
U_r = U[:, :rank]
S_r = S[:rank]
V_r = V[:, :rank]
return U_r, S_r, V_r
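For example, a rank-1 matrix is reproduced exactly by a single mode (hedged sketch using the function above):

import numpy as np

X = np.outer([1., 2., 3.], [4., 5.])              # rank-1, shape (3, 2)
U_r, S_r, V_r = truncatedSVD(X, 1)
print(U_r.shape, S_r.shape, V_r.shape)            # (3, 1) (1,) (2, 1)
print(np.allclose((U_r * S_r) @ V_r.conj().T, X)) # True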
class sDMD_base(object):
"""
Calculate DMD in streaming mode.
"""
def __init__(self, X, Y, rmin, rmax, thres=0.2, halflife=None):
self.rmin = rmin
self.rmax = rmax
self.thres = thres
self.halflife = halflife
self.rho = 1 if halflife is None else 2 ** (-1 / halflife)
# Eq. (2) - truncated SVD
self.Ux, _, _ = truncatedSVD(X, rmin)
self.Uy, _, _ = truncatedSVD(Y, rmin)
# Eq. (3) - Mapping of input vector to reduced order space.
X_tild = self.Ux.T @ X
# Eq. (4) - Mapping of out vector to reduced order space.
Y_tild = self.Uy.T @ Y
# Eq (9) - Decomposition of transition matrix into the product of Q and Pinvx.
self.Q = Y_tild @ X_tild.T
self.Pinvx = X_tild @ X_tild.T
self.Pinvy = Y_tild @ Y_tild.T
def update(self, x, y):
x, y = x.reshape([-1, 1]), y.reshape([-1, 1])
status = 0
normx = np.linalg.norm(x, ord=2, axis=0)
normy = np.linalg.norm(y, ord=2, axis=0)
xtilde = self.Ux.T @ x
ytilde = self.Uy.T @ y
# Numerator of Eq. (14) - projection error.
ex = x - self.Ux @ xtilde
ey = y - self.Uy @ ytilde
#### STEP 1 - BASIS EXPANSION ####
# Table 1: Rank augmentation of Ux
if np.linalg.norm(ex, ord=2, axis=0) / normx > self.thres:
u_new = ex / np.linalg.norm(ex, ord=2, axis=0)  # API: numpy.linalg.norm
import numpy as np
import pandas as pd
import tifffile as tf
import networkx as nx
from cloudvolume import CloudVolume, Skeleton
import brainlit
from brainlit.utils.Neuron_trace import NeuronTrace
from brainlit.utils.session import NeuroglancerSession
import pytest
from pathlib import Path
import networkx.algorithms.isomorphism as iso
swc_path = "./data/data_octree/consensus-swcs/2018-08-01_G-002_consensus.swc"
url_seg = "s3://open-neurodata/brainlit/brain1_segments"
seg_id = 2
mip = 0
seg_id_bad = "asdf"
mip_bad = "asdf"
read_offset_bad = "asdf"
rounding_bad = "asdf"
path_bad_string = "asdf"
path_bad_nonstring = 3
test_swc = NeuronTrace(swc_path)
test_s3 = NeuronTrace(url_seg, seg_id, mip)
####################
### input checks ###
####################
def test_Neurontrace_bad_inputs():
# test 'path' must be a string
with pytest.raises(TypeError):
test_trace = NeuronTrace(path_bad_nonstring)
# test 'path' must be swc or skel path
with pytest.raises(ValueError, match="Did not input 'swc' filepath or 'skel' url"):
test_trace = NeuronTrace(path_bad_string)
# test 'seg_id' must be NoneType or int
with pytest.raises(TypeError):
test_trace = NeuronTrace(url_seg, seg_id_bad, mip)
# test 'mip' must be NoneType or int
with pytest.raises(TypeError):
test_trace = NeuronTrace(url_seg, seg_id, mip_bad)
# test both 'seg_id' and 'mip' must be provided if one is provided
with pytest.raises(
ValueError,
match="For 'swc' do not input mip or seg_id, and for 'skel', provide both mip and seg_id",
):
test_trace = NeuronTrace(url_seg, seg_id)
# test 'read_offset' must be bool
with pytest.raises(TypeError):
test_trace = NeuronTrace(swc_path, read_offset_bad)
# test 'rounding' must be bool
with pytest.raises(TypeError):
test_trace = NeuronTrace(swc_path, rounding=rounding_bad)
def test_get_df_arguments():
# test if output is list
assert isinstance(test_swc.get_df_arguments(), list)
assert isinstance(test_s3.get_df_arguments(), list)
def test_get_df():
# test if output is dataframe
assert isinstance(test_swc.get_df(), pd.DataFrame)
assert isinstance(test_s3.get_df(), pd.DataFrame)
# test if output is correct shape
correct_shape = (1650, 7)
assert test_swc.get_df().shape == correct_shape
assert test_s3.get_df().shape == correct_shape
# test if columns are correct
col = ["sample", "structure", "x", "y", "z", "r", "parent"]
assert list(test_swc.get_df().columns) == col
assert list(test_s3.get_df().columns) == col
def test_get_skel():
# test 'origin' arg must either be type None or numpy.ndarray
with pytest.raises(TypeError):
test_swc.get_skel(origin="asdf")
# test 'benchmarking' arg must be bool
with pytest.raises(TypeError):
test_swc.get_skel(benchmarking="asdf")
# test if 'origin' is type numpy.ndarray, it must be shape (3,1)
with pytest.raises(ValueError):
test_swc.get_skel(origin=np.asarray([0, 1]))
# test if output is skeleton
assert isinstance(test_swc.get_skel(benchmarking=True), Skeleton)
assert isinstance(test_s3.get_skel(), Skeleton)
def test_get_df_voxel():
# test 'spacing' arg must be type numpy.ndarray
with pytest.raises(TypeError):
test_swc.get_df_voxel(spacing="asdf")
# test if 'spacing' is type numpy.ndarray, it must be shape (3,1)
with pytest.raises(ValueError):
test_swc.get_df_voxel(spacing=np.asarray([0, 1]))
# test 'origin' arg must be type numpy.ndarray
with pytest.raises(TypeError):
test_swc.get_df_voxel(spacing=np.asarray([0, 1, 2]), origin="asdf")
# test if 'origin' is type numpy.ndarray, it must be shape (3,1)
with pytest.raises(ValueError):
test_swc.get_df_voxel(spacing=np.asarray([0, 1, 2]), origin=np.asarray([0, 1]))
# test if output is correct shape
correct_shape = (1650, 7)
df_voxel_swc = test_swc.get_df_voxel(
spacing=np.asarray([1, 2, 3]), origin=np.asarray([2, 2, 2])
)
assert df_voxel_swc.shape == correct_shape
correct_shape = (1650, 7)
df_voxel_s3 = test_s3.get_df_voxel(
spacing=np.asarray([1, 2, 3]), origin=np.asarray([2, 2, 2])
)
assert df_voxel_s3.shape == correct_shape
# test columns
col = ["sample", "structure", "x", "y", "z", "r", "parent"]
assert list(df_voxel_swc.columns) == col
assert list(df_voxel_s3.columns) == col
# test if coordinates are all nonnegative
coord_swc = df_voxel_swc[["x", "y", "z"]].values
coord_s3 = df_voxel_s3[["x", "y", "z"]].values
assert np.greater_equal(np.abs(coord_swc), np.zeros(coord_swc.shape)).all()
assert np.greater_equal(np.abs(coord_s3), np.zeros(coord_s3.shape)).all()
# test if output is dataframe
assert isinstance(
test_swc.get_df_voxel(
spacing=np.asarray([0, 1, 2]), origin=np.asarray([0, 1, 2])
),
pd.DataFrame,
)
assert isinstance(
test_s3.get_df_voxel(
spacing=np.asarray([0, 1, 2]), origin=np.asarray([0, 1, 2])
),
pd.DataFrame,
)
def test_get_graph():
# test 'spacing' arg must either be NoneType or numpy.ndarray
with pytest.raises(TypeError):
test_swc.get_graph(spacing="asdf")
# test if 'spacing' is type numpy.ndarray, it must be shape (3,1)
with pytest.raises(ValueError):
test_swc.get_graph(spacing=np.asarray([0, 1]))
# test 'origin' arg must either be NoneType or numpy.ndarray
with pytest.raises(TypeError):
test_swc.get_graph(spacing=np.asarray([0, 1, 2]), origin="asdf")
# test if 'origin' is type numpy.ndarray, it must be shape (3,1)
with pytest.raises(ValueError):
test_swc.get_graph(spacing=np.asarray([0, 1, 2]), origin=np.asarray([0, 1]))
# test if origin isn't specified but spacing is, origin set to np.array([0, 0, 0])
G1 = test_swc.get_graph(spacing=np.asarray([0, 1, 2]))
G2 = test_swc.get_graph(spacing=np.asarray([0, 1, 2]), origin=np.array([0, 0, 0]))
assert nx.is_isomorphic(G1, G2) == True
# test if graph coordinates are same as that of df_voxel
df_voxel = test_swc.get_df_voxel(
spacing=np.asarray([1, 2, 3]), origin=np.asarray([1, 2, 3])
)
df_voxel_s3 = test_s3.get_df_voxel(
spacing=np.asarray([1, 2, 3]), origin=np.asarray([1, 2, 3])
)
# swc
G = test_swc.get_graph(spacing=np.asarray([1, 2, 3]), origin=np.asarray([1, 2, 3]))
coord_df = df_voxel[["x", "y", "z"]].values
x_dict = nx.get_node_attributes(G, "x")
y_dict = nx.get_node_attributes(G, "y")
z_dict = nx.get_node_attributes(G, "z")
x = [x_dict[i] for i in G.nodes]
y = [y_dict[i] for i in G.nodes]
z = [z_dict[i] for i in G.nodes]
coord_graph = np.array([x, y, z])  # API: numpy.array
# -*- coding: utf-8 -*-
"""Tests for the streams.py and basins.py submodules."""
import pytest
import numpy as np
from pyflwdir import streams, basins, core, gis_utils, regions
# import matplotlib.pyplot as plt
# parsed, flwdir = test_data[0]
# test data
from test_core import test_data
@pytest.mark.parametrize("parsed, flwdir", test_data)
def test_accuflux(parsed, flwdir):
idxs_ds, idxs_pit, seq, rank, mv = [p.copy() for p in parsed]
n, ncol = seq.size, flwdir.shape[1]
# cell count
nodata = -9999
material = np.full(idxs_ds.size, nodata, dtype=np.int32)
material[seq] = 1
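    # with one unit of "material" per valid cell, the flux accumulated at the pits must equal the number of cells n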
acc = streams.accuflux(idxs_ds, seq, material, nodata)
assert acc[idxs_pit].sum() == n
upa = streams.upstream_area(idxs_ds, seq, ncol, dtype=np.int32)
assert upa[idxs_pit].sum() == n
assert np.all(upa == acc)
# latlon is True
lons, lats = gis_utils.affine_to_coords(gis_utils.IDENTITY, flwdir.shape)
area = np.where(rank >= 0, gis_utils.reggrid_area(lats, lons).ravel(), nodata)
acc1 = streams.accuflux(idxs_ds, seq, area, nodata)
upa1 = streams.upstream_area(idxs_ds, seq, ncol, latlon=True)
assert np.all(upa1 == acc1)
@pytest.mark.parametrize("parsed, flwdir", test_data)
def test_basins(parsed, flwdir):
idxs_ds, idxs_pit, seq, _, _ = [p.copy() for p in parsed]
n, ncol = seq.size, flwdir.shape[1]
upa = streams.upstream_area(idxs_ds, seq, ncol, dtype=np.int32)
# test basins
ids = np.arange(1, idxs_pit.size + 1, dtype=int)
bas = basins.basins(idxs_ds, idxs_pit, seq, ids)
assert np.all(np.array([np.sum(bas == i) for i in ids]) == upa[idxs_pit])
assert np.all(np.unique(bas[bas != 0]) == ids) # nodata == 0
# test region
bas = bas.reshape(flwdir.shape)
total_bbox = regions.region_bounds(bas)[-1]
assert np.all(total_bbox == np.array([0, -bas.shape[0], bas.shape[1], 0]))
lbs, areas = regions.region_area(bas)
assert areas[0] == np.sum(bas == 1)
areas1 = regions.region_area(bas, latlon=True)[1]
assert areas1.argmax() == areas.argmax()
# test dissolve with labels
lbs0 = lbs[
|
np.argmin(areas)
|
numpy.argmin
|
# Import modules
import pytest
import xarray as xr
import numpy as np
from numpy.testing import (assert_allclose, assert_array_almost_equal,
assert_array_equal)
# From OceanSpy
from oceanspy import open_oceandataset, OceanDataset
from oceanspy.compute import (gradient, divergence, curl,
laplacian, weighted_mean, integral)
# Directory
Datadir = './oceanspy/tests/Data/'
# Test oceandataset
od4calc = open_oceandataset.from_netcdf('{}MITgcm_rect_nc.nc'
''.format(Datadir))
ds = od4calc.dataset
step = 1.E-2
# Space
for var in ['X', 'XC', 'XV']:
ds[var] = xr.full_like(ds[var], step).cumsum(dim='X')
for var in ['Y', 'YC', 'YU']:
ds[var] = xr.full_like(ds[var], step).cumsum(dim='Y')
for var in ['Xp1', 'XG', 'XU']:
ds[var] = xr.full_like(ds[var], step).cumsum(dim='Xp1')
ds[var] = ds[var] - step / 2
for var in ['Yp1', 'YG', 'YV']:
ds[var] = xr.full_like(ds[var], step).cumsum(dim='Yp1')
ds[var] = ds[var] - step / 2
ds['Z'] = xr.full_like(ds['Z'], - step).cumsum(dim='Z')
ds['Zp1'] = xr.full_like(ds['Zp1'], - step).cumsum(dim='Zp1') + step / 2
ds['Zl'] = xr.full_like(ds['Zl'], - step).cumsum(dim='Zl') + step / 2
ds['Zu'] = xr.full_like(ds['Zu'], - step).cumsum(dim='Zu') - step / 2
# Time
t0 = '1990-09-27T00:00:00'
T = []
for i in range(len(ds['time'])):
T = T + [np.datetime64(t0) + np.timedelta64(int(i * step * 1.E3), 'ms')]
ds['time'] = np.array(T, dtype='datetime64')
T = []
for i in range(len(ds['time_midp'])):
T = T + [np.datetime64(t0) + np.timedelta64(int(i * step * 1.E3), 'ms')]
ds['time_midp'] = (np.array(T, dtype='datetime64')
+ np.timedelta64(int(0.5 * step * 1.E3), 'ms'))
# deltas
for var in ['drF', 'dxC', 'dyC', 'dxF', 'dyF', 'dxG', 'dyG', 'dxV', 'dyU']:
ds[var] = xr.full_like(ds[var], step)
for var in ['rA', 'rAw', 'rAs', 'rAz']:
ds[var] = xr.full_like(ds[var], step**2)
for var in ['HFacC', 'HFacW', 'HFacS']:
ds[var] = xr.ones_like(ds[var])
# Recreate oceandataset
od4calc = OceanDataset(ds)
# Gradient
sinX = xr.zeros_like(od4calc.dataset['Temp']) + np.sin(od4calc.dataset['XC'])
sinY = xr.zeros_like(od4calc.dataset['Temp']) + np.sin(od4calc.dataset['YC'])
sinZ = xr.zeros_like(od4calc.dataset['Temp']) + np.sin(od4calc.dataset['Z'])
sintime = (xr.zeros_like(od4calc.dataset['Temp'])
+ np.sin((od4calc.dataset['time']
- od4calc.dataset['time'][0])
/ np.timedelta64(1, 's')))
sintime.attrs = od4calc.dataset['time'].attrs
cosX = xr.zeros_like(od4calc.dataset['U']) + np.cos(od4calc.dataset['XU'])
cosY = xr.zeros_like(od4calc.dataset['V']) + np.cos(od4calc.dataset['YV'])
cosZ = xr.zeros_like(od4calc.dataset['W']) + np.cos(od4calc.dataset['Zl'])
costime = (xr.zeros_like(od4calc.dataset['oceSPtnd'])
+ np.cos((od4calc.dataset['time_midp']
- od4calc.dataset['time_midp'][0])
/ np.timedelta64(1, 's')))
# Divergence and Curl
X = od4calc.dataset['X']
Y = od4calc.dataset['Y']
Z = od4calc.dataset['Z']
Xp1 = od4calc.dataset['Xp1']
Yp1 = od4calc.dataset['Yp1']
Zl = od4calc.dataset['Zl']
sinUZ, sinUY, sinUX = xr.broadcast(np.sin(Z), np.sin(Y), np.sin(Xp1))
sinVZ, sinVY, sinVX = xr.broadcast(np.sin(Z), np.sin(Yp1), np.sin(X))
sinWZ, sinWY, sinWX = xr.broadcast(np.sin(Zl), np.sin(Y), np.sin(X))
sin_ds = xr.Dataset({'sinX': sinX, 'sinY': sinY, 'sinZ': sinZ,
'sintime': sintime,
'cosX': cosX, 'cosY': cosY, 'cosZ': cosZ,
'costime': costime,
'sinUX': sinUX, 'sinUY': sinUY, 'sinUZ': sinUZ,
'sinVX': sinVX, 'sinVY': sinVY, 'sinVZ': sinVZ,
'sinWX': sinWX, 'sinWY': sinWY, 'sinWZ': sinWZ})
od4calc = od4calc.merge_into_oceandataset(sin_ds)
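# With the small uniform grid spacing `step` set above, discrete gradients/divergences of the
# sin fields can be compared against their analytic derivatives (the cos fields defined on the
# staggered grid points); the tests below rely on this, using a tolerance of about 1e-3.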
# GRADIENT
@pytest.mark.parametrize("od", [od4calc])
@pytest.mark.parametrize("axesList", [None, 'X', 'wrong'])
def test_gradient(od, axesList):
varNameList = ['sinZ', 'sinY', 'sinX', 'sintime']
if axesList == 'wrong':
with pytest.raises(ValueError):
gradient(od, varNameList=varNameList, axesList=axesList)
else:
grad_ds = gradient(od, varNameList=varNameList, axesList=axesList)
if axesList is None:
axesList = list(od.grid_coords.keys())
# sin' = cos
for varName in varNameList:
for axis in axesList:
gradName = 'd'+varName+'_'+'d'+axis
var = grad_ds[gradName]
if axis not in varName:
assert (var.min().values
== grad_ds[gradName].max().values
== 0)
else:
check = od.dataset['cos'+axis].where(var)
mask = xr.where(np.logical_or(check.isnull(),
var.isnull()), 0, 1)
assert_allclose(var.where(mask, drop=True).values,
check.where(mask, drop=True).values, 1.E-3)
@pytest.mark.parametrize("od", [od4calc])
def test_all_gradients(od):
od_moor = od.subsample.mooring_array(Xmoor=[od.dataset['X'].min().values,
od.dataset['X'].max().values],
Ymoor=[od.dataset['Y'].min().values,
od.dataset['Y'].max().values])
with pytest.warns(UserWarning):
X = od_moor.dataset['XC'].squeeze().values
Y = od_moor.dataset['YC'].squeeze().values
od_surv = od.subsample.survey_stations(Xsurv=X,
Ysurv=Y)
# Test all dimension
DIMS = []
VARS = []
for var in od.dataset.data_vars:
this_dims = list(od.dataset[var].dims)
append = True
for dims in DIMS:
checks = [set(this_dims).issubset(set(dims)),
set(dims).issubset(set(this_dims))]
if all(checks):
append = False
continue
if append:
VARS = VARS + [var]
DIMS = DIMS + [list(this_dims)]
gradient(od, varNameList=VARS)
gradient(od_moor, varNameList=VARS)
gradient(od_surv, varNameList=VARS)
# DIVERGENCE
@pytest.mark.parametrize("od, iName, jName, kName",
[(od4calc, None, None, 'Temp'),
(od4calc, None, 'Temp', None),
(od4calc, 'Temp', None, None),
(od4calc, None, None, None)])
def test_div_errors(od, iName, jName, kName):
with pytest.raises(ValueError):
divergence(od, iName=iName, jName=jName, kName=kName)
@pytest.mark.parametrize("od", [od4calc])
@pytest.mark.parametrize("varNameList", [[None, 'sinVY', 'sinWZ'],
['sinUX', None, 'sinWZ'],
['sinUX', 'sinVY', None],
['sinUX', 'sinVY', 'sinWZ']])
def test_divergence(od, varNameList):
# Add units
if None not in varNameList:
for varName in varNameList:
od._ds[varName].attrs['units'] = 'm/s'
# Compute divergence
dive_ds = divergence(od,
iName=varNameList[0],
jName=varNameList[1],
kName=varNameList[2])
# sin' = cos
for varName in varNameList:
if varName is not None:
axis = varName[-1]
diveName = 'd'+varName+'_'+'d'+axis
var = dive_ds[diveName]
coords = {coord[0]: var[coord] for coord in var.coords}
coords['Z'], coords['Y'], coords['X'] = xr.broadcast(coords['Z'],
coords['Y'],
coords['X'])
check =
|
np.cos(coords[axis])
|
numpy.cos
|
#!/usr/bin/env python
u"""
test_spatial.py (11/2020)
Verify file read and write with spatial utilities
"""
import os
import ssl
import pytest
import warnings
import inspect
import numpy as np
import pyTMD.spatial
import pyTMD.utilities
#-- current file path
filename = inspect.getframeinfo(inspect.currentframe()).filename
filepath = os.path.dirname(os.path.abspath(filename))
#-- PURPOSE: test the data type function
def test_data_type():
#-- test drift type
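    #-- ('drift': x, y and t all share the same length, i.e. one epoch per position)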
exp = 'drift'
#-- number of data points
npts = 30; ntime = 30
x = np.random.rand(npts)
y = np.random.rand(npts)
t = np.random.rand(ntime)
obs = pyTMD.spatial.data_type(x,y,t)
assert (obs == exp)
#-- test grid type
exp = 'grid'
xgrid,ygrid = np.meshgrid(x,y)
obs = pyTMD.spatial.data_type(xgrid,ygrid,t)
assert (obs == exp)
#-- test grid type with spatial dimensions
exp = 'grid'
nx = 30; ny = 20; ntime = 10
x = np.random.rand(nx)
y = np.random.rand(ny)
t = np.random.rand(ntime)
xgrid,ygrid = np.meshgrid(x,y)
obs = pyTMD.spatial.data_type(xgrid,ygrid,t)
assert (obs == exp)
#-- test time series type
exp = 'time series'
#-- number of data points
npts = 1; ntime = 1
x = np.random.rand(npts)
y =
|
np.random.rand(npts)
|
numpy.random.rand
|
import os
import numpy as np
import pandas as pd
import torch
import torchvision
from tqdm import tqdm
from PIL import Image
from matplotlib import pyplot as plt
from torchvision.datasets.vision import VisionDataset
from torchvision import transforms
class Fruits(VisionDataset):
def __init__(self, root, train=True, extensions=None, transform=None,
target_transform=None, imb_factor=1, imb_type='exp', new_class_idx_sorted=None):
root = os.path.join(root, 'fruits')
if train:
root = os.path.join(root, 'Training')
else:
root = os.path.join(root, 'Test')
super(Fruits, self).__init__(root, transform=transform,
target_transform=target_transform)
classes, class_to_idx = self._find_classes(self.root)
categories = os.listdir(root)
samples = []
for c in categories:
label = int(class_to_idx[c])
image_path = os.listdir(os.path.join(root, c))
for p in image_path:
samples.append((os.path.join(root, c, p), label))
if len(samples) == 0:
msg = "Found 0 files in subfolders of: {}\n".format(self.root)
if extensions is not None:
msg += "Supported extensions are: {}".format(",".join(extensions))
raise RuntimeError(msg)
self.extensions = extensions
self.classes = classes
self.cls_num = len(classes)
self.class_to_idx = class_to_idx
self.samples = np.array([s[0] for s in samples])
self.targets = np.array([s[1] for s in samples])
num_in_class = []
for class_idx in np.unique(self.targets):
num_in_class.append(len(np.where(self.targets == class_idx)[0]))
self.num_in_class = num_in_class
self.sort_dataset(new_class_idx_sorted)
if train:
img_num_list = self.get_img_num_per_cls(self.cls_num, imb_type, imb_factor)
self.gen_imbalanced_data(img_num_list)
def sort_dataset(self, new_class_idx_sorted=None):
idx = np.argsort(self.targets)
self.targets = self.targets[idx]
self.samples = self.samples[idx]
if new_class_idx_sorted is None:
new_class_idx_sorted =
|
np.argsort(self.num_in_class)
|
numpy.argsort
|
#!/usr/bin/env python
import numpy as np
import ase.io.extxyz as rx
from aqml import cheminfo as co
from ase import Atoms
import aqml.io2 as io2
import os, re
def read_xyz_simple(f, opt='z', icol=None, property_names=None, idx=None):
"""
    read geometry & properties from an xyz file
    icol: if not None, choose the `icol`-th entry of line 2 of the input xyz
          file as the default property of the molecule,
          and the default property name to be assigned is "HF"
"""
assert os.path.exists(f)
cs = open(f,'r').readlines()
assert len(cs) > 0
#print('cs=',cs)
na = int(cs[0])
props = {}
c2 = cs[1].strip() ## e.g., "E=-100.2045 ALPHA=-3.45" or pure property list "-100.2 -3.45"
if len(c2) > 0 and property_names:
_props = {}
if '#' in c2:
try:
sk, sv = c2.split('#')
_props = dict(zip(sk.strip().split(), [eval(svi) for svi in sv.split()]))
except:
print(' ** no property found from 2nd line of xyz file')
elif '=' in c2:
for c2i in c2.split():
k,sv = c2i.split('=')
_props[k] = eval(sv)
else:
print(' ** no property found from 2nd line') #raise Exception(' unknown property format in 2-nd line')
if ('a' in property_names) or ('all' in property_names):
property_names = list(_props.keys())
#print('f=',f, 'pns=', property_names, 'props=', _props )
for p in property_names:
if p not in _props:
raise Exception('No value for property_name %s is found!'%p)
props[p] = _props[p]
_ats = []; coords = []; nheav = 0
chgs = []; nmr = []; grads = []; cls = []
for i in range(2,na+2):
#print cs[i]
csi = cs[i].strip().split()
_si, sx, sy, sz = csi[:4]
csia = csi[4:]
if len(csia)>0:
if 'chgs' in props:
#chgs.append( eval(csia[props['chgs']]) )
syi = csia[props['chgs']]
yi = np.nan if syi.lower() == 'nan' else eval(syi)
chgs.append(yi)
if 'nmr' in props:
syi = csia[props['nmr']]
yi = np.nan if syi.lower() == 'nan' else eval(syi)
nmr.append(yi)
if 'cls' in props:
syi = csia[props['cls']]
yi = np.nan if syi.lower() == 'nan' else eval(syi)
cls.append(yi)
if 'grads' in props:
grads.append( [ eval(csia[props['grads']+j]) for j in range(3) ] )
try:
_zi = co.chemical_symbols_lowercase.index(_si.lower())
except:
_zi = int(_si)
_si = co.chemical_symbols[_zi]
if _si not in ['H']: nheav += 1
si = _zi if opt=='z' else _si
_ats.append(si)
coords.append( [ eval(_s) for _s in [sx,sy,sz] ] )
if len(chgs) > 0:
props['chgs'] = np.array(chgs)
if len(nmr) > 0:
props['nmr'] = np.array(nmr)
if len(grads) > 0:
props['grads'] =
|
np.array(grads)
|
numpy.array
|
from itertools import combinations
import numpy as np
import torch
import torch.nn.functional as F
from .metric import outer_pairwise_distance
class PairSelector:
"""
Implementation should return indices of positive pairs and negative pairs that will be passed to compute
Contrastive Loss
return positive_pairs, negative_pairs
"""
def __init__(self):
pass
def get_pairs(self, embeddings, labels):
raise NotImplementedError
class AllPositivePairSelector(PairSelector):
"""
Discards embeddings and generates all possible pairs given labels.
If balance is True, negative pairs are a random sample to match the number of positive samples
"""
def __init__(self, balance=True):
super(AllPositivePairSelector, self).__init__()
self.balance = balance
def get_pairs(self, embeddings, labels):
# construct matrix x, such as x_ij == 0 <==> labels[i] == labels[j]
n = labels.size(0)
x = labels.expand(n, n) - labels.expand(n, n).t()
positive_pairs = torch.triu((x == 0).int(), diagonal=1).nonzero(as_tuple=False)
negative_pairs = torch.triu((x != 0).int(), diagonal=1).nonzero(as_tuple=False)
if self.balance:
negative_pairs = negative_pairs[torch.randperm(len(negative_pairs))[:len(positive_pairs)]]
return positive_pairs, negative_pairs
class HardNegativePairSelector(PairSelector):
"""
    Generates all possible positive pairs given labels and
    the neg_count hardest negative examples for each example
"""
def __init__(self, neg_count=1):
super(HardNegativePairSelector, self).__init__()
self.neg_count = neg_count
def get_pairs(self, embeddings, labels):
# construct matrix x, such as x_ij == 0 <==> labels[i] == labels[j]
n = labels.size(0)
x = labels.expand(n, n) - labels.expand(n, n).t()
# positive pairs
positive_pairs = torch.triu((x == 0).int(), diagonal=1).nonzero(as_tuple=False)
        # hard negative mining
mat_distances = outer_pairwise_distance(embeddings.detach()) # pairwise_distance
upper_bound = int((2 * n) ** 0.5) + 1
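        # subtracting each distance from a rough upper bound reverses the ordering, so
        # topk(..., largest=True) below selects the closest (hardest) negatives per anchor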
mat_distances = ((upper_bound - mat_distances) * (x != 0).type(
mat_distances.dtype)) # filter: get only negative pairs
values, indices = mat_distances.topk(k=self.neg_count, dim=0, largest=True)
negative_pairs = torch.stack([
torch.arange(0, n, dtype=indices.dtype, device=indices.device).repeat(self.neg_count),
torch.cat(indices.unbind(dim=0))
]).t()
return positive_pairs, negative_pairs
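# Example usage (illustrative sketch; shapes and values are arbitrary):
#
#   embeddings = torch.randn(8, 16)
#   labels = torch.tensor([0, 0, 1, 1, 2, 2, 3, 3])
#   selector = HardNegativePairSelector(neg_count=1)
#   pos_pairs, neg_pairs = selector.get_pairs(embeddings, labels)
#   # pos_pairs: one [i, j] row per same-label pair (4 rows here)
#   # neg_pairs: neg_count * batch_size rows of [anchor, hardest negative]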
class DistanceWeightedPairSelector(PairSelector):
"""
Distance Weighted Sampling
"Sampling Matters in Deep Embedding Learning", ICCV 2017
https://arxiv.org/abs/1706.07567
code based on https://github.com/suruoxi/DistanceWeightedSampling
    Generates pairs corresponding to distances
parameters
----------
batch_k: int
number of images per class
Inputs:
data: input tensor with shape (batch_size, embed_dim)
Here we assume the consecutive batch_k examples are of the same class.
For example, if batch_k = 5, the first 5 examples belong to the same class,
6th-10th examples belong to another class, etc.
Outputs:
        a_indices: indices of anchors
        x[a_indices]: anchor embeddings
        x[p_indices]: positive embeddings
        x[n_indices]: negative embeddings
"""
def __init__(self, batch_k, cutoff=0.5, nonzero_loss_cutoff=1.4, normalize=False):
super(DistanceWeightedPairSelector, self).__init__()
self.batch_k = batch_k
self.cutoff = cutoff
self.nonzero_loss_cutoff = nonzero_loss_cutoff
self.normalize = normalize
def get_pairs(self, x, labels):
k = self.batch_k
n, d = x.shape
distance = outer_pairwise_distance(x.detach())
distance = distance.clamp(min=self.cutoff)
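        # sampling weights follow the inverse of the pairwise-distance density on the unit
        # hypersphere, q(d) ~ d^(n-2) * (1 - d^2/4)^((n-3)/2), as proposed in
        # "Sampling Matters in Deep Embedding Learning" (see docstring above)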
log_weights = ((2.0 - float(d)) * distance.log() - (float(d - 3) / 2) * torch.log(
torch.clamp(1.0 - 0.25 * (distance * distance), min=1e-8)))
if self.normalize:
log_weights = (log_weights - log_weights.min()) / (log_weights.max() - log_weights.min() + 1e-8)
weights = torch.exp(log_weights - torch.max(log_weights))
device = x.device
weights = weights.to(device)
mask = torch.ones_like(weights)
for i in range(0, n, k):
mask[i:i + k, i:i + k] = 0
mask_uniform_probs = mask.double() * (1.0 / (n - k))
weights = weights * mask * ((distance < self.nonzero_loss_cutoff).float()) + 1e-8
weights_sum = torch.sum(weights, dim=1, keepdim=True)
weights = weights / weights_sum
a_indices = []
p_indices = []
n_indices = []
np_weights = weights.cpu().numpy()
for i in range(n):
block_idx = i // k
if weights_sum[i] != 0:
n_indices += np.random.choice(n, k - 1, p=np_weights[i]).tolist()
else:
n_indices += np.random.choice(n, k - 1, p=mask_uniform_probs[i]).tolist()
for j in range(block_idx * k, (block_idx + 1) * k):
if j != i:
a_indices.append(i)
p_indices.append(j)
positive_pairs = [[a, p] for a, p in zip(a_indices, p_indices)]
negative_pairs = [[a, n] for a, n in zip(a_indices, n_indices)]
return torch.LongTensor(positive_pairs).to(device), torch.LongTensor(negative_pairs).to(device)
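# Example usage (illustrative sketch): DistanceWeightedPairSelector expects the batch to be
# laid out as `batch_k` consecutive samples per class, e.g.
#
#   x = torch.randn(4 * 5, 32)                       # 4 classes, batch_k = 5
#   labels = torch.arange(4).repeat_interleave(5)    # [0]*5 + [1]*5 + ...
#   selector = DistanceWeightedPairSelector(batch_k=5)
#   pos_pairs, neg_pairs = selector.get_pairs(x, labels)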
class TripletSelector:
"""
Implementation should return indices of anchors, positive and negative samples
return np array of shape [N_triplets x 3]
"""
def __init__(self):
pass
def get_triplets(self, embeddings, labels):
raise NotImplementedError
class AllTripletSelector(TripletSelector):
"""
Returns all possible triplets
May be impractical in most cases
"""
def __init__(self):
super(AllTripletSelector, self).__init__()
def get_triplets(self, embeddings, labels):
np_labels = labels.cpu().data.numpy()
triplets = []
for label in set(np_labels):
label_mask = (np_labels == label)
label_indices = np.where(label_mask)[0]
if len(label_indices) < 2:
continue
negative_indices = np.where(np.logical_not(label_mask))[0]
anchor_positives = list(combinations(label_indices, 2)) # All anchor-positive pairs
# Add all negatives for all positive pairs
temp_triplets = [[anchor_positive[0], anchor_positive[1], neg_ind] for anchor_positive in anchor_positives
for neg_ind in negative_indices]
triplets += temp_triplets
return torch.LongTensor(
|
np.array(triplets)
|
numpy.array
|
import h5py
import numpy as np
from tqdm import tqdm  # progress bar
from dynamol import construct
from dynamol import forcelaws
from dynamol import methods
from dynamol import data
from dynamol import files
def trange(N):
return tqdm(range(N))
class IdealGas(construct.SetOfParticles):
def __init__(self, N, T=1.0, compress=1.0, dt=2.0e-15, dim=3,
atom='argon', cutoff=3.0, mass=None, config_file=None,
folder='outputs'):
files.mkdir(folder)
self.folder = folder
self.position_folder = files.mkdir(folder + r'\positions')
self.vars_folder = files.mkdir(folder + r'\variables')
self.units = construct.SystemOfUnits().set_units(atom)
self.dim = dim
self.N = N
self.cutoff = cutoff
self.time_step = dt/self.units.time
self.pressure = 0.0
self.T = T/self.units.temperature
self.T_bath = T
self.tau = 1.0e5*self.time_step
if mass is None:
mass = np.ones(self.N)
density = self.check_inputs(compress)
self.V = N*self.units.mass/density
self.density = density/self.units.density
if self.dim == 2:
self.V /= self.units.space**2
else:
self.V /= self.units.volume
self.size = np.ones(self.dim)*self.V**(1/self.dim)
self.cellist = construct.CellList(self.N, self.size, L=cutoff)
self.initialize(mass, config_file)
self.interaction = forcelaws.LennardJones(cutoff=self.cutoff)
self.integration = methods.VelocityVerlet(dt=self.time_step)
print("\tIdeal gas")
print(f"\t\t Número de partículas: {self.N}")
print(f"\t\t Volume: {self.V*self.units.volume:.2e} m³.")
print(f"\t\t Temperatura: {self.T*self.units.temperature} K")
dim1 = f"{self.size[0]:.2e} x {self.size[1]:.2e} "
if dim == 3:
dim1 += f"x {self.size[2]:.2e}"
uL = self.units.space
dim2 = f"{self.size[0]*uL:.2e} x {self.size[1]*uL:.2e}"
if dim == 3:
dim2 += f" x {self.size[2]*uL:.2e}"
print(f"\t\t Dimensões:\n\t\t {dim1} uL³/uL², ou\n\t\t {dim2} m³/m²")
print(f"\t\t Densidade: {density} kg/m³ ou kg/m² ou:\t\t")
print(f"\t\t\t\t{self.N/self.V:.2f} partículas por uV")
print(f"\t\t Espaçamento inicial: {(self.V/N)**(1/dim):.3f} uL")
def initialize(self, mass, config_file=None):
if config_file is not None:
pass
else:
R, self.N, self.dim = self.cellist.square_lattice()
V = self.static_system(mass)
super().__init__(self.N, self.dim)
self.positions = R
self.velocities = V
def update_list(self, r=None):
if r is None:
r = np.array([r for r in self.positions])
else:
self.positions = r
self.cellist.make_list(r)
def compute_accels(self, r):
self.update_list(r)
cum_forces = np.zeros((self.N, self.dim))
for loc in self.cellist.index_list():
for i in self.cellist.cells[tuple(loc)]:
for j in self.cellist.neighbors(tuple(loc)):
if i != j:
rij = self[i].r - self[j].r
cum_forces[i] += self.interaction.force(rij)
masses = np.array([p.m for p in self[:]])
return np.divide(cum_forces, masses[:, None])
def compute_interactions(self, time):
R = np.array([p.r for p in self[:]])
V = np.array([p.v for p in self[:]])
A = np.array([p.a for p in self[:]])
accels = self.compute_accels
R, V, A = self.integration.single_step(R, V, A, accels)
for p, r, v, a in zip(self.particles, R, V, A):
p.r, p.v, p.a = r, v, a
self.check_reflections(time)
    def execute_simulation(self, n_iterations, start=0,
                           n_files=1000, zeros=4):
        if n_files > n_iterations:
            n_files = n_iterations
        file_ratio = int(np.ceil(n_iterations/n_files))
while(len(str(n_files)) > zeros):
zeros += 1
text = "Iniciando simulação...\n"
text += f"\tArmanezando dados a cada {file_ratio} passos."
print(text)
self.compute_interactions(self.time_step) # start accelerations
self.store_variables(time=0, maxlines=n_files+1)
self.save_positions(idx=0)
time = 0.0
        for t in trange(n_iterations):
time += self.integration.dt
self.compute_interactions(time)
if t % file_ratio == 0:
idx = int((t + 1)/file_ratio)
self.save_positions(idx=idx, zeros=zeros)
self.store_variables(time=time,
idx=(idx+1))
def check_reflections(self, time):
pressure = 0.0
for p in self[:]:
for i, (u, v, l) in enumerate(zip(p.r, p.v, self.size)):
if u < 0.0:
p.v[i] = -p.v[i]
p.r[i] = 0.0
pressure += l*p.m*abs(p.v[i])/self.time_step
elif u > l:
p.v[i] = -p.v[i]
p.r[i] = l
pressure += l*p.m*abs(p.v[i])/self.time_step
pressure /= 3*self.V
self.pressure += pressure
def store_variables(self, time, idx=0, maxlines=10000):
file = self.vars_folder + r'\variables.h5'
U = self.potential_energy()*self.units.kJmol/self.N
K = self.kinetic_energy()*self.units.kJmol/self.N
data = {
'Mechanical Energy': K + U,
'Potential Energy': U,
'Kinetic Energy': K,
'Average Pressure': self.pressure*self.units.pressure/1.0e5,
'Temperature': self.T*self.units.temperature,
}
if idx == 0:
maxshape = (maxlines, len(data))
with h5py.File(file, 'w') as f:
for key in data:
if key not in f.keys():
f.create_dataset(key, shape=(1, 2),
maxshape=maxshape, dtype='float64')
f[key][:] =
|
np.array((time, data[key]))
|
numpy.array
|
# -*- coding: utf-8 -*-
import numpy as np
import pylab as pb
import math
class NaiveSimulation(object):
"""A base class for an MD simulation"""
# creates an instance of a simulation class
def __init__(self, name, config):
# configuration
np.random.seed() # RNG for velocities/positions
self.name = name
self.config = config
self.integration = config['integration']
self.batchmode = config['batchmode'] # don't show
# environment
self.ndim = int(config['ndim'])
self.L = float(config['L'])
# time
self.steps = 0
self.samplesteps = 0
self.t = 0.0
self.dt = float(config['dt'])
self.dtsq = self.dt ** 2
self.sampleint = float(config['sampleint'])
self.runtime = float(config['runtime'])
self.relaxtime = float(config['relaxtime'])
self.tArray = np.array([self.t])
self.sampleTArray= np.array([])
#objects
self.N = int(config['N'])
self.pos = np.zeros([self.ndim, self.N])
self.vel = np.zeros([self.ndim, self.N])
self.acc = np.zeros([self.ndim, self.N])
self.velArray = np.array([])
self.posArray = np.array([])
# observables
self.iniTemp = float(config['iniTemp'])
self.tempArray = np.array([])
self.tempAcc = 0.0
self.tempsqAcc = 0.0
self.enArray = np.array([])
# randomizes positions as (rand() - 1/2) * L
# may produce overlap for large N!
def randomPos(self):
self.pos = (np.random.random([self.ndim, self.N]) - 0.5) * self.L
# randomizes velocities as (rand() - 1/2) * L
def randomVel(self):
self.vel = (np.random.random([self.ndim, self.N]) - 0.5) * self.L
# T = self.temp()
# self.vel *= math.sqrt( self.iniTemp / T )
# zeros total momentum along each dimension
def zeroMomentum(self):
for dim in range(self.ndim):
self.vel[dim][:] -= self.vel[dim][:].mean()
# assigns initial velocities and positions, ensures zero momentum
def initialize(self):
self.randomPos()
self.randomVel()
self.zeroMomentum()
# calculates sum of kinetic energies of each particle
def kinEn(self):
return 0.5 * (self.vel * self.vel).sum()
# calculates potential energy of the system
def potEn(self):
pass
# calculates ``kinetic'' temperature of the system
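    # (equipartition: <KE> = ndim * N * T / 2 in reduced units with k_B = 1)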
def temp(self):
return 2 * self.kinEn() / ( self.ndim * self.N )
# TODO calculate pressure via virial theorem
def pressure(self):
pass
def en(self):
return self.kinEn() + self.potEn()
# calculates forces on each particle
def force(self):
pass
# Velocity Verlet
# v(t + 0.5 dt) = v(t) + 1/2 dt a(t)
# x(t + dt) = x(t) + dt v(t + 0.5 dt)
# a(t + dt) = 1/m F( x(t + dt))
# v(t + dt) = v(t + 0.5 dt) + 1/2 dt a(t + dt)
def velocityVerlet(self):
self.vel += 0.5 * self.dt * self.acc
self.pos += self.dt * self.vel
self.acc = self.force()
self.vel += 0.5 * self.dt * self.acc
# TODO
# Position Verlet
# x(t + 0.5 dt) = x(t) + 1/2 dt v(t)
# v(t + dt) = v(t) + dt a(t + 0.5 dt)
# x(t) = x(t + 0.5 dt) + 1/2 dt v(t + dt)
def positionVerlet(self):
pass
# TODO
def verlet(self):
pass
# TODO
def gearPC(self, iterations=0.0):
if iterations == 0.0: iterations = self.config['iterations']
pass
# integrates EOMs
def integrate(self):
if self.integration == "velocityVerlet":
self.velocityVerlet()
elif self.integration == "positionVerlet":
self.positionVerlet()
elif self.integration == "verlet":
self.verlet()
elif self.integration == "gearPC":
self.gearPC()
    # calculates and stores required quantities
def recordObservables(self):
pass
# evolves the system a specified amount of time
def evolve(self, time):
steps = int(abs(time/self.dt))
        for istep in range(steps):
self.integrate()
self.t += self.dt
self.tArray = np.append(self.tArray, self.t)
if (istep % self.sampleint == 0):
self.recordObservables()
self.sampleTArray = np.append(self.sampleTArray, self.t)
self.samplesteps += 1
T = self.temp()
self.tempArray = np.append(self.tempArray, T)
self.tempAcc += T
self.tempsqAcc += T**2
self.steps += 1
def reverseTime(self):
self.dt = -self.dt
# STATISTICS
def resetObservales(self):
pass
def resetStatistics(self):
self.steps = 0
self.samplesteps = 0
self.sampleTArray= np.array([])
self.posArray = np.array([])
self.velArray =
|
np.array([])
|
numpy.array
|
"""
Formulas and value units were taken from:
<NAME>., <NAME>., <NAME>., & <NAME>. (2011).
Principles of Computational Modelling in Neuroscience. Cambridge: Cambridge University Press.
DOI:10.1017/CBO9780511975899
Based on the NEURON repository
"""
import random
import numpy as np
import logging as log
import matplotlib.pyplot as plt
from time import time
log.basicConfig(level=log.INFO)
DEBUG = False
EXTRACELLULAR = False
GENERATOR = 'generator'
INTER = 'interneuron'
MOTO = 'motoneuron'
MUSCLE = 'muscle'
neurons_in_ip = 196
dt = 0.025 # [ms] - sim step
sim_time = 50 # [ms] - simulation time
sim_time_steps = int(sim_time / dt) # [steps] converted time into steps
skin_time = 25 # duration of layer 25 = 21 cm/s; 50 = 15 cm/s; 125 = 6 cm/s
cv_fr = 200 # frequency of CV
ees_fr = 100 # frequency of EES
cv_int = 1000 / cv_fr
ees_int = 1000 / ees_fr
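# stimulus onsets are generated in ms and converted to integer simulation-step indices by dividing by dt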
EES_stimulus = (np.arange(0, sim_time, ees_int) / dt).astype(int)
CV1_stimulus = (np.arange(skin_time * 0, skin_time * 1, random.gauss(cv_int, cv_int / 10)) / dt).astype(int)
CV2_stimulus = (np.arange(skin_time * 1, skin_time * 2, random.gauss(cv_int, cv_int / 10)) / dt).astype(int)
CV3_stimulus = (np.arange(skin_time * 2, skin_time * 3, random.gauss(cv_int, cv_int / 10)) / dt).astype(int)
CV4_stimulus = (np.arange(skin_time * 3, skin_time * 5, random.gauss(cv_int, cv_int / 10)) / dt).astype(int)
CV5_stimulus = (np.arange(skin_time * 5, skin_time * 6, random.gauss(cv_int, cv_int / 10)) / dt).astype(int)
# common neuron constants
k = 0.017 # synaptic coef
V_th = -40 # [mV] voltage threshold
V_adj = -63 # [mV] adjust voltage for -55 threshold
# moto neuron constants
ca0 = 2 # initial calcium concentration
amA = 0.4 # const ??? todo
amB = 66 # const ??? todo
amC = 5 # const ??? todo
bmA = 0.4 # const ??? todo
bmB = 32 # const ??? todo
bmC = 5 # const ??? todo
R_const = 8.314472 # [k-mole] or [joule/degC] const
F_const = 96485.34 # [faraday] or [kilocoulombs] const
# muscle fiber constants
g_kno = 0.01 # [S/cm2] conductance of the todo
g_kir = 0.03 # [S/cm2] conductance of the Inwardly Rectifying Potassium K+ (Kir) channel
# Boltzman steady state curve
vhalfl = -98.92 # [mV] inactivation half-potential
kl = 10.89 # [mV] Stegen et al. 2012
# tau_infty
vhalft = 67.0828 # [mV] fitted #100 uM sens curr 350a, Stegen et al. 2012
at = 0.00610779 # [/ ms] Stegen et al. 2012
bt = 0.0817741 # [/ ms] Note: typo in Stegen et al. 2012
# temperature dependence
q10 = 1 # temperature scaling (sensitivity)
celsius = 36 # [degC] temperature of the cell
# i_membrane [mA/cm2]
e_extracellular = 0 # [mV]
xraxial = 1e9 # [MOhm/cm]
# todo find the initialization
xg = [0, 1e9, 1e9, 1e9, 0] # [S/cm2]
xc = [0, 0, 0, 0, 0] # [uF/cm2]
def init0(shape, dtype=float):
return np.zeros(shape, dtype=dtype)
nrns_number = 0
nrns_and_segs = 0
generators_id_end = 0
# common neuron's parameters
# also from https://www.cell.com/neuron/pdfExtended/S0896-6273(16)00010-6
class P:
nrn_start_seg = [] #
models = [] # [str] model's names
Cm = [] # [uF / cm2] membrane capacitance
gnabar = [] # [S / cm2] the maximal fast Na+ conductance
gkbar = [] # [S / cm2] the maximal slow K+ conductance
gl = [] # [S / cm2] the maximal leak conductance
Ra = [] # [Ohm cm] axoplasmic resistivity
diam = [] # [um] soma compartment diameter
length = [] # [um] soma compartment length
ena = [] # [mV] Na+ reversal (equilibrium, Nernst) potential
ek = [] # [mV] K+ reversal (equilibrium, Nernst) potential
el = [] # [mV] Leakage reversal (equilibrium) potential
# moto neuron's properties
# https://senselab.med.yale.edu/modeldb/showModel.cshtml?model=189786
# https://journals.physiology.org/doi/pdf/10.1152/jn.2002.88.4.1592
gkrect = [] # [S / cm2] the maximal delayed rectifier K+ conductance
gcaN = [] # [S / cm2] the maximal N-type Ca2+ conductance
gcaL = [] # [S / cm2] the maximal L-type Ca2+ conductance
gcak = [] # [S / cm2] the maximal Ca2+ activated K+ conductance
# synapses' parameters
E_ex = [] # [mV] excitatory reversal (equilibrium) potential
E_inh = [] # [mV] inhibitory reversal (equilibrium) potential
tau_exc = [] # [ms] rise time constant of excitatory synaptic conductance
tau_inh1 = [] # [ms] rise time constant of inhibitory synaptic conductance
tau_inh2 = [] # [ms] decay time constant of inhibitory synaptic conductance
# metadata of synapses
syn_pre_nrn = [] # [id] list of pre neurons ids
    syn_post_nrn = [] # [id] list of post neurons ids
syn_weight = [] # [S] list of synaptic weights
syn_delay = [] # [ms * dt] list of synaptic delays in steps
syn_delay_timer = [] # [ms * dt] list of synaptic timers, shows how much left to send signal
# arrays for saving data
spikes = [] # saved spikes
GRAS_data = [] # saved gras data (DEBUGGING)
save_groups = [] # neurons groups that need to save
saved_voltage = [] # saved voltage
save_neuron_ids = [] # neurons id that need to save
def form_group(name, number=50, model=INTER, segs=1):
"""
"""
global nrns_number, nrns_and_segs
ids = list(range(nrns_number, nrns_number + number))
#
__Cm = None
__gnabar = None
__gkbar = None
__gl = None
__Ra = None
__ena = None
__ek = None
__el = None
__diam = None
__dx = None
__gkrect = None
__gcaN = None
__gcaL = None
__gcak = None
__e_ex = None
__e_inh = None
__tau_exc = None
__tau_inh1 = None
__tau_inh2 = None
# without random at first stage of debugging
for _ in ids:
if model == INTER:
__Cm = random.gauss(1, 0.01)
__gnabar = 0.1
__gkbar = 0.08
__gl = 0.002
__Ra = 100
__ena = 50
__ek = -90
__el = -70
__diam = 10 # random.randint(5, 15)
__dx = __diam
__e_ex = 50
__e_inh = -80
__tau_exc = 0.35
__tau_inh1 = 0.5
__tau_inh2 = 3.5
elif model == MOTO:
__Cm = 2
__gnabar = 0.05
__gl = 0.002
__Ra = 200
__ena = 50
__ek = -80
__el = -70
__diam = random.randint(45, 55)
__dx = __diam
__gkrect = 0.3
__gcaN = 0.05
__gcaL = 0.0001
__gcak = 0.3
__e_ex = 50
__e_inh = -80
__tau_exc = 0.3
__tau_inh1 = 1
__tau_inh2 = 1.5
if __diam > 50:
__gnabar = 0.1
__gcaL = 0.001
__gl = 0.003
__gkrect = 0.2
__gcak = 0.2
elif model == MUSCLE:
__Cm = 3.6
__gnabar = 0.15
__gkbar = 0.03
__gl = 0.0002
__Ra = 1.1
__ena = 55
__ek = -80
__el = -72
__diam = 40
__dx = 3000
__e_ex = 0
__e_inh = -80
__tau_exc = 0.3
__tau_inh1 = 1
__tau_inh2 = 1
elif model == GENERATOR:
pass
else:
raise Exception("Choose the model")
# common properties
P.Cm.append(__Cm)
P.gnabar.append(__gnabar)
P.gkbar.append(__gkbar)
P.gl.append(__gl)
P.el.append(__el)
P.ena.append(__ena)
P.ek.append(__ek)
P.Ra.append(__Ra)
P.diam.append(__diam)
P.length.append(__dx)
P.gkrect.append(__gkrect)
P.gcaN.append(__gcaN)
P.gcaL.append(__gcaL)
P.gcak.append(__gcak)
P.E_ex.append(__e_ex)
P.E_inh.append(__e_inh)
P.tau_exc.append(__tau_exc)
P.tau_inh1.append(__tau_inh1)
P.tau_inh2.append(__tau_inh2)
P.nrn_start_seg.append(nrns_and_segs)
nrns_and_segs += (segs + 2)
P.models += [model] * number
nrns_number += number
return name, ids
def conn_a2a(pre_nrns, post_nrns, delay, weight):
"""
"""
pre_nrns_ids = pre_nrns[1]
post_nrns_ids = post_nrns[1]
for pre in pre_nrns_ids:
for post in post_nrns_ids:
# weight = random.gauss(weight, weight / 5)
# delay = random.gauss(delay, delay / 5)
syn_pre_nrn.append(pre)
syn_post_nrn.append(post)
syn_weight.append(weight)
syn_delay.append(int(delay / dt))
syn_delay_timer.append(-1)
def conn_fixed_outdegree(pre_group, post_group, delay, weight, indegree=50):
"""
"""
pre_nrns_ids = pre_group[1]
post_nrns_ids = post_group[1]
nsyn = random.randint(indegree - 15, indegree)
for post in post_nrns_ids:
for _ in range(nsyn):
pre = random.choice(pre_nrns_ids)
# weight = random.gauss(weight, weight / 5)
# delay = random.gauss(delay, delay / 5)
syn_pre_nrn.append(pre)
syn_post_nrn.append(post)
syn_weight.append(weight)
syn_delay.append(int(delay / dt))
syn_delay_timer.append(-1)
def save(nrn_groups):
global save_neuron_ids, save_groups
save_groups = nrn_groups
for group in nrn_groups:
for nrn in group[1]:
center = P.nrn_start_seg[nrn] + (2 if P.models[nrn] == MUSCLE else 1)
save_neuron_ids.append(center)
if DEBUG:
gen = form_group(1, model=GENERATOR)
m1 = form_group(1, model=MUSCLE, segs=3)
conn_a2a(gen, m1, delay=1, weight=40.5)
groups = [m1]
save(groups)
# m1 = create(1, model='moto')
# connect(gen, m1, delay=1, weight=5.5, conn_type='all-to-all')
'''
gen = create(1, model='generator', segs=1)
OM1 = create(50, model='inter', segs=1)
OM2 = create(50, model='inter', segs=1)
OM3 = create(50, model='inter', segs=1)
moto = create(50, model='moto', segs=1)
muscle = create(1, model='muscle', segs=3)
conn_a2a(gen, OM1, delay=1, weight=1.5)
conn_fixed_outdegree(OM1, OM2, delay=2, weight=1.85)
conn_fixed_outdegree(OM2, OM1, delay=3, weight=1.85)
conn_fixed_outdegree(OM2, OM3, delay=3, weight=0.00055)
conn_fixed_outdegree(OM1, OM3, delay=3, weight=0.00005)
conn_fixed_outdegree(OM3, OM2, delay=1, weight=-4.5)
conn_fixed_outdegree(OM3, OM1, delay=1, weight=-4.5)
conn_fixed_outdegree(OM2, moto, delay=2, weight=1.5)
conn_fixed_outdegree(moto, muscle, delay=2, weight=15.5)
'''
else:
gen = form_group("gen", 1, model=GENERATOR, segs=1)
OM1 = form_group("OM1", 50, model=INTER, segs=1)
OM2 = form_group("OM2", 50, model=INTER, segs=1)
OM3 = form_group("OM3", 50, model=INTER, segs=1)
moto = form_group("moto", 50, model=MOTO, segs=1)
muscle = form_group("muscle", 1, model=MUSCLE, segs=3)
conn_a2a(gen, OM1, delay=1, weight=1.5)
conn_fixed_outdegree(OM1, OM2, delay=2, weight=1.85)
conn_fixed_outdegree(OM2, OM1, delay=3, weight=1.85)
conn_fixed_outdegree(OM2, OM3, delay=3, weight=0.00055)
conn_fixed_outdegree(OM1, OM3, delay=3, weight=0.00005)
conn_fixed_outdegree(OM3, OM2, delay=1, weight=-4.5)
conn_fixed_outdegree(OM3, OM1, delay=1, weight=-4.5)
conn_fixed_outdegree(OM2, moto, delay=2, weight=1.5)
conn_fixed_outdegree(moto, muscle, delay=2, weight=15.5)
groups = [OM1, OM2, OM3, moto, muscle]
save(groups)
P.nrn_start_seg.append(nrns_and_segs)
#
# EES = form_group("EES", 1, model=GENERATOR)
# E1 = form_group("E1", 1, model=GENERATOR)
# E2 = form_group("E2", 1, model=GENERATOR)
# E3 = form_group("E3", 1, model=GENERATOR)
# E4 = form_group("E4", 1, model=GENERATOR)
# E5 = form_group("E5", 1, model=GENERATOR)
# #
# CV1 = form_group("CV1", 1, model=GENERATOR)
# CV2 = form_group("CV2", 1, model=GENERATOR)
# CV3 = form_group("CV3", 1, model=GENERATOR)
# CV4 = form_group("CV4", 1, model=GENERATOR)
# CV5 = form_group("CV5", 1, model=GENERATOR)
#
# C_0 = form_group("C_0")
# C_1 = form_group("C_1")
# V0v = form_group("V0v")
# OM1_0E = form_group("OM1_0E")
# OM1_0F = form_group("OM1_0F")
# #
# OM1_0 = form_group("OM1_0")
# OM1_1 = form_group("OM1_1")
# OM1_2_E = form_group("OM1_2_E")
# OM1_2_F = form_group("OM1_2_F")
# OM1_3 = form_group("OM1_3")
# '''
# #
# OM2_0 = form_group("OM2_0")
# OM2_1 = form_group("OM2_1")
# OM2_2_E = form_group("OM2_2_E")
# OM2_2_F = form_group("OM2_2_F")
# OM2_3 = form_group("OM2_3")
# #
# OM3_0 = form_group("OM3_0")
# OM3_1 = form_group("OM3_1")
# OM3_2_E = form_group("OM3_2_E")
# OM3_2_F = form_group("OM3_2_F")
# OM3_3 = form_group("OM3_3")
# #
# OM4_0 = form_group("OM4_0")
# OM4_1 = form_group("OM4_1")
# OM4_2_E = form_group("OM4_2_E")
# OM4_2_F = form_group("OM4_2_F")
# OM4_3 = form_group("OM4_3")
# #
# OM5_0 = form_group("OM5_0")
# OM5_1 = form_group("OM5_1")
# OM5_2_E = form_group("OM5_2_E")
# OM5_2_F = form_group("OM5_2_F")
# OM5_3 = form_group("OM5_3")
# #
# '''
# '''
# Ia_E = form_group("Ia_E", neurons_in_ip)
# iIP_E = form_group("iIP_E", neurons_in_ip)
# R_E = form_group("R_E")
#
# Ia_F = form_group("Ia_F", neurons_in_ip)
# iIP_F = form_group("iIP_F", neurons_in_ip)
# R_F = form_group("R_F")
#
# MN_E = form_group("MN_E", 210, model=MOTO)
# MN_F = form_group("MN_F", 180, model=MOTO)
# sens_aff = form_group("sens_aff", 120)
# Ia_aff_E = form_group("Ia_aff_E", 120)
# Ia_aff_F = form_group("Ia_aff_F", 120)
# eIP_E_1 = form_group("eIP_E_1", 40)
# eIP_E_2 = form_group("eIP_E_2", 40)
# eIP_E_3 = form_group("eIP_E_3", 40)
# eIP_E_4 = form_group("eIP_E_4", 40)
# eIP_E_5 = form_group("eIP_E_5", 40)
# eIP_F = form_group("eIP_F", neurons_in_ip)
# # muscle_E = form_group("muscle_E", 150 * 210, model=MUSCLE)
# # muscle_F = form_group("muscle_F", 100 * 180, model=MUSCLE)
# '''
# conn_fixed_outdegree(EES, CV1, delay=1, weight=15)
# conn_fixed_outdegree(EES, OM1_0, delay=2, weight=0.00075 * k * skin_time)
# conn_fixed_outdegree(CV1, OM1_0, delay=2, weight=0.00048)
# # conn_fixed_outdegree(CV1, CV2, delay=1, weight=15)
#
# # OM1
# conn_fixed_outdegree(OM1_0, OM1_1, delay=3, weight=2.95)
# conn_fixed_outdegree(OM1_1, OM1_2_E, delay=3, weight=2.85)
# conn_fixed_outdegree(OM1_2_E, OM1_1, delay=3, weight=1.95)
# conn_fixed_outdegree(OM1_2_E, OM1_3, delay=3, weight=0.0007)
# # conn_fixed_outdegree(OM1_2_F, OM2_2_F, delay=1.5, weight=2)
# conn_fixed_outdegree(OM1_1, OM1_3, delay=3, weight=0.00005)
# conn_fixed_outdegree(OM1_3, OM1_2_E, delay=3, weight=-4.5)
# conn_fixed_outdegree(OM1_3, OM1_1, delay=3, weight=-4.5)
#
# groups = [OM1_0, OM1_1, OM1_2_E, OM1_3]
# save(groups)
'''
# OM2
conn_fixed_outdegree(OM2_0, OM2_1, delay=3, weight=2.95)
conn_fixed_outdegree(OM2_1, OM2_2_E, delay=3, weight=2.85)
conn_fixed_outdegree(OM2_2_E, OM2_1, delay=3, weight=1.95)
conn_fixed_outdegree(OM2_2_E, OM2_3, delay=3, weight=0.0007)
conn_fixed_outdegree(OM2_2_F, OM3_2_F, delay=1.5, weight=2)
conn_fixed_outdegree(OM2_1, OM2_3, delay=3, weight=0.00005)
conn_fixed_outdegree(OM2_3, OM2_2_E, delay=3, weight=-4.5)
conn_fixed_outdegree(OM2_3, OM2_1, delay=3, weight=-4.5)
# OM3
conn_fixed_outdegree(OM3_0, OM3_1, delay=3, weight=2.95)
conn_fixed_outdegree(OM3_1, OM3_2_E, delay=3, weight=2.85)
conn_fixed_outdegree(OM3_2_E, OM3_1, delay=3, weight=1.95)
conn_fixed_outdegree(OM3_2_E, OM3_3, delay=3, weight=0.0007)
conn_fixed_outdegree(OM3_2_F, OM4_2_F, delay=1.5, weight=2)
conn_fixed_outdegree(OM3_1, OM3_3, delay=3, weight=0.00005)
conn_fixed_outdegree(OM3_3, OM3_2_E, delay=3, weight=-4.5)
conn_fixed_outdegree(OM3_3, OM3_1, delay=3, weight=-4.5)
# OM4
conn_fixed_outdegree(OM4_0, OM4_1, delay=3, weight=2.95)
conn_fixed_outdegree(OM4_1, OM4_2_E, delay=3, weight=2.85)
conn_fixed_outdegree(OM4_2_E, OM4_1, delay=3, weight=1.95)
conn_fixed_outdegree(OM4_2_E, OM4_3, delay=3, weight=0.0007)
conn_fixed_outdegree(OM4_2_F, OM5_2_F, delay=1.5, weight=2)
conn_fixed_outdegree(OM4_1, OM4_3, delay=3, weight=0.00005)
conn_fixed_outdegree(OM4_3, OM4_2_E, delay=3, weight=-4.5)
conn_fixed_outdegree(OM4_3, OM4_1, delay=3, weight=-4.5)
# OM5
conn_fixed_outdegree(OM5_0, OM5_1, delay=3, weight=2.95)
conn_fixed_outdegree(OM5_1, OM5_2_E, delay=3, weight=2.85)
conn_fixed_outdegree(OM5_2_E, OM5_1, delay=3, weight=1.95)
conn_fixed_outdegree(OM5_2_E, OM5_3, delay=3, weight=0.0007)
conn_fixed_outdegree(OM5_1, OM5_3, delay=3, weight=0.00005)
conn_fixed_outdegree(OM5_3, OM5_2_E, delay=3, weight=-4.5)
conn_fixed_outdegree(OM5_3, OM5_1, delay=3, weight=-4.5)
'''
# ids of neurons
nrns = list(range(nrns_number))
class S:
    Vm = init0(nrns_and_segs) # [mV] array for three compartments voltage
n = init0(nrns_and_segs) # [0..1] compartments channel, providing the kinetic pattern of the L conductance
m = init0(nrns_and_segs) # [0..1] compartments channel, providing the kinetic pattern of the Na conductance
h = init0(nrns_and_segs) # [0..1] compartments channel, providing the kinetic pattern of the Na conductance
l = init0(nrns_and_segs) # [0..1] inward rectifier potassium (Kir) channel
s = init0(nrns_and_segs) # [0..1] nodal slow potassium channel
p = init0(nrns_and_segs) # [0..1] compartments channel, providing the kinetic pattern of the ?? conductance
hc = init0(nrns_and_segs) # [0..1] compartments channel, providing the kinetic pattern of the ?? conductance
mc = init0(nrns_and_segs) # [0..1] compartments channel, providing the kinetic pattern of the ?? conductance
cai = init0(nrns_and_segs) #
I_Ca = init0(nrns_and_segs) # [nA] Ca ionic currents
NODE_A = init0(nrns_and_segs) # the effect of this node on the parent node's equation
NODE_B = init0(nrns_and_segs) # the effect of the parent node on this node's equation
NODE_D = init0(nrns_and_segs) # diagonal element in node equation
const_NODE_D = init0(nrns_and_segs) # const diagonal element in node equation (performance)
NODE_RHS = init0(nrns_and_segs) # right hand side in node equation
NODE_RINV = init0(nrns_and_segs) # conductance uS from node to parent
NODE_AREA = init0(nrns_and_segs) # area of a node in um^2
class U:
has_spike = init0(nrns_number, dtype=bool) # spike flag for each neuron
spike_on = init0(nrns_number, dtype=bool) # special flag to prevent fake spike detecting
# synapses
g_exc = init0(nrns_number) # [S] excitatory conductivity level
g_inh_A = init0(nrns_number) # [S] inhibitory conductivity level
g_inh_B = init0(nrns_number) # [S] inhibitory conductivity level
factor = init0(nrns_number) # [const] todo
# extracellular
nlayer = 2
ext_shape = (nrns_and_segs, nlayer)
ext_rhs = init0(ext_shape) # extracellular right hand side in node equation
ext_v = init0(ext_shape) # extracellular membrane potential
ext_a = init0(ext_shape) # extracellular effect of node in parent equation
ext_b = init0(ext_shape) # extracellular effect of parent in node equation
ext_d = init0(ext_shape) # extracellular diagonal element in node equation
def get_neuron_data():
"""
    please note that this file should contain only the specified debugging output
"""
with open("/home/alex/NRNTEST/muscle/output") as file:
neuron_data = []
while 1:
line = file.readline()
if not line:
break
if 'SYNAPTIC time' in line:
line = file.readline()
line = line[line.index('i:')+2:line.index(', e')]
neuron_data.append([line])
if 'BREAKPOINT currents' in line:
# il, ina, ik, m, h, n, v
file.readline()
line = file.readline()
line = line.replace('BREAKPOINT currents ', '').strip().split("\t")[3:-1] # without Vm
[file.readline() for _ in range(3)]
neuron_data[-1] += line
if 'A B D INV Vm RHS' in line:
file.readline()
file.readline()
line = file.readline()
line = line.strip().split("\t")
neuron_data[-1] += line
# neuron_data.append(line)
neuron_data = neuron_data[1:-1]
neuron_data.insert(0, neuron_data[0])
neuron_data.insert(0, neuron_data[0])
    neuron_data = np.array(neuron_data).astype(float)
return neuron_data
def save_data():
"""
for debugging with NEURON
"""
global GRAS_data
for nrn_seg in save_neuron_ids:
# syn il, ina, ik, m, h, n, l, s, v
isyn = 0 # S.g_exc[nrn_seg] * (S.Vm[nrn_seg] - P.E_ex[nrn_seg])
GRAS_data.append([S.NODE_A[nrn_seg], S.NODE_B[nrn_seg], S.NODE_D[nrn_seg], S.NODE_RINV[nrn_seg], S.Vm[nrn_seg],
S.NODE_RHS[nrn_seg], ext_v[nrn_seg, 0], isyn, S.m[nrn_seg], S.h[nrn_seg], S.n[nrn_seg],
S.l[nrn_seg], S.s[nrn_seg]])
def Exp(volt):
return 0 if volt < -100 else np.exp(volt)
def alpham(volt):
"""
"""
if abs((volt + amB) / amC) < 1e-6:
return amA * amC
return amA * (volt + amB) / (1 - Exp(-(volt + amB) / amC))
def betam(volt):
"""
"""
if abs((volt + bmB) / bmC) < 1e-6:
return -bmA * bmC
return -bmA * (volt + bmB) / (1 - Exp((volt + bmB) / bmC))
def syn_current(nrn, voltage):
"""
calculate synaptic current
"""
return U.g_exc[nrn] * (voltage - P.E_ex[nrn]) + (U.g_inh_B[nrn] - U.g_inh_A[nrn]) * (voltage - P.E_inh[nrn])
def nrn_moto_current(nrn, nrn_seg_index, voltage):
"""
calculate channels current
"""
iNa = P.gnabar[nrn] * S.m[nrn_seg_index] ** 3 * S.h[nrn_seg_index] * (voltage - P.ena[nrn])
iK = P.gkrect[nrn] * S.n[nrn_seg_index] ** 4 * (voltage - P.ek[nrn]) + \
P.gcak[nrn] * S.cai[nrn_seg_index] ** 2 / (S.cai[nrn_seg_index] ** 2 + 0.014 ** 2) * (voltage - P.ek[nrn])
iL = P.gl[nrn] * (voltage - P.el[nrn])
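    # Nernst potential for Ca2+ at 36 °C (T = 309.15 K); the factor 1000 converts volts to millivolts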
eCa = (1000 * R_const * 309.15 / (2 * F_const)) * np.log(ca0 / S.cai[nrn_seg_index])
S.I_Ca[nrn_seg_index] = P.gcaN[nrn] * S.mc[nrn_seg_index] ** 2 * S.hc[nrn_seg_index] * (voltage - eCa) + \
P.gcaL[nrn] * S.p[nrn_seg_index] * (voltage - eCa)
return iNa + iK + iL + S.I_Ca[nrn_seg_index]
def nrn_fastchannel_current(nrn, nrn_seg_index, voltage):
"""
calculate channels current
"""
iNa = P.gnabar[nrn] * S.m[nrn_seg_index] ** 3 * S.h[nrn_seg_index] * (voltage - P.ena[nrn])
iK = P.gkbar[nrn] * S.n[nrn_seg_index] ** 4 * (voltage - P.ek[nrn])
iL = P.gl[nrn] * (voltage - P.el[nrn])
return iNa + iK + iL
def recalc_synaptic(nrn):
"""
    updating the (summed) conductance of a neuron's post-synaptic connections
"""
# exc synaptic conductance
if U.g_exc[nrn] != 0:
U.g_exc[nrn] -= (1 - np.exp(-dt / P.tau_exc[nrn])) * U.g_exc[nrn]
if U.g_exc[nrn] < 1e-5:
U.g_exc[nrn] = 0
# inh1 synaptic conductance
if U.g_inh_A[nrn] != 0:
U.g_inh_A[nrn] -= (1 - np.exp(-dt / P.tau_inh1[nrn])) * U.g_inh_A[nrn]
if U.g_inh_A[nrn] < 1e-5:
U.g_inh_A[nrn] = 0
# inh2 synaptic conductance
if U.g_inh_B[nrn] != 0:
U.g_inh_B[nrn] -= (1 - np.exp(-dt / P.tau_inh2[nrn])) * U.g_inh_B[nrn]
if U.g_inh_B[nrn] < 1e-5:
U.g_inh_B[nrn] = 0
def syn_initial(nrn):
"""
initialize tau (rise/decay time, ms) and factor (const) variables
"""
if P.tau_inh1[nrn] / P.tau_inh2[nrn] > 0.9999:
P.tau_inh1[nrn] = 0.9999 * P.tau_inh2[nrn]
if P.tau_inh1[nrn] / P.tau_inh2[nrn] < 1e-9:
P.tau_inh1[nrn] = P.tau_inh2[nrn] * 1e-9
#
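    # tp is the time of the peak of the double-exponential synaptic conductance;
    # `factor` (computed next) rescales the conductance so that its peak equals 1,
    # mirroring NEURON's Exp2Syn mechanism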
tp = (P.tau_inh1[nrn] * P.tau_inh2[nrn]) / (P.tau_inh2[nrn] - P.tau_inh1[nrn]) * np.log(P.tau_inh2[nrn] / P.tau_inh1[nrn])
U.factor[nrn] = -
|
np.exp(-tp / P.tau_inh1[nrn])
|
numpy.exp
|
import numpy as np
import bayesiancoresets as bc
import os, sys
from scipy.stats import multivariate_normal
#make it so we can import models/etc from parent folder
sys.path.insert(1, os.path.join(sys.path[0], '../common'))
import model_linreg
import time
nm = sys.argv[1]
tr = sys.argv[2]
#experiment params
M = 300
opt_itrs = 100
proj_dim = 100
pihat_noise =0.75
n_bases_per_scale = 50
N_subsample = 1000
#load data and compute true posterior
#each row of x is [lat, lon, price]
print('Loading data')
#trial num as seed for loading data
np.random.seed(int(tr))
x = np.load('../data/prices2018.npy')
print('Taking a random subsample')
#get a random subsample of it
idcs = np.arange(x.shape[0])
np.random.shuffle(idcs)
x = x[idcs[:N_subsample], :]
#log transform
x[:, 2] =
|
np.log10(x[:, 2])
|
numpy.log10
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import io
import os
from datetime import datetime
from packaging.version import Version
import pytest
import numpy as np
from numpy.testing import (
assert_allclose, assert_array_almost_equal, assert_array_almost_equal_nulp,
assert_array_equal)
from astropy import wcs
from astropy.wcs import _wcs # noqa
from astropy.utils.data import (
get_pkg_data_filenames, get_pkg_data_contents, get_pkg_data_filename)
from astropy.utils.misc import NumpyRNGContext
from astropy.utils.exceptions import (
AstropyUserWarning, AstropyWarning, AstropyDeprecationWarning)
from astropy.io import fits
from astropy.coordinates import SkyCoord
_WCSLIB_VER = Version(_wcs.__version__)
# NOTE: User can choose to use system wcslib instead of bundled.
def _check_v71_dateref_warnings(w, nmax=None):
if _WCSLIB_VER >= Version('7.1') and _WCSLIB_VER < Version('7.3') and w:
if nmax is None:
assert w
else:
assert len(w) == nmax
for item in w:
if (issubclass(item.category, wcs.FITSFixedWarning) and
str(item.message) == "'datfix' made the change "
"'Set DATE-REF to '1858-11-17' from MJD-REF'."):
break
else:
assert False, "No 'datfix' warning found"
class TestMaps:
def setup(self):
# get the list of the hdr files that we want to test
self._file_list = list(get_pkg_data_filenames(
"data/maps", pattern="*.hdr"))
def test_consistency(self):
# Check to see that we actually have the list we expect, so that we
# do not get in a situation where the list is empty or incomplete and
# the tests still seem to pass correctly.
# how many do we expect to see?
n_data_files = 28
assert len(self._file_list) == n_data_files, (
"test_spectra has wrong number data files: found {}, expected "
" {}".format(len(self._file_list), n_data_files))
def test_maps(self):
for filename in self._file_list:
# use the base name of the file, so we get more useful messages
# for failing tests.
filename = os.path.basename(filename)
# Now find the associated file in the installed wcs test directory.
header = get_pkg_data_contents(
os.path.join("data", "maps", filename), encoding='binary')
# finally run the test.
wcsobj = wcs.WCS(header)
world = wcsobj.wcs_pix2world([[97, 97]], 1)
|
assert_array_almost_equal(world, [[285.0, -66.25]], decimal=1)
|
numpy.testing.assert_array_almost_equal
|
# -*- coding: utf-8 -*-
"""
Created on Saturday September 1 1:50:00 2012
###############################################################################
#
# autoPACK Authors: <NAME>, <NAME>, <NAME>, <NAME>
# Based on COFFEE Script developed by <NAME> between 2005 and 2010
# with assistance from <NAME> in 2009 and periodic input
# from <NAME>'s Molecular Graphics Lab
#
# AFGui.py Authors: <NAME> with minor editing/enhancement from <NAME>
#
# Copyright: <NAME> ©2010
#
# This file "fillBoxPseudoCode.py" is part of autoPACK, cellPACK, and AutoFill.
#
# autoPACK is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# autoPACK is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with autoPACK (See "CopyingGNUGPL" in the installation.
# If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
Name: -
@author: <NAME> and <NAME> with <NAME>
"""
SMALL_NUM = 0.00000001 #anything that avoids division overflow
import math
from math import fabs
import numpy
import AutoFill
helper = AutoFill.helper
## intersect_RayTriangle(): intersect a ray with a 3D triangle
## Input: a ray R, and a triangle T
## Returns -1, None = triangle is degenerate (a segment or point)
## 0, None = disjoint (no intersect)
## 1, I = intersect in unique point I
## 2, None = are in the same plane
def intersect_RayTrianglePy( ray, Triangle):
point1 = ray[0]
point2 = ray[1]
# get triangle edge vectors and plane normal
t0 = Triangle[0]
t1 = Triangle[1]
t2 = Triangle[2]
u = [ t1[0]-t0[0], t1[1]-t0[1], t1[2]-t0[2] ]
v = [ t2[0]-t0[0], t2[1]-t0[1], t2[2]-t0[2] ]
# cross product
n = ( u[1]*v[2]-u[2]*v[1], u[2]*v[0]-u[0]*v[2], u[0]*v[1]-u[1]*v[0])
if n[0]*n[0]+n[1]*n[1]+n[2]*n[2]<SMALL_NUM: # triangle is degenerate
return -1,None # do not deal with this case
# ray direction vector
dir = ( point2[0]-point1[0], point2[1]-point1[1], point2[2]-point1[2])
w0 = ( point1[0]-t0[0], point1[1]-t0[1], point1[2]-t0[2] )
a = -n[0]*w0[0] - n[1]*w0[1] - n[2]*w0[2]
b = n[0]*dir[0] + n[1]*dir[1] + n[2]*dir[2]
if fabs(b) < SMALL_NUM: # ray is parallel to triangle plane
if a == 0: # ray lies in triangle plane
return 2,None
else:
return 0 ,None # ray disjoint from plane
# get intersect point of ray with triangle plane
r = a / b
if r < 0.0: # ray goes away from triangle => no intersect
return 0,None
#if r > 1.0: # segment too short => no intersect
# return 0,None
# intersect point of ray and plane
I = (point1[0] + r*dir[0], point1[1] + r*dir[1], point1[2] + r*dir[2] )
# is I inside Triangle?
uu = u[0]*u[0]+u[1]*u[1]+u[2]*u[2]
uv = u[0]*v[0]+u[1]*v[1]+u[2]*v[2]
vv = v[0]*v[0]+v[1]*v[1]+v[2]*v[2]
w = ( I[0] - t0[0], I[1] - t0[1], I[2] - t0[2] )
wu = w[0]*u[0]+w[1]*u[1]+w[2]*u[2]
wv = w[0]*v[0]+w[1]*v[1]+w[2]*v[2]
D = uv * uv - uu * vv
# get and test parametric coords
s = (uv * wv - vv * wu) / D
if s < 0.0 or s > 1.0: # I is outside Triangle
return 0,None
t = (uv * wu - uu * wv) / D
if t < 0.0 or (s + t) > 1.0: # I is outside Triangle
return 0,None
return 1, I # I is in Triangle
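# Example (illustrative sanity check): a ray through the origin along +Z intersects the
# triangle below in the unique point (0, 0, 0), so the call returns status 1:
#
#   ray = [[0.0, 0.0, -1.0], [0.0, 0.0, 1.0]]
#   tri = [[-1.0, -1.0, 0.0], [1.0, -1.0, 0.0], [0.0, 1.0, 0.0]]
#   status, point = intersect_RayTrianglePy(ray, tri)   # -> (1, (0.0, 0.0, 0.0))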
try:
from geomutils.geomalgorithms import intersect_RayTriangle
except ImportError:
print ("shapefit.intersect_RayTriangle.py: defaulting to python implementation")
intersect_RayTriangle = intersect_RayTrianglePy
intersect_RayTriangle = intersect_RayTrianglePy
def intersectRayPolyhedron( pol, pt1, pt2, returnAll=False):
# compute intersection points between a polyhedron defined by a list
# of triangles (p0, p1, p2) and a ray starting at pt1 and going through pt2
inter = []
interi = []
ray = [list(pt1), list(pt2)]
for ti, t in enumerate(pol):
status, interPt = intersect_RayTriangle(ray, t)
if status==1:
inter.append(interPt)
interi.append(ti)
if returnAll:
return interi, inter
if len(inter)>0: # find closest to pt1
mini=9999999999.0
for i, p in enumerate(inter):
v = ( p[0]-pt1[0], p[1]-pt1[1], p[2]-pt1[2] )
d = v[0]*v[0]+v[1]*v[1]+v[2]*v[2]
if d < mini:
mini = d
interPt = inter[i]
tind = interi[i]
return tind, interPt
else:
return None, None
def IndexedPolgonsToTriPoints(geom):
verts = geom.getVertices()
tri = geom.getFaces()
assert tri.shape[1]==3
triv = []
for t in tri:
triv.append( [verts[i].tolist() for i in t] )
return triv
def vlen(vector):
a,b,c = (vector[0],vector[1],vector[2])
return (math.sqrt( a*a + b*b + c*c))
def f_ray_intersect_polygon(pRayStartPos, pRayEndPos, pQuadranglePointPositions, pQuadranglePointList, pTruncateToSegment):
#// This function returns TRUE if a ray intersects a triangle.
#// It also calculates and returns the UV coordinates of said collision as part of the intersection test.
vLineSlope = pRayEndPos - pRayStartPos; # This line segment defines an infinite line to test for intersection
vTriPolys = pQuadranglePointList;
vBackface = False;
vHitCount = 0;
vTriPoints = pQuadranglePointPositions;
vEpsilon = 0.00001;
vBreakj = False;
vCollidePos = None
j = 0;
vQuadrangle = 1; # Default says polygon is a quadrangle.
vLoopLimit = 2; # Default k will loop through polygon assuming its a quad.
if (vTriPolys[j+3] == vTriPolys[j+2]): # Test to see if quad is actually just a triangle.
vQuadrangle = 0; # Current polygon is not a quad, its a triangle.
vLoopLimit = 1; #// Set k loop to only cycle one time.
for k in range(vLoopLimit):# (k = 0; k<vLoopLimit; k++)
vTriPt0 = vTriPoints[vTriPolys[j+0]]; #// Always get the first point of a quad/tri
vTriPt1 = vTriPoints[vTriPolys[j+1+k]]; # // Get point 1 for a tri and a quad's first pass, but skip for a quad's second pass
vTriPt2 = vTriPoints[vTriPolys[j+2+k]]; #// Get point 2 for a tri and a quad's first pass, but get point 3 only for a quad on its second pass.
vE1 = vTriPt1 - vTriPt0; #// Get the first edge as a vector.
vE2 = vTriPt2 - vTriPt0; #// Get the second edge.
h = numpy.cross(vLineSlope, vE2);
a = numpy.dot(vE1,h); #// Get the projection of h onto vE1.
if (a > -0.00001) and (a < 0.00001): #// If the ray is parallel to the plane then it does not intersect it, i.e., a = 0 +/- given rounding slop.
continue;
#// If the polygon is a quadrangle, test the other triangle that comprises it.
F = 1.0/a;
s = pRayStartPos - vTriPt0; #// Get the vector from the origin of the triangle to the ray's origin.
u = F * numpy.dot(s,h);
if (u < 0.0 ) or (u > 1.0) : continue;
#/* Break if it's outside of the triangle, but try the other triangle if in a quad.
#U is described as u = : start of vE1 = 0.0, to the end of vE1 = 1.0 as a percentage.
#If the value of the U coordinate is outside the range of values inside the triangle,
#then the ray has intersected the plane outside the triangle.*/
q = numpy.cross(s, vE1);
v = F * numpy.dot(vLineSlope,q);
if (v <0.0) or (u+v > 1.0) : continue;
#/* Break if outside of the triangle's v range.
#If the value of the V coordinate is outside the range of values inside the triangle,
#then the ray has intersected the plane outside the triangle.
#U + V cannot exceed 1.0 or the point is not in the triangle.
#If you imagine the triangle as half a square this makes sense. U=1 V=1 would be in the
#lower left hand corner which would be in the second triangle making up the square.*/
vCollidePos = vTriPt0 + u*vE1 + v*vE2; #// This is the global collision position.
#// The ray's infinite line hits the triangle; now test whether the hit lies in front of the ray origin.
vBackface = False;
if (numpy.dot(vLineSlope, vCollidePos - pRayStartPos) > 0) : #// This truncates our infinite line to a ray pointing from start THROUGH end positions.
vHitCount+=1;
if (pTruncateToSegment ) and (vlen(vLineSlope) < vlen(vCollidePos - pRayStartPos)) :
break; #// This truncates our ray to a line segment from start to end positions.
if (a<0.00001) :
vBackface = True; # Test to see if the triangle hit is a backface.
return vBackface;
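# Illustrative sketch (not part of the original module): calling f_ray_intersect_polygon on a
# single triangle encoded as a degenerate quad (the last index is repeated, which is how the
# j+3 == j+2 test above detects a triangle). Inputs must be numpy arrays because the function
# relies on vector arithmetic; all values are hypothetical.
def _example_f_ray_intersect_polygon():
    points = numpy.array([[0.0, 0.0, 0.0],
                          [1.0, 0.0, 0.0],
                          [0.0, 1.0, 0.0]])
    indices = [0, 1, 2, 2]  # triangle stored as a quad with its last vertex repeated
    start = numpy.array([0.25, 0.25, -1.0])
    end = numpy.array([0.25, 0.25, 1.0])
    # the function reports only the backface flag of the hit polygon (True for this winding order)
    return f_ray_intersect_polygon(start, end, points, indices, pTruncateToSegment=False)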
def f_ray_intersect_polyhedron(pRayStartPos, pRayEndPos, pPolyhedron, pTruncateToSegment,point=0):
#//This function returns TRUE if a ray intersects a triangle.
#// It also calculates and returns the UV coordinates of said collision as part of the intersection test.
vLineSlope = (pRayEndPos - pRayStartPos)*2.0; #// This line segment defines an infinite line to test for intersection
#var vPolyhedronPos = GetGlobalPosition(pPolyhedron);
vTriPolys,vTriPoints,vnormals = helper.DecomposeMesh(pPolyhedron,
edit=False,copy=False,tri=True,transform=True)
#var vTriPoints = pPolyhedron->GetPoints();
#var vTriPolys = pPolyhedron->GetPolygons();
vTriPolys = numpy.array(vTriPolys)
vTriPoints = numpy.array(vTriPoints)
vBackface = None
vHitCount = 0;
#
#var i;
#for (i=0; i< sizeof(vTriPoints); i++) // Lets globalize the polyhedron.
#{
#vTriPoints[i] = vTriPoints[i] + vPolyhedronPos;
#}
#
vEpsilon = 0.00001;
vBreakj = False;
vCollidePos = None
#var j;
# helper.resetProgressBar()
# helper.progressBar(label="checking point %d" % point)
for j in range(len(vTriPolys)):#(j=0; j<sizeof(vTriPolys); j+=4) // Walk through each polygon in a polyhedron
#{
#// Loop through all the polygons in an input polyhedron
vQuadrangle = 1; #// Default says polygon is a quadrangle.triangle
vLoopLimit = 2; #// Default k will loop through polygon assuming its a quad.triangle
# if (vTriPolys[j+3] == vTriPolys[j+2]) #// Test to see if quad is actually just a triangle.
#{
vQuadrangle = 0; #// Current polygon is not a quad, its a triangle.
vLoopLimit = 1; #// Set k loop to only cycle one time.
#}
#
#var k;
# p=(j/float(len(vTriPolys)))*100.0
# helper.progressBar(progress=int(p),label=str(j))
for k in range(vLoopLimit):# (k = 0; k<vLoopLimit; k++)
vTriPt0 = vTriPoints[vTriPolys[j][0]]; #// Always get the first point of a quad/tri
vTriPt1 = vTriPoints[vTriPolys[j][1+k]]; # // Get point 1 for a tri and a quad's first pass, but skip for a quad's second pass
vTriPt2 = vTriPoints[vTriPolys[j][2+k]]; #// Get point 2 for a tri and a quad's first pass, but get point 3 only for a quad on its second pass.
#
vE1 = vTriPt1 - vTriPt0; #// Get the first edge as a vector.
vE2 = vTriPt2 - vTriPt0; #// Get the second edge.
h = numpy.cross(vLineSlope, vE2);#or use hostmath ?
#
a = numpy.dot(vE1,h); #// Get the projection of h onto vE1.
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import division
from __future__ import print_function
import cairo
from cairo import OPERATOR_SOURCE
from numpy import arctan2
from numpy import array
from numpy import column_stack
from numpy import cos
from numpy import linspace
from numpy import pi
from numpy import sin
from numpy import sqrt
from numpy import square
from numpy.random import random
TWOPI = pi*2
class Render(object):
def __init__(self,n, back, front):
self.n = n
self.front = front
self.back = back
self.pix = 1./float(n)
self.num_img = 0
self.__init_cairo()
def __init_cairo(self):
sur = cairo.ImageSurface(cairo.FORMAT_ARGB32,self.n,self.n)
ctx = cairo.Context(sur)
ctx.scale(self.n,self.n)
self.sur = sur
self.ctx = ctx
self.clear_canvas()
def clear_canvas(self):
ctx = self.ctx
ctx.set_source_rgba(*self.back)
ctx.rectangle(0,0,1,1)
ctx.fill()
ctx.set_source_rgba(*self.front)
def write_to_png(self,fn):
self.sur.write_to_png(fn)
self.num_img += 1
def set_front(self, c):
self.front = c
self.ctx.set_source_rgba(*c)
def set_back(self, c):
self.back = c
def set_line_width(self, w):
self.line_width = w
self.ctx.set_line_width(w)
def line(self,x1,y1,x2,y2):
ctx = self.ctx
ctx.move_to(x1,y1)
ctx.line_to(x2,y2)
ctx.stroke()
def triangle(self,x1,y1,x2,y2,x3,y3,fill=False):
ctx = self.ctx
ctx.move_to(x1,y1)
ctx.line_to(x2,y2)
ctx.line_to(x3,y3)
ctx.close_path()
if fill:
ctx.fill()
else:
ctx.stroke()
def random_parallelogram(self,x1,y1,x2,y2,x3,y3,grains):
pix = self.pix
rectangle = self.ctx.rectangle
fill = self.ctx.fill
v1 = array((x2-x1, y2-y1))
v2 = array((x3-x1, y3-y1))
a1 = random((grains, 1))
a2 = random((grains, 1))
dd = v1*a1 + v2*a2
dd[:,0] += x1
dd[:,1] += y1
for x,y in dd:
rectangle(x,y,pix,pix)
fill()
def random_triangle(self,x1,y1,x2,y2,x3,y3,grains):
pix = self.pix
rectangle = self.ctx.rectangle
fill = self.ctx.fill
v1 = array((x2-x1, y2-y1))
v2 = array((x3-x1, y3-y1))
a1 = random((2*grains, 1))
a2 = random((2*grains, 1))
mask = ((a1+a2)<1).flatten()
## discarding half the grains because i am too tired to figure out how to
## map the parallelogram to the triangle
dd = v1*a1 + v2*a2
dd[:,0] += x1
dd[:,1] += y1
for x,y in dd[mask,:]:
rectangle(x,y,pix,pix)
fill()
def random_circle(self,x1,y1,r,grains):
"""
random points in circle. nonuniform distribution.
"""
pix = self.pix
rectangle = self.ctx.rectangle
fill = self.ctx.fill
the = random(grains)*pi*2
rad = random(grains)*r
xx = x1 + cos(the)*rad
yy = y1 + sin(the)*rad
for x,y in zip(xx,yy):
rectangle(x,y,pix,pix)
fill()
def random_uniform_circle(self,x1,y1,r,grains,dst=0):
from helpers import darts
pix = self.pix
rectangle = self.ctx.rectangle
fill = self.ctx.fill
for x,y in darts(grains,x1,y1,r,dst):
rectangle(x,y,pix,pix)
fill()
def dot(self,x,y):
ctx = self.ctx
pix = self.pix
ctx.rectangle(x,y,pix,pix)
ctx.fill()
def circle(self,x,y,r,fill=False):
ctx = self.ctx
ctx.arc(x,y,r,0,TWOPI)
if fill:
ctx.fill()
else:
ctx.stroke()
def transparent_pix(self):
op = self.ctx.get_operator()
self.ctx.set_operator(OPERATOR_SOURCE)
self.ctx.set_source_rgba(*[1,1,1,0.95])
self.dot(1-self.pix,1.0-self.pix)
self.ctx.set_operator(op)
def path(self, xy):
ctx = self.ctx
ctx.move_to(*xy[0,:])
for x in xy:
ctx.line_to(*x)
ctx.stroke()
def closed_path(self, coords, fill=True):
ctx = self.ctx
line_to = ctx.line_to
x,y = coords[0]
ctx.move_to(x,y)
for x,y in coords[1:]:
line_to(x,y)
ctx.close_path()
if fill:
ctx.fill()
else:
ctx.stroke()
def circle_path(self, coords, r, fill=False):
ctx = self.ctx
for x,y in coords:
ctx.arc(x,y,r,0,TWOPI)
if fill:
ctx.fill()
else:
ctx.stroke()
def circles(self,x1,y1,x2,y2,r,nmin=2):
arc = self.ctx.arc
fill = self.ctx.fill
dx = x1-x2
dy = y1-y2
dd = sqrt(dx*dx+dy*dy)
n = int(dd/self.pix)
n = n if n>nmin else nmin
a = arctan2(dy,dx)
scale = linspace(0,dd,n)
xp = x1-scale*cos(a)
yp = y1-scale*sin(a)
for x,y in zip(xp,yp):
arc(x,y,r,0,pi*2.)
fill()
def sandstroke_orthogonal(self,xys,height=None,steps=10,grains=10):
pix = self.pix
rectangle = self.ctx.rectangle
fill = self.ctx.fill
if not height:
height = pix*10
dx = xys[:,2] - xys[:,0]
dy = xys[:,3] - xys[:,1]
aa = arctan2(dy,dx)
directions = column_stack([cos(aa),sin(aa)])
dd = sqrt(square(dx)+square(dy))
aa_orth = aa + pi*0.5
directions_orth = column_stack([cos(aa_orth),sin(aa_orth)])
for i,d in enumerate(dd):
xy_start = xys[i,:2] + \
directions[i,:]*random((steps,1))*d
for xy in xy_start:
points = xy + \
directions_orth[i,:]*random((grains,1))*height
for x,y in points:
rectangle(x,y,pix,pix)
fill()
def sandstroke_non_linear(self,xys,grains=10,left=True):
pix = self.pix
rectangle = self.ctx.rectangle
fill = self.ctx.fill
dx = xys[:,2] - xys[:,0]
dy = xys[:,3] - xys[:,1]
aa = arctan2(dy,dx)
directions = column_stack([cos(aa),sin(aa)])
dd = sqrt(square(dx)+square(dy))
for i,d in enumerate(dd):
rnd = sqrt(random((grains,1)))
if left:
rnd = 1.0-rnd
for x,y in xys[i,:2] + directions[i,:]*rnd*d:
rectangle(x,y,pix,pix)
fill()
def sandstroke(self,xys,grains=10):
pix = self.pix
rectangle = self.ctx.rectangle
fill = self.ctx.fill
dx = xys[:,2] - xys[:,0]
dy = xys[:,3] - xys[:,1]
aa = arctan2(dy,dx)
directions = column_stack([cos(aa),sin(aa)])
dd = sqrt(square(dx)+square(dy))
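# Illustrative sketch (not part of the original module): a minimal usage example of the Render
# class above. The canvas size, colours, grain count and file name are hypothetical.
def _example_render():
    back = (1.0, 1.0, 1.0, 1.0)   # white, opaque
    front = (0.0, 0.0, 0.0, 1.0)  # black, opaque
    render = Render(512, back, front)
    render.set_line_width(render.pix)
    # all coordinates live in the unit square because __init_cairo scales the context by n
    render.line(0.1, 0.1, 0.9, 0.9)
    render.triangle(0.2, 0.8, 0.8, 0.8, 0.5, 0.3, fill=False)
    render.random_triangle(0.2, 0.2, 0.8, 0.2, 0.5, 0.6, grains=2000)
    render.write_to_png('example_render.png')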
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 23 18:43:28 2017
@author: tobias
"""
import os
import re
import glob
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
plt.switch_backend('agg')
#contig_input_file = '/Users/tobias/GitHub/seqcap_processor/data/processed/target_contigs/match_table.txt'
#alignment_folder = '/Users/tobias/GitHub/seqcap_processor/data/processed/alignments/contig_alignments'
#read_cov_file = '/Users/tobias/GitHub/seqcap_processor/data/processed/remapped_reads/average_cov_per_locus.txt'
#read_cov_file = '/Users/tobias/GitHub/seqcap_processor/data/processed/selected_loci_50/overview_selected_loci.txt'
##plot_contigs_alignments_read_cov(contig_input_file,alignment_folder,read_cov_file,number_of_rows=2)
#selected_loci = plot_contigs_alignments_read_cov(contig_input_file,alignment_folder,read_cov_file,reduce=True,norm_value=10)
#selected_loci.savefig(os.path.join('/Users/tobias/GitHub/seqcap_processor/data/processed/selected_loci_50/','contig_exon_coverage_matrix_reduced.png'), dpi = 500)
#
#output_folder = '/Users/tobias/GitHub/seqcap_processor/data/processed/'
#align = general_scale_bar(2,tick_labels=['No','Yes'],x0=.1,x1=.25,plot_height=.5,plot_width=.3,font_size = 26,color1='white',color2=(0.0, 0.26666666666666666, 0.10588235294117647),height=4,width=3,plot_label='Alignment present')
#align.savefig(os.path.join(output_folder,'legend_presence_absence_alignments_green.png'), dpi = 500)
#
#contig = general_scale_bar(2,tick_labels=['No','Yes'],x0=.1,x1=.25,plot_height=.5,plot_width=.3,font_size = 26,color1='white',color2=(0.031372549019607843, 0.25098039215686274, 0.50588235294117645),height=4,width=3,plot_label='Contig present')
#contig.savefig(os.path.join(output_folder,'legend_presence_absence_contig_blue.png'), dpi = 500)
#
#
#legend = plot_heatmap_legend(0,10,font_size=26)
#legend.savefig(os.path.join('/Users/tobias/GitHub/seqcap_processor/data/processed/','legend_read_coverage.png'), dpi = 500)
#
def plot_contig_yield_linux_cluster(contig_input_file,outdir):
workdir = '/'.join(contig_input_file.split('/')[:-1])
contig_matrix = pd.read_csv(contig_input_file,sep='\t',index_col=0)
x_labels = np.array(contig_matrix.index)
num_x_labels = range(len(x_labels))
#______________________________Contig Data_____________________________________
# Read the contig data
data_1_contig_present = np.matrix(contig_matrix).T
data_1_y_labels = contig_matrix.columns
# replace substring in sample name
data_1_y_labels = np.core.defchararray.replace(np.array(data_1_y_labels,dtype=str), 'sample_', 'contigs ')
# print a text file with the loci indices and the corresponding loci names
new_locus_list = x_labels
locus_index_overview = pd.DataFrame({'loci':new_locus_list})
locus_index_overview.to_csv(os.path.join(workdir,'locus_index_overview.txt'),sep='\t',header=False)
#___________________________Plotting settings___________________________________
height,width = data_1_contig_present.shape
fig = plt.figure(figsize=(20,8))
#fig.subplots_adjust(top=1, bottom=0.0, left=0.2, right=0.99)
for i,m in enumerate(data_1_contig_present):
ax = plt.subplot(height, 1, i+1)
ax.tick_params(left='off',bottom='off',labelleft='off')
# Only plot x-axis for last row
if not i == height-1:
ax.xaxis.set_major_formatter(plt.NullFormatter())
#plt.axis("off")
if data_1_y_labels[i] == 'contig alignment':
plt.imshow(data_1_contig_present[i], aspect='auto', cmap='binary', origin='lower')
else:
plt.imshow(data_1_contig_present[i], aspect='auto', cmap='GnBu', origin='lower')
pos = list(ax.get_position().bounds)
fig.text(pos[0] - 0.01, pos[1], data_1_y_labels[i], horizontalalignment='right')
plt.xlabel('exon index')
#plt.colorbar()
fig.savefig(os.path.join(outdir,'contig_yield_overview.png'),bbox_inches='tight', dpi = 500)
def plot_contigs_and_alignments_yield_linux_cluster(contig_input_file,alignment_folder,outdir):
workdir = '/'.join(contig_input_file.split('/')[:-1])
contig_matrix = pd.read_csv(contig_input_file,sep='\t',index_col=0)
x_labels = np.array(contig_matrix.index)
num_x_labels = range(len(x_labels))
#______________________________Contig Data_____________________________________
# Read the contig data
data_1_contig_present = np.matrix(contig_matrix).T
data_1_y_labels = contig_matrix.columns
# replace substring in sample name
data_1_y_labels = np.core.defchararray.replace(np.array(data_1_y_labels,dtype=str), 'sample_', 'contigs ')
#_______________________________Contig Alignment Data__________________________
# Get the alignment files and make list of loci with alignments
alignment_files = glob.glob(os.path.join(alignment_folder, '*.fasta'))
list_of_loci_with_alignments = [re.sub('.fasta','',al.split('/')[-1]) for al in alignment_files]
# Create 1-dimensional matrix and fill with info which loci have alignment data
presence_absence_df = pd.DataFrame({'loci':x_labels,'presence':0})
for locus in list_of_loci_with_alignments:
row_index = presence_absence_df[presence_absence_df.loci == locus].index
presence_absence_df.loc[row_index,'presence'] = 1
data_2_contig_alignment = np.matrix(presence_absence_df.presence)
data_2_y_labels = np.array('contig alignment')
#_________________________Combine contig and alignment data_____________________
contig_data_subset = np.vstack([data_1_contig_present, data_2_contig_alignment])
y_labels_contig_data = np.append(data_1_y_labels,data_2_y_labels)
# print a text file with the loci indices and the corresponding loci names
new_locus_list = x_labels
locus_index_overview = pd.DataFrame({'loci':new_locus_list})
locus_index_overview.to_csv(os.path.join(workdir,'locus_index_overview.txt'),sep='\t',header=False)
#___________________________Plotting settings___________________________________
height,width = contig_data_subset.shape
fig = plt.figure(figsize=(20,8))
#fig.subplots_adjust(top=1, bottom=0.0, left=0.2, right=0.99)
for i,m in enumerate(contig_data_subset):
ax = plt.subplot(height, 1, i+1)
ax.tick_params(left='off',bottom='off',labelleft='off')
# Only plot x-axis for last row
if not i == height-1:
ax.xaxis.set_major_formatter(plt.NullFormatter())
#plt.axis("off")
if y_labels_contig_data[i] == 'contig alignment':
plt.imshow(contig_data_subset[i], aspect='auto', cmap='binary', origin='lower')
else:
plt.imshow(contig_data_subset[i], aspect='auto', cmap='GnBu', origin='lower')
pos = list(ax.get_position().bounds)
fig.text(pos[0] - 0.01, pos[1], y_labels_contig_data[i], horizontalalignment='right')
plt.xlabel('exon index')
#plt.colorbar()
fig.savefig(os.path.join(outdir,'contig_yield_and_msas_overview.png'),bbox_inches='tight', dpi = 500)
def plot_contigs_alignments_read_cov_linux_cluster(contig_input_file,alignment_folder,read_cov_file,outdir,number_of_rows=False,font_size=12,reduce=False,string_to_remove_from_sample_names='sample_',norm_value=False):
mpl.rcParams.update({'font.size': font_size})
workdir = '/'.join(read_cov_file.split('/')[:-1])
contig_matrix = pd.read_csv(contig_input_file,sep='\t',index_col=0)
x_labels = np.array(contig_matrix.index)
num_x_labels = range(len(x_labels))
#______________________________1. Contig Data_____________________________________
# Read the contig data
data_1_contig_present = np.matrix(contig_matrix).T
data_1_y_labels = contig_matrix.columns
# replace substring in sample name
data_1_y_labels = np.core.defchararray.replace(np.array(data_1_y_labels,dtype=str), string_to_remove_from_sample_names, 'contigs ')
#_______________________________2. Contig Alignment Data__________________________
# Get the alignment files and make list of loci with alignments
alignment_files = glob.glob(os.path.join(alignment_folder, '*.fasta'))
list_of_loci_with_alignments = [re.sub('.fasta','',al.split('/')[-1]) for al in alignment_files]
# Create 1-dimensional matrix and fill with info which loci have alignment data
presence_absence_df = pd.DataFrame({'loci':x_labels,'presence':0})
for locus in list_of_loci_with_alignments:
row_index = presence_absence_df[presence_absence_df.loci == locus].index
presence_absence_df.loc[row_index,'presence'] = 1
data_2_contig_alignment = np.matrix(presence_absence_df.presence)
data_2_y_labels = np.array('contig alignment')
#_______________________________3. Reference-assembly Data__________________________
# Get the data as pandas dataframe
unsorted_read_cov_data = pd.read_csv(read_cov_file, sep = '\t',index_col=0)
locus_selection=False
if 'sum_per_locus' in unsorted_read_cov_data.columns:
unsorted_read_cov_data = unsorted_read_cov_data.iloc[:,:-1]
locus_selection=True
# sort columns in df
temp_read_cov_data = unsorted_read_cov_data[sorted(unsorted_read_cov_data.columns)].sort_index()
# add row of 0's for all missing loci
loci_in_df = list(temp_read_cov_data.index)
for locus in list(x_labels):
if locus not in loci_in_df:
temp_read_cov_data.loc[locus] = [0.0]*len(temp_read_cov_data.columns)
# sort by index again
read_cov_data = temp_read_cov_data.sort_index()
# turn df into matrix
data_3_read_cov = np.matrix(read_cov_data).T
# lets use the same labels as for the contig data
data_3_y_labels = np.core.defchararray.replace(data_1_y_labels, 'contigs', 'coverage')
#___________________________Combine all Data___________________________________
combined_data = np.vstack([data_1_contig_present, data_2_contig_alignment,data_3_read_cov])
tmp_combined_y_labels = np.append(data_1_y_labels,data_2_y_labels)
combined_y_labels = np.append(tmp_combined_y_labels,data_3_y_labels)
height,width = combined_data.shape
# Define the range for the heatmap; everything above the maximum will be colored with the highest color
norm=None
if norm_value:
norm = mpl.colors.Normalize(vmin=0, vmax=norm_value)
if locus_selection and reduce:
print('Reducing final matrix to selected loci.')
# Reduce the data matrix to only those columns for which we have extracted values in the selected loci
boolean = combined_data[-1]>0
reduced_data = np.matrix([np.matrix.tolist(ind[boolean])[0] for ind in combined_data])
# also print a text file with the loci indices and the corresponding loci names
new_locus_list = x_labels[np.array(boolean)[0]]
locus_index_overview = pd.DataFrame({'loci':new_locus_list})
locus_index_overview.to_csv(os.path.join(workdir,'locus_index_overview.txt'),sep='\t',header=False)
# continue with plotting
fig = plt.figure(figsize=(13.5,8))
for i,m in enumerate(reduced_data):
ax = plt.subplot(height, 1, i+1)
ax.tick_params(left='off',bottom='off',labelleft='off')
# Only plot x-axis for last row
if not i == height-1:
ax.xaxis.set_major_formatter(plt.NullFormatter())
#plt.axis("off")
if combined_y_labels[i] == 'contig alignment':
plt.imshow(reduced_data[i], aspect='auto', cmap='Greens_r', origin='lower')
elif 'contigs' in combined_y_labels[i]:
plt.imshow(reduced_data[i], aspect='auto', cmap='GnBu', origin='lower')
else:
plt.imshow(reduced_data[i], aspect='auto', cmap='hot_r',norm=norm, origin='lower')#,clim=(0.0, 10))
pos = list(ax.get_position().bounds)
fig.text(pos[0] - 0.01, pos[1], combined_y_labels[i], horizontalalignment='right')
plt.xlabel('exon index')
else:
# print a text file with the loci indices and the corresponding loci names
new_locus_list = x_labels
locus_index_overview = pd.DataFrame({'loci':new_locus_list})
locus_index_overview.to_csv(os.path.join(workdir,'locus_index_overview.txt'),sep='\t',header=False)
if not number_of_rows:
#_______________________________Plot Combined Data_____________________________
fig = plt.figure(figsize=(20,8))
#fig.subplots_adjust(top=1, bottom=0.0, left=0.2, right=0.99)
for i,m in enumerate(combined_data):
ax = plt.subplot(height, 1, i+1)
ax.tick_params(left='off',bottom='off',labelleft='off')
# Only plot x-axis for last row
if not i == height-1:
ax.xaxis.set_major_formatter(plt.NullFormatter())
#plt.axis("off")
if combined_y_labels[i] == 'contig alignment':
plt.imshow(combined_data[i], aspect='auto', cmap='Greens', origin='lower')
elif 'contigs' in combined_y_labels[i]:
plt.imshow(combined_data[i], aspect='auto', cmap='GnBu', origin='lower')
else:
plt.imshow(combined_data[i], aspect='auto', cmap='hot_r',norm=norm, origin='lower')#,clim=(0.0, 10))
pos = list(ax.get_position().bounds)
fig.text(pos[0] - 0.01, pos[1], combined_y_labels[i], horizontalalignment='right')
plt.xlabel('exon index')
#plt.colorbar()
elif number_of_rows:
images = []
#_______________________________Plot Split Data_____________________________
# Split dataset for better readability
columns_per_row = int(combined_data.shape[1]/number_of_rows)
remainder = combined_data.shape[1]%number_of_rows
# start and end of first row, adding the remainder to the first row
b = 0
n = columns_per_row+remainder
subset_dict = {}
# iterate through chunks
for i in range(number_of_rows):
data_chunk = combined_data[:,b:n]
subset_dict.setdefault('%i,%i' %(b,n),data_chunk)
b=n
n+=columns_per_row
for j in subset_dict.keys():
split_data = subset_dict[j]
data_range = j.split(',')
num_x_labels = np.arange(int(data_range[0]),int(data_range[-1]))
fig = plt.figure(figsize=(20,8))
#fig.subplots_adjust(top=1, bottom=0.0, left=0.2, right=0.99)
for i,m in enumerate(split_data):
ax = plt.subplot(height, 1, i+1)
ax.tick_params(left='off',bottom='off',labelleft='off')
# Only plot x-axis for last row
if not i == height-1:
ax.xaxis.set_major_formatter(plt.NullFormatter())
#plt.axis("off")
if combined_y_labels[i] == 'contig alignment':
plt.imshow(split_data[i], aspect='auto', cmap='Greens', origin='lower')
elif 'contigs' in combined_y_labels[i]:
plt.imshow(split_data[i], aspect='auto', cmap='GnBu', origin='lower')
else:
plt.imshow(split_data[i], aspect='auto', cmap='hot_r',norm=norm, origin='lower')#,clim=(0.0, 10))
pos = list(ax.get_position().bounds)
fig.text(pos[0] - 0.01, pos[1], combined_y_labels[i], horizontalalignment='right')
# make sure to have 10 ticks on the x-axis (ensured by dividing the total length by 9 and using the resulting value as step size)
tick_step_size = split_data[i].shape[1]/9
# get the desired indices of the x-values that shall carry ticks on the x-axis
xi = np.arange(0, split_data[i].shape[1],int(tick_step_size))
# get the corresponding x-values from the num_x_labels variable (dictionary keys)
x = np.arange(num_x_labels[0], num_x_labels[-1], int(tick_step_size))
plt.xticks(xi,x)
plt.xlabel('exon index')
fig.savefig(os.path.join(workdir,'contig_exon_coverage_matrix_%s.png'%j), dpi = 500)
images.append(fig)
if not number_of_rows:
fig.savefig(os.path.join(outdir,'contig_yield_and_msas_and_readcov_overview.png'),bbox_inches='tight', dpi = 500)
else:
    # the split figures were already written per chunk above; also save each one to the output directory
    for img_index, img_fig in enumerate(images):
        img_fig.savefig(os.path.join(outdir, 'contig_yield_and_msas_and_readcov_overview_%i.png' % img_index), bbox_inches='tight', dpi=500)
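# Illustrative sketch (not part of the original script): the row-splitting logic used above,
# shown on a small hypothetical matrix. The data is cut into `number_of_rows` column chunks,
# with the division remainder added to the first chunk so that no column is dropped.
def _example_split_matrix_into_rows(number_of_rows=3):
    demo = np.arange(2 * 10).reshape(2, 10)  # 2 samples x 10 loci, made-up values
    columns_per_row = int(demo.shape[1] / number_of_rows)
    remainder = demo.shape[1] % number_of_rows
    chunks = {}
    b = 0
    n = columns_per_row + remainder
    for _ in range(number_of_rows):
        chunks['%i,%i' % (b, n)] = demo[:, b:n]
        b = n
        n += columns_per_row
    # keys are "start,end" column ranges, e.g. '0,4', '4,7', '7,10' for 10 columns in 3 rows
    return chunks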
def plot_contig_yield(contig_input_file):
workdir = '/'.join(contig_input_file.split('/')[:-1])
contig_matrix = pd.read_csv(contig_input_file,sep='\t',index_col=0)
x_labels = np.array(contig_matrix.index)
num_x_labels = range(len(x_labels))
#______________________________Contig Data_____________________________________
# Read the contig data
data_1_contig_present = np.matrix(contig_matrix).T
data_1_y_labels = contig_matrix.columns
# replace substring in sample name
data_1_y_labels = np.core.defchararray.replace(np.array(data_1_y_labels,dtype=str), 'sample_', 'contigs ')
# print a text file with the loci indices and the corresponding loci names
new_locus_list = x_labels
locus_index_overview = pd.DataFrame({'loci':new_locus_list})
locus_index_overview.to_csv(os.path.join(workdir,'locus_index_overview.txt'),sep='\t',header=False)
#___________________________Plotting settings___________________________________
height,width = data_1_contig_present.shape
fig = plt.figure(figsize=(20,8))
#fig.subplots_adjust(top=1, bottom=0.0, left=0.2, right=0.99)
for i,m in enumerate(data_1_contig_present):
ax = plt.subplot(height, 1, i+1)
ax.tick_params(left='off',bottom='off',labelleft='off')
# Only plot x-axis for last row
if not i == height-1:
ax.xaxis.set_major_formatter(plt.NullFormatter())
#plt.axis("off")
if data_1_y_labels[i] == 'contig alignment':
plt.imshow(data_1_contig_present[i], aspect='auto', cmap='binary', origin='lower')
else:
plt.imshow(data_1_contig_present[i], aspect='auto', cmap='GnBu', origin='lower')
pos = list(ax.get_position().bounds)
fig.text(pos[0] - 0.01, pos[1], data_1_y_labels[i], horizontalalignment='right')
plt.xlabel('exon index')
#plt.colorbar()
return fig
def plot_contigs_and_alignments_yield(contig_input_file,alignment_folder):
workdir = '/'.join(contig_input_file.split('/')[:-1])
contig_matrix = pd.read_csv(contig_input_file,sep='\t',index_col=0)
x_labels = np.array(contig_matrix.index)
num_x_labels = range(len(x_labels))
#______________________________Contig Data_____________________________________
# Read the contig data
data_1_contig_present = np.matrix(contig_matrix).T
"""
The ``pvsystem`` module contains functions for modeling the output and
performance of PV modules and inverters.
"""
from collections import OrderedDict
import io
import os
from urllib.request import urlopen
import warnings
import numpy as np
import pandas as pd
from pvlib._deprecation import deprecated
from pvlib import (atmosphere, iam, irradiance, singlediode as _singlediode,
temperature)
from pvlib.tools import _build_kwargs
from pvlib.location import Location
from pvlib._deprecation import pvlibDeprecationWarning
# a dict of required parameter names for each DC power model
_DC_MODEL_PARAMS = {
'sapm': set([
'A0', 'A1', 'A2', 'A3', 'A4', 'B0', 'B1', 'B2', 'B3',
'B4', 'B5', 'C0', 'C1', 'C2', 'C3', 'C4', 'C5', 'C6',
'C7', 'Isco', 'Impo', 'Voco', 'Vmpo', 'Aisc', 'Aimp', 'Bvoco',
'Mbvoc', 'Bvmpo', 'Mbvmp', 'N', 'Cells_in_Series',
'IXO', 'IXXO', 'FD']),
'desoto': set([
'alpha_sc', 'a_ref', 'I_L_ref', 'I_o_ref',
'R_sh_ref', 'R_s']),
'cec': set([
'alpha_sc', 'a_ref', 'I_L_ref', 'I_o_ref',
'R_sh_ref', 'R_s', 'Adjust']),
'pvsyst': set([
'gamma_ref', 'mu_gamma', 'I_L_ref', 'I_o_ref',
'R_sh_ref', 'R_sh_0', 'R_s', 'alpha_sc', 'EgRef',
'cells_in_series']),
'singlediode': set([
'alpha_sc', 'a_ref', 'I_L_ref', 'I_o_ref',
'R_sh_ref', 'R_s']),
'pvwatts': set(['pdc0', 'gamma_pdc'])
}
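# Illustrative sketch (not part of pvlib's public API): _DC_MODEL_PARAMS can be used to check
# whether a set of module parameters is sufficient for a given DC model. The parameter dict
# below is hypothetical.
def _example_check_required_params():
    module_parameters = {'alpha_sc': 0.005, 'a_ref': 1.6, 'I_L_ref': 6.0,
                         'I_o_ref': 5e-10, 'R_sh_ref': 300., 'R_s': 0.5}
    required = _DC_MODEL_PARAMS['desoto']
    missing = required - set(module_parameters.keys())
    # an empty set means the 'desoto' model has every parameter it needs
    return missing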
def _combine_localized_attributes(pvsystem=None, location=None, **kwargs):
"""
Get and combine attributes from the pvsystem and/or location
with the rest of the kwargs.
"""
if pvsystem is not None:
pv_dict = pvsystem.__dict__
else:
pv_dict = {}
if location is not None:
loc_dict = location.__dict__
else:
loc_dict = {}
new_kwargs = dict(
list(pv_dict.items()) + list(loc_dict.items()) + list(kwargs.items())
)
return new_kwargs
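# Illustrative sketch (not part of pvlib): dict() built from concatenated item lists keeps the
# last occurrence of a duplicated key, so explicit kwargs override pvsystem and location
# attributes in _combine_localized_attributes. The objects below are hypothetical stand-ins.
def _example_combined_attribute_precedence():
    class _Obj(object):
        pass
    fake_system = _Obj()
    fake_system.name = 'from system'
    fake_location = _Obj()
    fake_location.name = 'from location'
    combined = _combine_localized_attributes(pvsystem=fake_system,
                                             location=fake_location,
                                             name='from kwargs')
    # combined['name'] == 'from kwargs'
    return combined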
# not sure if this belongs in the pvsystem module.
# maybe something more like core.py? It may eventually grow to
# import a lot more functionality from other modules.
class PVSystem(object):
"""
The PVSystem class defines a standard set of PV system attributes
and modeling functions. This class describes the collection and
interactions of PV system components rather than an installed system
on the ground. It is typically used in combination with
:py:class:`~pvlib.location.Location` and
:py:class:`~pvlib.modelchain.ModelChain`
objects.
See the :py:class:`LocalizedPVSystem` class for an object model that
describes an installed PV system.
The class supports basic system topologies consisting of:
* `N` total modules arranged in series
(`modules_per_string=N`, `strings_per_inverter=1`).
* `M` total modules arranged in parallel
(`modules_per_string=1`, `strings_per_inverter=M`).
* `NxM` total modules arranged in `M` strings of `N` modules each
(`modules_per_string=N`, `strings_per_inverter=M`).
The class is complementary to the module-level functions.
The attributes should generally be things that don't change about
the system, such the type of module and the inverter. The instance
methods accept arguments for things that do change, such as
irradiance and temperature.
Parameters
----------
surface_tilt: float or array-like, default 0
Surface tilt angles in decimal degrees.
The tilt angle is defined as degrees from horizontal
(e.g. surface facing up = 0, surface facing horizon = 90)
surface_azimuth: float or array-like, default 180
Azimuth angle of the module surface.
North=0, East=90, South=180, West=270.
albedo : None or float, default None
The ground albedo. If ``None``, will attempt to use
``surface_type`` and ``irradiance.SURFACE_ALBEDOS``
to lookup albedo.
surface_type : None or string, default None
The ground surface type. See ``irradiance.SURFACE_ALBEDOS``
for valid values.
module : None or string, default None
The model name of the modules.
May be used to look up the module_parameters dictionary
via some other method.
module_type : None or string, default 'glass_polymer'
Describes the module's construction. Valid strings are 'glass_polymer'
and 'glass_glass'. Used for cell and module temperature calculations.
module_parameters : None, dict or Series, default None
Module parameters as defined by the SAPM, CEC, or other.
temperature_model_parameters : None, dict or Series, default None.
Temperature model parameters as defined by the SAPM, Pvsyst, or other.
modules_per_string: int or float, default 1
See system topology discussion above.
strings_per_inverter: int or float, default 1
See system topology discussion above.
inverter : None or string, default None
The model name of the inverters.
May be used to look up the inverter_parameters dictionary
via some other method.
inverter_parameters : None, dict or Series, default None
Inverter parameters as defined by the SAPM, CEC, or other.
racking_model : None or string, default 'open_rack'
Valid strings are 'open_rack', 'close_mount', and 'insulated_back'.
Used to identify a parameter set for the SAPM cell temperature model.
losses_parameters : None, dict or Series, default None
Losses parameters as defined by PVWatts or other.
name : None or string, default None
**kwargs
Arbitrary keyword arguments.
Included for compatibility, but not used.
See also
--------
pvlib.location.Location
pvlib.tracking.SingleAxisTracker
pvlib.pvsystem.LocalizedPVSystem
"""
def __init__(self,
surface_tilt=0, surface_azimuth=180,
albedo=None, surface_type=None,
module=None, module_type='glass_polymer',
module_parameters=None,
temperature_model_parameters=None,
modules_per_string=1, strings_per_inverter=1,
inverter=None, inverter_parameters=None,
racking_model='open_rack', losses_parameters=None, name=None,
**kwargs):
self.surface_tilt = surface_tilt
self.surface_azimuth = surface_azimuth
# could tie these together with @property
self.surface_type = surface_type
if albedo is None:
self.albedo = irradiance.SURFACE_ALBEDOS.get(surface_type, 0.25)
else:
self.albedo = albedo
# could tie these together with @property
self.module = module
if module_parameters is None:
self.module_parameters = {}
else:
self.module_parameters = module_parameters
self.module_type = module_type
self.racking_model = racking_model
if temperature_model_parameters is None:
self.temperature_model_parameters = \
self._infer_temperature_model_params()
# TODO: in v0.8 check if an empty dict is returned and raise error
else:
self.temperature_model_parameters = temperature_model_parameters
# TODO: deprecated behavior if PVSystem.temperature_model_parameters
# are not specified. Remove in v0.8
if not any(self.temperature_model_parameters):
warnings.warn(
'Required temperature_model_parameters is not specified '
'and parameters are not inferred from racking_model and '
'module_type. Reverting to deprecated default: SAPM cell '
'temperature model parameters for a glass/glass module in '
'open racking. In the future '
'PVSystem.temperature_model_parameters will be required',
pvlibDeprecationWarning)
params = temperature._temperature_model_params(
'sapm', 'open_rack_glass_glass')
self.temperature_model_parameters = params
self.modules_per_string = modules_per_string
self.strings_per_inverter = strings_per_inverter
self.inverter = inverter
if inverter_parameters is None:
self.inverter_parameters = {}
else:
self.inverter_parameters = inverter_parameters
if losses_parameters is None:
self.losses_parameters = {}
else:
self.losses_parameters = losses_parameters
self.name = name
def __repr__(self):
attrs = ['name', 'surface_tilt', 'surface_azimuth', 'module',
'inverter', 'albedo', 'racking_model']
return ('PVSystem: \n ' + '\n '.join(
('{}: {}'.format(attr, getattr(self, attr)) for attr in attrs)))
def get_aoi(self, solar_zenith, solar_azimuth):
"""Get the angle of incidence on the system.
Parameters
----------
solar_zenith : float or Series.
Solar zenith angle.
solar_azimuth : float or Series.
Solar azimuth angle.
Returns
-------
aoi : Series
The angle of incidence
"""
aoi = irradiance.aoi(self.surface_tilt, self.surface_azimuth,
solar_zenith, solar_azimuth)
return aoi
def get_irradiance(self, solar_zenith, solar_azimuth, dni, ghi, dhi,
dni_extra=None, airmass=None, model='haydavies',
**kwargs):
"""
Uses the :py:func:`irradiance.get_total_irradiance` function to
calculate the plane of array irradiance components on a tilted
surface defined by ``self.surface_tilt``,
``self.surface_azimuth``, and ``self.albedo``.
Parameters
----------
solar_zenith : float or Series.
Solar zenith angle.
solar_azimuth : float or Series.
Solar azimuth angle.
dni : float or Series
Direct Normal Irradiance
ghi : float or Series
Global horizontal irradiance
dhi : float or Series
Diffuse horizontal irradiance
dni_extra : None, float or Series, default None
Extraterrestrial direct normal irradiance
airmass : None, float or Series, default None
Airmass
model : String, default 'haydavies'
Irradiance model.
kwargs
Extra parameters passed to :func:`irradiance.get_total_irradiance`.
Returns
-------
poa_irradiance : DataFrame
Column names are: ``total, beam, sky, ground``.
"""
# not needed for all models, but this is easier
if dni_extra is None:
dni_extra = irradiance.get_extra_radiation(solar_zenith.index)
if airmass is None:
airmass = atmosphere.get_relative_airmass(solar_zenith)
return irradiance.get_total_irradiance(self.surface_tilt,
self.surface_azimuth,
solar_zenith, solar_azimuth,
dni, ghi, dhi,
dni_extra=dni_extra,
airmass=airmass,
model=model,
albedo=self.albedo,
**kwargs)
def get_iam(self, aoi, iam_model='physical'):
"""
Determine the incidence angle modifier using the method specified by
``iam_model``.
Parameters for the selected IAM model are expected to be in
``PVSystem.module_parameters``. Default parameters are available for
the 'physical', 'ashrae' and 'martin_ruiz' models.
Parameters
----------
aoi : numeric
The angle of incidence in degrees.
iam_model : string, default 'physical'
The IAM model to be used. Valid strings are 'physical', 'ashrae',
'martin_ruiz' and 'sapm'.
Returns
-------
iam : numeric
The AOI modifier.
Raises
------
ValueError if `iam_model` is not a valid model name.
"""
model = iam_model.lower()
if model in ['ashrae', 'physical', 'martin_ruiz']:
param_names = iam._IAM_MODEL_PARAMS[model]
kwargs = _build_kwargs(param_names, self.module_parameters)
func = getattr(iam, model)
return func(aoi, **kwargs)
elif model == 'sapm':
return iam.sapm(aoi, self.module_parameters)
elif model == 'interp':
raise ValueError(model + ' is not implemented as an IAM model'
' option for PVSystem')
else:
raise ValueError(model + ' is not a valid IAM model')
def ashraeiam(self, aoi):
"""
Deprecated. Use ``PVSystem.get_iam`` instead.
"""
import warnings
warnings.warn('PVSystem.ashraeiam is deprecated and will be removed in'
' v0.8, use PVSystem.get_iam instead',
pvlibDeprecationWarning)
return PVSystem.get_iam(self, aoi, iam_model='ashrae')
def physicaliam(self, aoi):
"""
Deprecated. Use ``PVSystem.get_iam`` instead.
"""
import warnings
warnings.warn('PVSystem.physicaliam is deprecated and will be removed'
' in v0.8, use PVSystem.get_iam instead',
pvlibDeprecationWarning)
return PVSystem.get_iam(self, aoi, iam_model='physical')
def calcparams_desoto(self, effective_irradiance, temp_cell, **kwargs):
"""
Use the :py:func:`calcparams_desoto` function, the input
parameters and ``self.module_parameters`` to calculate the
module currents and resistances.
Parameters
----------
effective_irradiance : numeric
The irradiance (W/m2) that is converted to photocurrent.
temp_cell : float or Series
The average cell temperature of cells within a module in C.
**kwargs
See pvsystem.calcparams_desoto for details
Returns
-------
See pvsystem.calcparams_desoto for details
"""
kwargs = _build_kwargs(['a_ref', 'I_L_ref', 'I_o_ref', 'R_sh_ref',
'R_s', 'alpha_sc', 'EgRef', 'dEgdT',
'irrad_ref', 'temp_ref'],
self.module_parameters)
return calcparams_desoto(effective_irradiance, temp_cell, **kwargs)
def calcparams_cec(self, effective_irradiance, temp_cell, **kwargs):
"""
Use the :py:func:`calcparams_cec` function, the input
parameters and ``self.module_parameters`` to calculate the
module currents and resistances.
Parameters
----------
effective_irradiance : numeric
The irradiance (W/m2) that is converted to photocurrent.
temp_cell : float or Series
The average cell temperature of cells within a module in C.
**kwargs
See pvsystem.calcparams_cec for details
Returns
-------
See pvsystem.calcparams_cec for details
"""
kwargs = _build_kwargs(['a_ref', 'I_L_ref', 'I_o_ref', 'R_sh_ref',
'R_s', 'alpha_sc', 'Adjust', 'EgRef', 'dEgdT',
'irrad_ref', 'temp_ref'],
self.module_parameters)
return calcparams_cec(effective_irradiance, temp_cell, **kwargs)
def calcparams_pvsyst(self, effective_irradiance, temp_cell):
"""
Use the :py:func:`calcparams_pvsyst` function, the input
parameters and ``self.module_parameters`` to calculate the
module currents and resistances.
Parameters
----------
effective_irradiance : numeric
The irradiance (W/m2) that is converted to photocurrent.
temp_cell : float or Series
The average cell temperature of cells within a module in C.
Returns
-------
See pvsystem.calcparams_pvsyst for details
"""
kwargs = _build_kwargs(['gamma_ref', 'mu_gamma', 'I_L_ref', 'I_o_ref',
'R_sh_ref', 'R_sh_0', 'R_sh_exp',
'R_s', 'alpha_sc', 'EgRef',
'irrad_ref', 'temp_ref',
'cells_in_series'],
self.module_parameters)
return calcparams_pvsyst(effective_irradiance, temp_cell, **kwargs)
def sapm(self, effective_irradiance, temp_cell, **kwargs):
"""
Use the :py:func:`sapm` function, the input parameters,
and ``self.module_parameters`` to calculate
Voc, Isc, Ix, Ixx, Vmp, and Imp.
Parameters
----------
effective_irradiance : numeric
The irradiance (W/m2) that is converted to photocurrent.
temp_cell : float or Series
The average cell temperature of cells within a module in C.
kwargs
See pvsystem.sapm for details
Returns
-------
See pvsystem.sapm for details
"""
return sapm(effective_irradiance, temp_cell, self.module_parameters)
def sapm_celltemp(self, poa_global, temp_air, wind_speed):
"""Uses :py:func:`temperature.sapm_cell` to calculate cell
temperatures.
Parameters
----------
poa_global : numeric
Total incident irradiance in W/m^2.
temp_air : numeric
Ambient dry bulb temperature in degrees C.
wind_speed : numeric
Wind speed in m/s at a height of 10 meters.
Returns
-------
numeric, values in degrees C.
"""
kwargs = _build_kwargs(['a', 'b', 'deltaT'],
self.temperature_model_parameters)
return temperature.sapm_cell(poa_global, temp_air, wind_speed,
**kwargs)
def _infer_temperature_model_params(self):
# try to infer temperature model parameters from racking_model
# and module_type
param_set = self.racking_model + '_' + self.module_type
if param_set in temperature.TEMPERATURE_MODEL_PARAMETERS['sapm']:
return temperature._temperature_model_params('sapm', param_set)
elif 'freestanding' in param_set:
return temperature._temperature_model_params('pvsyst',
'freestanding')
elif 'insulated' in param_set: # after SAPM to avoid confusing keys
return temperature._temperature_model_params('pvsyst',
'insulated')
else:
return {}
def sapm_spectral_loss(self, airmass_absolute):
"""
Use the :py:func:`sapm_spectral_loss` function, the input
parameters, and ``self.module_parameters`` to calculate F1.
Parameters
----------
airmass_absolute : numeric
Absolute airmass.
Returns
-------
F1 : numeric
The SAPM spectral loss coefficient.
"""
return sapm_spectral_loss(airmass_absolute, self.module_parameters)
def sapm_aoi_loss(self, aoi):
"""
Deprecated. Use ``PVSystem.get_iam`` instead.
"""
import warnings
warnings.warn('PVSystem.sapm_aoi_loss is deprecated and will be'
' removed in v0.8, use PVSystem.get_iam instead',
pvlibDeprecationWarning)
return PVSystem.get_iam(self, aoi, iam_model='sapm')
def sapm_effective_irradiance(self, poa_direct, poa_diffuse,
airmass_absolute, aoi,
reference_irradiance=1000):
"""
Use the :py:func:`sapm_effective_irradiance` function, the input
parameters, and ``self.module_parameters`` to calculate
effective irradiance.
Parameters
----------
poa_direct : numeric
The direct irradiance incident upon the module. [W/m2]
poa_diffuse : numeric
The diffuse irradiance incident on module. [W/m2]
airmass_absolute : numeric
Absolute airmass. [unitless]
aoi : numeric
Angle of incidence. [degrees]
Returns
-------
effective_irradiance : numeric
The SAPM effective irradiance. [W/m2]
"""
return sapm_effective_irradiance(
poa_direct, poa_diffuse, airmass_absolute, aoi,
self.module_parameters)
def pvsyst_celltemp(self, poa_global, temp_air, wind_speed=1.0):
"""Uses :py:func:`temperature.pvsyst_cell` to calculate cell
temperature.
Parameters
----------
poa_global : numeric
Total incident irradiance in W/m^2.
temp_air : numeric
Ambient dry bulb temperature in degrees C.
wind_speed : numeric, default 1.0
Wind speed in m/s measured at the same height for which the wind
loss factor was determined. The default value is 1.0, which is
the wind speed at module height used to determine NOCT.
eta_m : numeric, default 0.1
Module external efficiency as a fraction, i.e.,
DC power / poa_global.
alpha_absorption : numeric, default 0.9
Absorption coefficient
Returns
-------
numeric, values in degrees C.
"""
kwargs = _build_kwargs(['eta_m', 'alpha_absorption'],
self.module_parameters)
kwargs.update(_build_kwargs(['u_c', 'u_v'],
self.temperature_model_parameters))
return temperature.pvsyst_cell(poa_global, temp_air, wind_speed,
**kwargs)
def first_solar_spectral_loss(self, pw, airmass_absolute):
"""
Use the :py:func:`first_solar_spectral_correction` function to
calculate the spectral loss modifier. The model coefficients are
specific to the module's cell type, and are determined by searching
for one of the following keys in self.module_parameters (in order):
'first_solar_spectral_coefficients' (user-supplied coefficients)
'Technology' - a string describing the cell type, can be read from
the CEC module parameter database
'Material' - a string describing the cell type, can be read from
the Sandia module database.
Parameters
----------
pw : array-like
atmospheric precipitable water (cm).
airmass_absolute : array-like
absolute (pressure corrected) airmass.
Returns
-------
modifier: array-like
spectral mismatch factor (unitless) which can be multiplied
with broadband irradiance reaching a module's cells to estimate
effective irradiance, i.e., the irradiance that is converted to
electrical current.
"""
if 'first_solar_spectral_coefficients' in \
self.module_parameters.keys():
coefficients = \
self.module_parameters['first_solar_spectral_coefficients']
module_type = None
else:
module_type = self._infer_cell_type()
coefficients = None
return atmosphere.first_solar_spectral_correction(pw,
airmass_absolute,
module_type,
coefficients)
def _infer_cell_type(self):
"""
Examines module_parameters and maps the Technology key for the CEC
database and the Material key for the Sandia database to a common
list of strings for cell type.
Returns
-------
cell_type: str
"""
_cell_type_dict = {'Multi-c-Si': 'multisi',
'Mono-c-Si': 'monosi',
'Thin Film': 'cigs',
'a-Si/nc': 'asi',
'CIS': 'cigs',
'CIGS': 'cigs',
'1-a-Si': 'asi',
'CdTe': 'cdte',
'a-Si': 'asi',
'2-a-Si': None,
'3-a-Si': None,
'HIT-Si': 'monosi',
'mc-Si': 'multisi',
'c-Si': 'multisi',
'Si-Film': 'asi',
'EFG mc-Si': 'multisi',
'GaAs': None,
'a-Si / mono-Si': 'monosi'}
if 'Technology' in self.module_parameters.keys():
# CEC module parameter set
cell_type = _cell_type_dict[self.module_parameters['Technology']]
elif 'Material' in self.module_parameters.keys():
# Sandia module parameter set
cell_type = _cell_type_dict[self.module_parameters['Material']]
else:
cell_type = None
return cell_type
def singlediode(self, photocurrent, saturation_current,
resistance_series, resistance_shunt, nNsVth,
ivcurve_pnts=None):
"""Wrapper around the :py:func:`singlediode` function.
Parameters
----------
See pvsystem.singlediode for details
Returns
-------
See pvsystem.singlediode for details
"""
return singlediode(photocurrent, saturation_current,
resistance_series, resistance_shunt, nNsVth,
ivcurve_pnts=ivcurve_pnts)
def i_from_v(self, resistance_shunt, resistance_series, nNsVth, voltage,
saturation_current, photocurrent):
"""Wrapper around the :py:func:`i_from_v` function.
Parameters
----------
See pvsystem.i_from_v for details
Returns
-------
See pvsystem.i_from_v for details
"""
return i_from_v(resistance_shunt, resistance_series, nNsVth, voltage,
saturation_current, photocurrent)
# inverter now specified by self.inverter_parameters
def snlinverter(self, v_dc, p_dc):
"""Uses :func:`snlinverter` to calculate AC power based on
``self.inverter_parameters`` and the input parameters.
Parameters
----------
See pvsystem.snlinverter for details
Returns
-------
See pvsystem.snlinverter for details
"""
return snlinverter(v_dc, p_dc, self.inverter_parameters)
def adrinverter(self, v_dc, p_dc):
return adrinverter(v_dc, p_dc, self.inverter_parameters)
def scale_voltage_current_power(self, data):
"""
Scales the voltage, current, and power of the DataFrames
returned by :py:func:`singlediode` and :py:func:`sapm`
by `self.modules_per_string` and `self.strings_per_inverter`.
Parameters
----------
data: DataFrame
Must contain columns `'v_mp', 'v_oc', 'i_mp' ,'i_x', 'i_xx',
'i_sc', 'p_mp'`.
Returns
-------
scaled_data: DataFrame
A scaled copy of the input data.
"""
return scale_voltage_current_power(data,
voltage=self.modules_per_string,
current=self.strings_per_inverter)
def pvwatts_dc(self, g_poa_effective, temp_cell):
"""
Calculates DC power according to the PVWatts model using
:py:func:`pvwatts_dc`, `self.module_parameters['pdc0']`, and
`self.module_parameters['gamma_pdc']`.
See :py:func:`pvwatts_dc` for details.
"""
kwargs = _build_kwargs(['temp_ref'], self.module_parameters)
return pvwatts_dc(g_poa_effective, temp_cell,
self.module_parameters['pdc0'],
self.module_parameters['gamma_pdc'],
**kwargs)
def pvwatts_losses(self):
"""
Calculates DC power losses according to the PVWatts model using
:py:func:`pvwatts_losses` and ``self.losses_parameters``.
See :py:func:`pvwatts_losses` for details.
"""
kwargs = _build_kwargs(['soiling', 'shading', 'snow', 'mismatch',
'wiring', 'connections', 'lid',
'nameplate_rating', 'age', 'availability'],
self.losses_parameters)
return pvwatts_losses(**kwargs)
def pvwatts_ac(self, pdc):
"""
Calculates AC power according to the PVWatts model using
:py:func:`pvwatts_ac`, `self.module_parameters['pdc0']`, and
`eta_inv_nom=self.inverter_parameters['eta_inv_nom']`.
See :py:func:`pvwatts_ac` for details.
"""
kwargs = _build_kwargs(['eta_inv_nom', 'eta_inv_ref'],
self.inverter_parameters)
return pvwatts_ac(pdc, self.inverter_parameters['pdc0'], **kwargs)
def localize(self, location=None, latitude=None, longitude=None,
**kwargs):
"""Creates a LocalizedPVSystem object using this object
and location data. Must supply either location object or
latitude, longitude, and any location kwargs
Parameters
----------
location : None or Location, default None
latitude : None or float, default None
longitude : None or float, default None
**kwargs : see Location
Returns
-------
localized_system : LocalizedPVSystem
"""
if location is None:
location = Location(latitude, longitude, **kwargs)
return LocalizedPVSystem(pvsystem=self, location=location)
class LocalizedPVSystem(PVSystem, Location):
"""
The LocalizedPVSystem class defines a standard set of installed PV
system attributes and modeling functions. This class combines the
attributes and methods of the PVSystem and Location classes.
The LocalizedPVSystem may have bugs due to the difficulty of
robustly implementing multiple inheritance. See
:py:class:`~pvlib.modelchain.ModelChain` for an alternative paradigm
for modeling PV systems at specific locations.
"""
def __init__(self, pvsystem=None, location=None, **kwargs):
new_kwargs = _combine_localized_attributes(
pvsystem=pvsystem,
location=location,
**kwargs,
)
PVSystem.__init__(self, **new_kwargs)
Location.__init__(self, **new_kwargs)
def __repr__(self):
attrs = ['name', 'latitude', 'longitude', 'altitude', 'tz',
'surface_tilt', 'surface_azimuth', 'module', 'inverter',
'albedo', 'racking_model']
return ('LocalizedPVSystem: \n ' + '\n '.join(
('{}: {}'.format(attr, getattr(self, attr)) for attr in attrs)))
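# Illustrative sketch (not part of pvlib): building a PVSystem with hypothetical PVWatts-style
# module and inverter parameters and attaching a location to it. The coordinates and parameter
# values are made up for demonstration.
def _example_localized_system():
    system = PVSystem(surface_tilt=20, surface_azimuth=180,
                      module_parameters={'pdc0': 240, 'gamma_pdc': -0.004},
                      inverter_parameters={'pdc0': 250, 'eta_inv_nom': 0.96})
    # either pass a Location object or latitude/longitude directly
    localized = system.localize(latitude=32.2, longitude=-110.9)
    return localized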
def systemdef(meta, surface_tilt, surface_azimuth, albedo, modules_per_string,
strings_per_inverter):
'''
Generates a dict of system parameters used throughout a simulation.
Parameters
----------
meta : dict
meta dict either generated from a TMY file using readtmy2 or
readtmy3, or a dict containing at least the following fields:
=============== ====== ====================
meta field format description
=============== ====== ====================
meta.altitude Float site elevation
meta.latitude Float site latitude
meta.longitude Float site longitude
meta.Name String site name
meta.State String state
meta.TZ Float timezone
=============== ====== ====================
surface_tilt : float or Series
Surface tilt angles in decimal degrees.
The tilt angle is defined as degrees from horizontal
(e.g. surface facing up = 0, surface facing horizon = 90)
surface_azimuth : float or Series
Surface azimuth angles in decimal degrees.
The azimuth convention is defined
as degrees east of north
(North=0, South=180, East=90, West=270).
albedo : float or Series
Ground reflectance, typically 0.1-0.4 for surfaces on Earth
(land), may increase over snow, ice, etc. May also be known as
the reflection coefficient. Must be >=0 and <=1.
modules_per_string : int
Number of modules connected in series in a string.
strings_per_inverter : int
Number of strings connected in parallel.
Returns
-------
Result : dict
A dict with the following fields.
* 'surface_tilt'
* 'surface_azimuth'
* 'albedo'
* 'modules_per_string'
* 'strings_per_inverter'
* 'latitude'
* 'longitude'
* 'tz'
* 'name'
* 'altitude'
See also
--------
pvlib.tmy.readtmy3
pvlib.tmy.readtmy2
'''
try:
name = meta['Name']
except KeyError:
name = meta['City']
system = {'surface_tilt': surface_tilt,
'surface_azimuth': surface_azimuth,
'albedo': albedo,
'modules_per_string': modules_per_string,
'strings_per_inverter': strings_per_inverter,
'latitude': meta['latitude'],
'longitude': meta['longitude'],
'tz': meta['TZ'],
'name': name,
'altitude': meta['altitude']}
return system
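# Illustrative sketch (not part of pvlib): calling systemdef with a hand-written meta dict
# instead of one parsed from a TMY file. All site values below are hypothetical.
def _example_systemdef():
    meta = {'Name': 'Example Site', 'State': 'NM', 'TZ': -7.0,
            'latitude': 35.0, 'longitude': -106.6, 'altitude': 1600.0}
    return systemdef(meta, surface_tilt=30, surface_azimuth=180, albedo=0.2,
                     modules_per_string=10, strings_per_inverter=2)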
def calcparams_desoto(effective_irradiance, temp_cell,
alpha_sc, a_ref, I_L_ref, I_o_ref, R_sh_ref, R_s,
EgRef=1.121, dEgdT=-0.0002677,
irrad_ref=1000, temp_ref=25):
'''
Calculates five parameter values for the single diode equation at
effective irradiance and cell temperature using the De Soto et al.
model described in [1]_. The five values returned by calcparams_desoto
can be used by singlediode to calculate an IV curve.
Parameters
----------
effective_irradiance : numeric
The irradiance (W/m2) that is converted to photocurrent.
temp_cell : numeric
The average cell temperature of cells within a module in C.
alpha_sc : float
The short-circuit current temperature coefficient of the
module in units of A/C.
a_ref : float
The product of the usual diode ideality factor (n, unitless),
number of cells in series (Ns), and cell thermal voltage at reference
conditions, in units of V.
I_L_ref : float
The light-generated current (or photocurrent) at reference conditions,
in amperes.
I_o_ref : float
The dark or diode reverse saturation current at reference conditions,
in amperes.
R_sh_ref : float
The shunt resistance at reference conditions, in ohms.
R_s : float
The series resistance at reference conditions, in ohms.
EgRef : float
The energy bandgap at reference temperature in units of eV.
1.121 eV for crystalline silicon. EgRef must be >0. For parameters
from the SAM CEC module database, EgRef=1.121 is implicit for all
cell types in the parameter estimation algorithm used by NREL.
dEgdT : float
The temperature dependence of the energy bandgap at reference
conditions in units of 1/K. May be either a scalar value
(e.g. -0.0002677 as in [1]_) or a DataFrame (this may be useful if
dEgdT is a modeled as a function of temperature). For parameters from
the SAM CEC module database, dEgdT=-0.0002677 is implicit for all cell
types in the parameter estimation algorithm used by NREL.
irrad_ref : float (optional, default=1000)
Reference irradiance in W/m^2.
temp_ref : float (optional, default=25)
Reference cell temperature in C.
Returns
-------
Tuple of the following results:
photocurrent : numeric
Light-generated current in amperes
saturation_current : numeric
Diode saturation curent in amperes
resistance_series : float
Series resistance in ohms
resistance_shunt : numeric
Shunt resistance in ohms
nNsVth : numeric
The product of the usual diode ideality factor (n, unitless),
number of cells in series (Ns), and cell thermal voltage at
specified effective irradiance and cell temperature.
References
----------
.. [1] W. De Soto et al., "Improvement and validation of a model for
photovoltaic array performance", Solar Energy, vol 80, pp. 78-88,
2006.
.. [2] System Advisor Model web page. https://sam.nrel.gov.
.. [3] A. Dobos, "An Improved Coefficient Calculator for the California
Energy Commission 6 Parameter Photovoltaic Module Model", Journal of
Solar Energy Engineering, vol 134, 2012.
.. [4] O. Madelung, "Semiconductors: Data Handbook, 3rd ed." ISBN
3-540-40488-0
See Also
--------
singlediode
retrieve_sam
Notes
-----
If the reference parameters in the ModuleParameters struct are read
from a database or library of parameters (e.g. System Advisor
Model), it is important to use the same EgRef and dEgdT values that
were used to generate the reference parameters, regardless of the
actual bandgap characteristics of the semiconductor. For example, in
the case of the System Advisor Model library, created as described
in [3], EgRef and dEgdT for all modules were 1.121 and -0.0002677,
respectively.
This table of reference bandgap energies (EgRef), bandgap energy
temperature dependence (dEgdT), and "typical" airmass response (M)
is provided purely as reference to those who may generate their own
reference module parameters (a_ref, IL_ref, I0_ref, etc.) based upon
the various PV semiconductors. Again, we stress the importance of
using identical EgRef and dEgdT when generating reference parameters
and modifying the reference parameters (for irradiance, temperature,
and airmass) per DeSoto's equations.
Crystalline Silicon (Si):
* EgRef = 1.121
* dEgdT = -0.0002677
>>> M = np.polyval([-1.26E-4, 2.816E-3, -0.024459, 0.086257, 0.9181],
... AMa) # doctest: +SKIP
Source: [1]
Cadmium Telluride (CdTe):
* EgRef = 1.475
* dEgdT = -0.0003
>>> M = np.polyval([-2.46E-5, 9.607E-4, -0.0134, 0.0716, 0.9196],
... AMa) # doctest: +SKIP
Source: [4]
Copper Indium diSelenide (CIS):
* EgRef = 1.010
* dEgdT = -0.00011
>>> M = np.polyval([-3.74E-5, 0.00125, -0.01462, 0.0718, 0.9210],
... AMa) # doctest: +SKIP
Source: [4]
Copper Indium Gallium diSelenide (CIGS):
* EgRef = 1.15
* dEgdT = ????
>>> M = np.polyval([-9.07E-5, 0.0022, -0.0202, 0.0652, 0.9417],
... AMa) # doctest: +SKIP
Source: Wikipedia
Gallium Arsenide (GaAs):
* EgRef = 1.424
* dEgdT = -0.000433
* M = unknown
Source: [4]
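Examples
--------
A minimal usage sketch; the numeric values below are illustrative
placeholders rather than parameters of any real module:
>>> from pvlib import pvsystem
>>> IL, I0, Rs, Rsh, nNsVth = pvsystem.calcparams_desoto(
... effective_irradiance=800, temp_cell=45, alpha_sc=0.005,
... a_ref=1.6, I_L_ref=6.0, I_o_ref=3e-10, R_sh_ref=300,
... R_s=0.3) # doctest: +SKIP
>>> curve_info = pvsystem.singlediode(IL, I0, Rs, Rsh, nNsVth) # doctest: +SKIP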
'''
# test for use of function pre-v0.6.0 API change
if isinstance(a_ref, dict) or \
(isinstance(a_ref, pd.Series) and ('a_ref' in a_ref.keys())):
import warnings
warnings.warn('module_parameters detected as fourth positional'
+ ' argument of calcparams_desoto. calcparams_desoto'
+ ' will require one argument for each module model'
+ ' parameter in v0.7.0 and later', DeprecationWarning)
try:
module_parameters = a_ref
a_ref = module_parameters['a_ref']
I_L_ref = module_parameters['I_L_ref']
I_o_ref = module_parameters['I_o_ref']
R_sh_ref = module_parameters['R_sh_ref']
R_s = module_parameters['R_s']
except KeyError:
raise ValueError('Module parameters could not be extracted from fourth'
+ ' positional argument of calcparams_desoto. Check that'
+ ' parameters are from the CEC database and/or update'
+ ' your code for the new API for calcparams_desoto')
# Boltzmann constant in eV/K
k = 8.617332478e-05
# reference temperature
Tref_K = temp_ref + 273.15
Tcell_K = temp_cell + 273.15
E_g = EgRef * (1 + dEgdT*(Tcell_K - Tref_K))
nNsVth = a_ref * (Tcell_K / Tref_K)
# In the equation for IL, the single factor effective_irradiance is
# used, in place of the product S*M in [1]. effective_irradiance is
# equivalent to the product of S (irradiance reaching a module's cells) *
# M (spectral adjustment factor) as described in [1].
IL = effective_irradiance / irrad_ref * \
(I_L_ref + alpha_sc * (Tcell_K - Tref_K))
I0 = (I_o_ref * ((Tcell_K / Tref_K) ** 3) *
(np.exp(EgRef / (k*(Tref_K)) - (E_g / (k*(Tcell_K))))))
# Note that the equation for Rsh differs from [1]. In [1] Rsh is given as
# Rsh = Rsh_ref * (S_ref / S) where S is broadband irradiance reaching
# the module's cells. If desired this model behavior can be duplicated
# by applying reflection and soiling losses to broadband plane of array
# irradiance and not applying a spectral loss modifier, i.e.,
# spectral_modifier = 1.0.
# use errstate to silence divide by warning
with np.errstate(divide='ignore'):
Rsh = R_sh_ref * (irrad_ref / effective_irradiance)
Rs = R_s
return IL, I0, Rs, Rsh, nNsVth
def calcparams_cec(effective_irradiance, temp_cell,
alpha_sc, a_ref, I_L_ref, I_o_ref, R_sh_ref, R_s,
Adjust, EgRef=1.121, dEgdT=-0.0002677,
irrad_ref=1000, temp_ref=25):
'''
Calculates five parameter values for the single diode equation at
effective irradiance and cell temperature using the CEC
model described in [1]_. The CEC model differs from the De Soto et al.
model [3]_ by the parameter Adjust. The five values returned by
calcparams_cec can be used by singlediode to calculate an IV curve.
Parameters
----------
effective_irradiance : numeric
The irradiance (W/m2) that is converted to photocurrent.
temp_cell : numeric
The average cell temperature of cells within a module in C.
alpha_sc : float
The short-circuit current temperature coefficient of the
module in units of A/C.
a_ref : float
The product of the usual diode ideality factor (n, unitless),
number of cells in series (Ns), and cell thermal voltage at reference
conditions, in units of V.
I_L_ref : float
The light-generated current (or photocurrent) at reference conditions,
in amperes.
I_o_ref : float
The dark or diode reverse saturation current at reference conditions,
in amperes.
R_sh_ref : float
The shunt resistance at reference conditions, in ohms.
R_s : float
The series resistance at reference conditions, in ohms.
Adjust : float
The adjustment to the temperature coefficient for short circuit
current, in percent
EgRef : float
The energy bandgap at reference temperature in units of eV.
1.121 eV for crystalline silicon. EgRef must be >0. For parameters
from the SAM CEC module database, EgRef=1.121 is implicit for all
cell types in the parameter estimation algorithm used by NREL.
dEgdT : float
The temperature dependence of the energy bandgap at reference
conditions in units of 1/K. May be either a scalar value
(e.g. -0.0002677 as in [3]_) or a DataFrame (this may be useful if
dEgdT is modeled as a function of temperature). For parameters from
the SAM CEC module database, dEgdT=-0.0002677 is implicit for all cell
types in the parameter estimation algorithm used by NREL.
irrad_ref : float (optional, default=1000)
Reference irradiance in W/m^2.
temp_ref : float (optional, default=25)
Reference cell temperature in C.
Returns
-------
Tuple of the following results:
photocurrent : numeric
Light-generated current in amperes
saturation_current : numeric
Diode saturation current in amperes
resistance_series : float
Series resistance in ohms
resistance_shunt : numeric
Shunt resistance in ohms
nNsVth : numeric
The product of the usual diode ideality factor (n, unitless),
number of cells in series (Ns), and cell thermal voltage at
specified effective irradiance and cell temperature.
References
----------
.. [1] <NAME>, "An Improved Coefficient Calculator for the California
Energy Commission 6 Parameter Photovoltaic Module Model", Journal of
Solar Energy Engineering, vol 134, 2012.
.. [2] System Advisor Model web page. https://sam.nrel.gov.
.. [3] <NAME> et al., "Improvement and validation of a model for
photovoltaic array performance", Solar Energy, vol 80, pp. 78-88,
2006.
See Also
--------
calcparams_desoto
singlediode
retrieve_sam
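Examples
--------
A brief sketch; the Adjust value and the other parameters below are
illustrative placeholders, not entries from the CEC database:
>>> from pvlib import pvsystem
>>> params = pvsystem.calcparams_cec(1000, 25, alpha_sc=0.005,
... a_ref=1.6, I_L_ref=6.0, I_o_ref=3e-10, R_sh_ref=300,
... R_s=0.3, Adjust=8.7) # doctest: +SKIP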
'''
# pass adjusted temperature coefficient to desoto
return calcparams_desoto(effective_irradiance, temp_cell,
alpha_sc*(1.0 - Adjust/100),
a_ref, I_L_ref, I_o_ref,
R_sh_ref, R_s,
EgRef=EgRef, dEgdT=dEgdT,
irrad_ref=irrad_ref, temp_ref=temp_ref)
def calcparams_pvsyst(effective_irradiance, temp_cell,
alpha_sc, gamma_ref, mu_gamma,
I_L_ref, I_o_ref,
R_sh_ref, R_sh_0, R_s,
cells_in_series,
R_sh_exp=5.5,
EgRef=1.121,
irrad_ref=1000, temp_ref=25):
'''
Calculates five parameter values for the single diode equation at
effective irradiance and cell temperature using the PVsyst v6
model described in [1]_, [2]_, [3]_. The five values returned by
calcparams_pvsyst can be used by singlediode to calculate an IV curve.
Parameters
----------
effective_irradiance : numeric
The irradiance (W/m2) that is converted to photocurrent.
temp_cell : numeric
The average cell temperature of cells within a module in C.
alpha_sc : float
The short-circuit current temperature coefficient of the
module in units of A/C.
gamma_ref : float
The diode ideality factor
mu_gamma : float
The temperature coefficient for the diode ideality factor, 1/K
I_L_ref : float
The light-generated current (or photocurrent) at reference conditions,
in amperes.
I_o_ref : float
The dark or diode reverse saturation current at reference conditions,
in amperes.
R_sh_ref : float
The shunt resistance at reference conditions, in ohms.
R_sh_0 : float
The shunt resistance at zero irradiance conditions, in ohms.
R_s : float
The series resistance at reference conditions, in ohms.
cells_in_series : integer
The number of cells connected in series.
R_sh_exp : float
The exponent in the equation for shunt resistance, unitless. Defaults
to 5.5.
EgRef : float
The energy bandgap at reference temperature in units of eV.
1.121 eV for crystalline silicon. EgRef must be >0.
irrad_ref : float (optional, default=1000)
Reference irradiance in W/m^2.
temp_ref : float (optional, default=25)
Reference cell temperature in C.
Returns
-------
Tuple of the following results:
photocurrent : numeric
Light-generated current in amperes
saturation_current : numeric
Diode saturation current in amperes
resistance_series : float
Series resistance in ohms
resistance_shunt : numeric
Shunt resistance in ohms
nNsVth : numeric
The product of the usual diode ideality factor (n, unitless),
number of cells in series (Ns), and cell thermal voltage at
specified effective irradiance and cell temperature.
References
----------
.. [1] <NAME>, <NAME>, <NAME>, Modeling the Irradiance and
Temperature Dependence of Photovoltaic Modules in PVsyst,
IEEE Journal of Photovoltaics v5(1), January 2015.
.. [2] <NAME>, PV modules modelling, Presentation at the 2nd PV
Performance Modeling Workshop, Santa Clara, CA, May 2013
.. [3] <NAME>, <NAME>, Performance Assessment of a Simulation Model
for PV modules of any available technology, 25th European Photovoltaic
Solar Energy Conference, Valencia, Spain, Sept. 2010
See Also
--------
calcparams_desoto
singlediode
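Examples
--------
A minimal sketch with placeholder parameters (not taken from any
PVsyst parameter set):
>>> from pvlib import pvsystem
>>> IL, I0, Rs, Rsh, nNsVth = pvsystem.calcparams_pvsyst(
... 800, 45, alpha_sc=0.005, gamma_ref=1.1, mu_gamma=-0.0003,
... I_L_ref=6.0, I_o_ref=5e-10, R_sh_ref=300, R_sh_0=1000,
... R_s=0.3, cells_in_series=60) # doctest: +SKIP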
'''
# Boltzmann constant in J/K
k = 1.38064852e-23
# elementary charge in coulomb
q = 1.6021766e-19
# reference temperature
Tref_K = temp_ref + 273.15
Tcell_K = temp_cell + 273.15
gamma = gamma_ref + mu_gamma * (Tcell_K - Tref_K)
nNsVth = gamma * k / q * cells_in_series * Tcell_K
IL = effective_irradiance / irrad_ref * \
(I_L_ref + alpha_sc * (Tcell_K - Tref_K))
I0 = I_o_ref * ((Tcell_K / Tref_K) ** 3) * \
(np.exp((q * EgRef) / (k * gamma) * (1 / Tref_K - 1 / Tcell_K)))
Rsh_tmp = \
(R_sh_ref - R_sh_0 * np.exp(-R_sh_exp)) / (1.0 - np.exp(-R_sh_exp))
Rsh_base = np.maximum(0.0, Rsh_tmp)
Rsh = Rsh_base + (R_sh_0 - Rsh_base) * \
np.exp(-R_sh_exp * effective_irradiance / irrad_ref)
Rs = R_s
return IL, I0, Rs, Rsh, nNsVth
def retrieve_sam(name=None, path=None):
'''
Retrieve latest module and inverter info from a local file or the
SAM website.
This function will retrieve either:
* CEC module database
* Sandia Module database
* CEC Inverter database
* Anton Driesse Inverter database
and return it as a pandas DataFrame.
Parameters
----------
name : None or string, default None
Name can be one of:
* 'CECMod' - returns the CEC module database
* 'CECInverter' - returns the CEC Inverter database
* 'SandiaInverter' - returns the CEC Inverter database
(CEC is the only inverter db currently available; the tag is kept
for backwards compatibility)
* 'SandiaMod' - returns the Sandia Module database
* 'ADRInverter' - returns the ADR Inverter database
path : None or string, default None
Path to the SAM file. May also be a URL.
Returns
-------
samfile : DataFrame
A DataFrame containing all the elements of the desired database.
Each column represents a module or inverter, and a specific
dataset can be retrieved by attribute access, as shown in the
Examples section below.
Raises
------
ValueError
If no name or path is provided.
Notes
-----
Files available at
https://github.com/NREL/SAM/tree/develop/deploy/libraries
Documentation for module and inverter data sets:
https://sam.nrel.gov/photovoltaic/pv-sub-page-2.html
Examples
--------
>>> from pvlib import pvsystem
>>> invdb = pvsystem.retrieve_sam('CECInverter')
>>> inverter = invdb.AE_Solar_Energy__AE6_0__277V__277V__CEC_2012_
>>> inverter
Vac 277.000000
Paco 6000.000000
Pdco 6165.670000
Vdco 361.123000
Pso 36.792300
C0 -0.000002
C1 -0.000047
C2 -0.001861
C3 0.000721
Pnt 0.070000
Vdcmax 600.000000
Idcmax 32.000000
Mppt_low 200.000000
Mppt_high 500.000000
Name: AE_Solar_Energy__AE6_0__277V__277V__CEC_2012_, dtype: float64
'''
if name is not None:
name = name.lower()
data_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'data')
if name == 'cecmod':
csvdata = os.path.join(
data_path, 'sam-library-cec-modules-2019-03-05.csv')
elif name == 'sandiamod':
csvdata = os.path.join(
data_path, 'sam-library-sandia-modules-2015-6-30.csv')
elif name == 'adrinverter':
csvdata = os.path.join(data_path, 'adr-library-2013-10-01.csv')
elif name in ['cecinverter', 'sandiainverter']:
# Allowing either, to provide for old code,
# while aligning with current expectations
csvdata = os.path.join(
data_path, 'sam-library-cec-inverters-2019-03-05.csv')
else:
raise ValueError('invalid name {}'.format(name))
elif path is not None:
if path.startswith('http'):
response = urlopen(path)
csvdata = io.StringIO(response.read().decode(errors='ignore'))
else:
csvdata = path
elif name is None and path is None:
raise ValueError("A name or path must be provided!")
return _parse_raw_sam_df(csvdata)
def _normalize_sam_product_names(names):
'''
Replace special characters within the product names to make them more
suitable for use as Dataframe column names.
'''
# Contributed by <NAME> (@adriesse), PV Performance Labs. July, 2019
import warnings
BAD_CHARS = ' -.()[]:+/",'
GOOD_CHARS = '____________'
mapping = str.maketrans(BAD_CHARS, GOOD_CHARS)
names = pd.Series(data=names)
norm_names = names.str.translate(mapping)
n_duplicates = names.duplicated().sum()
if n_duplicates > 0:
warnings.warn('Original names contain %d duplicate(s).' % n_duplicates)
n_duplicates = norm_names.duplicated().sum()
if n_duplicates > 0:
warnings.warn('Normalized names contain %d duplicate(s).' % n_duplicates)
return norm_names.values
def _parse_raw_sam_df(csvdata):
df = pd.read_csv(csvdata, index_col=0, skiprows=[1, 2])
df.columns = df.columns.str.replace(' ', '_')
df.index = _normalize_sam_product_names(df.index)
df = df.transpose()
if 'ADRCoefficients' in df.index:
ad_ce = 'ADRCoefficients'
# for each inverter, parses a string of coefficients like
# ' 1.33, 2.11, 3.12' into a list containing floats:
# [1.33, 2.11, 3.12]
df.loc[ad_ce] = df.loc[ad_ce].map(lambda x: list(
map(float, x.strip(' []').split())))
return df
def sapm(effective_irradiance, temp_cell, module):
'''
The Sandia PV Array Performance Model (SAPM) generates 5 points on a
PV module's I-V curve (Voc, Isc, Ix, Ixx, Vmp/Imp) according to
SAND2004-3535. Assumes a reference cell temperature of 25 C.
Parameters
----------
effective_irradiance : numeric
Irradiance reaching the module's cells, after reflections and
adjustment for spectrum. [W/m2]
temp_cell : numeric
Cell temperature [C].
module : dict-like
A dict or Series defining the SAPM parameters. See the notes section
for more details.
Returns
-------
A DataFrame with the columns:
* i_sc : Short-circuit current (A)
* i_mp : Current at the maximum-power point (A)
* v_oc : Open-circuit voltage (V)
* v_mp : Voltage at maximum-power point (V)
* p_mp : Power at maximum-power point (W)
* i_x : Current at module V = 0.5Voc, defines 4th point on I-V
curve for modeling curve shape
* i_xx : Current at module V = 0.5(Voc+Vmp), defines 5th point on
I-V curve for modeling curve shape
Notes
-----
The SAPM parameters which are required in ``module`` are
listed in the following table.
The Sandia module database contains parameter values for a limited set
of modules. The CEC module database does not contain these parameters.
Both databases can be accessed using :py:func:`retrieve_sam`.
================ ========================================================
Key Description
================ ========================================================
A0-A4 The airmass coefficients used in calculating
effective irradiance
B0-B5 The angle of incidence coefficients used in calculating
effective irradiance
C0-C7 The empirically determined coefficients relating
Imp, Vmp, Ix, and Ixx to effective irradiance
Isco Short circuit current at reference condition (amps)
Impo Maximum power current at reference condition (amps)
Voco Open circuit voltage at reference condition (V)
Vmpo Maximum power voltage at reference condition (V)
Aisc Short circuit current temperature coefficient at
reference condition (1/C)
Aimp Maximum power current temperature coefficient at
reference condition (1/C)
Bvoco Open circuit voltage temperature coefficient at
reference condition (V/C)
Mbvoc Coefficient providing the irradiance dependence for the
BetaVoc temperature coefficient at reference irradiance
(V/C)
Bvmpo Maximum power voltage temperature coefficient at
reference condition
Mbvmp Coefficient providing the irradiance dependence for the
BetaVmp temperature coefficient at reference irradiance
(V/C)
N Empirically determined "diode factor" (dimensionless)
Cells_in_Series Number of cells in series in a module's cell string(s)
IXO Ix at reference conditions
IXXO Ixx at reference conditions
FD Fraction of diffuse irradiance used by module
================ ========================================================
References
----------
.. [1] <NAME> al, 2004, "Sandia Photovoltaic Array Performance
Model", SAND Report 3535, Sandia National Laboratories, Albuquerque,
NM.
See Also
--------
retrieve_sam
temperature.sapm_cell
temperature.sapm_module
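Examples
--------
A hedged usage sketch with a module record from the Sandia database;
the module name is one example of the database naming convention:
>>> from pvlib import pvsystem
>>> modules = pvsystem.retrieve_sam('SandiaMod') # doctest: +SKIP
>>> module = modules['Canadian_Solar_CS5P_220M___2009_'] # doctest: +SKIP
>>> dc = pvsystem.sapm(effective_irradiance=800, temp_cell=45,
... module=module) # doctest: +SKIP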
'''
# TODO: someday, change temp_ref and irrad_ref to reference_temperature and
# reference_irradiance and expose
temp_ref = 25
irrad_ref = 1000
# TODO: remove this warning in v0.8 after deprecation period for change in
# effective irradiance units, made in v0.7
with np.errstate(invalid='ignore'): # turn off warning for NaN
ee = np.asarray(effective_irradiance)
ee_gt0 = ee[ee > 0.0]
if ee_gt0.size > 0 and np.all(ee_gt0 < 2.0):
import warnings
msg = 'effective_irradiance inputs appear to be in suns. Units ' \
'changed in v0.7 from suns to W/m2'
warnings.warn(msg, RuntimeWarning)
q = 1.60218e-19 # Elementary charge in units of coulombs
kb = 1.38066e-23 # Boltzmann's constant in units of J/K
# avoid problem with integer input
Ee = np.array(effective_irradiance, dtype='float64') / irrad_ref
# set up masking for 0, positive, and nan inputs
Ee_gt_0 = np.full_like(Ee, False, dtype='bool')
Ee_eq_0 = np.full_like(Ee, False, dtype='bool')
notnan = ~np.isnan(Ee)
np.greater(Ee, 0, where=notnan, out=Ee_gt_0)
np.equal(Ee, 0, where=notnan, out=Ee_eq_0)
Bvmpo = module['Bvmpo'] + module['Mbvmp']*(1 - Ee)
Bvoco = module['Bvoco'] + module['Mbvoc']*(1 - Ee)
delta = module['N'] * kb * (temp_cell + 273.15) / q
# avoid repeated computation
logEe = np.full_like(Ee, np.nan)
np.log(Ee, where=Ee_gt_0, out=logEe)
logEe = np.where(Ee_eq_0, -np.inf, logEe)
# avoid repeated __getitem__
cells_in_series = module['Cells_in_Series']
out = OrderedDict()
out['i_sc'] = (
module['Isco'] * Ee * (1 + module['Aisc']*(temp_cell - temp_ref)))
out['i_mp'] = (
module['Impo'] * (module['C0']*Ee + module['C1']*(Ee**2)) *
(1 + module['Aimp']*(temp_cell - temp_ref)))
out['v_oc'] = np.maximum(0, (
module['Voco'] + cells_in_series * delta * logEe +
Bvoco*(temp_cell - temp_ref)))
out['v_mp'] = np.maximum(0, (
module['Vmpo'] +
module['C2'] * cells_in_series * delta * logEe +
module['C3'] * cells_in_series * ((delta * logEe) ** 2) +
Bvmpo*(temp_cell - temp_ref)))
out['p_mp'] = out['i_mp'] * out['v_mp']
out['i_x'] = (
module['IXO'] * (module['C4']*Ee + module['C5']*(Ee**2)) *
(1 + module['Aisc']*(temp_cell - temp_ref)))
# the Ixx calculation in King 2004 has a typo (mixes up Aisc and Aimp)
out['i_xx'] = (
module['IXXO'] * (module['C6']*Ee + module['C7']*(Ee**2)) *
(1 + module['Aisc']*(temp_cell - temp_ref)))
if isinstance(out['i_sc'], pd.Series):
out = pd.DataFrame(out)
return out
def _sapm_celltemp_translator(*args, **kwargs):
# TODO: remove this function after deprecation period for sapm_celltemp
new_kwargs = {}
# convert position arguments to kwargs
old_arg_list = ['poa_global', 'wind_speed', 'temp_air', 'model']
for pos in range(len(args)):
new_kwargs[old_arg_list[pos]] = args[pos]
# determine value for new kwarg 'model'
try:
param_set = new_kwargs['model']
new_kwargs.pop('model') # model is not a new kwarg
except KeyError:
# 'model' not in positional arguments, check kwargs
try:
param_set = kwargs['model']
kwargs.pop('model')
except KeyError:
# 'model' not in kwargs, use old default value
param_set = 'open_rack_glass_glass'
if type(param_set) is list:
new_kwargs.update({'a': param_set[0],
'b': param_set[1],
'deltaT': param_set[2]})
elif type(param_set) is dict:
new_kwargs.update(param_set)
else: # string
params = temperature._temperature_model_params('sapm', param_set)
new_kwargs.update(params)
new_kwargs.update(kwargs) # kwargs with unchanged names
new_kwargs['irrad_ref'] = 1000 # default for new kwarg
# convert old positional arguments to named kwargs
return temperature.sapm_cell(**new_kwargs)
sapm_celltemp = deprecated('0.7', alternative='temperature.sapm_cell',
name='sapm_celltemp', removal='0.8',
addendum='Note that the arguments and argument '
'order for temperature.sapm_cell are different '
'than for sapm_celltemp')(_sapm_celltemp_translator)
def _pvsyst_celltemp_translator(*args, **kwargs):
# TODO: remove this function after deprecation period for pvsyst_celltemp
new_kwargs = {}
# convert position arguments to kwargs
old_arg_list = ['poa_global', 'temp_air', 'wind_speed', 'eta_m',
'alpha_absorption', 'model_params']
for pos in range(len(args)):
new_kwargs[old_arg_list[pos]] = args[pos]
# determine value for new kwarg 'model'
try:
param_set = new_kwargs['model_params']
new_kwargs.pop('model_params') # model_params is not a new kwarg
except KeyError:
# 'model_params' not in positional arguments, check kwargs
try:
param_set = kwargs['model_params']
kwargs.pop('model_params')
except KeyError:
# 'model_params' not in kwargs, use old default value
param_set = 'freestanding'
if type(param_set) in (list, tuple):
new_kwargs.update({'u_c': param_set[0],
'u_v': param_set[1]})
else: # string
params = temperature._temperature_model_params('pvsyst', param_set)
new_kwargs.update(params)
new_kwargs.update(kwargs) # kwargs with unchanged names
# convert old positional arguments to named kwargs
return temperature.pvsyst_cell(**new_kwargs)
pvsyst_celltemp = deprecated(
'0.7', alternative='temperature.pvsyst_cell', name='pvsyst_celltemp',
removal='0.8', addendum='Note that the argument names for '
'temperature.pvsyst_cell are different than '
'for pvsyst_celltemp')(_pvsyst_celltemp_translator)
def sapm_spectral_loss(airmass_absolute, module):
"""
Calculates the SAPM spectral loss coefficient, F1.
Parameters
----------
airmass_absolute : numeric
Absolute airmass
module : dict-like
A dict, Series, or DataFrame defining the SAPM performance
parameters. See the :py:func:`sapm` notes section for more
details.
Returns
-------
F1 : numeric
The SAPM spectral loss coefficient.
Notes
-----
nan airmass values will result in 0 output.
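Examples
--------
A short sketch; the A0-A4 coefficient values below are placeholders
rather than coefficients of a real module:
>>> from pvlib import pvsystem
>>> module = {'A4': -8e-06, 'A3': 0.0004, 'A2': -0.0078, 'A1': 0.068,
... 'A0': 0.928}
>>> f1 = pvsystem.sapm_spectral_loss(1.5, module) # doctest: +SKIP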
"""
am_coeff = [module['A4'], module['A3'], module['A2'], module['A1'],
module['A0']]
spectral_loss = np.polyval(am_coeff, airmass_absolute)
spectral_loss = np.where(np.isnan(spectral_loss), 0, spectral_loss)
spectral_loss = np.maximum(0, spectral_loss)
if isinstance(airmass_absolute, pd.Series):
spectral_loss = pd.Series(spectral_loss, airmass_absolute.index)
return spectral_loss
def sapm_effective_irradiance(poa_direct, poa_diffuse, airmass_absolute, aoi,
module):
r"""
Calculates the SAPM effective irradiance using the SAPM spectral
loss and SAPM angle of incidence loss functions.
Parameters
----------
poa_direct : numeric
The direct irradiance incident upon the module. [W/m2]
poa_diffuse : numeric
The diffuse irradiance incident on module. [W/m2]
airmass_absolute : numeric
Absolute airmass. [unitless]
aoi : numeric
Angle of incidence. [degrees]
module : dict-like
A dict, Series, or DataFrame defining the SAPM performance
parameters. See the :py:func:`sapm` notes section for more
details.
Returns
-------
effective_irradiance : numeric
Effective irradiance accounting for reflections and spectral content.
[W/m2]
Notes
-----
The SAPM model for effective irradiance [1]_ translates broadband direct
and diffuse irradiance on the plane of array to the irradiance absorbed by
a module's cells.
The model is
.. math::

Ee = f_1(AM_a) (E_b f_2(AOI) + f_d E_d)
where :math:`Ee` is effective irradiance (W/m2), :math:`f_1` is a fourth
degree polynomial in air mass :math:`AM_a`, :math:`E_b` is beam (direct)
irradiance on the plane of array, :math:`E_d` is diffuse irradiance on the
plane of array, :math:`f_2` is a fifth degree polynomial in the angle of
incidence :math:`AOI`, and :math:`f_d` is the fraction of diffuse
irradiance on the plane of array that is not reflected away.
References
----------
.. [1] <NAME> et al, "Sandia Photovoltaic Array Performance Model",
SAND2004-3535, Sandia National Laboratories, Albuquerque, NM
See also
--------
pvlib.iam.sapm
pvlib.pvsystem.sapm_spectral_loss
pvlib.pvsystem.sapm
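Examples
--------
A hedged sketch using a Sandia database module (retrieved as in
:py:func:`retrieve_sam`); the irradiance values are placeholders:
>>> from pvlib import pvsystem
>>> modules = pvsystem.retrieve_sam('SandiaMod') # doctest: +SKIP
>>> module = modules['Canadian_Solar_CS5P_220M___2009_'] # doctest: +SKIP
>>> ee = pvsystem.sapm_effective_irradiance(poa_direct=700,
... poa_diffuse=100, airmass_absolute=1.5, aoi=30,
... module=module) # doctest: +SKIP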
"""
F1 = sapm_spectral_loss(airmass_absolute, module)
F2 = iam.sapm(aoi, module)
Ee = F1 * (poa_direct * F2 + module['FD'] * poa_diffuse)
return Ee
def singlediode(photocurrent, saturation_current, resistance_series,
resistance_shunt, nNsVth, ivcurve_pnts=None,
method='lambertw'):
"""
Solve the single-diode model to obtain a photovoltaic IV curve.
Singlediode solves the single diode equation [1]_
.. math::

I = I_L - I_0 \left[ \exp \left( \frac{V + I R_s}{nNsVth} \right) - 1 \right] - \frac{V + I R_s}{R_{sh}}
for ``I`` and ``V`` when given ``IL, I0, Rs, Rsh,`` and ``nNsVth
(nNsVth = n*Ns*Vth)`` which are described later. Returns a DataFrame
which contains the 5 points on the I-V curve specified in
SAND2004-3535 [3]_. If all IL, I0, Rs, Rsh, and nNsVth are scalar, a
single curve will be returned, if any are Series (of the same
length), multiple IV curves will be calculated.
The input parameters can be calculated using calcparams_desoto from
meteorological data.
Parameters
----------
photocurrent : numeric
Light-generated current (photocurrent) in amperes under desired
IV curve conditions. Often abbreviated ``I_L``.
0 <= photocurrent
saturation_current : numeric
Diode saturation current in amperes under desired IV curve
conditions. Often abbreviated ``I_0``.
0 < saturation_current
resistance_series : numeric
Series resistance in ohms under desired IV curve conditions.
Often abbreviated ``Rs``.
0 <= resistance_series < numpy.inf
resistance_shunt : numeric
Shunt resistance in ohms under desired IV curve conditions.
Often abbreviated ``Rsh``.
0 < resistance_shunt <= numpy.inf
nNsVth : numeric
The product of three components. 1) The usual diode ideal factor
(n), 2) the number of cells in series (Ns), and 3) the cell
thermal voltage under the desired IV curve conditions (Vth). The
thermal voltage of the cell (in volts) may be calculated as
``k*temp_cell/q``, where k is Boltzmann's constant (J/K),
temp_cell is the temperature of the p-n junction in Kelvin, and
q is the charge of an electron (coulombs).
0 < nNsVth
ivcurve_pnts : None or int, default None
Number of points in the desired IV curve. If None or 0, no
IV curves will be produced.
method : str, default 'lambertw'
Determines the method used to calculate points on the IV curve. The
options are ``'lambertw'``, ``'newton'``, or ``'brentq'``.
Returns
-------
OrderedDict or DataFrame
The returned dict-like object always contains the keys/columns:
* i_sc - short circuit current in amperes.
* v_oc - open circuit voltage in volts.
* i_mp - current at maximum power point in amperes.
* v_mp - voltage at maximum power point in volts.
* p_mp - power at maximum power point in watts.
* i_x - current, in amperes, at ``v = 0.5*v_oc``.
* i_xx - current, in amperes, at ``V = 0.5*(v_oc+v_mp)``.
If ivcurve_pnts is greater than 0, the output dictionary will also
include the keys:
* i - IV curve current in amperes.
* v - IV curve voltage in volts.
The output will be an OrderedDict if photocurrent is a scalar,
array, or ivcurve_pnts is not None.
The output will be a DataFrame if photocurrent is a Series and
ivcurve_pnts is None.
Notes
-----
If the method is ``'lambertw'`` then the solution employed to solve the
implicit diode equation utilizes the Lambert W function to obtain an
explicit function of :math:`V=f(I)` and :math:`I=f(V)` as shown in [2]_.
If the method is ``'newton'`` then the root-finding Newton-Raphson method
is used. It should be safe for well behaved IV-curves, but the ``'brentq'``
method is recommended for reliability.
If the method is ``'brentq'`` then Brent's bisection search method is used
that guarantees convergence by bounding the voltage between zero and
open-circuit.
If the method is either ``'newton'`` or ``'brentq'`` and ``ivcurve_pnts``
are indicated, then :func:`pvlib.singlediode.bishop88` [4]_ is used to
calculate the points on the IV curve at diode voltages from zero to
open-circuit voltage with a log spacing that gets closer as voltage
increases. If the method is ``'lambertw'`` then the calculated points on
the IV curve are linearly spaced.
References
----------
.. [1] <NAME>, <NAME>, <NAME>, "Applied Photovoltaics" ISBN
0 86758 909 4
.. [2] <NAME>, <NAME>, "Exact analytical solutions of the
parameters of real solar cells using Lambert W-function", Solar
Energy Materials and Solar Cells, 81 (2004) 269-277.
.. [3] <NAME> et al, "Sandia Photovoltaic Array Performance Model",
SAND2004-3535, Sandia National Laboratories, Albuquerque, NM
.. [4] "Computer simulation of the effects of electrical mismatches in
photovoltaic cell interconnection circuits" JW Bishop, Solar Cell (1988)
https://doi.org/10.1016/0379-6787(88)90059-2
See also
--------
sapm
calcparams_desoto
pvlib.singlediode.bishop88
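Examples
--------
A minimal sketch using placeholder diode parameters; in practice the
five parameters would come from :py:func:`calcparams_desoto` or a
similar function:
>>> from pvlib import pvsystem
>>> curve_info = pvsystem.singlediode(photocurrent=7.0,
... saturation_current=6e-10, resistance_series=0.5,
... resistance_shunt=200, nNsVth=1.6,
... ivcurve_pnts=100) # doctest: +SKIP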
"""
# Calculate points on the IV curve using the LambertW solution to the
# single diode equation
if method.lower() == 'lambertw':
out = _singlediode._lambertw(
photocurrent, saturation_current, resistance_series,
resistance_shunt, nNsVth, ivcurve_pnts
)
i_sc, v_oc, i_mp, v_mp, p_mp, i_x, i_xx = out[:7]
if ivcurve_pnts:
ivcurve_i, ivcurve_v = out[7:]
else:
# Calculate points on the IV curve using either 'newton' or 'brentq'
# methods. Voltages are determined by first solving the single diode
# equation for the diode voltage V_d then backing out voltage
args = (photocurrent, saturation_current, resistance_series,
resistance_shunt, nNsVth) # collect args
v_oc = _singlediode.bishop88_v_from_i(
0.0, *args, method=method.lower()
)
i_mp, v_mp, p_mp = _singlediode.bishop88_mpp(
*args, method=method.lower()
)
i_sc = _singlediode.bishop88_i_from_v(
0.0, *args, method=method.lower()
)
i_x = _singlediode.bishop88_i_from_v(
v_oc / 2.0, *args, method=method.lower()
)
i_xx = _singlediode.bishop88_i_from_v(
(v_oc + v_mp) / 2.0, *args, method=method.lower()
)
# calculate the IV curve if requested using bishop88
if ivcurve_pnts:
vd = v_oc * (
(11.0 - np.logspace(np.log10(11.0), 0.0,
ivcurve_pnts)) / 10.0
)
ivcurve_i, ivcurve_v, _ = _singlediode.bishop88(vd, *args)
out = OrderedDict()
out['i_sc'] = i_sc
out['v_oc'] = v_oc
out['i_mp'] = i_mp
out['v_mp'] = v_mp
out['p_mp'] = p_mp
out['i_x'] = i_x
out['i_xx'] = i_xx
if ivcurve_pnts:
out['v'] = ivcurve_v
out['i'] = ivcurve_i
if isinstance(photocurrent, pd.Series) and not ivcurve_pnts:
out = pd.DataFrame(out, index=photocurrent.index)
return out
def max_power_point(photocurrent, saturation_current, resistance_series,
resistance_shunt, nNsVth, d2mutau=0, NsVbi=np.Inf,
method='brentq'):
"""
Given the single diode equation coefficients, calculates the maximum power
point (MPP).
Parameters
----------
photocurrent : numeric
photo-generated current [A]
saturation_current : numeric
diode reverse saturation current [A]
resistance_series : numeric
series resistance [ohms]
resistance_shunt : numeric
shunt resistance [ohms]
nNsVth : numeric
product of thermal voltage ``Vth`` [V], diode ideality factor ``n``,
and number of series cells ``Ns``
d2mutau : numeric, default 0
PVsyst parameter for cadmium-telluride (CdTe) and amorphous-silicon
(a-Si) modules that accounts for recombination current in the
intrinsic layer. The value is the ratio of intrinsic layer thickness
squared :math:`d^2` to the diffusion length of charge carriers
:math:`\\mu \\tau`. [V]
NsVbi : numeric, default np.inf
PVsyst parameter for cadmium-telluride (CdTe) and amorphous-silicon
(a-Si) modules that is the product of the PV module number of series
cells ``Ns`` and the builtin voltage ``Vbi`` of the intrinsic layer.
[V].
method : str
either ``'newton'`` or ``'brentq'``
Returns
-------
OrderedDict or pandas.DataFrame
``(i_mp, v_mp, p_mp)``
Notes
-----
Use this function when you only want to find the maximum power point. Use
:func:`singlediode` when you need to find additional points on the IV
curve. This function uses Brent's method by default because it is
guaranteed to converge.
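Examples
--------
A minimal sketch with placeholder single diode parameters:
>>> from pvlib import pvsystem
>>> mpp = pvsystem.max_power_point(photocurrent=7.0,
... saturation_current=6e-10, resistance_series=0.5,
... resistance_shunt=200, nNsVth=1.6) # doctest: +SKIP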
"""
i_mp, v_mp, p_mp = _singlediode.bishop88_mpp(
photocurrent, saturation_current, resistance_series,
resistance_shunt, nNsVth, d2mutau=d2mutau, NsVbi=NsVbi,
method=method.lower()
)
if isinstance(photocurrent, pd.Series):
ivp = {'i_mp': i_mp, 'v_mp': v_mp, 'p_mp': p_mp}
out = pd.DataFrame(ivp, index=photocurrent.index)
else:
out = OrderedDict()
out['i_mp'] = i_mp
out['v_mp'] = v_mp
out['p_mp'] = p_mp
return out
def v_from_i(resistance_shunt, resistance_series, nNsVth, current,
saturation_current, photocurrent, method='lambertw'):
'''
Device voltage at the given device current for the single diode model.
Uses the single diode model (SDM) as described in, e.g.,
Jain and Kapoor 2004 [1]_.
The solution is per Eq 3 of [1]_ except when resistance_shunt=numpy.inf,
in which case the explicit solution for voltage is used.
Ideal device parameters are specified by resistance_shunt=np.inf and
resistance_series=0.
Inputs to this function can include scalars and pandas.Series, but it is
the caller's responsibility to ensure that the arguments are all float64
and within the proper ranges.
Parameters
----------
resistance_shunt : numeric
Shunt resistance in ohms under desired IV curve conditions.
Often abbreviated ``Rsh``.
0 < resistance_shunt <= numpy.inf
resistance_series : numeric
Series resistance in ohms under desired IV curve conditions.
Often abbreviated ``Rs``.
0 <= resistance_series < numpy.inf
nNsVth : numeric
The product of three components. 1) The usual diode ideal factor
(n), 2) the number of cells in series (Ns), and 3) the cell
thermal voltage under the desired IV curve conditions (Vth). The
thermal voltage of the cell (in volts) may be calculated as
``k*temp_cell/q``, where k is Boltzmann's constant (J/K),
temp_cell is the temperature of the p-n junction in Kelvin, and
q is the charge of an electron (coulombs).
0 < nNsVth
current : numeric
The current in amperes under desired IV curve conditions.
saturation_current : numeric
Diode saturation current in amperes under desired IV curve
conditions. Often abbreviated ``I_0``.
0 < saturation_current
photocurrent : numeric
Light-generated current (photocurrent) in amperes under desired
IV curve conditions. Often abbreviated ``I_L``.
0 <= photocurrent
method : str
Method to use: ``'lambertw'``, ``'newton'``, or ``'brentq'``. *Note*:
``'brentq'`` is limited to 1st quadrant only.
Returns
-------
voltage : np.ndarray or scalar
References
----------
.. [1] <NAME>, <NAME>, "Exact analytical solutions of the
parameters of real solar cells using Lambert W-function", Solar
Energy Materials and Solar Cells, 81 (2004) 269-277.
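Examples
--------
A minimal sketch with placeholder parameters, solving for the voltage
at a chosen current:
>>> from pvlib import pvsystem
>>> v = pvsystem.v_from_i(resistance_shunt=200, resistance_series=0.5,
... nNsVth=1.6, current=3.0, saturation_current=6e-10,
... photocurrent=7.0) # doctest: +SKIP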
'''
if method.lower() == 'lambertw':
return _singlediode._lambertw_v_from_i(
resistance_shunt, resistance_series, nNsVth, current,
saturation_current, photocurrent
)
else:
# Calculate points on the IV curve using either 'newton' or 'brentq'
# methods. Voltages are determined by first solving the single diode
# equation for the diode voltage V_d then backing out voltage
args = (current, photocurrent, saturation_current,
resistance_series, resistance_shunt, nNsVth)
V = _singlediode.bishop88_v_from_i(*args, method=method.lower())
# find the right size and shape for returns
size, shape = _singlediode._get_size_and_shape(args)
if size <= 1:
if shape is not None:
V = np.tile(V, shape)
if np.isnan(V).any() and size <= 1:
V = np.repeat(V, size)
if shape is not None:
V = V.reshape(shape)
return V
def i_from_v(resistance_shunt, resistance_series, nNsVth, voltage,
saturation_current, photocurrent, method='lambertw'):
'''
Device current at the given device voltage for the single diode model.
Uses the single diode model (SDM) as described in, e.g.,
Jain and Kapoor 2004 [1]_.
The solution is per Eq 2 of [1]_ except when resistance_series=0,
in which case the explicit solution for current is used.
Ideal device parameters are specified by resistance_shunt=np.inf and
resistance_series=0.
Inputs to this function can include scalars and pandas.Series, but it is
the caller's responsibility to ensure that the arguments are all float64
and within the proper ranges.
Parameters
----------
resistance_shunt : numeric
Shunt resistance in ohms under desired IV curve conditions.
Often abbreviated ``Rsh``.
0 < resistance_shunt <= numpy.inf
resistance_series : numeric
Series resistance in ohms under desired IV curve conditions.
Often abbreviated ``Rs``.
0 <= resistance_series < numpy.inf
nNsVth : numeric
The product of three components. 1) The usual diode ideal factor
(n), 2) the number of cells in series (Ns), and 3) the cell
thermal voltage under the desired IV curve conditions (Vth). The
thermal voltage of the cell (in volts) may be calculated as
``k*temp_cell/q``, where k is Boltzmann's constant (J/K),
temp_cell is the temperature of the p-n junction in Kelvin, and
q is the charge of an electron (coulombs).
0 < nNsVth
voltage : numeric
The voltage in Volts under desired IV curve conditions.
saturation_current : numeric
Diode saturation current in amperes under desired IV curve
conditions. Often abbreviated ``I_0``.
0 < saturation_current
photocurrent : numeric
Light-generated current (photocurrent) in amperes under desired
IV curve conditions. Often abbreviated ``I_L``.
0 <= photocurrent
method : str
Method to use: ``'lambertw'``, ``'newton'``, or ``'brentq'``. *Note*:
``'brentq'`` is limited to 1st quadrant only.
Returns
-------
current : np.ndarray or scalar
References
----------
.. [1] <NAME>, <NAME>, "Exact analytical solutions of the
parameters of real solar cells using Lambert W-function", Solar
Energy Materials and Solar Cells, 81 (2004) 269-277.
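Examples
--------
A minimal sketch with placeholder parameters, solving for the current
at a chosen voltage:
>>> from pvlib import pvsystem
>>> i = pvsystem.i_from_v(resistance_shunt=200, resistance_series=0.5,
... nNsVth=1.6, voltage=30.0, saturation_current=6e-10,
... photocurrent=7.0) # doctest: +SKIP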
'''
if method.lower() == 'lambertw':
return _singlediode._lambertw_i_from_v(
resistance_shunt, resistance_series, nNsVth, voltage,
saturation_current, photocurrent
)
else:
# Calculate points on the IV curve using either 'newton' or 'brentq'
# methods. Voltages are determined by first solving the single diode
# equation for the diode voltage V_d then backing out voltage
args = (voltage, photocurrent, saturation_current, resistance_series,
resistance_shunt, nNsVth)
I = _singlediode.bishop88_i_from_v(*args, method=method.lower())
# find the right size and shape for returns
size, shape = _singlediode._get_size_and_shape(args)
if size <= 1:
if shape is not None:
I = np.tile(I, shape)
if np.isnan(I).any() and size <= 1:
I = np.repeat(I, size)
if shape is not None:
I = I.reshape(shape)
return I
def snlinverter(v_dc, p_dc, inverter):
r'''
Converts DC power and voltage to AC power using Sandia's
Grid-Connected PV Inverter model.
Determines the AC power output of an inverter given the DC voltage,
DC power, and appropriate Sandia Grid-Connected Photovoltaic
Inverter Model parameters. The output, ac_power, is clipped at the
maximum power output, and gives a negative power during low-input
power conditions, but does NOT account for maximum power point
tracking voltage windows nor maximum current or voltage limits on
the inverter.
Parameters
----------
v_dc : numeric
DC voltages, in volts, which are provided as input to the
inverter. Vdc must be >= 0.
p_dc : numeric
A scalar or DataFrame of DC powers, in watts, which are provided
as input to the inverter. Pdc must be >= 0.
inverter : dict-like
A dict-like object defining the inverter to be used, giving the
inverter performance parameters according to the Sandia
Grid-Connected Photovoltaic Inverter Model (SAND 2007-5036) [1]_.
A set of inverter performance parameters are provided with
pvlib, or may be generated from a System Advisor Model (SAM) [2]_
library using retrievesam. See Notes for required keys.
Returns
-------
ac_power : numeric
Modeled AC power output given the input DC voltage, Vdc, and
input DC power, Pdc. When ac_power would be greater than Paco,
it is set to Paco to represent inverter "clipping". When
ac_power would be less than Pso (startup power required), then
ac_power is set to -1*abs(Pnt) to represent nightly power
losses. ac_power is not adjusted for maximum power point
tracking (MPPT) voltage windows or maximum current limits of the
inverter.
Notes
-----
Required inverter keys are:
====== ============================================================
Column Description
====== ============================================================
Paco AC-power output from inverter based on input power
and voltage (W)
Pdco DC-power input to inverter, typically assumed to be equal
to the PV array maximum power (W)
Vdco DC-voltage level at which the AC-power rating is achieved
at the reference operating condition (V)
Pso DC-power required to start the inversion process, or
self-consumption by inverter, strongly influences inverter
efficiency at low power levels (W)
C0 Parameter defining the curvature (parabolic) of the
relationship between ac-power and dc-power at the reference
operating condition, default value of zero gives a
linear relationship (1/W)
C1 Empirical coefficient allowing Pdco to vary linearly
with dc-voltage input, default value is zero (1/V)
C2 Empirical coefficient allowing Pso to vary linearly with
dc-voltage input, default value is zero (1/V)
C3 Empirical coefficient allowing Co to vary linearly with
dc-voltage input, default value is zero (1/V)
Pnt AC-power consumed by inverter at night (night tare) to
maintain circuitry required to sense PV array voltage (W)
====== ============================================================
References
----------
.. [1] SAND2007-5036, "Performance Model for Grid-Connected
Photovoltaic Inverters" by <NAME>, <NAME>, <NAME>, <NAME>
.. [2] System Advisor Model web page. https://sam.nrel.gov.
See also
--------
sapm
singlediode
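Examples
--------
A hedged sketch using a CEC database record; the inverter name matches
the naming convention shown in :py:func:`retrieve_sam`:
>>> from pvlib import pvsystem
>>> inverters = pvsystem.retrieve_sam('CECInverter') # doctest: +SKIP
>>> inverter = inverters['AE_Solar_Energy__AE6_0__277V__277V__CEC_2012_'] # doctest: +SKIP
>>> ac = pvsystem.snlinverter(v_dc=350, p_dc=4000, inverter=inverter) # doctest: +SKIP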
'''
Paco = inverter['Paco']
Pdco = inverter['Pdco']
Vdco = inverter['Vdco']
Pso = inverter['Pso']
C0 = inverter['C0']
C1 = inverter['C1']
C2 = inverter['C2']
C3 = inverter['C3']
Pnt = inverter['Pnt']
A = Pdco * (1 + C1*(v_dc - Vdco))
B = Pso * (1 + C2*(v_dc - Vdco))
C = C0 * (1 + C3*(v_dc - Vdco))
ac_power = (Paco/(A-B) - C*(A-B)) * (p_dc-B) + C*((p_dc-B)**2)
ac_power = np.minimum(Paco, ac_power)
ac_power = np.where(p_dc < Pso, -1.0 * abs(Pnt), ac_power)
if isinstance(p_dc, pd.Series):
ac_power = pd.Series(ac_power, index=p_dc.index)
return ac_power
def adrinverter(v_dc, p_dc, inverter, vtol=0.10):
r'''
Converts DC power and voltage to AC power using <NAME>'s
Grid-Connected PV Inverter efficiency model
Parameters
----------
v_dc : numeric
A scalar or pandas series of DC voltages, in volts, which are provided
as input to the inverter. If Vdc and Pdc are vectors, they must be
of the same size. v_dc must be >= 0. (V)
p_dc : numeric
A scalar or pandas series of DC powers, in watts, which are provided
as input to the inverter. If Vdc and Pdc are vectors, they must be
of the same size. p_dc must be >= 0. (W)
inverter : dict-like
A dict-like object defining the inverter to be used, giving the
inverter performance parameters according to the model
developed by <NAME> [1].
A set of inverter performance parameters may be loaded from the
supplied data table using retrievesam.
See Notes for required keys.
vtol : numeric, default 0.1
A unit-less fraction that determines how far the efficiency model is
allowed to extrapolate beyond the inverter's normal input voltage
operating range. 0.0 <= vtol <= 1.0
Returns
-------
ac_power : numeric
A numpy array or pandas series of modeled AC power output given the
input DC voltage, v_dc, and input DC power, p_dc. When ac_power would
be greater than pac_max, it is set to p_max to represent inverter
"clipping". When ac_power would be less than -p_nt (energy consumed
rather than produced) then ac_power is set to -p_nt to represent
nightly power losses. ac_power is not adjusted for maximum power point
tracking (MPPT) voltage windows or maximum current limits of the
inverter.
Notes
-----
Required inverter keys are:
======= ============================================================
Column Description
======= ============================================================
p_nom The nominal power value used to normalize all power values,
typically the DC power needed to produce maximum AC power
output, (W).
v_nom The nominal DC voltage value used to normalize DC voltage
values, typically the level at which the highest efficiency
is achieved, (V).
pac_max The maximum AC output power value, used to clip the output
if needed, (W).
ce_list This is a list of 9 coefficients that capture the influence
of input voltage and power on inverter losses, and thereby
efficiency.
p_nt ac-power consumed by inverter at night (night tare) to
maintain circuitry required to sense PV array voltage, (W).
======= ============================================================
References
----------
.. [1] Beyond the Curves: Modeling the Electrical Efficiency
of Photovoltaic Inverters, PVSC 2008, <NAME> et al.
See also
--------
sapm
singlediode
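Examples
--------
A hedged sketch using the first record of the ADR inverter library
(chosen only for illustration):
>>> from pvlib import pvsystem
>>> inverters = pvsystem.retrieve_sam('ADRInverter') # doctest: +SKIP
>>> inverter = inverters.iloc[:, 0] # doctest: +SKIP
>>> ac = pvsystem.adrinverter(v_dc=350, p_dc=4000, inverter=inverter) # doctest: +SKIP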
'''
p_nom = inverter['Pnom']
v_nom = inverter['Vnom']
pac_max = inverter['Pacmax']
p_nt = inverter['Pnt']
ce_list = inverter['ADRCoefficients']
v_max = inverter['Vmax']
v_min = inverter['Vmin']
vdc_max = inverter['Vdcmax']
mppt_hi = inverter['MPPTHi']
mppt_low = inverter['MPPTLow']
v_lim_upper = float(np.nanmax([v_max, vdc_max, mppt_hi]) * (1 + vtol))
v_lim_lower = float(np.nanmax([v_min, mppt_low]) * (1 - vtol))
pdc = p_dc / p_nom
vdc = v_dc / v_nom
# zero voltage will lead to division by zero, but since power is
# set to night time value later, these errors can be safely ignored
with np.errstate(invalid='ignore', divide='ignore'):
poly = np.array([pdc**0, # replace with np.ones_like?
pdc,
pdc**2,
vdc - 1,
pdc * (vdc - 1),
pdc**2 * (vdc - 1),
1. / vdc - 1, # divide by 0
pdc * (1. / vdc - 1), # invalid 0./0. --> nan
pdc**2 * (1. / vdc - 1)]) # divide by 0
p_loss = np.dot(np.array(ce_list), poly)
ac_power = p_nom * (pdc-p_loss)
p_nt = -1 * np.absolute(p_nt)
# set output to nan where input is outside of limits
# errstate silences case where input is nan
with np.errstate(invalid='ignore'):
invalid = (v_lim_upper < v_dc) | (v_dc < v_lim_lower)
ac_power = np.where(invalid, np.nan, ac_power)
# set night values
ac_power = np.where(vdc == 0, p_nt, ac_power)
if isinstance(p_dc, pd.Series):
ac_power = pd.Series(ac_power, index=p_dc.index)
return ac_power
'''
<NAME> 2015
Useful mathematical commands
'''
import sys
import copy
import numpy as np
import scipy.optimize as optimize
import scipy.stats as stats
import scipy.interpolate as interp
import scipy.special as special
from scipy.signal import fftconvolve, correlate
import matplotlib.pyplot as plt
from multiprocessing import Process, Pipe
try:
from itertools import izip
except ImportError:
izip = zip
if sys.version_info.major == 2:
fmap = map
elif sys.version_info.major == 3:
fmap = lambda x, *args: list(map(x, *args))
xrange = range
'''
ACF
var=True: calculate variance, var=False, do not calculate. var=number: use as number
Include mean subtraction?
Include lagaxis function?
'''
def acf(array, var=False, norm_by_tau=True, lagaxis=False): #set lagaxis=True?
array = np.array(array)
N = len(array)
if var is True:
var = np.var(array)
elif var is False:
var = 1
lags = np.arange(-(N-1), N, dtype=float)
if norm_by_tau:
taus = np.concatenate((np.arange(1, N+1), np.arange(N-1, 0, -1)))
if lagaxis:
return lags, np.correlate(array, array, "full")/(var*taus)
return np.correlate(array, array, "full")/(var*taus)
if lagaxis:
return lags, np.correlate(array, array, "full")/(var*N)
return np.correlate(array, array, "full")/(var*N)
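# Usage sketch: for zero-mean white noise the normalized ACF is close to 1
# at zero lag (the centre element of the "full" correlation output).
# >>> x = np.random.randn(256)
# >>> lags, r = acf(x, var=True, lagaxis=True)
# >>> r[len(r)//2] # zero-lag value, approximately 1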
#Do not provide bins but provide edges?
#error bars?
def lagfunction(func, t, x, e=None, dtau=1, tau_edges=None, mirror=False):
length = len(x)
if tau_edges is None:
num_lags = int(np.ceil((np.max(t) - np.min(t))/dtau)) + 1 #+1?
taus = np.arange(num_lags) * dtau
tau_edges = (taus[:-1] + taus[1:])/2.0
tau_edges = np.hstack((tau_edges, [tau_edges[-1]+dtau]))
N_taus = np.zeros(num_lags)
retval = np.zeros(num_lags)
variance = np.zeros(num_lags)
else:
dtau = np.median(np.diff(tau_edges)) #not quite working
taus = tau_edges - dtau
#taus = np.concatenate((taus, [taus[-1]+dtau]))
N_taus = np.zeros(len(tau_edges))#-1)
retval = np.zeros(len(tau_edges))#-1) #this should just be "mean"
variance = np.zeros(len(tau_edges))
weighted = False
if e is not None:
weighted = True
# this could be sped up several ways
I = list(range(length))
for i in I:
for j in I:
dt = np.abs(t[i]-t[j])
index = np.where(dt < tau_edges)[0] #<=?
if len(index) == 0:
continue
index = index[0] #get the lowest applicable lag value
N_taus[index] += 1
#Replace this with online algorithm?
retval[index] += func(x[i], x[j])
# print N_taus
#divide by zero problem!, only with one-pass algorithm
retval = retval / N_taus
if mirror: #fix this
#mirror each:
taus = np.concatenate((-1*taus[::-1][:-1], taus))
retval = np.concatenate((retval[::-1][:-1], retval))
#retval /= 2 #double counting, can speed this up! why no division by 2?
#return tau_edges, retval
return taus, retval
#return tau_edges, retval #BAD
def acf2d(array, speed='fast', mode='full', xlags=None, ylags=None):
if speed == 'fast' or speed == 'slow':
ones = np.ones(np.shape(array))
norm = fftconvolve(ones, ones, mode=mode) #very close for either speed
if speed == 'fast':
return fftconvolve(array, np.flipud(np.fliplr(array)), mode=mode)/norm
else:
return correlate(array, array, mode=mode)/norm
elif speed == 'exact':
#NOTE: (r, c) convention is flipped from (x, y), also that increasing c is decreasing y
LENX = len(array[0])
LENY = len(array)
if xlags is None:
xlags = np.arange(-1*LENX+1, LENX)
if ylags is None:
ylags = np.arange(-1*LENY+1, LENY)
retval = np.zeros((len(ylags), len(xlags)))
for i, xlag in enumerate(xlags):
print(xlag)
for j, ylag in enumerate(ylags):
if ylag > 0 and xlag > 0:
A = array[:-1*ylag, xlag:] #the "stationary" array
B = array[ylag:, :-1*xlag]
elif ylag < 0 and xlag > 0:
A = array[-1*ylag:, xlag:]
B = array[:ylag, :-1*xlag]
elif ylag > 0 and xlag < 0:#optimize later via symmetries
A = array[:-1*ylag, :xlag]
B = array[ylag:, -1*xlag:]
elif ylag < 0 and xlag < 0:
A = array[-1*ylag:, :xlag]
B = array[:ylag, -1*xlag:]
else: #one of the lags is zero
if ylag == 0 and xlag > 0:
A = array[-1*ylag:, xlag:]
B = array[:, :-1*xlag]
elif ylag == 0 and xlag < 0:
A = array[-1*ylag:, :xlag]
B = array[:, -1*xlag:]
elif ylag > 0 and xlag == 0:
A = array[:-1*ylag, :]
B = array[ylag:, -1*xlag:]
elif ylag < 0 and xlag == 0:
A = array[-1*ylag:, :]
B = array[:ylag, -1*xlag:]
else:
A = array[:, :]
B = array[:, :]
#print xlag, ylag, A, B
C = A*B
C = C.flatten()
goodinds = np.where(np.isfinite(C))[0] #check for good values
retval[j, i] = np.mean(C[goodinds])
return retval
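# Usage sketch: 2D ACF of a random image via the FFT-based fast path;
# mode='same' keeps the output the same shape as the input.
# >>> img = np.random.randn(64, 64)
# >>> acf2d_img = acf2d(img, speed='fast', mode='same')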
def lagaxis(arg, dtau=1):
if isinstance(arg, (list, np.ndarray)): #generate a lag axis based on a time axis
length = len(arg)
dtau = np.mean(np.diff(arg))
#!/usr/bin/env python
#===============================================================================
# Copyright 2017 Geoscience Australia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================
'''
Created on 16/11/2016
@author: <NAME>
'''
import netCDF4
import numpy as np
import numexpr as ne
import math
import os
import sys
import re
import tempfile
from collections import OrderedDict
from pprint import pformat
from scipy.interpolate import griddata
from geophys_utils._crs_utils import transform_coords, get_utm_wkt
from geophys_utils._transect_utils import utm_coords, coords2distance
from geophys_utils._netcdf_utils import NetCDFUtils
from geophys_utils._polygon_utils import points2convex_hull
from scipy.spatial import cKDTree
import logging
# Setup logging handlers if required
logger = logging.getLogger(__name__) # Get logger
logger.setLevel(logging.INFO) # Initial logging level for this module
try:
import memcache
except ImportError:
logger.debug('Unable to import memcache. AWS-specific functionality will not be enabled')
memcache = None
# Default number of points to read per chunk when retrieving data
DEFAULT_READ_CHUNK_SIZE = 8192
# Set this to a number other than zero for testing
POINT_LIMIT = 0
class NetCDFPointUtils(NetCDFUtils):
'''
NetCDFPointUtils class to do various fiddly things with NetCDF geophysics point data files.
'''
CACHE_VARIABLE_PARAMETERS = {'complevel': 4,
'zlib': True,
'fletcher32': True,
'shuffle': True,
'endian': 'little',
}
def __init__(self,
netcdf_dataset,
memcached_connection=None,
enable_disk_cache=None,
enable_memory_cache=True,
cache_path=None,
s3_bucket=None,
debug=False):
'''
NetCDFPointUtils Constructor
@parameter netcdf_dataset: netCDF4.Dataset object containing a point dataset
@parameter enable_disk_cache: Boolean parameter indicating whether local cache file should be used, or None for default
@parameter enable_memory_cache: Boolean parameter indicating whether values should be cached in memory or not.
@parameter debug: Boolean parameter indicating whether debug output should be turned on or not
'''
# Start of init function - Call inherited constructor first
super().__init__(netcdf_dataset=netcdf_dataset,
debug=debug
)
logger.debug('Running NetCDFPointUtils constructor')
if memcache is not None:
self.memcached_connection = memcached_connection
else:
self.memcached_connection = None
self.s3_bucket = s3_bucket
self.cache_path = cache_path or os.path.join(os.path.join(tempfile.gettempdir(), 'NetCDFPointUtils'),
re.sub(r'\W', '_', os.path.splitext(self.nc_path)[0])) + '_cache.nc'
self.cache_basename = os.path.join(self.cache_path,
re.sub(r'\W', '_', os.path.splitext(self.nc_path)[0]))
logger.debug('self.cache_path: {}'.format(self.cache_path))
self.enable_memory_cache = enable_memory_cache
# If caching is not explicitly specified, enable it for OPeNDAP access
if enable_disk_cache is None:
self.enable_disk_cache = self.opendap
else:
self.enable_disk_cache = enable_disk_cache
# Initialise private property variables to None until set by property getter methods
self._xycoords = None
self._point_variables = None
self._data_variable_list = None
self._kdtree = None
# Determine exact spatial bounds
xycoords = self.xycoords
xmin = np.nanmin(xycoords[:,0])
xmax = np.nanmax(xycoords[:,0])
ymin = np.nanmin(xycoords[:,1])
ymax = np.nanmax(xycoords[:,1])
# Create nested list of bounding box corner coordinates
self.native_bbox = [[xmin, ymin], [xmax, ymin], [xmax, ymax], [xmin, ymax]]
# Define bounds
self.bounds = [xmin, ymin, xmax, ymax]
self.point_count = len(xycoords)
#===========================================================================
# def __del__(self):
# '''
# NetCDFPointUtils Destructor
# '''
# if self.enable_disk_cache:
# try:
# cache_file_path = self._nc_cache_dataset.filepath()
# self._nc_cache_dataset.close()
# os.remove(cache_file_path)
# except:
# pass
#===========================================================================
def fetch_array(self, source_variable, dest_array=None):
'''
Helper function to retrieve entire 1D array in pieces < self.max_bytes in size
@param source_variable: netCDF variable from which to retrieve data
'''
source_len = source_variable.shape[0]
pieces_required = int(math.ceil((source_variable[0].itemsize * source_len) / self.max_bytes))
max_elements = source_len // pieces_required
# Reduce max_elements to fit within chunk boundaries if possible
if pieces_required > 1 and hasattr(source_variable, '_ChunkSizes'):
chunk_size = (source_variable._ChunkSizes
if type(source_variable._ChunkSizes) in [int, np.int32]
else source_variable._ChunkSizes[0]
)
chunk_count = max(max_elements // chunk_size,
1)
max_elements = min(chunk_count * chunk_size,
max_elements)
pieces_required = int(math.ceil(source_len / max_elements))
logger.debug('Fetching {} pieces containing up to {} {} array elements.'.format(pieces_required, max_elements, source_variable.name))
if dest_array is None:
dest_array = np.zeros((source_len,), dtype=source_variable.dtype)
# Copy array in pieces
start_index = 0
while start_index < source_len:
end_index = min(start_index + max_elements, source_len)
logger.debug('Retrieving {} array elements {}:{}'.format(source_variable.name, start_index, end_index))
array_slice = slice(start_index, end_index)
dest_array[array_slice] = source_variable[array_slice]
start_index += max_elements
return dest_array
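# Illustrative usage sketch (not part of the original class; the instance name 'point_utils' and the
# variable name 'gravity' are assumptions for the example):
#     values = point_utils.fetch_array(point_utils.netcdf_dataset.variables['gravity'])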
def get_polygon(self):
'''
Returns WKT representation of convex hull polygon for dataset
'''
return 'POLYGON((' + ', '.join([' '.join(
['%.4f' % ordinate for ordinate in coordinates])
for coordinates in self.get_convex_hull()]) + '))'
def get_spatial_mask(self, bounds, bounds_wkt=None):
'''
Return boolean mask of dimension 'point' for all coordinates within specified bounds and CRS
'''
coordinates = self.xycoords
if bounds_wkt is not None:
coordinates = np.array(transform_coords(self.xycoords, self.wkt, bounds_wkt))
bounds_half_size = abs(np.array([bounds[2] - bounds[0], bounds[3] - bounds[1]])) / 2.0
bounds_centroid = np.array([bounds[0], bounds[1]]) + bounds_half_size
# Return true for each point which is <= bounds_half_size distance from bounds_centroid
return np.all(ne.evaluate("abs(coordinates - bounds_centroid) <= bounds_half_size"), axis=1)
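# Illustrative usage sketch (assumed instance name 'point_utils'; bounds are (xmin, ymin, xmax, ymax)
# in the dataset's native CRS unless bounds_wkt is supplied):
#     mask = point_utils.get_spatial_mask((120.0, -30.0, 121.0, -29.0))
#     coords_in_bounds = point_utils.xycoords[mask]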
def get_reprojected_bounds(self, bounds, from_wkt, to_wkt):
'''
Function to take a bounding box specified in one CRS and return its smallest containing bounding box in a new CRS
@parameter bounds: bounding box specified as tuple(xmin, ymin, xmax, ymax) in CRS from_wkt
@parameter from_wkt: WKT for CRS from which to transform bounds
@parameter to_wkt: WKT for CRS to which to transform bounds
@return reprojected_bounding_box: bounding box specified as tuple(xmin, ymin, xmax, ymax) in CRS to_wkt
'''
if (to_wkt is None) or (from_wkt is None) or (to_wkt == from_wkt):
return bounds
# Need to look at all four bounding box corners, not just LL & UR
original_bounding_box =((bounds[0], bounds[1]), (bounds[2], bounds[1]), (bounds[2], bounds[3]), (bounds[0], bounds[3]))
reprojected_bounding_box = np.array(transform_coords(original_bounding_box, from_wkt, to_wkt))
return [min(reprojected_bounding_box[:,0]), min(reprojected_bounding_box[:,1]), max(reprojected_bounding_box[:,0]), max(reprojected_bounding_box[:,1])]
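# Illustrative usage sketch (assumed instance name 'point_utils' and an assumed variable ALBERS_WKT
# holding the WKT of the target CRS):
#     grid_bounds = point_utils.get_reprojected_bounds(point_utils.bounds, point_utils.wkt, ALBERS_WKT)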
def grid_points(self, grid_resolution,
variables=None,
native_grid_bounds=None,
reprojected_grid_bounds=None,
resampling_method='linear',
grid_wkt=None,
point_step=1):
'''
Function to grid points in a specified bounding rectangle to a regular grid of the specified resolution and crs
@parameter grid_resolution: cell size of regular grid in grid CRS units
@parameter variables: Single variable name string or list of multiple variable name strings. Defaults to all point variables
@parameter native_grid_bounds: Spatial bounding box of area to grid in native coordinates
@parameter reprojected_grid_bounds: Spatial bounding box of area to grid in grid coordinates
@parameter resampling_method: Resampling method for gridding. 'linear' (default), 'nearest' or 'cubic'.
See https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.griddata.html
@parameter grid_wkt: WKT for grid coordinate reference system. Defaults to native CRS
@parameter point_step: Sampling spacing for points. 1 (default) means every point, 2 means every second point, etc.
@return grids: dict of grid arrays keyed by variable name if parameter 'variables' value was a list, or
a single grid array if 'variable' parameter value was a string
@return wkt: WKT for grid coordinate reference system.
@return geotransform: GDAL GeoTransform for grid
'''
assert not (native_grid_bounds and reprojected_grid_bounds), 'Either native_grid_bounds or reprojected_grid_bounds can be provided, but not both'
# Grid all data variables if not specified
variables = variables or self.point_variables
# Allow single variable to be given as a string
single_var = (type(variables) == str)
if single_var:
variables = [variables]
if native_grid_bounds:
reprojected_grid_bounds = self.get_reprojected_bounds(native_grid_bounds, self.wkt, grid_wkt)
elif reprojected_grid_bounds:
native_grid_bounds = self.get_reprojected_bounds(reprojected_grid_bounds, grid_wkt, self.wkt)
else: # No reprojection required
native_grid_bounds = self.bounds
reprojected_grid_bounds = self.bounds
# Determine spatial grid bounds rounded out to nearest GRID_RESOLUTION multiple
pixel_centre_bounds = (round(math.floor(reprojected_grid_bounds[0] / grid_resolution) * grid_resolution, 6),
round(math.floor(reprojected_grid_bounds[1] / grid_resolution) * grid_resolution, 6),
round(math.floor(reprojected_grid_bounds[2] / grid_resolution - 1.0) * grid_resolution + grid_resolution, 6),
round(math.floor(reprojected_grid_bounds[3] / grid_resolution - 1.0) * grid_resolution + grid_resolution, 6)
)
grid_size = [pixel_centre_bounds[dim_index+2] - pixel_centre_bounds[dim_index] for dim_index in range(2)]
# Extend area for points 2% of the grid extent beyond each edge for nice interpolation at edges
expanded_grid_bounds = [pixel_centre_bounds[0]-grid_size[0]/50.0,
pixel_centre_bounds[1]-grid_size[1]/50.0,
pixel_centre_bounds[2]+grid_size[0]/50.0,
pixel_centre_bounds[3]+grid_size[1]/50.0
]
spatial_subset_mask = self.get_spatial_mask(self.get_reprojected_bounds(expanded_grid_bounds, grid_wkt, self.wkt))
# Create grids of Y and X values. Note YX ordering and inverted Y
# Note GRID_RESOLUTION/2.0 fudge to avoid truncation due to rounding error
grid_y, grid_x = np.mgrid[pixel_centre_bounds[3]:pixel_centre_bounds[1]-grid_resolution/2.0:-grid_resolution,
pixel_centre_bounds[0]:pixel_centre_bounds[2]+grid_resolution/2.0:grid_resolution]
# Skip points to reduce memory requirements
#TODO: Implement function which grids spatial subsets.
point_subset_mask = np.zeros(shape=(self.netcdf_dataset.dimensions['point'].size,), dtype=bool)
|
import numpy as np
import cv2
boundaries = [
([0, 120, 0], [140, 255, 100]),
([25, 0, 75], [180, 38, 255])
]
def handsegment(frame):
lower, upper = boundaries[0]
lower = np.array(lower, dtype="uint8")
|
import numpy as np
from simple_neural_network.activations import (
tanh,
dtanh,
relu,
drelu,
leaky_relu,
dleaky_relu,
elu,
delu,
softmax,
dsoftmax,
identity,
didentity,
)
MAX_ATOL = 1e-3
def test_tanh():
arr = np.array([-5.0, -1.0, 0.0, 1.0, 5.0])
assert np.allclose(tanh(arr), np.array([-0.999, -0.761, 0.0, 0.761, 0.999]), atol=MAX_ATOL)
assert np.allclose(
dtanh(arr), np.array([1.81e-04, 4.19e-01, 1.0, 4.19e-01, 1.81e-04]), atol=MAX_ATOL
)
def test_relu():
arr = np.array([-1.0, 1.0, 0.0])
|
"""Tests for module 1d Wasserstein solver"""
# Author: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# License: MIT License
import numpy as np
import pytest
import ot
from ot.lp import wasserstein_1d
from ot.backend import get_backend_list, tf
from scipy.stats import wasserstein_distance
backend_list = get_backend_list()
def test_emd_1d_emd2_1d_with_weights():
# test emd1d gives similar results as emd
n = 20
m = 30
rng = np.random.RandomState(0)
u = rng.randn(n, 1)
v = rng.randn(m, 1)
w_u = rng.uniform(0., 1., n)
w_u = w_u / w_u.sum()
w_v = rng.uniform(0., 1., m)
w_v = w_v / w_v.sum()
M = ot.dist(u, v, metric='sqeuclidean')
G, log = ot.emd(w_u, w_v, M, log=True)
wass = log["cost"]
G_1d, log = ot.emd_1d(u, v, w_u, w_v, metric='sqeuclidean', log=True)
wass1d = log["cost"]
wass1d_emd2 = ot.emd2_1d(u, v, w_u, w_v, metric='sqeuclidean', log=False)
wass1d_euc = ot.emd2_1d(u, v, w_u, w_v, metric='euclidean', log=False)
# check loss is similar
np.testing.assert_allclose(wass, wass1d)
np.testing.assert_allclose(wass, wass1d_emd2)
# check loss is similar to scipy's implementation for Euclidean metric
wass_sp = wasserstein_distance(u.reshape((-1,)), v.reshape((-1,)), w_u, w_v)
np.testing.assert_allclose(wass_sp, wass1d_euc)
# check constraints
np.testing.assert_allclose(w_u, G.sum(1))
np.testing.assert_allclose(w_v, G.sum(0))
@pytest.mark.parametrize('nx', backend_list)
def test_wasserstein_1d(nx):
from scipy.stats import wasserstein_distance
rng = np.random.RandomState(0)
n = 100
x = np.linspace(0, 5, n)
rho_u = np.abs(rng.randn(n))
rho_u /= rho_u.sum()
rho_v = np.abs(rng.randn(n))
rho_v /= rho_v.sum()
xb = nx.from_numpy(x)
rho_ub = nx.from_numpy(rho_u)
rho_vb = nx.from_numpy(rho_v)
# test 1 : wasserstein_1d should be close to scipy W_1 implementation
np.testing.assert_almost_equal(wasserstein_1d(xb, xb, rho_ub, rho_vb, p=1),
wasserstein_distance(x, x, rho_u, rho_v))
# test 2 : wasserstein_1d should be close to one when only translating the support
np.testing.assert_almost_equal(wasserstein_1d(xb, xb + 1, p=2),
1.)
# test 3 : arrays test
X = np.stack((np.linspace(0, 5, n), np.linspace(0, 5, n) * 10), -1)
Xb = nx.from_numpy(X)
res = wasserstein_1d(Xb, Xb, rho_ub, rho_vb, p=2)
np.testing.assert_almost_equal(100 * res[0], res[1], decimal=4)
@pytest.mark.parametrize('nx', backend_list)
def test_wasserstein_1d_type_devices(nx):
rng = np.random.RandomState(0)
n = 10
x = np.linspace(0, 5, n)
rho_u = np.abs(rng.randn(n))
rho_u /= rho_u.sum()
rho_v = np.abs(rng.randn(n))
rho_v /= rho_v.sum()
for tp in nx.__type_list__:
print(nx.dtype_device(tp))
xb = nx.from_numpy(x, type_as=tp)
rho_ub = nx.from_numpy(rho_u, type_as=tp)
rho_vb = nx.from_numpy(rho_v, type_as=tp)
res = wasserstein_1d(xb, xb, rho_ub, rho_vb, p=1)
nx.assert_same_dtype_device(xb, res)
@pytest.mark.skipif(not tf, reason="tf not installed")
def test_wasserstein_1d_device_tf():
if not tf:
return
nx = ot.backend.TensorflowBackend()
rng = np.random.RandomState(0)
|
# %% [markdown]
# # THE MIND OF A MAGGOT
# %% [markdown]
# ## Imports
import os
import time
import warnings
from itertools import chain
import colorcet as cc
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.transforms as transforms
import numpy as np
import pandas as pd
import seaborn as sns
from anytree import LevelOrderGroupIter, NodeMixin
from mpl_toolkits.mplot3d import Axes3D
from scipy.cluster.hierarchy import linkage
from scipy.linalg import orthogonal_procrustes
from scipy.optimize import linear_sum_assignment
from scipy.spatial.distance import squareform
from sklearn.cluster import AgglomerativeClustering
from sklearn.exceptions import ConvergenceWarning
from sklearn.manifold import MDS
from sklearn.metrics import adjusted_rand_score, pairwise_distances
from sklearn.utils.testing import ignore_warnings
# from tqdm import tqdm
from graspy.cluster import AutoGMMCluster, GaussianCluster
from graspy.embed import (
AdjacencySpectralEmbed,
ClassicalMDS,
LaplacianSpectralEmbed,
selectSVD,
)
from graspy.models import DCSBMEstimator, RDPGEstimator, SBMEstimator
from graspy.plot import heatmap, pairplot
from graspy.simulations import rdpg
from graspy.utils import augment_diagonal, binarize, pass_to_ranks
from src.cluster import get_paired_inds
from src.data import load_metagraph
from src.graph import preprocess
from src.hierarchy import signal_flow
from src.io import savecsv, savefig
from src.traverse import (
Cascade,
RandomWalk,
TraverseDispatcher,
to_markov_matrix,
to_transmission_matrix,
)
from src.visualization import (
CLASS_COLOR_DICT,
adjplot,
barplot_text,
gridmap,
matrixplot,
palplot,
screeplot,
set_axes_equal,
stacked_barplot,
)
from tqdm.autonotebook import tqdm
warnings.filterwarnings(action="ignore", category=ConvergenceWarning)
FNAME = os.path.basename(__file__)[:-3]
print(FNAME)
rc_dict = {
"axes.spines.right": False,
"axes.spines.top": False,
"axes.formatter.limits": (-3, 3),
"figure.figsize": (6, 3),
"figure.dpi": 100,
}
for key, val in rc_dict.items():
mpl.rcParams[key] = val
context = sns.plotting_context(context="talk", font_scale=1, rc=rc_dict)
sns.set_context(context)
np.random.seed(8888)
def stashfig(name, **kws):
savefig(name, foldername=FNAME, save_on=True, **kws)
def stashcsv(df, name, **kws):
savecsv(df, name)
def invert_permutation(p):
"""The argument p is assumed to be some permutation of 0, 1, ..., len(p)-1.
Returns an array s, where s[i] gives the index of i in p.
"""
p = np.asarray(p)
s = np.empty(p.size, p.dtype)
s[p] = np.arange(p.size)
return s
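# Quick worked example: for p = [2, 0, 1], value 0 sits at position 1, value 1 at position 2 and
# value 2 at position 0, so invert_permutation([2, 0, 1]) returns array([1, 2, 0]).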
graph_type = "Gad"
mg = load_metagraph(graph_type, version="2020-04-01")
mg = preprocess(
mg,
threshold=0,
sym_threshold=False,
remove_pdiff=True,
binarize=False,
weight="weight",
)
meta = mg.meta
# plot where we are cutting out nodes based on degree
degrees = mg.calculate_degrees()
fig, ax = plt.subplots(1, 1, figsize=(5, 2.5))
sns.distplot(np.log10(degrees["Total edgesum"]), ax=ax)
q = np.quantile(degrees["Total edgesum"], 0.05)
ax.axvline(np.log10(q), linestyle="--", color="r")
ax.set_xlabel("log10(total synapses)")
# remove low degree neurons
idx = meta[degrees["Total edgesum"] > q].index
mg = mg.reindex(idx, use_ids=True)
# remove center neurons # FIXME
idx = mg.meta[mg.meta["hemisphere"].isin(["L", "R"])].index
mg = mg.reindex(idx, use_ids=True)
mg = mg.make_lcc()
mg.calculate_degrees(inplace=True)
meta = mg.meta
meta["inds"] = range(len(meta))
adj = mg.adj
# %% [markdown]
# ## Setup for paths
out_groups = [
("dVNC", "dVNC;CN", "dVNC;RG", "dSEZ;dVNC"),
("dSEZ", "dSEZ;CN", "dSEZ;LHN", "dSEZ;dVNC"),
("motor-PaN", "motor-MN", "motor-VAN", "motor-AN"),
("RG", "RG-IPC", "RG-ITP", "RG-CA-LP", "dVNC;RG"),
("dUnk",),
]
out_group_names = ["VNC", "SEZ", "motor", "RG", "dUnk"]
source_groups = [
("sens-ORN",),
("sens-MN",),
("sens-photoRh5", "sens-photoRh6"),
("sens-thermo",),
("sens-vtd",),
("sens-AN",),
]
source_group_names = ["Odor", "MN", "Photo", "Temp", "VTD", "AN"]
class_key = "merge_class"
sg = list(chain.from_iterable(source_groups))
og = list(chain.from_iterable(out_groups))
sg_name = "All"
og_name = "All"
np.random.seed(888)
max_hops = 10
n_init = 1024
p = 0.05
traverse = Cascade
simultaneous = True
transition_probs = to_transmission_matrix(adj, p)
transition_probs = to_markov_matrix(adj)
source_inds = meta[meta[class_key].isin(sg)]["inds"].values
out_inds = meta[meta[class_key].isin(og)]["inds"].values
# %% [markdown]
# ## Run paths
print(f"Running {n_init} random walks from each source node...")
paths = []
path_lens = []
for s in source_inds:
rw = RandomWalk(
transition_probs, stop_nodes=out_inds, max_hops=10, allow_loops=False
)
for n in range(n_init):
rw.start(s)
paths.append(rw.traversal_)
path_lens.append(len(rw.traversal_))
# %% [markdown]
# ## Look at distribution of path lengths
# path_lens was already populated inside the random-walk loop above, so no second pass is needed here
sns.distplot(path_lens, kde=False)
stashfig(f"path-length-dist-graph={graph_type}")
paths_by_len = {i: [] for i in range(1, max_hops + 1)}
for p in paths:
paths_by_len[len(p)].append(p)
# %% [markdown]
# ## Subsampling and selecting paths
path_len = 5
paths = paths_by_len[path_len]
subsample = min(2 ** 13, len(paths))
basename = f"-subsample={subsample}-plen={path_len}-graph={graph_type}"
new_paths = []
for p in paths:
if p[-1] in out_inds:
new_paths.append(p)
paths = new_paths
print(f"Number of paths of length {path_len}: {len(paths)}")
if subsample != -1:
inds = np.random.choice(len(paths), size=subsample, replace=False)
new_paths = []
for i, p in enumerate(paths):
if i in inds:
new_paths.append(p)
paths = new_paths
print(f"Number of paths after subsampling: {len(paths)}")
# %% [markdown]
# ## Embed for a dissimilarity measure
print("Embedding graph and finding pairwise distances...")
embedder = AdjacencySpectralEmbed(n_components=None, n_elbows=2)
embed = embedder.fit_transform(pass_to_ranks(adj))
embed = np.concatenate(embed, axis=-1)
lp_inds, rp_inds = get_paired_inds(meta)
R, _, = orthogonal_procrustes(embed[lp_inds], embed[rp_inds])
left_inds = meta[meta["left"]]["inds"]
right_inds = meta[meta["right"]]["inds"]
embed[left_inds] = embed[left_inds] @ R
pdist = pairwise_distances(embed, metric="cosine")
# %% [markdown]
# ## Compute distances between paths
print("Computing pairwise distances between paths...")
path_dist_mat = np.zeros((len(paths), len(paths)))
for i in range(len(paths)):
for j in range(len(paths)):
p1 = paths[i]
p2 = paths[j]
dist_sum = 0
for t in range(path_len):
dist = pdist[p1[t], p2[t]]
dist_sum += dist
path_dist_mat[i, j] = dist_sum
path_indicator_mat = np.zeros((len(paths), len(adj)), dtype=int)
for i, p in enumerate(paths):
for j, visit in enumerate(p):
path_indicator_mat[i, visit] = j + 1
# %% [markdown]
# ## Cluster and look at distance mat
Z = linkage(squareform(path_dist_mat), method="average")
sns.clustermap(
path_dist_mat,
figsize=(20, 20),
row_linkage=Z,
col_linkage=Z,
xticklabels=False,
yticklabels=False,
)
stashfig("agglomerative-path-dist-mat" + basename)
# %% [markdown]
# ##
from graspy.embed import select_dimension
print("Running CMDS on path dissimilarity...")
X = path_dist_mat
cmds = ClassicalMDS(
dissimilarity="precomputed", n_components=int(np.ceil(np.log2(np.min(X.shape))))
)
path_embed = cmds.fit_transform(X)
elbows, elbow_vals = select_dimension(cmds.singular_values_, n_elbows=3)
rng = np.arange(1, len(cmds.singular_values_) + 1)
elbows = np.array(elbows)
fig, ax = plt.subplots(1, 1, figsize=(8, 4))
pc = ax.scatter(elbows, elbow_vals, color="red", label="ZG")
pc.set_zorder(10)
ax.plot(rng, cmds.singular_values_, "o-")
ax.legend()
stashfig("cmds-screeplot" + basename)
# %% [markdown]
# ##
pairplot(path_embed, alpha=0.02)
stashfig("cmds-pairs-all" + basename)
# %% [markdown]
# ##
print("Running AGMM on CMDS embedding")
n_components = 4
agmm = AutoGMMCluster(max_components=40, n_jobs=-2)
pred = agmm.fit_predict(path_embed[:, :n_components])
print(f"Number of clusters: {agmm.n_components_}")
# %% [markdown]
# ##
pairplot(
path_embed[:, :n_components],
alpha=0.02,
labels=pred,
palette=cc.glasbey_light,
legend_name="Cluster",
)
stashfig("pairplot-agmm-cmds" + basename)
# %% [markdown]
# ##
from sklearn.manifold import Isomap
iso = Isomap(n_components=int(np.ceil(np.log2(np.min(X.shape)))), metric="precomputed")
iso_embed = iso.fit_transform(path_dist_mat)
pairplot(iso_embed, alpha=0.02)
stashfig("isomap-pairs-all" + basename)
# %% [markdown]
# ##
svals = iso.kernel_pca_.lambdas_
elbows, elbow_vals = select_dimension(svals, n_elbows=3)
rng = np.arange(1, len(svals) + 1)
elbows = np.array(elbows)
|
#!/usr/bin/env python
'''
mcu: Modeling and Crystallographic Utilities
Copyright (C) 2019 <NAME>. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Email: <NAME> <<EMAIL>>
'''
# This is the only place needed to be modified
# The path for the libwannier90 library
W90LIB = '/panfs/roc/groups/6/gagliard/phamx494/pyWannier90/src'
import sys
sys.path.append(W90LIB)
import importlib
found = importlib.util.find_spec('libwannier90') is not None
if found == True:
import libwannier90
else:
print('WARNING: Check the installation of libwannier90 and its path in pyscf/pbc/tools/pywannier90.py')
print('libwannier90 path: ' + W90LIB)
print('libwannier90 can be found at: https://github.com/hungpham2017/pyWannier90')
raise ImportError
import numpy as np
import scipy
import mcu
from mcu.vasp import const
from mcu.cell import utils as cell_utils
def angle(v1, v2):
'''
Return the angle (in radians) between v1 and v2
'''
v1 = np.asarray(v1)
v2 = np.asarray(v2)
cosa = v1.dot(v2)/ np.linalg.norm(v1) / np.linalg.norm(v2)
return np.arccos(cosa)
def transform(x_vec, z_vec):
'''
Construct a transformation matrix to transform r_vec to the new coordinate system defined by x_vec and z_vec
'''
x_vec = x_vec/np.linalg.norm(np.asarray(x_vec))
z_vec = z_vec/np.linalg.norm(np.asarray(z_vec))
assert x_vec.dot(z_vec) == 0 # x and z have to be orthogonal to one another
y_vec = np.cross(x_vec,z_vec)
new = np.asarray([x_vec, y_vec, z_vec])
original = np.asarray([[1,0,0],[0,1,0],[0,0,1]])
tran_matrix = np.empty([3,3])
for row in range(3):
for col in range(3):
tran_matrix[row,col] = np.cos(angle(original[row],new[col]))
return tran_matrix.T
def cartesian_prod(arrays, out=None, order = 'C'):
'''
This function is similar to lib.cartesian_prod of PySCF, except the output can be in Fortran or in C order
'''
arrays = [np.asarray(x) for x in arrays]
dtype = np.result_type(*arrays)
nd = len(arrays)
dims = [nd] + [len(x) for x in arrays]
if out is None:
out = np.empty(dims, dtype)
else:
out = np.ndarray(dims, dtype, buffer=out)
tout = out.reshape(dims)
shape = [-1] + [1] * nd
for i, arr in enumerate(arrays):
tout[i] = arr.reshape(shape[:nd-i])
return tout.reshape((nd,-1),order=order).T
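# Quick worked example (C order): cartesian_prod([np.arange(2), np.arange(2)]) returns
# array([[0, 0], [0, 1], [1, 0], [1, 1]]), i.e. every index combination with the last array varying fastest.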
def periodic_grid(lattice, grid = [50,50,50], supercell = [1,1,1], order = 'C'):
'''
Generate a periodic grid for the unit/computational cell in F/C order
Note: coords has the same unit as lattice
'''
ngrid = np.asarray(grid)
qv = cartesian_prod([np.arange(-ngrid[i]*(supercell[i]//2),ngrid[i]*((supercell[i]+1)//2)) for i in range(3)], order=order)
a_frac = np.einsum('i,ij->ij', 1./ngrid, lattice)
coords = np.dot(qv, a_frac)
# Compute weight
ngrids = np.prod(grid)
ncells = np.prod(supercell)
weights = np.empty(ngrids*ncells)
vol = abs(np.linalg.det(lattice))
weights[:] = vol / ngrids / ncells
return coords, weights
def R_r(r_norm, r = 1, zona = 1):
'''
Radial functions used to compute \Theta_{l,m_r}(\theta,\phi)
Note: r_norm has the unit of Bohr
'''
if r == 1:
R_r = 2 * zona**(3/2) * np.exp(-zona*r_norm)
elif r == 2:
R_r = 1 / 2 / np.sqrt(2) * zona**(3/2) * (2 - zona*r_norm) * np.exp(-zona*r_norm/2)
else:
R_r = np.sqrt(4/27) * zona**(3/2) * (1 - 2*zona*r_norm/3 + 2*(zona**2)*(r_norm**2)/27) * np.exp(-zona*r_norm/3)
return R_r
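# Quick sanity check of the radial functions above: at r_norm = 0 with zona = 1,
# R_r(0, r=1) = 2.0, R_r(0, r=2) = 1/np.sqrt(2) and R_r(0, r=3) = np.sqrt(4/27).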
def theta(func, cost, phi):
'''
Basic angular functions (s,p,d,f) used to compute \Theta_{l,m_r}(\theta,\phi)
'''
if func == 's': # s
theta = 1 / np.sqrt(4 * np.pi) * np.ones([cost.shape[0]])
elif func == 'pz':
theta = np.sqrt(3 / 4 / np.pi) * cost
elif func == 'px':
sint = np.sqrt(1 - cost**2)
theta = np.sqrt(3 / 4 / np.pi) * sint * np.cos(phi)
elif func == 'py':
sint = np.sqrt(1 - cost**2)
theta = np.sqrt(3 / 4 / np.pi) * sint * np.sin(phi)
elif func == 'dz2':
theta = np.sqrt(5 / 16 / np.pi) * (3*cost**2 - 1)
elif func == 'dxz':
sint = np.sqrt(1 - cost**2)
theta = np.sqrt(15 / 4 / np.pi) * sint * cost * np.cos(phi)
elif func == 'dyz':
sint = np.sqrt(1 - cost**2)
theta = np.sqrt(15 / 4 / np.pi) * sint * cost * np.sin(phi)
elif func == 'dx2-y2':
sint = np.sqrt(1 - cost**2)
theta = np.sqrt(15 / 16 / np.pi) * (sint**2) * np.cos(2*phi)
elif func == 'dxy':
sint = np.sqrt(1 - cost**2)
theta = np.sqrt(15 / 16 / np.pi) * (sint**2) * np.sin(2*phi)
elif func == 'fz3':
theta = np.sqrt(7) / 4 / np.sqrt(np.pi) * (5*cost**3 - 3*cost)
elif func == 'fxz2':
sint = np.sqrt(1 - cost**2)
theta = np.sqrt(21) / 4 / np.sqrt(2*np.pi) * (5*cost**2 - 1) * sint * np.cos(phi)
elif func == 'fyz2':
sint = np.sqrt(1 - cost**2)
theta = np.sqrt(21) / 4 / np.sqrt(2*np.pi) * (5*cost**2 - 1) * sint * np.sin(phi)
elif func == 'fz(x2-y2)':
sint = np.sqrt(1 - cost**2)
theta = np.sqrt(105) / 4 / np.sqrt(np.pi) * sint**2 * cost * np.cos(2*phi)
elif func == 'fxyz':
sint = np.sqrt(1 - cost**2)
theta = np.sqrt(105) / 4 / np.sqrt(np.pi) * sint**2 * cost * np.sin(2*phi)
elif func == 'fx(x2-3y2)':
sint = np.sqrt(1 - cost**2)
theta = np.sqrt(35) / 4 / np.sqrt(2*np.pi) * sint**3 * (np.cos(phi)**2 - 3*np.sin(phi)**2) * np.cos(phi)
|
# -*- coding: utf-8 -*-
"""
These are some useful functions used in CSD methods,
They include CSD source profiles to be used as ground truths,
placement of electrodes in 1D, 2D and 3D., etc
These scripts are based on Grzegorz Parka's,
Google Summer of Code 2014, INFC/pykCSD
This was written by :
<NAME>, <NAME>
Laboratory of Neuroinformatics,
Nencki Institute of Experimental Biology, Warsaw.
"""
from __future__ import division
import numpy as np
from numpy import exp
import quantities as pq
def patch_quantities():
"""patch quantities with the SI unit Siemens if it does not exist"""
for symbol, prefix, definition, u_symbol in zip(
['siemens', 'S', 'mS', 'uS', 'nS', 'pS'],
['', '', 'milli', 'micro', 'nano', 'pico'],
[pq.A / pq.V, pq.A / pq.V, 'S', 'mS', 'uS', 'nS'],
[None, None, None, None, u'µS', None]):
if type(definition) is str:
definition = lastdefinition / 1000
if not hasattr(pq, symbol):
setattr(pq, symbol, pq.UnitQuantity(
prefix + 'siemens',
definition,
symbol=symbol,
u_symbol=u_symbol))
lastdefinition = definition
return
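# Usage sketch: after patch_quantities() has been called once, conductance units such as pq.S,
# pq.mS, pq.uS, pq.nS and pq.pS are available on the quantities package, each defined as 1/1000
# of the preceding unit.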
def check_for_duplicated_electrodes(elec_pos):
"""Checks for duplicate electrodes
Parameters
----------
elec_pos : np.array
Returns
-------
has_duplicated_elec : Boolean
    True when every electrode position is unique (i.e. no duplicates were found)
"""
unique_elec_pos = np.vstack(list({tuple(row) for row in elec_pos}))
has_duplicated_elec = unique_elec_pos.shape == elec_pos.shape
return has_duplicated_elec
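# Quick worked example: np.array([[0, 0], [1, 0], [0, 0]]) collapses to two unique rows, so the
# shapes differ and the function returns False; with all rows distinct it returns True.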
def distribute_srcs_1D(X, n_src, ext_x, R_init):
"""Distribute sources in 1D equally spaced
Parameters
----------
X : np.arrays
points at which CSD will be estimated
n_src : int
number of sources to be included in the model
ext_x : floats
how much should the sources extend the area X
R_init : float
Same as R in 1D case
Returns
-------
X_src : np.arrays
positions of the sources
R : float
effective radius of the basis element
"""
X_src = np.mgrid[(np.min(X) - ext_x):(np.max(X)
|
import random
import collections
import dataclasses
import itertools
import numpy as np
import matplotlib.pyplot as plt
import evalfuncs as ef
cars = ["hiro", "shiori", "asako", "kyoko", "yukio", "miyako"]
tvos = [20, 40, 80]
vsps = [40, 60, 80, 120]
funcs = ["one", "two"]
#weights = [1.0, 0.5]
persons_eval = [
[10, 20, 30, 40, 50, 60],
[10, 20, 30, 40, 50, 60],
[10, 20, 30, 40, 50, 60],
]
@dataclasses.dataclass
class Scores:
score : np.ndarray
func_total : np.ndarray
func_rank : np.ndarray
tvo_total : np.ndarray
tvo_rank : np.ndarray
cache = {}
def read_csv(filepath):
if filepath in cache:
return cache[filepath]
else:
pass
def get_eval():
return (int(random.random()*10), int(random.random()*100))
# Calculate the scores
def calc_score():
data = np.empty((len(vsps), len(tvos), len(cars), len(funcs)), float)
for i, j, k in itertools.product(range(len(vsps)), range(len(tvos)), range(len(cars))):
data[i, j, k] = get_eval()
# Transpose the data array so it is easier to process
data = np.transpose(data, axes=(0, 1, 3, 2))
# Convert each evaluation metric into a standardized score
data_std = 50 + data / np.std(data, axis=3, keepdims=True) * 10
# Apply weights to the standardized scores
weights_t = np.reshape(ef.weights, (-1, 1))
score = data_std * weights_t
# Aggregate the scores into an overall score for each accelerator opening
# and also compute the rankings
func_total = np.sum(score, axis=2)
func_rank = np.argsort(-func_total, axis=2)
|
"""
Read a file or just convert an alternative list to numpy array
"""
from typing import AnyStr, List
from numpy import ndarray
from .readDataFile import readDataFile
import numpy
from ...tk.arrayTK import transpose as array_transpose
def readFileOrList(
file_name, # type: AnyStr
data_list, # type: ndarray
skip_rows=0, # type: int
dtype='float', # type: AnyStr
transpose=False # type: bool
):
# type: (AnyStr, List) -> ndarray
"""Read a file or just convert an alternative list to numpy array
If both file_name and data_list are given
then the content of the data file will be returned.
Args:
file_name (str): input file name
data_list (list): a list of numbers
skip_rows=0 (int): number of rows to skip
dtype="float" (str): data type of the input
transpose=False (bool): whether to transpose data
Returns:
a numpy ndarray
"""
if file_name is not None:
return readDataFile(
file_name, skip_rows, dtype, transpose)
elif data_list is not None:
return array_transpose(numpy.array(data_list)
|
"""
image_utilities.py (author: <NAME> / git: ankonzoid)
Image utilities class that helps with image data IO and processing.
"""
import os, glob, random, errno
import numpy as np
import scipy.misc
from multiprocessing import Pool
class ImageUtils(object):
def __init__(self):
# Run settings
self.img_shape = None
self.flatten_before_encode = False
# Directories
self.query_dir = None
self.answer_dir = None
self.img_train_raw_dir = None
self.img_inventory_raw_dir = None
self.img_train_dir = None
self.img_inventory_dir = None
############################################################
###
###
### External functions
###
###
############################################################
"""
Set configuration
"""
def configure(self, info):
# Run settings
self.img_shape = info["img_shape"]
self.flatten_before_encode = info["flatten_before_encode"]
# Directories
self.query_dir = info["query_dir"]
self.answer_dir = info["answer_dir"]
self.img_train_raw_dir = info["img_train_raw_dir"]
self.img_inventory_raw_dir = info["img_inventory_raw_dir"]
self.img_train_dir = info["img_train_dir"]
self.img_inventory_dir = info["img_inventory_dir"]
"""
Load raw images from a given directory, resize them, and save them
"""
def raw2resized_load_save(self, raw_dir=None, processed_dir=None, img_shape=None):
(ypixels_force, xpixels_force) = img_shape
gray_scale = False
# Extract filenames from dir
raw_filenames_list, n_files = self.extract_filenames(raw_dir, 1)
for i, raw_filename in enumerate(raw_filenames_list):
# Read raw image
img_raw = self.read_img(raw_filename, gray_scale=gray_scale)
# Process image
img_resized = self.force_resize_img(img_raw, ypixels_force, xpixels_force)
# Save processed image
name, tag = self.extract_name_tag(raw_filename)
processed_shortname = name + "_resized." + tag
processed_filename = os.path.join(processed_dir, processed_shortname)
self.save_img(img_resized, processed_filename)
# Print process progress
print("[{0}/{1}] Resized and saved to '{2}'...".format(
i+1, n_files, processed_filename))
"""
Read a raw directory of images, and load as numpy array to memory
"""
def raw2resizednorm_load(self, raw_dir=None, img_shape=None):
(ypixels_force, xpixels_force) = img_shape
gray_scale = False
# Extract filenames from dir
raw_filenames_list, n_files = self.extract_filenames(raw_dir, 1)
img_list = []
for i, raw_filename in enumerate(raw_filenames_list):
# Read raw image
img_raw = self.read_img(raw_filename, gray_scale=gray_scale)
# Resize image (if not of shape (ypixels_force, xpixels_force))
img_resized = img_raw
if img_raw.shape[:2] != img_shape:
img_resized = self.force_resize_img(img_resized, ypixels_force, xpixels_force)
# Normalize image
img_resizednorm = self.normalize_img_data(img_resized)
# Append processed image to image list
img_list.append(img_resizednorm)
# Print process progress
print("[{0}/{1}] Loaded and processed '{2}'...".format(
i + 1, n_files, raw_filename))
# Convert image list to numpy array
img_list = np.array(img_list)
|
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Dynamic programming algorithm generators.
Currently implements the following:
- Matrix-chain multiplication
- Longest common subsequence
- Optimal binary search tree (Aho et al., 1974)
See "Introduction to Algorithms" 3ed (CLRS3) for more information.
"""
# pylint: disable=invalid-name
from typing import Tuple
import chex
from clrs._src import probing
from clrs._src import specs
import numpy as np
_Array = np.ndarray
_Out = Tuple[_Array, probing.ProbesDict]
def matrix_chain_order(p: _Array) -> _Out:
"""Matrix-chain multiplication."""
chex.assert_rank(p, 1)
probes = probing.initialize(specs.SPECS['matrix_chain_order'])
A_pos = np.arange(p.shape[0])
probing.push(
probes,
specs.Stage.INPUT,
next_probe={
'pos': np.copy(A_pos) * 1.0 / A_pos.shape[0],
'p': np.copy(p)
})
m = np.zeros((p.shape[0], p.shape[0]))
s = np.zeros((p.shape[0], p.shape[0]))
msk = np.zeros((p.shape[0], p.shape[0]))
for i in range(1, p.shape[0]):
m[i, i] = 0
msk[i, i] = 1
while True:
prev_m = np.copy(m)
prev_msk = np.copy(msk)
probing.push(
probes,
specs.Stage.HINT,
next_probe={
'pred_h': probing.array(np.copy(A_pos)),
'm': np.copy(prev_m),
's_h': np.copy(s),
'msk': np.copy(msk)
})
for i in range(1, p.shape[0]):
for j in range(i + 1, p.shape[0]):
flag = prev_msk[i, j]
for k in range(i, j):
if prev_msk[i, k] == 1 and prev_msk[k + 1, j] == 1:
msk[i, j] = 1
q = prev_m[i, k] + prev_m[k + 1, j] + p[i - 1] * p[k] * p[j]
if flag == 0 or q < m[i, j]:
m[i, j] = q
s[i, j] = k
flag = 1
if np.all(prev_m == m):
break
probing.push(probes, specs.Stage.OUTPUT, next_probe={'s': np.copy(s)})
probing.finalize(probes)
return s[1:, 1:], probes
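# Usage sketch: for the classic CLRS dimension sequence p = np.array([30, 35, 15, 5, 10, 20, 25]),
# matrix_chain_order(p) returns the table of optimal split points s together with the recorded probes.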
def lcs_length(x: _Array, y: _Array) -> _Out:
"""Longest common subsequence."""
chex.assert_rank([x, y], 1)
probes = probing.initialize(specs.SPECS['lcs_length'])
x_pos = np.arange(x.shape[0])
y_pos = np.arange(y.shape[0])
b = np.zeros((x.shape[0], y.shape[0]))
c = np.zeros((x.shape[0], y.shape[0]))
probing.push(
probes,
specs.Stage.INPUT,
next_probe={
'string': probing.strings_id(x_pos, y_pos),
'pos': probing.strings_pos(x_pos, y_pos),
'key': probing.array_cat(np.concatenate([np.copy(x)
|
# Description: Local ancillary functions.
#
# Author: <NAME>
# E-mail: <EMAIL>
import numpy as np
import matplotlib.pyplot as plt
from scipy.special import erfinv
from netCDF4 import num2date
from mpl_toolkits.basemap import Basemap
from pygamman import gamma_n as gmn
from pygeodesy import Datums, VincentyError
from pygeodesy.ellipsoidalVincenty import LatLon as LatLon
from pygeodesy.sphericalNvector import LatLon as LatLon_sphere
from pandas import Series
def seasonal_avg(t, F):
tmo = np.array([ti.month for ti in t])
ftmo = [tmo==mo for mo in range(1, 13)]
return np.array([F[ft].mean() for ft in ftmo])
def seasonal_std(t, F):
"""
USAGE
-----
F_seasonal = seasonal_std(t, F)
Calculates the seasonal standard deviation of variable F(t).
Assumes 't' is a 'datetime.datetime' object.
"""
tmo = np.array([ti.month for ti in t])
ftmo = [tmo==mo for mo in range(1, 13)]
return np.array([F[ft].std() for ft in ftmo])
def deseason(t, F):
Fssn = seasonal_avg(t, F)
nyears = int(t.size/12)
aux = np.array([])
for n in range(nyears):
aux = np.concatenate((aux, Fssn))
return F - aux
def blkavgt(t, x, every=2):
"""
Block-averages a variable x(t). Returns its block average
and standard deviation and new t axis.
"""
nt = t.size
t = np.array([tt.toordinal() for tt in t])
tblk, xblk, xblkstd = np.array([]), np.array([]), np.array([])
for i in range(every, nt+every, every):
xi = x[i-every:i]
tblk = np.append(tblk, t[i-every:i].mean())
xblk = np.append(xblk, xi.mean())
xblkstd = np.append(xblkstd, xi.std())
tblk = num2date(tblk, units='days since 01-01-01')
return tblk, xblk, xblkstd
def blkavg(x, y, every=2):
"""
Block-averages a variable y(x). Returns its block average
and standard deviation and new x axis.
"""
nx = x.size
xblk, yblk, yblkstd = np.array([]), np.array([]), np.array([])
for i in range(every, nx+every, every):
yi = y[i-every:i]
xblk = np.append(xblk, x[i-every:i].mean())
yblk = np.append(yblk, yi.mean())
yblkstd = np.append(yblkstd, yi.std())
return xblk, yblk, yblkstd
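# Quick worked example: blkavg(np.arange(6), np.arange(6), every=2) returns block centres
# array([0.5, 2.5, 4.5]) for both x and y, with a standard deviation of 0.5 in every block.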
def blksum(x, y, every=2):
"""
Sums a variable y(x) in blocks. Returns its block average
and new x axis.
"""
nx = x.size
xblk, yblksum = np.array([]), np.array([])
for i in range(every, nx+every, every):
xblk = np.append(xblk, x[i-every:i].mean())
yblksum = np.append(yblksum, y[i-every:i].sum())
return xblk, yblksum
def stripmsk(arr, mask_invalid=False):
if mask_invalid:
arr = np.ma.masked_invalid(arr)
if np.ma.isMA(arr):
msk = arr.mask
arr = arr.data
arr[msk] = np.nan
return arr
def rcoeff(x, y):
"""
USAGE
-----
r = rcoeff(x, y)
Computes the Pearson correlation coefficient r between series x and y.
References
----------
e.g., <NAME> (2014),
Data analysis methods in physical oceanography,
p. 257, equation 3.97a.
"""
x,y = map(np.asanyarray, (x,y))
# Sample size.
assert x.size==y.size
N = x.size
# Demeaned series.
x = x - x.mean()
y = y - y.mean()
# Standard deviations.
sx = x.std()
sy = y.std()
## Covariance between series. Choosing unbiased normalization (N-1).
Cxy = np.sum(x*y)/(N-1)
## Pearson correlation coefficient r.
r = Cxy/(sx*sy)
return r
def near(x, x0, npts=1, return_index=False):
"""
USAGE
-----
xnear = near(x, x0, npts=1, return_index=False)
Finds 'npts' points (defaults to 1) in array 'x'
that are closest to a specified 'x0' point.
If 'return_index' is True (defauts to False),
then the indices of the closest points are
returned. The indices are ordered in order of
closeness.
"""
x = list(x)
xnear = []
xidxs = []
for n in range(npts):
idx = np.nanargmin(np.abs(np.array(x)-x0))
xnear.append(x.pop(idx))
if return_index:
xidxs.append(idx)
if return_index: # Sort indices according to the proximity of wanted points.
xidxs = [xidxs[i] for i in np.argsort(xnear).tolist()]
xnear.sort()
if npts==1:
xnear = xnear[0]
if return_index:
xidxs = xidxs[0]
else:
xnear = np.array(xnear)
if return_index:
return xidxs
else:
return xnear
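# Quick worked example: near([0, 1, 2, 3], 1.2) returns 1, the single value closest to 1.2;
# with return_index=True it returns that value's index (here 1) instead.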
def near2(x, y, x0, y0, npts=1, return_index=False):
"""
USAGE
-----
xnear, ynear = near2(x, y, x0, y0, npts=1, return_index=False)
Finds 'npts' points (defaults to 1) in arrays 'x' and 'y'
that are closest to a specified '(x0, y0)' point. If
'return_index' is True (defauts to False), then the
indices of the closest point(s) are returned.
Example
-------
>>> x = np.arange(0., 100., 0.25)
>>> y = np.arange(0., 100., 0.25)
>>> x, y = np.meshgrid(x, y)
>>> x0, y0 = 44.1, 30.9
>>> xn, yn = near2(x, y, x0, y0, npts=1)
>>> print("(x0, y0) = (%f, %f)"%(x0, y0))
>>> print("(xn, yn) = (%f, %f)"%(xn, yn))
"""
x, y = map(np.array, (x, y))
shp = x.shape
xynear = []
xyidxs = []
dx = x - x0
dy = y - y0
dr = dx**2 + dy**2
for n in range(npts):
xyidx = np.unravel_index(np.nanargmin(dr), shp)
if return_index:
xyidxs.append(xyidx)
xyn = (x[xyidx], y[xyidx])
xynear.append(xyn)
dr[xyidx] = np.nan
if npts==1:
xynear = xynear[0]
if return_index:
xyidxs = xyidxs[0]
if return_index:
return xyidxs
else:
return xynear
def xy2dist(x, y, cyclic=False, datum='WGS84'):
"""
USAGE
-----
d = xy2dist(x, y, cyclic=False, datum='WGS84')
"""
if datum != 'Sphere':
xy = [LatLon(y0, x0, datum=Datums[datum]) for x0, y0 in zip(x, y)]
else:
xy = [LatLon_sphere(y0, x0) for x0, y0 in zip(x, y)]
d = np.array([xy[n].distanceTo(xy[n+1]) for n in range(len(xy)-1)])
return np.append(0, np.cumsum(d))
def lon180to360(lon):
"""
Converts longitude values in the range [-180,+180]
to longitude values in the range [0,360].
"""
lon = np.asanyarray(lon)
return (lon + 360.0) % 360.0
def lon360to180(lon):
"""
Converts longitude values in the range [0,360]
to longitude values in the range [-180,+180].
"""
lon = np.asanyarray(lon)
return ((lon + 180.) % 360.) - 180.
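# Quick worked examples: lon180to360(-90.) returns 270.0 and lon360to180(270.) returns -90.0;
# longitudes already inside the target range are returned unchanged.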
def rot_vec(u, v, angle=-45, degrees=True):
"""
USAGE
-----
u_rot,v_rot = rot_vec(u,v,angle=-45.,degrees=True)
Returns the rotated vector components (`u_rot`,`v_rot`)
from the zonal-meridional input vector components (`u`,`v`).
The rotation is done using the angle `angle` positive counterclockwise
(trigonometric convention). If `degrees` is set to `True` (default),
then `angle` is converted to radians.
Example
-------
>>> from matplotlib.pyplot import quiver
>>> from ap_tools.utils import rot_vec
>>> u = -1.
>>> v = -1.
>>> u2,v2 = rot_vec(u,v, angle=-30.)
"""
u,v = map(np.asanyarray, (u,v))
if degrees:
angle = angle*np.pi/180. # Degrees to radians.
u_rot = +u*np.cos(angle) + v*np.sin(angle) # Usually the across-shore component.
v_rot = -u*np.sin(angle) + v*np.cos(angle) # Usually the along-shore component.
return u_rot, v_rot
def angle_isobath(xiso, yiso, cyclic=True):
R = 6371000.0 # Mean radius of the earth in meters (6371 km), from gsw.constants.earth_radius.
deg2rad = np.pi/180. # [rad/deg]
if cyclic: # Add cyclic point.
xiso = np.append(xiso, xiso[0])
yiso = np.append(yiso, yiso[0])
# From the coordinates of the isobath, find the angle it forms with the
# zonal axis, using points k+1 and k.
shth = yiso.size-1
theta = np.zeros(shth)
for k in range(shth):
dyk = R*(yiso[k+1]-yiso[k])
dxk = R*(xiso[k+1]-xiso[k])*np.cos(yiso[k]*deg2rad)
theta[k] = np.arctan2(dyk,dxk)
xisom = 0.5*(xiso[1:] + xiso[:-1])
yisom = 0.5*(yiso[1:] + yiso[:-1])
return xisom, yisom, theta
def fmt_isobath(cs, fontsize=8, fmt='%g', inline=True, inline_spacing=7, manual=True, **kw):
"""
Formats the labels of isobath contours. `manual` is set to `True` by default,
but can be `False`, or a tuple/list of tuples with the coordinates of the labels.
All options are passed to plt.clabel().
"""
isobstrH = plt.clabel(cs, fontsize=fontsize, fmt=fmt, inline=inline, \
inline_spacing=inline_spacing, manual=manual, **kw)
for ih in range(0, len(isobstrH)): # Appends 'm' for meters at the end of the label.
isobstrh = isobstrH[ih]
isobstr = isobstrh.get_text()
isobstr = isobstr.replace('-','') + ' m'
isobstrh.set_text(isobstr)
def gamman(Sp, T, p, x, y):
assert Sp.shape==T.shape
n = np.size(p)
skel = Sp*np.nan
Spmsk = np.ma.masked_invalid(Sp).mask
Tmsk = np.ma.masked_invalid(T).mask
gammamsk = np.logical_or(Tmsk, Spmsk)
Sp[Spmsk] = 35
T[Tmsk] = 0
gm, dgl, dgh = gmn(Sp, T, p, n, x, y)
# gm, dgl, dgh = gmn.gamma_n(Sp, T, p, n, x, y)
gm[gammamsk] = np.nan
return gm, dgl, dgh
def bmap_antarctica(ax, resolution='h'):
"""
Full Antartica basemap (Polar Stereographic Projection).
"""
m = Basemap(boundinglat=-60,
lon_0=60,
projection='spstere',
resolution=resolution,
ax=ax)
m.fillcontinents(color='0.9', zorder=9)
m.drawcoastlines(zorder=10)
m.drawmapboundary(zorder=-9999)
m.drawmeridians(np.arange(-180, 180, 20), linewidth=0.15, labels=[1, 1, 1, 1], zorder=12)
m.drawparallels(np.arange(-90, -50, 5), linewidth=0.15, labels=[0, 0, 0, 0], zorder=12)
return m
def near(x, x0, npts=1, return_index=False):
"""
USAGE
-----
xnear = near(x, x0, npts=1, return_index=False)
Finds 'npts' points (defaults to 1) in array 'x'
that are closest to a specified 'x0' point.
If 'return_index' is True (defauts to False),
then the indices of the closest points are
returned. The indices are ordered in order of
closeness.
"""
x = list(x)
xnear = []
xidxs = []
for n in range(npts):
idx = np.nanargmin(np.abs(np.array(x)-x0))
xnear.append(x.pop(idx))
if return_index:
xidxs.append(idx)
if return_index: # Sort indices according to the proximity of wanted points.
xidxs = [xidxs[i] for i in np.argsort(xnear).tolist()]
xnear.sort()
if npts==1:
xnear = xnear[0]
if return_index:
xidxs = xidxs[0]
else:
xnear = np.array(xnear)
if return_index:
return xidxs
else:
return xnear
def UVz2iso(U, V, hisopyc, z):
ny, nx = hisopyc.shape
Uisopyc = np.nan*np.ones((ny,nx))
Visopyc = np.nan*np.ones((ny,nx))
for j in range(ny):
print("Row %d of %d"%(j+1,ny))
for i in range(nx):
if np.isnan(hisopyc[j,i]):
continue
else:
Uisopyc[j,i] = np.interp(hisopyc[j,i], z, U[:,j,i])
Visopyc[j,i] = np.interp(hisopyc[j,i], z, V[:,j,i])
return Uisopyc, Visopyc
def isopyc_depth(dens0, z, isopyc=1027.75, dzref=1.):
"""
USAGE
-----
hisopyc = isopyc_depth(z, dens0, isopyc=1027.75)
Calculates the spatial distribution of the depth of a specified isopycnal 'isopyc'
(defaults to 1027.75 kg/m3) from a 2D density section rho0 (in kg/m3) with shape
(nz,ny,nx) and a 1D depth array 'z' (in m) with shape (nz).
'dzref' is the desired resolution for the refined depth array (defaults to 1 m) which
is generated for calculating the depth of the isopycnal. The smaller 'dzref', the smoother
the resolution of the returned isopycnal depth array 'hisopyc'.
"""
dens0, z = map(np.array, (dens0, z))
if not np.all(np.diff(z)>0):
z = np.flipud(z)
dens0 = np.flipud(dens0)
if dens0.ndim==2:
nz, nx = dens0.shape
else:
nz = dens0.size
nx = 1
zref = np.arange(z.min(), z.max()+dzref, dzref)
if np.ma.isMaskedArray(dens0):
dens0 = np.ma.filled(dens0, np.nan)
hisopyc = np.nan*np.ones((nx))
for i in range(nx):
if nx==1:
dens0i = dens0
else:
dens0i = dens0[:,i]
cond1 = np.logical_or(isopyc<np.nanmin(dens0i), np.nanmax(dens0i)<isopyc)
if np.logical_or(cond1, np.isnan(dens0i).all()):
continue
else:
dens0ref = np.interp(zref, z, dens0i) # Refined density profile.
fz = near(dens0ref, isopyc, return_index=True)
try:
hisopyc[i] = zref[fz]
except ValueError:
print("Warning: More than 1 (%d) nearest depths found. Using the median of the depths for point (i=%d)."%(fz.sum(), i))
hisopyc[i] = np.nanmedian(zref[fz])
return hisopyc
def isopyc_depth2(z, dens0, isopyc=1027.75, dzref=1.):
"""
USAGE
-----
hisopyc = isopyc_depth(z, dens0, isopyc=1027.75)
Calculates the spatial distribution of the depth of a specified isopycnal 'isopyc'
(defaults to 1027.75 kg/m3) from a 3D density array rho0 (in kg/m3) with shape
(nz,ny,nx) and a 1D depth array 'z' (in m) with shape (nz).
'dzref' is the desired resolution for the refined depth array (defaults to 1 m) which
is generated for calculating the depth of the isopycnal. The smaller 'dzref', the smoother
the resolution of the returned isopycnal depth array 'hisopyc'.
"""
z, dens0 = map(np.asanyarray, (z, dens0))
ny, nx = dens0.shape[1:]
if not np.all(np.diff(z) > 0):
z = np.flipud(z)
dens0 = np.flipud(dens0)
zref = np.arange(z.min(), z.max(), dzref)
if np.ma.isMaskedArray(dens0):
dens0 = np.ma.filled(dens0, np.nan)
hisopyc = np.nan*np.ones((ny,nx))
for j in range(ny):
print("Row %d of %d"%(j+1,ny))
for i in range(nx):
dens0ij = dens0[:,j,i]
if np.logical_or(np.logical_or(isopyc<np.nanmin(dens0ij), np.nanmax(dens0ij)<isopyc), np.isnan(dens0ij).all()):
continue
else:
dens0ref = np.interp(zref, z, dens0ij) # Refined density profile.
dens0refn = near(dens0ref, isopyc)
fz=dens0ref==dens0refn
try:
hisopyc[j,i] = zref[fz]
except ValueError:
print("Warning: More than 1 (%d) nearest depths found. Using the median of the depths for point (j=%d,i=%d)."%(fz.sum(), j, i))
hisopyc[j,i] = np.nanmedian(zref[fz])
|
#!/usr/bin/env python
"""
Background:
--------
oer_Glider_contour.py
Purpose:
--------
Contour glider profile data as a function of dive/date
History:
--------
"""
#System Stack
import datetime
import argparse
import numpy as np
import pandas as pd
# Visual Stack
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from matplotlib.dates import YearLocator, WeekdayLocator, MonthLocator, DayLocator, HourLocator, DateFormatter
import matplotlib.ticker as ticker
import cmocean
from io_utils.EcoFOCI_db_io import EcoFOCI_db_oculus
__author__ = '<NAME>'
__email__ = '<EMAIL>'
__created__ = datetime.datetime(2016, 9, 22)
__modified__ = datetime.datetime(2016, 9, 22)
__version__ = "0.1.0"
__status__ = "Development"
__keywords__ = 'arctic heat','ctd','FOCI', 'wood', 'kevin', 'alamo'
mpl.rcParams['axes.grid'] = False
mpl.rcParams['axes.edgecolor'] = 'white'
mpl.rcParams['axes.linewidth'] = 0.25
mpl.rcParams['grid.linestyle'] = '--'
mpl.rcParams['grid.linestyle'] = '--'
mpl.rcParams['xtick.major.size'] = 2
mpl.rcParams['xtick.minor.size'] = 1
mpl.rcParams['xtick.major.width'] = 0.25
mpl.rcParams['xtick.minor.width'] = 0.25
mpl.rcParams['ytick.major.size'] = 2
mpl.rcParams['ytick.minor.size'] = 1
mpl.rcParams['xtick.major.width'] = 0.25
mpl.rcParams['xtick.minor.width'] = 0.25
mpl.rcParams['ytick.direction'] = 'out'
mpl.rcParams['xtick.direction'] = 'out'
mpl.rcParams['ytick.color'] = 'grey'
mpl.rcParams['xtick.color'] = 'grey'
mpl.rcParams['font.family'] = 'Arial'
mpl.rcParams['svg.fonttype'] = 'none'
# Example of making your own norm. Also see matplotlib.colors.
# From <NAME>: This one gives two different linear ramps:
class MidpointNormalize(colors.Normalize):
def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
self.midpoint = midpoint
colors.Normalize.__init__(self, vmin, vmax, clip)
def __call__(self, value, clip=None):
# I'm ignoring masked values and all kinds of edge cases to make a
# simple example...
x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]
return np.ma.masked_array(np.interp(value, x, y))
"""----------------------------- Main -------------------------------------"""
parser = argparse.ArgumentParser(description='Oculus Glider datafile parser ')
parser.add_argument('filepath', metavar='filepath', type=str,
help='full path to file')
parser.add_argument('--maxdepth', type=float,
help="known bathymetric depth at location")
parser.add_argument('--paramspan', nargs='+', type=float,
help="max,min of parameter")
parser.add_argument('--divenum', type=int, nargs=2,
help='start and stop range for dive number')
parser.add_argument('--param', type=str,
help='plot parameter (temperature, salinity, do_sat, sig695nm')
parser.add_argument('--castdirection', type=str,
help='cast direction (u or d or all)')
parser.add_argument('--reverse_x', action="store_true",
help='plot axis in reverse')
parser.add_argument('--extend_plot', type=int,
help='days to prefil plot with blanks')
parser.add_argument('--bydivenum', action="store_true",
help='plot x as a function of divenum and time')
parser.add_argument('--bylat', action="store_true",
help='plot x as a function of lat')
parser.add_argument('--scatter', action="store_true",
help='plot sample scatter points')
parser.add_argument('--boundary', action="store_true",
help='plot boundary depth')
parser.add_argument('--latlon_vs_time', action="store_true",
help='plot lat/lon as a function of time')
args = parser.parse_args()
startcycle=args.divenum[0]
endcycle=args.divenum[1]
#get information from local config file - a json formatted file
config_file = 'EcoFOCI_config/db_config/db_config_oculus_local.pyini'
db_table = '2017_fall_sg401_sciencesubset'
db_table2 = '2017_fall_sg401_sciencesubset'
EcoFOCI_db = EcoFOCI_db_oculus()
(db,cursor) = EcoFOCI_db.connect_to_DB(db_config_file=config_file)
depth_array = np.arange(0,args.maxdepth+1,0.5)
num_cycles = EcoFOCI_db.count(table=db_table2, start=startcycle, end=endcycle)
temparray = np.ones((num_cycles,len(depth_array)))*np.nan
ProfileTime, ProfileLat = [],[]
cycle_col=0
if args.param in ['temperature']:
cmap = cmocean.cm.thermal
elif args.param in ['salinity','salinity_raw','conductivity_raw']:
cmap = cmocean.cm.haline
elif args.param in ['do_sat']:
cmap = cmocean.cm.delta_r
elif args.param in ['sig695nm','chl','chla','chlorophyl']:
cmap = cmocean.cm.algae
elif args.param in ['sig700nm','turb','turbidity']:
cmap = cmocean.cm.turbid
elif args.param in ['density_insitu','sigma_t','sigma_theta']:
cmap = cmocean.cm.dense
elif args.param in ['up_par','down_par']:
cmap = cmocean.cm.solar
elif args.param in ['dtemp_dpress','ddens_dpress']:
if args.boundary:
cmap = cmocean.cm.gray_r
else:
cmap = cmocean.cm.delta
else:
cmap = cmocean.cm.gray
### plot a single parameter as a function of time gouping by divenum (good for lat/lon)
if args.latlon_vs_time:
Profile_sb = EcoFOCI_db.read_location(table=db_table,
param=['latitude,longitude'],
dive_range=[startcycle,endcycle],
verbose=True)
Profile_nb = EcoFOCI_db.read_location(table=db_table2,
param=['latitude,longitude'],
dive_range=[startcycle,endcycle],
verbose=True)
fig = plt.figure(1, figsize=(12, 3), facecolor='w', edgecolor='w')
ax1 = fig.add_subplot(111)
xtime = np.array([Profile_sb[v]['time'] for k,v in enumerate(Profile_sb.keys())])
ydata = np.array([Profile_sb[v]['latitude'] for k,v in enumerate(Profile_sb.keys())])
plt.scatter(x=xtime, y=ydata,s=1,marker='.',color='#849B00')
xtime_nb = np.array([Profile_nb[v]['time'] for k,v in enumerate(Profile_nb.keys())])
ydata_nb = np.array([Profile_nb[v]['latitude'] for k,v in enumerate(Profile_nb.keys())])
plt.scatter(x=xtime_nb, y=ydata_nb,s=1,marker='.',color='#B6D800')
ax1.set_ylim([59,63])
""" if args.extend_plot:
ax1.set_xlim([xtime[0],xtime[0]+datetime.timedelta(days=args.extend_plot)])
if args.reverse_x:
ax1.invert_xaxis()"""
ax1.xaxis.set_major_locator(DayLocator(bymonthday=15))
ax1.xaxis.set_minor_locator(DayLocator(bymonthday=range(1,32,1)))
ax1.xaxis.set_major_formatter(ticker.NullFormatter())
ax1.xaxis.set_minor_formatter(DateFormatter('%d'))
ax1.xaxis.set_major_formatter(DateFormatter('%b %y'))
ax1.xaxis.set_tick_params(which='major', pad=25)
plt.tight_layout()
#plt.savefig(args.filepath + '_' + args.param + args.castdirection + '.svg', transparent=False, dpi = (300))
plt.savefig(args.filepath + '_' + args.param + '.png', transparent=False, dpi = (300))
plt.close()
if args.bydivenum:
fig = plt.figure(1, figsize=(12, 3), facecolor='w', edgecolor='w')
ax1 = fig.add_subplot(111)
for cycle in range(startcycle,endcycle+1,1):
#get db meta information for mooring
Profile = EcoFOCI_db.read_profile(table=db_table,
divenum=cycle,
castdirection=args.castdirection,
param=args.param,
verbose=True)
try:
temp_time = Profile[sorted(Profile.keys())[0]]['time']
ProfileTime = ProfileTime + [temp_time]
Pressure = np.array(sorted(Profile.keys()))
if args.boundary:
Temperature = np.array([Profile[x][args.param] for x in sorted(Profile.keys()) ], dtype=float)
dtdz_max = np.where(Temperature == np.min(Temperature))
Temperature = np.zeros_like(Temperature)
|
import numpy as np
import pandas as pd
import os
import matplotlib.pyplot as plt
from data_utils import getHeader, findCorrelation, generateReport
from sklearn.metrics import mean_absolute_error
from distance_calculator import calculateDistance
import tensorflow as tf
os.environ["CUDA_VISIBLE_DEVICES"]="1"
distance_parameter = ['Frobenius Norm', 'Energy Distance', 'L2 Norm', 'Wasserstein Distance', 'Frechet Inception Distance',
'Students T-test', 'KS Test', '<NAME> Test', '<NAME> Test']
threshold = 0.3
real_train_file = 'x_train.npy'
syn_train_file = '0.npy'
header_file = "x_headers.txt"
lst = getHeader(header_file)
# Real Data and Synthetic Data
corr_real, corr_syn, chng_lst = findCorrelation(real_train_file, syn_train_file, lst)
# Deleted columns list
del_list = [x for x in lst if x not in chng_lst]
# Plot and Mean Absolute Error
error = mean_absolute_error(corr_real, corr_syn)
print("Mean Absolute Error: ",error)
x = np.concatenate((corr_real, corr_syn))
|
# coding: utf-8
# # Module 4 - Multiple Classifier Comparison
#
# In this module you will learn how to compare classifiers.
#
#
# [reference](http://scikit-learn.org/stable/auto_examples/classification/plot_classifier_comparison.html)
# ### Step 1: Load basic python libraries
# In[27]:
get_ipython().magic('matplotlib inline')
# This is used to display images within the browser
import os
try:
import cPickle as pickle
except:
import pickle
import numpy as np
import matplotlib.pyplot as plt
import pylab
from mpl_toolkits.mplot3d import Axes3D
from sklearn import svm
import pandas as pd
from matplotlib.colors import ListedColormap
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_moons, make_circles, make_classification
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
# ## Read the dataset
# In this case the training dataset is just a csv file. In case of larger dataset more advanced file fromats like hdf5 are used.
# Pandas is used to load the files.
# In[28]:
Data=pd.read_csv ('DataExample.csv')
# ## Creating training sets
#
# Each class of tissue in our pandas framework has a pre assigned label (Module 1).
# This labels were:
# - ClassTissuePost
# - ClassTissuePre
# - ClassTissueFlair
# - ClassTumorPost
# - ClassTumorPre
# - ClassTumorFlair
# - ClassEdemaPost
# - ClassEdemaPre
# - ClassEdemaFlair
#
# For demontration purposes we will create a feature vector that contains the intesities for the tumor and white matter area from the T1w pre and post contrast images.
# In[29]:
ClassBrainTissuepost=(Data['ClassTissuePost'].values)
ClassBrainTissuepost= (np.asarray(ClassBrainTissuepost))
ClassBrainTissuepost=ClassBrainTissuepost[~np.isnan(ClassBrainTissuepost)]
ClassBrainTissuepre=(Data[['ClassTissuePre']].values)
ClassBrainTissuepre= (np.asarray(ClassBrainTissuepre))
ClassBrainTissuepre=ClassBrainTissuepre[~np.isnan(ClassBrainTissuepre)]
ClassTUMORpost=(Data[['ClassTumorPost']].values)
ClassTUMORpost= (np.asarray(ClassTUMORpost))
ClassTUMORpost=ClassTUMORpost[~np.isnan(ClassTUMORpost)]
ClassTUMORpre=(Data[['ClassTumorPre']].values)
ClassTUMORpre= (np.asarray(ClassTUMORpre))
ClassTUMORpre=ClassTUMORpre[~np.isnan(ClassTUMORpre)]
X_1 = np.stack((ClassBrainTissuepost, ClassBrainTissuepre))  # normal-tissue intensities (post- and pre-contrast)
X_2 = np.stack((ClassTUMORpost, ClassTUMORpre))  # tumor intensities (post- and pre-contrast)
X = np.concatenate((X_1.transpose(), X_2.transpose()), axis=0)
y = np.zeros((np.shape(X))[0])
y[np.shape(X_1)[1]:] = 1  # label the tumor samples with class 1
# ## Compare classifiers
#
#
# List of classifier considered
# - Nearest Neighbors
# - Linear SVM
# - RBF SVM
# - Decision Tree
# - Random Forest
# - AdaBoost
# - Naive Bayes
# - Linear Discriminant Analysis
# - Quadratic Discriminant Analysis
# In[32]:
h = .02  # step size in the mesh (parameter for plotting purposes)
data = (X, y)
names = ["Nearest Neighbors", "Linear SVM", "RBF SVM", "Decision Tree",
"Random Forest", "AdaBoost", "Naive Bayes", "Linear Discriminant Analysis",
"Quadratic Discriminant Analysis"]
# Feel free to experiment with the parameters and observe the effects on the classification borders
classifiers = [
KNeighborsClassifier(3),
SVC(kernel="linear", C=0.025),
SVC(gamma=.9, C=10),
DecisionTreeClassifier(max_depth=5),
RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1),
AdaBoostClassifier(),
GaussianNB(),
LinearDiscriminantAnalysis(),
QuadraticDiscriminantAnalysis(reg_param=1)]
datasets = [data]
figure = plt.figure(figsize=(27, 9))
i = 1
# iterate over datasets
for ds in datasets:
# preprocess dataset, split into training and test part
X, y = ds
# X = StandardScaler().fit_transform(X)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.4)
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                     np.arange(y_min, y_max, h))
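# The notebook fragment is truncated here. In the scikit-learn example linked above,
# the loop continues by fitting each classifier on the training split and scoring it
# on the held-out split before plotting the decision boundaries. A minimal sketch of
# that scoring step (an illustration, not the original cell) is:
for name, clf in zip(names, classifiers):
    clf.fit(X_train, y_train)
    score = clf.score(X_test, y_test)
    print("{:s}: test accuracy = {:.3f}".format(name, score))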
from unittest import TestCase
from tempfile import TemporaryDirectory
from pathlib import Path
from giant.camera_models import PinholeModel, OwenModel, BrownModel, OpenCVModel, save, load
import numpy as np
import giant.rotations as at
import lxml.etree as etree
class TestPinholeModel(TestCase):
def setUp(self):
self.Class = PinholeModel
def test___init__(self):
model = self.Class(intrinsic_matrix=np.array([[1, 2, 3], [4, 5, 6]]), focal_length=10.5, field_of_view=20.5,
use_a_priori=True, misalignment=[np.zeros(3), np.ones(3)],
estimation_parameters='basic intrinsic',
a1=1, a2=2, a3=3)
np.testing.assert_array_equal(model.intrinsic_matrix, [[1, 2, 3], [4, 5, 6]])
self.assertEqual(model.focal_length, 10.5)
self.assertEqual(model.field_of_view, 20.5)
self.assertTrue(model.use_a_priori)
self.assertEqual(model.estimation_parameters, ['basic intrinsic'])
self.assertEqual(model.a1, 1)
self.assertEqual(model.a2, 2)
self.assertEqual(model.a3, 3)
model = self.Class(kx=1, ky=2, px=4, py=5, focal_length=10.5, field_of_view=20.5,
use_a_priori=True, misalignment=[np.zeros(3), np.ones(3)],
estimation_parameters=['focal_length', 'px'])
np.testing.assert_array_equal(model.intrinsic_matrix, [[1, 0, 4], [0, 2, 5]])
self.assertEqual(model.focal_length, 10.5)
self.assertEqual(model.field_of_view, 20.5)
self.assertTrue(model.use_a_priori)
self.assertEqual(model.estimation_parameters, ['focal_length', 'px'])
def test_estimation_parameters(self):
model = self.Class()
model.estimation_parameters = 'kx'
self.assertEqual(model.estimation_parameters, ['kx'])
model.estimate_multiple_misalignments = False
model.estimation_parameters = ['px', 'py', 'Multiple misalignments']
self.assertEqual(model.estimation_parameters, ['px', 'py', 'multiple misalignments'])
self.assertTrue(model.estimate_multiple_misalignments)
def test_kx(self):
model = self.Class(intrinsic_matrix=np.array([[1, 0, 0], [0, 0, 0]]))
self.assertEqual(model.kx, 1)
model.kx = 100
self.assertEqual(model.kx, 100)
self.assertEqual(model.intrinsic_matrix[0, 0], 100)
def test_ky(self):
model = self.Class(intrinsic_matrix=np.array([[0, 0, 0], [0, 3, 0]]))
self.assertEqual(model.ky, 3)
model.ky = 100
self.assertEqual(model.ky, 100)
self.assertEqual(model.intrinsic_matrix[1, 1], 100)
def test_px(self):
model = self.Class(intrinsic_matrix=np.array([[0, 0, 20], [0, 3, 0]]))
self.assertEqual(model.px, 20)
model.px = 100
self.assertEqual(model.px, 100)
self.assertEqual(model.intrinsic_matrix[0, 2], 100)
def test_py(self):
model = self.Class(intrinsic_matrix=np.array([[0, 0, 0], [0, 0, 10]]))
self.assertEqual(model.py, 10)
model.py = 100
self.assertEqual(model.py, 100)
self.assertEqual(model.intrinsic_matrix[1, 2], 100)
def test_a1(self):
model = self.Class(temperature_coefficients=np.array([10, 0, 0]))
self.assertEqual(model.a1, 10)
model.a1 = 100
self.assertEqual(model.a1, 100)
self.assertEqual(model.temperature_coefficients[0], 100)
def test_a2(self):
model = self.Class(temperature_coefficients=np.array([0, 10, 0]))
self.assertEqual(model.a2, 10)
model.a2 = 100
self.assertEqual(model.a2, 100)
self.assertEqual(model.temperature_coefficients[1], 100)
def test_a3(self):
model = self.Class(temperature_coefficients=np.array([0, 0, 10]))
self.assertEqual(model.a3, 10)
model.a3 = 100
self.assertEqual(model.a3, 100)
self.assertEqual(model.temperature_coefficients[2], 100)
def test_intrinsic_matrix_inv(self):
model = self.Class(kx=5, ky=10, px=100, py=-5)
np.testing.assert_array_almost_equal(model.intrinsic_matrix @ np.vstack([model.intrinsic_matrix_inv,
[0, 0, 1]]),
[[1, 0, 0], [0, 1, 0]])
np.testing.assert_array_almost_equal(model.intrinsic_matrix_inv @ np.vstack([model.intrinsic_matrix,
[0, 0, 1]]),
[[1, 0, 0], [0, 1, 0]])
def test_get_temperature_scale(self):
model = self.Class(temperature_coefficients=[1, 2, 3.])
self.assertEqual(model.get_temperature_scale(1), 7)
np.testing.assert_array_equal(model.get_temperature_scale([1, 2]), [7, 35])
np.testing.assert_array_equal(model.get_temperature_scale([-1, 2.]), [-1, 35])
def test_apply_distortion(self):
inputs = [[0, 0], [1, 0], [-1, 0], [1.5, 0], [-1.5, 0], [[1.5], [0]], [[1.5, -1], [0, 0]],
[0, 1], [0, -1], [0, 1.5], [0, -1.5], [[0], [1.5]], [[0, 0], [1.5, -1]], [1, 1]]
model = self.Class()
for inp in inputs:
gnom_dist = model.apply_distortion(np.array(inp))
np.testing.assert_array_almost_equal(gnom_dist, inp)
def test_get_projections(self):
points = [[0, 0, 1], [-0.1, 0.2, 2.2], [[-0.1], [0.2], [2.2]], [[-0.1, 0], [0.2, 0], [2.2, 1]]]
model = self.Class(focal_length=8.7, kx=500, ky=500.5, px=1500, py=1500.5, a1=1, a2=2, a3=3)
with self.subTest(misalignment=None):
for point in points:
gnom, _, pix = model.get_projections(point)
gnom_true = model.focal_length * np.array(point[:2]) / point[2]
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_equal(gnom, gnom_true)
np.testing.assert_array_equal(pix, pix_true)
with self.subTest(temperature=1):
for point in points:
gnom, _, pix = model.get_projections(point, temperature=1)
gnom_true = model.focal_length * np.array(point[:2]) / point[2]
gnom_true *= model.get_temperature_scale(1)
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_equal(gnom, gnom_true)
np.testing.assert_array_equal(pix, pix_true)
with self.subTest(temperature=-10.5):
for point in points:
gnom, _, pix = model.get_projections(point, temperature=-10.5)
gnom_true = model.focal_length * np.array(point[:2]) / point[2]
gnom_true *= model.get_temperature_scale(-10.5)
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_equal(gnom, gnom_true)
np.testing.assert_array_equal(pix, pix_true)
model = self.Class(focal_length=8.7, kx=500, ky=500.5, px=1500, py=1500.5,
misalignment=[0, 0, np.pi])
with self.subTest(misalignment=[0, 0, np.pi]):
for point in points:
gnom, _, pix = model.get_projections(point)
gnom_true = -model.focal_length * np.array(point[:2]) / point[2]
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(gnom, gnom_true)
np.testing.assert_array_almost_equal(pix, pix_true)
model = self.Class(focal_length=8.7, kx=500, ky=500.5, px=1500, py=1500.5,
misalignment=[np.pi, 0, 0])
with self.subTest(misalignment=[np.pi, 0, 0]):
for point in points:
gnom, _, pix = model.get_projections(point)
gnom_true = model.focal_length * np.array(point[:2]) / point[2]
gnom_true[0] *= -1
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(gnom, gnom_true)
np.testing.assert_array_almost_equal(pix, pix_true)
model = self.Class(focal_length=8.7, kx=500, ky=500.5, px=1500, py=1500.5,
misalignment=[0, np.pi, 0])
with self.subTest(misalignment=[0, np.pi, 0]):
for point in points:
gnom, _, pix = model.get_projections(point)
gnom_true = model.focal_length * np.array(point[:2]) / point[2]
gnom_true[1] *= -1
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(gnom, gnom_true)
np.testing.assert_array_almost_equal(pix, pix_true)
model = self.Class(focal_length=8.7, kx=500, ky=500.5, px=1500, py=1500.5,
misalignment=[1, 0.2, 0.3])
with self.subTest(misalignment=[1, 0.2, 0.3]):
rot_mat = at.rotvec_to_rotmat([1, 0.2, 0.3]).squeeze()
for point in points:
point_new = rot_mat @ point
gnom, _, pix = model.get_projections(point)
gnom_true = model.focal_length * np.array(point_new[:2]) / np.array(point_new[2])
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(gnom, gnom_true)
np.testing.assert_array_almost_equal(pix, pix_true)
model = self.Class(focal_length=8.7, kx=500, ky=500.5, px=1500, py=1500.5,
misalignment=[[1, 0.2, 0.3], [0, 0, np.pi]])
model.estimate_multiple_misalignments = True
with self.subTest(misalignment=[[1, 0.2, 0.3], [0, 0, np.pi]]):
for point in points:
rot_mat = at.rotvec_to_rotmat([1, 0.2, 0.3]).squeeze()
point_new = rot_mat @ point
gnom, _, pix = model.get_projections(point, image=0)
gnom_true = model.focal_length * np.array(point_new[:2]) / np.array(point_new[2])
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(gnom, gnom_true)
np.testing.assert_array_almost_equal(pix, pix_true)
gnom, _, pix = model.get_projections(point, image=1)
gnom_true = -model.focal_length * np.array(point[:2]) / point[2]
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(gnom, gnom_true)
np.testing.assert_array_almost_equal(pix, pix_true)
def test_project_onto_image(self):
points = [[0, 0, 1], [-0.1, 0.2, 2.2], [[-0.1], [0.2], [2.2]], [[-0.1, 0], [0.2, 0], [2.2, 1]]]
model = self.Class(focal_length=8.7, kx=500, ky=500.5, px=1500, py=1500.5, a1=-1e-3, a2=1e-6, a3=-7e-8)
with self.subTest(misalignment=None):
for point in points:
pix = model.project_onto_image(point)
gnom_true = model.focal_length * np.array(point[:2]) / point[2]
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_equal(pix, pix_true)
with self.subTest(temperature=1):
for point in points:
pix = model.project_onto_image(point, temperature=1)
gnom_true = model.focal_length * np.array(point[:2]) / point[2]
gnom_true *= model.get_temperature_scale(1)
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_equal(pix, pix_true)
with self.subTest(temperature=-10.5):
for point in points:
pix = model.project_onto_image(point, temperature=-10.5)
gnom_true = model.focal_length * np.array(point[:2]) / point[2]
gnom_true *= model.get_temperature_scale(-10.5)
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_equal(pix, pix_true)
model = self.Class(focal_length=8.7, kx=500, ky=500.5, px=1500, py=1500.5,
misalignment=[0, 0, np.pi])
with self.subTest(misalignment=[0, 0, np.pi]):
for point in points:
pix = model.project_onto_image(point)
gnom_true = -model.focal_length * np.array(point[:2]) / point[2]
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(pix, pix_true)
model = self.Class(focal_length=8.7, kx=500, ky=500.5, px=1500, py=1500.5,
misalignment=[np.pi, 0, 0])
with self.subTest(misalignment=[np.pi, 0, 0]):
for point in points:
pix = model.project_onto_image(point)
gnom_true = model.focal_length * np.array(point[:2]) / point[2]
gnom_true[0] *= -1
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(pix, pix_true)
model = self.Class(focal_length=8.7, kx=500, ky=500.5, px=1500, py=1500.5,
misalignment=[0, np.pi, 0])
with self.subTest(misalignment=[0, np.pi, 0]):
for point in points:
pix = model.project_onto_image(point)
gnom_true = model.focal_length * np.array(point[:2]) / point[2]
gnom_true[1] *= -1
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(pix, pix_true)
model = self.Class(focal_length=8.7, kx=500, ky=500.5, px=1500, py=1500.5,
misalignment=[1, 0.2, 0.3])
with self.subTest(misalignment=[1, 0.2, 0.3]):
rot_mat = at.rotvec_to_rotmat([1, 0.2, 0.3]).squeeze()
for point in points:
point_new = rot_mat @ point
pix = model.project_onto_image(point)
gnom_true = model.focal_length * np.array(point_new[:2]) / np.array(point_new[2])
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(pix, pix_true)
model = self.Class(focal_length=8.7, kx=500, ky=500.5, px=1500, py=1500.5,
misalignment=[[1, 0.2, 0.3], [0, 0, np.pi]])
model.estimate_multiple_misalignments = True
with self.subTest(misalignment=[[1, 0.2, 0.3], [0, 0, np.pi]]):
for point in points:
rot_mat = at.rotvec_to_rotmat([1, 0.2, 0.3]).squeeze()
point_new = rot_mat @ point
pix = model.project_onto_image(point, image=0)
gnom_true = model.focal_length * np.array(point_new[:2]) / np.array(point_new[2])
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(pix, pix_true)
pix = model.project_onto_image(point, image=1)
gnom_true = -model.focal_length * np.array(point[:2]) / point[2]
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(pix, pix_true)
def test_compute_pixel_jacobian(self):
def num_deriv(uvec, cmodel, delta=1e-8, image=0, temperature=0) -> np.ndarray:
uvec = np.array(uvec).reshape(3, -1)
pix_true = cmodel.project_onto_image(uvec, image=image, temperature=temperature)
uvec_pert = uvec + [[delta], [0], [0]]
pix_pert_x_f = cmodel.project_onto_image(uvec_pert, image=image, temperature=temperature)
uvec_pert = uvec + [[0], [delta], [0]]
pix_pert_y_f = cmodel.project_onto_image(uvec_pert, image=image, temperature=temperature)
uvec_pert = uvec + [[0], [0], [delta]]
pix_pert_z_f = cmodel.project_onto_image(uvec_pert, image=image, temperature=temperature)
uvec_pert = uvec - [[delta], [0], [0]]
pix_pert_x_b = cmodel.project_onto_image(uvec_pert, image=image, temperature=temperature)
uvec_pert = uvec - [[0], [delta], [0]]
pix_pert_y_b = cmodel.project_onto_image(uvec_pert, image=image, temperature=temperature)
uvec_pert = uvec - [[0], [0], [delta]]
pix_pert_z_b = cmodel.project_onto_image(uvec_pert, image=image, temperature=temperature)
return np.array([(pix_pert_x_f-pix_pert_x_b)/(2*delta),
(pix_pert_y_f-pix_pert_y_b)/(2*delta),
(pix_pert_z_f-pix_pert_z_b)/(2*delta)]).swapaxes(0, -1)
model_param = {"focal_length": 100.75, "kx": 30, "ky": 40,
"px": 4005.23, "py": 4005.23,
"misalignment": [[1e-8, 1e-9, 1e-10], [-1e-8, 2e-9, -1e-11], [2e-10, -5e-12, 1e-9]],
"a1": 1e-6, "a2": 2e-7, "a3": 3e-8}
inputs = [np.array([[0.5, 0, 1]]).T,
np.array([[0, 0.5, 1], [0.5, 0.5, 1], [-0.5, 0, 1]]).T,
np.array([[0.1, -0.5, 1], [-0.5, -0.5, 1], [5, 10, 1000.23], [1, 2, 1200.23]]).T]
temperatures = [0, 1, -1, 10.5, -10.5]
model = self.Class(**model_param)
model.estimate_multiple_misalignments = True
for temp in temperatures:
for input in inputs:
for image in range(3):
with self.subTest(image=image, temp=temp, input=input):
jac_ana = model.compute_pixel_jacobian(input, image=image, temperature=temp)
jac_num = num_deriv(input, model, image=image, temperature=temp, delta=1e-2)
np.testing.assert_allclose(jac_ana, jac_num, rtol=1e-3, atol=1e-10)
def test__compute_dcamera_point_dgnomic(self):
def num_deriv(gnomic_locations, cmodel, delta=1e-6) -> np.ndarray:
def g2u(g):
v = np.vstack([g, cmodel.focal_length*np.ones(g.shape[-1])])
return v/np.linalg.norm(v, axis=0, keepdims=True)
gnomic_locations = np.asarray(gnomic_locations).reshape(2, -1)
gnom_pert = gnomic_locations + [[delta], [0]]
cam_loc_pert_x_f = g2u(gnom_pert)
gnom_pert = gnomic_locations + [[0], [delta]]
cam_loc_pert_y_f = g2u(gnom_pert)
gnom_pert = gnomic_locations - [[delta], [0]]
cam_loc_pert_x_b = g2u(gnom_pert)
gnom_pert = gnomic_locations - [[0], [delta]]
cam_loc_pert_y_b = g2u(gnom_pert)
return np.array([(cam_loc_pert_x_f -cam_loc_pert_x_b)/(2*delta),
(cam_loc_pert_y_f -cam_loc_pert_y_b)/(2*delta)]).swapaxes(0, -1)
model_param = {"focal_length": 100.75, "kx": 300, "ky": 400,
"px": 1005.23, "py": 1005.23,
"misalignment": [[1e-8, 1e-9, 1e-10], [-1e-8, 2e-9, -1e-11], [2e-10, -5e-12, 1e-9]],
"a1": 1e-6, "a2": 2e-7, "a3": 3e-8}
inputs = [np.array([[0, 0]]).T,
np.array([[0, 2000], [2000, 0], [2000, 2000]]).T,
np.array([[1000, 1000], [1000, 2000], [2000, 1000], [0, 1000], [1000, 0]]).T]
model = self.Class(**model_param)
model.estimate_multiple_misalignments = True
for input in inputs:
with self.subTest(input=input):
jac_ana = []
for gnom in input.T:
jac_ana.append(
model._compute_dcamera_point_dgnomic(gnom, np.sqrt(np.sum(gnom*gnom) + model.focal_length**2)))
jac_ana = np.array(jac_ana)
jac_num = num_deriv(input, model)
np.testing.assert_almost_equal(jac_ana, jac_num)
def test__compute_dgnomic_ddist_gnomic(self):
def num_deriv(dist_gnomic_locations, cmodel, delta=1e-6) -> np.ndarray:
def dg2g(dg):
gnomic_guess = dg.copy()
# perform the fixed-point iteration that removes the distortion
for _ in np.arange(20):
# get the distorted location assuming the current guess is correct
gnomic_guess_distorted = cmodel.apply_distortion(gnomic_guess)
# subtract off the residual distortion from the gnomic guess
gnomic_guess += dg - gnomic_guess_distorted
# check for convergence
if np.all(np.linalg.norm(gnomic_guess_distorted - dg, axis=0) <= 1e-15):
break
return gnomic_guess
dist_gnomic_locations = np.asarray(dist_gnomic_locations).reshape(2, -1)
dist_gnom_pert = dist_gnomic_locations + [[delta], [0]]
gnom_loc_pert_x_f = dg2g(dist_gnom_pert)
dist_gnom_pert = dist_gnomic_locations + [[0], [delta]]
gnom_loc_pert_y_f = dg2g(dist_gnom_pert)
dist_gnom_pert = dist_gnomic_locations - [[delta], [0]]
gnom_loc_pert_x_b = dg2g(dist_gnom_pert)
dist_gnom_pert = dist_gnomic_locations - [[0], [delta]]
gnom_loc_pert_y_b = dg2g(dist_gnom_pert)
return np.array([(gnom_loc_pert_x_f - gnom_loc_pert_x_b)/(2*delta),
(gnom_loc_pert_y_f - gnom_loc_pert_y_b)/(2*delta)]).swapaxes(0, -1)
model_param = {"focal_length": 100.75, "kx": 300, "ky": 400,
"px": 1005.23, "py": 1005.23,
"misalignment": [[1e-8, 1e-9, 1e-10], [-1e-8, 2e-9, -1e-11], [2e-10, -5e-12, 1e-9]],
"a1": 1e-6, "a2": 2e-7, "a3": 3e-8}
inputs = [np.array([[0, 0]]).T,
np.array([[0, 0.1], [0.1, 0], [0.1, 0.1]]).T,
np.array([[-0.1, 0], [0, -0.1], [-0.1, -0.1], [0.1, -0.1], [-0.1, 0.1]]).T]
model = self.Class(**model_param)
model.estimate_multiple_misalignments = True
for input in inputs:
with self.subTest(input=input):
jac_ana = []
for dist_gnom in input.T:
jac_ana.append(model._compute_dgnomic_ddist_gnomic(dist_gnom))
jac_ana = np.array(jac_ana)
jac_num = num_deriv(input, model)
np.testing.assert_almost_equal(jac_ana, jac_num)
def test_compute_unit_vector_jacobian(self):
def num_deriv(pixels, cmodel, delta=1e-6, image=0, temperature=0) -> np.ndarray:
pixels = np.array(pixels).reshape(2, -1)
pix_pert = pixels + [[delta], [0]]
uvec_pert_x_f = cmodel.pixels_to_unit(pix_pert, image=image, temperature=temperature)
pix_pert = pixels + [[0], [delta]]
uvec_pert_y_f = cmodel.pixels_to_unit(pix_pert, image=image, temperature=temperature)
pix_pert = pixels - [[delta], [0]]
uvec_pert_x_b = cmodel.pixels_to_unit(pix_pert, image=image, temperature=temperature)
pix_pert = pixels - [[0], [delta]]
uvec_pert_y_b = cmodel.pixels_to_unit(pix_pert, image=image, temperature=temperature)
return np.array([(uvec_pert_x_f-uvec_pert_x_b)/(2*delta),
(uvec_pert_y_f-uvec_pert_y_b)/(2*delta)]).swapaxes(0, -1)
model_param = {"focal_length": 100.75, "kx": 300, "ky": 400,
"px": 1005.23, "py": 1005.23,
"misalignment": [[1e-8, 1e-9, 1e-10], [-1e-8, 2e-9, -1e-11], [2e-10, -5e-12, 1e-9]],
"a1": 1e-6, "a2": 2e-7, "a3": 3e-8}
inputs = [np.array([[0, 0]]).T,
np.array([[0, 2000], [2000, 0], [2000, 2000]]).T,
np.array([[1000, 1000], [1000, 2000], [2000, 1000], [0, 1000], [1000, 0]]).T]
temperatures = [0, 1, -1, 10.5, -10.5]
model = self.Class(**model_param)
model.estimate_multiple_misalignments = True
for temp in temperatures:
for input in inputs:
for image in range(3):
with self.subTest(image=image, temp=temp, input=input):
jac_ana = model.compute_unit_vector_jacobian(input, image=image, temperature=temp)
jac_num = num_deriv(input, model, image=image, temperature=temp, delta=1e-2)
np.testing.assert_allclose(jac_ana, jac_num, rtol=1e-3, atol=1e-10)
def test__compute_dcamera_point_dmisalignment(self):
def num_deriv(loc, dtheta, delta=1e-10) -> np.ndarray:
mis_pert = at.rotvec_to_rotmat(np.array(dtheta) + [delta, 0, 0]).squeeze()
point_pert_x_f = mis_pert @ loc
mis_pert = at.rotvec_to_rotmat(np.array(dtheta) + [0, delta, 0]).squeeze()
point_pert_y_f = mis_pert @ loc
mis_pert = at.rotvec_to_rotmat(np.array(dtheta) + [0, 0, delta]).squeeze()
point_pert_z_f = mis_pert @ loc
mis_pert = at.rotvec_to_rotmat(np.array(dtheta) - [delta, 0, 0]).squeeze()
point_pert_x_b = mis_pert @ loc
mis_pert = at.rotvec_to_rotmat(np.array(dtheta) - [0, delta, 0]).squeeze()
point_pert_y_b = mis_pert @ loc
mis_pert = at.rotvec_to_rotmat(np.array(dtheta) - [0, 0, delta]).squeeze()
point_pert_z_b = mis_pert @ loc
return np.array([(point_pert_x_f - point_pert_x_b) / (2 * delta),
(point_pert_y_f - point_pert_y_b) / (2 * delta),
(point_pert_z_f - point_pert_z_b) / (2 * delta)]).T
inputs = [[1, 0, 0], [0, 1, 0], [0, 0, 1], [np.sqrt(3), np.sqrt(3), np.sqrt(3)],
[-1, 0, 0], [0, -1, 0], [0, 0, -1], [-np.sqrt(3), -np.sqrt(3), -np.sqrt(3)],
[1, 0, 100], [0, 0.5, 1]]
misalignment = [[1e-8, 0, 0], [0, 1e-8, 0], [0, 0, 1e-8], [1e-9, 1e-9, 1e-9],
[-1e-8, 0, 0], [0, -1e-8, 0], [0, 0, -1e-8], [-1e-9, -1e-9, -1e-9],
[1e-9, 2.3e-9, -0.5e-9]]
for mis in misalignment:
with self.subTest(misalignment=mis):
for inp in inputs:
num = num_deriv(inp, mis)
# noinspection PyTypeChecker
ana = self.Class._compute_dcamera_point_dmisalignment(inp)
np.testing.assert_allclose(num, ana, atol=1e-10, rtol=1e-4)
def test__compute_dpixel_ddistorted_gnomic(self):
def num_deriv(loc, cmodel, delta=1e-8, temperature=0) -> np.ndarray:
loc_pert = np.array(loc) + [delta, 0]
loc_pert *= cmodel.get_temperature_scale(temperature)
pix_pert_x_f = cmodel.intrinsic_matrix[:, :2] @ loc_pert + cmodel.intrinsic_matrix[:, 2]
loc_pert = np.array(loc) + [0, delta]
loc_pert *= cmodel.get_temperature_scale(temperature)
pix_pert_y_f = cmodel.intrinsic_matrix[:, :2] @ loc_pert + cmodel.intrinsic_matrix[:, 2]
loc_pert = np.array(loc) - [delta, 0]
loc_pert *= cmodel.get_temperature_scale(temperature)
pix_pert_x_b = cmodel.intrinsic_matrix[:, :2] @ loc_pert + cmodel.intrinsic_matrix[:, 2]
loc_pert = np.array(loc) - [0, delta]
loc_pert *= cmodel.get_temperature_scale(temperature)
pix_pert_y_b = cmodel.intrinsic_matrix[:, :2] @ loc_pert + cmodel.intrinsic_matrix[:, 2]
return np.array(
[(pix_pert_x_f - pix_pert_x_b) / (2 * delta), (pix_pert_y_f - pix_pert_y_b) / (2 * delta)]).T
intrins_coefs = [{"kx": 1.5, "ky": 0, "px": 0, "py": 0, 'a1': 0, 'a2': 0, 'a3': 0},
{"kx": 0, "ky": 1.5, "px": 0, "py": 0, 'a1': 0, 'a2': 0, 'a3': 0},
{"kx": 0, "ky": 0, "px": 1.5, "py": 0, 'a1': 0, 'a2': 0, 'a3': 0},
{"kx": 0, "ky": 0, "px": 0, "py": 1.5, 'a1': 0, 'a2': 0, 'a3': 0},
{"kx": 0, "ky": 0, "px": 0, "py": 0, 'a1': 1.5, 'a2': 0, 'a3': 0},
{"kx": 0, "ky": 0, "px": 0, "py": 0, 'a1': 0, 'a2': 1.5, 'a3': 0},
{"kx": 0, "ky": 0, "px": 0, "py": 0, 'a1': 0, 'a2': 0, 'a3': 1.5},
{"kx": 1.5, "ky": 1.5, "px": 1.5, "py": 1.5, 'a1': 1.5, 'a2': 1.5, 'a3': 1.5}]
inputs = [[1e-6, 1e-6], [1, 0], [-1, 0], [1.5, 0], [-1.5, 0], [0, 1], [0, -1], [0, 1.5], [0, -1.5], [1, 1]]
temperatures = [0, 1, -1, 10.5, -10.5]
for intrins_coef in intrins_coefs:
model = self.Class(**intrins_coef)
for temp in temperatures:
with self.subTest(temp=temp, **intrins_coef):
for inp in inputs:
num = num_deriv(inp, model, temperature=temp)
ana = model._compute_dpixel_ddistorted_gnomic(temperature=temp)
np.testing.assert_allclose(num, ana, atol=1e-10)
def test__compute_dgnomic_dcamera_point(self):
def num_deriv(loc, cmodel, delta=1e-8) -> np.ndarray:
loc_pert = np.array(loc) + [delta, 0, 0]
gnom_pert_x_f = cmodel.get_projections(loc_pert)[0]
loc_pert = np.array(loc) + [0, delta, 0]
gnom_pert_y_f = cmodel.get_projections(loc_pert)[0]
loc_pert = np.array(loc) + [0, 0, delta]
gnom_pert_z_f = cmodel.get_projections(loc_pert)[0]
loc_pert = np.array(loc) - [delta, 0, 0]
gnom_pert_x_b = cmodel.get_projections(loc_pert)[0]
loc_pert = np.array(loc) - [0, delta, 0]
gnom_pert_y_b = cmodel.get_projections(loc_pert)[0]
loc_pert = np.array(loc) - [0, 0, delta]
gnom_pert_z_b = cmodel.get_projections(loc_pert)[0]
return np.array([(gnom_pert_x_f - gnom_pert_x_b) / (2 * delta),
(gnom_pert_y_f - gnom_pert_y_b) / (2 * delta),
(gnom_pert_z_f - gnom_pert_z_b) / (2 * delta)]).T
intrins_coefs = [{"focal_length": 1},
{"focal_length": 2.5},
{"focal_length": 1000},
{"focal_length": 0.1}]
inputs = [[0, 0, 1], [0.5, 0, 1], [0, 0.5, 1], [0.5, 0.5, 1], [-0.5, 0, 1], [0, -0.5, 1], [-0.5, -0.5, 1],
[5, 10, 1000.23], [0.5, 1e-14, 1]]
for intrins_coef in intrins_coefs:
model = self.Class(**intrins_coef)
with self.subTest(**intrins_coef):
for inp in inputs:
num = num_deriv(inp, model)
ana = model._compute_dgnomic_dcamera_point(np.array(inp))
np.testing.assert_allclose(num, ana, atol=1e-9, rtol=1e-5)
def test__compute_dgnomic_dfocal_length(self):
def num_deriv(loc, cmodel, delta=1e-8) -> np.ndarray:
model_pert = cmodel.copy()
model_pert.focal_length += delta
gnom_pert_f = model_pert.get_projections(loc)[0]
model_pert = cmodel.copy()
model_pert.focal_length -= delta
gnom_pert_b = model_pert.get_projections(loc)[0]
# noinspection PyTypeChecker
return np.asarray((gnom_pert_f - gnom_pert_b) / (2 * delta))
intrins_coefs = [{"focal_length": 1},
{"focal_length": 2.5},
{"focal_length": 1000},
{"focal_length": 0.1}]
inputs = [[0, 0, 1], [0.5, 0, 1], [0, 0.5, 1], [0.5, 0.5, 1], [-0.5, 0, 1], [0, -0.5, 1], [-0.5, -0.5, 1]]
for intrins_coef in intrins_coefs:
model = self.Class(**intrins_coef)
with self.subTest(**intrins_coef):
for inp in inputs:
num = num_deriv(inp, model)
ana = model._compute_dgnomic_dfocal_length(np.array(inp))
np.testing.assert_allclose(num, ana, atol=1e-10, rtol=1e-5)
def test__compute_dpixel_dintrinsic(self):
def num_deriv(loc, cmodel, delta=1e-6) -> np.ndarray:
model_pert = cmodel.copy()
model_pert.kx += delta
pix_pert_kx_f = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.ky += delta
pix_pert_ky_f = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.px += delta
pix_pert_px_f = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.py += delta
pix_pert_py_f = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.kx -= delta
pix_pert_kx_b = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.ky -= delta
pix_pert_ky_b = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.px -= delta
pix_pert_px_b = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.py -= delta
pix_pert_py_b = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
return np.array([(pix_pert_kx_f - pix_pert_kx_b) / (2 * delta),
(pix_pert_ky_f - pix_pert_ky_b) / (2 * delta),
(pix_pert_px_f - pix_pert_px_b) / (2 * delta),
(pix_pert_py_f - pix_pert_py_b) / (2 * delta)]).T
intrins_coefs = [{"kx": 1.5, "ky": 0, "px": 0, "py": 0},
{"kx": 0, "ky": 1.5, "px": 0, "py": 0},
{"kx": 0, "ky": 0, "px": 1.5, "py": 0},
{"kx": 0, "ky": 0, "px": 0, "py": 1.5},
{"kx": 1.5, "ky": 1.5, "px": 1.5, "py": 1.5}]
inputs = [[1e-6, 1e-6], [1, 0], [-1, 0], [1.5, 0], [-1.5, 0], [0, 1], [0, -1], [0, 1.5], [0, -1.5], [1, 1]]
for intrins_coef in intrins_coefs:
model = self.Class(**intrins_coef)
with self.subTest(**intrins_coef):
for inp in inputs:
num = num_deriv(inp, model)
ana = model._compute_dpixel_dintrinsic(np.array(inp))
np.testing.assert_allclose(num, ana, atol=1e-10)
def test__compute_dpixel_dtemperature_coeffs(self):
def num_deriv(loc, cmodel, delta=1e-6, temperature=0) -> np.ndarray:
loc = np.array(loc)
model_pert = cmodel.copy()
model_pert.a1 += delta
loc_copy = loc * model_pert.get_temperature_scale(temperature)
pix_pert_a1_f = model_pert.intrinsic_matrix[:, :2] @ loc_copy + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.a2 += delta
loc_copy = loc * model_pert.get_temperature_scale(temperature)
pix_pert_a2_f = model_pert.intrinsic_matrix[:, :2] @ loc_copy + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.a3 += delta
loc_copy = loc * model_pert.get_temperature_scale(temperature)
pix_pert_a3_f = model_pert.intrinsic_matrix[:, :2] @ loc_copy + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.a1 -= delta
loc_copy = loc * model_pert.get_temperature_scale(temperature)
pix_pert_a1_b = model_pert.intrinsic_matrix[:, :2] @ loc_copy + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.a2 -= delta
loc_copy = loc * model_pert.get_temperature_scale(temperature)
pix_pert_a2_b = model_pert.intrinsic_matrix[:, :2] @ loc_copy + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.a3 -= delta
loc_copy = loc * model_pert.get_temperature_scale(temperature)
pix_pert_a3_b = model_pert.intrinsic_matrix[:, :2] @ loc_copy + model_pert.intrinsic_matrix[:, 2]
return np.array([(pix_pert_a1_f - pix_pert_a1_b) / (2 * delta),
(pix_pert_a2_f - pix_pert_a2_b) / (2 * delta),
(pix_pert_a3_f - pix_pert_a3_b) / (2 * delta)]).T
intrins_coefs = [{"kx": 1.5, "ky": 0, "px": 0, "py": 0, 'a1': 0, 'a2': 0, 'a3': 0},
{"kx": 0, "ky": 1.5, "px": 0, "py": 0, 'a1': 0, 'a2': 0, 'a3': 0},
{"kx": 0, "ky": 0, "px": 1.5, "py": 0, 'a1': 0, 'a2': 0, 'a3': 0},
{"kx": 0, "ky": 0, "px": 0, "py": 1.5, 'a1': 0, 'a2': 0, 'a3': 0},
{"kx": 0, "ky": 0, "px": 0, "py": 0, 'a1': 1.5, 'a2': 0, 'a3': 0},
{"kx": 0, "ky": 0, "px": 0, "py": 0, 'a1': 0, 'a2': 1.5, 'a3': 0},
{"kx": 0, "ky": 0, "px": 0, "py": 0, 'a1': 0, 'a2': 0, 'a3': 1.5},
{"kx": 1.5, "ky": 1.5, "px": 1.5, "py": 1.5, 'a1': 1.5, 'a2': 1.5, 'a3': 1.5}]
inputs = [[1e-6, 1e-6], [1, 0], [-1, 0], [1.5, 0], [-1.5, 0], [0, 1], [0, -1], [0, 1.5], [0, -1.5], [1, 1]]
temperatures = [0, 1, -1, -10.5, 10.5]
for intrins_coef in intrins_coefs:
model = self.Class(**intrins_coef)
for temp in temperatures:
with self.subTest(temp=temp, **intrins_coef):
for inp in inputs:
num = num_deriv(inp, model, temperature=temp)
ana = model._compute_dpixel_dtemperature_coeffs(inp, temperature=temp)
np.testing.assert_allclose(num, ana, atol=1e-10)
def test_get_jacobian_row(self):
def num_deriv(loc, cmodel, delta=1e-8, image=0, temperature=0) -> np.ndarray:
model_pert = cmodel.copy()
model_pert.focal_length += delta
pix_pert_f_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.focal_length -= delta
pix_pert_f_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.kx += delta
pix_pert_kx_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.ky += delta
pix_pert_ky_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.px += delta
pix_pert_px_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.py += delta
pix_pert_py_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.kx -= delta
pix_pert_kx_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.ky -= delta
pix_pert_ky_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.px -= delta
pix_pert_px_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.py -= delta
pix_pert_py_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a1 += delta
pix_pert_a1_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a2 += delta
pix_pert_a2_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a3 += delta
pix_pert_a3_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a1 -= delta
pix_pert_a1_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a2 -= delta
pix_pert_a2_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a3 -= delta
pix_pert_a3_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
delta_misalignment = 1e-6
model_pert = cmodel.copy()
model_pert.misalignment[image][0] += delta_misalignment
pix_pert_mx_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][1] += delta_misalignment
pix_pert_my_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][2] += delta_misalignment
pix_pert_mz_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][0] -= delta_misalignment
pix_pert_mx_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][1] -= delta_misalignment
pix_pert_my_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][2] -= delta_misalignment
pix_pert_mz_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
return np.vstack([(pix_pert_f_f - pix_pert_f_b) / (delta * 2),
(pix_pert_kx_f - pix_pert_kx_b) / (delta * 2),
(pix_pert_ky_f - pix_pert_ky_b) / (delta * 2),
(pix_pert_px_f - pix_pert_px_b) / (delta * 2),
(pix_pert_py_f - pix_pert_py_b) / (delta * 2),
(pix_pert_a1_f - pix_pert_a1_b) / (delta * 2),
(pix_pert_a2_f - pix_pert_a2_b) / (delta * 2),
(pix_pert_a3_f - pix_pert_a3_b) / (delta * 2),
np.zeros((image * 3, 2)),
(pix_pert_mx_f - pix_pert_mx_b) / (delta_misalignment * 2),
(pix_pert_my_f - pix_pert_my_b) / (delta_misalignment * 2),
(pix_pert_mz_f - pix_pert_mz_b) / (delta_misalignment * 2)]).T
# TODO: investigate why this fails with slightly larger misalignments and temperature coefficients
model_param = {"focal_length": 100.75, "kx": 30, "ky": 40, "px": 4005.23, "py": 4005.23,
"a1": 1e-4, "a2": 2e-7, "a3": 3e-8,
"misalignment": [[2e-15, -1.2e-14, 5e-16], [-1e-14, 2e-14, -1e-15]]}
inputs = [[0.5, 0, 1], [0, 0.5, 1], [0.5, 0.5, 1], [-0.5, 0, 1], [0, -0.5, 1], [-0.5, -0.5, 1],
[5, 10, 1000.23], [[10], [-22], [1200.23]]]
temperatures = [0, -1, 1, -10.5, 10.5]
model = self.Class(**model_param)
model.estimate_multiple_misalignments = True
for inp in inputs:
for temp in temperatures:
with self.subTest(temp=temp, inp=inp):
num = num_deriv(inp, model, delta=1, temperature=temp)
ana = model._get_jacobian_row(np.array(inp), 0, 1, temperature=temp)
np.testing.assert_allclose(ana, num, rtol=1e-2, atol=1e-10)
num = num_deriv(inp, model, delta=1, image=1, temperature=temp)
ana = model._get_jacobian_row(np.array(inp), 1, 2, temperature=temp)
np.testing.assert_allclose(ana, num, atol=1e-10, rtol=1e-2)
def test_compute_jacobian(self):
def num_deriv(loc, cmodel, delta=1e-8, image=0, nimages=1, temperature=0) -> np.ndarray:
model_pert = cmodel.copy()
model_pert.focal_length += delta
pix_pert_f_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.focal_length -= delta
pix_pert_f_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.kx += delta
pix_pert_kx_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.ky += delta
pix_pert_ky_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.px += delta
pix_pert_px_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.py += delta
pix_pert_py_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.kx -= delta
pix_pert_kx_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.ky -= delta
pix_pert_ky_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.px -= delta
pix_pert_px_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.py -= delta
pix_pert_py_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a1 += delta
pix_pert_a1_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a2 += delta
pix_pert_a2_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a3 += delta
pix_pert_a3_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a1 -= delta
pix_pert_a1_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a2 -= delta
pix_pert_a2_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a3 -= delta
pix_pert_a3_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
delta_misalignment = 1e-6
model_pert = cmodel.copy()
model_pert.misalignment[image][0] += delta_misalignment
pix_pert_mx_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][1] += delta_misalignment
pix_pert_my_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][2] += delta_misalignment
pix_pert_mz_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][0] -= delta_misalignment
pix_pert_mx_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][1] -= delta_misalignment
pix_pert_my_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][2] -= delta_misalignment
pix_pert_mz_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
return np.vstack([(pix_pert_f_f - pix_pert_f_b) / (delta * 2),
(pix_pert_kx_f - pix_pert_kx_b) / (delta * 2),
(pix_pert_ky_f - pix_pert_ky_b) / (delta * 2),
(pix_pert_px_f - pix_pert_px_b) / (delta * 2),
(pix_pert_py_f - pix_pert_py_b) / (delta * 2),
(pix_pert_a1_f - pix_pert_a1_b) / (delta * 2),
(pix_pert_a2_f - pix_pert_a2_b) / (delta * 2),
(pix_pert_a3_f - pix_pert_a3_b) / (delta * 2),
np.zeros((image * 3, 2)),
(pix_pert_mx_f - pix_pert_mx_b) / (delta_misalignment * 2),
(pix_pert_my_f - pix_pert_my_b) / (delta_misalignment * 2),
(pix_pert_mz_f - pix_pert_mz_b) / (delta_misalignment * 2),
np.zeros(((nimages - image - 1) * 3, 2))]).T
model_param = {"focal_length": 100.75, "kx": 30, "ky": 40, "px": 4005.23, "py": 4005.23,
"a1": 1e-5, "a2": 1e-6, "a3": 1e-7,
"misalignment": [[0, 0, 1e-15], [0, 2e-15, 0], [3e-15, 0, 0]]}
inputs = [np.array([[0.5, 0, 1]]).T,
np.array([[0, 0.5, 1], [0.5, 0.5, 1], [-0.5, 0, 1000]]).T,
np.array([[0.1, -0.5, 1], [-0.5, -0.5, 1], [5, 10, 1000.23], [1, 2, 1200.23]]).T]
temperatures = [0, 1, -1, 10.5, -10.5]
model = self.Class(**model_param, estimation_parameters=['intrinsic',
'temperature dependence',
'multiple misalignments'])
for temp in temperatures:
with self.subTest(temp=temp):
model.use_a_priori = False
jac_ana = model.compute_jacobian(inputs, temperature=temp)
jac_num = []
numim = len(inputs)
for ind, inp in enumerate(inputs):
for vec in inp.T:
jac_num.append(num_deriv(vec.T, model, delta=1, image=ind, nimages=numim, temperature=temp))
np.testing.assert_allclose(jac_ana, np.vstack(jac_num), rtol=1e-2, atol=1e-10)
model.use_a_priori = True
jac_ana = model.compute_jacobian(inputs, temperature=temp)
jac_num = []
numim = len(inputs)
for ind, inp in enumerate(inputs):
for vec in inp.T:
jac_num.append(num_deriv(vec.T, model, delta=1, image=ind, nimages=numim, temperature=temp))
jac_num = np.vstack(jac_num)
jac_num = np.pad(jac_num, [(0, jac_num.shape[1]), (0, 0)], 'constant', constant_values=0)
jac_num[-jac_num.shape[1]:] = np.eye(jac_num.shape[1])
np.testing.assert_allclose(jac_ana, jac_num, rtol=1e-2, atol=1e-10)
def test_remove_jacobian_columns(self):
jac = np.arange(30).reshape(1, -1)
model = self.Class()
for est_param, vals in model.element_dict.items():
model.estimation_parameters = [est_param]
expected = jac[0, vals]
np.testing.assert_array_equal(model._remove_jacobian_columns(jac), [expected])
def test_apply_update(self):
model_param = {"focal_length": 0, "kx": 0, "ky": 0,
"px": 0, "py": 0, "a1": 0, "a2": 0, "a3": 0,
"misalignment": [[0, 0, 0], [0, 0, 0]]}
model = self.Class(**model_param, estimation_parameters=['intrinsic',
'temperature dependence',
'multiple misalignments'])
update_vec = np.arange(14)
model.apply_update(update_vec)
keys = list(model_param.keys())
keys.remove('misalignment')
for key in keys:
self.assertEqual(getattr(model, key), update_vec[model.element_dict[key][0]])
for ind, vec in enumerate(update_vec[8:].reshape(-1, 3)):
np.testing.assert_array_almost_equal(at.Rotation(vec).q, at.Rotation(model.misalignment[ind]).q)
def test_pixels_to_gnomic(self):
gnomic = [[1, 0], [0, 1], [-1, 0], [0, -1],
[0.5, 0], [0, 0.5], [-0.5, 0], [0, -0.5],
[0.5, 0.5], [-0.5, -0.5], [0.5, -0.5], [-0.5, 0.5],
[[1, 0, 0.5], [0, 1.5, -0.5]]]
model = self.Class(kx=2000, ky=-3000.2, px=1025, py=937.567,
a1=1e-3, a2=2e-6, a3=-5.5e-8)
temperatures = [0, 1, -1, 10.5, -10.5]
for gnoms in gnomic:
for temp in temperatures:
with self.subTest(gnoms=gnoms, temp=temp):
dis_gnoms = np.asarray(model.apply_distortion(gnoms)).astype(float)
dis_gnoms *= model.get_temperature_scale(temp)
pixels = ((model.intrinsic_matrix[:, :2] @ dis_gnoms).T + model.intrinsic_matrix[:, 2]).T
gnoms_solved = model.pixels_to_gnomic(pixels, temperature=temp)
np.testing.assert_allclose(gnoms_solved, gnoms)
def test_undistort_pixels(self):
intrins_param = {"kx": 3000, "ky": 4000, "px": 4005.23, 'py': 2000.33, 'a1': 1e-6, 'a2': 1e-5, 'a3': 2e-5}
pinhole = [[0, 0], [1, 0], [-1, 0], [1.5, 0], [-1.5, 0], [[1.5], [0]], [[1.5, -1], [0, 0]],
[0, 1], [0, -1], [0, 1.5], [0, -1.5], [[0], [1.5]], [[0, 0], [1.5, -1]], [1, 1]]
model = self.Class(**intrins_param)
temperatures = [0, 1, -1, 10.5, -10.5]
for gnom in pinhole:
gnom = np.asarray(gnom).astype(float)
for temp in temperatures:
with self.subTest(gnom=gnom, temp=temp):
mm_dist = model.apply_distortion(np.array(gnom))
temp_scale = model.get_temperature_scale(temp)
mm_dist *= temp_scale
pix_dist = ((model.intrinsic_matrix[:, :2] @ mm_dist).T + model.intrinsic_matrix[:, 2]).T
pix_undist = model.undistort_pixels(pix_dist, temperature=temp)
gnom *= temp_scale
pix_pinhole = ((model.intrinsic_matrix[:, :2] @ gnom).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_allclose(pix_undist, pix_pinhole, atol=1e-13)
def test_pixels_to_unit(self):
intrins_param = {"focal_length": 32.7, "kx": 3000, "ky": 4000,
"px": 4005.23, 'py': 2000.33, 'a1': 1e-5, 'a2': -1e-10, 'a3': 2e-4,
'misalignment': [[1e-10, 2e-13, -3e-12], [4e-8, -5.3e-9, 9e-15]]}
camera_vecs = [[0, 0, 1], [0.01, 0, 1], [-0.01, 0, 1], [0, 0.01, 1], [0, -0.01, 1], [0.01, 0.01, 1],
[-0.01, -0.01, 1], [[0.01, -0.01], [-0.01, 0.01], [1, 1]]]
temperatures = [0, 1, -1, 10.5, -10.5]
model = self.Class(**intrins_param)
# TODO: consider adjusting so this isn't needed
model.estimate_multiple_misalignments = True
for vec in camera_vecs:
for image in [0, 1]:
for temp in temperatures:
with self.subTest(vec=vec, image=image, temp=temp):
pixel_loc = model.project_onto_image(vec, image=image, temperature=temp)
unit_vec = model.pixels_to_unit(pixel_loc, image=image, temperature=temp)
unit_true = np.array(vec).astype(np.float64)
unit_true /= np.linalg.norm(unit_true, axis=0, keepdims=True)
np.testing.assert_allclose(unit_vec, unit_true, atol=1e-13)
def test_overwrite(self):
model1 = self.Class(field_of_view=10, intrinsic_matrix=np.array([[1, 0, 3], [0, 5, 6]]), focal_length=60,
misalignment=[[1, 2, 3], [4, 5, 6]], use_a_priori=False,
estimation_parameters=['multiple misalignments'])
model2 = self.Class(field_of_view=20, intrinsic_matrix=np.array([[11, 0, 13], [0, 15, 16]]), focal_length=160,
misalignment=[[11, 12, 13], [14, 15, 16]], use_a_priori=True,
estimation_parameters=['single misalignment'])
modeltest = model1.copy()
modeltest.overwrite(model2)
self.assertEqual(model2.field_of_view, modeltest.field_of_view)
self.assertEqual(model2.use_a_priori, modeltest.use_a_priori)
self.assertEqual(model2.estimate_multiple_misalignments, modeltest.estimate_multiple_misalignments)
np.testing.assert_array_equal(model2.intrinsic_matrix, modeltest.intrinsic_matrix)
np.testing.assert_array_equal(model2.misalignment, modeltest.misalignment)
np.testing.assert_array_equal(model2.estimation_parameters, modeltest.estimation_parameters)
modeltest = model2.copy()
modeltest.overwrite(model1)
self.assertEqual(model1.field_of_view, modeltest.field_of_view)
self.assertEqual(model1.use_a_priori, modeltest.use_a_priori)
self.assertEqual(model1.estimate_multiple_misalignments, modeltest.estimate_multiple_misalignments)
np.testing.assert_array_equal(model1.intrinsic_matrix, modeltest.intrinsic_matrix)
np.testing.assert_array_equal(model1.misalignment, modeltest.misalignment)
np.testing.assert_array_equal(model1.estimation_parameters, modeltest.estimation_parameters)
def test_distort_pixels(self):
model = self.Class(kx=1000, ky=-950.5, px=4500, py=139.32, a1=1e-3, a2=1e-4, a3=1e-5)
pixels = [[0, 1], [1, 0], [-1, 0], [0, -1], [9000., 200.2],
[[4500, 100, 10.98], [0, 139.23, 200.3]]]
temperatures = [0, 1, -1, 10.5, -10.5]
for pix in pixels:
for temp in temperatures:
with self.subTest(pix=pix, temp=temp):
undist_pix = model.undistort_pixels(pix, temperature=temp)
dist_pix = model.distort_pixels(undist_pix, temperature=temp)
np.testing.assert_allclose(dist_pix, pix)
def test_distortion_map(self):
model = self.Class(kx=100, ky=-985.234, px=1000, py=1095)
rows, cols, dist = model.distortion_map((2000, 250), step=10)
# noinspection PyTypeChecker
np.testing.assert_allclose(dist, 0, atol=1e-10)
rl, cl = np.arange(0, 2000, 10), np.arange(0, 250, 10)
rs, cs = np.meshgrid(rl, cl, indexing='ij')
np.testing.assert_array_equal(rows, rs)
np.testing.assert_array_equal(cols, cs)
def test_undistort_image(self):
# not sure how best to do this test...
pass
def test_copy(self):
model = self.Class()
model_copy = model.copy()
model.kx = 1000
model.ky = 999
model.px = 100
model.py = -20
model.a1 = 5
model.a2 = 6
model.a3 = 7
model._focal_length = 11231
model.field_of_view = 1231231
model.use_a_priori = True
model.estimation_parameters = ['a1', 'kx', 'ky']
model.estimate_multiple_misalignments = True
model.misalignment = [1231241, 123124, .12]
self.assertNotEqual(model.kx, model_copy.kx)
self.assertNotEqual(model.ky, model_copy.ky)
self.assertNotEqual(model.px, model_copy.px)
self.assertNotEqual(model.py, model_copy.py)
self.assertNotEqual(model.a1, model_copy.a1)
self.assertNotEqual(model.a2, model_copy.a2)
self.assertNotEqual(model.a3, model_copy.a3)
self.assertNotEqual(model.focal_length, model_copy.focal_length)
self.assertNotEqual(model.field_of_view, model_copy.field_of_view)
self.assertNotEqual(model.use_a_priori, model_copy.use_a_priori)
self.assertNotEqual(model.estimate_multiple_misalignments, model_copy.estimate_multiple_misalignments)
self.assertNotEqual(model.estimation_parameters, model_copy.estimation_parameters)
self.assertTrue((model.misalignment != model_copy.misalignment).all())
def test_to_from_elem(self):
element = etree.Element(self.Class.__name__)
model = self.Class(focal_length=20, field_of_view=5, use_a_priori=True,
misalignment=[1, 2, 3], kx=2, ky=200, px=50, py=300,
a1=37, a2=1, a3=-1230,
estimation_parameters=['a1', 'multiple misalignments'], n_rows=20, n_cols=30)
model_copy = model.copy()
with self.subTest(misalignment=True):
element = model.to_elem(element, misalignment=True)
self.assertEqual(model, model_copy)
model_new = self.Class.from_elem(element)
self.assertEqual(model, model_new)
with self.subTest(misalignment=False):
element = model.to_elem(element, misalignment=False)
self.assertEqual(model, model_copy)
model_new = self.Class.from_elem(element)
model.estimation_parameters[-1] = 'single misalignment'
model.estimate_multiple_misalignments = False
model.misalignment = np.zeros(3)
self.assertEqual(model, model_new)
class TestOwenModel(TestPinholeModel):
def setUp(self):
self.Class = OwenModel
def test___init__(self):
model = self.Class(intrinsic_matrix=np.array([[1, 2, 3], [4, 5, 6]]), focal_length=10.5, field_of_view=20.5,
use_a_priori=True, misalignment=[np.zeros(3), np.ones(3)],
distortion_coefficients=np.array([1, 2, 3, 4, 5, 6]),
estimation_parameters='basic intrinsic',
a1=1, a2=2, a3=3)
np.testing.assert_array_equal(model.intrinsic_matrix, [[1, 2, 3], [4, 5, 6]])
self.assertEqual(model.focal_length, 10.5)
self.assertEqual(model.field_of_view, 20.5)
self.assertTrue(model.use_a_priori)
self.assertEqual(model.estimation_parameters, ['basic intrinsic'])
self.assertEqual(model.a1, 1)
self.assertEqual(model.a2, 2)
self.assertEqual(model.a3, 3)
np.testing.assert_array_equal(model.distortion_coefficients, np.arange(1, 7))
model = self.Class(kx=1, ky=2, px=4, py=5, focal_length=10.5, field_of_view=20.5,
use_a_priori=True, misalignment=[np.zeros(3), np.ones(3)], kxy=80, kyx=90,
estimation_parameters=['focal_length', 'px'], n_rows=500, n_cols=600,
e1=1, radial2=2, pinwheel2=3, e4=4, tangential_x=6, e5=5)
np.testing.assert_array_equal(model.intrinsic_matrix, [[1, 80, 4], [90, 2, 5]])
self.assertEqual(model.focal_length, 10.5)
self.assertEqual(model.field_of_view, 20.5)
self.assertTrue(model.use_a_priori)
self.assertEqual(model.estimation_parameters, ['focal_length', 'px'])
self.assertEqual(model.n_rows, 500)
self.assertEqual(model.n_cols, 600)
np.testing.assert_array_equal(model.distortion_coefficients, [2, 4, 5, 6, 1, 3])
def test_kxy(self):
model = self.Class(intrinsic_matrix=np.array([[0, 1, 0], [0, 0, 0]]))
self.assertEqual(model.kxy, 1)
model.kxy = 100
self.assertEqual(model.kxy, 100)
self.assertEqual(model.intrinsic_matrix[0, 1], 100)
def test_kyx(self):
model = self.Class(intrinsic_matrix=np.array([[0, 0, 0], [3, 0, 0]]))
self.assertEqual(model.kyx, 3)
model.kyx = 100
self.assertEqual(model.kyx, 100)
self.assertEqual(model.intrinsic_matrix[1, 0], 100)
def test_e1(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 0, 0, 1, 0]))
self.assertEqual(model.e1, 1)
model.e1 = 100
self.assertEqual(model.e1, 100)
self.assertEqual(model.distortion_coefficients[4], 100)
def test_e2(self):
model = self.Class(distortion_coefficients=np.array([1, 0, 0, 0, 0, 0]))
self.assertEqual(model.e2, 1)
model.e2 = 100
self.assertEqual(model.e2, 100)
self.assertEqual(model.distortion_coefficients[0], 100)
def test_e3(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 0, 0, 0, 1]))
self.assertEqual(model.e3, 1)
model.e3 = 100
self.assertEqual(model.e3, 100)
self.assertEqual(model.distortion_coefficients[5], 100)
def test_e4(self):
model = self.Class(distortion_coefficients=np.array([0, 1, 0, 0, 0, 0]))
self.assertEqual(model.e4, 1)
model.e4 = 100
self.assertEqual(model.e4, 100)
self.assertEqual(model.distortion_coefficients[1], 100)
def test_e5(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 1, 0, 0, 0]))
self.assertEqual(model.e5, 1)
model.e5 = 100
self.assertEqual(model.e5, 100)
self.assertEqual(model.distortion_coefficients[2], 100)
def test_e6(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 0, 1, 0, 0]))
self.assertEqual(model.e6, 1)
model.e6 = 100
self.assertEqual(model.e6, 100)
self.assertEqual(model.distortion_coefficients[3], 100)
def test_pinwheel1(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 0, 0, 1, 0]))
self.assertEqual(model.pinwheel1, 1)
model.pinwheel1 = 100
self.assertEqual(model.pinwheel1, 100)
self.assertEqual(model.distortion_coefficients[4], 100)
def test_radial2(self):
model = self.Class(distortion_coefficients=np.array([1, 0, 0, 0, 0, 0]))
self.assertEqual(model.radial2, 1)
model.radial2 = 100
self.assertEqual(model.radial2, 100)
self.assertEqual(model.distortion_coefficients[0], 100)
def test_pinwheel2(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 0, 0, 0, 1]))
self.assertEqual(model.pinwheel2, 1)
model.pinwheel2 = 100
self.assertEqual(model.pinwheel2, 100)
self.assertEqual(model.distortion_coefficients[5], 100)
def test_radial4(self):
model = self.Class(distortion_coefficients=np.array([0, 1, 0, 0, 0, 0]))
self.assertEqual(model.radial4, 1)
model.radial4 = 100
self.assertEqual(model.radial4, 100)
self.assertEqual(model.distortion_coefficients[1], 100)
def test_tangential_y(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 1, 0, 0, 0]))
self.assertEqual(model.tangential_y, 1)
model.tangential_y = 100
self.assertEqual(model.tangential_y, 100)
self.assertEqual(model.distortion_coefficients[2], 100)
def test_tangential_x(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 0, 1, 0, 0]))
self.assertEqual(model.tangential_x, 1)
model.tangential_x = 100
self.assertEqual(model.tangential_x, 100)
self.assertEqual(model.distortion_coefficients[3], 100)
def test_apply_distortion(self):
dist_coefs = [{"radial2": 1.5, "radial4": 0, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 1.5, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 1.5, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 0, "tangential_y": 1.5,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 1.5, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 1.5}]
inputs = [[0, 0], [1, 0], [-1, 0], [1.5, 0], [-1.5, 0], [[1.5], [0]], [[1.5, -1], [0, 0]],
[0, 1], [0, -1], [0, 1.5], [0, -1.5], [[0], [1.5]], [[0, 0], [1.5, -1]], [1, 1]]
solus = [[[0, 0], [2.5, 0], [-2.5, 0], [1.5 + 1.5 ** 4, 0], [-(1.5 + 1.5 ** 4), 0],
[[(1.5 + 1.5 ** 4)], [0]], [[(1.5 + 1.5 ** 4), -2.5], [0, 0]],
[0, 2.5], [0, -2.5], [0, (1.5 + 1.5 ** 4)], [0, -(1.5 + 1.5 ** 4)], [[0], [(1.5 + 1.5 ** 4)]],
[[0, 0], [(1.5 + 1.5 ** 4), -2.5]], [1 + 2 * 1.5, 1 + 2 * 1.5]],
[[0, 0], [2.5, 0], [-2.5, 0], [(1.5 + 1.5 ** 6), 0], [-(1.5 + 1.5 ** 6), 0],
[[(1.5 + 1.5 ** 6)], [0]], [[(1.5 + 1.5 ** 6), -2.5], [0, 0]],
[0, 2.5], [0, -2.5], [0, (1.5 + 1.5 ** 6)], [0, -(1.5 + 1.5 ** 6)], [[0], [(1.5 + 1.5 ** 6)]],
[[0, 0], [(1.5 + 1.5 ** 6), -2.5]], [1 + 4 * 1.5, 1 + 4 * 1.5]],
[[0, 0], [2.5, 0], [0.5, 0], [(1.5 + 1.5 ** 3), 0], [-1.5 + 1.5 ** 3, 0],
[[(1.5 + 1.5 ** 3)], [0]], [[(1.5 + 1.5 ** 3), 0.5], [0, 0]],
[0, 1], [0, -1], [0, 1.5], [0, -1.5], [[0], [1.5]], [[0, 0], [1.5, -1]], [2.5, 2.5]],
[[0, 0], [1, 0], [-1, 0], [1.5, 0], [-1.5, 0], [[1.5], [0]], [[1.5, -1], [0, 0]],
[0, 2.5], [0, 0.5], [0, 1.5 + 1.5 ** 3], [0, -1.5 + 1.5 ** 3], [[0], [1.5 + 1.5 ** 3]],
[[0, 0], [1.5 + 1.5 ** 3, 0.5]], [2.5, 2.5]],
[[0, 0], [1, 1.5], [-1, -1.5], [1.5, 1.5 ** 3], [-1.5, -1.5 ** 3],
[[1.5], [1.5 ** 3]], [[1.5, -1], [1.5 ** 3, -1.5]],
[-1.5, 1], [1.5, -1], [-1.5 ** 3, 1.5], [1.5 ** 3, -1.5], [[-1.5 ** 3], [1.5]],
[[-1.5 ** 3, 1.5], [1.5, -1]],
[1 - np.sqrt(2) * 1.5, 1 + np.sqrt(2) * 1.5]],
[[0, 0], [1, 1.5], [-1, -1.5], [1.5, 1.5 ** 5], [-1.5, -1.5 ** 5],
[[1.5], [1.5 ** 5]], [[1.5, -1], [1.5 ** 5, -1.5]],
[-1.5, 1], [1.5, -1], [-1.5 ** 5, 1.5], [1.5 ** 5, -1.5], [[-1.5 ** 5], [1.5]],
[[-1.5 ** 5, 1.5], [1.5, -1]],
[1 - 2 * np.sqrt(2) * 1.5, 1 + 2 * np.sqrt(2) * 1.5]]]
for dist, sols in zip(dist_coefs, solus):
with self.subTest(**dist):
model = self.Class(**dist)
for inp, solu in zip(inputs, sols):
gnom_dist = model.apply_distortion(np.array(inp))
np.testing.assert_array_almost_equal(gnom_dist, solu)
def test_get_projections(self):
points = [[0, 0, 1], [-0.1, 0.2, 2.2], [[-0.1], [0.2], [2.2]], [[-0.1, 0], [0.2, 0], [2.2, 1]]]
model = self.Class(focal_length=8.7, kx=500, ky=500.5, kxy=1.5, kyx=-1.5, px=1500, py=1500.5,
radial2=1e-3, radial4=-2.2e-5, tangential_y=1e-3, tangential_x=1e-6,
pinwheel1=1e-6, pinwheel2=-2.23e-8, a1=1e-1, a2=1e-6, a3=-3e-7)
temps = [0, 1, -1, 10, -10]
for temp in temps:
with self.subTest(misalignment=None, temp=temp):
for point in points:
pin, dist, pix = model.get_projections(point, temperature=temp)
pin_true = model.focal_length * np.array(point[:2]) / point[2]
dist_true = model.apply_distortion(pin_true)
dist_true *= model.get_temperature_scale(temp)
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], dist_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_equal(pin, pin_true)
np.testing.assert_array_equal(dist, dist_true)
np.testing.assert_array_equal(pix, pix_true)
model = self.Class(focal_length=8.7, kx=500, ky=500.5, kxy=1.5, kyx=-1.5, px=1500, py=1500.5,
radial2=1e-3, radial4=-2.2e-5, tangential_y=1e-3, tangential_x=1e-6,
pinwheel1=1e-6, pinwheel2=-2.23e-8, misalignment=[0, 0, np.pi])
with self.subTest(misalignment=[0, 0, np.pi]):
for point in points:
pin, dist, pix = model.get_projections(point)
pin_true = -model.focal_length * np.array(point[:2]) / point[2]
dist_true = model.apply_distortion(pin_true)
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], dist_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(pin, pin_true)
np.testing.assert_array_almost_equal(dist, dist_true)
np.testing.assert_array_almost_equal(pix, pix_true)
model = self.Class(focal_length=8.7, kx=500, ky=500.5, kxy=1.5, kyx=-1.5, px=1500, py=1500.5,
radial2=1e-3, radial4=-2.2e-5, tangential_y=1e-3, tangential_x=1e-6,
pinwheel1=1e-6, pinwheel2=-2.23e-8, misalignment=[np.pi, 0, 0])
with self.subTest(misalignment=[np.pi, 0, 0]):
for point in points:
pin, dist, pix = model.get_projections(point)
pin_true = model.focal_length * np.array(point[:2]) / point[2]
pin_true[0] *= -1
dist_true = model.apply_distortion(pin_true)
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], dist_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(pin, pin_true)
np.testing.assert_array_almost_equal(dist, dist_true)
np.testing.assert_array_almost_equal(pix, pix_true)
model = self.Class(focal_length=8.7, kx=500, ky=500.5, kxy=1.5, kyx=-1.5, px=1500, py=1500.5,
radial2=1e-3, radial4=-2.2e-5, tangential_y=1e-3, tangential_x=1e-6,
pinwheel1=1e-6, pinwheel2=-2.23e-8, misalignment=[0, np.pi, 0])
with self.subTest(misalignment=[0, np.pi, 0]):
for point in points:
pin, dist, pix = model.get_projections(point)
pin_true = model.focal_length * np.array(point[:2]) / point[2]
pin_true[1] *= -1
dist_true = model.apply_distortion(pin_true)
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], dist_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(pin, pin_true)
np.testing.assert_array_almost_equal(dist, dist_true)
np.testing.assert_array_almost_equal(pix, pix_true)
model = self.Class(focal_length=8.7, kx=500, ky=500.5, kxy=1.5, kyx=-1.5, px=1500, py=1500.5,
radial2=1e-3, radial4=-2.2e-5, tangential_y=1e-3, tangential_x=1e-6,
pinwheel1=1e-6, pinwheel2=-2.23e-8, misalignment=[1, 0.2, 0.3])
with self.subTest(misalignment=[1, 0.2, 0.3]):
rot_mat = at.rotvec_to_rotmat([1, 0.2, 0.3]).squeeze()
for point in points:
point_new = rot_mat @ point
pin, dist, pix = model.get_projections(point)
pin_true = model.focal_length * np.array(point_new[:2]) / np.array(point_new[2])
dist_true = model.apply_distortion(pin_true)
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], dist_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(pin, pin_true)
np.testing.assert_array_almost_equal(dist, dist_true)
np.testing.assert_array_almost_equal(pix, pix_true)
model = self.Class(focal_length=8.7, kx=500, ky=500.5, kxy=1.5, kyx=-1.5, px=1500, py=1500.5,
radial2=1e-3, radial4=-2.2e-5, tangential_y=1e-3, tangential_x=1e-6,
pinwheel1=1e-6, pinwheel2=-2.23e-8, misalignment=[[1, 0.2, 0.3], [0, 0, np.pi]])
model.estimate_multiple_misalignments = True
with self.subTest(misalignment=[[1, 0.2, 0.3], [0, 0, np.pi]]):
rot_mat = at.rotvec_to_rotmat([1, 0.2, 0.3]).squeeze()
for point in points:
point_new = rot_mat @ point
pin, dist, pix = model.get_projections(point, image=0)
pin_true = model.focal_length * np.array(point_new[:2]) / np.array(point_new[2])
dist_true = model.apply_distortion(pin_true)
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], dist_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(pin, pin_true)
np.testing.assert_array_almost_equal(dist, dist_true)
np.testing.assert_array_almost_equal(pix, pix_true)
pin, dist, pix = model.get_projections(point, image=1)
pin_true = -model.focal_length * np.array(point[:2]) / point[2]
dist_true = model.apply_distortion(pin_true)
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], dist_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(pin, pin_true)
np.testing.assert_array_almost_equal(dist, dist_true)
np.testing.assert_array_almost_equal(pix, pix_true)
def test_project_onto_image(self):
points = [[0, 0, 1], [-0.1, 0.2, 2.2], [[-0.1], [0.2], [2.2]], [[-0.1, 0], [0.2, 0], [2.2, 1]]]
model = self.Class(focal_length=8.7, kx=500, ky=500.5, kxy=1.5, kyx=-1.5, px=1500, py=1500.5,
radial2=1e-3, radial4=-2.2e-5, tangential_y=1e-3, tangential_x=1e-6,
pinwheel1=1e-6, pinwheel2=-2.23e-8, a1=1, a2=2, a3=-3)
temps = [0, 1, -1, 10, -10]
for temp in temps:
with self.subTest(misalignment=None, temp=temp):
for point in points:
_, __, pix = model.get_projections(point, temperature=temp)
pix_proj = model.project_onto_image(point, temperature=temp)
np.testing.assert_array_equal(pix, pix_proj)
model = self.Class(focal_length=8.7, kx=500, ky=500.5, kxy=1.5, kyx=-1.5, px=1500, py=1500.5,
radial2=1e-3, radial4=-2.2e-5, tangential_y=1e-3, tangential_x=1e-6,
pinwheel1=1e-6, pinwheel2=-2.23e-8, misalignment=[[1, 0.2, 0.3], [0, 0, np.pi]])
model.estimate_multiple_misalignments = True
with self.subTest(misalignment=[[1, 0.2, 0.3], [0, 0, np.pi]]):
for point in points:
_, __, pix = model.get_projections(point, image=0)
pix_proj = model.project_onto_image(point, image=0)
np.testing.assert_array_equal(pix, pix_proj)
_, __, pix = model.get_projections(point, image=1)
pix_proj = model.project_onto_image(point, image=1)
np.testing.assert_array_equal(pix, pix_proj)
def test_compute_pixel_jacobian(self):
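# Validate the analytic Jacobian of the pixel location with respect to the camera-frame vector
# against a central finite-difference approximation built from project_onto_image.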
def num_deriv(uvec, cmodel, delta=1e-8, image=0, temperature=0) -> np.ndarray:
uvec = np.array(uvec).reshape(3, -1)
pix_true = cmodel.project_onto_image(uvec, image=image, temperature=temperature)
uvec_pert = uvec + [[delta], [0], [0]]
pix_pert_x_f = cmodel.project_onto_image(uvec_pert, image=image, temperature=temperature)
uvec_pert = uvec + [[0], [delta], [0]]
pix_pert_y_f = cmodel.project_onto_image(uvec_pert, image=image, temperature=temperature)
uvec_pert = uvec + [[0], [0], [delta]]
pix_pert_z_f = cmodel.project_onto_image(uvec_pert, image=image, temperature=temperature)
uvec_pert = uvec - [[delta], [0], [0]]
pix_pert_x_b = cmodel.project_onto_image(uvec_pert, image=image, temperature=temperature)
uvec_pert = uvec - [[0], [delta], [0]]
pix_pert_y_b = cmodel.project_onto_image(uvec_pert, image=image, temperature=temperature)
uvec_pert = uvec - [[0], [0], [delta]]
pix_pert_z_b = cmodel.project_onto_image(uvec_pert, image=image, temperature=temperature)
return np.array([(pix_pert_x_f-pix_pert_x_b)/(2*delta),
(pix_pert_y_f-pix_pert_y_b)/(2*delta),
(pix_pert_z_f-pix_pert_z_b)/(2*delta)]).swapaxes(0, -1)
model_param = {"focal_length": 100.75, "radial2": 1.5e-5, "radial4": 1.5e-5, "tangential_x": 1.5e-5,
"tangential_y": 1.5e-5, "pinwheel1": 1.5e-5, "pinwheel2": 1.5e-5, "kx": 30, "ky": 40,
"kxy": 0.5, "kyx": -0.8, "px": 4005.23, "py": 4005.23,
"misalignment": [[1e-8, 1e-9, 1e-10], [-1e-8, 2e-9, -1e-11], [2e-10, -5e-12, 1e-9]],
"a1": 1e-6, "a2": 2e-7, "a3": 3e-8}
inputs = [np.array([[0.5, 0, 1]]).T,
np.array([[0, 0.5, 1], [0.5, 0.5, 1], [-0.5, 0, 1]]).T,
np.array([[0.1, -0.5, 1], [-0.5, -0.5, 1], [5, 10, 1000.23], [1, 2, 1200.23]]).T]
temperatures = [0, 1, -1, 10.5, -10.5]
model = self.Class(**model_param)
model.estimate_multiple_misalignments = True
for temp in temperatures:
for input in inputs:
for image in range(3):
with self.subTest(image=image, temp=temp, input=input):
jac_ana = model.compute_pixel_jacobian(input, image=image, temperature=temp)
jac_num = num_deriv(input, model, image=image, temperature=temp, delta=1e-6)
np.testing.assert_allclose(jac_ana, jac_num, rtol=1e-3, atol=1e-10)
def test__compute_dcamera_point_dgnomic(self):
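# The numeric derivative maps gnomic locations to unit vectors (g2u) and central-differences them;
# the analytic _compute_dcamera_point_dgnomic is evaluated one point at a time for comparison.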
def num_deriv(gnomic_locations, cmodel, delta=1e-6) -> np.ndarray:
def g2u(g):
v = np.vstack([g, cmodel.focal_length*np.ones(g.shape[-1])])
return v/np.linalg.norm(v, axis=0, keepdims=True)
gnomic_locations = np.asarray(gnomic_locations).reshape(2, -1)
gnom_pert = gnomic_locations + [[delta], [0]]
cam_loc_pert_x_f = g2u(gnom_pert)
gnom_pert = gnomic_locations + [[0], [delta]]
cam_loc_pert_y_f = g2u(gnom_pert)
gnom_pert = gnomic_locations - [[delta], [0]]
cam_loc_pert_x_b = g2u(gnom_pert)
gnom_pert = gnomic_locations - [[0], [delta]]
cam_loc_pert_y_b = g2u(gnom_pert)
return np.array([(cam_loc_pert_x_f - cam_loc_pert_x_b)/(2*delta),
(cam_loc_pert_y_f - cam_loc_pert_y_b)/(2*delta)]).swapaxes(0, -1)
model_param = {"focal_length": 100.75, "radial2": 1.5e-8, "radial4": 1.5e-8, "tangential_x": 1.5e-8,
"tangential_y": 1.5e-8, "pinwheel1": 1.5e-8, "pinwheel2": 1.5e-8, "kx": 300, "ky": 400,
"kxy": 0.5, "kyx": -0.8, "px": 1005.23, "py": 1005.23,
"misalignment": [[1e-8, 1e-9, 1e-10], [-1e-8, 2e-9, -1e-11], [2e-10, -5e-12, 1e-9]],
"a1": 1e-6, "a2": 2e-7, "a3": 3e-8}
inputs = [np.array([[0, 0]]).T,
np.array([[0, 2000], [2000, 0], [2000, 2000]]).T,
np.array([[1000, 1000], [1000, 2000], [2000, 1000], [0, 1000], [1000, 0]]).T]
model = self.Class(**model_param)
model.estimate_multiple_misalignments = True
for input in inputs:
with self.subTest(input=input):
jac_ana = []
for gnom in input.T:
jac_ana.append(
model._compute_dcamera_point_dgnomic(gnom, np.sqrt(np.sum(gnom*gnom) + model.focal_length**2)))
jac_ana = np.array(jac_ana)
jac_num = num_deriv(input, model)
np.testing.assert_almost_equal(jac_ana, jac_num)
def test__compute_dgnomic_ddist_gnomic(self):
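# dg2g numerically inverts the distortion model so the analytic derivative of the gnomic location
# with respect to the distorted gnomic location can be checked by central differences.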
def num_deriv(dist_gnomic_locations, cmodel, delta=1e-6) -> np.ndarray:
def dg2g(dg):
gnomic_guess = dg.copy()
# perform the fixed point iteration to invert the distortion
for _ in np.arange(20):
# get the distorted location assuming the current guess is correct
gnomic_guess_distorted = cmodel.apply_distortion(gnomic_guess)
# subtract off the residual distortion from the gnomic guess
gnomic_guess += dg - gnomic_guess_distorted
# check for convergence
if np.all(np.linalg.norm(gnomic_guess_distorted - dg, axis=0) <= 1e-15):
break
return gnomic_guess
dist_gnomic_locations = np.asarray(dist_gnomic_locations).reshape(2, -1)
dist_gnom_pert = dist_gnomic_locations + [[delta], [0]]
gnom_loc_pert_x_f = dg2g(dist_gnom_pert)
dist_gnom_pert = dist_gnomic_locations + [[0], [delta]]
gnom_loc_pert_y_f = dg2g(dist_gnom_pert)
dist_gnom_pert = dist_gnomic_locations - [[delta], [0]]
gnom_loc_pert_x_b = dg2g(dist_gnom_pert)
dist_gnom_pert = dist_gnomic_locations - [[0], [delta]]
gnom_loc_pert_y_b = dg2g(dist_gnom_pert)
return np.array([(gnom_loc_pert_x_f - gnom_loc_pert_x_b)/(2*delta),
(gnom_loc_pert_y_f - gnom_loc_pert_y_b)/(2*delta)]).swapaxes(0, -1)
model_param = {"focal_length": 100.75, "radial2": 1.5e-8, "radial4": 1.5e-8, "tangential_x": 1.5e-8,
"tangential_y": 1.5e-8, "pinwheel1": 1.5e-8, "pinwheel2": 1.5e-8, "kx": 300, "ky": 400,
"kxy": 0.5, "kyx": -0.8, "px": 1005.23, "py": 1005.23,
"misalignment": [[1e-8, 1e-9, 1e-10], [-1e-8, 2e-9, -1e-11], [2e-10, -5e-12, 1e-9]],
"a1": 1e-6, "a2": 2e-7, "a3": 3e-8}
inputs = [np.array([[0, 0]]).T,
np.array([[0, 0.1], [0.1, 0], [0.1, 0.1]]).T,
np.array([[-0.1, 0], [0, -0.1], [-0.1, -0.1], [0.1, -0.1], [-0.1, 0.1]]).T]
model = self.Class(**model_param)
model.estimate_multiple_misalignments = True
for input in inputs:
with self.subTest(input=input):
jac_ana = []
for dist_gnom in input.T:
jac_ana.append(model._compute_dgnomic_ddist_gnomic(dist_gnom))
jac_ana = np.array(jac_ana)
jac_num = num_deriv(input, model)
np.testing.assert_almost_equal(jac_ana, jac_num)
def test_compute_unit_vector_jacobian(self):
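# Check the analytic Jacobian of pixels_to_unit (unit vector with respect to pixel location)
# against a central finite-difference approximation.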
def num_deriv(pixels, cmodel, delta=1e-6, image=0, temperature=0) -> np.ndarray:
pixels = np.array(pixels).reshape(2, -1)
pix_pert = pixels + [[delta], [0]]
uvec_pert_x_f = cmodel.pixels_to_unit(pix_pert, image=image, temperature=temperature)
pix_pert = pixels + [[0], [delta]]
uvec_pert_y_f = cmodel.pixels_to_unit(pix_pert, image=image, temperature=temperature)
pix_pert = pixels - [[delta], [0]]
uvec_pert_x_b = cmodel.pixels_to_unit(pix_pert, image=image, temperature=temperature)
pix_pert = pixels - [[0], [delta]]
uvec_pert_y_b = cmodel.pixels_to_unit(pix_pert, image=image, temperature=temperature)
return np.array([(uvec_pert_x_f-uvec_pert_x_b)/(2*delta),
(uvec_pert_y_f-uvec_pert_y_b)/(2*delta)]).swapaxes(0, -1)
model_param = {"focal_length": 100.75, "radial2": 1.5e-8, "radial4": 1.5e-8, "tangential_x": 1.5e-8,
"tangential_y": 1.5e-8, "pinwheel1": 1.5e-8, "pinwheel2": 1.5e-8, "kx": 300, "ky": 400,
"kxy": 0.5, "kyx": -0.8, "px": 1005.23, "py": 1005.23,
"misalignment": [[1e-8, 1e-9, 1e-10], [-1e-8, 2e-9, -1e-11], [2e-10, -5e-12, 1e-9]],
"a1": 1e-6, "a2": 2e-7, "a3": 3e-8}
inputs = [np.array([[0, 0]]).T,
np.array([[0, 2000], [2000, 0], [2000, 2000]]).T,
np.array([[1000, 1000], [1000, 2000], [2000, 1000], [0, 1000], [1000, 0]]).T]
temperatures = [0, 1, -1, 10.5, -10.5]
model = self.Class(**model_param)
model.estimate_multiple_misalignments = True
for temp in temperatures:
for input in inputs:
for image in range(3):
with self.subTest(image=image, temp=temp, input=input):
jac_ana = model.compute_unit_vector_jacobian(input, image=image, temperature=temp)
jac_num = num_deriv(input, model, image=image, temperature=temp, delta=1e-2)
np.testing.assert_allclose(jac_ana, jac_num, rtol=1e-3, atol=1e-10)
def test__compute_ddistortion_dgnomic(self):
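# The distortion contribution is apply_distortion(loc) - loc; its derivative with respect to the
# gnomic location is compared against the analytic form.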
def num_deriv(loc, cmodel, delta=1e-8) -> np.ndarray:
loc_pert = np.array(loc) + [delta, 0]
dist_pert_x_f = cmodel.apply_distortion(loc_pert) - loc_pert
loc_pert = np.array(loc) + [0, delta]
dist_pert_y_f = cmodel.apply_distortion(loc_pert) - loc_pert
loc_pert = np.array(loc) - [delta, 0]
dist_pert_x_b = cmodel.apply_distortion(loc_pert) - loc_pert
loc_pert = np.array(loc) - [0, delta]
dist_pert_y_b = cmodel.apply_distortion(loc_pert) - loc_pert
return np.array(
[(dist_pert_x_f - dist_pert_x_b) / (2 * delta), (dist_pert_y_f - dist_pert_y_b) / (2 * delta)]).T
dist_coefs = [{"radial2": 1.5, "radial4": 0, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 1.5, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 1.5, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 0, "tangential_y": 1.5,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 1.5, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 1.5},
{"e1": -1.5, "e2": -1.5, "e3": -1.5, "e4": -1.5, "e5": -1.5, "e6": -1.5}]
inputs = [[1e-6, 1e-6], [1, 0], [-1, 0], [1.5, 0], [-1.5, 0], [0, 1], [0, -1], [0, 1.5], [0, -1.5], [1, 1]]
for dist_coef in dist_coefs:
model = self.Class(**dist_coef)
with self.subTest(**dist_coef):
for inp in inputs:
r = np.sqrt(inp[0] ** 2 + inp[1] ** 2)
r2 = r ** 2
r3 = r ** 3
r4 = r ** 4
num = num_deriv(inp, model)
ana = model._compute_ddistortion_dgnomic(np.array(inp), r, r2, r3, r4)
np.testing.assert_allclose(num, ana, atol=1e-10)
def test__compute_dpixel_ddistorted_gnomic(self):
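# The pixel location is affine in the temperature-scaled distorted gnomic location, so the
# analytic Jacobian should agree with the central differences to numerical precision.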
def num_deriv(loc, cmodel, delta=1e-8, temperature=0) -> np.ndarray:
loc_pert = np.array(loc) + [delta, 0]
loc_pert *= cmodel.get_temperature_scale(temperature)
pix_pert_x_f = cmodel.intrinsic_matrix[:, :2] @ loc_pert + cmodel.intrinsic_matrix[:, 2]
loc_pert = np.array(loc) + [0, delta]
loc_pert *= cmodel.get_temperature_scale(temperature)
pix_pert_y_f = cmodel.intrinsic_matrix[:, :2] @ loc_pert + cmodel.intrinsic_matrix[:, 2]
loc_pert = np.array(loc) - [delta, 0]
loc_pert *= cmodel.get_temperature_scale(temperature)
pix_pert_x_b = cmodel.intrinsic_matrix[:, :2] @ loc_pert + cmodel.intrinsic_matrix[:, 2]
loc_pert = np.array(loc) - [0, delta]
loc_pert *= cmodel.get_temperature_scale(temperature)
pix_pert_y_b = cmodel.intrinsic_matrix[:, :2] @ loc_pert + cmodel.intrinsic_matrix[:, 2]
return np.array(
[(pix_pert_x_f - pix_pert_x_b) / (2 * delta), (pix_pert_y_f - pix_pert_y_b) / (2 * delta)]).T
intrins_coefs = [{"kx": 1.5, "kxy": 0, "ky": 0, "kyx": 0, "px": 0, "py": 0},
{"kx": 0, "kxy": 1.5, "ky": 0, "kyx": 0, "px": 0, "py": 0},
{"kx": 0, "kxy": 0, "ky": 1.5, "kyx": 0, "px": 0, "py": 0},
{"kx": 0, "kxy": 0, "ky": 0, "kyx": 1.5, "px": 0, "py": 0},
{"kx": 0, "kxy": 0, "ky": 0, "kyx": 0, "px": 1.5, "py": 0},
{"kx": 0, "kxy": 0, "ky": 0, "kyx": 0, "px": 0, "py": 1.5},
{"kx": 0, "kxy": 0, "ky": 0, "kyx": 0, "px": 0, "py": 0, "a1": 1.5, "a2": 0, "a3": 0},
{"kx": 0, "kxy": 0, "ky": 0, "kyx": 0, "px": 0, "py": 0, "a1": 0, "a2": 1.5, "a3": 0},
{"kx": 0, "kxy": 0, "ky": 0, "kyx": 0, "px": 0, "py": 0, "a1": 0, "a2": 0, "a3": 1.5},
{"kx": 1.5, "kxy": 1.5, "ky": 1.5, "kyx": 1.5, "px": 1.5, "py": 1.5,
"a1": 1.5, "a2": 1.5, "a3": 1.5}]
inputs = [[1e-6, 1e-6], [1, 0], [-1, 0], [1.5, 0], [-1.5, 0], [0, 1], [0, -1], [0, 1.5], [0, -1.5], [1, 1]]
temps = [0, 1, -1, 10.5, -10.5]
for temp in temps:
for intrins_coef in intrins_coefs:
model = self.Class(**intrins_coef)
with self.subTest(**intrins_coef, temp=temp):
for inp in inputs:
num = num_deriv(inp, model, temperature=temp)
ana = model._compute_dpixel_ddistorted_gnomic(temperature=temp)
np.testing.assert_allclose(num, ana, atol=1e-10)
def test__compute_dpixel_dintrinsic(self):
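# Perturb each intrinsic matrix element (kx, kxy, kyx, ky, px, py) in turn and compare the
# finite-difference pixel derivatives with the analytic Jacobian.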
def num_deriv(loc, cmodel, delta=1e-6) -> np.ndarray:
model_pert = cmodel.copy()
model_pert.kx += delta
pix_pert_kx_f = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.kxy += delta
pix_pert_kxy_f = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.kyx += delta
pix_pert_kyx_f = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.ky += delta
pix_pert_ky_f = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.px += delta
pix_pert_px_f = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.py += delta
pix_pert_py_f = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.kx -= delta
pix_pert_kx_b = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.kxy -= delta
pix_pert_kxy_b = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.kyx -= delta
pix_pert_kyx_b = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.ky -= delta
pix_pert_ky_b = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.px -= delta
pix_pert_px_b = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.py -= delta
pix_pert_py_b = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
return np.array([(pix_pert_kx_f - pix_pert_kx_b) / (2 * delta),
(pix_pert_kxy_f - pix_pert_kxy_b) / (2 * delta),
(pix_pert_kyx_f - pix_pert_kyx_b) / (2 * delta),
(pix_pert_ky_f - pix_pert_ky_b) / (2 * delta),
(pix_pert_px_f - pix_pert_px_b) / (2 * delta),
(pix_pert_py_f - pix_pert_py_b) / (2 * delta)]).T
intrins_coefs = [{"kx": 1.5, "kxy": 0, "ky": 0, "kyx": 0, "px": 0, "py": 0},
{"kx": 0, "kxy": 1.5, "ky": 0, "kyx": 0, "px": 0, "py": 0},
{"kx": 0, "kxy": 0, "ky": 1.5, "kyx": 0, "px": 0, "py": 0},
{"kx": 0, "kxy": 0, "ky": 0, "kyx": 1.5, "px": 0, "py": 0},
{"kx": 0, "kxy": 0, "ky": 0, "kyx": 0, "px": 1.5, "py": 0},
{"kx": 0, "kxy": 0, "ky": 0, "kyx": 0, "px": 0, "py": 1.5},
{"kx": 1.5, "kxy": 1.5, "ky": 1.5, "kyx": 1.5, "px": 1.5, "py": 1.5}]
inputs = [[1e-6, 1e-6], [1, 0], [-1, 0], [1.5, 0], [-1.5, 0], [0, 1], [0, -1], [0, 1.5], [0, -1.5], [1, 1]]
for intrins_coef in intrins_coefs:
model = self.Class(**intrins_coef)
with self.subTest(**intrins_coef):
for inp in inputs:
num = num_deriv(inp, model)
ana = model._compute_dpixel_dintrinsic(np.array(inp))
np.testing.assert_allclose(num, ana, atol=1e-10)
def test__compute_ddistorted_gnomic_ddistortion(self):
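# Perturb each distortion coefficient in turn and compare the finite-difference derivative of the
# distorted gnomic location with the analytic Jacobian.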
def num_deriv(loc, cmodel, delta=1e-8) -> np.ndarray:
model_pert = cmodel.copy()
model_pert.radial2 += delta
loc_pert_r2_f = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.radial4 += delta
loc_pert_r4_f = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.tangential_y += delta
loc_pert_ty_f = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.tangential_x += delta
loc_pert_tx_f = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.pinwheel1 += delta
loc_pert_p1_f = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.pinwheel2 += delta
loc_pert_p2_f = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.radial2 -= delta
loc_pert_r2_b = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.radial4 -= delta
loc_pert_r4_b = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.tangential_y -= delta
loc_pert_ty_b = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.tangential_x -= delta
loc_pert_tx_b = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.pinwheel1 -= delta
loc_pert_p1_b = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.pinwheel2 -= delta
loc_pert_p2_b = model_pert.apply_distortion(loc)
return np.array([(loc_pert_r2_f - loc_pert_r2_b) / (2 * delta),
(loc_pert_r4_f - loc_pert_r4_b) / (2 * delta),
(loc_pert_ty_f - loc_pert_ty_b) / (2 * delta),
(loc_pert_tx_f - loc_pert_tx_b) / (2 * delta),
(loc_pert_p1_f - loc_pert_p1_b) / (2 * delta),
(loc_pert_p2_f - loc_pert_p2_b) / (2 * delta)]).T
dist_coefs = [{"radial2": 1.5, "radial4": 0, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 1.5, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 1.5, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 0, "tangential_y": 1.5,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 1.5, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 1.5}]
inputs = [[1e-6, 1e-6], [1, 0], [-1, 0], [1.5, 0], [-1.5, 0], [0, 1], [0, -1], [0, 1.5], [0, -1.5], [1, 1]]
for dist_coef in dist_coefs:
model = self.Class(**dist_coef)
with self.subTest(**dist_coef):
for inp in inputs:
r = np.sqrt(inp[0] ** 2 + inp[1] ** 2)
r2 = r ** 2
r3 = r ** 3
r4 = r ** 4
num = num_deriv(inp, model)
ana = model._compute_ddistorted_gnomic_ddistortion(np.array(inp), r, r2, r3, r4)
np.testing.assert_allclose(num, ana, atol=1e-10)
def test_get_jacobian_row(self):
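# Build a numeric Jacobian row by perturbing every estimable parameter (focal length, intrinsic
# terms, distortion coefficients, temperature coefficients, and the per-image misalignment, which
# uses its own step size delta_m) and compare it with _get_jacobian_row.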
def num_deriv(loc, cmodel, delta=1e-8, image=0, temperature=0) -> np.ndarray:
model_pert = cmodel.copy()
model_pert.focal_length += delta
pix_pert_f_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.focal_length -= delta
pix_pert_f_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.kx += delta
pix_pert_kx_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.kxy += delta
pix_pert_kxy_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.kyx += delta
pix_pert_kyx_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.ky += delta
pix_pert_ky_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.px += delta
pix_pert_px_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.py += delta
pix_pert_py_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.kx -= delta
pix_pert_kx_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.kxy -= delta
pix_pert_kxy_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.kyx -= delta
pix_pert_kyx_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.ky -= delta
pix_pert_ky_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.px -= delta
pix_pert_px_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.py -= delta
pix_pert_py_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.radial2 += delta
pix_pert_r2_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.radial4 += delta
pix_pert_r4_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.tangential_y += delta
pix_pert_ty_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.tangential_x += delta
pix_pert_tx_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.pinwheel1 += delta
pix_pert_p1_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.pinwheel2 += delta
pix_pert_p2_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.radial2 -= delta
pix_pert_r2_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.radial4 -= delta
pix_pert_r4_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.tangential_y -= delta
pix_pert_ty_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.tangential_x -= delta
pix_pert_tx_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.pinwheel1 -= delta
pix_pert_p1_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.pinwheel2 -= delta
pix_pert_p2_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a1 += delta
pix_pert_a1_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a2 += delta
pix_pert_a2_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a3 += delta
pix_pert_a3_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a1 -= delta
pix_pert_a1_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a2 -= delta
pix_pert_a2_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a3 -= delta
pix_pert_a3_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
delta_m = 1e-6
model_pert = cmodel.copy()
model_pert.misalignment[image][0] += delta_m
pix_pert_mx_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][1] += delta_m
pix_pert_my_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][2] += delta_m
pix_pert_mz_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][0] -= delta_m
pix_pert_mx_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][1] -= delta_m
pix_pert_my_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][2] -= delta_m
pix_pert_mz_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
return np.vstack([(pix_pert_f_f - pix_pert_f_b) / (delta * 2),
(pix_pert_kx_f - pix_pert_kx_b) / (delta * 2),
(pix_pert_kxy_f - pix_pert_kxy_b) / (delta * 2),
(pix_pert_kyx_f - pix_pert_kyx_b) / (delta * 2),
(pix_pert_ky_f - pix_pert_ky_b) / (delta * 2),
(pix_pert_px_f - pix_pert_px_b) / (delta * 2),
(pix_pert_py_f - pix_pert_py_b) / (delta * 2),
(pix_pert_r2_f - pix_pert_r2_b) / (delta * 2),
(pix_pert_r4_f - pix_pert_r4_b) / (delta * 2),
(pix_pert_ty_f - pix_pert_ty_b) / (delta * 2),
(pix_pert_tx_f - pix_pert_tx_b) / (delta * 2),
(pix_pert_p1_f - pix_pert_p1_b) / (delta * 2),
(pix_pert_p2_f - pix_pert_p2_b) / (delta * 2),
(pix_pert_a1_f - pix_pert_a1_b) / (delta * 2),
(pix_pert_a2_f - pix_pert_a2_b) / (delta * 2),
(pix_pert_a3_f - pix_pert_a3_b) / (delta * 2),
np.zeros((image * 3, 2)),
(pix_pert_mx_f - pix_pert_mx_b) / (delta_m * 2),
(pix_pert_my_f - pix_pert_my_b) / (delta_m * 2),
(pix_pert_mz_f - pix_pert_mz_b) / (delta_m * 2)]).T
model_param = {"focal_length": 100.75, "radial2": 1.5e-5, "radial4": 1.5e-5, "tangential_x": 1.5e-5,
"tangential_y": 1.5e-5, "pinwheel1": 1.5e-5, "pinwheel2": 1.5e-5, "kx": 30, "ky": 40,
"kxy": 0.5, "kyx": -0.8, "px": 4005.23, "py": 4005.23,
"misalignment": [[1e-8, 1e-9, 1e-10], [-1e-8, 2e-9, -1e-11]],
"a1": 1e-6, "a2": 2e-7, "a3": 3e-8}
inputs = [[0.1, 0, 1], [0, 0.1, 1], [0.1, 0.1, 1], [-0.1, 0, 1], [0, -0.1, 1], [-0.1, -0.1, 1],
[5, 10, 1000.23], [[1], [2], [1200.23]]]
temps = [0, 1, -1, 10.5, -10.5]
model = self.Class(**model_param)
model.estimate_multiple_misalignments = True
for temp in temps:
for inp in inputs:
with self.subTest(temp=temp, inp=inp):
num = num_deriv(inp, model, delta=1e-3, temperature=temp)
ana = model._get_jacobian_row(np.array(inp), 0, 1, temperature=temp)
np.testing.assert_allclose(ana, num, rtol=1e-3, atol=1e-10)
num = num_deriv(inp, model, delta=1e-3, image=1, temperature=temp)
ana = model._get_jacobian_row(np.array(inp), 1, 2, temperature=temp)
np.testing.assert_allclose(ana, num, atol=1e-10, rtol=1e-3)
def test_compute_jacobian(self):
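# The full Jacobian stacks one block of rows per observed point across all images; when
# use_a_priori is True the expected Jacobian also gains an appended identity block for the
# a priori constraint rows.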
def num_deriv(loc, cmodel, delta=1e-8, image=0, nimages=1, temperature=0) -> np.ndarray:
model_pert = cmodel.copy()
model_pert.focal_length += delta
pix_pert_f_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.focal_length -= delta
pix_pert_f_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.kx += delta
pix_pert_kx_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.kxy += delta
pix_pert_kxy_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.kyx += delta
pix_pert_kyx_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.ky += delta
pix_pert_ky_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.px += delta
pix_pert_px_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.py += delta
pix_pert_py_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.kx -= delta
pix_pert_kx_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.kxy -= delta
pix_pert_kxy_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.kyx -= delta
pix_pert_kyx_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.ky -= delta
pix_pert_ky_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.px -= delta
pix_pert_px_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.py -= delta
pix_pert_py_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.radial2 += delta
pix_pert_r2_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.radial4 += delta
pix_pert_r4_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.tangential_y += delta
pix_pert_ty_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.tangential_x += delta
pix_pert_tx_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.pinwheel1 += delta
pix_pert_p1_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.pinwheel2 += delta
pix_pert_p2_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.radial2 -= delta
pix_pert_r2_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.radial4 -= delta
pix_pert_r4_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.tangential_y -= delta
pix_pert_ty_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.tangential_x -= delta
pix_pert_tx_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.pinwheel1 -= delta
pix_pert_p1_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.pinwheel2 -= delta
pix_pert_p2_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a1 += delta
pix_pert_a1_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a2 += delta
pix_pert_a2_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a3 += delta
pix_pert_a3_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a1 -= delta
pix_pert_a1_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a2 -= delta
pix_pert_a2_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a3 -= delta
pix_pert_a3_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][0] += delta
pix_pert_mx_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][1] += delta
pix_pert_my_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][2] += delta
pix_pert_mz_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][0] -= delta
pix_pert_mx_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][1] -= delta
pix_pert_my_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][2] -= delta
pix_pert_mz_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
return np.vstack([(pix_pert_f_f - pix_pert_f_b) / (delta * 2),
(pix_pert_kx_f - pix_pert_kx_b) / (delta * 2),
(pix_pert_kxy_f - pix_pert_kxy_b) / (delta * 2),
(pix_pert_kyx_f - pix_pert_kyx_b) / (delta * 2),
(pix_pert_ky_f - pix_pert_ky_b) / (delta * 2),
(pix_pert_px_f - pix_pert_px_b) / (delta * 2),
(pix_pert_py_f - pix_pert_py_b) / (delta * 2),
(pix_pert_r2_f - pix_pert_r2_b) / (delta * 2),
(pix_pert_r4_f - pix_pert_r4_b) / (delta * 2),
(pix_pert_ty_f - pix_pert_ty_b) / (delta * 2),
(pix_pert_tx_f - pix_pert_tx_b) / (delta * 2),
(pix_pert_p1_f - pix_pert_p1_b) / (delta * 2),
(pix_pert_p2_f - pix_pert_p2_b) / (delta * 2),
(pix_pert_a1_f - pix_pert_a1_b) / (delta * 2),
(pix_pert_a2_f - pix_pert_a2_b) / (delta * 2),
(pix_pert_a3_f - pix_pert_a3_b) / (delta * 2),
np.zeros((image * 3, 2)),
(pix_pert_mx_f - pix_pert_mx_b) / (delta * 2),
(pix_pert_my_f - pix_pert_my_b) / (delta * 2),
(pix_pert_mz_f - pix_pert_mz_b) / (delta * 2),
np.zeros(((nimages - image - 1) * 3, 2))]).T
model_param = {"focal_length": 100.75, "radial2": 1.5e-5, "radial4": 1.5e-5, "tangential_x": 1.5e-5,
"tangential_y": 1.5e-5, "pinwheel1": 1.5e-5, "pinwheel2": 1.5e-5, "kx": 30, "ky": 40,
"kxy": 0.5, "kyx": -0.8, "px": 4005.23, "py": 4005.23,
"misalignment": [[1e-8, 1e-9, 1e-10], [-1e-8, 2e-9, -1e-11], [2e-10, -5e-12, 1e-9]],
"a1": 1e-6, "a2": 2e-7, "a3": 3e-8}
inputs = [np.array([[0.5, 0, 1]]).T,
np.array([[0, 0.5, 1], [0.5, 0.5, 1], [-0.5, 0, 1]]).T,
np.array([[0.1, -0.5, 1], [-0.5, -0.5, 1], [5, 10, 1000.23], [1, 2, 1200.23]]).T]
temperatures = [0, 1, -1, 10.5, -10.5, [1, -10, 10]]
model = self.Class(**model_param, estimation_parameters=['intrinsic', 'temperature dependence',
'multiple misalignments'])
for temp in temperatures:
with self.subTest(temp=temp):
model.use_a_priori = False
jac_ana = model.compute_jacobian(inputs, temperature=temp)
jac_num = []
numim = len(inputs)
for ind, inp in enumerate(inputs):
if isinstance(temp, list):
templ = temp[ind]
else:
templ = temp
for vec in inp.T:
jac_num.append(num_deriv(vec.T, model, delta=1e-4, image=ind, nimages=numim, temperature=templ))
np.testing.assert_allclose(jac_ana, np.vstack(jac_num), rtol=1e-3, atol=1e-10)
model.use_a_priori = True
jac_ana = model.compute_jacobian(inputs, temperature=temp)
jac_num = []
numim = len(inputs)
for ind, inp in enumerate(inputs):
if isinstance(temp, list):
templ = temp[ind]
else:
templ = temp
for vec in inp.T:
jac_num.append(num_deriv(vec.T, model, delta=1e-4, image=ind, nimages=numim, temperature=templ))
jac_num = np.vstack(jac_num)
jac_num = np.pad(jac_num, [(0, jac_num.shape[1]), (0, 0)], 'constant', constant_values=0)
jac_num[-jac_num.shape[1]:] = np.eye(jac_num.shape[1])
np.testing.assert_allclose(jac_ana, jac_num, rtol=1e-3, atol=1e-10)
def test_apply_update(self):
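# After apply_update each scalar parameter should equal the corresponding element of the update
# vector, and the trailing triplets should be applied as misalignment rotation vectors.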
model_param = {"focal_length": 0, "radial2": 0, "radial4": 0, "tangential_x": 0,
"tangential_y": 0, "pinwheel1": 0, "pinwheel2": 0, "kx": 0, "ky": 0,
"kxy": 0, "kyx": 0, "px": 0,
"misalignment": [[0, 0, 0], [0, 0, 0]],
"a1": 0, "a2": 0, "a3": 0}
model = self.Class(**model_param, estimation_parameters=['intrinsic', "temperature dependence",
'multiple misalignments'])
update_vec = np.arange(22)
model.apply_update(update_vec)
keys = list(model_param.keys())
keys.remove('misalignment')
for key in keys:
self.assertEqual(getattr(model, key), update_vec[model.element_dict[key][0]])
for ind, vec in enumerate(update_vec[16:].reshape(-1, 3)):
np.testing.assert_array_almost_equal(at.Rotation(vec).q, at.Rotation(model.misalignment[ind]).q)
def test_pixels_to_gnomic(self):
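# Manually distort, temperature scale, and project gnomic points to pixels, then verify that
# pixels_to_gnomic recovers the original gnomic locations.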
intrins_param = {"kx": 3000, "ky": 4000, "kxy": 0.5, "kyx": -0.8, "px": 4005.23, 'py': 2000.33,
'a1': 1e-6, 'a2': 1e-7, 'a3': 1e-8}
dist_coefs = [{"radial2": 1.5e-3, "radial4": 0, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 1.5e-3, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 1.5e-3, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 0, "tangential_y": 1.5e-3,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 1.5e-3, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 1.5e-3},
{"radial2": 1.5e-3, "radial4": 1.5e-3, "tangential_x": 1.5e-3, "tangential_y": 1.5e-3,
"pinwheel1": 1.5e-3, "pinwheel2": 1.5e-3}]
pinhole = [[0, 0], [1, 0], [-1, 0], [1.5, 0], [-1.5, 0], [[1.5], [0]], [[1.5, -1], [0, 0]],
[0, 1], [0, -1], [0, 1.5], [0, -1.5], [[0], [1.5]], [[0, 0], [1.5, -1]], [1, 1]]
temperatures = [0, 1, -1, 10.5, -10.5]
for temp in temperatures:
for dist in dist_coefs:
model = self.Class(**dist, **intrins_param)
for gnoms in pinhole:
with self.subTest(**dist, temp=temp, gnoms=gnoms):
mm_dist = model.apply_distortion(np.array(gnoms))
mm_dist *= model.get_temperature_scale(temp)
pix_dist = ((model.intrinsic_matrix[:, :2] @ mm_dist).T + model.intrinsic_matrix[:, 2]).T
mm_undist = model.pixels_to_gnomic(pix_dist, temperature=temp)
np.testing.assert_allclose(mm_undist, gnoms, atol=1e-13)
def test_undistort_pixels(self):
intrins_param = {"kx": 3000, "ky": 4000, "kxy": 0.5, "kyx": -0.8, "px": 4005.23, 'py': 2000.33,
"a1": 1e-3, "a2": 1e-4, "a3": 1e-5}
dist_coefs = [{"radial2": 1.5e-3, "radial4": 0, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 1.5e-3, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 1.5e-3, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 0, "tangential_y": 1.5e-3,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 1.5e-3, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 1.5e-3},
{"radial2": 1.5e-3, "radial4": 1.5e-3, "tangential_x": 1.5e-3, "tangential_y": 1.5e-3,
"pinwheel1": 1.5e-3, "pinwheel2": 1.5e-3}]
pinhole = [[0, 0], [1, 0], [-1, 0], [1.5, 0], [-1.5, 0], [[1.5], [0]], [[1.5, -1], [0, 0]],
[0, 1], [0, -1], [0, 1.5], [0, -1.5], [[0], [1.5]], [[0, 0], [1.5, -1]], [1, 1]]
temperatures = [0, 1, -1, 10.5, -10.5]
for temp in temperatures:
for dist in dist_coefs:
model = self.Class(**dist, **intrins_param)
for gnoms in pinhole:
with self.subTest(**dist, temp=temp, gnoms=gnoms):
gnoms = np.array(gnoms).astype(np.float64)
mm_dist = model.apply_distortion(np.array(gnoms))
mm_dist *= model.get_temperature_scale(temp)
pix_dist = ((model.intrinsic_matrix[:, :2] @ mm_dist).T + model.intrinsic_matrix[:, 2]).T
pix_undist = model.undistort_pixels(pix_dist, temperature=temp)
gnoms *= model.get_temperature_scale(temp)
pix_pinhole = ((model.intrinsic_matrix[:, :2] @ gnoms).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_allclose(pix_undist, pix_pinhole, atol=1e-13)
def test_pixels_to_unit(self):
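# Project camera-frame vectors to pixels, then verify that pixels_to_unit returns the normalized
# direction of the original vector.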
intrins_param = {"focal_length": 32.7, "kx": 3000, "ky": 4000, "kxy": 0.5, "kyx": -0.8,
"px": 4005.23, 'py': 2000.33, "a1": 1e-6, "a2": 1e-7, "a3": -3e-5}
dist_coefs = [{"radial2": 1.5e-3, "radial4": 0, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 1.5e-3, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 1.5e-3, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 0, "tangential_y": 1.5e-3,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 1.5e-3, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 1.5e-3},
{"radial2": 1.5e-3, "radial4": 1.5e-3, "tangential_x": 1.5e-3, "tangential_y": 1.5e-3,
"pinwheel1": 1.5e-3, "pinwheel2": 1.5e-3},
{"misalignment": np.array([1e-11, 2e-12, -1e-10])},
{"misalignment": np.array([[1e-11, 2e-12, -1e-10], [-1e-13, 1e-11, 2e-12]]),
"estimation_parameters": "multiple misalignments"}]
camera_vecs = [[0, 0, 1], [0.01, 0, 1], [-0.01, 0, 1], [0, 0.01, 1], [0, -0.01, 1], [0.01, 0.01, 1],
[-0.01, -0.01, 1], [[0.01, -0.01], [-0.01, 0.01], [1, 1]]]
temperatures = [0, 1, -1, 10.5, -10.5]
for temp in temperatures:
for dist in dist_coefs:
model = self.Class(**dist, **intrins_param)
for vec in camera_vecs:
with self.subTest(**dist, temp=temp, vec=vec):
pixel_loc = model.project_onto_image(vec, image=-1, temperature=temp)
unit_vec = model.pixels_to_unit(pixel_loc, image=-1, temperature=temp)
unit_true = np.array(vec).astype(np.float64)
unit_true /= np.linalg.norm(unit_true, axis=0, keepdims=True)
np.testing.assert_allclose(unit_vec, unit_true, atol=1e-13)
def test_overwrite(self):
model1 = self.Class(field_of_view=10, intrinsic_matrix=np.array([[1, 2, 3], [4, 5, 6]]),
distortion_coefficients=np.array([1, 2, 3, 4, 5, 6]), focal_length=60,
misalignment=[[1, 2, 3], [4, 5, 6]], use_a_priori=False,
estimation_parameters=['multiple misalignments'], a1=0, a2=3, a3=5)
model2 = self.Class(field_of_view=20, intrinsic_matrix=np.array([[11, 12, 13], [14, 15, 16]]),
distortion_coefficients=np.array([11, 12, 13, 14, 15, 16]), focal_length=160,
misalignment=[[11, 12, 13], [14, 15, 16]], use_a_priori=True,
estimation_parameters=['single misalignment'], a1=-100, a2=-200, a3=-300)
modeltest = model1.copy()
modeltest.overwrite(model2)
self.assertEqual(modeltest, model2)
modeltest = model2.copy()
modeltest.overwrite(model1)
self.assertEqual(modeltest, model1)
def test_intrinsic_matrix_inv(self):
model = self.Class(kx=5, ky=10, kxy=20, kyx=-30.4, px=100, py=-5)
np.testing.assert_array_almost_equal(
model.intrinsic_matrix @ np.vstack([model.intrinsic_matrix_inv, [0, 0, 1]]),
[[1, 0, 0], [0, 1, 0]])
np.testing.assert_array_almost_equal(
model.intrinsic_matrix_inv @ np.vstack([model.intrinsic_matrix, [0, 0, 1]]),
[[1, 0, 0], [0, 1, 0]])
def test_distort_pixels(self):
model = self.Class(kx=1000, ky=-950.5, px=4500, py=139.32, a1=1e-3, a2=1e-4, a3=1e-5,
kxy=0.5, kyx=-8, radial2=1e-5, radial4=1e-5, pinwheel2=1e-7, pinwheel1=-1e-12,
tangential_x=1e-6, tangential_y=2e-12)
pixels = [[0, 1], [1, 0], [-1, 0], [0, -1], [9000., 200.2],
[[4500, 100, 10.98], [0, 139.23, 200.3]]]
temperatures = [0, 1, -1, 10.5, -10.5]
for pix in pixels:
for temp in temperatures:
with self.subTest(pix=pix, temp=temp):
undist_pix = model.undistort_pixels(pix, temperature=temp)
dist_pix = model.distort_pixels(undist_pix, temperature=temp)
np.testing.assert_allclose(dist_pix, pix, atol=1e-10)
def test_distortion_map(self):
model = self.Class(kx=100, ky=-985.234, px=1000, py=1095, kxy=10, kyx=-5,
e1=1e-6, e2=1e-12, e3=-4e-10, e5=6e-7, e6=-1e-5, e4=1e-7,
a1=1e-6, a2=-1e-7, a3=4e-12)
rows, cols, dist = model.distortion_map((2000, 250), step=10)
rl, cl = np.arange(0, 2000, 10), np.arange(0, 250, 10)
rs, cs = np.meshgrid(rl, cl, indexing='ij')
np.testing.assert_array_equal(rows, rs)
np.testing.assert_array_equal(cols, cs)
distl = model.distort_pixels(np.vstack([cs.flatten(), rs.flatten()]))
np.testing.assert_array_equal(distl - np.vstack([cs.flatten(), rs.flatten()]), dist)
class TestBrownModel(TestPinholeModel):
def setUp(self):
self.Class = BrownModel
# Not supported for this model
test__compute_dgnomic_dfocal_length = None
def test___init__(self):
model = self.Class(intrinsic_matrix=np.array([[1, 2, 3], [4, 5, 6]]), field_of_view=20.5,
use_a_priori=True, misalignment=[np.zeros(3), np.ones(3)],
distortion_coefficients=np.array([1, 2, 3, 4, 5]),
estimation_parameters='basic intrinsic',
a1=1, a2=2, a3=3)
np.testing.assert_array_equal(model.intrinsic_matrix, [[1, 2, 3], [4, 5, 6]])
self.assertEqual(model.focal_length, 1)
self.assertEqual(model.field_of_view, 20.5)
self.assertTrue(model.use_a_priori)
self.assertEqual(model.estimation_parameters, ['basic intrinsic'])
self.assertEqual(model.a1, 1)
self.assertEqual(model.a2, 2)
self.assertEqual(model.a3, 3)
np.testing.assert_array_equal(model.distortion_coefficients, np.arange(1, 6))
model = self.Class(kx=1, fy=2, px=4, py=5, field_of_view=20.5,
use_a_priori=True, misalignment=[np.zeros(3), np.ones(3)], kxy=80,
estimation_parameters=['kx', 'px'], n_rows=500, n_cols=600,
radial2=1, radial4=2, k3=3, p1=4, tiptilt_x=5)
np.testing.assert_array_equal(model.intrinsic_matrix, [[1, 80, 4], [0, 2, 5]])
self.assertEqual(model.focal_length, 1)
self.assertEqual(model.field_of_view, 20.5)
self.assertTrue(model.use_a_priori)
self.assertEqual(model.estimation_parameters, ['kx', 'px'])
self.assertEqual(model.n_rows, 500)
self.assertEqual(model.n_cols, 600)
np.testing.assert_array_equal(model.distortion_coefficients, np.arange(1, 6))
def test_fx(self):
model = self.Class(intrinsic_matrix=np.array([[1, 0, 0], [0, 0, 0]]))
self.assertEqual(model.kx, 1)
model.kx = 100
self.assertEqual(model.kx, 100)
self.assertEqual(model.intrinsic_matrix[0, 0], 100)
def test_fy(self):
model = self.Class(intrinsic_matrix=np.array([[0, 0, 0], [0, 1, 0]]))
self.assertEqual(model.ky, 1)
model.ky = 100
self.assertEqual(model.ky, 100)
self.assertEqual(model.intrinsic_matrix[1, 1], 100)
def test_kxy(self):
model = self.Class(intrinsic_matrix=np.array([[0, 1, 0], [0, 0, 0]]))
self.assertEqual(model.kxy, 1)
model.kxy = 100
self.assertEqual(model.kxy, 100)
self.assertEqual(model.intrinsic_matrix[0, 1], 100)
def test_alpha(self):
model = self.Class(intrinsic_matrix=np.array([[0, 1, 0], [0, 0, 0]]))
self.assertEqual(model.alpha, 1)
model.alpha = 100
self.assertEqual(model.alpha, 100)
self.assertEqual(model.intrinsic_matrix[0, 1], 100)
def test_k1(self):
model = self.Class(distortion_coefficients=np.array([1, 0, 0, 0, 0]))
self.assertEqual(model.k1, 1)
model.k1 = 100
self.assertEqual(model.k1, 100)
self.assertEqual(model.distortion_coefficients[0], 100)
def test_k2(self):
model = self.Class(distortion_coefficients=np.array([0, 1, 0, 0, 0]))
self.assertEqual(model.k2, 1)
model.k2 = 100
self.assertEqual(model.k2, 100)
self.assertEqual(model.distortion_coefficients[1], 100)
def test_k3(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 1, 0, 0]))
self.assertEqual(model.k3, 1)
model.k3 = 100
self.assertEqual(model.k3, 100)
self.assertEqual(model.distortion_coefficients[2], 100)
def test_p1(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 0, 1, 0]))
self.assertEqual(model.p1, 1)
model.p1 = 100
self.assertEqual(model.p1, 100)
self.assertEqual(model.distortion_coefficients[3], 100)
def test_p2(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 0, 0, 1]))
self.assertEqual(model.p2, 1)
model.p2 = 100
self.assertEqual(model.p2, 100)
self.assertEqual(model.distortion_coefficients[4], 100)
def test_radial2(self):
model = self.Class(distortion_coefficients=np.array([1, 0, 0, 0, 0]))
self.assertEqual(model.radial2, 1)
model.radial2 = 100
self.assertEqual(model.radial2, 100)
self.assertEqual(model.distortion_coefficients[0], 100)
def test_radial4(self):
model = self.Class(distortion_coefficients=np.array([0, 1, 0, 0, 0]))
self.assertEqual(model.radial4, 1)
model.radial4 = 100
self.assertEqual(model.radial4, 100)
self.assertEqual(model.distortion_coefficients[1], 100)
def test_radial6(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 1, 0, 0]))
self.assertEqual(model.radial6, 1)
model.radial6 = 100
self.assertEqual(model.radial6, 100)
self.assertEqual(model.distortion_coefficients[2], 100)
def test_tiptilt_y(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 0, 1, 0]))
self.assertEqual(model.tiptilt_y, 1)
model.tiptilt_y = 100
self.assertEqual(model.tiptilt_y, 100)
self.assertEqual(model.distortion_coefficients[3], 100)
def test_tiptilt_x(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 0, 0, 1]))
self.assertEqual(model.tiptilt_x, 1)
model.tiptilt_x = 100
self.assertEqual(model.tiptilt_x, 100)
self.assertEqual(model.distortion_coefficients[4], 100)
def test_apply_distortion(self):
dist_coefs = [{"k1": 1.5, "k2": 0, "k3": 0, "p1": 0, "p2": 0},
{"k1": 0, "k2": 1.5, "k3": 0, "p1": 0, "p2": 0},
{"k1": 0, "k2": 0, "k3": 1.5, "p1": 0, "p2": 0},
{"k1": 0, "k2": 0, "k3": 0, "p1": 1.5, "p2": 0},
{"k1": 0, "k2": 0, "k3": 0, "p1": 0, "p2": 1.5}]
inputs = [[0, 0], [1, 0], [-1, 0], [1.5, 0], [-1.5, 0], [[1.5], [0]], [[1.5, -1], [0, 0]],
[0, 1], [0, -1], [0, 1.5], [0, -1.5], [[0], [1.5]], [[0, 0], [1.5, -1]], [1, 1]]
solus = [[[0, 0], [2.5, 0], [-2.5, 0], [1.5 + 1.5 ** 4, 0], [-(1.5 + 1.5 ** 4), 0],
[[(1.5 + 1.5 ** 4)], [0]], [[(1.5 + 1.5 ** 4), -2.5], [0, 0]],
[0, 2.5], [0, -2.5], [0, (1.5 + 1.5 ** 4)], [0, -(1.5 + 1.5 ** 4)], [[0], [(1.5 + 1.5 ** 4)]],
[[0, 0], [(1.5 + 1.5 ** 4), -2.5]], [1 + 2 * 1.5, 1 + 2 * 1.5]],
[[0, 0], [2.5, 0], [-2.5, 0], [(1.5 + 1.5 ** 6), 0], [-(1.5 + 1.5 ** 6), 0],
[[(1.5 + 1.5 ** 6)], [0]], [[(1.5 + 1.5 ** 6), -2.5], [0, 0]],
[0, 2.5], [0, -2.5], [0, (1.5 + 1.5 ** 6)], [0, -(1.5 + 1.5 ** 6)], [[0], [(1.5 + 1.5 ** 6)]],
[[0, 0], [(1.5 + 1.5 ** 6), -2.5]], [1 + 4 * 1.5, 1 + 4 * 1.5]],
[[0, 0], [2.5, 0], [-2.5, 0], [(1.5 + 1.5 ** 8), 0], [-(1.5 + 1.5 ** 8), 0],
[[(1.5 + 1.5 ** 8)], [0]], [[(1.5 + 1.5 ** 8), -2.5], [0, 0]],
[0, 2.5], [0, -2.5], [0, (1.5 + 1.5 ** 8)], [0, -(1.5 + 1.5 ** 8)], [[0], [(1.5 + 1.5 ** 8)]],
[[0, 0], [(1.5 + 1.5 ** 8), -2.5]], [1 + 8 * 1.5, 1 + 8 * 1.5]],
[[0, 0], [1, 1.5], [-1, 1.5], [1.5, 1.5 ** 3], [-1.5, 1.5 ** 3], [[1.5], [1.5 ** 3]],
[[1.5, -1], [1.5 ** 3, 1.5]],
[0, 1 + 3 * 1.5], [0, -1 + 3 * 1.5], [0, 1.5 + 3 * 1.5 ** 3], [0, -1.5 + 3 * 1.5 ** 3],
[[0], [1.5 + 3 * 1.5 ** 3]],
[[0, 0], [1.5 + 3 * 1.5 ** 3, -1 + 3 * 1.5]], [1 + 2 * 1.5, 1 + 4 * 1.5]],
[[0, 0], [1 + 3 * 1.5, 0], [-1 + 3 * 1.5, 0], [1.5 + 3 * 1.5 ** 3, 0], [-1.5 + 3 * 1.5 ** 3, 0],
[[1.5 + 3 * 1.5 ** 3], [0]], [[1.5 + 3 * 1.5 ** 3, -1 + 3 * 1.5], [0, 0]],
[1.5, 1], [1.5, -1], [1.5 ** 3, 1.5], [1.5 ** 3, -1.5], [[1.5 ** 3], [1.5]],
[[1.5 ** 3, 1.5], [1.5, -1]],
[1 + 4 * 1.5, 1 + 2 * 1.5]]]
for dist, sols in zip(dist_coefs, solus):
with self.subTest(**dist):
model = self.Class(**dist)
for inp, solu in zip(inputs, sols):
gnom_dist = model.apply_distortion(np.array(inp))
np.testing.assert_array_almost_equal(gnom_dist, solu)
def test_get_projections(self):
points = [[0, 0, 1], [-0.1, 0.2, 2.2], [[-0.1], [0.2], [2.2]], [[-0.1, 0], [0.2, 0], [2.2, 1]]]
model = self.Class(fx=4050.5, fy=3050.25, alpha=1.5, px=1500, py=1500.5,
k1=0.5, k2=-0.3, k3=0.15, p1=1e-7, p2=1e-6, a1=1e-1, a2=-1e-6, a3=3e-7)
temps = [0, 1, -1, 10, -10]
for temp in temps:
with self.subTest(misalignment=None, temp=temp):
for point in points:
pin, dist, pix = model.get_projections(point, temperature=temp)
pin_true = np.array(point[:2]) / point[2]
dist_true = model.apply_distortion(pin_true)
dist_true *= model.get_temperature_scale(temp)
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], dist_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_equal(pin, pin_true)
np.testing.assert_array_equal(dist, dist_true)
np.testing.assert_array_equal(pix, pix_true)
model = self.Class(fx=4050.5, fy=3050.25, alpha=1.5, px=1500, py=1500.5,
k1=0.5, k2=-0.3, k3=0.15, p1=1e-7, p2=1e-6, misalignment=[0, 0, np.pi])
with self.subTest(misalignment=[0, 0, np.pi]):
for point in points:
pin, dist, pix = model.get_projections(point)
pin_true = -np.array(point[:2]) / point[2]
dist_true = model.apply_distortion(pin_true)
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], dist_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(pin, pin_true)
np.testing.assert_array_almost_equal(dist, dist_true)
np.testing.assert_array_almost_equal(pix, pix_true)
model = self.Class(fx=4050.5, fy=3050.25, alpha=1.5, px=1500, py=1500.5,
k1=0.5, k2=-0.3, k3=0.15, p1=1e-7, p2=1e-6, misalignment=[np.pi, 0, 0])
with self.subTest(misalignment=[np.pi, 0, 0]):
for point in points:
pin, dist, pix = model.get_projections(point)
pin_true = np.array(point[:2]) / point[2]
pin_true[0] *= -1
dist_true = model.apply_distortion(pin_true)
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], dist_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(pin, pin_true)
np.testing.assert_array_almost_equal(dist, dist_true)
np.testing.assert_array_almost_equal(pix, pix_true)
model = self.Class(fx=4050.5, fy=3050.25, alpha=1.5, px=1500, py=1500.5,
k1=0.5, k2=-0.3, k3=0.15, p1=1e-7, p2=1e-6, misalignment=[0, np.pi, 0])
with self.subTest(misalignment=[0, np.pi, 0]):
for point in points:
pin, dist, pix = model.get_projections(point)
pin_true = np.array(point[:2]) / point[2]
pin_true[1] *= -1
dist_true = model.apply_distortion(pin_true)
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], dist_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(pin, pin_true)
np.testing.assert_array_almost_equal(dist, dist_true)
np.testing.assert_array_almost_equal(pix, pix_true)
model = self.Class(fx=4050.5, fy=3050.25, alpha=1.5, px=1500, py=1500.5,
k1=0.5, k2=-0.3, k3=0.15, p1=1e-7, p2=1e-6, misalignment=[1, 0.2, 0.3])
with self.subTest(misalignment=[1, 0.2, 0.3]):
rot_mat = at.rotvec_to_rotmat([1, 0.2, 0.3]).squeeze()
for point in points:
point_new = rot_mat @ point
pin, dist, pix = model.get_projections(point)
pin_true = np.array(point_new[:2]) / np.array(point_new[2])
dist_true = model.apply_distortion(pin_true)
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], dist_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(pin, pin_true)
np.testing.assert_array_almost_equal(dist, dist_true)
np.testing.assert_array_almost_equal(pix, pix_true)
model = self.Class(fx=4050.5, fy=3050.25, alpha=1.5, px=1500, py=1500.5,
k1=0.5, k2=-0.3, k3=0.15, p1=1e-7, p2=1e-6, misalignment=[[1, 0.2, 0.3], [0, 0, np.pi]])
model.estimate_multiple_misalignments = True
with self.subTest(misalignment=[[1, 0.2, 0.3], [0, 0, np.pi]]):
rot_mat = at.rotvec_to_rotmat([1, 0.2, 0.3]).squeeze()
for point in points:
point_new = rot_mat @ point
pin, dist, pix = model.get_projections(point, image=0)
pin_true = np.array(point_new[:2]) / np.array(point_new[2])
dist_true = model.apply_distortion(pin_true)
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], dist_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(pin, pin_true)
np.testing.assert_array_almost_equal(dist, dist_true)
np.testing.assert_array_almost_equal(pix, pix_true)
pin, dist, pix = model.get_projections(point, image=1)
pin_true = -np.array(point[:2]) / point[2]
dist_true = model.apply_distortion(pin_true)
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], dist_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(pin, pin_true)
np.testing.assert_array_almost_equal(dist, dist_true)
np.testing.assert_array_almost_equal(pix, pix_true)
def test_project_onto_image(self):
points = [[0, 0, 1], [-0.1, 0.2, 2.2], [[-0.1], [0.2], [2.2]], [[-0.1, 0], [0.2, 0], [2.2, 1]]]
model = self.Class(fx=4050.5, fy=3050.25, alpha=1.5, px=1500, py=1500.5,
k1=0.5, k2=-0.3, k3=0.15, p1=1e-7, p2=1e-6, a1=1, a2=2, a3=-3)
temps = [0, 1, -1, 10, -10]
for temp in temps:
with self.subTest(temp=temp, misalignment=None):
for point in points:
_, __, pix = model.get_projections(point, temperature=temp)
pix_proj = model.project_onto_image(point, temperature=temp)
np.testing.assert_array_equal(pix, pix_proj)
model = self.Class(fx=4050.5, fy=3050.25, alpha=1.5, px=1500, py=1500.5,
k1=0.5, k2=-0.3, k3=0.15, p1=1e-7, p2=1e-6, misalignment=[[1, 0.2, 0.3], [0, 0, np.pi]])
model.estimate_multiple_misalignments = True
with self.subTest(misalignment=[[1, 0.2, 0.3], [0, 0, np.pi]]):
for point in points:
_, __, pix = model.get_projections(point, image=0)
pix_proj = model.project_onto_image(point, image=0)
np.testing.assert_array_equal(pix, pix_proj)
_, __, pix = model.get_projections(point, image=1)
pix_proj = model.project_onto_image(point, image=1)
np.testing.assert_array_equal(pix, pix_proj)
def test_compute_pixel_jacobian(self):
def num_deriv(uvec, cmodel, delta=1e-8, image=0, temperature=0) -> np.ndarray:
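# Numerically approximate the Jacobian of the projected pixel coordinates with respect to the
# camera-frame vector using central finite differences on each component.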
uvec = np.array(uvec).reshape(3, -1)
pix_true = cmodel.project_onto_image(uvec, image=image, temperature=temperature)
uvec_pert = uvec + [[delta], [0], [0]]
pix_pert_x_f = cmodel.project_onto_image(uvec_pert, image=image, temperature=temperature)
uvec_pert = uvec + [[0], [delta], [0]]
pix_pert_y_f = cmodel.project_onto_image(uvec_pert, image=image, temperature=temperature)
uvec_pert = uvec + [[0], [0], [delta]]
pix_pert_z_f = cmodel.project_onto_image(uvec_pert, image=image, temperature=temperature)
uvec_pert = uvec - [[delta], [0], [0]]
pix_pert_x_b = cmodel.project_onto_image(uvec_pert, image=image, temperature=temperature)
uvec_pert = uvec - [[0], [delta], [0]]
pix_pert_y_b = cmodel.project_onto_image(uvec_pert, image=image, temperature=temperature)
uvec_pert = uvec - [[0], [0], [delta]]
pix_pert_z_b = cmodel.project_onto_image(uvec_pert, image=image, temperature=temperature)
return np.array([(pix_pert_x_f-pix_pert_x_b)/(2*delta),
(pix_pert_y_f-pix_pert_y_b)/(2*delta),
(pix_pert_z_f-pix_pert_z_b)/(2*delta)]).swapaxes(0, -1)
inputs = [np.array([[0.5, 0, 1]]).T,
np.array([[0, 0.5, 1], [0.5, 0.5, 1], [-0.5, 0, 1]]).T,
np.array([[0.1, -0.5, 1], [-0.5, -0.5, 1], [5, 10, 1000.23], [1, 2, 1200.23]]).T]
temperatures = [0, 1, -1, 10.5, -10.5]
model = self.Class(fx=4050.5, fy=5050.25, alpha=1.5, px=1500, py=1500.5,
a1=0.15e-7, a2=-0.01e-8, a3=1e-9,
k1=0.5, k2=-0.3, k3=0.15, p1=1e-7, p2=1e-6,
misalignment=[[1e-9, -1e-9, 2e-9], [-1e-9, 2e-9, -1e-9]])
model.estimate_multiple_misalignments = True
for temp in temperatures:
for input in inputs:
for image in range(2):
with self.subTest(image=image, temp=temp, input=input):
jac_ana = model.compute_pixel_jacobian(input, image=image, temperature=temp)
jac_num = num_deriv(input, model, image=image, temperature=temp, delta=1e-2)
np.testing.assert_allclose(jac_ana, jac_num, rtol=1e-3, atol=1e-10)
def test__compute_dcamera_point_dgnomic(self):
def num_deriv(gnomic_locations, cmodel, delta=1e-6) -> np.ndarray:
def g2u(g):
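# Convert gnomic (image-plane) locations to unit vectors by appending the focal length as the
# z component and normalizing each column.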
v = np.vstack([g, cmodel.focal_length*np.ones(g.shape[-1])])
return v/np.linalg.norm(v, axis=0, keepdims=True)
gnomic_locations = np.asarray(gnomic_locations).reshape(2, -1)
gnom_pert = gnomic_locations + [[delta], [0]]
cam_loc_pert_x_f = g2u(gnom_pert)
gnom_pert = gnomic_locations + [[0], [delta]]
cam_loc_pert_y_f = g2u(gnom_pert)
gnom_pert = gnomic_locations - [[delta], [0]]
cam_loc_pert_x_b = g2u(gnom_pert)
gnom_pert = gnomic_locations - [[0], [delta]]
cam_loc_pert_y_b = g2u(gnom_pert)
return np.array([(cam_loc_pert_x_f -cam_loc_pert_x_b)/(2*delta),
(cam_loc_pert_y_f -cam_loc_pert_y_b)/(2*delta)]).swapaxes(0, -1)
inputs = [np.array([[0, 0]]).T,
np.array([[0, 2000], [2000, 0], [2000, 2000]]).T,
np.array([[1000, 1000], [1000, 2000], [2000, 1000], [0, 1000], [1000, 0]]).T]
model = self.Class(fx=4050.5, fy=5050.25, alpha=1.5, px=1500, py=1500.5,
a1=0.15e-7, a2=-0.01e-8, a3=1e-9,
k1=0.5, k2=-0.3, k3=0.15, p1=1e-7, p2=1e-6,
misalignment=[[1e-9, -1e-9, 2e-9], [-1e-9, 2e-9, -1e-9]])
model.estimate_multiple_misalignments = True
for input in inputs:
with self.subTest(input=input):
jac_ana = []
for gnom in input.T:
jac_ana.append(
model._compute_dcamera_point_dgnomic(gnom, np.sqrt(np.sum(gnom*gnom) + model.focal_length**2)))
jac_ana = np.array(jac_ana)
jac_num = num_deriv(input, model)
np.testing.assert_almost_equal(jac_ana, jac_num)
def test__compute_dgnomic_ddist_gnomic(self):
def num_deriv(dist_gnomic_locations, cmodel, delta=1e-6) -> np.ndarray:
def dg2g(dg):
gnomic_guess = dg.copy()
# perform the fixed point algorithm (fpa) to invert the applied distortion
for _ in np.arange(20):
# get the distorted location assuming the current guess is correct
gnomic_guess_distorted = cmodel.apply_distortion(gnomic_guess)
# subtract off the residual distortion from the gnomic guess
gnomic_guess += dg - gnomic_guess_distorted
# check for convergence
if np.all(np.linalg.norm(gnomic_guess_distorted - dg, axis=0) <= 1e-15):
break
return gnomic_guess
dist_gnomic_locations = np.asarray(dist_gnomic_locations).reshape(2, -1)
dist_gnom_pert = dist_gnomic_locations + [[delta], [0]]
gnom_loc_pert_x_f = dg2g(dist_gnom_pert)
dist_gnom_pert = dist_gnomic_locations + [[0], [delta]]
gnom_loc_pert_y_f = dg2g(dist_gnom_pert)
dist_gnom_pert = dist_gnomic_locations - [[delta], [0]]
gnom_loc_pert_x_b = dg2g(dist_gnom_pert)
dist_gnom_pert = dist_gnomic_locations - [[0], [delta]]
gnom_loc_pert_y_b = dg2g(dist_gnom_pert)
return np.array([(gnom_loc_pert_x_f - gnom_loc_pert_x_b)/(2*delta),
(gnom_loc_pert_y_f - gnom_loc_pert_y_b)/(2*delta)]).swapaxes(0, -1)
inputs = [np.array([[0, 0]]).T,
np.array([[0, 0.1], [0.1, 0], [0.1, 0.1]]).T,
np.array([[-0.1, 0], [0, -0.1], [-0.1, -0.1], [0.1, -0.1], [-0.1, 0.1]]).T]
model = self.Class(fx=4050.5, fy=5050.25, alpha=1.5, px=1500, py=1500.5,
a1=0.15e-7, a2=-0.01e-8, a3=1e-9,
k1=0.5, k2=-0.3, k3=0.15, p1=1e-7, p2=1e-6,
misalignment=[[1e-9, -1e-9, 2e-9], [-1e-9, 2e-9, -1e-9]])
model.estimate_multiple_misalignments = True
for input in inputs:
with self.subTest(input=input):
jac_ana = []
for dist_gnom in input.T:
jac_ana.append(model._compute_dgnomic_ddist_gnomic(dist_gnom))
jac_ana = np.array(jac_ana)
jac_num = num_deriv(input, model, delta=1e-8)
np.testing.assert_allclose(jac_ana, jac_num, rtol=1e-1, atol=1e-10)
def test_compute_unit_vector_jacobian(self):
def num_deriv(pixels, cmodel, delta=1e-6, image=0, temperature=0) -> np.ndarray:
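# Numerically approximate the Jacobian of the unit vector returned by pixels_to_unit with
# respect to the pixel coordinates using central finite differences.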
pixels = np.array(pixels).reshape(2, -1)
pix_pert = pixels + [[delta], [0]]
uvec_pert_x_f = cmodel.pixels_to_unit(pix_pert, image=image, temperature=temperature)
pix_pert = pixels + [[0], [delta]]
uvec_pert_y_f = cmodel.pixels_to_unit(pix_pert, image=image, temperature=temperature)
pix_pert = pixels - [[delta], [0]]
uvec_pert_x_b = cmodel.pixels_to_unit(pix_pert, image=image, temperature=temperature)
pix_pert = pixels - [[0], [delta]]
uvec_pert_y_b = cmodel.pixels_to_unit(pix_pert, image=image, temperature=temperature)
return np.array([(uvec_pert_x_f-uvec_pert_x_b)/(2*delta),
(uvec_pert_y_f-uvec_pert_y_b)/(2*delta)]).swapaxes(0, -1)
inputs = [np.array([[0, 0]]).T,
np.array([[0, 2000], [2000, 0], [2000, 2000]]).T/10,
np.array([[1000, 1000], [1000, 2000], [2000, 1000], [0, 1000], [1000, 0]]).T/10]
temperatures = [0, 1, -1, 10.5, -10.5]
model = self.Class(fx=4050.5, fy=5050.25, alpha=1.5, px=100, py=100.5,
a1=0.15e-7, a2=-0.01e-8, a3=1e-9,
k1=0.05, k2=-0.03, k3=0.015, p1=1e-7, p2=1e-6,
misalignment=[[1e-9, -1e-9, 2e-9], [-1e-9, 2e-9, -1e-9]])
model.estimate_multiple_misalignments = True
for temp in temperatures:
for input in inputs:
for image in range(2):
with self.subTest(image=image, temp=temp, input=input):
jac_ana = model.compute_unit_vector_jacobian(input, image=image, temperature=temp)
jac_num = num_deriv(input, model, image=image, temperature=temp, delta=1e-2)
np.testing.assert_allclose(jac_ana, jac_num, rtol=1e-3, atol=1e-10)
def test__compute_ddistorted_gnomic_dgnomic(self):
def num_deriv(loc, cmodel, delta=1e-8) -> np.ndarray:
loc_pert = np.array(loc) + [delta, 0]
dist_pert_x_f = cmodel.apply_distortion(loc_pert)
loc_pert = np.array(loc) + [0, delta]
dist_pert_y_f = cmodel.apply_distortion(loc_pert)
loc_pert = np.array(loc) - [delta, 0]
dist_pert_x_b = cmodel.apply_distortion(loc_pert)
loc_pert = np.array(loc) - [0, delta]
dist_pert_y_b = cmodel.apply_distortion(loc_pert)
return np.array(
[(dist_pert_x_f - dist_pert_x_b) / (2 * delta), (dist_pert_y_f - dist_pert_y_b) / (2 * delta)]).T
dist_coefs = [{"k1": 1.5, "k2": 0, "k3": 0, "p1": 0, "p2": 0},
{"k1": 0, "k2": 1.5, "k3": 0, "p1": 0, "p2": 0},
{"k1": 0, "k2": 0, "k3": 1.5, "p1": 0, "p2": 0},
{"k1": 0, "k2": 0, "k3": 0, "p1": 1.5, "p2": 0},
{"k1": 0, "k2": 0, "k3": 0, "p1": 0, "p2": 1.5},
{"k1": -1.5, "k2": -1.5, "k3": -1.5, "p1": -1.5, "p2": -1.5}]
inputs = [[0, 0], [1, 0], [-1, 0], [1.5, 0], [-1.5, 0], [0, 1], [0, -1], [0, 1.5], [0, -1.5], [1, 1]]
for dist_coef in dist_coefs:
model = self.Class(**dist_coef)
with self.subTest(**dist_coef):
for inp in inputs:
r = np.sqrt(inp[0] ** 2 + inp[1] ** 2)
r2 = r ** 2
r4 = r ** 4
r6 = r ** 6
num = num_deriv(inp, model)
ana = model._compute_ddistorted_gnomic_dgnomic(np.array(inp), r2, r4, r6)
np.testing.assert_allclose(num, ana, atol=1e-14)
def test__compute_dpixel_ddistorted_gnomic(self):
def num_deriv(loc, cmodel, delta=1e-8, temperature=0) -> np.ndarray:
loc_pert = np.array(loc) + [delta, 0]
loc_pert *= cmodel.get_temperature_scale(temperature)
pix_pert_x_f = cmodel.intrinsic_matrix[:, :2] @ loc_pert + cmodel.intrinsic_matrix[:, 2]
loc_pert = np.array(loc) + [0, delta]
loc_pert *= cmodel.get_temperature_scale(temperature)
pix_pert_y_f = cmodel.intrinsic_matrix[:, :2] @ loc_pert + cmodel.intrinsic_matrix[:, 2]
loc_pert = np.array(loc) - [delta, 0]
loc_pert *= cmodel.get_temperature_scale(temperature)
pix_pert_x_b = cmodel.intrinsic_matrix[:, :2] @ loc_pert + cmodel.intrinsic_matrix[:, 2]
loc_pert = np.array(loc) - [0, delta]
loc_pert *= cmodel.get_temperature_scale(temperature)
pix_pert_y_b = cmodel.intrinsic_matrix[:, :2] @ loc_pert + cmodel.intrinsic_matrix[:, 2]
return np.array(
[(pix_pert_x_f - pix_pert_x_b) / (2 * delta), (pix_pert_y_f - pix_pert_y_b) / (2 * delta)]).T
intrins_coefs = [{"fx": 1.5, "fy": 0, "alpha": 0, "px": 0, "py": 0},
{"fx": 0, "fy": 1.5, "alpha": 0, "px": 0, "py": 0},
{"fx": 0, "fy": 0, "alpha": 1.5, "px": 0, "py": 0},
{"fx": 0, "fy": 0, "alpha": 0, "px": 1.5, "py": 0},
{"fx": 0, "fy": 0, "alpha": 0, "px": 0, "py": 1.5},
{"fx": -1.5, "fy": -1.5, "alpha": -1.5, "px": 1.5, "py": 1.5}]
inputs = [[0, 0], [1, 0], [-1, 0], [1.5, 0], [-1.5, 0], [0, 1], [0, -1], [0, 1.5], [0, -1.5], [1, 1]]
temps = [0, 1, -1, 10.5, -10.5]
for temp in temps:
for intrins_coef in intrins_coefs:
model = self.Class(**intrins_coef)
with self.subTest(**intrins_coef, temp=temp):
for inp in inputs:
num = num_deriv(np.array(inp), model, temperature=temp)
ana = model._compute_dpixel_ddistorted_gnomic(temperature=temp)
np.testing.assert_allclose(num, ana, atol=1e-14)
def test__compute_dpixel_dintrinsic(self):
def num_deriv(loc, cmodel, delta=1e-6) -> np.ndarray:
model_pert = cmodel.copy()
model_pert.kx += delta
pix_pert_kx_f = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.kxy += delta
pix_pert_kxy_f = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.ky += delta
pix_pert_ky_f = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.px += delta
pix_pert_px_f = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.py += delta
pix_pert_py_f = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.kx -= delta
pix_pert_kx_b = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.kxy -= delta
pix_pert_kxy_b = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.ky -= delta
pix_pert_ky_b = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.px -= delta
pix_pert_px_b = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.py -= delta
pix_pert_py_b = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
return np.array([(pix_pert_kx_f - pix_pert_kx_b) / (2 * delta),
(pix_pert_ky_f - pix_pert_ky_b) / (2 * delta),
(pix_pert_kxy_f - pix_pert_kxy_b) / (2 * delta),
(pix_pert_px_f - pix_pert_px_b) / (2 * delta),
(pix_pert_py_f - pix_pert_py_b) / (2 * delta)]).T
intrins_coefs = [{"fx": 1.5, "fy": 0, "alpha": 0, "px": 0, "py": 0},
{"fx": 0, "fy": 1.5, "alpha": 0, "px": 0, "py": 0},
{"fx": 0, "fy": 0, "alpha": 1.5, "px": 0, "py": 0},
{"fx": 0, "fy": 0, "alpha": 0, "px": 1.5, "py": 0},
{"fx": 0, "fy": 0, "alpha": 0, "px": 0, "py": 1.5},
{"fx": -1.5, "fy": -1.5, "alpha": -1.5, "px": 1.5, "py": 1.5}]
inputs = [[1e-6, 1e-6], [1, 0], [-1, 0], [1.5, 0], [-1.5, 0], [0, 1], [0, -1], [0, 1.5], [0, -1.5], [1, 1]]
for intrins_coef in intrins_coefs:
model = self.Class(**intrins_coef)
for inp in inputs:
with self.subTest(**intrins_coef, inp=inp):
num = num_deriv(inp, model, delta=1e-5)
ana = model._compute_dpixel_dintrinsic(np.array(inp))
np.testing.assert_allclose(num, ana, atol=1e-14, rtol=1e-5)
def test__compute_ddistorted_gnomic_ddistortion(self):
def num_deriv(loc, cmodel, delta=1e-8) -> np.ndarray:
model_pert = cmodel.copy()
model_pert.k1 += delta
loc_pert_k1_f = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.k2 += delta
loc_pert_k2_f = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.k3 += delta
loc_pert_k3_f = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.p1 += delta
loc_pert_p1_f = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.p2 += delta
loc_pert_p2_f = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.k1 -= delta
loc_pert_k1_b = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.k2 -= delta
loc_pert_k2_b = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.k3 -= delta
loc_pert_k3_b = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.p1 -= delta
loc_pert_p1_b = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.p2 -= delta
loc_pert_p2_b = model_pert.apply_distortion(loc)
return np.array([(loc_pert_k1_f - loc_pert_k1_b) / (2 * delta),
(loc_pert_k2_f - loc_pert_k2_b) / (2 * delta),
(loc_pert_k3_f - loc_pert_k3_b) / (2 * delta),
(loc_pert_p1_f - loc_pert_p1_b) / (2 * delta),
(loc_pert_p2_f - loc_pert_p2_b) / (2 * delta)]).T
dist_coefs = [{"k1": 1.5, "k2": 0, "k3": 0, "p1": 0, "p2": 0},
{"k1": 0, "k2": 1.5, "k3": 0, "p1": 0, "p2": 0},
{"k1": 0, "k2": 0, "k3": 1.5, "p1": 0, "p2": 0},
{"k1": 0, "k2": 0, "k3": 0, "p1": 1.5, "p2": 0},
{"k1": 0, "k2": 0, "k3": 0, "p1": 0, "p2": 1.5}]
inputs = [[0, 0], [1, 0], [-1, 0], [1.5, 0], [-1.5, 0], [0, 1], [0, -1], [0, 1.5], [0, -1.5], [1, 1]]
for dist_coef in dist_coefs:
model = self.Class(**dist_coef)
with self.subTest(**dist_coef):
for inp in inputs:
r = np.sqrt(inp[0] ** 2 + inp[1] ** 2)
r2 = r ** 2
r4 = r ** 4
r6 = r ** 6
num = num_deriv(np.array(inp), model)
ana = model._compute_ddistorted_gnomic_ddistortion(np.array(inp), r2, r4, r6)
np.testing.assert_allclose(num, ana, atol=1e-14)
def test__compute_dgnomic_dcamera_point(self):
def num_deriv(loc, cmodel, delta=1e-8) -> np.ndarray:
loc_pert = np.array(loc) + [delta, 0, 0]
gnom_pert_x_f = cmodel.get_projections(loc_pert)[0]
loc_pert = np.array(loc) + [0, delta, 0]
gnom_pert_y_f = cmodel.get_projections(loc_pert)[0]
loc_pert = np.array(loc) + [0, 0, delta]
gnom_pert_z_f = cmodel.get_projections(loc_pert)[0]
loc_pert = np.array(loc) - [delta, 0, 0]
gnom_pert_x_b = cmodel.get_projections(loc_pert)[0]
loc_pert = np.array(loc) - [0, delta, 0]
gnom_pert_y_b = cmodel.get_projections(loc_pert)[0]
loc_pert = np.array(loc) - [0, 0, delta]
gnom_pert_z_b = cmodel.get_projections(loc_pert)[0]
return np.array([(gnom_pert_x_f - gnom_pert_x_b) / (2 * delta),
(gnom_pert_y_f - gnom_pert_y_b) / (2 * delta),
(gnom_pert_z_f - gnom_pert_z_b) / (2 * delta)]).T
inputs = [[0, 0, 1], [0.5, 0, 1], [0, 0.5, 1], [0.5, 0.5, 1], [-0.5, 0, 1], [0, -0.5, 1], [-0.5, -0.5, 1],
[5, 10, 1000.23], [0.5, 1e-14, 1]]
model = self.Class()
for inp in inputs:
num = num_deriv(inp, model)
ana = model._compute_dgnomic_dcamera_point(np.array(inp))
np.testing.assert_allclose(num, ana, atol=1e-9, rtol=1e-5)
def test_get_jacobian_row(self):
def num_deriv(loc, temp, cmodel, delta=1e-8, image=0) -> np.ndarray:
model_pert = cmodel.copy()
model_pert.fx += delta
pix_pert_fx_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.fy += delta
pix_pert_fy_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.alpha += delta
pix_pert_skew_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.px += delta
pix_pert_px_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.py += delta
pix_pert_py_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.a1 += delta
pix_pert_a1_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.a2 += delta
pix_pert_a2_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.a3 += delta
pix_pert_a3_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.fx -= delta
pix_pert_fx_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.fy -= delta
pix_pert_fy_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.alpha -= delta
pix_pert_skew_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.px -= delta
pix_pert_px_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.py -= delta
pix_pert_py_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.k1 += delta
pix_pert_k1_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.k2 += delta
pix_pert_k2_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.k3 += delta
pix_pert_k3_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.p1 += delta
pix_pert_p1_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.p2 += delta
pix_pert_p2_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.k1 -= delta
pix_pert_k1_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.k2 -= delta
pix_pert_k2_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.k3 -= delta
pix_pert_k3_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.p1 -= delta
pix_pert_p1_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.p2 -= delta
pix_pert_p2_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][0] += delta
pix_pert_mx_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][1] += delta
pix_pert_my_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][2] += delta
pix_pert_mz_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][0] -= delta
pix_pert_mx_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][1] -= delta
pix_pert_my_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][2] -= delta
pix_pert_mz_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.a1 -= delta
pix_pert_a1_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.a2 -= delta
pix_pert_a2_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.a3 -= delta
pix_pert_a3_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
return np.vstack([(pix_pert_fx_f - pix_pert_fx_b) / (delta * 2),
(pix_pert_fy_f - pix_pert_fy_b) / (delta * 2),
(pix_pert_skew_f - pix_pert_skew_b) / (delta * 2),
(pix_pert_px_f - pix_pert_px_b) / (delta * 2),
(pix_pert_py_f - pix_pert_py_b) / (delta * 2),
(pix_pert_k1_f - pix_pert_k1_b) / (delta * 2),
(pix_pert_k2_f - pix_pert_k2_b) / (delta * 2),
(pix_pert_k3_f - pix_pert_k3_b) / (delta * 2),
(pix_pert_p1_f - pix_pert_p1_b) / (delta * 2),
(pix_pert_p2_f - pix_pert_p2_b) / (delta * 2),
(pix_pert_a1_f - pix_pert_a1_b) / (delta * 2),
(pix_pert_a2_f - pix_pert_a2_b) / (delta * 2),
(pix_pert_a3_f - pix_pert_a3_b) / (delta * 2),
np.zeros((image * 3, 2)),
(pix_pert_mx_f - pix_pert_mx_b) / (delta * 2),
(pix_pert_my_f - pix_pert_my_b) / (delta * 2),
(pix_pert_mz_f - pix_pert_mz_b) / (delta * 2)]).T
model = self.Class(fx=4050.5, fy=5050.25, alpha=1.5, px=1500, py=1500.5,
a1=0.15e-7, a2=-0.01e-8, a3=1e-9,
k1=0.5, k2=-0.3, k3=0.15, p1=1e-7, p2=1e-6, misalignment=[[1e-9, -1e-9, 2e-9],
[-1e-9, 2e-9, -1e-9]])
model.estimate_multiple_misalignments = True
inputs = [[0.5, 0, 1], [0, 0.5, 1], [0.5, 0.5, 1], [-0.5, 0, 1], [0, -0.5, 1], [-0.5, -0.5, 1],
[5, 10, 1000.23], [[1], [2], [1200.23]]]
temps = [0, 1.5, -10]
# TODO: investigate if this is actually correct
for temperature in temps:
for inp in inputs:
with self.subTest(temperature=temperature, inp=inp):
num = num_deriv(inp, temperature, model, delta=1e-2)
ana = model._get_jacobian_row(np.array(inp), 0, 1, temperature=temperature)
np.testing.assert_allclose(ana, num, rtol=1e-1, atol=1e-10)
num = num_deriv(inp, temperature, model, delta=1e-2, image=1)
ana = model._get_jacobian_row(np.array(inp), 1, 2, temperature=temperature)
np.testing.assert_allclose(ana, num, atol=1e-10, rtol=1e-1)
def test_compute_jacobian(self):
def num_deriv(loc, temp, cmodel, delta=1e-8, image=0, nimages=2) -> np.ndarray:
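# Numerically approximate one block of the calibration Jacobian (pixel coordinates with respect
# to every estimated parameter) using central finite differences; misalignment columns belonging
# to the other images are filled with zeros.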
model_pert = cmodel.copy()
model_pert.fx += delta
pix_pert_fx_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.fy += delta
pix_pert_fy_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.alpha += delta
pix_pert_skew_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.px += delta
pix_pert_px_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.py += delta
pix_pert_py_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.a1 += delta
pix_pert_a1_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.a2 += delta
pix_pert_a2_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.a3 += delta
pix_pert_a3_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.fx -= delta
pix_pert_fx_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.fy -= delta
pix_pert_fy_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.alpha -= delta
pix_pert_skew_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.px -= delta
pix_pert_px_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.py -= delta
pix_pert_py_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.k1 += delta
pix_pert_k1_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.k2 += delta
pix_pert_k2_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.k3 += delta
pix_pert_k3_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.p1 += delta
pix_pert_p1_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.p2 += delta
pix_pert_p2_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.k1 -= delta
pix_pert_k1_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.k2 -= delta
pix_pert_k2_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.k3 -= delta
pix_pert_k3_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.p1 -= delta
pix_pert_p1_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.p2 -= delta
pix_pert_p2_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][0] += delta
pix_pert_mx_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][1] += delta
pix_pert_my_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][2] += delta
pix_pert_mz_f = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][0] -= delta
pix_pert_mx_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][1] -= delta
pix_pert_my_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][2] -= delta
pix_pert_mz_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.a1 -= delta
pix_pert_a1_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.a2 -= delta
pix_pert_a2_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
model_pert = cmodel.copy()
model_pert.a3 -= delta
pix_pert_a3_b = model_pert.project_onto_image(loc, image=image, temperature=temp).flatten()
return np.vstack([(pix_pert_fx_f - pix_pert_fx_b) / (delta * 2),
(pix_pert_fy_f - pix_pert_fy_b) / (delta * 2),
(pix_pert_skew_f - pix_pert_skew_b) / (delta * 2),
(pix_pert_px_f - pix_pert_px_b) / (delta * 2),
(pix_pert_py_f - pix_pert_py_b) / (delta * 2),
(pix_pert_k1_f - pix_pert_k1_b) / (delta * 2),
(pix_pert_k2_f - pix_pert_k2_b) / (delta * 2),
(pix_pert_k3_f - pix_pert_k3_b) / (delta * 2),
(pix_pert_p1_f - pix_pert_p1_b) / (delta * 2),
(pix_pert_p2_f - pix_pert_p2_b) / (delta * 2),
(pix_pert_a1_f - pix_pert_a1_b) / (delta * 2),
(pix_pert_a2_f - pix_pert_a2_b) / (delta * 2),
(pix_pert_a3_f - pix_pert_a3_b) / (delta * 2),
np.zeros((image * 3, 2)),
(pix_pert_mx_f - pix_pert_mx_b) / (delta * 2),
(pix_pert_my_f - pix_pert_my_b) / (delta * 2),
(pix_pert_mz_f - pix_pert_mz_b) / (delta * 2),
np.zeros(((nimages - image - 1) * 3, 2))]).T
model = self.Class(fx=4050.5, fy=5050.25, alpha=1.5, px=1500, py=1500.5,
k1=0.5, k2=-0.3, k3=0.15, p1=1e-7, p2=1e-6, misalignment=[[1e-9, -1e-9, 2e-9],
[-1e-9, 2e-9, -1e-9],
[1e-10, 2e-11, 3e-12]],
a1=0.15e-6, a2=-0.01e-7, a3=0.5e-8,
estimation_parameters=['intrinsic', 'temperature dependence', 'multiple misalignments'])
inputs = [np.array([[0.5, 0, 1]]).T,
np.array([[0, 0.5, 1], [0.5, 0.5, 1], [-0.5, 0, 1]]).T,
np.array([[0.1, -0.5, 1], [-0.5, -0.5, 1], [5, 10, 1000.23], [1, 2, 1200.23]]).T]
model.use_a_priori = False
temps = [0, -20, 20.5]
jac_ana = model.compute_jacobian(inputs, temperature=temps)
jac_num = []
numim = len(inputs)
for ind, inp in enumerate(inputs):
temperature = temps[ind]
for vec in inp.T:
jac_num.append(num_deriv(vec.T, temperature, model, delta=1e-3, image=ind, nimages=numim))
np.testing.assert_allclose(jac_ana, np.vstack(jac_num), rtol=1e-1, atol=1e-9)
model.use_a_priori = True
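# With use_a_priori enabled the analytic Jacobian gains an appended identity block (one row per
# estimated parameter), which the numeric comparison reproduces below by padding with np.eye.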
jac_ana = model.compute_jacobian(inputs, temperature=temps)
jac_num = []
numim = len(inputs)
for ind, inp in enumerate(inputs):
temperature = temps[ind]
for vec in inp.T:
jac_num.append(num_deriv(vec.T, temperature, model, delta=1e-3, image=ind, nimages=numim))
jac_num = np.vstack(jac_num)
jac_num = np.pad(jac_num, [(0, jac_num.shape[1]), (0, 0)], 'constant', constant_values=0)
jac_num[-jac_num.shape[1]:] = np.eye(jac_num.shape[1])
np.testing.assert_allclose(jac_ana, jac_num, rtol=1e-1, atol=1e-9)
def test_apply_update(self):
model_param = {"fx": 0, "fy": 0, "alpha": 0, "k1": 0,
"k2": 0, "k3": 0, "p1": 0, "p2": 0, 'a1': 0, 'a2': 0, 'a3': 0,
"px": 0, "py": 0,
"misalignment": [[0, 0, 0], [0, 0, 0]]}
model = self.Class(**model_param,
estimation_parameters=['intrinsic', 'temperature dependence', 'multiple misalignments'])
update_vec = np.arange(19)
model.apply_update(update_vec)
keys = list(model_param.keys())
keys.remove('misalignment')
for key in keys:
self.assertEqual(getattr(model, key), update_vec[model.element_dict[key][0]])
for ind, vec in enumerate(update_vec[13:].reshape(-1, 3)):
np.testing.assert_array_almost_equal(at.Rotation(vec).q, at.Rotation(model.misalignment[ind]).q)
def test_pixels_to_gnomic(self):
intrins_param = {"fx": 3000, "fy": 4000, "alpha": 0.5,
"px": 4005.23, 'py': 2000.33, 'a1': 1e-5, 'a2': 1e-6, 'a3': -1e-7}
dist_coefs = [{"k1": 1.5e-1, "k2": 0, "k3": 0, "p1": 0, "p2": 0},
{"k1": 0, "k2": 1.5e-1, "k3": 0, "p1": 0, "p2": 0},
{"k1": 0, "k2": 0, "k3": 1.5e-1, "p1": 0, "p2": 0},
{"k1": 0, "k2": 0, "k3": 0, "p1": 1.5e-6, "p2": 0},
{"k1": 0, "k2": 0, "k3": 0, "p1": 0, "p2": 1.5e-6}]
pinhole = [[0, 0], [0.1, 0], [-0.1, 0], [0.15, 0], [-0.15, 0], [[0.15], [0]], [[0.15, -0.1], [0, 0]],
[0, 0.1], [0, -0.1], [0, 0.15], [0, -0.15], [[0], [0.15]], [[0, 0], [0.15, -0.1]], [0.1, 0.1]]
temperatures = [0, 1, -1, 10.5, -10.5]
for temp in temperatures:
for dist in dist_coefs:
model = self.Class(**dist, **intrins_param)
for fp_pinhole in pinhole:
with self.subTest(**dist, temp=temp, fp_pinhole=fp_pinhole):
fp_dist = model.apply_distortion(np.array(fp_pinhole))
fp_dist *= model.get_temperature_scale(temp)
pix_dist = ((model.intrinsic_matrix[:, :2] @ fp_dist).T + model.intrinsic_matrix[:, 2]).T
fp_undist = model.pixels_to_gnomic(pix_dist, temperature=temp)
np.testing.assert_allclose(fp_undist, fp_pinhole, atol=1e-13)
def test_undistort_pixels(self):
intrins_param = {"fx": 3000, "fy": 4000, "alpha": 0.5,
"px": 4005.23, 'py': 2000.33, 'a1': 1e-5, 'a2': 1e-6, 'a3': -1e-7}
dist_coefs = [{"k1": 1.5e-1, "k2": 0, "k3": 0, "p1": 0, "p2": 0},
{"k1": 0, "k2": 1.5e-1, "k3": 0, "p1": 0, "p2": 0},
{"k1": 0, "k2": 0, "k3": 1.5e-1, "p1": 0, "p2": 0},
{"k1": 0, "k2": 0, "k3": 0, "p1": 1.5e-6, "p2": 0},
{"k1": 0, "k2": 0, "k3": 0, "p1": 0, "p2": 1.5e-6}]
pinhole = [[0, 0], [0.1, 0], [-0.1, 0], [0.15, 0], [-0.15, 0], [[0.15], [0]], [[0.15, -0.1], [0, 0]],
[0, 0.1], [0, -0.1], [0, 0.15], [0, -0.15], [[0], [0.15]], [[0, 0], [0.15, -0.1]], [0.1, 0.1]]
temperatures = [0, 1, -1, 10.5, -10.5]
for temp in temperatures:
for dist in dist_coefs:
model = self.Class(**dist, **intrins_param)
with self.subTest(**dist, temp=temp):
for fp_pinhole in pinhole:
fp_pinhole = np.array(fp_pinhole).astype(np.float64)
fp_dist = model.apply_distortion(fp_pinhole)
fp_dist *= model.get_temperature_scale(temp)
pix_dist = ((model.intrinsic_matrix[:, :2] @ fp_dist).T + model.intrinsic_matrix[:, 2]).T
pix_undist = model.undistort_pixels(pix_dist, temperature=temp)
fp_pinhole *= model.get_temperature_scale(temp)
pix_pinhole = ((model.intrinsic_matrix[:, :2] @ fp_pinhole).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_allclose(pix_undist, pix_pinhole, atol=1e-13)
def test_pixels_to_unit(self):
intrins_param = {"fx": 3000, "fy": 4000, "alpha": 0.5,
"px": 4005.23, 'py': 2000.33, 'a1': 1e-6, 'a2': -2e-7, 'a3': 4.5e-8}
dist_coefs = [{"k1": 1.5e-1, "k2": 0, "k3": 0, "p1": 0, "p2": 0},
{"k1": 0, "k2": 1.5e-1, "k3": 0, "p1": 0, "p2": 0},
{"k1": 0, "k2": 0, "k3": 1.5e-1, "p1": 0, "p2": 0},
{"k1": 0, "k2": 0, "k3": 0, "p1": 1.5e-6, "p2": 0},
{"k1": 0, "k2": 0, "k3": 0, "p1": 0, "p2": 1.5e-6},
{"misalignment": np.array([1e-11, 2e-12, -1e-10])},
{"misalignment": np.array([[1e-11, 2e-12, -1e-10], [-1e-13, 1e-11, 2e-12]]),
"estimation_parameters": "multiple misalignments"}]
camera_vecs = [[0, 0, 1], [0.1, 0, 1], [-0.1, 0, 1], [0, 0.1, 1], [0, -0.1, 1], [0.1, 0.1, 1],
[-0.1, -0.1, 1], [[0.1, -0.1], [-0.1, 0.1], [1, 1]]]
temperatures = [0, 1, -1, 10.5, -10.5]
for temp in temperatures:
for dist in dist_coefs:
model = self.Class(**dist, **intrins_param)
with self.subTest(**dist, temp=temp):
for vec in camera_vecs:
pixel_loc = model.project_onto_image(vec, image=-1, temperature=temp)
unit_vec = model.pixels_to_unit(pixel_loc, image=-1, temperature=temp)
unit_true = np.array(vec).astype(np.float64)
unit_true /= np.linalg.norm(unit_true, axis=0, keepdims=True)
np.testing.assert_allclose(unit_vec, unit_true, atol=1e-13)
def test_overwrite(self):
model1 = self.Class(field_of_view=10, intrinsic_matrix=np.array([[1, 2, 3], [0, 5, 6]]),
distortion_coefficients=np.array([1, 2, 3, 4, 5]),
misalignment=[[1, 2, 3], [4, 5, 6]], use_a_priori=False,
estimation_parameters=['multiple misalignments'])
model2 = self.Class(field_of_view=20, intrinsic_matrix=np.array([[11, 12, 13], [0, 15, 16]]),
distortion_coefficients=np.array([11, 12, 13, 14, 15]),
misalignment=[[11, 12, 13], [14, 15, 16]], use_a_priori=True,
estimation_parameters=['single misalignment'])
modeltest = model1.copy()
modeltest.overwrite(model2)
self.assertEqual(model2.field_of_view, modeltest.field_of_view)
self.assertEqual(model2.use_a_priori, modeltest.use_a_priori)
self.assertEqual(model2.estimate_multiple_misalignments, modeltest.estimate_multiple_misalignments)
np.testing.assert_array_equal(model2.intrinsic_matrix, modeltest.intrinsic_matrix)
np.testing.assert_array_equal(model2.distortion_coefficients, modeltest.distortion_coefficients)
np.testing.assert_array_equal(model2.misalignment, modeltest.misalignment)
np.testing.assert_array_equal(model2.estimation_parameters, modeltest.estimation_parameters)
modeltest = model2.copy()
modeltest.overwrite(model1)
self.assertEqual(model1.field_of_view, modeltest.field_of_view)
self.assertEqual(model1.use_a_priori, modeltest.use_a_priori)
self.assertEqual(model1.estimate_multiple_misalignments, modeltest.estimate_multiple_misalignments)
np.testing.assert_array_equal(model1.intrinsic_matrix, modeltest.intrinsic_matrix)
np.testing.assert_array_equal(model1.distortion_coefficients, modeltest.distortion_coefficients)
np.testing.assert_array_equal(model1.misalignment, modeltest.misalignment)
np.testing.assert_array_equal(model1.estimation_parameters, modeltest.estimation_parameters)
def test_distort_pixels(self):
model = self.Class(kx=1000, ky=-950.5, px=4500, py=139.32, a1=1e-3, a2=1e-4, a3=1e-5,
kxy=0.5, radial2=1e-5, radial4=1e-5, radial6=1e-7,
tiptilt_x=1e-6, tiptilt_y=2e-12)
pixels = [[0, 1], [1, 0], [-1, 0], [0, -1], [9000., 200.2],
[[4500, 100, 10.98], [0, 139.23, 200.3]]]
temperatures = [0, 1, -1, 10.5, -10.5]
for pix in pixels:
for temp in temperatures:
with self.subTest(pix=pix, temp=temp):
undist_pix = model.undistort_pixels(pix, temperature=temp)
dist_pix = model.distort_pixels(undist_pix, temperature=temp)
np.testing.assert_allclose(dist_pix, pix, atol=1e-10)
def test_to_from_elem(self):
element = etree.Element(self.Class.__name__)
model = self.Class(field_of_view=5, use_a_priori=True,
misalignment=[1, 2, 3], kx=2, ky=200, px=50, py=300, kxy=12123,
a1=37, a2=1, a3=-1230, k1=5, k2=10, k3=20, p1=-10, p2=35,
estimation_parameters=['kx', 'multiple misalignments'], n_rows=20, n_cols=30)
model_copy = model.copy()
with self.subTest(misalignment=True):
element = model.to_elem(element, misalignment=True)
self.assertEqual(model, model_copy)
model_new = self.Class.from_elem(element)
self.assertEqual(model, model_new)
with self.subTest(misalignment=False):
element = model.to_elem(element, misalignment=False)
self.assertEqual(model, model_copy)
model_new = self.Class.from_elem(element)
model.estimation_parameters[-1] = 'single misalignment'
model.estimate_multiple_misalignments = False
model.misalignment = np.zeros(3)
self.assertEqual(model, model_new)
def test_distortion_map(self):
model = self.Class(kx=100, ky=-985.234, px=1000, py=1095, kxy=10,
k1=1e-6, k2=1e-12, k3=-4e-10, p1=6e-7, p2=-1e-5,
a1=1e-6, a2=-1e-7, a3=4e-12)
rows, cols, dist = model.distortion_map((2000, 250), step=10)
rl, cl = np.arange(0, 2000, 10), np.arange(0, 250, 10)
rs, cs = np.meshgrid(rl, cl, indexing='ij')
np.testing.assert_array_equal(rows, rs)
np.testing.assert_array_equal(cols, cs)
distl = model.distort_pixels(np.vstack([cs.flatten(), rs.flatten()]))
np.testing.assert_array_equal(distl - np.vstack([cs.flatten(), rs.flatten()]), dist)
class TestOpenCVModel(TestPinholeModel):
def setUp(self):
self.Class = OpenCVModel
# Not supported for this model
test__compute_dgnomic_dfocal_length = None
def test___init__(self):
model = self.Class(intrinsic_matrix=np.array([[1, 2, 3], [4, 5, 6]]), field_of_view=20.5,
use_a_priori=True, misalignment=[np.zeros(3), np.ones(3)],
distortion_coefficients=np.array([1, 2, 3, 4, 5]),
estimation_parameters='basic intrinsic',
a1=1, a2=2, a3=3)
np.testing.assert_array_equal(model.intrinsic_matrix, [[1, 2, 3], [4, 5, 6]])
self.assertEqual(model.focal_length, 1)
self.assertEqual(model.field_of_view, 20.5)
self.assertTrue(model.use_a_priori)
self.assertEqual(model.estimation_parameters, ['basic intrinsic'])
self.assertEqual(model.a1, 1)
self.assertEqual(model.a2, 2)
self.assertEqual(model.a3, 3)
np.testing.assert_array_equal(model.distortion_coefficients, np.arange(1, 6))
model = self.Class(kx=1, fy=2, px=4, py=5, field_of_view=20.5,
use_a_priori=True, misalignment=[np.zeros(3), np.ones(3)], kxy=80,
estimation_parameters=['kx', 'px'], n_rows=500, n_cols=600,
radial2n=1, radial4n=2, k3=3, p1=4, tiptilt_x=5, radial2d=9, k5=100, k6=-90,
s1=400, thinprism_2=-500, s3=600, s4=5)
np.testing.assert_array_equal(model.intrinsic_matrix, [[1, 80, 4], [0, 2, 5]])
self.assertEqual(model.focal_length, 1)
self.assertEqual(model.field_of_view, 20.5)
self.assertTrue(model.use_a_priori)
self.assertEqual(model.estimation_parameters, ['kx', 'px'])
self.assertEqual(model.n_rows, 500)
self.assertEqual(model.n_cols, 600)
np.testing.assert_array_equal(model.distortion_coefficients, [1, 2, 3, 9, 100, -90, 4, 5, 400, -500, 600, 5])
def test_fx(self):
model = self.Class(intrinsic_matrix=np.array([[1, 0, 0], [0, 0, 0]]))
self.assertEqual(model.kx, 1)
model.kx = 100
self.assertEqual(model.kx, 100)
self.assertEqual(model.intrinsic_matrix[0, 0], 100)
def test_fy(self):
model = self.Class(intrinsic_matrix=np.array([[0, 0, 0], [0, 1, 0]]))
self.assertEqual(model.ky, 1)
model.ky = 100
self.assertEqual(model.ky, 100)
self.assertEqual(model.intrinsic_matrix[1, 1], 100)
def test_kxy(self):
model = self.Class(intrinsic_matrix=np.array([[0, 1, 0], [0, 0, 0]]))
self.assertEqual(model.kxy, 1)
model.kxy = 100
self.assertEqual(model.kxy, 100)
self.assertEqual(model.intrinsic_matrix[0, 1], 100)
def test_alpha(self):
model = self.Class(intrinsic_matrix=np.array([[0, 1, 0], [0, 0, 0]]))
self.assertEqual(model.alpha, 1)
model.alpha = 100
self.assertEqual(model.alpha, 100)
self.assertEqual(model.intrinsic_matrix[0, 1], 100)
def test_k1(self):
model = self.Class(distortion_coefficients=np.array([1, 0, 0, 0, 0]))
self.assertEqual(model.k1, 1)
model.k1 = 100
self.assertEqual(model.k1, 100)
self.assertEqual(model.distortion_coefficients[0], 100)
def test_k2(self):
model = self.Class(distortion_coefficients=np.array([0, 1, 0, 0, 0]))
self.assertEqual(model.k2, 1)
model.k2 = 100
self.assertEqual(model.k2, 100)
self.assertEqual(model.distortion_coefficients[1], 100)
def test_k3(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0]))
self.assertEqual(model.k3, 1)
model.k3 = 100
self.assertEqual(model.k3, 100)
self.assertEqual(model.distortion_coefficients[2], 100)
def test_k4(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0]))
self.assertEqual(model.k4, 1)
model.k4 = 100
self.assertEqual(model.k4, 100)
self.assertEqual(model.distortion_coefficients[3], 100)
def test_k5(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0]))
self.assertEqual(model.k5, 1)
model.k5 = 100
self.assertEqual(model.k5, 100)
self.assertEqual(model.distortion_coefficients[4], 100)
def test_k6(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0]))
self.assertEqual(model.k6, 1)
model.k6 = 100
self.assertEqual(model.k6, 100)
self.assertEqual(model.distortion_coefficients[5], 100)
def test_p1(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 0, 0, 0, 0, 1, 0]))
self.assertEqual(model.p1, 1)
model.p1 = 100
self.assertEqual(model.p1, 100)
self.assertEqual(model.distortion_coefficients[6], 100)
def test_p2(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 0, 0, 0, 0, 0, 1]))
self.assertEqual(model.p2, 1)
model.p2 = 100
self.assertEqual(model.p2, 100)
self.assertEqual(model.distortion_coefficients[7], 100)
def test_s1(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0]))
self.assertEqual(model.s1, 1)
model.s1 = 100
self.assertEqual(model.s1, 100)
self.assertEqual(model.distortion_coefficients[8], 100)
def test_s2(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0]))
self.assertEqual(model.s2, 1)
model.s2 = 100
self.assertEqual(model.s2, 100)
self.assertEqual(model.distortion_coefficients[9], 100)
def test_s3(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0]))
self.assertEqual(model.s3, 1)
model.s3 = 100
self.assertEqual(model.s3, 100)
self.assertEqual(model.distortion_coefficients[10], 100)
def test_s4(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]))
self.assertEqual(model.s4, 1)
model.s4 = 100
self.assertEqual(model.s4, 100)
self.assertEqual(model.distortion_coefficients[11], 100)
def test_radial2n(self):
model = self.Class(distortion_coefficients=np.array([1, 0, 0, 0, 0]))
self.assertEqual(model.radial2n, 1)
model.radial2n = 100
self.assertEqual(model.radial2n, 100)
self.assertEqual(model.distortion_coefficients[0], 100)
def test_radial4n(self):
model = self.Class(distortion_coefficients=np.array([0, 1, 0, 0, 0]))
self.assertEqual(model.radial4n, 1)
model.radial4n = 100
self.assertEqual(model.radial4n, 100)
self.assertEqual(model.distortion_coefficients[1], 100)
def test_radial6n(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 1, 0, 0]))
self.assertEqual(model.radial6n, 1)
model.radial6n = 100
self.assertEqual(model.radial6n, 100)
self.assertEqual(model.distortion_coefficients[2], 100)
def test_radial2d(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0]))
self.assertEqual(model.radial2d, 1)
model.radial2d = 100
self.assertEqual(model.radial2d, 100)
self.assertEqual(model.distortion_coefficients[3], 100)
def test_radial4d(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0]))
self.assertEqual(model.radial4d, 1)
model.radial4d = 100
self.assertEqual(model.radial4d, 100)
self.assertEqual(model.distortion_coefficients[4], 100)
def test_radial6d(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0]))
self.assertEqual(model.radial6d, 1)
model.radial6d = 100
self.assertEqual(model.radial6d, 100)
self.assertEqual(model.distortion_coefficients[5], 100)
def test_tiptilt_y(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 0, 0, 0, 0, 1, 0]))
self.assertEqual(model.tiptilt_y, 1)
model.tiptilt_y = 100
self.assertEqual(model.tiptilt_y, 100)
self.assertEqual(model.distortion_coefficients[6], 100)
def test_tiptilt_x(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 0, 0, 0, 0, 0, 1]))
self.assertEqual(model.tiptilt_x, 1)
model.tiptilt_x = 100
self.assertEqual(model.tiptilt_x, 100)
self.assertEqual(model.distortion_coefficients[7], 100)
def test_thinprism_1(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0]))
self.assertEqual(model.thinprism_1, 1)
model.thinprism_1 = 100
self.assertEqual(model.thinprism_1, 100)
self.assertEqual(model.distortion_coefficients[8], 100)
import random
import traceback
import numpy as np
from PIL import Image, ImageFilter
from torch.distributions.uniform import Uniform
from torch.distributions.normal import Normal
from torch.utils.data import Dataset
from torchvision.transforms import functional as func_transforms
from libyana.transformutils import colortrans, handutils
from meshreg.datasets.queries import BaseQueries, TransQueries, one_query_in
from meshreg.datasets import datutils
class HandObjSet(Dataset):
"""Hand-Object dataset
"""
def __init__(
self,
pose_dataset,
center_idx=9,
inp_res=(256, 256),
max_rot=np.pi,
normalize_img=False,
split="train",
scale_jittering=0.3,
center_jittering=0.2,
train=True,
hue=0.15,
saturation=0.5,
contrast=0.5,
brightness=0.5,
blur_radius=0.5,
spacing=2,
queries=[
BaseQueries.IMAGE,
TransQueries.JOINTS2D,
TransQueries.HANDVERTS3D,
TransQueries.OBJVERTS2D,
TransQueries.OBJCORNERS2D,
TransQueries.HANDVERTS2D,
TransQueries.OBJVERTS3D,
TransQueries.OBJCORNERS3D,
BaseQueries.OBJCANVERTS,
BaseQueries.OBJCANCORNERS,
TransQueries.JOINTS3D,
],
sides="both",
block_rot=False,
sample_nb=None,
has_dist2strong=False,
):
"""
Args:
sample_nb: Number of samples to return: first sample is
spacing: if 0, sample closest ground truth frame
center_idx: idx of joint on which to center 3d pose
not present
sides: if both, don't flip hands, if 'right' flip all left hands to
right hands, if 'left', do the opposite
"""
# Dataset attributes
self.pose_dataset = pose_dataset
self.inp_res = tuple(inp_res)
self.normalize_img = normalize_img
self.center_idx = center_idx
self.sides = sides
# Sequence attributes
self.sample_nb = sample_nb
self.spacing = spacing
# Color jitter attributes
self.hue = hue
self.contrast = contrast
self.brightness = brightness
self.saturation = saturation
self.blur_radius = blur_radius
self.max_rot = max_rot
self.block_rot = block_rot
# Training attributes
self.train = train
self.scale_jittering = scale_jittering
self.center_jittering = center_jittering
self.queries = queries
self.has_dist2strong = has_dist2strong
def __len__(self):
return len(self.pose_dataset)
def get_sample(self, idx, query=None, color_augm=None, space_augm=None):
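# Build the sample dictionary for the requested queries, reusing any provided spatial/color
# augmentation parameters instead of drawing new ones.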
if query is None:
query = self.queries
sample = {}
if BaseQueries.IMAGE in query or TransQueries.IMAGE in query:
center, scale = self.pose_dataset.get_center_scale(idx)
needs_center_scale = True
else:
needs_center_scale = False
if BaseQueries.JOINTVIS in query:
jointvis = self.pose_dataset.get_jointvis(idx)
sample[BaseQueries.JOINTVIS] = jointvis
# Get sides
if BaseQueries.SIDE in query:
hand_side = self.pose_dataset.get_sides(idx)
hand_side, flip = datutils.flip_hand_side(self.sides, hand_side)
sample[BaseQueries.SIDE] = hand_side
else:
flip = False
# Get original image
if BaseQueries.IMAGE in query or TransQueries.IMAGE in query:
img = self.pose_dataset.get_image(idx)
if flip:
img = img.transpose(Image.FLIP_LEFT_RIGHT)
if BaseQueries.IMAGE in query:
sample[BaseQueries.IMAGE] = np.array(img)
# Get object mask
if BaseQueries.OBJMASK in query:
mask = self.pose_dataset.get_obj_mask(idx)
if flip:
mask = mask.transpose(Image.FLIP_LEFT_RIGHT)
if BaseQueries.OBJMASK in query:
sample[BaseQueries.OBJMASK] = mask
# Get keypoint vector fields
if BaseQueries.OBJFPSVECFIELD in query:
vec_field = self.pose_dataset.get_obj_fpsvectorfield(idx)
if flip:
vec_field = np.fliplr(vec_field)
sample[BaseQueries.OBJFPSVECFIELD] = vec_field
# Flip the 2d crop center to match the horizontally flipped image if needed
if flip:
center[0] = img.size[0] - center[0]
# Data augmentation
if space_augm is not None:
center = space_augm["center"]
scale = space_augm["scale"]
rot = space_augm["rot"]
elif self.train and needs_center_scale:
# Randomly jitter center
# Center is located in square of size 2*center_jitter_factor
# in center of cropped image
center_jit = Uniform(low=-1, high=1).sample((2,)).numpy()
center_offsets = self.center_jittering * scale * center_jit
center = center + center_offsets.astype(int)
# Scale jittering
scale_jit = Normal(0, 1).sample().item() + 1
scale_jittering = self.scale_jittering * scale_jit
scale_jittering = np.clip(scale_jittering, 1 - self.scale_jittering, 1 + self.scale_jittering)
scale = scale * scale_jittering
rot = Uniform(low=-self.max_rot, high=self.max_rot).sample().item()
else:
rot = 0
if self.block_rot:
rot = 0
space_augm = {"rot": rot, "scale": scale, "center": center}
sample["space_augm"] = space_augm
rot_mat = np.array([[np.cos(rot), -np.sin(rot), 0], [np.sin(rot), np.cos(rot), 0], [0, 0, 1]]).astype(
np.float32
)
# Get 2D hand joints
if (TransQueries.JOINTS2D in query) or (TransQueries.IMAGE in query):
affinetrans, post_rot_trans = handutils.get_affine_transform(center, scale, self.inp_res, rot=rot)
if TransQueries.AFFINETRANS in query:
sample[TransQueries.AFFINETRANS] = affinetrans
if BaseQueries.JOINTS2D in query or TransQueries.JOINTS2D in query:
joints2d = self.pose_dataset.get_joints2d(idx)
if flip:
joints2d = joints2d.copy()
joints2d[:, 0] = img.size[0] - joints2d[:, 0]
if BaseQueries.JOINTS2D in query:
sample[BaseQueries.JOINTS2D] = joints2d.astype(np.float32)
if TransQueries.JOINTS2D in query:
rows = handutils.transform_coords(joints2d, affinetrans)
sample[TransQueries.JOINTS2D] = np.array(rows).astype(np.float32)
if BaseQueries.CAMINTR in query or TransQueries.CAMINTR in query:
camintr = self.pose_dataset.get_camintr(idx)
if BaseQueries.CAMINTR in query:
sample[BaseQueries.CAMINTR] = camintr.astype(np.float32)
if TransQueries.CAMINTR in query:
# Rotation is applied as extr transform
new_camintr = post_rot_trans.dot(camintr)
sample[TransQueries.CAMINTR] = new_camintr.astype(np.float32)
# Get 2D object points
if BaseQueries.OBJVERTS2D in query or (TransQueries.OBJVERTS2D in query):
objverts2d = self.pose_dataset.get_objverts2d(idx)
if flip:
objverts2d = objverts2d.copy()
objverts2d[:, 0] = img.size[0] - objverts2d[:, 0]
if BaseQueries.OBJVERTS2D in query:
sample[BaseQueries.OBJVERTS2D] = objverts2d.astype(np.float32)
if TransQueries.OBJVERTS2D in query:
transobjverts2d = handutils.transform_coords(objverts2d, affinetrans)
sample[TransQueries.OBJVERTS2D] = np.array(transobjverts2d).astype(np.float32)
if BaseQueries.OBJVIS2D in query:
objvis2d = self.pose_dataset.get_objvis2d(idx)
sample[BaseQueries.OBJVIS2D] = objvis2d
# Get 2D object points
if BaseQueries.OBJCORNERS2D in query or (TransQueries.OBJCORNERS2D in query):
objcorners2d = self.pose_dataset.get_objcorners2d(idx)
if flip:
objcorners2d = objcorners2d.copy()
objcorners2d[:, 0] = img.size[0] - objcorners2d[:, 0]
if BaseQueries.OBJCORNERS2D in query:
sample[BaseQueries.OBJCORNERS2D] = np.array(objcorners2d)
if TransQueries.OBJCORNERS2D in query:
transobjcorners2d = handutils.transform_coords(objcorners2d, affinetrans)
sample[TransQueries.OBJCORNERS2D] =
|
np.array(transobjcorners2d)
|
numpy.array
|
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
# standard imports
import numpy as np
import matplotlib.pyplot as plt
import time
# custom imports
import apt_fileio
import m2q_calib
import plotting_stuff
import initElements_P3
import peak_param_determination as ppd
from histogram_functions import bin_dat
from voltage_and_bowl import do_voltage_and_bowl
plt.close('all')
# Read in template spectrum
#ref_fn = r"Q:\NIST_Projects\EUV_APT_IMS\BWC\Final EPOS for APL Mat Paper\R20_07263-v02.epos"
ref_fn = r'C:\Users\capli\Google Drive\NIST\pos_and_epos_files\GaN_BeamScans_May192021\R20_08041-v01.epos'
ref_epos = apt_fileio.read_epos_numpy(ref_fn)
#ref_epos = ref_epos[130000:]
# Read in data
#fn = r"Q:\NIST_Projects\EUV_APT_IMS\BWC\Final EPOS for APL Mat Paper\R20_07263-v02.epos" # 25 K
#fn = r"Q:\NIST_Projects\EUV_APT_IMS\BWC\Final EPOS for APL Mat Paper\R20_07080-v01.epos" # 50 K
#fn = r"Q:\NIST_Projects\EUV_APT_IMS\BWC\Final EPOS for APL Mat Paper\R20_07086-v01.epos" # 125 K
#fn = r"Q:\NIST_Projects\EUV_APT_IMS\BWC\Final EPOS for APL Mat Paper\R20_07276-v03.epos" # 150 K
fn = r'C:\Users\capli\Google Drive\NIST\pos_and_epos_files\GaN_BeamScans_May192021\R20_08041-v01.epos'
#fn = r'C:\Users\capli\Google Drive\NIST\pos_and_epos_files\GaN_BeamScans_May192021\R20_08041-v01.epos'
epos = apt_fileio.read_epos_numpy(fn)
#epos = epos[epos.size//2:-1]
# Plot TOF vs event index and show the current ROI selection
#roi_event_idxs = np.arange(1000,epos.size-1000)
roi_event_idxs = np.arange(epos.size)
ax = plotting_stuff.plot_TOF_vs_time(epos['tof'],epos,1)
ax.plot(roi_event_idxs[0]*np.ones(2),[0,1200],'--k')
ax.plot(roi_event_idxs[-1]*np.ones(2),[0,1200],'--k')
ax.set_title('roi selected to start analysis')
epos = epos[roi_event_idxs]
# Compute some extra information from epos information
wall_time =
|
np.cumsum(epos['pslep'])
|
numpy.cumsum
|
# This code was developed by <NAME>, 2021. <EMAIL>
import pandas as pd
import numpy as np
import copy
from simple_dispatch import StorageModel
from simple_dispatch import generatorData
from simple_dispatch import bidStack
from simple_dispatch import dispatch
from simple_dispatch import generatorDataShort
import scipy
class FutureGrid(object):
"""By <NAME>. This class manages the model of the future grid and implements dispatch / capacity calculations.
:param gd_short: The generator model
:type gd_short: An object of class `generatorDataShort` from `simple_dispatch.py`
:param unit_drops: Information about which generators are retired in each year
:type unit_drops: Dataframe
:param additions_df: Information about which generators are added each year
:type additions_df: Dataframe
:param year: Year for the future grid
:type year: int
:param future: Future grid demand, including EV demand
:type future: An object of class `FutureDemand` from later in this file
:param stor_df: Demand that needs to be met by storage; passed to storage model object
:type stor_df: Dataframe
:param storage: Storage model
:type storage: An object of the class `StorageModel` from `simple_dispatch.py`
:param bs: Bidstack
:type bs: An object of the class `bidStack` by Thomas Deetjen from `simple_dispatch.py`
:param dp: Dispatch
:type dp: An object of the class `dispatch` by Thomas Deetjen from `simple_dispatch.py`
"""
def __init__(self, gd_short):
self.gd_short = gd_short
self.gd_short_original = copy.deepcopy(gd_short)
self.unit_drops = pd.read_csv('IntermediateOutputs/scheduled_retirements_2019.csv', index_col=0)
self.additions_df = pd.read_csv('IntermediateOutputs/generator_additions.csv', index_col=0)
self.year = None
self.future = None
self.stor_df = None
self.storage = None
self.bs = None
self.dp = None
def add_generators(self, future_year):
"""Duplicate generators to simulate new additions in the future WECC grid."""
gd_short_final = copy.deepcopy(self.gd_short)
added_units = self.additions_df[self.additions_df['Year']<future_year]['orispl_unit'].values
for i, val in enumerate(added_units):
idx = len(gd_short_final.df)
loc1 = gd_short_final.df[gd_short_final.df['orispl_unit']==val].index
gd_short_final.df = pd.concat((gd_short_final.df, gd_short_final.df.loc[loc1]), ignore_index=True)
gd_short_final.df.loc[idx, 'orispl_unit'] = 'added_'+str(i)
self.gd_short = copy.deepcopy(gd_short_final)
def add_generators_sensitivity(self, fuel_col='is_gas', percent_increase_of_total_ffcap=0.2):
# Duplicate existing plants, youngest and cheapest, to add 20% (or other) of existing fossil fuel capacity
captotal = self.gd_short.df.loc[(self.gd_short.df['nerc']=='WECC')&(self.gd_short.df[fuel_col]==1)].mw.sum()
uptoind = np.where(np.cumsum(self.gd_short.df.loc[(self.gd_short.df['nerc']=='WECC')&(self.gd_short.df[fuel_col]==1)].sort_values(by=['year_online', 'vom'], ascending=[False, True]).loc[:, ['mw', 'year_online', 'vom']]['mw']) > percent_increase_of_total_ffcap*(captotal))[0][0]
new_additions = pd.DataFrame({'orispl_unit':self.gd_short.df.loc[self.gd_short.df.loc[(self.gd_short.df['nerc']=='WECC')&(self.gd_short.df[fuel_col]==1)].sort_values(by=['year_online', 'vom'], ascending=[False, True]).index.values[np.arange(0, uptoind)], 'orispl_unit'].values})
new_additions['Year'] = 2022
gd_short_final = copy.deepcopy(self.gd_short)
# added_units = self.additions_df[self.additions_df['Year']<future_year]['orispl_unit'].values
for i, val in enumerate(new_additions['orispl_unit'].values):
idx = len(gd_short_final.df)
loc1 = gd_short_final.df[gd_short_final.df['orispl_unit']==val].index
gd_short_final.df = pd.concat((gd_short_final.df, gd_short_final.df.loc[loc1]), ignore_index=True)
gd_short_final.df.loc[idx, 'orispl_unit'] = 'added_'+str(i)
self.gd_short = copy.deepcopy(gd_short_final)
def drop_generators_sensitivity(self, fuel_col='is_gas', percent_decrease_of_total_ffcap=0.2):
# Drop existing plants, oldest and most expensive, to drop 20% (or other) of existing fossil fuel capacity
captotal = self.gd_short.df.loc[(self.gd_short.df['nerc']=='WECC')&(self.gd_short.df[fuel_col]==1)].mw.sum()
uptoind = np.where(np.cumsum(self.gd_short.df.loc[(self.gd_short.df['nerc']=='WECC')&(self.gd_short.df[fuel_col]==1)].sort_values(by=['year_online', 'vom'], ascending=[True, False]).loc[:, ['mw', 'year_online', 'vom']]['mw']) > percent_decrease_of_total_ffcap*(captotal))[0][0]
new_drops = pd.DataFrame({'orispl_unit':self.gd_short.df.loc[self.gd_short.df.loc[(self.gd_short.df['nerc']=='WECC')&(self.gd_short.df[fuel_col]==1)].sort_values(by=['year_online', 'vom'], ascending=[True, False]).index.values[np.arange(0, uptoind)], 'orispl_unit'].values})
new_drops['Year'] = 2022
gd_short_final = copy.deepcopy(self.gd_short)
# dropped_units = new_drops[new_drops['retirement_year']<future_year]['orispl_unit'].values
gd_short_final.df = gd_short_final.df[~gd_short_final.df['orispl_unit'].isin(new_drops['orispl_unit'].values)].copy(deep=True).reset_index(drop=True)
self.gd_short = copy.deepcopy(gd_short_final)
def drop_generators(self, future_year):
"""Drop generators to match announced retirements in the WECC grid."""
gd_short_final = copy.deepcopy(self.gd_short)
dropped_units = self.unit_drops[self.unit_drops['retirement_year']<future_year]['orispl_unit'].values
gd_short_final.df = gd_short_final.df[~gd_short_final.df['orispl_unit'].isin(dropped_units)].copy(deep=True).reset_index(drop=True)
self.gd_short = copy.deepcopy(gd_short_final)
def change_gas_prices(self, fuel):
"""Change fuel prices for gas generators to test sensitivity."""
gd_short_final = copy.deepcopy(self.gd_short)
inds = gd_short_final.df[gd_short_final.df['fuel'].isin(['ng', 'og'])].index
gd_short_final.df.loc[inds, ['fuel_price'+str(i) for i in np.arange(1, 53)]] = fuel*gd_short_final.df.loc[inds, ['fuel_price'+str(i) for i in np.arange(1, 53)]]
self.gd_short = copy.deepcopy(gd_short_final)
def set_up_scenario(self, year=2030, solar=2.5, wind=2.5, fuel=1.0, ev_pen=1.0,
ev_scenario='High Home', ev_timers='', ev_workplace_control='',
ev_workplace_bool=False, evs_bool=True, ev_scenario_date='20211119',
weekend_timers='', weekend_date='20211119', ev_folder=None, generator_sensitivity=False, fuel_col='is_gas', generator_sensitivity_type='add', percent_increase_of_total_ffcap=0.2, percent_decrease_of_total_ffcap=0.2):
"""Set up scenario of future demand."""
# drop and add generators
self.year = year
if year != 2019:
self.add_generators(year)
self.drop_generators(year)
if generator_sensitivity:
if generator_sensitivity_type=='add':
self.add_generators_sensitivity(fuel_col=fuel_col, percent_increase_of_total_ffcap=percent_increase_of_total_ffcap)
else:
self.drop_generators_sensitivity(fuel_col=fuel_col, percent_decrease_of_total_ffcap=percent_decrease_of_total_ffcap)
# change fuel prices
if fuel != 1.0:
self.change_gas_prices(fuel)
# model future demand
self.future = FutureDemand(self.gd_short, year=year)
if year != 2019:
self.future.electrification(scale_vs_given=True) # electrification in other sectors
# adjust renewables levels
self.future.solar_multiplier[year] = solar
self.future.wind_multiplier[year] = wind
self.future.solar()
self.future.wind()
# add EVs
if evs_bool:
if ev_workplace_bool:
self.future.evs(pen_level=ev_pen, scenario_name=ev_scenario, timers_extra_info=ev_timers, wp_control=ev_workplace_control, scenario_date=ev_scenario_date, timers_extra_info_weekends=weekend_timers, weekend_date=weekend_date, folder=ev_folder)
else:
self.future.evs(pen_level=ev_pen, scenario_name=ev_scenario, timers_extra_info=ev_timers, scenario_date=ev_scenario_date, timers_extra_info_weekends=weekend_timers, weekend_date=weekend_date, folder=ev_folder)
# update
self.future.update_total()
def check_overgeneration(self, save_str=None, extra_save_str='', change_demand=True):
"""Check for negative demand. Clip and save overgeneration amount."""
if self.future.demand['demand'].min() < 0:
if save_str is not None:
self.future.demand.loc[self.future.demand['demand'] < 0].to_csv(save_str+'_overgeneration'+extra_save_str+'.csv', index=None)
if change_demand:
self.future.demand['demand'] = self.future.demand['demand'].clip(0, 1e10)
def run_storage_before_capacitydispatch(self, cap, max_rate, allow_negative=False):
"""If running storage on net demand before dispatch, do that here."""
self.stor_df = pd.DataFrame({'datetime': pd.to_datetime(self.future.demand['datetime'].values),
'total_demand': self.future.demand['demand'].values})
self.storage = StorageModel(self.stor_df)
self.storage.calculate_operation_beforecapacity(cap, max_rate, allow_negative=allow_negative)
def run_dispatch(self, max_penlevel, save_str, result_date='20220330', return_generator_limits=False, thermal_storage=False, force_storage=False):
"""Run the dispatch. max_penlevel indicates whether storage will be needed or whether the model will break
without it, but the try except clause will ensure the simulation is run if that is incorrect."""
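# Flow (summary): attempt a plain dispatch first; if it fails because capacity is insufficient,
# re-run with include_storage=True, size the smallest battery (rate and capacity) that keeps the
# dispatch feasible, then save the storage operations and stats.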
self.bs = bidStack(self.gd_short, co2_dol_per_kg=0, time=1, dropNucHydroGeo=True, include_min_output=False, mdt_weight=0.5, include_easiur=False)
self.dp = dispatch(self.bs, self.future.demand, time_array=np.arange(52)+1, return_generator_limits=return_generator_limits)
if ((self.future.ev_pen_level <= max_penlevel) and not force_storage):
try:
self.dp.calcDispatchAll()
if save_str is not None:
self.dp.df.to_csv(save_str+'_dpdf_'+result_date+'.csv', index=False)
except:
print('Error!')
pd.DataFrame({'Error':['Needed storage in dispatch'], 'Case':[save_str]}, index=[0]).to_csv(save_str+'_error_record.csv', index=False)
print('----Capacity too low----')
print('Try with storage:')
self.dp = dispatch(self.bs, self.future.demand, time_array=np.arange(52)+1, include_storage=True, return_generator_limits=return_generator_limits)
self.dp.calcDispatchAll()
if save_str is not None:
self.dp.df.to_csv(save_str+'_withstorage'+'_dpdf_'+result_date+'.csv', index=False)
self.dp.storage_df['total_demand'] = self.dp.df.demand
self.storage = StorageModel(self.dp.storage_df)
if thermal_storage:
self.storage.calculate_minbatt_forcapacity_thermal(limityear=self.year)
else:
self.storage.calculate_minbatt_forcapacity()
print('Storage Rate Result:', int(self.storage.min_maxrate))
print('Storage Capacity: ', int(self.storage.min_capacity))
if save_str is not None:
self.storage.df.to_csv(save_str+'_storage_operations_'+result_date+'.csv', index=False)
self.storage_stats = pd.DataFrame({'Storage Rate Result':int(self.storage.min_maxrate),'Storage Capacity':int(self.storage.min_capacity)}, index=[0])
if save_str is not None:
self.storage_stats.to_csv(save_str+'_storage_stats_'+result_date+'.csv', index=False)
else:
print('----Capacity too low----')
print('Try with storage:')
self.dp = dispatch(self.bs, self.future.demand, time_array=np.arange(52)+1, include_storage=True, return_generator_limits=return_generator_limits)
self.dp.calcDispatchAll()
if save_str is not None:
self.dp.df.to_csv(save_str+'_withstorage'+'_dpdf_'+result_date+'.csv', index=False)
self.storage = StorageModel(self.dp.storage_df)
if thermal_storage:
self.storage.calculate_minbatt_forcapacity_thermal(limityear=self.year)
else:
self.storage.calculate_minbatt_forcapacity()
print('Storage Rate Result:', int(self.storage.min_maxrate))
print('Storage Capacity: ', int(self.storage.min_capacity))
if save_str is not None:
self.storage.df.to_csv(save_str+'_storage_operations_'+result_date+'.csv', index=False)
self.storage_stats = pd.DataFrame({'Storage Rate Result':int(self.storage.min_maxrate),'Storage Capacity':int(self.storage.min_capacity)}, index=[0])
if save_str is not None:
self.storage_stats.to_csv(save_str+'_storage_stats_'+result_date+'.csv', index=False)
def find_capacity_limit_1_binarysearch(self, bs_limits=None, lims_8760=None, year=2035, solar=3.5, wind=3,
fuel=1.0, ev_scenario='HighHome', ev_timers='',
ev_workplace_control='', ev_workplace_bool=False, evs_bool=True,
ev_scenario_date='20220408', with_storage_before=False, cap=None,
max_rate=None, minpen=0.01, weekend_timers=None, weekend_date=None):
"""Find capacity limits. To avoid starting the search from 1% adoption each time, this method does a short
search to find which quadrant to start looking in. It returns just the 1-hour breaking point."""
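# Worked example of the quadrant search (values are illustrative): with minpen=0.01 the first probe is
# roughly 0.5 adoption; if it produces no capacity violations the next probe is ~0.75, then ~0.875 or
# ~0.625, otherwise ~0.25, then ~0.375 or ~0.125. The linear 1%-step search then starts from the largest
# probed level that was still feasible (or from minpen if none was) instead of from 1%.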
if weekend_timers is None:
weekend_timers = ev_timers
if weekend_date is None:
weekend_date = ev_scenario_date
violated1 = False
limit1 = 0
if lims_8760 is None:
lims_8760 = np.concatenate((np.repeat(bs_limits['Max Capacity'], (24*7)), np.repeat(np.array(bs_limits.loc[51, 'Max Capacity']), 24)))
print('Short Binary Search: ')
penlevel = np.round((minpen+1)/2, 2)
self.set_up_scenario(year=year, solar=solar, wind=wind, fuel=fuel, ev_scenario=ev_scenario,
ev_timers=ev_timers, ev_pen=penlevel, ev_workplace_control=ev_workplace_control,
ev_workplace_bool=ev_workplace_bool, evs_bool=evs_bool,
ev_scenario_date=ev_scenario_date, weekend_timers=weekend_timers, weekend_date=weekend_date)
self.check_overgeneration()
if with_storage_before:
self.run_storage_before_capacitydispatch(cap, max_rate, allow_negative=True)
total_overs = np.shape(np.where(self.storage.df.comb_demand_after_storage.values > lims_8760)[0])[0]
else:
total_overs = np.shape(np.where(self.future.demand.demand.values > lims_8760)[0])[0]
print(penlevel, ':', total_overs)
if total_overs == 0:
mid = copy.copy(penlevel) # 0.5
penlevel = np.round((mid+1)/2, 2) # 0.75
self.set_up_scenario(year=year, solar=solar, wind=wind, fuel=fuel, ev_scenario=ev_scenario,
ev_timers=ev_timers, ev_pen=penlevel, ev_workplace_control=ev_workplace_control,
ev_workplace_bool=ev_workplace_bool, evs_bool=evs_bool, ev_scenario_date=ev_scenario_date,
weekend_timers=weekend_timers, weekend_date=weekend_date)
self.check_overgeneration()
if with_storage_before:
self.run_storage_before_capacitydispatch(cap, max_rate, allow_negative=True)
total_overs = np.shape(np.where(self.storage.df.comb_demand_after_storage.values > lims_8760)[0])[0]
else:
total_overs = np.shape(np.where(self.future.demand.demand.values > lims_8760)[0])[0]
print(penlevel, ':', total_overs)
if total_overs == 0:
mid = copy.copy(penlevel) # 0.75
penlevel = np.round((mid+1)/2, 2) # 0.875
self.set_up_scenario(year=year, solar=solar, wind=wind, fuel=fuel, ev_scenario=ev_scenario,
ev_timers=ev_timers, ev_pen=penlevel, ev_workplace_control=ev_workplace_control,
ev_workplace_bool=ev_workplace_bool, evs_bool=evs_bool, ev_scenario_date=ev_scenario_date,
weekend_timers=weekend_timers, weekend_date=weekend_date)
self.check_overgeneration()
if with_storage_before:
self.run_storage_before_capacitydispatch(cap, max_rate, allow_negative=True)
total_overs = np.shape(np.where(self.storage.df.comb_demand_after_storage.values > lims_8760)[0])[0]
else:
total_overs = np.shape(np.where(self.future.demand.demand.values > lims_8760)[0])[0]
print(penlevel, ':', total_overs)
if total_overs == 0:
start_pen = copy.copy(penlevel)
else:
start_pen = copy.copy(mid)
else:
penlevel = np.round(copy.copy(mid) + 1/8, 2) # 0.625
self.set_up_scenario(year=year, solar=solar, wind=wind, fuel=fuel, ev_scenario=ev_scenario,
ev_timers=ev_timers, ev_pen=penlevel, ev_workplace_control=ev_workplace_control,
ev_workplace_bool=ev_workplace_bool, evs_bool=evs_bool, ev_scenario_date=ev_scenario_date,
weekend_timers=weekend_timers, weekend_date=weekend_date)
self.check_overgeneration()
if with_storage_before:
self.run_storage_before_capacitydispatch(cap, max_rate, allow_negative=True)
total_overs = np.shape(np.where(self.storage.df.comb_demand_after_storage.values > lims_8760)[0])[0]
else:
total_overs = np.shape(np.where(self.future.demand.demand.values > lims_8760)[0])[0]
print(penlevel, ':', total_overs)
if total_overs == 0:
start_pen = copy.copy(penlevel)
else:
start_pen = copy.copy(mid)
else:
mid = copy.copy(penlevel) # 0.5
penlevel = np.round((mid+minpen)/2, 2) # 0.25
self.set_up_scenario(year=year, solar=solar, wind=wind, fuel=fuel, ev_scenario=ev_scenario,
ev_timers=ev_timers, ev_pen=penlevel, ev_workplace_control=ev_workplace_control,
ev_workplace_bool=ev_workplace_bool, evs_bool=evs_bool,
ev_scenario_date=ev_scenario_date, weekend_timers=weekend_timers, weekend_date=weekend_date)
self.check_overgeneration()
if with_storage_before:
self.run_storage_before_capacitydispatch(cap, max_rate, allow_negative=True)
total_overs = np.shape(np.where(self.storage.df.comb_demand_after_storage.values > lims_8760)[0])[0]
else:
total_overs = np.shape(np.where(self.future.demand.demand.values > lims_8760)[0])[0]
print(penlevel, ':', total_overs)
if total_overs == 0:
mid = copy.copy(penlevel) # 0.25
penlevel = np.round(copy.copy(mid) + 1/8, 2) # 0.375
self.set_up_scenario(year=year, solar=solar, wind=wind, fuel=fuel, ev_scenario=ev_scenario,
ev_timers=ev_timers, ev_pen=penlevel, ev_workplace_control=ev_workplace_control,
ev_workplace_bool=ev_workplace_bool, evs_bool=evs_bool,
ev_scenario_date=ev_scenario_date, weekend_timers=weekend_timers, weekend_date=weekend_date)
self.check_overgeneration()
if with_storage_before:
self.run_storage_before_capacitydispatch(cap, max_rate, allow_negative=True)
total_overs = np.shape(np.where(self.storage.df.comb_demand_after_storage.values > lims_8760)[0])[0]
else:
total_overs = np.shape(np.where(self.future.demand.demand.values > lims_8760)[0])[0]
print(penlevel, ':', total_overs)
if total_overs == 0:
start_pen = copy.copy(penlevel)
else:
start_pen = copy.copy(mid)
else:
mid = copy.copy(penlevel) # 0.25
penlevel = np.round(copy.copy(mid) - 1/8, 2) # 0.125
self.set_up_scenario(year=year, solar=solar, wind=wind, fuel=fuel, ev_scenario=ev_scenario,
ev_timers=ev_timers, ev_pen=penlevel, ev_workplace_control=ev_workplace_control,
ev_workplace_bool=ev_workplace_bool, evs_bool=evs_bool,
ev_scenario_date=ev_scenario_date, weekend_timers=weekend_timers, weekend_date=weekend_date)
self.check_overgeneration()
if with_storage_before:
self.run_storage_before_capacitydispatch(cap, max_rate, allow_negative=True)
total_overs = np.shape(np.where(self.storage.df.comb_demand_after_storage.values > lims_8760)[0])[0]
else:
total_overs = np.shape(np.where(self.future.demand.demand.values > lims_8760)[0])[0]
print(penlevel, ':', total_overs)
if total_overs == 0:
start_pen = copy.copy(penlevel)
else:
start_pen = copy.copy(minpen)
print('Linear search from starting point: ', start_pen)
for penlevel in np.arange(start_pen, 1.01, 0.01):
print(penlevel)
penint = int(100*penlevel)
self.set_up_scenario(year=year, solar=solar, wind=wind, fuel=fuel, ev_scenario=ev_scenario,
ev_timers=ev_timers, ev_pen=penlevel, ev_workplace_control=ev_workplace_control,
ev_workplace_bool=ev_workplace_bool, evs_bool=evs_bool,
ev_scenario_date=ev_scenario_date, weekend_timers=weekend_timers, weekend_date=weekend_date)
self.check_overgeneration()
if with_storage_before:
self.run_storage_before_capacitydispatch(cap, max_rate, allow_negative=True)
total_overs = np.shape(np.where(self.storage.df.comb_demand_after_storage.values > lims_8760)[0])[0]
else:
total_overs = np.shape(np.where(self.future.demand.demand.values > lims_8760)[0])[0]
if (total_overs >= 1) and not violated1:
print('Total overs: ', total_overs)
limit1 = copy.copy(penlevel)
print('Violation 1: ', penlevel)
violated1 = True
break
if not violated1:
limit1 = 1.0
self.limit1 = limit1
return limit1
def find_capacity_limit_10_binarysearch(self, bs_limits=None, lims_8760=None, year=2030, solar=2.5, wind=2.5,
fuel=1.0, ev_scenario='BaseCase_NoL1', ev_timers='',
ev_workplace_control='', ev_workplace_bool=False, evs_bool=True,
ev_scenario_date='20211119', with_storage_before=False, cap=None,
max_rate=None, minpen=0.01, weekend_timers=None, weekend_date=None):
"""Find capacity limits. To avoid starting the search from 1% adoption each time, this method does a short
search to find which quadrant to start looking in. It returns both the 1-hour and 10-hour breaking points."""
if weekend_timers is None:
weekend_timers = ev_timers
if weekend_date is None:
weekend_date = ev_scenario_date
violated1 = False
violated2 = False
limit1 = 0
limit2 = 0
if lims_8760 is None:
lims_8760 = np.concatenate((np.repeat(bs_limits['Max Capacity'], (24*7)), np.repeat(np.array(bs_limits.loc[51, 'Max Capacity']), 24)))
print('Short Binary Search: ')
penlevel = np.round((minpen+1)/2, 2)
self.set_up_scenario(year=year, solar=solar, wind=wind, fuel=fuel, ev_scenario=ev_scenario,
ev_timers=ev_timers, ev_pen=penlevel, ev_workplace_control=ev_workplace_control,
ev_workplace_bool=ev_workplace_bool, evs_bool=evs_bool,
ev_scenario_date=ev_scenario_date, weekend_timers=weekend_timers, weekend_date=weekend_date)
self.check_overgeneration()
if with_storage_before:
self.run_storage_before_capacitydispatch(cap, max_rate)
total_overs = np.shape(np.where(self.storage.df.comb_demand_after_storage.values > lims_8760)[0])[0]
else:
total_overs = np.shape(np.where(self.future.demand.demand.values > lims_8760)[0])[0]
print(penlevel, ':', total_overs)
if total_overs == 0:
mid = copy.copy(penlevel)
penlevel = np.round((mid+1)/2, 2)
self.set_up_scenario(year=year, solar=solar, wind=wind, fuel=fuel, ev_scenario=ev_scenario,
ev_timers=ev_timers, ev_pen=penlevel, ev_workplace_control=ev_workplace_control,
ev_workplace_bool=ev_workplace_bool, evs_bool=evs_bool, ev_scenario_date=ev_scenario_date,
weekend_timers=weekend_timers, weekend_date=weekend_date)
self.check_overgeneration()
if with_storage_before:
self.run_storage_before_capacitydispatch(cap, max_rate)
total_overs = np.shape(np.where(self.storage.df.comb_demand_after_storage.values > lims_8760)[0])[0]
else:
total_overs = np.shape(np.where(self.future.demand.demand.values > lims_8760)[0])[0]
print(penlevel, ':', total_overs)
if total_overs == 0:
start_pen = copy.copy(penlevel)
else:
start_pen = copy.copy(mid)
else:
mid = copy.copy(penlevel)
penlevel = np.round((mid+minpen)/2, 2)
self.set_up_scenario(year=year, solar=solar, wind=wind, fuel=fuel, ev_scenario=ev_scenario,
ev_timers=ev_timers, ev_pen=penlevel, ev_workplace_control=ev_workplace_control,
ev_workplace_bool=ev_workplace_bool, evs_bool=evs_bool,
ev_scenario_date=ev_scenario_date, weekend_timers=weekend_timers, weekend_date=weekend_date)
self.check_overgeneration()
if with_storage_before:
self.run_storage_before_capacitydispatch(cap, max_rate)
total_overs = np.shape(np.where(self.storage.df.comb_demand_after_storage.values > lims_8760)[0])[0]
else:
total_overs = np.shape(np.where(self.future.demand.demand.values > lims_8760)[0])[0]
print(penlevel, ':', total_overs)
if total_overs == 0:
start_pen = copy.copy(penlevel)
else:
start_pen = copy.copy(minpen)
print('Linear search from starting point: ', start_pen)
for penlevel in
|
np.arange(start_pen, 1.01, 0.01)
|
numpy.arange
|
#
"""
Useful python tools for working with the MIRI MRS.
This contains cdp8b specific code.
This version of the tools uses the JWST pipeline implementation
of the distortion solution to do the transformations,
and hooks into offline versions of the CRDS reference
files contained within this github repository.
Convert JWST v2,v3 locations (in arcsec) to MIRI MRS SCA x,y pixel locations.
Note that the pipeline uses a 0-indexed detector pixel (1032x1024) convention while
SIAF uses a 1-indexed detector pixel convention. The CDP files define
the origin such that (0,0) is the middle of the lower-left pixel
(1032x1024)- note that this is a CHANGE of convention from earlier CDP!
Author: <NAME> (<EMAIL>)
REVISION HISTORY:
10-Oct-2018 Written by <NAME> (<EMAIL>)
"""
import os as os
import numpy as np
import pdb as pdb
from astropy.modeling import models
from asdf import AsdfFile
from jwst import datamodels
from jwst.assign_wcs import miri
#############################
# Return the tools version
def version():
return 'cdp8b'
#############################
# Set the relevant CRDS distortion file based on channel (e.g., '1A')
def get_fitsreffile(channel):
rootdir=os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
rootdir=os.path.join(rootdir,'data/crds/')
wavefile=rootdir+'jwst_miri_mrs_wavelengthrange_cdp8b.asdf'
# Channel should be of the form (e.g.) '1A', '3C', etc
# See https://jwst-crds.stsci.edu//display_result/52cef902-ad77-4792-9964-d26a0a8a96a8
if ((channel == '1A') or (channel == '2A')):
distfile=rootdir+'jwst_miri_mrs12A_distortion_cdp8b.asdf'
regfile=rootdir+'jwst_miri_mrs12A_regions_cdp8b.asdf'
specfile=rootdir+'jwst_miri_mrs12A_specwcs_cdp8b.asdf'
elif ((channel == '3A') or (channel == '4A')):
distfile=rootdir+'jwst_miri_mrs34A_distortion_cdp8b.asdf'
regfile=rootdir+'jwst_miri_mrs34A_regions_cdp8b.asdf'
specfile=rootdir+'jwst_miri_mrs34A_specwcs_cdp8b.asdf'
elif ((channel == '1B') or (channel == '2B')):
distfile=rootdir+'jwst_miri_mrs12B_distortion_cdp8b.asdf'
regfile=rootdir+'jwst_miri_mrs12B_regions_cdp8b.asdf'
specfile=rootdir+'jwst_miri_mrs12B_specwcs_cdp8b.asdf'
elif ((channel == '3B') or (channel == '4B')):
distfile=rootdir+'jwst_miri_mrs34B_distortion_cdp8b.asdf'
regfile=rootdir+'jwst_miri_mrs34B_regions_cdp8b.asdf'
specfile=rootdir+'jwst_miri_mrs34B_specwcs_cdp8b.asdf'
elif ((channel == '1C') or (channel == '2C')):
distfile=rootdir+'jwst_miri_mrs12C_distortion_cdp8b.asdf'
regfile=rootdir+'jwst_miri_mrs12C_regions_cdp8b.asdf'
specfile=rootdir+'jwst_miri_mrs12C_specwcs_cdp8b.asdf'
elif ((channel == '3C') or (channel == '4C')):
distfile=rootdir+'jwst_miri_mrs34C_distortion_cdp8b.asdf'
regfile=rootdir+'jwst_miri_mrs34C_regions_cdp8b.asdf'
specfile=rootdir+'jwst_miri_mrs34C_specwcs_cdp8b.asdf'
else:
print('Failure!')
refs={'distortion': distfile, 'regions':regfile, 'specwcs':specfile, 'wavelengthrange':wavefile}
return refs
#############################
# Convenience function to turn '1A' type name into '12' and 'SHORT' type names
def bandchan(channel):
# Channel should be of the form (e.g.) '1A', '3C', etc
if ((channel == '1A') or (channel == '2A')):
newband='SHORT'
newchannel='12'
elif ((channel == '3A') or (channel == '4A')):
newband='SHORT'
newchannel='34'
elif ((channel == '1B') or (channel == '2B')):
newband='MEDIUM'
newchannel='12'
elif ((channel == '3B') or (channel == '4B')):
newband='MEDIUM'
newchannel='34'
elif ((channel == '1C') or (channel == '2C')):
newband='LONG'
newchannel='12'
elif ((channel == '3C') or (channel == '4C')):
newband='LONG'
newchannel='34'
else:
newband='FAIL'
newchannel='FAIL'
return newband,newchannel
#############################
# Convenience function to turn '12A' type name into '1A' and '2A' type names
def channel(detband):
if (detband == '12A'):
ch1='1A'
ch2='2A'
elif (detband == '12B'):
ch1='1B'
ch2='2B'
elif (detband == '12C'):
ch1='1C'
ch2='2C'
elif (detband == '34A'):
ch1='3A'
ch2='4A'
elif (detband == '34B'):
ch1='3B'
ch2='4B'
elif (detband == '34C'):
ch1='3C'
ch2='4C'
else:
ch1='FAIL'
ch2='FAIL'
return ch1,ch2
#############################
# Convenience function to return the rough middle wavelength of a given channel
# Note that this ISNT exact, just some valid value
def midwave(channel):
if (channel == '1A'):
thewave=5.32
elif (channel == '1B'):
thewave=6.145
elif (channel == '1C'):
thewave=7.09
elif (channel == '2A'):
thewave=8.135
elif (channel == '2B'):
thewave=9.395
elif (channel == '2C'):
thewave=10.85
elif (channel == '3A'):
thewave=12.505
elif (channel == '3B'):
thewave=14.5
elif (channel == '3C'):
thewave=16.745
elif (channel == '4A'):
thewave=19.29
elif (channel == '4B'):
thewave=22.47
elif (channel == '4C'):
thewave=26.2
return thewave
#############################
# Convenience function to return model distortion object
# for the x,y to alpha,beta,lam transform
def xytoablmodel(channel,**kwargs):
# Construct the reference data model in general JWST imager type
input_model = datamodels.ImageModel()
# Convert input of type '1A' into the band and channel that pipeline needs
theband,thechan=bandchan(channel)
# Set the filter in the data model meta header
input_model.meta.instrument.band = theband
input_model.meta.instrument.channel = thechan
# If passed input refs keyword, unpack and use it
if ('refs' in kwargs):
therefs=kwargs['refs']
# Otherwise use default reference files
else:
therefs=get_fitsreffile(channel)
distortion = miri.detector_to_abl(input_model, therefs)
# Return the distortion object that can then be queried
return distortion
#############################
# Convenience function to return model distortion object
# for the alpha,beta to v2,v3 transform
def abtov2v3model(channel,**kwargs):
# Construct the reference data model in general JWST imager type
input_model = datamodels.ImageModel()
# Convert input of type '1A' into the band and channel that pipeline needs
theband,thechan=bandchan(channel)
# Set the filter in the data model meta header
input_model.meta.instrument.band = theband
input_model.meta.instrument.channel = thechan
# If passed input refs keyword, unpack and use it
if ('refs' in kwargs):
therefs=kwargs['refs']
# Otherwise use default reference files
else:
therefs=get_fitsreffile(channel)
# The pipeline transform actually uses the triple
# (alpha,beta,lambda) -> (v2,v3,lambda)
basedistortion = miri.abl_to_v2v3l(input_model, therefs)
distortion = basedistortion
# Therefore we need to hack a reasonable wavelength onto our input, run transform,
# then hack it back off again
thewave=midwave(channel)
# Duplicate the beta value at first, then replace with wavelength value
map=models.Mapping((0,1,1)) | models.Identity(1) & models.Identity(1) & models.Const1D(thewave)
map.inverse=models.Mapping((0,1),n_inputs=3)
allmap= map | distortion | map.inverse
allmap.inverse= map | distortion.inverse | map.inverse
# Return the distortion object that can then be queried
return allmap
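# Minimal usage sketch (illustrative; the channel and the coordinates are placeholders):
#   refs = get_fitsreffile('1A')
#   ab2v2v3 = abtov2v3model('1A', refs=refs)
#   v2, v3 = ab2v2v3(0.0, 0.0)            # alpha, beta in arcsec -> v2, v3 in arcsec
#   alpha, beta = ab2v2v3.inverse(v2, v3)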
#############################
# MRS test reference data
# Provided by Polychronis 5/9/19
mrs_ref_data = {
'1A': {'x': np.array([76.0,354.0]),
'y': np.array([512.0,700.0]),
's': np.array([10,4]),
'alpha': np.array([0.05765538365149925,-0.017032619150995743]),
'beta': np.array([-0.17721014379699995,-1.240471006579]),
'lam': np.array([5.348546577257886,5.5136420569934925]),
'v2': np.array([-503.57285226785064,-503.4979806620663]),
'v3': np.array([-318.5749892859028,-317.5090073056335]),
},
'1B': {'x': np.array([76.0,355.0]),
'y': np.array([512.0,700.0]),
's': np.array([10,4]),
'alpha': np.array([-0.012990737471741731,0.10766447914943456]),
'beta': np.array([-0.17720417669099997,-1.240429236837]),
'lam': np.array([6.168310398808807,6.358007642348213]),
'v2': np.array([-503.643100332753,-503.37069816112813]),
'v3': np.array([-318.72773306477103,-317.6938248759762]),
},
'1C': {'x': np.array([78.0,356.0]),
'y': np.array([512.0,700.0]),
's': np.array([10,4]),
'alpha': np.array([0.02871804339196271,-0.028315822861031847]),
'beta': np.array([-0.17720218765499984,-1.240415313585]),
'lam': np.array([7.006608159574103,7.218455147089075]),
'v2': np.array([-503.5598371896608,-503.45975848303885]),
'v3': np.array([-318.4367657801553,-317.3779485524358]),
},
'2A': {'x': np.array([574.0,719.0]),
'y': np.array([512.0,700.0]),
's': np.array([10,4]),
'alpha': np.array([0.022862344416012093,0.024104763006107532]),
'beta': np.array([0.27971818633699996,-1.3985909316610001]),
'lam': np.array([8.139463800053713, 8.423879719165456]),
'v2': np.array([-503.65782416704644, -503.3907046961389]),
'v3': np.array([-319.3709764579651, -317.71318662530217]),
},
'2B': {'x': np.array([570.0,715.0]),
'y': np.array([512.0,700.0]),
's': np.array([10,4]),
'alpha': np.array([-0.04101483043351095,-0.021964438108625473]),
'beta': np.array([0.27972605223,-1.39863026115]),
'lam': np.array([9.49091778668766, 9.826112199836349]),
'v2': np.array([-503.872441161987, -503.58468453126545]),
'v3': np.array([-319.6066193816802, -317.9526192173689]),
},
'2C': {'x': np.array([573.0,718.0]),
'y': np.array([512.0,700.0]),
's': np.array([10,4]),
'alpha': np.array([-0.08065540123411097,-0.07196315905207484]),
'beta': np.array([0.2797221192789996, -1.3986105964070001]),
'lam':
|
np.array([10.909558387414732,11.292658213110698])
|
numpy.array
|
import numpy as np
from datetime import datetime
import time
import matplotlib as mpl
import matplotlib.pyplot as plt
import csv
from brussfield_gillespie import brusselator1DFieldStochSim
import argparse
import os
import pickle
# from scipy import stats
# import freqent.freqent as fe
mpl.rcParams['pdf.fonttype'] = 42
parser = argparse.ArgumentParser()
parser.add_argument('--rates', type=float, nargs=6,
default=[1, 0.5, 2, 0.5, 2, 0.5])
parser.add_argument('--V', '-V', type=float, default=100,
help='Volume of each compartment')
parser.add_argument('--A', '-A', type=int, default=100,
help='Number of A molecules in solution')
parser.add_argument('--B', '-B', type=int, default=700,
help='Number of B molecules in solution')
parser.add_argument('--C', '-C', type=int, default=100,
help='Number of C molecules in solution')
parser.add_argument('--t_final', type=float, default=100,
help='Final time of simulations in seconds')
parser.add_argument('--n_t_points', type=int, default=1001,
help='Number of time points between 0 and t_final')
parser.add_argument('--nCompartments', '-K', type=int, default=64,
help='Number of compartments to divide space into')
parser.add_argument('--diffusion', '-D', type=float, nargs=2, default=[1000, 1000],
help='Diffusion constant of molecule X and Y')
parser.add_argument('--initial_condition', '-ic', type=str, default='random',
help='Initial distribution of X and Y, either random, or centered')
parser.add_argument('--seed_type', type=str, default='time',
help='Type of seed to use. Either "time" to use current microsecond,'
' or "input" for inputting specific seeds')
parser.add_argument('--seed_input', type=int, nargs='*',
help='If seed_type="input", the seeds to use for the simulations')
parser.add_argument('--savepath', default='.',
help='path to save outputs of simulations ')
parser.add_argument('--save', default=False,
help='Boolean to ask whether to save or not.')
args = parser.parse_args()
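# Example invocation (illustrative; the script name and all values are placeholders):
#   python brussfield_sim.py --rates 1 0.5 2 0.5 2 0.5 -V 100 -K 64 --t_final 100 \
#       --initial_condition random --savepath ./output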
if args.initial_condition == 'random':
[X0, Y0] = (np.random.rand(2, args.nCompartments) * 0.1 * args.V).astype(int)
elif args.initial_condition == 'centered':
X0 = np.zeros(args.nCompartments).astype(int)
X0[args.nCompartments // 2 - 1:args.nCompartments // 2 + 1] = np.random.rand() * 2 * args.V
Y0 = X0
elif args.initial_condition not in ['random', 'centered']:
raise ValueError('Initial condition is either random or centered.\n'
'Currently given as {0}'.format(args.initial_condition))
t_points =
|
np.linspace(0, args.t_final, args.n_t_points)
|
numpy.linspace
|
"""
Dataloader class to load CAISO and EIA data into GluonTS model
for training and predicting CA net load ramp.
"""
import numpy as np
import pandas as pd
from gluonts.dataset.common import ListDataset
from gluonts.dataset.field_names import FieldName
from gluonts.transform import (
Chain,
ExpectedNumInstanceSampler,
InstanceSplitter,
)
class dssmDataloader():
def __init__(self, configs):
self.configs = configs
self.df = pd.read_csv(configs.data_path)
def extract_data(self):
"""Extracts target and feature series from df."""
print("Extarcatmog", self.configs.six_ramps)
if self.configs.six_ramps:
caiso_load_target =
|
np.asarray(self.df['caiso_load_ramp'])
|
numpy.asarray
|
import matplotlib
import matplotlib.pyplot as plt
import csv
import collections
import numpy as np
import seaborn as sns
import pandas as pd
def plot_violin_shap_regions():
scale = 1
factor = 170.0 * 206.0 * 170.0 / 43.0 / 52.0 / 43.0 / scale
# load raw data and regions
content = []
regions = []
with open('../brainNetwork/Hammers_mith_atlases_n30r95_label_indices_SPM12_20170315.xml', 'r') as f:
for row in f:
regions.append(row.split("<name>")[1].split("</name>")[0])
# combine left and right for the same region of ADD cases
data =
|
np.load('../brainNetwork/regional95_avgScore_ADD.npy')
|
numpy.load
|
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore.context as context
from mindspore.common.tensor import Tensor
from mindspore.nn import BatchNorm2d
from mindspore.nn import Cell
from mindspore.ops import composite as C
class Batchnorm_Net(Cell):
def __init__(self, c, weight, bias, moving_mean, moving_var_init):
super(Batchnorm_Net, self).__init__()
self.bn = BatchNorm2d(c, eps=0.00001, momentum=0.1, beta_init=bias, gamma_init=weight,
moving_mean_init=moving_mean, moving_var_init=moving_var_init)
def construct(self, input_data):
x = self.bn(input_data)
return x
class Grad(Cell):
def __init__(self, network):
super(Grad, self).__init__()
self.grad = C.GradOperation(name="get_all", get_all=True, sens_param=True)
self.network = network
def construct(self, input_data, sens):
gout = self.grad(self.network)(input_data, sens)
return gout
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_train_forward():
x = np.array([[
[[1, 3, 3, 5], [2, 4, 6, 8], [3, 6, 7, 7], [4, 3, 8, 2]],
[[5, 7, 6, 3], [3, 5, 6, 7], [9, 4, 2, 5], [7, 5, 8, 1]]]]).astype(np.float32)
expect_output = np.array([[[[-0.6059, 0.3118, 0.3118, 1.2294],
[-0.1471, 0.7706, 1.6882, 2.6059],
[0.3118, 1.6882, 2.1471, 2.1471],
[0.7706, 0.3118, 2.6059, -0.1471]],
[[0.9119, 1.8518, 1.3819, -0.0281],
[-0.0281, 0.9119, 1.3819, 1.8518],
[2.7918, 0.4419, -0.4981, 0.9119],
[1.8518, 0.9119, 2.3218, -0.9680]]]]).astype(np.float32)
weight = np.ones(2).astype(np.float32)
bias = np.ones(2).astype(np.float32)
moving_mean = np.ones(2).astype(np.float32)
moving_var_init = np.ones(2).astype(np.float32)
error = np.ones(shape=[1, 2, 4, 4]) * 1.0e-4
context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
bn_net = Batchnorm_Net(2, Tensor(weight), Tensor(bias), Tensor(moving_mean), Tensor(moving_var_init))
bn_net.set_train()
output = bn_net(Tensor(x))
diff = output.asnumpy() - expect_output
assert
|
np.all(diff < error)
|
numpy.all
|
# Copyright (c) OpenMMLab. All rights reserved.
import random
import warnings
from collections.abc import Sequence
import cv2
import mmcv
import numpy as np
from mmcv.utils import digit_version
from torch.nn.modules.utils import _pair
import timm.data as tdata
import torch
from ..builder import PIPELINES
from .formatting import to_tensor
def _combine_quadruple(a, b):
return (a[0] + a[2] * b[0], a[1] + a[3] * b[1], a[2] * b[2], a[3] * b[3])
def _flip_quadruple(a):
return (1 - a[0] - a[2], a[1], a[2], a[3])
def _init_lazy_if_proper(results, lazy):
"""Initialize lazy operation properly.
Make sure that a lazy operation is properly initialized,
and avoid a non-lazy operation accidentally getting mixed in.
Required keys in results are "imgs" (when "img_shape" is not in results) or "img_shape";
added or modified keys are "img_shape" and "lazy".
Add or modified keys in "lazy" are "original_shape", "crop_bbox", "flip",
"flip_direction", "interpolation".
Args:
results (dict): A dict stores data pipeline result.
lazy (bool): Determine whether to apply lazy operation. Default: False.
"""
if 'img_shape' not in results:
results['img_shape'] = results['imgs'][0].shape[:2]
if lazy:
if 'lazy' not in results:
img_h, img_w = results['img_shape']
lazyop = dict()
lazyop['original_shape'] = results['img_shape']
lazyop['crop_bbox'] = np.array([0, 0, img_w, img_h],
dtype=np.float32)
lazyop['flip'] = False
lazyop['flip_direction'] = None
lazyop['interpolation'] = None
results['lazy'] = lazyop
else:
assert 'lazy' not in results, 'Use Fuse after lazy operations'
@PIPELINES.register_module()
class TorchvisionTrans:
"""Torchvision Augmentations, under torchvision.transforms.
Args:
type (str): The name of the torchvision transformation.
"""
def __init__(self, type, **kwargs):
try:
import torchvision
import torchvision.transforms as tv_trans
except ImportError:
raise RuntimeError('Install torchvision to use TorchvisionTrans')
if digit_version(torchvision.__version__) < digit_version('0.8.0'):
raise RuntimeError('The version of torchvision should be at least '
'0.8.0')
trans = getattr(tv_trans, type, None)
assert trans, f'Transform {type} not in torchvision'
self.trans = trans(**kwargs)
def __call__(self, results):
assert 'imgs' in results
imgs = [x.transpose(2, 0, 1) for x in results['imgs']]
imgs = to_tensor(np.stack(imgs))
imgs = self.trans(imgs).data.numpy()
imgs[imgs > 255] = 255
imgs[imgs < 0] = 0
imgs = imgs.astype(np.uint8)
imgs = [x.transpose(1, 2, 0) for x in imgs]
results['imgs'] = imgs
return results
@PIPELINES.register_module()
class PytorchVideoTrans:
"""PytorchVideoTrans Augmentations, under pytorchvideo.transforms.
Args:
type (str): The name of the pytorchvideo transformation.
"""
def __init__(self, type, **kwargs):
try:
import torch
import pytorchvideo.transforms as ptv_trans
except ImportError:
raise RuntimeError('Install pytorchvideo to use PytorchVideoTrans')
if digit_version(torch.__version__) < digit_version('1.8.0'):
raise RuntimeError(
'The version of PyTorch should be at least 1.8.0')
trans = getattr(ptv_trans, type, None)
assert trans, f'Transform {type} not in pytorchvideo'
supported_pytorchvideo_trans = ('AugMix', 'RandAugment',
'RandomResizedCrop', 'ShortSideScale',
'RandomShortSideScale')
assert type in supported_pytorchvideo_trans,\
f'PytorchVideo Transform {type} is not supported in MMAction2'
self.trans = trans(**kwargs)
self.type = type
def __call__(self, results):
assert 'imgs' in results
assert 'gt_bboxes' not in results,\
f'PytorchVideo {self.type} doesn\'t support bboxes yet.'
assert 'proposals' not in results,\
f'PytorchVideo {self.type} doesn\'t support bboxes yet.'
if self.type in ('AugMix', 'RandAugment'):
# list[ndarray(h, w, 3)] -> torch.tensor(t, c, h, w)
imgs = [x.transpose(2, 0, 1) for x in results['imgs']]
imgs = to_tensor(np.stack(imgs))
else:
# list[ndarray(h, w, 3)] -> torch.tensor(c, t, h, w)
# uint8 -> float32
imgs = to_tensor((np.stack(results['imgs']).transpose(3, 0, 1, 2) /
255.).astype(np.float32))
imgs = self.trans(imgs).data.numpy()
if self.type in ('AugMix', 'RandAugment'):
imgs[imgs > 255] = 255
imgs[imgs < 0] = 0
imgs = imgs.astype(np.uint8)
# torch.tensor(t, c, h, w) -> list[ndarray(h, w, 3)]
imgs = [x.transpose(1, 2, 0) for x in imgs]
else:
# float32 -> uint8
imgs = imgs * 255
imgs[imgs > 255] = 255
imgs[imgs < 0] = 0
imgs = imgs.astype(np.uint8)
# torch.tensor(c, t, h, w) -> list[ndarray(h, w, 3)]
imgs = [x for x in imgs.transpose(1, 2, 3, 0)]
results['imgs'] = imgs
return results
@PIPELINES.register_module()
class PoseCompact:
"""Convert the coordinates of keypoints to make it more compact.
Specifically, it first find a tight bounding box that surrounds all joints
in each frame, then we expand the tight box by a given padding ratio. For
example, if 'padding == 0.25', then the expanded box has unchanged center,
and 1.25x width and height.
Required keys in results are "img_shape", "keypoint", add or modified keys
are "img_shape", "keypoint", "crop_quadruple".
Args:
padding (float): The padding size. Default: 0.25.
threshold (int): The threshold for the tight bounding box. If the width
or height of the tight bounding box is smaller than the threshold,
we do not perform the compact operation. Default: 10.
hw_ratio (float | tuple[float] | None): The hw_ratio of the expanded
box. Float indicates the specific ratio and tuple indicates a
ratio range. If set as None, it means there is no requirement on
hw_ratio. Default: None.
allow_imgpad (bool): Whether to allow expanding the box outside the
image to meet the hw_ratio requirement. Default: True.
Returns:
dict: The result dict with compacted "keypoint" coordinates and updated "img_shape" and "crop_quadruple".
"""
def __init__(self,
padding=0.25,
threshold=10,
hw_ratio=None,
allow_imgpad=True):
self.padding = padding
self.threshold = threshold
if hw_ratio is not None:
hw_ratio = _pair(hw_ratio)
self.hw_ratio = hw_ratio
self.allow_imgpad = allow_imgpad
assert self.padding >= 0
def __call__(self, results):
img_shape = results['img_shape']
h, w = img_shape
kp = results['keypoint']
# Make NaN zero
kp[np.isnan(kp)] = 0.
kp_x = kp[..., 0]
kp_y = kp[..., 1]
min_x = np.min(kp_x[kp_x != 0], initial=np.Inf)
min_y = np.min(kp_y[kp_y != 0], initial=np.Inf)
max_x = np.max(kp_x[kp_x != 0], initial=-np.Inf)
max_y = np.max(kp_y[kp_y != 0], initial=-np.Inf)
# The compact area is too small
if max_x - min_x < self.threshold or max_y - min_y < self.threshold:
return results
center = ((max_x + min_x) / 2, (max_y + min_y) / 2)
half_width = (max_x - min_x) / 2 * (1 + self.padding)
half_height = (max_y - min_y) / 2 * (1 + self.padding)
if self.hw_ratio is not None:
half_height = max(self.hw_ratio[0] * half_width, half_height)
half_width = max(1 / self.hw_ratio[1] * half_height, half_width)
min_x, max_x = center[0] - half_width, center[0] + half_width
min_y, max_y = center[1] - half_height, center[1] + half_height
# clamp the expanded box to the image unless padding outside the image is allowed
if not self.allow_imgpad:
min_x, min_y = int(max(0, min_x)), int(max(0, min_y))
max_x, max_y = int(min(w, max_x)), int(min(h, max_y))
else:
min_x, min_y = int(min_x), int(min_y)
max_x, max_y = int(max_x), int(max_y)
kp_x[kp_x != 0] -= min_x
kp_y[kp_y != 0] -= min_y
new_shape = (max_y - min_y, max_x - min_x)
results['img_shape'] = new_shape
# the order is x, y, w, h (in [0, 1]), a tuple
crop_quadruple = results.get('crop_quadruple', (0., 0., 1., 1.))
new_crop_quadruple = (min_x / w, min_y / h, (max_x - min_x) / w,
(max_y - min_y) / h)
crop_quadruple = _combine_quadruple(crop_quadruple, new_crop_quadruple)
results['crop_quadruple'] = crop_quadruple
return results
def __repr__(self):
repr_str = (f'{self.__class__.__name__}(padding={self.padding}, '
f'threshold={self.threshold}, '
f'hw_ratio={self.hw_ratio}, '
f'allow_imgpad={self.allow_imgpad})')
return repr_str
@PIPELINES.register_module()
class Imgaug:
"""Imgaug augmentation.
Adds custom transformations from imgaug library.
Please visit `https://imgaug.readthedocs.io/en/latest/index.html`
to get more information. Two demo configs could be found in tsn and i3d
config folder.
It's better to use uint8 images as inputs since imgaug works best with
numpy dtype uint8 and isn't well tested with other dtypes. It should be
noted that not all of the augmenters have the same input and output dtype,
which may cause unexpected results.
Required keys are "imgs", "img_shape"(if "gt_bboxes" is not None) and
"modality", added or modified keys are "imgs", "img_shape", "gt_bboxes"
and "proposals".
It is worth mentioning that `Imgaug` will NOT create custom keys like
"interpolation", "crop_bbox", "flip_direction", etc. So when using
`Imgaug` along with other mmaction2 pipelines, we should pay more attention
to required keys.
Two steps to use `Imgaug` pipeline:
1. Create initialization parameter `transforms`. There are three ways
to create `transforms`.
1) string: only support `default` for now.
e.g. `transforms='default'`
2) list[dict]: create a list of augmenters by a list of dicts, each
dict corresponds to one augmenter. Every dict MUST contain a key
named `type`. `type` should be a string(iaa.Augmenter's name) or
an iaa.Augmenter subclass.
e.g. `transforms=[dict(type='Rotate', rotate=(-20, 20))]`
e.g. `transforms=[dict(type=iaa.Rotate, rotate=(-20, 20))]`
3) iaa.Augmenter: create an imgaug.Augmenter object.
e.g. `transforms=iaa.Rotate(rotate=(-20, 20))`
2. Add `Imgaug` in dataset pipeline. It is recommended to insert imgaug
pipeline before `Normalize`. A demo pipeline is listed as follows.
```
pipeline = [
dict(
type='SampleFrames',
clip_len=1,
frame_interval=1,
num_clips=16,
),
dict(type='RawFrameDecode'),
dict(type='Resize', scale=(-1, 256)),
dict(
type='MultiScaleCrop',
input_size=224,
scales=(1, 0.875, 0.75, 0.66),
random_crop=False,
max_wh_scale_gap=1,
num_fixed_crops=13),
dict(type='Resize', scale=(224, 224), keep_ratio=False),
dict(type='Flip', flip_ratio=0.5),
dict(type='Imgaug', transforms='default'),
# dict(type='Imgaug', transforms=[
# dict(type='Rotate', rotate=(-20, 20))
# ]),
dict(type='Normalize', **img_norm_cfg),
dict(type='FormatShape', input_format='NCHW'),
dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
dict(type='ToTensor', keys=['imgs', 'label'])
]
```
Args:
transforms (str | list[dict] | :obj:`iaa.Augmenter`): Three different
ways to create imgaug augmenter.
"""
def __init__(self, transforms):
import imgaug.augmenters as iaa
if transforms == 'default':
self.transforms = self.default_transforms()
elif isinstance(transforms, list):
assert all(isinstance(trans, dict) for trans in transforms)
self.transforms = transforms
elif isinstance(transforms, iaa.Augmenter):
self.aug = self.transforms = transforms
else:
raise ValueError('transforms must be `default` or a list of dicts'
' or iaa.Augmenter object')
if not isinstance(transforms, iaa.Augmenter):
self.aug = iaa.Sequential(
[self.imgaug_builder(t) for t in self.transforms])
@staticmethod
def default_transforms():
"""Default transforms for imgaug.
Implement RandAugment by imgaug.
Please visit `https://arxiv.org/abs/1909.13719` for more information.
Augmenters and hyper parameters are borrowed from the following repo:
https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/autoaugment.py # noqa
The ``SolarizeAdd`` augmenter is omitted since imgaug doesn't support it.
Returns:
dict: The constructed RandAugment transforms.
"""
# RandAugment hyper params
num_augmenters = 2
cur_magnitude, max_magnitude = 9, 10
cur_level = 1.0 * cur_magnitude / max_magnitude
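# Illustrative magnitudes at cur_level = 0.9: ShearX/ShearY draw roughly +/-15.5 degrees
# (17.19 * 0.9), TranslateX/TranslateY move up to ~18% of the frame, and Rotate spans +/-27 degrees.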
return [
dict(
type='SomeOf',
n=num_augmenters,
children=[
dict(
type='ShearX',
shear=17.19 * cur_level * random.choice([-1, 1])),
dict(
type='ShearY',
shear=17.19 * cur_level * random.choice([-1, 1])),
dict(
type='TranslateX',
percent=.2 * cur_level * random.choice([-1, 1])),
dict(
type='TranslateY',
percent=.2 * cur_level * random.choice([-1, 1])),
dict(
type='Rotate',
rotate=30 * cur_level * random.choice([-1, 1])),
dict(type='Posterize', nb_bits=max(1, int(4 * cur_level))),
dict(type='Solarize', threshold=256 * cur_level),
dict(type='EnhanceColor', factor=1.8 * cur_level + .1),
dict(type='EnhanceContrast', factor=1.8 * cur_level + .1),
dict(
type='EnhanceBrightness', factor=1.8 * cur_level + .1),
dict(type='EnhanceSharpness', factor=1.8 * cur_level + .1),
dict(type='Autocontrast', cutoff=0),
dict(type='Equalize'),
dict(type='Invert', p=1.),
dict(
type='Cutout',
nb_iterations=1,
size=0.2 * cur_level,
squared=True)
])
]
def imgaug_builder(self, cfg):
"""Import a module from imgaug.
It follows the logic of :func:`build_from_cfg`. Use a dict object to
create an iaa.Augmenter object.
Args:
cfg (dict): Config dict. It should at least contain the key "type".
Returns:
obj:`iaa.Augmenter`: The constructed imgaug augmenter.
"""
import imgaug.augmenters as iaa
assert isinstance(cfg, dict) and 'type' in cfg
args = cfg.copy()
obj_type = args.pop('type')
if mmcv.is_str(obj_type):
obj_cls = getattr(iaa, obj_type) if hasattr(iaa, obj_type) \
else getattr(iaa.pillike, obj_type)
elif issubclass(obj_type, iaa.Augmenter):
obj_cls = obj_type
else:
raise TypeError(
f'type must be a str or valid type, but got {type(obj_type)}')
if 'children' in args:
args['children'] = [
self.imgaug_builder(child) for child in args['children']
]
return obj_cls(**args)
def __repr__(self):
repr_str = self.__class__.__name__ + f'(transforms={self.aug})'
return repr_str
def __call__(self, results):
assert results['modality'] == 'RGB', 'Imgaug only support RGB images.'
in_type = results['imgs'][0].dtype.type
cur_aug = self.aug.to_deterministic()
results['imgs'] = [
cur_aug.augment_image(frame) for frame in results['imgs']
]
img_h, img_w, _ = results['imgs'][0].shape
out_type = results['imgs'][0].dtype.type
assert in_type == out_type, \
('Imgaug input dtype and output dtype are not the same. ',
f'Convert from {in_type} to {out_type}')
if 'gt_bboxes' in results:
from imgaug.augmentables import bbs
bbox_list = [
bbs.BoundingBox(
x1=bbox[0], y1=bbox[1], x2=bbox[2], y2=bbox[3])
for bbox in results['gt_bboxes']
]
bboxes = bbs.BoundingBoxesOnImage(
bbox_list, shape=results['img_shape'])
bbox_aug, *_ = cur_aug.augment_bounding_boxes([bboxes])
results['gt_bboxes'] = [[
max(bbox.x1, 0),
max(bbox.y1, 0),
min(bbox.x2, img_w),
min(bbox.y2, img_h)
] for bbox in bbox_aug.items]
if 'proposals' in results:
bbox_list = [
bbs.BoundingBox(
x1=bbox[0], y1=bbox[1], x2=bbox[2], y2=bbox[3])
for bbox in results['proposals']
]
bboxes = bbs.BoundingBoxesOnImage(
bbox_list, shape=results['img_shape'])
bbox_aug, *_ = cur_aug.augment_bounding_boxes([bboxes])
results['proposals'] = [[
max(bbox.x1, 0),
max(bbox.y1, 0),
min(bbox.x2, img_w),
min(bbox.y2, img_h)
] for bbox in bbox_aug.items]
results['img_shape'] = (img_h, img_w)
return results
@PIPELINES.register_module()
class Fuse:
"""Fuse lazy operations.
Fusion order:
crop -> resize -> flip
Required keys are "imgs", "img_shape" and "lazy", added or modified keys
are "imgs", "lazy".
Required keys in "lazy" are "crop_bbox", "interpolation", "flip_direction".
"""
def __call__(self, results):
if 'lazy' not in results:
raise ValueError('No lazy operation detected')
lazyop = results['lazy']
imgs = results['imgs']
# crop
left, top, right, bottom = lazyop['crop_bbox'].round().astype(int)
imgs = [img[top:bottom, left:right] for img in imgs]
# resize
img_h, img_w = results['img_shape']
if lazyop['interpolation'] is None:
interpolation = 'bilinear'
else:
interpolation = lazyop['interpolation']
imgs = [
mmcv.imresize(img, (img_w, img_h), interpolation=interpolation)
for img in imgs
]
# flip
if lazyop['flip']:
for img in imgs:
mmcv.imflip_(img, lazyop['flip_direction'])
results['imgs'] = imgs
del results['lazy']
return results
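# Illustrative sketch (not part of the original module): a hypothetical
# pipeline config in which a lazy operation is accumulated and then fused by
# ``Fuse`` as the final step. The surrounding pipeline machinery is assumed.
_example_lazy_pipeline = [
    dict(type='RandomCrop', size=224, lazy=True),
    dict(type='Fuse'),
]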
@PIPELINES.register_module()
class RandomCrop:
"""Vanilla square random crop that specifics the output size.
Required keys in results are "img_shape", "keypoint" (optional), "imgs"
(optional), added or modified keys are "keypoint", "imgs", "lazy"; Required
keys in "lazy" are "flip", "crop_bbox", added or modified key is
"crop_bbox".
Args:
size (int): The output size of the images.
lazy (bool): Determine whether to apply lazy operation. Default: False.
"""
def __init__(self, size, lazy=False):
if not isinstance(size, int):
raise TypeError(f'Size must be an int, but got {type(size)}')
self.size = size
self.lazy = lazy
@staticmethod
def _crop_kps(kps, crop_bbox):
return kps - crop_bbox[:2]
@staticmethod
def _crop_imgs(imgs, crop_bbox):
x1, y1, x2, y2 = crop_bbox
return [img[y1:y2, x1:x2] for img in imgs]
@staticmethod
def _box_crop(box, crop_bbox):
"""Crop the bounding boxes according to the crop_bbox.
Args:
box (np.ndarray): The bounding boxes.
crop_bbox(np.ndarray): The bbox used to crop the original image.
"""
x1, y1, x2, y2 = crop_bbox
img_w, img_h = x2 - x1, y2 - y1
box_ = box.copy()
box_[..., 0::2] = np.clip(box[..., 0::2] - x1, 0, img_w - 1)
box_[..., 1::2] =
|
np.clip(box[..., 1::2] - y1, 0, img_h - 1)
|
numpy.clip
|
import networkx as nx
import numpy as np
import pandas as pd
import scanpy as sc
import scipy.stats as ss
from numpy.linalg import inv, pinv
from numpy.linalg.linalg import LinAlgError
from sklearn.neighbors import NearestNeighbors
from scipy.sparse import find, csr_matrix
from scipy.stats import entropy
from scipy.sparse.csgraph import dijkstra
from models.ti.sim import compute_lpi, compute_lrw
from utils.util import get_start_cell_cluster_id, prune_network_edges
def get_terminal_states(
ad,
adj_g,
start_cell_ids,
use_rep="metric_embedding",
cluster_key="metric_clusters",
mad_multiplier=1.0,
):
# Check 1: Input must be in AnnData format
assert isinstance(ad, sc.AnnData)
# Check 2: All keys must be present
if use_rep not in ad.obsm_keys():
raise ValueError(f"Representation `{use_rep}` not present in ad.obsm.")
if cluster_key not in ad.obs_keys():
raise ValueError(f"Cluster key `{cluster_key}` not present in ad.obs.")
communities = ad.obs[cluster_key]
X = pd.DataFrame(ad.obsm[use_rep], index=communities.index)
# adj_g will represent connectivities. For computing betweenness we
# need to account for distances, so invert. (Assuming a directed graph)
adj_g = 1 / adj_g
adj_g[adj_g == np.inf] = 0
g = nx.from_pandas_adjacency(adj_g, create_using=nx.DiGraph)
start_cluster_ids = set(get_start_cell_cluster_id(ad, start_cell_ids, communities))
# Find clusters with no outgoing edges (Candidate 1)
terminal_candidates_1 = set(adj_g.index[np.sum(adj_g, axis=1) == 0])
print(f"Terminal cluster candidate 1: {terminal_candidates_1}")
    # Compute betweenness of second set of candidates and exclude
# clusters with low betweenness (based on MAD)
terminal_candidates_2 = set(np.unique(communities))
betweenness = pd.DataFrame(
nx.betweenness_centrality(g).values(), index=np.unique(communities)
)
betweenness = betweenness / betweenness.sum()
    # scipy.stats.median_absolute_deviation was removed in SciPy 1.9; the
    # equivalent call is median_abs_deviation with scale="normal".
    mad_betweenness = ss.median_abs_deviation(betweenness.to_numpy(), scale="normal")
median_betweenness = betweenness.median()
threshold = (median_betweenness - mad_multiplier * mad_betweenness).to_numpy()
threshold = threshold if threshold > 0 else 0
terminal_candidates_2 = set(
c for c in terminal_candidates_2 if betweenness.loc[c, 0] < threshold
)
print(f"Terminal cluster candidate 2: {terminal_candidates_2}")
terminal_candidates = terminal_candidates_1.union(terminal_candidates_2)
# Remove starting clusters
terminal_candidates = terminal_candidates - start_cluster_ids
# Remove clusters with no incoming edges which are not start_clusters
islands = set(adj_g.index[np.sum(adj_g, axis=0) == 0]) - start_cluster_ids
terminal_candidates = terminal_candidates - islands
    # convert candidate set to list as sets can't be serialized in AnnData objects
ad.uns["metric_terminal_clusters"] = list(terminal_candidates)
print(f"Terminal clusters: {terminal_candidates}")
return terminal_candidates
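# Illustrative sketch (toy numbers, not from the original module): the
# MAD-based betweenness threshold used in get_terminal_states above. Here the
# MAD is computed directly with numpy (unscaled), which is a simplification;
# the function above uses scipy's MAD.
def _example_mad_threshold(mad_multiplier=1.0):
    betweenness = np.array([0.05, 0.30, 0.25, 0.02, 0.38])
    betweenness = betweenness / betweenness.sum()
    median = np.median(betweenness)
    mad = np.median(np.abs(betweenness - median))
    threshold = max(median - mad_multiplier * mad, 0)
    # clusters whose normalized betweenness falls below the threshold
    # become terminal-cluster candidates
    return np.where(betweenness < threshold)[0]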
def get_terminal_cells(
ad,
terminal_keys="metric_terminal_clusters",
cluster_key="metric_clusters",
pt_key="metric_pseudotime_v2",
):
t_cell_ids = []
comms = ad.obs[cluster_key]
pt = ad.obs[pt_key]
for ts in ad.uns[terminal_keys]:
# Find the terminal cell within that cluster
t_ids = comms == ts
t_pt = pt.loc[t_ids]
# The terminal cell is the cell with the max pseudotime within a terminal cluster
t_cell_ids.append(t_pt.idxmax())
ad.uns["metric_terminal_cells"] = t_cell_ids
return t_cell_ids
def compute_cluster_lineage_likelihoods(
ad,
adj_g,
cluster_key="metric_clusters",
terminal_key="metric_terminal_clusters",
norm=False,
):
communities = ad.obs[cluster_key]
cluster_ids = np.unique(communities)
terminal_ids = ad.uns[terminal_key]
cll = pd.DataFrame(
np.zeros((len(cluster_ids), len(terminal_ids))),
columns=terminal_ids,
index=cluster_ids,
)
# Create Directed Graph from adj matrix
g = nx.from_pandas_adjacency(adj_g, create_using=nx.DiGraph)
for t_id in terminal_ids:
for c_id in cluster_ids:
# All terminal states end up in that state with prob 1.0
if c_id == t_id:
cll.loc[c_id, t_id] = 1.0
continue
# Compute total likelihood along all possible paths
paths = nx.all_simple_paths(g, c_id, t_id)
likelihood = 0
for path in paths:
next_state = path[0]
_l = 1
for idx in range(1, len(path)):
_l *= adj_g.loc[next_state, path[idx]]
next_state = path[idx]
likelihood += _l
cll.loc[c_id, t_id] = likelihood
# Row-Normalize the lineage likelihoods
if norm:
nz_inds = cll.sum(axis=1) > 0
cll[nz_inds] = cll[nz_inds].div(cll[nz_inds].sum(axis=1), axis=0)
return cll
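# Illustrative sketch (toy numbers, not from the original module): the
# path-likelihood accumulation used in compute_cluster_lineage_likelihoods
# above. With edges A->B = 0.7, A->C = 0.3 and B->C = 1.0, the likelihood of
# ending in terminal cluster C starting from A is the sum over all simple
# paths of the product of edge weights: 0.3 + 0.7 * 1.0 = 1.0.
def _example_path_likelihood():
    adj = pd.DataFrame(
        [[0.0, 0.7, 0.3], [0.0, 0.0, 1.0], [0.0, 0.0, 0.0]],
        index=["A", "B", "C"], columns=["A", "B", "C"],
    )
    g = nx.from_pandas_adjacency(adj, create_using=nx.DiGraph)
    likelihood = 0.0
    for path in nx.all_simple_paths(g, "A", "C"):
        _l = 1.0
        for u, v in zip(path[:-1], path[1:]):
            _l *= adj.loc[u, v]
        likelihood += _l
    return likelihood  # 1.0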
def _sample_cluster_waypoints(X, g, cell_ids, n_waypoints=10, scheme="kmpp"):
X_cluster = X.loc[cell_ids, :]
cluster_index = X.index[cell_ids]
N = X_cluster.shape[0]
wps = []
cached_dists = {}
if scheme == "kmpp":
if n_waypoints > N:
return None
# Sample the first waypoint randomly
id = np.random.randint(0, N)
wps.append(cluster_index[id])
n_sampled = 0
# Sample the remaining waypoints
while True:
dist = pd.DataFrame(
np.zeros((N, len(wps))), index=cluster_index, columns=wps
)
for wp in wps:
# If the dist with a wp is precomputed use it
if wp in cached_dists.keys():
dist.loc[:, wp] = cached_dists[wp]
continue
# Else Compute the shortest path distance and cache
wp_id = np.where(cluster_index == wp)[0][0]
d = dijkstra(g.to_numpy(), directed=True, indices=wp_id)
dist.loc[:, wp] = d
cached_dists[wp] = d
# Exit if desired n_waypoints have been sampled
if n_sampled == n_waypoints - 1:
break
# Otherwise find the next waypoint
# Find the min_dist of the datapoints with existing centroids
min_dist = dist.min(axis=1)
# New waypoint will be the max of min distances
new_wp_id = min_dist.idxmax()
wps.append(new_wp_id)
n_sampled += 1
return wps, cached_dists
if scheme == "random":
raise NotImplementedError("The option `random` has not been implemented yet")
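# Illustrative sketch (toy numbers, not from the original module): the
# "max of min distances" rule used by the kmpp scheme above to pick the next
# waypoint.
def _example_next_waypoint():
    # rows are cells, columns are already-sampled waypoints
    dist = pd.DataFrame(
        {"wp0": [0.0, 2.0, 5.0], "wp1": [4.0, 0.0, 1.0]},
        index=["cell0", "cell1", "cell2"],
    )
    min_dist = dist.min(axis=1)  # distance of each cell to its closest waypoint
    return min_dist.idxmax()     # "cell2": the cell farthest from all waypoints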
def sample_waypoints(
ad,
adj_dist,
cluster_key="metric_clusters",
embedding_key="metric_embedding",
n_waypoints=10,
scheme="kmpp",
exclude_clusters=None,
):
X = pd.DataFrame(ad.obsm[embedding_key], index=ad.obs_names)
clusters = ad.obs[cluster_key]
adj_dist = pd.DataFrame(adj_dist, index=ad.obs_names, columns=ad.obs_names)
labels = np.unique(clusters)
wps = list()
dists = pd.DataFrame(index=ad.obs_names)
for cluster_id in labels:
# Skip if the cluster id is excluded
if exclude_clusters is not None and cluster_id in exclude_clusters:
print(f"Excluding waypoint computation for cluster: {cluster_id}")
continue
# Sample waypoints for a cluster at a time
cell_ids = clusters == cluster_id
g = adj_dist.loc[cell_ids, cell_ids]
res = _sample_cluster_waypoints(
X, g, cell_ids, n_waypoints=n_waypoints, scheme=scheme
)
        # res will be None when a cluster has fewer points than n_waypoints
# This behavior is subject to change in the future.
if res is None:
continue
w_, d_ = res
wps.extend(w_)
for k, v in d_.items():
dists.loc[cell_ids, k] = v
# Add the waypoint to the annotated data object
ad.uns["metric_waypoints"] = wps
return dists.fillna(0), wps
# NOTE: This method for computing cell branch probs is now obsolete
def compute_cell_branch_probs(
ad, adj_g, adj_dist, cluster_lineages, cluster_key="metric_clusters"
):
communities = ad.obs[cluster_key]
cluster_ids = np.unique(communities)
n_clusters = len(cluster_ids)
N = communities.shape[0]
# Prune the distance graph
adj_dist = pd.DataFrame(adj_dist, index=ad.obs_names, columns=ad.obs_names)
adj_dist_pruned = prune_network_edges(communities, adj_dist, adj_g)
# Compute the cell to cluster connectivity
cell_branch_probs = pd.DataFrame(
np.zeros((N, n_clusters)), index=communities.index, columns=cluster_ids
)
for idx in communities.index:
row = adj_dist_pruned.loc[idx, :]
neighboring_clus = communities[np.where(row > 0)[0]]
for clus_i in set(neighboring_clus):
num_clus_i = np.sum(
row.loc[neighboring_clus.index[np.where(neighboring_clus == clus_i)[0]]]
)
w_i = num_clus_i /
|
np.sum(row)
|
numpy.sum
|
"""This module has various functions that allow the user to
choose halos in an N-body simulation using various selection
criteria.
Functions
---------
centrals
satellites
"""
import numpy as np
import pandas as pd
from astropy.table import Table
import grab_files as grab
def centrals(userpath, halofile, mass_range=[1.e12, 1.e15], chunk=100):
"""This function will select all halos in the simulation
based on the mass range specified.
This function assumes the halocatalogs are sorted by virial mass.
Parameters
----------
userpath : string
the directory path that points to where the outputs will be stored
halofile : string
the input halo catalog file
    mass_range : tuple
        min_mass, max_mass
    chunk : int
        The number of lines to read in at a time.
Returns
-------
centrals : pd.DataFrame
A pandas.DataFrame that contains all of the input columns for the
selected central halos.
"""
assert mass_range[1] > mass_range[0]
i = 0
#grab the halo catalog
rows = [1, 57]
read_halo = grab.reader(halofile, skiprows=rows)
snapshot_name = halofile.split('/')[-1]
#chunk size to test against
size=chunk
while chunk == size:
datachunk = read_halo.get_chunk(chunk)
keys = datachunk.keys()
selectcentral = np.where(np.logical_and(datachunk['mvir(10)'] > mass_range[0],
datachunk['mvir(10)'] < mass_range[1]))[0]
centralchunk = datachunk.iloc[selectcentral]
if i == 0:
centralgals = centralchunk
else:
            # DataFrame.append was removed in pandas 2.0; concat is equivalent
            centralgals = pd.concat([centralgals, centralchunk])
i += 1
print(i)
chunk = len(datachunk)
snapshot_sname = snapshot_name.split('.')
snapshotname = snapshot_sname[0]+'.'+snapshot_sname[1]
print(snapshotname)
centralgals.to_csv(userpath+snapshotname+'_centralhalos.csv')
return centralgals
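#Illustrative sketch (not part of the original module): the mass cut applied
#by centrals() above, on a toy DataFrame that reuses the 'mvir(10)' column
#name. The numbers are made up.
def _example_mass_cut(mass_range=[1.e12, 1.e15]):
    toy = pd.DataFrame({'mvir(10)': [5.e11, 3.e12, 8.e14, 2.e15]})
    keep = np.where(np.logical_and(toy['mvir(10)'] > mass_range[0],
                                   toy['mvir(10)'] < mass_range[1]))[0]
    #only the 3.e12 and 8.e14 halos survive the cut
    return toy.iloc[keep]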
def satellites(userpath, halofile, hostfile, mass_range=[1.e10, 1.e14],
distlimit=1.0, chunk=100):
"""This function will select all satellites in the simulation
based on the mass range specified and proximity to the host.
This function assumes the halocatalogs are sorted by virial mass.
Parameters
----------
userpath : string
the directory path that points to where the outputs will be stored
halofile : string
the input halo catalog file
hostfile : string
the input list of centrals to be used as the host locations
mass_range : tuple
min_mass, max_mass
    distlimit : float
        The maximum distance from the host, in units of the host virial
        radius, at which a halo is considered a satellite.
    chunk : int
        The number of lines to read in at a time.
Returns
-------
satellites : pd.DataFrame
A pandas.DataFrame that contains all of the input columns for the
selected satellite halos.
"""
assert mass_range[1] > mass_range[0]
i = 0
#grab the halo catalog
rows = [1, 57]
read_halo = grab.reader(halofile, skiprows=rows)
snapshot_name = halofile.split('/')[-1]
snapshot_sname = snapshot_name.split('.')
snapshotname = snapshot_sname[0]+'.'+snapshot_sname[1]
#grab central galaxy catalog
centrals = pd.read_csv(hostfile)
xcentral = centrals['x(17)'].values.reshape(len(centrals), 1)
ycentral = centrals['y(18)'].values.reshape(len(centrals), 1)
zcentral = centrals['z(19)'].values.reshape(len(centrals), 1)
rvir = centrals['rvir(11)'].values.reshape(len(centrals), 1)
mvir = centrals['mvir(10)'].values.reshape(len(centrals), 1)
xone = np.ones_like(xcentral)
yone = np.ones_like(ycentral)
zone = np.ones_like(zcentral)
massone = np.ones_like(mvir)
#chunk size to test against
size=chunk
while chunk == size:
datachunk = read_halo.get_chunk(chunk)
keys = datachunk.keys()
#create necessary mass arrays to select subhalos
satmass = datachunk['mvir(10)'].values.reshape(len(datachunk), 1)
massmat = np.dot(satmass, massone.T)
normmass = massmat.T / mvir
#subhalos need to be less than the mass of the "central"
hostmasscut = normmass <= 1.
satmasscut = np.logical_and(satmass > mass_range[0],
satmass < mass_range[1])
masscut = np.logical_and(satmasscut.T, hostmasscut)
x = datachunk['x(17)'].values.reshape(len(datachunk), 1)
y = datachunk['y(18)'].values.reshape(len(datachunk), 1)
z = datachunk['z(19)'].values.reshape(len(datachunk), 1)
xmat = np.dot(x, xone.T)
ymat = np.dot(y, yone.T)
zmat = np.dot(z, zone.T)
dx = xmat.T - xcentral
dy = ymat.T - ycentral
dz = zmat.T - zcentral
dist = np.sqrt(dx ** 2 + dy ** 2 + dz ** 2)
normdist = dist * 1000.0 / rvir
distcut = normdist <= distlimit
selectsatellite = np.where(
|
np.logical_and(masscut, distcut)
|
numpy.logical_and
|
import collections
from pprint import pformat as prettyformat
from functools import partial
from pathlib import Path
import warnings
import gc
from xarray import register_dataset_accessor, save_mfdataset, merge
import animatplot as amp
from matplotlib import pyplot as plt
from matplotlib.animation import PillowWriter
from mpl_toolkits.axes_grid1 import make_axes_locatable
import numpy as np
from dask.diagnostics import ProgressBar
from .plotting.animate import animate_poloidal, animate_pcolormesh, animate_line
from .plotting.utils import _create_norm
@register_dataset_accessor('bout')
class BoutDatasetAccessor:
"""
Contains BOUT-specific methods to use on BOUT++ datasets opened using
`open_boutdataset()`.
These BOUT-specific methods and attributes are accessed via the bout
accessor, e.g. `ds.bout.options` returns a `BoutOptionsFile` instance.
"""
def __init__(self, ds):
self.data = ds
self.metadata = ds.attrs.get('metadata') # None if just grid file
self.options = ds.attrs.get('options') # None if no inp file
def __str__(self):
"""
String representation of the BoutDataset.
Accessed by print(ds.bout)
"""
styled = partial(prettyformat, indent=4, compact=True)
text = "<xbout.BoutDataset>\n" + \
"Contains:\n{}\n".format(str(self.data)) + \
"Metadata:\n{}\n".format(styled(self.metadata))
if self.options:
text += "Options:\n{}".format(styled(self.options))
return text
#def __repr__(self):
# return 'boutdata.BoutDataset(', {}, ',', {}, ')'.format(self.datapath,
# self.prefix)
def save(self, savepath='./boutdata.nc', filetype='NETCDF4',
variables=None, save_dtype=None, separate_vars=False, pre_load=False):
"""
Save data variables to a netCDF file.
Parameters
----------
savepath : str, optional
filetype : str, optional
variables : list of str, optional
Variables from the dataset to save. Default is to save all of them.
separate_vars: bool, optional
If this is true then every variable which depends on time (but not
solely on time) will be saved into a different output file.
The files are labelled by the name of the variable. Variables which
don't meet this criterion will be present in every output file.
pre_load : bool, optional
When saving separate variables, will load each variable into memory
before saving to file, which can be considerably faster.
Examples
--------
If `separate_vars=True`, then multiple files will be created. These can
all be opened and merged in one go using a call of the form:
ds = xr.open_mfdataset('boutdata_*.nc', combine='nested', concat_dim=None)
"""
if variables is None:
# Save all variables
to_save = self.data
else:
to_save = self.data[variables]
if savepath == './boutdata.nc':
print("Will save data into the current working directory, named as"
" boutdata_[var].nc")
if savepath is None:
raise ValueError('Must provide a path to which to save the data.')
if save_dtype is not None:
to_save = to_save.astype(save_dtype)
options = to_save.attrs.pop('options')
if options:
# TODO Convert Ben's options class to a (flattened) nested
# dictionary then store it in ds.attrs?
raise NotImplementedError("Haven't decided how to write options "
"file back out yet")
else:
# Delete placeholders for options on each variable
for var in to_save.data_vars:
del to_save[var].attrs['options']
# Store the metadata as individual attributes instead because
# netCDF can't handle storing arbitrary objects in attrs
def dict_to_attrs(obj, key):
for key, value in obj.attrs.pop(key).items():
obj.attrs[key] = value
dict_to_attrs(to_save, 'metadata')
# Must do this for all variables in dataset too
for varname, da in to_save.data_vars.items():
dict_to_attrs(da, key='metadata')
if separate_vars:
# Save each major variable to a different netCDF file
# Determine which variables are "major"
# Defined as time-dependent, but not solely time-dependent
major_vars, minor_vars = _find_major_vars(to_save)
print("Will save the variables {} separately"
.format(str(major_vars)))
# Save each one to separate file
# TODO perform the save in parallel with save_mfdataset?
for major_var in major_vars:
# Group variables so that there is only one time-dependent
# variable saved in each file
minor_data = [to_save[minor_var] for minor_var in minor_vars]
single_var_ds = merge([to_save[major_var], *minor_data])
# Add the attrs back on
single_var_ds.attrs = to_save.attrs
if pre_load:
single_var_ds.load()
# Include the name of the variable in the name of the saved
# file
path = Path(savepath)
var_savepath = str(path.parent / path.stem) + '_' \
+ str(major_var) + path.suffix
print('Saving ' + major_var + ' data...')
with ProgressBar():
single_var_ds.to_netcdf(path=str(var_savepath),
format=filetype, compute=True)
# Force memory deallocation to limit RAM usage
single_var_ds.close()
del single_var_ds
gc.collect()
else:
# Save data to a single file
print('Saving data...')
with ProgressBar():
to_save.to_netcdf(path=savepath, format=filetype, compute=True)
return
def to_restart(self, savepath='.', nxpe=None, nype=None,
original_splitting=False):
"""
Write out final timestep as a set of netCDF BOUT.restart files.
If processor decomposition is not specified then data will be saved
using the decomposition it had when loaded.
Parameters
----------
savepath : str
nxpe : int
nype : int
"""
# Set processor decomposition if not given
if original_splitting:
if any([nxpe, nype]):
raise ValueError('Inconsistent choices for domain '
'decomposition.')
else:
nxpe, nype = self.metadata['NXPE'], self.metadata['NYPE']
# Is this even possible without saving the guard cells?
# Can they be recreated?
restart_datasets, paths = _split_into_restarts(self.data, savepath,
nxpe, nype)
with ProgressBar():
save_mfdataset(restart_datasets, paths, compute=True)
return
def animate_list(self, variables, animate_over='t', save_as=None, show=False, fps=10,
nrows=None, ncols=None, poloidal_plot=False, subplots_adjust=None,
vmin=None, vmax=None, logscale=None, titles=None, aspect='equal',
controls=True, tight_layout=True, **kwargs):
"""
Parameters
----------
variables : list of str or BoutDataArray
The variables to plot. For any string passed, the corresponding
variable in this DataSet is used - then the calling DataSet must
have only 3 dimensions. It is possible to pass BoutDataArrays to
allow more flexible plots, e.g. with different variables being
plotted against different axes.
animate_over : str, optional
Dimension over which to animate
save_as : str, optional
If passed, a gif is created with this filename
show : bool, optional
Call pyplot.show() to display the animation
fps : float, optional
Indicates the number of frames per second to play
nrows : int, optional
Specify the number of rows of plots
ncols : int, optional
Specify the number of columns of plots
poloidal_plot : bool or sequence of bool, optional
If set to True, make all 2D animations in the poloidal plane instead of using
grid coordinates, per variable if sequence is given
subplots_adjust : dict, optional
            Arguments passed to fig.subplots_adjust()
vmin : float or sequence of floats
Minimum value for color scale, per variable if a sequence is given
vmax : float or sequence of floats
Maximum value for color scale, per variable if a sequence is given
logscale : bool or float, sequence of bool or float, optional
If True, default to a logarithmic color scale instead of a linear one.
If a non-bool type is passed it is treated as a float used to set the linear
threshold of a symmetric logarithmic scale as
linthresh=min(abs(vmin),abs(vmax))*logscale, defaults to 1e-5 if True is
passed.
Per variable if sequence is given.
titles : sequence of str or None, optional
Custom titles for each plot. Pass None in the sequence to use the default for
a certain variable
aspect : str or None, or sequence of str or None, optional
Argument to set_aspect() for each plot
controls : bool, optional
If set to False, do not show the time-slider or pause button
tight_layout : bool or dict, optional
If set to False, don't call tight_layout() on the figure.
If a dict is passed, the dict entries are passed as arguments to
tight_layout()
**kwargs : dict, optional
Additional keyword arguments are passed on to each animation function
"""
nvars = len(variables)
if nrows is None and ncols is None:
ncols = int(np.ceil(
|
np.sqrt(nvars)
|
numpy.sqrt
|
# Copyright 2019 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
"""Utility functions and classes."""
import collections
import itertools
import math
import operator
import os
import random
import sys
import numpy as np
import tensorflow as tf
import tqdm
__all__ = ["activity2id", "object2id",
"initialize", "read_data"]
activity2id = {
"BG": 0, # background
"activity_walking": 1,
"activity_standing": 2,
"activity_carrying": 3,
"activity_gesturing": 4,
"Closing": 5,
"Opening": 6,
"Interacts": 7,
"Exiting": 8,
"Entering": 9,
"Talking": 10,
"Transport_HeavyCarry": 11,
"Unloading": 12,
"Pull": 13,
"Loading": 14,
"Open_Trunk": 15,
"Closing_Trunk": 16,
"Riding": 17,
"specialized_texting_phone": 18,
"Person_Person_Interaction": 19,
"specialized_talking_phone": 20,
"activity_running": 21,
"PickUp": 22,
"specialized_using_tool": 23,
"SetDown": 24,
"activity_crouching": 25,
"activity_sitting": 26,
"Object_Transfer": 27,
"Push": 28,
"PickUp_Person_Vehicle": 29,
}
object2id = {
"Person": 0,
"Vehicle": 1,
"Parking_Meter": 2,
"Construction_Barrier": 3,
"Door": 4,
"Push_Pulled_Object": 5,
"Construction_Vehicle": 6,
"Prop": 7,
"Bike": 8,
"Dumpster": 9,
}
def process_args(args):
"""Process arguments.
Model will be in outbasepath/modelname/runId/save
Args:
args: arguments.
Returns:
Edited arguments.
"""
def mkdir(path):
if not os.path.exists(path):
os.makedirs(path)
if args.activation_func == "relu":
args.activation_func = tf.nn.relu
elif args.activation_func == "tanh":
args.activation_func = tf.nn.tanh
elif args.activation_func == "lrelu":
args.activation_func = tf.nn.leaky_relu
else:
print("unrecognied activation function, using relu...")
args.activation_func = tf.nn.relu
args.seq_len = args.obs_len + args.pred_len
args.outpath = os.path.join(
args.outbasepath, args.modelname, str(args.runId).zfill(2))
mkdir(args.outpath)
args.save_dir = os.path.join(args.outpath, "save")
mkdir(args.save_dir)
args.save_dir_model = os.path.join(args.save_dir, "save")
args.save_dir_best = os.path.join(args.outpath, "best")
mkdir(args.save_dir_best)
args.save_dir_best_model = os.path.join(args.save_dir_best, "save-best")
args.write_self_sum = True
args.self_summary_path = os.path.join(args.outpath, "train_sum.txt")
args.record_val_perf = True
args.val_perf_path = os.path.join(args.outpath, "val_perf.p")
# assert os.path.exists(args.frame_path)
# args.resnet_num_block = [3,4,23,3] # resnet 101
assert os.path.exists(args.person_feat_path)
args.object2id = object2id
args.num_box_class = len(args.object2id)
# categories of traj
if args.is_actev:
args.virat_mov_actids = [
activity2id["activity_walking"],
activity2id["activity_running"],
activity2id["Riding"],
]
args.traj_cats = [
["static", 0],
["mov", 1],
]
args.scenes = ["0000", "0002", "0400", "0401", "0500"]
args.num_act = len(activity2id.keys()) # include the BG class
# has to be 2,4 to match the scene CNN strides
args.scene_grid_strides = (2, 4)
args.scene_grids = []
for stride in args.scene_grid_strides:
h, w = args.scene_h, args.scene_w
this_h, this_w = round(h*1.0/stride), round(w*1.0/stride)
this_h, this_w = int(this_h), int(this_w)
args.scene_grids.append((this_h, this_w))
if args.load_best:
args.load = True
if args.load_from is not None:
args.load = True
# if test, has to load
if not args.is_train:
args.load = True
args.num_epochs = 1
args.keep_prob = 1.0
args.activity2id = activity2id
return args
def initialize(load, load_best, args, sess):
"""Initialize graph with given model weights.
Args:
load: boolean, whether to load model weights
load_best: whether to load from best model path
args: arguments
sess: tf.Session() instance
Returns:
None
"""
tf.global_variables_initializer().run()
if load:
print("restoring model...")
allvars = tf.global_variables()
allvars = [var for var in allvars if "global_step" not in var.name]
restore_vars = allvars
opts = ["Adam", "beta1_power", "beta2_power",
"Adam_1", "Adadelta_1", "Adadelta", "Momentum"]
restore_vars = [var for var in restore_vars \
if var.name.split(":")[0].split("/")[-1] not in opts]
saver = tf.train.Saver(restore_vars, max_to_keep=5)
load_from = None
if args.load_from is not None:
load_from = args.load_from
else:
if load_best:
load_from = args.save_dir_best
else:
load_from = args.save_dir
ckpt = tf.train.get_checkpoint_state(load_from)
if ckpt and ckpt.model_checkpoint_path:
loadpath = ckpt.model_checkpoint_path
saver.restore(sess, loadpath)
print("Model:")
print("\tloaded %s" % loadpath)
print("")
else:
if os.path.exists(load_from):
if load_from.endswith(".ckpt"):
# load_from should be a single .ckpt file
saver.restore(sess, load_from)
else:
print("Not recognized model type:%s" % load_from)
sys.exit()
else:
print("Model not exists")
sys.exit()
print("done.")
def read_data(args, data_type):
"""Read propocessed data into memory for experiments.
Args:
args: Arguments
data_type: train/val/test
Returns:
Dataset instance
"""
def get_traj_cat(cur_acts, traj_cats):
"""Get trajectory categories for virat/actev dataset experiments."""
def is_in(l1, l2):
"""Check whether any of l1"s item is in l2."""
for i in l1:
if i in l2:
return True
return False
# 1 is moving act, 0 is static
act_cat = int(is_in(cur_acts, args.virat_mov_actids))
i = -1
for i, (_, actid) in enumerate(traj_cats):
if actid == act_cat:
return i
# something is wrong
assert i >= 0
data_path = os.path.join(args.prepropath, "data_%s.npz" % data_type)
data = dict(np.load(data_path, allow_pickle=True))
# save some shared feature first
shared = {}
shares = ["scene_feat", "video_wh", "scene_grid_strides",
"vid2name", "person_boxkey2id", "person_boxid2key"]
excludes = ["seq_start_end"]
if "video_wh" in data:
args.box_img_w, args.box_img_h = data["video_wh"]
else:
args.box_img_w, args.box_img_h = 1920, 1080
for i in range(len(args.scene_grid_strides)):
shares.append("grid_center_%d" % i)
for key in data:
if key in shares:
if not data[key].shape:
shared[key] = data[key].item()
else:
shared[key] = data[key]
newdata = {}
for key in data:
if key not in excludes+shares:
newdata[key] = data[key]
data = newdata
if args.add_activity: # transform activity ids to a one hot feature
cur_acts = []
future_acts = [] # [N, num_act]
num_act = args.num_act
for i in range(len(data["cur_activity"])): # super fast
cur_actids = data["cur_activity"][i]
future_actids = data["future_activity"][i]
cur_act = np.zeros((num_act), dtype="uint8")
future_act = np.zeros((num_act), dtype="uint8")
for actid in cur_actids:
cur_act[actid] = 1
for actid in future_actids:
future_act[actid] = 1
cur_acts.append(cur_act)
future_acts.append(future_act)
data["cur_activity_onehot"] = cur_acts
data["future_activity_onehot"] = future_acts
assert len(shared["scene_grid_strides"]) == len(args.scene_grid_strides)
assert shared["scene_grid_strides"][0] == args.scene_grid_strides[0]
num_examples = len(data["obs_traj"]) # (input,pred)
for key in data:
assert len(data[key]) == num_examples, \
(key, data[key].shape, num_examples)
# category each trajectory for training
if args.is_actev:
data["trajidx2catid"] = np.zeros(
(num_examples), dtype="uint8") # 0~256
boxid2key = shared["person_boxid2key"]
trajkey2cat = {}
data["traj_key"] = []
cat_count = [[cat_name, 0] for cat_name, _ in args.traj_cats]
for i in range(num_examples):
cur_acts = data["cur_activity"][i]
cat_id = get_traj_cat(cur_acts, args.traj_cats)
data["trajidx2catid"][i] = cat_id
cat_count[cat_id][1] += 1
# videoname_frameidx_personid
key = boxid2key[data["obs_boxid"][i][0]]
trajkey2cat[key] = cat_id
data["traj_key"].append(key)
print(cat_count)
else:
data["traj_key"] = []
boxid2key = shared["person_boxid2key"]
for i in range(num_examples):
# videoname_frameidx_personid
key = boxid2key[data["obs_boxid"][i][0]]
data["traj_key"].append(key)
print("loaded %s data points for %s" % (num_examples, data_type))
return Dataset(data, data_type, shared=shared, config=args)
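# Illustrative sketch (not part of the original module): the one-hot activity
# encoding built inside read_data() above, with made-up activity ids.
def _example_activity_onehot():
  num_act = len(activity2id)
  cur_actids = [activity2id["activity_walking"], activity2id["Riding"]]
  cur_act = np.zeros((num_act), dtype="uint8")
  for actid in cur_actids:
    cur_act[actid] = 1
  return cur_act  # 1s at indices 1 and 17, 0s elsewhere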
def get_scene(videoname_):
"""Get the scene camera from the ActEV videoname."""
s = videoname_.split("_S_")[-1]
s = s.split("_")[0]
return s[:4]
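# Illustrative sketch (not part of the original module): the videoname below
# is made up but follows the "*_S_<scene>..." naming convention that
# get_scene() above relies on.
def _example_get_scene():
  # "..._S_000207_..." -> "000207" -> scene "0002"
  return get_scene("VIRAT_S_000207_00_000000_000045")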
def evaluate(dataset, config, sess, tester):
"""Evaluate the dataset using the tester model.
Args:
dataset: the Dataset instance
config: arguments
sess: tensorflow session
tester: the Tester instance
Returns:
Evaluation results.
"""
l2dis = [] # [num_example, each_timestep]
# show the evaluation per trajectory class if actev experiment
if config.is_actev:
l2dis_cats = [[] for i in range(len(config.traj_cats))]
# added 06/2019,
# show per-scene ADE/FDE for ActEV dataset
# for leave-one-scene-out experiment
l2dis_scenes = [[] for i in range(len(config.scenes))]
grid1_acc = None
grid2_acc = None
grid1 = []
grid2 = []
# BG class is also used for evaluate
future_act_scores = {actid: [] for actid in config.activity2id.values()}
future_act_labels = {actid: [] for actid in config.activity2id.values()}
act_ap = None
num_batches_per_epoch = int(
math.ceil(dataset.num_examples / float(config.batch_size)))
traj_class_correct = []
if config.is_actev:
traj_class_correct_cat = [[] for i in range(len(config.traj_cats))]
for evalbatch in tqdm.tqdm(dataset.get_batches(config.batch_size, \
full=True, shuffle=False), total=num_batches_per_epoch, ascii=True):
# [N,pred_len, 2]
# here the output is relative output
pred_out, future_act, grid_pred_1, grid_pred_2, \
traj_class_logits, _ = tester.step(sess, evalbatch)
_, batch = evalbatch
this_actual_batch_size = batch.data["original_batch_size"]
d = []
# activity location prediction
grid_pred_1 = np.argmax(grid_pred_1, axis=1)
grid_pred_2 = np.argmax(grid_pred_2, axis=1)
for i in range(len(batch.data["pred_grid_class"])):
gt_grid1_pred_class = batch.data["pred_grid_class"][i][0, -1]
gt_grid2_pred_class = batch.data["pred_grid_class"][i][1, -1]
grid1.append(grid_pred_1[i] == gt_grid1_pred_class)
grid2.append(grid_pred_2[i] == gt_grid2_pred_class)
if config.add_activity:
# get the mean AP
for i in range(len(batch.data["future_activity_onehot"])):
# [num_act]
this_future_act_labels = batch.data["future_activity_onehot"][i]
for j in range(len(this_future_act_labels)):
actid = j
future_act_labels[actid].append(this_future_act_labels[j])
# for checking AP using the cur act as
future_act_scores[actid].append(future_act[i, j])
for i, (obs_traj_gt, pred_traj_gt) in enumerate(
zip(batch.data["obs_traj"], batch.data["pred_traj"])):
if i >= this_actual_batch_size:
break
# the output is relative coordinates
this_pred_out = pred_out[i][:, :2] # [T2, 2]
# [T2,2]
this_pred_out_abs = relative_to_abs(this_pred_out, obs_traj_gt[-1])
# get the errors
assert this_pred_out_abs.shape == this_pred_out.shape, (
this_pred_out_abs.shape, this_pred_out.shape)
# [T2, 2]
diff = pred_traj_gt - this_pred_out_abs
diff = diff**2
diff = np.sqrt(
|
np.sum(diff, axis=1)
|
numpy.sum
|
# For this part of the assignment, You can use inbuilt functions to compute the fourier transform
# You are welcome to use fft that are available in numpy and opencv
from numpy import sqrt, zeros
import matplotlib.pyplot as plt
class Filtering:
image = None
filter = None
cutoff = None
order = None
def __init__(self, image, filter_name, cutoff, order = 0):
"""initializes the variables frequency filtering on an input image
takes as input:
image: the input image
filter_name: the name of the mask to use
cutoff: the cutoff frequency of the filter
order: the order of the filter (only for butterworth
returns"""
self.filter_name = filter_name
self.image = image
if filter_name == 'ideal_l':
self.filter = self.get_ideal_low_pass_filter
elif filter_name == 'ideal_h':
self.filter = self.get_ideal_high_pass_filter
elif filter_name == 'butterworth_l':
self.filter = self.get_butterworth_low_pass_filter
elif filter_name == 'butterworth_h':
self.filter = self.get_butterworth_high_pass_filter
elif filter_name == 'gaussian_l':
self.filter = self.get_gaussian_low_pass_filter
elif filter_name == 'gaussian_h':
self.filter = self.get_gaussian_high_pass_filter
self.cutoff = cutoff
self.order = order
def get_ideal_low_pass_filter(self, shape, cutoff):
"""Computes a Ideal low pass mask
takes as input:
shape: the shape of the mask to be generated
cutoff: the cutoff frequency of the ideal filter
returns a ideal low pass mask"""
mask = zeros(shape)
row_size, col_size = shape[0], shape[1]
center_row, center_col = row_size/2 , col_size/2
for r in range(0, row_size):
for c in range(0, col_size):
freq_dist = sqrt( (r-center_row)**2 + (c-center_col)**2 )
mask[r,c] = 0.0 if freq_dist > cutoff else 1.0
return mask
def get_ideal_high_pass_filter(self, shape, cutoff):
"""Computes a Ideal high pass mask
takes as input:
shape: the shape of the mask to be generated
cutoff: the cutoff frequency of the ideal filter
returns a ideal high pass mask"""
#Hint: May be one can use the low pass filter function to get a high pass mask
return 1 - self.get_ideal_low_pass_filter(shape, cutoff)
def get_butterworth_low_pass_filter(self, shape, cutoff, order):
"""Computes a butterworth low pass mask
takes as input:
shape: the shape of the mask to be generated
cutoff: the cutoff frequency of the butterworth filter
order: the order of the butterworth filter
returns a butterworth low pass mask"""
mask = zeros(shape)
row_size, col_size = shape[0], shape[1]
center_row, center_col = row_size/2 , col_size/2
for r in range(0, row_size):
for c in range(0, col_size):
freq_dist = sqrt( (r-center_row)**2 + (c-center_col)**2 )
                # Butterworth low-pass transfer function: H = 1 / (1 + (D/D0)^(2n))
                mask[r,c] = 1 / (1 + (freq_dist/cutoff) ** (2*order))
return mask
def get_butterworth_high_pass_filter(self, shape, cutoff, order):
"""Computes a butterworth high pass mask
takes as input:
shape: the shape of the mask to be generated
cutoff: the cutoff frequency of the butterworth filter
order: the order of the butterworth filter
returns a butterworth high pass mask"""
#Hint: May be one can use the low pass filter function to get a high pass mask
mask = zeros(shape)
row_size, col_size = shape[0], shape[1]
center_row, center_col = row_size/2 , col_size/2
for r in range(0, row_size):
for c in range(0, col_size):
freq_dist =
|
sqrt( (r-center_row)**2 + (c-center_col)**2 )
|
numpy.sqrt
|
import gym
from gym.wrappers import Monitor
import itertools
import numpy as np
import os
import random
import sys
import psutil
import tensorflow as tf
if "../" not in sys.path:
sys.path.append("../")
if "./modules/" not in sys.path:
sys.path.append("./modules/")
from lib import plotting
from collections import deque, namedtuple
from approximator import Estimator
from sensedisposer import SenseDisposer
from modelcopier import ModelParametersCopier
from controller import make_epsilon_greedy_policy
VALID_ACTIONS = [0, 1, 2, 3]
def deep_q_learning(sess,
env,
q_estimator,
target_estimator,
state_processor,
num_episodes,
experiment_dir,
replay_memory_size=500000,
replay_memory_init_size=50000,
update_target_estimator_every=10000,
discount_factor=0.99,
epsilon_start=1.0,
epsilon_end=0.1,
epsilon_decay_steps=500000,
batch_size=32,
record_video_every=50):
"""
Q-Learning algorithm for off-policy TD control using Function Approximation.
Finds the optimal greedy policy while following an epsilon-greedy policy.
Args:
sess: Tensorflow Session object
env: OpenAI environment
q_estimator: Estimator object used for the q values
target_estimator: Estimator object used for the targets
state_processor: A StateProcessor object
num_episodes: Number of episodes to run for
experiment_dir: Directory to save Tensorflow summaries in
replay_memory_size: Size of the replay memory
        replay_memory_init_size: Number of random experiences to sample when initializing
            the replay memory.
update_target_estimator_every: Copy parameters from the Q estimator to the
target estimator every N steps
discount_factor: Gamma discount factor
epsilon_start: Chance to sample a random action when taking an action.
Epsilon is decayed over time and this is the start value
epsilon_end: The final minimum value of epsilon after decaying is done
epsilon_decay_steps: Number of steps to decay epsilon over
batch_size: Size of batches to sample from the replay memory
record_video_every: Record a video every N episodes
Returns:
An EpisodeStats object with two numpy arrays for episode_lengths and episode_rewards.
"""
Transition = namedtuple("Transition", ["state", "action", "reward", "next_state", "done"])
# The replay memory
replay_memory = []
# Make model copier object
estimator_copy = ModelParametersCopier(q_estimator, target_estimator)
# Keeps track of useful statistics
stats = plotting.EpisodeStats(
episode_lengths=np.zeros(num_episodes),
episode_rewards=np.zeros(num_episodes))
    # For 'system/' summaries, useful to check if the current process looks healthy
current_process = psutil.Process()
# Create directories for checkpoints and summaries
checkpoint_dir = os.path.join(experiment_dir, "checkpoints")
checkpoint_path = os.path.join(checkpoint_dir, "model")
monitor_path = os.path.join(experiment_dir, "monitor")
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
if not os.path.exists(monitor_path):
os.makedirs(monitor_path)
saver = tf.train.Saver()
# Load a previous checkpoint if we find one
latest_checkpoint = tf.train.latest_checkpoint(checkpoint_dir)
if latest_checkpoint:
print("Loading model checkpoint {}...\n".format(latest_checkpoint))
saver.restore(sess, latest_checkpoint)
# Get the current time step
total_t = sess.run(tf.contrib.framework.get_global_step())
# The epsilon decay schedule
epsilons = np.linspace(epsilon_start, epsilon_end, epsilon_decay_steps)
# The policy we're following
policy = make_epsilon_greedy_policy(
q_estimator,
len(VALID_ACTIONS))
# Populate the replay memory with initial experience
print("Populating replay memory...")
state = env.reset()
state = state_processor.process(sess, state)
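    # Initialize the frame stack by repeating the first processed frame 4
    # times along the channel axis (the network expects 4 stacked frames).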
state = np.stack([state] * 4, axis=2)
for i in range(replay_memory_init_size):
action_probs = policy(sess, state, epsilons[min(total_t, epsilon_decay_steps-1)])
action = np.random.choice(np.arange(len(action_probs)), p=action_probs)
next_state, reward, done, _ = env.step(VALID_ACTIONS[action])
next_state = state_processor.process(sess, next_state)
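        # Slide the 4-frame window: drop the oldest frame and append the
        # newly processed frame along the channel axis.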
next_state = np.append(state[:,:,1:], np.expand_dims(next_state, 2), axis=2)
replay_memory.append(Transition(state, action, reward, next_state, done))
if done:
state = env.reset()
state = state_processor.process(sess, state)
state = np.stack([state] * 4, axis=2)
else:
state = next_state
# Record videos
# Add env Monitor wrapper
env = Monitor(env, directory=monitor_path, video_callable=lambda count: count % record_video_every == 0, resume=True)
for i_episode in range(num_episodes):
# Save the current checkpoint
saver.save(tf.get_default_session(), checkpoint_path)
# Reset the environment
state = env.reset()
state = state_processor.process(sess, state)
state =
|
np.stack([state] * 4, axis=2)
|
numpy.stack
|
import numpy as np
from load_dataset import joe097 as monkey
import matplotlib.pyplot as plt
sampling_rate = 1.0 # ms
offset = -1000 # ms
data_arrays = monkey['data_arrays']
sources = monkey['sources']
tags = monkey['tags']
# Here we load the spikes activities
spike_activity_1 = data_arrays['SpikeActivity Unit 5 Target 1/data']
spike_activity_2 = data_arrays['SpikeActivity Unit 5 Target 2/data']
spike_activity_3 = data_arrays['SpikeActivity Unit 5 Target 3/data']
spike_activity_4 = data_arrays['SpikeActivity Unit 5 Target 4/data']
spike_activity_5 = data_arrays['SpikeActivity Unit 5 Target 5/data']
spike_activity_6 = data_arrays['SpikeActivity Unit 5 Target 6/data']
# Now we will build the raster plot for one of the targets
spike_activities = [spike_activity_1, spike_activity_2, spike_activity_3,
spike_activity_4, spike_activity_5, spike_activity_6]
mean = np.zeros(6)
time_window = 230
variability = np.array([])
for index, spike_activity in enumerate(spike_activities):
target = index + 1
n_trials = spike_activity.shape[1]
time_window_spikes = spike_activity[1000:1000 + time_window]
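    # Convert the spike count in the window to a firing rate in spikes/s;
    # the window length is in ms, hence the factor 1000 / time_window.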
firing_rate = np.sum(time_window_spikes, axis=0) * 1000.0 / time_window
ones = np.ones(firing_rate.size) * target
    plt.plot(ones, firing_rate, 'ob')
    # plt.hold was removed in Matplotlib 3.0; successive plot calls are
    # overlaid on the same axes by default, so no explicit hold is needed.
mean[index] = np.mean(firing_rate)
variability = np.concatenate((variability, firing_rate - mean[index]))
signal_variance =
|
np.var(mean)
|
numpy.var
|
import numpy as np
import torch
import os
import copy
import time
import gym
import psutil
from wrappers import PointCloudWrapper
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont
from collections import defaultdict
def makeEnv(env_name, idx, args):
"""return wrapped gym environment for parallel sample collection (vectorized environments)"""
def helper():
e = gym.make('{}-rotate-v1'.format(env_name))
e.seed(args.seed + idx)
return PointCloudWrapper(e, args)
return helper
def get_avg_values_across_batches(values):
"""
    values: list(dict) where each dict is the information returned by _update_network()
"""
# convert list(dict) to dict(list)
all_keys = []
for v in values:
all_keys += list(v.keys())
all_keys = set(all_keys)
values = {k: [dic[k] for dic in values if k in dic.keys()]
for k in all_keys}
mean_across_batch = dict()
for k in values.keys():
if isinstance(values[k][0], torch.Tensor):
mean_across_batch[k] = torch.mean(
torch.stack(values[k])).detach().cpu().numpy()
else:
mean_across_batch[k] = np.mean(values[k])
return mean_across_batch
def get_memory_usage():
process = psutil.Process(os.getpid())
megabytes = process.memory_info().rss / 1024 ** 2
return megabytes
def dictArray2arrayDict(x):
"""convert an array of dictionary to dictionary of arrays"""
assert isinstance(x[0], dict)
keys = list(x[0].keys())
res = dict()
for k in x[0].keys():
res[k] = np.array([e[k] for e in x])
return res
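# Illustrative sketch (not part of the original module): a tiny input/output
# pair for dictArray2arrayDict() above.
def _example_dictArray2arrayDict():
    # [{'a': 1, 'b': 2}, {'a': 3, 'b': 4}] -> {'a': array([1, 3]), 'b': array([2, 4])}
    return dictArray2arrayDict([{'a': 1, 'b': 2}, {'a': 3, 'b': 4}])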
def addSTARTtoImages(imgs):
for i in range(len(imgs)):
img = imgs[i]
img = Image.fromarray(img, "RGB")
draw = ImageDraw.Draw(img)
font = ImageFont.truetype('sans-serif.ttf', 60)
draw.text((170, 380), "START", (0, 0, 0), font=font)
img = np.array(img)
imgs[i] = img.astype(np.uint8)
return imgs
def addSUCCESStoImages(imgs, filter_array):
for i in range(len(imgs)):
if filter_array[i]:
img = imgs[i]
img = Image.fromarray(img, "RGB")
draw = ImageDraw.Draw(img)
font = ImageFont.truetype('sans-serif.ttf', 60)
draw.text((115, 380), "SUCCESS", (0, 0, 0), font=font)
img = np.array(img, dtype=np.uint8)
mask = (img == 255).astype(np.uint8)
img = mask * np.array([[[183, 230, 165]]],
dtype=img.dtype) + (1 - mask) * img
imgs[i] = img.astype(np.uint8)
return imgs
def preproc_og(o, g, clip_obs=200):
o = np.clip(o, -clip_obs, clip_obs)
g = np.clip(g, -clip_obs, clip_obs)
return o, g
def preproc_inputs(obs, g, o_normalizer, g_normalizer, cuda=False):
device = torch.device("cuda" if cuda else "cpu")
obs_norm = o_normalizer.normalize(obs)
g_norm = g_normalizer.normalize(g)
# concatenate the stuffs
inputs = np.concatenate([obs_norm, g_norm], axis=-1)
inputs = torch.tensor(inputs, dtype=torch.float32, device=device)
if len(inputs.shape) == 1:
inputs = inputs.unsqueeze(0)
return inputs
def distance_between_rotations(q1, q2):
"""
calculate distance between two unit quaternions
from the following paper:
Effective Sampling and Distance Metrics for 3D Rigid Body Path Planning
(https://www.ri.cmu.edu/pub_files/pub4/kuffner_james_2004_1/kuffner_james_2004_1.pdf)
"""
assert q1.shape[-1] == 4 and q2.shape[-1] == 4, 'must be quaternions'
return 1 - (np.sum(q1 * q2, axis=-1)) ** 2
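# Illustrative sketch (not part of the original module): identical unit
# quaternions give distance 0, while a 180-degree rotation about z gives the
# maximum distance 1 under the metric above.
def _example_quaternion_distance():
    q_identity = np.array([0.0, 0.0, 0.0, 1.0])
    q_rot180_z = np.array([0.0, 0.0, 1.0, 0.0])
    same = distance_between_rotations(q_identity, q_identity)       # 0.0
    different = distance_between_rotations(q_identity, q_rot180_z)  # 1.0
    return same, different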
def collect_experiences(env, num_episodes, max_timesteps, actor, o_normalizer, g_normalizer, env_params,
cuda=False, action_proc_func=None, point_cloud=False,
video_count=0, vec_env_names=None):
# define helper attributes for vectorized envs =================================================================
num_envs = env.num_envs # number of parallel vec envs
assert len(vec_env_names) % len(set(vec_env_names)) == 0
total_success_rate, total_distance, total_reward, total_video = [], [], [], []
assert num_episodes != 0
assert num_episodes % num_envs == 0, 'num_episodes ({}) must be multiple of num of parallel envs ({})'.format(
num_episodes, num_envs)
obs_key = 'pc_obs' if point_cloud else 'minimal_obs'
# assert video_count <= 1
# define storage =================================================================
mb_obs = np.empty([num_episodes, max_timesteps + 1, env_params['obs']])
mb_ag = np.empty([num_episodes, max_timesteps + 1, env_params['goal']])
mb_g = np.empty([num_episodes, max_timesteps, env_params['goal']])
mb_actions =
|
np.empty([num_episodes, max_timesteps, env_params['action']])
|
numpy.empty
|
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 18 13:49:57 2020
Algorithm for automatic labelling of motion capture markers.
Includes functions for importing motion capture data, generating simulated data,
training the algorithm, and automatic labelling.
@author: aclouthi
"""
import xml.dom.minidom
import numpy as np
import torch
import torch.nn as nn
import pandas as pd
from scipy import signal
from scipy import stats
from scipy.optimize import linear_sum_assignment
from scipy.interpolate import CubicSpline
from sklearn.utils.extmath import weighted_mode
from ezc3d import c3d
import h5py
import random
import copy
import pickle
import warnings
import glob
import time
from datetime import date
import os
# Import parameters
filtfreq = 6 # Cut off frequency for low-pass filter for marker trajectories
# Neural network architecture parameters
batch_size = 100
nLSTMcells = 256
nLSTMlayers = 3
LSTMdropout = .17
FCnodes = 128
# Learning parameters
lr = 0.078
momentum = 0.65
# --------------------------------------------------------------------------- #
# ---------------------------- MAIN FUNCTIONS ------------------------------- #
# --------------------------------------------------------------------------- #
def generateSimTrajectories(bodykinpath,markersetpath,outputfile,alignMkR,alignMkL,
fs,num_participants=100,max_len=240):
'''
Generate simulated marker trajectories to use for training the machine learning-
based marker labelling algorithm. Trajectories are generated based on the defined
OpenSim (https://simtk.org/projects/opensim) marker set using body kinematics
for up to 100 participants performing a series of athletic movements.
Parameters
----------
bodykinpath : string
Path to .hdf5 file containing body kinematics of training data
markersetpath : string
Path to .xml file of OpenSim marker set
outputfile : string
Path to save .pickle file of training data
alignMkR : string
Markers to use to align person such that they face +x. This is for the right side.
Suggest acromions or pelvis markers.
alignMkL : string
Markers to use to align person such that they face +x. This is for the left side.
Suggest acromions or pelvis markers.
fs : int
Sampling frequency of data to be labelled.
num_participants : int, optional
Number of participants to include in training data, must be <=100.
The default is 100.
max_len : int, optional
Max length of data segments. The default is 240.
Returns
-------
data : list of numpy arrays
num_frames x num_markers x 3 matrices of marker trajectories of simulated
training data.
'''
# Read marker set
markers, segment, uniqueSegs, segID, mkcoordL, num_mks = import_markerSet(markersetpath)
hf = h5py.File(bodykinpath,'r')
# Get body segment scaling and centre of mass
com = {}
scale = {}
for seg in uniqueSegs:
com[seg] = np.array(hf.get('/com/'+seg))
scale[seg] = np.array(hf.get('/scale/'+seg))
sids = list(hf['kin'].keys())
# Generate simulated trajectories
data = []
R = np.array([[1, 0, 0],[0,0,-1],[0,1,0]]) # Converts from y=up to z=up
for s in range(num_participants):
for t in hf['kin'][sids[s]].keys():
pts = np.zeros((hf['kin'][sids[s]][t]['torso'].shape[2],len(markers),3))
for m in range(len(markers)):
T = np.array(hf['kin'][sids[s]][t][segment[m]])
for i in range(T.shape[2]):
p = np.ones((4,1))
p[:3,0] = np.transpose((np.multiply(mkcoordL[m],scale[segment[m]][s,:]) -
com[segment[m]][s,:]) * 1000)
p = np.matmul(T[:,:,i],p)
pts[i,m,:] = np.transpose(np.matmul(R,p[:3,0]))
cs = CubicSpline(np.arange(0,pts.shape[0],1),pts,axis=0)
pts = cs(np.arange(0,pts.shape[0],120/fs)) # match sampling frequency of data to label
pts = align(pts,markers.index(alignMkR),markers.index(alignMkL))
if pts.shape[0] > max_len:
data.append(torch.from_numpy(pts[:round(pts.shape[0]/2),:,:]))
data.append(torch.from_numpy(pts[round(pts.shape[0]/2):,:,:]))
else:
data.append(torch.from_numpy(pts))
if (s+1) % 10 == 0:
print('%d/%d complete' % (s+1,num_participants))
hf.close()
with open(outputfile,'wb') as f:
pickle.dump(data,f)
print('Training data saved to ' + outputfile)
return data
def trainAlgorithm(savepath,datapath,markersetpath,fs,num_epochs=10,prevModel=None,windowSize=120,
alignMkR=None,alignMkL=None,tempCkpt=None,contFromTemp=False):
'''
Use this function to train the marker labelling algorithm on existing labelled
c3d files or simulated marker trajectories created using
generateSimTrajectories()
Parameters
----------
savepath : string
Folder where trained model should be saved.
datapath : string
        Full path to .pickle file containing simulated trajectory training data
or folder containing labelled .c3d files to use as training data.
markersetpath : string
Path to .xml file of OpenSim marker set.
fs : int
Sampling frequency of training data.
num_epochs : int, optional
Number of epochs to train for. The default is 10.
prevModel : string, optional
Path to a .ckpt file of a previously trained neural network if using
transfer learning. Set to None if not using a previous model.
The default is None.
windowSize : int, optional
Desired size of data windows. Not required if using simulated trajectories
to train. The default is 120.
alignMkR : string
Markers to use to align person such that they face +x. This is for the right side.
Suggest acromions or pelvis markers. Not required if using simulated trajectories
to train. The default is None.
alignMkL : string
Markers to use to align person such that they face +x. This is for the left side.
Suggest acromions or pelvis markers. Not required if using simulated trajectories
to train. The default is None.
tempCkpt : string, optional
Path to save a temporary .ckpt file of training progress after each epoch.
Set to None to only save model when training completes
contFromTemp : boolean, optional
Set to True to continue progress from partially completed training .ckpt file
at tempCkpt
Returns
-------
None.
'''
t0 = time.time()
# Read marker set
markers, segment, uniqueSegs, segID, _, num_mks = import_markerSet(markersetpath)
if '.pickle' in datapath:
# Import simulated trajectory data
with open(datapath,'rb') as f:
data_segs = pickle.load(f)
# Filter trajectories
b, a = signal.butter(2,6,btype='low',fs=fs) # 2nd order, low-pass at 6 Hz
for i in range(len(data_segs)):
for k in range(3):
inan = torch.isnan(data_segs[i][:,:,k])
df = pd.DataFrame(data_segs[i][:,:,k].numpy())
df = df.interpolate(axis=0,limit_direction='both')
dummy = torch.from_numpy(signal.filtfilt(b,a,df.to_numpy(),axis=0).copy())
dummy[inan] = np.nan
data_segs[i][:,:,k] = dummy
# Set windows (simulated data is already windowed)
windowIdx = []
for i in range(len(data_segs)):
for m in range(num_mks):
windowIdx.append([i,m,0,data_segs[i].shape[0]])
max_len = max([len(x) for x in data_segs])
print('Loaded simulated trajectory training data')
else:
# Load labelled c3d files for training
filelist = glob.glob(os.path.join(datapath,'*.c3d'))
data_segs, windowIdx = import_labelled_c3ds(filelist,markers,
alignMkR,alignMkL,windowSize)
max_len = max([x[3]-x[2] for x in windowIdx])
print('Loaded c3ds files for training data')
# Calculate values to use to scale neural network inputs and distances between
# markers on same body segment to use for label verification
scaleVals, segdists = get_trainingVals(data_segs,uniqueSegs,segID)
max_len = max([len(x) for x in data_segs])
training_vals = {'segdists' : segdists, 'scaleVals' : scaleVals,'max_len' : max_len}
with open(os.path.join(savepath,'trainingvals_' + date.today().strftime("%Y-%m-%d") + '.pickle'),'wb') as f:
pickle.dump(training_vals,f)
net, running_loss = train_nn(data_segs,num_mks,max_len,windowIdx,
scaleVals,num_epochs,prevModel,tempCkpt,contFromTemp)
with open(os.path.join(savepath,'training_stats_' + date.today().strftime("%Y-%m-%d") + '.pickle'),'wb') as f:
pickle.dump(running_loss,f)
torch.save(net.state_dict(),os.path.join(savepath,'model_'+ date.today().strftime("%Y-%m-%d") + '.ckpt'))
print('Model saved to %s' % os.path.realpath(savepath))
print('Algorithm trained in %s' % (time.time() - t0))
def transferLearning(savepath,datapath,modelpath,trainvalpath,markersetpath,
num_epochs=10,windowSize=120,alignMkR=None,alignMkL=None,
tempCkpt=None,contFromTemp=False):
'''
Use this function to perform transfer learning. Requires a previously trained
model and labelled c3d files to add to training set.
Parameters
----------
savepath : string
Path for save location of trained model.
datapath : string
Path to folder containing labelled .c3d files to add to training set.
modelpath : string
Path to a .ckpt file of a previously trained neural network to use as
base for transfer learning.
trainvalpath : string
Path to training values from previously trained algorithm.
Should match the model used in modelpath.
markersetpath : string
Path to .xml file of OpenSim marker set.
num_epochs : int, optional
Number of epochs to train for. The default is 10.
windowSize : int, optional
Size of windows used to segment data for algorithm. The default is 120.
alignMkR : string, optional
Markers to use to align person such that they face +x. This is for the right side.
Suggest acromions or pelvis markers. The default is None (ie. no alignment).
alignMkL : string, optional
Markers to use to align person such that they face +x. This is for the left side.
Suggest acromions or pelvis markers. The default is None (ie. no alignment).
tempCkpt : string, optional
Path to save a temporary .ckpt file of training progress after each epoch.
Set to None to only save model when training completes
contFromTemp : boolean, optional
Set to True to continue progress from partially completed training .ckpt file
at tempCkpt
Returns
-------
None.
'''
t0 = time.time()
# Read marker set
markers, segment, uniqueSegs, segID, _, num_mks = import_markerSet(markersetpath)
# Load c3d files
filelist = glob.glob(os.path.join(datapath,'*.c3d'))
data_segs, windowIdx = import_labelled_c3ds(filelist,markers,alignMkR,alignMkL,windowSize)
# Load scale values and intra-segment distances
with open(trainvalpath,'rb') as f:
trainingvals = pickle.load(f)
segdists = trainingvals['segdists']
scaleVals = trainingvals['scaleVals']
max_len = trainingvals['max_len']
# Perform transfer learning
net, running_loss = train_nn(data_segs,num_mks,max_len,windowIdx,scaleVals,
num_epochs,modelpath,tempCkpt,contFromTemp)
with open(os.path.join(savepath,'training_stats_plus' + str(len(filelist)) + 'trials_' +
date.today().strftime("%Y-%m-%d") + '.pickle'),'wb') as f:
pickle.dump(running_loss,f)
torch.save(net.state_dict(),os.path.join(savepath,'model_plus' + str(len(filelist)) + 'trials_' +
date.today().strftime("%Y-%m-%d") + '.ckpt'))
# Update intra-segment distances by adding in new training data
nframes = 0
for i in range(len(data_segs)):
nframes = nframes + data_segs[i].shape[0]
for bs in range(len(uniqueSegs)):
I = np.where(segID == bs)[0]
dists = np.zeros((I.shape[0],I.shape[0],nframes))
k = 0
for i in range(len(data_segs)):
pts = data_segs[i]
for m1 in range(len(I)):
for m2 in range(m1+1,len(I)):
dists[m1,m2,k:k+data_segs[i].shape[0]] = (pts[:,I[m1],:] - pts[:,I[m2],:]).norm(dim=1).numpy()
k = k+data_segs[i].shape[0]
# update mean and std based on new data
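# Sketch of the pooled update used below (for each marker pair): the combined
# mean is mn = (n_old*mean_old + n_new*mean_new)/(n_old + n_new); the combined
# variance pools the old sum of squares, (std_old**2)*(n_old-1), with the new
# sum of squared deviations taken about mean_old, then subtracts the shift term
# (n_old + n_new)*(mn - mean_old)**2 before dividing by (n_old + n_new - 1).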
mn = (segdists['mean'][bs]*segdists['nframes'] + np.nanmean(dists,axis=2)*nframes) / (segdists['nframes'] + nframes) # new mean
sumdiff = np.nansum((dists - np.repeat(np.expand_dims(segdists['mean'][bs],axis=2),nframes,axis=2))**2,axis=2)
segdists['std'][bs] = np.sqrt(((segdists['std'][bs]**2)*(segdists['nframes']-1) + sumdiff - \
(segdists['nframes']+nframes)*(mn-segdists['mean'][bs])**2)/(segdists['nframes']+nframes-1))
segdists['mean'][bs] = mn.copy()
for i in range(1,segdists['mean'][bs].shape[0]):
for j in range(0,i):
segdists['mean'][bs][i,j] = segdists['mean'][bs][j,i]
segdists['std'][bs][i,j] = segdists['std'][bs][j,i]
segdists['nframes'] = segdists['nframes']+nframes
training_vals = {'segdists' : segdists, 'scaleVals' : scaleVals,'max_len' : max_len}
with open(os.path.join(savepath,'trainingvals_plus' + str(len(filelist)) + 'trials_' +
date.today().strftime("%Y-%m-%d") + '.pickle'),'wb') as f:
pickle.dump(training_vals,f)
print('Added %d trials in %f s' % (len(filelist),time.time() - t0))
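# Example call (illustrative only; every path below is a hypothetical placeholder
# and must point to an existing trained model, its matching training-values pickle,
# a folder of newly labelled .c3d trials, and the OpenSim marker set file):
#
# transferLearning(savepath='models', datapath='new_labelled_trials',
#                  modelpath='models/model_2021-06-01.ckpt',
#                  trainvalpath='models/trainingvals_2021-06-01.pickle',
#                  markersetpath='markerset.xml',
#                  num_epochs=10, windowSize=120)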
# --------------------------------------------------------------------------- #
# --------------------------- IMPORT FUNCTIONS ------------------------------ #
# --------------------------------------------------------------------------- #
def import_markerSet(markersetpath):
'''
Read marker set from OpenSim marker set .xml file
Parameters
----------
markersetpath : string
path to .xml file
Returns
-------
markers : list of strings
marker names
segment : list of strings
body segment each marker belongs to
uniqueSegs : list of strings
body segments
segID : list of ints
index of body segment each marker belongs to
mkcoordL : list of numpy arrays
position of each marker relative to the local coordinate system of its
body segment
num_mks : int
number of markers
'''
markersetxml = xml.dom.minidom.parse(markersetpath)
mkxml=markersetxml.getElementsByTagName('Marker')
markers = []
segment = []
mkcoordL = []
for i in range(len(mkxml)):
markers.append(mkxml[i].attributes['name'].value)
segment.append(mkxml[i].childNodes[3].childNodes[0].data)
mkcoordL.append(np.fromstring(mkxml[i].childNodes[7].childNodes[0].data,sep=' '))
segment = [x.split('/')[-1] for x in segment]
uniqueSegs = sorted(list(set(segment)))
segID = -1 * np.ones(len(segment),dtype=np.int64)
for i in range(len(segment)):
segID[i] = uniqueSegs.index(segment[i])
num_mks = len(markers)
return markers, segment, uniqueSegs, segID, mkcoordL, num_mks
def align(data,m1,m2):
'''
Rotate points about the z-axis (vertical) so that the participant faces the +x direction.
The angle to rotate by is calculated from m1 and m2. These should be the indices of
markers on the right and left sides of the torso or head.
If one of these markers is missing from the entire trial, the data will not be
rotated.
Parameters
----------
data : numpy array
num_frames x num_markers x 3 matrix of marker trajectories
m1 : int
index of the right side marker
m2 : int
index of the left side marker
Returns
-------
data : numpy array
Rotated marker trajectories
'''
# if alignment markers are missing for entire trial, can't align
if np.isnan(data[:,m1,0]).sum() == data.shape[0] or np.isnan(data[:,m2,0]).sum() == data.shape[0]:
return data
else:
# find first non-nan entry for the markers
i = 0
while np.isnan(data[i,m1,0]) or np.isnan(data[i,m2,0]):
i = i+1
pts = data[i,:,:]
v = pts[m2,:] - pts[m1,:] # L - R
theta = np.arctan2(v[0],v[1])
T = np.array([[np.cos(theta),-np.sin(theta),0],
[np.sin(theta),np.cos(theta),0],
[0,0,1]])
dataR = np.empty(data.shape)
for i in range(0,data.shape[0]):
pts = data[i,:,:]
ptsR = np.transpose(np.matmul(T,pts.transpose()))
dataR[i,:,:] = ptsR
return dataR
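# Minimal check of the align() convention above (an illustrative sketch, not part
# of the labelling pipeline): after rotation the left-minus-right vector should
# point along +y, so its x-component is ~0 and the participant faces +x.
def _align_example():
    pts = np.zeros((1, 2, 3))
    pts[0, 0, :] = [1.0, 0.0, 0.0]   # right-side marker (index 0)
    pts[0, 1, :] = [0.0, 1.0, 0.0]   # left-side marker (index 1)
    rotated = align(pts, 0, 1)
    v = rotated[0, 1, :] - rotated[0, 0, :]   # L - R after rotation
    assert abs(v[0]) < 1e-9 and v[1] > 0
    return rotated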
def window_data(data_segs,windowSize,num_mks):
'''
Determine how to window data.
Parameters
----------
data_segs : list of numpy arrays
num_frames x num_markers x 3 arrays of marker trajectories imported from .c3d files
windowSize : int
desired size of windows
num_mks : int
number of markers of interest
windows will be created for the first num_mks trajectories
Returns
-------
windowIdx : list of lists
indices to use to window data, required input to training function
'''
windowIdx = []
for t in range(len(data_segs)):
pts = data_segs[t]
if torch.is_tensor(pts):
pts = pts.numpy()
for m in range(num_mks):
# only include if it's not all nans
if len(np.where(~np.isnan(pts[:,m,0]))[0]) > 0:
i1 = np.where(~np.isnan(pts[:,m,0]))[0][0] # first visible frame
while i1 < pts.shape[0]:
if (np.isnan(pts[i1:,m,0])).sum() > 0: # if any more nans
i2 = np.where(np.isnan(pts[i1:,m,0]))[0][0] + i1
else:
i2 = pts.shape[0]
while i1 <= i2:
if (i2 - (i1+windowSize) < 12) or (i1 + windowSize > i2):
if i2 - i1 > 0:
# confirm that there are other markers visible in this window
if (~np.isnan(np.concatenate((pts[i1:i2,0:m,:],pts[i1:i2,m+1:,:]),1))).sum() > 0:
windowIdx.append([t,m,i1,i2])
if (~np.isnan(pts[i2:,m,0])).sum() > 1: # any more visible markers?
i1 = i2 + np.where(~np.isnan(pts[i2:,m,0]))[0][0]
else:
i1 = pts.shape[0] + 1
else:
if (~np.isnan(np.concatenate((pts[i1:i2,0:m,:],pts[i1:i2,m+1:,:]),1))).sum() > 0:
windowIdx.append([t,m,i1,i1+windowSize])
i1 = i1 + windowSize
return windowIdx
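# Each entry of windowIdx above has the form [trajectory_index, marker_index,
# start_frame, end_frame]: windows start at the first visible frame of a
# trajectory, are broken wherever the marker disappears (NaN gaps), and are cut
# into windowSize-long chunks, keeping a short remainder only if at least one
# other marker is visible over the same frames.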
def import_labelled_c3ds(filelist,markers,alignMkR,alignMkL,windowSize):
'''
Import c3d files for training, sort markers to match marker set order,
filter data, rotate data so that the person faces +x, and determine window
indices.
Parameters
----------
filelist : list of strings
list of filepaths to .c3d files to import
markers : list of strings
list of marker names
alignMkR : string
name of marker to use to rotate data on RIGHT side of body,
set to None if rotation is not needed
alignMkL : string
name of marker to use to rotate data on LEFT side of body,
set to None if rotation is not needed
windowSize : int
desired size of windows
Returns
-------
data_segs : list of torch tensors
num_frames x num_markers x 3 tensors of marker trajectories imported from .c3d files
windowIdx : list of lists
indices to use to window data, required input to training function
'''
num_mks = len(markers)
data_segs = []
for trial in filelist:
# Import c3d and reorder points according to marker set order
c3ddat = c3d(trial)
alllabels = c3ddat['parameters']['POINT']['LABELS']['value']
fs = c3ddat['parameters']['POINT']['RATE']['value'][0]
pts = np.nan * np.ones((c3ddat['data']['points'].shape[2],num_mks,3))
for i in range(c3ddat['data']['points'].shape[1]):
j = [ii for ii,x in enumerate(markers) if x in alllabels[i]]
if len(j) == 0:
# if this is an extraneous marker (not part of the defined marker set),
# add to the end of the array
dummy = np.empty((pts.shape[0],1,3))
for k in range(3):
dummy[:,0,k] = c3ddat['data']['points'][k,i,:]
pts = np.append(pts,dummy,axis=1)
elif len(j) == 1:
# If this is part of the marker set
for k in range(3):
pts[:,j[0],k] = c3ddat['data']['points'][k,i,:]
# delete any empty frames at the end
while (~np.isnan(pts[-1,:,0])).sum() == 0:
pts = pts[:-1,:,:]
# rotate so that the person faces +x
if (alignMkR is not None) and (alignMkR != '') and (alignMkL is not None) and (alignMkL != ''):
pts = align(pts,markers.index(alignMkR),markers.index(alignMkL))
# Filter with 2nd order, low-pass Butterworth at filtfreq Hz
b, a = signal.butter(2,filtfreq,btype='low',fs=fs)
for k in range(3):
inan = np.isnan(pts[:,:,k])
df = pd.DataFrame(pts[:,:,k])
df = df.interpolate(axis=0,limit_direction='both')
dummy = signal.filtfilt(b,a,df.to_numpy(),axis=0).copy()
dummy[inan] = np.nan
pts[:,:,k] = dummy
data_segs.append(torch.from_numpy(pts))
windowIdx = window_data(data_segs,windowSize,num_mks)
return data_segs, windowIdx
def import_raw_c3d(file,rotang):
'''
Import a c3d file for labelling. Trajectories are split at gaps where the
distance the marker travels during the occlusion is greater than the distance to
the closest marker in the next frame. Marker data is rotated 'rotang' degrees
about the z-axis (vertical).
Parameters
----------
file : string
path to the c3d file to be imported
rotang : float
Angle to rotate the marker data about z-axis in DEGREES.
Returns
-------
pts : numpy array
num_frames x num_markers x 3 array of marker trajectories imported from .c3d files
fs : float
sampling frequency used in c3d file
'''
c3ddat = c3d(file) # read in c3d file
rawpts = c3ddat['data']['points'][0:3,:,:].transpose((2,1,0)) # Get points from c3d file
fs = c3ddat['parameters']['POINT']['RATE']['value'] # sampling frequency
rawlabels = c3ddat['parameters']['POINT']['LABELS']['value']
# # Try to find and fix places where the markers swap indices
# thresh = 20
# for m in range(rawpts.shape[1]):
# kf = np.where(np.isnan(rawpts[1:,m,0]) != np.isnan(rawpts[0:-1,m,0]))[0]
# if ~np.isnan(rawpts[0,m,0]):
# kf = np.insert(kf,0,-1,axis=0)
# if ~np.isnan(rawpts[-1,m,0]):
# kf = np.concatenate((kf,[rawpts.shape[0]-1]))
# kf = np.reshape(kf,(-1,2))
# k = 0
# while k < kf.shape[0]-1:
# d = np.linalg.norm(rawpts[kf[k+1,0]+1,m,:] - rawpts[kf[k,1],m,:])
# all_d = np.linalg.norm(rawpts[kf[k,1]+1,:,:] - rawpts[kf[k,1],m,:],axis=1)
# all_d[m] = np.nan
# if (~np.isnan(all_d)).sum() > 0:
# if d > np.nanmin(all_d) and np.nanmin(all_d) < thresh and \
# np.isnan(rawpts[kf[k,1],np.nanargmin(all_d),0]):
# dummy = rawpts[kf[k,1]+1:,m,:].copy()
# rawpts[kf[k,1]+1:,m,:] = rawpts[kf[k,1]+1:,np.nanargmin(all_d),:]
# rawpts[kf[k,1]+1:,np.nanargmin(all_d),:] = dummy.copy()
# kf = np.where(np.isnan(rawpts[1:,m,0]) != np.isnan(rawpts[0:-1,m,0]))[0]
# if ~np.isnan(rawpts[0,m,0]):
# kf = np.insert(kf,0,0,axis=0)
# if ~np.isnan(rawpts[-1,m,0]):
# kf = np.concatenate((kf,[rawpts.shape[0]-1]))
# kf = np.reshape(kf,(-1,2))
# k = k+1
# Wherever there is a gap, check if the marker jumps further than the distance to the
# next closest marker. If so, split it into a new trajectory.
pts = np.empty((rawpts.shape[0],0,3))
labels = []
for m in range(rawpts.shape[1]):
# key frames where the marker appears or disappears
kf = np.where(np.isnan(rawpts[1:,m,0]) != np.isnan(rawpts[0:-1,m,0]))[0]
if ~np.isnan(rawpts[0,m,0]):
kf = np.insert(kf,0,-1,axis=0)
if ~np.isnan(rawpts[-1,m,0]):
kf = np.concatenate((kf,[rawpts.shape[0]-1]))
kf = np.reshape(kf,(-1,2))
k = 0
while k < kf.shape[0]:
i1 = kf[k,0]
d = 0
gapsize = 0
min_d = 1000
while d < min_d and gapsize < 60:
if k < kf.shape[0]-1:
d = np.linalg.norm(rawpts[kf[k+1,0]+1,m,:] - rawpts[kf[k,1],m,:])
all_d = np.linalg.norm(rawpts[kf[k,1]+1,:,:] - rawpts[kf[k,1],m,:],axis=1)
all_d[m] = np.nan
if (~np.isnan(all_d)).sum() > 0:
min_d = np.nanmin(all_d)
else:
min_d = 1000
gapsize = kf[k+1,0] - kf[k,1]
else:
gapsize = 61
k=k+1
if kf[k-1,1] - i1 > 2:
traj = np.nan * np.ones((rawpts.shape[0],1,3))
traj[i1+1:kf[k-1,1]+1,0,:] = rawpts[i1+1:kf[k-1,1]+1,m,:]
pts = np.append(pts,traj,axis=1)
labels.append(rawlabels[m])
# Angle to rotate points about z-axis
rotang = float(rotang) * np.pi/180
Ralign = np.array([[np.cos(rotang),-np.sin(rotang),0],
[np.sin(rotang),np.cos(rotang),0],
[0,0,1]])
for i in range(pts.shape[1]):
pts[:,i,:] = np.matmul(Ralign,pts[:,i,:].transpose()).transpose()
return pts, fs, labels
# --------------------------------------------------------------------------- #
# ------------------------- NEURAL NET FUNCTIONS ---------------------------- #
# --------------------------------------------------------------------------- #
def get_trainingVals(data_segs,uniqueSegs,segID):
'''
Calculates the values that will be used to scale the input matrix to the neural
network. These are the mean observed relative distances, velocities, and
accelerations from up to 2000 randomly sampled segments of the training set.
Calculates the mean and standard deviation of distances among markers belonging
to each body segment. These are used to validate and correct the labels
predicted by the neural network.
Parameters
----------
data_segs : list of torch tensors
num_frames x num_markers x 3 tensors of marker trajectories in training set
uniqueSegs : list of strings
body segment names
segID : list of ints
index of body segments each marker belongs to
Returns
-------
scaleVals : list of floats
mean relative distance, velocity, and acceleration in training set and
number of data frames used to calculate these
segdists : dictionary
['mean'] : numpy arrays for each body segment containing mean distances
among associated markers in training set
['std'] : numpy arrays for each body segment containing standard deviation
of distances among associated markers in training set
['nframes'] : number of frames used to calculate these values
'''
# Get scale values
sumDist = 0.0
sumVel = 0.0
sumAccn = 0.0
nDist = 0.0
nVel = 0.0
nAccn = 0.0
# Only use 2000 segments to save computing time
if len(data_segs) > 2000:
I = random.sample(range(len(data_segs)),2000)
else:
I = range(len(data_segs))
for i in I:
for m in range(int(data_segs[i].shape[1])):
# marker distances relative to marker m
xyz = data_segs[i] - data_segs[i][:,m,:].unsqueeze(1).repeat(1,data_segs[i].shape[1],1)
xyz_v = xyz[1:,:,:] - xyz[0:xyz.shape[0]-1,:,:]
xyz_v_norm = xyz_v.norm(dim=2)
xyz_a = xyz_v[1:,:,:] - xyz_v[0:xyz_v.shape[0]-1,:,:]
xyz_a_norm = xyz_a.norm(dim=2)
sumDist = sumDist + np.nansum(abs(xyz))
nDist = nDist + (~torch.isnan(xyz[:,0:m,:])).sum() + (~torch.isnan(xyz[:,m+1:,:])).sum() #xyz.shape[0]*(xyz.shape[1]-1)*xyz.shape[2]
sumVel = sumVel + np.nansum(xyz_v_norm)
nVel = nVel + (~torch.isnan(xyz_v_norm[:,0:m])).sum() + (~torch.isnan(xyz_v_norm[:,m+1:])).sum() #xyz_v_norm.shape[0] * (xyz_v_norm.shape[1]-1)
sumAccn = sumAccn + np.nansum(xyz_a_norm)
nAccn = nAccn + (~torch.isnan(xyz_a_norm[:,0:m])).sum() + (~torch.isnan(xyz_a_norm[:,m+1:])).sum() # xyz_a_norm.shape[0] * (xyz_a_norm.shape[1]-1)
scaleVals = [sumDist/nDist, sumVel/nVel, sumAccn/nAccn,nDist,nVel,nAccn]
# Calculate distances between markers on same body segments
dists_mean = []
dists_std = []
nframes = 0
for i in range(len(data_segs)):
nframes = nframes + data_segs[i].shape[0]
for bs in range(len(uniqueSegs)):
I = np.where(segID == bs)[0] # indices of markers on this body segment
dists = np.zeros((I.shape[0],I.shape[0],nframes))
dists_mean.append(np.zeros((I.shape[0],I.shape[0])))
dists_std.append(np.zeros((I.shape[0],I.shape[0])))
k = 0
for i in range(len(data_segs)):
pts = data_segs[i]
for m1 in range(len(I)):
for m2 in range(m1+1,len(I)):
dists[m1,m2,k:k+data_segs[i].shape[0]] = \
(pts[:,I[m1],:] - pts[:,I[m2],:]).norm(dim=1).numpy()
k = k+data_segs[i].shape[0]
dists_mean[bs] = dists.mean(axis=2)
dists_std[bs] = dists.std(axis=2)
for i in range(1,dists_mean[bs].shape[0]):
for j in range(0,i):
dists_mean[bs][i,j] = dists_mean[bs][j,i]
dists_std[bs][i,j] = dists_std[bs][j,i]
segdists = {'mean' : dists_mean,'std' : dists_std,'nframes' : nframes}
return scaleVals, segdists
# Generates the input data for the neural network.
class markerdata(torch.utils.data.Dataset):
def __init__(self,marker_data,num_mks,windowIdx,scaleVals):
self.marker_data = copy.deepcopy(marker_data)
self.num_mks = num_mks # Number of marker labels
self.windowIdx = windowIdx
self.scaleVals = scaleVals
def __len__(self): # Should be the number of items in this dataset
return len(self.windowIdx)
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist() # index will be row major
num_mks = self.num_mks
t = self.windowIdx[idx][0]
m = self.windowIdx[idx][1]
i1 = self.windowIdx[idx][2]
i2 = self.windowIdx[idx][3]
xyz_raw = copy.deepcopy(self.marker_data[t][i1:i2,:,:])
xyz_m = xyz_raw[:,m,:] # current marker
xyz_raw = torch.cat((xyz_raw[:,0:m,:],xyz_raw[:,m+1:,:]),dim=1)
# check what is visible at this time and take the markers that are
# visible for the greatest number of frames
mksvis = (~torch.isnan(xyz_raw[:,:,0])).sum(dim=0)
if (mksvis == xyz_raw.shape[0]).sum() > num_mks:
# then also sort by distance
xyz_raw = xyz_raw[:,mksvis==xyz_raw.shape[0],:]
d = (xyz_raw - xyz_m.unsqueeze(1).repeat(1,xyz_raw.shape[1],1)).norm(dim=2)
_,I = (d.mean(0)).sort()
xyz_raw = xyz_raw[:,I[0:num_mks-1],:]
else:
_,I = mksvis.sort(descending=True)
xyz_raw = xyz_raw[:,I[0:num_mks-1],:]
# Fill in any missing markers with the mean of all of the other visible markers
if torch.isnan(xyz_raw[:,:,0]).sum() > 0:
inan = torch.where(torch.isnan(xyz_raw))
xyz_raw[inan] = torch.take(torch.from_numpy(np.nanmean(xyz_raw,1)),
inan[0]*3+inan[2])
if torch.isnan(xyz_raw[:,:,0]).any(1).sum() > 0:
# if any frames somehow ended up empty, delete them
xyz_m = xyz_m[~torch.isnan(xyz_raw[:,:,0]).any(1),:]
xyz_raw = xyz_raw[~torch.isnan(xyz_raw[:,:,0]).any(1),:,:]
xyz_raw = xyz_raw - xyz_m.unsqueeze(1).repeat(1,xyz_raw.shape[1],1)
d = (xyz_raw.mean(dim=0)).norm(dim=1)
_, I = d.sort() # Sort trajectories by distance relative to marker m
xyz = xyz_raw[:,I,:]
# Add in velocity and accn
xyz_v = torch.zeros(xyz.shape,dtype=xyz.dtype)
xyz_v[1:,:,:] = xyz[1:,:,:] - xyz[0:xyz.shape[0]-1,:,:]
xyz_v_norm = xyz_v.norm(dim=2)
if xyz_v_norm.shape[0] > 1:
xyz_v_norm[0,:] = xyz_v_norm[1,:]
xyz_a = torch.zeros(xyz.shape,dtype=xyz.dtype)
xyz_a[1:,:,:] = xyz_v[1:xyz_v.shape[0],:,:] - xyz_v[0:xyz_v.shape[0]-1,:,:]
xyz_a_norm = xyz_a.norm(dim=2)
if xyz_a_norm.shape[0] > 2:
xyz_a_norm[1,:] = xyz_a_norm[2,:]
xyz_a_norm[0,:] = xyz_a_norm[2,:]
# Scale input data
xyz = xyz / self.scaleVals[0]
xyz_v_norm = xyz_v_norm / self.scaleVals[1]
xyz_a_norm = xyz_a_norm / self.scaleVals[2]
out = torch.cat((xyz,xyz_v_norm.unsqueeze(2),xyz_a_norm.unsqueeze(2)),2)
out = out.reshape(-1,(num_mks-1)*5)
return out, m, idx
# Collate function for data loader. Pads the data to make it equally sized.
def pad_collate(batch):
# filter out None entries
batch = list(filter(lambda xx:xx[0] is not None,batch))
(X,Y,T) = zip(*batch)
x_lens = [len(x) for x in X]
Y_out = [y for y in Y]
T_out = [t for t in T]
X_pad = nn.utils.rnn.pad_sequence(X,batch_first=True,padding_value=0)
return X_pad, Y_out, T_out, x_lens
# Define network architecture
class Net(nn.Module):
def __init__(self, max_len,num_mks):
super(Net,self).__init__()
self.max_len = max_len
self.lstm = nn.LSTM((num_mks-1)*5,nLSTMcells,num_layers=nLSTMlayers,dropout=LSTMdropout)
self.fc = nn.Sequential(nn.Linear(max_len*nLSTMcells,FCnodes),
nn.BatchNorm1d(FCnodes),
nn.ReLU(),
nn.Linear(FCnodes,num_mks))
def forward(self,x,x_lens):
out = torch.nn.utils.rnn.pack_padded_sequence(x.float(),x_lens,batch_first=True,enforce_sorted=False)
out, (h_t,h_c) = self.lstm(out)
out,_ = torch.nn.utils.rnn.pad_packed_sequence(out,batch_first=True,total_length=self.max_len)
out = self.fc(out.view(out.shape[0],-1))
return out
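# Shape summary for the network above: each window is a sequence of
# (num_mks-1)*5 features per frame (three relative coordinates plus velocity and
# acceleration norms for each of the num_mks-1 neighbouring trajectories). The
# packed LSTM output is re-padded to max_len so the flattened
# (batch, max_len*nLSTMcells) activations match the fully connected head, which
# returns one score per candidate label, i.e. a (batch, num_mks) tensor.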
def train_nn(data_segs,num_mks,max_len,windowIdx,scaleVals,num_epochs,prevModel,
tempCkpt=None,contFromTemp=False):
'''
Train the neural network.
Will use GPU if available.
Parameters
----------
data_segs : list of torch tensors
num_frames x num_markers x 3 tensors of marker trajectories in training set
num_mks : int
number of markers in marker set
max_len : int
maximum window length
windowIdx : list of lists
indices to use to window data, required input to training function
scaleVals : list of floats
mean relative distance, velocity, and acceleration in training set and
number of data frames used to calculate these. Used to scale variables
before inputting to neural network.
num_epochs : int
number of epochs to train for
prevModel : string
path to the .ckpt file for a previously trained model if using transfer
learning
set to None if not using transfer learning
tempCkpt : string
path to a .ckpt file used to save model training progress after each epoch.
Set to None to only save after training completes
contFromTemp : boolean
set to True to continue a partially completed training from the tempCkpt file
Returns
-------
net : torch.nn.Module
trained neural network
running_loss: list of floats
running loss for network training
'''
# Use GPU if available
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
# Create dataset and torch data loader
traindata = markerdata(data_segs,num_mks,windowIdx,scaleVals)
trainloader = torch.utils.data.DataLoader(traindata,batch_size=batch_size,
shuffle=True,collate_fn=pad_collate)
# Create neural net
net = Net(max_len,num_mks).to(device)
# Load previous model if transfer learning
if (prevModel is not None) and (prevModel != ''):
net.load_state_dict(torch.load(prevModel,map_location=device))
# Define loss function and optimizer
criterion = nn.CrossEntropyLoss()
optimizer= torch.optim.SGD(net.parameters(), lr=lr, momentum=momentum)
# Load a partially trained model to complete training
if contFromTemp:
checkpoint = torch.load(tempCkpt)
net.load_state_dict(checkpoint['model_state_dict'])
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
epoch0 = checkpoint['epoch']
running_loss = checkpoint['running_loss']
loss = checkpoint['loss']
torch.set_rng_state(checkpoint['rng_state'])
else:
epoch0 = 0
running_loss = []
# Train Network
total_step = len(trainloader)
for epoch in range(epoch0,num_epochs):
for i, (data, labels, trials, data_lens) in enumerate(trainloader):
data = data.to(device)
labels = torch.LongTensor(labels)
labels = labels.to(device)
# Forward pass
outputs = net(data,data_lens)
loss = criterion(outputs,labels)
# Backward and optimize
optimizer.zero_grad()
loss.backward()
optimizer.step()
running_loss.append(loss.item())
# Print stats
if (i+1) % 10 == 0:
print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'.format(epoch+1,num_epochs,i+1,
total_step,loss.item()))
if tempCkpt is not None:
torch.save({'epoch': epoch+1,'model_state_dict': net.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),'running_loss': running_loss,'loss' : loss,
'rng_state': torch.get_rng_state()},tempCkpt)
return net, running_loss
def predict_nn(modelpath,pts,windowIdx,scaleVals,num_mks,max_len):
'''
Run the neural network to get label probabilities
Parameters
----------
modelpath : string
path to .ckpt file of trained neural network weights
pts : numpy array
num_frames x num_markers x 3 array of marker trajectories to be labelled
windowIdx : list of lists
indices to use to window data, required input to training function
scaleVals : list of floats
mean relative distance, velocity, and acceleration in training set and
number of data frames used to calculate these. Used to scale variables
before inputting to neural network.
num_mks : int
number of markers in marker set
max_len : int
max length of data windows
Returns
-------
probWindow : torch tensor
num_windows x num_mks tensor of label probabilities for each window
'''
# Use GPU if available
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
# Load trained network weights
net = Net(max_len,num_mks).to(device)
net.load_state_dict(torch.load(modelpath,map_location=device))
dataset = markerdata([torch.from_numpy(pts)],num_mks,windowIdx,scaleVals)
dataloader = torch.utils.data.DataLoader(dataset,batch_size=batch_size,
shuffle=False,collate_fn=pad_collate)
# Apply neural net
sm = nn.Softmax(dim=1)
net.eval()
with torch.no_grad():
probWindow = torch.zeros(len(windowIdx),num_mks)
k = 0
for data, trajNo, segIdx, data_lens in dataloader:
if data is not None:
data = data.to(device)
outputs = net(data, data_lens)
_,predicted = torch.max(outputs.data,1)
# Get probabilities for each window
outputs = sm(outputs)
probWindow[k:k+data.shape[0],:] = outputs
k = k + data.shape[0]
return probWindow
# --------------------------------------------------------------------------- #
# ---------------------------- LABEL FUNCTIONS ------------------------------ #
# --------------------------------------------------------------------------- #
def marker_label(pts,modelpath,trainvalpath,markersetpath,fs,windowSize):
'''
Parameters
----------
pts : numpy array
[num_frames x num_markers x 3] array of marker trajectories to be labelled
modelpath : string
path to .ckpt file containing trained neural network weights
trainvalpath : string
path to .pickle file containing the training values obtained from trainAlgorithm.py
markersetpath : string
path to .xml file containing OpenSim marker set definition
fs : float
sampling frequency of data in pts
windowSize : int
desired size of windows
Returns
-------
labels_predicted : list of strings
predicted labels for each marker trajectory
confidence : numpy array
[1 x num_trajectories] array of confidence in predicted label
Y_pred : torch tensor
[1 x num_trajectories] tensor of predicted label indices
'''
# Read marker set
markers, segment, uniqueSegs, segID, mkcoordL, num_mks = import_markerSet(markersetpath)
# Get expected inter-marker distances organized by segment
with open(trainvalpath,'rb') as f:
trainingvals = pickle.load(f)
segdists = trainingvals['segdists']
scaleVals = trainingvals['scaleVals']
max_len = trainingvals['max_len']
pts = np.array(pts,dtype=np.float64)
num_mks = len(markers)
# Fill small gaps
gap_idx = np.isnan(pts)
for m in range(pts.shape[1]):
df = pd.DataFrame(pts[:,m,:])
df = df.interpolate(axis=0,limit=4,limit_area='inside')
pts[:,m,:] = df
# Filter
b, a = signal.butter(2,6,btype='low',fs=fs) # 2nd order, low-pass at 6 Hz
for k in range(3):
inan = np.isnan(pts[:,:,k])
df = pd.DataFrame(pts[:,:,k])
df = df.interpolate(axis=0,limit_direction='both')
dummy = signal.filtfilt(b,a,df.to_numpy(),axis=0).copy()
dummy[inan] = np.nan
pts[:,:,k] = dummy
# If there are fewer trajectories than the expected number of markers, add some empty columns
if pts.shape[1] < num_mks:
pts = np.concatenate((pts,np.nan * np.ones((pts.shape[0],num_mks-pts.shape[1],3),
dtype=pts.dtype)),axis=1)
# --- Initial Label Prediction --- #
# Determine window indices
windowIdx = window_data([pts],windowSize,pts.shape[1])
# Apply neural network to get label probabilities within windows
probWindow = predict_nn(modelpath,pts,windowIdx,scaleVals,num_mks,max_len)
# Convert to frame-by-frame probabilities
probFrame = torch.zeros(pts.shape[1],num_mks,pts.shape[0])
for t in range(len(windowIdx)):
probFrame[windowIdx[t][1],:,windowIdx[t][2]:windowIdx[t][3]] = \
probWindow[t,:].repeat(windowIdx[t][3]-windowIdx[t][2],1).t().unsqueeze(dim=0)
# Find all frames where any marker appears or disappears
keyframes = [0]
for m in range(pts.shape[1]):
I = np.where(np.isnan(pts[1:,m,0]) != np.isnan(pts[0:-1,m,0]))[0]
for i in range (len(I)):
keyframes.append(I[i])
keyframes = sorted(set(keyframes))
if keyframes[-1] < pts.shape[0]:
keyframes.append(pts.shape[0])
# Make some new windows based on keyframes
# These are guaranteed to have only one of each marker in them
prob = torch.zeros((probFrame.shape[0],probFrame.shape[1],len(keyframes)-1),
dtype=probFrame.dtype)
if len(keyframes) == 1:
prob = probFrame.mean(dim=2).unsqueeze(2)
else:
for i in range(len(keyframes)-1):
prob[:,:,i] = probFrame[:,:,keyframes[i]:keyframes[i+1]].mean(dim=2)
# Hungarian Algorithm to assign labels within new windows
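# The assignment cost below is the negative log-odds of each (trajectory, label)
# pair, alpha = log((1-p)/p), so minimizing the summed cost with
# linear_sum_assignment is equivalent to maximizing the product of the selected
# odds p/(1-p). Probabilities of exactly 0 or 1 would give infinite costs, hence
# the clipping of +/-inf to +/-100 before the Hungarian step.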
y_pred = -1 * torch.ones((prob.shape[2],prob.shape[0]),dtype=torch.int64) # predicted label
confidence = torch.zeros(prob.shape[2],prob.shape[0],dtype=prob.dtype)
for t in range(prob.shape[2]):
p = copy.deepcopy(prob[:,:,t])
with np.errstate(divide='ignore'):
alpha = np.log(((1-p)/p).detach().numpy())
alpha[alpha==np.inf] = 100
alpha[alpha==-np.inf] = -100
R,C = linear_sum_assignment(alpha) # Hungarian Algorithm
for i in range(R.shape[0]):
y_pred[t,R[i]] = int(C[i])
confidence[t,R[i]] = prob[R[i],C[i],t]
# Convert to frame-by-frame label prediction and confidence
y_pred_frame = -1 * torch.ones((pts.shape[0],pts.shape[1]),dtype=torch.int64)
confidence_frame = torch.empty((pts.shape[0],pts.shape[1]),dtype=prob.dtype)
if len(keyframes) == 1:
y_pred_frame = y_pred.repeat(pts.shape[0],1)
for d in range(pts.shape[1]):
confidence_frame[:,d] = probFrame[d,y_pred[0,d],:]
else:
for t in range(len(keyframes)-1):
y_pred_frame[keyframes[t]:keyframes[t+1],:] = y_pred[t,:]
for d in range(pts.shape[1]):
confidence_frame[keyframes[t]:keyframes[t+1],d] = probFrame[d,y_pred[t,d],keyframes[t]:keyframes[t+1]]
# Calculate scores for each trajectory using weighted mode
Y_pred = -1 * torch.ones(pts.shape[1],dtype=y_pred_frame.dtype)
confidence_final = np.empty(pts.shape[1])
confidence_weight = np.empty(pts.shape[1])
for d in range(pts.shape[1]):
a,b = weighted_mode(y_pred_frame[:,d],confidence_frame[:,d])
Y_pred[d] = torch.from_numpy(a)
confidence_final[d] = confidence_frame[y_pred_frame[:,d]==a[0],d].mean()
confidence_weight[d] = b
# Replace original gaps so that this doesn't interfere with error checking
pts[gap_idx] = np.nan
# --- Error checking and correction --- #
# Remove labels where inter-marker distances within the segment aren't within expected range
for bs in range(len(uniqueSegs)):
I = np.where((segID[Y_pred] == bs) & (Y_pred.numpy()>-1))[0]
J = np.where(segID == bs)[0]
badcombo = np.nan * np.ones((len(I),len(I)),dtype=np.int64)
for m1 in range(len(I)):
for m2 in range(m1+1,len(I)):
if Y_pred[I[m1]] != Y_pred[I[m2]]:
dist = np.linalg.norm(pts[:,I[m1],:] - pts[:,I[m2],:],axis=1)
if (~np.isnan(dist)).sum() > 0:
if np.nanmean(dist) + np.nanstd(dist) > \
segdists['mean'][bs][J==Y_pred[I[m1]].numpy(),J==Y_pred[I[m2]].numpy()] + \
3*segdists['std'][bs][J==Y_pred[I[m1]].numpy(),J==Y_pred[I[m2]].numpy()] or \
np.nanmean(dist) - np.nanstd(dist) < \
segdists['mean'][bs][J==Y_pred[I[m1]].numpy(),J==Y_pred[I[m2]].numpy()] - \
3*segdists['std'][bs][J==Y_pred[I[m1]].numpy(),J==Y_pred[I[m2]].numpy()]:
badcombo[m1,m2] = 1
badcombo[m2,m1] = 1
else:
badcombo[m1,m2] = 0
badcombo[m2,m1] = 0
for m1 in range(len(I)):
if (badcombo[m1,:] == 0).sum() == 0 and (badcombo[m1,:]==1).sum() > 0:
# if no good combos and at least one bad combo,
confidence_final[I[m1]] = 0
confidence_weight[I[m1]] = 0
Y_pred[I[m1]] = -1
# Check for overlapping marker labels and keep label for marker with
# highest confidence
for m in range(num_mks):
visible = (~np.isnan(pts[:,Y_pred==m,0]))
if (visible.sum(1) > 1).any():
ii = torch.where(Y_pred==m)[0]
I = np.argsort(-1*confidence_weight[ii]) # sort by descending confidence
ii = ii[I]
# check which ones are actually overlapping
for j1 in range(len(ii)):
for j2 in range(j1+1,len(ii)):
if Y_pred[ii[j2]] > -1:
if ((~np.isnan(pts[:,[ii[j1],ii[j2]],0])).sum(1) > 1).any():
# check if these are maybe the same marker due to a
# ghost marker situation
d = np.nanmean(np.linalg.norm(pts[:,ii[j1],:] - pts[:,ii[j2],:],axis=1))
olframes = ((~np.isnan(np.stack(((pts[:,ii[j1],0]),pts[:,ii[j2],0]),
axis=1))).sum(1) > 1).sum()
if ~(d < 25 or (d < 40 and olframes < 10) or (d < 50 and olframes < 5)):
Y_pred[ii[j2]] = -1
confidence_final[ii[j2]] = 0
confidence_weight[ii[j2]] = 0
# Attempt to assign labels to unlabelled markers based on probabilities and distances
unlabelled = torch.where(Y_pred == -1)[0]
for m in unlabelled:
avail_mks = list(set(range(num_mks)) -
set(Y_pred[(~np.isnan(pts[~np.isnan(pts[:,m,0]),:,0])).any(0)].tolist()))
if len(avail_mks) > 0:
avail_probs = np.zeros(len(avail_mks))
for i in range(len(avail_mks)):
avail_probs[i] = probFrame[m,avail_mks[i],~np.isnan(pts[:,m,0])].mean()
while avail_probs.max() > 0.1 and Y_pred[m] == -1:
lbl = avail_mks[np.argmax(avail_probs)]
segmks = np.where(segID == segID[lbl])[0]
# Check if distances are within expected range
goodcount = 0
badcount = 0
for i in range(len(segmks)):
if i != np.where(segmks==lbl)[0]:
I = torch.where(Y_pred==segmks[i])[0]
if I.shape[0] > 0:
dist = np.zeros(0)
for ii in range(I.shape[0]):
dist = np.append(dist,np.linalg.norm(pts[:,m,:] -
pts[:,I[ii],:],axis=1),0)
if (~np.isnan(dist)).sum() > 0:
if np.nanmean(dist) + np.nanstd(dist) < \
segdists['mean'][segID[lbl]][segmks==lbl,i] + \
3*segdists['std'][segID[lbl]][segmks==lbl,i] and \
np.nanmean(dist) - np.nanstd(dist) > \
segdists['mean'][segID[lbl]][segmks==lbl,i] - \
3*segdists['std'][segID[lbl]][segmks==lbl,i]:
goodcount = goodcount + 1
else:
badcount = badcount + 1
if goodcount > 0:
Y_pred[m] = lbl
confidence_final[m] = avail_probs.max()
else:
avail_probs[np.argmax(avail_probs)] = 0
# Attempt to fit segment marker definitions to measured markers based on predicted labels and
# fill in where there are 3 predicted correctly and one or more unlabelled/incorrect
y_pred_frame_proposed = np.nan * np.ones((pts.shape[0],pts.shape[1]))
d_frame_proposed = np.nan * np.ones((pts.shape[0],pts.shape[1]))
for bs in range(len(uniqueSegs)):
I = np.where(segID==bs)[0] # label indices for this segment's markers
if I.shape[0] > 2:
# Get local coords from marker set
xsk = np.empty((I.shape[0],3))
for i in range(I.shape[0]):
xsk[i,:] = mkcoordL[I[i]]
xsk = 1000 * xsk
for k in range(len(keyframes)-1):
t=keyframes[k]+1
# Get markers based on predicted labels
xmk = np.nan * np.ones((I.shape[0],3))
j = 0
for i in I:
mi = np.logical_and((~np.isnan(pts[t,:,0])),Y_pred.numpy()==i)
if mi.sum() == 1:
xmk[j,:] = pts[t,mi,:]
elif mi.sum() > 1:
xmk[j,:] = pts[t,mi,:].mean(0)
j = j+1
if (~np.isnan(xmk[:,0])).sum() > 2:
# If there are at least 3 visible markers from this segment, use
# Procrustes to line up marker set coords with measured markers
xsk_g = np.nan * np.ones(xsk.shape)
_,xsk_g[~np.isnan(xmk[:,0]),:],T = \
procrustes(xmk[~np.isnan(xmk[:,0]),:],xsk[~np.isnan(xmk[:,0]),:])
d_mks = np.linalg.norm(xsk_g-xmk,axis=1)
# If there is a good fit
if np.nanmax(d_mks) < 30:
xsk_g = T['scale'] * np.matmul(xsk,T['rotation']) + T['translation']
for j in np.where(np.isnan(xmk[:,0]))[0]:
d = np.linalg.norm(xsk_g[j,:3] - pts[t,:,:],axis=1)
if np.nanmin(d) < 40:
y_pred_frame_proposed[keyframes[k]:keyframes[k+1],np.nanargmin(d)] = int(I[j])
d_frame_proposed[keyframes[k]:keyframes[k+1],np.nanargmin(d)] = np.nanmin(d)
# if there are 4 markers, and all of them but one are less than 30,
# remove label from that one and redo the fitting
elif (d_mks[~np.isnan(d_mks)]<30).sum() > 2 and \
(d_mks[~np.isnan(d_mks)]>=30).sum() > 0:
# Set the presumed incorrect one to nan
xmk[np.array([x>=30 if ~np.isnan(x) else False for x in d_mks]),:] = np.nan
xsk_g = np.nan * np.ones(xsk.shape)
_,xsk_g[~np.isnan(xmk[:,0]),:],T = procrustes(
xmk[~np.isnan(xmk[:,0]),:],xsk[~np.isnan(xmk[:,0]),:])
d_mks = np.linalg.norm(xsk_g-xmk,axis=1)
if np.nanmax(d_mks) < 30:
xsk_g = T['scale'] * np.matmul(xsk,T['rotation']) + T['translation']
for j in np.where(np.isnan(xmk[:,0]))[0]:
d = np.linalg.norm(xsk_g[j,:3] - pts[t,:,:],axis=1)
if np.nanmin(d) < 40:
if np.isnan(y_pred_frame_proposed[t,np.nanargmin(d)]):
y_pred_frame_proposed[
keyframes[k]:keyframes[k+1],np.nanargmin(d)] = int(I[j])
d_frame_proposed[
keyframes[k]:keyframes[k+1],np.nanargmin(d)] = np.nanmin(d)
elif np.nanmin(d) < d_frame_proposed[t,np.nanargmin(d)]:
y_pred_frame_proposed[
keyframes[k]:keyframes[k+1],
|
np.nanargmin(d)
|
numpy.nanargmin
|
# -*- coding: utf-8 -*-
from textwrap import dedent
import numpy as np
import pandas as pd
import xarray as xr
from xarray.core import formatting
from . import raises_regex
class TestFormatting:
def test_get_indexer_at_least_n_items(self):
cases = [
((20,), (slice(10),), (slice(-10, None),)),
((3, 20,), (0, slice(10)), (-1, slice(-10, None))),
((2, 10,), (0, slice(10)), (-1, slice(-10, None))),
((2, 5,), (slice(2), slice(None)),
(slice(-2, None), slice(None))),
((1, 2, 5,), (0, slice(2), slice(None)),
(-1, slice(-2, None), slice(None))),
((2, 3, 5,), (0, slice(2), slice(None)),
(-1, slice(-2, None), slice(None))),
((1, 10, 1,), (0, slice(10), slice(None)),
(-1, slice(-10, None), slice(None))),
((2, 5, 1,), (slice(2), slice(None), slice(None)),
(slice(-2, None), slice(None), slice(None))),
((2, 5, 3,), (0, slice(4), slice(None)),
(-1, slice(-4, None), slice(None))),
((2, 3, 3,), (slice(2), slice(None), slice(None)),
(slice(-2, None), slice(None), slice(None))),
]
for shape, start_expected, end_expected in cases:
actual = formatting._get_indexer_at_least_n_items(shape, 10,
from_end=False)
assert start_expected == actual
actual = formatting._get_indexer_at_least_n_items(shape, 10,
from_end=True)
assert end_expected == actual
def test_first_n_items(self):
array = np.arange(100).reshape(10, 5, 2)
for n in [3, 10, 13, 100, 200]:
actual = formatting.first_n_items(array, n)
expected = array.flat[:n]
assert (expected == actual).all()
with raises_regex(ValueError, 'at least one item'):
formatting.first_n_items(array, 0)
def test_last_n_items(self):
array = np.arange(100).reshape(10, 5, 2)
for n in [3, 10, 13, 100, 200]:
actual = formatting.last_n_items(array, n)
expected = array.flat[-n:]
assert (expected == actual).all()
with raises_regex(ValueError, 'at least one item'):
formatting.last_n_items(array, 0)
def test_last_item(self):
array = np.arange(100)
reshape = ((10, 10), (1, 100), (2, 2, 5, 5))
expected = np.array([99])
for r in reshape:
result = formatting.last_item(array.reshape(r))
assert result == expected
def test_format_item(self):
cases = [
(pd.Timestamp('2000-01-01T12'), '2000-01-01T12:00:00'),
(pd.Timestamp('2000-01-01'), '2000-01-01'),
(pd.Timestamp('NaT'), 'NaT'),
(pd.Timedelta('10 days 1 hour'), '10 days 01:00:00'),
(pd.Timedelta('-3 days'), '-3 days +00:00:00'),
(pd.Timedelta('3 hours'), '0 days 03:00:00'),
(pd.Timedelta('NaT'), 'NaT'),
('foo', "'foo'"),
(b'foo', "b'foo'"),
(1, '1'),
(1.0, '1.0'),
]
for item, expected in cases:
actual = formatting.format_item(item)
assert expected == actual
def test_format_items(self):
cases = [
(np.arange(4) * np.timedelta64(1, 'D'),
'0 days 1 days 2 days 3 days'),
(np.arange(4) * np.timedelta64(3, 'h'),
'00:00:00 03:00:00 06:00:00 09:00:00'),
(np.arange(4) * np.timedelta64(500, 'ms'),
'00:00:00 00:00:00.500000 00:00:01 00:00:01.500000'),
(pd.to_timedelta(['NaT', '0s', '1s', 'NaT']),
'NaT 00:00:00 00:00:01 NaT'),
(pd.to_timedelta(['1 day 1 hour', '1 day', '0 hours']),
'1 days 01:00:00 1 days 00:00:00 0 days 00:00:00'),
([1, 2, 3], '1 2 3'),
]
for item, expected in cases:
actual = ' '.join(formatting.format_items(item))
assert expected == actual
def test_format_array_flat(self):
actual = formatting.format_array_flat(np.arange(100), 2)
expected = '0 ... 99'
assert expected == actual
actual = formatting.format_array_flat(np.arange(100), 9)
expected = '0 ... 99'
assert expected == actual
actual = formatting.format_array_flat(np.arange(100), 10)
expected = '0 1 ... 99'
assert expected == actual
actual = formatting.format_array_flat(np.arange(100), 13)
expected = '0 1 ... 98 99'
assert expected == actual
actual = formatting.format_array_flat(np.arange(100), 15)
expected = '0 1 2 ... 98 99'
assert expected == actual
actual = formatting.format_array_flat(np.arange(100.0), 11)
expected = '0.0 ... 99.0'
assert expected == actual
actual = formatting.format_array_flat(np.arange(100.0), 1)
expected = '0.0 ... 99.0'
assert expected == actual
actual = formatting.format_array_flat(np.arange(3), 5)
expected = '0 1 2'
assert expected == actual
actual = formatting.format_array_flat(np.arange(4.0), 11)
expected = '0.0 ... 3.0'
assert expected == actual
actual = formatting.format_array_flat(np.arange(0), 0)
expected = ''
assert expected == actual
actual = formatting.format_array_flat(np.arange(1), 0)
expected = '0'
assert expected == actual
actual = formatting.format_array_flat(np.arange(2), 0)
expected = '0 1'
assert expected == actual
actual = formatting.format_array_flat(np.arange(4), 0)
expected = '0 ... 3'
assert expected == actual
def test_pretty_print(self):
assert formatting.pretty_print('abcdefghij', 8) == 'abcde...'
assert formatting.pretty_print('ß', 1) == 'ß'
def test_maybe_truncate(self):
assert formatting.maybe_truncate('ß', 10) == 'ß'
def test_format_timestamp_out_of_bounds(self):
from datetime import datetime
date = datetime(1300, 12, 1)
expected = '1300-12-01'
result = formatting.format_timestamp(date)
assert result == expected
date = datetime(2300, 12, 1)
expected = '2300-12-01'
result = formatting.format_timestamp(date)
assert result == expected
def test_attribute_repr(self):
short = formatting.summarize_attr('key', 'Short string')
long = formatting.summarize_attr('key', 100 * 'Very long string ')
newlines = formatting.summarize_attr('key', '\n\n\n')
tabs = formatting.summarize_attr('key', '\t\t\t')
assert short == ' key: Short string'
assert len(long) <= 80
assert long.endswith('...')
assert '\n' not in newlines
assert '\t' not in tabs
def test_diff_array_repr(self):
da_a = xr.DataArray(
np.array([[1, 2, 3], [4, 5, 6]], dtype='int64'),
dims=('x', 'y'),
coords={'x': np.array(['a', 'b'], dtype='U1'),
'y': np.array([1, 2, 3], dtype='int64')},
attrs={'units': 'm', 'description': 'desc'})
da_b = xr.DataArray(
np.array([1, 2], dtype='int64'),
dims='x',
coords={'x': np.array(['a', 'c'], dtype='U1'),
'label': ('x', np.array([1, 2], dtype='int64'))},
attrs={'units': 'kg'})
expected = dedent("""\
Left and right DataArray objects are not identical
Differing dimensions:
(x: 2, y: 3) != (x: 2)
Differing values:
L
array([[1, 2, 3],
[4, 5, 6]], dtype=int64)
R
array([1, 2], dtype=int64)
Differing coordinates:
L * x (x) <U1 'a' 'b'
R * x (x) <U1 'a' 'c'
Coordinates only on the left object:
* y (y) int64 1 2 3
Coordinates only on the right object:
label (x) int64 1 2
Differing attributes:
L units: m
R units: kg
Attributes only on the left object:
description: desc""")
actual = formatting.diff_array_repr(da_a, da_b, 'identical')
try:
assert actual == expected
except AssertionError:
# depending on platform, dtype may not be shown in numpy array repr
assert actual == expected.replace(", dtype=int64", "")
va = xr.Variable('x', np.array([1, 2, 3], dtype='int64'),
{'title': 'test Variable'})
vb = xr.Variable(('x', 'y'),
np.array([[1, 2, 3], [4, 5, 6]], dtype='int64'))
expected = dedent("""\
Left and right Variable objects are not equal
Differing dimensions:
(x: 3) != (x: 2, y: 3)
Differing values:
L
array([1, 2, 3], dtype=int64)
R
array([[1, 2, 3],
[4, 5, 6]], dtype=int64)""")
actual = formatting.diff_array_repr(va, vb, 'equals')
try:
assert actual == expected
except AssertionError:
assert actual == expected.replace(", dtype=int64", "")
def test_diff_dataset_repr(self):
ds_a = xr.Dataset(
data_vars={
'var1': (('x', 'y'),
np.array([[1, 2, 3], [4, 5, 6]], dtype='int64')),
'var2': ('x', np.array([3, 4], dtype='int64'))
},
coords={'x': np.array(['a', 'b'], dtype='U1'),
'y': np.array([1, 2, 3], dtype='int64')},
attrs={'units': 'm', 'description': 'desc'}
)
ds_b = xr.Dataset(
data_vars={'var1': ('x', np.array([1, 2], dtype='int64'))},
coords={
'x': ('x', np.array(['a', 'c'], dtype='U1'), {'source': 0}),
'label': ('x', np.array([1, 2], dtype='int64'))
},
attrs={'units': 'kg'}
)
expected = dedent("""\
Left and right Dataset objects are not identical
Differing dimensions:
(x: 2, y: 3) != (x: 2)
Differing coordinates:
L * x (x) <U1 'a' 'b'
R * x (x) <U1 'a' 'c'
source: 0
Coordinates only on the left object:
* y (y) int64 1 2 3
Coordinates only on the right object:
label (x) int64 1 2
Differing data variables:
L var1 (x, y) int64 1 2 3 4 5 6
R var1 (x) int64 1 2
Data variables only on the left object:
var2 (x) int64 3 4
Differing attributes:
L units: m
R units: kg
Attributes only on the left object:
description: desc""")
actual = formatting.diff_dataset_repr(ds_a, ds_b, 'identical')
assert actual == expected
def test_array_repr(self):
ds = xr.Dataset(coords={'foo': [1, 2, 3], 'bar': [1, 2, 3]})
ds[(1, 2)] = xr.DataArray([0], dims='test')
actual = formatting.array_repr(ds[(1, 2)])
expected = dedent("""\
<xarray.DataArray (1, 2) (test: 1)>
array([0])
Dimensions without coordinates: test""")
assert actual == expected
def test_set_numpy_options():
original_options = np.get_printoptions()
with formatting.set_numpy_options(threshold=10):
assert len(repr(np.arange(500))) < 200
# original options are restored
assert np.get_printoptions() == original_options
def test_short_array_repr():
cases = [
np.random.randn(500),
|
np.random.randn(20, 20)
|
numpy.random.randn
|
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#
# Copyright (c) 2017 Image Processing Research Group of University Federico II of Naples ('GRIP-UNINA').
# This software is delivered with Government Purpose Rights (GPR) under agreement number FA8750-16-2-0204.
#
# By downloading and/or using any of these files, you implicitly agree to all the
# terms of the license, as specified in the document LICENSE.txt
# (included in this package)
#
import numpy as np
from scipy.linalg import eigvalsh
from numpy.linalg import cholesky
from numpy.linalg import eigh
from numba import jit
import torch
class gm:
prioriProb = 0
outliersProb = 0
outliersNlogl = 0
mu = 0
listSigma = []
listSigmaInds = []
listSigmaType = []
# sigmaType = 0 # isotropic covariance
# sigmaType = 1 # diagonal covariance
# sigmaType = 2 # full covariance
# outliersProb < 0 # outliers are not managed
# outliersProb >= 0 # outliers are managed through a fixed nlogl (negative log likelihood)
# TODO: outliers managed through a fixed probability
def __init__(self, dim, listSigmaInds, listSigmaType, outliersProb = -1, outliersNlogl = 0, dtype = np.float32):
K = len(listSigmaInds)
S = len(listSigmaType)
self.listSigmaInds = listSigmaInds
self.listSigmaType = listSigmaType
self.outliersProb = outliersProb
self.outliersNlogl = outliersNlogl
self.prioriProb = (1.0-self.outliersProb) * np.ones((K, 1), dtype=dtype) / K
self.mu = np.zeros((K, dim), dtype=dtype)
self.listSigma = [None, ] * S
for s in range(S):
sigmaType = self.listSigmaType[s]
if sigmaType == 2: # full covariance
self.listSigma[s] = np.ones([dim, dim], dtype = dtype)
elif sigmaType == 1: # diagonal covariance
self.listSigma[s] = np.ones([1, dim], dtype = dtype)
else:
self.listSigma[s] = np.ones([], dtype = dtype)
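# Illustrative construction (a sketch; the argument values are only an example):
# a 3-dimensional mixture with three components, where the first two components
# share one diagonal covariance and the third has its own full covariance, and
# outliers are not modelled (default outliersProb = -1):
#
# model = gm(dim=3, listSigmaInds=[0, 0, 1], listSigmaType=[1, 2])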
def setRandomParams(self, X, regularizer = 0, randomState = np.random.get_state()):
[N, dim] = X.shape
K = len(self.listSigmaInds)
S = len(self.listSigmaType)
dtype = X.dtype
if self.outliersProb > 0:
self.prioriProb = (1.0-self.outliersProb) * np.ones((K, 1), dtype=dtype) / K
else:
self.prioriProb = np.ones((K, 1), dtype=dtype) / K
inds = randomState.random_integers(low=0,high=(N-1),size=(K,))
self.mu = X[inds, :]
varX = np.var(X, axis=0, keepdims=True)
if regularizer>0:
varX = varX + regularizer
elif regularizer<0:
varX = varX + np.abs(regularizer*np.spacing(np.max(varX)))
for s in range(S):
sigmaType = self.listSigmaType[s]
if sigmaType == 2: # full covariance
self.listSigma[s] = np.diag(varX.flatten())
elif sigmaType == 1: # diagonal covariance
self.listSigma[s] = varX
else:
self.listSigma[s] = np.mean(varX)
return inds
def setRandomParamsW(self, X, weights, regularizer = 0, randomState = np.random.get_state(), meanFlag = False):
[N, dim] = X.shape
K = len(self.listSigmaInds)
S = len(self.listSigmaType)
dtype = X.dtype
if self.outliersProb > 0:
self.prioriProb = (1.0-self.outliersProb) * np.ones((K, 1), dtype=dtype) / K
else:
self.prioriProb = np.ones((K, 1), dtype=dtype) / K
avrX = np.mean(X*weights, axis=0, keepdims=True)/np.mean(weights)
varX = np.mean(weights *((X - avrX) ** 2), axis=0, keepdims=True)/np.mean(weights)
indsW = np.sum(weights)*randomState.random_sample(size=(K,))
inds = [None, ] * K
weights = np.cumsum(weights.flatten())
for index in range(K):
inds[index] = np.count_nonzero(weights<=indsW[index])
self.mu = X[inds, :]
if meanFlag: self.mu[0,:] = avrX
#varX = np.var(X, axis=0, keepdims=True)
if regularizer>0:
varX = varX + regularizer
elif regularizer<0:
varX = varX + np.abs(regularizer*np.spacing(np.max(varX)))
for s in range(S):
sigmaType = self.listSigmaType[s]
if sigmaType == 2: # full covariance
self.listSigma[s] = np.diag(varX.flatten())
elif sigmaType == 1: # diagonal covariance
self.listSigma[s] = varX
else:
self.listSigma[s] = np.mean(varX)
return inds
def getNlogl(self, X):
[N, dim] = X.shape
K = len(self.listSigmaInds)
S = len(self.listSigmaType)
dtype = X.dtype
K0 = K
if self.outliersProb >= 0: K0 = K+1
nlogl = np.zeros([N, K0], dtype = dtype)
mahal = np.zeros([N, K ], dtype = dtype)
listLogDet = [None, ] * S
listLowMtx = [None, ] * S
for s in range(S):
sigmaType = self.listSigmaType[s]
sigma = self.listSigma[s]
if sigmaType == 2: # full covariance
try:
listLowMtx[s] = cholesky(sigma)
except:
# exceptional regularization
sigma_w, sigma_v = eigh(np.real(sigma))
sigma_w = np.maximum(sigma_w, np.spacing(np.max(sigma_w)))
sigma = np.matmul(np.matmul(sigma_v, np.diag(sigma_w)), (np.transpose(sigma_v,[1,0])))
try:
listLowMtx[s] = cholesky(sigma)
except:
sigma_w, sigma_v = eigh(np.real(sigma))
sigma_w = np.maximum(sigma_w, np.spacing(np.max(sigma_w)))
#print(np.min(sigma_w))
sigma = np.matmul(np.matmul(sigma_v,
|
np.diag(sigma_w)
|
numpy.diag
|
"""Equidistant points on a sphere.
Fibonacci Spiral:
https://bduvenhage.me/geometry/2019/07/31/generating-equidistant-vectors.html
Fekete points:
https://arxiv.org/pdf/0808.1202.pdf
Geodesic grid: (sec. 3.2)
https://arxiv.org/pdf/1711.05618.pdf
Review on geodesic grids:
http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.113.997&rep=rep1&type=pdf
"""
# %%
import math
import numpy as np
import matplotlib.pyplot as plt
import warnings
from scipy.spatial.transform import Rotation as Rot
import climnet.utils.fekete as fk
import climnet.plots as cplt
from importlib import reload
RADIUS_EARTH = 6371 # radius of earth in km
class BaseGrid():
"""Base Grid
Parameters:
-----------
"""
def __init__(self):
self.grid = None
return
def get_distance_equator(self):
"""Return distance between points at the equator."""
print("Function should be overwritten by subclasses!")
return None
def create_grid(self):
"""Create grid."""
print("Function should be overwritten by subclasses!")
return None
def cut_grid(self, lat_range, lon_range):
"""Cut the grid in lat and lon range.
TODO: allow taking regions around the date line
Args:
-----
lat_range: list
[min_lat, max_lat]
lon_range: list
[min_lon, max_lon]
"""
if lon_range[0] > lon_range[1]:
raise ValueError(
"Ranges around the date line are not yet defined.")
else:
print(f"Cut grid in range lat: {lat_range} and lon: {lon_range}")
idx = np.where((self.grid['lat'] >= lat_range[0])
& (self.grid['lat'] <= lat_range[1])
& (self.grid['lon'] >= lon_range[0])
& (self.grid['lon'] <= lon_range[1]))[0]
cutted_grid = {'lat': self.grid['lat'][idx],
'lon': self.grid['lon'][idx]}
return cutted_grid
class GaussianGrid(BaseGrid):
"""Gaussian Grid of the earth which is the classical grid type.
Args:
----
grid_step_lon: float
Grid step in longitudinal direction in degree
grid_step_lat: float
Grid step in latitudinal direction in degree
"""
def __init__(self, grid_step_lon, grid_step_lat, grid=None):
self.grid_step_lon = grid_step_lon
self.grid_step_lat = grid_step_lat
self.grid = grid
self.create_grid()
def create_grid(self):
init_lat = np.arange(-89.5, 90.5, self.grid_step_lat)
init_lon = np.arange(-179.5, 180.5, self.grid_step_lon)
lon_mesh, lat_mesh = np.meshgrid(init_lon, init_lat)
self.grid = {'lat': lat_mesh.flatten(), 'lon': lon_mesh.flatten()}
return self.grid
def get_distance_equator(self):
"""Return distance between points at the equator."""
d_lon = degree2distance_equator(self.grid_step_lon, radius=6371)
return d_lon
class FibonacciGrid(BaseGrid):
"""Fibonacci sphere creates a equidistance grid on a sphere.
Parameters:
-----------
distance_between_points: float
Distance between the equidistant grid points in km.
grid: dict
If grid is already computed, e.g. {'lon': [], 'lat': []}. Default: None
"""
def __init__(self, distance_between_points, grid=None):
self.distance = distance_between_points
self.num_points = self.get_num_points()
self.grid = grid
self.create_grid()
def create_grid(self,
):
"""Create Fibonacci grid."""
print(f'Create fibonacci grid with {self.num_points} points.')
cartesian_grid = self.fibonacci_sphere(self.num_points)
lon, lat = cartesian2spherical(cartesian_grid[:, 0],
cartesian_grid[:, 1],
cartesian_grid[:, 2])
# lon, lat = cut_lon_lat(
# lon, lat, lon_range=lon_range, lat_range=lat_range)
self.grid = {'lat': lat, 'lon': lon}
self.X = cartesian_grid
return self.grid
def fibonacci_sphere(self, num_points=1):
"""Creates the fibonacci sphere points on a unit sphere.
Code inspired by:
https://stackoverflow.com/questions/9600801/evenly-distributing-n-points-on-a-sphere
"""
points = []
phi = math.pi * (3. - math.sqrt(5.)) # golden angle in radians
for i in range(num_points):
y = 1 - (i / float(num_points - 1)) * 2 # y goes from 1 to -1
radius = math.sqrt(1 - y * y) # radius at y
theta = phi * i # golden angle increment
x = math.cos(theta) * radius
z = math.sin(theta) * radius
points.append([x, y, z])
return np.array(points)
def get_num_points(self):
"""Relationship between distance and num of points of fibonacci sphere.
num_points = a*distance**k
"""
# obtained by log-log fit
k = -2.01155176
a = np.exp(20.0165958)
return int(a * self.distance**k)
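# Worked example of the fitted power law above (approximate): for a target
# spacing of distance = 1000 km, num_points = exp(20.0165958) * 1000**(-2.01155176),
# which comes out to roughly 455 grid points on the sphere.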
def fit_numPoints_distance(self):
"""Fit functional relationship between next-nearest-neighbor distance of
fibonacci points and number of points."""
num_points = np.linspace(200, 10000, 20, dtype=int)
main_distance = []
for n in num_points:
points = self.fibonacci_sphere(n)
lon, lat = cartesian2spherical(
points[:, 0], points[:, 1], points[:, 2])
distance = neighbor_distance(lon, lat)
hist, bin_edge = np.histogram(distance.flatten(), bins=100)
main_distance.append(bin_edge[np.argmax(hist)])
# fit function
logx = np.log(main_distance)
logy = np.log(num_points)
coeffs = np.polyfit(logx, logy, deg=1)
def func(x):
return np.exp(coeffs[1]) * x**(coeffs[0])
dist_array = np.linspace(100, 1400, 20)
y_fit = func(dist_array)
# Plot fit and data
fig, ax = plt.subplots()
ax.plot(main_distance, num_points)
ax.plot(dist_array, y_fit)
ax.set_xscale('linear')
ax.set_yscale('linear')
return coeffs
def get_distance_equator(self):
"""Return distance between points at the equator."""
return self.distance
class FeketeGrid(BaseGrid):
"""Fibonacci sphere creates a equidistance grid on a sphere.
Parameters:
-----------
distance_between_points: float
Distance between the equidistance grid points in km.
grid: dict (or 'old' makes old version of fib grid, 'maxmin' to maximize min. min-distance)
If grid is already computed, e.g. {'lon': [], 'lat': []}. Default: None
"""
def __init__(self, num_points, num_iter=1000,
grid=None,
pre_proccess_type=None):
self.distance = get_distance_from_num_points(num_points)
self.num_points = num_points
self.num_iter = num_iter
self.epsilon = None
self.grid = grid
self.reduced_grid = None
self.create_grid(num_points=self.num_points,
num_iter=self.num_iter,
pre_proccess_type=pre_proccess_type)
def create_grid(self, num_points=None,
num_iter=1000,
pre_proccess_type=None):
if num_points is None:
num_points = self.num_points
print(
f'\nCreate Fekete grid with {num_points} points with {num_iter} iterations.')
# This runs the Fekete Algorithm
X_pre = None
if pre_proccess_type is not None:
X_pre = self.get_preprocessed_grid(grid_type=pre_proccess_type)
self.X, self.dq = fk.bendito(N=num_points,
maxiter=num_iter,
X=X_pre)
print('... Finished', flush=True)
lon, lat = cartesian2spherical(x=self.X[:, 0],
y=self.X[:, 1],
z=self.X[:, 2])
self.grid = {'lon': lon, 'lat': lat}
return self.grid
def get_preprocessed_grid(self, grid_type='fibonacci'):
print(f'Start preprocessed grid {grid_type}...',
flush=True)
if grid_type == 'gaussian':
Grid = GaussianGrid(self.grid_step, self.grid_step)
elif grid_type == 'fibonacci':
Grid = FibonacciGrid(self.distance)
else:
raise ValueError(f'Grid type {grid_type} does not exist.')
x, y, z = spherical2cartesian(lon=Grid.grid['lon'],
lat=Grid.grid['lat'])
grid_points = np.array([x, y, z]).T
return grid_points
def nudge_grid(self, n_iter=1, step=0.01): # a 100th of a grid_step
if self.reduced_grid is None:
raise KeyError('First call keep_original_points')
leng = len(self.grid['lon'])
delta = 2 * np.pi * step * self.distance / 6371
regx, regy, regz = spherical2cartesian(
self.reduced_grid['lon'], self.reduced_grid['lat'])
for iter in range(n_iter):
perm =
|
np.random.permutation(leng)
|
numpy.random.permutation
|
###########################################################################
# #
# physical_validation, #
# a python package to test the physical validity of MD results #
# #
# Written by <NAME> <<EMAIL>> #
# <NAME> <<EMAIL>> #
# #
# Copyright (c) 2017-2021 University of Colorado Boulder #
# (c) 2012 The University of Virginia #
# #
###########################################################################
import warnings
from typing import Iterator, Optional, Tuple
import numpy as np
from pymbar import timeseries
from scipy import stats
from . import error as pv_error
def equilibrate(traj: np.ndarray) -> np.ndarray:
traj = np.array(traj)
if traj.ndim == 1:
t0, g, n_eff = timeseries.detectEquilibration(traj)
if t0 == 0 and traj.size > 10:
# See https://github.com/choderalab/pymbar/issues/277
t0x, gx, n_effx = timeseries.detectEquilibration(traj[10:])
if t0x != 0:
t0 = t0x + 10
res = traj[t0:]
elif traj.ndim == 2 and traj.shape[0] == 2:
t01, g1, n_eff1 = timeseries.detectEquilibration(traj[0])
t02, g2, n_eff2 = timeseries.detectEquilibration(traj[1])
t0 = max(t01, t02)
if t0 == 0 and traj.shape[1] > 10:
# See https://github.com/choderalab/pymbar/issues/277
t01x, g1x, n_eff1x = timeseries.detectEquilibration(traj[0, 10:])
t02x, g2x, n_eff2x = timeseries.detectEquilibration(traj[1, 10:])
t0x = max(t01x, t02x)
if t0x != 0:
t0 = t0x + 10
res = traj[:, t0:]
elif traj.ndim == 2:
raise NotImplementedError(
"trajectory.equilibrate() in 2 dimensions is only "
"implemented for exactly two timeseries."
)
else:
raise NotImplementedError(
"trajectory.equilibrate() is not implemented for "
"trajectories with more than 2 dimensions."
)
return res
def decorrelate(traj: np.ndarray) -> np.ndarray:
traj = np.array(traj)
if traj.ndim == 1:
idx = timeseries.subsampleCorrelatedData(traj)
res = traj[idx]
elif traj.ndim == 2:
# pymbar doesn't offer to decorrelate two samples, so let's do it ourselves
# and just use the decorrelation of the sample more strongly correlated
#
# calculate (maximal) inefficiency
g1 = timeseries.statisticalInefficiency(traj[0])
g2 = timeseries.statisticalInefficiency(traj[1])
g = np.max([g1, g2])
# calculate index
n0 = traj.shape[1]
idx = np.unique(
np.array(np.round(np.arange(0, int(n0 / g + 0.5)) * g), dtype=int)
)
idx = idx[idx < n0]
res = traj[:, idx]
else:
raise NotImplementedError(
"trajectory.decorrelate() is not implemented for "
"trajectories with more than 1 dimension."
)
return res
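# Illustrative sketch (added, not part of the original module): the 2-d branch of
# decorrelate() subsamples with a stride equal to the larger statistical inefficiency g.
# With n0 = 10 frames and g = 2.5 the retained frame indices are [0, 2, 5, 8].
def _demo_decorrelation_stride(n0: int = 10, g: float = 2.5) -> np.ndarray:
    idx = np.unique(
        np.array(np.round(np.arange(0, int(n0 / g + 0.5)) * g), dtype=int)
    )
    return idx[idx < n0]  # array([0, 2, 5, 8]) for the default arguments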
def cut_tails(traj: np.ndarray, cut: float) -> np.ndarray:
traj = np.array(traj)
dc = 100 * cut
if traj.ndim == 1:
with warnings.catch_warnings():
# With some combination of python version / scipy version,
# scoreatpercentile throws a warning
warnings.filterwarnings("ignore", category=FutureWarning)
tmax = stats.scoreatpercentile(traj, 100 - dc)
tmin = stats.scoreatpercentile(traj, dc)
t = traj[(tmin <= traj) * (traj <= tmax)]
elif traj.ndim == 2:
with warnings.catch_warnings():
# With some combination of python version / scipy version,
# scoreatpercentile throws a warning
warnings.filterwarnings("ignore", category=FutureWarning)
tmax = stats.scoreatpercentile(traj, 100 - dc, axis=1)
tmin = stats.scoreatpercentile(traj, dc, axis=1)
t = traj[
:,
(tmin[0] <= traj[0])
* (tmin[1] <= traj[1])
* (tmax[0] >= traj[0])
* (tmax[1] >= traj[1]),
]
else:
raise NotImplementedError(
"trajectory.cut_tails() is not implemented for "
"trajectories with more than 2 dimension."
)
return t
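# Illustrative sketch (added): with cut=0.01, cut_tails() keeps only the values between
# the 1st and the 99th percentile of the trajectory, discarding roughly 2 % of the frames.
def _demo_cut_tails() -> np.ndarray:
    traj = np.random.default_rng(0).normal(size=10000)
    return cut_tails(traj, cut=0.01)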
def prepare(
traj: np.ndarray,
cut: Optional[float] = None,
verbosity: int = 1,
name: Optional[str] = None,
skip_preparation: bool = False,
) -> np.ndarray:
traj = np.array(traj)
if not name:
name = "Traectory"
def traj_length(t: np.ndarray) -> int:
if t.ndim == 1:
return t.size
else:
return t.shape[1]
if traj.ndim > 2:
raise NotImplementedError(
"trajectory.prepare() is not implemented for "
"trajectories with more than 2 dimensions."
)
if skip_preparation:
if verbosity > 0:
print(
"Equilibration, decorrelation and tail pruning was skipped on user "
"request. Note that if the provided trajectory is statistically "
"correlated, the results of the physical validation checks might "
"be invalid."
)
return traj
# original length
n0 = traj_length(traj)
# equilibrate
res = equilibrate(traj)
n1 = traj_length(res)
if verbosity > 2:
print(
"{:s} equilibration: First {:d} frames ({:.1%} of "
"trajectory) discarded for burn-in.".format(name, n0 - n1, (n0 - n1) / n0)
)
# decorrelate
res = decorrelate(res)
n2 = traj_length(res)
if verbosity > 2:
print(
"{:s} decorrelation: {:d} frames ({:.1%} of equilibrated "
"trajectory) discarded for decorrelation.".format(
name, n1 - n2, (n1 - n2) / n1
)
)
# cut tails
if cut is not None:
res = cut_tails(res, cut)
n3 = traj_length(res)
if verbosity > 2:
print(
"{:s} tails (cut = {:.2%}): {:n} frames ({:.2%} of equilibrated and "
"decorrelated trajectory) were cut".format(
name, cut, n2 - n3, (n2 - n3) / n2
)
)
# end length
nn = traj_length(res)
if verbosity > 0:
print(
"After equilibration, decorrelation and tail pruning, {:.2%} ({:n} frames) "
"of original {:s} remain.".format(nn / n0, nn, name)
)
return res
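# Illustrative usage sketch (added; assumes pymbar is installed, as the imports above
# require): prepare() chains equilibration, decorrelation and optional tail pruning and
# reports how many frames each step discarded.
def _demo_prepare() -> np.ndarray:
    rng = np.random.default_rng(1)
    traj = 0.01 * np.cumsum(rng.normal(size=2000)) + rng.normal(size=2000)
    return prepare(traj, cut=0.01, verbosity=3, name="demo trajectory")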
def overlap(
traj1: np.ndarray, traj2: np.ndarray, cut=None
) -> Tuple[np.ndarray, np.ndarray, Optional[float], Optional[float]]:
traj1 = np.array(traj1)
traj2 = np.array(traj2)
if traj1.ndim == traj2.ndim and traj2.ndim == 1:
if cut:
dc = 100 * cut
max1 = stats.scoreatpercentile(traj1, 100 - dc)
min1 = stats.scoreatpercentile(traj1, dc)
max2 = stats.scoreatpercentile(traj2, 100 - dc)
min2 = stats.scoreatpercentile(traj2, dc)
else:
max1 = np.max(traj1)
min1 = np.min(traj1)
max2 = np.max(traj2)
min2 =
|
np.min(traj2)
|
numpy.min
|
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
import os
def get_data_from_dir(path, size=[100, 100]):
imgs = []
labels = []
labels_list = []
files = os.listdir(path)
for i, file in enumerate(files):
img_names = os.listdir(os.path.join(path, file))
labels.append(np.ones(len(img_names)) * i)
labels_list.append(file)
for img_name in img_names:
with Image.open(os.path.join(path, file, img_name)) as img:
imgs.append(np.asarray(img.resize(size)))
imgs = np.asarray(imgs)
print('Found {} images belonging to {} different classes'.format(imgs.shape[0], len(labels_list)))
return imgs, np.hstack(labels), labels_list
def train_test_split(X, y, val_split=0.2):
index = int(X.shape[0] * (1 - val_split))
return X[:index], y[:index], X[index:], y[index:]
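# Illustrative sketch (added): train_test_split above performs a simple ordered split,
# so shuffle() is normally applied first; the arrays below are placeholders.
def _demo_split():
    X = np.arange(20).reshape(10, 2)
    y = np.arange(10)
    X_train, y_train, X_val, y_val = train_test_split(X, y, val_split=0.2)
    return X_train.shape, X_val.shape  # ((8, 2), (2, 2))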
def shuffle(X, y):
indices = np.arange(X.shape[0])
|
np.random.shuffle(indices)
|
numpy.random.shuffle
|
#!/usr/bin/env python
'''
mcu: Modeling and Crystallographic Utilities
Copyright (C) 2019 <NAME>. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Email: <NAME> <<EMAIL>>
'''
# This is the only place needed to be modified
# The path for the libwannier90 library
W90LIB = '/panfs/roc/groups/6/gagliard/phamx494/pyWannier90/src'
import sys
sys.path.append(W90LIB)
import importlib
found = importlib.util.find_spec('libwannier90') is not None
if found:
import libwannier90
else:
print('WARNING: Check the installation of libwannier90 and its path in pyscf/pbc/tools/pywannier90.py')
print('libwannier90 path: ' + W90LIB)
print('libwannier90 can be found at: https://github.com/hungpham2017/pyWannier90')
raise ImportError
import numpy as np
import scipy
import mcu
from mcu.vasp import const
from mcu.cell import utils as cell_utils
def angle(v1, v2):
'''
    Return the angle (in radians) between v1 and v2
'''
v1 = np.asarray(v1)
v2 = np.asarray(v2)
cosa = v1.dot(v2)/ np.linalg.norm(v1) / np.linalg.norm(v2)
return np.arccos(cosa)
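# Illustrative sketch (added): angle() returns radians, e.g. pi/2 for orthogonal axes.
def _demo_angle():
    return angle([1, 0, 0], [0, 1, 0])  # = pi/2, about 1.5708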
def transform(x_vec, z_vec):
'''
    Construct a transformation matrix that expresses a vector in the new coordinate system defined by x_vec and z_vec
'''
x_vec = x_vec/np.linalg.norm(np.asarray(x_vec))
z_vec = z_vec/np.linalg.norm(np.asarray(z_vec))
assert x_vec.dot(z_vec) == 0 # x and z have to be orthogonal to one another
y_vec = np.cross(x_vec,z_vec)
new = np.asarray([x_vec, y_vec, z_vec])
original = np.asarray([[1,0,0],[0,1,0],[0,0,1]])
tran_matrix = np.empty([3,3])
for row in range(3):
for col in range(3):
tran_matrix[row,col] = np.cos(angle(original[row],new[col]))
return tran_matrix.T
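# Illustrative sketch (added): transform() returns the matrix whose rows are the new
# basis vectors (x_vec, x_vec x z_vec, z_vec), so it maps a lab-frame vector to its
# coordinates in that frame; here the new x-axis gets coordinates (1, 0, 0).
def _demo_transform():
    x_vec = np.array([0.0, 1.0, 0.0])
    z_vec = np.array([0.0, 0.0, 1.0])
    M = transform(x_vec, z_vec)
    return np.allclose(M @ x_vec, [1.0, 0.0, 0.0])  # True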
def cartesian_prod(arrays, out=None, order = 'C'):
'''
This function is similar to lib.cartesian_prod of PySCF, except the output can be in Fortran or in C order
'''
arrays = [np.asarray(x) for x in arrays]
dtype = np.result_type(*arrays)
nd = len(arrays)
dims = [nd] + [len(x) for x in arrays]
if out is None:
out = np.empty(dims, dtype)
else:
out = np.ndarray(dims, dtype, buffer=out)
tout = out.reshape(dims)
shape = [-1] + [1] * nd
for i, arr in enumerate(arrays):
tout[i] = arr.reshape(shape[:nd-i])
return tout.reshape((nd,-1),order=order).T
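# Illustrative sketch (added): in 'C' order the rows of cartesian_prod() match
# itertools.product of the input arrays.
def _demo_cartesian_prod():
    import itertools
    grid = cartesian_prod([np.arange(2), np.arange(3)], order='C')
    return np.array_equal(grid, np.array(list(itertools.product(range(2), range(3)))))  # True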
def periodic_grid(lattice, grid = [50,50,50], supercell = [1,1,1], order = 'C'):
'''
Generate a periodic grid for the unit/computational cell in F/C order
Note: coords has the same unit as lattice
'''
ngrid = np.asarray(grid)
qv = cartesian_prod([np.arange(-ngrid[i]*(supercell[i]//2),ngrid[i]*((supercell[i]+1)//2)) for i in range(3)], order=order)
a_frac = np.einsum('i,ij->ij', 1./ngrid, lattice)
coords = np.dot(qv, a_frac)
# Compute weight
ngrids = np.prod(grid)
ncells = np.prod(supercell)
weights = np.empty(ngrids*ncells)
vol = abs(np.linalg.det(lattice))
weights[:] = vol / ngrids / ncells
return coords, weights
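# Illustrative sketch (added): for a cubic cell the grid holds prod(grid) * prod(supercell)
# points and the weights sum to the unit-cell volume.
def _demo_periodic_grid():
    lattice = 5.0 * np.eye(3)
    coords, weights = periodic_grid(lattice, grid=[4, 4, 4], supercell=[1, 1, 1])
    return coords.shape, np.isclose(weights.sum(), abs(np.linalg.det(lattice)))  # (64, 3), True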
def R_r(r_norm, r = 1, zona = 1):
'''
Radial functions used to compute \Theta_{l,m_r}(\theta,\phi)
Note: r_norm has the unit of Bohr
'''
if r == 1:
R_r = 2 * zona**(3/2) * np.exp(-zona*r_norm)
elif r == 2:
R_r = 1 / 2 / np.sqrt(2) * zona**(3/2) * (2 - zona*r_norm) * np.exp(-zona*r_norm/2)
else:
R_r = np.sqrt(4/27) * zona**(3/2) * (1 - 2*zona*r_norm/3 + 2*(zona**2)*(r_norm**2)/27) * np.exp(-zona*r_norm/3)
return R_r
def theta(func, cost, phi):
'''
Basic angular functions (s,p,d,f) used to compute \Theta_{l,m_r}(\theta,\phi)
'''
if func == 's': # s
theta = 1 / np.sqrt(4 * np.pi) * np.ones([cost.shape[0]])
elif func == 'pz':
theta = np.sqrt(3 / 4 / np.pi) * cost
elif func == 'px':
sint = np.sqrt(1 - cost**2)
theta = np.sqrt(3 / 4 / np.pi) * sint * np.cos(phi)
elif func == 'py':
sint = np.sqrt(1 - cost**2)
theta = np.sqrt(3 / 4 / np.pi) * sint * np.sin(phi)
elif func == 'dz2':
theta = np.sqrt(5 / 16 / np.pi) * (3*cost**2 - 1)
elif func == 'dxz':
sint = np.sqrt(1 - cost**2)
theta = np.sqrt(15 / 4 / np.pi) * sint * cost * np.cos(phi)
elif func == 'dyz':
sint = np.sqrt(1 - cost**2)
theta = np.sqrt(15 / 4 / np.pi) * sint * cost * np.sin(phi)
elif func == 'dx2-y2':
sint = np.sqrt(1 - cost**2)
theta = np.sqrt(15 / 16 / np.pi) * (sint**2) * np.cos(2*phi)
elif func == 'pxy':
sint = np.sqrt(1 - cost**2)
theta = np.sqrt(15 / 16 / np.pi) * (sint**2) * np.sin(2*phi)
elif func == 'fz3':
theta = np.sqrt(7) / 4 / np.sqrt(np.pi) * (5*cost**3 - 3*cost)
elif func == 'fxz2':
sint = np.sqrt(1 - cost**2)
theta = np.sqrt(21) / 4 / np.sqrt(2*np.pi) * (5*cost**2 - 1) * sint * np.cos(phi)
elif func == 'fyz2':
sint = np.sqrt(1 - cost**2)
theta = np.sqrt(21) / 4 / np.sqrt(2*np.pi) * (5*cost**2 - 1) * sint * np.sin(phi)
elif func == 'fz(x2-y2)':
sint = np.sqrt(1 - cost**2)
theta = np.sqrt(105) / 4 / np.sqrt(np.pi) * sint**2 * cost * np.cos(2*phi)
elif func == 'fxyz':
sint = np.sqrt(1 - cost**2)
theta = np.sqrt(105) / 4 / np.sqrt(np.pi) * sint**2 * cost * np.sin(2*phi)
elif func == 'fx(x2-3y2)':
sint = np.sqrt(1 - cost**2)
theta = np.sqrt(35) / 4 / np.sqrt(2*np.pi) * sint**3 * (np.cos(phi)**2 - 3*np.sin(phi)**2) * np.cos(phi)
elif func == 'fy(3x2-y2)':
sint = np.sqrt(1 - cost**2)
theta = np.sqrt(35) / 4 / np.sqrt(2*np.pi) * sint**3 * (3*np.cos(phi)**2 - np.sin(phi)**2) * np.sin(phi)
return theta
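# Illustrative sketch (added): theta() expects cos(theta) and phi as arrays; the pz
# orbital, for instance, peaks along +z (cos(theta) = 1) and vanishes in the xy-plane.
def _demo_theta():
    cost = np.array([1.0, 0.0])  # along +z, in the xy-plane
    phi = np.array([0.0, 0.0])
    return theta('pz', cost, phi)  # approximately [0.4886, 0.0]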
def theta_lmr(l, mr, cost, phi):
'''
Compute the value of \Theta_{l,m_r}(\theta,\phi)
ref: Table 3.1 and 3.2 of Chapter 3, wannier90 User Guide
'''
assert l in [0,1,2,3,-1,-2,-3,-4,-5]
assert mr in [1,2,3,4,5,6,7]
if l == 0: # s
theta_lmr = theta('s', cost, phi)
elif (l == 1) and (mr == 1): # pz
theta_lmr = theta('pz', cost, phi)
elif (l == 1) and (mr == 2): # px
theta_lmr = theta('px', cost, phi)
elif (l == 1) and (mr == 3): # py
theta_lmr = theta('py', cost, phi)
elif (l == 2) and (mr == 1): # dz2
theta_lmr = theta('dz2', cost, phi)
elif (l == 2) and (mr == 2): # dxz
theta_lmr = theta('dxz', cost, phi)
elif (l == 2) and (mr == 3): # dyz
theta_lmr = theta('dyz', cost, phi)
elif (l == 2) and (mr == 4): # dx2-y2
theta_lmr = theta('dx2-y2', cost, phi)
elif (l == 2) and (mr == 5): # pxy
theta_lmr = theta('pxy', cost, phi)
elif (l == 3) and (mr == 1): # fz3
theta_lmr = theta('fz3', cost, phi)
elif (l == 3) and (mr == 2): # fxz2
theta_lmr = theta('fxz2', cost, phi)
elif (l == 3) and (mr == 3): # fyz2
theta_lmr = theta('fyz2', cost, phi)
elif (l == 3) and (mr == 4): # fz(x2-y2)
theta_lmr = theta('fz(x2-y2)', cost, phi)
elif (l == 3) and (mr == 5): # fxyz
theta_lmr = theta('fxyz', cost, phi)
elif (l == 3) and (mr == 6): # fx(x2-3y2)
theta_lmr = theta('fx(x2-3y2)', cost, phi)
elif (l == 3) and (mr == 7): # fy(3x2-y2)
theta_lmr = theta('fy(3x2-y2)', cost, phi)
elif (l == -1) and (mr == 1): # sp-1
theta_lmr = 1/np.sqrt(2) * (theta('s', cost, phi) + theta('px', cost, phi))
elif (l == -1) and (mr == 2): # sp-2
theta_lmr = 1/np.sqrt(2) * (theta('s', cost, phi) - theta('px', cost, phi))
elif (l == -2) and (mr == 1): # sp2-1
theta_lmr = 1/np.sqrt(3) * theta('s', cost, phi) - 1/np.sqrt(6) *theta('px', cost, phi) + 1/np.sqrt(2) * theta('py', cost, phi)
elif (l == -2) and (mr == 2): # sp2-2
theta_lmr = 1/np.sqrt(3) * theta('s', cost, phi) - 1/np.sqrt(6) *theta('px', cost, phi) - 1/np.sqrt(2) * theta('py', cost, phi)
elif (l == -2) and (mr == 3): # sp2-3
theta_lmr = 1/np.sqrt(3) * theta('s', cost, phi) + 2/np.sqrt(6) *theta('px', cost, phi)
elif (l == -3) and (mr == 1): # sp3-1
theta_lmr = 1/2 * (theta('s', cost, phi) + theta('px', cost, phi) + theta('py', cost, phi) + theta('pz', cost, phi))
elif (l == -3) and (mr == 2): # sp3-2
theta_lmr = 1/2 * (theta('s', cost, phi) + theta('px', cost, phi) - theta('py', cost, phi) - theta('pz', cost, phi))
elif (l == -3) and (mr == 3): # sp3-3
theta_lmr = 1/2 * (theta('s', cost, phi) - theta('px', cost, phi) + theta('py', cost, phi) - theta('pz', cost, phi))
elif (l == -3) and (mr == 4): # sp3-4
theta_lmr = 1/2 * (theta('s', cost, phi) - theta('px', cost, phi) - theta('py', cost, phi) + theta('pz', cost, phi))
elif (l == -4) and (mr == 1): # sp3d-1
theta_lmr = 1/np.sqrt(3) * theta('s', cost, phi) - 1/np.sqrt(6) *theta('px', cost, phi) + 1/np.sqrt(2) * theta('py', cost, phi)
elif (l == -4) and (mr == 2): # sp3d-2
theta_lmr = 1/np.sqrt(3) * theta('s', cost, phi) - 1/np.sqrt(6) *theta('px', cost, phi) - 1/np.sqrt(2) * theta('py', cost, phi)
elif (l == -4) and (mr == 3): # sp3d-3
theta_lmr = 1/np.sqrt(3) * theta('s', cost, phi) + 2/np.sqrt(6) * theta('px', cost, phi)
elif (l == -4) and (mr == 4): # sp3d-4
        theta_lmr = 1/np.sqrt(2) * (theta('pz', cost, phi) + theta('dz2', cost, phi))
elif (l == -4) and (mr == 5): # sp3d-5
        theta_lmr = 1/np.sqrt(2) * (-theta('pz', cost, phi) + theta('dz2', cost, phi))
elif (l == -5) and (mr == 1): # sp3d2-1
theta_lmr = 1/np.sqrt(6) * theta('s', cost, phi) - 1/np.sqrt(2) *theta('px', cost, phi) - 1/np.sqrt(12) *theta('dz2', cost, phi) \
+ 1/2 *theta('dx2-y2', cost, phi)
elif (l == -5) and (mr == 2): # sp3d2-2
theta_lmr = 1/np.sqrt(6) * theta('s', cost, phi) + 1/np.sqrt(2) *theta('px', cost, phi) - 1/np.sqrt(12) *theta('dz2', cost, phi) \
+ 1/2 *theta('dx2-y2', cost, phi)
elif (l == -5) and (mr == 3): # sp3d2-3
theta_lmr = 1/np.sqrt(6) * theta('s', cost, phi) - 1/np.sqrt(2) *theta('py', cost, phi) - 1/np.sqrt(12) *theta('dz2', cost, phi) \
- 1/2 *theta('dx2-y2', cost, phi)
elif (l == -5) and (mr == 4): # sp3d2-4
theta_lmr = 1/np.sqrt(6) * theta('s', cost, phi) + 1/np.sqrt(2) *theta('py', cost, phi) - 1/np.sqrt(12) *theta('dz2', cost, phi) \
- 1/2 *theta('dx2-y2', cost, phi)
elif (l == -5) and (mr == 5): # sp3d2-5
theta_lmr = 1/np.sqrt(6) * theta('s', cost, phi) - 1/np.sqrt(2) *theta('pz', cost, phi) + 1/np.sqrt(3) *theta('dz2', cost, phi)
elif (l == -5) and (mr == 6): # sp3d2-6
theta_lmr = 1/np.sqrt(6) * theta('s', cost, phi) + 1/
|
np.sqrt(2)
|
numpy.sqrt
|
# Various utilities for SSAM feature analysis
import numpy as np
import math
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
from scipy.spatial import ConvexHull
from scipy.interpolate import interp2d
import transforms3d as tf3d
from manipulator_learning.sim.utils.general import convert_quat_tf_to_pb, trans_quat_to_mat, convert_quat_pb_to_tf
from manipulator_learning.sim.robots.cameras import EyeInHandCam
# SSAM image tools
# --------------------------------------------------------------------------------------------------------------------
def convert_raw_ssam_to_img_coords(ssam, img_shape):
""" Convert raw ssam with values in range (-1, 1), output as (x0, y0, x1, y1, ..., xN, yN)
to image coordinates for plotting over top of image.
Also assumes that (-1, -1) corresponds to top left of image (0, 0). """
ssam_x, ssam_y = (ssam[0][::2].numpy(), ssam[0][1::2].numpy())
# ssam_x_img = (ssam_x * img_shape[1] / 2 + img_shape[1] / 2).astype(int)
ssam_x_img = (ssam_x * img_shape[1] / 2 + img_shape[1] / 2)
# ssam_y_img = (ssam_y * img_shape[0] / 2 + img_shape[0] / 2).astype(int)
ssam_y_img = (ssam_y * img_shape[0] / 2 + img_shape[0] / 2)
return ssam_x_img, ssam_y_img
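# Illustrative sketch (added): the mapping above sends the normalized range (-1, 1) to
# pixel coordinates, so -1 maps to 0 and +1 maps to the image width (or height). The
# real function expects a (1, 2N) tensor exposing .numpy(); the arithmetic is mirrored
# here with plain numpy for a 64-pixel-wide image.
def _demo_ssam_to_img_coords():
    ssam_x = np.array([-1.0, 0.0, 1.0])
    img_width = 64
    return ssam_x * img_width / 2 + img_width / 2  # array([ 0., 32., 64.])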
def get_ssam_on_img(ssam, ssam_indices, img, img_shape, cmap, feat_size, include_labels=True, custom_cmap_inds=None):
ssam_img = convert_raw_ssam_to_img_coords(ssam, img_shape)
plt.cla()
im = plt.imshow(img)
# plt.scatter(ssam_img[0][:num_feats], ssam_img[1][:num_feats], c=np.arange(num_feats), s=5, cmap=cmap)
# plt.scatter(ssam_img[0][ssam_indices], ssam_img[1][ssam_indices], c=ssam_indices, s=feat_size, cmap=cmap)
if custom_cmap_inds is not None:
colors = [cmap(i) for i in custom_cmap_inds]
plt.scatter(ssam_img[0][ssam_indices], ssam_img[1][ssam_indices], c=colors, s=feat_size, edgecolors='k')
else:
plt.scatter(ssam_img[0][ssam_indices], ssam_img[1][ssam_indices], c=np.arange(len(ssam_indices)), s=feat_size,
cmap=cmap, edgecolors='k')
if include_labels:
labels = np.array(ssam_indices).astype(str)
for feat_i, txt in zip(ssam_indices, labels):
plt.annotate(txt, (ssam_img[0][feat_i] + .3, ssam_img[1][feat_i] + .3), fontsize=6)
return ssam_img
# SE(3)/pybullet tools
# --------------------------------------------------------------------------------------------------------------------
def get_link_to_pb_pose(pb_client, body_id, link_i, trans, pb_rot):
""" Get a pb T (7 tuple, 3 pos 4 xyzw quat) given a body id, link id, and a transformation """
pbc = pb_client
_, _, _, _, orig_pos, orig_rot = pbc.getLinkState(body_id, link_i)
new_pos, new_rot = pbc.multiplyTransforms(orig_pos, orig_rot, trans, pb_rot)
return (*new_pos, *new_rot)
def get_world_to_cam_T_from_view_mat(view_mat):
""" Get the world to cam T matrix from the view matrix as output by pybullet """
# view_mat from pybullet/opengl, after inversion, has z pointing inwards...180 rotation about x fixes that
world_to_cam_T = np.linalg.inv(np.array(view_mat).reshape(4, 4).T)
ogl_fix = tf3d.euler.euler2mat(np.pi, 0, 0)
world_to_cam_T[:3, :3] = world_to_cam_T[:3, :3] @ ogl_fix
return world_to_cam_T
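# Illustrative sketch (added): for an identity view matrix (flattened column-major, as
# pybullet returns it), the resulting camera frame differs from the world frame only by
# the 180-degree rotation about x that fixes the OpenGL z-direction.
def _demo_world_to_cam_T():
    identity_view_mat = tuple(np.eye(4).flatten())
    T = get_world_to_cam_T_from_view_mat(identity_view_mat)
    return np.allclose(T[:3, :3], tf3d.euler.euler2mat(np.pi, 0, 0))  # True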
# Matplotlib drawing tools
# --------------------------------------------------------------------------------------------------------------------
def single_ep_render_multiple_feats_in_mpl(axes_3d_obj, all_feat_pos_world, cmap, marker, plot_hull=False):
# all_feat_pos_world = get_pos_worlds_from_ep_feat_infos(pb_client, ep_feat_infos, ts_range_list,
# rob_tool_cam_pose_pb, obj_cam_pose_pb)
ax = axes_3d_obj
hulls = []
hull_feat_is = []
for feat_i, ep_feat_info in enumerate(all_feat_pos_world):
if plot_hull:
feat_on_objects_pos = ep_feat_info[ep_feat_info[:, -1] >= 0][:, :-1]
if len(feat_on_objects_pos) >= 4:
hull = ConvexHull(feat_on_objects_pos)
hulls.append(hull)
hull_feat_is.append(feat_i)
for pos_obj_id in ep_feat_info:
pos = pos_obj_id[:-1]
o_id = int(pos_obj_id[-1])
if o_id != -1:
ax.scatter(*pos, color=cmap(feat_i), marker=marker)
# add plotting of convex hulls
if plot_hull:
for hull, hull_feat_i in zip(hulls, hull_feat_is):
for tri_i in hull.simplices:
tri = Poly3DCollection(hull.points[tri_i])
tri.set_color(cmap(hull_feat_i))
# tri.set_edgecolor('k') # adds extra edges that we might not want
ax.add_collection3d(tri)
# Reprojection tools
# --------------------------------------------------------------------------------------------------------------------
def get_pos_worlds_from_ep_feat_infos(pb_client, ep_feat_infos, ts_range_list, rob_tool_cam_pose_pb, obj_cam_pose_pb):
pbc = pb_client
efi = np.array(ep_feat_infos)
all_feat_world_pos = []
for feat_loop_i in range(efi.shape[1]):
feat_world_pos = []
for pos_obj_id in efi[ts_range_list[0]:ts_range_list[1], feat_loop_i]:
# get pos in world frame based on which obj it's in frame of
pos = pos_obj_id[:-1]
o_id = int(pos_obj_id[-1])
cam_frames = [rob_tool_cam_pose_pb, obj_cam_pose_pb, [0, 0, 0, 0, 0, 0, 1]]
pos_world, _ = pbc.multiplyTransforms(
cam_frames[o_id][:3], cam_frames[o_id][3:], pos, [0, 0, 0, 1])
feat_world_pos.append([*pos_world, o_id])
all_feat_world_pos.append(feat_world_pos)
return np.array(all_feat_world_pos)
def pb_take_cam_img(pb_client, env, obj_cam_pose_pb, use_robot_cam_pose=False):
pbc = pb_client
if use_robot_cam_pose:
cur_base_pose = env.env.gripper.manipulator.get_link_pose(0)
cur_base_pose = [cur_base_pose[:3], cur_base_pose[3:]]
cur_base_pose_mat = trans_quat_to_mat(*cur_base_pose)
rgb, _ = env.env.workspace_cam.get_img(cur_base_pose_mat, width=1280, height=960)
else:
cam = EyeInHandCam(pbc, [0, 0, 0], [0, 0, 0, 1], [0, 0, 1], [0, -1, 0], 'opengl', True,
width=1280, height=960)
rgb, _ = cam.get_img(trans_quat_to_mat(obj_cam_pose_pb[:3], obj_cam_pose_pb[3:]))
return rgb
def single_ep_render_multiple_feats_in_pb(pb_client, all_feat_pos_world, obj_cam_pose_pb, cmap, radius,
make_objs_transparent=False, alpha=0.5, trans_color=0.5, env=None,
use_robot_cam_pose=False, custom_cmap_inds=None):
pbc = pb_client
mbs = []
for feat_i, ep_feat_info in enumerate(all_feat_pos_world):
for pos_obj_id in ep_feat_info:
pos = pos_obj_id[:-1]
o_id = int(pos_obj_id[-1])
if o_id != -1:
if custom_cmap_inds is not None:
mb = draw_feature_in_pb(pbc, cmap(custom_cmap_inds[feat_i]), pos, radius=radius)
else:
mb = draw_feature_in_pb(pbc, cmap(feat_i), pos, radius=radius)
mbs.append(mb)
if make_objs_transparent:
color = trans_color
# save all visual info about door and robot so we can reset it after making image
door_ids = env.env.save_body_texture_ids(env.env.door)
gripper_ids = env.env.save_body_texture_ids(env.env.gripper.body_id)
env.env.update_body_visual(env.env.gripper.body_id, color, color, color, alpha)
env.env.update_body_visual(env.env.door, color, color, color, alpha)
rgb = pb_take_cam_img(pbc, env, obj_cam_pose_pb, use_robot_cam_pose)
# remove the feature multibodies so they don't interfere with future episodes
for mb in mbs:
pbc.removeBody(mb)
# reset visual info of door and robot
env.env.update_body_visual_with_saved(env.env.door, door_ids, use_rgba=True)
env.env.update_body_visual_with_saved(env.env.gripper.body_id, gripper_ids, use_rgba=True)
else:
rgb = pb_take_cam_img(pbc, env, obj_cam_pose_pb, use_robot_cam_pose)
return rgb, mbs
def get_feat_pos_and_obj(pb_client, feat_indices, ssam_img, env_cam, true_depth, world_to_cam_T_mat, seg_mask_img,
seg_mask_ints, rob_tool_cam_pose_pb, obj_cam_pose_pb):
""" Get feature infos: relative to cam pos and obj integer id for each feature. """
pbc = pb_client
feat_infos = []
for feat_loop_i, feat in enumerate(feat_indices):
u = ssam_img[0][feat]; v = ssam_img[1][feat]
point_world = get_world_xyz_from_feature(
u=u, v=v, fov=env_cam.fov, aspect=env_cam.aspect, depth_img=true_depth,
world_to_cam_T=world_to_cam_T_mat) # this is okay since it doesn't change for the whole episode
# get object that feature point is "on"
obj_id = -1 # -1 corresponds to not on any object of interest
# avoid out of bound issues on too high u/v values
width = seg_mask_img.shape[1]; height = seg_mask_img.shape[0]
seg_mask_img_inds = [round(v), round(u)]
if seg_mask_img_inds[0] == height:
seg_mask_img_inds[0] -= 1
if seg_mask_img_inds[1] == width:
seg_mask_img_inds[1] -= 1
feat_point_obj_id = seg_mask_img[seg_mask_img_inds[0], seg_mask_img_inds[1]]
for int_list_i, int_list in enumerate(seg_mask_ints):
if feat_point_obj_id in int_list:
obj_id = int_list_i
break
# get pose of feature in "camera" frame for that object
if obj_id > -1:
cam_frames = [rob_tool_cam_pose_pb, obj_cam_pose_pb]
cam_to_world_t, cam_to_world_r = pbc.invertTransform(cam_frames[obj_id][:3], cam_frames[obj_id][3:])
feat_pos, feat_rot = pbc.multiplyTransforms(cam_to_world_t, cam_to_world_r, point_world, [0, 0, 0, 1])
else:
feat_pos = point_world
# feat_info = dict(rel_pose=feat_pos, obj_id=obj_id)
feat_info = [*feat_pos, obj_id]
feat_infos.append(feat_info)
return np.array(feat_infos)
def draw_feature_in_pb(pb_client, rgba_color, point_world, radius=.01, shape='SPHERE'):
""" Draw a feature in pb. Return the pb body object id. """
pbc = pb_client
shape_options = ['SPHERE', 'BOX', 'CAPSULE', 'CYLINDER']
assert shape in shape_options, "Shape %s is not an option out of options %s" % (shape, shape_options)
shape = getattr(pbc, 'GEOM_' + shape)
visual_shape_id = pbc.createVisualShape(shapeType=shape, rgbaColor=rgba_color, radius=radius)
collision_shape_id = -1
mb = pbc.createMultiBody(baseMass=0,
baseCollisionShapeIndex=collision_shape_id,
baseVisualShapeIndex=visual_shape_id,
basePosition=point_world,
useMaximalCoordinates=True)
return mb
def get_true_depth_and_segment_from_man_learn_env(env):
_, depth, segment = env.render('rgb_and_true_depth_and_segment_mask')
return depth, segment
def get_world_xyz_from_feature(u, v, fov, aspect, depth_img, world_to_cam_T):
"""
Get the xyz point corresponding to a 2D camera feature point, assuming that
the distance from the camera eye is given by a depth image.
The depth image from pybullet contains perpendicular distance from the plane of the camera, see
https://stackoverflow.com/questions/6652253/getting-the-true-z-value-from-the-depth-buffer
:param u: x feature location in image, (0, 0) is top left
:param v: y feature location in image, (0, 0) is top left
:param fov: field of view as given to proj mat for pybullet camera, in degrees
:param aspect: aspect ratio as given to pybullet camera for generating image (often w/h)
:param depth_img: the full depth image, with TRUE depths. width and height taken from this.
:param world_to_cam_T: the 4x4 world to cam T, can be gotten using get_world_to_cam_T_from_view_mat
:return: [x, y, z] tuple of point in world frame
"""
w = depth_img.shape[1]
h = depth_img.shape[0]
fov_rad = np.deg2rad(fov)
depth_inter = interp2d(range(w), range(h), depth_img)
d_uv = depth_inter(u, v)
# d_uv = depth_img[v, u] # first index is row which is Y value, second is col which is x value
# if depth at any corner is significantly higher or lower than others (i.e. indicating depth is on an edge),
# just take the depth at the rounded integer closest to the point, also ensuring it won't be out of bounds
max_diff = .05 # 5cm
if u < w - 1 and v < h - 1: # this ensures avoidance of out of bounds
depth_corners = depth_img[math.floor(v):math.ceil(v) + 1, math.floor(u):math.ceil(u) + 1]
diffs = depth_corners - d_uv
if np.any(diffs > max_diff):
d_uv = depth_img[round(v), round(u)]
elif u < w - 1 and v > h - 1: # v is above max
depth_corners = depth_img[h - 1, math.floor(u):math.ceil(u) + 1] # only 2
diffs = depth_corners - d_uv
if np.any(diffs > max_diff):
d_uv = depth_img[math.floor(v), round(u)]
elif u > w - 1 and v < h - 1: # u is above max
depth_corners = depth_img[math.floor(v):math.ceil(v) + 1, w - 1] # only 2
diffs = depth_corners - d_uv
if np.any(diffs > max_diff):
d_uv = depth_img[round(v), math.floor(u)]
# f_x = w / (2 * np.tan(fov_rad / 2))
# turns out that FOV is actually vertical ---
# see https://www.scratchapixel.com/lessons/3d-basic-rendering/perspective-and-orthographic-projection-matrix/opengl-perspective-projection-matrix
f_y = h / (2 * np.tan(fov_rad / 2))
# f_x = f_y * aspect
f_x = f_y
theta_x = np.arctan((u - w/2) / f_x)
theta_y =
|
np.arctan((v - h/2) / f_y)
|
numpy.arctan
|
# coding=utf-8
# Copyright 2022 The Reach ML Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple block environments for the XArm."""
import collections
import enum
import math
import time
from typing import Dict, List, Optional, Tuple, Union
import gin
import gym
from gym import spaces
from gym.envs import registration
from ibc.environments.block_pushing import metrics as block_pushing_metrics
from ibc.environments.utils import utils_pybullet
from ibc.environments.utils import xarm_sim_robot
from ibc.environments.utils.pose3d import Pose3d
from ibc.environments.utils.utils_pybullet import ObjState
from ibc.environments.utils.utils_pybullet import XarmState
import numpy as np
from scipy.spatial import transform
import pybullet
import pybullet_utils.bullet_client as bullet_client
BLOCK_URDF_PATH = 'third_party/py/ibc/environments/assets/block.urdf'
PLANE_URDF_PATH = ('third_party/bullet/examples/pybullet/gym/pybullet_data/'
'plane.urdf')
WORKSPACE_URDF_PATH = 'third_party/py/ibc/environments/assets/workspace.urdf'
ZONE_URDF_PATH = 'third_party/py/ibc/environments/assets/zone.urdf'
INSERT_URDF_PATH = 'third_party/py/ibc/environments/assets/insert.urdf'
EFFECTOR_HEIGHT = 0.06
EFFECTOR_DOWN_ROTATION = transform.Rotation.from_rotvec([0, math.pi, 0])
WORKSPACE_BOUNDS = np.array(((0.15, -0.5), (0.7, 0.5)))
# Min/max bounds calculated from oracle data using:
# ibc/environments/board2d_dataset_statistics.ipynb
# to calculate [mean - 3 * std, mean + 3 * std] using the oracle data.
# pylint: disable=line-too-long
ACTION_MIN = np.array([-0.02547718, -0.02090043], np.float32)
ACTION_MAX = np.array([0.02869084, 0.04272365], np.float32)
EFFECTOR_TARGET_TRANSLATION_MIN = np.array([0.1774151772260666, -0.6287994794547558], np.float32)
EFFECTOR_TARGET_TRANSLATION_MAX = np.array([0.5654461532831192, 0.5441607423126698], np.float32)
EFFECTOR_TARGET_TO_BLOCK_TRANSLATION_MIN =
|
np.array([-0.07369826920330524, -0.11395704373717308], np.float32)
|
numpy.array
|
"""
Test Tabular Surrogate Explainer Builder
========================================
"""
# Author: <NAME> <<EMAIL>>
# License: new BSD
from sklearn.tree import DecisionTreeClassifier
from sklearn.linear_model import LogisticRegression
from sklearn import datasets
import numpy as np
import tabular_surrogate_builder
RANDOM_SEED = 42
iris = datasets.load_iris()
x_name, y_name = 'petal length (cm)', 'petal width (cm)'
x_ind = iris.feature_names.index(x_name)
y_ind = iris.feature_names.index(y_name)
X = iris.data[:, [x_ind, y_ind]]  # Use only the petal length and petal width features
Y = iris.target
tree_clf = DecisionTreeClassifier(
max_depth=5, min_samples_leaf=15, random_state=RANDOM_SEED)
tree_clf.fit(X, Y)
logreg_clf = LogisticRegression(random_state=RANDOM_SEED)
logreg_clf.fit(X, Y)
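# Quick sanity-check sketch (added, not part of the original test suite): both fitted
# models are expected to classify a typical setosa-like point (short, narrow petals) as
# class 0.
def _demo_classifiers_agree():
    probe = np.array([[1.5, 0.25]])  # petal length (cm), petal width (cm)
    return tree_clf.predict(probe)[0], logreg_clf.predict(probe)[0]  # expected (0, 0)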
def test_tabular_blimey():
"""Tests bLIMEy explanations."""
x_min, x_max = X[:, 0].min(), X[:, 0].max()
y_min, y_max = X[:, 1].min(), X[:, 1].max()
class_map = {cls: i for i, cls in enumerate(iris.target_names)}
instances = {
'setosa': np.array([1.5, 0.25]),
'versicolor': np.array([4.5, 1.25]),
'virginica': np.array([5.5, 2.25])
}
models = {
'tree-intercept': (tree_clf.predict, True),
'tree-no-intercept': (tree_clf.predict, False),
'logreg-intercept': (logreg_clf.predict, True),
'logreg-no-intercept': (logreg_clf.predict, False)
}
samples = [
[0, 3, 6, 9, 12, 15, 18, 21, 24],
[12, 6, 24, 0, 15, 9, 3, 21, 18]
]
x_bins, y_bins = [1, 2.5, 3.3, 6], [.5, 1.5, 2]
discs = []
for i, ix in enumerate(x_bins):
for iix in x_bins[i + 1:]: # X-axis
for j, jy in enumerate(y_bins): # Y-axis
for jjy in y_bins[j + 1:]:
discs.append({
0: [ix, iix],
1: [jy, jjy]
})
for inst_i, inst in instances.items():
for samples_no_i, samples_no in enumerate(samples):
for cls, cls_i in class_map.items():
for disc_i, disc in enumerate(discs):
for model_i, (pred_fn, intercept) in models.items():
disc_x = [x_min] + disc[0] + [x_max]
disc_y = [y_min] + disc[1] + [y_max]
data = tabular_surrogate_builder._generate_data(
samples_no, disc_x, disc_y, RANDOM_SEED)
exp = tabular_surrogate_builder.build_tabular_blimey(
inst, cls_i, data, pred_fn, disc, intercept, RANDOM_SEED)
key = '{}&{}&{}&{}&{}'.format(
inst_i, samples_no_i, cls, disc_i, model_i)
assert np.allclose(
exp,
EXP[key],
atol=.001,
equal_nan=True
)
EXP = {
'setosa&0&setosa&0&tree-intercept': np.array([1.5739331306990878, 0.10256534954407359]),
'setosa&0&setosa&0&tree-no-intercept': np.array([1.1973684210526314, -0.36430921052631576]),
'setosa&0&setosa&0&logreg-intercept': np.array([1.5013252279635236, 0.16661398176291822]),
'setosa&0&setosa&0&logreg-no-intercept': np.array([1.144736842105263, -0.2754934210526315]),
'setosa&0&setosa&1&tree-intercept': np.array([1.5739331306990878, 0.10256534954407359]),
'setosa&0&setosa&1&tree-no-intercept': np.array([1.1973684210526314, -0.36430921052631576]),
'setosa&0&setosa&1&logreg-intercept': np.array([1.5013252279635236, 0.16661398176291822]),
'setosa&0&setosa&1&logreg-no-intercept': np.array([1.144736842105263, -0.2754934210526315]),
'setosa&0&setosa&2&tree-intercept': np.array([1.5739331306990878, 0.10256534954407359]),
'setosa&0&setosa&2&tree-no-intercept': np.array([1.1973684210526314, -0.36430921052631576]),
'setosa&0&setosa&2&logreg-intercept': np.array([1.5739331306990878, 0.10256534954407359]),
'setosa&0&setosa&2&logreg-no-intercept': np.array([1.1973684210526314, -0.36430921052631576]),
'setosa&0&setosa&3&tree-intercept': np.array([1.3185896656534941, 0.11883282674772026]),
'setosa&0&setosa&3&tree-no-intercept': np.array([0.9342105263157893, -0.35773026315789463]),
'setosa&0&setosa&3&logreg-intercept': np.array([1.4272097264437666, 0.1328632218844987]),
'setosa&0&setosa&3&logreg-no-intercept': np.array([1.0394736842105263, -0.3478618421052631]),
'setosa&0&setosa&4&tree-intercept': np.array([1.3185896656534941, 0.11883282674772026]),
'setosa&0&setosa&4&tree-no-intercept': np.array([0.9342105263157893, -0.35773026315789463]),
'setosa&0&setosa&4&logreg-intercept': np.array([1.4272097264437666, 0.1328632218844987]),
'setosa&0&setosa&4&logreg-no-intercept': np.array([1.0394736842105263, -0.3478618421052631]),
'setosa&0&setosa&5&tree-intercept': np.array([1.3185896656534941, 0.11883282674772026]),
'setosa&0&setosa&5&tree-no-intercept': np.array([0.9342105263157893, -0.35773026315789463]),
'setosa&0&setosa&5&logreg-intercept': np.array([1.3891063829787214, 0.17719148936170231]),
'setosa&0&setosa&5&logreg-no-intercept': np.array([0.9868421052631579, -0.32154605263157887]),
'setosa&0&setosa&6&tree-intercept': np.array([0.699282674772036, 0.13733738601823722]),
'setosa&0&setosa&6&tree-no-intercept': np.array([0.3026315789473684, -0.3544407894736842]),
'setosa&0&setosa&6&logreg-intercept': np.array([0.8812644376899683, 0.13621884498480247]),
'setosa&0&setosa&6&logreg-no-intercept': np.array([0.4868421052631579, -0.3527960526315789]),
'setosa&0&setosa&7&tree-intercept': np.array([0.699282674772036, 0.13733738601823722]),
'setosa&0&setosa&7&tree-no-intercept': np.array([0.3026315789473684, -0.3544407894736842]),
'setosa&0&setosa&7&logreg-intercept': np.array([0.8812644376899683, 0.13621884498480247]),
'setosa&0&setosa&7&logreg-no-intercept': np.array([0.4868421052631579, -0.3527960526315789]),
'setosa&0&setosa&8&tree-intercept': np.array([0.699282674772036, 0.13733738601823722]),
'setosa&0&setosa&8&tree-no-intercept': np.array([0.3026315789473684, -0.3544407894736842]),
'setosa&0&setosa&8&logreg-intercept': np.array([0.8812644376899683, 0.13621884498480247]),
'setosa&0&setosa&8&logreg-no-intercept': np.array([0.4868421052631579, -0.3527960526315789]),
'setosa&0&setosa&9&tree-intercept': np.array([1.905052264808362, 0.007839721254356067]),
'setosa&0&setosa&9&tree-no-intercept': np.array([1.5081743869209807, -0.8460490463215258]),
'setosa&0&setosa&9&logreg-intercept': np.array([1.6393728222996513, 0.07259001161440241]),
'setosa&0&setosa&9&logreg-no-intercept': np.array([1.3365122615803813, -0.5790190735694822]),
'setosa&0&setosa&10&tree-intercept': np.array([1.905052264808362, 0.007839721254356067]),
'setosa&0&setosa&10&tree-no-intercept': np.array([1.5081743869209807, -0.8460490463215258]),
'setosa&0&setosa&10&logreg-intercept': np.array([1.6585365853658536, 0.11382113821138252]),
'setosa&0&setosa&10&logreg-no-intercept': np.array([1.3365122615803813, -0.5790190735694822]),
'setosa&0&setosa&11&tree-intercept': np.array([1.905052264808362, 0.007839721254356067]),
'setosa&0&setosa&11&tree-no-intercept': np.array([1.5081743869209807, -0.8460490463215258]),
'setosa&0&setosa&11&logreg-intercept': np.array([1.7238675958188177, 0.1634727061556331]),
'setosa&0&setosa&11&logreg-no-intercept': np.array([1.3610354223433239, -0.6171662125340599]),
'setosa&0&setosa&12&tree-intercept': np.array([1.905052264808362, 0.007839721254356067]),
'setosa&0&setosa&12&tree-no-intercept': np.array([1.5081743869209807, -0.8460490463215258]),
'setosa&0&setosa&12&logreg-intercept': np.array([1.8780487804878052, 0.04065040650406589]),
'setosa&0&setosa&12&logreg-no-intercept': np.array([1.483651226158038, -0.8079019073569482]),
'setosa&0&setosa&13&tree-intercept': np.array([1.905052264808362, 0.007839721254356067]),
'setosa&0&setosa&13&tree-no-intercept': np.array([1.5081743869209807, -0.8460490463215258]),
'setosa&0&setosa&13&logreg-intercept': np.array([1.8780487804878052, 0.04065040650406589]),
'setosa&0&setosa&13&logreg-no-intercept': np.array([1.483651226158038, -0.8079019073569482]),
'setosa&0&setosa&14&tree-intercept': np.array([1.905052264808362, 0.007839721254356067]),
'setosa&0&setosa&14&tree-no-intercept': np.array([1.5081743869209807, -0.8460490463215258]),
'setosa&0&setosa&14&logreg-intercept': np.array([1.905052264808362, 0.007839721254356067]),
'setosa&0&setosa&14&logreg-no-intercept': np.array([1.5081743869209807, -0.8460490463215258]),
'setosa&0&setosa&15&tree-intercept': np.array([1.54965156794425, 0.03106852497096429]),
'setosa&0&setosa&15&tree-no-intercept': np.array([1.145776566757493, -0.8378746594005451]),
'setosa&0&setosa&15&logreg-intercept': np.array([1.6933797909407649, 0.006968641114983181]),
'setosa&0&setosa&15&logreg-no-intercept': np.array([1.2956403269754766, -0.8487738419618529]),
'setosa&0&setosa&16&tree-intercept': np.array([1.54965156794425, 0.03106852497096429]),
'setosa&0&setosa&16&tree-no-intercept': np.array([1.145776566757493, -0.8378746594005451]),
'setosa&0&setosa&16&logreg-intercept': np.array([1.6933797909407649, 0.006968641114983181]),
'setosa&0&setosa&16&logreg-no-intercept': np.array([1.2956403269754766, -0.8487738419618529]),
'setosa&0&setosa&17&tree-intercept': np.array([1.54965156794425, 0.03106852497096429]),
'setosa&0&setosa&17&tree-no-intercept': np.array([1.145776566757493, -0.8378746594005451]),
'setosa&0&setosa&17&logreg-intercept': np.array([1.61759581881533, 0.05603948896631865]),
'setosa&0&setosa&17&logreg-no-intercept': np.array([1.2084468664850134, -0.8242506811989099]),
'setosa&0&versicolor&0&tree-intercept': np.array([-1.4066382978723382, 0.10485106382978716]),
'setosa&0&versicolor&0&tree-no-intercept': np.array([-1.1973684210526314, 0.36430921052631576]),
'setosa&0&versicolor&0&logreg-intercept': np.array([-1.2977264437689953, 0.008778115501519752]),
'setosa&0&versicolor&0&logreg-no-intercept': np.array([-1.118421052631579, 0.23108552631578946]),
'setosa&0&versicolor&1&tree-intercept': np.array([-1.3062613981762905, 0.22930091185410326]),
'setosa&0&versicolor&1&tree-no-intercept': np.array([-1.1973684210526314, 0.36430921052631576]),
'setosa&0&versicolor&1&logreg-intercept': np.array([-1.2642674772036455, 0.0502613981762915]),
'setosa&0&versicolor&1&logreg-no-intercept': np.array([-1.118421052631579, 0.23108552631578946]),
'setosa&0&versicolor&2&tree-intercept': np.array([-1.105507598784193, 0.47820060790273566]),
'setosa&0&versicolor&2&tree-no-intercept': np.array([-1.1973684210526314, 0.36430921052631576]),
'setosa&0&versicolor&2&logreg-intercept': np.array([-1.2699574468085086, 0.19727659574468057]),
'setosa&0&versicolor&2&logreg-no-intercept': np.array([-1.1710526315789473, 0.3199013157894736]),
'setosa&0&versicolor&3&tree-intercept': np.array([-1.189398176291792, 0.13291185410334339]),
'setosa&0&versicolor&3&tree-no-intercept': np.array([-0.9868421052631579, 0.38404605263157887]),
'setosa&0&versicolor&3&logreg-intercept': np.array([-1.223610942249238, 0.04252887537993902]),
'setosa&0&versicolor&3&logreg-no-intercept': np.array([-1.013157894736842, 0.303453947368421]),
'setosa&0&versicolor&4&tree-intercept': np.array([-1.0890212765957425, 0.25736170212765935]),
'setosa&0&versicolor&4&tree-no-intercept': np.array([-0.9868421052631579, 0.38404605263157887]),
'setosa&0&versicolor&4&logreg-intercept': np.array([-1.15669300911854, 0.12549544072948324]),
'setosa&0&versicolor&4&logreg-no-intercept': np.array([-1.013157894736842, 0.303453947368421]),
'setosa&0&versicolor&5&tree-intercept': np.array([-0.9263708206686919, 0.5505896656534954]),
'setosa&0&versicolor&5&tree-no-intercept': np.array([-1.0394736842105263, 0.4103618421052631]),
'setosa&0&versicolor&5&logreg-intercept': np.array([-0.9484498480243149, 0.2150759878419453]),
'setosa&0&versicolor&5&logreg-no-intercept': np.array([-0.9342105263157895, 0.2327302631578947]),
'setosa&0&versicolor&6&tree-intercept': np.array([-0.6081945288753788, 0.15873556231003033]),
'setosa&0&versicolor&6&tree-no-intercept': np.array([-0.4078947368421052, 0.40707236842105254]),
'setosa&0&versicolor&6&logreg-intercept': np.array([0.037398176291793324, 0.5550881458966567]),
'setosa&0&versicolor&6&logreg-no-intercept': np.array([-0.3552631578947368, 0.06825657894736842]),
'setosa&0&versicolor&7&tree-intercept': np.array([-0.5459209726443761, 0.32751367781155016]),
'setosa&0&versicolor&7&tree-no-intercept': np.array([-0.4605263157894736, 0.4333881578947368]),
'setosa&0&versicolor&7&logreg-intercept': np.array([0.037398176291793324, 0.5550881458966567]),
'setosa&0&versicolor&7&logreg-no-intercept': np.array([-0.3552631578947368, 0.06825657894736842]),
'setosa&0&versicolor&8&tree-intercept': np.array([-0.4594772036474156, 0.7093981762917936]),
'setosa&0&versicolor&8&tree-no-intercept': np.array([-0.618421052631579, 0.5123355263157895]),
'setosa&0&versicolor&8&logreg-intercept': np.array([0.4359878419452881, 0.15392097264437712]),
'setosa&0&versicolor&8&logreg-no-intercept': np.array([-0.0921052631578948, -0.5008223684210525]),
'setosa&0&versicolor&9&tree-intercept': np.array([-1.7709059233449487, 0.28077816492450636]),
'setosa&0&versicolor&9&tree-no-intercept': np.array([-1.5081743869209807, 0.8460490463215258]),
'setosa&0&versicolor&9&logreg-intercept': np.array([-1.5165505226480849, 0.10075493612078955]),
'setosa&0&versicolor&9&logreg-no-intercept': np.array([-1.3119891008174385, 0.5408719346049046]),
'setosa&0&versicolor&10&tree-intercept': np.array([-1.6559233449477355, 0.5281649245063876]),
'setosa&0&versicolor&10&tree-no-intercept': np.array([-1.5081743869209807, 0.8460490463215258]),
'setosa&0&versicolor&10&logreg-intercept': np.array([-1.4973867595818822, 0.1419860627177698]),
'setosa&0&versicolor&10&logreg-no-intercept': np.array([-1.3119891008174385, 0.5408719346049046]),
'setosa&0&versicolor&11&tree-intercept': np.array([-1.4642857142857129, 0.9404761904761902]),
'setosa&0&versicolor&11&tree-no-intercept': np.array([-1.5081743869209807, 0.8460490463215258]),
'setosa&0&versicolor&11&logreg-intercept': np.array([-1.4590592334494785, 0.22444831591173042]),
'setosa&0&versicolor&11&logreg-no-intercept': np.array([-1.3119891008174385, 0.5408719346049046]),
'setosa&0&versicolor&12&tree-intercept': np.array([-1.7709059233449487, 0.28077816492450636]),
'setosa&0&versicolor&12&tree-no-intercept': np.array([-1.5081743869209807, 0.8460490463215258]),
'setosa&0&versicolor&12&logreg-intercept': np.array([-1.2674216027874567, 0.636759581881533]),
'setosa&0&versicolor&12&logreg-no-intercept': np.array([-1.3119891008174385, 0.5408719346049046]),
'setosa&0&versicolor&13&tree-intercept': np.array([-1.6559233449477355, 0.5281649245063876]),
'setosa&0&versicolor&13&tree-no-intercept': np.array([-1.5081743869209807, 0.8460490463215258]),
'setosa&0&versicolor&13&logreg-intercept': np.array([-1.2674216027874567, 0.636759581881533]),
'setosa&0&versicolor&13&logreg-no-intercept': np.array([-1.3119891008174385, 0.5408719346049046]),
'setosa&0&versicolor&14&tree-intercept': np.array([-1.4642857142857129, 0.9404761904761902]),
'setosa&0&versicolor&14&tree-no-intercept': np.array([-1.5081743869209807, 0.8460490463215258]),
'setosa&0&versicolor&14&logreg-intercept': np.array([-0.8780487804878059, 0.2926829268292682]),
'setosa&0&versicolor&14&logreg-no-intercept': np.array([-0.9931880108991824, 0.04495912806539504]),
'setosa&0&versicolor&15&tree-intercept': np.array([-1.4155052264808363, 0.2575493612078976]),
'setosa&0&versicolor&15&tree-no-intercept': np.array([-1.145776566757493, 0.8378746594005451]),
'setosa&0&versicolor&15&logreg-intercept': np.array([-1.0635888501742157, 0.7116724738675959]),
'setosa&0&versicolor&15&logreg-no-intercept': np.array([-1.1239782016348774, 0.5817438692098093]),
'setosa&0&versicolor&16&tree-intercept': np.array([-1.300522648083622, 0.5049361207897793]),
'setosa&0&versicolor&16&tree-no-intercept': np.array([-1.145776566757493, 0.8378746594005451]),
'setosa&0&versicolor&16&logreg-intercept': np.array([-1.0444250871080138, 0.7529036004645763]),
'setosa&0&versicolor&16&logreg-no-intercept': np.array([-1.1239782016348774, 0.5817438692098093]),
'setosa&0&versicolor&17&tree-intercept': np.array([-1.184668989547039, 0.9663182346109179]),
'setosa&0&versicolor&17&tree-no-intercept': np.array([-1.2329700272479562, 0.8623978201634876]),
'setosa&0&versicolor&17&logreg-intercept': np.array([-0.5331010452961679, 0.36817653890824636]),
'setosa&0&versicolor&17&logreg-no-intercept': np.array([-0.6934604904632151, 0.02316076294277924]),
'setosa&0&virginica&0&tree-intercept': np.array([-0.16729483282674798, -0.20741641337386058]),
'setosa&0&virginica&0&tree-no-intercept': np.array([-0.5657894736842106, -0.7014802631578947]),
'setosa&0&virginica&0&logreg-intercept': np.array([-0.20359878419452845, -0.1753920972644377]),
'setosa&0&virginica&0&logreg-no-intercept': np.array([-0.5921052631578947, -0.6570723684210525]),
'setosa&0&virginica&1&tree-intercept': np.array([-0.26767173252279597, -0.3318662613981765]),
'setosa&0&virginica&1&tree-no-intercept': np.array([-0.5657894736842106, -0.7014802631578947]),
'setosa&0&virginica&1&logreg-intercept': np.array([-0.23705775075987823, -0.2168753799392096]),
'setosa&0&virginica&1&logreg-no-intercept': np.array([-0.5921052631578947, -0.6570723684210525]),
'setosa&0&virginica&2&tree-intercept': np.array([-0.4684255319148932, -0.5807659574468084]),
'setosa&0&virginica&2&tree-no-intercept': np.array([-0.5657894736842106, -0.7014802631578947]),
'setosa&0&virginica&2&logreg-intercept': np.array([-0.30397568389057705, -0.2998419452887544]),
'setosa&0&virginica&2&logreg-no-intercept': np.array([-0.5921052631578947, -0.6570723684210525]),
'setosa&0&virginica&3&tree-intercept': np.array([-0.12919148936170174, -0.2517446808510638]),
'setosa&0&virginica&3&tree-no-intercept': np.array([-0.5131578947368421, -0.7277960526315789]),
'setosa&0&virginica&3&logreg-intercept': np.array([-0.20359878419452845, -0.1753920972644377]),
'setosa&0&virginica&3&logreg-no-intercept': np.array([-0.5921052631578947, -0.6570723684210525]),
'setosa&0&virginica&4&tree-intercept': np.array([-0.2295683890577505, -0.37619452887538046]),
'setosa&0&virginica&4&tree-no-intercept': np.array([-0.5131578947368421, -0.7277960526315789]),
'setosa&0&virginica&4&logreg-intercept': np.array([-0.27051671732522753, -0.2583586626139816]),
'setosa&0&virginica&4&logreg-no-intercept': np.array([-0.5921052631578947, -0.6570723684210525]),
'setosa&0&virginica&5&tree-intercept': np.array([-0.39221884498480175, -0.6694224924012161]),
'setosa&0&virginica&5&tree-no-intercept': np.array([-0.4605263157894739, -0.7541118421052629]),
'setosa&0&virginica&5&logreg-intercept': np.array([-0.4406565349544069, -0.3922674772036473]),
'setosa&0&virginica&5&logreg-no-intercept': np.array([-0.618421052631579, -0.6126644736842104]),
'setosa&0&virginica&6&tree-intercept': np.array([-0.09108814589665652, -0.296072948328267]),
'setosa&0&virginica&6&tree-no-intercept': np.array([-0.4605263157894739, -0.7541118421052629]),
'setosa&0&virginica&6&logreg-intercept': np.array([-0.9186626139817617, -0.6913069908814593]),
'setosa&0&virginica&6&logreg-no-intercept': np.array([-0.6973684210526315, -0.4169407894736842]),
'setosa&0&virginica&7&tree-intercept': np.array([-0.15336170212765957, -0.46485106382978736]),
'setosa&0&virginica&7&tree-no-intercept': np.array([-0.4078947368421052, -0.7804276315789473]),
'setosa&0&virginica&7&logreg-intercept': np.array([-0.9186626139817617, -0.6913069908814593]),
'setosa&0&virginica&7&logreg-no-intercept': np.array([-0.6973684210526315, -0.4169407894736842]),
'setosa&0&virginica&8&tree-intercept': np.array([-0.23980547112461933, -0.8467355623100313]),
'setosa&0&virginica&8&tree-no-intercept': np.array([-0.2500000000000001, -0.8593749999999999]),
'setosa&0&virginica&8&logreg-intercept': np.array([-1.3172522796352566, -0.29013981762917995]),
'setosa&0&virginica&8&logreg-no-intercept': np.array([-0.9605263157894737, 0.15213815789473678]),
'setosa&0&virginica&9&tree-intercept': np.array([-0.13414634146341475, -0.28861788617886147]),
'setosa&0&virginica&9&tree-no-intercept': np.array([-0.40463215258855567, -0.8705722070844688]),
'setosa&0&virginica&9&logreg-intercept': np.array([-0.12282229965156827, -0.17334494773519163]),
'setosa&0&virginica&9&logreg-no-intercept': np.array([-0.42915531335149854, -0.832425068119891]),
'setosa&0&virginica&10&tree-intercept': np.array([-0.24912891986062732, -0.5360046457607435]),
'setosa&0&virginica&10&tree-no-intercept': np.array([-0.40463215258855567, -0.8705722070844688]),
'setosa&0&virginica&10&logreg-intercept': np.array([-0.1611498257839721, -0.25580720092915205]),
'setosa&0&virginica&10&logreg-no-intercept': np.array([-0.42915531335149854, -0.832425068119891]),
'setosa&0&virginica&11&tree-intercept': np.array([-0.4407665505226477, -0.9483159117305445]),
'setosa&0&virginica&11&tree-no-intercept': np.array([-0.40463215258855567, -0.8705722070844688]),
'setosa&0&virginica&11&logreg-intercept': np.array([-0.2648083623693382, -0.3879210220673635]),
'setosa&0&virginica&11&logreg-no-intercept': np.array([-0.45367847411444123, -0.7942779291553135]),
'setosa&0&virginica&12&tree-intercept': np.array([-0.13414634146341475, -0.28861788617886147]),
'setosa&0&virginica&12&tree-no-intercept': np.array([-0.40463215258855567, -0.8705722070844688]),
'setosa&0&virginica&12&logreg-intercept': np.array([-0.6106271777003482, -0.6774099883855986]),
'setosa&0&virginica&12&logreg-no-intercept': np.array([-0.5762942779291551, -0.6035422343324252]),
'setosa&0&virginica&13&tree-intercept': np.array([-0.24912891986062732, -0.5360046457607435]),
'setosa&0&virginica&13&tree-no-intercept': np.array([-0.40463215258855567, -0.8705722070844688]),
'setosa&0&virginica&13&logreg-intercept': np.array([-0.6106271777003482, -0.6774099883855986]),
'setosa&0&virginica&13&logreg-no-intercept': np.array([-0.5762942779291551, -0.6035422343324252]),
'setosa&0&virginica&14&tree-intercept': np.array([-0.4407665505226477, -0.9483159117305445]),
'setosa&0&virginica&14&tree-no-intercept': np.array([-0.40463215258855567, -0.8705722070844688]),
'setosa&0&virginica&14&logreg-intercept': np.array([-1.0270034843205562, -0.30052264808362433]),
'setosa&0&virginica&14&logreg-no-intercept': np.array([-0.9196185286103541, -0.06948228882833794]),
'setosa&0&virginica&15&tree-intercept': np.array([-0.13414634146341475, -0.28861788617886147]),
'setosa&0&virginica&15&tree-no-intercept': np.array([-0.40463215258855567, -0.8705722070844688]),
'setosa&0&virginica&15&logreg-intercept': np.array([-0.6297909407665501, -0.7186411149825784]),
'setosa&0&virginica&15&logreg-no-intercept': np.array([-0.5762942779291551, -0.6035422343324252]),
'setosa&0&virginica&16&tree-intercept': np.array([-0.24912891986062732, -0.5360046457607435]),
'setosa&0&virginica&16&tree-no-intercept': np.array([-0.40463215258855567, -0.8705722070844688]),
'setosa&0&virginica&16&logreg-intercept': np.array([-0.6489547038327523, -0.7598722415795597]),
'setosa&0&virginica&16&logreg-no-intercept': np.array([-0.5762942779291551, -0.6035422343324252]),
'setosa&0&virginica&17&tree-intercept': np.array([-0.36498257839721343, -0.9973867595818819]),
'setosa&0&virginica&17&tree-no-intercept': np.array([-0.3174386920980926, -0.8950953678474115]),
'setosa&0&virginica&17&logreg-intercept': np.array([-1.0844947735191648, -0.4242160278745647]),
'setosa&0&virginica&17&logreg-no-intercept': np.array([-0.9196185286103541, -0.06948228882833794]),
'setosa&1&setosa&0&tree-intercept': np.array([1.7608531994981194, -0.04592220828105419]),
'setosa&1&setosa&0&tree-no-intercept': np.array([1.1988571428571433, -0.5017142857142859]),
'setosa&1&setosa&0&logreg-intercept': np.array([1.7214554579673789, 0.004015056461731351]),
'setosa&1&setosa&0&logreg-no-intercept': np.array([1.1725714285714288, -0.44114285714285734]),
'setosa&1&setosa&1&tree-intercept': np.array([1.7608531994981194, -0.04592220828105419]),
'setosa&1&setosa&1&tree-no-intercept': np.array([1.1988571428571433, -0.5017142857142859]),
'setosa&1&setosa&1&logreg-intercept': np.array([1.7214554579673789, 0.004015056461731351]),
'setosa&1&setosa&1&logreg-no-intercept': np.array([1.1725714285714288, -0.44114285714285734]),
'setosa&1&setosa&2&tree-intercept': np.array([1.7608531994981194, -0.04592220828105419]),
'setosa&1&setosa&2&tree-no-intercept': np.array([1.1988571428571433, -0.5017142857142859]),
'setosa&1&setosa&2&logreg-intercept': np.array([1.7608531994981194, -0.04592220828105419]),
'setosa&1&setosa&2&logreg-no-intercept': np.array([1.1988571428571433, -0.5017142857142859]),
'setosa&1&setosa&3&tree-intercept': np.array([1.5081555834378932, -0.11543287327478088]),
'setosa&1&setosa&3&tree-no-intercept': np.array([0.9600000000000003, -0.5600000000000002]),
'setosa&1&setosa&3&logreg-intercept': np.array([1.614303638644919, -0.06398996235884592]),
'setosa&1&setosa&3&logreg-no-intercept': np.array([1.054857142857143, -0.5177142857142859]),
'setosa&1&setosa&4&tree-intercept': np.array([1.5081555834378932, -0.11543287327478088]),
'setosa&1&setosa&4&tree-no-intercept': np.array([0.9600000000000003, -0.5600000000000002]),
'setosa&1&setosa&4&logreg-intercept': np.array([1.614303638644919, -0.06398996235884592]),
'setosa&1&setosa&4&logreg-no-intercept': np.array([1.054857142857143, -0.5177142857142859]),
'setosa&1&setosa&5&tree-intercept': np.array([1.5081555834378932, -0.11543287327478088]),
'setosa&1&setosa&5&tree-no-intercept': np.array([0.9600000000000003, -0.5600000000000002]),
'setosa&1&setosa&5&logreg-intercept': np.array([1.573902132998746, -0.03061480552070302]),
'setosa&1&setosa&5&logreg-no-intercept': np.array([1.005714285714286, -0.49142857142857155]),
'setosa&1&setosa&6&tree-intercept': np.array([0.7754077791718955, -0.2057716436637395]),
'setosa&1&setosa&6&tree-no-intercept': np.array([0.24000000000000005, -0.64]),
'setosa&1&setosa&6&logreg-intercept': np.array([0.9548306148055205, -0.14529485570890852]),
'setosa&1&setosa&6&logreg-no-intercept': np.array([0.406857142857143, -0.5897142857142859]),
'setosa&1&setosa&7&tree-intercept': np.array([0.7754077791718955, -0.2057716436637395]),
'setosa&1&setosa&7&tree-no-intercept': np.array([0.24000000000000005, -0.64]),
'setosa&1&setosa&7&logreg-intercept': np.array([0.9548306148055205, -0.14529485570890852]),
'setosa&1&setosa&7&logreg-no-intercept': np.array([0.406857142857143, -0.5897142857142859]),
'setosa&1&setosa&8&tree-intercept': np.array([0.7754077791718955, -0.2057716436637395]),
'setosa&1&setosa&8&tree-no-intercept': np.array([0.24000000000000005, -0.64]),
'setosa&1&setosa&8&logreg-intercept': np.array([0.9548306148055205, -0.14529485570890852]),
'setosa&1&setosa&8&logreg-no-intercept': np.array([0.406857142857143, -0.5897142857142859]),
'setosa&1&setosa&9&tree-intercept': np.array([1.8531099380307665, -0.015607069084231005]),
'setosa&1&setosa&9&tree-no-intercept': np.array([1.1089837997054492, -0.9145802650957291]),
'setosa&1&setosa&9&logreg-intercept': np.array([1.6711804758626017, 0.16922959222706882]),
'setosa&1&setosa&9&logreg-no-intercept': np.array([1.0471281296023565, -0.5846833578792343]),
'setosa&1&setosa&10&tree-intercept': np.array([1.8531099380307665, -0.015607069084231005]),
'setosa&1&setosa&10&tree-no-intercept': np.array([1.1089837997054492, -0.9145802650957291]),
'setosa&1&setosa&10&logreg-intercept': np.array([1.6976512891133129, 0.2012087828016237]),
'setosa&1&setosa&10&logreg-no-intercept': np.array([1.0471281296023565, -0.5846833578792343]),
'setosa&1&setosa&11&tree-intercept': np.array([1.8531099380307665, -0.015607069084231005]),
'setosa&1&setosa&11&tree-no-intercept': np.array([1.1089837997054492, -0.9145802650957291]),
'setosa&1&setosa&11&logreg-intercept': np.array([1.7614566597812, 0.2204881034350868]),
'setosa&1&setosa&11&logreg-no-intercept': np.array([1.0559646539027983, -0.6318114874815908]),
'setosa&1&setosa&12&tree-intercept': np.array([1.8531099380307665, -0.015607069084231005]),
'setosa&1&setosa&12&tree-no-intercept': np.array([1.1089837997054492, -0.9145802650957291]),
'setosa&1&setosa&12&logreg-intercept': np.array([1.8422461938642845, 0.02907199143141387]),
'setosa&1&setosa&12&logreg-no-intercept': np.array([1.1001472754050075, -0.8674521354933727]),
'setosa&1&setosa&13&tree-intercept': np.array([1.8531099380307665, -0.015607069084231005]),
'setosa&1&setosa&13&tree-no-intercept': np.array([1.1089837997054492, -0.9145802650957291]),
'setosa&1&setosa&13&logreg-intercept': np.array([1.8422461938642845, 0.02907199143141387]),
'setosa&1&setosa&13&logreg-no-intercept': np.array([1.1001472754050075, -0.8674521354933727]),
'setosa&1&setosa&14&tree-intercept': np.array([1.8531099380307665, -0.015607069084231005]),
'setosa&1&setosa&14&tree-no-intercept': np.array([1.1089837997054492, -0.9145802650957291]),
'setosa&1&setosa&14&logreg-intercept': np.array([1.8531099380307665, -0.015607069084231005]),
'setosa&1&setosa&14&logreg-no-intercept': np.array([1.1089837997054492, -0.9145802650957291]),
'setosa&1&setosa&15&tree-intercept': np.array([1.476245122790915, -0.04314895570346513]),
'setosa&1&setosa&15&tree-no-intercept': np.array([0.7378497790868925, -0.9351988217967601]),
'setosa&1&setosa&15&logreg-intercept': np.array([1.6122714405936887, 0.017137173896413307]),
'setosa&1&setosa&15&logreg-no-intercept': np.array([0.8556701030927836, -0.8969072164948455]),
'setosa&1&setosa&16&tree-intercept': np.array([1.476245122790915, -0.04314895570346513]),
'setosa&1&setosa&16&tree-no-intercept': np.array([0.7378497790868925, -0.9351988217967601]),
'setosa&1&setosa&16&logreg-intercept': np.array([1.6122714405936887, 0.017137173896413307]),
'setosa&1&setosa&16&logreg-no-intercept': np.array([0.8556701030927836, -0.8969072164948455]),
'setosa&1&setosa&17&tree-intercept': np.array([1.476245122790915, -0.04314895570346513]),
'setosa&1&setosa&17&tree-no-intercept': np.array([0.7378497790868925, -0.9351988217967601]),
'setosa&1&setosa&17&logreg-intercept': np.array([1.6122714405936887, 0.017137173896413307]),
'setosa&1&setosa&17&logreg-no-intercept': np.array([0.8556701030927836, -0.8969072164948455]),
'setosa&1&versicolor&0&tree-intercept': np.array([-1.1872020075282315, 0.5111668757841914]),
'setosa&1&versicolor&0&tree-no-intercept': np.array([-1.1988571428571433, 0.5017142857142859]),
'setosa&1&versicolor&0&logreg-intercept': np.array([-1.2996235884567129, 0.25621079046424133]),
'setosa&1&versicolor&0&logreg-no-intercept': np.array([-1.1462857142857146, 0.3805714285714287]),
'setosa&1&versicolor&1&tree-intercept': np.array([-0.9322459222082816, 0.7179422835633638]),
'setosa&1&versicolor&1&tree-no-intercept': np.array([-1.1988571428571433, 0.5017142857142859]),
'setosa&1&versicolor&1&logreg-intercept': np.array([-1.2358845671267256, 0.30790464240903437]),
'setosa&1&versicolor&1&logreg-no-intercept': np.array([-1.1462857142857146, 0.3805714285714287]),
'setosa&1&versicolor&2&tree-intercept': np.array([-0.8047678795483063, 0.8213299874529498]),
'setosa&1&versicolor&2&tree-no-intercept': np.array([-1.1988571428571433, 0.5017142857142859]),
'setosa&1&versicolor&2&logreg-intercept': np.array([-1.2752823086574652, 0.35784190715181996]),
'setosa&1&versicolor&2&logreg-no-intercept': np.array([-1.1725714285714288, 0.44114285714285734]),
'setosa&1&versicolor&3&tree-intercept': np.array([-0.9749058971141784, 0.6140526976160612]),
'setosa&1&versicolor&3&tree-no-intercept': np.array([-1.0091428571428573, 0.5862857142857144]),
'setosa&1&versicolor&3&logreg-intercept': np.array([-1.16060225846926, 0.35006273525721504]),
'setosa&1&versicolor&3&logreg-no-intercept': np.array([-1.0285714285714287, 0.4571428571428573]),
'setosa&1&versicolor&4&tree-intercept': np.array([-0.7199498117942287, 0.8208281053952332]),
'setosa&1&versicolor&4&tree-no-intercept': np.array([-1.0091428571428573, 0.5862857142857144]),
'setosa&1&versicolor&4&logreg-intercept': np.array([-0.9693851944792976, 0.5051442910915944]),
'setosa&1&versicolor&4&logreg-no-intercept': np.array([-1.0285714285714287, 0.4571428571428573]),
'setosa&1&versicolor&5&tree-intercept': np.array([-0.6328732747804271, 0.9575909661229628]),
'setosa&1&versicolor&5&tree-no-intercept': np.array([-1.0582857142857145, 0.6125714285714288]),
'setosa&1&versicolor&5&logreg-intercept': np.array([-0.8577164366373908, 0.44767879548306233]),
'setosa&1&versicolor&5&logreg-no-intercept': np.array([-0.9531428571428574, 0.3702857142857144]),
'setosa&1&versicolor&6&tree-intercept': np.array([-0.3229611041405272, 0.7711417816813058]),
'setosa&1&versicolor&6&tree-no-intercept': np.array([-0.3874285714285715, 0.7188571428571429]),
'setosa&1&versicolor&6&logreg-intercept': np.array([0.2928481806775409, 0.7319949811794241]),
'setosa&1&versicolor&6&logreg-no-intercept': np.array([-0.2982857142857143, 0.2525714285714286]),
'setosa&1&versicolor&7&tree-intercept': np.array([-0.10840652446675068, 1.0112923462986212]),
'setosa&1&versicolor&7&tree-no-intercept': np.array([-0.43657142857142867, 0.7451428571428572]),
'setosa&1&versicolor&7&logreg-intercept': np.array([0.2928481806775409, 0.7319949811794241]),
'setosa&1&versicolor&7&logreg-no-intercept': np.array([-0.2982857142857143, 0.2525714285714286]),
'setosa&1&versicolor&8&tree-intercept': np.array([-0.14253450439146828, 1.2481806775407795]),
'setosa&1&versicolor&8&tree-no-intercept': np.array([-0.6331428571428572, 0.8502857142857143]),
'setosa&1&versicolor&8&logreg-intercept':
|
np.array([0.5741530740276033, 0.2735257214554584])
|
numpy.array
|
import numpy as np
import copy
class Coords(object):
'''Base class for coordinates.
'''
_coordinate_types = {}
def copy(self):
'''Make a copy.
'''
return copy.deepcopy(self)
@classmethod
def from_dict(cls, tree):
coordinate_class = Coords._coordinate_types[tree['type']]
return coordinate_class.from_dict(tree)
def to_dict(self):
raise NotImplementedError()
def __add__(self, b):
'''Add `b` to the coordinates separately and return the result.
'''
res = self.copy()
res += b
return res
def __iadd__(self, b):
'''Add `b` to the coordinates separately in-place.
'''
raise NotImplementedError()
def __radd__(self, b):
'''Add `b` to the coordinates separately and return the result.
'''
return self + b
def __sub__(self, b):
'''Subtract `b` from the coordinates separately and return the result.
'''
return self + (-b)
def __isub__(self, b):
'''Subtract `b` from the coordinates separately in-place.
'''
self += (-b)
return self
def __mul__(self, f):
'''Multiply each coordinate with `f` separately and return the result.
'''
res = self.copy()
res *= f
return res
def __rmul__(self, f):
'''Multiply each coordinate with `f` separately and return the result.
'''
return self * f
def __imul__(self, f):
'''Multiply each coordinate with `f` separately in-place.
'''
raise NotImplementedError()
def __div__(self, f):
'''Divide each coordinate with `f` separately and return the result.
'''
return self * (1./f)
def __idiv__(self, f):
'''Divide each coordinate with `f` separately in-place.
'''
self *= (1./f)
return self
# Python 3 looks up __truediv__/__itruediv__ rather than __div__/__idiv__ for the / operator.
__truediv__ = __div__
__itruediv__ = __idiv__
def __getitem__(self, i):
'''The `i`-th point for these coordinates.
'''
raise NotImplementedError()
@property
def is_separated(self):
'''True if the coordinates are separated, False otherwise.
'''
return hasattr(self, 'separated_coords')
@property
def is_regular(self):
'''True if the coordinates are regularly-spaced, False otherwise.
'''
return hasattr(self, 'regular_coords')
@property
def is_unstructured(self):
'''True if the coordinates are not structured, False otherwise.
'''
return not self.is_separated
def reverse(self):
'''Reverse the ordering of points in-place.
'''
raise NotImplementedError()
@property
def size(self):
'''The number of points.
'''
raise NotImplementedError()
def __len__(self):
'''The number of dimensions.
'''
raise NotImplementedError()
def _add_coordinate_type(coordinate_type, coordinate_class):
Coords._coordinate_types[coordinate_type] = coordinate_class
class UnstructuredCoords(Coords):
'''An unstructured list of points.
Parameters
----------
coords : list or tuple
A tuple of a list of positions for each dimension.
'''
def __init__(self, coords):
self.coords = [np.array(c) for c in coords]
@classmethod
def from_dict(cls, tree):
'''Make an UnstructuredCoords from a dictionary, previously created by `to_dict()`.
Parameters
----------
tree : dictionary
The dictionary from which to make a new UnstructuredCoords object.
Returns
-------
UnstructuredCoords
The created object.
Raises
------
ValueError
If the dictionary is not formatted correctly.
'''
if tree['type'] != 'unstructured':
raise ValueError('The type of coordinates should be "unstructured".')
return cls(tree['coords'])
def to_dict(self):
'''Convert the object to a dictionary for serialization.
Returns
-------
dictionary
The created dictionary.
'''
tree = {
'type': 'unstructured',
'coords': self.coords
}
return tree
@property
def size(self):
'''The number of points.
'''
return self.coords[0].size
def __len__(self):
'''The number of dimensions.
'''
return len(self.coords)
def __getitem__(self, i):
'''The `i`-th point for these coordinates.
'''
return self.coords[i]
def __iadd__(self, b):
'''Add `b` to the coordinates separately in-place.
'''
b = np.ones(len(self.coords)) * b
for i in range(len(self.coords)):
self.coords[i] += b[i]
return self
def __imul__(self, f):
'''Multiply each coordinate with `f` separately in-place.
'''
f = np.ones(len(self.coords)) * f
for i in range(len(self.coords)):
self.coords[i] *= f[i]
return self
def reverse(self):
'''Reverse the ordering of points in-place.
'''
for i in range(len(self.coords)):
self.coords[i] = self.coords[i][::-1]
return self
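# Hypothetical usage sketch (names prefixed `example_` are illustrative only): the
# arithmetic operators defined on Coords fall through to the in-place methods above.
example_pts = UnstructuredCoords([np.array([0.0, 1.0, 2.0]), np.array([10.0, 11.0, 12.0])])
example_shifted = example_pts + np.array([1.0, -1.0])  # __add__ copies, then __iadd__ shifts each dimension
example_scaled = example_pts * 2  # __mul__ copies, then __imul__ scales each dimension
# example_shifted[0] -> array([1., 2., 3.]); example_scaled[1] -> array([20., 22., 24.])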
class SeparatedCoords(Coords):
'''A list of points that are separable along each dimension.
The actual points are given by the iterated tensor product of the `separated_coords`.
Parameters
----------
separated_coords : list or tuple
A tuple of a list of coordinates along each dimension.
Attributes
----------
separated_coords
A tuple of a list of coordinates along each dimension.
'''
def __init__(self, separated_coords):
# Make a copy to avoid modification from outside the class
self.separated_coords = [copy.deepcopy(s) for s in separated_coords]
@classmethod
def from_dict(cls, tree):
'''Make a SeparatedCoords from a dictionary, previously created by `to_dict()`.
Parameters
----------
tree : dictionary
The dictionary from which to make a new SeparatedCoords object.
Returns
-------
SeparatedCoords
The created object.
Raises
------
ValueError
If the dictionary is not formatted correctly.
'''
if tree['type'] != 'separated':
raise ValueError('The type of coordinates should be "separated".')
return cls(tree['separated_coords'])
def to_dict(self):
'''Convert the object to a dictionary for serialization.
Returns
-------
dictionary
The created dictionary.
'''
tree = {
'type': 'separated',
'separated_coords': self.separated_coords
}
return tree
def __getitem__(self, i):
'''The `i`-th point for these coordinates.
'''
s0 = (1,) * len(self)
j = len(self) - i - 1
output = self.separated_coords[i].reshape(s0[:j] + (-1,) + s0[j + 1:])
return np.broadcast_to(output, self.shape).ravel()
@property
def size(self):
'''The number of points.
'''
return np.prod(self.shape)
def __len__(self):
'''The number of dimensions.
'''
return len(self.separated_coords)
@property
def dims(self):
'''The number of points along each dimension.
'''
return np.array([len(c) for c in self.separated_coords])
@property
def shape(self):
'''The shape of a ``numpy.ndarray`` with the right dimensions.
'''
return self.dims[::-1]
def __iadd__(self, b):
'''Add `b` to the coordinates separately in-place.
'''
for i in range(len(self)):
self.separated_coords[i] += b[i]
return self
def __imul__(self, f):
'''Multiply each coordinate with `f` separately in-place.
'''
if np.isscalar(f):
for i in range(len(self)):
self.separated_coords[i] *= f
else:
for i in range(len(self)):
self.separated_coords[i] *= f[i]
return self
def reverse(self):
'''Reverse the ordering of points in-place.
'''
for i in range(len(self)):
self.separated_coords[i] = self.separated_coords[i][::-1]
return self
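# Hypothetical usage sketch (illustrative values): __getitem__ expands the separable axes
# by the iterated tensor product described in the class docstring.
example_sep = SeparatedCoords([np.array([1.0, 2.0, 3.0]), np.array([10.0, 20.0])])
# example_sep.dims -> array([3, 2]); example_sep.shape -> array([2, 3]); example_sep.size -> 6
# example_sep[0] -> array([1., 2., 3., 1., 2., 3.])       (fast axis repeats per row)
# example_sep[1] -> array([10., 10., 10., 20., 20., 20.]) (slow axis repeats per column)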
class RegularCoords(Coords):
'''A list of points that have a regular spacing in all dimensions.
Parameters
----------
delta : array_like
The spacing between the points.
dims : array_like
The number of points along each dimension.
zero : array_like
The coordinates for the first point.
Attributes
----------
delta
The spacing between the points.
dims
The number of points along each dimension.
zero
The coordinates for the first point.
'''
def __init__(self, delta, dims, zero=None):
if np.isscalar(dims):
self.dims = np.array([dims]).astype('int')
else:
self.dims = np.array(dims).astype('int')
if
|
np.isscalar(delta)
|
numpy.isscalar
|
import numpy as np
from scipy.stats import norm as norm
from scipy.optimize import fmin_bfgs
from copy import deepcopy
class GridDistribution:
def __init__(self, x, y):
self.x = x
self.y = y
def pdf(self, data):
# Find the closest bins
rhs =
|
np.searchsorted(self.x, data)
|
numpy.searchsorted
|
# encoding: utf-8
"""
@author: <NAME>
@contact: <EMAIL>
@file: demo.py
@time: 6/2/21 10:49 AM
@desc: Circle Dynamics 圆圈动力学
Relax the system of circles and container with Newtonian mechanics to reach a higher-entropy state.
Assume mass m=1 and time step t=1 in each iteration, so acceleration = force, V = V0 + a*t = V0 + force, X = X0 + V
input: [W, L] width and length of the rectangular container
input: [1, 2, 3, ...] radii of the circles
requirements: numpy, random, matplotlib
"""
import numpy as np
from random import uniform, randint
from matplotlib import animation
import matplotlib.pyplot as plt
import heapq
class Circle:
def __init__(self, x, y, radius):
self.radius = radius
self.acceleration = np.array([0, 0])
self.velocity = np.array([uniform(0, 1), # v0
uniform(0, 1)])
self.position = np.array([x, y])
@property
def x(self):
return self.position[0]
@property
def y(self):
return self.position[1]
@property
def r(self):
return self.radius
def apply_force(self, force):
# assume m=1, then acceleration=force
self.acceleration = np.add(self.acceleration, force)
def update(self):
# assume t=1, then v=v0+a*1
self.velocity = np.add(self.velocity, self.acceleration)
# x=x0+v*1
self.position = np.add(self.position, self.velocity)
self.acceleration = 0
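# Hypothetical usage sketch (illustrative values): with the stated assumptions (m=1, t=1),
# one apply_force followed by update advances velocity and position by exactly that force.
# The constructor randomises the initial velocity, so it is overwritten here for reproducibility.
example_circle = Circle(0.0, 0.0, radius=1.0)
example_circle.velocity = np.array([0.0, 0.0])
example_circle.apply_force(np.array([0.0, -9.8]))
example_circle.update()
# example_circle.velocity -> array([0., -9.8]); example_circle.position -> array([0., -9.8])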
class Pack:
def __init__(self, width, height, list_circles, gravity_rate, collision_rate, elastic_rate, friction_rate):
self.list_circles = list_circles # list(Circle)
self.right_border = width
self.upper_border = height
self.gravity_rate = gravity_rate
self.collision_rate = collision_rate
self.elastic_rate = elastic_rate
self.friction_rate = friction_rate
# [[x, y], [x, y], ...] the accumulated collision (separation) force on each circle, built up from the individual forces of its neighbors.
self.list_separate_forces = [np.array([0, 0])] * len(self.list_circles)
self.list_near_circle = [0] * len(self.list_circles)  # number of colliding neighbors of each circle
radius = [c.r for c in self.list_circles]
num = len(radius)
self.max_num_index_list = list(map(radius.index, heapq.nlargest(int(num*0.1), radius)))
self.min_num_index_list = list(map(radius.index, heapq.nsmallest(int(num*0.3), radius)))
@property
def get_list_circle(self):
return self.list_circles
def _normalize(self, v):
norm = np.linalg.norm(v)
if norm == 0:
return v
return v / norm
def run(self):
# run one iteration; called externally, e.g. by matplotlib.animation
for circle in self.list_circles:
in_container = self.check_borders(circle)
self.apply_separation_forces_to_circle(circle)
self.anisotropy_gravity(circle)
if in_container:
self.check_circle_positions(circle)
def check_borders(self, circle):
in_container = True
orientation = np.array([0, 0])  # orientation of the reaction force, which is perpendicular to the collision surface
if circle.x <= 0 + circle.r:
orientation =
|
np.add([1, 0], orientation)
|
numpy.add
|
"""
Utilities to convert between strike/dip, etc and points/lines in lat, long
space.
A stereonet in <long,lat> coordinates:
<0,90>
***
* *
<-90,0> * *<90,0>
* *
* *
***
<0,-90>
If strike=0, plotting lines, rakes, planes or poles to planes is simple. For a
plane, it's a line of constant longitude at long=90-dip. For a line, it's a
point at long=0,lat=90-dip. For a rake, it's a point at long=90-dip,
lat=90-rake. These points can then be rotated to the proper strike. (A
rotation matrix around the X-axis is much simpler than the trig otherwise
necessary!)
All of these assume that strikes and dips follow the "right-hand-rule".
In other words, if we're facing in the direction given for the strike, the plane
dips to our right.
"""
import numpy as np
def sph2cart(lon, lat):
"""
Converts a longitude and latitude (or sequence of lons and lats) given in
_radians_ to cartesian coordinates, `x`, `y`, `z`, where x=0, y=0, z=0 is
the center of the globe.
Parameters
----------
lon : array-like
Longitude in radians
lat : array-like
Latitude in radians
Returns
-------
`x`, `y`, `z` : Arrays of cartesian coordinates
"""
x = np.cos(lat)*np.cos(lon)
y = np.cos(lat)*np.sin(lon)
z = np.sin(lat)
return x, y, z
def cart2sph(x, y, z):
"""
Converts cartesian coordinates `x`, `y`, `z` into a longitude and latitude.
x=0, y=0, z=0 is assumed to correspond to the center of the globe.
Returns lon and lat in radians.
Parameters
----------
`x`, `y`, `z` : Arrays of cartesian coordinates
Returns
-------
lon : Longitude in radians
lat : Latitude in radians
"""
r = np.sqrt(x**2 + y**2 + z**2)
lat = np.arcsin(z/r)
lon = np.arctan2(y, x)
return lon, lat
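# Hypothetical usage sketch (illustrative values): cart2sph inverts sph2cart, so a round
# trip recovers the original angles (in radians) for any point away from the poles.
example_lon, example_lat = np.radians(30.0), np.radians(45.0)
example_x, example_y, example_z = sph2cart(example_lon, example_lat)
# cart2sph(example_x, example_y, example_z) -> (0.5235..., 0.7853...), i.e. 30 and 45 degrees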
def _rotate(lon, lat, theta, axis='x'):
"""
Rotate "lon", "lat" coords (in _degrees_) about the X-axis by "theta"
degrees. This effectively simulates rotating a physical stereonet.
Returns rotated lon, lat coords in _radians_).
"""
# Convert input to numpy arrays in radians
lon, lat = np.atleast_1d(lon, lat)
lon, lat = map(np.radians, [lon, lat])
theta = np.radians(theta)
# Convert to cartesian coords for the rotation
x, y, z = sph2cart(lon, lat)
lookup = {'x':_rotate_x, 'y':_rotate_y, 'z':_rotate_z}
X, Y, Z = lookup[axis](x, y, z, theta)
# Now convert back to spherical coords (longitude and latitude, ignore R)
lon, lat = cart2sph(X,Y,Z)
return lon, lat # in radians!
def _rotate_x(x, y, z, theta):
X = x
Y = y*np.cos(theta) + z*np.sin(theta)
Z = -y*np.sin(theta) + z*np.cos(theta)
return X, Y, Z
def _rotate_y(x, y, z, theta):
X = x*np.cos(theta) + -z*np.sin(theta)
Y = y
Z = x*np.sin(theta) + z*np.cos(theta)
return X, Y, Z
def _rotate_z(x, y, z, theta):
X = x*np.cos(theta) + -y*np.sin(theta)
Y = x*np.sin(theta) + y*np.cos(theta)
Z = z
return X, Y, Z
def antipode(lon, lat):
"""
Calculates the antipode (opposite point on the globe) of the given point or
points. Input and output is expected to be in radians.
Parameters
----------
lon : number or sequence of numbers
Longitude in radians
lat : number or sequence of numbers
Latitude in radians
Returns
-------
lon, lat : arrays
Sequences (regardless of whether or not the input was a single value or
a sequence) of longitude and latitude in radians.
"""
x, y, z = sph2cart(lon, lat)
return cart2sph(-x, -y, -z)
def plane(strike, dip, segments=100, center=(0, 0)):
"""
Calculates the longitude and latitude of `segments` points along the
stereonet projection of each plane with a given `strike` and `dip` in
degrees. Returns points for one hemisphere only.
Parameters
----------
strike : number or sequence of numbers
The strike of the plane(s) in degrees, with dip direction indicated by
the azimuth (e.g. 315 vs. 135) specified following the "right hand
rule".
dip : number or sequence of numbers
The dip of the plane(s) in degrees.
segments : number or sequence of numbers
The number of points in the returned `lon` and `lat` arrays. Defaults
to 100 segments.
center : sequence of two numbers (lon, lat)
The longitude and latitude of the center of the hemisphere that the
returned points will be in. Defaults to 0,0 (appropriate for a typical
stereonet).
Returns
-------
lon, lat : arrays
`num_segments` x `num_strikes` arrays of longitude and latitude in
radians.
"""
lon0, lat0 = center
strikes, dips = np.atleast_1d(strike, dip)
lons = np.zeros((segments, strikes.size), dtype=float)
lats = lons.copy()
for i, (strike, dip) in enumerate(zip(strikes, dips)):
# We just plot a line of constant longitude and rotate it by the strike.
dip = 90 - dip
lon = dip * np.ones(segments)
lat = np.linspace(-90, 90, segments)
lon, lat = _rotate(lon, lat, strike)
if lat0 != 0 or lon0 != 0:
dist = angular_distance([lon, lat], [lon0, lat0], False)
mask = dist > (np.pi / 2)
lon[mask], lat[mask] = antipode(lon[mask], lat[mask])
change = np.diff(mask.astype(int))
ind = np.flatnonzero(change) + 1
lat = np.hstack(np.split(lat, ind)[::-1])
lon = np.hstack(np.split(lon, ind)[::-1])
lons[:,i] = lon
lats[:,i] = lat
return lons, lats
def pole(strike, dip):
"""
Calculates the longitude and latitude of the pole(s) to the plane(s)
specified by `strike` and `dip`, given in degrees.
Parameters
----------
strike : number or sequence of numbers
The strike of the plane(s) in degrees, with dip direction indicated by
the azimuth (e.g. 315 vs. 135) specified following the "right hand
rule".
dip : number or sequence of numbers
The dip of the plane(s) in degrees.
Returns
-------
lon, lat : Arrays of longitude and latitude in radians.
"""
strike, dip = np.atleast_1d(strike, dip)
mask = dip > 90
dip[mask] = 180 - dip[mask]
strike[mask] += 180
# Plot the appropriate point for a strike of 0 and rotate it
lon, lat = -dip, 0.0
lon, lat = _rotate(lon, lat, strike)
return lon, lat
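# Hypothetical usage sketch (illustrative values): per the module docstring, the pole of a
# plane with strike 0 and dip 30 sits at longitude -30 degrees on the stereonet.
example_pole_lon, example_pole_lat = pole(0, 30)
# -> (array([-0.5235...]), array([0.])), i.e. lon = -30 degrees and lat = 0, in radians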
def rake(strike, dip, rake_angle):
"""
Calculates the longitude and latitude of the linear feature(s) specified by
`strike`, `dip`, and `rake_angle`.
Parameters
----------
strike : number or sequence of numbers
The strike of the plane(s) in degrees, with dip direction indicated by
the azimuth (e.g. 315 vs. 135) specified following the "right hand
rule".
dip : number or sequence of numbers
The dip of the plane(s) in degrees.
rake_angle : number or sequence of numbers
The angle of the lineation on the plane measured in degrees downward
from horizontal. Zero degrees corresponds to the "right-hand"
direction indicated by the strike, while 180 degrees or a negative
angle corresponds to the opposite direction.
Returns
-------
lon, lat : Arrays of longitude and latitude in radians.
"""
strike, dip, rake_angle = np.atleast_1d(strike, dip, rake_angle)
# Plot the appropriate point for a strike of 0 and rotate it
dip = 90 - dip
lon = dip
rake_angle = rake_angle.copy()
rake_angle[rake_angle < 0] += 180
lat = 90 - rake_angle
lon, lat = _rotate(lon, lat, strike)
return lon, lat
def line(plunge, bearing):
"""
Calculates the longitude and latitude of the linear feature(s) specified by
`plunge` and `bearing`.
Parameters
----------
plunge : number or sequence of numbers
The plunge of the line(s) in degrees. The plunge is measured in degrees
downward from the end of the feature specified by the bearing.
bearing : number or sequence of numbers
The bearing (azimuth) of the line(s) in degrees.
Returns
-------
lon, lat : Arrays of longitude and latitude in radians.
"""
plunge, bearing = np.atleast_1d(plunge, bearing)
# Plot the appropriate point for a bearing of 0 and rotate it
lat = 90 - plunge
lon = 0
lon, lat = _rotate(lon, lat, bearing)
return lon, lat
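# Hypothetical usage sketch (illustrative values): a line plunging 30 degrees due north
# (bearing 0) plots at lon = 0, lat = 90 - plunge = 60 degrees, matching the rule stated
# in the module docstring.
example_line_lon, example_line_lat = line(30, 0)
# np.degrees(example_line_lon) -> array([0.]); np.degrees(example_line_lat) -> array([60.])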
def cone(plunge, bearing, angle, segments=100):
"""
Calculates the longitude and latitude of the small circle (i.e. a cone)
centered at the given *plunge* and *bearing* with an apical angle of
*angle*, all in degrees.
Parameters
----------
plunge : number or sequence of numbers
The plunge of the center of the cone(s) in degrees. The plunge is
measured in degrees downward from the end of the feature specified by
the bearing.
bearing : number or sequence of numbers
The bearing (azimuth) of the center of the cone(s) in degrees.
angle : number or sequence of numbers
The apical angle (i.e. radius) of the cone(s) in degrees.
segments : int, optional
The number of vertices in the small circle.
Returns
-------
lon, lat : arrays
`num_measurements` x `num_segments` arrays of longitude and latitude in
radians.
"""
plunges, bearings, angles = np.atleast_1d(plunge, bearing, angle)
lons, lats = [], []
for plunge, bearing, angle in zip(plunges, bearings, angles):
lat = (90 - angle) * np.ones(segments, dtype=float)
lon = np.linspace(-180, 180, segments)
lon, lat = _rotate(lon, lat, -plunge, axis='y')
lon, lat = _rotate(np.degrees(lon), np.degrees(lat), bearing, axis='x')
lons.append(lon)
lats.append(lat)
return np.vstack(lons), np.vstack(lats)
def plunge_bearing2pole(plunge, bearing):
"""
Converts the given `plunge` and `bearing` in degrees to a strike and dip
of the plane whose pole would be parallel to the line specified. (i.e. The
pole to the plane returned would plot at the same point as the specified
plunge and bearing.)
Parameters
----------
plunge : number or sequence of numbers
The plunge of the line(s) in degrees. The plunge is measured in degrees
downward from the end of the feature specified by the bearing.
bearing : number or sequence of numbers
The bearing (azimuth) of the line(s) in degrees.
Returns
-------
strike, dip : arrays
Arrays of strikes and dips in degrees following the right-hand-rule.
"""
plunge, bearing = np.atleast_1d(plunge, bearing)
strike = bearing + 90
dip = 90 - plunge
strike[strike >= 360] -= 360
return strike, dip
def pole2plunge_bearing(strike, dip):
"""
Converts the given *strike* and *dip* in degrees of a plane(s) to a plunge
and bearing of its pole.
Parameters
----------
strike : number or sequence of numbers
The strike of the plane(s) in degrees, with dip direction indicated by
the azimuth (e.g. 315 vs. 135) specified following the "right hand
rule".
dip : number or sequence of numbers
The dip of the plane(s) in degrees.
Returns
-------
plunge, bearing : arrays
Arrays of plunges and bearings of the pole to the plane(s) in degrees.
"""
strike, dip = np.atleast_1d(strike, dip)
bearing = strike - 90
plunge = 90 - dip
bearing[bearing < 0] += 360
return plunge, bearing
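# Hypothetical usage sketch (illustrative values): plunge_bearing2pole and
# pole2plunge_bearing are inverses of each other.
example_strike, example_dip = plunge_bearing2pole(60, 90)  # -> (array([180]), array([30]))
example_plunge, example_bearing = pole2plunge_bearing(example_strike, example_dip)  # -> (array([60]), array([90]))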
def mean_vector(lons, lats):
"""
Returns the resultant vector from a series of longitudes and latitudes
Parameters
----------
lons : array-like
A sequence of longitudes (in radians)
lats : array-like
A sequence of latitudes (in radians)
Returns
-------
mean_vec : tuple
(lon, lat) in radians
r_value : number
The magnitude of the resultant vector (between 0 and 1) This represents
the degree of clustering in the data.
"""
xyz = sph2cart(lons, lats)
xyz = np.vstack(xyz).T
mean_vec = xyz.mean(axis=0)
r_value = np.linalg.norm(mean_vec)
mean_vec = cart2sph(*mean_vec)
return mean_vec, r_value
def fisher_stats(lons, lats, conf=95):
"""
Returns the resultant vector from a series of longitudes and latitudes. If
a confidence is set the function additionally returns the opening angle
of the confidence small circle (Fisher, 19..) and the dispersion factor
(kappa).
Parameters
----------
lons : array-like
A sequence of longitudes (in radians)
lats : array-like
A sequence of latitudes (in radians)
conf : confidence value
The confidence used for the calculation (float). Defaults to 95.
Returns
-------
mean vector: tuple
The point that lies in the center of a set of vectors.
(Longitude, Latitude) in radians.
If 1 vector is passed to the function it returns two None-values. For
more than one vector the following 3 values are returned as a tuple:
r_value: float
The magnitude of the resultant vector (between 0 and 1) This represents
the degree of clustering in the data.
angle: float
The opening angle of the small circle that corresponds to confidence
of the calculated direction.
kappa: float
A measure for the amount of dispersion of a group of layers. For
one vector the factor is undefined. Approaches infinity for nearly
parallel vectors and zero for highly dispersed vectors.
"""
xyz = sph2cart(lons, lats)
xyz = np.vstack(xyz).T
mean_vec = xyz.mean(axis=0)
r_value = np.linalg.norm(mean_vec)
num = xyz.shape[0]
mean_vec = cart2sph(*mean_vec)
if num > 1:
p = (100.0 - conf) / 100.0
vector_sum = xyz.sum(axis=0)
result_vect = np.sqrt(np.sum(np.square(vector_sum)))
fract1 = (num - result_vect) / result_vect
fract3 = 1.0 / (num - 1.0)
angle = np.arccos(1 - fract1 * ((1 / p) ** fract3 - 1))
angle = np.degrees(angle)
kappa = (num - 1.0) / (num - result_vect)
return mean_vec, (r_value, angle, kappa)
else:
return None, None
def geographic2pole(lon, lat):
"""
Converts a longitude and latitude (from a stereonet) into the strike and dip
of the plane whose pole lies at the given longitude(s) and latitude(s).
Parameters
----------
lon : array-like
A sequence of longitudes (or a single longitude) in radians
lat : array-like
A sequence of latitudes (or a single latitude) in radians
Returns
-------
strike : array
A sequence of strikes in degrees
dip : array
A sequence of dips in degrees
"""
plunge, bearing = geographic2plunge_bearing(lon, lat)
strike = bearing + 90
strike[strike >= 360] -= 360
dip = 90 - plunge
return strike, dip
def geographic2plunge_bearing(lon, lat):
"""
Converts longitude and latitude in stereonet coordinates into a
plunge/bearing.
Parameters
----------
lon, lat : numbers or sequences of numbers
Longitudes and latitudes in radians as measured from a
lower-hemisphere stereonet
Returns
-------
plunge : array
The plunge of the vector in degrees downward from horizontal.
bearing : array
The bearing of the vector in degrees clockwise from north.
"""
lon, lat = np.atleast_1d(lon, lat)
x, y, z = sph2cart(lon, lat)
# Bearing will be in the y-z plane...
bearing = np.arctan2(z, y)
# Plunge is the angle between the line and the y-z plane
r = np.sqrt(x*x + y*y + z*z)
r[r == 0] = 1e-15
plunge = np.arcsin(x / r)
# Convert back to azimuths in degrees.
plunge, bearing = np.degrees(plunge), np.degrees(bearing)
bearing = 90 - bearing
bearing[bearing < 0] += 360
# If the plunge angle is upwards, get the opposite end of the line
upwards = plunge < 0
plunge[upwards] *= -1
bearing[upwards] -= 180
bearing[upwards & (bearing < 0)] += 360
return plunge, bearing
def plane_intersection(strike1, dip1, strike2, dip2):
"""
Finds the intersection of two planes. Returns a plunge/bearing of the linear
intersection of the two planes.
Also accepts sequences of strike1s, dip1s, strike2s, dip2s.
Parameters
----------
strike1, dip1 : numbers or sequences of numbers
The strike and dip (in degrees, following the right-hand-rule) of the
first plane(s).
strike2, dip2 : numbers or sequences of numbers
The strike and dip (in degrees, following the right-hand-rule) of the
second plane(s).
Returns
-------
plunge, bearing : arrays
The plunge and bearing(s) (in degrees) of the line representing the
intersection of the two planes.
"""
norm1 = sph2cart(*pole(strike1, dip1))
norm2 = sph2cart(*pole(strike2, dip2))
norm1, norm2 = np.array(norm1), np.array(norm2)
lon, lat = cart2sph(*np.cross(norm1, norm2, axis=0))
return geographic2plunge_bearing(lon, lat)
def project_onto_plane(strike, dip, plunge, bearing):
"""
Projects a linear feature(s) onto the surface of a plane. Returns a rake
angle(s) along the plane.
This is also useful for finding the rake angle of a feature that already
intersects the plane in question.
Parameters
----------
strike, dip : numbers or sequences of numbers
The strike and dip (in degrees, following the right-hand-rule) of the
plane(s).
plunge, bearing : numbers or sequences of numbers
The plunge and bearing (in degrees) or of the linear feature(s) to be
projected onto the plane.
Returns
-------
rake : array
A sequence of rake angles measured downwards from horizontal in
degrees. Zero degrees corresponds to the "right-hand" direction
indicated by the strike, while a negative angle corresponds to the
opposite direction. Rakes returned by this function will always be
between -90 and 90 (inclusive).
"""
# Project the line onto the plane
norm = sph2cart(*pole(strike, dip))
feature = sph2cart(*line(plunge, bearing))
norm, feature = np.array(norm), np.array(feature)
perp = np.cross(norm, feature, axis=0)
on_plane = np.cross(perp, norm, axis=0)
on_plane /= np.sqrt(np.sum(on_plane**2, axis=0))
# Calculate the angle between the projected feature and horizontal
# This is just a dot product, but we need to work with multiple measurements
# at once, so einsum is quicker than apply_along_axis.
strike_vec = sph2cart(*line(0, strike))
dot = np.einsum('ij,ij->j', on_plane, strike_vec)
rake = np.degrees(np.arccos(dot))
# Convert rakes over 90 to negative rakes...
rake[rake > 90] -= 180
rake[rake < -90] += 180
return rake
def azimuth2rake(strike, dip, azimuth):
"""
Projects an azimuth of a linear feature onto a plane as a rake angle.
Parameters
----------
strike, dip : numbers
The strike and dip of the plane in degrees following the
right-hand-rule.
azimuth : numbers
The azimuth of the linear feature in degrees clockwise from north (i.e.
a 0-360 azimuth).
Returns
-------
rake : number
A rake angle in degrees measured downwards from horizontal. Negative
values correspond to the opposite end of the strike.
"""
plunge, bearing = plane_intersection(strike, dip, azimuth, 90)
rake = project_onto_plane(strike, dip, plunge, bearing)
return rake
def xyz2stereonet(x, y, z):
"""
Converts x, y, z in _world_ cartesian coordinates into lower-hemisphere
stereonet coordinates.
Parameters
----------
x, y, z : array-likes
Sequences of world coordinates
Returns
-------
lon, lat : arrays
Sequences of longitudes and latitudes (in radians)
"""
x, y, z = np.atleast_1d(x, y, z)
return cart2sph(-z, x, y)
def stereonet2xyz(lon, lat):
"""
Converts a sequence of longitudes and latitudes from a lower-hemisphere
stereonet into _world_ x,y,z coordinates.
Parameters
----------
lon, lat : array-likes
Sequences of longitudes and latitudes (in radians) from a
lower-hemisphere stereonet
Returns
-------
x, y, z : arrays
The world x,y,z components of the vectors represented by the lon, lat
coordinates on the stereonet.
"""
lon, lat =
|
np.atleast_1d(lon, lat)
|
numpy.atleast_1d
|
# -*- coding: utf-8 -*-
#
# Copyright 2019 Klimaat
from __future__ import division
import datetime
import numpy as np
import matplotlib.pyplot as plt
def join_date(y=1970, m=1, d=1, hh=0, mm=0, ss=0):
"""
Join date/time components into datetime64 object
"""
y = (np.asarray(y) - 1970).astype("<M8[Y]")
m = (np.asarray(m) - 1).astype("<m8[M]")
d = (np.asarray(d) - 1).astype("<m8[D]")
hh = np.asarray(hh).astype("<m8[h]")
mm = np.asarray(mm).astype("<m8[m]")
ss = np.asarray(ss).astype("<m8[s]")
return y + m + d + hh + mm + ss
def split_date(dates):
"""
Split datetime64 dates into year, month, day components.
"""
y = dates.astype("<M8[Y]").astype(int) + 1970
m = dates.astype("<M8[M]").astype(int) % 12 + 1
d = (dates - dates.astype("<M8[M]")).astype("<m8[D]").astype(int) + 1
return y, m, d
def split_time(dates):
"""
Split datetime64 dates into hour, minute, second components.
"""
hh = (dates - dates.astype("<M8[D]")).astype("<m8[h]").astype(int)
mm = (dates - dates.astype("<M8[h]")).astype("<m8[m]").astype(int)
ss = (dates - dates.astype("<M8[m]")).astype("<m8[s]").astype(int)
return hh, mm, ss
def day_of_year(dates, snap=True):
"""
Calculate the day of the year (1-365/366 when snapped to an integer, fractional 0-based otherwise)
"""
dt = np.asarray(dates) - dates.astype("<M8[Y]")
if snap:
# Provide value at noon (integer)
# Jan 1st anytime = 1
return dt.astype("<m8[D]").astype(int) + 1
else:
# Provide value including fractional part (float)
# Jan 1st at 00:00 = 0, Jan 1st at noon = 0.5
return dt.astype("<m8[s]").astype(int) / 86400
def julian_day(dates):
"""
Julian day calculator
"""
# Get Julian Day number
y, m, d = split_date(dates)
a = (14 - m) // 12
y += 4800 - a
m += 12 * a - 3
jd = d + ((153 * m + 2) // 5) + 365 * y + y // 4 - y // 100 + y // 400 - 32045
# Get fractional day (noon=0)
hh, mm, ss = split_time(dates)
fd = (hh - 12) / 24 + mm / 1440 + ss / 86400
return jd, fd
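# Hypothetical sanity check (illustrative values) for the calendar helpers above: noon UTC
# on 2000-01-01 is Julian day 2451545 with zero fractional part, and day 1 of that year.
example_utc = join_date(2000, 1, 1, 12)
# julian_day(example_utc) -> (2451545, 0.0)
# day_of_year(example_utc) -> 1
# day_of_year(example_utc, snap=False) -> 0.5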
def orbit_ashrae(utc):
"""
Calculate solar parameters based on ASHRAE methodology.
Ref. ASHRAE HOF 2017, Chap 14
"""
# Day of year
n = day_of_year(utc, snap=True)
# Declination (eqn. 10, radians)
decl = np.radians(23.45 * np.sin(2 * np.pi * (n + 284) / 365))
# Equation of time (eqns 5 & 6, min)
gamma = 2 * np.pi * (n - 1) / 365
eqnOfTime = 2.2918 * (
0.0075
+ 0.1868 * np.cos(gamma)
- 3.2077 * np.sin(gamma)
- 1.4615 * np.cos(2 * gamma)
- 4.089 * np.sin(2 * gamma)
)
# Convert from minutes to radians
eqnOfTime *= np.pi / (60 * 12)
# Solar constant correction
solFactor = 1 + 0.033 * np.cos(np.radians(360 * (n - 3) / 365))
return np.sin(decl), np.cos(decl), eqnOfTime, solFactor
def orbit_energyplus(utc):
"""
Calculate solar coefficients based on EnergyPlus
Ref. WeatherManager.cc, function CalculateDailySolarCoeffs
"""
# Day of year
n = day_of_year(utc, snap=True)
# Day Angle
D = 2 * np.pi * n / 366.0
sinD = np.sin(D)
cosD = np.cos(D)
# Calculate declination sines & cosines
sinDec = (
0.00561800
+ 0.0657911 * sinD
- 0.392779 * cosD
+ 0.00064440 * (sinD * cosD * 2.0)
- 0.00618495 * (cosD ** 2 - sinD ** 2)
- 0.00010101 * (sinD * (cosD ** 2 - sinD ** 2) + cosD * (sinD * cosD * 2.0))
- 0.00007951 * (cosD * (cosD ** 2 - sinD ** 2) - sinD * (sinD * cosD * 2.0))
- 0.00011691 * (2.0 * (sinD * cosD * 2.0) * (cosD ** 2 - sinD ** 2))
+ 0.00002096 * ((cosD ** 2 - sinD ** 2) ** 2 - (sinD * cosD * 2.0) ** 2)
)
cosDec = np.sqrt(1 - sinDec ** 2)
# Equation of time (hours)
eqnOfTime = (
0.00021971
- 0.122649 * sinD
+ 0.00762856 * cosD
- 0.156308 * (sinD * cosD * 2.0)
- 0.0530028 * (cosD ** 2 - sinD ** 2)
- 0.00388702 * (sinD * (cosD ** 2 - sinD ** 2) + cosD * (sinD * cosD * 2.0))
- 0.00123978 * (cosD * (cosD ** 2 - sinD ** 2) - sinD * (sinD * cosD * 2.0))
- 0.00270502 * (2.0 * (sinD * cosD * 2.0) * (cosD ** 2 - sinD ** 2))
- 0.00167992 * ((cosD ** 2 - sinD ** 2) ** 2 - (sinD * cosD * 2.0) ** 2)
)
# Convert to radians
eqnOfTime = np.pi * eqnOfTime / 12
# Solar constant correction factor
solFactor = 1.000047 + 0.000352615 * sinD + 0.0334454 * cosD
return sinDec, cosDec, eqnOfTime, solFactor
def orbit_cfsr(utc):
"""
Calculate solar coefficients based on CFSR methodology
Ref. radiation_astronomy.f, subroutine solar
"""
# Get julian day and fractional part of day
jd, fjd = julian_day(utc)
# Julian day of the epoch, which is January 0, 1900 at 12 hours UTC
jdor = 2415020
# Days per year
cyear = 365.25
# Days between the epoch and the perihelion passage of 1900
tpp = 1.55
# Days between perihelion passage and the March equinox of 1900
svt6 = 78.035
# Julian centuries after epoch
t1 = (jd - jdor) / 36525.0
# Length of anomalistic and tropical years (minus 365 days)
ayear = 0.25964134e0 + 0.304e-5 * t1
tyear = 0.24219879e0 - 0.614e-5 * t1
# Orbit eccentricity and earth's inclination (deg)
ec = 0.01675104e0 - (0.418e-4 + 0.126e-6 * t1) * t1
angin = 23.452294e0 - (0.0130125e0 + 0.164e-5 * t1) * t1
ador = jdor
jdoe = np.asarray(ador + (svt6 * cyear) / (ayear - tyear), dtype=int)
# deleqn is updated svt6 for current date
deleqn = (jdoe - jd) * (ayear - tyear) / cyear
ayear = ayear + 365
sni = np.sin(np.radians(angin))
tini = 1 / np.tan(np.radians(angin))
er = np.sqrt((1 + ec) / (1 - ec))
# mean anomaly
qq = deleqn * 2 * np.pi / ayear
def solve_kepler(e, M, E=1, eps=1.3e-6):
"""
Solve Kepler equation for eccentric anomaly E by Newton's method
based on eccentricity e and mean anomaly M
"""
for i in range(10):
dE = -(E - e * np.sin(E) - M) / (1 - e * np.cos(E))
E += dE
dEmax = np.max(np.abs(dE))
if dEmax < eps:
break
else:
print("Warning: Exceeding 10 iterations in Kepler solver:", dEmax)
return E
# Eccentric anomaly at equinox
e1 = solve_kepler(ec, qq)
# True anomaly at equinox
eq = 2.0 * np.arctan(er * np.tan(0.5 * e1))
# Date is days since last perihelion passage
dat = jd - jdor - tpp + fjd
date = dat % ayear
# Mean anomaly
em = 2 * np.pi * date / ayear
# Eccentric anomaly
e1 = solve_kepler(ec, em)
# True anomaly
w1 = 2.0 * np.arctan(er * np.tan(0.5 * e1))
# Earth-Sun radius relative to mean radius
r1 = 1.0 - ec * np.cos(e1)
# Sine of declination angle
# NB. ecliptic longitude = w1 - eq
sdec = sni * np.sin(w1 - eq)
# Cosine of declination angle
cdec = np.sqrt(1.0 - sdec * sdec)
# Sun declination (radians)
dlt = np.arcsin(sdec)
# Sun right ascension (radians)
alp = np.arcsin(np.tan(dlt) * tini)
alp = np.where(np.cos(w1 - eq) < 0, np.pi - alp, alp)
alp = np.where(alp < 0, alp + 2 * np.pi, alp)
# Equation of time (radians)
sun = 2 * np.pi * (date - deleqn) / ayear
sun = np.where(sun < 0.0, sun + 2 * np.pi, sun)
slag = sun - alp - 0.03255
# Solar constant correction factor (inversely with radius squared)
solFactor = 1 / (r1 ** 2)
return sdec, cdec, slag, solFactor
def orbit_noaa(utc):
"""
Orbit as per NOAA Solar Calculation spreadsheet
https://www.esrl.noaa.gov/gmd/grad/solcalc/calcdetails.html
Similar to CFSR but faster
"""
# Julian day (including fractional part)
jd, fjd = julian_day(utc)
jd = jd + fjd
# Julian century
jc = (jd - 2451545) / 36525
# Geometric mean longitude (deg)
gml = (280.46646 + jc * (36000.76983 + jc * 0.0003032)) % 360
# Geometric mean anomaly Sun (deg)
gma = 357.52911 + jc * (35999.05029 - 0.0001537 * jc)
# Eccentricity of Earth's orbit
ecc = 0.016708634 - jc * (0.000042037 + 0.0000001267 * jc)
# Sun equation of centre (deg)
ctr = (
np.sin(np.radians(gma)) * (1.914602 - jc * (0.004817 + 0.000014 * jc))
+ np.sin(np.radians(2 * gma)) * (0.019993 - 0.000101 * jc)
+ np.sin(np.radians(3 * gma)) * 0.000289
)
# Sun true longitude (deg)
stl = gml + ctr
# Sun true anomaly (deg)
sta = gma + ctr
# Sun radius vector (AUs)
rad = (1.000001018 * (1 - ecc * ecc)) / (1 + ecc * np.cos(np.radians(sta)))
# Sun apparent longitude (deg)
sal = stl - 0.00569 - 0.00478 * np.sin(np.radians(125.04 - 1934.136 * jc))
# Mean obliquity ecliptic (deg)
moe = (
23
+ (26 + ((21.448 - jc * (46.815 + jc * (0.00059 - jc * 0.001813)))) / 60) / 60
)
# Obliquity correction (deg)
obl = moe + 0.00256 * np.cos(np.radians(125.04 - 1934.136 * jc))
# Sun right ascension (deg)
sra = np.degrees(
np.arctan2(
np.cos(np.radians(obl)) * np.sin(np.radians(sal)), np.cos(np.radians(sal)),
)
)
# Sun declination
sinDec = np.sin(np.radians(obl)) * np.sin(np.radians(sal))
cosDec = np.sqrt(1.0 - sinDec * sinDec)
# Var y
vary = np.tan(np.radians(obl / 2)) * np.tan(np.radians(obl / 2))
# Equation of time (minutes)
eqnOfTime = 4 * np.degrees(
vary * np.sin(2 * np.radians(gml))
- 2 * ecc * np.sin(np.radians(gma))
+ 4 * ecc * vary * np.sin(np.radians(gma)) * np.cos(2 * np.radians(gml))
- 0.5 * vary * vary * np.sin(4 * np.radians(gml))
- 1.25 * ecc * ecc * np.sin(2 * np.radians(gma))
)
# Convert from minutes to radians
eqnOfTime *= np.pi / (60 * 12)
# Solar constant correction factor (inversely with radius squared)
solFactor = 1 / (rad ** 2)
return sinDec, cosDec, eqnOfTime, solFactor
def orbit_merra2(utc):
"""
Orbit as per MERRA2 code
"""
# MERRA-2 solar repeats on a four-year leap-year cycle
yearlen = 365.25
days_per_cycle = 1461
if orbit_merra2.orbit is None:
# Constants from MAPL_Generic.F90
ecc = 0.0167
obliquity = np.radians(23.45)
perihelion = np.radians(102.0)
equinox = 80
omg = (2.0 * np.pi / yearlen) / np.sqrt(1 - ecc ** 2) ** 3
sob = np.sin(obliquity)
# TH: Orbit anomaly
# ZS: Sine of declination
# ZC: Cosine of declination
# PP: Inverse of square of earth-sun distance
# Integration starting at vernal equinox
def calc_omega(th):
return omg * (1.0 - ecc * np.cos(th - perihelion)) ** 2
orbit = np.recarray(
(days_per_cycle,),
dtype=[("th", float), ("zs", float), ("zc", float), ("pp", float)],
)
def update_orbit(th):
zs = np.sin(th) * sob
zc = np.sqrt(1.0 - zs ** 2)
pp = ((1.0 - ecc * np.cos(th - perihelion)) / (1.0 - ecc ** 2)) ** 2
orbit[kp] = th, zs, zc, pp
# Starting point
th = 0
kp = equinox
update_orbit(th)
# Runge-Kutta
for k in range(days_per_cycle - 1):
t1 = calc_omega(th)
t2 = calc_omega(th + 0.5 * t1)
t3 = calc_omega(th + 0.5 * t2)
t4 = calc_omega(th + t3)
kp = (kp + 1) % days_per_cycle
th += (t1 + 2 * (t2 + t3) + t4) / 6.0
update_orbit(th)
# Cache it
orbit_merra2.orbit = orbit
else:
orbit = orbit_merra2.orbit
# Map into orbit
year, month, day = split_date(utc)
doy = day_of_year(utc, snap=True)
iyear = (year - 1) % 4
iday = iyear * int(yearlen) + doy - 1
# Declination
sinDec = orbit["zs"][iday]
cosDec = orbit["zc"][iday]
# MERRA uses *solar* instead of *clock* time; no equation of time
eqnOfTime = np.zeros_like(sinDec)
# Inverse square of earth-sun distance ratio to mean distance
solFactor = orbit["pp"][iday]
return sinDec, cosDec, eqnOfTime, solFactor
# For caching MERRA-2 orbit
orbit_merra2.orbit = None
def orbit(utc, method=None):
if method is None:
method = "ASHRAE"
if callable(method):
func = method
method = "Custom"
else:
method = method.upper()
if method.startswith("A"):
func = orbit_ashrae
elif method.startswith("C"):
func = orbit_cfsr
elif method.startswith("E"):
func = orbit_energyplus
elif method.startswith("M"):
func = orbit_merra2
elif method.startswith("N"):
func = orbit_noaa
else:
raise NotImplementedError(method)
return func(utc)
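# Hypothetical usage sketch (illustrative values): every method returns the tuple
# (sin(declination), cos(declination), equation of time in radians, solar-constant
# correction factor). Near the June solstice the ASHRAE declination is close to +23.45 deg.
example_sinDec, example_cosDec, example_eqt, example_solFactor = orbit(join_date(2021, 6, 21, 12), method="ASHRAE")
# np.degrees(np.arcsin(example_sinDec)) -> approximately 23.45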
def total_solar_irradiance_ashrae(utc):
"""
Return ASHRAE constant solar irradiance value (W/m²)
"""
return 1367.0 * (np.ones_like(utc).astype(float))
def total_solar_irradiance_cfsr(utc):
"""
Calculate CFSR total solar irradiance (W/m²) based on year and month
NB. Interpolates from yearly data
"""
#
year, month, _ = split_date(utc)
# TSI datum
TSI_datum = 1360.0
# <NAME> data (1979-2006); assumed valid in July of that year
# fmt: off
dTSI = np.array([
6.70, 6.70, 6.80, 6.60, 6.20, 6.00, 5.70, 5.70, 5.80, 6.20, 6.50,
6.50, 6.50, 6.40, 6.00, 5.80, 5.70, 5.70, 5.90, 6.40, 6.70, 6.70,
6.80, 6.70, 6.30, 6.10, 5.90, 5.70
])
# fmt: on
n = len(dTSI)
# Index into dTSI (float)
i = np.asarray(year).astype(int) - 1979 + (np.asarray(month) - 7) / 12
# Extend backward and/or forward assuming 11-year sunspot cycle
while np.any(i < 0):
i[i < 0] += 11
while
|
np.any(i > n - 1)
|
numpy.any
|
import skfuzzy as fuzzy
from numpy import arange
from skfuzzy import control
def get_fever_antecedents():
temperature = control.Antecedent(arange(30, 41, .1), 'body_temperature')
duration = control.Antecedent(arange(0, 7, 1), 'fever_duration')
temperature['normal'] = fuzzy.gaussmf(temperature.universe, 30, 4)
temperature['feverish'] = fuzzy.gaussmf(temperature.universe, 40, 3)
duration['short'] = fuzzy.trapmf(duration.universe, [0, 0, 2, 3])
duration['medium'] = fuzzy.trapmf(duration.universe, [1, 2, 3, 4])
duration['long'] = fuzzy.trapmf(duration.universe, [3, 4, 7, 7])
return temperature, duration
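# Hypothetical usage sketch: the membership degree of a crisp reading in one of the fuzzy
# sets above can be evaluated with skfuzzy's interp_membership (assumed available in this
# skfuzzy version, as is the `.mf` attribute on terms).
example_temperature, example_duration = get_fever_antecedents()
example_feverish_at_39 = fuzzy.interp_membership(example_temperature.universe, example_temperature['feverish'].mf, 39)
# roughly 0.95, since the 'feverish' Gaussian is centred at 40 with sigma 3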
def get_melasma_antecedents():
when = control.Antecedent(arange(0, 7, .1), 'melasma')
when['beginning'] = fuzzy.gbellmf(when.universe, 1.5, 5, 0)
when['middle'] = fuzzy.gbellmf(when.universe, .9, 5, 3.5)
when['ending'] = fuzzy.gbellmf(when.universe, 3, 3, 7)
return when
def get_muscle_pain_antecedents():
frequency = control.Antecedent(arange(0, 10, .1), 'muscle_pain_frequency')
frequency['low'] = fuzzy.gaussmf(frequency.universe, 0, 1.5)
frequency['medium'] = fuzzy.gaussmf(frequency.universe, 5, .75)
frequency['high'] = fuzzy.gaussmf(frequency.universe, 10, 1.5)
return frequency
def get_joint_pain_antecedents():
frequency = control.Antecedent(arange(0, 10, .1), 'joint_pain_freq')
intensity = control.Antecedent(arange(0, 10, .1), 'joint_pain_intensity')
edema = control.Antecedent(arange(0, 10, .1), 'joint_edema')
edema_intensity = control.Antecedent(arange(0, 10, .1), 'joint_edema_intensity')
frequency['rare'] = fuzzy.gaussmf(frequency.universe, 0, 1.5)
frequency['common'] = fuzzy.gaussmf(frequency.universe, 5, .75)
frequency['frequent'] = fuzzy.gaussmf(frequency.universe, 10, 1.5)
intensity['mild'] = fuzzy.gaussmf(intensity.universe, 0, 1.5)
intensity['moderate'] = fuzzy.gaussmf(intensity.universe, 5, .75)
intensity['intense'] = fuzzy.gaussmf(intensity.universe, 10, 1.5)
edema['rare'] = fuzzy.gaussmf(edema.universe, 0, 1.5)
edema['common'] = fuzzy.gaussmf(edema.universe, 5, .75)
edema['frequent'] = fuzzy.gaussmf(edema.universe, 10, 1.5)
edema_intensity['mild'] = fuzzy.gaussmf(edema_intensity.universe, 0, 1.5)
edema_intensity['moderate'] = fuzzy.gaussmf(edema_intensity.universe, 5, .75)
edema_intensity['intense'] = fuzzy.gaussmf(edema_intensity.universe, 10, 1.5)
return [frequency, intensity, edema, edema_intensity]
def get_conjunctivitis_antecedents():
occurence = control.Antecedent(arange(0, 1.1, .1), 'conjunctivitis')
occurence['no'] = fuzzy.gaussmf(occurence.universe, 0, .25)
occurence['yes'] = fuzzy.gaussmf(occurence.universe, 1, .25)
return occurence
def get_headache_antecedents():
frequency = control.Antecedent(
|
arange(0, 10, .1)
|
numpy.arange
|
#!/usr/bin/env python
# ------------------------------------------------------------------------------------------------------%
# Created by "<NAME>" at 11:16, 26/04/2020 %
# %
# Email: <EMAIL> %
# Homepage: https://www.researchgate.net/profile/Thieu_Nguyen6 %
# Github: https://github.com/thieunguyen5991 %
#-------------------------------------------------------------------------------------------------------%
from numpy import dot, array, sum, matmul, where, sqrt, sign, min, cos, pi, exp, round
from opfunu.cec.utils import BasicFunction
class Model(BasicFunction):
def __init__(self, problem_size=None, cec_type="cec2013", f_shift="shift_data", f_matrix="M_D", bound=(-100, 100),
dimensions=(2, 5, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100)):
BasicFunction.__init__(self, cec_type)
self.problem_size = problem_size
self.dimensions = dimensions
self.check_dimensions(self.problem_size)
self.bound = bound
self.f_shift = f_shift + ".txt"
self.f_matrix = f_matrix + str(self.problem_size) + ".txt"
self.shift = self.load_matrix_data__(self.f_shift)[:, :problem_size]
self.matrix = self.load_matrix_data__(self.f_matrix)
def F1(self, solution=None, name="Sphere Function", shift=None, f_bias=-1400):
if shift is None:
shift = self.shift[0]
return sum((solution - shift)**2) + f_bias
def F2(self, solution=None, name="Rotated High Conditioned Elliptic Function", shift=None, matrix=None, f_bias=-1300):
if shift is None and matrix is None:
shift = self.shift[0]
matrix = self.matrix[:self.problem_size, :]
t1 = dot(matrix, solution - shift)
t2 = self.osz_func__(t1)
return self.elliptic__(t2) + f_bias
def F3(self, solution=None, name="Rotated Bent Cigar Function", shift=None, matrix=None, f_bias=-1200):
if shift is None and matrix is None:
shift = self.shift[0]
matrix = self.matrix[:2*self.problem_size, :]
t1 = dot(matrix[:self.problem_size, :], solution - shift)
t2 = self.asy_func__(t1, beta=0.5)
t3 = dot(matrix[self.problem_size:2 * self.problem_size, :], t2)
return self.bent_cigar__(t3) + f_bias
def F4(self, solution=None, name="Rotated Discus Function", shift=None, matrix=None, f_bias=-1100):
if shift is None and matrix is None:
shift = self.shift[0]
matrix = self.matrix[:self.problem_size, :]
t1 = dot(matrix, solution - shift)
t2 = self.osz_func__(t1)
return self.discus__(t2) + f_bias
def F5(self, solution=None, name="Different Powers Function", shift=None, f_bias=-1000):
if shift is None:
shift = self.shift[0]
return self.different_powers__(solution - shift) + f_bias
def F6(self, solution=None, name="Rotated Rosenbrock’s Function", shift=None, matrix=None, f_bias=-900):
if shift is None and matrix is None:
shift = self.shift[0]
matrix = self.matrix[:self.problem_size, :]
t1 = 2.048 * (solution - shift) / 100
t2 = dot(matrix, t1) + 1
return self.rosenbrock__(t2) + f_bias
def F7(self, solution=None, name="Rotated Schaffers F7 Function", shift=None, matrix=None, f_bias=-800):
if shift is None and matrix is None:
shift = self.shift[0]
matrix = self.matrix[:2*self.problem_size, :]
t2 = dot(matrix[:self.problem_size, :], solution - shift)
t3 = self.asy_func__(t2, 0.5)
t4 = self.create_diagonal_matrix__(self.problem_size, alpha=10)
t5 = matmul(t4, matrix[self.problem_size: 2 * self.problem_size, :])
t6 = dot(t5, t3)
return self.schaffers_f7__(t6) + f_bias
def F8(self, solution=None, name="Rotated Ackley’s Function", shift=None, matrix=None, f_bias=-700):
if shift is None and matrix is None:
shift = self.shift[0]
matrix = self.matrix[:2 * self.problem_size, :]
t2 = dot(matrix[:self.problem_size, :], solution - shift)
t3 = self.asy_func__(t2, 0.5)
t4 = self.create_diagonal_matrix__(self.problem_size, alpha=10)
t5 = matmul(t4, matrix[self.problem_size: 2 * self.problem_size, :])
t6 = dot(t5, t3)
return self.ackley__(t6) + f_bias
def F9(self, solution=None, name="Rotated Weierstrass Function", shift=None, matrix=None, f_bias=-600):
if shift is None and matrix is None:
shift = self.shift[0]
matrix = self.matrix[:2 * self.problem_size, :]
t1 = 0.5 * (solution - shift) / 100
t2 = dot(matrix[:self.problem_size, :], t1)
t3 = self.asy_func__(t2, 0.5)
t4 = self.create_diagonal_matrix__(self.problem_size, alpha=10)
t5 = matmul(t4, matrix[self.problem_size: 2 * self.problem_size, :])
t6 = dot(t5, t3)
return self.weierstrass__(t6) + f_bias
def F10(self, solution=None, name="Rotated Griewank’s Function", shift=None, matrix=None, f_bias=-500):
if shift is None and matrix is None:
shift = self.shift[0]
matrix = self.matrix[:2 * self.problem_size, :]
t1 = 600 * (solution - shift) / 100
t2 = self.create_diagonal_matrix__(self.problem_size, alpha=100)
t3 = matmul(t2, matrix[:self.problem_size, :])
t4 = dot(t3, t1)
return self.griewank__(t4) + f_bias
def F11(self, solution=None, name="Rastrigin’s Function", shift=None, f_bias=-400):
if shift is None:
shift = self.shift[0]
t2 = self.osz_func__(5.12 * (solution - shift) / 100)
t3 = self.asy_func__(t2, beta=0.2)
t4 = self.create_diagonal_matrix__(self.problem_size, alpha=10)
t5 = dot(t4, t3)
return self.rastrigin__(t5) + f_bias
def F12(self, solution=None, name="Rotated Rastrigin’s Function", shift=None, matrix=None, f_bias=-300):
if shift is None and matrix is None:
shift = self.shift[0]
matrix = self.matrix[:2 * self.problem_size, :]
t1 = 5.12 * (solution - shift) / 100
t2 = dot(matrix[:self.problem_size, :], t1)
t3 = self.osz_func__(t2)
t4 = self.asy_func__(t3, beta=0.2)
t5 = self.create_diagonal_matrix__(self.problem_size, alpha=10)
t6 = matmul(matrix[:self.problem_size, :], t5)
t7 = matmul(t6, matrix[self.problem_size: 2 * self.problem_size, :])
t8 = dot(t7, t4)
return self.rastrigin__(t8) + f_bias
def F13(self, solution=None, name="Non-continuous Rotated Rastrigin’s Function", shift=None, matrix=None, f_bias=-200):
if shift is None and matrix is None:
shift = self.shift[0]
matrix = self.matrix[:2 * self.problem_size, :]
t1 = 5.12 * (solution - shift) / 100
t2 = dot(matrix[:self.problem_size, :], t1)
t3 = where(abs(t2) > 0.5, round(2 * t2) / 2, t2)
t4 = self.osz_func__(t3)
t5 = self.asy_func__(t4, beta=0.2)
t6 = self.create_diagonal_matrix__(self.problem_size, alpha=10)
t7 = matmul(matrix[:self.problem_size, :], t6)
t8 = matmul(t7, matrix[self.problem_size: 2 * self.problem_size, :])
t9 = dot(t8, t5)
return self.rastrigin__(t9) + f_bias
def F14(self, solution=None, name="Schwefel’s Function", shift=None, f_bias=-100):
if shift is None:
shift = self.shift[0]
t1 = 1000 * (solution - shift) / 100
t2 = self.create_diagonal_matrix__(self.problem_size, alpha=10)
t3 = dot(t2, t1) + 4.209687462275036e+002
return self.modified_schwefel__(t3) + f_bias
def F15(self, solution=None, name="Rotated Schwefel’s Function", shift=None, matrix=None, f_bias=100):
if shift is None and matrix is None:
shift = self.shift[0]
matrix = self.matrix[:self.problem_size, :]
t1 = 1000 * (solution - shift) / 100
t2 = self.create_diagonal_matrix__(self.problem_size, alpha=10)
t3 = matmul(t2, matrix[:self.problem_size, :])
t4 = dot(t3, t1) + 4.209687462275036e+002
return self.modified_schwefel__(t4) + f_bias
def F16(self, solution=None, name="Rotated Katsuura Function", shift=None, matrix=None, f_bias=200):
if shift is None and matrix is None:
shift = self.shift[0]
matrix = self.matrix[:2*self.problem_size, :]
t1 = 5 * (solution - shift) / 100
t2 = dot(matrix[:self.problem_size, :], t1)
t3 = self.create_diagonal_matrix__(self.problem_size, alpha=100)
t4 = matmul(matrix[self.problem_size:2 * self.problem_size, :], t3)
t5 = dot(t4, t2)
return self.katsuura__(t5) + f_bias
def F17(self, solution=None, name="Lunacek bi-Rastrigin Function", shift=None, f_bias=300):
if shift is None:
shift = self.shift[0]
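# Lunacek bi-Rastrigin parameters: depth d, scale s, and the two optimum centres miu0 / miu1.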
d = 1
s = 1 - 1.0 / (2 * sqrt(self.problem_size + 20) - 8.2)
miu0 = 2.5
miu1 = -sqrt((miu0 ** 2 - d) / s)
t1 = 10 * (solution - shift) / 100
t2 = 2 * sign(solution) * t1 + miu0
t3 = self.create_diagonal_matrix__(self.problem_size, alpha=100)
t4 = dot(t3, (t2 - miu0))
vp1 = sum((t2 - miu0) ** 2)
vp2 = d * self.problem_size + s * sum((t2 - miu1) ** 2) + 10 * (self.problem_size - sum(cos(2 * pi * t4)))
return min([vp1, vp2]) + f_bias
def F18(self, solution=None, name="Rotated Lunacek bi-Rastrigin Function", shift=None, matrix=None, f_bias=400):
if shift is None and matrix is None:
shift = self.shift[0]
matrix = self.matrix[:2*self.problem_size, :]
d = 1
s = 1 - 1.0 / (2 * sqrt(self.problem_size + 20) - 8.2)
miu0 = 2.5
miu1 = -sqrt((miu0 ** 2 - d) / s)
t1 = 10 * (solution - shift) / 100
t2 = 2 * sign(t1) * t1 + miu0
t3 = self.create_diagonal_matrix__(self.problem_size, alpha=100)
t4 = matmul(matrix[self.problem_size:2 * self.problem_size, :], t3)
t5 = dot(matrix[:self.problem_size, :], (t2 - miu0))
t6 = dot(t4, t5)
vp1 = sum((t2 - miu0) ** 2)
vp2 = d * self.problem_size + s * sum((t2 - miu1) ** 2) + 10 * (self.problem_size - sum(cos(2 * pi * t6)))
return min([vp1, vp2]) + f_bias
def F19(self, solution=None, name="Rotated Expanded Griewank’s plus Rosenbrock’s Function", shift=None, matrix=None, f_bias=500):
if shift is None and matrix is None:
shift = self.shift[0]
matrix = self.matrix[:self.problem_size, :]
t1 = 5 * (solution - shift) / 100
t2 = dot(matrix[:self.problem_size, :], t1)
import numpy as np
import pandas as pd
import torch, time, warnings, sys, argparse
import torch.nn.functional as F
from sklearn.model_selection import train_test_split, StratifiedKFold
from tabulate import tabulate
from torch_geometric.datasets import Planetoid, CitationFull, Coauthor, Amazon
import matplotlib.pyplot as plt
import itertools, pickle
if not sys.warnoptions:
warnings.simplefilter("ignore")
def set_up_model(framework):
model_name = ['NSGNN','GCN','GAT','GRAPHSAGE','MLP']
model_reference = [[1,'X'],[0,1],[0,3],[0,2],[0,7]]
reference = dict(zip(model_name,model_reference))
return reference[framework]
def load_metrics(name=''):
saved_metrics = pickle.load(open(f'RESULTS/metrics_{name}.pickle', "rb", -1))
return saved_metrics
def plot_metrics_training(metrics,title,name,index_choice=1):
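# Plot the training and validation curves selected by index_choice (loss1 when index_choice == 1,
# otherwise loss2) and save the figure to `name`.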
if index_choice == 1:
training = metrics.training_loss1
validation = metrics.valid_loss1
else:
training = metrics.training_loss2
validation = metrics.valid_loss2
epochs = range(1,len(training)+1)
idx_min = np.argmin(validation)+1
plt.plot(epochs, training,color='green',label = 'Training')
plt.plot(epochs, validation,color='red',label = 'Validation')
plt.grid()
# plt.axvline(x = idx_min, color ='black',label = 'Early-stopping')
plt.legend(loc='best',fontsize=18)
plt.xlabel('Epochs',fontsize=18)
plt.xticks(fontsize=18)
plt.ylabel('Loss',fontsize=15)
plt.yticks(fontsize=15)
plt.tight_layout()
plt.title(title,fontsize=18)
plt.gca().set_ylim(0,2.5)
plt.tight_layout()
plt.savefig(f'{name}')
def make_masks(data_y, random_seed, mode='training-first', split='stratified'):
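# Build boolean (train, test, valid) node masks. 'stratified' uses a 5-fold StratifiedKFold and
# 'random' uses train_test_split; 'training-first' gives 60% of nodes to training, 'testing-first' gives 60% to testing.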
values = data_y.cpu()
num_nodes = values.size(0)
mask_values0 = torch.zeros(num_nodes).bool()
mask_values1 = torch.zeros(num_nodes).bool()
mask_values2 = torch.zeros(num_nodes).bool()
if split == 'stratified':
skf = StratifiedKFold(5, shuffle=True, random_state=random_seed)
idx = [torch.from_numpy(i) for _, i in skf.split(values, values)]
if mode == 'training-first' :
train = torch.cat(idx[:3], dim=0)
valid = idx[3]
test = idx[4]
elif mode == 'testing-first' :
train = idx[0]
valid = idx[1]
test = torch.cat(idx[2:], dim=0)
elif split == 'random':
all_masks = np.arange(num_nodes)
if mode == 'training-first':
train,test_V = train_test_split(all_masks,train_size=0.6,random_state=random_seed)
valid,test = train_test_split(test_V,test_size=0.5,random_state=random_seed)
elif mode == 'testing-first':
train_V,test = train_test_split(all_masks,test_size=0.6,random_state=random_seed)
train,valid = train_test_split(train_V,test_size=0.5,random_state=random_seed)
mask_values0[train] = True
mask_values1[test] = True
mask_values2[valid] = True
return mask_values0, mask_values1, mask_values2
def import_dataset(name='CORA'):
root = f'BENCHMARK/{name.upper()}/'
if name.upper() == 'CORA':
dataset = Planetoid(root=root, name='CORA')
elif name.upper() == 'CORA-F':
dataset = CitationFull(root=root, name='cora')
elif name.upper() == 'CITESEER':
dataset = Planetoid(root=root, name='citeseer')
elif name.upper() == 'PUBMED':
dataset = Planetoid(root=root, name='PubMed')
elif name.upper() == 'COAUTHOR-P':
dataset = Coauthor(root=root, name='Physics')
elif name.upper() == 'COAUTHOR-C':
dataset = Coauthor(root=root, name='CS')
elif name.upper() == 'AMAZON-C':
dataset = Amazon(root=root, name='Computers')
elif name.upper() == 'AMAZON-P':
dataset = Amazon(root=root, name='Photo')
elif name.lower() == 'all':
Planetoid(root=root, name='CORA')
Planetoid(root=root, name='citeseer')
CitationFull(root=root, name='cora')
Planetoid(root=root, name='PubMed')
Coauthor(root=root, name='Physics')
Coauthor(root=root, name='CS')
Amazon(root=root, name='Computers')
Amazon(root=root, name='Photo')
exit()
return dataset
def output_training(metrics_obj,epoch,estop_val,extra='---'):
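# Print a tabulated per-epoch summary of training/validation loss and accuracy plus the early-stopping counter.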
header_1, header_2 = 'NLL-Loss | e-stop','Accuracy | e-stop'
train_1,train_2 = metrics_obj.training_loss1[epoch],metrics_obj.training_loss2[epoch]
valid_1,valid_2 = metrics_obj.valid_loss1[epoch],metrics_obj.valid_loss2[epoch]
tab_val = [['TRAINING',f'{train_1:.4f}',f'{train_2:.4f}%'],['VALIDATION',f'{valid_1:.4f}',f'{valid_2:.4f}%'],['E-STOPPING',f'{estop_val}',f'{extra}']]
output = tabulate(tab_val,headers= [f'EPOCH # {epoch}',header_1,header_2],tablefmt='fancy_grid')
print(output)
return output
def live_plot(epoch, Training_list, Validation_list, watch=False,interval=0.2,extra_array=[]):
if watch == True:
if epoch >=1:
plt.plot([epoch,epoch+1],[Training_list[epoch-1],Training_list[epoch]],'g-')
plt.plot([epoch,epoch+1],[Validation_list[epoch-1],Validation_list[epoch]],'r-')
if len(extra_array)>0:
plt.plot([epoch,epoch+1],[extra_array[epoch-1],extra_array[epoch]],'b-')
plt.pause(interval)
else: pass
def torch_accuracy(tensor_pred,tensor_real):
correct = torch.eq(tensor_pred,tensor_real).sum().float().item()
return 100*correct/len(tensor_real)
def mess_up_dataset(dataset, num_noise):
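# Inject num_noise fake nodes: Gaussian features, labels drawn from the existing classes, and random
# edges between real and fake nodes. RNGs are seeded with num_noise for reproducibility.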
dataset = dataset.to('cpu')
actual_labels = torch.unique(dataset.y)
actual_nodes = np.arange(dataset.x.size(0)).reshape(-1,1)
real_flags = np.ones(dataset.x.size(0))
fake_flags = np.zeros(num_noise)
flags = np.hstack([real_flags,fake_flags])
np.random.seed(num_noise)
torch.manual_seed(num_noise)
print('> Number of fake data: ',num_noise)
fake_nodes = np.arange(dataset.x.size(0),dataset.x.size(0)+num_noise)
size_feat = dataset.x.size(1)
avg_connect = int(dataset.edge_index.size(1)/dataset.x.size(0))
# fake data
fake_labels = torch.tensor(np.random.choice(actual_labels,num_noise).reshape(-1))
fake_feature = torch.randn(num_noise,size_feat)
# making fake edges
real2fake = np.random.choice(fake_nodes,size=(dataset.x.size(0),avg_connect)).reshape(-1)
fake2real = np.repeat(actual_nodes,avg_connect,axis=-1).reshape(-1)
np_edge_index = dataset.edge_index.numpy()
temp_TOP = np.hstack((np_edge_index[0],fake2real))
idx_sorting = np.argsort(temp_TOP)
#!/usr/bin/env python
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fine-tuning the library models for masked language modeling (BERT, ALBERT, RoBERTa...)
on a text file or a dataset without using HuggingFace Trainer.
Here is the full list of checkpoints on the hub that can be fine-tuned by this script:
https://huggingface.co/models?filter=masked-lm
"""
# You can also adapt this script on your own mlm task. Pointers for this are left as comments.
"""BERT Pretraining"""
import argparse
import csv
import h5py
import os
import glob
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, Dataset
from torch.utils.data.distributed import DistributedSampler
import logging
import math
import multiprocessing
import random
import re
import time
from collections import OrderedDict
from concurrent.futures import ProcessPoolExecutor
#from modeling import BertForPretraining, BertConfig
from schedulers import LinearWarmupPolyDecayScheduler
import utils
import torch.nn.functional as F
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_MAPPING,
AdamW,
AutoConfig,
AutoModelForPreTraining,
AutoTokenizer,
DataCollatorForLanguageModeling,
SchedulerType,
get_scheduler,
set_seed,
)
from schedulers import LinearWarmUpScheduler, LinearWarmupPolyDecayScheduler
#from lamb import Lamb
from intel_extension_for_pytorch.optim._lamb import Lamb
try:
import torch_ccl
except ImportError as e:
torch_ccl = False
import intel_extension_for_pytorch as ipex
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
class WorkerInitObj(object):
def __init__(self, seed):
self.seed = seed
def __call__(self, id):
np.random.seed(seed=self.seed + id)
random.seed(self.seed + id)
def get_eval_batchsize_per_worker(args):
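# Split num_eval_examples across ranks as evenly as possible; the first `remainder` ranks get one extra example.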
if torch.distributed.is_initialized():
chunk_size = args.num_eval_examples // args.world_size
rank = args.local_rank
remainder = args.num_eval_examples % args.world_size
if rank<remainder:
return (chunk_size+1)
else:
return chunk_size
def create_pretraining_dataset(input_file, max_pred_length, shared_list, args, worker_init_fn):
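# Wrap a single HDF5 training shard in a randomly sampled DataLoader.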
train_data = pretraining_dataset(input_file=input_file, max_pred_length=max_pred_length)
train_sampler = RandomSampler(train_data)
train_dataloader = DataLoader(train_data, sampler=train_sampler,
batch_size=args.train_batch_size)
return train_dataloader, input_file
def create_eval_dataset(args, worker_init_fn):
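# Collect up to num_eval_examples records from the 'part*' HDF5 shards in eval_dir,
# then keep only this rank's contiguous slice when running distributed.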
eval_data = []
for eval_file in sorted(os.listdir(args.eval_dir)):
eval_file_path = os.path.join(args.eval_dir, eval_file)
if os.path.isfile(eval_file_path) and 'part' in eval_file_path:
eval_data.extend(pretraining_dataset(eval_file_path, max_pred_length=args.max_predictions_per_seq))
if len(eval_data) > args.num_eval_examples:
eval_data = eval_data[:args.num_eval_examples]
break
if torch.distributed.is_initialized():
chunk_size = args.num_eval_examples // args.world_size
rank = args.local_rank
remainder = args.num_eval_examples % args.world_size
if rank<remainder:
eval_data = eval_data[(chunk_size+1)*rank : (chunk_size+1)*(rank+1)]
else:
eval_data = eval_data[chunk_size*rank+remainder : chunk_size*(rank+1)+remainder]
eval_sampler = SequentialSampler(eval_data)
eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size, num_workers=0)
return eval_dataloader
class pretraining_dataset(Dataset):
def __init__(self, input_file, max_pred_length):
self.input_file = input_file
self.max_pred_length = max_pred_length
f = h5py.File(input_file, "r")
keys = ['input_ids', 'input_mask', 'segment_ids', 'masked_lm_positions', 'masked_lm_ids',
'next_sentence_labels']
self.inputs = [np.asarray(f[key][:]) for key in keys]
f.close()
def __len__(self):
'Denotes the total number of samples'
return len(self.inputs[0])
def __getitem__(self, index):
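# Convert one HDF5 record to tensors; masked_lm_labels is -100 (the loss ignore index)
# everywhere except the positions listed in masked_lm_positions.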
[input_ids, input_mask, segment_ids, masked_lm_positions, masked_lm_ids, next_sentence_labels] = [
torch.from_numpy(input[index].astype(np.int64)) if indice < 5 else torch.from_numpy(
np.asarray(input[index].astype(np.int64))) for indice, input in enumerate(self.inputs)]
masked_lm_labels = torch.zeros(input_ids.shape, dtype=torch.long) - 100
index = self.max_pred_length
masked_token_count = torch.count_nonzero(masked_lm_positions)
if masked_token_count != 0:
index = masked_token_count
masked_lm_labels[masked_lm_positions[:index]] = masked_lm_ids[:index]
return [input_ids, segment_ids, input_mask, masked_lm_labels, next_sentence_labels]
def parse_args():
parser = argparse.ArgumentParser(description="Finetune a transformers model on a Masked Language Modeling task")
## Required parameters
parser.add_argument("--input_dir",
default=None,
type=str,
required=True,
help="The input data dir. Should contain .hdf5 files for the task.")
parser.add_argument("--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model checkpoints will be written.")
parser.add_argument("--eval_dir",
default=None,
type=str,
help="The eval data dir. Should contain .hdf5 files for the task.")
parser.add_argument("--eval_iter_start_samples",
default=3000000,
type=int,
help="Sample to begin performing eval.")
parser.add_argument("--eval_iter_samples",
default=-1,
type=int,
help="If set to -1, disable eval, \
else evaluate every eval_iter_samples during training")
parser.add_argument("--num_eval_examples",
default=10000,
type=int,
help="number of eval examples to run eval on")
parser.add_argument("--init_checkpoint",
default=None,
type=str,
help="The initial checkpoint to start training from.")
parser.add_argument("--init_tf_checkpoint",
default=None,
type=str,
help="The initial TF checkpoint to start training from.")
parser.add_argument(
"--train_file", type=str, default=None, help="A csv or a json file containing the training data."
)
parser.add_argument(
"--validation_file", type=str, default=None, help="A csv or a json file containing the validation data."
)
parser.add_argument(
"--model_name_or_path",
type=str,
help="Path to pretrained model or model identifier from huggingface.co/models.",
)
parser.add_argument(
"--config_name",
type=str,
default=None,
help="Pretrained config name or path if not the same as model_name",
)
parser.add_argument(
"--per_device_eval_batch_size",
type=int,
default=8,
help="Batch size (per device) for the evaluation dataloader.",
)
parser.add_argument(
"--learning_rate",
type=float,
default=5e-5,
help="Initial learning rate (after the potential warmup period) to use.",
)
parser.add_argument("--max_predictions_per_seq",
default=76,
type=int,
help="The maximum total of masked tokens in input sequence")
parser.add_argument("--train_batch_size",
default=18,
type=int,
help="Total batch size for training.")
parser.add_argument("--eval_batch_size",
default=128,
type=int,
help="Total batch size for training.")
parser.add_argument("--weight_decay_rate",
default=0.01,
type=float,
help="weight decay rate for LAMB.")
parser.add_argument("--opt_lamb_beta_1",
default=0.9,
type=float,
help="LAMB beta1.")
parser.add_argument("--opt_lamb_beta_2",
default=0.999,
type=float,
help="LAMB beta2.")
parser.add_argument("--max_steps",
default=1536,
type=float,
help="Total number of training steps to perform.")
parser.add_argument("--max_samples_termination",
default=14000000,
type=float,
help="Total number of training samples to run.")
parser.add_argument("--warmup_proportion",
default=0.01,
type=float,
help="Proportion of optimizer update steps to perform linear learning rate warmup for. "
"Typically 1/8th of steps for Phase2")
parser.add_argument("--warmup_steps",
default=0,
type=float,
help="Number of optimizer update steps to perform linear learning rate warmup for. "
"Typically 1/8th of steps for Phase2")
parser.add_argument("--start_warmup_step",
default=0,
type=float,
help="Starting step for warmup. ")
parser.add_argument('--log_freq',
type=float, default=10000.0,
help='frequency of logging loss. If not positive, no logging is provided for training loss')
parser.add_argument('--checkpoint_activations',
default=False,
action='store_true',
help="Whether to use gradient checkpointing")
parser.add_argument("--resume_from_checkpoint",
default=False,
action='store_true',
help="Whether to resume training from checkpoint. If set, precedes init_checkpoint/init_tf_checkpoint")
parser.add_argument('--keep_n_most_recent_checkpoints',
type=int,
default=20,
help="Number of checkpoints to keep (rolling basis).")
parser.add_argument('--num_samples_per_checkpoint',
type=int,
default=500000,
help="Number of update steps until a model checkpoint is saved to disk.")
parser.add_argument('--min_samples_to_start_checkpoints',
type=int,
default=3000000,
help="Number of update steps until model checkpoints start saving to disk.")
parser.add_argument('--skip_checkpoint',
default=False,
action='store_true',
help="Whether to save checkpoints")
parser.add_argument('--phase2',
default=False,
action='store_true',
help="Only required for checkpoint saving format")
parser.add_argument("--do_train",
default=False,
action='store_true',
help="Whether to run training.")
parser.add_argument('--bert_config_path',
type=str,
default="/workspace/phase1",
help="Path bert_config.json is located in")
parser.add_argument('--target_mlm_accuracy',
type=float,
default=0.72,
help="Stop training after reaching this Masked-LM accuracy")
parser.add_argument('--train_mlm_accuracy_window_size',
type=int,
default=0,
help="Average accuracy over this amount of batches before performing a stopping criterion test")
parser.add_argument('--num_epochs_to_generate_seeds_for',
type=int,
default=2,
help="Number of epochs to plan seeds for. Same set across all workers.")
parser.add_argument("--use_gradient_as_bucket_view",
default=False,
action='store_true',
help="Turn ON gradient_as_bucket_view optimization in native DDP.")
parser.add_argument("--dense_seq_output",
default=False,
action='store_true',
help="Whether to run with optimizations.")
parser.add_argument("--bf16",
default=False,
action='store_true',
help="Enale BFloat16 training")
parser.add_argument("--benchmark", action="store_true", help="Whether to enable benchmark")
parser.add_argument("--weight_decay", type=float, default=0.0, help="Weight decay to use.")
parser.add_argument("--num_train_epochs", type=int, default=3, help="Total number of training epochs to perform.")
parser.add_argument(
"--gradient_accumulation_steps",
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.",
)
parser.add_argument(
"--lr_scheduler_type",
type=SchedulerType,
default="linear",
help="The scheduler type to use.",
choices=["linear", "cosine", "cosine_with_restarts", "polynomial", "constant", "constant_with_warmup"],
)
parser.add_argument("--seed", type=int, default=42, help="A seed for reproducible training.")
parser.add_argument(
"--model_type",
type=str,
default=None,
help="Model type to use if training from scratch.",
choices=MODEL_TYPES,
)
parser.add_argument(
"--preprocessing_num_workers",
type=int,
default=None,
help="The number of processes to use for the preprocessing.",
)
parser.add_argument("--local_rank",
default=0,
type=int,
help="Total batch size for training.")
parser.add_argument("--world_size",
default=1,
type=int,
help="Total batch size for training.")
parser.add_argument("--profile", action="store_true", help="Whether to enable profiling")
args = parser.parse_args()
if args.output_dir is not None:
os.makedirs(args.output_dir, exist_ok=True)
#assert args.init_checkpoint is not None or args.init_tf_checkpoint is not None or found_resume_checkpoint(args), \
# "Must specify --init_checkpoint, --init_tf_checkpoint or have ckpt to resume from in --output_dir of the form *.pt"
#assert not (args.init_checkpoint is not None and args.init_tf_checkpoint is not None), \
# "Can only specify one of --init_checkpoint and --init_tf_checkpoint"
return args
def found_resume_checkpoint(args):
if args.phase2:
checkpoint_str = "phase2_ckpt*.pt"
else:
checkpoint_str = "phase1_ckpt*.pt"
return args.resume_from_checkpoint and len(glob.glob(os.path.join(args.output_dir, checkpoint_str))) > 0
def setup_training(args):
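# CPU-only setup: initialize a oneCCL process group when launched with more than one PMI rank,
# then validate gradient accumulation and derive the per-step micro-batch size.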
device = torch.device("cpu")
if torch_ccl and int(os.environ.get('PMI_SIZE', '0')) > 1:
os.environ['RANK'] = os.environ.get('PMI_RANK', '0')
os.environ['WORLD_SIZE'] = os.environ.get('PMI_SIZE', '1')
torch.distributed.init_process_group(backend="ccl")
device = torch.device("cpu")
args.local_rank = torch.distributed.get_rank()
args.world_size = torch.distributed.get_world_size()
print("##################Using CCL dist run", flush=True)
if args.gradient_accumulation_steps < 1:
raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
args.gradient_accumulation_steps))
if args.train_batch_size % args.gradient_accumulation_steps != 0:
raise ValueError("Invalid gradient_accumulation_steps parameter: {}, batch size {} should be divisible".format(
args.gradient_accumulation_steps, args.train_batch_size))
args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps
if not (args.do_train or (args.eval_dir and args.eval_iter_samples <= 0)):
raise ValueError(" `do_train` or should be in offline eval mode")
if not args.resume_from_checkpoint or not os.path.exists(args.output_dir):
os.makedirs(args.output_dir, exist_ok=True)
return device, args
def prepare_model_and_optimizer(args, device):
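# Build the config/model (pretrained checkpoint or from scratch), split parameters into
# decay / no-decay groups, and create a fused LAMB optimizer with a linear-warmup poly-decay schedule.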
global_step = 0
args.resume_step = 0
checkpoint = None
# download model & vocab.
if args.config_name:
config = AutoConfig.from_pretrained(args.config_name)
elif args.model_name_or_path:
config = AutoConfig.from_pretrained(args.model_name_or_path)
else:
config = CONFIG_MAPPING[args.model_type]()
logger.warning("You are instantiating a new config instance from scratch.")
config.dense_seq_output = args.dense_seq_output
if args.model_name_or_path:
model = AutoModelForPreTraining.from_pretrained(
args.model_name_or_path,
from_tf=bool(".ckpt" in args.model_name_or_path),
config=config,
)
else:
logger.info("Training new model from scratch")
model = AutoModelForPreTraining.from_config(config)
## Load from Pyt checkpoint - either given as init_checkpoint, or picked up from output_dir if found
#if args.init_checkpoint is not None or found_resume_checkpoint(args):
# # Prepare model
# #model = BertForPreTraining(config)
# model = BertForPreTrainingSegmented(config)
# # for k,v in model.state_dict().items():
# # print(f'model-k,len(v)={k}, {v.numel()}')
# #model = BertForPretraining(config)
# if args.init_checkpoint is None: # finding checkpoint in output_dir
# assert False, "code path not tested with cuda graphs"
# checkpoint_str = "phase2_ckpt_*.pt" if args.phase2 else "phase1_ckpt_*.pt"
# model_names = [f for f in glob.glob(os.path.join(args.output_dir, checkpoint_str))]
# global_step = max([int(x.split('.pt')[0].split('_')[-1].strip()) for x in model_names])
# args.resume_step = global_step #used for throughput computation
# resume_init_checkpoint = os.path.join(args.output_dir, checkpoint_str.replace("*", str(global_step)))
# print("Setting init checkpoint to %s - which is the latest in %s" %(resume_init_checkpoint, args.output_dir))
# checkpoint=torch.load(resume_init_checkpoint, map_location="cpu")
# else:
# checkpoint=torch.load(args.init_checkpoint, map_location="cpu")["model"]
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'gamma', 'beta', 'LayerNorm']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': args.weight_decay_rate},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}]
optimizer = Lamb(optimizer_grouped_parameters, lr=args.learning_rate, betas=(args.opt_lamb_beta_1, args.opt_lamb_beta_2), fused=True)
if args.warmup_steps == 0:
warmup_steps = int(args.max_steps * args.warmup_proportion)
warmup_start = 0
else:
warmup_steps = args.warmup_steps
warmup_start = args.start_warmup_step
lr_scheduler = LinearWarmupPolyDecayScheduler(optimizer, start_warmup_steps=warmup_start, warmup_steps=warmup_steps,
total_steps=args.max_steps, end_learning_rate=0.0, degree=1.0)
#if found_resume_checkpoint(args):
# assert False, "code path not tested with cuda graphs"
# optimizer.load_state_dict(checkpoint['optimizer']) #restores m,v states (only if resuming checkpoint, not for init_checkpoint and init_tf_checkpoint for now)
return model, optimizer, lr_scheduler, checkpoint, global_step
def take_optimizer_step(args, optimizer, model, overflow_buf, global_step):
global skipped_steps
optimizer.step()
global_step += 1
return global_step
def run_eval(model, eval_dataloader, device, num_eval_examples, args, first_eval=False, use_cache=False):
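# Evaluate MLM loss and accuracy, weighting each batch by its number of masked tokens;
# totals are summed across ranks before averaging.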
model.eval()
total_eval_loss, total_eval_mlm_acc = 0.0, 0.0
total_masked = 0
with torch.no_grad():
for batch in eval_dataloader:
input_ids, segment_ids, input_mask, masked_lm_labels, next_sentence_labels = batch
outputs = None
if args.bf16:
with torch.cpu.amp.autocast():
outputs = model(
input_ids=input_ids,
token_type_ids=segment_ids,
attention_mask=input_mask,
labels=masked_lm_labels,
next_sentence_label=next_sentence_labels)
else:
outputs = model(
input_ids=input_ids,
token_type_ids=segment_ids,
attention_mask=input_mask,
labels=masked_lm_labels,
next_sentence_label=next_sentence_labels)
mlm_acc, num_masked = calc_mlm_acc(outputs, masked_lm_labels, args.dense_seq_output)
total_eval_loss += outputs.loss.item() * num_masked
total_eval_mlm_acc += mlm_acc * num_masked
total_masked += num_masked
model.train()
total_masked = torch.tensor(total_masked, device=device, dtype=torch.int64)
total_eval_loss = torch.tensor(total_eval_loss, device=device, dtype=torch.float64)
if torch.distributed.is_initialized():
#Collect total scores from all ranks
torch.distributed.all_reduce(total_eval_mlm_acc, op=torch.distributed.ReduceOp.SUM)
torch.distributed.all_reduce(total_eval_loss, op=torch.distributed.ReduceOp.SUM)
torch.distributed.all_reduce(total_masked, op=torch.distributed.ReduceOp.SUM)
# Average by number of examples
total_eval_mlm_acc /= total_masked
total_eval_loss /= total_masked
return total_eval_loss, total_eval_mlm_acc
def global_batch_size(args):
return args.train_batch_size * args.gradient_accumulation_steps * args.world_size
def calc_mlm_acc(outputs, masked_lm_labels, dense_seq_output=False):
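# Accuracy over masked positions only; label entries equal to -100 are ignored.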
prediction_scores = outputs.prediction_logits
masked_lm_labels_flat = masked_lm_labels.view(-1)
mlm_labels = masked_lm_labels_flat[masked_lm_labels_flat != -100]
if not dense_seq_output:
prediction_scores_flat = prediction_scores.view(-1, prediction_scores.shape[-1])
mlm_predictions_scores = prediction_scores_flat[masked_lm_labels_flat != -100]
mlm_predictions = mlm_predictions_scores.argmax(dim=-1)
else:
mlm_predictions = prediction_scores.argmax(dim=-1)
num_masked = mlm_labels.numel()
mlm_acc = (mlm_predictions == mlm_labels).sum(dtype=torch.float) / num_masked
return mlm_acc, num_masked
def calc_accuracy(outputs, masked_lm_labels, next_sentence_label, args):
loss = outputs.loss.item()
prediction_logits = outputs.prediction_logits
seq_relationship_logits = outputs.seq_relationship_logits
mlm_acc, num_masked = calc_mlm_acc(outputs, masked_lm_labels, args.dense_seq_output)
seq_acc_t = torch.argmax(seq_relationship_logits, dim=-1).eq(next_sentence_label.view([-1])).to(torch.float)
seq_acc_true, seq_tot = seq_acc_t.sum().item(), seq_acc_t.numel()
seq_acc = seq_acc_true / seq_tot
return loss, mlm_acc, num_masked, seq_acc, seq_tot
def main():
args = parse_args()
status = 'aborted' # later set to 'success' if termination criteria met
device, args = setup_training(args)
total_batch_size = global_batch_size(args)
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
if args.local_rank == 0 or args.local_rank == -1:
print("parsed args:")
print(args)
# Prepare optimizer
model, optimizer, lr_scheduler, checkpoint, global_step = prepare_model_and_optimizer(args, device)
model.train()
model, optimizer = ipex.optimize(model, optimizer=optimizer, dtype=torch.bfloat16 if args.bf16 else torch.float32)
worker_seeds, shuffling_seeds = utils.setup_seeds(args.seed, args.num_epochs_to_generate_seeds_for, device)
worker_seed = worker_seeds[args.local_rank]
random.seed(worker_seed)
np.random.seed(worker_seed)
torch.manual_seed(worker_seed)
worker_init = WorkerInitObj(worker_seed)
samples_trained = global_step * args.train_batch_size * args.gradient_accumulation_steps * args.world_size
final_loss = float("inf")
train_time_raw = float("inf")
raw_train_start = time.time()
if args.do_train:
model.train()
most_recent_ckpts_paths = []
average_loss = 0.0 # averaged loss every args.log_freq steps
epoch = 1
training_steps = 0
end_training, converged = False, False
samples_trained_prev = 0
# pre-compute eval boundaries
samples_trained_per_step = args.train_batch_size * args.gradient_accumulation_steps * args.world_size
start, stop, step = args.eval_iter_start_samples, args.max_samples_termination, args.eval_iter_samples
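# Convert the sample-count eval boundaries into optimizer-step indices.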
eval_steps = [math.ceil(i / samples_trained_per_step) for i in np.arange(start, stop, step)]