prompt | completion | api |
---|---|---|
stringlengths 19-879k | stringlengths 3-53.8k | stringlengths 8-59 |
import numpy as np
import matplotlib.pyplot as plt
import inspect # Used for storing the input
from .element import Element
from .equation import HeadEquation, PotentialEquation
from .besselaesnumba import besselaesnumba
besselaesnumba.initialize()
try:
from .src import besselaesnew
besselaesnew.besselaesnew.initialize()
#print('success on f2py')
except:
pass
from .controlpoints import controlpoints, strengthinf_controlpoints
__all__ = ['LineSinkBase', 'HeadLineSinkZero', 'HeadLineSink', 'LineSinkDitch',
'HeadLineSinkString', 'LineSinkDitchString']
class LineSinkChangeTrace:
def changetrace(self, xyzt1, xyzt2, aq, layer, ltype, modellayer, direction, hstepmax, verbose=False):
changed = False
terminate = False
xyztnew = 0
if (ltype == 'a'):
if True:
# if (layer == self.layers).any(): # in layer where line-sink is screened
# not needed anymore, I think this is all taken care of by checking Qn1 and Qn2
if verbose:
print('hello changetrace')
print('xyz1:', xyzt1[:-1])
print('xyz2:', xyzt2[:-1])
x1, y1, z1, t1 = xyzt1
x2, y2, z2, t2 = xyzt2
eps = 1e-8
za = x1 + y1 * 1j
zb = x2 + y2 * 1j
Za = (2 * za - (self.z1 + self.z2)) / (self.z2 - self.z1)
Zb = (2 * zb - (self.z1 + self.z2)) / (self.z2 - self.z1)
if Za.imag * Zb.imag < 0:
Xa, Ya = Za.real, Za.imag
Xb, Yb = Zb.real, Zb.imag
X = Xa - Ya * (Xb - Xa) / (Yb - Ya)
if verbose: print('X', X)
if abs(X) <= 1: # crosses line-sink
if verbose: print('crosses line-sink')
Znew1 = X - eps * np.sign(Yb) * 1j # steps to side of Ya
Znew2 = X + eps * np.sign(Yb) * 1j # steps to side of Yb
znew1 = 0.5 * ((self.z2 - self.z1) * Znew1 + self.z1 + self.z2)
znew2 = 0.5 * ((self.z2 - self.z1) * Znew2 + self.z1 + self.z2)
xnew1, ynew1 = znew1.real, znew1.imag
xnew2, ynew2 = znew2.real, znew2.imag
if Ya < 0:
theta = self.theta_norm_out
else:
theta = self.theta_norm_out + np.pi
Qx1, Qy1 = self.model.disvec(xnew1, ynew1)[:, layer] * direction
Qn1 = Qx1 * np.cos(theta) + Qy1 * np.sin(theta)
Qx2, Qy2 = self.model.disvec(xnew2, ynew2)[:, layer] * direction
Qn2 = Qx2 * np.cos(theta) + Qy2 * np.sin(theta)
if verbose:
print('xnew1, ynew1:', xnew1, ynew1)
print('xnew2, ynew2:', xnew2, ynew2)
print('Qn1, Qn2', Qn1, Qn2)
print('Qn2 > Qn1:', Qn2 > Qn1)
if Qn1 < 0: # trying to cross line-sink that infiltrates, stay on bottom, don't terminate
if verbose: print('change 1')
xnew = xnew1
ynew = ynew1
dold = np.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2)
dnew = np.sqrt((x1 - xnew) ** 2 + (y1 - ynew) ** 2)
znew = z1 + dnew / dold * (z2 - z1)
tnew = t1 + dnew / dold * (t2 - t1)
changed = True
xyztnew = [np.array([xnew, ynew, znew, tnew])]
elif Qn2 < 0: # all water is taken out, terminate
if verbose: print('change 2')
xnew = xnew2
ynew = ynew2
dold = np.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2)
dnew = np.sqrt((x1 - xnew) ** 2 + (y1 - ynew) ** 2)
znew = z1 + dnew / dold * (z2 - z1)
tnew = t1 + dnew / dold * (t2 - t1)
changed = True
terminate = True
xyztnew = [np.array([xnew, ynew, znew, tnew])]
elif Qn2 > Qn1: # line-sink infiltrates
if verbose: print('change 3')
xnew = xnew2
ynew = ynew2
dold = np.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2)
dnew = np.sqrt((x1 - xnew) ** 2 + (y1 - ynew) ** 2)
znew = z1 + dnew / dold * (z2 - z1) # elevation just before jump
tnew = t1 + dnew / dold * (t2 - t1)
Qbelow = (znew - aq.z[modellayer + 1]) / aq.Haq[layer] * Qn1
znew2 = aq.z[modellayer + 1] + Qbelow / Qn2 * aq.Haq[layer]
changed = True
xyztnew = [np.array([xnew, ynew, znew, tnew]), np.array([xnew, ynew, znew2, tnew])]
else: # line-sink takes part of water out
if verbose: print('change 4')
xnew = xnew2
ynew = ynew2
dold = np.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2)
dnew = np.sqrt((x1 - xnew) ** 2 + (y1 - ynew) ** 2)
znew = z1 + dnew / dold * (z2 - z1) # elevation just before jump
tnew = t1 + dnew / dold * (t2 - t1)
Qbelow = (znew - aq.z[modellayer + 1]) / aq.Haq[layer] * Qn1
if Qbelow > Qn2: # taken out
terminate = True
xyztnew = [np.array([xnew, ynew, znew, tnew])]
else:
znew2 = aq.z[modellayer + 1] + Qbelow / Qn2 * aq.Haq[layer]
xyztnew = [np.array([xnew, ynew, znew, tnew]), np.array([xnew, ynew, znew2, tnew])]
changed = True
return changed, terminate, xyztnew
class LineSinkBase(LineSinkChangeTrace, Element):
def __init__(self, model, x1=-1, y1=0, x2=1, y2=0, Qls=100.0, \
res=0, wh=1, layers=0, name='LineSinkBase', label=None, \
addtomodel=True):
Element.__init__(self, model, nparam=1, nunknowns=0, layers=layers, \
name=name, label=label)
self.nparam = len(self.layers)
self.x1 = float(x1)
self.y1 = float(y1)
self.x2 = float(x2)
self.y2 = float(y2)
self.Qls = np.atleast_1d(Qls)
self.res = float(res)
self.wh = wh
self.addtomodel = addtomodel
if self.addtomodel: self.model.add_element(self)
# self.xa,self.ya,self.xb,self.yb,self.np = np.zeros(1),np.zeros(1),np.zeros(1),np.zeros(1),np.zeros(1,'i') # needed to call bessel.circle_line_intersection
if self.model.f2py:
self.bessel = besselaesnew.besselaesnew
else:
self.bessel = besselaesnumba
def __repr__(self):
return self.name + ' from ' + str((self.x1, self.y1)) + ' to ' + str(
(self.x2, self.y2))
def initialize(self):
self.xc = np.array([0.5 * (self.x1 + self.x2)])
self.yc = np.array([0.5 * (self.y1 + self.y2)])
self.ncp = 1
self.z1 = self.x1 + 1j * self.y1
self.z2 = self.x2 + 1j * self.y2
self.L = np.abs(self.z1 - self.z2)
self.theta_norm_out = np.arctan2(self.y2 - self.y1,
self.x2 - self.x1) + np.pi / 2
self.order = 0 # This is for uniform strength only
self.aq = self.model.aq.find_aquifer_data(self.xc, self.yc)
if self.addtomodel: self.aq.add_element(self)
self.parameters = np.empty((self.nparam, 1))
self.parameters[:, 0] = self.Qls / self.L
if self.wh == 'H':
self.wh = self.aq.Haq[self.layers]
elif self.wh == '2H':
self.wh = 2.0 * self.aq.Haq[self.layers]
elif np.isscalar(self.wh):
self.wh = self.wh * np.ones(self.nlayers)
self.resfac = self.aq.T[self.layers] * self.res / self.wh
def potinf(self, x, y, aq=None):
'''Can be called with only one x,y value'''
if aq is None: aq = self.model.aq.find_aquifer_data(x, y)
rv = np.zeros((self.nparam, aq.naq))
if aq == self.aq:
pot = np.zeros(aq.naq)
pot[:] = self.bessel.potbeslsho(float(x), float(y), self.z1, self.z2, aq.lab, 0,
aq.ilap, aq.naq)
rv[:] = self.aq.coef[self.layers] * pot
return rv
def disvecinf(self, x, y, aq=None):
'''Can be called with only one x,y value'''
if aq is None: aq = self.model.aq.find_aquifer_data(x, y)
rv = np.zeros((2, self.nparam, aq.naq))
if aq == self.aq:
qxqy = np.zeros((2, aq.naq))
qxqy[:, :] = self.bessel.disbeslsho(float(x), float(y), self.z1, self.z2, aq.lab,
0, aq.ilap, aq.naq)
rv[0] = self.aq.coef[self.layers] * qxqy[0]
rv[1] = self.aq.coef[self.layers] * qxqy[1]
return rv
def discharge(self):
# returns the discharge in each layer
Q = np.zeros(self.aq.naq)
Q[self.layers] = self.parameters[:, 0] * self.L
return Q
def plot(self):
plt.plot([self.x1, self.x2], [self.y1, self.y2], 'k')
class HeadLineSinkZero(LineSinkBase, HeadEquation):
def __init__(self, model, x1=-1, y1=0, x2=1, y2=0, hls=1.0, \
res=0, wh=1, layers=0, label=None, addtomodel=True):
self.storeinput(inspect.currentframe())
LineSinkBase.__init__(self, model, x1, y1, x2, y2, Qls=0, \
res=res, wh=wh, layers=layers,
name='HeadLineSink', label=label, \
addtomodel=addtomodel)
self.hc = np.atleast_1d(float(hls))
self.nunknowns = self.nparam
def initialize(self):
LineSinkBase.initialize(self)
self.pc = self.hc * self.aq.T[self.layers] # Needed in solving
def setparams(self, sol):
self.parameters[:, 0] = sol
class LineSinkHoBase(LineSinkChangeTrace, Element):
def __init__(self, model, x1=-1, y1=0, x2=1, y2=0, \
Qls=0.0, layers=0, order=0, name='LineSinkHoBase', \
label=None, addtomodel=True, aq=None, zcinout=None):
Element.__init__(self, model, nparam=1, nunknowns=0, layers=layers, \
name=name, label=label)
self.x1 = float(x1)
self.y1 = float(y1)
self.x2 = float(x2)
self.y2 = float(y2)
self.Qls = np.atleast_1d(Qls)
self.order = order
self.nparam = self.nlayers * (self.order + 1)
self.addtomodel = addtomodel
if addtomodel: self.model.add_element(self)
self.aq = aq
self.zcinout = zcinout
if self.model.f2py:
self.bessel = besselaesnew.besselaesnew
else:
self.bessel = besselaesnumba
def __repr__(self):
return self.name + ' from ' + str((self.x1, self.y1)) + ' to ' + str(
(self.x2, self.y2))
def initialize(self):
self.ncp = self.order + 1
self.z1 = self.x1 + 1j * self.y1
self.z2 = self.x2 + 1j * self.y2
self.L = np.abs(self.z1 - self.z2)
self.theta_norm_out = np.arctan2(self.y2 - self.y1,
self.x2 - self.x1) + np.pi / 2.0 # changed minus to plus
self.cosnorm = np.cos(self.theta_norm_out) * np.ones(self.ncp)
self.sinnorm = np.sin(self.theta_norm_out) * np.ones(self.ncp)
self.strengthinf = strengthinf_controlpoints(self.ncp, self.nlayers) # array of ncp by nlayers * (order + 1)
#
self.xc, self.yc = controlpoints(self.ncp, self.z1, self.z2, eps=0)
if self.zcinout is not None:
self.xcin, self.ycin = controlpoints(self.ncp, self.zcinout[0],
self.zcinout[1], eps=0)
self.xcout, self.ycout = controlpoints(self.ncp, self.zcinout[2],
self.zcinout[3], eps=0)
else:
self.xcin, self.ycin = controlpoints(self.ncp, self.z1, self.z2,
eps=1e-6)
self.xcout, self.ycout = controlpoints(self.ncp, self.z1, self.z2,
eps=-1e-6)
if self.aq is None:
self.aq = self.model.aq.find_aquifer_data(self.xc[0], self.yc[0])
if self.addtomodel:
self.aq.add_element(self)
self.parameters = np.empty((self.nparam, 1))
# Not sure if that needs to be here
self.parameters[:, 0] = self.Qls / self.L
def potinf(self, x, y, aq=None):
'''Can be called with only one x,y value
Returns array(nparam, self.aq.naq) with order
order 0, layer[0]
order 0, layer[1]
...
order 1, layer[0]
order 1, layer[1]
etc
'''
if aq is None: aq = self.model.aq.find_aquifer_data(x, y)
rv = np.zeros((self.nparam, aq.naq))
if aq == self.aq:
# clever way of using a reshaped rv here
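# (rv is freshly allocated and contiguous, so the reshape below returns a view;
# filling potrv therefore also fills rv in the order documented in the docstring)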
potrv = rv.reshape((self.order + 1, self.nlayers, aq.naq))
pot = np.zeros((self.order + 1, aq.naq))
pot[:, :] = self.bessel.potbeslsv(float(x), float(y), self.z1, self.z2, aq.lab,
self.order, aq.ilap, aq.naq)
potrv[:] = self.aq.coef[self.layers] * pot[:, np.newaxis, :]
return rv
def disvecinf(self, x, y, aq=None):
'''Can be called with only one x,y value
Returns array(nparam, self.aq.naq) with order
order 0, layer[0]
order 0, layer[1]
...
order 1, layer[0]
order 1, layer[1]
etc
'''
if aq is None: aq = self.model.aq.find_aquifer_data(x, y)
rv = np.zeros((2, self.nparam, aq.naq))
if aq == self.aq:
qxqyrv = rv.reshape((2, self.order + 1, self.nlayers, aq.naq))
qxqy = np.zeros((2 * (self.order + 1), aq.naq))
qxqy[:, :] = self.bessel.disbeslsv(float(x), float(y), self.z1, self.z2, aq.lab,
self.order, aq.ilap, aq.naq)
qxqyrv[0, :] = self.aq.coef[self.layers] * qxqy[:self.order + 1,
np.newaxis, :]
qxqyrv[1, :] = self.aq.coef[self.layers] * qxqy[self.order + 1:,
np.newaxis, :]
return rv
def plot(self):
plt.plot([self.x1, self.x2], [self.y1, self.y2], 'k')
def dischargeinf(self):
# returns the unit contribution to the discharge in each layer
# array of length nunknowns
Qdisinf = np.zeros((self.order + 1, self.nlayers))
# Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
"""
Sparse Regression
=================
We demonstrate how to do (fully Bayesian) sparse linear regression using the
approach described in [1]. This approach is particularly suitable for situations
with many feature dimensions (large P) but not too many datapoints (small N).
In particular we consider a quadratic regressor of the form:
.. math::
f(X) = \\text{constant} + \\sum_i \\theta_i X_i + \\sum_{i<j} \\theta_{ij} X_i X_j + \\text{observation noise}
**References:**
1. <NAME>, <NAME>, <NAME>, <NAME> (2019),
"The Kernel Interaction Trick: Fast Bayesian Discovery of Pairwise Interactions in High Dimensions",
(https://arxiv.org/abs/1905.06501)
"""
import argparse
import itertools
import os
import time
import numpy as np
import jax
from jax import vmap
import jax.numpy as jnp
import jax.random as random
from jax.scipy.linalg import cho_factor, cho_solve, solve_triangular
import numpyro
import numpyro.distributions as dist
from numpyro.infer import MCMC, NUTS
def dot(X, Z):
return jnp.dot(X, Z[..., None])[..., 0]
# The kernel that corresponds to our quadratic regressor.
def kernel(X, Z, eta1, eta2, c, jitter=1.0e-4):
eta1sq = jnp.square(eta1)
eta2sq = jnp.square(eta2)
k1 = 0.5 * eta2sq * jnp.square(1.0 + dot(X, Z))
k2 = -0.5 * eta2sq * dot(jnp.square(X), jnp.square(Z))
k3 = (eta1sq - eta2sq) * dot(X, Z)
k4 = jnp.square(c) - 0.5 * eta2sq
if X.shape == Z.shape:
k4 += jitter * jnp.eye(X.shape[0])
return k1 + k2 + k3 + k4
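# A quick expansion of the four terms above gives
#   k(x, z) = c**2 + eta1**2 * sum_i x_i z_i + eta2**2 * sum_{i<j} (x_i x_j)(z_i z_j),
# i.e. an inner product of linear and pairwise-interaction features whose scales are set
# by eta1 and eta2; this is why the kernel corresponds to the quadratic regressor in the
# module docstring. A minimal sanity-check sketch follows; the helper name is illustrative
# and it is not called anywhere else in this script.
def _kernel_sanity_check():
    # with P = 1 there are no pairwise terms, so k = c**2 + eta1**2 * x * z = 1 + 2 * 3 = 7
    X = jnp.array([[2.0]])
    Z = jnp.array([[3.0]])
    k = kernel(X, Z, eta1=1.0, eta2=1.0, c=1.0, jitter=0.0)
    assert jnp.allclose(k, 7.0)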
# Most of the model code is concerned with constructing the sparsity inducing prior.
def model(X, Y, hypers):
S, P, N = hypers['expected_sparsity'], X.shape[1], X.shape[0]
sigma = numpyro.sample("sigma", dist.HalfNormal(hypers['alpha3']))
phi = sigma * (S / jnp.sqrt(N)) / (P - S)
eta1 = numpyro.sample("eta1", dist.HalfCauchy(phi))
msq = numpyro.sample("msq", dist.InverseGamma(hypers['alpha1'], hypers['beta1']))
xisq = numpyro.sample("xisq", dist.InverseGamma(hypers['alpha2'], hypers['beta2']))
eta2 = jnp.square(eta1) * jnp.sqrt(xisq) / msq
lam = numpyro.sample("lambda", dist.HalfCauchy(jnp.ones(P)))
kappa = jnp.sqrt(msq) * lam / jnp.sqrt(msq + jnp.square(eta1 * lam))
# compute kernel
kX = kappa * X
k = kernel(kX, kX, eta1, eta2, hypers['c']) + sigma ** 2 * jnp.eye(N)
assert k.shape == (N, N)
# sample Y according to the standard gaussian process formula
numpyro.sample("Y", dist.MultivariateNormal(loc=jnp.zeros(X.shape[0]), covariance_matrix=k),
obs=Y)
# Compute the mean and variance of coefficient theta_i (where i = dimension) for a
# MCMC sample of the kernel hyperparameters (eta1, xisq, ...).
# Compare to theorem 5.1 in reference [1].
def compute_singleton_mean_variance(X, Y, dimension, msq, lam, eta1, xisq, c, sigma):
P, N = X.shape[1], X.shape[0]
probe = jnp.zeros((2, P))
probe = jax.ops.index_update(probe, jax.ops.index[:, dimension], jnp.array([1.0, -1.0]))
eta2 = jnp.square(eta1) * jnp.sqrt(xisq) / msq
kappa = jnp.sqrt(msq) * lam / jnp.sqrt(msq + jnp.square(eta1 * lam))
kX = kappa * X
kprobe = kappa * probe
k_xx = kernel(kX, kX, eta1, eta2, c) + sigma ** 2 * jnp.eye(N)
k_xx_inv = jnp.linalg.inv(k_xx)
k_probeX = kernel(kprobe, kX, eta1, eta2, c)
k_prbprb = kernel(kprobe, kprobe, eta1, eta2, c)
vec = jnp.array([0.50, -0.50])
mu = jnp.matmul(k_probeX, jnp.matmul(k_xx_inv, Y))
mu = jnp.dot(mu, vec)
var = k_prbprb - jnp.matmul(k_probeX, jnp.matmul(k_xx_inv, jnp.transpose(k_probeX)))
var = jnp.matmul(var, vec)
var = jnp.dot(var, vec)
return mu, var
# Compute the mean and variance of coefficient theta_ij for a MCMC sample of the
# kernel hyperparameters (eta1, xisq, ...). Compare to theorem 5.1 in reference [1].
def compute_pairwise_mean_variance(X, Y, dim1, dim2, msq, lam, eta1, xisq, c, sigma):
P, N = X.shape[1], X.shape[0]
probe = jnp.zeros((4, P))
probe = jax.ops.index_update(probe, jax.ops.index[:, dim1], jnp.array([1.0, 1.0, -1.0, -1.0]))
probe = jax.ops.index_update(probe, jax.ops.index[:, dim2], jnp.array([1.0, -1.0, 1.0, -1.0]))
eta2 = jnp.square(eta1) * jnp.sqrt(xisq) / msq
kappa = jnp.sqrt(msq) * lam / jnp.sqrt(msq + jnp.square(eta1 * lam))
kX = kappa * X
kprobe = kappa * probe
k_xx = kernel(kX, kX, eta1, eta2, c) + sigma ** 2 * jnp.eye(N)
k_xx_inv = jnp.linalg.inv(k_xx)
k_probeX = kernel(kprobe, kX, eta1, eta2, c)
k_prbprb = kernel(kprobe, kprobe, eta1, eta2, c)
vec = jnp.array([0.25, -0.25, -0.25, 0.25])
mu = jnp.matmul(k_probeX, jnp.matmul(k_xx_inv, Y))
mu = jnp.dot(mu, vec)
var = k_prbprb - jnp.matmul(k_probeX, jnp.matmul(k_xx_inv, jnp.transpose(k_probeX)))
var = jnp.matmul(var, vec)
var = jnp.dot(var, vec)
return mu, var
# Sample coefficients theta from the posterior for a given MCMC sample.
# The first P returned values are {theta_1, theta_2, ...., theta_P}, while
# the remaining values are {theta_ij} for i,j in the list `active_dims`,
# sorted so that i < j.
def sample_theta_space(X, Y, active_dims, msq, lam, eta1, xisq, c, sigma):
P, N, M = X.shape[1], X.shape[0], len(active_dims)
# the total number of coefficients we return
num_coefficients = P + M * (M - 1) // 2
probe = jnp.zeros((2 * P + 2 * M * (M - 1), P))
vec = jnp.zeros((num_coefficients, 2 * P + 2 * M * (M - 1)))
start1 = 0
start2 = 0
for dim in range(P):
probe = jax.ops.index_update(probe, jax.ops.index[start1:start1 + 2, dim], jnp.array([1.0, -1.0]))
vec = jax.ops.index_update(vec, jax.ops.index[start2, start1:start1 + 2], jnp.array([0.5, -0.5]))
start1 += 2
start2 += 1
for dim1 in active_dims:
for dim2 in active_dims:
if dim1 >= dim2:
continue
probe = jax.ops.index_update(probe, jax.ops.index[start1:start1 + 4, dim1],
jnp.array([1.0, 1.0, -1.0, -1.0]))
probe = jax.ops.index_update(probe, jax.ops.index[start1:start1 + 4, dim2],
jnp.array([1.0, -1.0, 1.0, -1.0]))
vec = jax.ops.index_update(vec, jax.ops.index[start2, start1:start1 + 4],
jnp.array([0.25, -0.25, -0.25, 0.25]))
start1 += 4
start2 += 1
eta2 = jnp.square(eta1) * jnp.sqrt(xisq) / msq
kappa = jnp.sqrt(msq) * lam / jnp.sqrt(msq + jnp.square(eta1 * lam))
kX = kappa * X
kprobe = kappa * probe
k_xx = kernel(kX, kX, eta1, eta2, c) + sigma ** 2 * jnp.eye(N)
L = cho_factor(k_xx, lower=True)[0]
k_probeX = kernel(kprobe, kX, eta1, eta2, c)
k_prbprb = kernel(kprobe, kprobe, eta1, eta2, c)
mu = jnp.matmul(k_probeX, cho_solve((L, True), Y))
mu = jnp.sum(mu * vec, axis=-1)
Linv_k_probeX = solve_triangular(L, jnp.transpose(k_probeX), lower=True)
covar = k_prbprb - jnp.matmul(jnp.transpose(Linv_k_probeX), Linv_k_probeX)
covar = jnp.matmul(vec, jnp.matmul(covar, jnp.transpose(vec)))
# sample from N(mu, covar)
L = jnp.linalg.cholesky(covar)
sample = mu + jnp.matmul(L, np.random.randn(num_coefficients))
return sample
# Helper function for doing HMC inference
def run_inference(model, args, rng_key, X, Y, hypers):
start = time.time()
kernel = NUTS(model)
mcmc = MCMC(kernel, args.num_warmup, args.num_samples, num_chains=args.num_chains,
progress_bar=False if "NUMPYRO_SPHINXBUILD" in os.environ else True)
mcmc.run(rng_key, X, Y, hypers)
mcmc.print_summary()
print('\nMCMC elapsed time:', time.time() - start)
return mcmc.get_samples()
# Get the mean and variance of a gaussian mixture
def gaussian_mixture_stats(mus, variances):
mean_mu = jnp.mean(mus)
mean_var = jnp.mean(variances) + jnp.mean(jnp.square(mus)) - jnp.square(mean_mu)
return mean_mu, mean_var
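# Worked example: for an equally weighted mixture of N(0, 1) and N(2, 1) the mean is
#   0.5 * 0 + 0.5 * 2 = 1.0
# and, by the law of total variance,
#   E[var] + E[mu**2] - (E[mu])**2 = 1.0 + 2.0 - 1.0 = 2.0,
# which is what gaussian_mixture_stats(jnp.array([0.0, 2.0]), jnp.array([1.0, 1.0])) returns.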
# Create artificial regression dataset where only S out of P feature
# dimensions contain signal and where there is a single pairwise interaction
# between the first and second dimensions.
def get_data(N=20, S=2, P=10, sigma_obs=0.05):
assert S < P and P > 1 and S > 0
np.random.seed(0)
X = np.random.randn(N, P)
import numpy as np
import joblib
from .base import Model
from pathlib import Path
BANDNAMES = ["B02", "B03", "B04", "B05", "B06", "B07", "B08", "B8A", "B11", "B12"]
def cosine(X, Y):
"""
Cosine distance between `X` and `Y` calculated over axis=2.
"""
nX = 1 / np.sqrt(np.sum(np.square(X), axis=2))
nY = 1 / np.sqrt(np.sum(np.square(Y), axis=2))
XX = np.einsum("ij,ijk->ijk", nX, X)
YY = np.einsum("ij,ijk->ijk", nY, Y)
return 1.0 - np.einsum("ijk,ijk->ij", XX, YY)
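# Usage sketch (illustrative only): X and Y are stacks of per-pixel spectra of shape
# (rows, cols, bands), and the cosine distance of any spectrum to itself is 0.
def _cosine_self_distance_example():
    X = np.random.rand(4, 5, len(BANDNAMES))
    d = cosine(X, X)  # shape (4, 5)
    assert np.allclose(d, 0.0)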
import numpy as np
from core_parallel.communicators import Communicators
from mpi4py import MPI
import scipy as sc
class LinearHelpers(Communicators):
def __init__(self):
Communicators.__init__(self)
def __next_alpha__(self, idx):
if idx + 1 < len(self.alphas) and self.time_intervals > 1:
idx += 1
return idx
def __get_v__(self, t_start):
v = np.zeros((self.rows_loc, self.cols_loc), dtype=complex)
shift = self.rank_row * self.cols_loc
# if we have spatial parallelization
if self.frac > 1:
for j in range(self.cols_loc):
for k in range(self.time_points):
v[:, j] += self.dt * self.Q[self.rank_subcol_alternating, k] * self.bpar(t_start + self.t[k] + (shift + j) * self.dt)
# case without spatial parallelization
else:
for i in range(self.Frac):
for j in range(self.cols_loc):
for k in range(self.time_points):
v[i * self.global_size_A:(i+1)*self.global_size_A, j] += self.dt * self.Q[i + self.Frac * self.rank_col, k] * self.bpar(t_start + self.t[k] + (shift + j) * self.dt)
return v
def __get_r__(self, v_loc):
r = 0
temp = 0
for j in range(self.cols_loc):
if self.rank_row == 0:
# with spatial parallelization
if self.frac != 0:
temp = np.linalg.norm(v_loc[:, j] + self.u0_loc, np.infty)
# without spatial parallelization
else:
for i in range(self.Frac):
temp = max(temp, np.linalg.norm(v_loc[i * self.global_size_A:(i+1) * self.global_size_A, j] + self.u0_loc, np.infty))
else:
temp = np.linalg.norm(v_loc[:, j], np.infty)
r = max(r, temp)
if self.size > 1:
time_beg = MPI.Wtime()
temp = self.comm.allreduce(r, op=MPI.MAX)
self.communication_time += MPI.Wtime() - time_beg
return temp
else:
return r
# fft
def __get_fft__(self, w_loc, a):
if self.time_intervals == 1:
return w_loc, ['0']
g_loc = a ** (self.rank_row / self.time_intervals) / self.time_intervals * w_loc # scale
n = int(np.log2(self.time_intervals))
P = format(self.rank_row, 'b').zfill(n) # binary of the rank in string
R = P[::-1] # reversed binary in string, index that the proc will have after ifft
we = np.exp(-2 * np.pi * 1j / self.time_intervals)
# stages of butterfly
for k in range(n):
p = self.time_intervals // 2 ** (k + 1)
r = int(R, 2) % 2 ** (k + 1) - 2 ** k
scalar = we ** (r * p)
factor = 1
if P[k] == '1':
factor = -1
if scalar != 1: # multiply if the factor is != 1
g_loc *= scalar
# make a new string and an int from it, a proc to communicate with
comm_with = list(P)
if comm_with[k] == '1':
comm_with[k] = '0'
else:
comm_with[k] = '1'
comm_with = int(''.join(comm_with), 2)
# now communicate
time_beg = MPI.Wtime()
req = self.comm_row.isend(g_loc, dest=comm_with, tag=k)
gr = self.comm_row.recv(source=comm_with, tag=k)
req.Wait()
self.communication_time += MPI.Wtime() - time_beg
# glue the info
g_loc = gr + factor * g_loc
return g_loc, [R]
def __get_w__(self, a, v_loc, v1=None):
w_loc = v_loc.copy()
if v1 is not None:
# with spatial parallelization
if self.frac > 1:
w_loc[:, 0] = v1 + self.u0_loc - a * self.u_last_loc
# without spatial parallelization
else:
for i in range(self.Frac):
w_loc[i * self.global_size_A:(i+1) * self.global_size_A, 0] = self.u0_loc - a * self.u_last_loc
w_loc[:, 0] += v1
return w_loc
def __step1__(self, Zinv, g_loc):
h_loc = np.empty_like(g_loc, dtype=complex)
import numpy as np
from sklearn.utils.testing import assert_array_almost_equal
from nilearn.input_data import NiftiMasker
import nibabel
import pytest
from fmralign.tests.utils import random_niimg
from fmralign._utils import _make_parcellation, voxelwise_correlation
def test_make_parcellation():
# make_parcellation is built on Nilearn, which already has several tests for its Parcellation class;
# here we just check that the API call is right on a simple example
img, mask_img = random_niimg((7, 6, 8, 5))
masker = NiftiMasker(mask_img=mask_img).fit()
n_pieces = 2
# create a predefined parcellation
labels_img = nibabel.Nifti1Image(
np.hstack([np.ones((7, 3, 8)), 2 * np.ones((7, 3, 8))
import matplotlib.pyplot as plt
import numpy as np
from math import sqrt, pi, pow, fabs
#======== how to use ==========
# wnn = WNN()
# wnn.load_first_function()
# wnn.train()
class WNN(object):
def __init__(self, eta_wnn=0.008, epoch_max=20000, Ni_wnn=1, Nh_wnn=40, Ns=1, plot_flag=False):
self.Ni_wnn = Ni_wnn
self.eta_wnn = eta_wnn
self.Nh_wnn = Nh_wnn
self.Aini = 0.01
self.epoch_max = epoch_max
self.Ns = Ns
self.plot_flag = plot_flag
def load_first_function(self, X, Y):
self.N = X.shape[0]
xmax = fabs(np.max(X))
# self.ymax = fabs(np.max(Y))
self.X_train = X / xmax
self.d = Y #/ ymax
def sig_dev2(self, theta):
return 2 * (1 / (1 + np.exp(-theta)))**3 - 3 * (1 / (1 + np.exp(-theta)))**2 + (1 / (1 + np.exp(-theta)))
def sig_dev3(self, theta):
return -6 * (1 / (1 + np.exp(-theta)))**4 + 12 * (1 / (1 + np.exp(-theta)))**3 - 7 * (1 / (1 + np.exp(-theta)))**2 + (1 / (1 + np.exp(-theta)))
def train(self):
# Initializing the weights
self.A = np.random.rand(self.Ns, self.Nh_wnn) * self.Aini
# Initializing the centers
self.t = np.zeros((1, self.Nh_wnn))
idx = np.random.permutation(self.Nh_wnn)
for j in range(self.Nh_wnn):
self.t[0,j] = self.d[idx[j]]
# Initializing widths
self.R = abs(np.max(self.t)
__author__ = 'ikibardin'
import os
import multiprocessing
from typing import List, Dict
import numpy as np
import pandas as pd
import cv2
import skimage.io
import torch
from torch.utils.data import Dataset
import albumentations as A
from albumentations.pytorch import ToTensor
from power_fist.common_utils import SegmentationDataInterface, saver, transforms
IMAGENET_MEAN = np.array([0.485, 0.456, 0.406])
IMAGENET_STD = np.array([0.229, 0.224, 0.225])
def post_transform() -> A.Compose:
return A.Compose([
A.Normalize(mean=IMAGENET_MEAN, std=IMAGENET_STD),
ToTensor(num_classes=1, sigmoid=False),
])
def hard_transform(crop_size: int, pad_height: int, pad_width: int) -> A.Compose:
return A.Compose(
[
A.ShiftScaleRotate(
shift_limit=0.01,
scale_limit=(-0.2, 0.2),
rotate_limit=15,
p=0.5,
border_mode=cv2.BORDER_REPLICATE,
),
A.RandomCrop(crop_size, crop_size),
A.Cutout(p=0.5),
A.HorizontalFlip(p=0.5),
A.VerticalFlip(p=0.5),
post_transform(),
]
)
def hsv_transform(crop_size: int, pad_height: int, pad_width: int) -> A.Compose:
return A.Compose(
[
A.ShiftScaleRotate(
shift_limit=0.01,
scale_limit=(-0.2, 0.2),
rotate_limit=15,
p=0.5,
border_mode=cv2.BORDER_REPLICATE,
),
A.RandomCrop(crop_size, crop_size),
A.HueSaturationValue(p=0.5),
A.Cutout(p=0.5),
A.HorizontalFlip(p=0.5),
A.VerticalFlip(p=0.5),
post_transform(),
]
)
def hsv_no_cutout_transform(crop_size: int, pad_height: int, pad_width: int) -> A.Compose:
return A.Compose(
[
A.ShiftScaleRotate(
shift_limit=0.01,
scale_limit=(-0.2, 0.2),
rotate_limit=15,
p=0.5,
border_mode=cv2.BORDER_REPLICATE,
),
A.RandomCrop(crop_size, crop_size),
A.HueSaturationValue(p=0.5),
# A.Cutout(p=0.5),
A.HorizontalFlip(p=0.5),
A.VerticalFlip(p=0.5),
post_transform(),
]
)
def hsv_no_cutout_harder_transform(crop_size: int, pad_height: int, pad_width: int) -> A.Compose:
return A.Compose(
[
A.ShiftScaleRotate(
shift_limit=0.01,
scale_limit=(-0.2, 0.2),
rotate_limit=35,
p=0.75,
border_mode=cv2.BORDER_REPLICATE,
),
A.RandomCrop(crop_size, crop_size),
A.HueSaturationValue(p=0.5),
# A.Cutout(p=0.5),
A.HorizontalFlip(p=0.5),
A.VerticalFlip(p=0.5),
post_transform(),
]
)
def light_transform(crop_size: int, pad_height: int, pad_width: int) -> A.Compose:
return A.Compose(
[
A.Cutout(p=0.5),
A.HorizontalFlip(p=0.5),
A.VerticalFlip(p=0.5),
A.PadIfNeeded(
min_height=pad_height, min_width=pad_width, always_apply=True, p=1., border_mode=cv2.BORDER_REPLICATE,
),
post_transform(),
]
)
def valid_transform(crop_size: int, pad_height: int, pad_width: int) -> A.Compose:
return A.Compose(
[
A.PadIfNeeded(
min_height=pad_height, min_width=pad_width, always_apply=True, p=1., border_mode=cv2.BORDER_REPLICATE,
),
post_transform(),
]
)
def test_transform(**kwargs) -> A.Compose:
return valid_transform(**kwargs)
def test_tta_d4(**kwargs) -> transforms._TTAFullD4Base:
return transforms._TTAFullD4Base(final_transform=valid_transform(**kwargs))
AUGMENTATIONS = {
'heavy': {
'train': hard_transform,
'valid': valid_transform,
'test': test_transform,
},
'hsv': {
'train': hsv_transform,
'valid': valid_transform,
'test': test_transform,
},
'hsv_no_cutout': {
'train': hsv_no_cutout_transform,
'valid': valid_transform,
'test': test_transform,
},
'hsv_no_cutout_harder': {
'train': hsv_no_cutout_harder_transform,
'valid': valid_transform,
'test': test_tta_d4,
},
'light': {
'train': light_transform,
'valid': valid_transform,
'test': test_transform,
},
}
MEDIAN_SPEED_IN_BINS = [5, 15, 25, 35, 45, 55, 65]
class ImageReader:
def __init__(self, mode: str, paths_config: Dict[str, Dict[str, str]], activation: str):
self._mode = mode
assert mode in ('train', 'valid', 'test'), mode
self._paths = paths_config
assert activation in ('sigmoid', 'softmax'), activation
self._activation = activation
def load_image(self, image_id: str) -> np.ndarray:
path = self._get_path(image_id, is_mask=False)
img = cv2.imread(path)
assert img is not None, path
return img
def load_mask(self, image_id: str) -> np.ndarray:
mask_path = self._get_path(image_id, is_mask=True)
mask = skimage.io.imread(mask_path)
assert mask is not None, mask_path
if self._activation == 'sigmoid':
mask = mask[:, :, -1]
# print('Mask max: ', mask.max())
assert len(mask.shape) == 2, mask.shape
return mask
elif self._activation == 'softmax':
h, w, _ = mask.shape
probability_mask = np.zeros(shape=(len(MEDIAN_SPEED_IN_BINS), h, w), dtype=np.float32)
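# Soft labels: the annotated speed bin gets probability 0.6 and each neighbouring
# bin 0.2; at the first and last bin the missing neighbour's 0.2 is folded back
# into the annotated bin. Pixels with no annotated bin default to 1.0 on the
# slowest bin below, so every pixel's probabilities sum to 1.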
for bin_index in range(len(MEDIAN_SPEED_IN_BINS)):
bin_mask = mask[:, :, bin_index] > 127
probability_mask[bin_index][bin_mask] = 0.6
if bin_index > 0:
probability_mask[bin_index - 1][bin_mask] = 0.2
else:
probability_mask[bin_index][bin_mask] += 0.2
if bin_index < len(MEDIAN_SPEED_IN_BINS) - 1:
probability_mask[bin_index + 1][bin_mask] = 0.2
else:
probability_mask[bin_index][bin_mask] += 0.2
probability_mask[0][probability_mask.sum(axis=0) == 0.0] = 1.0
# print(probability_mask[:, probability_mask.sum(axis=0) == 2.0])
assert np.allclose(probability_mask.sum(axis=0), 1.0), \
(probability_mask.sum(axis=0).min(), probability_mask.sum(axis=0).max())
return probability_mask
else:
raise ValueError(f'Unknown activation: {self._activation}')
def _get_path(self, image_id: str, is_mask: bool = False) -> str:
dataset_paths = self._paths['dataset']
if self._mode == 'test':
assert not is_mask
path = os.path.join(dataset_paths['test_dir'], f'{image_id}.tif')
if not os.path.exists(path):
train_path = os.path.join(dataset_paths['path'], dataset_paths['images_dir'], f'{image_id}.tif')
if os.path.exists(train_path):
path = train_path
else:
raise FileNotFoundError(
f'Image not found neither in test dir `{path}` nor in train dir `{train_path}`')
elif is_mask:
path = os.path.join(dataset_paths['path'], dataset_paths['masks_dir'], f'{image_id}.tif')
else:
path = os.path.join(dataset_paths['path'], dataset_paths['images_dir'], f'{image_id}.tif')
assert os.path.exists(path), path
return path
class SpacenetDataset(Dataset):
def __init__(self, paths_config: dict, data_params: dict, df: pd.DataFrame, transform: A.Compose, mode: str):
assert mode in ('train', 'valid', 'test'), mode
self._paths = paths_config
self._data_params = data_params
self._df = df
self._transform = transform
self._mode = mode
self.data_params = data_params
self._image_reader = ImageReader(mode=mode, paths_config=paths_config, activation=data_params['activation'])
self._activation = data_params['activation']
def get_mode(self) -> str:
return self._mode
def __len__(self) -> int:
return self._df.shape[0]
def __getitem__(self, item: int) -> dict:
image_id = self._get_image_id(item)
result = {'id': image_id}
image = self._image_reader.load_image(image_id)
if self._mode == 'test':
if self._transform is not None:
image = self._transform(image=image)['image']
result['image'] = image
return result
mask = self._image_reader.load_mask(image_id)
if self._transform is not None:
if self._activation == 'sigmoid':
assert image.shape[:2] == mask.shape, (image.shape, mask.shape)
tr = self._transform(image=image, mask=mask)
image, mask = tr['image'], tr['mask']
elif self._activation == 'softmax':
assert image.shape[:2] == mask.shape[-2:], (image.shape, mask.shape)
# print('BEFORE ALBU: ', mask.min(), mask.max(), mask.mean())
tr = self._transform(image=image, masks=[mask[c] for c in range(mask.shape[0])])
image, masks_list = tr['image'], tr['masks']
mask = torch.stack(tuple(map(torch.FloatTensor, masks_list)))
# print('AFTER ALBU: ', mask.min(), mask.max(), mask.mean())
else:
raise ValueError(f'Unknown activation: {self._activation}')
# print('>>>>>> ', mask.max())
result['image'] = image
result['target'] = mask
return result
def _get_image_id(self, item: int) -> str:
if isinstance(item, torch.Tensor):
item = item.item()
return self._df.loc[item, 'id']
SAVER_POOL = None
class SpacenetPredictionsSaver(saver.PredictionSaverInterface):
def __init__(self, destination: str, paths_config: Dict[str, Dict[str, str]], crop_height: int, crop_width: int,
activation: str):
super().__init__()
self._image_reader = ImageReader(mode='test', paths_config=paths_config, activation=activation)
self._dir = destination
assert activation in ('sigmoid', 'softmax'), activation
self._activation = activation
os.makedirs(self._dir, exist_ok=True)
self._crop = A.CenterCrop(height=crop_height, width=crop_width, always_apply=True, p=1.)
self._tta_size = 8
global SAVER_POOL
SAVER_POOL = multiprocessing.Pool(multiprocessing.cpu_count())
def add(self, image_ids: List[str], predictions: torch.Tensor):
predictions = self._prepare_predictions(predictions)
results = []
for id_, pred in zip(image_ids, predictions):
results.append(SAVER_POOL.apply_async(self._add_single, (id_, pred)))
for res in results:
res.get()
def _add_single(self, image_id: str, predictions: np.ndarray):
# print('IN')
if self._activation == 'sigmoid':
mask = np.transpose((predictions * 255.0).astype(np.uint8))
mask = self._crop(image=mask)['image']
h, w, _ = self._image_reader.load_image(image_id=image_id).shape
assert mask.shape == (h, w), mask.shape
cv2.imwrite(
os.path.join(self._dir, f'{image_id}.png'),
mask,
)
elif self._activation == 'softmax':
# mask = (predictions * 255.0).astype(np.uint8)
mask = self._crop(image=predictions)['image']
mask = np.transpose(mask, (2, 0, 1))
h, w, _ = self._image_reader.load_image(image_id=image_id).shape
assert mask.shape == (7, h, w), mask.shape
speed_mask = self._get_speed_mask(mask)
speed_mask = (speed_mask / 65.0 * 255.0).astype(np.uint8)
cv2.imwrite(
os.path.join(self._dir, f'{image_id}.png'),
speed_mask,
)
else:
raise ValueError(f'Unknown activation: {self._activation}')
def save(self):
pass
def _prepare_predictions(self, predictions: torch.Tensor) -> np.ndarray:
if self._activation == 'sigmoid':
predictions = torch.sigmoid(predictions)
_, c, h, w = predictions.size()
predictions = predictions.data.view(-1, self._tta_size, c, h, w).cpu().numpy()
predictions = predictions.transpose((0, 1, 3, 4, 2)).squeeze(-1)
masks = []
for index in range(predictions.shape[0]): # FIXME: slow ?
mask = self._average_tta_pack(predictions[index])
# mask = cv2.flip(mask, flipCode=1)
# mask = cv2.flip(mask, flipCode=0)
mask = mask.T
masks.append(mask)
return np.array(masks)
elif self._activation == 'softmax':
predictions = torch.softmax(predictions, dim=1)
_, c, h, w = predictions.size()
predictions = predictions.data.view(-1, self._tta_size, c, h, w).cpu().numpy()
predictions = predictions.transpose((0, 1, 3, 4, 2))
masks = []
for index in range(predictions.shape[0]): # FIXME: slow ?
mask = self._average_tta_pack(predictions[index])
# mask = cv2.flip(mask, flipCode=1)
# mask = cv2.flip(mask, flipCode=0)
masks.append(mask)
return np.array(masks)
else:
raise ValueError(f'Unknown activation: {self._activation}')
def _average_tta_pack(self, d4_pack: np.ndarray) -> np.ndarray:
# print('d4 pack ', d4_pack.shape)
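# d4_pack holds the eight D4 test-time augmentations of one tile (identity, the
# three rotations, and the horizontally flipped versions of those four); each
# prediction is mapped back to the reference orientation before averaging.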
norm_orient = [
d4_pack[0],
self._get_rotated(d4_pack[1], 270),
self._get_rotated(d4_pack[2], 180),
self._get_rotated(d4_pack[3], 90),
cv2.flip(d4_pack[4], 1),
cv2.flip(self._get_rotated(d4_pack[5], 270), 1),
cv2.flip(self._get_rotated(d4_pack[6], 180), 1),
cv2.flip(self._get_rotated(d4_pack[7], 90), 1),
]
return np.mean(np.array(norm_orient)
import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from scripts import config
if not config.DEBUG:
# from woodev.apis import init_detector, inference_detector
import pycocotools.mask as maskUtils
import mmcv
from PIL import Image
import cv2
import numpy as np
# This function was copied from the interface code in mmdet.apis and modified here to return the image instead of saving it
# TODO: merge this method with the one in BaseDetector
def new_show_result(img, result, class_names, score_thr=0.3, out_file=None):
"""Visualize the detection results on the image.
Args:
img (str or np.ndarray): Image filename or loaded image.
result (tuple[list] or list): The detection result, can be either
(bbox, segm) or just bbox.
class_names (list[str] or tuple[str]): A list of class names.
score_thr (float): The threshold to visualize the bboxes and masks.
out_file (str, optional): If specified, the visualization result will
be written to the out file instead of shown in a window.
"""
assert isinstance(class_names, (tuple, list))
img = mmcv.imread(img)
if isinstance(result, tuple):
bbox_result, segm_result = result
else:
bbox_result, segm_result = result, None
bboxes = np.vstack(bbox_result)
labels = [
np.full(bbox.shape[0], i, dtype=np.int32)
for i, bbox in enumerate(bbox_result)
]
labels = np.concatenate(labels)
# print(labels)
# draw segmentation masks
bbox_color = (0, 255, 0)
pixels_output = []
if segm_result is not None:
segms = mmcv.concat_list(segm_result)
inds = np.where(bboxes[:, -1] > score_thr)[0]
for i in inds:
label = labels[i]
if label > 1:
continue
color_mask = np.random.randint(0, 256, (1, 3), dtype=np.uint8)
mask = maskUtils.decode(segms[i]).astype(np.bool)
img[mask] = img[mask] * 0.5 + color_mask * 0.5
bbox_int = bboxes[i].astype(np.int32)
left_top = (bbox_int[0], bbox_int[1])
right_bottom = (bbox_int[2], bbox_int[3])
cv2.rectangle(
img, left_top, right_bottom, bbox_color, thickness=1)
#print('label_names:', class_names)
label_loc = (bbox_int[0]//2 + bbox_int[2]//2 - 30, bbox_int[1]//2 + bbox_int[3]//2 - 8)
label_pixels = (bbox_int[0]//2 + bbox_int[2]//2 - 20, bbox_int[1]//2 + bbox_int[3]//2 + 8)
label_text = class_names[
label] if class_names is not None else 'cls {} {}'.format(label, i)
label_text = label_text + ' ' + str(i + 1)
cv2.putText(img, label_text, label_loc,
cv2.FONT_HERSHEY_COMPLEX, 0.5, (255, 0, 0), 1)
pixels = len(mask[mask == True])
pixels_text = label_text + ':{}'.format(pixels)
#cv2.putText(img, pixels_txt, label_pixels,
#cv2.FONT_HERSHEY_COMPLEX, 0.4, (255, 0, 0))
#label_text = 'num|{:d}'.format(len(inds))
#cv2.putText(img, label_text, (20, 20),
# cv2.FONT_HERSHEY_COMPLEX, 0.5, (255, 0, 0))
pixels_output.append(pixels_text)
img = Image.fromarray(np.uint8(img))
if out_file==None:
#img.show()
return img, inds, pixels_output
else:
img.save(out_file)
cv2.waitKey(0)
dir(img)
# Get the raw depth-map data from the given file path.
# The depth file name has a fixed format and can be parsed from the name of the original image.
def get_depth_data(file_path):
f = open(file_path,'rb')
raw_data = f.read()
f.close()
depth_data = []
for i in range(int(len(raw_data)/2)):
depth_data.append(int.from_bytes(raw_data[i*2:i*2+2],byteorder='little'))
depth_data = np.array(depth_data)
depth_data = np.resize(depth_data, (360,640))
depth_data = depth_data.transpose([1,0])
return depth_data
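# A shorter equivalent sketch using NumPy's binary reader, assuming the .raw file
# holds exactly 360 * 640 little-endian uint16 samples as the loop above expects
# (the values come back as uint16 rather than Python ints):
def get_depth_data_np(file_path):
    depth = np.fromfile(file_path, dtype='<u2')  # 16-bit little-endian integers
    return depth.reshape(360, 640).T  # transposed to (640, 360), matching get_depth_data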
# Get the fitted ellipse parameters for one mask
# input: a binary image of a single segmentation mask
# return: list [ellipse object, minor-axis line segment, major-axis line segment]
def get_ellipse(mask):
pass
mask = np.array(mask,dtype=np.uint8)
# find boundary points for the fit
contous,_ = cv2.findContours(mask,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE)
cnts = contous[0]
#根据边界点来拟合椭圆
ellipse_res = cv2.fitEllipse(cnts)
# get the major and minor axes of the ellipse
center_p = ellipse_res[0]
center_x = center_p[0]
center_y = center_p[1]
axisa = ellipse_res[1][0]
axisb = ellipse_res[1][1]
r_angel = ellipse_res[2]
theta = r_angel * 3.14/180
a_p1_x = int(center_x + axisa*np.cos(theta)/2)
a_p1_y = int(center_y + axisa*np.sin(theta)/2)
a_p2_x = int(center_x - axisa*np.cos(theta)/2)
a_p2_y = int(center_y - axisa*np.sin(theta)/2)
b_p1_x = int(center_x + axisb*np.cos(theta+3.14/2)/2)
b_p1_y = int(center_y + axisb*np.sin(theta+3.14/2)/2)
b_p2_x = int(center_x - axisb*np.cos(theta+3.14/2)/2)
b_p2_y = int(center_y - axisb*np.sin(theta+3.14/2)/2)
line_a = [a_p1_x,a_p1_y,a_p2_x,a_p2_y]
line_b = [b_p1_x,b_p1_y,b_p2_x,b_p2_y]
return [ellipse_res,line_a,line_b]
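# Usage sketch (illustrative only): fit an ellipse to a synthetic filled circle and
# draw the fitted ellipse and its two axes on a copy of the mask.
def _ellipse_fit_example():
    mask = np.zeros((200, 200), dtype=np.uint8)
    cv2.circle(mask, (100, 100), 60, 1, thickness=-1)  # filled disc as a fake segmentation mask
    ellipse_res, line_a, line_b = get_ellipse(mask)
    canvas = cv2.cvtColor(mask * 255, cv2.COLOR_GRAY2BGR)
    cv2.ellipse(canvas, ellipse_res, (255, 0, 0), 2)
    cv2.line(canvas, (line_a[0], line_a[1]), (line_a[2], line_a[3]), (0, 255, 0), 1)
    cv2.line(canvas, (line_b[0], line_b[1]), (line_b[2], line_b[3]), (0, 0, 255), 1)
    return canvas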
# Save the detection results in CSV format
# input: detect_result  list of detection results
#        save_img_path  path where the original image is saved; the other output file paths are derived from it
# The detection result list is:
# [org_img det_img mask_final box_result calcul_result ellipse_result,mask_index_range]
# original image, detection image, mask images, detection boxes, computed major/minor axes, ellipse fit results, range of mask_index values
# return: writes the CSV file plus the corresponding original image, detection image, and mask images
def save_result(detect_result,save_img_path):
org_img,det_img,mask_final_org,mask_final,box_result,calcul_result,ellipse_result,mask_index_range = detect_result
det_out_file = save_img_path[:-4] + '_det.png'
mask_out_org_file = save_img_path[:-4] + '_orgmask.png'
mask_out_file = save_img_path[:-4] + '_mask.png'
csv_out_file = save_img_path[:-4]+'.csv'
cv2.imwrite(save_img_path, org_img)
cv2.imwrite(det_out_file, det_img)
cv2.imwrite(mask_out_file, mask_final)
cv2.imwrite(mask_out_org_file, mask_final_org)
f = open(csv_out_file,'w+')
for i in range(len(box_result)):
id = mask_index_range[0]+i;
current_box = box_result[i];
ellipse_res, line_a, line_b = ellipse_result[i]
current_cal = calcul_result[i]
# print(current_cal)
#log info
log_info = ("{},{},{},{},{},{},{},{},{},{},{},{},{},{},{}\n").format(id,
current_box[0],current_box[1],current_box[2],current_box[3],
current_cal[0],current_cal[1],
line_a[0], line_a[1],line_a[2], line_a[3],
line_b[0], line_b[1], line_b[2], line_b[3],
)
# print(log_info)
f.write(log_info)
f.close()
def new_show_result_2(img, img_name, result, class_names, score_thr=0.3, mask_index = 5):
"""Visualize the detection results on the image.
Args:
img (str or np.ndarray): Image filename or loaded image.
result (tuple[list] or list): The detection result, can be either
(bbox, segm) or just bbox.
class_names (list[str] or tuple[str]): A list of class names.
score_thr (float): The threshold to visualize the bboxes and masks.
out_file (str, optional): If specified, the visualization result will
be written to the out file instead of shown in a window.
"""
assert isinstance(class_names, (tuple, list))
img = mmcv.imread(img)
org_img = img.copy()
if isinstance(result, tuple):
bbox_result, segm_result = result
else:
bbox_result, segm_result = result, None
bboxes = np.vstack(bbox_result)
labels = [
np.full(bbox.shape[0], i, dtype=np.int32)
for i, bbox in enumerate(bbox_result)
]
labels = np.concatenate(labels)
# print(labels)
# draw segmentation masks
bbox_color = (255, 128, 128)
# read the depth map
# get the path of the depth map
# depth_data_path = img[:-4] + '.raw'
# depth_data = get_depth_data('./demo/5_Depth.raw')
# current_depth_data = depth_data[310:330,170:190]
#current_mean_dis = np.mean(current_depth_data)
depth_data_path = img_name.split('/')[-1][:-4] + '.tif'
# depth_data = None
# if depth_data_path != 'none' and os.path.exists(depth_data_path):
# depth_data = cv2.imread(depth_data_path, -1)
current_mean_dis = 2000
if depth_data_path != 'none' and os.path.exists(depth_data_path):
depth_data = cv2.imread(depth_data_path, -1)
w, h = depth_data.shape
current_depth_data = depth_data[w // 2 - 10:w // 2 + 10, h // 2 - 10:h // 2 + 10]
current_mean_dis = np.mean(current_depth_data)
# results containing the detection boxes and ellipses
detect_result = []
box_result = []
mask_result = []
ellipse_result = []
calcul_result = []
num_result = []
up_left_point_list = []
mask_index_range = []
if segm_result is not None:
segms = mmcv.concat_list(segm_result)
inds = np.where(bboxes[:, -1] > score_thr)[0]
for i in inds:
color_mask = np.random.randint(0, 256, (1, 3), dtype=np.uint8)
mask = maskUtils.decode(segms[i]).astype(np.bool)
ellipse_res,line_a,line_b = get_ellipse(mask)
line_a_len = np.sqrt(np.square(line_a[2] - line_a[0]) + np.square(line_a[3] - line_a[1]))
line_b_len = np.sqrt(np.square(line_b[2] - line_b[0]) + np.square(line_b[3] - line_b[1]))
#
# print('pic length is :%.2f,current dis is %.2f' %(line_a_len, current_mean_dis))
# print('cal real length is :%.2f' %(cal_result))
#
# print('pic length is :%.2f,current dis is %.2f' % (line_b_len, current_mean_dis))
# print('cal real length is :%.2f' % (cal_result))
# cv2.ellipse(img,ellipse_res,(255,0,0),2)
# cv2.line(img, (line_a[0],line_a[1]),(line_a[2],line_a[3]), (255, 0, 0), 2)
# cv2.line(img, (line_b[0],line_b[1]),(line_b[2],line_b[3]), (255, 0, 0), 2)
# img[mask] = img[mask] * 0.5 + color_mask * 0.5
bbox_int = bboxes[i].astype(np.int32)
left_top = (bbox_int[0]+10, bbox_int[1]+10)
right_bottom = (bbox_int[2]-10, bbox_int[3]-10)
# cv2.rectangle(img, left_top, right_bottom, bbox_color, thickness=1)
# current_mean_dis = 2000
# if depth_data_path != 'none' and os.path.exists(depth_data_path):
# w, h = depth_data.shape
# current_depth_data = depth_data[bbox_int[0]:bbox_int[2], bbox_int[1]:bbox_int[3]]
# current_mean_dis = np.mean(current_depth_data)
cal_result_a = line_a_len * current_mean_dis / (75 * 5)
cal_result_b = line_b_len * current_mean_dis / (75 * 5)
box_result.append([bbox_int[0],bbox_int[1],bbox_int[2],bbox_int[3]])
up_left_point_list.append([bbox_int[1],bbox_int[0]])
mask_result.append(mask)
calcul_result.append([cal_result_b,cal_result_a])
ellipse_result.append([ellipse_res,line_a,line_b])
label = labels[i]
# label_text = class_names[
# label] if class_names is not None else 'cls {}'.format(label)
#label_text = ''
# print(label_text)
#cv2.putText(img, label_text, (int((bbox_int[0]+bbox_int[2])/2-5), int((bbox_int[1]+bbox_int[3])/2-1)),
#cv2.FONT_HERSHEY_COMPLEX, 0.25, (128, 0, 0))
# label_text = 'num|{:d}'.format(len(inds))
# cv2.putText(img, label_text, (20, 20),
# cv2.FONT_HERSHEY_COMPLEX, 0.5, (255, 0, 0))
num_result.append(len(inds))
# sort the bounding boxes top-to-bottom first, then left-to-right
sort_index = sorted(range(len(up_left_point_list)),key=lambda x:(up_left_point_list[x][0],up_left_point_list[x][1]))
#print(sort_index)
# print(type(sort_index))
#sort_index = np.array(sort_index,dtype=np.int)
# sort_index=up_left_point_list.argsort(key=lambda x:(x[0],x[1]))
box_result = [box_result[i] for i in sort_index]
mask_result = [mask_result[i] for i in sort_index]
calcul_result = [calcul_result[i] for i in sort_index]
ellipse_result = [ellipse_result[i] for i in sort_index]
mask_final = np.zeros((img.shape[0], img.shape[1],3))
mask_final_org = np.zeros((img.shape[0], img.shape[1], 3))
mask_index_val = int(mask_index)
mask_index_range.append(mask_index_val)
for i in range(len(box_result)):
bbox_int = box_result[i]
left_top = (bbox_int[0], bbox_int[1])
right_bottom = (bbox_int[2], bbox_int[3])
cv2.rectangle(img, left_top, right_bottom, bbox_color, thickness=1)
mask = mask_result[i]
color_mask = np.random.randint(0, 256, (1, 3), dtype=np.uint8)
img[mask] = img[mask] * 0.5 + color_mask * 0.5
result_n = []
if(mask_index==5):
result_n.append(mask_index_val)
result_n.append(0)
result_n.append(0)
else:
result_n.append(0)
result_n.append(mask_index_val)
result_n.append(0)
# tmp = int(mask_index_val)
# while(tmp!=0):
# n = tmp % 25
# result_n.append(n)
# tmp = int(tmp/25)
# for i in range(len(result_n),3):
# result_n.append(0)
# print(result_n)
current_code_mask = np.stack([mask*result_n[j] for j in range(3)],axis=2)
# print(current_code_mask.shape)
mask_final_org = mask_final_org + current_code_mask
if(bbox_int[2]-bbox_int[0]>50):
iter_num = 6
else:
iter_num = 4
kernel = np.ones(5, dtype=np.uint8)
mask_er = cv2.erode(np.array(mask,dtype=np.uint8), kernel, iterations=iter_num)
current_code_mask = np.stack([mask_er * result_n[j] for j in range(3)], axis=2)
mask_final = mask_final + current_code_mask
mask_index_val = mask_index_val + 2
ellipse_res, line_a, line_b = ellipse_result[i]
cv2.ellipse(img, ellipse_res, (255, 0, 0), 2)
cv2.line(img, (line_a[0], line_a[1]), (line_a[2], line_a[3]), (255, 0, 0), 2)
cv2.line(img, (line_b[0], line_b[1]), (line_b[2], line_b[3]), (255, 0, 0), 2)
#cv2.imshow('img', img)
#cv2.waitKey()
return img, inds, calcul_result
def new_show_result_21(img, result, class_names, score_thr=0.3, mask_index = 5):
"""Visualize the detection results on the image.
Args:
img (str or np.ndarray): Image filename or loaded image.
result (tuple[list] or list): The detection result, can be either
(bbox, segm) or just bbox.
class_names (list[str] or tuple[str]): A list of class names.
score_thr (float): The threshold to visualize the bboxes and masks.
out_file (str, optional): If specified, the visualization result will
be written to the out file instead of shown in a window.
"""
assert isinstance(class_names, (tuple, list))
img = mmcv.imread(img)
org_img = img.copy()
if isinstance(result, tuple):
bbox_result, segm_result = result
else:
bbox_result, segm_result = result, None
bboxes = np.vstack(bbox_result)
labels = [
np.full(bbox.shape[0], i, dtype=np.int32)
for i, bbox in enumerate(bbox_result)
]
labels = np.concatenate(labels)
# print(labels)
# draw segmentation masks
bbox_color = (255, 128, 128)
# read the depth map
# get the path of the depth map
# depth_data_path = img[:-4] + '.raw'
# depth_data = get_depth_data('./demo/5_Depth.raw')
# current_depth_data = depth_data[310:330,170:190]
#current_mean_dis = np.mean(current_depth_data)
# depth_data_path = img.split('/')[-1][:-4] + '.tif'
current_mean_dis = 2500
# if depth_data_path != 'none' and os.path.exists(depth_data_path):
# depth_data = cv2.imread(depth_data_path, -1)
# w, h = depth_data.shape
# current_depth_data = depth_data[w // 2 - 10:w // 2 + 10, h // 2 - 10:h // 2 + 10]
# current_mean_dis = np.mean(current_depth_data)
# results containing the detection boxes and ellipses
detect_result = []
box_result = []
mask_result = []
ellipse_result = []
calcul_result = []
num_result = []
up_left_point_list = []
mask_index_range = []
if segm_result is not None:
segms = mmcv.concat_list(segm_result)
inds = np.where(bboxes[:, -1] > score_thr)[0]
for i in inds:
color_mask = np.random.randint(0, 256, (1, 3), dtype=np.uint8)
mask = maskUtils.decode(segms[i]).astype(np.bool)
ellipse_res,line_a,line_b = get_ellipse(mask)
line_a_len = np.sqrt(np.square(line_a[2] - line_a[0]) + np.square(line_a[3] - line_a[1]))
line_b_len = np.sqrt(np.square(line_b[2] - line_b[0]) + np.square(line_b[3] - line_b[1]))
cal_result_a = line_a_len * current_mean_dis / (75 * 5)
cal_result_b = line_b_len * current_mean_dis / (75 * 5)
#
# print('pic length is :%.2f,current dis is %.2f' %(line_a_len, current_mean_dis))
# print('cal real length is :%.2f' %(cal_result))
#
# print('pic length is :%.2f,current dis is %.2f' % (line_b_len, current_mean_dis))
# print('cal real length is :%.2f' % (cal_result))
# cv2.ellipse(img,ellipse_res,(255,0,0),2)
# cv2.line(img, (line_a[0],line_a[1]),(line_a[2],line_a[3]), (255, 0, 0), 2)
# cv2.line(img, (line_b[0],line_b[1]),(line_b[2],line_b[3]), (255, 0, 0), 2)
# img[mask] = img[mask] * 0.5 + color_mask * 0.5
bbox_int = bboxes[i].astype(np.int32)
left_top = (bbox_int[0]+10, bbox_int[1]+10)
right_bottom = (bbox_int[2]-10, bbox_int[3]-10)
# cv2.rectangle(img, left_top, right_bottom, bbox_color, thickness=1)
box_result.append([bbox_int[0],bbox_int[1],bbox_int[2],bbox_int[3]])
up_left_point_list.append([bbox_int[1],bbox_int[0]])
mask_result.append(mask)
calcul_result.append([cal_result_b,cal_result_a])
ellipse_result.append([ellipse_res,line_a,line_b])
label = labels[i]
# label_text = class_names[
# label] if class_names is not None else 'cls {}'.format(label)
#label_text = ''
# print(label_text)
#cv2.putText(img, label_text, (int((bbox_int[0]+bbox_int[2])/2-5), int((bbox_int[1]+bbox_int[3])/2-1)),
#cv2.FONT_HERSHEY_COMPLEX, 0.25, (128, 0, 0))
# label_text = 'num|{:d}'.format(len(inds))
# cv2.putText(img, label_text, (20, 20),
# cv2.FONT_HERSHEY_COMPLEX, 0.5, (255, 0, 0))
num_result.append(len(inds))
# sort the bounding boxes top-to-bottom first, then left-to-right
sort_index = sorted(range(len(up_left_point_list)),key=lambda x:(up_left_point_list[x][0],up_left_point_list[x][1]))
#print(sort_index)
# print(type(sort_index))
#sort_index = np.array(sort_index,dtype=np.int)
# sort_index=up_left_point_list.argsort(key=lambda x:(x[0],x[1]))
box_result = [box_result[i] for i in sort_index]
mask_result = [mask_result[i] for i in sort_index]
calcul_result = [calcul_result[i] for i in sort_index]
ellipse_result = [ellipse_result[i] for i in sort_index]
mask_final = np.zeros((img.shape[0], img.shape[1],3))
mask_final_org = np.zeros((img.shape[0], img.shape[1], 3))
mask_index_val = int(mask_index)
mask_index_range.append(mask_index_val)
for i in range(len(box_result)):
bbox_int = box_result[i]
left_top = (bbox_int[0], bbox_int[1])
right_bottom = (bbox_int[2], bbox_int[3])
cv2.rectangle(img, left_top, right_bottom, bbox_color, thickness=1)
mask = mask_result[i]
color_mask = np.random.randint(0, 256, (1, 3), dtype=np.uint8)
img[mask] = img[mask] * 0.5 + color_mask * 0.5
result_n = []
if(mask_index==5):
result_n.append(mask_index_val)
result_n.append(0)
result_n.append(0)
else:
result_n.append(0)
result_n.append(mask_index_val)
result_n.append(0)
# tmp = int(mask_index_val)
# while(tmp!=0):
# n = tmp % 25
# result_n.append(n)
# tmp = int(tmp/25)
# for i in range(len(result_n),3):
# result_n.append(0)
# print(result_n)
current_code_mask = np.stack([mask*result_n[j] for j in range(3)],axis=2)
# print(current_code_mask.shape)
mask_final_org = mask_final_org + current_code_mask
if(bbox_int[2]-bbox_int[0]>50):
iter_num = 6
else:
iter_num = 4
kernel = np.ones(5, dtype=np.uint8)
mask_er = cv2.erode(np.array(mask,dtype=np.uint8), kernel, iterations=iter_num)
current_code_mask = np.stack([mask_er * result_n[j] for j in range(3)], axis=2)
mask_final = mask_final + current_code_mask
mask_index_val = mask_index_val + 2
ellipse_res, line_a, line_b = ellipse_result[i]
cv2.ellipse(img, ellipse_res, (255, 0, 0), 2)
cv2.line(img, (line_a[0], line_a[1]), (line_a[2], line_a[3]), (255, 0, 0), 2)
cv2.line(img, (line_b[0], line_b[1]), (line_b[2], line_b[3]), (255, 0, 0), 2)
#cv2.imshow('img', img)
#cv2.waitKey()
return img, inds, calcul_result
# TODO: merge this method with the one in BaseDetector
def show_result(img, result, class_names, score_thr=0.3, out_file=None,mask_index=1):
"""Visualize the detection results on the image.
Args:
img (str or np.ndarray): Image filename or loaded image.
result (tuple[list] or list): The detection result, can be either
(bbox, segm) or just bbox.
class_names (list[str] or tuple[str]): A list of class names.
score_thr (float): The threshold to visualize the bboxes and masks.
out_file (str, optional): If specified, the visualization result will
be written to the out file instead of shown in a window.
"""
assert isinstance(class_names, (tuple, list))
img = mmcv.imread(img)
org_img = img.copy()
if isinstance(result, tuple):
bbox_result, segm_result = result
else:
bbox_result, segm_result = result, None
bboxes = np.vstack(bbox_result)
labels = [
np.full(bbox.shape[0], i, dtype=np.int32)
for i, bbox in enumerate(bbox_result)
]
labels = np.concatenate(labels)
# print(labels)
# draw segmentation masks
bbox_color = (255, 128, 128)
# read the depth map
# get the path of the depth map
# depth_data_path = img[:-4] + '.raw'
# depth_data = get_depth_data('./demo/5_Depth.raw')
# current_depth_data = depth_data[310:330,170:190]
#current_mean_dis = np.mean(current_depth_data)
current_mean_dis = 0
# results containing the detection boxes and ellipses
detect_result = []
box_result = []
mask_result = []
ellipse_result = []
calcul_result = []
num_result = []
up_left_point_list = []
mask_index_range = []
if segm_result is not None:
segms = mmcv.concat_list(segm_result)
inds = np.where(bboxes[:, -1] > score_thr)[0]
for i in inds:
color_mask = np.random.randint(0, 256, (1, 3), dtype=np.uint8)
mask = maskUtils.decode(segms[i]).astype(np.bool)
ellipse_res,line_a,line_b = get_ellipse(mask)
line_a_len = np.sqrt(np.square(line_a[2] - line_a[0]) + np.square(line_a[3] - line_a[1])
#coding=utf-8
#!/usr/bin/env python
import numpy as np
from numpy.random import choice
import os
import pandas as pd
import sys
import nltk
import tensorflow as tf
HOME = '.'
sys.path.insert(0, HOME)
from alo import dataset
from alo import word_embed
from alo import nn
from alo import utils
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_integer('batch_size', 64, 'batch size')
tf.app.flags.DEFINE_string('corpus', 'rt.txt', 'input corpus')
tf.app.flags.DEFINE_integer('num_filter', 100, 'number of feature maps')
tf.app.flags.DEFINE_float('keep_prob', 0.5, 'dropout keep rate')
tf.app.flags.DEFINE_float('lr', 0.1, 'learning rate')
tf.app.flags.DEFINE_integer('max_len', 124, 'max sentence length')
tf.app.flags.DEFINE_integer('epoches', 32, 'number of epoches')
tf.app.flags.DEFINE_string('embed_fn', 'text8-vector.bin', 'embeddings file')
tf.app.flags.DEFINE_integer('embed_dim', 200, 'word embedding dimentionality')
tf.app.flags.DEFINE_integer('n_tasks', 3, 'number of tasks')
tf.app.flags.DEFINE_integer('task_idx', 3, 'index of the task, 1 for toxicity, 2 for aggression, 3 for attack')
tf.app.flags.DEFINE_string('output', 'Output_sep_task_all_b64_x20.txt', 'output_file')
tf.app.flags.DEFINE_integer('xfold', 10, 'number of folds')
tf.app.flags.DEFINE_string('save_to', 'saved_models', 'folder for saved models')
def main(_):
#wiki = dataset.WikiTalk()
#data = wiki.data2matrix()
data = np.load(os.path.join(HOME, 'data.npy'))
output_file=open(FLAGS.output,"w")
if FLAGS.n_tasks == 1:
X_data, Y_data = data[:, :-3], data[:, -FLAGS.task_idx]
else:
X_data, Y_data = data[:, :-3], data[:, -3:]
print(X_data.shape)
print(Y_data.shape)
embed_path = os.path.join(HOME, 'resource', FLAGS.embed_fn)
embeddings = word_embed.Word2Vec(embed_path)
word_embeddings = np.array(list(embeddings.word2embed.values()))
dir_saved_models = os.path.join(HOME, FLAGS.save_to)
if not os.path.exists(dir_saved_models):
os.makedirs(dir_saved_models)
fold_size = X_data.shape[0] // FLAGS.xfold
with tf.Graph().as_default(), tf.Session() as sess:
for i in range(FLAGS.xfold):
print('{}\nValidating fold {}, validate_idx = [{}:{}]\n{}'.format(
'-'*79, i+1, i*fold_size, (i+1)*fold_size, '-'*79), file=output_file, flush=True)
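            # Fold i holds out rows [i*fold_size, (i+1)*fold_size) for testing and
            # stacks the remaining rows for training (plain k-fold split, no shuffling).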
X_train = np.vstack((X_data[:i*fold_size], X_data[(i+1)*fold_size:]))
if FLAGS.n_tasks == 1:
Y_train = np.hstack((Y_data[:i*fold_size], Y_data[(i+1)*fold_size:]))
else:
Y_train = np.vstack((Y_data[:i*fold_size], Y_data[(i+1)*fold_size:]))
X_test = X_data[i*fold_size:(i+1)*fold_size]
Y_test = Y_data[i*fold_size:(i+1)*fold_size]
if FLAGS.n_tasks == 1:
tccnn = nn.STCNN(
filter_heights=[3, 4, 5],
word_embeddings=word_embeddings,
sent_len=FLAGS.max_len,
batch_size=FLAGS.batch_size,
num_filter=FLAGS.num_filter,
keep_prob=FLAGS.keep_prob,
lr=FLAGS.lr,
embed_dim=FLAGS.embed_dim)
best_accuracy = np.array([-1.])
else:
tccnn = nn.MTCNN(
filter_heights=[3, 4, 5],
word_embeddings=word_embeddings,
sent_len=FLAGS.max_len,
batch_size=FLAGS.batch_size,
num_filter=FLAGS.num_filter,
keep_prob=FLAGS.keep_prob,
lr=FLAGS.lr,
embed_dim=FLAGS.embed_dim,
n_tasks=FLAGS.n_tasks)
best_accuracy = np.array([-1., -1., -1.])
saver = tf.train.Saver()
fold_idx = i
for epoch in range(FLAGS.epoches):
train_loss, train_acc = .0, np.array([0.] * FLAGS.n_tasks)
num_batches = X_train.shape[0] // FLAGS.batch_size
shuffled_batches = choice(range(num_batches), num_batches, replace=False)
for _n, idx in enumerate(shuffled_batches):
X_train_batch = X_train[idx*FLAGS.batch_size:(idx+1)*FLAGS.batch_size]
Y_train_batch = Y_train[idx*FLAGS.batch_size:(idx+1)*FLAGS.batch_size]
feed_dict = {tccnn.X: X_train_batch, tccnn.Y: Y_train_batch}
sess.run(tccnn.step, feed_dict=feed_dict)
train_loss += sess.run(tccnn.loss, feed_dict=feed_dict)
_train_acc = sess.run(tccnn.accuracy, feed_dict=feed_dict)
if FLAGS.n_tasks == 1:
_train_acc = np.array([_train_acc])
for i in range(FLAGS.n_tasks):
train_acc[i] += _train_acc[i]
# train_acc += _train_acc
# print('Training epoch {:2d}, batch {:4d}/{:4d}, loss = {:.8f}'.format(
# epoch+1, _n, num_batches, train_loss/(_n+1)), end='\r', flush=True)
#if (epoch+1) % 4 != 0:
# continue
test_acc = np.array([0.] * FLAGS.n_tasks)
test_p = np.array([0.] * FLAGS.n_tasks)
test_r = np.array([0.] * FLAGS.n_tasks)
test_fscore = np.array([0.] * FLAGS.n_tasks)
test_auc = np.array([0.] * FLAGS.n_tasks)
num_batches_test = X_test.shape[0] // FLAGS.batch_size
for _n, idx in enumerate(range(num_batches_test)):
X_test_batch = X_test[idx*FLAGS.batch_size:(idx+1)*FLAGS.batch_size]
Y_test_batch = Y_test[idx*FLAGS.batch_size:(idx+1)*FLAGS.batch_size]
# print (Y_pred,flush=False)
# print (Y_test_batch,flush=False)
feed_dict = {tccnn.X: X_test_batch, tccnn.Y: Y_test_batch}
_test_acc = sess.run(tccnn.accuracy, feed_dict=feed_dict)
_test_p=sess.run(tccnn.p, feed_dict=feed_dict)
_test_r=sess.run(tccnn.r, feed_dict=feed_dict)
_test_f1=sess.run(tccnn.f1, feed_dict=feed_dict)
for i in tccnn.auc_op:
sess.run(i, feed_dict=feed_dict)
_test_auc = sess.run(tccnn.auc, feed_dict=feed_dict)
# print (_test_acc,flush=False)
# print (_test_f1,flush=False)
if FLAGS.n_tasks == 1:
_test_acc = np.array([_test_acc])
_test_f1 = np.array([_test_f1])
_test_auc = np.array([_test_auc])
if FLAGS.n_tasks == 3:
_test_acc = np.array([_test_acc])[0]
                        _test_p = np.array([_test_p])
import numpy as np
from context import gpytoolbox
# Build a polyline; for example, a square
V = np.array([ [-1.0, -1.0], [-1.0, 1.0], [1.0, 1.0], [1.0, -1.0] ])
# Camera position and direction
cam_pos = np.array([-0.5,-1.5])
cam_dir = np.array([0.0,1.0])
# Looking upwards: intersection should be [-0.5,-1.0], normal downwards
x, n, ind = gpytoolbox.ray_polyline_intersect(cam_pos,cam_dir,V)
assert((np.isclose(x,np.array([-0.5,-1.0]))).all())
assert((np.isclose(n,np.array([0.0,-1.0]))).all())
# Oblique direction, inside
cam_pos = np.array([0.2,0.0])
cam_dir = np.array([0.3,0.4])
# Intersection should be [0.95,1.0], normal should be upwards
x, n, ind = gpytoolbox.ray_polyline_intersect(cam_pos,cam_dir,V)
assert((np.isclose(x,np.array([0.95,1.0]))).all())
assert((np.isclose(n,np.array([0.0,1.0]))).all())
# Degeneracies: Parallel without a hit
cam_pos = np.array([1.1,1.1])
cam_dir = np.array([0.0,-1.0])
# There should be no intersection
x, n, ind = gpytoolbox.ray_polyline_intersect(cam_pos,cam_dir,V)
# if no intersection, x is infinity and n is zero
assert((np.isclose(x,np.array([np.Inf,np.Inf]))).all())
assert((np.isclose(n,np.array([0.0, 0.0]))).all())
# Index is -1
assert(ind==-1)
# Degeneracy: Coincident
cam_pos = np.array([-1.0,-2.0])
import json
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import csv
import time
import copy
import os
from datetime import datetime
import error_metrics
global gld_num
gld_num = '1'
os.chdir('/home/ankit/PFO-ADC-DER-Testbed/ADC-DER-Testbed/testbed/post_process')
# discard_time = 3600*4
## loading cosim_manager data
lp = open('./cosim_data.json').read()
cosim_data = json.loads(lp)
## Appending all cosim data with one more entry
for key, value in cosim_data.items():
for k, v in value.items():
if k == 'Timestamp':
# v.append(v[-1]+v[-1]-v[-2]) # adding one more timestamp
v.append(v[-1] + v[0])
else:
v.append(v[-1]) # repeating the last value again
cosim_data[key][k] = v
cosim_time = cosim_data[list(cosim_data)[0]]['Timestamp']
cosim_data['time'] = np.array([int(i) for i in cosim_time])
# create mapping of each node to its ADC
adc_nodes_map=[]
adc_file = "./../../../GLD/initial_scenario/ADC_Location/ADC_Placement_by_Voltage_Drop.csv"
with open(adc_file, mode='r') as csv_file:
for i in range(1):
next(csv_file)
csv_reader = csv.reader(csv_file)
for row in csv_reader:
adc_nodes_map.append([row[0], row[-1]])
adc_nodes_map = np.array(adc_nodes_map)
#function to return adc name of the input node
def find_adc(node, adc_nodes_map=adc_nodes_map):
ind = np.where(adc_nodes_map[:,0]==node)[0][0]
adc_name = 'M' + gld_num + '_ADC' + adc_nodes_map[ind,1]
return adc_name
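# Example (hypothetical node id; real ids come from the first column of the placement CSV):
#   find_adc('133') returns a string of the form 'M1_ADC<n>', e.g. 'M1_ADC18' if the CSV
#   assigns node 133 to ADC 18.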
# Loading gld_data.json
lp = open('GLD_' + gld_num + '_data.json').read()
gld_data = json.loads(lp)
# creating a dict to map each adc to the indexes of devices in gld_data for each der type
# adc['der']['adc name']=[indexes in the gld data]
# t=time.time()
# adc_ind = {}
# der_type=[['battInv', 'power'], ['solarInv','power'], ['hvac','power'], ['wh','power']]
# for der in der_type:
# adc_ind[der[0]] = {}
# obj = gld_data[der[0]][der[1]]['object_name']
# for a in obj:
# b = a.split('_')[-2][1:]
# # if 'l102_tm' in a:
# if find_adc(b) not in adc_ind[der[0]]:
# adc_ind[der[0]][find_adc(b)] = []
# adc_ind[der[0]][find_adc(b)].append(obj.index(a))
# print('elapsed time is ',time.time()-t)
# creating a dict to map each adc to the indexes of devices in gld_data for each der type
# adc_ind['adc name']['der']=[indexes in the gld data]
t=time.time()
adc_ind = {}
der_type=[['battInv', 'power'], ['solarInv','power'], ['hvac','power'], ['wh','power']]
for der in der_type:
obj = gld_data[der[0]][der[1]]['object_name']
for a in obj:
b = a.split('_')[-2][1:]
# if 'l102_tm' in a:
if find_adc(b) == 'M1_ADCNONE':
continue
if find_adc(b) not in adc_ind:
adc_ind[find_adc(b)] = {}
if der[0] not in adc_ind[find_adc(b)]:
adc_ind[find_adc(b)][der[0]]=[]
adc_ind[find_adc(b)][der[0]].append(obj.index(a))
# print('elapsed time is ',time.time()-t)
#Voltages
voltages = np.array(gld_data['hvac']['voltages']['values']).astype(np.cfloat)
# Actuation Signals
#hrs = gld_data['battInv']['P_Out']['time']
battInv_Pout = np.array(gld_data['battInv']['P_Out']['values']).astype(np.float)
battInv_Qout = np.array(gld_data['battInv']['Q_Out']['values']).astype(np.float)
solarInv_Pout = np.array(gld_data['solarInv']['P_Out']['values']).astype(np.float)
solarInv_Qout = np.array(gld_data['solarInv']['Q_Out']['values']).astype(np.float)
hvac_seth = np.array(gld_data['hvac']['heating_setpoint']['values']).astype(np.float)
hvac_setc = np.array(gld_data['hvac']['cooling_setpoint']['values']).astype(np.float)
hvac_cooling_demand = (np.array(gld_data['hvac']['cooling_demand']['values'])).astype(np.float)
hvac_fan_power = (np.array(gld_data['hvac']['fan_design_power']['values'])).astype(np.float)/1000
hvac_rating = hvac_cooling_demand+hvac_fan_power
hvac_c_thermal_capacity = (np.array(gld_data['hvac']['design_cooling_capacity']['values'])).astype(np.float)
hvac_c_cop = (np.array(gld_data['hvac']['cooling_COP']['values'])).astype(np.float)
hvac_rating1 = hvac_c_thermal_capacity/12000/hvac_c_cop*3.5168
wh_tanks = np.array(gld_data['wh']['tank_setpoint']['values']).astype(np.float)
hvac_c_status = np.array(gld_data['hvac']['cooling_status']['values']).astype(np.float)
wh_rating = np.array(gld_data['wh']['heating_element_capacity']['values']).astype(np.float)
battInv_rated = (np.array(gld_data['battInv']['rated_power']['values'])).astype(np.float)
batt_rated = (np.array(gld_data['batt']['rated_power']['values'])).astype(np.float)
solar_rated = (np.array(gld_data['solar']['rated_power']['values'])).astype(np.float)
# Device Power Outputs
battInv_power = (np.array(gld_data['battInv']['power']['values'])).astype(np.cfloat)
solarInv_power = (np.array(gld_data['solarInv']['power']['values'])).astype(np.cfloat)
hvac_power = (np.array(gld_data['hvac']['power']['values'])).astype(np.cfloat)
wh_power = (np.array(gld_data['wh']['power']['values'])).astype(np.cfloat)
solar_VA = (np.array(gld_data['solar']['VA']['values'])).astype(np.cfloat)
#aggregating device outputs per adc in adc_agg dict
# adc_agg['adc name']['der type']=sum of all devices of der type
t=time.time()
adc_agg = copy.deepcopy(adc_ind)
adc_Prating = {}
num_der = {}
total_num_der = 0
for adc_num in adc_ind:
adc_Prating[adc_num] = {}
if "battInv" in adc_agg[adc_num]:
adc_agg[adc_num]["battInv"] = np.sum(battInv_power[:, adc_ind[adc_num]['battInv']], 1)/1000
adc_agg[adc_num]["batt_Pout"] = np.sum(battInv_Pout[:, adc_ind[adc_num]['battInv']], 1) / 1000
adc_agg[adc_num]["batt_Qout"] = np.sum(battInv_Qout[:, adc_ind[adc_num]['battInv']], 1) / 1000
adc_agg[adc_num]["total"] = adc_agg[adc_num]["battInv"]
adc_Prating[adc_num]["battInv"] = np.sum(battInv_rated[0, adc_ind[adc_num]['battInv']])/1000
adc_Prating[adc_num]["total"] = adc_Prating[adc_num]["battInv"]
if "solarInv" in adc_agg[adc_num]:
adc_agg[adc_num]["solarInv"] = np.sum(solarInv_power[:, adc_ind[adc_num]['solarInv']], 1) / 1000
adc_agg[adc_num]["solar_Pout"] = np.sum(solarInv_Pout[:, adc_ind[adc_num]['solarInv']], 1) / 1000
adc_agg[adc_num]["solar_Qout"] = np.sum(solarInv_Qout[:, adc_ind[adc_num]['solarInv']], 1) / 1000
adc_agg[adc_num]["total"] = adc_agg[adc_num]["total"] + adc_agg[adc_num]["solarInv"]
adc_Prating[adc_num]["solarInv"] = np.sum(solar_rated[0, adc_ind[adc_num]['solarInv']]) / 1000
adc_Prating[adc_num]["solarVA"] = np.sum(solar_VA[:, adc_ind[adc_num]['solarInv']], 1) / 1000
adc_Prating[adc_num]["total"] = adc_Prating[adc_num]["total"] + adc_Prating[adc_num]["solarInv"]
if "hvac" in adc_agg[adc_num]:
adc_agg[adc_num]["hvac"] = np.sum(hvac_power[:, adc_ind[adc_num]['hvac']], 1)
adc_agg[adc_num]["total"] = adc_agg[adc_num]["total"] + adc_agg[adc_num]["hvac"]
adc_Prating[adc_num]["hvac"] = np.sum(hvac_rating[0, adc_ind[adc_num]['hvac']])
adc_Prating[adc_num]["total"] = adc_Prating[adc_num]["total"] + adc_Prating[adc_num]["hvac"]
if "wh" in adc_agg[adc_num]:
adc_agg[adc_num]["wh"] = np.sum(wh_power[:, adc_ind[adc_num]['wh']], 1)
adc_agg[adc_num]["total"] = adc_agg[adc_num]["total"] + adc_agg[adc_num]["wh"]
adc_Prating[adc_num]["wh"] = np.sum(wh_rating[0, adc_ind[adc_num]['wh']])
adc_Prating[adc_num]["total"] = adc_Prating[adc_num]["total"] + adc_Prating[adc_num]["wh"]
error_metrics.calculate(adc_agg, adc_Prating, cosim_data)
#Plot aggregate devices output at given adc for each der type
time_format = '%H:%M:%S'
time_stamp = [t.split(' ')[1] for t in gld_data['wh']['power']['time']]
time_h = [datetime.strptime(t, '%H:%M:%S') for t in time_stamp]
hrs = [int((i-time_h[0]).total_seconds()) for i in time_h]
# start_time = 3600*4
adc_num = 'M1_ADC18'
# total_rating = sum(wh_rating[0, adc_ind[adc_num]['wh']]) + sum(hvac_rating[0, adc_ind[adc_num]['hvac']]) + sum(
# battInv_rated[0, adc_ind[adc_num]['battInv']]) / 1000 + sum(solar_rated[0, adc_ind[adc_num]['solarInv']]) / 1000
fig1, ax1 = plt.subplots(2, 2, sharex='col')
# ax1[0,0].plot(hrs, np.real(adc_agg[adc_num]['batt_Pout']), label='Battery', color='C0')
# ax1[0,0].plot(hrs, np.real(adc_agg[adc_num]['solar_Pout']), label='Solar', color='C1')
ax1[0,0].plot(hrs, np.real(adc_agg[adc_num]['batt_Pout'] + adc_agg[adc_num]['solar_Pout']), label='Solar+Battery', color='C2')
ax1[0,0].plot(hrs, np.real(adc_agg[adc_num]['wh']), label='WH', color='C3')
ax1[0,0].plot(hrs, np.real(adc_agg[adc_num]['hvac']), label='HVAC', color='C4')
#
# ax1[0,0].step((cosim_data['time']),np.array(cosim_data[adc_num]['Popt_BATT'])/1,'k', linestyle='--', color='C0', where='post', label='battery set point')
# ax1[0,0].step((cosim_data['time']),np.array(cosim_data[adc_num]['Popt_PV'])/1,'k', linestyle='--', color='C1', where='post', label='pv set point')
ax1[0,0].step((cosim_data['time']),np.array(cosim_data[adc_num]['Popt_PV']) + np.array(cosim_data[adc_num]['Popt_BATT']),'k', linestyle='--', color='C2', where='post', label='PV+Batt set point')
ax1[0,0].step((cosim_data['time']),np.array(cosim_data[adc_num]['Popt_WH'])/1,'k', linestyle='--', color='C3', where='post', label='WH set point')
ax1[0,0].step((cosim_data['time']),np.array(cosim_data[adc_num]['Popt_HVAC'])/1,'k', linestyle='--', color='C4', where='post', label='AC set point')
ax1[0,0].set_ylabel("kW")
ax1[0,0].set_title("Aggregated kW at ADC "+adc_num+" by DER")
ax1[0,0].legend(loc='best')
# plt.xlim(left=start_time)
# ax1[0,1].plot(hrs, np.real(adc_agg[adc_num]['batt_Qout']), label='Battery')
# ax1[0,1].plot(hrs, np.real(adc_agg[adc_num]['solar_Qout']), label='Solar')
ax1[0,1].plot(hrs, np.real(adc_agg[adc_num]['batt_Qout'] + adc_agg[adc_num]['solar_Qout']), label='Solar+Battery', color='C2')
ax1[0,1].plot(hrs, np.imag(adc_agg[adc_num]['wh']), label='WH', color='C3')
ax1[0,1].plot(hrs, np.imag(adc_agg[adc_num]['hvac']), label='HVAC', color='C4')
# ax1[0,1].step((cosim_data['time']),np.array(cosim_data[adc_num]['Qopt_BATT'])/1,'k', linestyle='--', color='C0', where='post', label='battery set point')
# ax1[0,1].step((cosim_data['time']),np.array(cosim_data[adc_num]['Qopt_PV'])/1,'k', linestyle='--', color='C1', where='post', label='pv set point')
ax1[0,1].step((cosim_data['time']),np.array(cosim_data[adc_num]['Qopt_PV']) + np.array(cosim_data[adc_num]['Qopt_BATT']),'k', linestyle='--', color='C2', where='post', label='PV+Batt set point')
ax1[0,1].step((cosim_data['time']),np.array(cosim_data[adc_num]['Qopt_WH'])/1,'k', linestyle='--', color='C3', where='post', label='WH set point')
ax1[0,1].step((cosim_data['time']), np.array(cosim_data[adc_num]['Qopt_HVAC'])/1,'k', linestyle='--', color='C4', where='post', label='AC set point')
#!/usr/bin/python
#
# test_transform.py - Unit tests for transform module
#
# Author: <NAME> (<EMAIL>)
# Date: 7/1/2015
#
# Requires:
# * FlowCal.io
# * FlowCal.transform
# * numpy
#
import FlowCal.io
import FlowCal.transform
import numpy as np
import unittest
import os
class TestRFIArray(unittest.TestCase):
def setUp(self):
self.d = np.array([
[1, 7, 2],
[2, 8, 3],
[3, 9, 4],
[4, 10, 5],
[5, 1, 6],
[6, 2, 7],
[7, 3, 8],
[8, 4, 9],
[9, 5, 10],
[10, 6, 1],
])
def test_rfi_original_integrity(self):
db = self.d.copy()
dt = FlowCal.transform.to_rfi(self.d,
channels=[0,1],
amplification_type=[(0,0), (0,0)],
amplifier_gain=[1.0, 1.0],
resolution=[1024, 1024],)
np.testing.assert_array_equal(self.d, db)
def test_rfi_arg_error_amplification_type_absent(self):
with self.assertRaises(ValueError):
FlowCal.transform.to_rfi(self.d,
channels=[0,1])
def test_rfi_arg_error_amplification_type_length(self):
with self.assertRaises(ValueError):
FlowCal.transform.to_rfi(self.d,
channels=[0,1],
amplification_type=[(4,1), (4,1), (4,1)])
def test_rfi_arg_error_resolution_absent(self):
with self.assertRaises(ValueError):
FlowCal.transform.to_rfi(self.d,
channels=[0,1],
amplification_type=[(4,1), (4,1)])
def test_rfi_arg_error_resolution_length(self):
with self.assertRaises(ValueError):
FlowCal.transform.to_rfi(self.d,
channels=[0,1],
amplification_type=[(4,1), (4,1)],
resolution=[1024])
def test_rfi_arg_error_amplifier_gain_length(self):
with self.assertRaises(ValueError):
FlowCal.transform.to_rfi(self.d,
channels=[0,1],
amplification_type=[(0,0), (0,0)],
amplifier_gain=[3,4,4])
def test_rfi_1d_log_1(self):
dt = FlowCal.transform.to_rfi(self.d,
channels=1,
amplification_type=(4, 1),
resolution=1024)
np.testing.assert_array_equal(dt[:,0], self.d[:,0])
np.testing.assert_array_equal(dt[:,1], 10**(self.d[:,1]/256.0))
np.testing.assert_array_equal(dt[:,2], self.d[:,2])
def test_rfi_1d_log_2(self):
dt = FlowCal.transform.to_rfi(self.d,
channels=2,
amplification_type=(2, 0.01),
amplifier_gain=5.0,
resolution=256)
np.testing.assert_array_equal(dt[:,0], self.d[:,0])
np.testing.assert_array_equal(dt[:,1], self.d[:,1])
np.testing.assert_array_equal(dt[:,2], 0.01*10**(self.d[:,2]/128.0))
def test_rfi_1d_linear_1(self):
dt = FlowCal.transform.to_rfi(self.d,
channels=2,
amplification_type=(0, 0),
amplifier_gain=None,
resolution=256)
np.testing.assert_array_equal(dt[:,0], self.d[:,0])
np.testing.assert_array_equal(dt[:,1], self.d[:,1])
np.testing.assert_array_equal(dt[:,2], self.d[:,2])
def test_rfi_1d_linear_2(self):
dt = FlowCal.transform.to_rfi(self.d,
channels=1,
amplification_type=(0, 0),
amplifier_gain=5.0,
resolution=256)
np.testing.assert_array_equal(dt[:,0], self.d[:,0])
np.testing.assert_array_equal(dt[:,1], self.d[:,1]/5.0)
np.testing.assert_array_equal(dt[:,2], self.d[:,2])
def test_rfi_2d_log_1(self):
dt = FlowCal.transform.to_rfi(self.d,
channels=[1,2],
amplification_type=[(4, 1), (2, 0.01)],
resolution=[1024, 256])
np.testing.assert_array_equal(dt[:,0], self.d[:,0])
np.testing.assert_array_equal(dt[:,1], 10**(self.d[:,1]/256.0))
np.testing.assert_array_equal(dt[:,2], 0.01*10**(self.d[:,2]/128.0))
def test_rfi_2d_mixed_1(self):
dt = FlowCal.transform.to_rfi(self.d,
channels=[1,2],
amplification_type=[(4, 1), (0, 0)],
amplifier_gain=[4., None],
resolution=[1024, 1024])
np.testing.assert_array_equal(dt[:,0], self.d[:,0])
np.testing.assert_array_equal(dt[:,1], 10**(self.d[:,1]/256.0))
np.testing.assert_array_equal(dt[:,2], self.d[:,2])
def test_rfi_2d_mixed_2(self):
dt = FlowCal.transform.to_rfi(self.d,
channels=[1,2],
amplification_type=[(4, 1), (0, 0)],
amplifier_gain=[4., 10.],
resolution=[1024, 1024])
np.testing.assert_array_equal(dt[:,0], self.d[:,0])
np.testing.assert_array_equal(dt[:,1], 10**(self.d[:,1]/256.0))
np.testing.assert_array_equal(dt[:,2], self.d[:,2]/10.)
def test_rfi_default_channel_1(self):
dt = FlowCal.transform.to_rfi(self.d,
amplification_type=[(4,1)]*3,
amplifier_gain=[4., 5., 10.],
resolution=[1024]*3)
        np.testing.assert_array_equal(dt, 10**(self.d/256.0))
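    # The expected values above follow the mapping these tests assume for to_rfi:
    # a log channel with amplification_type (a, b) and resolution r maps
    # d -> b * 10**(a * d / r) (e.g. (4, 1) with r = 1024 gives 10**(d / 256)), while a
    # linear channel ((0, 0)) maps d -> d / amplifier_gain (or leaves d unchanged if the gain is None).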
import platform
import numpy as np
from bioptim import OdeSolver
from .utils import TestUtils
def test_xia_fatigable_muscles():
bioptim_folder = TestUtils.bioptim_folder()
fatigue = TestUtils.load_module(f"{bioptim_folder}/examples/fatigue/static_arm_with_fatigue.py")
model_path = f"{bioptim_folder}/examples/fatigue/models/arm26_constant.bioMod"
ocp = fatigue.prepare_ocp(
biorbd_model_path=model_path,
final_time=0.9,
n_shooting=5,
fatigue_type="xia",
ode_solver=OdeSolver.COLLOCATION(),
torque_level=1,
)
sol = ocp.solve()
# Check objective function value
f = np.array(sol.cost)
np.testing.assert_equal(f.shape, (1, 1))
np.testing.assert_almost_equal(f[0, 0], 19.770521758810368)
# Check constraints
g = np.array(sol.constraints)
np.testing.assert_equal(g.shape, (552, 1))
np.testing.assert_almost_equal(g, np.zeros((552, 1)))
# Check some of the results
states, controls = sol.states, sol.controls
q, qdot, ma, mr, mf = states["q"], states["qdot"], states["muscles_ma"], states["muscles_mr"], states["muscles_mf"]
tau, muscles = controls["tau"], controls["muscles"]
# initial and final position
np.testing.assert_almost_equal(q[:, 0], np.array((0.07, 1.4)))
np.testing.assert_almost_equal(q[:, -1], np.array((1.64470726, 2.25033212)))
# initial and final velocities
np.testing.assert_almost_equal(qdot[:, 0], np.array((0, 0)))
np.testing.assert_almost_equal(qdot[:, -1], np.array((-2.93853331, 3.00564551)))
# fatigue parameters
np.testing.assert_almost_equal(ma[:, 0], np.array((0, 0, 0, 0, 0, 0)))
np.testing.assert_almost_equal(
ma[:, -1], np.array((0.00739128, 0.00563555, 0.00159309, 0.02418655, 0.02418655, 0.00041913))
)
np.testing.assert_almost_equal(mr[:, 0], np.array((1, 1, 1, 1, 1, 1)))
np.testing.assert_almost_equal(
mr[:, -1], np.array((0.99260018, 0.99281414, 0.99707397, 0.97566527, 0.97566527, 0.99904065))
)
np.testing.assert_almost_equal(mf[:, 0], np.array((0, 0, 0, 0, 0, 0)))
np.testing.assert_almost_equal(
mf[:, -1],
np.array((8.54868154e-06, 1.55030599e-03, 1.33293886e-03, 1.48176210e-04, 1.48176210e-04, 5.40217808e-04)),
)
# initial and final controls
np.testing.assert_almost_equal(tau[:, 0], np.array((0.80920008, 1.66855572)))
np.testing.assert_almost_equal(tau[:, -2], np.array((0.81847388, -0.85234628)))
np.testing.assert_almost_equal(
muscles[:, 0],
np.array((6.22395441e-08, 4.38966513e-01, 3.80781292e-01, 2.80532297e-07, 2.80532297e-07, 2.26601989e-01)),
)
np.testing.assert_almost_equal(
muscles[:, -2],
np.array((8.86069119e-03, 1.17337666e-08, 1.28715148e-08, 2.02340603e-02, 2.02340603e-02, 2.16517945e-088)),
)
# save and load
TestUtils.save_and_load(sol, ocp, True)
# simulate
TestUtils.simulate(sol)
def test_michaud_fatigable_muscles():
bioptim_folder = TestUtils.bioptim_folder()
fatigue = TestUtils.load_module(f"{bioptim_folder}/examples/fatigue/static_arm_with_fatigue.py")
model_path = f"{bioptim_folder}/examples/fatigue/models/arm26_constant.bioMod"
ocp = fatigue.prepare_ocp(
biorbd_model_path=model_path,
final_time=0.9,
n_shooting=5,
fatigue_type="michaud",
ode_solver=OdeSolver.COLLOCATION(),
torque_level=1,
)
sol = ocp.solve()
# Check objective function value
f = np.array(sol.cost)
np.testing.assert_equal(f.shape, (1, 1))
if platform.system() == "Linux":
np.testing.assert_almost_equal(f[0, 0], 16.32400654587575)
# Check constraints
g = np.array(sol.constraints)
np.testing.assert_equal(g.shape, (702, 1))
np.testing.assert_almost_equal(g, np.zeros((702, 1)))
# Check some of the results
states, controls = sol.states, sol.controls
q, qdot, ma, mr, mf = states["q"], states["qdot"], states["muscles_ma"], states["muscles_mr"], states["muscles_mf"]
tau, muscles = controls["tau"], controls["muscles"]
# initial and final position
np.testing.assert_almost_equal(q[:, 0], np.array((0.07, 1.4)))
np.testing.assert_almost_equal(q[:, -1], np.array((1.64470726, 2.25033212)))
# initial and final velocities
np.testing.assert_almost_equal(qdot[:, 0], np.array((0, 0)))
np.testing.assert_almost_equal(ma[:, 0], np.array((0, 0, 0, 0, 0, 0)))
np.testing.assert_almost_equal(mr[:, 0], np.array((1, 1, 1, 1, 1, 1)))
np.testing.assert_almost_equal(mf[:, 0], np.array((0, 0, 0, 0, 0, 0)))
np.testing.assert_almost_equal(
mf[:, -1],
np.array((0, 3.59773278e-04, 3.59740895e-04, 0, 0, 0)),
)
if platform.system() == "Linux":
np.testing.assert_almost_equal(qdot[:, -1], np.array((-3.8913551, 3.68787122)))
np.testing.assert_almost_equal(
ma[:, -1], np.array((0.03924828, 0.01089071, 0.00208428, 0.05019898, 0.05019898, 0.00058203))
)
np.testing.assert_almost_equal(
mr[:, -1], np.array((0.96071394, 0.98795266, 0.99699829, 0.9496845, 0.9496845, 0.99917771))
)
np.testing.assert_almost_equal(tau[:, 0], np.array((0.96697626, 0.7686893)))
np.testing.assert_almost_equal(tau[:, -2], np.array((0.59833412, -0.73455049)))
np.testing.assert_almost_equal(
muscles[:, 0],
np.array((1.25202085e-07, 3.21982969e-01, 2.28408549e-01, 3.74330449e-07, 3.74330448e-07, 1.69987512e-01)),
)
np.testing.assert_almost_equal(
muscles[:, -2],
np.array((0.0441982, 0.00474236, 0.0009076, 0.04843388, 0.04843388, 0.00025345)),
)
# save and load
TestUtils.save_and_load(sol, ocp, True)
# simulate
TestUtils.simulate(sol)
def test_effort_fatigable_muscles():
bioptim_folder = TestUtils.bioptim_folder()
fatigue = TestUtils.load_module(f"{bioptim_folder}/examples/fatigue/static_arm_with_fatigue.py")
model_path = f"{bioptim_folder}/examples/fatigue/models/arm26_constant.bioMod"
ocp = fatigue.prepare_ocp(
biorbd_model_path=model_path,
final_time=0.9,
n_shooting=5,
fatigue_type="effort",
ode_solver=OdeSolver.COLLOCATION(),
torque_level=1,
)
sol = ocp.solve()
# Check objective function value
f = np.array(sol.cost)
np.testing.assert_equal(f.shape, (1, 1))
np.testing.assert_almost_equal(f[0, 0], 15.6707872174798)
# Check constraints
g = np.array(sol.constraints)
np.testing.assert_equal(g.shape, (252, 1))
np.testing.assert_almost_equal(g, np.zeros((252, 1)))
# Check some of the results
states, controls = sol.states, sol.controls
q, qdot, mf = states["q"], states["qdot"], states["muscles_mf"]
tau, muscles = controls["tau"], controls["muscles"]
# initial and final position
np.testing.assert_almost_equal(q[:, 0], np.array((0.07, 1.4)))
np.testing.assert_almost_equal(q[:, -1], np.array((1.64470726, 2.25033212)))
# initial and final velocities
np.testing.assert_almost_equal(qdot[:, 0], np.array((0, 0)))
np.testing.assert_almost_equal(qdot[:, -1], np.array((-3.88775204, 3.6333437)))
# fatigue parameters
np.testing.assert_almost_equal(mf[:, 0], np.array((0, 0, 0, 0, 0, 0)))
np.testing.assert_almost_equal(
mf[:, -1],
np.array((0, 5.20374400e-06, 3.66692929e-06, 0, 0, 0)),
)
# initial and final controls
np.testing.assert_almost_equal(tau[:, 0], np.array((1.00151658, 0.75680837)))
np.testing.assert_almost_equal(tau[:, -2], np.array((0.52586746, -0.65113291)))
np.testing.assert_almost_equal(
muscles[:, 0],
np.array((0, 3.22448960e-01, 2.29707272e-01, 2.48558904e-08, 2.48558904e-08, 1.68035094e-01)),
)
np.testing.assert_almost_equal(
muscles[:, -2],
np.array((3.86483801e-02, 1.10050570e-09, 2.74223027e-09, 4.25097689e-02, 4.25097689e-02, 6.56234006e-09)),
)
# save and load
TestUtils.save_and_load(sol, ocp, True)
# simulate
TestUtils.simulate(sol)
def test_fatigable_xia_torque_non_split():
bioptim_folder = TestUtils.bioptim_folder()
fatigue = TestUtils.load_module(f"{bioptim_folder}/examples/fatigue/pendulum_with_fatigue.py")
model_path = f"{bioptim_folder}/examples/fatigue/models/pendulum.bioMod"
ocp = fatigue.prepare_ocp(
biorbd_model_path=model_path,
final_time=1,
n_shooting=10,
fatigue_type="xia",
split_controls=False,
use_sx=False,
)
sol = ocp.solve()
# Check objective function value
f = np.array(sol.cost)
np.testing.assert_equal(f.shape, (1, 1))
if platform.system() == "Linux":
np.testing.assert_almost_equal(f[0, 0], 681.4936347682981)
# Check constraints
g = np.array(sol.constraints)
np.testing.assert_equal(g.shape, (160, 1))
np.testing.assert_almost_equal(g, np.zeros((160, 1)))
# Check some of the results
states, controls = sol.states, sol.controls
q, qdot = states["q"], states["qdot"]
ma_minus, mr_minus, mf_minus = states["tau_minus_ma"], states["tau_minus_mr"], states["tau_minus_mf"]
ma_plus, mr_plus, mf_plus = states["tau_plus_ma"], states["tau_plus_mr"], states["tau_plus_mf"]
tau = controls["tau"]
# initial and final position
np.testing.assert_almost_equal(q[:, 0], np.array((0, 0)))
np.testing.assert_almost_equal(q[:, -1], np.array((0, 3.14)))
np.testing.assert_almost_equal(qdot[:, 0], np.array((0, 0)))
np.testing.assert_almost_equal(qdot[:, -1], np.array((0, 0)))
np.testing.assert_almost_equal(ma_minus[:, 0], np.array((0.0, 0)))
np.testing.assert_almost_equal(mr_minus[:, 0], np.array((1, 1)))
np.testing.assert_almost_equal(mf_minus[:, 0], np.array((0, 0)))
np.testing.assert_almost_equal(ma_plus[:, 0], np.array((0, 0)))
np.testing.assert_almost_equal(mr_plus[:, 0], np.array((1, 1)))
np.testing.assert_almost_equal(mf_plus[:, 0], np.array((0, 0)))
if platform.system() == "Linux":
np.testing.assert_almost_equal(ma_minus[:, -1], np.array((2.05715389e-01, 0)))
np.testing.assert_almost_equal(mr_minus[:, -1], np.array((0.71681593, 1)))
np.testing.assert_almost_equal(mf_minus[:, -1], np.array((7.74686771e-02, 0)))
np.testing.assert_almost_equal(ma_plus[:, -1], np.array((4.54576950e-03, 0)))
np.testing.assert_almost_equal(mr_plus[:, -1], np.array((0.91265673, 1)))
np.testing.assert_almost_equal(mf_plus[:, -1], np.array((8.27975034e-02, 0)))
np.testing.assert_almost_equal(tau[:, 0], np.array((4.65387493, 0)))
np.testing.assert_almost_equal(tau[:, -2], np.array((-21.7531631, 0)))
# save and load
TestUtils.save_and_load(sol, ocp, True)
# simulate
TestUtils.simulate(sol)
def test_fatigable_xia_torque_split():
bioptim_folder = TestUtils.bioptim_folder()
fatigue = TestUtils.load_module(f"{bioptim_folder}/examples/fatigue/pendulum_with_fatigue.py")
model_path = f"{bioptim_folder}/examples/fatigue/models/pendulum.bioMod"
ocp = fatigue.prepare_ocp(
biorbd_model_path=model_path, final_time=1, n_shooting=30, fatigue_type="xia", split_controls=True, use_sx=False
)
sol = ocp.solve()
# Check objective function value
f = np.array(sol.cost)
np.testing.assert_equal(f.shape, (1, 1))
np.testing.assert_almost_equal(f[0, 0], 46.97293026598778)
# Check constraints
g = np.array(sol.constraints)
np.testing.assert_equal(g.shape, (480, 1))
np.testing.assert_almost_equal(g, np.zeros((480, 1)))
# Check some of the results
states, controls = sol.states, sol.controls
q, qdot = states["q"], states["qdot"]
ma_minus, mr_minus, mf_minus = states["tau_minus_ma"], states["tau_minus_mr"], states["tau_minus_mf"]
ma_plus, mr_plus, mf_plus = states["tau_plus_ma"], states["tau_plus_mr"], states["tau_plus_mf"]
tau_minus, tau_plus = controls["tau_minus"], controls["tau_plus"]
# initial and final position
np.testing.assert_almost_equal(q[:, 0], np.array((0, 0)))
np.testing.assert_almost_equal(q[:, -1], np.array((0, 3.14)))
np.testing.assert_almost_equal(qdot[:, 0], np.array((0, 0)))
np.testing.assert_almost_equal(qdot[:, -1], np.array((0, 0)))
np.testing.assert_almost_equal(ma_minus[:, 0], np.array((0.0, 0)))
np.testing.assert_almost_equal(ma_minus[:, -1], np.array((9.74835527e-02, 0)))
np.testing.assert_almost_equal(mr_minus[:, 0], np.array((1, 1)))
np.testing.assert_almost_equal(mr_minus[:, -1], np.array((0.88266826, 1)))
np.testing.assert_almost_equal(mf_minus[:, 0], np.array((0, 0)))
np.testing.assert_almost_equal(mf_minus[:, -1], np.array((1.98481921e-02, 0)))
np.testing.assert_almost_equal(ma_plus[:, 0], np.array((0, 0)))
np.testing.assert_almost_equal(ma_plus[:, -1], np.array((5.69110401e-06, 0)))
np.testing.assert_almost_equal(mr_plus[:, 0], np.array((1, 1)))
np.testing.assert_almost_equal(mr_plus[:, -1], np.array((0.9891588, 1)))
np.testing.assert_almost_equal(mf_plus[:, 0], np.array((0, 0)))
np.testing.assert_almost_equal(mf_plus[:, -1], np.array((1.08355110e-02, 0)))
np.testing.assert_almost_equal(tau_minus[:, 0], np.array((0, 0)))
np.testing.assert_almost_equal(tau_minus[:, -2], np.array((-10.29111867, 0)))
np.testing.assert_almost_equal(tau_plus[:, 0], np.array((7.0546191, 0)))
np.testing.assert_almost_equal(tau_plus[:, -2], np.array((0, 0)))
# save and load
TestUtils.save_and_load(sol, ocp, True)
# simulate
TestUtils.simulate(sol)
def test_fatigable_michaud_torque_non_split():
bioptim_folder = TestUtils.bioptim_folder()
fatigue = TestUtils.load_module(f"{bioptim_folder}/examples/fatigue/pendulum_with_fatigue.py")
model_path = f"{bioptim_folder}/examples/fatigue/models/pendulum.bioMod"
ocp = fatigue.prepare_ocp(
biorbd_model_path=model_path,
final_time=1,
n_shooting=10,
fatigue_type="michaud",
split_controls=False,
use_sx=False,
)
sol = ocp.solve()
# Check objective function value
f = np.array(sol.cost)
np.testing.assert_equal(f.shape, (1, 1))
if platform.system() == "Linux":
np.testing.assert_almost_equal(f[0, 0], 752.2660291516361)
# Check constraints
g = np.array(sol.constraints)
np.testing.assert_equal(g.shape, (200, 1))
np.testing.assert_almost_equal(g, np.zeros((200, 1)), decimal=6)
# Check some of the results
states, controls = sol.states, sol.controls
q, qdot = states["q"], states["qdot"]
ma_minus, mr_minus, mf_minus = states["tau_minus_ma"], states["tau_minus_mr"], states["tau_minus_mf"]
ma_plus, mr_plus, mf_plus = states["tau_plus_ma"], states["tau_plus_mr"], states["tau_plus_mf"]
tau = controls["tau"]
# initial and final position
np.testing.assert_almost_equal(q[:, 0], np.array((0, 0)))
np.testing.assert_almost_equal(q[:, -1], np.array((0, 3.14)))
np.testing.assert_almost_equal(qdot[:, 0], np.array((0, 0)))
np.testing.assert_almost_equal(qdot[:, -1], np.array((0, 0)))
np.testing.assert_almost_equal(ma_minus[:, 0], np.array((0, 0)))
np.testing.assert_almost_equal(mr_minus[:, 0], np.array((1, 1)))
np.testing.assert_almost_equal(mf_minus[:, 0], np.array((0, 0)))
np.testing.assert_almost_equal(ma_plus[:, 0], np.array((0, 0)))
np.testing.assert_almost_equal(mr_plus[:, 0], np.array((1, 1)))
np.testing.assert_almost_equal(mf_plus[:, 0], np.array((0, 0)))
if platform.system() == "Linux":
np.testing.assert_almost_equal(ma_minus[:, -1], np.array((2.27726849e-01, 0)))
np.testing.assert_almost_equal(mr_minus[:, -1], np.array((0.77154438, 1)))
np.testing.assert_almost_equal(mf_minus[:, -1], np.array((2.99934839e-04, 0)))
np.testing.assert_almost_equal(ma_plus[:, -1], np.array((2.94965705e-03, 0)))
np.testing.assert_almost_equal(mr_plus[:, -1], np.array((0.99650902, 1)))
np.testing.assert_almost_equal(tau[:, 0], np.array((4.59966318, 0)))
np.testing.assert_almost_equal(tau[:, -2], np.array((-22.86838109, 0)))
np.testing.assert_almost_equal(mf_plus[:, -1], np.array((9.99805014e-05, 0)))
# save and load
TestUtils.save_and_load(sol, ocp, True)
# simulate
TestUtils.simulate(sol, decimal_value=5)
def test_fatigable_michaud_torque_split():
bioptim_folder = TestUtils.bioptim_folder()
fatigue = TestUtils.load_module(f"{bioptim_folder}/examples/fatigue/pendulum_with_fatigue.py")
model_path = f"{bioptim_folder}/examples/fatigue/models/pendulum.bioMod"
ocp = fatigue.prepare_ocp(
biorbd_model_path=model_path,
final_time=1,
n_shooting=10,
fatigue_type="michaud",
split_controls=True,
use_sx=False,
)
sol = ocp.solve()
# Check objective function value
f = np.array(sol.cost)
np.testing.assert_equal(f.shape, (1, 1))
np.testing.assert_almost_equal(f[0, 0], 66.4869989782804)
# Check constraints
g = np.array(sol.constraints)
np.testing.assert_equal(g.shape, (200, 1))
np.testing.assert_almost_equal(g, np.zeros((200, 1)))
# Check some of the results
states, controls = sol.states, sol.controls
q, qdot = states["q"], states["qdot"]
ma_minus, mr_minus, mf_minus = states["tau_minus_ma"], states["tau_minus_mr"], states["tau_minus_mf"]
ma_plus, mr_plus, mf_plus = states["tau_plus_ma"], states["tau_plus_mr"], states["tau_plus_mf"]
tau_minus, tau_plus = controls["tau_minus"], controls["tau_plus"]
# initial and final position
np.testing.assert_almost_equal(q[:, 0], np.array((0, 0)))
np.testing.assert_almost_equal(q[:, -1], np.array((0, 3.14)))
np.testing.assert_almost_equal(qdot[:, 0], np.array((0, 0)))
np.testing.assert_almost_equal(qdot[:, -1], np.array((0, 0)))
np.testing.assert_almost_equal(ma_minus[:, 0], np.array((0, 0)))
np.testing.assert_almost_equal(ma_minus[:, -1], np.array((1.14840287e-01, 0)))
np.testing.assert_almost_equal(mr_minus[:, 0], np.array((1, 1)))
np.testing.assert_almost_equal(mr_minus[:, -1], np.array((0.88501154, 1)))
np.testing.assert_almost_equal(mf_minus[:, 0], np.array((0, 0)))
np.testing.assert_almost_equal(mf_minus[:, -1], np.array((0, 0)))
    np.testing.assert_almost_equal(ma_plus[:, 0], np.array((0, 0)))
import numpy as np
def sqrt_gain_db(gain_db):
return gain_db / 2
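# Halving a gain expressed in dB corresponds to taking the square root of the linear gain,
# so e.g. sqrt_gain_db(6.0) == 3.0.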
def design_low_shelving_filter(g_db, f, q, fs):
    k = np.tan((np.pi * f) / fs)
    v0 = np.power(10.0, g_db / 20.0)
root2 = 1.0 / q
if v0 < 1:
v0 = 1 / v0
if g_db > 0:
b0 = (1 + np.sqrt(v0) * root2 * k + v0 * k * k) / (1 + root2 * k + k * k)
b1 = (2 * (v0 * k * k - 1)) / (1 + root2 * k + k * k)
b2 = (1 - np.sqrt(v0) * root2 * k + v0 * k * k) / (1 + root2 * k + k * k)
a0 = 1
a1 = (2 * (k * k - 1)) / (1 + root2 * k + k * k)
a2 = (1 - root2 * k + k * k) / (1 + root2 * k + k * k)
elif g_db < 0:
b0 = (1 + root2 * k + k * k) / (1 + root2 * np.sqrt(v0) * k + v0 * k * k)
b1 = (2 * (k * k - 1)) / (1 + root2 * np.sqrt(v0) * k + v0 * k * k)
b2 = (1 - root2 * k + k * k) / (1 + root2 * np.sqrt(v0) * k + v0 * k * k)
a0 = 1
a1 = (2 * (v0 * k * k - 1)) / (1 + root2 * np.sqrt(v0) * k + v0 * k * k)
        a2 = (1 - root2 * np.sqrt(v0) * k + v0 * k * k) / (1 + root2 * np.sqrt(v0) * k + v0 * k * k)
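    # A typical continuation (assumed sketch, not the original implementation) returns the
    # coefficient arrays for use with a biquad filtering routine, e.g.:
    #   return np.array([b0, b1, b2]), np.array([a0, a1, a2])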
'''
This script plots the four test predictions of the predictive
expectation and variance of BAR-DenseED seen in Figure 13 of the paper.
===
Distributed by: <NAME> (MIT Liscense)
- Associated publication:
url: http://www.sciencedirect.com/science/article/pii/S0021999119307612
doi: https://doi.org/10.1016/j.jcp.2019.109056
github: https://github.com/cics-nd/ar-pde-cnn
===
'''
import sys
sys.path.append("..") # Adds higher directory to python modules path.
from args import Parser
from nn.denseEDcirc import DenseED
from nn.bayesNN import BayesNN
from nn.swag import SwagNN
from utils.utils import mkdirs
from utils.burgerLoader import BurgerLoader
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib import rc
import matplotlib.gridspec as gridspec
import torch
import numpy as np
import os
import time
def testSample(args, swag_nn, test_loader, tstep=100, n_samples=10):
'''
Tests the samples of the Bayesian SWAG model
Args:
        args (argparse): object with the program's arguments
        swag_nn (SwagNN): Bayesian SWAG DenseED model to be sampled and tested
test_loader (dataloader): dataloader with test cases (use createTestingLoader)
tstep (int): number of timesteps to predict for
n_samples (int): number of model samples to draw
Returns:
        u_out (torch.Tensor): [d x nsamples x tstep x nel] predicted quantities of each sample
        betas (torch.Tensor): [nsamples] output noise precision of each model sample
        u_target (torch.Tensor): [d x tstep x nel] respective target values loaded from simulator
'''
mb_size = int(len(test_loader.dataset)/len(test_loader))
u_out = torch.zeros(mb_size, n_samples, tstep+1, args.nel)
betas = torch.zeros(n_samples)
for i in range(n_samples):
print('Executing model sample {:d}'.format(i))
model = swag_nn.sample(diagCov=True)
model.eval()
betas[i] = model.model.log_beta.exp()
for batch_idx, (input0, uTarget0) in enumerate(test_loader):
input = input0.to(args.device)
u_target = uTarget0
u_out[:,i,0,:] = input[:,0]
# Auto-regress
for t_idx in range(tstep):
uPred = model(input[:,-args.nic:,:])
u_out[:,i,t_idx+1,:] = uPred[:,0]
input = input[:,-int(args.nic-1):,:].detach()
input0 = uPred[:,0,:].unsqueeze(1).detach()
input = torch.cat([input, input0], dim=1)
# Only do the first mini-batch
break
return u_out, betas, u_target
def plotContourGrid(t, xT, uPred, betas, uTarget):
'''
Creates grid of 4 different test cases, plots target, prediction, variance and error for each
'''
mpl.rcParams['font.family'] = ['serif'] # default is sans-serif
rc('text', usetex=False)
fig = plt.figure(figsize=(15, 13), dpi=150)
outer = gridspec.GridSpec(2, 2, wspace=0.45, hspace=0.2) # Outer grid
for i in range(4):
# Inner grid
inner = gridspec.GridSpecFromSubplotSpec(4, 1,
subplot_spec=outer[i], wspace=0, hspace=0.25)
ax = []
for j in range(4):
ax0 = plt.Subplot(fig, inner[j])
fig.add_subplot(ax0)
ax.append(ax0)
# Plot specific test case
plotPred(fig, ax, t, xT, uPred[i], betas, uTarget[i])
file_dir = '.'
# If directory does not exist create it
if not os.path.exists(file_dir):
os.makedirs(file_dir)
file_name = file_dir+"/burger_BAR_pred"
plt.savefig(file_name+".png", bbox_inches='tight')
plt.savefig(file_name+".pdf", bbox_inches='tight')
plt.show()
def plotPred(fig, ax, t, xT, uPred, betas, uTarget):
'''
Plots specific test case
Args:
fig: matplotlib figure
ax (list): list of four subplot axis
t (np.array): [n] array to time values for x axis
xT (np.array): [m] array of spacial coordinates for y axis
        uPred (np.array): [s x n x m] predictions from each of the s model samples
        betas (np.array): [s] output noise precision of each model sample
        uTarget (np.array): [n x m] target field
'''
# Start with the target up top
cmap = "inferno"
c0 = ax[0].imshow(uTarget.T, interpolation='nearest', cmap=cmap, origin='lower', aspect='auto', extent=[t[0],t[-1],xT[0],xT[-1]])
c_max = np.max(uTarget.T)
c_min = np.min(uTarget.T)
c0.set_clim(vmin=c_min, vmax=c_max)
# Plot the mean
uPred_mean = np.mean(uPred, axis=0)
c0 = ax[1].imshow(uPred_mean.T, interpolation='nearest', cmap=cmap, origin='lower', aspect='auto', extent=[t[0],t[-1],xT[0],xT[-1]])
c0.set_clim(vmin=c_min, vmax=c_max)
p0 = ax[0].get_position().get_points().flatten()
p1 = ax[1].get_position().get_points().flatten()
ax_cbar = fig.add_axes([p1[2]+0.015, p1[1], 0.020, p0[3]-p1[1]])
ticks = np.linspace(0, 1, 5)
tickLabels = np.linspace(c_min, c_max, 5)
tickLabels = ["{:02.2f}".format(t0) for t0 in tickLabels]
cbar = mpl.colorbar.ColorbarBase(ax_cbar, cmap=plt.get_cmap(cmap), orientation='vertical', ticks=ticks)
cbar.set_ticklabels(tickLabels)
# Variance
betas = np.expand_dims(betas, axis=1).repeat(uPred.shape[1], axis=1) # Expand noise parameter
betas = np.expand_dims(betas, axis=2).repeat(uPred.shape[2], axis=2) # Expand noise parameter
uPred_var = np.mean(1./betas + uPred*uPred, axis=0) - uPred_mean*uPred_mean
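    # Predictive variance from the SWAG samples: E[1/beta] (output noise) plus
    # E[u^2] - E[u]^2 (spread across the sampled networks), averaged over the samples.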
c0 = ax[2].imshow(uPred_var.T, interpolation='nearest', cmap=cmap, origin='lower', aspect='auto', extent=[t[0],t[-1],xT[0],xT[-1]])
c_max = np.max(uPred_var)
c0.set_clim(vmin=0, vmax=c_max)
p0 = ax[2].get_position().get_points().flatten()
ax_cbar = fig.add_axes([p0[2]+0.015, p0[1], 0.020, p0[3]-p0[1]])
ticks = np.linspace(0, 1, 5)
tickLabels = np.linspace(0, c_max, 5)
tickLabels = ["{:02.2f}".format(t0) for t0 in tickLabels]
cbar = mpl.colorbar.ColorbarBase(ax_cbar, cmap=plt.get_cmap(cmap), orientation='vertical', ticks=ticks)
cbar.set_ticklabels(tickLabels)
# Mean Error
cmap = "viridis"
c0 = ax[3].imshow(np.abs(uPred_mean.T - uTarget.T), interpolation='nearest', cmap=cmap, origin='lower', aspect='auto', extent=[t[0],t[-1],xT[0],xT[-1]])
p0 = ax[3].get_position().get_points().flatten()
ax_cbar = fig.add_axes([p0[2]+0.015, p0[1], 0.020, p0[3]-p0[1]])
    ticks = np.linspace(0, 1, 5)
# Copyright (c) 2019-2020, NVIDIA CORPORATION.
import datetime as dt
import re
import cupy as cp
import numpy as np
import pandas as pd
import pyarrow as pa
import pytest
from pandas.util.testing import (
assert_frame_equal,
assert_index_equal,
assert_series_equal,
)
import cudf
from cudf.core import DataFrame, Series
from cudf.core.index import DatetimeIndex
from cudf.tests.utils import NUMERIC_TYPES, assert_eq
def data1():
return pd.date_range("20010101", "20020215", freq="400h", name="times")
def data2():
return pd.date_range("20010101", "20020215", freq="400h", name="times")
def timeseries_us_data():
return pd.date_range(
"2019-07-16 00:00:00",
"2019-07-16 00:00:01",
freq="5555us",
name="times",
)
def timestamp_ms_data():
return pd.Series(
[
"2019-07-16 00:00:00.333",
"2019-07-16 00:00:00.666",
"2019-07-16 00:00:00.888",
]
)
def timestamp_us_data():
return pd.Series(
[
"2019-07-16 00:00:00.333333",
"2019-07-16 00:00:00.666666",
"2019-07-16 00:00:00.888888",
]
)
def timestamp_ns_data():
return pd.Series(
[
"2019-07-16 00:00:00.333333333",
"2019-07-16 00:00:00.666666666",
"2019-07-16 00:00:00.888888888",
]
)
def numerical_data():
return np.arange(1, 10)
fields = ["year", "month", "day", "hour", "minute", "second", "weekday"]
@pytest.mark.parametrize("data", [data1(), data2()])
def test_series(data):
pd_data = pd.Series(data.copy())
gdf_data = Series(pd_data)
assert_eq(pd_data, gdf_data)
@pytest.mark.parametrize(
"lhs_dtype",
["datetime64[s]", "datetime64[ms]", "datetime64[us]", "datetime64[ns]"],
)
@pytest.mark.parametrize(
"rhs_dtype",
["datetime64[s]", "datetime64[ms]", "datetime64[us]", "datetime64[ns]"],
)
def test_datetime_series_binops_pandas(lhs_dtype, rhs_dtype):
pd_data_1 = pd.Series(
pd.date_range("20010101", "20020215", freq="400h", name="times")
)
pd_data_2 = pd.Series(
pd.date_range("20010101", "20020215", freq="401h", name="times")
)
gdf_data_1 = Series(pd_data_1).astype(lhs_dtype)
gdf_data_2 = Series(pd_data_2).astype(rhs_dtype)
assert_eq(pd_data_1, gdf_data_1.astype("datetime64[ns]"))
assert_eq(pd_data_2, gdf_data_2.astype("datetime64[ns]"))
assert_eq(pd_data_1 < pd_data_2, gdf_data_1 < gdf_data_2)
assert_eq(pd_data_1 > pd_data_2, gdf_data_1 > gdf_data_2)
assert_eq(pd_data_1 == pd_data_2, gdf_data_1 == gdf_data_2)
assert_eq(pd_data_1 <= pd_data_2, gdf_data_1 <= gdf_data_2)
assert_eq(pd_data_1 >= pd_data_2, gdf_data_1 >= gdf_data_2)
@pytest.mark.parametrize(
"lhs_dtype",
["datetime64[s]", "datetime64[ms]", "datetime64[us]", "datetime64[ns]"],
)
@pytest.mark.parametrize(
"rhs_dtype",
["datetime64[s]", "datetime64[ms]", "datetime64[us]", "datetime64[ns]"],
)
def test_datetime_series_binops_numpy(lhs_dtype, rhs_dtype):
pd_data_1 = pd.Series(
pd.date_range("20010101", "20020215", freq="400h", name="times")
)
pd_data_2 = pd.Series(
pd.date_range("20010101", "20020215", freq="401h", name="times")
)
gdf_data_1 = Series(pd_data_1).astype(lhs_dtype)
gdf_data_2 = Series(pd_data_2).astype(rhs_dtype)
np_data_1 = np.array(pd_data_1).astype(lhs_dtype)
np_data_2 = np.array(pd_data_2).astype(rhs_dtype)
np.testing.assert_equal(np_data_1, gdf_data_1.to_array())
np.testing.assert_equal(np_data_2, gdf_data_2.to_array())
np.testing.assert_equal(
np.less(np_data_1, np_data_2), (gdf_data_1 < gdf_data_2).to_array()
)
np.testing.assert_equal(
np.greater(np_data_1, np_data_2), (gdf_data_1 > gdf_data_2).to_array()
)
np.testing.assert_equal(
np.equal(np_data_1, np_data_2), (gdf_data_1 == gdf_data_2).to_array()
)
np.testing.assert_equal(
np.less_equal(np_data_1, np_data_2),
(gdf_data_1 <= gdf_data_2).to_array(),
)
np.testing.assert_equal(
        np.greater_equal(np_data_1, np_data_2),
        (gdf_data_1 >= gdf_data_2).to_array(),
    )
from __future__ import division
from openmdao.api import Problem, Group, IndepVarComp, NewtonSolver, DirectSolver, BoundsEnforceLS
from openmdao.api import ScipyOptimizeDriver, ExplicitComponent, ImplicitComponent
import numpy as np
import scipy.sparse as sp
import sys, os
sys.path.insert(0,os.getcwd())
from openconcept.components.ducts import ImplicitCompressibleDuct
from openconcept.utilities.math.integrals import Integrator
from openconcept.utilities.math.derivatives import FirstDerivative
from openconcept.utilities.math import AddSubtractComp, ElementMultiplyDivideComp, VectorConcatenateComp, VectorSplitComp
from openconcept.analysis.atmospherics.compute_atmos_props import ComputeAtmosphericProperties
"""Analysis routines for simulating thermal management of aircraft components"""
class ThermalComponentWithMass(ExplicitComponent):
"""
Computes thermal residual of a component with heating, cooling, and thermal mass
Inputs
------
q_in : float
Heat generated by the component (vector, W)
q_out : float
Heat to waste stream (vector, W)
mass : float
Thermal mass (scalar, kg)
Outputs
-------
dTdt : float
First derivative of temperature (vector, K/s)
Options
-------
specific_heat : float
Specific heat capacity of the object in J / kg / K (default 921 = aluminum)
num_nodes : float
The number of analysis points to run
"""
def initialize(self):
self.options.declare('num_nodes', default=1)
self.options.declare('specific_heat', default=921, desc='Specific heat in J/kg/K - default 921 for aluminum')
def setup(self):
nn_tot = self.options['num_nodes']
arange = np.arange(0, nn_tot)
self.add_input('q_in', units='W', shape=(nn_tot,))
self.add_input('q_out', units='W', shape=(nn_tot,))
self.add_input('mass', units='kg')
self.add_output('dTdt', units='K/s', shape=(nn_tot,))
self.declare_partials(['dTdt'], ['q_in'], rows=arange, cols=arange)
self.declare_partials(['dTdt'], ['q_out'], rows=arange, cols=arange)
self.declare_partials(['dTdt'], ['mass'], rows=arange, cols=np.zeros((nn_tot,)))
def compute(self, inputs, outputs):
spec_heat = self.options['specific_heat']
outputs['dTdt'] = (inputs['q_in'] - inputs['q_out']) / inputs['mass'] / spec_heat
def compute_partials(self, inputs, J):
nn_tot = self.options['num_nodes']
spec_heat = self.options['specific_heat']
J['dTdt','mass'] = - (inputs['q_in'] - inputs['q_out']) / inputs['mass']**2 / spec_heat
J['dTdt','q_in'] = 1 / inputs['mass'] / spec_heat
J['dTdt','q_out'] = - 1 / inputs['mass'] / spec_heat
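# Minimal usage sketch for ThermalComponentWithMass (illustrative only; the values and the
# standalone-problem setup are assumptions, not taken from a particular model):
#
#   prob = Problem()
#   prob.model.add_subsystem('comp', ThermalComponentWithMass(num_nodes=1), promotes=['*'])
#   prob.setup()
#   prob['q_in'] = np.array([5000.0])   # W generated by the component
#   prob['q_out'] = np.array([2000.0])  # W rejected to the coolant
#   prob['mass'] = 50.0                 # kg of aluminum
#   prob.run_model()
#   # dTdt = (q_in - q_out) / (mass * c_p) = 3000 / (50 * 921) ~ 0.065 K/s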
class CoolantReservoirRate(ExplicitComponent):
"""
Computes dT/dt of a coolant reservoir based on inflow and current temps and flow rate
Inputs
------
T_in : float
Coolant stream in (vector, K)
T_out : float
Temperature of the reservoir (vector, K)
mass : float
Total quantity of coolant (scalar, kg)
mdot_coolant : float
Mass flow rate of the coolant (vector, kg/s)
Outputs
-------
dTdt : float
First derivative of temperature (vector, K/s)
Options
-------
num_nodes : float
The number of analysis points to run
"""
def initialize(self):
self.options.declare('num_nodes', default=1)
def setup(self):
nn_tot = self.options['num_nodes']
arange = np.arange(0, nn_tot)
self.add_input('T_in', units='K', shape=(nn_tot,))
self.add_input('T_out', units='K', shape=(nn_tot,))
self.add_input('mdot_coolant', units='kg/s', shape=(nn_tot,))
self.add_input('mass', units='kg')
self.add_output('dTdt', units='K/s', shape=(nn_tot,))
self.declare_partials(['dTdt'], ['T_in','T_out','mdot_coolant'], rows=arange, cols=arange)
self.declare_partials(['dTdt'], ['mass'], rows=arange, cols=np.zeros((nn_tot,)))
def compute(self, inputs, outputs):
outputs['dTdt'] = inputs['mdot_coolant'] / inputs['mass'] * (inputs['T_in'] - inputs['T_out'])
def compute_partials(self, inputs, J):
J['dTdt','mass'] = - inputs['mdot_coolant'] / inputs['mass']**2 * (inputs['T_in'] - inputs['T_out'])
J['dTdt','mdot_coolant'] = 1 / inputs['mass'] * (inputs['T_in'] - inputs['T_out'])
J['dTdt','T_in'] = inputs['mdot_coolant'] / inputs['mass']
J['dTdt','T_out'] = - inputs['mdot_coolant'] / inputs['mass']
class ThermalComponentMassless(ImplicitComponent):
"""
Computes thermal residual of a component with heating, cooling, and thermal mass
Inputs
------
q_in : float
Heat generated by the component (vector, W)
q_out : float
Heat to waste stream (vector, W)
Outputs
-------
T_object : float
Object temperature (vector, K/s)
Options
-------
num_nodes : float
The number of analysis points to run
"""
def initialize(self):
self.options.declare('num_nodes',default=1)
def setup(self):
nn_tot = self.options['num_nodes']
arange = np.arange(0, nn_tot)
self.add_input('q_in', units='W', shape=(nn_tot,))
self.add_input('q_out', units='W', shape=(nn_tot,))
self.add_output('T_object', units='K', shape=(nn_tot,))
self.declare_partials(['T_object'], ['q_in'], rows=arange, cols=arange, val=np.ones((nn_tot,)))
self.declare_partials(['T_object'], ['q_out'], rows=arange, cols=arange, val=-np.ones((nn_tot,)))
def apply_nonlinear(self, inputs, outputs, residuals):
residuals['T_object'] = inputs['q_in'] - inputs['q_out']
class ConstantSurfaceTemperatureColdPlate_NTU(ExplicitComponent):
"""
Computes heat rejection to fluid stream of a microchannel cold plate
with uniform temperature
Inputs
------
T_in : float
Coolant inlet temperature (vector, K)
T_surface : float
Temperature of the cold plate (vector, K)
mdot_coolant : float
Mass flow rate of the coolant (vector, kg/s)
channel_length : float
Length of each microchannel (scalar, m)
channel_width : float
Width of each microchannel (scalar, m)
channel_height : float
Height of each microchannel (scalar, m)
n_parallel : float
Number of fluid channels (scalar, dimensionless)
Outputs
-------
q : float
Heat transfer rate from the plate to the fluid (vector, W)
T_out : float
Outlet fluid temperature (vector, K)
Options
-------
num_nodes : float
The number of analysis points to run
fluid_rho : float
        Coolant density in kg/m**3 (default 997.0, water)
fluid_k : float
Thermal conductivity of the fluid (W/m/K) (default 0.405, glycol/water)
nusselt : float
Hydraulic diameter Nusselt number of the coolant in the channels
(default 7.54 for constant temperature infinite parallel plate)
specific_heat : float
Specific heat of the coolant (J/kg/K) (default 3801, glycol/water)
"""
def initialize(self):
self.options.declare('num_nodes', default=1, desc='Number of analysis points')
self.options.declare('fluid_rho', default=997.0, desc='Fluid density in kg/m3')
self.options.declare('fluid_k', default=0.405, desc='Thermal conductivity of the fluid in W / mK')
self.options.declare('nusselt', default=7.54, desc='Hydraulic diameter Nusselt number')
self.options.declare('specific_heat', default=3801, desc='Specific heat in J/kg/K')
def setup(self):
nn_tot = self.options['num_nodes']
arange = np.arange(0, nn_tot)
self.add_input('T_in', units='K', shape=(nn_tot,))
self.add_input('T_surface', units='K', shape=(nn_tot,))
self.add_input('channel_width', units='m')
self.add_input('channel_height', units='m')
self.add_input('channel_length', units='m')
self.add_input('n_parallel')
self.add_input('mdot_coolant', units='kg/s', shape=(nn_tot,))
self.add_output('q', units='W', shape=(nn_tot,))
self.add_output('T_out', units='K', shape=(nn_tot,))
self.declare_partials(['q','T_out'], ['T_in','T_surface','mdot_coolant'], method='cs')
self.declare_partials(['q','T_out'], ['channel_width','channel_height','channel_length','n_parallel'], method='cs')
def compute(self, inputs, outputs):
Ts = inputs['T_surface']
Ti = inputs['T_in']
Cmin = inputs['mdot_coolant'] * self.options['specific_heat']
#cross_section_area = inputs['channel_width'] * inputs['channel_height'] * inputs['n_parallel']
#flow_rate = inputs['mdot_coolant'] / self.options['rho'] / cross_section_area # m/s
surface_area = 2 * (inputs['channel_width']*inputs['channel_length'] +
inputs['channel_height'] * inputs['channel_length']) * inputs['n_parallel']
d_h = 2 * inputs['channel_width'] * inputs['channel_height'] / (inputs['channel_width'] + inputs['channel_height'])
h = self.options['nusselt'] * self.options['fluid_k'] / d_h
ntu = surface_area * h / Cmin
effectiveness = 1 - np.exp(-ntu)
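        # Constant-wall-temperature limit of the effectiveness-NTU method (Cr -> 0):
        # effectiveness = 1 - exp(-NTU), so the duty is q = eff * Cmin * (T_surface - T_in)
        # and the outlet temperature follows from an energy balance on the coolant stream.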
outputs['q'] = effectiveness * Cmin * (Ts - Ti)
outputs['T_out'] = inputs['T_in'] + outputs['q'] / inputs['mdot_coolant'] / self.options['specific_heat']
class LiquidCooledComp(Group):
"""A component (heat producing) with thermal mass
cooled by a cold plate.
Inputs
------
q_in : float
Heat produced by the operating component (vector, W)
mdot_coolant : float
Coolant mass flow rate (vector, kg/s)
T_in : float
Instantaneous coolant inflow temperature (vector, K)
mass : float
Object mass (only required in thermal mass mode) (scalar, kg)
T_initial : float
Initial temperature of the cold plate (only required in thermal mass mode) / object (scalar, K)
duration : float
Duration of mission segment, only required in unsteady mode
channel_width : float
Width of coolant channels (scalar, m)
channel_height : float
Height of coolant channels (scalar, m)
channel_length : float
Length of coolant channels (scalar, m)
n_parallel : float
Number of identical coolant channels (scalar, dimensionless)
Outputs
-------
T_out : float
Instantaneous coolant outlet temperature (vector, K)
T: float
Object temperature (vector, K)
Options
-------
specific_heat_object : float
Specific heat capacity of the object in J / kg / K (default 921 = aluminum)
specific_heat_coolant : float
Specific heat capacity of the coolant in J / kg / K (default 3801, glycol/water)
num_nodes : int
Number of analysis points to run
quasi_steady : bool
Whether or not to treat the component as having thermal mass
"""
def initialize(self):
self.options.declare('specific_heat_object', default=921.0, desc='Specific heat in J/kg/K')
self.options.declare('specific_heat_coolant', default=3801, desc='Specific heat in J/kg/K')
self.options.declare('quasi_steady', default=False, desc='Treat the component as quasi-steady or with thermal mass')
self.options.declare('num_nodes', default=1, desc='Number of quasi-steady points to runs')
def setup(self):
nn = self.options['num_nodes']
quasi_steady = self.options['quasi_steady']
if not quasi_steady:
self.add_subsystem('base',
ThermalComponentWithMass(specific_heat=self.options['specific_heat_object'],
num_nodes=nn),
promotes_inputs=['q_in', 'mass'])
self.add_subsystem('integratetemp',
Integrator(num_intervals=int((nn-1)/2),
quantity_units='K',
diff_units='s',
method='simpson',
time_setup='duration'),
promotes_inputs=['duration',('q_initial','T_initial')],
promotes_outputs=[('q','T'),('q_final','T_final')])
self.connect('base.dTdt','integratetemp.dqdt')
else:
self.add_subsystem('base',
ThermalComponentMassless(num_nodes=nn),
promotes_inputs=['q_in'],
promotes_outputs=['T'])
self.add_subsystem('hex',
ConstantSurfaceTemperatureColdPlate_NTU(num_nodes=nn, specific_heat=self.options['specific_heat_coolant']),
promotes_inputs=['T_in', ('T_surface','T'),'n_parallel','channel*','mdot_coolant'],
promotes_outputs=['T_out'])
self.connect('hex.q','base.q_out')
class CoolantReservoir(Group):
"""A reservoir of coolant capable of buffering temperature
Inputs
------
mdot_coolant : float
Coolant mass flow rate (vector, kg/s)
T_in : float
Coolant inflow temperature (vector, K)
mass : float
Object mass (only required in thermal mass mode) (scalar, kg)
    T_initial : float
        Initial temperature of the coolant reservoir / object (only required in thermal mass mode) (scalar, K)
    duration : float
        Duration of the mission segment (scalar, s)
        Only required in thermal mass (unsteady) mode
Outputs
-------
T_out : float
Coolant outlet temperature (vector, K)
Options
-------
num_nodes : int
Number of analysis points to run
"""
def initialize(self):
self.options.declare('num_nodes',default=5)
def setup(self):
nn = self.options['num_nodes']
self.add_subsystem('rate',
CoolantReservoirRate(num_nodes=nn),
promotes_inputs=['T_in', 'T_out', 'mass', 'mdot_coolant'])
self.add_subsystem('integratetemp',
Integrator(num_intervals=int((nn-1)/2),
quantity_units='K',
diff_units='s',
method='simpson',
time_setup='duration'),
promotes_inputs=['duration',('q_initial','T_initial')],
promotes_outputs=[('q','T_out'),('q_final','T_final')])
self.connect('rate.dTdt','integratetemp.dqdt')
class LiquidCoolantTestGroup(Group):
"""A component (heat producing) with thermal mass
cooled by a cold plate.
"""
def initialize(self):
self.options.declare('num_nodes',default=11)
self.options.declare('quasi_steady', default=False, desc='Treat the component as quasi-steady or with thermal mass')
def setup(self):
quasi_steady = self.options['quasi_steady']
nn = self.options['num_nodes']
iv = self.add_subsystem('iv',IndepVarComp(), promotes_outputs=['*'])
#iv.add_output('q_in', val=10*np.concatenate([np.ones((nn,)),0.5*np.ones((nn,)),0.2*np.ones((nn,))]), units='kW')
throttle_profile = np.ones((nn,))
iv.add_output('q_in',val=10*throttle_profile, units='kW')
#iv.add_output('T_in', val=40*np.ones((nn_tot,)), units='degC')
iv.add_output('mdot_coolant', val=0.1*np.ones((nn,)), units='kg/s')
iv.add_output('rho_coolant', val=997*np.ones((nn,)),units='kg/m**3')
iv.add_output('motor_mass', val=50., units='kg')
iv.add_output('coolant_mass', val=10., units='kg')
iv.add_output('T_motor_initial', val=15, units='degC')
iv.add_output('T_res_initial', val=15.1, units='degC')
iv.add_output('duration', val=800, units='s')
iv.add_output('channel_width', val=1, units='mm')
iv.add_output('channel_height', val=20, units='mm')
iv.add_output('channel_length', val=0.2, units='m')
iv.add_output('n_parallel', val=20)
Ueas = np.ones((nn))*150
h = np.concatenate([np.linspace(0,25000,nn)])
iv.add_output('fltcond|Ueas', val=Ueas, units='kn' )
iv.add_output('fltcond|h', val=h, units='ft')
self.add_subsystem('atmos',
ComputeAtmosphericProperties(num_nodes=nn),
promotes_inputs=["fltcond|h",
"fltcond|Ueas"])
if not quasi_steady:
lc_promotes = ['q_in',('mass','motor_mass'),'duration','channel_*','n_parallel']
else:
lc_promotes = ['q_in','channel_*','n_parallel']
self.add_subsystem('component',
LiquidCooledComp(num_nodes=nn,
quasi_steady=quasi_steady),
promotes_inputs=lc_promotes)
self.add_subsystem('duct',
ImplicitCompressibleDuct(num_nodes=nn))
self.connect('atmos.fltcond|p','duct.p_inf')
self.connect('atmos.fltcond|T','duct.T_inf')
self.connect('atmos.fltcond|Utrue','duct.Utrue')
self.connect('component.T_out','duct.T_in_hot')
self.connect('rho_coolant','duct.rho_hot')
if quasi_steady:
self.connect('duct.T_out_hot','component.T_in')
self.connect('mdot_coolant',['component.mdot_coolant','duct.mdot_hot'])
else:
self.add_subsystem('reservoir',
CoolantReservoir(num_nodes=nn),
promotes_inputs=['duration',('mass','coolant_mass')])
self.connect('duct.T_out_hot','reservoir.T_in')
self.connect('reservoir.T_out','component.T_in')
self.connect('mdot_coolant',['component.mdot_coolant','duct.mdot_hot','reservoir.mdot_coolant'])
self.connect('T_motor_initial','component.T_initial')
self.connect('T_res_initial','reservoir.T_initial')
if __name__ == '__main__':
# run this script from the root openconcept directory like so:
# python .\openconcept\components\ducts.py
quasi_steady = False
nn = 11
prob = Problem(LiquidCoolantTestGroup(quasi_steady=quasi_steady, num_nodes=nn))
prob.model.options['assembled_jac_type'] = 'csc'
prob.model.nonlinear_solver=NewtonSolver(iprint=2)
prob.model.linear_solver = DirectSolver(assemble_jac=True)
prob.model.nonlinear_solver.options['solve_subsystems'] = True
prob.model.nonlinear_solver.options['maxiter'] = 20
prob.model.nonlinear_solver.options['atol'] = 1e-8
prob.model.nonlinear_solver.options['rtol'] = 1e-8
prob.model.nonlinear_solver.linesearch = BoundsEnforceLS(bound_enforcement='scalar',print_bound_enforce=True)
prob.setup(check=True,force_alloc_complex=True)
prob.run_model()
#print(prob['duct.inlet.M'])
print(np.max(prob['component.T']-273.15))
    print(np.max(-prob['duct.force.F_net']))
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 12 16:51:05 2016
@author: <NAME>
"""
import numpy as np
from scipy import optimize
from scipy.stats import norm
# from ...finutils.FinMath import N, nprime
from ...finutils.FinDate import FinDate
from ...finutils.FinMath import nprime
from ...finutils.FinGlobalVariables import gDaysInYear
from ...finutils.FinError import FinError
from ...products.equity.FinEquityOption import FinEquityOption
from ...products.equity.FinEquityOption import FinEquityOptionTypes
from ...products.equity.FinEquityModelTypes import FinEquityModel
from ...products.equity.FinEquityModelTypes import FinEquityModelBlackScholes
N = norm.cdf
###############################################################################
def f(volatility, *args):
self = args[0]
valueDate = args[1]
stockPrice = args[2]
discountCurve = args[3]
dividendYield = args[4]
price = args[5]
model = FinEquityModelBlackScholes(volatility)
objFn = self.value(valueDate,
stockPrice,
discountCurve,
dividendYield,
model) - price
# print(volatility, price, objFn)
return objFn
###############################################################################
def fvega(volatility, *args):
self = args[0]
valueDate = args[1]
stockPrice = args[2]
discountCurve = args[3]
dividendYield = args[4]
model = FinEquityModelBlackScholes(volatility)
fprime = self.vega(
valueDate,
stockPrice,
discountCurve,
dividendYield,
model)
return fprime
###############################################################################
class FinEquityVanillaOption(FinEquityOption):
def __init__(self,
expiryDate,
strikePrice,
optionType):
if optionType != FinEquityOptionTypes.EUROPEAN_CALL and \
optionType != FinEquityOptionTypes.EUROPEAN_PUT:
raise FinError("Unknown Option Type", optionType)
self._expiryDate = expiryDate
self._strikePrice = strikePrice
self._optionType = optionType
###############################################################################
def value(self,
valueDate,
stockPrice,
discountCurve,
dividendYield,
model):
if type(valueDate) == FinDate:
t = (self._expiryDate - valueDate) / gDaysInYear
else:
t = valueDate
if np.any(stockPrice <= 0.0):
raise FinError("Stock price must be greater than zero.")
if model._parentType != FinEquityModel:
raise FinError("Model is not inherited off type FinEquityModel.")
if np.any(t < 0.0):
raise FinError("Time to expiry must be positive.")
t = np.maximum(t, 1e-10)
df = discountCurve.df(t)
interestRate = -np.log(df)/t
if type(model) == FinEquityModelBlackScholes:
volatility = model._volatility
            if np.any(volatility < 0.0):
raise FinError("Volatility should not be negative.")
volatility = np.maximum(volatility, 1e-10)
lnS0k = np.log(stockPrice / self._strikePrice)
sqrtT = np.sqrt(t)
den = volatility * sqrtT
mu = interestRate - dividendYield
v2 = volatility * volatility
d1 = (lnS0k + (mu + v2 / 2.0) * t) / den
d2 = (lnS0k + (mu - v2 / 2.0) * t) / den
if self._optionType == FinEquityOptionTypes.EUROPEAN_CALL:
v = stockPrice * np.exp(-dividendYield * t) * N(d1)
v = v - self._strikePrice * np.exp(-interestRate * t) * N(d2)
elif self._optionType == FinEquityOptionTypes.EUROPEAN_PUT:
v = self._strikePrice * np.exp(-interestRate * t) * N(-d2)
v = v - stockPrice * np.exp(-dividendYield * t) * N(-d1)
else:
raise FinError("Unknown option type")
else:
raise FinError("Unknown Model Type")
return v
###############################################################################
def xdelta(self,
valueDate,
stockPrice,
discountCurve,
dividendYield,
model):
if type(valueDate) == FinDate:
t = (self._expiryDate - valueDate) / gDaysInYear
else:
t = valueDate
if np.any(stockPrice <= 0.0):
raise FinError("Stock price must be greater than zero.")
if model._parentType != FinEquityModel:
raise FinError("Model is not inherited off type FinEquityModel.")
if np.any(t < 0.0):
raise FinError("Time to expiry must be positive.")
t = np.maximum(t, 1e-10)
df = discountCurve.df(t)
interestRate = -np.log(df)/t
if type(model) == FinEquityModelBlackScholes:
volatility = model._volatility
if np.any(volatility < 0.0):
raise FinError("Volatility should not be negative.")
volatility = np.maximum(volatility, 1e-10)
lnS0k = np.log(stockPrice / self._strikePrice)
sqrtT = np.sqrt(t)
den = volatility * sqrtT
mu = interestRate - dividendYield
v2 = volatility * volatility
d1 = (lnS0k + (mu + v2 / 2.0) * t) / den
if self._optionType == FinEquityOptionTypes.EUROPEAN_CALL:
delta = np.exp(-dividendYield * t) * N(d1)
elif self._optionType == FinEquityOptionTypes.EUROPEAN_PUT:
delta = -np.exp(-dividendYield * t) * N(-d1)
else:
raise FinError("Unknown option type")
return delta
###############################################################################
def xgamma(self,
valueDate,
stockPrice,
discountCurve,
dividendYield,
model):
if type(valueDate) == FinDate:
t = (self._expiryDate - valueDate) / gDaysInYear
else:
t = valueDate
if np.any(stockPrice <= 0.0):
raise FinError("Stock price must be greater than zero.")
if model._parentType != FinEquityModel:
raise FinError("Model is not inherited off type FinEquityModel.")
if np.any(t < 0.0):
raise FinError("Time to expiry must be positive.")
t = np.maximum(t, 1e-10)
df = discountCurve.df(t)
interestRate = -np.log(df)/t
if type(model) == FinEquityModelBlackScholes:
volatility = model._volatility
            if np.any(volatility < 0.0):
raise FinError("Volatility should not be negative.")
volatility = np.maximum(volatility, 1e-10)
lnS0k = np.log(stockPrice / self._strikePrice)
sqrtT = np.sqrt(t)
den = volatility * sqrtT
mu = interestRate - dividendYield
v2 = volatility * volatility
d1 = (lnS0k + (mu + v2 / 2.0) * t) / den
gamma = np.exp(-dividendYield * t) * nprime(d1) / stockPrice / den
else:
raise FinError("Unknown Model Type")
return gamma
###############################################################################
def xvega(self,
valueDate,
stockPrice,
discountCurve,
dividendYield,
model):
if type(valueDate) == FinDate:
t = (self._expiryDate - valueDate) / gDaysInYear
else:
t = valueDate
if np.any(stockPrice <= 0.0):
raise FinError("Stock price must be greater than zero.")
if model._parentType != FinEquityModel:
raise FinError("Model is not inherited off type FinEquityModel.")
if np.any(t < 0.0):
raise FinError("Time to expiry must be positive.")
t = np.maximum(t, 1e-10)
df = discountCurve.df(t)
interestRate = -np.log(df)/t
if type(model) == FinEquityModelBlackScholes:
volatility = model._volatility
            if np.any(volatility < 0.0):
raise FinError("Volatility should not be negative.")
volatility = np.maximum(volatility, 1e-10)
lnS0k = np.log(stockPrice / self._strikePrice)
sqrtT = np.sqrt(t)
den = volatility * sqrtT
mu = interestRate - dividendYield
v2 = volatility * volatility
d1 = (lnS0k + (mu + v2 / 2.0) * t) / den
vega = stockPrice * sqrtT * np.exp(-dividendYield * t) * nprime(d1)
else:
raise FinError("Unknown Model type")
return vega
###############################################################################
def xtheta(self,
valueDate,
stockPrice,
discountCurve,
dividendYield,
model):
if type(valueDate) == FinDate:
t = (self._expiryDate - valueDate) / gDaysInYear
else:
t = valueDate
if np.any(stockPrice <= 0.0):
raise FinError("Stock price must be greater than zero.")
if model._parentType != FinEquityModel:
raise FinError("Model is not inherited off type FinEquityModel.")
if np.any(t < 0.0):
raise FinError("Time to expiry must be positive.")
t = np.maximum(t, 1e-10)
df = discountCurve.df(t)
interestRate = -np.log(df)/t
if type(model) == FinEquityModelBlackScholes:
volatility = model._volatility
            if np.any(volatility < 0.0):
raise FinError("Volatility should not be negative.")
volatility = np.maximum(volatility, 1e-10)
lnS0k = np.log(stockPrice / self._strikePrice)
sqrtT = np.sqrt(t)
den = volatility * sqrtT
mu = interestRate - dividendYield
v2 = volatility * volatility
d1 = (lnS0k + (mu + v2 / 2.0) * t) / den
d2 = (lnS0k + (mu - v2 / 2.0) * t) / den
if self._optionType == FinEquityOptionTypes.EUROPEAN_CALL:
v = - stockPrice * np.exp(-dividendYield * t) * \
nprime(d1) * volatility / 2.0 / sqrtT
v = v - interestRate * self._strikePrice * \
np.exp(-interestRate * t) * N(d2)
v = v + dividendYield * stockPrice * \
np.exp(-dividendYield * t) * N(d1)
elif self._optionType == FinEquityOptionTypes.EUROPEAN_PUT:
v = - stockPrice * np.exp(-dividendYield * t) * \
nprime(d1) * volatility / 2.0 / sqrtT
v = v + interestRate * self._strikePrice * \
np.exp(-interestRate * t) * N(-d2)
v = v - dividendYield * stockPrice * \
np.exp(-dividendYield * t) * N(-d1)
else:
raise FinError("Unknown option type")
else:
raise FinError("Unknown Model Type")
return v
###############################################################################
def impliedVolatility(self,
valueDate,
stockPrice,
discountCurve,
dividendYield,
price):
argtuple = (self, valueDate, stockPrice,
discountCurve, dividendYield, price)
sigma = optimize.newton(f, x0=0.2, fprime=fvega, args=argtuple,
tol=1e-5, maxiter=50, fprime2=None)
return sigma
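    # Hypothetical usage sketch (the market data and curve object below are assumed,
    # not defined in this module): solve for the Black-Scholes volatility that
    # reproduces a quoted price.
    #
    #   option = FinEquityVanillaOption(expiryDate, 100.0,
    #                                   FinEquityOptionTypes.EUROPEAN_CALL)
    #   vol = option.impliedVolatility(valueDate, stockPrice=105.0,
    #                                  discountCurve=curve, dividendYield=0.01,
    #                                  price=9.50)
    #
    # Newton's method above uses f (price error) and fvega (its derivative with
    # respect to volatility), so it typically converges in a few iterations when
    # the quoted price is arbitrage-free.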
###############################################################################
def valueMC(self,
valueDate,
stockPrice,
discountCurve,
dividendYield,
model,
numPaths=10000,
seed=4242):
if model._parentType == FinEquityModel:
volatility = model._volatility
else:
raise FinError("Model Type invalid")
np.random.seed(seed)
t = (self._expiryDate - valueDate) / gDaysInYear
df = discountCurve.df(self._expiryDate)
r = -np.log(df)/t
mu = r - dividendYield
v2 = volatility**2
K = self._strikePrice
sqrtdt = np.sqrt(t)
# Use Antithetic variables
g = np.random.normal(0.0, 1.0, size=(1, numPaths))
s = stockPrice * np.exp((mu - v2 / 2.0) * t)
m = np.exp(g * sqrtdt * volatility)
s_1 = s * m
s_2 = s / m
if self._optionType == FinEquityOptionTypes.EUROPEAN_CALL:
payoff_a_1 = np.maximum(s_1 - K, 0)
payoff_a_2 = np.maximum(s_2 - K, 0)
elif self._optionType == FinEquityOptionTypes.EUROPEAN_PUT:
payoff_a_1 = np.maximum(K - s_1, 0)
payoff_a_2 = np.maximum(K - s_2, 0)
else:
raise FinError("Unknown option type.")
        payoff = np.mean(payoff_a_1) + np.mean(payoff_a_2)
        v = payoff * np.exp(-r * t) / 2.0
        return v
import numpy as np
from matplotlib import pyplot as plt
import sys
def findConvexHull(points):
points = np.array(points, dtype=np.float32)
if points.shape[0] < 3:
print ("Minimum 3 points required")
sys.exit(1)
rotated_points = np.rot90(points)
    points = points[np.lexsort(rotated_points)]
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = "<NAME>, <NAME>"
__copyright__ = "Copyright 2020, <NAME>, <NAME>, <NAME>, <NAME>"
__credits__ = ["<NAME>", "<NAME>"]
__license__ = "MIT"
__version__ = "1.0"
__status__ = "Development"
"""
Contains the functionalities to identify individuals and cut them out.
"""
import sys
MASK_RCNN_LIBRARY = ''
sys.path.append(MASK_RCNN_LIBRARY)
import configuration as cf
import os, shutil
from mrcnn.config import Config
from mrcnn.model import MaskRCNN, mold_image, load_image_gt
from mrcnn.utils import Dataset
import numpy as np
from datetime import datetime
import cv2
### mrcnn class
class Prediction_Config(Config):
NAME = " "
NUM_CLASSES = 2
GPU_COUNT = 1
IMAGES_PER_GPU = 1
DETECTION_MAX_INSTANCES = 1
DETECTION_MIN_CONFIDENCE = cf.DETECTION_MIN_CONFIDENCE
def _setClassVariables(self, name, names_labels):
self.NAME = name
self.NUM_CLASSES = 1 + len(names_labels)
self.DETECTION_MAX_INSTANCES = len(names_labels)
class Prediction_Dataset(Dataset):
# load the dataset definitions
def load_dataset(self, night_images_path, names):
n = 0
for name in names:
# define one class
self.add_class("dataset", n + 1, name)
n += 1
# find all images
for interval_num in sorted(os.listdir(night_images_path)):
for filename in sorted(os.listdir(night_images_path + interval_num)):
if not filename.endswith('.jpg'):
continue
# extract image id
image_id = filename[:-4]
img_path = night_images_path + interval_num + '/' + filename
# add to dataset
self.add_image('dataset', image_id=image_id, path=img_path)
# load an image reference
def image_reference(self, image_id):
info = self.image_info[image_id]
return info['path']
def load_mask(self, image_id):
mask = np.empty([0, 0, 0])
class_ids = np.empty([0], np.int32)
return mask, class_ids
def _get_enclosurecode(species, zoo, enclosure):
return species+'_'+zoo+'_'+str(enclosure)
def _check_configuration(species, zoo, enclosure, base_input):
ret = True
enclosure_code = _get_enclosurecode(species, zoo, enclosure)
net, label = _get_network_and_label(species, zoo, enclosure)
if not os.path.exists(base_input+enclosure_code):
print("Error: Input folder for object detection not found:", base_input, enclosure_code)
return False
if not net:
print("Error: No object detection network was found:", enclosure_code)
return False
if not label:
print("Error: No labels were found:", enclosure_code)
return False
return ret
def _get_network_and_label(species, zoo, enclosure_num,
basenets = cf.BASE_OD_NETWORK,
zoonets = cf.ZOO_OD_NETWORK,
enclosurenets = cf.ENCLOSURE_OD_NETWORK,
labels = cf.OD_NETWORK_LABELS):
enclosure_code = species+'_'+zoo+'_'+str(enclosure_num)
zoo_code = species+'_'+zoo
net = False
label = False
if enclosure_code in enclosurenets.keys():
net = enclosurenets[enclosure_code]
elif zoo_code in zoonets.keys():
net = zoonets[zoo_code]
elif species in basenets.keys():
net = basenets[species]
if enclosure_code in labels.keys():
label = labels[enclosure_code]
elif zoo_code in labels.keys():
label = labels[zoo_code]
elif species in labels.keys():
label = labels[species]
return net, label
def postprocess_boxes(yhat):
def non_max_suppression(boxes, scores, threshold):
def compute_iou(box, boxes, box_area, boxes_area):
assert boxes.shape[0] == boxes_area.shape[0]
ys1 = np.maximum(box[0], boxes[:, 0])
xs1 = np.maximum(box[1], boxes[:, 1])
ys2 = np.minimum(box[2], boxes[:, 2])
xs2 = np.minimum(box[3], boxes[:, 3])
intersections = np.maximum(ys2 - ys1, 0) * np.maximum(xs2 - xs1, 0)
unions = box_area + boxes_area - intersections
ious = intersections / unions
return ious
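        # Quick check of compute_iou with made-up boxes in (y1, x1, y2, x2) form:
        # two unit-area overlaps give IoU 1/7 and 0 for the boxes below.
        #
        #   box = np.array([0, 0, 2, 2])
        #   others = np.array([[1, 1, 3, 3], [2, 2, 4, 4]])
        #   areas = (others[:, 2] - others[:, 0]) * (others[:, 3] - others[:, 1])
        #   compute_iou(box, others, 4, areas)   # -> array([0.142857, 0.])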
assert boxes.shape[0] == scores.shape[0]
# bottom-left origin
ys1 = boxes[:, 0]
xs1 = boxes[:, 1]
# top-right target
ys2 = boxes[:, 2]
xs2 = boxes[:, 3]
# box coordinate ranges are inclusive-inclusive
areas = (ys2 - ys1) * (xs2 - xs1)
scores_indexes = scores.argsort().tolist()
boxes_keep_index = []
while len(scores_indexes):
index = scores_indexes.pop()
boxes_keep_index.append(index)
if not len(scores_indexes):
break
ious = compute_iou(boxes[index], boxes[scores_indexes], areas[index],
areas[scores_indexes])
filtered_indexes = set((ious > threshold).nonzero()[0])
# if there are no more scores_index
# then we should pop it
scores_indexes = [
v for (i, v) in enumerate(scores_indexes)
if i not in filtered_indexes
]
return np.array(boxes_keep_index)
def remove_clones(boxes, class_ids, scores):
# -! works only on two individuals so far
if class_ids[0] == class_ids[1]:
x = np.argmax(scores) # number 0 or 1
if class_ids[x] == 1:
correct_label = 1
false_label = 2
else:
correct_label = 2
false_label = 1
class_ids[x] = correct_label
class_ids[1 - x] = false_label
return boxes, class_ids
def remove_double_boxes(boxes, class_ids, scores):
# how to set iou-threshold ?
iou_threshold = 0.8
keep_indices = non_max_suppression(boxes, scores, iou_threshold)
boxes_ret = [boxes[i] for i in keep_indices]
class_ids_ret = [class_ids[i] for i in keep_indices]
scores_ret = [scores[i] for i in keep_indices]
return boxes_ret, class_ids_ret, scores_ret
def remove_small_boxes(boxes):
return boxes # dirty fix because it is not necessary anymore
# yhat is model detection return.
boxes = yhat['rois']
class_ids = yhat['class_ids']
scores = yhat['scores']
# -! works only on two individuals so far
if len(boxes) >= 2:
# remove multiple instances of individuals
boxes, class_ids = remove_clones(boxes, class_ids, scores)
# suppress double boxing
boxes, class_ids, scores = remove_double_boxes(boxes, class_ids, scores)
boxes = remove_small_boxes(boxes)
return boxes, class_ids, scores
def ensure_dir(directory):
if not os.path.exists(directory):
os.makedirs(directory)
def _cut_out_prediction_data(od_prediction_set, od_model, od_cfg, enclosure_code, output_folder, label_names, img_size=cf.IMG_SIZE):
def _individual_name_from_boxcode(label_names, boxcode):
tmp = label_names[boxcode - 1]
if tmp.startswith("Elenantilope"):
tmp = tmp.replace("Elenantilope", "Elen")
return tmp
# load image and mask
i = 1
for image_id in od_prediction_set.image_ids:
i += 1
# load the image and mask
image, image_meta, gt_class_id, gt_bbox, gt_mask = load_image_gt(od_prediction_set, od_cfg, image_id, use_mini_mask=False)
scaled_image = mold_image(image, od_cfg)
sample = np.expand_dims(scaled_image, 0)
# make prediction
yhat = od_model.detect(sample, verbose=0)[0]
img_path = od_prediction_set.image_info[image_id]['path']
img_name = img_path.split("/")[-1]
interval_num = img_path.split("/")[-2]
if i % 6250 == 0:
print(datetime.now())
print("Predicted " + str(i) + " images of " + output_folder)
# plot each box
boxes, classes, scores = postprocess_boxes(yhat) # OnePerClassOutOfTwo(yhat)
box_num = 0
for box in boxes:
pred_class = classes[box_num]
ind_name = _individual_name_from_boxcode(label_names = label_names, boxcode=pred_class)
# get coordinates
y1, x1, y2, x2 = box
box_part = image[y1:y2, x1:x2]
box_part_rs = cv2.resize(box_part, img_size, interpolation=cv2.INTER_AREA)
save_path = output_folder + ind_name + '/' + interval_num + '/'
ensure_dir(save_path)
cv2.imwrite(save_path + img_name, box_part_rs)
text_file = open(save_path + "box_info.txt", "a+")
text_file.write(img_name + "-" + str(y1) + "*" + str(x1) + "*" + str(y2) + "*" + str(x2) + "\n")
text_file.close()
box_num += 1
os.remove(img_path)
def _predict_one_night(input_path_night, od_model, od_cfg, od_labels, enclosure_code, output_folder):
def load_prediction_set(path = input_path_night, label_names = od_labels):
prediction_set = Prediction_Dataset()
prediction_set.load_dataset(night_images_path=path, names=label_names)
prediction_set.prepare()
print('In this folder, there are %d images to predict.' % len(prediction_set.image_ids))
return prediction_set
prediction_set = load_prediction_set()
_cut_out_prediction_data(prediction_set, od_model, od_cfg, enclosure_code, output_folder, od_labels)
def merge_timeinterval_images(path_to_intervalfolders, output_path_intervalimg, output_path_single_frame, output_path_text):
"""
Parameters
----------
path_to_intervalfolders : string
TMP_STORAGE/intervals/enclosure_code/datum/
contains for each individual a folder, each of those contains folders of intervals
output_path_intervalimg : TYPE
TMP_STORAGE/joint_images/enclosure_code/datum/.
output_path_single_frame : TYPE
TMP_STORAGE/single_frames/enclosure_code/datum/.
Returns
-------
Writes output_path_intervalimg/interval_num.jpg and output_path_single_frame/frame_num.jpg (up to 2 out of 4)
"""
def _write_joint_image(imgpath_list, out_directory, time_interval):
img_list = []
for imgpath in imgpath_list:
img = cv2.imread(imgpath)
img_list.append(img)
if len(img_list) == 0:
return
h, w, d = img_list[0].shape
img_black = np.zeros([w, h, d],dtype=np.uint8)
if len(img_list) == 1:
vis1 = np.concatenate((img_list[0], img_black), axis=1)
vis2 = np.concatenate((img_black, img_black), axis=1)
            vis = np.concatenate((vis1, vis2), axis=0)
import os
import numpy as np
from colorama import init as cinit
from colorama import Fore, Back, Style
import random
from time import monotonic as clock
import math
import config as conf
import utils
from thing import Thing
class FireBeam(Thing):
'''
firebeam obstacle object
uses physics inherited from Thing
'''
DIR_HOR = 0 # --
DIR_VERT = 1 # |
DIR_DIA_DOWN = 2 # \
DIR_DIA_UP = 3 # /
def __init__(self, game_height, game_width, size, direction, x=0, y=0):
if type(size) != int or direction not in (self.DIR_HOR, self.DIR_VERT, self.DIR_DIA_DOWN, self.DIR_DIA_UP):
raise ValueError('Invalid arguments')
if direction == self.DIR_HOR:
            size_arr = np.array([1, size])
__author__ = "<NAME>"
__license__ = "MIT"
__version__ = "0.0.1"
# -------------------------------------------------------------------------------------------------------------------- #
# Imports
# Module imports
import shapely
from shapely.geometry import Polygon
import shapefile
import numpy as np
from numpy.linalg import norm
import pymesh
# Livestock imports
# -------------------------------------------------------------------------------------------------------------------- #
# Livestock Geometry Functions
def fix_mesh(mesh, detail="normal"):
bbox_min, bbox_max = mesh.bbox
diag_len = norm(bbox_max - bbox_min)
if detail == "normal":
target_len = diag_len * 1e-2
elif detail == "high":
target_len = diag_len * 5e-3
elif detail == "low":
target_len = diag_len * 0.03
print("Target resolution: {} mm".format(target_len))
count = 0
mesh, __ = pymesh.remove_degenerated_triangles(mesh, 100)
mesh, __ = pymesh.split_long_edges(mesh, target_len)
num_vertices = mesh.num_vertices
while True:
mesh, __ = pymesh.collapse_short_edges(mesh, 1e-6)
mesh, __ = pymesh.collapse_short_edges(mesh, target_len, preserve_feature=True)
mesh, __ = pymesh.remove_obtuse_triangles(mesh, 150.0, 100)
if mesh.num_vertices == num_vertices:
break
num_vertices = mesh.num_vertices
print("#v: {}".format(num_vertices))
count += 1
if count > 10:
break
mesh = pymesh.resolve_self_intersection(mesh)
mesh, __ = pymesh.remove_duplicated_faces(mesh)
mesh = pymesh.compute_outer_hull(mesh)
mesh, __ = pymesh.remove_duplicated_faces(mesh)
mesh, __ = pymesh.remove_obtuse_triangles(mesh, 179.0, 5)
mesh, __ = pymesh.remove_isolated_vertices(mesh)
return mesh
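# Hypothetical usage (file names are assumed): load a mesh, clean it up, and write
# the repaired hull back out with PyMesh's I/O helpers.
#
#   mesh = pymesh.load_mesh("terrain_raw.obj")
#   repaired = fix_mesh(mesh, detail="low")
#   pymesh.save_mesh("terrain_fixed.obj", repaired)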
def ray_triangle_intersection(ray_near, ray_dir, V):
"""
Möller–Trumbore intersection algorithm in pure python
Based on http://en.wikipedia.org/wiki/M%C3%B6ller%E2%80%93Trumbore_intersection_algorithm
"""
v1 = V[0]
v2 = V[1]
v3 = V[2]
eps = 0.000001
edge1 = v2 - v1
edge2 = v3 - v1
pvec = np.cross(ray_dir, edge2)
det = edge1.dot(pvec)
if abs(det) < eps:
return False, None
inv_det = 1. / det
tvec = ray_near - v1
u = tvec.dot(pvec) * inv_det
if u < 0. or u > 1.:
return False, None
qvec = np.cross(tvec, edge1)
v = ray_dir.dot(qvec) * inv_det
if v < 0. or u + v > 1.:
return False, None
t = edge2.dot(qvec) * inv_det
if t < eps:
return False, None
return True, t
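# Minimal sketch of how ray_triangle_intersection can be exercised (the triangle and
# ray below are made up for illustration): a ray along +z through the unit triangle
# in the z = 0 plane hits it at t = 1.
#
#   V = np.array([[0., 0., 0.], [1., 0., 0.], [0., 1., 0.]])
#   hit, t = ray_triangle_intersection(np.array([0.25, 0.25, -1.0]),
#                                      np.array([0.0, 0.0, 1.0]), V)
#   # hit is True and t == 1.0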
def lowest_face_vertex(v0, v1, v2):
V = [v0, v1, v2]
x0 = v0[0]
y0 = v0[1]
z0 = v0[2]
x1 = v1[0]
y1 = v1[1]
z1 = v1[2]
x2 = v2[0]
y2 = v2[1]
z2 = v2[2]
X = [x0, x1, x2]
Y = [y0, y1, y2]
Z = [z0, z1, z2]
Zsort = sorted(Z)
if Zsort[0] == Zsort[2]:
return np.array([sum(X)/3, sum(Y)/3, sum(Z)/3])
elif Zsort[0] < Zsort[1]:
i = Z.index(Zsort[0])
return V[i]
elif Zsort[0] == Zsort[1]:
i0 = Z.index(Zsort[0])
i1 = Z.index(Zsort[1])
x = 0.5*(X[i0] + X[i1])
y = 0.5*(Y[i0] + Y[i1])
return np.array([x, y, Zsort[0]])
else:
print('Error finding lowest point!')
print('v0:',v0)
print('v1:', v1)
print('v2:', v2)
return None
def angle_between_vectors(v1, v2, force_angle=None):
"""
Computes the angle between two vectors.
:param v1: Vector1 as numpy array
:param v2: Vector2 as numpy array
:param force_angle: Default is None. Use to force angle into acute or obtuse.
:return: Angle in radians and its angle type.
"""
# Dot product
dot_v1v2 = np.dot(v1, v2)
# Determine angle type
if dot_v1v2 > 0:
angle_type = 'acute'
elif dot_v1v2 == 0:
return np.pi/2, 'perpendicular'
else:
angle_type = 'obtuse'
# Vector magnitudes and compute angle
mag_v1 = np.sqrt(v1.dot(v1))
mag_v2 = np.sqrt(v2.dot(v2))
angle = np.arccos(abs(dot_v1v2 / (mag_v1 * mag_v2)))
# Compute desired angle type
if not force_angle:
return angle, angle_type
elif force_angle == 'acute':
if angle_type == 'acute':
return angle, 'acute'
else:
angle = np.pi - angle
return angle, 'acute'
elif force_angle == 'obtuse':
if angle > np.pi/2:
return angle, 'obtuse'
else:
angle = np.pi - angle
return angle, 'obtuse'
else:
print('force_angle has to be defined as None, acute or obtuse. force_angle was:', str(force_angle))
return None, None
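# Illustrative call (vectors are made up): the angle between the x and y axes is
# reported as pi/2 with the 'perpendicular' tag; forcing an angle type on other
# vectors flips the result to pi minus the computed value when needed.
#
#   angle_between_vectors(np.array([1.0, 0.0, 0.0]), np.array([0.0, 1.0, 0.0]))
#   # -> (1.5707963..., 'perpendicular')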
def line_intersection(p1, p2, p3, p4):
"""
Computes the intersection between two lines given 4 points on those lines.
:param p1: Numpy array. First point on line 1
:param p2: Numpy array. Second point on line 1
:param p3: Numpy array. First point on line 2
:param p4: Numpy array. Second point on line 2
:return: Numpy array. Intersection point
"""
# Direction vectors
v1 = (p2 - p1)
v2 = (p4 - p3)
# Cross-products and vector norm
cv12 = np.cross(v1, v2)
cpv = np.cross((p1 - p3), v2)
    # Scalar position of the intersection along line 1 (sign is not resolved here)
    t = norm(cpv) / norm(cv12)
    return p1 + t * v1
# -*- coding: utf-8 -*-
import matplotlib
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
# from mpl_toolkits.basemap import Basemap
import xarray as xr
import re
from collections import OrderedDict
from datetime import datetime, timedelta
from scipy.spatial import cKDTree, KDTree
from pyproj import Proj
import numpy.ma as ma
import argparse
from glob import glob
import json
import os
import logging
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s', level=logging.INFO)
deg2rad = np.pi / 180
NRANGE = {'LPRO': 90, 'SILL': 60, 'FIST': 60, 'VILA': 60, 'PRIO': 60}
dtypes = {"TIME": 'float64',
"DEPH": 'float32',
"BEAR": 'float32',
"RNGE": 'float32',
"LONGITUDE": 'float32',
"LATITUDE": 'float32',
"XDST": 'int32',
"YDST": 'int32',
"RDVA": 'int16',
"DRVA": 'int32',
"EWCT": 'int16',
"NSCT": 'int16',
"MAXV": 'int16',
"MINV": 'int16',
"ESPC": 'int16',
"ETMP": 'int16',
"ERSC": 'int16',
"ERTC": 'int16',
"SPRC": 'int16',
"NARX": 'int8',
"NATX": 'int8',
"SLTR": 'int32',
"SLNR": 'int32',
"SLTT": 'int16',
"SLNT": 'int16',
"TIME_QC": 'int8',
"POSITION_QC": 'int8',
"DEPH_QC": 'int8',
"QCflag": 'int8',
"OWTR_QC": 'int8',
"MDFL_QC": 'int8',
"VART_QC": 'int8',
"CSPD_QC": 'int8',
"AVRB_QC": 'int8',
"RDCT_QC": 'int8'}
scale_factors = {"XDST": 0.001,
"YDST": 0.001,
"RDVA": 0.001,
"DRVA": 0.001,
"EWCT": 0.001,
"NSCT": 0.001,
"ESPC": 0.001,
"ETMP": 0.001,
"MAXV": 0.001,
"MINV": 0.001,
"ERSC": 1,
"ERTC": 1,
"XDST": 0.001,
"YDST": 0.001,
"SPRC": 1,
"NARX": 1,
"NATX": 1,
"SLTR": 0.001,
"SLNR": 0.001,
"SLTT": 0.001,
"SLNT": 0.001,
"TIME_QC": 1,
"POSITION_QC": 1,
"DEPH_QC": 1,
"QCflag": 1,
"OWTR_QC": 1,
"MDFL_QC": 1,
"VART_QC": 1,
"CSPD_QC": 1,
"AVRB_QC": 1,
"RDCT_QC": 1}
add_offsets = {}
for key, value in scale_factors.items():
if isinstance(value, float):
        scale_factors[key] = np.float32(scale_factors[key])
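# The scale factors above follow the usual netCDF packing convention: a float value
# is stored as round((value - add_offset) / scale_factor) in the integer dtype from
# `dtypes`, and unpacked by reversing the operation. Hand-rolled illustration (the
# velocity value is made up; no add_offset is defined above, so it is taken as zero):
#
#   rdva = 0.4321                                              # m/s
#   packed = np.int16(round(rdva / scale_factors['RDVA']))     # -> 432
#   unpacked = packed * scale_factors['RDVA']                  # -> ~0.432 m/s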
'''
Functions for use
'''
import torch
from torch.autograd import Variable
import numpy as np
import itertools
import os
import random
import pickle
import torch as t
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.model_selection import ShuffleSplit
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, confusion_matrix
# from tsne import tsne
USE_CUDA = torch.cuda.is_available()
if USE_CUDA:
torch.cuda.set_device(0)
FloatTensor = torch.cuda.FloatTensor if USE_CUDA else torch.FloatTensor
LongTensor = torch.cuda.LongTensor if USE_CUDA else torch.LongTensor
ByteTensor = torch.cuda.ByteTensor if USE_CUDA else torch.ByteTensor
Device = 'cuda:1' if USE_CUDA else 'cpu'
"""process data input to bag-of-words representation"""
def dataset(data_url, monitor=False):
data_withorder = np.load(data_url + '.multi.npy', allow_pickle=True)
seq = [list(x) for x in data_withorder]
if monitor:
print('Converting data to sequence')
try:
labels_with_names = np.load(data_url + '.labels.multi.npy', allow_pickle=True)
labels = labels_with_names[0]
class_names = labels_with_names[1]
except:
if monitor:
print("No labels.")
labels = None
class_names = ['None']
return seq, labels, class_names
def onehot(data, min_length):
return np.bincount(data, minlength=min_length)
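# Despite the name, onehot returns a bag-of-words count vector: each position holds
# how often that vocabulary index occurs in the document. Small made-up example:
#
#   onehot(np.array([0, 2, 2, 5]), min_length=7)
#   # -> array([1, 0, 2, 0, 0, 1, 0])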
def bow_dataset(data_url, vocab_size, additional_text=False, monitor=False):
data_withorder = np.load(data_url + '.multi.npy', allow_pickle=True)
if monitor:
print('Converting data to BoW representation')
data_multihot = np.array([onehot(doc.astype('int'), vocab_size) for doc in data_withorder])
word_count = [np.sum(doc) for doc in data_multihot]
try:
labels_with_names = np.load(data_url + '.labels.multi.npy', allow_pickle=True)
labels = labels_with_names[0]
class_names = labels_with_names[1]
except:
if monitor:
print("No labels.")
labels = None
class_names = ['None']
if additional_text:
return data_multihot, labels, word_count, class_names, data_withorder
return data_multihot, labels, word_count, class_names
'''Create batches'''
def pad_to_batch(x, max_len=80):
x_p = []
for i in range(len(x)):
x_len = len(x[i])
if x_len < max_len:
x_p.append(Variable(LongTensor(x[i] + [0]*(max_len - x_len))))
else:
x_p.append(Variable(LongTensor(x[i][:max_len])))
return torch.stack(x_p, 0)
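# Example with toy index sequences (values are arbitrary): shorter sequences are
# right-padded with 0, longer ones truncated, and the result is a stacked LongTensor.
#
#   pad_to_batch([[5, 7], [1, 2, 3, 4]], max_len=3)
#   # -> tensor([[5, 7, 0],
#   #            [1, 2, 3]])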
def pad_to_train(batch, max_len=80):
    """Unzip a batch of (sequence, label) pairs and pad the sequences to a fixed length."""
x, y = zip(*batch)
return pad_to_batch(x, max_len=max_len), y
def getBatch(batch_size, train_data, shuffle=False):
if shuffle:
random.shuffle(train_data)
sindex = 0
eindex = batch_size
while eindex < len(train_data):
batch = train_data[sindex: eindex]
temp = eindex
eindex = eindex + batch_size
sindex = temp
yield batch
if eindex >= len(train_data):
batch = train_data[sindex:]
yield batch
def getBatch_iter(batch_size, train_data, shuffle=False):
if shuffle:
random.shuffle(train_data)
ret = []
while True:
for i, data in enumerate(train_data):
ret.append(data)
if i % batch_size == 0:
yield ret
ret = []
if len(ret) > 0:
yield ret
break
'''Build attributes'''
def build_A(data_url, vocab_size, n_attribute):
data, labels, _, _ = bow_dataset(data_url, vocab_size)
n_label = labels.shape[1]
n_vocab = len(data[1])
A_large = np.zeros([n_label, n_vocab])
for i, doc in enumerate(data):
A_large[labels[i]==1] += doc
transformer = TfidfTransformer(smooth_idf=False)
A_tfidf = transformer.fit_transform(A_large).toarray()
A = tsne.tsne(A_tfidf, n_attribute, n_vocab)
return FloatTensor(A)
'''Extract labels based on probability'''
def multi_label_extract(label_dist, threshold=0.5):
labels = torch.zeros(label_dist.size())
labels[label_dist > threshold] = 1
return labels
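# Example with assumed sigmoid outputs: probabilities above the threshold become 1.
#
#   dist = torch.tensor([[0.9, 0.2, 0.6], [0.1, 0.4, 0.7]])
#   multi_label_extract(dist, threshold=0.5)
#   # -> tensor([[1., 0., 1.],
#   #            [0., 0., 1.]])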
def plot_threshold(thrs, var, threshold, title, savedir=None):
for v in var:
plt.plot(thrs, v[0], label=v[1])
plt.axvline(x=threshold)
plt.legend()
plt.title(title)
if savedir is not None:
plt.savefig('%s_%s.png'%(savedir, title))
plt.show()
plt.clf()
'''Evaluation'''
# Sorower, <NAME>. "A literature survey on algorithms for multi-label learning." Oregon State University, Corvallis (2010)
def multilabel_eval(true, pred, sample_weight=None, monitor=False, full=False):
n, p = true.shape
score = {}
score['match'] = np.mean([(pred[i][true[i]]==1).any() for i in range(len(pred))])
hit = ((pred==1)*(true==1)).sum(1)
score['HS'] = (hit/(((pred==1)+(true==1))>0).sum(1)).mean()
score['f1'] = (2*hit/((pred==1).sum(1)+(true==1).sum(1))).mean()
if full:
match = (pred==true)
score['HL'] = (pred!=true).mean(1).mean()
score['exact_acc'] = match.all(1).mean()
score['min_acc'] = match.mean(0).min()
score['density_chosen'] = pred.sum(1).mean()/p
score['density'] = true.sum(1).mean()/p
        score['precision'] = (hit/((pred==1).sum(1)+1e-12)).mean()
        score['recal'] = (hit/(true==1).sum(1)).mean()
score['no_pred'] = (pred!=1).all(1).mean()
if monitor:
print(score)
return score
def singlelabel_eval(true, pred, sample_weight=None, monitor=False):
score = {}
score['acc'] = accuracy_score(true, pred)
score['precision'] = precision_score(true, pred)
score['recal'] = recall_score(true, pred)
score['f1'] = f1_score(true, pred)
score['cfm'] = confusion_matrix(true, pred)
if monitor:
print('Acc: %5f, F1: %5f, precision: %5f, recall: %5f' %(score['acc'], score['f1'], score['precision'], score['recal']))
return score
def inference_analysis(class_word, vocab_url, class_names):
    if type(class_word) is not np.ndarray:
        class_word = class_word.data.cpu().numpy()
if 'pkl' in vocab_url:
vocab = pickle.load(open(vocab_url, 'rb'))
vocab = list(zip(*sorted(vocab.items(), key=lambda x: x[1])))[0]
else:
vocab = []
with open(vocab_url, 'r') as fin:
for line in fin:
vocab.append(line.split(' ')[0])
    for i, weights in enumerate(class_word):
        ind = np.argsort(weights)[-1:-21:-1]
        if len(class_names) == len(class_word):
            print(class_names[i])
print(np.array(vocab)[ind])
def save_res_multi(tests, vals, trains, class_names, vocab_url):
if 'pkl' in vocab_url:
vocab = pickle.load(open(vocab_url, 'rb'))
vocab = list(zip(*sorted(vocab.items(), key=lambda x: x[1])))[0]
else:
vocab = []
with open(vocab_url, 'r') as fin:
for line in fin:
vocab.append(line.split(' ')[0])
vocab_size = len(vocab)
import csv
with open('res.csv', 'w') as csvfile:
spamwriter = csv.writer(csvfile, delimiter='\t', quotechar='|', quoting=csv.QUOTE_MINIMAL)
spamwriter.writerow(['sentence', 'true_label', 'prediction', 'word_by_importance', 'dataset'])
for pred, sent, recon in tests:
sent_order = ' '.join([vocab[word] for word in sent])
true_label = ' '
pred_labels = '+'.join(class_names[pred==1])
recon_sent = np.argsort(recon)[::-1]
sent_importance = ' '.join([vocab[word] for word in recon_sent if word in sent])
group = 'test'
spamwriter.writerow([sent_order, true_label, pred_labels, sent_importance, group])
for pred, true, sent, recon in vals:
pred_labels = '+'.join(class_names[pred==1])
true_label = '+'.join(class_names[true==1])
sent_order = ' '.join([vocab[word] for word in sent])
recon_sent = np.argsort(recon)[::-1]
sent_importance = ' '.join([vocab[word] for word in recon_sent if word in sent])
group = 'validation'
spamwriter.writerow([sent_order, true_label, pred_labels, sent_importance, group])
for pred, true, sent, recon in trains:
pred_labels = '+'.join(class_names[pred==1])
true_label = '+'.join(class_names[true==1])
sent_order = ' '.join([vocab[word] for word in sent])
recon_sent = np.argsort(recon)[::-1]
sent_importance = ' '.join([vocab[word] for word in recon_sent if word in sent])
group = 'train'
spamwriter.writerow([sent_order, true_label, pred_labels, sent_importance, group])
print('Result saved in csv.')
def print_res_multi(tests, vals, trains, class_names, topic_word, class_word, vocab_url):
if 'pkl' in vocab_url:
vocab = pickle.load(open(vocab_url, 'rb'))
vocab = list(zip(*sorted(vocab.items(), key=lambda x: x[1])))[0]
    else:
        vocab = []
        with open(vocab_url, 'r') as fin:
for line in fin:
vocab.append(line.split(' ')[0])
vocab_size = len(vocab)
with open('res.html', 'w', encoding='gbk') as f:
if topic_word is not None:
f.write('<p style="background-color:green;">Topic word (beta)</p>')
for i, topic in enumerate(topic_word):
ind = np.argsort(topic)[-1:-21:-1]
f.write('<p> {} </p>'.format(class_names[i]))
for word in ind:
f.write('{} '.format(vocab[word]))
f.write('</p>')
if class_word is not None:
f.write('<p style="background-color:green;">Class word (sum_theta*beta)</p>')
for i, topic in enumerate(class_word):
ind = np.argsort(topic)[-1:-21:-1]
f.write('<p> {} </p>'.format(class_names[i]))
for word in ind:
f.write('{} '.format(vocab[word]))
f.write('</p>')
f.write('<p style="background-color:green;">Test</p>')
        for pred_val, pred_train, sent, recon in tests:
f.write('<p>validation threshold: {}, train threshold: {}</p>'.format(class_names[pred_val==1], class_names[pred_train==1]))
f.write('<p>')
f.write('<p> In Order:')
for word in sent:
f.write('{} '.format(vocab[word]))
f.write('</p>')
f.write('<p> By Importance:')
recon_sent = np.argsort(recon)[::-1]
for word in recon_sent:
if word in sent:
f.write('{} '.format(vocab[word]))
f.write('</p>')
f.write('<p> Reconstruct:')
for word in recon_sent:
if recon[word]>=1/vocab_size*10:
if word in sent:
f.write('<mark class="red">{}</mark> '.format(vocab[word]))
else:
f.write('{} '.format(vocab[word]))
else:
break
f.write('</p>')
f.write('<HR SIZE=5>')
if vals is not None:
f.write('<p style="background-color:green;">Validation</p>')
            for pred, true, sent, recon in vals:
if (pred[true==1] != 1).all():
f.write('<p style="background-color:red;">All Wrong</p>')
elif (pred != true).any():
f.write('<p style="background-color:blue;">Partial Wrong</p>')
f.write('<p>prediction: {}, true: {}</p>'.format(class_names[pred==1], class_names[true==1]))
f.write('<p>')
f.write('<p> In Order:')
for word in sent:
f.write('{} '.format(vocab[word]))
f.write('</p>')
f.write('<p> By Importance:')
recon_sent = np.argsort(recon)[::-1]
for word in recon_sent:
if word in sent:
f.write('{} '.format(vocab[word]))
f.write('</p>')
f.write('<p> Reconstruct:')
for word in recon_sent:
if recon[word]>=1/vocab_size*10:
if word in sent:
f.write('<mark class="red">{}</mark> '.format(vocab[word]))
else:
f.write('{} '.format(vocab[word]))
else:
break
f.write('</p>')
f.write('<HR SIZE=5>')
f.write('<p style="background-color:green;">Train</p>')
for pred, true, sent, recon in trains:
if (pred[true==1] != 1).all():
f.write('<p style="background-color:red;">All Wrong</p>')
elif (pred != true).any():
f.write('<p style="background-color:blue;">Partial Wrong</p>')
f.write('<p>prediction: {}, true: {}</p>'.format(class_names[pred==1], class_names[true==1]))
f.write('<p>')
f.write('<p> In Order:')
for word in sent:
f.write('{} '.format(vocab[word]))
f.write('</p>')
f.write('<p> By Importance:')
recon_sent = np.argsort(recon)[::-1]
for word in recon_sent:
if word in sent:
f.write('{} '.format(vocab[word]))
f.write('</p>')
f.write('<p> Reconstruct:')
for word in recon_sent:
if recon[word]>=1/vocab_size*10:
if word in sent:
f.write('<mark class="red">{}</mark> '.format(vocab[word]))
else:
f.write('{} '.format(vocab[word]))
else:
break
f.write('</p>')
f.write('<HR SIZE=5>')
print('Result saved in html.')
'''Visualization for development'''
def plot_training(caches, labels, rec, names, save=False):
n = len(names)
plt.figure(figsize=(5*n, n))
plt.clf()
gs = gridspec.GridSpec(1, n)
gs.update(wspace=0.1, hspace=0.1)
for i in range(n):
plt.subplot(gs[i])
title = '%s_Plot' %(names[i])
plt.title(title)
plt.xlabel('Training Steps')
plt.ylabel(names[i])
for j, values in enumerate(caches[i]):
plt.plot(rec[i], values, label=labels[i][j])
if save:
plt.savefig('fig/log.png')
plt.show()
def multilabel_confusion_matrix(true, pred, labels, normalize=False, cmap=plt.cm.Blues):
from sklearn.metrics import confusion_matrix
conf_mats=[]
for label_col in range(len(labels)):
true_label = true[:, label_col]
pred_label = pred[:, label_col]
conf_mats.append(confusion_matrix(pred_label, true_label))
plt.figure(figsize=(5*len(labels), len(labels)))
plt.clf()
gs = gridspec.GridSpec(1, len(labels))
gs.update(wspace=1./len(labels), hspace=1./len(labels))
for i, label in enumerate(labels):
if normalize:
            cm = conf_mats[i].astype('float') / conf_mats[i].sum(axis=1)[:, np.newaxis]
else:
cm = conf_mats[i]
plt.subplot(gs[i])
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(label)
plt.colorbar()
tick_marks = np.arange(2)
plt.xticks(tick_marks, tick_marks)
plt.yticks(tick_marks, tick_marks)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis].astype('float')
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
def data_visualize_multilabel(data, labels, n_topic, classnames, save=False, show=True, legend=True):
idx = np.random.choice(range(len(labels)), 100, replace=False)
transformer = TfidfTransformer(smooth_idf=False)
data = transformer.fit_transform(data[idx]).toarray()
labels = np.array(labels)[idx]
Y = tsne.tsne(data, 2, data.shape[0])
colors = ['orange'] + ['blue'] + ['y'] + ['m'] + ['r']
for i in range(n_topic):
idx = np.where(labels[:,i]==1)
plt.scatter(Y[idx, 0], Y[idx, 1], c=colors[i], label=classnames[i], marker='.')
if legend:
plt.legend()
if save:
plt.savefig('fig/train_distri.png')
if show:
plt.show()
def explore(data, A_mean, A_logsigm, topic_word, topic_var, n_topic, n_hidden, name, save=False):
test_set, test_labels, pred_labels, z, class_names = data
# latent
A_sigm = np.exp(A_logsigm)
samples = []
for i in range(n_topic):
sample = np.array(list(map(np.random.normal, A_mean[i], A_sigm[i], [50]*n_hidden)))
samples += [sample.T]
    ss = np.concatenate(samples)
import numpy as np
z = np.array([1.0, 2.0, 3.0, 4.0, 1.0, 2.0, 3.0])
print(np.exp(z)/sum(np.exp(z)))
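# A numerically safer variant (a sketch, not required for this small example):
# subtracting the maximum before exponentiating avoids overflow for large inputs
# while leaving the softmax values unchanged.
z_shifted = z - np.max(z)
print(np.exp(z_shifted) / np.sum(np.exp(z_shifted)))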
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import scipy.sparse as sps
import pandas as pd
from mars import dataframe as md
from mars import tensor as mt
from mars.core import get_tiled
from mars.tensor.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, \
expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, \
hsplit, vsplit, dsplit, roll, squeeze, diff, ediff1d, flip, flipud, fliplr, repeat, tile, \
isin, searchsorted, unique, sort, argsort, partition, argpartition, topk, argtopk, \
trapz, shape, to_gpu, to_cpu, swapaxes
from mars.tensor.datasource import tensor, ones, zeros, arange
from mars.tests.core import require_cupy, TestBase
class Test(TestBase):
def setUp(self):
self.ctx, self.executor = self._create_test_context()
def testRechunkExecution(self):
raw = np.random.RandomState(0).random((11, 8))
arr = tensor(raw, chunk_size=3)
arr2 = arr.rechunk(4)
res = self.executor.execute_tensor(arr2)
self.assertTrue(np.array_equal(res[0], raw[:4, :4]))
self.assertTrue(np.array_equal(res[1], raw[:4, 4:]))
self.assertTrue(np.array_equal(res[2], raw[4:8, :4]))
self.assertTrue(np.array_equal(res[3], raw[4:8, 4:]))
self.assertTrue(np.array_equal(res[4], raw[8:, :4]))
self.assertTrue(np.array_equal(res[5], raw[8:, 4:]))
def testCopytoExecution(self):
a = ones((2, 3), chunk_size=1)
b = tensor([3, -1, 3], chunk_size=2)
copyto(a, b, where=b > 1)
res = self.executor.execute_tensor(a, concat=True)[0]
expected = np.array([[3, 1, 3], [3, 1, 3]])
np.testing.assert_equal(res, expected)
a = ones((2, 3), chunk_size=1)
b = tensor(np.asfortranarray(np.random.rand(2, 3)), chunk_size=2)
copyto(b, a)
res = self.executor.execute_tensor(b, concat=True)[0]
expected = np.asfortranarray(np.ones((2, 3)))
np.testing.assert_array_equal(res, expected)
self.assertTrue(res.flags['F_CONTIGUOUS'])
self.assertFalse(res.flags['C_CONTIGUOUS'])
def testAstypeExecution(self):
raw = np.random.random((10, 5))
arr = tensor(raw, chunk_size=3)
arr2 = arr.astype('i8')
res = self.executor.execute_tensor(arr2, concat=True)
np.testing.assert_array_equal(res[0], raw.astype('i8'))
raw = sps.random(10, 5, density=.2)
arr = tensor(raw, chunk_size=3)
arr2 = arr.astype('i8')
res = self.executor.execute_tensor(arr2, concat=True)
self.assertTrue(np.array_equal(res[0].toarray(), raw.astype('i8').toarray()))
raw = np.asfortranarray(np.random.random((10, 5)))
arr = tensor(raw, chunk_size=3)
arr2 = arr.astype('i8', order='C')
res = self.executor.execute_tensor(arr2, concat=True)[0]
np.testing.assert_array_equal(res, raw.astype('i8'))
self.assertTrue(res.flags['C_CONTIGUOUS'])
self.assertFalse(res.flags['F_CONTIGUOUS'])
def testTransposeExecution(self):
raw = np.random.random((11, 8, 5))
arr = tensor(raw, chunk_size=3)
arr2 = transpose(arr)
res = self.executor.execute_tensor(arr2, concat=True)
np.testing.assert_array_equal(res[0], raw.T)
arr3 = transpose(arr, axes=(-2, -1, -3))
res = self.executor.execute_tensor(arr3, concat=True)
np.testing.assert_array_equal(res[0], raw.transpose(1, 2, 0))
raw = sps.random(11, 8)
arr = tensor(raw, chunk_size=3)
arr2 = transpose(arr)
self.assertTrue(arr2.issparse())
res = self.executor.execute_tensor(arr2, concat=True)
np.testing.assert_array_equal(res[0].toarray(), raw.T.toarray())
# test order
raw = np.asfortranarray(np.random.random((11, 8, 5)))
arr = tensor(raw, chunk_size=3)
arr2 = transpose(arr)
res = self.executor.execute_tensor(arr2, concat=True)[0]
expected = np.transpose(raw).copy(order='A')
np.testing.assert_array_equal(res, expected)
self.assertEqual(res.flags['C_CONTIGUOUS'], expected.flags['C_CONTIGUOUS'])
self.assertEqual(res.flags['F_CONTIGUOUS'], expected.flags['F_CONTIGUOUS'])
arr = tensor(raw, chunk_size=3)
arr2 = transpose(arr, (1, 2, 0))
res = self.executor.execute_tensor(arr2, concat=True)[0]
expected = np.transpose(raw, (1, 2, 0)).copy(order='A')
np.testing.assert_array_equal(res, expected)
self.assertEqual(res.flags['C_CONTIGUOUS'], expected.flags['C_CONTIGUOUS'])
self.assertEqual(res.flags['F_CONTIGUOUS'], expected.flags['F_CONTIGUOUS'])
df = md.DataFrame(mt.random.rand(10, 5, chunk_size=5))
df = df[df[0] < 1]
# generate tensor with unknown shape
t = df.to_tensor()
t2 = transpose(t)
res = self.executor.execute_tensor(t2, concat=True)[0]
self.assertEqual(res.shape, (5, 10))
def testSwapaxesExecution(self):
raw = np.random.random((11, 8, 5))
arr = swapaxes(raw, 2, 0)
res = self.executor.execute_tensor(arr, concat=True)
np.testing.assert_array_equal(res[0], raw.swapaxes(2, 0))
raw = np.random.random((11, 8, 5))
arr = tensor(raw, chunk_size=3)
arr2 = arr.swapaxes(2, 0)
res = self.executor.execute_tensor(arr2, concat=True)
np.testing.assert_array_equal(res[0], raw.swapaxes(2, 0))
raw = sps.random(11, 8, density=.2)
arr = tensor(raw, chunk_size=3)
arr2 = arr.swapaxes(1, 0)
res = self.executor.execute_tensor(arr2, concat=True)
np.testing.assert_array_equal(res[0].toarray(), raw.toarray().swapaxes(1, 0))
# test order
raw = np.asfortranarray(np.random.rand(11, 8, 5))
arr = tensor(raw, chunk_size=3)
arr2 = arr.swapaxes(2, 0)
res = self.executor.execute_tensor(arr2, concat=True)[0]
expected = raw.swapaxes(2, 0).copy(order='A')
np.testing.assert_array_equal(res, expected)
self.assertEqual(res.flags['C_CONTIGUOUS'], expected.flags['C_CONTIGUOUS'])
self.assertEqual(res.flags['F_CONTIGUOUS'], expected.flags['F_CONTIGUOUS'])
arr = tensor(raw, chunk_size=3)
arr2 = arr.swapaxes(0, 2)
res = self.executor.execute_tensor(arr2, concat=True)[0]
expected = raw.swapaxes(0, 2).copy(order='A')
np.testing.assert_array_equal(res, expected)
self.assertEqual(res.flags['C_CONTIGUOUS'], expected.flags['C_CONTIGUOUS'])
self.assertEqual(res.flags['F_CONTIGUOUS'], expected.flags['F_CONTIGUOUS'])
arr = tensor(raw, chunk_size=3)
arr2 = arr.swapaxes(1, 0)
res = self.executor.execute_tensor(arr2, concat=True)[0]
expected = raw.swapaxes(1, 0).copy(order='A')
np.testing.assert_array_equal(res, expected)
self.assertEqual(res.flags['C_CONTIGUOUS'], expected.flags['C_CONTIGUOUS'])
self.assertEqual(res.flags['F_CONTIGUOUS'], expected.flags['F_CONTIGUOUS'])
def testMoveaxisExecution(self):
x = zeros((3, 4, 5), chunk_size=2)
t = moveaxis(x, 0, -1)
res = self.executor.execute_tensor(t, concat=True)[0]
self.assertEqual(res.shape, (4, 5, 3))
t = moveaxis(x, -1, 0)
res = self.executor.execute_tensor(t, concat=True)[0]
self.assertEqual(res.shape, (5, 3, 4))
t = moveaxis(x, [0, 1], [-1, -2])
res = self.executor.execute_tensor(t, concat=True)[0]
self.assertEqual(res.shape, (5, 4, 3))
t = moveaxis(x, [0, 1, 2], [-1, -2, -3])
res = self.executor.execute_tensor(t, concat=True)[0]
self.assertEqual(res.shape, (5, 4, 3))
def testBroadcastToExecution(self):
raw = np.random.random((10, 5, 1))
arr = tensor(raw, chunk_size=2)
arr2 = broadcast_to(arr, (5, 10, 5, 6))
res = self.executor.execute_tensor(arr2, concat=True)[0]
np.testing.assert_array_equal(res, np.broadcast_to(raw, (5, 10, 5, 6)))
# test chunk with unknown shape
arr1 = mt.random.rand(3, 4, chunk_size=2)
arr2 = mt.random.permutation(arr1)
arr3 = broadcast_to(arr2, (2, 3, 4))
res = self.executor.execute_tensor(arr3, concat=True)[0]
self.assertEqual(res.shape, (2, 3, 4))
def testBroadcastArraysExecutions(self):
x_data = [[1, 2, 3]]
x = tensor(x_data, chunk_size=1)
y_data = [[1], [2], [3]]
y = tensor(y_data, chunk_size=2)
a = broadcast_arrays(x, y)
res = [self.executor.execute_tensor(arr, concat=True)[0] for arr in a]
expected = np.broadcast_arrays(x_data, y_data)
for r, e in zip(res, expected):
np.testing.assert_equal(r, e)
def testWhereExecution(self):
raw_cond = np.random.randint(0, 2, size=(4, 4), dtype='?')
raw_x = np.random.rand(4, 1)
raw_y = np.random.rand(4, 4)
cond, x, y = tensor(raw_cond, chunk_size=2), tensor(raw_x, chunk_size=2), tensor(raw_y, chunk_size=2)
arr = where(cond, x, y)
res = self.executor.execute_tensor(arr, concat=True)
self.assertTrue(np.array_equal(res[0], np.where(raw_cond, raw_x, raw_y)))
raw_cond = sps.csr_matrix(np.random.randint(0, 2, size=(4, 4), dtype='?'))
raw_x = sps.random(4, 1, density=.1)
raw_y = sps.random(4, 4, density=.1)
cond, x, y = tensor(raw_cond, chunk_size=2), tensor(raw_x, chunk_size=2), tensor(raw_y, chunk_size=2)
arr = where(cond, x, y)
res = self.executor.execute_tensor(arr, concat=True)[0]
self.assertTrue(np.array_equal(res.toarray(),
np.where(raw_cond.toarray(), raw_x.toarray(), raw_y.toarray())))
# GH 2009
raw_x = np.arange(9.).reshape(3, 3)
x = arange(9.).reshape(3, 3)
arr = where(x < 5, 2, -1)
res = self.executor.execute_tensor(arr, concat=True)[0]
np.testing.assert_array_equal(res, np.where(raw_x < 5, 2, -1))
def testReshapeExecution(self):
raw_data = np.random.rand(10, 20, 30)
x = tensor(raw_data, chunk_size=6)
y = x.reshape(-1, 30)
res = self.executor.execute_tensor(y, concat=True)
np.testing.assert_array_equal(res[0], raw_data.reshape(-1, 30))
y2 = x.reshape(10, -1)
res = self.executor.execute_tensor(y2, concat=True)
np.testing.assert_array_equal(res[0], raw_data.reshape(10, -1))
y3 = x.reshape(-1)
res = self.executor.execute_tensor(y3, concat=True)
np.testing.assert_array_equal(res[0], raw_data.reshape(-1))
y4 = x.ravel()
res = self.executor.execute_tensor(y4, concat=True)
np.testing.assert_array_equal(res[0], raw_data.ravel())
raw_data = np.random.rand(30, 100, 20)
x = tensor(raw_data, chunk_size=6)
y = x.reshape(-1, 20, 5, 5, 4)
res = self.executor.execute_tensor(y, concat=True)
np.testing.assert_array_equal(res[0], raw_data.reshape(-1, 20, 5, 5, 4))
y2 = x.reshape(3000, 10, 2)
res = self.executor.execute_tensor(y2, concat=True)
np.testing.assert_array_equal(res[0], raw_data.reshape(3000, 10, 2))
y3 = x.reshape(60, 25, 40)
res = self.executor.execute_tensor(y3, concat=True)
np.testing.assert_array_equal(res[0], raw_data.reshape(60, 25, 40))
y4 = x.reshape(60, 25, 40)
y4.op.extra_params['_reshape_with_shuffle'] = True
size_res = self.executor.execute_tensor(y4, mock=True)
res = self.executor.execute_tensor(y4, concat=True)
self.assertEqual(res[0].nbytes, sum(v[0] for v in size_res))
self.assertTrue(np.array_equal(res[0], raw_data.reshape(60, 25, 40)))
y5 = x.ravel(order='F')
res = self.executor.execute_tensor(y5, concat=True)[0]
expected = raw_data.ravel(order='F')
np.testing.assert_array_equal(res, expected)
self.assertEqual(res.flags['C_CONTIGUOUS'], expected.flags['C_CONTIGUOUS'])
self.assertEqual(res.flags['F_CONTIGUOUS'], expected.flags['F_CONTIGUOUS'])
def testExpandDimsExecution(self):
raw_data = np.random.rand(10, 20, 30)
x = tensor(raw_data, chunk_size=6)
y = expand_dims(x, 1)
res = self.executor.execute_tensor(y, concat=True)
self.assertTrue(np.array_equal(res[0], np.expand_dims(raw_data, 1)))
y = expand_dims(x, 0)
res = self.executor.execute_tensor(y, concat=True)
self.assertTrue(np.array_equal(res[0], np.expand_dims(raw_data, 0)))
y = expand_dims(x, 3)
res = self.executor.execute_tensor(y, concat=True)
self.assertTrue(np.array_equal(res[0], np.expand_dims(raw_data, 3)))
y = expand_dims(x, -1)
res = self.executor.execute_tensor(y, concat=True)
self.assertTrue(np.array_equal(res[0], np.expand_dims(raw_data, -1)))
y = expand_dims(x, -4)
res = self.executor.execute_tensor(y, concat=True)
self.assertTrue(np.array_equal(res[0], np.expand_dims(raw_data, -4)))
with self.assertRaises(np.AxisError):
expand_dims(x, -5)
with self.assertRaises(np.AxisError):
expand_dims(x, 4)
def testRollAxisExecution(self):
x = ones((3, 4, 5, 6), chunk_size=1)
y = rollaxis(x, 3, 1)
res = self.executor.execute_tensor(y, concat=True)
self.assertTrue(np.array_equal(res[0], np.rollaxis(np.ones((3, 4, 5, 6)), 3, 1)))
def testAtleast1dExecution(self):
x = 1
y = ones(3, chunk_size=2)
z = ones((3, 4), chunk_size=2)
t = atleast_1d(x, y, z)
res = [self.executor.execute_tensor(i, concat=True)[0] for i in t]
self.assertTrue(np.array_equal(res[0], np.array([1])))
self.assertTrue(np.array_equal(res[1], np.ones(3)))
self.assertTrue(np.array_equal(res[2], np.ones((3, 4))))
def testAtleast2dExecution(self):
x = 1
y = ones(3, chunk_size=2)
z = ones((3, 4), chunk_size=2)
t = atleast_2d(x, y, z)
res = [self.executor.execute_tensor(i, concat=True)[0] for i in t]
self.assertTrue(np.array_equal(res[0], np.array([[1]])))
self.assertTrue(np.array_equal(res[1], np.atleast_2d(np.ones(3))))
self.assertTrue(np.array_equal(res[2], np.ones((3, 4))))
def testAtleast3dExecution(self):
x = 1
y = ones(3, chunk_size=2)
z = ones((3, 4), chunk_size=2)
t = atleast_3d(x, y, z)
res = [self.executor.execute_tensor(i, concat=True)[0] for i in t]
self.assertTrue(np.array_equal(res[0], np.atleast_3d(x)))
self.assertTrue(np.array_equal(res[1], np.atleast_3d(np.ones(3))))
self.assertTrue(np.array_equal(res[2], np.atleast_3d(np.ones((3, 4)))))
def testArgwhereExecution(self):
x = arange(6, chunk_size=2).reshape(2, 3)
t = argwhere(x > 1)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.argwhere(np.arange(6).reshape(2, 3) > 1)
np.testing.assert_array_equal(res, expected)
data = np.asfortranarray(np.random.rand(10, 20))
x = tensor(data, chunk_size=10)
t = argwhere(x > 0.5)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.argwhere(data > 0.5)
np.testing.assert_array_equal(res, expected)
self.assertTrue(res.flags['F_CONTIGUOUS'])
self.assertFalse(res.flags['C_CONTIGUOUS'])
def testArraySplitExecution(self):
x = arange(48, chunk_size=3).reshape(2, 3, 8)
ss = array_split(x, 3, axis=2)
res = [self.executor.execute_tensor(i, concat=True)[0] for i in ss]
expected = np.array_split(np.arange(48).reshape(2, 3, 8), 3, axis=2)
self.assertEqual(len(res), len(expected))
[np.testing.assert_equal(r, e) for r, e in zip(res, expected)]
ss = array_split(x, [3, 5, 6, 10], axis=2)
res = [self.executor.execute_tensor(i, concat=True)[0] for i in ss]
expected = np.array_split(np.arange(48).reshape(2, 3, 8), [3, 5, 6, 10], axis=2)
self.assertEqual(len(res), len(expected))
[np.testing.assert_equal(r, e) for r, e in zip(res, expected)]
def testSplitExecution(self):
for a in ((1, 1, 1, 2, 2, 3), [1, 1, 1, 2, 2, 3]):
splits = split(a, (3, 5))
self.assertEqual(len(splits), 3)
splits0 = self.executor.execute_tensor(splits[0], concat=True)[0]
np.testing.assert_array_equal(splits0, (1, 1, 1))
splits1 = self.executor.execute_tensor(splits[1], concat=True)[0]
np.testing.assert_array_equal(splits1, (2, 2))
splits2 = self.executor.execute_tensor(splits[2], concat=True)[0]
np.testing.assert_array_equal(splits2, (3,))
x = arange(48, chunk_size=3).reshape(2, 3, 8)
ss = split(x, 4, axis=2)
res = [self.executor.execute_tensor(i, concat=True)[0] for i in ss]
expected = np.split(np.arange(48).reshape(2, 3, 8), 4, axis=2)
self.assertEqual(len(res), len(expected))
[np.testing.assert_equal(r, e) for r, e in zip(res, expected)]
ss = split(x, [3, 5, 6, 10], axis=2)
res = [self.executor.execute_tensor(i, concat=True)[0] for i in ss]
expected = np.split(np.arange(48).reshape(2, 3, 8), [3, 5, 6, 10], axis=2)
self.assertEqual(len(res), len(expected))
[np.testing.assert_equal(r, e) for r, e in zip(res, expected)]
# hsplit
x = arange(120, chunk_size=3).reshape(2, 12, 5)
ss = hsplit(x, 4)
res = [self.executor.execute_tensor(i, concat=True)[0] for i in ss]
expected = np.hsplit(np.arange(120).reshape(2, 12, 5), 4)
self.assertEqual(len(res), len(expected))
[np.testing.assert_equal(r, e) for r, e in zip(res, expected)]
# vsplit
x = arange(48, chunk_size=3).reshape(8, 3, 2)
ss = vsplit(x, 4)
res = [self.executor.execute_tensor(i, concat=True)[0] for i in ss]
expected = np.vsplit(np.arange(48).reshape(8, 3, 2), 4)
self.assertEqual(len(res), len(expected))
[np.testing.assert_equal(r, e) for r, e in zip(res, expected)]
# dsplit
x = arange(48, chunk_size=3).reshape(2, 3, 8)
ss = dsplit(x, 4)
res = [self.executor.execute_tensor(i, concat=True)[0] for i in ss]
expected = np.dsplit(np.arange(48).reshape(2, 3, 8), 4)
self.assertEqual(len(res), len(expected))
[np.testing.assert_equal(r, e) for r, e in zip(res, expected)]
x_data = sps.random(12, 8, density=.1)
x = tensor(x_data, chunk_size=3)
ss = split(x, 4, axis=0)
res = [self.executor.execute_tensor(i, concat=True)[0] for i in ss]
expected = np.split(x_data.toarray(), 4, axis=0)
self.assertEqual(len(res), len(expected))
[np.testing.assert_equal(r.toarray(), e) for r, e in zip(res, expected)]
def testRollExecution(self):
x = arange(10, chunk_size=2)
t = roll(x, 2)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.roll(np.arange(10), 2)
np.testing.assert_equal(res, expected)
x2 = x.reshape(2, 5)
t = roll(x2, 1)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.roll(np.arange(10).reshape(2, 5), 1)
np.testing.assert_equal(res, expected)
t = roll(x2, 1, axis=0)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.roll(np.arange(10).reshape(2, 5), 1, axis=0)
np.testing.assert_equal(res, expected)
t = roll(x2, 1, axis=1)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.roll(np.arange(10).reshape(2, 5), 1, axis=1)
np.testing.assert_equal(res, expected)
def testSqueezeExecution(self):
data = np.array([[[0], [1], [2]]])
x = tensor(data, chunk_size=1)
t = squeeze(x)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.squeeze(data)
np.testing.assert_equal(res, expected)
t = squeeze(x, axis=2)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.squeeze(data, axis=2)
np.testing.assert_equal(res, expected)
def testDiffExecution(self):
data = np.array([1, 2, 4, 7, 0])
x = tensor(data, chunk_size=2)
t = diff(x)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.diff(data)
np.testing.assert_equal(res, expected)
t = diff(x, n=2)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.diff(data, n=2)
np.testing.assert_equal(res, expected)
data = np.array([[1, 3, 6, 10], [0, 5, 6, 8]])
x = tensor(data, chunk_size=2)
t = diff(x)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.diff(data)
np.testing.assert_equal(res, expected)
t = diff(x, axis=0)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.diff(data, axis=0)
np.testing.assert_equal(res, expected)
x = mt.arange('1066-10-13', '1066-10-16', dtype=mt.datetime64)
t = diff(x)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.diff(np.arange('1066-10-13', '1066-10-16', dtype=np.datetime64))
np.testing.assert_equal(res, expected)
def testEdiff1d(self):
data = np.array([1, 2, 4, 7, 0])
x = tensor(data, chunk_size=2)
t = ediff1d(x)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.ediff1d(data)
np.testing.assert_equal(res, expected)
to_begin = tensor(-99, chunk_size=2)
to_end = tensor([88, 99], chunk_size=2)
t = ediff1d(x, to_begin=to_begin, to_end=to_end)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.ediff1d(data, to_begin=-99, to_end=np.array([88, 99]))
np.testing.assert_equal(res, expected)
data = [[1, 2, 4], [1, 6, 24]]
t = ediff1d(tensor(data, chunk_size=2))
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.ediff1d(data)
np.testing.assert_equal(res, expected)
def testFlipExecution(self):
a = arange(8, chunk_size=2).reshape((2, 2, 2))
t = flip(a, 0)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.flip(np.arange(8).reshape(2, 2, 2), 0)
np.testing.assert_equal(res, expected)
t = flip(a, 1)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.flip(np.arange(8).reshape(2, 2, 2), 1)
np.testing.assert_equal(res, expected)
t = flipud(a)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.flipud(np.arange(8).reshape(2, 2, 2))
np.testing.assert_equal(res, expected)
t = fliplr(a)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.fliplr(np.arange(8).reshape(2, 2, 2))
np.testing.assert_equal(res, expected)
def testRepeatExecution(self):
a = repeat(3, 4)
res = self.executor.execute_tensor(a)[0]
expected = np.repeat(3, 4)
np.testing.assert_equal(res, expected)
x_data = np.random.randn(20, 30)
x = tensor(x_data, chunk_size=(3, 4))
t = repeat(x, 2)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.repeat(x_data, 2)
np.testing.assert_equal(res, expected)
t = repeat(x, 3, axis=1)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.repeat(x_data, 3, axis=1)
np.testing.assert_equal(res, expected)
t = repeat(x, np.arange(20), axis=0)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.repeat(x_data, np.arange(20), axis=0)
np.testing.assert_equal(res, expected)
t = repeat(x, arange(20, chunk_size=5), axis=0)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.repeat(x_data, np.arange(20), axis=0)
np.testing.assert_equal(res, expected)
x_data = sps.random(20, 30, density=.1)
x = tensor(x_data, chunk_size=(3, 4))
t = repeat(x, 2, axis=1)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.repeat(x_data.toarray(), 2, axis=1)
np.testing.assert_equal(res.toarray(), expected)
def testTileExecution(self):
a_data = np.array([0, 1, 2])
a = tensor(a_data, chunk_size=2)
t = tile(a, 2)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.tile(a_data, 2)
np.testing.assert_equal(res, expected)
t = tile(a, (2, 2))
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.tile(a_data, (2, 2))
np.testing.assert_equal(res, expected)
t = tile(a, (2, 1, 2))
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.tile(a_data, (2, 1, 2))
np.testing.assert_equal(res, expected)
b_data = np.array([[1, 2], [3, 4]])
b = tensor(b_data, chunk_size=1)
t = tile(b, 2)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.tile(b_data, 2)
np.testing.assert_equal(res, expected)
t = tile(b, (2, 1))
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.tile(b_data, (2, 1))
np.testing.assert_equal(res, expected)
c_data = np.array([1, 2, 3, 4])
c = tensor(c_data, chunk_size=3)
t = tile(c, (4, 1))
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.tile(c_data, (4, 1))
np.testing.assert_equal(res, expected)
def testIsInExecution(self):
element = 2 * arange(4, chunk_size=1).reshape((2, 2))
test_elements = [1, 2, 4, 8]
mask = isin(element, test_elements)
res = self.executor.execute_tensor(mask, concat=True)[0]
expected = np.isin(2 * np.arange(4).reshape((2, 2)), test_elements)
np.testing.assert_equal(res, expected)
res = self.executor.execute_tensor(element[mask], concat=True)[0]
expected = np.array([2, 4])
np.testing.assert_equal(res, expected)
mask = isin(element, test_elements, invert=True)
res = self.executor.execute_tensor(mask, concat=True)[0]
expected = np.isin(2 * np.arange(4).reshape((2, 2)), test_elements, invert=True)
np.testing.assert_equal(res, expected)
res = self.executor.execute_tensor(element[mask], concat=True)[0]
expected = np.array([0, 6])
np.testing.assert_equal(res, expected)
test_set = {1, 2, 4, 8}
mask = isin(element, test_set)
res = self.executor.execute_tensor(mask, concat=True)[0]
expected = np.isin(2 * np.arange(4).reshape((2, 2)), test_set)
np.testing.assert_equal(res, expected)
def testRavelExecution(self):
arr = ones((10, 5), chunk_size=2)
flat_arr = mt.ravel(arr)
res = self.executor.execute_tensor(flat_arr, concat=True)[0]
self.assertEqual(len(res), 50)
np.testing.assert_equal(res, np.ones(50))
def testSearchsortedExecution(self):
raw = np.sort(np.random.randint(100, size=(16,)))
        # test different chunk_size values: 3 triggers a combine step, 6 skips it
for chunk_size in (3, 6):
arr = tensor(raw, chunk_size=chunk_size)
# test scalar, with value in the middle
t1 = searchsorted(arr, 20)
res = self.executor.execute_tensor(t1, concat=True)[0]
expected = np.searchsorted(raw, 20)
np.testing.assert_array_equal(res, expected)
# test scalar, with value larger than 100
t2 = searchsorted(arr, 200)
res = self.executor.execute_tensor(t2, concat=True)[0]
expected = np.searchsorted(raw, 200)
np.testing.assert_array_equal(res, expected)
# test scalar, side left, with value exact in the middle of the array
t3 = searchsorted(arr, raw[10], side='left')
res = self.executor.execute_tensor(t3, concat=True)[0]
expected = np.searchsorted(raw, raw[10], side='left')
np.testing.assert_array_equal(res, expected)
# test scalar, side right, with value exact in the middle of the array
t4 = searchsorted(arr, raw[10], side='right')
res = self.executor.execute_tensor(t4, concat=True)[0]
expected = np.searchsorted(raw, raw[10], side='right')
np.testing.assert_array_equal(res, expected)
# test scalar, side left, with value exact in the end of the array
t5 = searchsorted(arr, raw[15], side='left')
res = self.executor.execute_tensor(t5, concat=True)[0]
expected = np.searchsorted(raw, raw[15], side='left')
np.testing.assert_array_equal(res, expected)
# test scalar, side right, with value exact in the end of the array
t6 = searchsorted(arr, raw[15], side='right')
res = self.executor.execute_tensor(t6, concat=True)[0]
expected = np.searchsorted(raw, raw[15], side='right')
np.testing.assert_array_equal(res, expected)
# test scalar, side left, with value exact in the start of the array
t7 = searchsorted(arr, raw[0], side='left')
res = self.executor.execute_tensor(t7, concat=True)[0]
expected = np.searchsorted(raw, raw[0], side='left')
np.testing.assert_array_equal(res, expected)
# test scalar, side right, with value exact in the start of the array
t8 = searchsorted(arr, raw[0], side='right')
res = self.executor.execute_tensor(t8, concat=True)[0]
expected = np.searchsorted(raw, raw[0], side='right')
np.testing.assert_array_equal(res, expected)
raw2 = np.random.randint(100, size=(3, 4))
# test tensor, side left
t9 = searchsorted(arr, tensor(raw2, chunk_size=2), side='left')
res = self.executor.execute_tensor(t9, concat=True)[0]
expected = np.searchsorted(raw, raw2, side='left')
np.testing.assert_array_equal(res, expected)
# test tensor, side right
t10 = searchsorted(arr, tensor(raw2, chunk_size=2), side='right')
res = self.executor.execute_tensor(t10, concat=True)[0]
expected = np.searchsorted(raw, raw2, side='right')
np.testing.assert_array_equal(res, expected)
# test one chunk
arr = tensor(raw, chunk_size=16)
# test scalar, tensor to search has 1 chunk
t11 = searchsorted(arr, 20)
res = self.executor.execute_tensor(t11, concat=True)[0]
expected = np.searchsorted(raw, 20)
np.testing.assert_array_equal(res, expected)
# test tensor with 1 chunk, tensor to search has 1 chunk
t12 = searchsorted(arr, tensor(raw2, chunk_size=4))
res = self.executor.execute_tensor(t12, concat=True)[0]
expected = np.searchsorted(raw, raw2)
np.testing.assert_array_equal(res, expected)
# test tensor with more than 1 chunk, tensor to search has 1 chunk
t13 = searchsorted(arr, tensor(raw2, chunk_size=2))
res = self.executor.execute_tensor(t13, concat=True)[0]
expected = np.searchsorted(raw, raw2)
np.testing.assert_array_equal(res, expected)
# test sorter
raw3 = np.random.randint(100, size=(16,))
arr = tensor(raw3, chunk_size=3)
order = np.argsort(raw3)
order_arr = tensor(order, chunk_size=4)
t14 = searchsorted(arr, 20, sorter=order_arr)
res = self.executor.execute_tensor(t14, concat=True)[0]
expected = np.searchsorted(raw3, 20, sorter=order)
np.testing.assert_array_equal(res, expected)
def testUniqueExecution(self):
rs = np.random.RandomState(0)
raw = rs.randint(10, size=(10,))
for chunk_size in (10, 3):
x = tensor(raw, chunk_size=chunk_size)
y = unique(x)
res = self.executor.execute_tensor(y, concat=True)[0]
expected = np.unique(raw)
np.testing.assert_array_equal(res, expected)
y, indices = unique(x, return_index=True)
res = self.executor.execute_tensors([y, indices])
expected = np.unique(raw, return_index=True)
self.assertEqual(len(res), 2)
self.assertEqual(len(expected), 2)
np.testing.assert_array_equal(res[0], expected[0])
np.testing.assert_array_equal(res[1], expected[1])
y, inverse = unique(x, return_inverse=True)
res = self.executor.execute_tensors([y, inverse])
expected = np.unique(raw, return_inverse=True)
self.assertEqual(len(res), 2)
self.assertEqual(len(expected), 2)
np.testing.assert_array_equal(res[0], expected[0])
np.testing.assert_array_equal(res[1], expected[1])
y, counts = unique(x, return_counts=True)
res = self.executor.execute_tensors([y, counts])
expected = np.unique(raw, return_counts=True)
self.assertEqual(len(res), 2)
self.assertEqual(len(expected), 2)
np.testing.assert_array_equal(res[0], expected[0])
np.testing.assert_array_equal(res[1], expected[1])
y, indices, inverse, counts = unique(x, return_index=True,
return_inverse=True, return_counts=True)
res = self.executor.execute_tensors([y, indices, inverse, counts])
expected = np.unique(raw, return_index=True,
return_inverse=True, return_counts=True)
self.assertEqual(len(res), 4)
self.assertEqual(len(expected), 4)
np.testing.assert_array_equal(res[0], expected[0])
np.testing.assert_array_equal(res[1], expected[1])
np.testing.assert_array_equal(res[2], expected[2])
np.testing.assert_array_equal(res[3], expected[3])
y, indices, counts = unique(x, return_index=True, return_counts=True)
res = self.executor.execute_tensors([y, indices, counts])
expected = np.unique(raw, return_index=True, return_counts=True)
self.assertEqual(len(res), 3)
self.assertEqual(len(expected), 3)
np.testing.assert_array_equal(res[0], expected[0])
np.testing.assert_array_equal(res[1], expected[1])
np.testing.assert_array_equal(res[2], expected[2])
raw2 = rs.randint(10, size=(4, 5, 6))
x2 = tensor(raw2, chunk_size=chunk_size)
y2 = unique(x2)
res = self.executor.execute_tensor(y2, concat=True)[0]
expected = np.unique(raw2)
np.testing.assert_array_equal(res, expected)
y2 = unique(x2, axis=1)
res = self.executor.execute_tensor(y2, concat=True)[0]
expected = np.unique(raw2, axis=1)
np.testing.assert_array_equal(res, expected)
y2 = unique(x2, axis=2)
res = self.executor.execute_tensor(y2, concat=True)[0]
expected = np.unique(raw2, axis=2)
np.testing.assert_array_equal(res, expected)
raw = rs.randint(10, size=(10, 20))
raw[:, 0] = raw[:, 11] = rs.randint(10, size=(10,))
x = tensor(raw, chunk_size=2)
y, ind, inv, counts = unique(x, aggregate_size=3, axis=1, return_index=True,
return_inverse=True, return_counts=True)
res_unique, res_ind, res_inv, res_counts = self.executor.execute_tensors((y, ind, inv, counts))
exp_unique, exp_ind, exp_counts = np.unique(raw, axis=1, return_index=True, return_counts=True)
raw_res_unique = res_unique
res_unique_df = pd.DataFrame(res_unique)
res_unique_ind = np.asarray(res_unique_df.sort_values(list(range(res_unique.shape[0])),
axis=1).columns)
res_unique = res_unique[:, res_unique_ind]
res_ind = res_ind[res_unique_ind]
res_counts = res_counts[res_unique_ind]
np.testing.assert_array_equal(res_unique, exp_unique)
np.testing.assert_array_equal(res_ind, exp_ind)
np.testing.assert_array_equal(raw_res_unique[:, res_inv], raw)
np.testing.assert_array_equal(res_counts, exp_counts)
x = (mt.random.RandomState(0).rand(1000, chunk_size=20) > 0.5).astype(np.int32)
y = unique(x)
res = np.sort(self.executor.execute_tensor(y, concat=True)[0])
np.testing.assert_array_equal(res, np.array([0, 1]))
# test sparse
sparse_raw = sps.random(10, 3, density=0.1, format='csr', random_state=rs)
x = tensor(sparse_raw, chunk_size=2)
y = unique(x)
res = np.sort(self.executor.execute_tensor(y, concat=True)[0])
np.testing.assert_array_equal(res, np.unique(sparse_raw.data))
# test empty
x = tensor([])
y = unique(x)
res = self.executor.execute_tensor(y, concat=True)[0]
np.testing.assert_array_equal(res, np.unique([]))
x = tensor([[]])
y = unique(x)
res = self.executor.execute_tensor(y, concat=True)[0]
np.testing.assert_array_equal(res, np.unique([[]]))
@require_cupy
def testToGPUExecution(self):
raw = np.random.rand(10, 10)
x = tensor(raw, chunk_size=3)
gx = to_gpu(x)
res = self.executor.execute_tensor(gx, concat=True)[0]
np.testing.assert_array_equal(res.get(), raw)
@require_cupy
def testToCPUExecution(self):
raw = np.random.rand(10, 10)
x = tensor(raw, chunk_size=3, gpu=True)
cx = to_cpu(x)
res = self.executor.execute_tensor(cx, concat=True)[0]
np.testing.assert_array_equal(res, raw)
def testSortExecution(self):
# only 1 chunk when axis = -1
raw = np.random.rand(100, 10)
x = tensor(raw, chunk_size=10)
sx = sort(x)
res = self.executor.execute_tensor(sx, concat=True)[0]
np.testing.assert_array_equal(res, np.sort(raw))
# 1-d chunk
raw = np.random.rand(100)
x = tensor(raw, chunk_size=10)
sx = sort(x)
res = self.executor.execute_tensor(sx, concat=True)[0]
np.testing.assert_array_equal(res, np.sort(raw))
# test force need_align=True
sx = sort(x)
sx.op._need_align = True
res = self.executor.execute_tensor(sx, concat=True)[0]
self.assertEqual(get_tiled(sx).nsplits, get_tiled(x).nsplits)
np.testing.assert_array_equal(res, np.sort(raw))
# test psrs_kinds
sx = sort(x, psrs_kinds=[None, None, 'quicksort'])
res = self.executor.execute_tensor(sx, concat=True)[0]
np.testing.assert_array_equal(res, np.sort(raw))
# structured dtype
raw = np.empty(100, dtype=[('id', np.int32), ('size', np.int64)])
raw['id'] = np.random.randint(1000, size=100, dtype=np.int32)
raw['size'] = np.random.randint(1000, size=100, dtype=np.int64)
x = tensor(raw, chunk_size=10)
sx = sort(x, order=['size', 'id'])
res = self.executor.execute_tensor(sx, concat=True)[0]
np.testing.assert_array_equal(res, np.sort(raw, order=['size', 'id']))
# test psrs_kinds with structured dtype
sx = sort(x, order=['size', 'id'], psrs_kinds=[None, None, 'quicksort'])
res = self.executor.execute_tensor(sx, concat=True)[0]
np.testing.assert_array_equal(res, np.sort(raw, order=['size', 'id']))
# test flatten case
raw = np.random.rand(10, 10)
x = tensor(raw, chunk_size=5)
sx = sort(x, axis=None)
res = self.executor.execute_tensor(sx, concat=True)[0]
np.testing.assert_array_equal(res, np.sort(raw, axis=None))
# test multi-dimension
raw = np.random.rand(10, 100)
x = tensor(raw, chunk_size=(2, 10))
sx = sort(x, psrs_kinds=['quicksort'] * 3)
res = self.executor.execute_tensor(sx, concat=True)[0]
np.testing.assert_array_equal(res, np.sort(raw))
sx = sort(x, psrs_kinds=[None, None, 'quicksort'])
res = self.executor.execute_tensor(sx, concat=True)[0]
np.testing.assert_array_equal(res, np.sort(raw))
raw = np.random.rand(10, 99)
x = tensor(raw, chunk_size=(2, 10))
sx = sort(x)
res = self.executor.execute_tensor(sx, concat=True)[0]
np.testing.assert_array_equal(res, np.sort(raw))
# test 3-d
raw = np.random.rand(20, 25, 28)
x = tensor(raw, chunk_size=(10, 5, 7))
sx = sort(x)
res = self.executor.execute_tensor(sx, concat=True)[0]
np.testing.assert_array_equal(res, np.sort(raw))
sx = sort(x, psrs_kinds=[None, None, 'quicksort'])
res = self.executor.execute_tensor(sx, concat=True)[0]
np.testing.assert_array_equal(res, np.sort(raw))
sx = sort(x, axis=0)
res = self.executor.execute_tensor(sx, concat=True)[0]
np.testing.assert_array_equal(res, np.sort(raw, axis=0))
sx = sort(x, axis=0, psrs_kinds=[None, None, 'quicksort'])
res = self.executor.execute_tensor(sx, concat=True)[0]
np.testing.assert_array_equal(res, np.sort(raw, axis=0))
sx = sort(x, axis=1)
res = self.executor.execute_tensor(sx, concat=True)[0]
np.testing.assert_array_equal(res, np.sort(raw, axis=1))
sx = sort(x, axis=1, psrs_kinds=[None, None, 'quicksort'])
res = self.executor.execute_tensor(sx, concat=True)[0]
np.testing.assert_array_equal(res, np.sort(raw, axis=1))
# test multi-dimension with structured type
raw = np.empty((10, 100), dtype=[('id', np.int32), ('size', np.int64)])
raw['id'] = np.random.randint(1000, size=(10, 100), dtype=np.int32)
raw['size'] = np.random.randint(1000, size=(10, 100), dtype=np.int64)
x = tensor(raw, chunk_size=(3, 10))
sx = sort(x)
res = self.executor.execute_tensor(sx, concat=True)[0]
        np.testing.assert_array_equal(res, np.sort(raw))
import numpy as np
import torch
import visdom
import analysis.variationalbayes.mcvb as mcvb
def vec_pts_to_line(pts, ptline, vline):
""" Normal vector from a point to a line.
Arguments:
- pts - [N, D] - N D-dimensional points
- ptline - [D] - point on a line
- vline - [D] - vector of the line, not necessarily normalized
Returns:
- [N, D] - shortest vectors from the N points to the line (unnormalized)
"""
a = np.dot(vline, (pts - ptline).T) / vline.dot(vline)
res = a[:, None] * vline + ptline - pts
return res
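# Illustration added for clarity (not part of the original module): a quick numeric check
# of vec_pts_to_line in 2-D. The shortest vector from (1, 1) to the x-axis is (0, -1) and
# from (3, -2) it is (0, 2). Guarded with `if False` like the demo block further below, so
# it never runs on import.
if False:
    _pts = np.array([[1.0, 1.0], [3.0, -2.0]])
    _vec = vec_pts_to_line(_pts, ptline=np.array([0.0, 0.0]), vline=np.array([1.0, 0.0]))
    print(_vec)  # approximately [[0., -1.], [0., 2.]]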
def rot_90(x):
if x.dim() == 1:
return x[[1, 0]] * torch.tensor([-1.0, 1.0]).to(x)
elif x.dim() > 1:
return x[..., [1, 0]] * torch.tensor([-1.0, 1.0]).to(x)
def vec_pts_to_line_torch(pts, ptlines, vlines):
""" Normal vector from a point to a line.
Arguments:
- pts - [N, D] - N D-dimensional points
- ptlines - [N, D] - points on the lines
- vlines - [N, D] - vectors of the lines, not necessarily normalized
Returns:
- [N, D] - shortest vectors from the N points to the lines (unnormalized)
"""
a = torch.sum(vlines * (pts - ptlines), dim=-1) / torch.sum(vlines * vlines, dim=-1)
res = a[:, None] * vlines + ptlines - pts
return res
def vec_pts_to_lines_torch(pts, ptlines, vlines):
""" Normal vector from N points to N lines, elementwise.
Arguments:
- pts - [..., D] - many D-dimensional points
- ptlines - [..., D] - points on many lines
- vlines - [..., D] - vectors of many lines, not necessarily normalized
Returns:
- [N, D] - shortest vectors from the N points to the lines (unnormalized)
"""
a = torch.sum(vlines * (pts - ptlines), dim=-1) / torch.sum(vlines * vlines, dim=-1)
res = a[..., None] * vlines + ptlines - pts
return res
def stable_normalize(x, etha=1.0e-8):
""" Numerically stable vector normalization
"""
n = np.linalg.norm(x, axis=-1, keepdims=True)
    n = np.maximum(n, etha) # guard against near-zero norms; elementwise, so batched inputs also work
return x / n
def stable_normalize_torch(x, etha=1.0e-8):
""" Numerically stable vector normalization
"""
return x / torch.norm(x, dim=-1, keepdim=True).clamp(min=etha)
def stable_sigmoid(x):
""" Numerically stable sigmoid function of one variable
"""
if x >= 0:
        z = np.exp(-x)
return 1 / (1 + z)
else:
# if x is less than zero then z will be small, denom can't be
# zero because it's 1+z.
        z = np.exp(x)
return z / (1 + z)
def sigmoid_torch(x, a=1, b=0):
""" Sigmoid function
"""
#return 1.0 / (1.0 + torch.exp(-a*(x - b))) # numerically unstable, but directly interpretable
#return 1.0 / (1.0 + torch.exp(- torch.abs(a)*(x - b))) # numerically unstable, but directly interpretable
#return 1.0 / (1.0 + torch.exp(torch.clamp(-a*(x - b), min=-10, max=10))) # stable, interpretable
#return 1.0 / (1.0 + torch.exp(-a*x + b)) # numerically most stable, use sigmoid_inflection_pt() to interpret
return 1.0 / (1.0 + torch.exp(torch.clamp(-a*x + b, min=-10, max=10))) # numerically most stable, use sigmoid_inflection_pt() to interpret
def sigmoid_inflection_pt(a=1, b=0):
""" Inflection point (f(x_i)=0.5)
-a*x+b = 0
x_i = b / a
"""
return b / a
def sigmoid_inflection_a_to_b(x_i, a):
""" x_i = b / a
b = x_i * a
"""
return x_i * a
if False:
import matplotlib.pyplot as plt
x = torch.arange(-3.0, 3, 0.1)
y = sigmoid_torch(x, a=5, b=-2)
plt.plot(x.data.numpy(), y.data.numpy())
plt.show()
exit()
class RegressorType(object):
Const = 0
Position = 1
Velocity1D = 2
Acceleration = 3
Control = 4
OptimalTarget = 5
OptimalTrajectory = 6
count = 7
names = ("Const",
"Position",
"Velocity1D",
"Acceleration",
"Control",
"OptimalTarget",
"OptimalTrajectory",)
statesize = (1, 2, 1, 2, 2, 2, 2)
def pprint_policy(policy, regressortypes):
print("Policy:")
i = 0
for r in regressortypes:
s = RegressorType.statesize[r]
p = policy[:, i:i+s]
i += s
print("{}: max(abs)={} \n{}".format(RegressorType.names[r], np.max(np.abs(p)), p))
def signal_dependent_noise_covar_torch(control, scale=torch.tensor([2.0, 1.0]), uniform=0.0, etha=1.0e-6):
""" Signal-dependent noise covariance matrix.
control: [2] - control vector
scale: [2] - covariance scale in the [control, normal] directions
uniform: scalar - additional diagonal noise
etha: scalar - diagonal boost
"""
control_n = rot_90(control)
m_control_global = torch.stack([control, control_n], dim=-1)
m_control_globalscaled = m_control_global * scale[None, :]
vuvt = torch.einsum("...ik,...jk->...ij", m_control_globalscaled, m_control_globalscaled)
m_covar = vuvt + torch.eye(2).to(vuvt) * (uniform + etha)
return m_covar, m_control_globalscaled
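# Illustration added for clarity (not part of the original module): for a unit control
# along +x and scale = [2, 1], the covariance comes out as roughly diag(4, 1) plus the
# small diagonal boost, i.e. four times the variance along the control direction compared
# to the normal direction. Guarded with `if False` so it never runs on import.
if False:
    _covar, _basis = signal_dependent_noise_covar_torch(torch.tensor([1.0, 0.0]),
                                                        scale=torch.tensor([2.0, 1.0]))
    print(_covar)  # approximately [[4., 0.], [0., 1.]]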
def signal_dependent_noise_covar_xaligned_torch(control, scale=torch.tensor([2.0, 1.0]), uniform=0.0, etha=1.0e-6):
""" Signal-dependent uncorrelated noise covariances for x-aligned control vector.
control: [2] - x-aligned control vector; control[-1] == 0
scale: [2] - covariance scale in the [control, normal] directions
uniform: scalar - additional diagonal noise
etha: scalar - diagonal boost
"""
control_n = rot_90(control)
m_control_global = torch.stack([control, control_n], dim=-1)
m_control_globalscaled = m_control_global * scale[None, :]
diag_covar = (torch.norm(control, dim=-1, keepdim=True) * scale)**2 + (uniform + etha)
return diag_covar, m_control_globalscaled
class TargetSwitchingModel(mcvb.TensorContainer):
def __init__(self,
data,
memsize=1,
delaysize=0,
regressortypes=None,
initialpolicy=None,
fitsigmoid=True,
fittrajectories=False,
fitpolicy=True,
fitnoise=True,
policyactivation=False):
"""
data : list of tumples (trajectory, ptstart, ptend, ptstart_sw, ptend_sw)
"""
super(TargetSwitchingModel, self).__init__()
self.data = data
self.memsize = memsize
self.delaysize = delaysize
self.regressortypes = regressortypes
self.fitsigmoid = fitsigmoid
self.fittrajectories = fittrajectories
self.fitpolicy = fitpolicy
self.fitnoise = fitnoise
self.policyactivation = policyactivation
statesize = 0
for r in regressortypes:
if r == RegressorType.Const:
statesize += 1
else:
statesize += RegressorType.statesize[r] * self.memsize
if initialpolicy is None:
initialpolicy = np.zeros([2, statesize])
self.initialpolicy = initialpolicy
# Construct extended data tensors with maxlength
# to unify the model and a binary mask for the missing data
N = len(self.data) # number of trials
lengths = np.array([len(x) for x, _, _, _, _ in self.data]) # [N]
self.lengths = lengths
maxlength = np.max(lengths) # max trial length
        x = [np.concatenate([tr, np.zeros([maxlength-len(tr), 2])], axis=0) for tr, _, _, _, _ in self.data] # [N, maxlength, D]
self.x = np.stack(x, axis=0) # [N, T, D]
m = [np.concatenate([np.ones(length), np.zeros(maxlength-length)]) for length in lengths]
self.mask = np.stack(m, axis=0) # [N, T], {0-1} mask
self.disturbed = np.array([not np.allclose(b1, b2) for tr, a1, b1, a2, b2 in self.data]) # [N]
self.start = np.stack(np.array([s for _, s, _, _, _ in self.data], dtype=float), axis=0) # [N, D]
self.target = np.stack(np.array([t for _, _, t, _, _ in self.data], dtype=float), axis=0) # [N, D]
self.start_dist = np.stack(np.array([s for _, _, _, s, _ in self.data], dtype=float), axis=0) # [N, D], disturbed
self.target_dist = np.stack(np.array([t for _, _, _, _, t in self.data], dtype=float), axis=0) # [N, D], disturbed
self["x"] = torch.from_numpy(self.x)
self["mask"] = torch.tensor(self.mask.astype(int))
self["disturbed"] = torch.tensor(self.disturbed.astype(int))
self["start"] = torch.from_numpy(self.start)
self["target"] = torch.from_numpy(self.target)
self["start_dist"] = torch.from_numpy(self.start_dist)
self["target_dist"] = torch.from_numpy(self.target_dist)
def model(self, env):
xdata = self["x"]
mask = self["mask"]
disturbed = self["disturbed"]
start = self["start"]
target = self["target"]
start_dist = self["start_dist"]
target_dist = self["target_dist"]
# Linear policy
log_cn_covar = env.param("log_cn_covar", torch.tensor([-2.0, -2.0])) # control execution output noise
cn_covar = torch.exp(log_cn_covar)
log_pn_covar = env.param("log_pn_covar", torch.tensor(-2.0)) # policy noise covar
pn_covar = torch.exp(log_pn_covar)
policy = env.sample("policy", mcvb.Normal(loc=0.0, scale=10.0).expand_right(self.initialpolicy.shape)) # linear policy matrix
N, T, D = xdata.shape
# Process all N trials in parallel
# Policy activation sigmoid (movement onset)
if self.policyactivation:
policy_act_log_sigm_a = env.param("policy_act_log_sigm_a", torch.ones(N))
policy_act_sigm_a = torch.exp(policy_act_log_sigm_a)
policy_act_sigm_b = env.param("policy_act_sigm_b", -2.0 * torch.ones(N))
# Target switching sigmoid
prior_inflection = 0.5
prior_a = 10.0
sigm_a = env.sample("sigm_a", mcvb.LogNormal(loc=np.log(prior_a), scale=5.0).expand_right([N]).mask(disturbed)) # [N], prior over sigmoid parameter a
sigm_b = env.sample("sigm_b", mcvb.Normal(loc=prior_a * prior_inflection, scale=5.0).expand_right([N]).mask(disturbed)) # [N], prior over sigmoid parameter b
env.store("sigm_a", sigm_a)
env.store("sigm_b", sigm_b)
x = [] # list of sampled trajectories
for t in range(3):
x.append(env.sample("x_{}".format(t),
mcvb.Normal(loc=0.0, scale=1.0).expand_right([N, D]).mask(mask[..., t][..., None]), obs=xdata[..., t, :]))
for t in range(2, T-1):
# t - current time step
# t+1 - next time step
v = x[t] - x[t-1] # current velocity vector
vnorm = stable_normalize_torch(v)
nv_vnorm = rot_90(vnorm)
vbasis = torch.stack([vnorm, nv_vnorm], dim=-1) # from local to global
            vbasisinv = vbasis.transpose(-2, -1) # from global to local. Because basis matrix is orthonormal, inverse is transpose
v_local = torch.einsum("...ij,...j->...i", vbasisinv, v)
vprev = x[t-1] - x[t-2] # previous velocity vector
aprev = v - vprev # previous acceleration
vprevnorm = stable_normalize_torch(vprev)
nv_vprevnorm = rot_90(vprevnorm)
vprevbasis = torch.stack([vprevnorm, nv_vprevnorm], dim=-1) # from local to global
            vprevbasisinv = vprevbasis.transpose(-2, -1) # from global to local. Because basis matrix is orthonormal, inverse is transpose
aprev_local = torch.einsum("...ij,...j->...i", vprevbasisinv, aprev)
# Sigmoid policy activation
if self.policyactivation:
policy_act_sigm = sigmoid_torch(x=t/T, a=policy_act_sigm_a, b=policy_act_sigm_b)
# Sigmoidal switching of start and target
sigm = sigmoid_torch(x=t/T, a=sigm_a, b=sigm_b) # sigmoidal weight for every time point
env.store("sigmoid_{}".format(t+1), sigm)
sigm = sigm[..., None] # broadcast to dimensions
start_sigm = start * (1.0 - sigm) + start_dist * sigm
target_sigm = target * (1.0 - sigm) + target_dist * sigm
vbasisinv_ex = mcvb.extend_batch(vbasisinv, [target_sigm.shape[0]])
vbasis_ex = mcvb.extend_batch(vbasis, [target_sigm.shape[0]])
target_local = torch.einsum("...ij,...j->...i", vbasisinv_ex, target_sigm - x[t])
# Construct the current state vector (regressors)
regressors = []
for r in self.regressortypes:
if r == RegressorType.Const:
regressors.append(torch.ones(N, 1).to(policy))
elif r == RegressorType.Position:
raise NotImplementedError()
elif r == RegressorType.Velocity1D:
regressors.append(v_local[..., 0][..., None])
elif r == RegressorType.Acceleration:
regressors.append(aprev_local)
elif r == RegressorType.Control:
raise NotImplementedError()
elif r == RegressorType.OptimalTarget:
regressors.append(target_local)
elif r == RegressorType.OptimalTrajectory:
vec_to_tr = vec_pts_to_lines_torch(x[t], start_sigm, target_sigm)
vec_to_tr_local = torch.einsum("...ij,...j->...i", vbasisinv_ex, vec_to_tr) # vector to optimal trajectory
regressors.append(vec_to_tr_local)
state = mcvb.expand_cat(regressors, dim=-1)
# policy: [nparticles, ndims, nregressors]
# state: [nparticles, ntrials, nregressors]
# prediction: [nparticles, ntrials, ndims]
policy_ex = mcvb.insert_batch(policy, [state.shape[-2]], 2)
control_local = torch.einsum("...ij,...j->...i", policy_ex, state) # control in local coordinates
if self.policyactivation:
control_local = policy_act_sigm[None, :, None] * control_local # modulate the control
control_global = torch.einsum("...ij,...j->...i", vbasis_ex, control_local)
# Control noise covariance
covar_next, _ = signal_dependent_noise_covar_torch(control=control_global, scale=cn_covar, uniform=pn_covar)
xnext_local = v_local + control_local
loc_next = x[t] + torch.einsum("...ij,...j->...i", vbasis_ex, xnext_local)
x.append(env.sample("x_{}".format(t+1),
mcvb.MultivariateNormal(loc=loc_next, covariance_matrix=covar_next).mask(mask[..., t+1]),
obs=xdata[..., t+1, :]))
return x
def model_fast(self, env):
""" Fast but doesn't support sampling
"""
xdata = self["x"]
mask = self["mask"]
disturbed = self["disturbed"]
start = self["start"]
target = self["target"]
start_dist = self["start_dist"]
target_dist = self["target_dist"]
# Linear policy
log_cn_covar = env.param("log_cn_covar", torch.tensor(np.log([1.0, 1.0]))) # control execution output noise
cn_covar = torch.exp(log_cn_covar)
log_pn_covar = env.param("log_pn_covar", torch.tensor(np.log(1.0))) # policy noise covar
pn_covar = torch.exp(log_pn_covar)
#pn_covar = pn_covar * 0.0
policy = env.sample("policy", mcvb.Normal(loc=0.0, scale=10.0).expand_right(self.initialpolicy.shape)) # linear policy matrix
N, T, D = xdata.shape
# Process all N trials in parallel
# Policy activation sigmoid (movement onset)
if self.policyactivation:
policy_act_log_sigm_a = env.param("policy_act_log_sigm_a", torch.ones(N))
policy_act_sigm_a = torch.exp(policy_act_log_sigm_a)
policy_act_sigm_b = env.param("policy_act_sigm_b", -2.0 * torch.ones(N))
# Target switching sigmoid
prior_inflection = 0.5
prior_a = 1.0
sigm_a = env.sample("sigm_a", mcvb.LogNormal(loc=np.log(prior_a), scale=1.0).expand_right([N]).mask(disturbed)) # [N], prior over sigmoid parameter a
sigm_b = env.sample("sigm_b", mcvb.Normal(loc=prior_inflection * prior_a, scale=1.0).expand_right([N]).mask(disturbed)) # [N], prior over sigmoid parameter b
#x = [] # list of sampled trajectories
x = env.observe("xdata", xdata)
for t in range(2 + self.memsize + self.delaysize):
env.sample("x_{}".format(t),
mcvb.Normal(loc=0.0, scale=1.0).expand_right([N, D]).mask(mask[..., t][..., None]),
obs=xdata[..., t, :])
        xtm2 = x[:, 0:T-3, :] # prev, t-2
xtm1 = x[:, 1:T-2, :] # prev, t-1
xt = x[:, 2:T-1, :] # current, t
xtp1 = x[:, 3:T, :] # next, t+1
# t - current time step
# t+1 - next time step
v = xt - xtm1 # current velocity vector
vnorm = stable_normalize_torch(v)
nv_vnorm = rot_90(vnorm)
vbasis = torch.stack([vnorm, nv_vnorm], dim=-1) # from local to global
        vbasisinv = vbasis.transpose(-2, -1) # from global to local. Because basis matrix is orthonormal, inverse is transpose
v_local = torch.einsum("...ij,...j->...i", vbasisinv, v)
vprev = xtm1 - xtm2 # previous velocity vector
aprev = v - vprev # previous acceleration
vprevnorm = stable_normalize_torch(vprev)
nv_vprevnorm = rot_90(vprevnorm)
vprevbasis = torch.stack([vprevnorm, nv_vprevnorm], dim=-1) # from local to global
        vprevbasisinv = vprevbasis.transpose(-2, -1) # from global to local. Because basis matrix is orthonormal, inverse is transpose
aprev_local = torch.einsum("...ij,...j->...i", vprevbasisinv, aprev)
t = torch.arange(2, T-1, device=self.device, dtype=self.dtype).to(xdata)
# Sigmoid policy activation
if self.policyactivation:
policy_act_sigm = sigmoid_torch(x=t/T, a=policy_act_sigm_a[..., None], b=policy_act_sigm_b[..., None])
# Sigmoidal switching of start and target
sigm = sigmoid_torch(x=t/T, a=sigm_a[..., None], b=sigm_b[..., None]) # sigmoidal weight for every time point
sigm = sigm[..., None] # broadcast to dimensions
start_sigm = start[:, None, :] * (1.0 - sigm) + start_dist[:, None, :] * sigm
target_sigm = target[:, None, :] * (1.0 - sigm) + target_dist[:, None, :] * sigm
vbasisinv_ex = mcvb.extend_batch(vbasisinv, [target_sigm.shape[0]])
vbasis_ex = mcvb.extend_batch(vbasis, [target_sigm.shape[0]])
target_local = torch.einsum("...ij,...j->...i", vbasisinv_ex, target_sigm - xt)
# Construct the current state vector (regressors)
regressors = []
for r in self.regressortypes:
if r == RegressorType.Position:
raise NotImplementedError()
elif r == RegressorType.Velocity1D:
regressors.append(v_local[..., 0][..., None]) # add higher order
elif r == RegressorType.Acceleration:
regressors.append(aprev_local) # add higher order
elif r == RegressorType.Control:
raise NotImplementedError()
elif r == RegressorType.OptimalTarget:
regressors.append(target_local) # add higher order
elif r == RegressorType.OptimalTrajectory:
vec_to_tr = vec_pts_to_lines_torch(xt[None, ...], start_sigm, target_sigm)
vec_to_tr_local = torch.einsum("...ij,...j->...i", vbasisinv_ex, vec_to_tr) # vector to optimal trajectory
regressors.append(vec_to_tr_local) # add higher order
state = mcvb.expand_cat(regressors, dim=-1)
# Tile the state for higher order regression
state = torch.cat([state[..., i:i-self.memsize+len(t)+1, :] for i in range(self.memsize)], dim=-1)
# Add single const regressor
        if RegressorType.Const in self.regressortypes: # check membership instead of relying on Const being listed last
state = mcvb.expand_cat([state, torch.ones(N, state.shape[-2], 1).to(policy)], dim=-1)
# policy: [nparticles, ndims, nregressors]
# state: [nparticles, ntrials, nt, nregressors]
# prediction: [nparticles, ntrials, nt, ndims]
policy_ex = mcvb.insert_batch(policy, [state.shape[-3], state.shape[-2]], 2)
if policy_ex.dim() == state.dim() + 2:
state = mcvb.extend_batch(state, [policy_ex.shape[0]])
control_local = torch.einsum("...ij,...j->...i", policy_ex, state) # control in local coordinates
if self.policyactivation:
control_local = policy_act_sigm[None, :, :, None] * control_local
vbasis_ex = vbasis_ex[..., self.memsize-1:, :, :]
v_local = v_local[:, self.memsize-1:, :]
control_global = torch.einsum("...ij,...j->...i", vbasis_ex, control_local)
control_global_norm = stable_normalize_torch(control_global)
vn_control_global_norm = rot_90(control_global_norm)
controlbasis = torch.stack([control_global_norm, vn_control_global_norm], dim=-1)
controlbasisinv = controlbasis.transpose(-2, -1)
def global_to_control(a):
a_control = torch.einsum("...ij,...j->...i", controlbasisinv, a)
return a_control
def local_to_control(a):
a_global = torch.einsum("...ij,...j->...i", vbasis_ex, a)
a_control = global_to_control(a_global)
return a_control
control_control = local_to_control(control_local)
covar_next, _ = signal_dependent_noise_covar_xaligned_torch(control_control, cn_covar, pn_covar)
std_next = torch.sqrt(covar_next)
xnext_local = v_local + control_local
xnext_control = local_to_control(xnext_local)
        xtp1_xt = (xtp1 - xt)[:, self.memsize-1:, :]
        xtp1_control = global_to_control(xtp1_xt.expand(xnext_local.shape))
tp1 = torch.arange(2+self.memsize+self.delaysize, T, device=self.device)
s = xtp1_control.shape[-2] - self.delaysize # valid size for prediction
env.sample("xtp1_control",
mcvb.Normal(loc=xnext_control[..., :s, :],
scale=std_next[..., :s, :]).mask(mask[..., tp1, None]),
obs=xtp1_control[..., -s:, :])
def proposal(self, env):
self.proposal_factorized(env)
def proposal_factorized(self, env):
xdata = self["x"]
mask = self["mask"]
disturbed = self["disturbed"]
start = self["start"]
target = self["target"]
start_dist = self["start_dist"]
target_dist = self["target_dist"]
N, T, D = xdata.shape
prior_inflection = 0.5
prior_a = 1.0
log_sigm_a_loc = env.param("log_sigm_a.loc", np.log(prior_a) * torch.ones(N))
log_sigm_a_logscale = env.param("log_sigm_a.logscale", np.log(1.0) * torch.ones(N))
sigm_a = env.sample("sigm_a", mcvb.LogNormal(loc=log_sigm_a_loc, scale=torch.exp(log_sigm_a_logscale)).mask(disturbed)) # [N], prior over sigmoid parameter a
sigm_b_loc = env.param("sigm_b.loc", prior_inflection * prior_a * torch.ones(N))
sigm_b_logscale = env.param("sigm_b.logscale", np.log(1.0) * torch.ones(N))
sigm_b = env.sample("sigm_b", mcvb.Normal(loc=sigm_b_loc, scale=torch.exp(sigm_b_logscale)).mask(disturbed)) # [N], prior over sigmoid parameter b
policy_loc = env.param("policy.loc", torch.zeros(self.initialpolicy.shape))
policy_logscale = env.param("policy.logscale", np.log(1.0) * torch.ones(self.initialpolicy.shape))
policy = env.sample("policy", mcvb.Normal(loc=policy_loc, scale=torch.exp(policy_logscale)))
def run_policy(policy, start, target, n=5, regressortypes=None):
def rot_90(x):
return x[[1, 0]] * np.array([-1.0, 1.0])
x = np.zeros([n, 2])
if start is None:
x[:3] = np.array([[0.0, 0.0], [0.01, 0.01], [0.02, 0.02]])
else:
x[:3] = start
if regressortypes is None:
        regressortypes = [RegressorType.Velocity1D, RegressorType.Acceleration, RegressorType.OptimalTarget, RegressorType.Const]
regressortypes.append(RegressorType.OptimalTrajectory)
for i in range(2, n-1):
# i - current time step
# i+1 - next time step
v = x[i] - x[i-1] # current velocity vector
vnorm = stable_normalize(v)
nv_vnorm = rot_90(vnorm)
vbasis = np.vstack([vnorm, nv_vnorm]).T # from local to global
        vbasisinv = vbasis.T # from global to local. Because basis matrix is orthonormal, inverse is transpose
v_local = np.dot(vbasisinv, v)
vprev = x[i-1] - x[i-2] # previous velocity vector
aprev = v - vprev # previous acceleration
vprevnorm = stable_normalize(vprev)
nv_vprevnorm = rot_90(vprevnorm)
vprevbasis = np.vstack([vprevnorm, nv_vprevnorm]).T # from local to global
        vprevbasisinv = vprevbasis.T # from global to local. Because basis matrix is orthonormal, inverse is transpose
aprev_local = np.dot(vprevbasisinv, aprev)
        target_local = np.dot(vbasisinv, target - x[i])
import seaborn as sns
import numpy as np
import os
N = 100
DELTA = 0.025
c = 1.
def load_data(folder,dtype=float):
return [np.loadtxt(folder+'/n_%d'%n,dtype=dtype) for n in range(1,100)]
def save_mean_Lambda_CC(data_CC,str_MU):
"""saves mean Lambda_CC vals in file"""
coop_int = np.array([np.mean(dat_n)/(i+1.) for i,dat_n in enumerate(data_CC)])
np.savetxt('MU-%s/Lambda_CC'%str_MU,coop_int)
return coop_int
# def fix_prob_1(b,c,Lambda_CC,DELTA):
# """calculates fix probability for single initial cooperator"""
# f = lambda j: -c/N +b/N*(Lambda_CC[j-1]*N-j)/(N-j)
# return 1./N +DELTA/N*sum(sum(f(j) for j in range(1,k+1)) for k in range(1,N))
def fix_prob_1(b,c,Lambda_CC,DELTA):
"""calculates fix probability for single initial cooperator"""
f = lambda j: (Lambda_CC[j-1]-float(j)/N)/float(N-j)
return 1./N +DELTA/N*( (-c*(N-1.))/2. + b*sum(sum(f(j) for j in range(1,k+1)) for k in range(1,N)))
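# Illustration added for clarity (the Lambda_CC values below are hypothetical, not taken
# from the MU-* data files): in a well-mixed population Lambda_CC[j-1] = j/N, so f(j) = 0
# for every j and the benefit term drops out, leaving rho = 1/N - DELTA*c*(N-1)/(2N),
# i.e. cooperator fixation slightly below the neutral value 1/N; the value of b is then
# irrelevant. Guarded with `if False` so it never runs on import.
if False:
    Lambda_CC_wellmixed = np.arange(1, N) / float(N)
    rho = fix_prob_1(b=2., c=c, Lambda_CC=Lambda_CC_wellmixed, DELTA=0.005)
    print(rho, 1. / N) # rho is slightly below 1/N = 0.01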
def critical_ratio(str_MU):
try: Lambda_CC = np.loadtxt('MU-%s/Lambda_CC'%str_MU)
except:
data_CC = load_data('MU-%s/wints_CC'%str_MU)
Lambda_CC = save_mean_Lambda_CC(data_CC,str_MU)
f = lambda j: (Lambda_CC[j-1]-float(j)/N)/float(N-j)
return (N-1.)/2. *1/(sum(sum(f(j) for j in range(1,k+1)) for k in range(1,N)))
def save_critical_ratios(str_MU_list,fname):
with open(fname,'w') as f:
for str_MU in str_MU_list:
f.write('%s %.3f \n'%(str_MU,critical_ratio(str_MU)))
def save_fix_probs(DELTA_vals,b_vals,fname,str_MU):
    try: Lambda_CC = np.loadtxt('MU-%s/Lambda_CC'%str_MU)
import unittest
from datetime import datetime
import numpy as np
import netCDF4 as nc
from ocgis.api.operations import OcgOperations
from ocgis.util.shp_cabinet import ShpCabinet
from shapely.geometry.polygon import Polygon
from ocgis import env
from ocgis.api.interpreter import OcgInterpreter
from ocgis.util.inspect import Inspect
class NcSpatial(object):
def __init__(self,resolution,lat_bnds,lon_bnds):
self.resolution = resolution
self.lat_bnds = lat_bnds
self.lon_bnds = lon_bnds
@property
def shift(self):
return(self.resolution/2.0)
@property
def lat_values(self):
return(self.get_centroids(self.lat_bnds))
@property
def lon_values(self):
return(self.get_centroids(self.lon_bnds))
@property
def latb_values(self):
return(self.make_bounds(self.lat_values))
@property
def lonb_values(self):
return(self.make_bounds(self.lon_values))
def get_centroids(self,bounds):
return(np.arange(bounds[0]+self.shift,bounds[1]+self.shift,self.resolution,dtype=float))
def make_bounds(self,arr):
lower = arr - self.shift
upper = arr + self.shift
bounds = np.hstack((lower.reshape(-1,1),upper.reshape(-1,1)))
return(bounds)
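# Illustration added for clarity (not part of the original test module): for a 5-degree
# global grid the centroids sit half a cell inside the bounds, e.g. the first latitude
# centroid is -87.5 with cell bounds (-90.0, -85.0). Guarded with `if False` so it never
# runs on import.
if False:
    _ncsp = NcSpatial(5.0, (-90.0, 90.0), (0.0, 360.0))
    print(_ncsp.lat_values[0], _ncsp.latb_values[0]) # -87.5, [-90. -85.]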
class Test360(unittest.TestCase):
def test_high_res(self):
nc_spatial = NcSpatial(0.5,(-90.0,90.0),(0.0,360.0))
path = self.make_data(nc_spatial)
dataset = {'uri':path,'variable':'foo'}
output_format = 'nc'
snippet = True
geom = self.nebraska
for s_abstraction in ['point','polygon']:
interface = {'s_abstraction':s_abstraction}
ops = OcgOperations(dataset=dataset,output_format=output_format,
geom=geom,snippet=snippet,abstraction=s_abstraction)
ret = OcgInterpreter(ops).execute()
def test_low_res(self):
nc_spatial = NcSpatial(5.0,(-90.0,90.0),(0.0,360.0))
path = self.make_data(nc_spatial)
dataset = {'uri':path,'variable':'foo'}
output_format = 'shp'
geom = self.nebraska
ip = Inspect(dataset['uri'],dataset['variable'])
for s_abstraction in ['point','polygon']:
interface = {'s_abstraction':s_abstraction}
ops = OcgOperations(dataset=dataset,
output_format=output_format,
geom=geom,
abstraction=s_abstraction)
ret = OcgInterpreter(ops).execute()
@property
def nebraska(self):
sc = ShpCabinet()
geom_dict = sc.get_geom_dict('state_boundaries',{'ugid':[16]})
return(geom_dict)
def transform_to_360(self,polygon):
def _transform_lon_(ctup):
lon = ctup[0]
if lon < 180:
lon += 360
return([lon,ctup[1]])
        transformed = list(map(_transform_lon_, polygon.exterior.coords))
new_polygon = Polygon(transformed)
return(new_polygon)
def make_variable(self,varname,arr,dimensions):
var = self.ds.createVariable(varname,arr.dtype,dimensions=dimensions)
var[:] = arr
return(var)
def make_data(self,nc_spatial):
path = '/tmp/test360 {0}.nc'.format(datetime.now())
calendar = 'standard'
units = 'days since 0000-01-01'
time_values = [datetime(2000,m,15) for m in range(1,13)]
time_values = nc.date2num(time_values,units,calendar=calendar)
        level_values = np.array([100,200])
import os
import time
import warnings
import multiprocessing as mp
from typing import List
import pandas as pd
import numpy as np
import scipy
import scipy.stats as stats
import matplotlib.pyplot as plt
from dateutil.relativedelta import relativedelta
from datetime import datetime
from tqdm import tqdm
from pvrpm.core.enums import ConfigKeys as ck
from pvrpm.core.case import SamCase
from pvrpm.core.components import Components
from pvrpm.core.utils import summarize_dc_energy, component_degradation
from pvrpm.core.logger import logger
def cf_interval(alpha: float, std: float, num_samples: int) -> float:
"""
    Calculates the two-tailed margin of error for the given inputs. The margin of error is the value added to and subtracted from the sample mean to obtain the confidence interval.
    Sample sizes less than or equal to 30 use the t score; sample sizes greater than 30 use the z score.
Args:
alpha (float): The significance level for the interval
std (float): The standard deviation of the data
num_samples (int): The number of samples in the data
Returns:
float: The margin of error
"""
# two tails
alpha = alpha / 2
if num_samples > 30:
        score = stats.norm.ppf(1 - alpha) # upper-tail z score, so the margin is positive like the t branch
else:
score = stats.t.ppf(1 - alpha, num_samples - 1)
return score * std / np.sqrt(num_samples)
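# Illustration added for clarity (not part of the original module): for a two-sided 95%
# interval (alpha = 0.05) and a large sample, the margin of error reduces to the familiar
# 1.96 * std / sqrt(num_samples), about 0.39 in magnitude for std = 2 and n = 100.
# Guarded with `if False` so it never runs on import.
if False:
    print(cf_interval(alpha=0.05, std=2.0, num_samples=100)) # ~1.96 * 2 / 10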
def simulate_day(case: SamCase, comp: Components, day: int):
"""
    Updates and increments the simulation by a day, performing all necessary component updates.
Args:
case (:obj:`SamCase`): The current Sam Case of the simulation
comp (:obj:`Components`): The components class containing all the outputs for this simulation
day (int): Current day in the simulation
"""
# static monitoring starts the day, if available. This is updated independently of component levels
comp.update_indep_monitor(day)
for c in ck.component_keys:
if not case.config.get(c, None):
continue
df = comp.comps[c]
# if component can't fail, just continue
if case.config[c][ck.CAN_FAIL]:
# decrement time to failures for operational modules
# fail components when their time has come
comp.update_fails(c, day)
# update monitoring
comp.update_monitor(c, day)
if case.config[c][ck.CAN_REPAIR]:
# repair components when they are done and can be repaired
comp.update_repairs(c, day)
if case.config[c].get(ck.WARRANTY, None):
df["time_left_on_warranty"] -= 1
# availability
if c == ck.GRID:
# for the grid only, the availability is based on the full 24-hour day.
df.loc[df["state"] == 0, "avail_downtime"] += 24
else:
# else, use the sun hours for this day
df.loc[df["state"] == 0, "avail_downtime"] += case.daylight_hours[day % 365]
# module can still degrade even if it cant fail
if case.config[c].get(ck.DEGRADE, None):
df["days_of_degradation"] += 1
df["degradation_factor"] = [
component_degradation(case.config[c][ck.DEGRADE] / 365, d) for d in df["days_of_degradation"]
]
def run_system_realization(
case: SamCase, seed: bool = False, realization_num: int = 0, progress_bar: bool = False, debug: int = 0,
) -> Components:
"""
Run a full realization for calculating costs
Args:
case (:obj:`SamCase`): The loaded and verified case to use with the simulation
seed (bool, Optional): Whether to seed the random number generator, for multiprocessing
realization_num (int, Optional): Current realization number, used for multiprocessing
progress_bar (bool, Optional): Whether to display progress bar during the realization
debug (int, Optional): Whether to save simulation state every `debug` days (0 to turn off)
Returns:
:obj:`Components`: The components object which contains all the data for this realization
"""
if seed:
np.random.seed()
# data storage
comp = Components(case)
lifetime = case.config[ck.LIFETIME_YRS]
if case.config[ck.TRACKING]:
comp.tracker_power_loss_factor[0] = 1
comp.tracker_availability[0] = 1
# initial timestep
comp.module_degradation_factor[0] = comp.current_degradation()
comp.dc_power_availability[0] = comp.dc_availability()
comp.ac_power_availability[0] = comp.ac_availability()
if progress_bar:
iterator = tqdm(
range(1, lifetime * 365),
ascii=True,
desc=f"Running realization {realization_num}",
unit="day",
position=mp.current_process()._identity[0],
leave=False,
)
else:
logger.info(f"Running realization {realization_num}...")
iterator = range(1, lifetime * 365)
for i in iterator:
# calculate new labor rate each year
if i == 1 or i % 365 == 0:
year = np.floor(i / 365)
inflation = np.power(1 + case.config[ck.INFLATION] / 100, year)
comp.update_labor_rates(case.config[ck.LABOR_RATE] * inflation)
# Decided to remove since it doesnt make sense for only trackers to rise with inflation and not
# all other failures. Plus, this was broken.
# need to store original cost of tracker failures for each failure and increase based on that cost
# also need to take in concurrent failures
# if case.config[ck.TRACKING]:
# for fail in case.config[ck.TRACKER][ck.FAILURE].keys():
# case.config[ck.TRACKER][ck.FAILURE][fail][ck.COST] *= inflation
# save state if debugging
if debug > 0 and i % debug == 0:
state_dict = comp.snapshot()
folder = f"debug_day_{i}"
save_path = os.path.join(case.config[ck.RESULTS_FOLDER], folder)
os.makedirs(save_path, exist_ok=True)
for key, val in state_dict.items():
val.to_csv(os.path.join(save_path, f"{key}_state.csv"), index=True)
# timestep is applied each day
simulate_day(case, comp, i)
if case.config[ck.TRACKING]:
comp.tracker_availability[i], comp.tracker_power_loss_factor[i] = comp.tracker_power_loss(i)
comp.module_degradation_factor[i] = comp.current_degradation()
comp.dc_power_availability[i] = comp.dc_availability()
comp.ac_power_availability[i] = comp.ac_availability()
# create same performance adjustment tables for avail, degradation, tracker losses
if case.config[ck.TRACKING]:
daily_dc_loss = 100 * (
1 - (comp.dc_power_availability * comp.module_degradation_factor * comp.tracker_power_loss_factor)
)
else:
daily_dc_loss = 100 * (1 - (comp.dc_power_availability * comp.module_degradation_factor))
daily_ac_loss = 100 * (1 - comp.ac_power_availability)
case.value("en_dc_lifetime_losses", 1)
case.value("dc_lifetime_losses", list(daily_dc_loss))
case.value("en_ac_lifetime_losses", 1)
case.value("ac_lifetime_losses", list(daily_ac_loss))
o_m_yearly_costs = np.zeros(lifetime)
for c in ck.component_keys:
if not case.config.get(c, None):
continue
comp_yearly_cost = np.sum(np.reshape(comp.costs[c], (lifetime, 365)), axis=1)
o_m_yearly_costs += comp_yearly_cost
case.value("om_fixed", list(o_m_yearly_costs))
case.simulate()
# add the results of the simulation to the components class and return
comp.timeseries_dc_power = case.output("dc_net")
comp.timeseries_ac_power = case.value("gen")
comp.lcoe = case.output("lcoe_real")
comp.npv = case.get_npv()
# remove the first element from cf_energy_net because it is always 0, representing year 0
comp.annual_energy = np.array(case.output("cf_energy_net")[1:])
# more results, for graphing and what not
try:
comp.tax_cash_flow = case.output("cf_after_tax_cash_flow")
except AttributeError:
comp.tax_cash_flow = case.output("cf_pretax_cashflow")
for loss in ck.losses:
try:
comp.losses[loss] = case.output(loss)
except Exception:
comp.losses[loss] = 0
return comp
def gen_results(case: SamCase, results: List[Components]) -> List[pd.DataFrame]:
"""
Generates results for the given SAM case and list of component objects containing the results of each realization.
Args:
case (:obj:`SamCase`): The loaded and verified case to use with the simulation
results (:obj:`list(Components)`): List of component objects that contain the results for each realization
Returns:
:obj:`list(pd.DataFrame)`: List of dataframes containing the results.
Note:
The order of the returned dataframes is:
- Summary Results
- Degradation Results
- DC Power
- AC Power
- Yearly Costs
- Yearly Failures
"""
summary_index = ["Base Case"]
summary_data = {"lcoe": [case.base_lcoe], "npv": [case.base_npv]}
lifetime = case.config[ck.LIFETIME_YRS]
p_vals = [99, 95, 90, 75, 50, 10]
# ac energy
cumulative_ac_energy = np.cumsum(case.base_annual_energy)
for i in range(int(lifetime)):
summary_data[f"annual_ac_energy_{i+1}"] = [case.base_annual_energy[i]]
# split up so the order of columns is nicer
for i in range(int(lifetime)):
summary_data[f"cumulative_ac_energy_{i+1}"] = [cumulative_ac_energy[i]]
# dc energy
for i in range(len(case.base_dc_energy)):
summary_data[f"dc_energy_{i+1}"] = [case.base_dc_energy[i]]
# TODO: also, need to clean this up, i just use dictionaries and fill in blanks for base case, but this can be much cleaner
# per realization results
day_index = np.arange(lifetime * 365) + 1
timeseries_index = np.arange(len(results[0].timeseries_dc_power))
year_index = np.arange(lifetime) + 1
yearly_cost_index = []
degradation_data = {}
timeseries_dc_data = {}
timeseries_ac_data = {}
yearly_cost_data = {}
yearly_fail_data = {}
for i, comp in enumerate(results):
# daily degradation
degradation_data[f"Realization {i+1}"] = comp.module_degradation_factor
# power
timeseries_dc_data[f"Realization {i+1}"] = comp.timeseries_dc_power
timeseries_ac_data[f"Realization {i+1}"] = comp.timeseries_ac_power
# yearly cost and total fails for each component
yearly_cost_index.append(f"Realization {i+1}")
for c in ck.component_keys:
if not case.config.get(c, None):
continue
if c not in yearly_cost_data:
yearly_cost_data[c] = []
if c not in yearly_fail_data:
yearly_fail_data[c] = []
yearly_cost_data[c] += list(np.sum(np.reshape(comp.costs[c], (lifetime, 365)), axis=1))
# add total fails per year for each failure mode for this component level
total_fails = np.zeros(lifetime * 365)
for f in comp.summarize_failures(c).values():
total_fails += f
yearly_fail_data[c] += list(np.sum(np.reshape(total_fails, (lifetime, 365)), axis=1))
# summary
summary_index.append(f"Realization {i+1}")
summary_data["lcoe"] += [comp.lcoe]
summary_data["npv"] += [comp.npv]
# ac energy
# remove the first element from cf_energy_net because it is always 0, representing year 0
cumulative_ac_energy = np.cumsum(comp.annual_energy)
for i in range(int(lifetime)):
summary_data[f"annual_ac_energy_{i+1}"] += [comp.annual_energy[i]]
summary_data[f"cumulative_ac_energy_{i+1}"] += [cumulative_ac_energy[i]]
# dc energy
dc_energy = summarize_dc_energy(comp.timeseries_dc_power, lifetime)
for i in range(len(dc_energy)):
summary_data[f"dc_energy_{i+1}"] += [dc_energy[i]]
# calculate total failures, availability, mttr, mtbf, etc
for c in ck.component_keys:
if not case.config.get(c, None):
continue
if f"{c}_total_failures" not in summary_data:
summary_data[f"{c}_total_failures"] = [None] # no failures for base case
if f"{c}_mtbf" not in summary_data:
summary_data[f"{c}_mtbf"] = [None]
if f"{c}_mttr" not in summary_data:
summary_data[f"{c}_mttr"] = [None]
if f"{c}_mttd" not in summary_data:
summary_data[f"{c}_mttd"] = [None]
if case.config[c][ck.CAN_FAIL]:
sum_fails = comp.comps[c]["cumulative_failures"].sum()
summary_data[f"{c}_total_failures"] += [sum_fails]
for fail in case.config[c].get(ck.FAILURE, {}).keys():
if f"{c}_failures_by_type_{fail}" not in summary_data:
summary_data[f"{c}_failures_by_type_{fail}"] = [None]
summary_data[f"{c}_failures_by_type_{fail}"] += [comp.comps[c][f"failure_by_type_{fail}"].sum()]
# partial failures
for fail in case.config[c].get(ck.PARTIAL_FAIL, {}).keys():
if f"{c}_failures_by_type_{fail}" not in summary_data:
summary_data[f"{c}_failures_by_type_{fail}"] = [None]
summary_data[f"{c}_failures_by_type_{fail}"] += [comp.comps[c][f"failure_by_type_{fail}"].sum()]
# if the component had no failures, set everything here and continue
if sum_fails == 0:
summary_data[f"{c}_mtbf"] += [lifetime * 365]
summary_data[f"{c}_mttr"] += [0]
summary_data[f"{c}_mttd"] += [0]
else:
# mean time between failure
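# MTBF here = (simulated days * number of components) / total failures, in days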
summary_data[f"{c}_mtbf"] += [lifetime * 365 * case.config[c][ck.NUM_COMPONENT] / sum_fails]
# mean time to repair
if case.config[c][ck.CAN_REPAIR]:
# take the number of fails minus whatever components have not been repaired by the end of the simulation to get the number of repairs
sum_repairs = sum_fails - len(comp.comps[c].loc[(comp.comps[c]["state"] == 0)])
if sum_repairs > 0:
summary_data[f"{c}_mttr"] += [comp.total_repair_time[c] / sum_repairs]
else:
summary_data[f"{c}_mttr"] += [0]
else:
summary_data[f"{c}_mttr"] += [0]
# mean time to detection (mean time to acknowledge)
if (
case.config[c][ck.CAN_MONITOR]
or case.config[c].get(ck.COMP_MONITOR, None)
or case.config[c].get(ck.INDEP_MONITOR, None)
):
# take the number of fails minus the components that have not been repaired and also not been detected by monitoring
mask = (comp.comps[c]["state"] == 0) & (comp.comps[c]["time_to_detection"] > 1)
sum_monitor = sum_fails - len(comp.comps[c].loc[mask])
if sum_monitor > 0:
summary_data[f"{c}_mttd"] += [comp.total_monitor_time[c] / sum_monitor]
else:
summary_data[f"{c}_mttd"] += [0]
else:
summary_data[f"{c}_mttd"] += [0]
else:
# mean time between failure
summary_data[f"{c}_total_failures"] += [0]
summary_data[f"{c}_mtbf"] += [lifetime * 365]
summary_data[f"{c}_mttr"] += [0]
summary_data[f"{c}_mttd"] += [0]
# availability
if f"{c}_availability" not in summary_data:
summary_data[f"{c}_availability"] = [None]
summary_data[f"{c}_availability"] += [
(
1
- (comp.comps[c]["avail_downtime"].sum() / (lifetime * case.annual_daylight_hours))
/ case.config[c][ck.NUM_COMPONENT]
)
]
# generate dataframes
summary_results = pd.DataFrame(index=summary_index, data=summary_data)
summary_results.index.name = "Realization"
# reorder columns for summary results
reorder = list(summary_results.columns[0:2]) # lcoe and npv
reorder += list(summary_results.columns[lifetime * 3 + 2 :]) # failures and avail
reorder += list(summary_results.columns[2 : lifetime * 3 + 2]) # energy
summary_results = summary_results[reorder]
degradation_results = pd.DataFrame(index=day_index, data=degradation_data)
dc_power_results = pd.DataFrame(index=timeseries_index, data=timeseries_dc_data)
ac_power_results = pd.DataFrame(index=timeseries_index, data=timeseries_ac_data)
dc_power_results.index.name = "Hour"
ac_power_results.index.name = "Hour"
degradation_results.index.name = "Day"
cost_index = pd.MultiIndex.from_product([yearly_cost_index, year_index], names=["Realization", "Year"])
yearly_cost_results = pd.DataFrame(index=cost_index, data=yearly_cost_data)
yearly_cost_results["total"] = yearly_cost_results.sum(axis=1)
# fails per year, same multi index as cost
yearly_fail_results = pd.DataFrame(index=cost_index, data=yearly_fail_data)
yearly_fail_results["total"] = yearly_fail_results.sum(axis=1)
stats_append = []
summary_no_base = summary_results.iloc[1:]
min = summary_no_base.min()
min.name = "min"
stats_append.append(min)
max = summary_no_base.max()
max.name = "max"
stats_append.append(max)
mean = summary_no_base.mean()
mean.name = "mean"
stats_append.append(mean)
median = summary_no_base.median()
median.name = "median"
stats_append.append(median)
std = summary_no_base.std()
std.name = "stddev"
stats_append.append(std)
conf_interval = case.config[ck.CONF_INTERVAL]
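# cf_interval is assumed to return the half-width of the confidence interval
# (roughly z * stddev / sqrt(num realizations)); the bounds below are mean -/+ that half-width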
conf_int = cf_interval(1 - (conf_interval / 100), std, case.config[ck.NUM_REALIZATION])
lower_conf = mean - conf_int
lower_conf.name = f"{conf_interval}% lower confidence interval of mean"
stats_append.append(lower_conf)
upper_conf = mean + conf_int
upper_conf.name = f"{conf_interval}% upper confidence interval of mean"
stats_append.append(upper_conf)
# P-value calculation, using the ppf of the normal distribution with our calculated mean and std. We use scipy's functions for this
# see https://help.helioscope.com/article/141-creating-a-p50-and-p90-with-helioscope
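# e.g. P90 is the value exceeded in ~90% of realizations, computed below as
# stats.norm.ppf(1 - 90/100, loc=mean, scale=std), i.e. the 10th percentile of the fitted normal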
for p in p_vals:
values = []
# calculate the p value for every column
for m, s in zip(mean, std):
if s != 0: # for columns with no STDDEV
values.append(stats.norm.ppf((1 - p / 100), loc=m, scale=s))
else:
values.append(None)
# save results
values = pd.Series(values, index=mean.index)
values.name = f"P{p}"
stats_append.append(values)
# since pandas is deprecating DataFrame.append, convert the series into dataframes and concat
summary_results = pd.concat([summary_results, *[s.to_frame().transpose() for s in stats_append]])
return [
summary_results,
degradation_results,
dc_power_results,
ac_power_results,
yearly_cost_results,
yearly_fail_results,
]
def graph_results(case: SamCase, results: List[Components], save_path: str = None) -> None:
"""
Generate graphs from a list of Component objects from each realization
Args:
case (:obj:`SamCase`): The loaded and verified case to use with the simulation
results (:obj:`list(Components)`): List of component objects that contain the results for each realization
save_path (str, Optional): Path to save graphs to, if provided
"""
lifetime = case.config[ck.LIFETIME_YRS]
colors = [
"r",
"g",
"b",
"c",
"m",
"y",
"k",
"tab:orange",
"tab:brown",
"lime",
"tab:gray",
"indigo",
"navy",
"pink",
"coral",
"yellow",
"teal",
"fuchsia",
"palegoldenrod",
"darkgreen",
]
# base case data to compare to
base_losses = case.base_losses
base_load = np.array(case.base_load) if case.base_load is not None else None
base_ac_energy = np.array(case.base_ac_energy)
base_annual_energy = np.array(case.base_annual_energy)
base_tax_cash_flow = np.array(case.base_tax_cash_flow)
# parse data
avg_ac_energy = np.zeros(len(case.base_ac_energy)) # since length is variable based on frequency of weather file
avg_annual_energy = np.zeros(lifetime)
avg_losses = np.zeros(len(ck.losses))
avg_tax_cash_flow = np.zeros(lifetime + 1) # add 1 for year 0
avg_failures = np.zeros((len(ck.component_keys), lifetime * 365)) # one row per component type
# computing the average across every realization
for comp in results:
avg_ac_energy += np.array(comp.timeseries_ac_power)
avg_annual_energy += np.array(comp.annual_energy)
avg_losses += np.array(list(comp.losses.values()))
avg_tax_cash_flow += np.array(comp.tax_cash_flow)
for i, c in enumerate(ck.component_keys):
if not case.config.get(c, None):
continue
for f in comp.summarize_failures(c).values():
avg_failures[i] += f
# monthly and annual energy
avg_ac_energy /= len(results)
avg_annual_energy /= len(results)
avg_losses /= len(results)
avg_tax_cash_flow /= len(results)
avg_failures /= len(results)
# sum up failures to be per year
avg_failures = np.sum(np.reshape(avg_failures, (len(ck.component_keys), lifetime, 365)), axis=2)
# determine the frequency of the data, same as the frequency of the supplied weather file
total = int(len(avg_ac_energy) / lifetime)
if total == 8760:
freq = 1
else:
freq = 0
while total > 8760:
freq += 1
total /= freq
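# freq ends up as the number of weather-file samples per hour (1 for hourly, 2 for 30-minute
# data), so slicing with [0::freq] below downsamples the series back to hourly values
# (assumption based on how freq is used; the loop expects the sample count to divide cleanly)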
avg_ac_energy = np.reshape(avg_ac_energy[0::freq], (lifetime, 8760)) # yearly energy by hour
avg_ac_energy = np.sum(avg_ac_energy, axis=0) / lifetime # yearly energy average
avg_ac_energy = np.reshape(avg_ac_energy, (365, 24)) # day energy by hour
avg_day_energy_by_hour = avg_ac_energy.copy() # copy for heatmap yearly energy generation
avg_ac_energy = np.sum(avg_ac_energy, axis=1) # energy per day
base_ac_energy = np.reshape(base_ac_energy[0::freq], (lifetime, 8760))
base_ac_energy = np.sum(base_ac_energy, axis=0) / lifetime
base_ac_energy = np.reshape(base_ac_energy, (365, 24))
import numpy as np
import pandas as pd
import sys
np.random.seed(0)
def one_hot_encoder(Y):
new_Y = np.zeros((Y.shape[0], np.max(Y)+1))
new_Y[np.arange(Y.shape[0]), Y] = 1
return new_Y
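# Example: one_hot_encoder(np.array([0, 2, 1])) ->
# [[1, 0, 0],
#  [0, 0, 1],
#  [0, 1, 0]]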
#Sigmoid function
def sigmoid(Z):
return 1/(1+np.exp(-Z))
#Tanh function, can be written in terms of sigmoid
def tanh(Z):
return np.tanh(Z)
#Relu function
def relu(Z):
return Z*(Z > 0)
def activate(Z, activation):
if(activation == 'sigmoid'):
return sigmoid(Z)
elif(activation == 'relu'):
return relu(Z)
else:
return tanh(Z)
def derivative_activate(A, activation):
if(activation == 'sigmoid'):
return (A*(1-A))
elif(activation == 'relu'):
return ((A>0)).astype(int)
else:
return (1-A*A)
def init_network(M_data, num_labels, num_hidden_layers, hidden_layer_sizes, activation):
W = dict() #W[l] means the weights between layer l and layer l-1. Dimension = (#units in layer l-1 x #units in layer l)
b = dict() #b[l] means the bias value added for each unit of layer l
#Will use 1-based indexing for weights, since hidden-layer are also 1-based indexed
L = num_hidden_layers
if(L != 0):
#Will use Xavier initialization of weights
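#He init for relu: Var(W) = 2/n_in; Xavier init for tanh/sigmoid: Var(W) = 1/n_in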
if(activation == 'relu'):
W[1] = np.random.randn(M_data, hidden_layer_sizes[0])*np.sqrt(2.0/M_data) #Factor of 2 helps in case of relu activation function
else:
W[1] = np.random.randn(M_data, hidden_layer_sizes[0])*np.sqrt(1.0/M_data)
b[1] = np.zeros((1, hidden_layer_sizes[0]))
for i in range(1, L):
if(activation == 'relu'):
W[i+1] = np.random.randn(hidden_layer_sizes[i-1], hidden_layer_sizes[i])*np.sqrt(2.0/hidden_layer_sizes[i-1])
else:
W[i+1] = np.random.randn(hidden_layer_sizes[i-1], hidden_layer_sizes[i])*np.sqrt(1.0/hidden_layer_sizes[i-1])
import datetime
import numpy as np
import matplotlib.pyplot as plt
from numpy.lib.function_base import append
import sympy as sp
from multiprocessing import Pool
import os
import cppsolver as cs
from tqdm import tqdm
from ..filter import Magnet_UKF, Magnet_KF
from ..solver import Solver, Solver_jac
class Simu_Data:
def __init__(self, gt, snr, result):
self.gt = gt
self.snr = snr
self.result = result
def __len__(self):
return self.gt.shape[0]
def store(self):
np.savez('result/test.npz', gt=self.gt, data=self.result)
class expression:
def __init__(self, mag_count=1):
if mag_count == 1:
x, y, z, M, theta, phy, gx, gy, gz, xs, ys, zs = sp.symbols(
'x, y, z, M, theta, phy, gx, gy, gz, xs, ys, zs', real=True)
G = sp.Matrix([[gx], [gy], [gz]])
# theta2 = sp.tanh(theta)
# phy2 = sp.tanh(phy)
vecR = sp.Matrix([xs - x, ys - y, zs - z]).reshape(3, 1)
# vecR = sp.Matrix([x, y, z]).reshape(3, 1)
dis = sp.sqrt(vecR[0]**2 + vecR[1]**2 + vecR[2]**2)
# VecM = M*sp.Matrix([sp.sin(theta2)*sp.cos(phy2),
# sp.sin(theta2)*sp.sin(phy2), sp.cos(theta2)])
VecM = 1e-7 * sp.exp(M) * sp.Matrix([
sp.sin(theta) * sp.cos(phy),
sp.sin(theta) * sp.sin(phy),
sp.cos(theta)
])
VecB = 3 * vecR * (VecM.T * vecR) / dis**5 - VecM / dis**3 + G
VecB *= 1e6
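# Point-dipole field: B = (mu0/4pi) * (3 r (m . r) / |r|^5 - m / |r|^3) + G,
# where mu0/4pi = 1e-7 in SI units; the 1e6 factor expresses the field in microtesla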
# convert to function for faster evaluation
self.VecB = sp.lambdify(
[gx, gy, gz, xs, ys, zs, x, y, z, M, theta, phy],
VecB, 'numpy')
elif mag_count == 2:
x0, y0, z0, M0, theta0, phy0, x1, y1, z1, M1, theta1, phy1, gx, gy, gz, xs, ys, zs = sp.symbols(
'x0, y0, z0, M0, theta0, phy0, x1, y1, z1, M1, theta1, phy1, gx, gy, gz, xs, ys, zs', real=True)
G = sp.Matrix([[gx], [gy], [gz]])
# theta2 = sp.tanh(theta)
# phy2 = sp.tanh(phy)
x = [x0, x1]
y = [y0, y1]
z = [z0, z1]
M = [M0, M1]
theta = [theta0, theta1]
phy = [phy0, phy1]
VecB = G
for i in range(mag_count):
vecR = sp.Matrix(
[xs - x[i], ys - y[i], zs - z[i]]).reshape(3, 1)
# vecR = sp.Matrix([x, y, z]).reshape(3, 1)
dis = sp.sqrt(vecR[0] ** 2 + vecR[1] ** 2 + vecR[2] ** 2)
# VecM = M*sp.Matrix([sp.sin(theta2)*sp.cos(phy2),
# sp.sin(theta2)*sp.sin(phy2), sp.cos(theta2)])
VecMi = 1e-7 * sp.exp(M[i]) * sp.Matrix([sp.sin(theta[i]) * sp.cos(
phy[i]), sp.sin(theta[i]) * sp.sin(phy[i]), sp.cos(theta[i])])
VecBi = 3 * vecR * (VecMi.T * vecR) / \
dis ** 5 - VecMi / dis ** 3
VecB += VecBi
VecB = 1e6 * VecB
# convert to function for faster evaluation
self.VecB = sp.lambdify(
[gx, gy, gz, xs, ys, zs, x0, y0, z0, M0, theta0, phy0, x1, y1,
z1, M1, theta1, phy1],
VecB, 'numpy')
class Result_Handler:
def __init__(self, simu_data, scale):
self.track_result = []
self.simu_data = simu_data
self.scale = scale
def __add__(self, new):
self.track_result.append(new)
return self
def get_gt_result(self):
a = self.simu_data.gt
b = []
for i in range(len(self.track_result)):
b.append(np.array([
self.track_result[i]['X0'], self.track_result[i]['Y0'],
self.track_result[i]['Z0']
]))
b = np.stack(b)
return [a, b]
def cal_loss(self):
dist = []
loss = []
for i in range(len(self.simu_data)):
point_gt = self.simu_data.gt[i]
point_estimate = np.array([
self.track_result[i]['X0'], self.track_result[i]['Y0'],
self.track_result[i]['Z0']
])
dist.append(np.linalg.norm(point_gt, 2))
loss.append(np.linalg.norm(point_gt - point_estimate, 2))
dist = 1e2 * np.array(dist)
loss = 1e2 * np.array(loss)
return [self.scale, dist, loss]
def gt_and_route(self):
dist = []
route = []
for i in range(len(self.simu_data)):
point_gt = self.simu_data.gt[i]
dist.append(np.linalg.norm(point_gt, 2))
route.append(np.array([
self.track_result[i]['X0'], self.track_result[i]['Y0'],
self.track_result[i]['Z0']
]))
dist = np.array(dist)
route = np.stack(route, axis=0)
idx = np.argsort(dist)
gt = self.simu_data.gt[idx]
route = route[idx]
return [gt, route]
# plt.plot(dist, loss, label='scale = {}'.format(self.scale))
# plt.legend()
# print('debug')
class Simu_Test:
def __init__(self, start, stop, scales, pSensor=None, resolution=100):
self.scales = scales
self.M = 2.7
self.build_route(start, stop, resolution)
if pSensor is None:
self.build_psensor()
else:
self.pSensor = pSensor
# self.build_expression()
self.params = {
'm': np.log(self.M),
'theta': 0,
'phy': 0,
'gx': 50 / np.sqrt(2) * 1e-6,
'gy': 50 / np.sqrt(2) * 1e-6,
'gz': 0,
}
def build_expression(self):
x, y, z, M, theta, phy, gx, gy, gz, xs, ys, zs = sp.symbols(
'x, y, z, M, theta, phy, gx, gy, gz, xs, ys, zs', real=True)
G = sp.Matrix([[gx], [gy], [gz]])
# theta2 = sp.tanh(theta)
# phy2 = sp.tanh(phy)
vecR = sp.Matrix([xs - x, ys - y, zs - z]).reshape(3, 1)
# vecR = sp.Matrix([x, y, z]).reshape(3, 1)
dis = sp.sqrt(vecR[0]**2 + vecR[1]**2 + vecR[2]**2)
# VecM = M*sp.Matrix([sp.sin(theta2)*sp.cos(phy2),
# sp.sin(theta2)*sp.sin(phy2), sp.cos(theta2)])
VecM = 1e-7 * sp.exp(M) * sp.Matrix([
sp.sin(theta) * sp.cos(phy),
sp.sin(theta) * sp.sin(phy),
sp.cos(theta)
])
VecB = 3 * vecR * (VecM.T * vecR) / dis**5 - VecM / dis**3 + G
VecB *= 1e6
# convert to function for faster evaluation
self.VecB = sp.lambdify(
[gx, gy, gz, xs, ys, zs, x, y, z, M, theta, phy], VecB, 'numpy')
def build_route(self, start, stop, resolution):
# linear route
theta = 90 / 180.0 * np.pi
route = np.linspace(start, stop, resolution)
route = np.stack([route * np.cos(theta), route * np.sin(theta)]).T
route = np.pad(route, ((0, 0), (1, 0)),
mode='constant',
constant_values=0)
self.route = 1e-2 * route
# curvy route
tmp = np.linspace(start, stop, resolution)
route = np.stack([np.sin((tmp-start)/(stop-start) * np.pi * 5),
np.cos((tmp-start)/(stop-start) * np.pi * 5), tmp], axis=0).T
self.route = 1e-2 * route
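# note: this curvy route overwrites the linear route above, so only the curvy
# route is actually used in the simulations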
def build_psensor(self):
self.pSensor = 1e-2 * np.array([
[1, 1, 1],
[-1, 1, 1],
[-1, -1, 1],
[1, -1, 1],
[1, 1, -1],
[-1, 1, -1],
[-1, -1, -1],
[1, -1, -1],
])
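# eight sensors at the corners of a 2 cm cube centered at the origin
# (coordinates are in meters after the 1e-2 scaling)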
def simulate_process(self, scale):
print(scale)
pSensori = scale * self.pSensor
simu = self.estimate_B(pSensori)
simu.store()
model = Solver_jac(1, self.route[0, 0], self.route[0, 1],
self.route[0, 2])
model.fit_params['m0'].value = np.log(self.M)
model.fit_params['m0'].vary = False
results = Result_Handler(simu, scale)
for i in range(simu.result.shape[0]):
datai = simu.result[i].reshape(-1, 3)
result = model.solve(datai, pSensori,
not model.fit_params['m0'].vary)
results += result
return results.cal_loss()
def gt_and_result(self):
pSensori = 1 * self.pSensor
simu = self.estimate_B(pSensori)
simu.store()
model = Solver_jac(1, self.route[0, 0], self.route[0, 1],
self.route[0, 2])
model.fit_params['m0'].value = np.log(self.M)
model.fit_params['m0'].vary = False
results = Result_Handler(simu, 1)
for i in range(simu.result.shape[0]):
datai = simu.result[i].reshape(-1, 3)
result = model.solve(datai, pSensori,
not model.fit_params['m0'].vary)
results += result
return results.get_gt_result()
def compare_noise_thread(self, choice):
scale = 5
pSensori = scale * self.pSensor
if choice == 1:
simu = self.estimate_B(pSensori)
elif choice == 0:
simu = self.estimate_B_even_noise(pSensori)
elif choice == 2:
simu = self.estimate_B_singular_noise(pSensori)
model = Solver_jac(1, self.route[0, 0], self.route[0, 1],
self.route[0, 2])
model.fit_params['m0'].value = np.log(self.M)
model.fit_params['m0'].vary = False
results = Result_Handler(simu, scale)
for i in range(simu.result.shape[0]):
datai = simu.result[i].reshape(-1, 3)
result = model.solve(datai, pSensori,
not model.fit_params['m0'].vary)
results += result
[tmp, dist, loss] = results.cal_loss()
return [choice, dist, loss]
def compare_3_noise(self, loop):
results = []
pool = Pool()
for i in range(loop):
# self.calculate_process(scale)
results.append(
pool.apply_async(self.compare_noise_thread, args=(0, )))
results.append(
pool.apply_async(self.compare_noise_thread, args=(1, )))
results.append(
pool.apply_async(self.compare_noise_thread, args=(2, )))
pool.close()
pool.join()
# print('debug')
loss_dict = {}
dist_dict = {}
for result in results:
[scale, dist, loss] = result.get()
if not str(scale) in loss_dict.keys():
loss_dict[str(scale)] = loss
dist_dict[str(scale)] = dist
else:
loss_dict[str(scale)] += loss
msg = ['Even Noise', 'Raw Noise', 'Single Noise']
for key in dist_dict.keys():
plt.plot(dist_dict[key],
loss_dict[key] / loop,
label=msg[int(key)])
plt.legend()
plt.ylabel('Error(cm)')
plt.xlabel('Distance(cm)')
plt.savefig('result/butterfly.jpg', dpi=900)
def compare_noise_type(self, loop):
results = []
pool = Pool()
for i in range(loop):
# self.calculate_process(scale)
results.append(
pool.apply_async(self.compare_noise_type_thread, args=(0, )))
results.append(
pool.apply_async(self.compare_noise_type_thread, args=(1, )))
results.append(
pool.apply_async(self.compare_noise_type_thread, args=(2, )))
pool.close()
pool.join()
# print('debug')
loss_dict = {}
dist_dict = {}
for result in results:
[scale, dist, loss] = result.get()
if not str(scale) in loss_dict.keys():
loss_dict[str(scale)] = loss
dist_dict[str(scale)] = dist
else:
loss_dict[str(scale)] += loss
msg = ['ALL Noise', 'Only Noise', 'Only Precision']
for key in dist_dict.keys():
plt.plot(dist_dict[key],
loss_dict[key] / loop,
label=msg[int(key)])
plt.legend()
plt.ylabel('Error(cm)')
plt.xlabel('Distance(cm)')
plt.savefig('result/compare_noise_type.jpg', dpi=900)
def compare_noise_type_thread(self, choice):
scale = 5
pSensori = scale * self.pSensor
simu = self.estimate_B(pSensori, noise_type=choice)
model = Solver_jac(1, self.route[0, 0], self.route[0, 1],
self.route[0, 2])
model.fit_params['m0'].value = np.log(self.M)
model.fit_params['m0'].vary = False
results = Result_Handler(simu, scale)
for i in range(simu.result.shape[0]):
datai = simu.result[i].reshape(-1, 3)
result = model.solve(datai, pSensori,
not model.fit_params['m0'].vary)
results += result
[tmp, dist, loss] = results.cal_loss()
return [choice, dist, loss]
def simulate(self, loop=1):
results = []
pool = Pool()
for scale in self.scales:
# self.calculate_process(scale)
# test(self, scale)
for i in range(loop):
# self.simulate_process(scale)
results.append(
pool.apply_async(self.simulate_process, args=(scale, )))
pool.close()
pool.join()
# print('debug')
loss_dict = {}
dist_dict = {}
for result in results:
[scale, dist, loss] = result.get()
if not str(scale) in loss_dict.keys():
loss_dict[str(scale)] = loss
dist_dict[str(scale)] = dist
else:
loss_dict[str(scale)] += loss
for key in dist_dict.keys():
plt.plot(dist_dict[key],
loss_dict[key] / loop,
label='scale = {} cm'.format(int(key) * 2))
plt.legend()
plt.ylabel('Error(cm)')
plt.xlabel('Distance(cm)')
name = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
plt.savefig('result/compare_scale/{}.jpg'.format(name), dpi=900)
def simu_readings(self, pSensor):
simu = self.estimate_B(pSensor, noise_type=3)
simu.store()
def simu_gt_and_result(self, pSensor, route, path, name):
pSensori = pSensor
simu = self.estimate_B(pSensori, route=route)
# simu.store()
# params = np.array([40 / np.sqrt(2) * 1e-6, 40 / np.sqrt(2) * 1e-6, 0, np.log(
# self.M), 1e-2 * route[0, 0], 1e-2 * (route[0, 1]), 1e-2 * (route[0,
# 2]), 0, 0])
model = Solver_jac(1, route[0, 0], route[0, 1], route[0, 2])
model.fit_params['m0'].value = np.log(self.M)
model.fit_params['m0'].vary = False
gt_ang = []
rec_ang = []
results = Result_Handler(simu, 1)
for i in tqdm(range(simu.result.shape[0])):
datai = simu.result[i].reshape(-1, 3)
result = model.solve(datai, pSensori,
not model.fit_params['m0'].vary)
results += result
gt_ang.append(np.array([0, 0, 1]))
t1 = result['theta0'].value
t2 = result['phy0'].value
rec_ang.append(
np.array(
[np.sin(t1) * np.cos(t2),
np.sin(t1) * np.sin(t2),
np.cos(t1)]))
[gt, route] = results.gt_and_route()
gt_ang = np.stack(gt_ang)
rec_ang = np.stack(rec_ang)
if not os.path.exists(path):
os.makedirs(path)
np.savez(os.path.join(path, name), gt=gt * 1e2, result=route *
1e2, gt_ang=gt_ang, result_ang=rec_ang)
def compare_layout_thread(self, index, pSensori):
overall_noise = np.random.randn(3)
simu = self.estimate_B(pSensori)
model = Solver_jac(1, self.route[0, 0], self.route[0, 1],
self.route[0, 2])
model.fit_params['m0'].value = np.log(self.M)
model.fit_params['m0'].vary = False
results = Result_Handler(simu, 1)
for i in range(simu.result.shape[0]):
datai = simu.result[i].reshape(-1, 3)
result = model.solve(datai, pSensori,
not model.fit_params['m0'].vary)
results += result
[tmp, dist, loss] = results.cal_loss()
return [index, dist, loss]
def compare_layouts(self, pSensors, loop=1):
results = []
pool = Pool()
for index, pSensor in enumerate(pSensors):
# self.calculate_process(scale)
# test(self, scale)
for i in range(loop):
# self.calculate_process(scale)
# self.compare_layout_thread(index, pSensor)
results.append(
pool.apply_async(self.compare_layout_thread,
args=(index, pSensor)))
pool.close()
pool.join()
# print('debug')
loss_dict = {}
dist_dict = {}
for result in results:
[scale, dist, loss] = result.get()
if not str(scale) in loss_dict.keys():
loss_dict[str(scale)] = loss
dist_dict[str(scale)] = dist
else:
loss_dict[str(scale)] += loss
# msg = ['Plane Layout(MIT)', 'Our Current Layout', 'Cube Layout']
msg = ['Best Layout', 'Current Layout']
for key in dist_dict.keys():
plt.plot(dist_dict[key],
loss_dict[key] / loop,
label=msg[int(key)])
plt.legend()
plt.ylabel('Error(cm)')
plt.xlabel('Distance(cm)')
name = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
# plt.savefig('result/compare_layout/{}.jpg'.format(name), dpi=900)
plt.show()
def estimate_B(
self,
pSensor,
route=None,
noise_type=0,
overall_noise=None):
# noise type: 0: noise+precision, 1:only noise, 2: only precision
# 3:none
result = []
exp = expression()
if route is None:
route = self.route
for i in range(route.shape[0]):
routei = route[i]
tmp = []
for j in range(pSensor.shape[0]):
param = [
self.params['gx'], self.params['gy'], self.params['gz'],
pSensor[j][0], pSensor[j][1], pSensor[j][2], routei[0],
routei[1], routei[2], self.params['m'],
self.params['theta'], self.params['phy']
]
tmp.append(exp.VecB(*param).squeeze())
tmp = np.concatenate(tmp, axis=0).reshape(-1)
result.append(tmp)
result = np.concatenate(result, axis=0).reshape(-1, 3)
Noise_x = 0.8 * np.random.randn(result.shape[0])
Noise_y = 0.8 * np.random.randn(result.shape[0])
Noise_z = 1.2 * np.random.randn(result.shape[0])
Noise = np.stack([Noise_x, Noise_y, Noise_z]).T
if noise_type != 3:
if noise_type != 2:
result += Noise
if overall_noise is not None:
result += overall_noise
# add sensor resolution
if noise_type != 1:
result = np.floor(result * 100.0)
result = result - np.mod(result, 15)
result = 1e-2 * result
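# emulate finite sensor resolution: scale by 100, floor, truncate to multiples of 15 counts,
# then rescale, i.e. quantize the readings to steps of 0.15 in the original units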
# compute SNR
G = 1e6 * np.array(
[self.params['gx'], self.params['gy'], self.params['gz']])
signal_power = np.sum(np.power(result - Noise, 2))
# Generative adversarial network (GAN) example
import os
import keras
import numpy as np
from keras import layers
from keras.preprocessing import image
# GAN generator network: transforms a vector from the latent space into a candidate image
latent_dim = 32
height = 32
width = 32
channels = 3
generator_input = keras.Input(shape=(latent_dim, ))
# Transform the input into a 16x16, 128-channel feature map
x = layers.Dense(128 * 16 * 16)(generator_input)
x = layers.LeakyReLU()(x)
x = layers.Reshape((16, 16, 128))(x)
x = layers.Conv2D(256, 5, padding='same')(x)
x = layers.LeakyReLU()(x)
# Upsample to 32x32
x = layers.Conv2DTranspose(256, 4, strides=2, padding='same')(x)
x = layers.LeakyReLU()(x)
x = layers.Conv2D(256, 5, padding='same')(x)
x = layers.LeakyReLU()(x)
x = layers.Conv2D(256, 5, padding='same')(x)
x = layers.LeakyReLU()(x)
# Produce a 32x32 image with `channels` channels
x = layers.Conv2D(channels, 7, activation='tanh', padding='same')(x)
generator = keras.models.Model(generator_input, x)  # instantiate the generator model
generator.summary()
# GAN discriminator network: takes a candidate image and classifies it as generated or real
discriminator_input = layers.Input(shape=(height, width, channels))
x = layers.Conv2D(128, 3)(discriminator_input)
x = layers.LeakyReLU()(x)
x = layers.Conv2D(128, 4, strides=2)(x)
x = layers.LeakyReLU()(x)
x = layers.Conv2D(128, 4, strides=2)(x)
x = layers.LeakyReLU()(x)
x = layers.Conv2D(128, 4, strides=2)(x)
x = layers.LeakyReLU()(x)
x = layers.Flatten()(x)
x = layers.Dropout(0.4)(x)  # add a dropout layer to the discriminator, an important trick
x = layers.Dense(1, activation='sigmoid')(x)  # classification layer
discriminator = keras.models.Model(discriminator_input, x)
discriminator.summary()
# Use gradient clipping in the optimizer to limit the range of gradient values
discriminator_optimizer = keras.optimizers.RMSprop(lr=0.0008,
clipvalue=1.0,
decay=1e-8)
discriminator.compile(optimizer=discriminator_optimizer,
loss='binary_crossentropy')
# Adversarial network: chains the generator and the discriminator
# Set the discriminator weights to non-trainable (this only affects the gan model); otherwise its
# weights would change during GAN training and its predictions could collapse to always "real"
discriminator.trainable = False
gan_input = keras.Input(shape=(latent_dim, ))
gan_output = discriminator(generator(gan_input))  # chain the generator and the discriminator
gan = keras.models.Model(gan_input, gan_output)
gan_optimizer = keras.optimizers.RMSprop(lr=0.0004, clipvalue=1.0, decay=1e-8)
gan.compile(optimizer=gan_optimizer, loss='binary_crossentropy')
# Train the GAN
(x_train, y_train), (_, _) = keras.datasets.cifar10.load_data()  # load the CIFAR-10 data
x_train = x_train[y_train.flatten() == 6]  # keep only the frog images (class 6)
x_train = x_train.reshape((x_train.shape[0], ) +
(height, width, channels)).astype('float32') / 255.
iterations = 10000
batch_size = 20
save_dir = 'your_dir'  # directory where generated images will be saved
start = 0
for step in range(iterations):
# Sample random points in the latent space
random_latent_vectors = np.random.normal(size=(batch_size, latent_dim))
# Decode the random points into fake images with the generator
generated_images = generator.predict(random_latent_vectors)
stop = start + batch_size
real_images = x_train[start:stop]
# Combine the fake images with real images
combined_images = np.concatenate([generated_images, real_images])
# Assemble labels distinguishing real from fake images
labels = np.concatenate(
[np.ones((batch_size, 1)),
np.zeros((batch_size, 1))])
# Add random noise to the labels, an important trick
labels += 0.05 * np.random.random(labels.shape)
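# (the label noise keeps the discriminator from becoming overconfident)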
import re
import warnings
from datetime import datetime, timedelta
from unittest.mock import patch
import numpy as np
import pandas as pd
import pytest
from pandas.testing import (
assert_frame_equal,
assert_index_equal,
assert_series_equal,
)
from woodwork.logical_types import Double, Integer
from rayml.exceptions import (
MethodPropertyNotFoundError,
MissingComponentError,
ParameterNotUsedWarning,
)
from rayml.pipelines import ComponentGraph
from rayml.pipelines.components import (
DateTimeFeaturizer,
DropRowsTransformer,
ElasticNetClassifier,
Estimator,
Imputer,
LogisticRegressionClassifier,
NaturalLanguageFeaturizer,
OneHotEncoder,
RandomForestClassifier,
SelectColumns,
StandardScaler,
TargetImputer,
Transformer,
Undersampler,
)
from rayml.problem_types import is_classification
from rayml.utils import infer_feature_types
class DummyTransformer(Transformer):
name = "Dummy Transformer"
def __init__(self, parameters=None, random_seed=0):
parameters = parameters or {}
super().__init__(
parameters=parameters, component_obj=None, random_seed=random_seed
)
def fit(self, X, y):
return self
def transform(self, X, y=None):
return X
class TransformerA(DummyTransformer):
"""copy class"""
class TransformerB(DummyTransformer):
"""copy class"""
class TransformerC(DummyTransformer):
"""copy class"""
class DummyEstimator(Estimator):
name = "Dummy Estimator"
model_family = None
supported_problem_types = None
def __init__(self, parameters=None, random_seed=0):
parameters = parameters or {}
super().__init__(
parameters=parameters, component_obj=None, random_seed=random_seed
)
def fit(self, X, y):
return self
class EstimatorA(DummyEstimator):
"""copy class"""
class EstimatorB(DummyEstimator):
"""copy class"""
class EstimatorC(DummyEstimator):
"""copy class"""
@pytest.fixture
def dummy_components():
return TransformerA, TransformerB, TransformerC, EstimatorA, EstimatorB, EstimatorC
def test_init(example_graph):
comp_graph = ComponentGraph()
assert len(comp_graph.component_dict) == 0
graph = example_graph
comp_graph = ComponentGraph(graph)
assert len(comp_graph.component_dict) == 6
expected_order = [
"Imputer",
"OneHot_ElasticNet",
"Elastic Net",
"OneHot_RandomForest",
"Random Forest",
"Logistic Regression Classifier",
]
assert comp_graph.compute_order == expected_order
def test_init_str_components():
graph = {
"Imputer": ["Imputer", "X", "y"],
"OneHot_RandomForest": ["One Hot Encoder", "Imputer.x", "y"],
"OneHot_ElasticNet": ["One Hot Encoder", "Imputer.x", "y"],
"Random Forest": ["Random Forest Classifier", "OneHot_RandomForest.x", "y"],
"Elastic Net": ["Elastic Net Classifier", "OneHot_ElasticNet.x", "y"],
"Logistic Regression Classifier": [
"Logistic Regression Classifier",
"Random Forest.x",
"Elastic Net.x",
"y",
],
}
comp_graph = ComponentGraph(graph)
assert len(comp_graph.component_dict) == 6
expected_order = [
"Imputer",
"OneHot_ElasticNet",
"Elastic Net",
"OneHot_RandomForest",
"Random Forest",
"Logistic Regression Classifier",
]
assert comp_graph.compute_order == expected_order
def test_init_instantiated():
graph = {
"Imputer": [
Imputer(numeric_impute_strategy="constant", numeric_fill_value=0),
"X",
"y",
]
}
component_graph = ComponentGraph(graph)
component_graph.instantiate(
{"Imputer": {"numeric_fill_value": 10, "categorical_fill_value": "Fill"}}
)
cg_imputer = component_graph.get_component("Imputer")
assert graph["Imputer"][0] == cg_imputer
assert cg_imputer.parameters["numeric_fill_value"] == 0
assert cg_imputer.parameters["categorical_fill_value"] is None
def test_invalid_init():
invalid_graph = {"Imputer": [Imputer, "X", "y"], "OHE": OneHotEncoder}
with pytest.raises(
ValueError, match="All component information should be passed in as a list"
):
ComponentGraph(invalid_graph)
graph = {
"Imputer": [
None,
"X",
"y",
]
}
with pytest.raises(
ValueError, match="may only contain str or ComponentBase subclasses"
):
ComponentGraph(graph)
graph = {
"Fake": ["Fake Component", "X", "y"],
"Estimator": [ElasticNetClassifier, "Fake.x", "y"],
}
with pytest.raises(MissingComponentError):
ComponentGraph(graph)
def test_init_bad_graphs():
graph_with_cycle = {
"Imputer": [Imputer, "X", "y"],
"OHE": [OneHotEncoder, "Imputer.x", "Estimator.x", "y"],
"Estimator": [RandomForestClassifier, "OHE.x", "y"],
}
with pytest.raises(ValueError, match="given graph contains a cycle"):
ComponentGraph(graph_with_cycle)
graph_with_more_than_one_final_component = {
"Imputer": ["Imputer", "X", "y"],
"OneHot_RandomForest": ["One Hot Encoder", "Imputer.x", "y"],
"OneHot_ElasticNet": ["One Hot Encoder", "Imputer.x", "y"],
"Random Forest": ["Random Forest Classifier", "OneHot_RandomForest.x", "y"],
"Elastic Net": ["Elastic Net Classifier", "X", "y"],
"Logistic Regression Classifier": [
"Logistic Regression Classifier",
"Random Forest.x",
"Elastic Net.x",
"y",
],
}
with pytest.raises(ValueError, match="graph has more than one final"):
ComponentGraph(graph_with_more_than_one_final_component)
graph_with_unconnected_imputer = {
"Imputer": ["Imputer", "X", "y"],
"DateTime": ["DateTime Featurizer", "X", "y"],
"Logistic Regression Classifier": [
"Logistic Regression Classifier",
"DateTime.x",
"y",
],
}
with pytest.raises(ValueError, match="The given graph is not completely connected"):
ComponentGraph(graph_with_unconnected_imputer)
def test_order_x_and_y():
graph = {
"Imputer": [Imputer, "X", "y"],
"OHE": [OneHotEncoder, "Imputer.x", "y"],
"Random Forest": [RandomForestClassifier, "OHE.x", "y"],
}
component_graph = ComponentGraph(graph).instantiate()
assert component_graph.compute_order == ["Imputer", "OHE", "Random Forest"]
def test_list_raises_error():
component_list = ["Imputer", "One Hot Encoder", RandomForestClassifier]
with pytest.raises(
ValueError,
match="component_dict must be a dictionary which specifies the components and edges between components",
):
ComponentGraph(component_list)
def test_instantiate_with_parameters(example_graph):
graph = example_graph
component_graph = ComponentGraph(graph)
assert not isinstance(component_graph.get_component("Imputer"), Imputer)
assert not isinstance(
component_graph.get_component("Elastic Net"), ElasticNetClassifier
)
parameters = {
"OneHot_RandomForest": {"top_n": 3},
"OneHot_ElasticNet": {"top_n": 5},
"Elastic Net": {"max_iter": 100},
}
component_graph.instantiate(parameters)
expected_order = [
"Imputer",
"OneHot_ElasticNet",
"Elastic Net",
"OneHot_RandomForest",
"Random Forest",
"Logistic Regression Classifier",
]
assert component_graph.compute_order == expected_order
assert isinstance(component_graph.get_component("Imputer"), Imputer)
assert isinstance(
component_graph.get_component("Random Forest"), RandomForestClassifier
)
assert isinstance(
component_graph.get_component("Logistic Regression Classifier"),
LogisticRegressionClassifier,
)
assert component_graph.get_component("OneHot_RandomForest").parameters["top_n"] == 3
assert component_graph.get_component("OneHot_ElasticNet").parameters["top_n"] == 5
assert component_graph.get_component("Elastic Net").parameters["max_iter"] == 100
@pytest.mark.parametrize("parameters", [None, {}])
def test_instantiate_without_parameters(parameters, example_graph):
graph = example_graph
component_graph = ComponentGraph(graph)
if parameters is not None:
component_graph.instantiate(parameters)
else:
component_graph.instantiate()
assert (
component_graph.get_component("OneHot_RandomForest").parameters["top_n"] == 10
)
assert component_graph.get_component("OneHot_ElasticNet").parameters["top_n"] == 10
assert component_graph.get_component(
"OneHot_RandomForest"
) is not component_graph.get_component("OneHot_ElasticNet")
expected_order = [
"Imputer",
"OneHot_ElasticNet",
"Elastic Net",
"OneHot_RandomForest",
"Random Forest",
"Logistic Regression Classifier",
]
assert component_graph.compute_order == expected_order
def test_reinstantiate(example_graph):
component_graph = ComponentGraph(example_graph)
component_graph.instantiate()
with pytest.raises(ValueError, match="Cannot reinstantiate a component graph"):
component_graph.instantiate({"OneHot": {"top_n": 7}})
def test_bad_instantiate_can_reinstantiate(example_graph):
component_graph = ComponentGraph(example_graph)
with pytest.raises(ValueError, match="Error received when instantiating component"):
component_graph.instantiate(
parameters={"Elastic Net": {"max_iter": 100, "fake_param": None}}
)
component_graph.instantiate({"Elastic Net": {"max_iter": 22}})
assert component_graph.get_component("Elastic Net").parameters["max_iter"] == 22
def test_get_component(example_graph):
graph = example_graph
component_graph = ComponentGraph(graph)
assert component_graph.get_component("OneHot_ElasticNet") == OneHotEncoder
assert (
component_graph.get_component("Logistic Regression Classifier")
== LogisticRegressionClassifier
)
with pytest.raises(ValueError, match="not in the graph"):
component_graph.get_component("Fake Component")
component_graph.instantiate(
{
"OneHot_RandomForest": {"top_n": 3},
"Random Forest": {"max_depth": 4, "n_estimators": 50},
}
)
assert component_graph.get_component("OneHot_ElasticNet") == OneHotEncoder()
assert component_graph.get_component("OneHot_RandomForest") == OneHotEncoder(
top_n=3
)
assert component_graph.get_component("Random Forest") == RandomForestClassifier(
n_estimators=50, max_depth=4
)
def test_get_estimators(example_graph):
component_graph = ComponentGraph(example_graph)
with pytest.raises(ValueError, match="Cannot get estimators until"):
component_graph.get_estimators()
component_graph.instantiate()
assert component_graph.get_estimators() == [
RandomForestClassifier(),
ElasticNetClassifier(),
LogisticRegressionClassifier(),
]
component_graph = ComponentGraph({"Imputer": ["Imputer", "X", "y"]})
component_graph.instantiate()
assert component_graph.get_estimators() == []
def test_parents(example_graph):
graph = example_graph
component_graph = ComponentGraph(graph)
assert component_graph.get_inputs("Imputer") == ["X", "y"]
assert component_graph.get_inputs("OneHot_RandomForest") == ["Imputer.x", "y"]
assert component_graph.get_inputs("OneHot_ElasticNet") == ["Imputer.x", "y"]
assert component_graph.get_inputs("Random Forest") == ["OneHot_RandomForest.x", "y"]
assert component_graph.get_inputs("Elastic Net") == ["OneHot_ElasticNet.x", "y"]
assert component_graph.get_inputs("Logistic Regression Classifier") == [
"Random Forest.x",
"Elastic Net.x",
"y",
]
with pytest.raises(ValueError, match="not in the graph"):
component_graph.get_inputs("Fake component")
component_graph.instantiate()
assert component_graph.get_inputs("Imputer") == ["X", "y"]
assert component_graph.get_inputs("OneHot_RandomForest") == ["Imputer.x", "y"]
assert component_graph.get_inputs("OneHot_ElasticNet") == ["Imputer.x", "y"]
assert component_graph.get_inputs("Random Forest") == ["OneHot_RandomForest.x", "y"]
assert component_graph.get_inputs("Elastic Net") == ["OneHot_ElasticNet.x", "y"]
assert component_graph.get_inputs("Logistic Regression Classifier") == [
"Random Forest.x",
"Elastic Net.x",
"y",
]
with pytest.raises(ValueError, match="not in the graph"):
component_graph.get_inputs("Fake component")
def test_get_last_component(example_graph):
component_graph = ComponentGraph()
with pytest.raises(
ValueError, match="Cannot get last component from edgeless graph"
):
component_graph.get_last_component()
component_graph = ComponentGraph(example_graph)
assert component_graph.get_last_component() == LogisticRegressionClassifier
component_graph.instantiate()
assert component_graph.get_last_component() == LogisticRegressionClassifier()
component_graph = ComponentGraph({"Imputer": [Imputer, "X", "y"]})
assert component_graph.get_last_component() == Imputer
component_graph = ComponentGraph(
{"Imputer": [Imputer, "X", "y"], "OneHot": [OneHotEncoder, "Imputer.x", "y"]}
)
assert component_graph.get_last_component() == OneHotEncoder
@patch("rayml.pipelines.components.Transformer.fit_transform")
@patch("rayml.pipelines.components.Estimator.fit")
@patch("rayml.pipelines.components.Estimator.predict_proba")
def test_fit_component_graph(
mock_predict_proba, mock_fit, mock_fit_transform, example_graph, X_y_binary
):
X, y = X_y_binary
mock_fit_transform.return_value = pd.DataFrame(X)
mock_predict_proba.return_value = pd.DataFrame(y)
mock_predict_proba.return_value.ww.init()
component_graph = ComponentGraph(example_graph).instantiate()
component_graph.fit(X, y)
assert mock_fit_transform.call_count == 3
assert mock_fit.call_count == 3
assert mock_predict_proba.call_count == 2
@patch("rayml.pipelines.components.TargetImputer.fit_transform")
@patch("rayml.pipelines.components.OneHotEncoder.fit_transform")
def test_fit_correct_inputs(
mock_ohe_fit_transform, mock_imputer_fit_transform, X_y_binary
):
X, y = X_y_binary
X = pd.DataFrame(X)
y = pd.Series(y)
graph = {
"Target Imputer": [TargetImputer, "X", "y"],
"OHE": [OneHotEncoder, "Target Imputer.x", "Target Imputer.y"],
}
expected_x = pd.DataFrame(index=X.index, columns=X.columns).fillna(1.0)
expected_x.ww.init()
expected_y = pd.Series(index=y.index).fillna(0)
mock_imputer_fit_transform.return_value = tuple((expected_x, expected_y))
mock_ohe_fit_transform.return_value = expected_x
component_graph = ComponentGraph(graph).instantiate()
component_graph.fit(X, y)
assert_frame_equal(expected_x, mock_ohe_fit_transform.call_args[0][0])
assert_series_equal(expected_y, mock_ohe_fit_transform.call_args[0][1])
@patch("rayml.pipelines.components.Transformer.fit_transform")
@patch("rayml.pipelines.components.Estimator.fit")
@patch("rayml.pipelines.components.Estimator.predict_proba")
def test_component_graph_fit_and_transform_all_but_final(
mock_predict_proba, mock_fit, mock_fit_transform, example_graph, X_y_binary
):
X, y = X_y_binary
component_graph = ComponentGraph(example_graph)
component_graph.instantiate()
mock_X_t = pd.DataFrame(np.ones(pd.DataFrame(X).shape))
mock_fit_transform.return_value = mock_X_t
mock_fit.return_value = Estimator
mock_predict_proba.return_value = pd.DataFrame(y)
mock_predict_proba.return_value.ww.init()
component_graph.fit_and_transform_all_but_final(X, y)
assert mock_fit_transform.call_count == 3
assert mock_fit.call_count == 2
assert mock_predict_proba.call_count == 2
@patch("rayml.pipelines.components.Estimator.fit")
@patch("rayml.pipelines.components.Estimator.predict_proba")
@patch("rayml.pipelines.components.Estimator.predict")
def test_predict(mock_predict, mock_predict_proba, mock_fit, example_graph, X_y_binary):
X, y = X_y_binary
mock_predict_proba.return_value = pd.DataFrame(y)
mock_predict_proba.return_value.ww.init()
mock_predict.return_value = pd.Series(y)
component_graph = ComponentGraph(example_graph).instantiate()
component_graph.fit(X, y)
component_graph.predict(X)
assert (
mock_predict_proba.call_count == 4
) # Called twice when fitting pipeline, twice when predicting
assert mock_predict.call_count == 1 # Called once during predict
assert mock_fit.call_count == 3 # Only called during fit, not predict
@patch("rayml.pipelines.components.Estimator.fit")
@patch("rayml.pipelines.components.Estimator.predict_proba")
@patch("rayml.pipelines.components.Estimator.predict")
def test_predict_multiclass(
mock_predict, mock_predict_proba, mock_fit, example_graph, X_y_multi
):
X, y = X_y_multi
mock_predict_proba.return_value = pd.DataFrame(
{
0: np.full(X.shape[0], 0.33),
1: np.full(X.shape[0], 0.33),
2: np.full(X.shape[0], 0.33),
}
)
mock_predict_proba.return_value.ww.init()
mock_predict.return_value = pd.Series(y)
component_graph = ComponentGraph(example_graph).instantiate()
component_graph.fit(X, y)
final_estimator_input = component_graph.transform_all_but_final(X, y)
assert final_estimator_input.columns.to_list() == [
"Col 0 Random Forest.x",
"Col 1 Random Forest.x",
"Col 2 Random Forest.x",
"Col 0 Elastic Net.x",
"Col 1 Elastic Net.x",
"Col 2 Elastic Net.x",
]
for col in final_estimator_input:
assert np.array_equal(
final_estimator_input[col].to_numpy(), np.full(X.shape[0], 0.33)
)
component_graph.predict(X)
assert (
mock_predict_proba.call_count == 6
) # Called twice when fitting pipeline, twice to compute final features, and twice when predicting
assert mock_predict.call_count == 1 # Called once during predict
assert mock_fit.call_count == 3 # Only called during fit, not predict
@patch("rayml.pipelines.components.Estimator.fit")
@patch("rayml.pipelines.components.Estimator.predict_proba")
@patch("rayml.pipelines.components.Estimator.predict")
def test_predict_regression(
mock_predict, mock_predict_proba, mock_fit, example_regression_graph, X_y_multi
):
X, y = X_y_multi
mock_predict.return_value = pd.Series(y)
mock_predict_proba.side_effect = MethodPropertyNotFoundError
component_graph = ComponentGraph(example_regression_graph).instantiate()
component_graph.fit(X, y)
final_estimator_input = component_graph.transform_all_but_final(X, y)
assert final_estimator_input.columns.to_list() == [
"Random Forest.x",
"Elastic Net.x",
]
component_graph.predict(X)
assert (
mock_predict_proba.call_count == 6
) # Called twice when fitting pipeline, twice to compute final features, and twice when predicting
assert (
mock_predict.call_count == 7
) # Called because `predict_proba` does not exist for regressions
assert mock_fit.call_count == 3 # Only called during fit, not predict
@patch("rayml.pipelines.components.Estimator.fit")
@patch("rayml.pipelines.components.Estimator.predict_proba")
@patch("rayml.pipelines.components.Estimator.predict")
def test_predict_repeat_estimator(
mock_predict, mock_predict_proba, mock_fit, X_y_binary
):
X, y = X_y_binary
mock_predict_proba.return_value = pd.DataFrame(y)
mock_predict_proba.return_value.ww.init()
mock_predict.return_value = pd.Series(y)
graph = {
"Imputer": [Imputer, "X", "y"],
"OneHot_RandomForest": [OneHotEncoder, "Imputer.x", "y"],
"OneHot_Logistic": [OneHotEncoder, "Imputer.x", "y"],
"Random Forest": [RandomForestClassifier, "OneHot_RandomForest.x", "y"],
"Logistic Regression Classifier": [
LogisticRegressionClassifier,
"OneHot_Logistic.x",
"y",
],
"Final Estimator": [
LogisticRegressionClassifier,
"Random Forest.x",
"Logistic Regression Classifier.x",
"y",
],
}
component_graph = ComponentGraph(graph)
component_graph.instantiate()
component_graph.fit(X, y)
assert (
not component_graph.get_component(
"Logistic Regression Classifier"
)._component_obj
== component_graph.get_component("Final Estimator")._component_obj
)
component_graph.predict(X)
assert mock_predict_proba.call_count == 4
assert mock_predict.call_count == 1
assert mock_fit.call_count == 3
@patch("rayml.pipelines.components.Imputer.transform")
@patch("rayml.pipelines.components.OneHotEncoder.transform")
@patch("rayml.pipelines.components.RandomForestClassifier.predict_proba")
@patch("rayml.pipelines.components.ElasticNetClassifier.predict_proba")
def test_transform_all_but_final(
mock_en_predict_proba,
mock_rf_predict_proba,
mock_ohe,
mock_imputer,
example_graph,
X_y_binary,
):
X, y = X_y_binary
mock_imputer.return_value = pd.DataFrame(X)
mock_ohe.return_value = pd.DataFrame(X)
mock_en_predict_proba.return_value = pd.DataFrame(
({0: np.zeros(X.shape[0]), 1: np.ones(X.shape[0])})
)
mock_en_predict_proba.return_value.ww.init()
mock_rf_predict_proba.return_value = pd.DataFrame(
({0: np.ones(X.shape[0]), 1: np.zeros(X.shape[0])})
)
mock_rf_predict_proba.return_value.ww.init()
X_expected = pd.DataFrame(
{
"Col 1 Random Forest.x": np.zeros(X.shape[0]),
"Col 1 Elastic Net.x": np.ones(X.shape[0]),
}
)
component_graph = ComponentGraph(example_graph).instantiate()
component_graph.fit(X, y)
X_t = component_graph.transform_all_but_final(X)
assert_frame_equal(X_expected, X_t)
assert mock_imputer.call_count == 2
assert mock_ohe.call_count == 4
@patch(f"{__name__}.DummyTransformer.transform")
def test_transform_all_but_final_single_component(mock_transform, X_y_binary):
X, y = X_y_binary
X = pd.DataFrame(X)
mock_transform.return_value = X
component_graph = ComponentGraph(
{"Dummy Component": [DummyTransformer, "X", "y"]}
).instantiate()
component_graph.fit(X, y)
X_t = component_graph.transform_all_but_final(X)
assert_frame_equal(X, X_t)
@patch("rayml.pipelines.components.Imputer.fit_transform")
def test_fit_y_parent(mock_fit_transform, X_y_binary):
X, y = X_y_binary
graph = {
"Imputer": [Imputer, "X", "y"],
"OHE": [OneHotEncoder, "Imputer.x", "y"],
"Random Forest": [RandomForestClassifier, "OHE.x", "y"],
}
component_graph = ComponentGraph(graph).instantiate()
mock_fit_transform.return_value = tuple((pd.DataFrame(X), pd.Series(y)))
component_graph.fit(X, y)
mock_fit_transform.assert_called_once()
def test_predict_empty_graph(X_y_binary):
X, y = X_y_binary
X = pd.DataFrame(X)
component_graph = ComponentGraph()
component_graph.instantiate()
component_graph.fit(X, y)
X_t = component_graph.transform(X, y)
assert_frame_equal(X, X_t)
X_pred = component_graph.predict(X)
assert_frame_equal(X, X_pred)
def test_no_instantiate_before_fit(X_y_binary):
X, y = X_y_binary
graph = {
"Imputer": [Imputer, "X", "y"],
"OHE": [OneHotEncoder, "Imputer.x", "y"],
"Estimator": [RandomForestClassifier, "OHE.x", "y"],
}
component_graph = ComponentGraph(graph)
with pytest.raises(
ValueError,
match="All components must be instantiated before fitting or predicting",
):
component_graph.fit(X, y)
def test_multiple_y_parents():
graph = {
"Imputer": [Imputer, "X", "y"],
"TargetImputer": [Imputer, "Imputer.x", "y"],
"Estimator": [RandomForestClassifier, "Imputer.x", "y", "TargetImputer.y"],
}
with pytest.raises(ValueError, match="All components must have exactly one target"):
ComponentGraph(graph)
def test_component_graph_order(example_graph):
component_graph = ComponentGraph(example_graph)
expected_order = [
"Imputer",
"OneHot_ElasticNet",
"Elastic Net",
"OneHot_RandomForest",
"Random Forest",
"Logistic Regression Classifier",
]
assert expected_order == component_graph.compute_order
component_graph = ComponentGraph({"Imputer": [Imputer, "X", "y"]})
expected_order = ["Imputer"]
assert expected_order == component_graph.compute_order
@pytest.mark.parametrize(
"index",
[
list(range(-5, 0)),
list(range(100, 105)),
[f"row_{i}" for i in range(5)],
pd.date_range("2020-09-08", periods=5),
],
)
@pytest.mark.parametrize("with_estimator_last_component", [True, False])
def test_component_graph_transform_and_predict_with_custom_index(
index,
with_estimator_last_component,
example_graph,
example_graph_with_transformer_last_component,
):
X = pd.DataFrame(
{"categories": [f"cat_{i}" for i in range(5)], "numbers": np.arange(5)},
index=index,
)
y = pd.Series([1, 2, 1, 2, 1], index=index)
X.ww.init(logical_types={"categories": "categorical"})
graph_to_use = (
example_graph
if with_estimator_last_component
else example_graph_with_transformer_last_component
)
component_graph = ComponentGraph(graph_to_use)
component_graph.instantiate()
component_graph.fit(X, y)
if with_estimator_last_component:
predictions = component_graph.predict(X)
assert_index_equal(predictions.index, X.index)
assert not predictions.isna().any(axis=None)
else:
X_t = component_graph.transform(X)
assert_index_equal(X_t.index, X.index)
assert not X_t.isna().any(axis=None)
y_in = pd.Series([0, 1, 0, 1, 0], index=index)
y_inv = component_graph.inverse_transform(y_in)
assert_index_equal(y_inv.index, y.index)
assert not y_inv.isna().any(axis=None)
@patch(f"{__name__}.EstimatorC.predict")
@patch(f"{__name__}.EstimatorB.predict")
@patch(f"{__name__}.EstimatorA.predict")
@patch(f"{__name__}.TransformerC.transform")
@patch(f"{__name__}.TransformerB.transform")
@patch(f"{__name__}.TransformerA.transform")
def test_component_graph_evaluation_plumbing(
mock_transform_a,
mock_transform_b,
mock_transform_c,
mock_predict_a,
mock_predict_b,
mock_predict_c,
dummy_components,
):
(
TransformerA,
TransformerB,
TransformerC,
EstimatorA,
EstimatorB,
EstimatorC,
) = dummy_components
mock_transform_a.return_value = pd.DataFrame(
{"feature trans": [1, 0, 0, 0, 0, 0], "feature a": np.ones(6)}
)
mock_transform_b.return_value = pd.DataFrame({"feature b": np.ones(6) * 2})
mock_transform_c.return_value = pd.DataFrame({"feature c": np.ones(6) * 3})
mock_predict_a.return_value = pd.Series([0, 0, 0, 1, 0, 0])
mock_predict_b.return_value = pd.Series([0, 0, 0, 0, 1, 0])
mock_predict_c.return_value = pd.Series([0, 0, 0, 0, 0, 1])
graph = {
"transformer a": [TransformerA, "X", "y"],
"transformer b": [TransformerB, "transformer a.x", "y"],
"transformer c": [TransformerC, "transformer a.x", "transformer b.x", "y"],
"estimator a": [EstimatorA, "X", "y"],
"estimator b": [EstimatorB, "transformer a.x", "y"],
"estimator c": [
EstimatorC,
"transformer a.x",
"estimator a.x",
"transformer b.x",
"estimator b.x",
"transformer c.x",
"y",
],
}
component_graph = ComponentGraph(graph)
component_graph.instantiate()
X = pd.DataFrame({"feature1": np.zeros(6), "feature2": np.zeros(6)})
y = pd.Series(np.zeros(6))
import numpy as np
import argparse
from base_module import Posenet, Camnet, discriminator, Encoder
from mmdgan_mh_enc import Pose_mmdgan_enc
import os
import random
import tensorflow as tf
import scipy.io as sio
import logging, logging.config
import sys
from eval_functions import err_3dpe
import ops
parse = argparse.ArgumentParser()
parse.add_argument("--batchsize", help= "the batch size used in training", default=128, type = int)
parse.add_argument("--epochs", help="number of epochs during training", default=50, type = int)
parse.add_argument("--latent_dim", help="dimension of latent space", default=1024, type = int)
parse.add_argument("--latent_dim_pose", help="dimension for pose in the latent space of discriminator", default=128, type=int)
parse.add_argument("--latent_dim_kcs", help="dimension for kcs in the latent space of discriminator", default=1024, type=int)
parse.add_argument("--d_output_dim", help="dimension for output of discriminator", default=8, type=int)
parse.add_argument("--lr", help="learning rate", default=1e-4, type=float)
parse.add_argument("--architecture", help="which architeture to use[mmdgan, mmdgan_enc]", default='mmdgan_enc', type=str)
parse.add_argument("--beta1", help="beta1 for adamoptimizor", default=0.5, type=float)
parse.add_argument("--diter", help="the number of discriminator updates oer generator updates", default=1, type=int)
parse.add_argument("--kernel", help="kernel type used in mmd[dot, mix_rbf, mix_rq]", default='mix_rq', type=str)
parse.add_argument("--repro_weight", help="weight of reprojection loss", default=10.0, type=float)
parse.add_argument("--cam_weight", help="weight of camera loss", default=10.0, type=float)
parse.add_argument("--gp_weight", help="weight of dot kernel in mix kernel", default=0.1, type=float)
parse.add_argument("--reg_weight", help="weight for regularizer", default=7.5, type=float)
parse.add_argument("--dot_weight", help="weight of dot kernel in mix kernel", default=10.0, type=float)
parse.add_argument("--lr_decay", help="learning rate decay rate", default=0.94, type=float)
parse.add_argument("--enc_weight", help="weight of encoder", default=10.0, type=float)
parse.add_argument("--sampling", help="set to true if generate samples", default=True, type=bool)
parse.add_argument("--checkpoint", help="which model to load", default=0, type=int)
# 931070 for gt data
# 971070 for shft
parse.add_argument("--num_samples", help="number of hypotheses", default=10, type=int)
parse.add_argument("--datatype", help="datatype used for training [GT, SHFT, GTMJ]", default='GT', type=str)
parse.add_argument("--load_path", help="specify the path to load model", default='./models', type=str)
args = parse.parse_args()
actions = ['Directions', 'Discussion', 'Eating', 'Greeting', 'Phoning', 'Photo', 'Posing', 'Purchases', 'Sitting',
'SittingDown', 'Smoking', 'Waiting', 'WalkDog', 'WalkTogether', 'Walking']
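# 16 body joints: 3D poses use 3 coordinates per joint, 2D poses 2; the camera is parameterised by a 2x3 matrix (6 values).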
pose3d_dim = 16 * 3
pose2d_dim = 16 * 2
cam_dim = 6
lr = args.lr
model_name = '{}_regweight{}_encweight{}_2D{}'.format(args.architecture, args.reg_weight, args.enc_weight, args.datatype)
log_dir = 'logs_eval'
if not os.path.exists(log_dir):
os.makedirs(log_dir)
logging.config.fileConfig('./logging.conf')
logger = logging.getLogger()
fileHandler = logging.FileHandler("{0}/log.txt".format(log_dir))
logger.addHandler(fileHandler)
logger.info("Logs will be written to %s" % log_dir)
def log_arguments():
logger.info('Command: %s', ' '.join(sys.argv))
s = '\n'.join([' {}: {}'.format(arg, getattr(args, arg)) for arg in vars(args)])
s = 'Arguments:\n' + s
logger.info(s)
log_arguments()
posenet = Posenet(args.latent_dim, pose3d_dim)
camnet = Camnet(args.latent_dim, cam_dim)
disc = discriminator(args.latent_dim_pose, args.latent_dim_kcs, args.d_output_dim)
encoder = Encoder(args.latent_dim, args.latent_dim)
mmd_posenet = Pose_mmdgan_enc(posenet, camnet, disc, encoder, args.latent_dim, args.batchsize, log_dir, args.epochs, pose2d_dim, pose3d_dim,
args.kernel, args.repro_weight, args.cam_weight, args.gp_weight, args.reg_weight, args.dot_weight, args.enc_weight)
mmd_posenet.build_model()
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
batchsize = args.batchsize
load_dir = os.path.join(args.load_path, model_name)
ckpt = tf.train.get_checkpoint_state(load_dir, latest_filename="checkpoint")
if args.checkpoint > 0:
ckpt_name = os.path.join(os.path.join(load_dir, "checkpoint-{}".format(args.checkpoint)))
else:
ckpt_name = ckpt.model_checkpoint_path
mmd_posenet.saver.restore(sess, ckpt_name)
print('Loading model {}'.format(os.path.basename(ckpt_name)))
path = 'new_data/test/2d{}_3dTEM'.format(args.datatype)
path_cam = 'new_data/test/2d{}_3dCAM'.format(args.datatype)
logger.info('{0:>15} {1:>30} {2:>30}'.format('Action', 'Protocol1', 'Protocol2'))
val_best_all = []
valcam_best_all = []
val_zc_all = []
valcam_zc_all = []
for action in actions:
data_2d_3d_test = sio.loadmat('{}/{}_2d{}_3d_test.mat'.format(path, action, args.datatype))
data_cam = sio.loadmat('{}/{}_2d{}_3d_test.mat'.format(path_cam, action, args.datatype))
poses2d_eval = data_2d_3d_test['poses_2d'][::64, :]
poses3d_eval = data_2d_3d_test['poses_3d'][::64, :] / 1000
poses_3d_cam = data_cam['poses_3d'][::64, :] / 1000
poses_zc = []
posescam_zc = []
# generate results under zero code setting
for eval in range(poses2d_eval.shape[0] // batchsize):
noise_zc = np.zeros([batchsize, args.latent_dim])
poses, cam = mmd_posenet.inference(sess, poses2d_eval[eval * batchsize: (eval + 1) * batchsize],
poses3d_eval[eval * batchsize: (eval + 1) * batchsize], noise_zc,
lr)
poses_reshape = np.reshape(poses, [poses.shape[0], 3, 16])
k = np.reshape(cam, [cam.shape[0], 2, 3])
R = ops.compute_R(k) # recover rotation matrix from camera matrix
poses_cam = np.matmul(R, poses_reshape) # transfer pose from the template frame to the camera frame
poses_cam_reshape = np.reshape(poses_cam, [poses_cam.shape[0], -1])
posescam_zc.append(poses_cam_reshape)
poses_zc.append(poses)
poses_zc = np.vstack(poses_zc)
posescam_zc = np.vstack(posescam_zc)
# compute the error under zero code setting
val_zc = 0.0
valcam_zc = 0.0
for p in range(poses_zc.shape[0]):
err_zc = 1000 * err_3dpe(poses3d_eval[p:p + 1, :], poses_zc[p:p + 1, :], True)
errcam_zc = 1000 * err_3dpe(poses_3d_cam[p:p + 1, :], 1.1 * posescam_zc[p:p + 1, :], False)
# scale the output according to the ratio between poses in camera frame and poses in template frame in the training set
val_zc = val_zc + err_zc
valcam_zc = valcam_zc + errcam_zc
val_zc_all.append(err_zc)
valcam_zc_all.append(errcam_zc)
val_zc = val_zc / poses_zc.shape[0]
valcam_zc = valcam_zc/posescam_zc.shape[0]
# generate results for multiple hypotheses
poses_samples_all = []
posescam_samples_all = []
R_all = []
poses_repro_all = []
for eval in range(poses2d_eval.shape[0] // batchsize):
poses_samples_batch = []
posescam_samples_batch = []
poses_repro_batch = []
for i in range(args.num_samples):
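                # draw a fresh latent code for each pose hypothesis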
z_test = np.random.normal(0, 1, (batchsize, args.latent_dim))
posespred, campred = mmd_posenet.inference(sess, poses2d_eval[eval * batchsize: (eval + 1) * batchsize],
poses3d_eval[eval * batchsize: (eval + 1) * batchsize], z_test,
lr)
posespred_reshape = np.reshape(posespred, [posespred.shape[0], 3, 16])
poses_samples_batch.append(posespred)
                k = np.reshape(campred, [campred.shape[0], 2, 3])
# -*- coding: utf-8 -*-
"""Test methods related to popluation of the proposal after training"""
import numpy as np
import pytest
from unittest.mock import MagicMock, Mock, patch, call
from nessai.proposal import FlowProposal
from nessai.livepoint import numpy_array_to_live_points
@pytest.fixture()
def z():
return np.random.randn(2, 2)
@pytest.fixture()
def x(z):
return numpy_array_to_live_points(np.random.randn(*z.shape), ['x', 'y'])
@pytest.fixture()
def log_q(x):
return np.random.randn(x.size)
def test_log_prior_wo_reparameterisation(proposal, x):
"""Test the lop prior method"""
log_prior = -np.ones(x.size)
proposal._reparameterisation = None
proposal.model = MagicMock()
proposal.model.log_prior = MagicMock(return_value=log_prior)
log_prior_out = FlowProposal.log_prior(proposal, x)
assert np.array_equal(log_prior, log_prior_out)
proposal.model.log_prior.assert_called_once_with(x)
def test_log_prior_w_reparameterisation(proposal, x):
"""Test the lop prior method with reparameterisations"""
log_prior = -np.ones(x.size)
proposal._reparameterisation = MagicMock()
proposal._reparameterisation.log_prior = MagicMock(return_value=log_prior)
proposal.model = MagicMock()
proposal.model.log_prior = MagicMock(return_value=log_prior)
log_prior_out = FlowProposal.log_prior(proposal, x)
assert np.array_equal(log_prior_out, -2 * np.ones(x.size))
proposal._reparameterisation.log_prior.assert_called_once_with(x)
proposal.model.log_prior.assert_called_once_with(x)
def test_prime_log_prior(proposal):
"""Make sure the prime prior raises an error by default."""
with pytest.raises(RuntimeError) as excinfo:
FlowProposal.x_prime_log_prior(proposal, 1.0)
assert 'Prime prior is not implemented' in str(excinfo.value)
@pytest.mark.parametrize('acceptance, scale',
[(0.0, 10.0), (0.5, 2.0), (0.01, 10.0), (2.0, 1.0)])
def test_update_poolsize_scale(proposal, acceptance, scale):
"""Test the check the poolsize is correct adjusted based on the acceptance.
"""
proposal.max_poolsize_scale = 10.
FlowProposal.update_poolsize_scale(proposal, acceptance)
assert proposal._poolsize_scale == scale
def test_compute_weights(proposal, x, log_q):
"""Test method for computing rejection sampling weights"""
proposal.use_x_prime_prior = False
proposal.log_prior = MagicMock(return_value=-np.ones(x.size))
log_w = FlowProposal.compute_weights(proposal, x, log_q)
proposal.log_prior.assert_called_once_with(x)
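    # expected log-weights: mocked log-prior (-1) minus log_q, shifted so the maximum log-weight is zero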
out = (-1 - log_q)
out -= out.max()
assert np.array_equal(log_w, out)
def test_compute_weights_prime_prior(proposal, x, log_q):
"""Test method for computing rejection sampling weights with the prime
prior.
"""
proposal.use_x_prime_prior = True
proposal.x_prime_log_prior = MagicMock(return_value=-np.ones(x.size))
log_w = FlowProposal.compute_weights(proposal, x, log_q)
proposal.x_prime_log_prior.assert_called_once_with(x)
out = (-1 - log_q)
out -= out.max()
assert np.array_equal(log_w, out)
@patch('numpy.random.rand', return_value=np.array([0.1, 0.9]))
def test_rejection_sampling(proposal, z, x, log_q):
"""Test rejection sampling method."""
proposal.use_x_prime_prior = False
proposal.truncate = False
proposal.backward_pass = MagicMock(return_value=(x, log_q))
log_w = np.log(np.array([0.5, 0.5]))
proposal.compute_weights = MagicMock(return_value=log_w)
z_out, x_out = FlowProposal.rejection_sampling(proposal, z)
assert proposal.backward_pass.called_once_with(x, True)
assert proposal.compute_weights.called_once_with(x)
assert x_out.size == 1
assert z_out.shape == (1, 2)
assert np.array_equal(x_out[0], x[0])
assert np.array_equal(z_out[0], z[0])
def test_rejection_sampling_empty(proposal, z):
"""Test rejection sampling method if no valid points are produced by
`backwards_pass`
"""
proposal.use_x_prime_prior = False
proposal.truncate = False
proposal.backward_pass = \
MagicMock(return_value=(np.array([]), np.array([])))
z_out, x_out = FlowProposal.rejection_sampling(proposal, z)
assert x_out.size == 0
assert z_out.size == 0
@patch('numpy.random.rand', return_value=np.array([0.1]))
def test_rejection_sampling_truncate(proposal, z, x):
"""Test rejection sampling method with truncation"""
proposal.use_x_prime_prior = False
proposal.truncate = True
log_q = np.array([0.0, 1.0])
proposal.backward_pass = MagicMock(return_value=(x, log_q))
worst_q = 0.5
log_w = np.log(np.array([0.5]))
proposal.compute_weights = MagicMock(return_value=log_w)
z_out, x_out = \
FlowProposal.rejection_sampling(proposal, z, worst_q=worst_q)
assert proposal.backward_pass.called_once_with(x, True)
assert proposal.compute_weights.called_once_with(x)
assert x_out.size == 1
assert z_out.shape == (1, 2)
assert np.array_equal(x_out[0], x[1])
    assert np.array_equal(z_out[0], z[1])
# -*- coding: utf-8 -*-
# Author: XuMing <<EMAIL>>
# Data: 17/10/16
# Brief: CNN network structure
import csv
import os
import numpy as np
import tensorflow as tf
from tensorflow.contrib import learn
import config
import data_helpers
from util import to_categorical
if config.eval_all_train_data:
x_raw, y = data_helpers.load_data_labels(config.data_dir)
y = to_categorical(y)
y = np.argmax(y, axis=1)
elif config.infer_data_path:
infer_datas = list(open(config.infer_data_path, "r", encoding="utf-8").readlines())
infer_datas = [s.strip() for s in infer_datas]
x_raw = data_helpers.load_infer_data(infer_datas)
y = []
else:
x_raw = data_helpers.load_infer_data(
["do you think it is right.", "everything is off.", "i hate you .", "it is a bad film.",
"good man and bad person.", "价格不是最便宜的,招商还是浦发银行是238*12=2856.00人家还可以分期的。",
u"驱动还有系统要自装,还有显卡太鸡巴低了.还有装系统太麻烦了"
])
y = [1, 0, 0, 0, 1, 0, 1]
# map data into vocabulary
checkpoint_dir = config.checkpoint_dir
vocab_path = os.path.join(checkpoint_dir, "..", "vocab")
print("vocab_path:", vocab_path)
vocab_processor = learn.preprocessing.VocabularyProcessor.restore(vocab_path)
x_test = np.array(list(vocab_processor.transform(x_raw)))
print("\nEvaluating...\n")
checkpoint_file = tf.train.latest_checkpoint(checkpoint_dir)
print("checkpoint file", checkpoint_file)
graph = tf.Graph()
with graph.as_default():
session_conf = tf.ConfigProto(allow_soft_placement=config.allow_soft_placement,
log_device_placement=config.log_device_placement)
sess = tf.Session(config=session_conf)
with sess.as_default():
# load the saved meta graph and restore variables
saver = tf.train.import_meta_graph("{}.meta".format(checkpoint_file))
saver.restore(sess, checkpoint_file)
# get the placeholders
input_x = graph.get_operation_by_name("input_x").outputs[0]
dropout_keep_prob = graph.get_operation_by_name("dropout_keep_prob").outputs[0]
# evaluate
predictions = graph.get_operation_by_name("output/predictions").outputs[0]
# generate batches for one epoch
batches = data_helpers.batch_iter(list(x_test), config.batch_size, 1, shuffle=False)
# collect the predictions
all_predictions = []
for x_test_batch in batches:
batch_predictions = sess.run(predictions, {input_x: x_test_batch, dropout_keep_prob: 1.0})
all_predictions = np.concatenate([all_predictions, batch_predictions])
# print accuracy if y_test is defined
if y is not None and len(y) > 0:
correct_predictions = float(sum(all_predictions == y))
print("Total number of test examples: {}".format(len(y)))
print("Accuracy: {:g}".format(correct_predictions / float(len(y))))
# save the evaluation to csv
x_raw = [x.encode("utf-8") for x in x_raw]
predictions_human_readable = np.column_stack((np.array(x_raw), all_predictions))
import numpy as np
import os
from six.moves.urllib import request
import unittest
from chainer import testing
from chainercv.evaluations import eval_detection_coco
try:
import pycocotools # NOQA
_available = True
except ImportError:
_available = False
data = {
'pred_bboxes': [
[[0, 0, 10, 10], [0, 0, 20, 20]]],
'pred_labels': [
[0, 0]],
'pred_scores': [
[0.8, 0.9]],
'gt_bboxes': [
[[0, 0, 10, 9]]],
'gt_labels': [
[0, 0]]}
@unittest.skipUnless(_available, 'pycocotools is not installed')
class TestEvalDetectionCOCOSimple(unittest.TestCase):
def setUp(self):
self.pred_bboxes = (np.array(bbox) for bbox in data['pred_bboxes'])
self.pred_labels = (np.array(label) for label in data['pred_labels'])
self.pred_scores = (np.array(score) for score in data['pred_scores'])
self.gt_bboxes = (np.array(bbox) for bbox in data['gt_bboxes'])
self.gt_labels = (np.array(label) for label in data['gt_labels'])
def test_crowded(self):
result = eval_detection_coco(self.pred_bboxes, self.pred_labels,
self.pred_scores,
self.gt_bboxes, self.gt_labels,
gt_crowdeds=[[True]])
# When the only ground truth is crowded, nothing is evaluated.
# In that case, all the results are nan.
self.assertTrue(
np.isnan(result['map/iou=0.50:0.95/area=small/maxDets=100']))
self.assertTrue(
            np.isnan(result['map/iou=0.50:0.95/area=medium/maxDets=100']))
# -*- coding: utf-8 -*-
import os
import cv2
import sys
import time
import numpy as np
S_NOW_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(S_NOW_DIR)
from util__img import *
class GistUtils:
def __init__(self, n_resize=128, n_w=5, ln_orientation=[8, 8, 8, 8], n_block_num=4, n_prefilt=4):
        # vector dim (single channel) = sum(ln_orientation) * n_block_num * n_block_num
# (8+8+8+8)*(4*4) = 512
self.n_resize = n_resize
self.n_boundaryExtension = self.n_resize // 4
self.n_w = n_w
self.ln_orientation = ln_orientation
self.n_block_num = n_block_num # MUST n_resize % n_block_num == 0
self.n_prefilt = n_prefilt
self.__create_gabor()
self.__get_gfmat()
def get_gist_vec(self, np_img_raw, mode="rgb"):
# resize
np_img_resize, n_ret = img_resize(np_img_raw, (self.n_resize, self.n_resize))
if n_ret != 0:
print("image resize error")
return None
# convert gray or rgb
np_gist = None
if mode.lower() == "gray":
np_img_gray, n_ret = img_2gray(np_img_resize)
np_prefilt_img = self.__get_pre_filt(np_img_gray)
np_gist = self.__gist_main(np_prefilt_img)
elif mode.lower() == "rgb" or mode.lower() == "bgr":
np_img_bgr, n_ret = img_2bgr(np_img_resize)
np_img_b = np_img_bgr[:,:,0]
np_img_g = np_img_bgr[:,:,1]
np_img_r = np_img_bgr[:,:,2]
np_gist_b = self.__get_pre_filt(np_img_b)
np_gist_g = self.__get_pre_filt(np_img_g)
np_gist_r = self.__get_pre_filt(np_img_r)
np_gist_b = self.__gist_main(np_gist_b)
np_gist_g = self.__gist_main(np_gist_g)
np_gist_r = self.__gist_main(np_gist_r)
np_gist = np.hstack([np_gist_b, np_gist_g, np_gist_r])
else:
print("input mode error")
return np_gist
def __get_pre_filt(self, np_img):
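        # Work on the log image: subtract its np_gf-filtered component in the Fourier domain, then normalise by the local energy (sqrt of the filtered squared output) to equalise contrast.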
np_log_img = np.log(np_img + 1.0)
np_pad_img = np.pad(np_log_img,((self.n_w,self.n_w), (self.n_w,self.n_w)), 'symmetric')
np_gf = self.np_gf
np_out = np_pad_img - np.real(np.fft.ifft2(np.fft.fft2(np_pad_img) * np_gf ))
np_local = np.sqrt(np.abs(np.fft.ifft2(np.fft.fft2(np_out **2) * np_gf)))
np_out = np_out / (0.2 + np_local)
n_size = self.n_resize + 2 * self.n_w
return np_out[self.n_w: n_size - self.n_w, self.n_w : n_size - self.n_w]
def __gist_main(self, np_prefilt_img):
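        # Pad the prefiltered image by the boundary extension before applying the Gabor filter bank.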
n_b = self.n_boundaryExtension
        np_pad_img = np.pad(np_prefilt_img, ((n_b, n_b), (n_b, n_b)), 'symmetric')
# This script is taken from https://github.com/mateuszbuda/ml-stat-util.git
import numpy as np
from scipy.stats import percentileofscore
def score_ci(
y_true,
y_pred,
score_fun,
n_bootstraps=2000,
confidence_level=0.95,
seed=None,
reject_one_class_samples=True,
):
"""
Compute confidence interval for given score function based on labels and predictions using bootstrapping.
:param y_true: 1D list or array of labels.
:param y_pred: 1D list or array of predictions corresponding to elements in y_true.
:param score_fun: Score function for which confidence interval is computed. (e.g. sklearn.metrics.accuracy_score)
:param n_bootstraps: The number of bootstraps. (default: 2000)
:param confidence_level: Confidence level for computing confidence interval. (default: 0.95)
:param seed: Random seed for reproducibility. (default: None)
:param reject_one_class_samples: Whether to reject bootstrapped samples with only one label. For scores like AUC we
need at least one positive and one negative sample. (default: True)
:return: Score evaluated on labels and predictions, lower confidence interval, upper confidence interval, array of
bootstrapped scores.
"""
assert len(y_true) == len(y_pred)
score = score_fun(y_true, y_pred)
_, ci_lower, ci_upper, scores = score_stat_ci(
y_true=y_true,
y_preds=y_pred,
score_fun=score_fun,
n_bootstraps=n_bootstraps,
confidence_level=confidence_level,
seed=seed,
reject_one_class_samples=reject_one_class_samples,
)
return score, ci_lower, ci_upper, scores
def score_stat_ci(
y_true,
y_preds,
score_fun,
stat_fun=np.mean,
n_bootstraps=2000,
confidence_level=0.95,
seed=None,
reject_one_class_samples=True,
):
"""
Compute confidence interval for given statistic of a score function based on labels and predictions using
bootstrapping.
:param y_true: 1D list or array of labels.
:param y_preds: A list of lists or 2D array of predictions corresponding to elements in y_true.
:param score_fun: Score function for which confidence interval is computed. (e.g. sklearn.metrics.accuracy_score)
:param stat_fun: Statistic for which confidence interval is computed. (e.g. np.mean)
:param n_bootstraps: The number of bootstraps. (default: 2000)
:param confidence_level: Confidence level for computing confidence interval. (default: 0.95)
:param seed: Random seed for reproducibility. (default: None)
:param reject_one_class_samples: Whether to reject bootstrapped samples with only one label. For scores like AUC we
need at least one positive and one negative sample. (default: True)
:return: Mean score statistic evaluated on labels and predictions, lower confidence interval, upper confidence
interval, array of bootstrapped scores.
"""
y_true = np.array(y_true)
y_preds = np.atleast_2d(y_preds)
assert all(len(y_true) == len(y) for y in y_preds)
np.random.seed(seed)
scores = []
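    # Each bootstrap resamples both the readers (rows of y_preds) and the sample indices with replacement, then aggregates the per-reader scores with stat_fun.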
for i in range(n_bootstraps):
readers = np.random.randint(0, len(y_preds), len(y_preds))
indices = np.random.randint(0, len(y_true), len(y_true))
if reject_one_class_samples and len(np.unique(y_true[indices])) < 2:
continue
reader_scores = []
for r in readers:
reader_scores.append(score_fun(y_true[indices], y_preds[r][indices]))
scores.append(stat_fun(reader_scores))
mean_score = np.mean(scores)
sorted_scores = np.array(sorted(scores))
alpha = (1.0 - confidence_level) / 2.0
ci_lower = sorted_scores[int(round(alpha * len(sorted_scores)))]
ci_upper = sorted_scores[int(round((1.0 - alpha) * len(sorted_scores)))]
return mean_score, ci_lower, ci_upper, scores
def pvalue(
y_true,
y_pred1,
y_pred2,
score_fun,
n_bootstraps=2000,
two_tailed=True,
seed=None,
reject_one_class_samples=True,
):
"""
Compute p-value for hypothesis that score function for model I predictions is higher than for model II predictions
using bootstrapping.
:param y_true: 1D list or array of labels.
:param y_pred1: 1D list or array of predictions for model I corresponding to elements in y_true.
:param y_pred2: 1D list or array of predictions for model II corresponding to elements in y_true.
:param score_fun: Score function for which confidence interval is computed. (e.g. sklearn.metrics.accuracy_score)
:param n_bootstraps: The number of bootstraps. (default: 2000)
:param two_tailed: Whether to use two-tailed test. (default: True)
:param seed: Random seed for reproducibility. (default: None)
:param reject_one_class_samples: Whether to reject bootstrapped samples with only one label. For scores like AUC we
need at least one positive and one negative sample. (default: True)
:return: Computed p-value, array of bootstrapped differences of scores.
"""
assert len(y_true) == len(y_pred1)
assert len(y_true) == len(y_pred2)
return pvalue_stat(
y_true=y_true,
y_preds1=y_pred1,
y_preds2=y_pred2,
score_fun=score_fun,
n_bootstraps=n_bootstraps,
two_tailed=two_tailed,
seed=seed,
reject_one_class_samples=reject_one_class_samples,
)
def pvalue_stat(
y_true,
y_preds1,
y_preds2,
score_fun,
stat_fun=np.mean,
n_bootstraps=2000,
two_tailed=True,
seed=None,
reject_one_class_samples=True,
):
"""
Compute p-value for hypothesis that given statistic of score function for model I predictions is higher than for
model II predictions using bootstrapping.
:param y_true: 1D list or array of labels.
:param y_preds1: A list of lists or 2D array of predictions for model I corresponding to elements in y_true.
:param y_preds2: A list of lists or 2D array of predictions for model II corresponding to elements in y_true.
:param score_fun: Score function for which confidence interval is computed. (e.g. sklearn.metrics.accuracy_score)
:param stat_fun: Statistic for which p-value is computed. (e.g. np.mean)
:param n_bootstraps: The number of bootstraps. (default: 2000)
:param two_tailed: Whether to use two-tailed test. (default: True)
:param seed: Random seed for reproducibility. (default: None)
:param reject_one_class_samples: Whether to reject bootstrapped samples with only one label. For scores like AUC we
need at least one positive and one negative sample. (default: True)
:return: Computed p-value, array of bootstrapped differences of scores.
"""
y_true = np.array(y_true)
y_preds1 = np.atleast_2d(y_preds1)
y_preds2 = np.atleast_2d(y_preds2)
assert all(len(y_true) == len(y) for y in y_preds1)
assert all(len(y_true) == len(y) for y in y_preds2)
np.random.seed(seed)
z = []
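    # Each bootstrap resamples the reader sets of both models and the sample indices with replacement; one-class resamples are skipped when reject_one_class_samples is set.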
for i in range(n_bootstraps):
readers1 = np.random.randint(0, len(y_preds1), len(y_preds1))
readers2 = np.random.randint(0, len(y_preds2), len(y_preds2))
indices = np.random.randint(0, len(y_true), len(y_true))
        if reject_one_class_samples and len(np.unique(y_true[indices])) < 2:
import h5py
import os, sys, glob
import numpy as np
import plotly.offline as offline
from preprocessing import analysis_pp
from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils
from scipy.stats.stats import power_divergence
from scipy.stats import ttest_ind_from_stats
import csv
import scipy.signal as ss
import math
import time
from pandas import DataFrame
from scipy import optimize
import pandas as pd
import matplotlib.pyplot as plt
from collections import deque
class AstrocytePlotter():
def __init__(self, output_folder):
self.output_folder = output_folder
#For correlation plots
self.filter_probs = [0.05, 0.10, 0.25]
self.n_samples_corr_fake = 20
self.num_frames_splits_l = [250, 500, 1000, 3000, 6000, 12000, 24000, 100000]
self.num_frames_splits_m_l = [0.5, 1, 2, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 60, 70, 80]
self.num_frames_splits_splits_m_l = [10, 15, 20, 25, 30, 35, 40]
self.max_split_comparison_samples = 100
self.behaviours_list_a = ['default', 'rest', 'running',
'running_start', 'running_before', 'stick',
'stick_start', 'stick_end', 'stick_expect',
'stick_rest', 'whisker_rest_stick', 'whisker_stick']
self.behaviours_list_small = ['whisker_rest_stick', 'default', 'rest', 'running', 'stick']
def setup_plot_folders(self, output_experiment_path):
paths = ['borders', 'behaviour_heatmaps', 'behaviours_basic',
'signal_delays', 'signal_durations', 'triplet', 'behaviour_activity',
'behaviour_areas', 'signal_basic_samples', 'signal_behaviour_samples',
'correlations', 'random_events', 'splits', 'splits_self', 'signal_amplitudes',
'signal_proportion_delays', 'signal_stick_run_samples', 'splits_split_split',
'triplet_bar', 'size_v_time_corr',
'behaviour_heatmaps_threshold_with_random',
'split_behaviour_grids',
'size_histogram_bh_comparison_individual', 'amplitude_histogram_bh_comparison_individual', 'duration_histogram_bh_comparison_individual',]
for p in paths:
try:
os.makedirs(os.path.join(output_experiment_path, 'plots' , p))
except:
pass
def setup_file_folders(self, output_experiment_path):
paths = ['correlations', 'csv']
for p in paths:
try:
print(os.path.join(output_experiment_path, 'files', p))
os.makedirs(os.path.join(output_experiment_path, 'files', p))
except:
print('Folder structure exists?')
def setup_plot_folders_comparison(self, output_experiment_path_comparison):
paths = ['behaviour_heatmaps', 'triplet', 'intersection', 'correlations', 'align',
'intersection_border_xcorr_aligned',]
for p in paths:
try:
os.makedirs(os.path.join(output_experiment_path_comparison, 'plots', p))
except:
print('Folder structure exists?')
def setup_file_folders_comparison(self, output_experiment_path_comparison):
paths = ['correlations', 'csv']
for p in paths:
try:
print(os.path.join(output_experiment_path_comparison, 'files', p))
os.makedirs(os.path.join(output_experiment_path_comparison, 'files', p))
except:
print('Folder structure exists?')
def setup_plot_folders_all_comparison(self, output_experiment_path_all_comparison):
#print(output_experiment_path_all_comparison)
paths = ['size_histogram_comparison', 'amplitude_histogram_comparison', 'duration_histogram_comparison',
'size_histogram_bh_comparison', 'amplitude_histogram_bh_comparison', 'duration_histogram_bh_comparison',
'activity_all', 'activity_all_number_minute', 'waterfall_together', 'signal_proportion_delays',
'signal_proportion_delays_alt_average_proportions',
'behaviour_heatmaps_V2_comparison_scale',
'bar_rest_run_all',
'bar_rest_rest_stick_all',
'bar_run_run_stick_all',
'dot_rest_run_pair_all',
'bar_run_stick_run_transition_all',
'rest_to_run_proportions_alt',
'run_to_rest_proportions_alt',
'run_stick_run_proportions_alt',
'run_stick_run_proportions_alt_filter_max_3_frames',
'run_stick_run_proportions_alt_filter_max_5_frames',
'rest_to_run_amplitudes_default_alt',
'rest_to_run_amplitudes_alt',
'rest_to_run_durations_alt',
'rest_to_run_sizes_alt',
'rest_to_run_speed_alt',
'rest_to_run_pupil_alt',
'run_to_rest_amplitudes_default_alt',
'run_to_rest_amplitudes_alt',
'run_to_rest_durations_alt',
'run_to_rest_sizes_alt',
'rest_to_run_amplitudes_default_outlier_alt',
'rest_to_run_amplitudes_outlier_alt',
'rest_to_run_durations_outlier_alt',
'rest_to_run_sizes_outlier_alt',
'run_to_rest_amplitudes_default_outlier_alt',
'run_to_rest_amplitudes_outlier_alt',
'run_to_rest_durations_outlier_alt',
'run_to_rest_sizes_outlier_alt',
'run_to_rest_speed_alt',
'run_to_rest_pupil_alt',
'run_stick_run_amplitudes_default_alt',
'run_stick_run_amplitudes_alt',
'run_stick_run_durations_alt',
'run_stick_run_sizes_alt',
'run_stick_run_amplitudes_default_outlier_alt',
'run_stick_run_amplitudes_outlier_alt',
'run_stick_run_durations_outlier_alt',
'run_stick_run_sizes_outlier_alt',
'run_stick_run_speed_alt',
'run_stick_run_pupil_alt',
'run_stick_run_amplitudes_default_alt_filter_max_3_frames',
'run_stick_run_amplitudes_alt_filter_max_3_frames',
'run_stick_run_durations_alt_filter_max_3_frames',
'run_stick_run_sizes_alt_filter_max_3_frames',
'run_stick_run_speed_alt_filter_max_3_frames',
'run_stick_run_pupil_alt_filter_max_3_frames',
'run_stick_run_amplitudes_default_alt_filter_max_5_frames',
'run_stick_run_amplitudes_alt_filter_max_5_frames',
'run_stick_run_durations_alt_filter_max_5_frames',
'run_stick_run_sizes_alt_filter_max_5_frames',
'run_stick_run_speed_alt_filter_max_5_frames',
'run_stick_run_pupil_alt_filter_max_5_frames',
'all_amplitudes', 'all_durations', 'all_sizes',
'all_amplitudes_filt_bh', 'all_durations_filt_bh', 'all_sizes_filt_bh',
'correlations',
'correlations_long_events',
'correlations_short_events',
'correlations_no_align',
'correlations_no_align_long_events',
'correlations_no_align_short_events',
'correlations_csv',
'correlations_long_events_csv',
'correlations_short_events_csv',
'correlations_no_align_csv',
'correlations_no_align_long_events_csv',
'correlations_no_align_short_events_csv',
'control',
'outliers',
'triplet_dot_all',
'size_v_time_corr_ALL',
'speed_v_events_ALL',
'split_correlation_all',
'behaviour_over_recording',
'pixel_distribution',
'splits_self_all',
]
data_paths = [
'correlations',
'correlations_long_events',
'correlations_short_events',
'correlations_no_align',
'correlations_no_align_long_events',
'correlations_no_align_short_events',
'control',
'outliers',
'behaviour_ratios',
'top_average_values',
'split_correlation_all',
'splits_self_all'
]
for p in paths:
#print('Trying...', p)
try:
os.makedirs(os.path.join(output_experiment_path_all_comparison, 'plots', p))
except:
print('Folder structure exists?')
for p in data_paths:
try:
os.makedirs(os.path.join(output_experiment_path_all_comparison, 'data', p))
except:
print('Folder structure exists?')
def get_output_experiment_path(self, astroA, output_folder):
experiment_id = '/'.join(astroA.experiment_path.split('/')[-2:])
output_experiment_path = os.path.join(output_folder, experiment_id)
return output_experiment_path
def plot_all_single(self, astroA):
output_experiment_path = self.get_output_experiment_path(astroA, self.output_folder)
print('Making dirs', output_experiment_path)
self.setup_plot_folders(output_experiment_path)
print('Plotting behaviours basic...')
#Behaviour basic
figs_basic_plots = self.get_behaviour_basic_plots(astroA)
for fig_k in figs_basic_plots.keys():
saving_utils.save_plotly_fig(figs_basic_plots[fig_k], os.path.join(output_experiment_path, 'plots', 'behaviours_basic', '{}'.format(fig_k)), width=1000, height=400)
print('Plotting random samples of signals...')
fig_signals = self.get_signal_figs_samples(astroA, 20)
for i, fig_signal in enumerate(fig_signals):
fig_signal_path = os.path.join(output_experiment_path, 'plots', 'signal_basic_samples', 'signal_{}'.format(i))
saving_utils.save_plotly_fig(fig_signal, fig_signal_path)
print('Plotting borders...')
#Borders plot
fig_border = self.get_border_plot(astroA)
saving_utils.save_plotly_fig(fig_border, os.path.join(output_experiment_path, 'plots' , 'borders', 'border'))
print('Plotting behaviour heatmaps...')
#Behaviour heatmaps
fig_heatmap_grids, fig_heatmap_dff_grids = self.get_behaviour_contour_plots(astroA)
heatmap_grid_base_path = os.path.join(output_experiment_path, 'plots', 'behaviour_heatmaps')
for k in fig_heatmap_grids.keys():
saving_utils.save_plotly_fig(fig_heatmap_grids[k], os.path.join(heatmap_grid_base_path, k))
saving_utils.save_plotly_fig(fig_heatmap_dff_grids[k], os.path.join(heatmap_grid_base_path, k + 'dff'))
print('Plotting behaviour activity bar plot...')
behaviour_activity_path = os.path.join(output_experiment_path, 'plots', 'behaviour_activity', 'activity')
fig_behaviour_activity = self.get_behaviour_activity_plot(astroA)
        print('BEHAVIOUR ACTIVITY PATH\n', behaviour_activity_path)
saving_utils.save_plotly_fig(fig_behaviour_activity, behaviour_activity_path, width=1200, height=800)
print('Plotting behaviour event size bar plot...')
behaviour_area_path = os.path.join(output_experiment_path, 'plots', 'behaviour_areas', 'areas')
fig_behaviour_area = self.get_behaviour_area_plot(astroA)
saving_utils.save_plotly_fig(fig_behaviour_area, behaviour_area_path)
print('Plotting behaviour amplitude size bar plot...')
behaviour_amplitude_path = os.path.join(output_experiment_path, 'plots', 'signal_amplitudes', 'amplitudes')
fig_behaviour_amplitude = self.get_behaviour_amplitude_bar_plot(astroA)
saving_utils.save_plotly_fig(fig_behaviour_amplitude, behaviour_amplitude_path)
print('Plotting random samples of signals on different behaviours...')
fig_bk_signals = self.get_signal_bk_figs_samples(astroA, 3)
for bk in fig_bk_signals.keys():
for i, fig_bk_signal in enumerate(fig_bk_signals[bk]):
fig_bk_signal_path = os.path.join(output_experiment_path, 'plots', 'signal_behaviour_samples', 'signal_{}-{}'.format(bk, i))
saving_utils.save_plotly_fig(fig_bk_signal, fig_bk_signal_path)
print('Plotting local signal samples with stick and running...')
stick_run_sample_path = os.path.join(output_experiment_path, 'plots', 'signal_stick_run_samples')
fig_stick_run_samples_l = self.get_stick_run_sample_figs(astroA)
for i, sample_figs in enumerate(fig_stick_run_samples_l):
saving_utils.save_plotly_fig(sample_figs[0], os.path.join(stick_run_sample_path, '{}-running'.format(i)))
saving_utils.save_plotly_fig(sample_figs[1], os.path.join(stick_run_sample_path, '{}-stick'.format(i)))
for j in range(min(10, len(sample_figs[2]))):
saving_utils.save_plotly_fig(sample_figs[2][j], os.path.join(stick_run_sample_path, '{}-signal_{}'.format(i, j)))
bh_l = ['rest', 'stick_rest', 'running', 'stick_run_ind_15']
        # Histogram settings used below (min_measure, max_measure, num_bins):
        #   area:     None, 60, 10
        #   duration: None, 30, 10
        #   dff:      0.6,  5,  20
print('Comparing behaviour distribution plots for SINGLE...')
for n_bins in [10, 20]:
print('NUM BINS:', n_bins)
for behaviour_l in [bh_l]: #, ['rest', 'running'], ['running', 'stick'], ['rest', 'stick_rest'], ['running', 'stick_run_ind_15']]:
for measure, min_measure, max_measure in [
['area', None, 60],
['dffMax2', 0.6, 5],
['duration', None, 30],
]:
for confidence in [True]:
measure_name = aqua_utils.get_measure_names(measure)
path = os.path.join(output_experiment_path, 'plots', '{}_histogram_bh_comparison_individual'.format(measure_name), 'behaviours-{}-nbins={}-min={}-max={}-conf={}'.format('_'.join(behaviour_l), n_bins, min_measure, max_measure, confidence))
plot, stats_d = self.measure_distribution_bh_compare_plot([astroA], behaviour_l, measure=measure, num_bins=n_bins, min_measure=min_measure, max_measure=max_measure, measure_name=measure_name, confidence=confidence, with_stats=True, mode='MOA')
if measure == 'duration':
plotly_utils.apply_fun_axis_fig(plot, lambda x : x / astroA.fr, axis='x')
saving_utils.save_pth_plt_l_log([plot], [path], axis='x')
saving_utils.save_pth_plt_l_log([plot], [path], axis='y')
#Save results in text file
for i, name in enumerate(stats_d['names']):
#Create folder
data_folder_path = path
try:
os.makedirs(path)
except:
pass
temp_d = {k : stats_d[k][i] for k in stats_d.keys()}
saving_utils.save_csv_dict(temp_d, os.path.join(data_folder_path, '{}.csv'.format(name)), key_order=['names', 'x', 'mean', 'conf_95', 'std'])
                            np.savetxt(os.path.join(data_folder_path, '{}-data.csv'.format(name)), np.array(temp_d['data']))
import configparser
import os
import sys
import time
import glob
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from astropy.io import fits
import marg_mcmc as wl
import batman
# Set path to read in get_limb.py from bin_analysis
sys.path.insert(0, '../bin_analysis')
import get_limb as gl
def event_time(date, properties):
"""Program to determine the expected event time
Inputs
date: 1D array of the date of each exposure (MJD)
properties: 1D array containing the last observed eclipse
and the period. (MJD, days)"""
time = properties[1, 0]
time_error = properties[1, 1]
period = properties[4, 0]
period_error = properties[4, 1]
i = 0
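    # Step forward one period at a time until the predicted event time passes the first exposure; i counts the elapsed epochs so the period uncertainty can be propagated.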
while time < date[0]:
i += 1
time += period
    epoch_error = np.sqrt(time_error**2 + (i*period_error)**2)
# Copyright 2019 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the batch hafnian wrapper function"""
# pylint: disable=no-self-use,redefined-outer-name
from itertools import product
import numpy as np
from scipy.special import eval_hermitenorm, eval_hermite
from thewalrus import hermite_multidimensional, hafnian_batched, hafnian_repeated
def test_hermite_multidimensional_renorm():
""" This tests the renormalized batchhafnian wrapper function to compute photon number statistics for a fixed gaussian state.
"""
B = np.sqrt(0.5) * np.array([[0, 1], [1, 0]]) + 0 * 1j
res = 10
expected = np.diag(0.5 ** (np.arange(0, res) / 2))
array = hermite_multidimensional(-B, res, renorm=True)
assert np.allclose(array, expected)
def test_reduction_to_physicists_polys():
"""Tests that the multidimensional hermite polynomials reduce to the regular physicists' hermite polynomials in the appropriate limit"""
x = np.arange(-1, 1, 0.1)
init = 1
n_max = 5
A = np.ones([init, init], dtype=complex)
vals = np.array(
[hermite_multidimensional(2 * A, n_max, y=np.array([x0], dtype=complex)) for x0 in x]
).T
expected = np.array([eval_hermite(i, x) for i in range(len(vals))])
assert np.allclose(vals, expected)
def test_reduction_to_probabilist_polys():
"""Tests that the multidimensional hermite polynomials reduce to the regular probabilist' hermite polynomials in the appropriate limit"""
x = np.arange(-1, 1, 0.1)
init = 1
n_max = 5
A = np.ones([init, init], dtype=complex)
vals = np.array(
[hermite_multidimensional(A, n_max, y=np.array([x0], dtype=complex)) for x0 in x]
).T
expected = np.array([eval_hermitenorm(i, x) for i in range(len(vals))])
assert np.allclose(vals, expected)
def test_hafnian_batched():
"""Test hafnian_batched against hafnian_repeated for a random symmetric matrix"""
n_modes = 4
A = np.random.rand(n_modes, n_modes) + 1j * np.random.rand(n_modes, n_modes)
A += A.T
n_photon = 5
v1 = np.array([hafnian_repeated(A, q) for q in product(np.arange(n_photon), repeat=n_modes)])
assert np.allclose(hafnian_batched(A, n_photon, make_tensor=False), v1)
def test_hafnian_batched_loops():
"""Test hafnian_batched with loops against hafnian_repeated with loops for a random symmetric matrix
and a random vector of loops
"""
n_modes = 4
A = np.random.rand(n_modes, n_modes) + 1j * np.random.rand(n_modes, n_modes)
A += A.T
mu = np.random.rand(n_modes) + 1j * np.random.rand(n_modes)
n_photon = 5
v1 = np.array(
[
hafnian_repeated(A, q, mu=mu, loop=True)
for q in product(np.arange(n_photon), repeat=n_modes)
]
)
expected = hafnian_batched(A, n_photon, mu=mu, make_tensor=False)
assert np.allclose(expected, v1)
def test_hafnian_batched_loops_no_edges():
"""Test hafnian_batched with loops against hafnian_repeated with loops for a random symmetric matrix
and a random vector of loops
"""
n_modes = 4
A = np.zeros([n_modes, n_modes], dtype=complex)
mu = np.random.rand(n_modes) + 1j * np.random.rand(n_modes)
n_photon = 5
v1 = np.array(
[
hafnian_repeated(A, q, mu=mu, loop=True)
for q in product(np.arange(n_photon), repeat=n_modes)
]
)
expected = hafnian_batched(A, n_photon, mu=mu, make_tensor=False)
assert np.allclose(expected, v1)
def test_hafnian_batched_zero_loops_no_edges():
"""Test hafnian_batched with loops against hafnian_repeated with loops for a the zero matrix
and a loops
"""
n_modes = 4
A = np.zeros([n_modes, n_modes], dtype=complex)
n_photon = 5
v1 = np.array(
[hafnian_repeated(A, q, loop=True) for q in product(np.arange(n_photon), repeat=n_modes)]
)
expected = hafnian_batched(A, n_photon, make_tensor=False)
    assert np.allclose(expected, v1)
#!/usr/bin/env python
import roslib
import sys
import rospy
import cv2
import math
import imutils
import statistics
import numpy as np
from std_msgs.msg import String
from sensor_msgs.msg import Image
from std_msgs.msg import Float64MultiArray, Float64
from cv_bridge import CvBridge, CvBridgeError
from scipy.spatial import distance as dist
class image_converter:
# Defines publisher and subscriber
def __init__(self):
# initialize the node named image_processing
rospy.init_node('image_processing', anonymous=True)
# initialize a publisher to send images from camera1 to a topic named image_topic1
self.image_pub1 = rospy.Publisher("image_topic1", Image, queue_size=1)
self.image_pub2 = rospy.Publisher("image_topic2", Image, queue_size=1)
#Initialize a publisher to send joints angular posiion toa topic called joints_pos
self.joints_pub=rospy.Publisher("joints_pos",Float64MultiArray,queue_size=10)
#initialize a publisher for the four angles
self.robot_joint1_pub = rospy.Publisher("/robot/joint1_position_controller/command", Float64, queue_size=10)
self.robot_joint2_pub = rospy.Publisher("/robot/joint2_position_controller/command", Float64, queue_size=10)
self.robot_joint3_pub = rospy.Publisher("/robot/joint3_position_controller/command", Float64, queue_size=10)
self.robot_joint4_pub = rospy.Publisher("/robot/joint4_position_controller/command", Float64, queue_size=10)
        # initialize a subscriber to receive messages from a topic named /robot/camera1/image_raw and use callback function to receive data
self.image_sub1 = rospy.Subscriber("/camera1/robot/image_raw", Image, self.callback1)
self.image_sub2 = rospy.Subscriber("/camera2/robot/image_raw", Image, self.callback2)
#intialize variables to store
self.time_trajectory = rospy.get_time()
self.red = np.array([0.0, 0.0, 0.0, 0.0], dtype='float64')
self.green = np.array([0.0, 0.0, 0.0, 0.0], dtype='float64')
self.p2m = np.array([0.0], dtype='float64')
self.ja4 = np.array([0.0], dtype='float64')
# initialize errors
self.time_previous_step = np.array([rospy.get_time()], dtype='float64')
self.time_previous_step2 = np.array([rospy.get_time()], dtype='float64')
# initialize error and derivative of error for trajectory tracking
# initialize the bridge between openCV and ROS
self.bridge = CvBridge()
    # Receive data from camera 1, process it, and publish
def callback1(self, data):
        # Receive the image
try:
self.image1 = self.bridge.imgmsg_to_cv2(data, "bgr8")
except CvBridgeError as e:
print(e)
def callback2(self, data):
        # Receive the image
try:
self.image2 = self.bridge.imgmsg_to_cv2(data, "bgr8")
except CvBridgeError as e:
print(e)
#Blob detection-------------------------------------------------------
def detect_red(self,image1, image2):
#smooth the image and reduce noise
image_gau_blur1 = cv2.GaussianBlur(image1, (1, 1), 0)
#convert colours to HSV
hsv1 = cv2.cvtColor(image_gau_blur1, cv2.COLOR_BGR2HSV)
#set the HSV values for red
lower_red1 = np.array([0, 200, 0])
higher_red1 = np.array([0, 255, 255])
#Apply threshold to seperate the blob from rest of the robot
red_range1 = cv2.inRange(hsv1, lower_red1, higher_red1)
res_red1 = cv2.bitwise_and(image_gau_blur1, image_gau_blur1, mask=red_range1)
#convert to grey scale
red_s_gray1 = cv2.cvtColor(res_red1, cv2.COLOR_BGR2GRAY)
#Detect the edges
canny_edge1 = cv2.Canny(red_s_gray1, 30, 70)
#Find the contours
contours1, hierarchy1 = cv2.findContours(canny_edge1, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
#Find the center coordinates and the radius of the blob
(x1, y1), radius1 = cv2.minEnclosingCircle(contours1[0])
#convert to integers
cy, cz1 = (int(x1), int(y1))
radius1 = int(radius1)
#similar to above, but for image 2
image_gau_blur2 = cv2.GaussianBlur(image2, (1, 1), 0)
hsv2 = cv2.cvtColor(image_gau_blur2, cv2.COLOR_BGR2HSV)
lower_red2 = np.array([0, 200, 0])
higher_red2 = np.array([0, 255, 255])
red_range2 = cv2.inRange(hsv2, lower_red2, higher_red2)
res_red2 = cv2.bitwise_and(image_gau_blur2, image_gau_blur2, mask=red_range2)
red_s_gray2 = cv2.cvtColor(res_red2, cv2.COLOR_BGR2GRAY)
canny_edge2 = cv2.Canny(red_s_gray2, 30, 70)
contours2, hierarchy2 = cv2.findContours(canny_edge2, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
(x2, y2), radius2 = cv2.minEnclosingCircle(contours2[0])
cx, cz2 = (int(x2), int(y2))
radius2 = int(radius2)
return np.array([cx, cy, cz1, cz2])
def detect_blue(self,image1, image2):
        #similar approach to detect_red but with a different colour threshold
image_gau_blur1 = cv2.GaussianBlur(image1, (1, 1), 0)
hsv1 = cv2.cvtColor(image_gau_blur1, cv2.COLOR_BGR2HSV)
lower_red1 = np.array([70, 0, 0])
higher_red1 = np.array([255, 255, 255])
red_range1 = cv2.inRange(hsv1, lower_red1, higher_red1)
res_red1 = cv2.bitwise_and(image_gau_blur1, image_gau_blur1, mask=red_range1)
red_s_gray1 = cv2.cvtColor(res_red1, cv2.COLOR_BGR2GRAY)
canny_edge1 = cv2.Canny(red_s_gray1, 30, 70)
contours1, hierarchy1 = cv2.findContours(canny_edge1, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
(x1, y1), radius1 = cv2.minEnclosingCircle(contours1[0])
cy, cz1 = (int(x1), int(y1))
radius1 = int(radius1)
image_gau_blur2 = cv2.GaussianBlur(image2, (1, 1), 0)
hsv2 = cv2.cvtColor(image_gau_blur2, cv2.COLOR_BGR2HSV)
lower_red2 = np.array([70, 0, 0])
higher_red2 = np.array([255, 255, 255])
red_range2 = cv2.inRange(hsv2, lower_red2, higher_red2)
res_red2 = cv2.bitwise_and(image_gau_blur2, image_gau_blur2, mask=red_range2)
red_s_gray2 = cv2.cvtColor(res_red2, cv2.COLOR_BGR2GRAY)
canny_edge2 = cv2.Canny(red_s_gray2, 30, 70)
contours2, hierarchy2 = cv2.findContours(canny_edge2, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
(x2, y2), radius2 = cv2.minEnclosingCircle(contours2[0])
cx, cz2 = (int(x2), int(y2))
radius2 = int(radius2)
return np.array([cx, cy, cz1, cz2])
def detect_green(self,image1, image2):
        #similar approach to detect_blue but with a different colour threshold
image_gau_blur1 = cv2.GaussianBlur(image1, (1, 1), 0)
hsv1 = cv2.cvtColor(image_gau_blur1, cv2.COLOR_BGR2HSV)
lower_red1 = np.array([55, 0, 0])
higher_red1 = np.array([100, 255, 255])
red_range1 = cv2.inRange(hsv1, lower_red1, higher_red1)
res_red1 = cv2.bitwise_and(image_gau_blur1, image_gau_blur1, mask=red_range1)
red_s_gray1 = cv2.cvtColor(res_red1, cv2.COLOR_BGR2GRAY)
canny_edge1 = cv2.Canny(red_s_gray1, 30, 70)
contours1, hierarchy1 = cv2.findContours(canny_edge1, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
(x1, y1), radius1 = cv2.minEnclosingCircle(contours1[0])
cy, cz1 = (int(x1), int(y1))
radius1 = int(radius1)
image_gau_blur2 = cv2.GaussianBlur(image2, (1, 1), 0)
hsv2 = cv2.cvtColor(image_gau_blur2, cv2.COLOR_BGR2HSV)
lower_red2 = np.array([55, 0, 0])
higher_red2 = np.array([100, 255, 255])
red_range2 = cv2.inRange(hsv2, lower_red2, higher_red2)
res_red2 = cv2.bitwise_and(image_gau_blur2, image_gau_blur2, mask=red_range2)
red_s_gray2 = cv2.cvtColor(res_red2, cv2.COLOR_BGR2GRAY)
canny_edge2 = cv2.Canny(red_s_gray2, 30, 70)
contours2, hierarchy2 = cv2.findContours(canny_edge2, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
(x2, y2), radius2 = cv2.minEnclosingCircle(contours2[0])
cx, cz2 = (int(x2), int(y2))
radius2 = int(radius2)
return np.array([cx, cy, cz1, cz2])
def detect_yellow(self,image1, image2):
        #similar approach to detect_blue but with a different colour threshold
image_gau_blur1 = cv2.GaussianBlur(image1, (1, 1), 0)
hsv1 = cv2.cvtColor(image_gau_blur1, cv2.COLOR_BGR2HSV)
lower_red1 = np.array([16, 244, 0])
higher_red1 = np.array([51, 255, 255])
red_range1 = cv2.inRange(hsv1, lower_red1, higher_red1)
res_red1 = cv2.bitwise_and(image_gau_blur1, image_gau_blur1, mask=red_range1)
red_s_gray1 = cv2.cvtColor(res_red1, cv2.COLOR_BGR2GRAY)
canny_edge1 = cv2.Canny(red_s_gray1, 30, 70)
contours1, hierarchy1 = cv2.findContours(canny_edge1, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
(x1, y1), radius1 = cv2.minEnclosingCircle(contours1[0])
cy, cz1 = (int(x1), int(y1))
radius1 = int(radius1)
image_gau_blur2 = cv2.GaussianBlur(image2, (1, 1), 0)
hsv2 = cv2.cvtColor(image_gau_blur2, cv2.COLOR_BGR2HSV)
lower_red2 = np.array([16, 244, 0])
higher_red2 = np.array([51, 255, 255])
red_range2 = cv2.inRange(hsv2, lower_red2, higher_red2)
res_red2 = cv2.bitwise_and(image_gau_blur2, image_gau_blur2, mask=red_range2)
red_s_gray2 = cv2.cvtColor(res_red2, cv2.COLOR_BGR2GRAY)
canny_edge2 = cv2.Canny(red_s_gray2, 30, 70)
contours2, hierarchy2 = cv2.findContours(canny_edge2, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
(x2, y2), radius2 = cv2.minEnclosingCircle(contours2[0])
cx, cz2 = (int(x2), int(y2))
radius2 = int(radius2)
        return np.array([cx, cy, cz1, cz2])
#!/usr/bin/env python
import sys
import numpy as np
from netCDF4 import Dataset
from scipy.interpolate import griddata
import matplotlib.pyplot as plt
def get_subset_indices(min_lat, max_lat, min_lon, max_lon, lats, lons):
# These are the indices of the closest lat/lon values to
# (min_lat, max_lat, min_lon, max_lon)
indices = []
indices.append(int((np.abs(np.array(lats)-min_lat)).argmin()))
indices.append(int((np.abs(np.array(lats)-max_lat)).argmin()))
indices.append(int((np.abs(np.array(lons)-min_lon)).argmin()))
indices.append(int((np.abs(np.array(lons)-max_lon)).argmin()))
# return [min_lat_index, max_lat_index, min_lon_index, max_lon_index]
return indices
def grab_ETOPO1_subset(file_name, min_lat, max_lat, min_lon, max_lon):
ETOPO1 = Dataset(file_name, 'r')
lons = ETOPO1.variables["x"][:]
lats = ETOPO1.variables["y"][:]
# Grab indices for max/min lat/lon bounds
minLat, maxLat, minLon, maxLon = get_subset_indices(min_lat, max_lat, min_lon, max_lon, lats, lons)
bathy = ETOPO1.variables["z"][minLat:maxLat,minLon:maxLon]
lons,lats = np.meshgrid(lons[minLon:maxLon],lats[minLat:maxLat])
print("== Selected {} points ({}x{}) from {}".format(bathy.size,bathy.shape[1],bathy.shape[0],file_name))
print("---- Lats: {} to {}, Lons: {} to {}".format(min_lat, max_lat, min_lon, max_lon))
return lats,lons,bathy
def write_grid(out_file_name, lats, lons, bathy):
outfile = open(out_file_name, 'w')
dlon = abs(np.unique(lons)[1] - np.unique(lons)[0])
dlat = abs(np.unique(lats)[1] - np.unique(lats)[0])
outfile.write("# N_lats\n")
outfile.write("# N_lons\n")
outfile.write("# dlat\n")
outfile.write("# dlon\n")
outfile.write("{} {} {} {}\n".format(lons.shape[0], lons.shape[1], dlat, dlon))
outfile.write("##################################\n")
# Write vertices from top left (Northwest) to bottom right (Southeast)
for i in list(reversed(range(lons.shape[0]))):
for j in range(lons.shape[1]):
outfile.write("{}\t{}\t{}\n".format(lats[i][j],lons[i][j],bathy[i][j]))
outfile.close()
print("output written to {}".format(out_file_name))
def grab_ETOPO1_subset_interpolated(file_name, min_lat, max_lat, min_lon, max_lon, factor=3):
debug = False
ETOPO1 = Dataset(file_name, 'r')
#adjusting lons to be able to cross the dateline
lons = ETOPO1.variables["x"][:]
    lons2 = np.copy(lons)
import math
import os
import re
from multiprocessing import Pool
from collections import defaultdict
import tables
import numpy as np
from astropy.io import fits
from astropy.table import Table
from beast.observationmodel.noisemodel.generic_noisemodel import get_noisemodelcat
from beast.physicsmodel.grid import SEDGrid
# from beast.external import eztables
from beast.fitting.fit import save_pdf1d
from beast.fitting.fit_metrics import percentile
from beast.tools import read_beast_data
def uniform_slices(num_points, num_slices):
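    """Split the indices 0..num_points-1 into num_slices contiguous slices of
    near-equal length, e.g. uniform_slices(10, 3) -> [slice(0, 4), slice(4, 7), slice(7, 10)].
    """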
q = num_points // num_slices
r = num_points % num_slices
slices = []
for i in range(num_slices):
if i < r:
start = i * (q + 1)
stop = start + q + 1
# After the remainder has been taken care of, do strides of q
else:
start = r * (q + 1) + (i - r) * q
stop = start + q
slices.append(slice(start, stop))
return slices
def split_grid(grid_fname, num_subgrids, overwrite=False):
"""
Splits a spectral or sed grid (they are the same class actually)
according to grid point index (so basically, arbitrarily).
Parameters
----------
grid_fname: string
file name of the existing grid to be split up
num_subgrids: integer
the number of parts the grid should be split into
overwrite: bool
any subgrids that already exist will be deleted if set to True.
If set to False, skip over any grids that are already there.
Returns
-------
list of string
the names of the newly created subgrid files
"""
g = SEDGrid(grid_fname, backend="disk")
fnames = []
num_seds = len(g.seds)
slices = uniform_slices(num_seds, num_subgrids)
for i, slc in enumerate(slices):
subgrid_fname = grid_fname.replace(".hd5", "sub{}.hd5".format(i))
fnames.append(subgrid_fname)
if os.path.isfile(subgrid_fname):
if overwrite:
os.remove(subgrid_fname)
else:
print("{} already exists. Skipping.".format(subgrid_fname))
continue
print("constructing subgrid " + str(i))
# Load a slice as a SEDGrid object
sub_g = SEDGrid(
g.lamb[:], seds=g.seds[slc], grid=Table(g.grid[slc]), backend="memory",
)
if g.filters is not None:
sub_g.header["filters"] = " ".join(g.filters)
# Save it to a new file
sub_g.write(subgrid_fname, append=False)
return fnames
def merge_grids(seds_fname, sub_names):
"""
Merges a set of grids into one big grid. The grids need to have the
same columns
Parameters
----------
seds_fname: string
path for the output file
sub_names: list of strings
paths for the input grids
"""
if not os.path.isfile(seds_fname):
for n in sub_names:
print("Appending {} to {}".format(n, seds_fname))
g = SEDGrid(n)
g.write(seds_fname, append=True)
else:
print("{} already exists".format(seds_fname))
def subgrid_info(grid_fname, noise_fname=None):
"""
Generates a list of mins and maxes of all the quantities in the given grid
Parameters
----------
grid_fname: string
path to a beast grid file (hd5 format)
noise_fname: string
Path to the noise model file for the given grid (hd5 format)
(optional). If this is given, the mins/maxes for the full model
fluxes are added too, under the name 'log'+filter+'_wd_bias'
(needs to conform to the name used in fit.py).
Returns
-------
info_dict: dictionary
{name of quantity [string]: {'min': min, 'max': max, 'unique': unique values}}
"""
# Use the disk backend to minimize the memory usage
sedgrid = SEDGrid(grid_fname, backend="disk")
seds = sedgrid.seds
info_dict = {}
qnames = sedgrid.keys()
for q in qnames:
qvals = sedgrid[q]
qmin = np.amin(qvals)
qmax = np.amax(qvals)
qunique = np.unique(qvals)
info_dict[q] = {}
info_dict[q]["min"] = qmin
info_dict[q]["max"] = qmax
info_dict[q]["unique"] = qunique
if noise_fname is not None:
noisemodel = get_noisemodelcat(noise_fname)
# The following is also in fit.py, so we're kind of doing double
# work here, but it's necessary if we want to know the proper
# ranges for these values.
full_model_flux = seds[:] + noisemodel["bias"]
logtempseds = np.array(full_model_flux)
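        # Signed log transform: sign(x) * log10(1 + |x| * ln(10)), matching the transform applied in fit.py.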
full_model_flux = (
np.sign(logtempseds)
* np.log1p(np.abs(logtempseds * math.log(10)))
/ math.log(10)
)
filters = sedgrid.filters
for i, f in enumerate(filters):
f_fluxes = full_model_flux[:, i]
# Be sure to cut out the -100's in the calculation of the minimum
            qmin = np.amin(f_fluxes[f_fluxes > -99.99])
""" Tests for the model. """
import unittest
import sys
from numpy.testing import assert_array_almost_equal, assert_array_equal
import numpy as np
from numpy import random
from pyhacrf import Hacrf
from pyhacrf.state_machine import GeneralStateMachine, DefaultStateMachine
from pyhacrf.pyhacrf import _GeneralModel, _AdjacentModel
from pyhacrf import StringPairFeatureExtractor
TEST_PRECISION = 3
class TestHacrf(unittest.TestCase):
def test_initialize_parameters(self):
start_states = [0]
transitions = [(0, 0, (1, 1)),
(0, 1, (0, 1)),
(0, 0, (1, 0))]
states_to_classes = {0: 'a'}
state_machine = GeneralStateMachine(start_states=start_states,
transitions=transitions,
states_to_classes=states_to_classes)
n_features = 3
actual_parameters = Hacrf._initialize_parameters(state_machine, n_features)
expected_parameter_shape = (5, 3)
self.assertEqual(actual_parameters.shape, expected_parameter_shape)
def test_fit_predict(self):
incorrect = ['helloooo', 'freshh', 'ffb', 'h0me', 'wonderin', 'relaionship', 'hubby', 'krazii', 'mite', 'tropic']
correct = ['hello', 'fresh', 'facebook', 'home', 'wondering', 'relationship', 'husband', 'crazy', 'might', 'topic']
training = zip(incorrect, correct)
fe = StringPairFeatureExtractor(match=True, numeric=True)
xf = fe.fit_transform(training)
model = Hacrf()
model.fit(xf, [0, 0, 0, 0, 0, 1, 1, 1, 1, 1])
expected_parameters = np.array([[-10.76945326, 144.03414923, 0.],
[31.84369748, -106.41885651, 0.],
[-52.08919467, 4.56943665, 0.],
[31.01495044, -13.0593297, 0.],
[49.77302218, -6.42566204, 0.],
[-28.69877796, 24.47127009, 0.],
[-85.34524911, 21.87370646, 0.],
[106.41949333, 6.18587125, 0.]])
print(model.parameters)
assert_array_almost_equal(model.parameters, expected_parameters,
decimal=TEST_PRECISION)
expected_probas = np.array([[1.00000000e+000, 3.51235685e-039],
[1.00000000e+000, 4.79716208e-039],
[1.00000000e+000, 2.82744641e-139],
[1.00000000e+000, 6.49580729e-012],
[9.99933798e-001, 6.62022561e-005],
[8.78935957e-005, 9.99912106e-001],
[4.84538335e-009, 9.99999995e-001],
[1.25170233e-250, 1.00000000e+000],
[2.46673086e-010, 1.00000000e+000],
[1.03521293e-033, 1.00000000e+000]])
actual_predict_probas = model.predict_proba(xf)
print(actual_predict_probas)
assert_array_almost_equal(actual_predict_probas, expected_probas,
decimal=TEST_PRECISION)
expected_predictions = np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 1])
actual_predictions = model.predict(xf)
assert_array_almost_equal(actual_predictions, expected_predictions,
decimal=TEST_PRECISION)
def test_fit_predict_regularized(self):
incorrect = ['helloooo', 'freshh', 'ffb', 'h0me', 'wonderin', 'relaionship', 'hubby', 'krazii', 'mite', 'tropic']
correct = ['hello', 'fresh', 'facebook', 'home', 'wondering', 'relationship', 'husband', 'crazy', 'might', 'topic']
training = zip(incorrect, correct)
fe = StringPairFeatureExtractor(match=True, numeric=True)
xf = fe.fit_transform(training)
model = Hacrf(l2_regularization=10.0)
model.fit(xf, [0, 0, 0, 0, 0, 1, 1, 1, 1, 1])
print(model.parameters)
expected_parameters = np.array([[-0.0569188, 0.07413339, 0.],
[0.00187709, -0.06377866, 0.],
[-0.01908823, 0.00586189, 0.],
[0.01721114, -0.00636556, 0.],
[0.01578279, 0.0078614, 0.],
[-0.0139057, -0.00862948, 0.],
[-0.00623241, 0.02937325, 0.],
[0.00810951, -0.01774676, 0.]])
assert_array_almost_equal(model.parameters, expected_parameters,
decimal=TEST_PRECISION)
expected_probas = np.array([[0.5227226, 0.4772774],
[0.52568993, 0.47431007],
[0.4547091, 0.5452909],
[0.51179222, 0.48820778],
[0.46347576, 0.53652424],
[0.45710098, 0.54289902],
[0.46159657, 0.53840343],
[0.42997978, 0.57002022],
[0.47419724, 0.52580276],
[0.50797852, 0.49202148]])
actual_predict_probas = model.predict_proba(xf)
print(actual_predict_probas)
assert_array_almost_equal(actual_predict_probas, expected_probas,
decimal=TEST_PRECISION)
expected_predictions = np.array([0, 0, 1, 0, 1, 1, 1, 1, 1, 0])
actual_predictions = model.predict(xf)
assert_array_almost_equal(actual_predictions, expected_predictions,
decimal=TEST_PRECISION)
class TestGeneralModel(unittest.TestCase):
def test_build_lattice(self):
n_states = 4 # Because 3 is the max
start_states = [0, 1]
transitions = [(0, 0, (1, 1)),
(0, 1, (0, 1)),
(0, 0, (1, 0)),
(0, 3, lambda i, j, k: (0, 2))]
states_to_classes = {0: 0, 1: 1, 3: 3}
state_machine = GeneralStateMachine(start_states, transitions, states_to_classes)
x = np.zeros((2, 3, 9))
# # ________
# 1. . . # 1 0 - 10 - 31
# # | /_______
# 0. . . # 0 10 -- 1 3
# 0 1 2 # 0 1 2
#
# 1(0, 1), 3(0, 2), 1(1, 1), 1(0, 0) should be pruned because they represent partial alignments.
# Only nodes that are reachable by stepping back from (1, 2) must be included in the lattice.
actual_lattice = state_machine.build_lattice(x)
expected_lattice = np.array([(0, 0, 0, 1, 0, 0, 2 + n_states),
(0, 0, 0, 1, 1, 0, 0 + n_states),
(1, 0, 0, 1, 2, 3, 3 + n_states),
(1, 1, 0, 1, 2, 1, 1 + n_states)])
assert_array_equal(actual_lattice, expected_lattice)
def test_build_lattice_jumps(self):
n_states = 2 # Because 1 is the max
start_states = [0, 1]
transitions = [(0, 0, (1, 1)),
(0, 1, (0, 2)),
(0, 0, (1, 0))]
states_to_classes = {0: 0, 1: 1}
state_machine = GeneralStateMachine(start_states, transitions, states_to_classes)
x = np.zeros((2, 3, 9))
# # ________
# 1. . . # 1 0 . 1
# # | _______
# 0. . . # 0 10 / . 1
# 0 1 2 # 0 1 2
#
# 1(0, 2) should be pruned because they represent partial alignments.
# Only nodes that are reachable by stepping back from (1, 2) must be included in the lattice.
actual_lattice = state_machine.build_lattice(x)
expected_lattice = np.array([(0, 0, 0, 1, 0, 0, 2 + n_states),
(1, 0, 0, 1, 2, 1, 1 + n_states)])
assert_array_equal(actual_lattice, expected_lattice)
def test_forward_single(self):
start_states = [0, 1]
transitions = [(0, 0, (1, 1)),
(0, 1, (0, 1)),
(0, 0, (1, 0)),
(0, 2, lambda i, j, k: (0, 2))]
states_to_classes = {0: 'a', 1: 'a', 2: 'b'} # Dummy
state_machine = GeneralStateMachine(start_states, transitions, states_to_classes)
parameters = np.array(range(-7, 7), dtype='float64').reshape((7, 2))
# parameters =
# 0([[-7, -6],
# 1 [-5, -4],
# 2 [-3, -2],
# 3 [-1, 0],
# 4 [ 1, 2],
# 5 [ 3, 4],
# 6 [ 5, 6]])
x = np.array([[[0, 1],
[1, 0],
[2, 1]],
[[0, 1],
[1, 0],
[1, 0]]], dtype=np.float64)
y = 'a'
# Expected lattice:
# # ________
# 1. . . # 1 0 __0 - 21
# # | /
# 0. . . # 0 0
# 0 1 2 # 0 1 2
expected_alpha = {
(0, 0, 0): np.exp(-6),
(0, 0, 0, 1, 0, 0, 5): np.exp(-6) * np.exp(4),
(0, 0, 0, 1, 1, 0, 3): np.exp(-6) * np.exp(-1),
(1, 0, 0): np.exp(-6) * np.exp(4) * np.exp(-6),
(1, 0, 0, 1, 2, 2, 6): np.exp(-6) * np.exp(4) * np.exp(-6) * np.exp(5),
(1, 1, 0): np.exp(-6) * np.exp(-1) * np.exp(-7),
(1, 1, 0, 1, 2, 1, 4): np.exp(-6) * np.exp(-1) * np.exp(-7) * np.exp(1),
(1, 2, 1): np.exp(-6) * np.exp(-1) * np.exp(-7) * np.exp(1) * np.exp(-5),
(1, 2, 2): np.exp(-6) * np.exp(4) * np.exp(-6) * np.exp(5) * np.exp(-3)
}
expected_alpha = {k: np.emath.log(v) for k, v in expected_alpha.items()}
test_model = _GeneralModel(state_machine, x, y)
x_dot_parameters = np.dot(x, parameters.T) # Pre-compute the dot product
actual_alpha = test_model._forward(x_dot_parameters)
actual_alpha = {k: v for k, v in actual_alpha.items()
if not np.isneginf(v)}
print(actual_alpha)
self.assertEqual(len(actual_alpha), len(expected_alpha))
        print()
for key in sorted(expected_alpha.keys()):
print(key, (expected_alpha[key]), (actual_alpha[key]))
self.assertEqual(actual_alpha[key], expected_alpha[key])
class TestAdjacentModel(unittest.TestCase):
def test_forward_connected(self):
classes = ['a', 'b']
parameters = np.array(range(-8, 8), dtype=np.float64).reshape((8, 2))
# parameters =
#0([[-8, -7],
#1 [-6, -5],
#2 [-4, -3],
#3 [-2, -1],
#4 [ 0, 1],
#5 [ 2, 3],
#6 [ 4, 5],
#7 [ 6, 7]])
x = np.array([[[0, 1],
[2, 1]],
[[0, 1],
[1, 0]]], dtype=np.float64)
y = 'a'
expected_alpha = {
(0, 0, 0): np.exp(-7),
(0, 0, 0, 0, 1, 0, 4): np.exp(-7) * np.exp(1),
(0, 0, 0, 1, 0, 0, 6): np.exp(-7) * np.exp(5),
(0, 0, 0, 1, 1, 0, 2): np.exp(-7) * np.exp(-4),
(0, 0, 1): np.exp(-5),
(0, 0, 1, 0, 1, 1, 5): np.exp(-5) * np.exp(7),
(0, 0, 1, 1, 0, 1, 7): np.exp(-5) * np.exp(7),
(0, 0, 1, 1, 1, 1, 3): np.exp(-5) * np.exp(-2),
(0, 1, 0): np.exp(-7) * np.exp(1) * np.exp(-23),
(0, 1, 0, 1, 1, 0, 6): np.exp(-7) * np.exp(1) * np.exp(-23) * np.exp(4),
(0, 1, 1): np.exp(-5) * np.exp(7) * np.exp(-17),
(0, 1, 1, 1, 1, 1, 7): np.exp(-5) * np.exp(7) * np.exp(-17) * np.exp(6),
(1, 0, 0): np.exp(-7) * np.exp(5) * np.exp(-7),
(1, 0, 0, 1, 1, 0, 4): np.exp(-7) * np.exp(5) * np.exp(-7) * np.exp(0),
(1, 0, 1): np.exp(-5) * np.exp(7) * np.exp(-5),
(1, 0, 1, 1, 1, 1, 5): np.exp(-5) * np.exp(7) * np.exp(-5) * np.exp(2),
(1, 1, 0): (np.exp(-11) + np.exp(-25) + np.exp(-9)) * np.exp(-8),
            (1, 1, 1): (np.exp(-1) + np.exp(-9) + np.exp(-7)) * np.exp(-6)
        }
'''
A script to compute the SAPT0 interaction energy without the Single-Exchange Approximation
Relevant equations can be found in the following papers:
<NAME>.; <NAME>.; <NAME>. Int. J. Quantum Chem. 1976, 10, 281-297.
Schaffer, R.; <NAME>. Theor. Chem. Acc. 2012, 131, 1235-1244.
Schaffer, R.; Jansen, G. Mol. Phys. 2013, 111, 2570-2584.
'''
__authors__ = "<NAME>"
__credits__ = ["<NAME>"]
__copyright__ = "(c) 2014-2017, The Psi4NumPy Developers"
__license__ = "BSD-3-Clause"
__date__ = "2018-01-09"
import psi4
import numpy as np
np.set_printoptions(precision=5, linewidth=200, threshold=2000, suppress=True)
from helper_SAPT import *
# Set Psi4 & NumPy Memory Options
psi4.set_memory('2 GB')
psi4.core.set_output_file('sapt0_no_S2.dat', False)
numpy_memory = 2
# Set molecule to dimer
dimer = psi4.geometry("""
O -0.066999140 0.000000000 1.494354740
H 0.815734270 0.000000000 1.865866390
H 0.068855100 0.000000000 0.539142770
--
O 0.062547750 0.000000000 -1.422632080
H -0.406965400 -0.760178410 -1.771744500
H -0.406965400 0.760178410 -1.771744500
symmetry c1
""")
psi4.set_options({'basis': 'jun-cc-pVDZ', 'e_convergence': 1e-8, 'd_convergence': 1e-8})
sapt = helper_SAPT(dimer, memory=8)
### Overlap Matrix and Inverse
S_a = np.concatenate((sapt.s('aa'), sapt.s('ab')), axis=1)
S_b = np.concatenate((sapt.s('ba'), sapt.s('bb')), axis=1)
S = np.concatenate((S_a, S_b), axis=0)
# S_{AA} S_{AB}
# S_{BA} S_{BB}
D = np.linalg.inv(S)
D_aa = D[:sapt.ndocc_A, :sapt.ndocc_A]
D_ab = D[:sapt.ndocc_A, sapt.ndocc_A:sapt.ndocc_A + sapt.ndocc_B]
D_ba = D[sapt.ndocc_A:sapt.ndocc_A + sapt.ndocc_B, :sapt.ndocc_A]
D_bb = D[sapt.ndocc_A:, sapt.ndocc_A:]
### E10 Electrostatics
Elst10 = 4 * np.einsum('abab', sapt.vt('abab'))
### Complete E10
v_abaa = sapt.v('abaa')
v_abab = sapt.v('abab')
v_abba = sapt.v('abba')
v_abbb = sapt.v('abbb')
# E10 Full
e1_full = sapt.nuc_rep # Nuclear Repulsion
e1_full += 2 * (np.einsum('aA,Aa->', sapt.potential('aa', 'B'), D_aa) +
np.einsum('ab,ba->', sapt.potential('ab', 'B'), D_ba)) # B potential
e1_full += 2 * (np.einsum('bB,Bb->', sapt.potential('bb', 'A'), D_bb) +
np.einsum('ba,ab->', sapt.potential('ba', 'A'), D_ab)) # A potential
e1_full += 4 * np.einsum('ijkl,ki,lj->', v_abaa, D_aa, D_ab) # Two electron part
e1_full += 4 * np.einsum('ijkl,ki,lj->', v_abab, D_aa, D_bb)
e1_full += 4 * np.einsum('ijkl,ki,lj->', v_abba, D_ba, D_ab)
e1_full += 4 * np.einsum('ijkl,ki,lj->', v_abbb, D_ba, D_bb)
e1_full += -2 * np.einsum('ijlk,ki,lj->', v_abaa, D_aa, D_ab)
e1_full += -2 * np.einsum('ijlk,ki,lj->', v_abba, D_aa, D_bb)
e1_full += -2 * np.einsum('ijlk,ki,lj->', v_abab, D_ba, D_ab)
e1_full += -2 * np.einsum('ijlk,ki,lj->', v_abbb, D_ba, D_bb)
# E10 Exchange
Exch10 = e1_full - Elst10
### E20 Induction and Exchange Induction
# E20 Induction
CPHF_ra, Ind20_ba = sapt.chf('B', ind=True)
CPHF_sb, Ind20_ab = sapt.chf('A', ind=True)
Ind20r = Ind20_ba + Ind20_ab
# E20 Induction Full
T_ar = np.einsum('ij,jk->ik', D_ab, sapt.s('br'))
T_br = np.einsum('ij,jk->ik', D_bb, sapt.s('br'))
T_as = np.einsum('ij,jk->ik', D_aa, sapt.s('as'))
T_bs = np.einsum('ij,jk->ik', D_ba, sapt.s('as'))
B_aa = sapt.potential('aa', 'B')
B_ab = sapt.potential('ab', 'B')
A_ba = sapt.potential('ba', 'A')
A_bb = sapt.potential('bb', 'A')
B_T_ar = np.einsum('ij,jk->ik', B_aa, T_ar) + np.einsum('ij,jk->ik', B_ab, T_br)
B_T_as = np.einsum('ij,jk->ik', B_aa, T_as) + np.einsum('ij,jk->ik', B_ab, T_bs)
A_T_br = np.einsum('ij,jk->ik', A_ba, T_ar) + np.einsum('ij,jk->ik', A_bb, T_br)
A_T_bs = np.einsum('ij,jk->ik', A_ba, T_as) + np.einsum('ij,jk->ik', A_bb, T_bs)
Bt_ar = sapt.potential('ar', 'B') - B_T_ar
Bt_as = sapt.potential('as', 'B') - B_T_as
At_br = sapt.potential('br', 'A') - A_T_br
At_bs = sapt.potential('bs', 'A') - A_T_bs
v_abaa = sapt.v('abaa')
v_abab = sapt.v('abab')
v_abba = sapt.v('abba')
v_abbb = sapt.v('abbb')
v_abra = sapt.v('abra')
v_abar = sapt.v('abar')
v_abrb = sapt.v('abrb')
v_abbr = sapt.v('abbr')
v_absa = sapt.v('absa')
v_abas = sapt.v('abas')
v_absb = sapt.v('absb')
v_abbs = sapt.v('abbs')
v_T_abra = np.einsum('ijkl,ka->ijal', v_abaa, T_ar) + np.einsum('ijkl,ka->ijal', v_abba, T_br)
v_T_abrb = np.einsum('ijkl,ka->ijal', v_abab, T_ar) + np.einsum('ijkl,ka->ijal', v_abbb, T_br)
v_T_abar = np.einsum('ijkl,la->ijka', v_abaa, T_ar) + np.einsum('ijkl,la->ijka', v_abab, T_br)
v_T_abbr = np.einsum('ijkl,la->ijka', v_abba, T_ar) + np.einsum('ijkl,la->ijka', v_abbb, T_br)
v_T_absa = np.einsum('ijkl,ka->ijal', v_abaa, T_as) + np.einsum('ijkl,ka->ijal', v_abba, T_bs)
v_T_absb = np.einsum('ijkl,ka->ijal', v_abab, T_as) + np.einsum('ijkl,ka->ijal', v_abbb, T_bs)
v_T_abas = np.einsum('ijkl,la->ijka', v_abaa, T_as) + np.einsum('ijkl,la->ijka', v_abab, T_bs)
v_T_abbs = np.einsum('ijkl,la->ijka', v_abba, T_as) + np.einsum('ijkl,la->ijka', v_abbb, T_bs)
vt_abra = v_abra - v_T_abra
vt_abar = v_abar - v_T_abar
vt_abrb = v_abrb - v_T_abrb
vt_abbr = v_abbr - v_T_abbr
vt_absa = v_absa - v_T_absa
vt_abas = v_abas - v_T_abas
vt_absb = v_absb - v_T_absb
vt_abbs = v_abbs - v_T_abbs
O_ar = 2 * np.einsum('kj,ik->ij', Bt_ar, D_aa) + 2 * np.einsum('kj,ik->ij', At_br, D_ab)
O_ar += 4 * np.einsum('ijkl,mi,lj->mk', vt_abra, D_aa, D_ab)
O_ar += 4 * np.einsum('ijkl,mi,lj->mk', vt_abrb, D_aa, D_bb)
O_ar -= 2 * np.einsum('ijkl,mj,li->mk', vt_abra, D_ab, D_aa)
O_ar -= 2 * np.einsum('ijkl,mj,li->mk', vt_abrb, D_ab, D_ba)
O_ar -= 2 * np.einsum('ijkl,mi,kj->ml', vt_abar, D_aa, D_ab)
O_ar -= 2 * np.einsum('ijkl,mi,kj->ml', vt_abbr, D_aa, D_bb)
O_ar += 4 * np.einsum('ijkl,mj,ki->ml', vt_abar, D_ab, D_aa)
O_ar += 4 * np.einsum('ijkl,mj,ki->ml', vt_abbr, D_ab, D_ba)
O_bs = 2 * np.einsum('kj,ik->ij', Bt_as, D_ba) + 2 * np.einsum('kj,ik->ij', At_bs, D_bb)
O_bs += 4 * np.einsum('ijkl,mj,ki->ml', vt_abas, D_bb, D_aa)
O_bs += 4 * np.einsum('ijkl,mj,ki->ml', vt_abbs, D_bb, D_ba)
O_bs -= 2 * np.einsum('ijkl,mi,kj->ml', vt_abas, D_ba, D_ab)
O_bs -= 2 * np.einsum('ijkl,mi,kj->ml', vt_abbs, D_ba, D_bb)
O_bs -= 2 * np.einsum('ijkl,mj,li->mk', vt_absa, D_bb, D_aa)
O_bs -= 2 * np.einsum('ijkl,mj,li->mk', vt_absb, D_bb, D_ba)
O_bs += 4 * np.einsum('ijkl,mi,lj->mk', vt_absa, D_ba, D_ab)
O_bs += 4 * np.einsum('ijkl,mi,lj->mk', vt_absb, D_ba, D_bb)
e2_ind_full = np.einsum('ar,ra->', O_ar, CPHF_ra) + np.einsum('bs,sb->', O_bs, CPHF_sb)
# E20 Exchange-Induction
ExchInd20r = e2_ind_full - Ind20r
### E20 Dispersion and Exchange-Dispersion
# E20 Dispersion
v_abrs = sapt.v('abrs')
v_rsab = sapt.v('rsab')
e_rsab = 1 / (-sapt.eps('r', dim=4) - sapt.eps('s', dim=3) + sapt.eps('a', dim=2) + sapt.eps('b'))
Disp20 = 4 * np.einsum('rsab,rsab,abrs->', e_rsab, v_rsab, v_abrs)
# E20 Dispersion Full. Several pieces already produced in E20 Induction Full.
v_T_abRs = np.einsum('ijkl,ka->ijal', v_abas, T_ar) + np.einsum('ijkl,ka->ijal', v_abbs, T_br)
v_T_absR = np.einsum('ijkl,la->ijka', v_absa, T_ar) + np.einsum('ijkl,la->ijka', v_absb, T_br)
v_T_abrS = np.einsum('ijkl,la->ijka', v_abra, T_as) + np.einsum('ijkl,la->ijka', v_abrb, T_bs)
v_T_abSr = np.einsum('ijkl,ka->ijal', v_abar, T_as) + np.einsum('ijkl,ka->ijal', v_abbr, T_bs)
import numpy as np
from sklearn.neighbors import NearestNeighbors
import cv2
def estimate_normals(p, k=20):
neigh = NearestNeighbors(n_neighbors=k)
neigh.fit(p)
distances, indices = neigh.kneighbors(return_distance=True)
dp = (p[indices] - p[:,None])
U, s, V = np.linalg.svd(dp.transpose(0,2,1))
nv = U[:, :, -1]
    return nv / np.linalg.norm(nv, axis=1, keepdims=True)  # normalise each normal to unit length
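# Illustrative sketch (not part of the original module): estimate normals for noisy
# points sampled along a straight line; the normals should be roughly perpendicular to it.
def _example_estimate_normals():
    t = np.linspace(0.0, 1.0, 100)
    pts = np.stack([t, 0.5 * t], axis=1)                  # points on the line y = 0.5 x
    pts += np.random.normal(scale=0.01, size=pts.shape)   # add a little noise
    normals = estimate_normals(pts, k=10)
    print(normals[:3])  # each row is (approximately) a unit normal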
def test_norm():
np.random.seed(3)
from matplotlib import pyplot as plt
pt_map = np.concatenate([get_line(n=33, s=1.0) for _ in range(3)], axis=0)
pt_map = np.random.normal(loc=pt_map, scale=0.05)
nv = estimate_normals(pt_map)
plt.plot(pt_map[:,0], pt_map[:,1], '.', label='map')
plt.quiver(
pt_map[:,0], pt_map[:,1],
nv[:,0], nv[:,1],
scale_units='xy',
angles='xy'
)
plt.gca().set_aspect('equal', 'datalim')
plt.legend()
plt.show()
def stable_subsample(p):
"""
Geometrically Stable ICP Subsampling with constraints analysis
Gelfand et al. 2003
http://www.people.vcu.edu/~dbandyop/pubh8472/Gelfand_SVC.pdf
https://graphics.stanford.edu/papers/stabicp/stabicp.pdf
"""
# TODO : implement
pass
class ICP():
def __init__(self):
pass
@staticmethod
def best_fit_transform(A, B):
'''
Calculates the least-squares best-fit transform that maps corresponding points A to B in m spatial dimensions
Input:
A: Naxm numpy array of corresponding points
B: Nbxm numpy array of corresponding points
Returns:
T: (m+1)x(m+1) homogeneous transformation matrix that maps A on to B
R: mxm rotation matrix
t: mx1 translation vector
'''
# assert A.shape == B.shape
# get number of dimensions
m = A.shape[1]
# translate points to their centroids
centroid_A = np.mean(A, axis=0)
centroid_B = np.mean(B, axis=0)
AA = A - centroid_A
BB = B - centroid_B
# rotation matrix
H = np.dot(AA.T, BB)
U, S, Vt = np.linalg.svd(H)
R = np.dot(Vt.T, U.T)
# special reflection case
if np.linalg.det(R) < 0:
Vt[m-1,:] *= -1
R = np.dot(Vt.T, U.T)
# translation
t = centroid_B.T - np.dot(R,centroid_A.T)
# homogeneous transformation
T = np.identity(m+1)
T[:m, :m] = R
T[:m, m] = t
return T, R, t
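    # Illustrative sketch (not part of the original class): recover a known 2D rigid
    # transform with best_fit_transform. The point set and transform are made up here.
    @staticmethod
    def _example_best_fit_transform():
        rng = np.random.RandomState(0)
        A = rng.rand(50, 2)
        theta = 0.3
        R_true = np.array([[np.cos(theta), -np.sin(theta)],
                           [np.sin(theta),  np.cos(theta)]])
        t_true = np.array([1.0, -2.0])
        B = A.dot(R_true.T) + t_true                  # each point b = R_true a + t_true
        T, R, t = ICP.best_fit_transform(A, B)
        print(np.allclose(R, R_true), np.allclose(t, t_true))  # expected: True True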
@staticmethod
def best_fit_transform_point_to_plane(src, dst):
# TODO : generalize to N-d cases?
# TODO : use neighborhood from prior computations?
nvec = estimate_normals(dst, k=20)
# construct according to
# https://gfx.cs.princeton.edu/proj/iccv05_course/iccv05_icp_gr.pdf
A_lhs = np.cross(src, nvec)[:,None]
A_rhs = nvec
Amat = np.concatenate([A_lhs, A_rhs], axis=1) # == Nx3
# == Experimental : Stability Analysis ==
#C = Amat.T.dot(Amat) # 3x3 cov-mat
#w,v = np.linalg.eig(C)
##c = w[0] / w[-1] # stability
#c = w[-1]
##print('w[-1] : {}'.format(w[-1]))
c = None
# =======================================
bvec = - ((src - dst)*(nvec)).sum(axis=-1)
tvec = np.linalg.pinv(Amat).dot(bvec) # == (dh, dx, dy)
R = Rmat( tvec[0] )
t = tvec[1:]
T = np.eye(3, dtype=np.float32)
T[:2, :2] = R
T[:2, 2] = t
return T, R, t, c
@staticmethod
def nearest_neighbor(src, dst):
'''
Find the nearest (Euclidean) neighbor in dst for each point in src
Input:
src: Naxm array of points
dst: Nbxm array of points
Output:
distances: Euclidean distances of the nearest neighbor
indices: dst indices of the nearest neighbor
'''
assert src.shape[1:] == dst.shape[1:]
neigh = NearestNeighbors(n_neighbors=1)
neigh.fit(dst)
distances, indices = neigh.kneighbors(src, return_distance=True)
return distances.ravel(), indices.ravel()
@staticmethod
def projected_neighbor(src, dst, origin):
"""
index-matching correspondences, according to Blais and Levine, '95
https://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=400574
"""
# TODO : implement
pass
@staticmethod
def icp(A, B, init_pose=None, max_iterations=20, tolerance=0.001):
'''
The Iterative Closest Point method: finds best-fit transform that maps points A on to points B
Input:
A: Naxm numpy array of source mD points
B: Nbxm numpy array of destination mD point
init_pose: (m+1)x(m+1) homogeneous transformation
max_iterations: exit algorithm after max_iterations
tolerance: convergence criteria
Output:
T: final homogeneous transformation that maps A on to B, T.A = B
distances: Euclidean distances (errors) of the nearest neighbor
i: number of iterations to converge
'''
# assert A.shape == B.shape
# get number of dimensions
ndim = A.shape[1]
nA, nB = A.shape[0], B.shape[0]
assert(A.shape[1:] == B.shape[1:])
# make points homogeneous, copy them to maintain the originals
src = np.ones((nA,ndim+1), dtype=np.float32)
        dst = np.ones((nB,ndim+1), dtype=np.float32)
import logging
import numpy as np
logger = logging.getLogger(__name__)
def _distance(Z, X, Y, epsilon):
""" Distance function """
Y = Y + epsilon
d = np.dot(X, Y) - np.dot(Z, np.log(Y))
return d
def _initialize_clusters(n_el, n_clusters):
""" Initialize cluster array """
cluster_idx = np.mod(np.arange(n_el), n_clusters)
return np.random.permutation(cluster_idx)
def _setup_cluster_matrix(n_clusters, cluster_idx):
""" Set cluster occupation matrix """
    return np.eye(n_clusters, dtype=bool)[cluster_idx]
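# Illustrative sketch (not part of the original module): shape check for the two helpers above.
def _example_cluster_helpers():
    idx = _initialize_clusters(n_el=10, n_clusters=3)           # random assignment of 10 elements
    occupation = _setup_cluster_matrix(3, idx)                  # boolean 10 x 3 occupation matrix
    print(idx.shape, occupation.shape, occupation.sum(axis=1))  # each row sums to 1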
def triclustering(Z, nclusters_row, nclusters_col, nclusters_bnd, errobj,
niters, epsilon, row_clusters_init=None,
col_clusters_init=None, bnd_clusters_init=None):
"""
Run the tri-clustering, Numpy-based implementation
:param Z: d x m x n data matrix
:param nclusters_row: number of row clusters
:param nclusters_col: number of column clusters
:param nclusters_bnd: number of band clusters
:param errobj: convergence threshold for the objective function
:param niters: maximum number of iterations
:param epsilon: numerical parameter, avoids zero arguments in log
:param row_clusters_init: initial row cluster assignment
:param col_clusters_init: initial column cluster assignment
:param bnd_clusters_init: initial band cluster assignment
:return: has converged, number of iterations performed, final row,
column, and band clustering, error value
"""
[d, m, n] = Z.shape
# Setup matrices to ..
Y = np.concatenate(Z, axis=1) # .. update rows
Y1 = np.concatenate(Z, axis=0) # .. update columns
Y2 = Z.reshape(d, m*n) # .. update bands
# Calculate average
Gavg = Y.mean()
# Initialize cluster assignments
row_clusters = row_clusters_init if row_clusters_init is not None \
else _initialize_clusters(m, nclusters_row)
col_clusters = col_clusters_init if col_clusters_init is not None \
else _initialize_clusters(n, nclusters_col)
bnd_clusters = bnd_clusters_init if bnd_clusters_init is not None \
else _initialize_clusters(d, nclusters_bnd)
x_clusters = np.repeat(bnd_clusters, n)
R = _setup_cluster_matrix(nclusters_row, row_clusters)
C = _setup_cluster_matrix(nclusters_col, col_clusters)
B = _setup_cluster_matrix(nclusters_bnd, bnd_clusters)
C1 = _setup_cluster_matrix(nclusters_bnd, x_clusters)
e, old_e = 2 * errobj, 0
s = 0
converged = False
while (not converged) & (s < niters):
logger.debug(f'Iteration # {s} ..')
# Obtain all the cluster based averages
CoCavg = (np.dot(np.dot(R.T, Y), C1) + Gavg * epsilon) / (
np.dot(np.dot(R.T, np.ones((m, n * d))), C1) + epsilon)
# CoCavg is such that for row cluster i and col cluster j:
# mask_row = row_clusters == i
# mask_bnd = bnd_clusters == j
# Z[mask_bnd][:, mask_row].mean(axis=2).mean() ~ CoCavg[i, j]
# Calculate distance based on row approximation
d2 = _distance(Y, np.ones((m, n * d)), np.dot(C1, CoCavg.T), epsilon)
# Assign to best row cluster
row_clusters = np.argmin(d2, axis=1)
R = _setup_cluster_matrix(nclusters_row, row_clusters)
        R1 = np.tile(R, (d, 1))
# -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import numpy as np
import numpy.polynomial.polynomial as poly
import warnings
def segment_ratios(nSegments, end_length_ratio=0.02):
"""
Discretize a borehole into segments of different lengths using a
geometrically expanding mesh from the provided end-length-ratio towards the
middle of the borehole. Eskilson (1987) [#Eskilson_1987]_ proposed that
segment lengths increase with a factor of sqrt(2) towards the middle of the
borehole. Here, the expansion factor is inferred from the provided number
of segments and end-length-ratio.
Parameters
----------
nSegments : int
Number of line segments along the borehole.
end_length_ratio: float, optional
The ratio of the height of the borehole that accounts for the end
segment lengths.
Default is 0.02.
Returns
-------
segment_ratios : array
The segment ratios along the borehole, from top to bottom.
Examples
--------
>>> gt.utilities.segment_ratios(5)
array([0.02, 0.12, 0.72, 0.12, 0.02])
References
----------
.. [#Eskilson_1987] <NAME>. (1987). Thermal analysis of heat
extraction boreholes. PhD Thesis. University of Lund, Department of
Mathematical Physics. Lund, Sweden.
"""
def is_even(n):
"Returns True if n is even."
return not(n & 0x1)
assert nSegments >= 1 and isinstance(nSegments, int), \
"The number of segments `nSegments` should be greater or equal " \
"to 1 and of type int."
assert nSegments <= 2 or 0. < end_length_ratio < 0.5 and \
isinstance(end_length_ratio, (float, np.floating)), \
"The end-length-ratio `end_length_ratio` should be greater than " \
"0, less than 0.5 (0 < end_length_ratio < 0.5) and of type float."
# If nSegments == 1, the only segment covers the entire length
if nSegments == 1:
return np.array([1.0])
# If nSegments == 2, split the borehole in two even segments
elif nSegments == 2:
if not np.abs(end_length_ratio - 0.5) < 1e-6:
warnings.warn('nSegments = 2 has been provided. The '
'`end_length_ratio` will be over-ridden. Two '
'segment ratios of [0.5, 0.5] will be returned.')
return np.array([0.5, 0.5])
# If nSegments == 3, then the middle segment is simply the remainder of the
# length
elif nSegments == 3:
segment_ratios = np.array(
[end_length_ratio,
1 - 2 * end_length_ratio,
end_length_ratio])
return segment_ratios
else:
pass
# If end_length_ratio == 1 / nSegments, then the discretization is
# uniform
if np.abs(1. - nSegments * end_length_ratio) < 1e-6:
return np.full(nSegments, 1 / nSegments)
# Find the required constant expansion ratio to fill the borehole length
# from the provided end-length-ratio inwards with the provided nSegments
if is_even(nSegments):
# The ratio is a root of the polynomial expression :
# 0 = (1 - 2 * end_length_ratio)
# - ratio * x
# + 2 * end_length_ratio * x**nz
nz = int(nSegments / 2)
coefs = np.zeros(nz + 1)
coefs[0] = 1 - 2 * end_length_ratio
coefs[1] = -1
coefs[-1] = 2 * end_length_ratio
# Roots of the polynomial
roots = poly.Polynomial(coefs).roots()
# Find the correct root
for x in roots:
if np.isreal(x):
factor = np.real(x)
dz = [factor**i * end_length_ratio for i in range(nz)]
segment_ratios = np.concatenate(
(dz,
dz[::-1]))
if (np.abs(1. - np.sum(segment_ratios)) < 1e-6
and np.all(segment_ratios > 0.)):
break
else:
raise RuntimeError(
f'utilities.segment_ratios failed to generate segment '
f'discretization for the given input parameters : '
f'nSegments={nSegments}, end_length_ratio={end_length_ratio}.')
else:
# The ratio is a root of the polynomial expression
# 0 = (1 - 2 * end_length_ratio) - ratio * x
# + end_length_ratio * x**nz
# + end_length_ratio * x**(nz + 1)
nz = int((nSegments - 1) / 2)
coefs = np.zeros(nz + 2)
coefs[0] = 1 - 2 * end_length_ratio
coefs[1] = -1
coefs[-2] = end_length_ratio
coefs[-1] = end_length_ratio
# Roots of the polynomial
roots = poly.Polynomial(coefs).roots()
# Find the correct root
for x in roots:
if np.isreal(x):
factor = np.real(x)
dz = [factor**i * end_length_ratio for i in range(nz)]
segment_ratios = np.concatenate(
(dz,
np.array([factor**nz]) * end_length_ratio,
dz[::-1]))
                if (np.abs(1. - np.sum(segment_ratios)) < 1e-6
                        and np.all(segment_ratios > 0.)):
                    break
import numpy as np
from ncephes.cprob import incbet
from numba import vectorize, float64
from significance_from_pvalue import significance_from_pvalue
# This decorator vectorize the function for fast execution
@vectorize([float64(float64, float64, float64)])
def z_bi_cephes(n_on, n_off, alpha):
tau = 1.0 / alpha
aa = n_on
bb = n_off + 1
xx = 1.0 / (1+tau)
# Checks to avoid Nan in some cases
if aa <= 0.0 or bb <= 0.0:
return 0.0
if xx <= 0.0:
return 0.0
if xx >= 1.0:
return 1.0
# I use the incbet from cephes instead of the scipy.special.betainc function because the latter has numerical
# problems in some instances and return Nans, while the incbet from Cephes is more robust
P_Bi = incbet(aa, bb, xx)
return significance_from_pvalue(P_Bi)
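# Illustrative sketch (not part of the original module): a single Z_Bi evaluation.
# The counts below are made-up numbers, not taken from any dataset.
def _example_z_bi():
    n_on = 140.0   # observed counts in the source region
    n_off = 100.0  # observed counts in the background region
    alpha = 1.0    # equal source/background exposure
    print(z_bi_cephes(n_on, n_off, alpha))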
def z_bi_vectorized(n, b, alpha):
"""
Use the estimator Z_Bi from Cousins et al. 2008 to compute the significance
:param n: observed counts (can be an array)
:param b: expected background counts (can be an array)
:param alpha: ratio of the source observation efficiency and background observation efficiency (must be the same for
all items in n and b)
:return: the significance (z score) for the measurement(s)
"""
n_ = np.array(n, dtype=float, ndmin=1)
b_ = np.array(b, dtype=float, ndmin=1)
assert n_.shape[0] == b_.shape[0], "n and b must have the same size"
alpha_ = np.array(alpha, dtype=float, ndmin=1)
if alpha_.shape[0] == 1:
alpha_ = np.array([alpha] * n_.shape[0])
else:
assert alpha_.shape[0] == n_.shape[0], "Alpha must be either a scalar or an array of the same length of n"
    # Assign sign depending on whether n_ >= alpha * b_
    sign = np.where(n_ >= alpha * b_, 1, -1)
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
This module contains scripts for image manipulation including denoising, enhancement and cropping functions
"""
import numpy as np
def uint16_2_uint8(vidstack):
""" Casts any input image to be of uint8 type.
Note: Though named uint16, converts any input to uint8. We are just implicitly assuming with biological imaging uint16 input.
Parameters
----------
vidstack : numpy array
an input image (any size) as a numpy array.
Returns
-------
uint8_img : numpy array
a numpy array of same size as input rescaled to be of uint8 (range [0,255]).
"""
uint8_img = np.uint8(255.*(vidstack/float(np.max(vidstack))))
return uint8_img
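# Illustrative sketch (not part of the original module): cast a synthetic uint16
# frame down to uint8.
def _example_uint16_2_uint8():
    frame16 = np.random.randint(0, 2**16, size=(64, 64)).astype(np.uint16)
    frame8 = uint16_2_uint8(frame16)
    print(frame8.dtype, frame8.min(), frame8.max())  # uint8, values within [0, 255]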
def rescale_intensity_stack(img_stack):
""" rescales the intensity of a series of images given as a (n_imgs x n_rows x n_cols x channels) tensor such that it is [0,255] for uint8 and [0,1] for floats.
Parameters
----------
img_stack : numpy array
an input image of 3 or 4 dimensions:
(n_imgs x n_rows x n_cols): gray-image stack
(n_imgs x n_rows x n_cols x 3): rgb-image stack
Returns
-------
img_stack_rescale : numpy array
intensity rescaled images with range [0,255] for uint8 and [0,1] for floats
"""
from skimage.exposure import rescale_intensity
img_stack_rescale = np.concatenate([rescale_intensity(im)[None,:] for im in img_stack], axis=0)
return img_stack_rescale
def resize_img_stack(img_stack, shape=(256,256)):
""" Resizes a series of images given as a (n_imgs x n_rows x n_cols x channels) tensor.
Parameters
----------
img_stack : numpy array
an input image of 3 or 4 dimensions:
(n_imgs x n_rows x n_cols): gray-image stack
(n_imgs x n_rows x n_cols x 3): rgb-image stack
shape : 2-tuple
(row_size, col_size) tuple giving the desired output image dimension
Returns
-------
img_stack_new : numpy array
a numpy array of resized input:
(n_imgs x shape[0] x shape[1]): gray-image stack
(n_imgs x shape[0] x shape[1] x 3): rgb-image stack
"""
from skimage.transform import resize
img_stack_new = []
    for im in img_stack:
        img_stack_new.append(resize(im, output_shape=shape)[None,:])
    img_stack_new = np.concatenate(img_stack_new, axis=0)
return img_stack_new
def denoise_zstack(zstack):
# from skimage.restoration import denoise_wavelet
from skimage.filters import gaussian
stacked = []
for z in zstack:
# stacked.append(denoise_wavelet(z)[None,:])
stacked.append(gaussian(z, sigma=3)[None,:])
return np.vstack(stacked)
def perona_malik(img, iterations=10, delta=0.14, kappa=15):
""" Runs Perona-Malik anisotropic on a given grayscale image.
Parameters
----------
img : numpy array
(n_rows x n_cols) grayscale image.
iterations : int
Number of iterations to run the diffusion process. Higher gives smoother output.
delta : float
This is the time step :math:`\Delta t` in the diffusion equation.
kappa : float
This regulates the sensitivity to edges in the Perona-Malik formulation.
Returns
-------
filtered_img : numpy array
The filtered output image. Same size as input of type float.
References
----------
.. [1] <NAME> et. al, "Anisotropic diffusion." Geometry-driven diffusion in computer vision. Springer, Dordrecht, 1994. 73-92.
"""
from scipy import misc, ndimage
import numpy as np
# center pixel distances
dx = 1
dy = 1
dd = np.sqrt(2)
u = img.copy()
# 2D finite difference windows
windows = [
np.array(
[[0, 1, 0], [0, -1, 0], [0, 0, 0]], np.float64
),
np.array(
[[0, 0, 0], [0, -1, 0], [0, 1, 0]], np.float64
),
np.array(
[[0, 0, 0], [0, -1, 1], [0, 0, 0]], np.float64
),
np.array(
[[0, 0, 0], [1, -1, 0], [0, 0, 0]], np.float64
),
np.array(
[[0, 0, 1], [0, -1, 0], [0, 0, 0]], np.float64
),
np.array(
[[0, 0, 0], [0, -1, 0], [0, 0, 1]], np.float64
),
np.array(
[[0, 0, 0], [0, -1, 0], [1, 0, 0]], np.float64
),
np.array(
[[1, 0, 0], [0, -1, 0], [0, 0, 0]], np.float64
),
]
for r in range(iterations):
# approximate gradients
nabla = [ ndimage.filters.convolve(u, w) for w in windows ]
# approximate diffusion function
diff = [ 1./(1 + (n/kappa)**2) for n in nabla]
# update image
terms = [diff[i]*nabla[i] for i in range(4)]
terms += [(1/(dd**2))*diff[i]*nabla[i] for i in range(4, 8)]
u = u + delta*(sum(terms))
# Kernel for Gradient in x-direction
Kx = np.array(
[[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]], np.int32
)
# Kernel for Gradient in y-direction
Ky = np.array(
[[1, 2, 1], [0, 0, 0], [-1, -2, -1]], np.int32
)
# Apply kernels to the image
Ix = ndimage.filters.convolve(u, Kx)
Iy = ndimage.filters.convolve(u, Ky)
# return norm of (Ix, Iy)
filtered_img = np.hypot(Ix, Iy)
return filtered_img
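# Illustrative sketch (not part of the original module): run the anisotropic diffusion
# on a noisy synthetic step image. Note the function returns the Sobel gradient magnitude
# of the diffused result, so the step edge should dominate the output. Parameter values
# here are arbitrary examples.
def _example_perona_malik():
    img = np.zeros((64, 64), dtype=np.float64)
    img[:, 32:] = 1.0                                   # vertical step edge
    img += np.random.normal(scale=0.1, size=img.shape)  # additive noise
    edges = perona_malik(img, iterations=10, delta=0.14, kappa=15)
    print(edges.shape, edges.max())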
def crop_patches_from_img(zstack, centroids, width=25):
""" Crop image patches from a given input image of given width at given (x,y) centroid coordinates.
Float centroids are first cast into ints.
Parameters
----------
zstack : numpy array
input (n_rows x n_cols x n_channels) numpy array.
centroids : numpy array or list
array of (y,x) centroid coordinates
width : int (odd)
size of cropped image patch is (width x width x n_channels)
Returns
-------
zs : numpy array
an array of cropped patches with length equal to the number of centroids.
"""
zs = []
for cent in centroids:
        cent = cent.astype(int)
if len(zstack.shape) == 3: # bug if this is a RGB image?
patch = zstack[:,cent[0]-width//2:cent[0]-width//2+width, cent[1]-width//2:cent[1]-width//2+width][None,:]
else:
patch = zstack[cent[0]-width//2:cent[0]-width//2+width, cent[1]-width//2:cent[1]-width//2+width][None,:]
zs.append(patch)
zs = np.concatenate(zs, axis=0)
return zs
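# Illustrative sketch (not part of the original module): crop two 25x25 patches
# from a synthetic grayscale frame at made-up centroid positions.
def _example_crop_patches():
    frame = np.random.rand(128, 128)
    centroids = np.array([[40.0, 40.0], [80.0, 100.0]])  # (y, x) coordinates
    patches = crop_patches_from_img(frame, centroids, width=25)
    print(patches.shape)  # (2, 25, 25)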
def filter_masks( mask, min_area=10, max_area=300, keep_centre=True, dist_thresh=0.5, min_max_area_cutoff=20):
""" filters binary masks to identify the primary large area of interest.
1. consideration of minimum and maximum area range.
2. preferential consideration of areas near the image centre
if the 2nd option is used, and the found area is smaller than an expected area (min_max_area_cut_off) we default to finding the largest area.
Parameters
----------
mask : bool numpy array
input (n_rows x n_cols) binary image.
min_area : int
minimum area of region of interest.
max_area : int
maximum area of region of interest.
keep_centre : bool
if True, preferentially consider the closest connected component to the image centre.
dist_thresh : float (0-1)
what is the upper bound on the distance between the centroid of the segmented area of interest candidate and the image centre given as a fraction of the image patch width.
min_max_area_cutoff : int
what is the minimum size below which we disregard the closest area to the image centre and fallback to the largest area. (only used if keep_centre=True)
Returns
-------
cand_mask : bool numpy array
either a blank image same size as input if nothing is detected or a refined binary mask with only one area of interest of same image size as the input.
"""
from skimage.measure import label, regionprops
from skimage.filters import gaussian
nrows, ncols = mask.shape
labelled = label(mask)
uniq_reg = np.unique(labelled)[1:]
mask_centre = np.array([nrows/2, ncols/2])
if len(uniq_reg) == 1:
area = np.sum(mask)
if (area > min_area) and (area < max_area):
return mask
else:
return np.zeros_like(mask)
else:
reg = regionprops(labelled)
uniq_reg = np.unique(labelled)[1:]
areas = []
centres = []
for re in reg:
y,x = re.centroid
areas.append(re.area)
centres.append([y,x])
if keep_centre:
centres = np.array(centres)
centre_dist = np.sqrt(np.sum((centres - mask_centre)**2, axis=1))
largest_reg = uniq_reg[np.argmin(centre_dist)]
min_dist = centre_dist[np.argmin(centre_dist)]
            if areas[np.argmin(centre_dist)] <= min_max_area_cutoff:  # check the area (not the label) of the closest region
# if too small then take the maximum area.
largest_reg = uniq_reg[np.argmax(areas)]
min_dist = centre_dist[np.argmax(areas)]
if min_dist >= dist_thresh * nrows:
cand_mask = np.zeros_like(mask)
return cand_mask
else:
cand_mask = labelled == largest_reg
if np.sum(cand_mask) > min_area and np.sum(cand_mask) < max_area:
return cand_mask
else:
cand_mask = np.zeros_like(cand_mask)
return cand_mask
else:
# check the maximum area.
largest_reg = uniq_reg[np.argmax(areas)]
cand_mask = labelled == largest_reg
if np.sum(cand_mask) > min_area and np.sum(cand_mask) < max_area:
return cand_mask
else:
cand_mask = np.zeros_like(cand_mask)
return cand_mask
def find_best_focus(zstack):
""" Finds the best focus slice by finding the z-slice that maximises the signal-to-noise ratio given by coefficient of variation (CV).
.. math:: CV = \sigma/\mu
where :math:`\sigma` and :math:`\mu` are the standard deviation and mean of the slice pixel intensities.
Parameters
----------
zstack : numpy array
an input (n_z x n_rows x n_cols) image.
Returns
-------
best_focus_slice : int
index of the z-slice of best focus.
"""
focus_vals = [np.var(z) / (np.mean(z)+1e-8) for z in zstack]
best_focus_slice = np.argmax(focus_vals)
return best_focus_slice
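# Illustrative sketch (not part of the original module): the sharpest slice of a
# synthetic stack should be picked out by find_best_focus.
def _example_find_best_focus():
    from skimage.filters import gaussian
    sharp = np.zeros((64, 64))
    sharp[24:40, 24:40] = 1.0
    stack = np.stack([gaussian(sharp, sigma=s) for s in (4, 2, 0.5, 3)], axis=0)
    print(find_best_focus(stack))  # expected: 2 (the least blurred slice)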
def find_best_focus_stacks(zstacks):
""" Finds the best focus slice of a series of z-slice stacks and constructs an array composed of the best-focus slices.
Parameters
----------
zstacks : numpy array
an input (n_stacks x n_z x n_rows x n_cols) image.
Returns
-------
best_focus_imgs : numpy array
a new numpy array (n_stacks x n_rows x n_cols) composed of the best-focus slices only.
best_focus_slices : numpy array
list of the index of the z-slice of best focus for each z-slice stack.
"""
best_focus_imgs = []
best_focus_slices = []
for zstack in zstacks:
best_slice = find_best_focus(zstack)
best_focus_img = zstack[best_slice]
best_focus_slices.append(best_slice) # the best slice is needed to provide the slice to retrieve in the original video.
best_focus_imgs.append(best_focus_img[None,:])
best_focus_imgs = np.concatenate(best_focus_imgs, axis=0)
best_focus_slices = np.hstack(best_focus_slices)
return best_focus_imgs, best_focus_slices
def locate_centroids_simple(mask):
""" Given an image, locates all centroids of connected components.
Note: This function inherently assumes a threshold of 0 and dilation with disk kernel of 3.
Parameters
----------
mask : numpy array
an input grayscale image.
Returns
-------
centroids : numpy array
an array of (y,x) coordinate pairs giving the peaks in the input image.
"""
from skimage.measure import label, regionprops
from skimage.morphology import binary_dilation, disk
centroids = []
mask_ = mask>0
mask_ = binary_dilation(mask_, disk(3))
labelled = label(mask_)
regions = regionprops(labelled)
for reg in regions:
y,x = reg.centroid
centroids.append([y,x])
centroids = np.array(centroids)
return centroids
def produce_valid_img_mask(img, min_I=0.1, max_area=1000, dilation=3):
""" Example Centriole images may have a ring of high pixel intensity of a much larger structure. This function is designed to identify such large continuous areas in order to filter detections.
Parameters
----------
img : numpy array
an input grayscale image.
min_I : float
the lower threshold for identifying the bright intensity regions. Assumes normalised intensities i.e. image intensities should be between [0,1]
max_area : integer
threshold for identifying 'large' region based on counting the number of pixels within the area.
dilation : int
size of the disk kernel used to postprocess and smoothen resulting binary segmentation.
Returns
-------
invalid_regions : numpy array
a binary image of either 0, 1 pixel intensities indicating the large regions of high intensity i.e. invalid centriole zones.
"""
from scipy.ndimage.morphology import binary_fill_holes
from skimage.filters import threshold_otsu
from skimage.measure import label, regionprops
from skimage.morphology import binary_dilation, disk
thresh = threshold_otsu(img) # determines an Ostu threshold.
if np.mean(img[img>thresh]) > min_I: # is there signal in the image? which is the lower / better threshold to use.
binary = img > thresh
else:
binary = img > min_I # resort to the manual guidance.
# connected component analysis to identify large areas of high intensity.
labelled = label(binary)
regions = regionprops(labelled)
# initialise the mask
invalid_regions = np.zeros(labelled.shape)
for i in range(len(regions)):
area = regions[i].area
# is it large?, if yes
if area > max_area:
invalid_regions[labelled==i+1] = 1 # mark areas that satisfy the check to background
invalid_regions = binary_dilation(binary_fill_holes(invalid_regions>0), disk(dilation)) # dilation is to smooth edges.
return invalid_regions
def filter_noise_centroids_detection(centroids, mask):
""" Given (y,x) coordinates and a binary mask of 0,1 of background regions, removes coordinates that lie in 1 areas (background).
Parameters
----------
centroids : numpy array
array of (y,x) 2D coordinates.
mask : numpy array
boolean or integer mask with values 1 or 0 denoting invalid and valid spatial regions respectively.
Returns
-------
filtered_centroids : numpy array
array of only valid (y,x) 2D coordinates that lie in mask==0 regions.
select : bool array
a binary array either 0 or 1 indicating which centroids are valid.
"""
    valid_mask = mask[centroids[:,0].astype(int), centroids[:,1].astype(int)] # (y,x) format
filtered_centroids = centroids[valid_mask==0]
select = valid_mask == 0
return filtered_centroids, select
def filter_border_centroids_detection(centroids, size, limits):
""" Given (y,x) coordinates and the size of the border, removes all coordinates that lie within the defined border.
Parameters
----------
centroids : numpy array
array of (y,x) 2D coordinates.
size : int
border size, how many pixels from the image edge do you consider the border. Isotropic border is assumed.
limits : tuple-like
(y_max, x_max) pair that define the maximum number of rows, columns respectively of the image.
Returns
-------
filtered_centroids : numpy array
array of only valid (y,x) 2D coordinates that do not lie in the border zone.
select : bool array
a binary array either 0 or 1 indicating which centroids lie within the border zone.
"""
select_y = np.logical_and(centroids[:,0] > size, centroids[:,0] < limits[0]-size)
select_x = np.logical_and(centroids[:,1] > size, centroids[:,1] < limits[1]-size)
filtered_centroids = centroids[ np.logical_and(select_x, select_y)]
select = np.logical_and(select_x, select_y)
return filtered_centroids, select
def filter_centrioles_BCV(centroids, max_slice_im, patch_size, CV_thresh=0.3):
""" Given (y,x) centroid coordinates, the maximum slice whole frame image filter detections based on signal-to-noise (SNR) ratio within local image crops.
The SNR measure used is the coefficient of variation, :math:`\sigma/\mu` where :math:`\sigma` and :math:`\mu` are the standard deviation and mean of the pixel intensities in the image patch.
Parameters
----------
centroids : numpy array
array of (y,x) 2D coordinates.
max_slice_im : numpy array
a grayscale 2D image
patch_size : int (odd)
width of the local area to crop around the given (y,x) centroid
CV_thresh : float
Signal-to-noise ratio cut-off where SNR is measured by CV i.e. centroids are kept if :math:`CV>` CV_thresh
Returns
-------
filtered_centroids : numpy array
array of only valid (y,x) 2D coordinates that have :math:`CV>` CV_thresh.
select : bool array
a binary array either 0 or 1 indicating which centroids have :math:`CV>` CV_thresh.
filtered_CV : array
array with the corresponding CV of filtered_centroids.
"""
# signal (biological coefficient of variation filter)
patches = crop_patches_from_img(max_slice_im, centroids, width=patch_size)
snr_patches = np.hstack([np.std(p)/np.mean(p) for p in patches])
# filter out the bogus detections?
select = snr_patches >= CV_thresh
filtered_centroids = centroids[select]
filtered_CV = snr_patches[select]
return filtered_centroids, select, filtered_CV
def remove_duplicate_centrioles(centroids, min_dist, lam=1000):
""" Removes duplicate (y,x) returning only one (y,x) instance given array of (y,x) centroid coordinates and a minimum distance threshold below which we call two (y,x) duplicates,
Parameters
----------
centroids : numpy array
array of (y,x) 2D coordinates.
min_dist : float
two (y,x) coordinates are a duplicate if the distance between them is less than mid_dist.
lam : float
a very large float, typically just a number larger than the image diagonal to exclude oneself in the pairwise pairing process of (y,x) coordinates.
Returns
-------
filtered_centroids : numpy array
array of unique (y,x) 2D coordinates.
select : bool array
a binary array either 0 or 1 indicating which centroids are taken as unique (y,x) instances.
"""
from sklearn.metrics.pairwise import pairwise_distances
dist_matrix = pairwise_distances(centroids)
dist_matrix += np.diag(lam*np.ones(len(centroids))) # prevent self interaction.
# initialisation.
select_filter = np.ones(len(centroids))
for i in range(len(dist_matrix)):
if select_filter[i] == 1:
dist = dist_matrix[i]
min_dist_arg = np.argmin(dist)
if dist[min_dist_arg] < min_dist:
select_filter[min_dist_arg] = 0 # set to false.
select_filter = select_filter>0 # make binary
filtered_centroids = centroids[select_filter>0]
return filtered_centroids, select_filter
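# Illustrative sketch (not part of the original module): two detections closer than
# min_dist collapse to a single (y, x) instance.
def _example_remove_duplicates():
    dets = np.array([[10.0, 10.0], [11.0, 10.5], [50.0, 50.0]])
    kept, keep_mask = remove_duplicate_centrioles(dets, min_dist=5)
    print(kept)  # the first two points are duplicates, so only two rows remain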
def detect_centrioles_in_img( zstack_img, size, aniso_params, patch_size, CV_thresh=0.3, tslice=0, is_img_slice=False, filter_border=True, filter_high_intensity_bg=True, remove_duplicates=True, filter_CV=True, separation=5, invert=False, minmass=10, minoverlap=10, bg_min_I=0.2, bg_max_area=1000, bg_dilation=3, bg_invalid_check=0.5, debug=False):
""" Primary function that wraps various functions in this module into one API call to detect centrioles given an image or image stack.
Parameters
----------
zstack_img : numpy array
either
i) a temporal z-stack (n_frames x n_z x n_rows x n_cols),
ii) a z-stack (n_z x n_rows x n_cols) or
iii) a grayscale image (n_rows x n_cols)
size : float
Approximate expected width of centriole to detect in image pixels.
aniso_params : Python dict
A Python dictionary giving the parameters for running the anisotropic filtering of Perona-Malik [1]_. This dictionary should contain the following keys: 'iterations', 'delta', kappa', see :meth:`image_fn.perona_malik`
patch_size : int
size of the local image patch to crop for filtering by CV if used, see :meth:`image_fn.filter_centrioles_BCV`
CV_thresh : float
coefficient of variation threshold for keeping high SNR detections as in :meth:`image_fn.filter_centrioles_BCV`
tslice : int
if tslice :math:`>=` 0, takes the corresponding time slice of the temporal z image and returns the max projection image over z. If zstack_img is just a zstack set tslice=-1.
is_img_slice : bool
Set True if input is a grayscale image.
filter_border : bool
If True, removes detections within a defined border zone
filter_high_intensity_bg : bool
If True, removes detections from high intensity background areas.
remove_duplicates : bool
If True, detects potential duplication of (y,x) locations that may by detecting the same centriole.
filter_CV : bool
If True, keeps only (y,x) centriole detections whose CV evaluated over a local image crop is greater than a given threshold.
separation : float
minimum separation distance in pixels between blobs.
invert : bool
if True, features of interest to detect are assumed darker than background, used in trackpy.locate, see [2]_
minmass : float
minimum integrated intensity values of detected blob used in trackpy.locate, see [2]_
minoverlap : float
distance threshold for calling duplicate (y,x) coordinates, see :meth:`image_fn.remove_duplicate_centrioles`
bg_min_I : float
intensity cut-off for defining 'high' intensity image areas as in :meth:`image_fn.produce_valid_img_mask`
bg_max_area : int
area cut-off for defining 'large' background areas as in :meth:`image_fn.produce_valid_img_mask`
bg_dilation : int
disk kernel size to dilate background noise mask as in :meth:`image_fn.produce_valid_img_mask`
bg_invalid_check : float
this is a check to prevent everything in the image being regarded as being invalid if one knows centrioles should be present. It is an upper bound on the total area of the invalid image area mask output of :meth:`image_fn.produce_valid_img_mask`.
debug: bool
if True, will produce all intermediate plotting graphics to help debugging.
Returns
-------
out_dict : Python dict
dictionary which collects the final output detections along with additional detection information.
The dictionary has the following structure
'centriole_centroids':
(y,x) coordinates of detected centrioles
'centriole_pos':
table of all centriole detections with associated intensity statistics
'max_proj_full_img':
maximum projection image
'max_proj_full_img_denoise':
anisotropically filtered maximum projection image
'background_mask':
background image area mask
'valid_detection_mask':
non-background image areas where centrioles are being detected.
'centriole_SNR':
associated :math:`CV` of detected centrioles
References
----------
.. [1] <NAME> et. al, "Anisotropic diffusion." Geometry-driven diffusion in computer vision. Springer, Dordrecht, 1994. 73-92.
.. [2] TrackPy Gaussian blob detection, http://soft-matter.github.io/trackpy/dev/generated/trackpy.locate.html.
"""
import trackpy as tp
from skimage.filters import threshold_otsu
from skimage.exposure import rescale_intensity
import visualization as viz
import pylab as plt
##########################################
#
# Handle different file inputs.
#
##########################################
if is_img_slice==False:
if tslice >= 0:
zstack_time_img = zstack_img[tslice].copy()
else:
zstack_time_img = zstack_img.copy()
# max projection to detect positions.
slice_img = zstack_time_img.max(axis=0)
else:
slice_img = zstack_img.copy() # nothing to do.
##########################################
# Anisotropic filtering to enhance signal to background.
##########################################
slice_img_denoise = rescale_intensity(perona_malik(rescale_intensity(slice_img/255.), iterations=aniso_params['iterations'], kappa=aniso_params['kappa'], delta=aniso_params['delta'])) # denoising, these parameters work well thus far for anisotropic diffusion.
##########################################
# Gaussian blob detection (through TrackPy)
##########################################
f = tp.locate(slice_img_denoise, size, separation=separation, invert=invert, minmass=minmass)
centriole_centroids = np.vstack([f['y'], f['x']]).T
if debug:
"""
Viz 1 : initial detection
"""
fig, ax = plt.subplots(figsize=(10,10))
plt.title('Initial Gaussian Blob Detection')
plt.imshow(slice_img, cmap='gray')
viz.draw_circles(np.vstack([f['y'], f['x']]).T, ax, radii=patch_size*1, col='r', lw=2)
ax.grid('off')
ax.axis('off')
plt.show()
##########################################
# Precompute some binary masks for later (optional) use
##########################################
valid_img_mask = produce_valid_img_mask(rescale_intensity(slice_img/255.), min_I=bg_min_I, max_area=bg_max_area, dilation=bg_dilation)
background_img = slice_img < threshold_otsu(slice_img)
"""
Optionally filter out border centriole detections
"""
if filter_border:
# filter the centroids ( don't care for those at the side. )
centriole_centroids, centriole_centroids_filter = filter_border_centroids_detection(centriole_centroids, size=size, limits = slice_img.shape)
f = f.iloc[centriole_centroids_filter]
f.index = np.arange(len(centriole_centroids)) # re-index.
if debug:
"""
Viz 2 : Filter border detections. Border is highlighted with a yellow transparency mask.
"""
border_mask = np.zeros((slice_img.shape[0], slice_img.shape[1], 3))
border_mask[-size:,:, 0] = 1; border_mask[-size:,:, 1] = 1
border_mask[:size, :, 0] = 1; border_mask[:size, :, 1] = 1
border_mask[:,:size, 0] = 1; border_mask[:,:size, 1] = 1
border_mask[:,-size:, 0] = 1; border_mask[:,-size:, 1] = 1
fig, ax = plt.subplots(figsize=(10,10))
plt.title('Filtering border detections')
plt.imshow(slice_img, cmap='gray')
plt.imshow(border_mask, alpha=0.6)
viz.draw_circles(np.vstack([f['y'], f['x']]).T, ax, radii=patch_size*1, col='r', lw=2)
ax.grid('off')
ax.axis('off')
plt.show()
"""
Optionally filter out centriole detections in spurious large intensity band zones.
"""
if filter_high_intensity_bg:
if np.sum(valid_img_mask) / float(np.product(valid_img_mask.shape)) < bg_invalid_check: # check that not all the image is being highlighted as invalid.
centriole_centroids, centriole_centroids_filter = filter_noise_centroids_detection(centriole_centroids, valid_img_mask)
f = f.iloc[centriole_centroids_filter]
f.index = np.arange(len(centriole_centroids)) # re-index.
valid_img_mask = np.abs(1-valid_img_mask) >0 # invert since the valid_img_mask is really a background.
else:
valid_img_mask = np.ones_like(valid_img_mask)
if debug:
"""
Viz 3 : Filter background detections in spurious high intensity zones.
"""
# compose a colour mask to highlight the invalid image regions
color_slice_valid_mask = np.zeros([valid_img_mask.shape[0], valid_img_mask.shape[1], 3]); color_slice_valid_mask[:,:,0] = np.logical_not(valid_img_mask); color_slice_valid_mask[:,:,1] = np.logical_not(valid_img_mask)
fig, ax = plt.subplots(figsize=(10,10))
plt.title('Filtering high intensity regions')
plt.imshow(slice_img, cmap='gray')
plt.imshow(color_slice_valid_mask, alpha=0.6)
viz.draw_circles(np.vstack([f['y'], f['x']]).T, ax, radii=patch_size*1, col='r', lw=2)
ax.grid('off')
ax.axis('off')
plt.show()
else:
valid_img_mask = np.ones_like(valid_img_mask)
"""
Remove duplicates.
"""
if remove_duplicates:
centriole_centroids, centriole_centroids_filter = remove_duplicate_centrioles(centriole_centroids, min_dist=minoverlap)
f = f.iloc[centriole_centroids_filter]
f.index = np.arange(len(centriole_centroids)) # re-index.
if debug:
"""
Viz 4 : Remove duplicate detections
"""
fig, ax = plt.subplots(figsize=(10,10))
plt.title('Removing duplicates by spatial proximity')
plt.imshow(slice_img, cmap='gray')
viz.draw_circles(np.vstack([f['y'], f['x']]).T, ax, radii=patch_size*1, col='r', lw=2)
ax.grid('off')
ax.axis('off')
plt.show()
"""
Remove low SNR.
"""
if filter_CV:
# signal (biological coefficient of variation filter) [helps reduce false positives.]
centriole_centroids, centriole_centroids_filter, centriole_SNR = filter_centrioles_BCV(centriole_centroids, slice_img, patch_size, CV_thresh=CV_thresh)
f = f.iloc[centriole_centroids_filter]
f.index = np.arange(len(centriole_centroids)) # re-index.
if debug:
"""
Viz 5 : Remove by CV
"""
# final detection with white boxes
fig, ax = plt.subplots(figsize=(10,10))
plt.title('Filtering by CV')
plt.imshow(slice_img, cmap='gray')
viz.draw_squares(np.vstack([f['y'], f['x']]).T, ax, width=patch_size, col='r', lw=2)
ax.grid('off')
ax.axis('off')
plt.show()
else:
centriole_SNR = 0 # not computed.
if debug:
"""
Viz 6 : Final detections
"""
# final detection with white boxes
fig, ax = plt.subplots(figsize=(10,10))
plt.title('Final Detections')
plt.imshow(slice_img, cmap='gray')
        viz.draw_squares(np.vstack([f['y'], f['x']]).T, ax, width=patch_size, col='w', lw=2)
# The MIT License (MIT)
# Copyright (c) 2022 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
# associated documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR
# THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# %%
import os
import json
from pathlib import Path
import numpy as np
import cv2
import matplotlib.pyplot as plt
import torch
from torch.utils.data import DataLoader
from torch.utils.data import Dataset as BaseDataset
import segmentation_models_pytorch as smp
# import albumentations as albu
from typing import List, Union
def img_scaler(img:np.array) -> np.array:
""" 0~255の範囲にスケーリングする
Args:
img (np.array): 入力画像
Returns:
np.array: スケーリング画像
Note:
画像である必要はないが、array全体でスケーリングされる点に注意。
"""
img = (img - img.min()) / (img.max() - img.min() + 1e-8)
img = (img * 255).astype(np.uint8)
return img
def canny(img:np.ndarray, low_threshold:int, high_threshold:int)-> np.ndarray:
""" Applies the Canny transform
Args:
        img (np.ndarray): grayscale image
low_threshold (int): minVal
high_threshold (int): maxVal
Returns:
        np.ndarray: edge image
Note: https://docs.opencv.org/4.5.5/da/d22/tutorial_py_canny.html
"""
return cv2.Canny(img, low_threshold, high_threshold)
def gaussian_blur(img, kernel_size):
"""Applies a Gaussian Noise kernel"""
return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)
def grayscale(img, is_bgr=False):
"""Applies the Grayscale transform
Args:
img (np.ndarray): image
is_bgr (bool, optional): whether the color representation is BGR. Defaults to False.
Note:
Images read from a file with OpenCV come in BGR order,
while plt processes images in RGB, so a conversion is needed.
"""
if is_bgr:
return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
else:
return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
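# Illustrative edge-detection pipeline combining the helpers above (parameter values are
# assumptions, not taken from the original code):
#   gray = grayscale(frame, is_bgr=True)              # frame read with cv2.imread -> BGR
#   blurred = gaussian_blur(gray, kernel_size=5)
#   edges = canny(blurred, low_threshold=50, high_threshold=150)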
def region_of_interest(img, vertices):
"""
Applies an image mask.
Only keeps the region of the image defined by the polygon
formed from `vertices`. The rest of the image is set to black.
"""
#defining a blank mask to start with
mask = np.zeros_like(img)
#defining a 3 channel or 1 channel color to fill the mask with depending on the input image
if len(img.shape) > 2:
channel_count = img.shape[2] # i.e. 3 or 4 depending on your image
ignore_mask_color = (255,) * channel_count
else:
ignore_mask_color = 255
#filling pixels inside the polygon defined by "vertices" with the fill color
cv2.fillPoly(mask, vertices, ignore_mask_color)
#returning the image only where mask pixels are nonzero
masked_image = cv2.bitwise_and(img, mask)
return masked_image
def calc_region(imshape, left_bottom_rate, left_top_rate, right_top_rate, right_bottom_rate):
""" マスク画像の4点を指定。画像は左上が原点
Args:
imshape (list): 元画像のshape。ただし[W, H]に変換している。
left_bottom_rate ([list]): 画像に対する、左下の点の割合
left_top_rate ([type]): 画像に対する、左上の点の割合
right_top_rate ([type]): 画像に対する、右上の点の割合
right_bottom_rate ([type]): 画像に対する、右下の点の割合
Returns:
(List[List[int]]): マスク領域の4点を示すリスト[[w1,h1],[w2,h2],[w3,h3],[w4,h4]]
"""
left_bottom = imshape * np.array(left_bottom_rate)
left_top = imshape * np.array(left_top_rate)
right_top = imshape * np.array(right_top_rate)
right_bottom = imshape * np.array(right_bottom_rate)
region_coord = [left_bottom, left_top, right_top, right_bottom] # the four corner points of the leading-vehicle region (clockwise from the bottom-left)
return region_coord
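# Illustrative sketch (the rate values are assumptions): calc_region returns float corner
# points, while cv2.fillPoly inside region_of_interest expects integer vertices, so the
# result is typically wrapped in an int32 array before masking:
#   imshape_wh = np.array([img.shape[1], img.shape[0]])
#   region = calc_region(imshape_wh, [0.1, 1.0], [0.45, 0.6], [0.55, 0.6], [0.9, 1.0])
#   masked = region_of_interest(edges, np.array([region], dtype=np.int32))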
# TODO: needs improvement
# Extends line segments. Should be made more efficient using append, np.poly1d, and np.polyfit(x, y, n).
# np.polyfit(x, y, n): n-th order polynomial regression of the two variables x and y
def draw_ext_lines(img, lines, color=[255, 0, 0], thickness=2):
d = 300 # required extend length
for line in lines:
for x1,y1,x2,y2 in line:
if (x2 != x1):
slope = (y2-y1)/(x2-x1)
sita = np.arctan(slope)
if (slope > 0): # branch depending on the sign of the slope
if (x2 > x1):
x3 = int(x2 + d*np.cos(sita))
y3 = int(y2 + d*np.sin(sita))
cv2.line(img, (x3, y3), (x1, y1), color, thickness)
else:
x3 = int(x1 + d*np.cos(sita))
y3 = int(y1 + d*np.sin(sita))
cv2.line(img, (x3, y3), (x2, y2), color, thickness)
elif (slope < 0):
if (x2 > x1):
x3 = int(x1 - d*np.cos(sita))
y3 = int(y1 - d*np.sin(sita))
cv2.line(img, (x3, y3), (x2, y2), color, thickness)
else:
x3 = int(x2 - d*np.cos(sita))
y3 = int(y2 - d*np.sin(sita))
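# (By symmetry with the slope > 0 branch, the extended segment would then be drawn back
# to the opposite endpoint, e.g. cv2.line(img, (x3, y3), (x1, y1), color, thickness).)
# Sketch of the np.polyfit/np.poly1d refactor suggested in the TODO above the function
# (an assumption, not the original design; handles non-vertical segments only):
#   a, b = np.polyfit([x1, x2], [y1, y2], 1)        # fit y = a*x + b through the segment
#   x3 = int(x2 + np.sign(x2 - x1) * d / np.sqrt(1 + a ** 2))
#   y3 = int(np.poly1d([a, b])(x3))
#   cv2.line(img, (x3, y3), (x1, y1), color, thickness)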
import numpy as np
import random
from numpy.core.fromnumeric import transpose
# 1.Matrix Creation
dim = int(input("Enter Dimension of matrix :"))
matrix = np.random.randint(10, size=(dim, dim))
# 2.Matrix inversion
inverse = np.linalg.inv(matrix)
print ("Inverse matrix : %s" %inverse)
# 3.dot Product won't exactly yield I
I = np.dot(matrix, inverse)
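# Illustrative check: the product matches the identity only up to floating-point error,
# which np.allclose makes explicit.
print("Close to identity:", np.allclose(I, np.eye(dim)))
print("Max deviation from I:", np.abs(I - np.eye(dim)).max())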
from datetime import datetime, timedelta
from io import StringIO
import re
import sys
import numpy as np
import pytest
from pandas._libs.tslib import iNaT
from pandas.compat import PYPY
from pandas.compat.numpy import np_array_datetime64_compat
from pandas.core.dtypes.common import (
is_datetime64_dtype,
is_datetime64tz_dtype,
is_object_dtype,
is_timedelta64_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.dtypes import DatetimeTZDtype
import pandas as pd
from pandas import (
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
Interval,
IntervalIndex,
PeriodIndex,
Series,
Timedelta,
TimedeltaIndex,
Timestamp,
)
from pandas.core.accessor import PandasDelegate
from pandas.core.arrays import DatetimeArray, PandasArray, TimedeltaArray
from pandas.core.base import NoNewAttributesMixin, PandasObject
from pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin
import pandas.util.testing as tm
class CheckStringMixin:
def test_string_methods_dont_fail(self):
repr(self.container)
str(self.container)
bytes(self.container)
def test_tricky_container(self):
if not hasattr(self, "unicode_container"):
pytest.skip("Need unicode_container to test with this")
repr(self.unicode_container)
str(self.unicode_container)
class CheckImmutable:
mutable_regex = re.compile("does not support mutable operations")
def check_mutable_error(self, *args, **kwargs):
# Pass whatever function you normally would to pytest.raises
# (after the Exception kind).
with pytest.raises(TypeError):
self.mutable_regex(*args, **kwargs)
def test_no_mutable_funcs(self):
def setitem():
self.container[0] = 5
self.check_mutable_error(setitem)
def setslice():
self.container[1:2] = 3
self.check_mutable_error(setslice)
def delitem():
del self.container[0]
self.check_mutable_error(delitem)
def delslice():
del self.container[0:3]
self.check_mutable_error(delslice)
mutable_methods = getattr(self, "mutable_methods", [])
for meth in mutable_methods:
self.check_mutable_error(getattr(self.container, meth))
def test_slicing_maintains_type(self):
result = self.container[1:2]
expected = self.lst[1:2]
self.check_result(result, expected)
def check_result(self, result, expected, klass=None):
klass = klass or self.klass
assert isinstance(result, klass)
assert result == expected
class TestPandasDelegate:
class Delegator:
_properties = ["foo"]
_methods = ["bar"]
def _set_foo(self, value):
self.foo = value
def _get_foo(self):
return self.foo
foo = property(_get_foo, _set_foo, doc="foo property")
def bar(self, *args, **kwargs):
""" a test bar method """
pass
class Delegate(PandasDelegate, PandasObject):
def __init__(self, obj):
self.obj = obj
def setup_method(self, method):
pass
def test_invalid_delegation(self):
# these show that in order for the delegation to work
# the _delegate_* methods need to be overridden to not raise
# a TypeError
self.Delegate._add_delegate_accessors(
delegate=self.Delegator,
accessors=self.Delegator._properties,
typ="property",
)
self.Delegate._add_delegate_accessors(
delegate=self.Delegator, accessors=self.Delegator._methods, typ="method"
)
delegate = self.Delegate(self.Delegator())
with pytest.raises(TypeError):
delegate.foo
with pytest.raises(TypeError):
delegate.foo = 5
with pytest.raises(TypeError):
delegate.foo()
@pytest.mark.skipif(PYPY, reason="not relevant for PyPy")
def test_memory_usage(self):
# Delegate does not implement memory_usage.
# Check that we fall back to in-built `__sizeof__`
# GH 12924
delegate = self.Delegate(self.Delegator())
sys.getsizeof(delegate)
class Ops:
def _allow_na_ops(self, obj):
"""Whether to skip test cases including NaN"""
if (isinstance(obj, Index) and obj.is_boolean()) or not obj._can_hold_na:
# don't test boolean / integer dtypes
return False
return True
def setup_method(self, method):
self.bool_index = tm.makeBoolIndex(10, name="a")
self.int_index = tm.makeIntIndex(10, name="a")
self.float_index = tm.makeFloatIndex(10, name="a")
self.dt_index = tm.makeDateIndex(10, name="a")
self.dt_tz_index = tm.makeDateIndex(10, name="a").tz_localize(tz="US/Eastern")
self.period_index = tm.makePeriodIndex(10, name="a")
self.string_index = tm.makeStringIndex(10, name="a")
self.unicode_index = tm.makeUnicodeIndex(10, name="a")
arr = np.random.randn(10)
self.bool_series = Series(arr, index=self.bool_index, name="a")
self.int_series = Series(arr, index=self.int_index, name="a")
self.float_series = Series(arr, index=self.float_index, name="a")
self.dt_series = Series(arr, index=self.dt_index, name="a")
self.dt_tz_series = self.dt_tz_index.to_series(keep_tz=True)
self.period_series = Series(arr, index=self.period_index, name="a")
self.string_series = Series(arr, index=self.string_index, name="a")
self.unicode_series = Series(arr, index=self.unicode_index, name="a")
types = ["bool", "int", "float", "dt", "dt_tz", "period", "string", "unicode"]
self.indexes = [getattr(self, "{}_index".format(t)) for t in types]
self.series = [getattr(self, "{}_series".format(t)) for t in types]
# To test narrow dtypes, we use narrower *data* elements, not *index* elements
index = self.int_index
self.float32_series = Series(arr.astype(np.float32), index=index, name="a")
arr_int = np.random.choice(10, size=10, replace=False)
self.int8_series = Series(arr_int.astype(np.int8), index=index, name="a")
self.int16_series = Series(arr_int.astype(np.int16), index=index, name="a")
self.int32_series = Series(arr_int.astype(np.int32), index=index, name="a")
self.uint8_series = Series(arr_int.astype(np.uint8), index=index, name="a")
self.uint16_series = Series(arr_int.astype(np.uint16), index=index, name="a")
self.uint32_series = Series(arr_int.astype(np.uint32), index=index, name="a")
nrw_types = ["float32", "int8", "int16", "int32", "uint8", "uint16", "uint32"]
self.narrow_series = [getattr(self, "{}_series".format(t)) for t in nrw_types]
self.objs = self.indexes + self.series + self.narrow_series
def check_ops_properties(self, props, filter=None, ignore_failures=False):
for op in props:
for o in self.is_valid_objs:
# if a filter, skip if it doesn't match
if filter is not None:
filt = o.index if isinstance(o, Series) else o
if not filter(filt):
continue
try:
if isinstance(o, Series):
expected = Series(getattr(o.index, op), index=o.index, name="a")
else:
expected = getattr(o, op)
except (AttributeError):
if ignore_failures:
continue
result = getattr(o, op)
# these could be series, arrays or scalars
if isinstance(result, Series) and isinstance(expected, Series):
tm.assert_series_equal(result, expected)
elif isinstance(result, Index) and isinstance(expected, Index):
tm.assert_index_equal(result, expected)
elif isinstance(result, np.ndarray) and isinstance(
expected, np.ndarray
):
tm.assert_numpy_array_equal(result, expected)
else:
assert result == expected
# freq raises AttributeError on an Int64Index because its not
# defined we mostly care about Series here anyhow
if not ignore_failures:
for o in self.not_valid_objs:
# an object that is datetimelike will raise a TypeError,
# otherwise an AttributeError
err = AttributeError
if issubclass(type(o), DatetimeIndexOpsMixin):
err = TypeError
with pytest.raises(err):
getattr(o, op)
@pytest.mark.parametrize("klass", [Series, DataFrame])
def test_binary_ops_docs(self, klass):
op_map = {
"add": "+",
"sub": "-",
"mul": "*",
"mod": "%",
"pow": "**",
"truediv": "/",
"floordiv": "//",
}
for op_name in op_map:
operand1 = klass.__name__.lower()
operand2 = "other"
op = op_map[op_name]
expected_str = " ".join([operand1, op, operand2])
assert expected_str in getattr(klass, op_name).__doc__
# reverse version of the binary ops
expected_str = " ".join([operand2, op, operand1])
assert expected_str in getattr(klass, "r" + op_name).__doc__
class TestIndexOps(Ops):
def setup_method(self, method):
super().setup_method(method)
self.is_valid_objs = self.objs
self.not_valid_objs = []
def test_none_comparison(self):
# bug brought up by #1079
# changed from TypeError in 0.17.0
for o in self.is_valid_objs:
if isinstance(o, Series):
o[0] = np.nan
# noinspection PyComparisonWithNone
result = o == None # noqa
assert not result.iat[0]
assert not result.iat[1]
# noinspection PyComparisonWithNone
result = o != None # noqa
assert result.iat[0]
assert result.iat[1]
result = None == o # noqa
assert not result.iat[0]
assert not result.iat[1]
result = None != o # noqa
assert result.iat[0]
assert result.iat[1]
if is_datetime64_dtype(o) or is_datetime64tz_dtype(o):
# Following DatetimeIndex (and Timestamp) convention,
# inequality comparisons with Series[datetime64] raise
with pytest.raises(TypeError):
None > o
with pytest.raises(TypeError):
o > None
else:
result = None > o
assert not result.iat[0]
assert not result.iat[1]
result = o < None
assert not result.iat[0]
assert not result.iat[1]
def test_ndarray_compat_properties(self):
for o in self.objs:
# Check that we work.
for p in ["shape", "dtype", "T", "nbytes"]:
assert getattr(o, p, None) is not None
# deprecated properties
for p in ["flags", "strides", "itemsize"]:
with tm.assert_produces_warning(FutureWarning):
assert getattr(o, p, None) is not None
with tm.assert_produces_warning(FutureWarning):
assert hasattr(o, "base")
# If we have a datetime-like dtype then needs a view to work
# but the user is responsible for that
try:
with tm.assert_produces_warning(FutureWarning):
assert o.data is not None
except ValueError:
pass
with pytest.raises(ValueError):
with tm.assert_produces_warning(FutureWarning):
o.item() # len > 1
assert o.ndim == 1
assert o.size == len(o)
with tm.assert_produces_warning(FutureWarning):
assert Index([1]).item() == 1
assert Series([1]).item() == 1
def test_value_counts_unique_nunique(self):
for orig in self.objs:
o = orig.copy()
klass = type(o)
values = o._values
if isinstance(values, Index):
# reset name not to affect latter process
values.name = None
# create repeated values, 'n'th element is repeated by n+1 times
# skip boolean, because it only has 2 values at most
if isinstance(o, Index) and o.is_boolean():
continue
elif isinstance(o, Index):
expected_index = Index(o[::-1])
expected_index.name = None
o = o.repeat(range(1, len(o) + 1))
o.name = "a"
else:
expected_index = Index(values[::-1])
idx = o.index.repeat(range(1, len(o) + 1))
# take-based repeat
indices = np.repeat(np.arange(len(o)), range(1, len(o) + 1))
rep = values.take(indices)
o = klass(rep, index=idx, name="a")
# check values has the same dtype as the original
assert o.dtype == orig.dtype
expected_s = Series(
range(10, 0, -1), index=expected_index, dtype="int64", name="a"
)
result = o.value_counts()
tm.assert_series_equal(result, expected_s)
assert result.index.name is None
assert result.name == "a"
result = o.unique()
if isinstance(o, Index):
assert isinstance(result, o.__class__)
tm.assert_index_equal(result, orig)
assert result.dtype == orig.dtype
elif is_datetime64tz_dtype(o):
# datetimetz Series returns array of Timestamp
assert result[0] == orig[0]
for r in result:
assert isinstance(r, Timestamp)
tm.assert_numpy_array_equal(
result.astype(object), orig._values.astype(object)
)
else:
tm.assert_numpy_array_equal(result, orig.values)
assert result.dtype == orig.dtype
assert o.nunique() == len(np.unique(o.values))
@pytest.mark.parametrize("null_obj", [np.nan, None])
def test_value_counts_unique_nunique_null(self, null_obj):
for orig in self.objs:
o = orig.copy()
klass = type(o)
values = o._ndarray_values
if not self._allow_na_ops(o):
continue
# special assign to the numpy array
if is_datetime64tz_dtype(o):
if isinstance(o, DatetimeIndex):
v = o.asi8
v[0:2] = iNaT
values = o._shallow_copy(v)
else:
o = o.copy()
o[0:2] = pd.NaT
values = o._values
elif needs_i8_conversion(o):
values[0:2] = iNaT
values = o._shallow_copy(values)
else:
values[0:2] = null_obj
# check values has the same dtype as the original
assert values.dtype == o.dtype
# create repeated values, 'n'th element is repeated by n+1
# times
if isinstance(o, (DatetimeIndex, PeriodIndex)):
expected_index = o.copy()
expected_index.name = None
# attach name to klass
o = klass(values.repeat(range(1, len(o) + 1)))
o.name = "a"
else:
if isinstance(o, DatetimeIndex):
expected_index = orig._values._shallow_copy(values)
else:
expected_index = Index(values)
expected_index.name = None
o = o.repeat(range(1, len(o) + 1))
o.name = "a"
# check values has the same dtype as the original
assert o.dtype == orig.dtype
# check values correctly have NaN
nanloc = np.zeros(len(o), dtype=np.bool)
nanloc[:3] = True
if isinstance(o, Index):
tm.assert_numpy_array_equal(pd.isna(o), nanloc)
else:
exp = Series(nanloc, o.index, name="a")
tm.assert_series_equal(pd.isna(o), exp)
expected_s_na = Series(
list(range(10, 2, -1)) + [3],
index=expected_index[9:0:-1],
dtype="int64",
name="a",
)
expected_s = Series(
list(range(10, 2, -1)),
index=expected_index[9:1:-1],
dtype="int64",
name="a",
)
result_s_na = o.value_counts(dropna=False)
tm.assert_series_equal(result_s_na, expected_s_na)
assert result_s_na.index.name is None
assert result_s_na.name == "a"
result_s = o.value_counts()
tm.assert_series_equal(o.value_counts(), expected_s)
assert result_s.index.name is None
assert result_s.name == "a"
result = o.unique()
if isinstance(o, Index):
tm.assert_index_equal(result, Index(values[1:], name="a"))
elif is_datetime64tz_dtype(o):
# unable to compare NaT / nan
tm.assert_extension_array_equal(result[1:], values[2:])
assert result[0] is pd.NaT
else:
tm.assert_numpy_array_equal(result[1:], values[2:])
assert pd.isna(result[0])
assert result.dtype == orig.dtype
assert o.nunique() == 8
assert o.nunique(dropna=False) == 9
@pytest.mark.parametrize("klass", [Index, Series])
def test_value_counts_inferred(self, klass):
s_values = ["a", "b", "b", "b", "b", "c", "d", "d", "a", "a"]
s = klass(s_values)
expected = Series([4, 3, 2, 1], index=["b", "a", "d", "c"])
tm.assert_series_equal(s.value_counts(), expected)
if isinstance(s, Index):
exp = Index(np.unique(np.array(s_values, dtype=np.object_)))
tm.assert_index_equal(s.unique(), exp)
else:
exp = np.unique(np.array(s_values, dtype=np.object_))
tm.assert_numpy_array_equal(s.unique(), exp)
assert s.nunique() == 4
# don't sort, have to sort after the fact as not sorting is
# platform-dep
hist = s.value_counts(sort=False).sort_values()
expected = Series([3, 1, 4, 2], index=list("acbd")).sort_values()
tm.assert_series_equal(hist, expected)
# sort ascending
hist = s.value_counts(ascending=True)
expected = Series([1, 2, 3, 4], index=list("cdab"))
tm.assert_series_equal(hist, expected)
# relative histogram.
hist = s.value_counts(normalize=True)
expected = Series([0.4, 0.3, 0.2, 0.1], index=["b", "a", "d", "c"])
tm.assert_series_equal(hist, expected)
@pytest.mark.parametrize("klass", [Index, Series])
def test_value_counts_bins(self, klass):
s_values = ["a", "b", "b", "b", "b", "c", "d", "d", "a", "a"]
s = klass(s_values)
# bins
with pytest.raises(TypeError):
s.value_counts(bins=1)
s1 = Series([1, 1, 2, 3])
res1 = s1.value_counts(bins=1)
exp1 = Series({Interval(0.997, 3.0): 4})
tm.assert_series_equal(res1, exp1)
res1n = s1.value_counts(bins=1, normalize=True)
exp1n = Series({Interval(0.997, 3.0): 1.0})
tm.assert_series_equal(res1n, exp1n)
if isinstance(s1, Index):
tm.assert_index_equal(s1.unique(), Index([1, 2, 3]))
else:
exp = np.array([1, 2, 3], dtype=np.int64)
tm.assert_numpy_array_equal(s1.unique(), exp)
assert s1.nunique() == 3
# these return the same
res4 = s1.value_counts(bins=4, dropna=True)
intervals = IntervalIndex.from_breaks([0.997, 1.5, 2.0, 2.5, 3.0])
exp4 = Series([2, 1, 1, 0], index=intervals.take([0, 3, 1, 2]))
tm.assert_series_equal(res4, exp4)
res4 = s1.value_counts(bins=4, dropna=False)
intervals = IntervalIndex.from_breaks([0.997, 1.5, 2.0, 2.5, 3.0])
exp4 = Series([2, 1, 1, 0], index=intervals.take([0, 3, 1, 2]))
tm.assert_series_equal(res4, exp4)
res4n = s1.value_counts(bins=4, normalize=True)
exp4n = Series([0.5, 0.25, 0.25, 0], index=intervals.take([0, 3, 1, 2]))
tm.assert_series_equal(res4n, exp4n)
# handle NA's properly
s_values = ["a", "b", "b", "b", np.nan, np.nan, "d", "d", "a", "a", "b"]
s = klass(s_values)
expected = Series([4, 3, 2], index=["b", "a", "d"])
tm.assert_series_equal(s.value_counts(), expected)
if isinstance(s, Index):
exp = Index(["a", "b", np.nan, "d"])
tm.assert_index_equal(s.unique(), exp)
else:
exp = np.array(["a", "b", np.nan, "d"], dtype=object)
tm.assert_numpy_array_equal(s.unique(), exp)
assert s.nunique() == 3
s = klass({})
expected = Series([], dtype=np.int64)
tm.assert_series_equal(s.value_counts(), expected, check_index_type=False)
# returned dtype differs depending on original
if isinstance(s, Index):
tm.assert_index_equal(s.unique(), Index([]), exact=False)
else:
tm.assert_numpy_array_equal(s.unique(), np.array([]), check_dtype=False)
assert s.nunique() == 0
@pytest.mark.parametrize("klass", [Index, Series])
def test_value_counts_datetime64(self, klass):
# GH 3002, datetime64[ns]
# don't test names though
txt = "\n".join(
[
"xxyyzz20100101PIE",
"xxyyzz20100101GUM",
"xxyyzz20100101EGG",
"xxyyww20090101EGG",
"foofoo20080909PIE",
"foofoo20080909GUM",
]
)
f = StringIO(txt)
df = pd.read_fwf(
f, widths=[6, 8, 3], names=["person_id", "dt", "food"], parse_dates=["dt"]
)
s = klass(df["dt"].copy())
s.name = None
idx = pd.to_datetime(
["2010-01-01 00:00:00", "2008-09-09 00:00:00", "2009-01-01 00:00:00"]
)
expected_s = Series([3, 2, 1], index=idx)
tm.assert_series_equal(s.value_counts(), expected_s)
expected = np_array_datetime64_compat(
["2010-01-01 00:00:00", "2009-01-01 00:00:00", "2008-09-09 00:00:00"],
dtype="datetime64[ns]",
)
if isinstance(s, Index):
tm.assert_index_equal(s.unique(), DatetimeIndex(expected))
else:
tm.assert_numpy_array_equal(s.unique(), expected)
assert s.nunique() == 3
# with NaT
s = df["dt"].copy()
s = klass(list(s.values) + [pd.NaT])
result = s.value_counts()
assert result.index.dtype == "datetime64[ns]"
tm.assert_series_equal(result, expected_s)
result = s.value_counts(dropna=False)
expected_s[pd.NaT] = 1
tm.assert_series_equal(result, expected_s)
unique = s.unique()
assert unique.dtype == "datetime64[ns]"
# numpy_array_equal cannot compare pd.NaT
if isinstance(s, Index):
exp_idx = DatetimeIndex(expected.tolist() + [pd.NaT])
tm.assert_index_equal(unique, exp_idx)
else:
tm.assert_numpy_array_equal(unique[:3], expected)
assert pd.isna(unique[3])
assert s.nunique() == 3
assert s.nunique(dropna=False) == 4
# timedelta64[ns]
td = df.dt - df.dt + timedelta(1)
td = klass(td, name="dt")
result = td.value_counts()
expected_s = Series([6], index=[Timedelta("1day")], name="dt")
tm.assert_series_equal(result, expected_s)
expected = TimedeltaIndex(["1 days"], name="dt")
if isinstance(td, Index):
tm.assert_index_equal(td.unique(), expected)
else:
tm.assert_numpy_array_equal(td.unique(), expected.values)
td2 = timedelta(1) + (df.dt - df.dt)
td2 = klass(td2, name="dt")
result2 = td2.value_counts()
tm.assert_series_equal(result2, expected_s)
def test_factorize(self):
for orig in self.objs:
o = orig.copy()
if isinstance(o, Index) and o.is_boolean():
exp_arr = np.array([0, 1] + [0] * 8, dtype=np.intp)
exp_uniques = o
exp_uniques = Index([False, True])
else:
exp_arr = np.array(range(len(o)), dtype=np.intp)
exp_uniques = o
codes, uniques = o.factorize()
tm.assert_numpy_array_equal(codes, exp_arr)
if isinstance(o, Series):
tm.assert_index_equal(uniques, Index(orig), check_names=False)
else:
# factorize explicitly resets name
tm.assert_index_equal(uniques, exp_uniques, check_names=False)
def test_factorize_repeated(self):
for orig in self.objs:
o = orig.copy()
# don't test boolean
if isinstance(o, Index) and o.is_boolean():
continue
# sort by value, and create duplicates
if isinstance(o, Series):
o = o.sort_values()
n = o.iloc[5:].append(o)
else:
indexer = o.argsort()
o = o.take(indexer)
n = o[5:].append(o)
exp_arr = np.array(
[5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9], dtype=np.intp
)
codes, uniques = n.factorize(sort=True)
tm.assert_numpy_array_equal(codes, exp_arr)
if isinstance(o, Series):
tm.assert_index_equal(
uniques, Index(orig).sort_values(), check_names=False
)
else:
tm.assert_index_equal(uniques, o, check_names=False)
exp_arr = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4], np.intp)
codes, uniques = n.factorize(sort=False)
tm.assert_numpy_array_equal(codes, exp_arr)
if isinstance(o, Series):
expected = Index(o.iloc[5:10].append(o.iloc[:5]))
tm.assert_index_equal(uniques, expected, check_names=False)
else:
expected = o[5:10].append(o[:5])
tm.assert_index_equal(uniques, expected, check_names=False)
def test_duplicated_drop_duplicates_index(self):
# GH 4060
for original in self.objs:
if isinstance(original, Index):
# special case
if original.is_boolean():
result = original.drop_duplicates()
expected = Index([False, True], name="a")
tm.assert_index_equal(result, expected)
continue
# original doesn't have duplicates
expected = np.array([False] * len(original), dtype=bool)
duplicated = original.duplicated()
tm.assert_numpy_array_equal(duplicated, expected)
assert duplicated.dtype == bool
result = original.drop_duplicates()
tm.assert_index_equal(result, original)
assert result is not original
# has_duplicates
assert not original.has_duplicates
# create repeated values, 3rd and 5th values are duplicated
idx = original[list(range(len(original))) + [5, 3]]
expected = np.array([False] * len(original) + [True, True], dtype=bool)
duplicated = idx.duplicated()
tm.assert_numpy_array_equal(duplicated, expected)
assert duplicated.dtype == bool
tm.assert_index_equal(idx.drop_duplicates(), original)
base = [False] * len(idx)
base[3] = True
base[5] = True
expected = np.array(base)
duplicated = idx.duplicated(keep="last")
tm.assert_numpy_array_equal(duplicated, expected)
assert duplicated.dtype == bool
result = idx.drop_duplicates(keep="last")
tm.assert_index_equal(result, idx[~expected])
base = [False] * len(original) + [True, True]
base[3] = True
base[5] = True
expected = np.array(base)
duplicated = idx.duplicated(keep=False)
tm.assert_numpy_array_equal(duplicated, expected)
assert duplicated.dtype == bool
result = idx.drop_duplicates(keep=False)
tm.assert_index_equal(result, idx[~expected])
with pytest.raises(
TypeError,
match=(
r"drop_duplicates\(\) got an " r"unexpected keyword argument"
),
):
idx.drop_duplicates(inplace=True)
else:
expected = Series(
[False] * len(original), index=original.index, name="a"
)
tm.assert_series_equal(original.duplicated(), expected)
result = original.drop_duplicates()
tm.assert_series_equal(result, original)
assert result is not original
idx = original.index[list(range(len(original))) + [5, 3]]
values = original._values[list(range(len(original))) + [5, 3]]
s = Series(values, index=idx, name="a")
expected = Series(
[False] * len(original) + [True, True], index=idx, name="a"
)
tm.assert_series_equal(s.duplicated(), expected)
tm.assert_series_equal(s.drop_duplicates(), original)
base = [False] * len(idx)
base[3] = True
base[5] = True
expected = Series(base, index=idx, name="a")
tm.assert_series_equal(s.duplicated(keep="last"), expected)
tm.assert_series_equal(
s.drop_duplicates(keep="last"), s[~np.array(base)]
)
base = [False] * len(original) + [True, True]
base[3] = True
base[5] = True
expected = Series(base, index=idx, name="a")
tm.assert_series_equal(s.duplicated(keep=False), expected)
tm.assert_series_equal(
s.drop_duplicates(keep=False), s[~np.array(base)]
)
s.drop_duplicates(inplace=True)
tm.assert_series_equal(s, original)
def test_drop_duplicates_series_vs_dataframe(self):
# GH 14192
df = pd.DataFrame(
{
"a": [1, 1, 1, "one", "one"],
"b": [2, 2, np.nan, np.nan, np.nan],
"c": [3, 3, np.nan, np.nan, "three"],
"d": [1, 2, 3, 4, 4],
"e": [
datetime(2015, 1, 1),
datetime(2015, 1, 1),
datetime(2015, 2, 1),
pd.NaT,
pd.NaT,
],
}
)
for column in df.columns:
for keep in ["first", "last", False]:
dropped_frame = df[[column]].drop_duplicates(keep=keep)
dropped_series = df[column].drop_duplicates(keep=keep)
tm.assert_frame_equal(dropped_frame, dropped_series.to_frame())
def test_fillna(self):
# # GH 11343
# though Index.fillna and Series.fillna has separate impl,
# test here to confirm these works as the same
for orig in self.objs:
o = orig.copy()
values = o.values
# values will not be changed
result = o.fillna(o.astype(object).values[0])
if isinstance(o, Index):
tm.assert_index_equal(o, result)
else:
tm.assert_series_equal(o, result)
# check shallow_copied
assert o is not result
for null_obj in [np.nan, None]:
for orig in self.objs:
o = orig.copy()
klass = type(o)
if not self._allow_na_ops(o):
continue
if needs_i8_conversion(o):
values = o.astype(object).values
fill_value = values[0]
values[0:2] = pd.NaT
else:
values = o.values.copy()
fill_value = o.values[0]
values[0:2] = null_obj
expected = [fill_value] * 2 + list(values[2:])
expected = klass(expected, dtype=orig.dtype)
o = klass(values)
# check values has the same dtype as the original
assert o.dtype == orig.dtype
result = o.fillna(fill_value)
if isinstance(o, Index):
tm.assert_index_equal(result, expected)
else:
tm.assert_series_equal(result, expected)
# check shallow_copied
assert o is not result
@pytest.mark.skipif(PYPY, reason="not relevant for PyPy")
def test_memory_usage(self):
for o in self.objs:
res = o.memory_usage()
res_deep = o.memory_usage(deep=True)
if is_object_dtype(o) or (
isinstance(o, Series) and is_object_dtype(o.index)
):
# if there are objects, only deep will pick them up
assert res_deep > res
else:
assert res == res_deep
if isinstance(o, Series):
assert (
o.memory_usage(index=False) + o.index.memory_usage()
) == o.memory_usage(index=True)
# sys.getsizeof will call the .memory_usage with
# deep=True, and add on some GC overhead
diff = res_deep - sys.getsizeof(o)
assert abs(diff) < 100
def test_searchsorted(self):
# See gh-12238
for o in self.objs:
index = np.searchsorted(o, max(o))
assert 0 <= index <= len(o)
index = np.searchsorted(o, max(o), sorter=range(len(o)))
assert 0 <= index <= len(o)
def test_validate_bool_args(self):
invalid_values = [1, "True", [1, 2, 3], 5.0]
for value in invalid_values:
with pytest.raises(ValueError):
self.int_series.drop_duplicates(inplace=value)
def test_getitem(self):
for i in self.indexes:
s = pd.Series(i)
assert i[0] == s.iloc[0]
assert i[5] == s.iloc[5]
assert i[-1] == s.iloc[-1]
assert i[-1] == i[9]
with pytest.raises(IndexError):
i[20]
with pytest.raises(IndexError):
s.iloc[20]
@pytest.mark.parametrize("indexer_klass", [list, pd.Index])
@pytest.mark.parametrize(
"indexer",
[
[True] * 10,
[False] * 10,
[True, False, True, True, False, False, True, True, False, True],
],
)
def test_bool_indexing(self, indexer_klass, indexer):
# GH 22533
for idx in self.indexes:
exp_idx = [i for i in range(len(indexer)) if indexer[i]]
tm.assert_index_equal(idx[indexer_klass(indexer)], idx[exp_idx])
s = pd.Series(idx)
tm.assert_series_equal(s[indexer_klass(indexer)], s.iloc[exp_idx])
def test_get_indexer_non_unique_dtype_mismatch(self):
# GH 25459
indexes, missing = pd.Index(["A", "B"]).get_indexer_non_unique(pd.Index([0]))
tm.assert_numpy_array_equal(np.array([-1], dtype=np.intp), indexes)
tm.assert_numpy_array_equal(np.array([0], dtype=np.int64), missing)
class TestTranspose(Ops):
errmsg = "the 'axes' parameter is not supported"
def test_transpose(self):
for obj in self.objs:
tm.assert_equal(obj.transpose(), obj)
def test_transpose_non_default_axes(self):
for obj in self.objs:
with pytest.raises(ValueError, match=self.errmsg):
obj.transpose(1)
with pytest.raises(ValueError, match=self.errmsg):
obj.transpose(axes=1)
def test_numpy_transpose(self):
for obj in self.objs:
tm.assert_equal(np.transpose(obj), obj)
with pytest.raises(ValueError, match=self.errmsg):
np.transpose(obj, axes=1)
class TestNoNewAttributesMixin:
def test_mixin(self):
class T(NoNewAttributesMixin):
pass
t = T()
assert not hasattr(t, "__frozen")
t.a = "test"
assert t.a == "test"
t._freeze()
assert "__frozen" in dir(t)
assert getattr(t, "__frozen")
with pytest.raises(AttributeError):
t.b = "test"
assert not hasattr(t, "b")
class TestToIterable:
# test that we convert an iterable to python types
dtypes = [
("int8", int),
("int16", int),
("int32", int),
("int64", int),
("uint8", int),
("uint16", int),
("uint32", int),
("uint64", int),
("float16", float),
("float32", float),
("float64", float),
("datetime64[ns]", Timestamp),
("datetime64[ns, US/Eastern]", Timestamp),
("timedelta64[ns]", Timedelta),
]
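# Each (dtype, rdtype) pair maps a container dtype to the Python/pandas scalar type that
# iterating over a Series/Index of that dtype is expected to yield.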
@pytest.mark.parametrize("dtype, rdtype", dtypes)
@pytest.mark.parametrize(
"method",
[
lambda x: x.tolist(),
lambda x: x.to_list(),
lambda x: list(x),
lambda x: list(x.__iter__()),
],
ids=["tolist", "to_list", "list", "iter"],
)
@pytest.mark.parametrize("typ", [Series, Index])
@pytest.mark.filterwarnings("ignore:\\n Passing:FutureWarning")
# TODO(GH-24559): Remove the filterwarnings
def test_iterable(self, typ, method, dtype, rdtype):
# gh-10904
# gh-13258
# coerce iteration to underlying python / pandas types
s = typ([1], dtype=dtype)
result = method(s)[0]
assert isinstance(result, rdtype)
@pytest.mark.parametrize(
"dtype, rdtype, obj",
[
("object", object, "a"),
("object", int, 1),
("category", object, "a"),
("category", int, 1),
],
)
@pytest.mark.parametrize(
"method",
[
lambda x: x.tolist(),
lambda x: x.to_list(),
lambda x: list(x),
lambda x: list(x.__iter__()),
],
ids=["tolist", "to_list", "list", "iter"],
)
@pytest.mark.parametrize("typ", [Series, Index])
def test_iterable_object_and_category(self, typ, method, dtype, rdtype, obj):
# gh-10904
# gh-13258
# coerce iteration to underlying python / pandas types
s = typ([obj], dtype=dtype)
result = method(s)[0]
assert isinstance(result, rdtype)
@pytest.mark.parametrize("dtype, rdtype", dtypes)
def test_iterable_items(self, dtype, rdtype):
# gh-13258
# test if items yields the correct boxed scalars
# this only applies to series
s = Series([1], dtype=dtype)
_, result = list(s.items())[0]
assert isinstance(result, rdtype)
_, result = list(s.items())[0]
assert isinstance(result, rdtype)
@pytest.mark.parametrize(
"dtype, rdtype", dtypes + [("object", int), ("category", int)]
)
@pytest.mark.parametrize("typ", [Series, Index])
@pytest.mark.filterwarnings("ignore:\\n Passing:FutureWarning")
# TODO(GH-24559): Remove the filterwarnings
def test_iterable_map(self, typ, dtype, rdtype):
# gh-13236
# coerce iteration to underlying python / pandas types
s = typ([1], dtype=dtype)
result = s.map(type)[0]
if not isinstance(rdtype, tuple):
rdtype = tuple([rdtype])
assert result in rdtype
@pytest.mark.parametrize(
"method",
[
lambda x: x.tolist(),
lambda x: x.to_list(),
lambda x: list(x),
lambda x: list(x.__iter__()),
],
ids=["tolist", "to_list", "list", "iter"],
)
def test_categorial_datetimelike(self, method):
i = CategoricalIndex([Timestamp("1999-12-31"), Timestamp("2000-12-31")])
result = method(i)[0]
assert isinstance(result, Timestamp)
def test_iter_box(self):
vals = [Timestamp("2011-01-01"), Timestamp("2011-01-02")]
s = Series(vals)
assert s.dtype == "datetime64[ns]"
for res, exp in zip(s, vals):
assert isinstance(res, Timestamp)
assert res.tz is None
assert res == exp
vals = [
Timestamp("2011-01-01", tz="US/Eastern"),
Timestamp("2011-01-02", tz="US/Eastern"),
]
s = Series(vals)
assert s.dtype == "datetime64[ns, US/Eastern]"
for res, exp in zip(s, vals):
assert isinstance(res, Timestamp)
assert res.tz == exp.tz
assert res == exp
# timedelta
vals = [Timedelta("1 days"), Timedelta("2 days")]
s = Series(vals)
assert s.dtype == "timedelta64[ns]"
for res, exp in zip(s, vals):
assert isinstance(res, Timedelta)
assert res == exp
# period
vals = [pd.Period("2011-01-01", freq="M"), pd.Period("2011-01-02", freq="M")]
s = Series(vals)
assert s.dtype == "Period[M]"
for res, exp in zip(s, vals):
assert isinstance(res, pd.Period)
assert res.freq == "M"
assert res == exp
@pytest.mark.parametrize(
"array, expected_type, dtype",
[
(np.array([0, 1], dtype=np.int64), np.ndarray, "int64"),
(np.array(["a", "b"]), np.ndarray, "object"),
(pd.Categorical(["a", "b"]), pd.Categorical, "category"),
(
pd.DatetimeIndex(["2017", "2018"], tz="US/Central"),
DatetimeArray,
"datetime64[ns, US/Central]",
),
(
pd.PeriodIndex([2018, 2019], freq="A"),
pd.core.arrays.PeriodArray,
pd.core.dtypes.dtypes.PeriodDtype("A-DEC"),
),
(
pd.IntervalIndex.from_breaks([0, 1, 2]),
pd.core.arrays.IntervalArray,
"interval",
),
# This test is currently failing for datetime64[ns] and timedelta64[ns].
# The NumPy type system is sufficient for representing these types, so
# we just use NumPy for Series / DataFrame columns of these types (so
# we get consolidation and so on).
# However, DatetimeIndex and TimedeltaIndex use the DateLikeArray
# abstraction for code reuse.
# At the moment, we've judged that allowing this test to fail is more
# practical than overriding Series._values to special case
# Series[M8[ns]] and Series[m8[ns]] to return a DateLikeArray.
pytest.param(
pd.DatetimeIndex(["2017", "2018"]),
np.ndarray,
"datetime64[ns]",
marks=[pytest.mark.xfail(reason="datetime _values", strict=True)],
),
pytest.param(
pd.TimedeltaIndex([10 ** 10]),
np.ndarray,
"m8[ns]",
marks=[pytest.mark.xfail(reason="timedelta _values", strict=True)],
),
],
)
def test_values_consistent(array, expected_type, dtype):
l_values = pd.Series(array)._values
r_values = pd.Index(array)._values
assert type(l_values) is expected_type
assert type(l_values) is type(r_values)
tm.assert_equal(l_values, r_values)
@pytest.mark.parametrize(
"array, expected",
[
(np.array([0, 1], dtype=np.int64), np.array([0, 1], dtype=np.int64)),
(np.array(["0", "1"]), | np.array(["0", "1"], dtype=object) | numpy.array |
"""
Native matplotlib support of frequently used 2d projections,
for looking up to the sky.
This file was initially developed as part of skymapper by <NAME>
based on the example in matplotlib.
It was later adopted by me (<NAME>), and I will maintain a copy in
imaginglss for easier access, also because I do plan to clean up
the function signatures and variable naming (breaking compatibility with
old skymapper code).
The current version adds the ability to generate equal area histograms
on HealPix pixels.
It does not depend on healpy, there is a minimal python implementation of
healpix at the end of the file; imported in the javascript/lua style.
The intention is one day we will submit a PR of this to matplotlib.
What does not work:
1. Panning.
2. Color bar is sometimes in the wrong place
3. Label locations are poorly calculated.
What does work:
Everything else.
Author: <NAME>
<NAME>
"""
from __future__ import unicode_literals
import matplotlib
from matplotlib.axes import Axes
from matplotlib.patches import Rectangle, Polygon
from matplotlib.path import Path
from matplotlib.collections import PolyCollection, TriMesh
from matplotlib.tri.triangulation import Triangulation
from matplotlib.ticker import NullLocator, Formatter, FixedLocator, MaxNLocator
from matplotlib.transforms import Affine2D, BboxTransformTo, Transform, blended_transform_factory, Bbox
from matplotlib.projections import register_projection
import matplotlib.spines as mspines
import matplotlib.axis as maxis
import numpy as np
__author__ = "<NAME>"
__email__ = "<EMAIL>"
class SkymapperAxes(Axes):
"""
A base class for a Skymapper axes that takes in ra0, dec0, dec1, dec2.
The base class takes care of clipping and interpolating with matplotlib.
Subclass and override class method get_projection_class.
"""
# The subclass projection must specify a name. This will be used be the
# user to select the projection.
name = None
@classmethod
def get_projection_class(kls):
raise NotImplementedError('Must implement this in subclass')
def __init__(self, *args, **kwargs):
self.ra0 = None
self.dec0 = None
self.dec1 = None
self.dec2 = None
Axes.__init__(self, *args, **kwargs)
self.cla()
def _init_axis(self):
# Axes._init_axis() -- until HammerAxes.xaxis.cla() works.
self.xaxis = maxis.XAxis(self)
self.spines['bottom'].register_axis(self.xaxis)
self.spines['top'].register_axis(self.xaxis)
self.yaxis = maxis.YAxis(self)
self.spines['left'].register_axis(self.yaxis)
self.spines['right'].register_axis(self.yaxis)
self._update_transScale()
def cla(self):
"""
Override to set up some reasonable defaults.
"""
# Don't forget to call the base class
Axes.cla(self)
# Turn off minor ticking altogether
self.xaxis.set_minor_locator(NullLocator())
self.yaxis.set_minor_locator(NullLocator())
self.xaxis.set_major_locator(MaxNLocator(5, prune='both'))
self.yaxis.set_major_locator(MaxNLocator(5, prune='both'))
# Do not display ticks -- we only want gridlines and text
self.xaxis.set_ticks_position('none')
self.yaxis.set_ticks_position('none')
self.set_center(None, None)
# FIXME: probably want to override autoscale_view
# to properly handle wrapping introduced by margin
# and properly wrap data.
# It doesn't make sense to have xwidth > 360.
self._tight = True
def _set_lim_and_transforms(self):
"""
This is called once when the plot is created to set up all the
transforms for the data, text and grids.
"""
# There are three important coordinate spaces going on here:
#
# 1. Data space: The space of the data itself
#
# 2. Axes space: The unit rectangle (0, 0) to (1, 1)
# covering the entire plot area.
#
# 3. Display space: The coordinates of the resulting image,
# often in pixels or dpi/inch.
# This function makes heavy use of the Transform classes in
# ``lib/matplotlib/transforms.py.`` For more information, see
# the inline documentation there.
# The goal of the first two transformations is to get from the
# data space (in this case meridian and parallel) to axes
# space. It is separated into a non-affine and affine part so
# that the non-affine part does not have to be recomputed when
# a simple affine change to the figure has been made (such as
# resizing the window or changing the dpi).
# 1) The core transformation from data space into
# rectilinear space defined in the HammerTransform class.
self.transProjection = self.get_projection_class()()
self.transProjection.set_center((180, 0))
self.transProjection.set_dec1(-65)
self.transProjection.set_dec2(80)
# 2) The above has an output range that is not in the unit
# rectangle, so scale and translate it so it fits correctly
# within the axes. The peculiar calculations of xscale and
# yscale are specific to a Aitoff-Hammer projection, so don't
# worry about them too much.
# This will be updated after the xy limits are set.
self.transAffine = Affine2D()
# 3) This is the transformation from axes space to display
# space.
self.transAxes = BboxTransformTo(self.bbox)
# Now put these 3 transforms together -- from data all the way
# to display coordinates. Using the '+' operator, these
# transforms will be applied "in order". The transforms are
# automatically simplified, if possible, by the underlying
# transformation framework.
self.transData = \
self.transProjection + \
self.transAffine + \
self.transAxes
self.transClip = \
self.transProjection + \
self.transAffine
# The main data transformation is set up. Now deal with
# gridlines and tick labels.
# Longitude gridlines and ticklabels. The input to these
# transforms are in display space in x and axes space in y.
# Therefore, the input values will be in range (-xmin, 0),
# (xmax, 1). The goal of these transforms is to go from that
# space to display space. The tick labels will be offset 4
# pixels from the equator.
self._xaxis_pretransform = \
Affine2D() \
.scale(1.0, 180) \
.translate(0.0, -90)
self._xaxis_transform = \
self._xaxis_pretransform + \
self.transData
self._xaxis_text1_transform = \
self._xaxis_pretransform + \
self.transData + \
Affine2D().translate(0.0, -8.0)
self._xaxis_text2_transform = \
self._xaxis_pretransform+ \
self.transData + \
Affine2D().translate(0.0, -8.0)
# Now set up the transforms for the parallel ticks. The input to
# these transforms are in axes space in x and display space in
# y. Therefore, the input values will be in range (0, -ymin),
# (1, ymax). The goal of these transforms is to go from that
# space to display space. The tick labels will be offset 4
# pixels from the edge of the axes ellipse.
self._yaxis_stretch = Affine2D().scale(360, 1.0).translate(0.0, 0.0)
self._yaxis_stretch1 = Affine2D().scale(360, 1.0).translate(0.0, 0.0)
self._yaxis_stretch2 = Affine2D().scale(360, 1.0).translate(0.0, 0.0)
self._yaxis_transform = \
self._yaxis_stretch + \
self.transData
self._yaxis_text1_transform = \
self._yaxis_stretch1 + \
self.transData
# Affine2D().translate(-8.0, 0.0)
self._yaxis_text2_transform = \
self._yaxis_stretch2 + \
self.transData
# Affine2D().translate(8.0, 0.0)
def _update_affine(self):
# update the transformations and clip paths
# after new lims are set.
if self.ra0 is None:
x0, x1 = self.viewLim.intervalx
ra0 = 0.5 * (x0 + x1)
else:
ra0 = self.ra0
if self.dec0 is None:
y0, y1 = self.viewLim.intervaly
dec0 = 0.5 * (y0 + y1)
else:
dec0 = self.dec0
if self.dec1 is None:
y0, y1 = self.viewLim.intervaly
dec1 = y0 + (y1 - y0) / 12.
else:
dec1 = self.dec1
if self.dec2 is None:
y0, y1 = self.viewLim.intervaly
dec2 = y1 - (y1 - y0) / 12.
else:
dec2 = self.dec2
self.transProjection.set_center((ra0, dec0))
self.transProjection.set_dec1(dec1)
self.transProjection.set_dec2(dec2)
self._yaxis_stretch\
.clear() \
.scale(self.viewLim.width, 1.0) \
.translate(self.viewLim.x0, 0)
self._yaxis_stretch1\
.clear() \
.scale(self.viewLim.width, 1.0) \
.translate(self.viewLim.x0 - 0.00 * self.viewLim.width, 0)
self._yaxis_stretch2\
.clear() \
.scale(self.viewLim.width, 1.0) \
.translate(self.viewLim.x0 + 0.00 * self.viewLim.width, 0)
self._xaxis_pretransform \
.clear() \
.scale(1.0, self.viewLim.height) \
.translate(0.0, self.viewLim.y0)
corners_data = np.array([[self.viewLim.x0, self.viewLim.y0],
[ra0, self.viewLim.y0],
[self.viewLim.x1, self.viewLim.y0],
[self.viewLim.x1, self.viewLim.y1],
[self.viewLim.x0, self.viewLim.y1],])
corners = self.transProjection.transform_non_affine(corners_data)
x0 = corners[0][0]
x1 = corners[2][0]
# special case when x1 is wrapped back to x0
# FIXME: I don't think we need it anymore.
if x0 == x1: x1 = - x0
y0 = corners[1][1]
y1 = max([corners[3][1], corners[4][1]])
xscale = x1 - x0
yscale = y1 - y0
self.transAffine.clear() \
.translate( - (x0 + x1) * 0.5, - (y0 + y1) * 0.5) \
.scale(0.95 / xscale, 0.95 / yscale) \
.translate(0.5, 0.5)
# now update the clipping path
path = Path(corners_data)
path0 = self.transProjection.transform_path(path)
path = self.transClip.transform_path(path)
self.patch.set_xy(path.vertices)
def get_xaxis_transform(self, which='grid'):
"""
Override this method to provide a transformation for the
x-axis grid and ticks.
"""
assert which in ['tick1', 'tick2', 'grid']
return self._xaxis_transform
def get_xaxis_text1_transform(self, pixelPad):
"""
Override this method to provide a transformation for the
x-axis tick labels.
Returns a tuple of the form (transform, valign, halign)
"""
return self._xaxis_text1_transform, 'center', 'center'
def get_xaxis_text2_transform(self, pixelPad):
"""
Override this method to provide a transformation for the
secondary x-axis tick labels.
Returns a tuple of the form (transform, valign, halign)
"""
return self._xaxis_text2_transform, 'center', 'center'
def get_yaxis_transform(self, which='grid'):
"""
Override this method to provide a transformation for the
y-axis grid and ticks.
"""
assert which in ['tick1', 'tick2', 'grid']
return self._yaxis_transform
def get_yaxis_text1_transform(self, pixelPad):
"""
Override this method to provide a transformation for the
y-axis tick labels.
Returns a tuple of the form (transform, valign, halign)
"""
return self._yaxis_text1_transform, 'center', 'center'
def get_yaxis_text2_transform(self, pixelPad):
"""
Override this method to provide a transformation for the
secondary y-axis tick labels.
Returns a tuple of the form (transform, valign, halign)
"""
return self._yaxis_text2_transform, 'center', 'center'
def _gen_axes_patch(self):
"""
ClipPath.
Initially set to a size of 2 box in transAxes.
After xlim and ylim are set, this will be changed to the actual
region in transData.
For an unclear reason, the very initial clip path is always applied
to the grid. Therefore we set size to 2.0 to avoid bad clipping.
"""
return Polygon([(0, 0), (2, 0), (2, 2), (0, 2)], fill=False)
def _gen_axes_spines(self):
d = {
'left': mspines.Spine.linear_spine(self, spine_type='left'),
'right': mspines.Spine.linear_spine(self, spine_type='right'),
'top': mspines.Spine.linear_spine(self, spine_type='top'),
'bottom': mspines.Spine.linear_spine(self, spine_type='bottom'),
}
d['left'].set_position(('axes', 0))
d['right'].set_position(('axes', 1))
d['top'].set_position(('axes', 0))
d['bottom'].set_position(('axes', 1))
#FIXME: these spines can be moved with set_position(('axes', ?)) but
# 'data' fails. Because the transformation is non-separable,
# and because spines / data makes that assumption, we probably
# do not have an easy way to support moving spines via native matplotlib
# api on data axis.
# also the labels currently do not follow the spines. Likely because
# they are not registered?
return d
# Prevent the user from applying scales to one or both of the
# axes. In this particular case, scaling the axes wouldn't make
# sense, so we don't allow it.
def set_xscale(self, *args, **kwargs):
if args[0] != 'linear':
raise NotImplementedError
Axes.set_xscale(self, *args, **kwargs)
def set_yscale(self, *args, **kwargs):
if args[0] != 'linear':
raise NotImplementedError
Axes.set_yscale(self, *args, **kwargs)
def set_center(self, ra0, dec0):
""" Set the center of ra """
self.ra0 = ra0
self.dec0 = dec0
self._update_affine()
def set_parallels(self, dec1, dec2):
""" Set the parallels """
self.dec1 = dec1
self.dec2 = dec2
self._update_affine()
# when xlim and ylim are updated, the transformation
# needs to be updated too.
def set_xlim(self, *args, **kwargs):
Axes.set_xlim(self, *args, **kwargs)
# FIXME: wrap x0 x1 to ensure they enclose ra0.
x0, x1 = self.viewLim.intervalx
if self.ra0 is not None:
if not x0 <= self.transProjection.ra0 or \
not x1 > self.transProjection.ra0:
raise ValueError("The given limit in RA does not enclose ra0")
self._update_affine()
def set_ylim(self, *args, **kwargs):
Axes.set_ylim(self, *args, **kwargs)
self._update_affine()
def _histmap(self, show, ra, dec, weights=None, nside=32, perarea=False, mean=False, range=None, **kwargs):
r = histogrammap(ra, dec, weights, nside, perarea=perarea, range=range)
if weights is not None:
w, N = r
else:
w = r
if mean:
mask = N != 0
w[mask] /= N[mask]
else:
mask = w > 0
return w, mask, show(w, mask, nest=False, **kwargs)
def histmap(self, ra, dec, weights=None, nside=32, perarea=False, mean=False, range=None, **kwargs):
return self._histmap(self.mapshow, ra, dec, weights, nside, perarea, mean, range, **kwargs)
def histcontour(self, ra, dec, weights=None, nside=32, perarea=False, mean=False, range=None, **kwargs):
return self._histmap(self.mapcontour, ra, dec, weights, nside, perarea, mean, range, **kwargs)
def mapshow(self, map, mask=None, nest=False, shading='flat', **kwargs):
""" Display a healpix map """
vmin = kwargs.pop('vmin', None)
vmax = kwargs.pop('vmax', None)
defaults = dict(rasterized=True,
alpha=1.0,
linewidth=0)
defaults.update(kwargs)
if mask is None:
mask = map == map
if shading == 'flat':
coll = HealpixCollection(map, mask,
transform=self.transData, **defaults)
else:
coll = HealpixTriCollection(map, mask, transform=self.transData, **defaults)
coll.set_clim(vmin=vmin, vmax=vmax)
self.add_collection(coll)
self._sci(coll)
self.autoscale_view(tight=True)
return coll
def mapcontour(self, map, mask=None, nest=False, **kwargs):
""" Display a healpix map as coutours. This is approximate. """
if mask is None:
mask = map == map
ra, dec = pix2radec(healpix.npix2nside(len(map)), mask.nonzero()[0])
im = self.tricontour(ra, dec, map[mask], **kwargs)
self._sci(im)
self.autoscale_view(tight=True)
return im
def format_coord(self, lon, lat):
"""
Override this method to change how the values are displayed in
the status bar.
In this case, we want them to be displayed in degrees N/S/E/W.
"""
lon = lon
lat = lat
if lat >= 0.0:
ns = 'N'
else:
ns = 'S'
if lon >= 0.0:
ew = 'E'
else:
ew = 'W'
# \u00b0 : degree symbol
return '%f\u00b0%s, %f\u00b0%s' % (abs(lat), ns, abs(lon), ew)
class DegreeFormatter(Formatter):
"""
This is a custom formatter that converts the native unit of
radians into (truncated) degrees and adds a degree symbol.
"""
def __init__(self, round_to=1.0):
self._round_to = round_to
def __call__(self, x, pos=None):
degrees = round(x / self._round_to) * self._round_to
# \u00b0 : degree symbol
return "%d\u00b0" % degrees
def set_meridian_grid(self, degrees):
"""
Set the number of degrees between each meridian grid.
It provides a more convenient interface to set the ticking than set_xticks would.
"""
# Set up a FixedLocator at each of the points, evenly spaced
# by degrees.
x0, x1 = self.get_xlim()
number = abs((x1 - x0) / degrees) + 1
self.xaxis.set_major_locator(
FixedLocator(
np.linspace(x0, x1, number, True)[1:-1]))
# Set the formatter to display the tick labels in degrees,
# rather than radians.
self.xaxis.set_major_formatter(self.DegreeFormatter(degrees))
def set_parallel_grid(self, degrees):
"""
Set the number of degrees between each parallel grid line.
It provides a more convenient interface than set_yticks would.
"""
# Set up a FixedLocator at each of the points, evenly spaced
# by degrees.
y0, y1 = self.get_ylim()
number = ((y1 - y0) / degrees) + 1
self.yaxis.set_major_locator(
FixedLocator(
np.linspace(y0, y1, number, True)[1:-1]))
# Set the formatter to display the tick labels in degrees,
# rather than radians.
self.yaxis.set_major_formatter(self.DegreeFormatter(degrees))
# Interactive panning and zooming is not supported with this projection,
# so we override all of the following methods to disable it.
def _in_axes(self, mouseevent):
if hasattr(self, '_pan_trans'):
return True
else:
return Axes._in_axes(self, mouseevent)
def can_zoom(self):
"""
Return True if this axes support the zoom box
"""
return True
def start_pan(self, x, y, button):
self._pan_trans = self.transAxes.inverted() + \
blended_transform_factory(
self._yaxis_stretch,
self._xaxis_pretransform,)
def end_pan(self):
delattr(self, '_pan_trans')
def drag_pan(self, button, key, x, y):
pan1 = self._pan_trans.transform([(x, y)])[0]
self.set_ra0(360 - pan1[0])
self.set_dec0(pan1[1])
self._update_affine()
# now define the Albers equal area axes
class AlbersEqualAreaAxes(SkymapperAxes):
"""
A custom class for the Albers Equal Area projection.
https://en.wikipedia.org/wiki/Albers_projection
"""
name = 'aea'
@classmethod
def get_projection_class(kls):
return kls.AlbersEqualAreaTransform
# Now, the transforms themselves.
class AlbersEqualAreaTransform(Transform):
"""
The base Hammer transform.
"""
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, **kwargs):
Transform.__init__(self, **kwargs)
self.dec0 = 0
self.ra0 = 180
self.dec1 = -60
self.dec2 = 30
self._update()
def set_center(self, center):
ra0, dec0 = center
self.ra0 = ra0
self.dec0 = dec0
self._update()
def set_dec1(self, dec1):
self.dec1 = dec1
self._update()
def set_dec2(self, dec2):
self.dec2 = dec2
self._update()
def _update(self):
self.n = 0.5 * (np.sin(np.radians(self.dec1))
+ np.sin(np.radians(self.dec2)))
self.C = np.cos(np.radians(self.dec1))**2 + 2 * self.n * np.sin(np.radians(self.dec1))
self.rho0 = self.__rho__(self.dec0)
def __rho__(self, dec):
if self.n == 0:
return np.sqrt(self.C - 2 * self.n * np.sin(np.radians(dec)))
else:
return np.sqrt(self.C - 2 * self.n * np.sin(np.radians(dec))) / self.n
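# For reference, the Albers equal-area conic relations implemented by _update, __rho__
# and transform_non_affine below (angles in radians, ra0/dec0 the projection center):
#   n     = (sin(dec1) + sin(dec2)) / 2
#   C     = cos(dec1)**2 + 2 * n * sin(dec1)
#   rho   = sqrt(C - 2 * n * sin(dec)) / n
#   theta = n * (ra - ra0)
#   x = rho * sin(theta),   y = rho0 - rho * cos(theta)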
def transform_non_affine(self, ll):
"""
Override the transform_non_affine method to implement the custom
transform.
The input and output are Nx2 numpy arrays.
"""
ra = ll[:,0]
dec = ll[:,1]
ra0 = self.ra0
ra_ = np.radians(ra - ra0) # Do not inverse for RA
# FIXME: problem with the sliced sphere: the outer parallel needs to be duplicated at the expense of the central one
if self.n == 0:
rt = np.array([
self.rho0 * (ra_),
- self.rho0 * (np.sin(np.radians(self.dec0) - np.sin(np.radians(dec)))),
]).T
else:
theta = self.n * ra_
rho = self.__rho__(dec)
rt = np.array([
rho*np.sin(theta),
self.rho0 - rho*np.cos(theta)]).T
#if np.isnan(rt).any():
# raise ValueError('nan occured : ll =%s' % (str(ll)))
return rt
# This is where things get interesting. With this projection,
# straight lines in data space become curves in display space.
# This is done by interpolating new values between the input
# values of the data. Since ``transform`` must not return a
# differently-sized array, any transform that requires
# changing the length of the data array must happen within
# ``transform_path``.
def transform_path_non_affine(self, path):
# Adaptive interpolation:
# we keep adding control points, till all control points
# have an error of less than 0.01 (about 1%)
# or if the number of control points is > 80.
ra0 = self.ra0
path = path.cleaned(curves=False)
v = path.vertices
diff = v[:, 0] - v[0, 0]
v00 = v[0][0] - ra0
while v00 > 180: v00 -= 360
while v00 < -180: v00 += 360
v00 += ra0
v[:, 0] = v00 + diff
nonstop = path.codes > 0
path = Path(v[nonstop], path.codes[nonstop])
isteps = int(path._interpolation_steps * 1.5)
if isteps < 10: isteps = 10
while True:
ipath = path.interpolated(isteps)
tiv = self.transform(ipath.vertices)
itv = Path(self.transform(path.vertices)).interpolated(isteps).vertices
if np.mean(np.abs(tiv - itv)) < 0.01:
break
if isteps > 20:
break
isteps = int(isteps * 1.5)
return Path(tiv, ipath.codes)
transform_path_non_affine.__doc__ = \
Transform.transform_path_non_affine.__doc__
if matplotlib.__version__ < '1.2':
transform = transform_non_affine
transform_path = transform_path_non_affine
transform_path.__doc__ = Transform.transform_path.__doc__
def inverted(self):
return AlbersEqualAreaAxes.InvertedAlbersEqualAreaTransform(self)
inverted.__doc__ = Transform.inverted.__doc__
class InvertedAlbersEqualAreaTransform(Transform):
""" Inverted transform.
This will always only give values in the prime ra0-180 ~ ra0+180 range, I believe.
So it is inherently broken. I wonder when matplotlib actually calls this function,
given that interactive is disabled.
"""
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, inverted, **kwargs):
Transform.__init__(self, **kwargs)
self.inverted = inverted
def transform_non_affine(self, xy):
x = xy[:,0]
y = xy[:,1]
inverted = self.inverted
rho = np.sqrt(x**2 + (inverted.rho0 - y)**2)
# make sure that the signs are correct
if inverted.n == 0:
rt = np.degrees(
[
np.radians(inverted.ra0) + x / inverted.rho0,
np.arcsin(y / inverted.rho0 + np.sin(np.radians(inverted.dec0)))
]).T
return rt
elif inverted.n > 0:
theta = np.arctan2(x, inverted.rho0 - y)
else:
theta = np.arctan2(-x, -(inverted.rho0 - y))
return np.degrees([np.radians(inverted.ra0) + theta/inverted.n,
np.arcsin((inverted.C - (rho * inverted.n)**2)/(2*inverted.n))]).T
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
if matplotlib.__version__ < '1.2':
transform = transform_non_affine
def inverted(self):
# The inverse of the inverse is the original transform... ;)
return self.inverted
inverted.__doc__ = Transform.inverted.__doc__
class HealpixCollection(PolyCollection):
def __init__(self, map, mask, nest=False, **kwargs):
nside = healpix.npix2nside(len(mask))
self.v = pix2quad(nside, mask.nonzero()[0], nest)
PolyCollection.__init__(self, self.v, array=map[mask], **kwargs)
def get_datalim(self, transData):
""" The data lim of a healpix collection.
"""
# FIXME: it is currently set to the full sky.
# This could have been trimmed down.
# We want to set xlim smartly such that the largest
# empty region is chopped off. I think it is possible, by
# doing a histogram in ra, for example.
vmin = (0, -90)
vmax = (360, 90)
return Bbox((vmin, vmax))
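# Illustrative usage sketch; `m` (a full-sky float array with 12 * nside**2
# entries), `mask` and an axes `ax` using the 'aea' projection registered
# below are assumed to exist:
#   coll = HealpixCollection(m, mask)
#   coll.set_transform(ax.transData)
#   ax.add_collection(coll)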
import matplotlib.transforms as mtransforms
import warnings
import numpy as np
import numpy.ma as ma
import matplotlib as mpl
import matplotlib.cbook as cbook
import matplotlib.colors as mcolors
import matplotlib.cm as cm
from matplotlib import docstring
import matplotlib.transforms as transforms
import matplotlib.artist as artist
from matplotlib.artist import allow_rasterization
import matplotlib.backend_bases as backend_bases
import matplotlib.path as mpath
from matplotlib import _path
import matplotlib.mlab as mlab
import matplotlib.lines as mlines
from matplotlib.collections import Collection
class HealpixTriCollection(Collection):
"""
Class for the efficient drawing of a triangular mesh using
Gouraud shading.
A triangular mesh is a :class:`~matplotlib.tri.Triangulation`
object.
"""
def __init__(self, map, mask, nest=False, **kwargs):
Collection.__init__(self, **kwargs)
nside = healpix.npix2nside(len(map))
# remove the first axes
verts = pix2tri(nside, mask.nonzero()[0]).reshape(-1, 3, 2)
c = np.ones((verts.shape[0], verts.shape[1])) * np.repeat(map[mask][:, None], 2, axis=0)
self._verts = verts
self._shading = 'gouraud'
self._is_filled = True
self.set_array(c.reshape(-1))
def get_paths(self):
if self._paths is None:
self.set_paths()
return self._paths
def set_paths(self):
self._paths = self.convert_mesh_to_paths(self._verts)
@staticmethod
def convert_mesh_to_paths(verts):
"""
Converts a given mesh into a sequence of
:class:`matplotlib.path.Path` objects for easier rendering by
backends that do not directly support meshes.
This function is primarily of use to backend implementers.
"""
Path = mpath.Path
return [Path(x) for x in verts]
@allow_rasterization
def draw(self, renderer):
if not self.get_visible():
return
renderer.open_group(self.__class__.__name__)
transform = self.get_transform()
# Get a list of triangles and the color at each vertex.
verts = self._verts
self.update_scalarmappable()
colors = self._facecolors.reshape(-1, 3, 4)
oldshape = list(verts.shape)
verts = transform.transform(verts.reshape(-1, 2)).reshape(oldshape)
gc = renderer.new_gc()
self._set_gc_clip(gc)
gc.set_linewidth(self.get_linewidth()[0])
renderer.draw_gouraud_triangles(gc, verts, colors, mtransforms.IdentityTransform())
gc.restore()
renderer.close_group(self.__class__.__name__)
def get_datalim(self, transData):
""" The data lim of a healpix collection.
"""
# FIXME: it is currently set to the full sky.
# This could have been trimmed down.
# We want to set xlim smartly such that the largest
# empty region is chopped off. I think it is possible, by
# doing a histogram in ra, for example.
vmin = (0, -90)
vmax = (360, 90)
return Bbox((vmin, vmax))
def _wrap360(phi, dir='left'):
phi[np.abs(phi) < 1e-9] = 0
if dir == 'left':
ref = phi.min(axis=-1)
else:
ref = phi.max(axis=-1)
# print('ref', ref, phi, ref % 360 - ref)
diff = (ref % 360) - ref
phi = phi + diff[:, None]
#diff = phi - ref[:, None]
#print('great', (diff > 180).sum())
#diff[diff > 180] -= 360
#print('less', (diff < -180).sum())
#diff[diff < -180] += 360
#phi = ref[:, None] + diff
return phi
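# Illustrative example: _wrap360 shifts every row of RA values by a multiple
# of 360 so that the reference vertex (the row minimum for 'left', the maximum
# for 'right') lands in [0, 360), keeping each cell contiguous across the
# 0/360 seam:
#   _wrap360(np.array([[-10., 10., 5.]]), 'left')  ->  array([[350., 370., 365.]])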
# a few helper functions talking to healpy/healpix.
def pix2quad(nside, pix, nest=False):
"""Generate healpix quad vertices for pixels where mask is True
Args:
pix: list of pixel numbers
nest: nested or not
nside: HealPix nside
Returns:
vertices
vertices: (N,4,2), RA/Dec coordinates of 4 boundary points of cell
"""
pix = np.asarray(pix)
vertices = np.zeros((pix.size, 4, 2))
theta, phi = healpix.vertices(nside, pix)
theta = np.degrees(theta)
phi = np.degrees(phi)
vertices[:, :, 0] = phi
vertices[:, :, 1] = 90.0 - theta
# ensure objects are in the same image plane.
vertices[:, :, 0] = _wrap360(phi, 'right')
return vertices
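# Illustrative usage sketch; assumes the `healpix` helper namespace (see
# create_healpix below) is available:
#   quads = pix2quad(2, np.arange(4))   # shape (4, 4, 2), RA/Dec in degrees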
def pix2tri(nside, pix, nest=False):
"""Generate healpix quad vertices for pixels where mask is True
Args:
pix: list of pixel numbers
nest: nested or not
nside: HealPix nside
Returns:
vertices
vertices: (N,3,2,2), RA/Dec coordinates of 3 boundary points of 2 triangles
"""
# each pixel contains 2 triangles.
pix = np.asarray(pix)
vertices = np.zeros((pix.size, 2, 3, 2))
theta, phi = healpix.vertices(nside, pix)
theta = np.degrees(theta)
phi = np.degrees(phi)
vertices[:, 0, :, 0] = _wrap360(phi[:, [0, 1, 3]], 'left')
vertices[:, 0, :, 1] = 90.0 - theta[:, [0, 1, 3]]
vertices[:, 1, :, 0] = _wrap360(phi[:, [1, 2, 3]], 'right')
vertices[:, 1, :, 1] = 90.0 - theta[:, [1, 2, 3]]
return vertices
def pix2radec(nside, pix):
theta, phi = healpix.pix2ang(nside, pix)
return np.degrees(phi), 90 - np.degrees(theta)
def radec2pix(nside, ra, dec):
phi = np.radians(ra)
theta = np.radians(90 - dec)
return healpix.ang2pix(nside, theta, phi)
def histogrammap(ra, dec, weights=None, nside=32, perarea=False, range=None):
if range is not None:
(ra1, ra2), (dec1, dec2) = range
m = (ra >= ra1)& (ra <= ra2)
m &= (dec >= dec1)& (dec <= dec2)
ra = ra[m]
dec = dec[m]
if weights is not None:
weights = weights[m]
ipix = healpix.ang2pix(nside, np.radians(90-dec), np.radians(ra))
npix = healpix.nside2npix(nside)
if perarea:
npix = healpix.nside2npix(nside)
sky = 360. ** 2 / np.pi
area = 1. * (sky / npix)
else:
area = 1
if weights is not None:
w = np.bincount(ipix, weights=weights, minlength=npix)
N = np.bincount(ipix, minlength=npix)
w = w / area
N = N / area
return w, N
else:
w = 1.0 * np.bincount(ipix, minlength=npix)
return w / area
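# Illustrative usage sketch; `ra`, `dec` and `flux` are assumed arrays in
# degrees / arbitrary units:
#   density = histogrammap(ra, dec, nside=64, perarea=True)   # counts per sq. degree
#   w, n = histogrammap(ra, dec, weights=flux, nside=64)      # weighted and raw counts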
# Now register the projection with matplotlib so the user can select
# it.
register_projection(AlbersEqualAreaAxes)
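# Illustrative usage sketch: once registered, the projection can be requested
# by name through the usual matplotlib mechanism, e.g.
#   import matplotlib.pyplot as plt
#   ax = plt.subplot(111, projection='aea')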
def create_healpix():
""" A pure python (numpy-based) version of key healpix functions.
The ring scheme is implemented.
Dependency: numpy.
It shall probably be self-hosted as an individual python package.
Author: <NAME> <<EMAIL>>
"""
import numpy
def npix2nside(npix):
# FIXME: this could be buggy for large npix
nside2 = npix // 12
nside = numpy.array(nside2 ** 0.5).astype('i8')
return nside
def nside2npix(nside):
return nside * nside * 12
def ang2pix(nside, theta, phi):
r"""Convert angle :math:`\theta` :math:`\phi` to pixel.
This is translated from chealpix.c; but refer to Section 4.1 of
http://adsabs.harvard.edu/abs/2005ApJ...622..759G
"""
nside, theta, phi = numpy.lib.stride_tricks.broadcast_arrays(nside, theta, phi)
def equatorial(nside, tt, z):
t1 = nside * (0.5 + tt)
t2 = nside * z * 0.75
jp = (t1 - t2).astype('i8')
jm = (t1 + t2).astype('i8')
ir = nside + 1 + jp - jm # in {1, 2n + 1}
kshift = 1 - (ir & 1) # kshift=1 if ir even, 0 odd
ip = (jp + jm - nside + kshift + 1) // 2 # in {0, 4n - 1}
ip = ip % (4 * nside)
return nside * (nside - 1) * 2 + (ir - 1) * 4 * nside + ip
def polecaps(nside, tt, z, s):
tp = tt - numpy.floor(tt)
za = numpy.abs(z)
tmp = nside * s / ((1 + za) / 3) ** 0.5
mp = za > 0.99
tmp[mp] = nside[mp] * (3 *(1-za[mp])) ** 0.5
jp = (tp * tmp).astype('i8')
jm = ((1 - tp) * tmp).astype('i8')
ir = jp + jm + 1
ip = (tt * ir).astype('i8')
ip = ip % (4 * ir)
r1 = 2 * ir * (ir - 1)
r2 = 2 * ir * (ir + 1)
r = numpy.empty_like(r1)
r[z > 0] = r1[z > 0] + ip[z > 0]
r[z < 0] = 12 * nside[z < 0] * nside[z < 0] - r2[z < 0] + ip[z < 0]
return r
z = numpy.cos(theta)
s = numpy.sin(theta)
tt = (phi / (0.5 * numpy.pi) ) % 4 # in [0, 4]
result = numpy.zeros(z.shape, dtype='i8')
mask = (z < 2. / 3) & (z > -2. / 3)
result[mask] = equatorial(nside[mask], tt[mask], z[mask])
result[~mask] = polecaps(nside[~mask], tt[~mask], z[~mask], s[~mask])
return result
def pix2ang(nside, pix):
r"""Convert pixel to angle :math:`\theta` :math:`\phi`.
nside and pix are broadcast with numpy rules.
Returns: theta, phi
This is translated from chealpix.c; but refer to Section 4.1 of
http://adsabs.harvard.edu/abs/2005ApJ...622..759G
"""
nside, pix = numpy.lib.stride_tricks.broadcast_arrays(nside, pix)
ncap = nside * (nside - 1) * 2
npix = 12 * nside * nside
def northpole(pix, npix):
iring = (1 + ((1 + 2 * pix) ** 0.5)).astype('i8') // 2
iphi = (pix + 1) - 2 * iring * (iring - 1)
z = 1.0 - (iring*iring) * 4. / npix
phi = (iphi - 0.5) * 0.5 * numpy.pi / iring
return z, phi
def equatorial(pix, nside, npix, ncap):
ip = pix - ncap
iring = ip // (4 * nside) + nside
iphi = ip % (4 * nside) + 1
fodd = (((iring + nside) &1) + 1.) * 0.5
z = (2 * nside - iring) * nside * 8.0 / npix
phi = (iphi - fodd) * (0.5 * numpy.pi) / nside
return z, phi
def southpole(pix, npix):
ip = npix - pix
iring = (1 + ((2 * ip - 1)**0.5).astype('i8')) // 2
iphi = 4 * iring + 1 - (ip - 2 * iring * (iring - 1))
z = -1 + (iring * iring) * 4. / npix
phi = (iphi - 0.5 ) * 0.5 * numpy.pi / iring
return z, phi
mask1 = pix < ncap
mask2 = (~mask1) & (pix < npix - ncap)
mask3 = pix >= npix - ncap
z = numpy.zeros(pix.shape, dtype='f8')
phi = numpy.zeros(pix.shape, dtype='f8')
z[mask1], phi[mask1] = northpole(pix[mask1], npix[mask1])
z[mask2], phi[mask2] = equatorial(pix[mask2], nside[mask2], npix[mask2], ncap[mask2])
z[mask3], phi[mask3] = southpole(pix[mask3], npix[mask3])
return numpy.arccos(z), phi
def ang2xy(theta, phi):
r"""Convert :math:`\theta` :math:`\phi` to :math:`x_s` :math:`y_s`.
Returns: x, y
Refer to Section 4.4 of http://adsabs.harvard.edu/abs/2005ApJ...622..759G
"""
theta, phi = numpy.lib.stride_tricks.broadcast_arrays(theta, phi)
z = numpy.cos(theta)
x = numpy.empty(theta.shape, dtype='f8')
y = numpy.empty(theta.shape, dtype='f8')
def sigma(z):
return numpy.sign(z) * (2 - (3 * (1- numpy.abs(z))) ** 0.5)
def equatorial(z, phi):
return phi, 3 * numpy.pi / 8 * z
def polarcaps(z, phi):
s = sigma(z)
x = phi - (numpy.abs(s) - 1) * (phi % (0.5 * numpy.pi) - 0.25 * numpy.pi)
y = 0.25 * numpy.pi * s
return x, y
mask = numpy.abs(z) < 2. / 3
x[mask], y[mask] = equatorial(z[mask], phi[mask])
x[~mask], y[~mask] = polarcaps(z[~mask], phi[~mask])
return x, y
def xy2ang(x, y):
r"""Convert :math:`x_s` :math:`y_s` to :math:`\theta` :math:`\phi`.
Returns: theta, phi
Refer to Section 4.4 of http://adsabs.harvard.edu/abs/2005ApJ...622..759G
"""
x, y = numpy.lib.stride_tricks.broadcast_arrays(x, y)
theta = numpy.empty(x.shape, dtype='f8')
phi = numpy.empty(x.shape, dtype='f8')
def equatorial(x, y):
return numpy.arccos(8 * y / (3 * numpy.pi)), x
def polarcaps(x, y):
ya = numpy.abs(y)
xt = x % (0.5 * numpy.pi)
phi = x - (ya - numpy.pi * 0.25) / (ya - numpy.pi * 0.5) * (xt - 0.25 * numpy.pi)
z = (1 - 1. / 3 * (2 - 4 * ya / numpy.pi)**2) * y / ya
return numpy.arccos(z), phi
mask = numpy.abs(y) < numpy.pi * 0.25
theta[mask], phi[mask] = equatorial(x[mask], y[mask])
theta[~mask], phi[~mask] = polarcaps(x[~mask], y[~mask])
return theta, phi
def vertices(nside, pix):
r""" Calculate the vertices for pixels
Returns: theta, phi
for each (nside, pix) pair, a four-vector of theta, and
a four-vector of phi is returned, corresponding to
the theta, phi of each vertex of the pixel boundary.
(left, bottom, right, top)
"""
nside, pix = numpy.lib.stride_tricks.broadcast_arrays(nside, pix)
x = numpy.zeros(nside.shape, dtype=('f8', 4))
y = numpy.zeros(nside.shape, dtype=('f8', 4))
import collections
from enum import Enum
from functools import reduce
from operator import mul
from typing import Iterable, Tuple, TypeVar, Dict, NamedTuple, Optional, Union
import warnings
import logging
import numpy as np
import six
from scipy.linalg import block_diag
from scipy.optimize import OptimizeResult
from collections import abc
from autofit.mapper.variable import Variable, VariableData
def try_getitem(value, index, default=None):
try:
return value[index]
except TypeError:
return default
class LogWarnings(warnings.catch_warnings):
def __init__(self, *, module=None, messages=None, action=None, logger=logging.warning):
super().__init__(record=True, module=module)
self.messages = [] if messages is None else messages
self.log = []
self.action = action
self.logger = logger
def log_warning(self, warn):
self.log.append(warn)
warn_message = f"{warn.filename}:{warn.lineno}: {warn.message}"
self.messages.append(warn_message)
self.logger(warn_message)
def __enter__(self):
self.log = super().__enter__()
self._module._showwarnmsg_impl = self.log_warning
if self.action:
warnings.simplefilter(self.action)
return self
def is_variable(v, *args):
return isinstance(v, Variable)
def is_iterable(arg):
return isinstance(arg, abc.Iterable) and not isinstance(
arg, six.string_types
)
def nested_filter(func, *args):
""" Iterates through a potentially nested set of list, tuples and dictionaries,
recursively looping through the structure and returning the arguments
that func return true on,
Example
-------
>>> list(nested_filter(
... lambda x, *args: x==2,
... [1, (2, 3), [3, 2, {1, 2}]]
... ))
[(2,), (2,), (2,)]
>>> list(nested_filter(
... lambda x, *args: x==2,
... [1, (2, 3), [3, 2, {1, 2}]],
... [1, ('a', 3), [3, 'b', {1, 'c'}]]
... ))
[(2, 'a'), (2, 'b'), (2, 'c')]
"""
out, *_ = args
if isinstance(out, dict):
for k in out:
yield from nested_filter(func, *(out[k] for out in args))
elif is_iterable(out):
for elems in zip(*args):
yield from nested_filter(func, *elems)
else:
if func(*args):
yield args
def nested_update(out, to_replace: dict, replace_keys=False):
"""
Given a potentially nested set of list, tuples and dictionaries, recursively loop through the structure and
replace any values that appear in the dict to_replace
can set to replace dictionary keys optionally,
Example
-------
>>> nested_update([1, (2, 3), [3, 2, {1, 2}]], {2: 'a'})
[1, ('a', 3), [3, 'a', {1, 'a'}]]
>>> nested_update([{2: 2}], {2: 'a'})
[{2: 'a'}]
>>> nested_update([{2: 2}], {2: 'a'}, True)
[{'a': 'a'}]
"""
try:
return to_replace[out]
except KeyError:
pass
if isinstance(out, dict):
if replace_keys:
return type(out)(
{
nested_update(k, to_replace, replace_keys): nested_update(
v, to_replace, replace_keys
)
for k, v in out.items()
}
)
else:
return type(out)(
{k: nested_update(v, to_replace, replace_keys) for k, v in out.items()}
)
elif is_iterable(out):
return type(out)(nested_update(elem, to_replace, replace_keys) for elem in out)
return out
class StatusFlag(Enum):
FAILURE = 0
SUCCESS = 1
NO_CHANGE = 2
BAD_PROJECTION = 3
@classmethod
def get_flag(cls, success, n_iter):
if success:
if n_iter > 0:
return cls.SUCCESS
else:
return cls.NO_CHANGE
return cls.FAILURE
class Status(NamedTuple):
success: bool = True
messages: Tuple[str, ...] = ()
updated: bool = True
flag: StatusFlag = StatusFlag.SUCCESS
def __bool__(self):
return self.success
def __str__(self):
if self.success:
return "Optimisation succeeded"
return f"Optimisation failed: {self.messages}"
class FlattenArrays(dict):
"""
>>> shapes = FlattenArrays(dict(a=(1, 2), b=(2, 3)))
>>> shapes
FlattenArrays(a=(1, 2), b=(2, 3))
>>> shapes.flatten(dict(
...     a=np.arange(2).reshape(1, 2),
...     b=np.arange(6).reshape(2, 3)**2))
array([ 0,  1,  0,  1,  4,  9, 16, 25])
>>> shapes.unflatten(
...     [0, 1, 0, 1, 4, 9, 16, 25])
{'a': array([[0, 1]]), 'b': array([[ 0, 1, 4],
[ 9, 16, 25]])}
"""
def __init__(self, dict_: Dict[Variable, Tuple[int, ...]]):
super().__init__()
self.update(dict_)
self.splits = np.cumsum([np.prod(s) for s in self.values()], dtype=int)
self.inds = [
slice(i0, i1)
for i0, i1 in
# np.arange(i0, i1, dtype=int) for i0, i1 in
zip(np.r_[0, self.splits[:-1]], self.splits)
]
self.sizes = {k: np.prod(s, dtype=int) for k, s in self.items()}
self.k_inds = dict(zip(self, self.inds))
@classmethod
def from_arrays(cls, arrays: Dict[str, np.ndarray]) -> "FlattenArrays":
return cls({k: np.shape(arr) for k, arr in arrays.items()})
def flatten(self, arrays_dict: Dict[Variable, np.ndarray]) -> np.ndarray:
assert all(np.shape(arrays_dict[k]) == shape for k, shape in self.items())
return np.concatenate([np.ravel(arrays_dict[k]) for k in self.keys()])
def extract(self, key, flat, ndim=None):
if ndim is None:
ndim = len(flat.shape)
ind = self.k_inds[key]
return flat[(ind,) * ndim]
def unflatten(self, arr: np.ndarray, ndim=None) -> Dict[str, np.ndarray]:
arr = np.asanyarray(arr)
if ndim is None:
ndim = arr.ndim
arrays = [arr[(ind,) * ndim] for ind in self.inds]
arr_shapes = [arr.shape[ndim:] for arr in arrays]
return VariableData({
k: arr.reshape(shape * ndim + arr_shape)
if shape or arr_shape
else arr.item()
for (k, shape), arr_shape, arr in zip(self.items(), arr_shapes, arrays)
})
def flatten2d(self, values: Dict[Variable, np.ndarray]) -> np.ndarray:
assert all(np.shape(values[k]) == shape * 2 for k, shape in self.items())
return block_diag(
*(np.reshape(values[k], (n, n)) for k, n in self.sizes.items())
)
unflatten2d = unflatten
def __repr__(self):
shapes = ", ".join(map("{0[0]}={0[1]}".format, self.items()))
return f"{type(self).__name__}({shapes})"
@property
def size(self):
return self.splits[-1]
class OptResult(NamedTuple):
mode: Dict[Variable, np.ndarray]
hess_inv: Dict[Variable, np.ndarray]
log_norm: float
full_hess_inv: np.ndarray
result: OptimizeResult
status: Status = Status()
def gen_subsets(n, x, n_iters=None, rng=None):
"""
Generates random subsets of length n of the array x, if the elements of
x are unique then each subset will not contain repeated elements. Each
element is guaranteed to reappear after at most 2*len(x) new elements.
If `x` is a multi-dimensional array, it is only shuffled along its
first index.
if x is an integer, generate subsets of ``np.arange(x)``.
generates n_iters subsets before stopping. If n_iters is None then
generates random subsets for ever
rng is an optionally passed random number generator
Examples
--------
>>> list(gen_subsets(3, 5, n_iters=3))
[array([0, 2, 3]), array([1, 4, 0]), array([2, 3, 4])]
>>> list(gen_subsets(3, [1,10,5,3], n_iters=3))
[array([ 5, 10, 1]), array([3, 5, 1]), array([10, 3, 5])]
"""
rng = rng or np.random.default_rng()
x_shuffled = rng.permutation(x)
tot = len(x_shuffled)
i = 0
stop = tot - n + 1
iters = iter(int, 1) if n_iters is None else range(n_iters)
for j in iters:
if i < stop:
yield x_shuffled[i : i + n]
i += n
else:
x_shuffled = np.r_[x_shuffled[i:], rng.permutation(x_shuffled[:i])]
yield x_shuffled[:n]
i = n
def gen_dict(dict_gen):
"""
Examples
--------
>>> list(gen_dict({1: gen_subsets(3, 4, 3), 2: gen_subsets(2, 5, 3)}))
[{1: array([2, 1, 3]), 2: array([2, 0])},
{1: array([0, 3, 1]), 2: array([3, 1])},
{1: array([2, 0, 1]), 2: array([4, 2])}]
"""
keys = tuple(dict_gen.keys())
for val in zip(*dict_gen.values()):
yield dict(zip(keys, val))
_M = TypeVar("_M")
def prod(iterable: Iterable[_M], *arg: Tuple[_M]) -> _M:
"""calculates the product of the passed iterable,
much like sum, if a second argument is passed,
this is the initial value of the calculation
Examples
--------
>>> prod(range(1, 3))
2
>>> prod(range(1, 3), 2.)
4.
"""
iterable = list(iterable)
return reduce(mul, iterable, *arg)
def r2_score(y_true, y_pred, axis=None):
y_true = np.asanyarray(y_true)
y_pred = np.asanyarray(y_pred)
mse = np.square(y_true - y_pred).mean(axis=axis)
var = y_true.var(axis=axis)
return 1 - mse / var
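def _example_r2_score():
    """Illustrative sketch (names here are hypothetical): a perfect prediction
    gives an R^2 of 1, while predicting the mean gives 0."""
    y = np.array([1.0, 2.0, 3.0, 4.0])
    assert np.allclose(r2_score(y, y), 1.0)
    assert np.allclose(r2_score(y, np.full_like(y, y.mean())), 0.0)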
def propagate_uncertainty(cov: np.ndarray, jac: np.ndarray) -> np.ndarray:
"""Propagates the uncertainty of a covariance matrix given the
passed Jacobian
If the variable arrays are multidimensional then will output in
the shape of the arrays
see https://en.wikipedia.org/wiki/Propagation_of_uncertainty
"""
cov = np.asanyarray(cov)
var_ndim = cov.ndim // 2
det_ndim = jac.ndim - var_ndim
det_shape, var_shape = jac.shape[:det_ndim], jac.shape[det_ndim:]
assert var_shape == cov.shape[:var_ndim] == cov.shape[var_ndim:]
var_size = np.prod(var_shape, dtype=int)
det_size = np.prod(det_shape, dtype=int)
cov2d = cov.reshape((var_size, var_size))
jac2d = jac.reshape((det_size, var_size))
det_cov2d = np.linalg.multi_dot((jac2d, cov2d, jac2d.T))
det_cov = det_cov2d.reshape(det_shape + det_shape)
return det_cov
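def _example_propagate_uncertainty():
    """Illustrative sketch (names here are hypothetical): for a linear map
    y = J x, the propagated covariance is J cov J^T."""
    cov = np.array([[0.5, 0.1],
                    [0.1, 0.2]])
    jac = np.array([[1.0, 2.0]])  # y = x0 + 2*x1
    out = propagate_uncertainty(cov, jac)
    assert np.allclose(out, [[1.7]])  # 0.5 + 2*(2*0.1) + 4*0.2
    return out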
def rescale_to_artists(artists, ax=None):
import matplotlib.pyplot as plt
ax = ax or plt.gca()
while True:
r = ax.figure.canvas.get_renderer()
extents = [
t.get_window_extent(renderer=r).transformed(ax.transData.inverted())
for t in artists
]
min_extent = np.min([e.min for e in extents], axis=0)
max_extent = np.max([e.max for e in extents], axis=0)
min_lim, max_lim = zip(ax.get_xlim(), ax.get_ylim())
# Sometimes the window doesn't always rescale first time around
if (min_extent < min_lim).any() or (max_extent > max_lim).any():
extent = max_extent - min_extent
max_extent += extent * 0.05
min_extent -= extent * 0.05
xlim, ylim = zip(
np.minimum(min_lim, min_extent), np.maximum(max_lim, max_extent)
)
ax.set_xlim(*xlim)
ax.set_ylim(*ylim)
else:
break
return xlim, ylim
# These may no longer be needed?
def add_arrays(*arrays: np.ndarray) -> np.ndarray:
"""Sums over broadcasting multidimensional arrays
whilst preserving the total sum
>>> a = np.arange(10).reshape(1, 2, 1, 5)
>>> b = np.arange(8).reshape(2, 2, 2, 1)
>>> add_arrays(a, b).sum()
73.0
>>> add_arrays(a, b).shape
(2, 2, 2, 5)
>>> a.sum() + b.sum()
73
"""
b = np.broadcast(*arrays)
return sum(a * np.size(a) / b.size for a in arrays)
Axis = Optional[Union[bool, int, Tuple[int, ...]]]
def aggregate(array: np.ndarray, axis: Axis = None, **kwargs) -> np.ndarray:
"""
aggregates the values of array
if axis is False then aggregate returns the unmodified array
otherwise aggregate returns np.sum(array, axis=axis, **kwargs)
"""
if axis is False:
return array
return np.sum(array, axis=axis, **kwargs)
def diag(array: np.ndarray, *ds: Tuple[int, ...]) -> np.ndarray:
array = np.asanyarray(array)
import math
import numpy as np
import paddle
import paddle.nn as nn
import os
import cv2
import paddle.vision.transforms as T
import paddle.distributed as dist
from paddle.io import DistributedBatchSampler, DataLoader
from scheduler import WarmupCosineScheduler
from PIL import Image
from paddle.vision import transforms
import time
from common import Attention as Attention_Pure
from common import Unfold
from common import add_parameter
from common import DropPath, Identity, Mlp
from common import orthogonal_, trunc_normal_, zeros_, ones_
from config import get_config,configs
from dataset import get_dataset
from t2t import t2t_vit_7
from sys import argv
import sys, getopt
#use_gpu = True
#paddle.set_device('gpu:1') if use_gpu else paddle.set_device('cpu')
dist.init_parallel_env()
if __name__ == '__main__':
file_path = './config/t2t_vit_7.yaml'
'''
opts, args = getopt.getopt(sys.argv[1:], "t:")
for op, value in opts:
if op == "-t":
file_path = value
print(file_path)
'''
config = get_config(file_path)
num_epochs = config.NUM_EPOCHS
loss_fun = nn.CrossEntropyLoss()
if config.PRE_TRAIN:
model = t2t_vit_7(pretrained = config.PRE_TRAIN,model_path = config.MODEL_PATH)
else:
model = t2t_vit_7(pretrained = config.PRE_TRAIN)
#model = paddle.Model(model)
model = paddle.DataParallel(model)
model.train()
dataset_train = get_dataset(config.TRAIN_DATASET_PATH,config.TRAIN_DATASET_LABEL_PATH,mode = 'train')
#dataset_train = ImageFolder('ILSVRC2012_w/train', transforms_train)
train_sampler = DistributedBatchSampler(dataset_train, batch_size = config.TRAIN_BATCH_SIZE, drop_last=False,shuffle=True )
dataloader_train = paddle.io.DataLoader(dataset_train,num_workers = config.TRAIN_NUM_WORKS,batch_sampler = train_sampler)
dataset_val = get_dataset( config.VAL_DATASET_PATH, config.VAL_DATASET_LABEL_PATH,mode = 'val' )
val_sampler = DistributedBatchSampler(dataset_val, batch_size = config.VAL_BATCH_SIZE, drop_last=False)
dataloader_val = paddle.io.DataLoader(dataset_val,batch_sampler = val_sampler,num_workers = config.VAL_NUM_WORKS)
if config.USE_WARMUP:
scheduler = WarmupCosineScheduler(learning_rate = config.BASE_LR,
warmup_start_lr = float(config.WARMUP_START_LR),
start_lr = config.BASE_LR,
end_lr = float(config.END_LR),
warmup_epochs = config.WARMUP_EPOCHS,
total_epochs = config.NUM_EPOCHS,
last_epoch = config.LAST_EPOCHS,
)
optimizer = paddle.optimizer.AdamW(
parameters = model.parameters(),
learning_rate = scheduler,
weight_decay = config.WEIGHT_DECAY,  # a weight decay of 0.03 may be worth trying
beta1 = 0.9,
beta2 = 0.999,
epsilon = 1e-8,
grad_clip = paddle.nn.ClipGradByGlobalNorm(1.0) )
else:
optimizer = paddle.optimizer.AdamW(
parameters = model.parameters(),
learning_rate = config.BASE_LR,
weight_decay = config.WEIGHT_DECAY,  # a weight decay of 0.03 may be worth trying
beta1 = 0.9,
beta2 = 0.999,
epsilon = 1e-8,
grad_clip = paddle.nn.ClipGradByGlobalNorm(1.0) )
#optimizer = paddle.optimizer.Momentum(parameters=model.parameters(), learning_rate=0.05,weight_decay = 0.03)
#model.prepare(optimizer = optimizer,loss = loss_fun,metrics=paddle.metric.Accuracy(topk=(1, 5)))
#model.fit(dataset_train,epochs = num_epochs,batch_size=128,verbose=1)
total_step = len(dataloader_train)
read_start_time = 0
read_time = 0
train_start_time = 0
train_time = 0
flag_lr = 0
max_acc = 0.71
for epoch in range(0,num_epochs):
model.train()
i = 0
train_losses = []
train_accs = []
#read_time
read_start_time = time.time()
for X,Y in dataloader_train:
read_time+=(time.time()-read_start_time)
i = i + 1
#train time
train_start_time = time.time()
pre_y = model(X)
loss = loss_fun(pre_y,Y)
optimizer.clear_grad()
loss.backward()
optimizer.step()
train_time += (time.time()-train_start_time)
train_losses.append(loss.item())
Y = paddle.reshape(Y,shape=[-1, 1])
acc = paddle.metric.accuracy(input=pre_y, label=Y)
train_accs.append(acc)
if i%1000 == 0:
train_loss = np.sum(train_losses) / len(train_losses)
train_acc = np.sum(train_accs) / len(train_accs)
print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}, read_time: {:.4f}, train_time: {:.4f}, lr: {:.6f}'.format(epoch, num_epochs, i, total_step, train_loss,read_time/1000,train_time/1000,optimizer.get_lr()))
read_time=0
train_time=0
read_start_time = time.time()
scheduler.step()
train_loss = np.sum(train_losses) / len(train_losses)
train_acc = np.sum(train_accs) / len(train_accs)
print('Epoch [{}/{}], avg_Loss: {:.4f}, avg_acc: {:.4f}'.format(epoch, num_epochs, train_loss,train_acc))
if train_acc > 0.70:
model.eval()
val_accs = []
for X,Y in dataloader_val:
pre_y = model(X)
Y =paddle.reshape(Y,shape=[-1, 1])
all_Y = []
paddle.distributed.all_gather(all_Y, Y)
all_labels = paddle.concat(all_Y, 0)
all_pre_y = []
paddle.distributed.all_gather(all_pre_y, pre_y)
all_pre = paddle.concat(all_pre_y, 0)
acc = paddle.metric.accuracy(input=all_pre, label=all_labels)
val_accs.append(acc)
val_acc = np.sum(val_accs) / len(val_accs)
print("ImageNet val acc is:%.4f" %val_acc)
if val_acc > max_acc:
max_acc = val_acc
print('avg_acc: {:.4f} model saved!'.format( val_acc))
paddle.save(model.state_dict(), "./output/t2t_vit_7_max.pdparams")
if(val_acc > 0.7155):
print("model saved!\n")
paddle.save(model.state_dict(), "./output/t2t_vit_7_final.pdparams")
print("train ended!\n")
model.eval()
val_accs = []
for X,Y in dataloader_val:
pre_y = model(X)
Y =paddle.reshape(Y,shape=[-1, 1])
all_Y = []
paddle.distributed.all_gather(all_Y, Y)
all_labels = paddle.concat(all_Y, 0)
all_pre_y = []
paddle.distributed.all_gather(all_pre_y, pre_y)
all_pre = paddle.concat(all_pre_y, 0)
acc = paddle.metric.accuracy(input=all_pre, label=all_labels)
val_accs.append(acc)
val_acc = np.sum(val_accs) / len(val_accs)
print("ImageNet val acc is:%.4f" %val_acc)
# Copyright 2021 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from hypothesis import settings, given, strategies as st
from hypothesis.extra.numpy import arrays
import numpy as np
import tensorflow as tf
from thewalrus.symplectic import two_mode_squeezing
from mrmustard.lab.gates import Sgate, BSgate, S2gate, Ggate, Interferometer, Ggate
from mrmustard.lab.circuit import Circuit
from mrmustard.utils.training import Optimizer
from mrmustard.utils.parametrized import Parametrized
from mrmustard.lab.states import Vacuum
from mrmustard.physics.gaussian import trace, von_neumann_entropy
from mrmustard import settings
from mrmustard.math import Math
math = Math()
@given(n=st.integers(0, 3))
def test_S2gate_coincidence_prob(n):
"""Testing the optimal probability of obtaining |n,n> from a two mode squeezed vacuum"""
tf.random.set_seed(137)
S = S2gate(
r=abs(np.random.normal()),
phi=np.random.normal(),
r_trainable=True,
phi_trainable=True,
)
def cost_fn():
return -tf.abs((Vacuum(2) >> S[0, 1]).ket(cutoffs=[n + 1, n + 1])[n, n]) ** 2
opt = Optimizer(euclidean_lr=0.01)
opt.minimize(cost_fn, by_optimizing=[S], max_steps=300)
expected = 1 / (n + 1) * (n / (n + 1)) ** n
assert np.allclose(-cost_fn(), expected, atol=1e-5)
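# Note on the expected value above: for a two-mode squeezed vacuum,
# P(n, n) = tanh(r)**(2n) / cosh(r)**2.  Writing t = tanh(r)**2, this is
# t**n * (1 - t), which is maximised at t = n / (n + 1), giving
# P_max = (n / (n + 1))**n / (n + 1), the `expected` value used here.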
@given(i=st.integers(1, 5), k=st.integers(1, 5))
def test_hong_ou_mandel_optimizer(i, k):
"""Finding the optimal beamsplitter transmission to get Hong-Ou-Mandel dip
This generalizes the single photon Hong-Ou-Mandel effect to the many photon setting
see Eq. 20 of https://journals.aps.org/prresearch/pdf/10.1103/PhysRevResearch.3.043065
which lacks a square root in the right hand side.
"""
tf.random.set_seed(137)
r = np.arcsinh(1.0)
ops = [
S2gate(r=r, phi=0.0, phi_trainable=True)[0, 1],
S2gate(r=r, phi=0.0, phi_trainable=True)[2, 3],
BSgate(
theta=np.arccos(np.sqrt(k / (i + k))) + 0.1 * np.random.normal(),
phi=np.random.normal(),
theta_trainable=True,
phi_trainable=True,
)[1, 2],
]
circ = Circuit(ops)
state_in = Vacuum(num_modes=4)
cutoff = 1 + i + k
def cost_fn():
return tf.abs((state_in >> circ).ket(cutoffs=[cutoff] * 4)[i, 1, i + k - 1, k]) ** 2
opt = Optimizer(euclidean_lr=0.01)
opt.minimize(cost_fn, by_optimizing=[circ], max_steps=300)
assert np.allclose(
np.cos(circ.trainable_parameters["euclidean"][2]) ** 2, k / (i + k), atol=1e-2
)
def test_squeezing_hong_ou_mandel_optimizer():
"""Finding the optimal squeezing parameter to get Hong-Ou-Mandel dip in time
see https://www.pnas.org/content/117/52/33107/tab-article-info
"""
tf.random.set_seed(137)
r = np.arcsinh(1.0)
ops = [
S2gate(r=r, phi=0.0, phi_trainable=True)[0, 1],
S2gate(r=r, phi=0.0, phi_trainable=True)[2, 3],
S2gate(r=1.0, phi=np.random.normal(), r_trainable=True, phi_trainable=True)[1, 2],
]
circ = Circuit(ops)
state_in = Vacuum(num_modes=4)
def cost_fn():
return tf.abs((state_in >> circ).ket(cutoffs=[2, 2, 2, 2])[1, 1, 1, 1]) ** 2
opt = Optimizer(euclidean_lr=0.001)
opt.minimize(cost_fn, by_optimizing=[circ], max_steps=300)
assert np.allclose(np.sinh(circ.trainable_parameters["euclidean"][2]) ** 2, 1, atol=1e-2)
def test_learning_two_mode_squeezing():
"""Finding the optimal beamsplitter transmission to make a pair of single photons"""
tf.random.set_seed(137)
ops = [
Sgate(
r=abs(np.random.normal(size=(2))),
phi=np.random.normal(size=(2)),
r_trainable=True,
phi_trainable=True,
),
BSgate(
theta=np.random.normal(),
phi=np.random.normal(),
theta_trainable=True,
phi_trainable=True,
),
]
circ = Circuit(ops)
tf.random.set_seed(20)
state_in = Vacuum(num_modes=2)
def cost_fn():
amps = (state_in >> circ).ket(cutoffs=[2, 2])
return -tf.abs(amps[1, 1]) ** 2 + tf.abs(amps[0, 1]) ** 2
opt = Optimizer(euclidean_lr=0.05)
opt.minimize(cost_fn, by_optimizing=[circ], max_steps=1000)
assert np.allclose(-cost_fn(), 0.25, atol=1e-5)
def test_learning_two_mode_Ggate():
"""Finding the optimal Ggate to make a pair of single photons"""
tf.random.set_seed(137)
G = Ggate(num_modes=2, symplectic_trainable=True)
tf.random.set_seed(20)
def cost_fn():
amps = (Vacuum(2) >> G).ket(cutoffs=[2, 2])
return -tf.abs(amps[1, 1]) ** 2 + tf.abs(amps[0, 1]) ** 2
opt = Optimizer(symplectic_lr=0.5, euclidean_lr=0.01)
opt.minimize(cost_fn, by_optimizing=[G], max_steps=500)
assert np.allclose(-cost_fn(), 0.25, atol=1e-4)
def test_learning_two_mode_Interferometer():
"""Finding the optimal Interferometer to make a pair of single photons"""
np.random.seed(11)
ops = [
Sgate(
r=np.random.normal(size=(2)) ** 2,
phi=np.random.normal(size=(2)),
r_trainable=True,
phi_trainable=True,
),
Interferometer(num_modes=2, orthogonal_trainable=True),
]
circ = Circuit(ops)
state_in = Vacuum(num_modes=2)
def cost_fn():
amps = (state_in >> circ).ket(cutoffs=[2, 2])
return -tf.abs(amps[1, 1]) ** 2 + tf.abs(amps[0, 1]) ** 2
opt = Optimizer(orthogonal_lr=0.5, euclidean_lr=0.01)
opt.minimize(cost_fn, by_optimizing=[circ], max_steps=1000)
assert np.allclose(-cost_fn(), 0.25, atol=1e-5)
def test_learning_four_mode_Interferometer():
"""Finding the optimal Interferometer to make a NOON state with N=2"""
np.random.seed(11)
ops = [
Sgate(
r=np.random.uniform(size=4),
phi=np.random.normal(size=4),
r_trainable=True,
phi_trainable=True,
),
Interferometer(num_modes=4, orthogonal_trainable=True),
]
circ = Circuit(ops)
state_in = Vacuum(num_modes=4)
def cost_fn():
amps = (state_in >> circ).ket(cutoffs=[3, 3, 3, 3])
return (
-tf.abs(
tf.reduce_sum(
amps[1, 1]
* np.array([[0, 0, 1 / np.sqrt(2)], [0, 0, 0], [1 / np.sqrt(2), 0, 0]])
)
)
** 2
)
opt = Optimizer(symplectic_lr=0.5, euclidean_lr=0.01)
opt.minimize(cost_fn, by_optimizing=[circ], max_steps=1000)
assert np.allclose(-cost_fn(), 0.0625, atol=1e-5)
def test_squeezing_hong_ou_mandel_optimizer():
"""Finding the optimal squeezing parameter to get Hong-Ou-Mandel dip in time
see https://www.pnas.org/content/117/52/33107/tab-article-info
"""
tf.random.set_seed(137)
r = np.arcsinh(1.0)
ops = [
S2gate(r=r, phi=0.0, phi_trainable=True)[0, 1],
S2gate(r=r, phi=0.0, phi_trainable=True)[2, 3],
S2gate(r=1.0, phi=np.random.normal(), r_trainable=True, phi_trainable=True)[1, 2],
]
circ = Circuit(ops)
def cost_fn():
return tf.abs((Vacuum(4) >> circ).ket(cutoffs=[2, 2, 2, 2])[1, 1, 1, 1]) ** 2
opt = Optimizer(euclidean_lr=0.001)
opt.minimize(cost_fn, by_optimizing=[circ], max_steps=300)
assert np.allclose(np.sinh(circ.trainable_parameters["euclidean"][2]) ** 2, 1, atol=1e-2)
def test_parameter_passthrough():
"""Same as the test above, but with param passthrough"""
tf.random.set_seed(137)
r = np.arcsinh(1.0)
par = Parametrized(
r=math.new_variable(r, (0.0, None), "r"),
phi=math.new_variable(np.random.normal(), (None, None), "phi"),
)
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 18 17:45:51 2020
Author: <NAME>
License: BSD-3
"""
import numpy as np
from numpy.testing import assert_allclose, assert_equal
import pytest
from statsmodels.regression.linear_model import OLS
import statsmodels.stats.power as smpwr
import statsmodels.stats.oneway as smo # needed for function with `test`
from statsmodels.stats.oneway import (
confint_effectsize_oneway, confint_noncentrality, effectsize_oneway,
anova_oneway,
anova_generic, equivalence_oneway, equivalence_oneway_generic,
power_equivalence_oneway, power_equivalence_oneway0,
f2_to_wellek, fstat_to_wellek, wellek_to_f2)
from statsmodels.stats.robust_compare import scale_transform
from statsmodels.stats.contrast import (
wald_test_noncent_generic, wald_test_noncent, _offset_constraint)
def test_oneway_effectsize():
# example 3 in Steiger 2004 Beyond the F-test, p. 169
F = 5
df1 = 3
df2 = 76
nobs = 80
ci = confint_noncentrality(F, df1, df2, alpha=0.05,
alternative="two-sided")
ci_es = confint_effectsize_oneway(F, df1, df2, alpha=0.05)
ci_steiger = ci_es.ci_f * np.sqrt(4 / 3)
res_ci_steiger = [0.1764, 0.7367]
res_ci_nc = np.asarray([1.8666, 32.563])
assert_allclose(ci, res_ci_nc, atol=0.0001)
assert_allclose(ci_es.ci_f_corrected, res_ci_steiger, atol=0.00006)
assert_allclose(ci_steiger, res_ci_steiger, atol=0.00006)
assert_allclose(ci_es.ci_f**2, res_ci_nc / nobs, atol=0.00006)
assert_allclose(ci_es.ci_nc, res_ci_nc, atol=0.0001)
def test_effectsize_power():
# example and results from PASS documentation
n_groups = 3
means = [527.86, 660.43, 649.14]
vars_ = 107.4304**2
nobs = 12
es = effectsize_oneway(means, vars_, nobs, use_var="equal", ddof_between=0)
es = np.sqrt(es)
alpha = 0.05
power = 0.8
nobs_t = nobs * n_groups
kwds = {'effect_size': es, 'nobs': nobs_t, 'alpha': alpha, 'power': power,
'k_groups': n_groups}
from statsmodels.stats.power import FTestAnovaPower
res_pow = 0.8251
res_es = 0.559
kwds_ = kwds.copy()
del kwds_['power']
p = FTestAnovaPower().power(**kwds_)
assert_allclose(p, res_pow, atol=0.0001)
assert_allclose(es, res_es, atol=0.0006)
# example unequal sample sizes
nobs = np.array([15, 9, 9])
kwds['nobs'] = nobs
es = effectsize_oneway(means, vars_, nobs, use_var="equal", ddof_between=0)
es = np.sqrt(es)
kwds['effect_size'] = es
p = FTestAnovaPower().power(**kwds_)
res_pow = 0.8297
res_es = 0.590
assert_allclose(p, res_pow, atol=0.005) # lower than print precision
assert_allclose(es, res_es, atol=0.0006)
def test_effectsize_fstat():
# results from R package `effectsize`, confint is 0.9 confidence
# > es = F_to_eta2(45.8, 3, 35)
Eta_Sq_partial = 0.796983758700696
CI_eta2 = 0.685670133284926, 0.855981325777856 # reformated from output
# > es = F_to_epsilon2(45.8, 3, 35)
Epsilon_Sq_partial = 0.779582366589327
CI_eps2 = 0.658727573280777, 0.843636867987386
# > es = F_to_omega2(45.8, 3, 35)
Omega_Sq_partial = 0.775086505190311
CI_omega2 = 0.65286429480169, 0.840179680453464
# > es = F_to_f(45.8, 3, 35)
Cohens_f_partial = 1.98134153686695
CI_f = 1.47694659580859, 2.43793847155554
f_stat, df1, df2 = 45.8, 3, 35
# nobs = df1 + df2 + 1 # not directly used in the following, only df
fes = smo._fstat2effectsize(f_stat, df1, df2)
assert_allclose(np.sqrt(fes.f2), Cohens_f_partial, rtol=1e-13)
assert_allclose(fes.eta2, Eta_Sq_partial, rtol=1e-13)
assert_allclose(fes.eps2, Epsilon_Sq_partial, rtol=1e-13)
assert_allclose(fes.omega2, Omega_Sq_partial, rtol=1e-13)
ci_nc = confint_noncentrality(f_stat, df1, df2, alpha=0.1)
# the following replicates R package effectsize
ci_es = smo._fstat2effectsize(ci_nc / df1, df1, df2)
assert_allclose(ci_es.eta2, CI_eta2, rtol=2e-4)
assert_allclose(ci_es.eps2, CI_eps2, rtol=2e-4)
assert_allclose(ci_es.omega2, CI_omega2, rtol=2e-4)
assert_allclose(np.sqrt(ci_es.f2), CI_f, rtol=2e-4)
def test_effectsize_fstat_stata():
# reference numbers computed with Stata 14
# Stata 16 does not seem to have confint for omega2
# esizei 2 40 7.47403193349075, level(90)
eta2 = 0.2720398648288652
lb_eta2 = 0.0742092468714613
ub_eta2 = 0.4156116886974804
omega2 = 0.2356418580703085
lb_omega2 = 0.0279197092150344
ub_omega2 = 0.3863922731323545
# level = 90
f_stat, df1, df2 = 7.47403193349075, 2, 40
fes = smo._fstat2effectsize(f_stat, df1, df2)
assert_allclose(fes.eta2, eta2, rtol=1e-13)
assert_allclose(fes.omega2, omega2, rtol=0.02) # low agreement
ci_es = smo.confint_effectsize_oneway(f_stat, df1, df2, alpha=0.1)
assert_allclose(ci_es.eta2, (lb_eta2, ub_eta2), rtol=1e-4)
assert_allclose(ci_es.ci_omega2, (lb_omega2, ub_omega2), rtol=0.025)
@pytest.mark.parametrize("center", ['median', 'mean', 'trimmed'])
def test_scale_transform(center):
x = np.random.randn(5, 3)
xt = scale_transform(x, center=center, transform='abs', trim_frac=0.2,
axis=0)
xtt = scale_transform(x.T, center=center, transform='abs', trim_frac=0.2,
axis=1)
assert_allclose(xt.T, xtt, rtol=1e-13)
xt0 = scale_transform(x[:, 0], center=center, transform='abs',
trim_frac=0.2)
assert_allclose(xt0, xt[:, 0], rtol=1e-13)
assert_allclose(xt0, xtt[0, :], rtol=1e-13)
class TestOnewayEquivalenc(object):
@classmethod
def setup_class(cls):
y0 = [112.488, 103.738, 86.344, 101.708, 95.108, 105.931,
95.815, 91.864, 102.479, 102.644]
y1 = [100.421, 101.966, 99.636, 105.983, 88.377, 102.618,
105.486, 98.662, 94.137, 98.626, 89.367, 106.204]
y2 = [84.846, 100.488, 119.763, 103.736, 93.141, 108.254,
99.510, 89.005, 108.200, 82.209, 100.104, 103.706,
107.067]
y3 = [100.825, 100.255, 103.363, 93.230, 95.325, 100.288,
94.750, 107.129, 98.246, 96.365, 99.740, 106.049,
92.691, 93.111, 98.243]
n_groups = 4
arrs_w = [np.asarray(yi) for yi in [y0, y1, y2, y3]]
nobs = np.asarray([len(yi) for yi in arrs_w])
nobs_mean = np.mean(nobs)
means = np.asarray([yi.mean() for yi in arrs_w])
stds = np.asarray([yi.std(ddof=1) for yi in arrs_w])
cls.data = arrs_w # TODO use `data`
cls.means = means
cls.nobs = nobs
cls.stds = stds
cls.n_groups = n_groups
cls.nobs_mean = nobs_mean
def test_equivalence_equal(self):
# reference numbers from Jan and Shieh 2019, p. 5
means = self.means
nobs = self.nobs
stds = self.stds
n_groups = self.n_groups
eps = 0.5
res0 = anova_generic(means, stds**2, nobs, use_var="equal")
f = res0.statistic
res = equivalence_oneway_generic(f, n_groups, nobs.sum(), eps,
res0.df, alpha=0.05,
margin_type="wellek")
assert_allclose(res.pvalue, 0.0083, atol=0.001)
assert_equal(res.df, [3, 46])
# the agreement for f-stat looks too low
assert_allclose(f, 0.0926, atol=0.0006)
res = equivalence_oneway(self.data, eps, use_var="equal",
margin_type="wellek")
assert_allclose(res.pvalue, 0.0083, atol=0.001)
assert_equal(res.df, [3, 46])
def test_equivalence_welch(self):
# reference numbers from Jan and Shieh 2019, p. 6
means = self.means
nobs = self.nobs
stds = self.stds
n_groups = self.n_groups
vars_ = stds**2
eps = 0.5
res0 = anova_generic(means, vars_, nobs, use_var="unequal",
welch_correction=False)
f_stat = res0.statistic
res = equivalence_oneway_generic(f_stat, n_groups, nobs.sum(), eps,
res0.df, alpha=0.05,
margin_type="wellek")
assert_allclose(res.pvalue, 0.0110, atol=0.001)
assert_allclose(res.df, [3.0, 22.6536], atol=0.0006)
# agreement for Welch f-stat looks too low b/c welch_correction=False
assert_allclose(f_stat, 0.1102, atol=0.007)
res = equivalence_oneway(self.data, eps, use_var="unequal",
margin_type="wellek")
assert_allclose(res.pvalue, 0.0110, atol=1e-4)
assert_allclose(res.df, [3.0, 22.6536], atol=0.0006)
assert_allclose(res.f_stat, 0.1102, atol=1e-4) # 0.007)
# check post-hoc power, JS p. 6
pow_ = power_equivalence_oneway0(f_stat, n_groups, nobs, eps, res0.df)
assert_allclose(pow_, 0.1552, atol=0.007)
pow_ = power_equivalence_oneway(eps, eps, nobs.sum(),
n_groups=n_groups, df=None, alpha=0.05,
margin_type="wellek")
assert_allclose(pow_, 0.05, atol=1e-13)
nobs_t = nobs.sum()
es = effectsize_oneway(means, vars_, nobs, use_var="unequal")
es = np.sqrt(es)
es_w0 = f2_to_wellek(es**2, n_groups)
es_w = np.sqrt(fstat_to_wellek(f_stat, n_groups, nobs_t / n_groups))
pow_ = power_equivalence_oneway(es_w, eps, nobs_t,
n_groups=n_groups, df=None, alpha=0.05,
margin_type="wellek")
assert_allclose(pow_, 0.1552, atol=0.007)
assert_allclose(es_w0, es_w, atol=0.007)
margin = wellek_to_f2(eps, n_groups)
pow_ = power_equivalence_oneway(es**2, margin, nobs_t,
n_groups=n_groups, df=None, alpha=0.05,
margin_type="f2")
assert_allclose(pow_, 0.1552, atol=0.007)
class TestOnewayScale(object):
@classmethod
def setup_class(cls):
yt0 = np.array([102., 320., 0., 107., 198., 200., 4., 20., 110., 128.,
7., 119., 309.])
yt1 = np.array([0., 1., 228., 81., 87., 119., 79., 181., 43., 12., 90.,
105., 108., 119., 0., 9.])
yt2 = np.array([33., 294., 134., 216., 83., 105., 69., 20., 20., 63.,
98., 155., 78., 75.])
y0 = np.array([452., 874., 554., 447., 356., 754., 558., 574., 664.,
682., 547., 435., 245.])
y1 = np.array([546., 547., 774., 465., 459., 665., 467., 365., 589.,
534., 456., 651., 654., 665., 546., 537.])
y2 = np.array([785., 458., 886., 536., 669., 857., 821., 772., 732.,
689., 654., 597., 830., 827.])
n_groups = 3
data = [y0, y1, y2]
nobs = np.asarray([len(yi) for yi in data])
nobs_mean = np.mean(nobs)
means = np.asarray([yi.mean() for yi in data])
stds = np.asarray([yi.std(ddof=1) for yi in data])
cls.data = data
cls.data_transformed = [yt0, yt1, yt2]
cls.means = means
cls.nobs = nobs
cls.stds = stds
cls.n_groups = n_groups
cls.nobs_mean = nobs_mean
def test_means(self):
# library onewaystats, BF test for equality of means
# st = bf.test(y ~ g, df3)
statistic = 7.10900606421182
parameter = [2, 31.4207256105052]
p_value = 0.00283841965791224
# method = 'Brown-Forsythe Test'
res = anova_oneway(self.data, use_var="bf")
# R bf.test uses original BF df_num
assert_allclose(res.pvalue2, p_value, rtol=1e-13)
'''
Neuro Evolution Algorithm by <NAME>, author of the FitML GitHub blog and repository
https://github.com/FitMachineLearning/FitML/
See the agents in action at
https://www.youtube.com/channel/UCi7_WxajoowBl4_9P0DhzzA/featured
'''
import numpy as np
import keras
import gym
from random import gauss
#import roboschool
import math
from random import randint
import tensorflow as tf
from Lib.Individual import IndividualTF
'''
ENVIRONMENT_NAME = "RoboschoolAnt-v1"
OBSERVATION_SPACE = 28
ACTION_SPACE = 8
'''
ENVIRONMENT_NAME = "LunarLanderContinuous-v2"
OBSERVATION_SPACE = 8
ACTION_SPACE = 2
B_DISCOUNT = 0.98
POPULATION_SIZE = 12
NETWORK_WIDTH = 32
NETWORK_HIDDEN_LAYERS = 0
NUM_TEST_EPISODES = 1
NUM_SELECTED_FOR_REPRODUCTION = 2
NOISE_SIGMA = 0.05
MUTATION_PROB = 0.1
MAX_GENERATIONS = 200000
USE_GAUSSIAN_NOISE = True
HAS_EARLY_TERMINATION_REWARD = False
EARLY_TERMINATION_REWARD = -50
CLIP_ACTIONS = True
MAX_STEPS = 950
all_individuals = []
generations_count = 0
total_population_counter = 0
#numLandings = 0
'''---------ENVIRONMENT INITIALIZATION--------'''
env = gym.make(ENVIRONMENT_NAME)
#env.render(mode="human")
env.reset()
print("-- Observations",env.observation_space)
print("-- actionspace",env.action_space)
#initialize training matrix with random states and actions
apdataX = tf.placeholder("float", [None, OBSERVATION_SPACE])
#apdataY = np.random.random((5,num_env_actions))
apdataY = tf.placeholder("float", [None, ACTION_SPACE])
sess = tf.Session()
'''---------------------'''
def init_weights(shape):
return tf.Variable(tf.random_normal(shape, stddev=0.01))
def apModel(X, apw_h, apw_o):
h = tf.nn.leaky_relu(tf.matmul(X, apw_h)) # this is a basic mlp, think 2 stacked logistic regressions
return tf.matmul(h, apw_o) # note that we dont take the softmax at the end because our cost fn does that for us
def GetRememberedOptimalPolicy(indiv,qstate):
predX = np.zeros(shape=(1,OBSERVATION_SPACE))
predX[0] = qstate
#print("trying to predict reward at qs_a", predX[0])
#pred = action_predictor_model.predict(predX[0].reshape(1,predX.shape[1]))
inputVal = predX[0].reshape(1,predX.shape[1])
pred = sess.run(indiv.appy_x, feed_dict={apdataX: inputVal})
r_remembered_optimal_policy = pred[0]
return r_remembered_optimal_policy
def create_individualTF(network_width, network_hidden_layers, observation_space, action_space):
''' apModel '''
apw_h = init_weights([OBSERVATION_SPACE, 2048]) # create symbolic variables
apw_h2 = init_weights([32, 32]) # create symbolic variables
apw_h3 = init_weights([32, 32]) # create symbolic variable
apw_o = init_weights([2048, ACTION_SPACE])
appy_x = apModel(apdataX, apw_h, apw_o)
apcost = tf.sqrt(tf.reduce_mean(tf.square(tf.subtract(apdataY, appy_x))))
apOptimizer = tf.train.AdadeltaOptimizer(1.,0.9,1e-6)
aptrain_op = apOptimizer.minimize(apcost)
''' end apModel '''
sess.run(tf.global_variables_initializer())
#action_predictor_model = create_model(network_width,network_hidden_layers, observation_space, action_space)
indiv = IndividualTF(generationID=generations_count, indivID=total_population_counter ,
apw_h=apw_h,apw_h2=apw_h2,
apw_h3=apw_h3,apw_o=apw_o,appy_x=appy_x)
print("Creating individual ",generations_count)
return indiv
def initialize_population(population_size,network_width,network_hidden_layers, observation_space, action_space, environment_name,total_population_counter):
initial_population = []
for i in range (population_size):
#print("kk", network_width,network_hidden_layers,observation_space,action_space)
indiv = create_individualTF( network_width, network_hidden_layers, observation_space, action_space)
total_population_counter += 1
initial_population.append(indiv)
return initial_population, total_population_counter
def test_individual(indiv,num_test_episodes):
indiv.lifeScore = 0
allRewards = []
terminated_early = False
for i in range(num_test_episodes):
episodeRewards = []
cumulativeRewards = 0
#print("episode "+str(i)+" performing test for indiv ",indiv.printme())
qs = env.reset()
for step in range (5000):
a = GetRememberedOptimalPolicy(indiv, qs)
if CLIP_ACTIONS:
for i in range (np.alen(a)):
if a[i] < -1: a[i]=-0.99999999999
if a[i] > 1: a[i] = 0.99999999999
qs,r,done,info = env.step(a)
if HAS_EARLY_TERMINATION_REWARD and done and step<MAX_STEPS-3:
r = EARLY_TERMINATION_REWARD
terminated_early = True
cumulativeRewards = cumulativeRewards + r
episodeRewards.append(r)
#indiv.lifeScore += r
env.render()
if step > MAX_STEPS:
done = True
if done:
episodeRewards.reverse()
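# walk backwards through the episode (the list is now reversed) and
# accumulate discounted returns: each entry becomes
# r_t + B_DISCOUNT * (discounted return of the later steps)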
for j in range(len(episodeRewards)):
#if j ==0:
# print("last reward ",episodeRewards[j])
if j > 0:
episodeRewards[j] = episodeRewards[j] + B_DISCOUNT * episodeRewards[j-1]
#avg = sum(episodeRewards)/len(episodeRewards)
#print("episode average ", avg)
for j in range(len(episodeRewards)):
allRewards.append(episodeRewards[j])
#allRewards = allRewards + episodeRewards
epAvg = sum(episodeRewards) / len(episodeRewards)
allRewards.append(epAvg)
#if epAvg > 0:
# numLandings = numLandings+1
break
#print("generationID",indiv.generationID,"IndivID",indiv.indivID,"episodeRewards rewards ",epAvg)
avg = sum(allRewards) / len(allRewards)
indiv.lifeScore = avg
#indiv.lifeScore = math.fabs(float(env.unwrapped.walk_target_dist) - 1001.0)
#if terminated_early:
# print("Terminated early")
# indiv.lifeScore = math.fabs(float(env.unwrapped.walk_target_dist) - 1001.0) - ( - EARLY_TERMINATION_REWARD)
print("generationID",indiv.generationID,"indivID - ",indiv.indivID,"numLandings ",0,"lifeScore =",indiv.lifeScore)
def test_all_individuals(num_test_episodes):
for i in range(len(all_individuals)):
test_individual(all_individuals[i],NUM_TEST_EPISODES)
def select_top_individuals(num_selected,population_size):
scores = np.zeros(population_size)
for i in range(np.alen(scores)):
scores[i] = all_individuals[i].lifeScore
#print( scores )
topScores = scores[ scores.argsort()[-num_selected:][::-1] ]
#print ("Top Scores ", topScores)
selected_individuals = []
for i in range(len(all_individuals)):
if all_individuals[i].lifeScore >= topScores.min():
#print("Selecting individual",i," with score ", all_individuals[i].lifeScore,"cuttoff ", topScores.min())
selected_individuals.append(all_individuals[i])
else:
all_individuals[i].indivID = -1
all_individuals[i].generationID = -1
all_individuals[i].lifeScore = -10000
print("Selected individuals ")
for i in range (len(selected_individuals)):
print(selected_individuals[i].printme())
return selected_individuals
# --- Parameter Noising
def add_noise_simple(mu,noiseSigma, largeNoise=False):
x = np.random.rand(1)
"""
primitive.py
----------
Process detected planar primitives.
Primitives are supported in vertex group format (.vg, .bvg).
Mapple as in [Easy3D](https://github.com/LiangliangNan/Easy3D)
can be used to generate such primitives from point clouds.
Otherwise, one can refer to the vertex group file format specification
attached to the README document.
"""
from random import random
from pathlib import PosixPath
import struct
import numpy as np
from sklearn.decomposition import PCA
from tqdm import tqdm
from .logger import attach_to_log
logger = attach_to_log()
class VertexGroup:
"""
Class for manipulating planar primitives.
"""
def __init__(self, filepath, process=True):
"""
Init VertexGroup.
Class for manipulating planar primitives.
Parameters
----------
filepath: pathlib.Path
Filepath to vertex group file (.vg) or binary vertex group file (.bvg)
process: bool
Immediate processing if set True
"""
self.filepath = filepath
self.processed = False
self.points = None
self.planes = None
self.bounds = None
self.points_grouped = None
self.points_ungrouped = None
self.vgroup_ascii = self.load_file()
self.vgroup_binary = None
if process:
self.process()
def load_file(self):
"""
Load (ascii / binary) vertex group file.
"""
if self.filepath.suffix == '.vg':
with open(self.filepath, 'r') as fin:
return fin.readlines()
elif self.filepath.suffix == '.bvg':
# define size constants
_SIZE_OF_INT = 4
_SIZE_OF_FLOAT = 4
_SIZE_OF_PARAM = 4
_SIZE_OF_COLOR = 3
vgroup_ascii = ''
with open(self.filepath, 'rb') as fin:
# points
num_points = struct.unpack('i', fin.read(_SIZE_OF_INT))[0]
points = struct.unpack('f' * num_points * 3, fin.read(_SIZE_OF_FLOAT * num_points * 3))
vgroup_ascii += f'num_points: {num_points}\n'
vgroup_ascii += ' '.join(map(str, points)) + '\n'
# colors
num_colors = struct.unpack("i", fin.read(_SIZE_OF_INT))[0]
vgroup_ascii += f'num_colors: {num_colors}\n'
# normals
num_normals = struct.unpack("i", fin.read(_SIZE_OF_INT))[0]
normals = struct.unpack('f' * num_normals * 3, fin.read(_SIZE_OF_FLOAT * num_normals * 3))
vgroup_ascii += f'num_normals: {num_normals}\n'
vgroup_ascii += ' '.join(map(str, normals)) + '\n'
# groups
num_groups = struct.unpack("i", fin.read(_SIZE_OF_INT))[0]
vgroup_ascii += f'num_groups: {num_groups}\n'
group_counter = 0
while group_counter < num_groups:
group_type = struct.unpack("i", fin.read(_SIZE_OF_INT))[0]
num_group_parameters = struct.unpack("i", fin.read(_SIZE_OF_INT))[0]
group_parameters = struct.unpack("f" * _SIZE_OF_PARAM, fin.read(_SIZE_OF_INT * _SIZE_OF_PARAM))
group_label_size = struct.unpack("i", fin.read(_SIZE_OF_INT))[0]
# be reminded that vg <-> bvg in Mapple does not maintain group order
group_label = struct.unpack("c" * group_label_size, fin.read(group_label_size))
group_color = struct.unpack("f" * _SIZE_OF_COLOR, fin.read(_SIZE_OF_FLOAT * _SIZE_OF_COLOR))
group_num_point = struct.unpack("i", fin.read(_SIZE_OF_INT))[0]
group_points = struct.unpack("i" * group_num_point, fin.read(_SIZE_OF_INT * group_num_point))
num_children = struct.unpack("i", fin.read(_SIZE_OF_INT))[0]
vgroup_ascii += f'group_type: {group_type}\n'
vgroup_ascii += f'num_group_parameters: {num_group_parameters}\n'
vgroup_ascii += 'group_parameters: ' + ' '.join(map(str, group_parameters)) + '\n'
vgroup_ascii += 'group_label: ' + ''.join(map(str, group_label)) + '\n'
vgroup_ascii += 'group_color: ' + ' '.join(map(str, group_color)) + '\n'
vgroup_ascii += f'group_num_point: {group_num_point}\n'
vgroup_ascii += ' '.join(map(str, group_points)) + '\n'
vgroup_ascii += f'num_children: {num_children}\n'
group_counter += 1
# convert vgroup_ascii to list
return vgroup_ascii.split('\n')
else:
raise ValueError(f'unable to load {self.filepath}, expected *.vg or .bvg.')
def process(self):
"""
Start processing vertex group.
"""
logger.info('processing {}'.format(self.filepath))
self.points = self.get_points()
self.planes, self.bounds, self.points_grouped, self.points_ungrouped = self.get_primitives()
self.processed = True
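# Illustrative usage sketch; assumes a vertex-group file produced by Mapple:
#   vg = VertexGroup(PosixPath('primitives.vg'))
#   planes, bounds = vg.planes, vg.bounds   # shapes (n, 4) and (n, 2, 3)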
def get_points(self, row=1):
"""
Get points from vertex group.
Parameters
----------
row: int
Row number where points are specified, defaults to 1 for filename.vg
Returns
----------
as_float: (n, 3) float
Point cloud
"""
pc = np.fromstring(self.vgroup_ascii[row], sep=' ')
return np.reshape(pc, (-1, 3))
def get_primitives(self):
"""
Get primitives from vertex group.
Returns
----------
params: (n, 4) float
Plane parameters
bounds: (n, 2, 3) float
Bounding box of the primitives
groups: (n, m, 3) float
Groups of points
ungrouped_points: (u, 3) float
Points that belong to no group
"""
is_primitive = [line.startswith('group_num_point') for line in self.vgroup_ascii]
primitives = [self.vgroup_ascii[line] for line in np.where(is_primitive)[0] + 1] # lines of groups in the file
params = []
bounds = []
groups = []
grouped_indices = set() # indices of points being grouped
for i, p in enumerate(primitives):
point_indices = np.fromstring(p, sep=' ').astype(np.int64)
grouped_indices.update(point_indices)
points = self.points[point_indices]
param = self.fit_plane(points, mode='PCA')
if param is None:
continue
params.append(param)
bounds.append(self._points_bound(points))
groups.append(points)
ungrouped_indices = set(range(len(self.points))).difference(grouped_indices)
ungrouped_points = self.points[list(ungrouped_indices)] # points that belong to no groups
return np.array(params), np.array(bounds), np.array(groups, dtype=object), ungrouped_points
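# A minimal, self-contained sketch of reading just the point block of a binary
# vertex-group (*.bvg) file, mirroring the layout parsed above: an int32 count
# followed by count * 3 float32 coordinates. The path passed in is a
# hypothetical placeholder, not a file shipped with this code.
import struct
import numpy as np

def read_bvg_points(filepath):
    """Return the (n, 3) float array stored at the start of a *.bvg file."""
    with open(filepath, 'rb') as fin:
        num_points = struct.unpack('i', fin.read(4))[0]
        raw = fin.read(4 * num_points * 3)
        points = struct.unpack('f' * num_points * 3, raw)
    return np.reshape(points, (-1, 3))

# usage (hypothetical path): read_bvg_points('example.bvg').shape -> (n, 3)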
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
from SimPEG import Mesh, Utils
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from scipy.sparse import spdiags,csr_matrix, eye,kron,hstack,vstack,eye,diags
import copy
from scipy.constants import mu_0
from SimPEG import SolverLU
from scipy.sparse.linalg import spsolve,splu
from SimPEG.EM import TDEM
from SimPEG.EM.Analytics.TDEM import hzAnalyticDipoleT,hzAnalyticCentLoopT
from scipy.interpolate import interp2d,LinearNDInterpolator
from scipy.special import ellipk,ellipe
def rectangular_plane_layout(mesh,corner, closed = False,I=1.):
"""
corner: sorted list of four corners (x,y,z)
2--3
| |
1--4
y
|
|--> x
Output:
Js
"""
Jx = np.zeros(mesh.nEx)
Jy = np.zeros(mesh.nEy)
Jz = np.zeros(mesh.nEz)
indy1 = np.logical_and( \
np.logical_and( \
np.logical_and(mesh.gridEy[:,0]>=corner[0,0],mesh.gridEy[:,0]<=corner[1,0]), \
np.logical_and(mesh.gridEy[:,1] >=corner[0,1] , mesh.gridEy[:,1]<=corner[1,1] )),
(mesh.gridEy[:,2] == corner[0,2]
)
)
indx1 = np.logical_and( \
np.logical_and( \
np.logical_and(mesh.gridEx[:,0]>=corner[1,0],mesh.gridEx[:,0]<=corner[2,0]), \
np.logical_and(mesh.gridEx[:,1] >=corner[1,1] , mesh.gridEx[:,1]<=corner[2,1] )),
(mesh.gridEx[:,2] == corner[1,2]
)
)
indy2 = np.logical_and( \
np.logical_and( \
np.logical_and(mesh.gridEy[:,0]>=corner[2,0],mesh.gridEy[:,0]<=corner[3,0]), \
np.logical_and(mesh.gridEy[:,1] <=corner[2,1] , mesh.gridEy[:,1]>=corner[3,1] )),
(mesh.gridEy[:,2] == corner[2,2]
)
)
if closed:
indx2 = np.logical_and( \
np.logical_and( \
np.logical_and(mesh.gridEx[:,0]>=corner[0,0],mesh.gridEx[:,0]<=corner[3,0]), \
np.logical_and(mesh.gridEx[:,1] >=corner[0,1] , mesh.gridEx[:,1]<=corner[3,1] )),
(mesh.gridEx[:,2] == corner[0,2]
)
)
else:
indx2 = []
Jy[indy1] = -I
Jx[indx1] = -I
Jy[indy2] = I
Jx[indx2] = I
J = np.hstack((Jx,Jy,Jz))
J = J*mesh.edge
return J
def BiotSavart(locs,mesh,Js):
"""
Compute the magnetic field generated by current discretized on a mesh using Biot-Savart law
Input:
locs: observation locations
mesh: mesh on which the current J is discretized
Js: discretized source current in A-m (Finite Volume formulation)
Output:
B: magnetic field [Bx,By,Bz]
"""
c = mu_0/(4*np.pi)
nwire = np.sum(Js!=0.)
ind= np.where(Js!=0.)
ind = ind[0]
B = np.zeros([locs.shape[0],3])
gridE = np.vstack([mesh.gridEx,mesh.gridEy,mesh.gridEz])
for i in range(nwire):
# x wire
if ind[i]<mesh.nEx:
r = locs-gridE[ind[i]]
I = Js[ind[i]]*np.hstack([np.ones([locs.shape[0],1]),np.zeros([locs.shape[0],1]),np.zeros([locs.shape[0],1])])
cr = np.cross(I,r)
rsq = np.linalg.norm(r,axis=1)**3.
B = B + c*cr/rsq[:,None]
# y wire
elif ind[i]<mesh.nEx+mesh.nEy:
r = locs-gridE[ind[i]]
I = Js[ind[i]]*np.hstack([np.zeros([locs.shape[0],1]),np.ones([locs.shape[0],1]),np.zeros([locs.shape[0],1])])
cr = np.cross(I,r)
rsq = np.linalg.norm(r,axis=1)**3.
B = B + c*cr/rsq[:,None]
# z wire
elif ind[i]<mesh.nEx+mesh.nEy+mesh.nEz:
r = locs-gridE[ind[i]]
I = Js[ind[i]]*np.hstack([np.zeros([locs.shape[0],1]),np.zeros([locs.shape[0],1]),np.ones([locs.shape[0],1])])
cr = np.cross(I,r)
rsq = np.linalg.norm(r,axis=1)**3.
B = B + c*cr/rsq[:,None]
else:
print('error: index of J out of bounds (number of edges in the mesh)')
return B
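# A minimal usage sketch tying the two helpers above together: lay out a closed
# rectangular current loop on a small tensor mesh and evaluate its magnetic
# field one metre above the loop centre with the Biot-Savart routine. This
# assumes the legacy SimPEG ``Mesh`` module imported at the top of this file;
# the mesh size and corner coordinates are illustrative choices, picked so that
# the corners fall exactly on mesh nodes (required by the equality tests above).
def _example_rectangular_loop_field():
    hx = hy = hz = np.ones(10)                      # ten 1 m cells per axis
    mesh = Mesh.TensorMesh([hx, hy, hz], x0='CCC')  # centred on the origin
    corner = np.array([[-2., -2., 0.],
                       [-2.,  2., 0.],
                       [ 2.,  2., 0.],
                       [ 2., -2., 0.]])             # corners 1-4 as in the docstring
    Js = rectangular_plane_layout(mesh, corner, closed=True, I=1.)
    obs = np.array([[0., 0., 1.]])                  # observation point above the loop
    return BiotSavart(obs, mesh, Js)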
def analytic_infinite_wire(obsloc,wireloc,orientation,I=1.):
"""
Compute the response of an infinite wire with orientation 'orientation'
and current I at the obsvervation locations obsloc
Output:
B: magnetic field [Bx,By,Bz]
"""
n,d = obsloc.shape
t,d = wireloc.shape
d = np.sqrt(np.dot(obsloc**2.,np.ones([d,t]))+np.dot(np.ones([n,d]),(wireloc.T)**2.)
- 2.*np.dot(obsloc,wireloc.T))
distr = np.amin(d, axis=1, keepdims = True)
idxmind = d.argmin(axis=1)
r = obsloc - wireloc[idxmind]
orient = np.c_[[orientation for i in range(obsloc.shape[0])]]
B = (mu_0*I)/(2*np.pi*(distr**2.))*np.cross(orientation,r)
return B
def mag_dipole(m,obsloc):
"""
Compute the response of an infinitesimal mag dipole at location (0,0,0)
with orientation X and magnetic moment 'm'
at the obsvervation locations obsloc
Output:
B: magnetic field [Bx,By,Bz]
"""
loc = np.r_[[[0.,0.,0.]]]
n,d = obsloc.shape
t,d = loc.shape
d = np.sqrt(np.dot(obsloc**2.,np.ones([d,t]))+np.dot(np.ones([n,d]),(loc.T)**2.)
- 2.*np.dot(obsloc,loc.T))
d = d.flatten()
ind = np.where(d==0.)
d[ind] = 1e6
x = obsloc[:,0]
y = obsloc[:,1]
z = obsloc[:,2]
#orient = np.c_[[orientation for i in range(obsloc.shape[0])]]
Bz = (mu_0*m)/(4*np.pi*(d**3.))*(3.*((z**2.)/(d**2.))-1.)
By = (mu_0*m)/(4*np.pi*(d**3.))*(3.*(z*y)/(d**2.))
Bx = (mu_0*m)/(4*np.pi*(d**3.))*(3.*(x*z)/(d**2.))
B = np.vstack([Bx,By,Bz]).T
return B
def circularloop(a,obsloc,I=1.):
"""
From Simpson, Lane, Immer and Youngquist (2001)
Compute the magnetic field B response of a current loop
of radius 'a' with intensity 'I'.
input:
a: radius in m
obsloc: obsvervation locations
Output:
B: magnetic field [Bx,By,Bz]
"""
x = np.atleast_2d(obsloc[:,0]).T
y = np.atleast_2d(obsloc[:,1]).T
z = np.atleast_2d(obsloc[:,2]).T
r = np.linalg.norm(obsloc,axis=1)
loc = np.r_[[[0.,0.,0.]]]
n,d = obsloc.shape
r2 = x**2.+y**2.+z**2.
rho2 = x**2.+y**2.
alpha2 = a**2.+r2-2*a*np.sqrt(rho2)
beta2 = a**2.+r2+2*a*np.sqrt(rho2)
k2 = 1-(alpha2/beta2)
lbda = x**2.-y**2.
C = mu_0*I/np.pi
Bx = ((C*x*z)/(2*alpha2*np.sqrt(beta2)*rho2))*\
((a**2.+r2)*ellipe(k2)-alpha2*ellipk(k2))
Bx[np.isnan(Bx)] = 0.
By = ((C*y*z)/(2*alpha2*np.sqrt(beta2)*rho2))*\
((a**2.+r2)*ellipe(k2)-alpha2*ellipk(k2))
By[np.isnan(By)] = 0.
"""
Some utility functions for dealing with perturbative expansions numerically.
"""
import parafermions.ParafermionUtils as pf
import parafermions.ParafermionUtilsCython as pfcy
import time
import numpy as np
import scipy.sparse as sps
# define a function that gets the U operator to the nth order for given operators
def Ufunc(n, Q, V, P):
"""
The U operator, see Messiah book, chapter 16.
Parameters:
----------
n: int
Order of expansion.
Q: matrix
Projection matrix rescaled by inverse energy difference.
V: matrix
Perturbing part of Hamiltonian.
P: matrix
Projection matrix to degenerate subspace we are perturbing from.
Returns:
--------
matrix
The expansion of U to nth order.
"""
if n == 0:
return P
elif n > 0:
U = V*Ufunc(n-1, Q, V, P)
for p in range(1,n):
U = U - Ufunc(p,Q,V,P)*V*Ufunc(n-p-1,Q,V,P)
return Q*U
def Afunc(n, Q, V, P, As=None):
"""
Function A operators of Bloch's perturbation theory expansion to nth order.
Parameters
-----------
n: int
The order to calculate to.
Q: operator
Normalised projector to all states outside band.
V: operator
Perturbing part of hamiltonian.
P: operator
Projector to band we are perturbing from.
As: dict
Dictionary of precomputed expansions.
Returns
--------
operator
The effective operator to nth order.
dict
Dictionary of operators are each order up to this.
"""
if As is None or type(As) is not dict:
As = dict()
if 0 not in As:
A = P*V*Ufunc(0,Q,V,P)
As[0] = A
else: A = As[0]
for j in range(1, n):
if j not in As:
A_new = P*V*Ufunc(j,Q,V,P)
As[j] = A_new
else:
A_new = As[j]
A += A_new
return A, As
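# A small self-contained check of the recursive Ufunc/Afunc construction on a
# toy four-state problem: two degenerate states at energy 0, two excited states
# at energy 1, and a weak coupling V. P projects onto the degenerate pair and Q
# is the complement weighted by 1/(E0 - Ek) = -1. The matrices are illustrative
# only and are not tied to the parafermion chain used elsewhere in this module.
def _example_afunc_toy_model():
    E0, Ek = 0.0, 1.0
    P = sps.diags([1.0, 1.0, 0.0, 0.0]).tocsr()              # projector onto degenerate pair
    Q = sps.diags([0.0, 0.0, 1.0, 1.0]).tocsr() / (E0 - Ek)  # energy-weighted complement
    V = sps.csr_matrix(np.array([[0.0, 0.0, 0.1, 0.0],
                                 [0.0, 0.0, 0.0, 0.1],
                                 [0.1, 0.0, 0.0, 0.0],
                                 [0.0, 0.1, 0.0, 0.0]]))
    A2, _ = Afunc(2, Q, V, P)  # P V P + P V Q V P: effective operator to 2nd order
    return A2.toarray()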
def ProjectToBand(A, E0Band, E0s):
select = np.abs(E0s - E0Band) < 1e-12
return A[select,:][:, select]
def Sfunc(k, P0, Q):
if k == 0:
return -P0
else:
return Q**k
def AKato(n, Q, V, P0):
# project to the unperturbed band to make multiplications more efficient
A = None
bins = n+1
Table = pf.PartitionTable(bins, n)
# print ("Partition table" + str([bins,n]))
for idx in range(Table[bins,n]):
ks = pf.FindPartition(bins, n, idx, Table)
# print ks
Tmp = Sfunc(ks[0], P0, Q)
for k in ks[1:]:
Tmp = Tmp * V * Sfunc(k, P0, Q)
if A is None:
A = Tmp
else:
A = A + Tmp
return -A
def BKato(n, Q, V, P0):
# project to the unperturbed band to make multiplications more efficient
B = None
bins = n+1
Table = pf.PartitionTable(bins, n-1)
# print ("Partition table" + str([bins,n-1]))
for idx in range(Table[bins, n-1]):
ks = pf.FindPartition(bins, n-1, idx, Table)
# print ks
Tmp = Sfunc(ks[0], P0, Q)
for k in ks[1:]:
Tmp = Tmp * V * Sfunc(k, P0, Q)
if B is None:
B = Tmp
else:
B = B + Tmp
return B
def HaKato(n, Q, V, P0):
Ha = P0 * BKato(1, Q, V, P0) * P0
for i in range(2,n+1):
Ha = Ha + P0 * BKato(i, Q, V, P0) * P0
return Ha
def KaKato(n, Q, V, P0):
Ka = P0
for i in range(1,n+1):
Ka = Ka + P0 * AKato(i, Q, V, P0) * P0
return Ka
def pt_operators(N, L, J, theta, f, phi, band_idxs, deltaE, exact=False, qs=None, verbose=False, **kwargs):
"""
Function to calculate the operators needed for perturbation theory given input parameters.
Parameters:
----------
N: int
The number of clock positions.
L: int
The length of the chain.
J: float
J coupling constant value(s).
theta: float
Chiral parameter on J term.
f: float
f coupling constant value(s).
phi: float
Chiral parameter on f term.
band_idxs: array
Band indexes to start from.
deltaE: float
The energy range in which to consider bands.
exact: bool
Flag to indicate that exact values should be calculated using ED (default=False).
qs: list
List of q values to use. If None all values 0,...,N-1 used(default=None).
Returns
-------
matrix
Projector to starting subspace.
matrix
Projector to complement of starting subspace within given energy range
and scaled according to the inverse unperturbed energy difference.
matrix
Unperturbed hamiltonian matrix.
list of matrices
Perturbing hamiltonian for each q requested.
int
The full dimension of the starting band.
float
The unperturbed energy of the starting band.
list of arrays
List of arrays of exact eigenvalues for each requested q or None if exact is false.
"""
if qs is None:
qs = range(N)
BandIdxs = band_idxs
DeltaE = deltaE
if 'theta0' in kwargs:
theta0 = kwargs['theta0']
else:
theta0 = theta
H0Op = pf.ParafermionicChainOp(N, L, J, theta0, 0.0, 0.0, q=0) # we get MPO object for full unperturbed Hamiltonian (note the same in each sector)
HfOps = []
for q in qs:
if 'exclude_side' in kwargs:
if kwargs['exclude_side'] == 'left':
fs = np.ones(L)*f
fs[0] = 0.0
Hf = pf.ParafermionicChainOp(N, L, J, theta, fs, phi, q=q)
elif kwargs['exclude_side'] == 'right':
fs = np.ones(L)*f
fs[-1] = 0.0
Hf = pf.ParafermionicChainOp(N, L, J, theta, fs, phi, q=q)
elif kwargs['exclude_side'] == 'neither':
fs = np.ones(L)*f
fs[0] = 0.0
fs[-1] = 0.0
Hf = pf.ParafermionicChainOp(N, L, J, theta, fs, phi, q=q)
else:
raise Exception('\'exclude_side\' argument should be \'left\', \'right\' or \'neither\'')
else:
Hf = pf.ParafermionicChainOp(N, L, J, theta, f, phi, q=q)
Hf.add(H0Op, c1=1.0, c2=-1.0, inplace=True, compress=False)
HfOps.append(Hf)
[Partitions, H0Energies] = H0Op.get_bands_and_energies() # get all the partitions and energies of each
BandEnergy = H0Energies[BandIdxs[0]] # get the energy of the band we start from, this is E0
BandPartitions = list(map(lambda x: Partitions[x], BandIdxs)) # get the partitions of the starting bands
FullBand = np.vstack(list(map(lambda x: pfcy.GetFullBandDW(BandPartitions[x]), range(len(BandIdxs)))))
FullBandDim = len(FullBand)
[NeighbouringBands,] = np.where(np.abs(H0Energies - BandEnergy) < DeltaE) # find other bands within deltaE in energy
FullSubspace = np.copy(FullBand)
import numpy as np
def dist(x, y, norm=2):
# x: N x D
# y: M x D
n = x.shape[0]
m = y.shape[0]
d = x.shape[1]
assert d == y.shape[1]
x = np.expand_dims(x, axis=1)
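# The broadcasting pattern started above can be finished as follows. This is a
# self-contained sketch (independent of the truncated function above) of an
# N x M pairwise Lp-distance matrix between two point sets.
def pairwise_dist(x, y, norm=2):
    # x: (N, D), y: (M, D) -> (N, M) matrix of Lp distances
    diff = np.expand_dims(x, axis=1) - np.expand_dims(y, axis=0)  # (N, M, D)
    return np.linalg.norm(diff, ord=norm, axis=-1)

# usage: pairwise_dist(np.random.rand(5, 3), np.random.rand(7, 3)).shape == (5, 7)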
import numpy as np
import os
from annoy import AnnoyIndex
import trimesh as trm
from scipy.spatial import distance
from scipy.stats import wasserstein_distance
from shape import Shape
from utils import flatten_features_array, read_off
from settings import Settings
# from src.utils import flatten_features_array, read_off
# from src.shape import Shape
# from src.settings import Settings
s = Settings()
def calculate_weights(features: {}):
"""
It determines the weights of the single features for distance computation
The features are compared in pairs to determine the euclidean distance, for simple features,
or the Wassertein distance, for distributions. The weights are computed as 1 over the standard
deviation of the respective set of distances.
The weights are then saved to cache.
----------------------------
Args:
features (obj: 'dict'): The dictionary containing the feature metrics of each shape
"""
d_v, d_a, d_c, d_bb, d_d, d_e, d_a3, d_d1, d_d2, d_d3, d_d4 = [],[],[],[],[],[],[],[],[],[],[]
for i in range(0, len(features.keys())):
featureList1 = list(features.values())[i]
for j in range(i+1, len(features.keys())):
featureList2 = list(features.values())[j]
d_v.append(distance.euclidean(featureList1['volume'], featureList2['volume']))
d_a.append(distance.euclidean(featureList1['area'], featureList2['area']))
d_c.append(distance.euclidean(featureList1['compactness'], featureList2['compactness']))
d_bb.append(distance.euclidean(featureList1['bbox_volume'], featureList2['bbox_volume']))
d_d.append(distance.euclidean(featureList1['diameter'], featureList2['diameter']))
d_e.append(distance.euclidean(featureList1['eccentricity'], featureList2['eccentricity']))
d_a3.append(wasserstein_distance(featureList1['A3'][0], featureList2['A3'][0]))
d_d1.append(wasserstein_distance(featureList1['D1'][0], featureList2['D1'][0]))
d_d2.append(wasserstein_distance(featureList1['D2'][0], featureList2['D2'][0]))
d_d3.append(wasserstein_distance(featureList1['D3'][0], featureList2['D3'][0]))
d_d4.append(wasserstein_distance(featureList1['D4'][0], featureList2['D4'][0]))
weights = {}
weights["w_v"] = 1/np.std(d_v)
weights["w_a"] = 1/np.std(d_a)
weights["w_c"] = 1/np.std(d_c)
weights["w_bb"] = 1/np.std(d_bb)
weights["w_d"] = 1/np.std(d_d)
weights["w_e"] = 1/np.std(d_e)
weights["w_A3"] = 1/np.std(d_a3)
weights["w_D1"] = 1/np.std(d_d1)
weights["w_D2"] = 1/np.std(d_d2)
weights["w_D3"] = 1/np.std(d_d3)
weights["w_D4"] = 1/ | np.std(d_d4) | numpy.std |
# Copyright 2021 <NAME>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Filters and filter banks"""
import abc
from typing import Mapping, Optional, Tuple, Union
import numpy as np
from pydrobert.speech import AliasedFactory
from pydrobert.speech import config
from pydrobert.speech.scales import MelScaling
from pydrobert.speech.scales import ScalingFunction
from pydrobert.speech.util import alias_factory_subclass_from_arg
from pydrobert.speech.util import angular_to_hertz
from pydrobert.speech.util import hertz_to_angular
__all__ = [
"LinearFilterBank",
"TriangularOverlappingFilterBank",
"GaborFilterBank",
"ComplexGammatoneFilterBank",
"WindowFunction",
"BartlettWindow",
"BlackmanWindow",
"HammingWindow",
"HannWindow",
"GammaWindow",
]
# banks
class LinearFilterBank(AliasedFactory):
"""A collection of linear, time invariant filters
A :class:`LinearFilterBank` instance is expected to provide factory methods for
instantiating a fixed number of LTI filters in either the time or frequency domain.
Filters should be organized lowest frequency first.
Attributes
----------
is_real : bool
is_analytic : bool
is_zero_phase : bool
num_filts : int
sampling_rate : float
centers_hz : tuple
supports_hz : tuple
supports : tuple
supports_ms : tuple
"""
@abc.abstractproperty
def is_real(self) -> bool:
"""Whether the filters are real or complex"""
pass
@abc.abstractproperty
def is_analytic(self) -> bool:
"""Whether the filters are (approximately) analytic"""
pass
@abc.abstractproperty
def is_zero_phase(self) -> bool:
"""Whether the filters are zero phase or not
Zero phase filters are even functions with no imaginary part in the fourier
domain. Their impulse responses center around 0.
"""
pass
@abc.abstractproperty
def num_filts(self) -> int:
"""Number of filters in the bank"""
pass
@abc.abstractproperty
def sampling_rate(self) -> float:
"""Number of samples in a second of a target recording"""
pass
@abc.abstractproperty
def supports_hz(self) -> Tuple:
"""Boundaries of effective support of filter freq responses, in Hz.
Returns a tuple of length `num_filts` containing pairs of floats of the low and
high frequencies. Frequencies outside the span have a response of approximately
(with magnitude up to :obj:`pydrobert.speech.EFFECTIVE_SUPPORT_SIGNAL`) zero.
The boundaries need not be tight, i.e. the region inside the boundaries could be
zero. It is more important to guarantee that the region outside the boundaries
is approximately zero.
The boundaries ignore the Hermitian symmetry of the filter if it is real. Bounds
of ``(10, 20)`` for a real filter imply that the region ``(-20, -10)`` could
also be nonzero.
The user is responsible for adjusting the for the periodicity induced by
sampling. For example, if the boundaries are ``(-5, 10)`` and the filter is
sampled at 15Hz, then all bins of an associated DFT could be nonzero.
"""
pass
@abc.abstractproperty
def supports(self) -> Tuple:
"""Boundaries of effective support of filter impulse resps, in samples
Returns a tuple of length `num_filts` containing pairs of integers of the first
and last (effectively) nonzero samples.
The boundaries need not be tight, i.e. the region inside the boundaries could be
zero. It is more important to guarantee that the region outside the boundaries
is approximately zero.
If a filter is instantiated using a buffer that is unable to fully contain the
supported region, samples will wrap around the boundaries of the buffer.
Noncausal filters will have start indices less than 0. These samples will wrap
to the end of the filter buffer when the filter is instantiated.
"""
pass
@property
def supports_ms(self) -> tuple:
"""Boundaries of effective support of filter impulse resps, in ms"""
return tuple(
(s[0] * 1000 / self.sampling_rate, s[1] * 1000 / self.sampling_rate,)
for s in self.supports
)
@abc.abstractmethod
def get_impulse_response(self, filt_idx: int, width: int) -> np.ndarray:
"""Construct filter impulse response in a fixed-width buffer
Construct the filter in the time domain.
Parameters
----------
filt_idx : int
The index of the filter to generate. Less than `num_filts`
width : int
The length of the buffer, in samples. If less than the support of the
filter, the filter will alias.
Returns
-------
array-like
1D float64 or complex128 numpy array of length `width`
"""
pass
@abc.abstractmethod
def get_frequency_response(
self, filt_idx: int, width: int, half: bool = False
) -> np.ndarray:
"""Construct filter frequency response in a fixed-width buffer
Construct the 2pi-periodized filter in the frequency domain. Zero-phase filters
`is_zero_phase` are returned as 8-byte float arrays. Otherwise, they will be
16-byte complex floats.
Parameters
----------
filt_idx : int
The index of the filter to generate. Less than `num_filts`
width : int
The length of the DFT to output
half : bool, optional
Whether to return only the DFT bins between [0,pi]
Results
-------
array-like
If `half` is `False`, returns a 1D float64 or complex128
numpy array of length `width`. If `half` is `True` and
`width` is even, the returned array is of length
``width // 2 + 1``. If `width` is odd, the returned array
is of length ``(width + 1) // 2``.
"""
pass
@abc.abstractmethod
def get_truncated_response(
self, filt_idx: int, width: int
) -> Tuple[int, np.ndarray]:
"""Get nonzero region of filter frequency response
Many filters will be compactly supported in frequency (or approximately so).
This method generates a tuple `(bin_idx, buf)` of the nonzero region.
In the case of a complex filter, ``bin_idx + len(buf)`` may be greater than
`width`; the filter wraps around in this case. The full frequency response can
be calculated from the truncated response by:
>>> bin_idx, trnc = bank.get_truncated_response(filt_idx, width)
>>> full = numpy.zeros(width, dtype=trnc.dtype)
>>> wrap = min(bin_idx + len(trnc), width) - bin_idx
>>> full[bin_idx:bin_idx + wrap] = trnc[:wrap]
>>> full[:len(trnc) - wrap] = tnc[wrap:]
In the case of a real filter, only the nonzero region between ``[0, pi]``
(half-spectrum) is returned. No wrapping can occur since it would inevitably
interfere with itself due to conjugate symmetry. The half-spectrum can easily be
recovered by:
>>> half_width = (width + width % 2) // 2 + 1 - width % 2
>>> half = numpy.zeros(half_width, dtype=trnc.dtype)
>>> half[bin_idx:bin_idx + len(trnc)] = trnc
And the full spectrum by:
>>> full[bin_idx:bin_idx + len(trnc)] = trnc
>>> full[width - bin_idx - len(trnc) + 1:width - bin_idx + 1] = \\
... trnc[:None if bin_idx else 0:-1].conj()
(the embedded if-statement is necessary when bin_idx is 0, as the full fft
excludes its symmetric bin)
Parameters
----------
filt_idx : int
The index of the filter to generate. Less than `num_filts`
width : int
The length of the DFT to output
Returns
-------
tuple of int, array
"""
pass
class TriangularOverlappingFilterBank(LinearFilterBank):
"""Triangular frequency response whose vertices are along the scale
The vertices of the filters are sampled uniformly along the passed scale. If the
scale is nonlinear, the triangles will be asymmetrical. This is closely related to,
but not identical to, the filters described in [povey2011]_ and [young]_.
Parameters
----------
scaling_function : pydrobert.speech.ScalingFunction, str, or dict
Dictates the layout of filters in the Fourier domain. Can be a
:class:`ScalingFunction` or something compatible with
:func:`pydrobert.speech.alias_factory_subclass_from_arg`
num_filts : int, optional
The number of filters in the bank
high_hz, low_hz : float, optional
The topmost and bottommost edge of the filters, respectively. The default for
`high_hz` is the Nyquist
sampling_rate : float, optional
The sampling rate (cycles/sec) of the target recordings
analytic : bool, optional
Whether to use an analytic form of the bank. The analytic form is easily derived
from the real form in [povey2011]_ and [young]_. Since the filter is compactly
supported in frequency, the analytic form is simply the suppression of the
``[-pi, 0)`` frequencies
Attributes
----------
centers_hz : tuple
is_real : bool
is_analytic : bool
num_filts : int
sampling_rate : float
supports_hz : tuple
supports : tuple
supports_ms : tuple
Raises
------
ValueError
If `high_hz` is above the Nyquist, or `low_hz` is below 0, or
``high_hz <= low_hz``
"""
aliases = {"tri", "triangular"}
def __init__(
self,
scaling_function: Union[ScalingFunction, Mapping, str],
num_filts: int = 40,
high_hz: Optional[float] = None,
low_hz: float = 20.0,
sampling_rate: float = 16000,
analytic: bool = False,
):
scaling_function = alias_factory_subclass_from_arg(
ScalingFunction, scaling_function
)
if low_hz < 0 or (
high_hz and (high_hz <= low_hz or high_hz > sampling_rate // 2)
):
raise ValueError(
"Invalid frequency range: ({:.2f},{:.2f}".format(low_hz, high_hz)
)
self._rate = sampling_rate
if high_hz is None:
high_hz = sampling_rate // 2
# compute vertices
scale_low = scaling_function.hertz_to_scale(low_hz)
scale_high = scaling_function.hertz_to_scale(high_hz)
scale_delta = (scale_high - scale_low) / (num_filts + 1)
self._vertices = tuple(
scaling_function.scale_to_hertz(scale_low + scale_delta * idx)
for idx in range(0, num_filts + 2)
)
self._analytic = analytic
@property
def is_real(self) -> bool:
return not self._analytic
@property
def is_analytic(self) -> bool:
return self._analytic
@property
def is_zero_phase(self) -> bool:
return True
@property
def num_filts(self) -> int:
return len(self._vertices) - 2
@property
def sampling_rate(self) -> float:
return self._rate
@property
def centers_hz(self) -> Tuple[float]:
"""The point of maximum gain in each filter's frequency response, in Hz
This property gives the so-called "center frequencies" - the
point of maximum gain - of each filter.
"""
return self._vertices[1:-1]
@property
def supports_hz(self) -> tuple:
return tuple(
(low, high) for low, high in zip(self._vertices[:-2], self._vertices[2:])
)
@property
def supports(self) -> tuple:
# A given filter is bound from above by
# 2(w_r - w_l) / ((w_c - w_l)(w_r - w_c)t^2pi)
supports = []
for idx in range(len(self._vertices) - 2):
left = hertz_to_angular(self._vertices[idx], self._rate)
mid = hertz_to_angular(self._vertices[idx + 1], self._rate)
right = hertz_to_angular(self._vertices[idx + 2], self._rate)
K = np.sqrt(8 * (right - left) / np.pi)
K /= np.sqrt(config.EFFECTIVE_SUPPORT_THRESHOLD)
K /= np.sqrt(mid - left) * np.sqrt(right - mid)
K = int(np.ceil(K))
supports.append((-K // 2 - 1, K // 2 + 1))
return tuple(supports)
def get_impulse_response(self, filt_idx: int, width: int) -> np.ndarray:
left = hertz_to_angular(self._vertices[filt_idx], self._rate)
mid = hertz_to_angular(self._vertices[filt_idx + 1], self._rate)
right = hertz_to_angular(self._vertices[filt_idx + 2], self._rate)
res = np.zeros(width, dtype=np.complex128 if self._analytic else np.float64)
# for numerical stability (angles can get pretty small)
if right - mid > mid - left:
denom = right - mid
div_term = mid - left
else:
denom = mid - left
div_term = right - mid
denom *= (int(self._analytic) + 1) * np.pi
for t in range(1, width + 1):
if self._analytic:
numer = (right - left) / div_term * np.exp(1j * mid * t)
numer -= (right - mid) / div_term * np.exp(1j * left * t)
numer -= (mid - left) / div_term * np.exp(1j * right * t)
else:
numer = (right - left) / div_term * np.cos(mid * t)
numer -= (right - mid) / div_term * np.cos(left * t)
numer -= (mid - left) / div_term * np.cos(right * t)
val = numer / t ** 2
if t < width:
res[t] += val
res[-t] += val.conj()
else:
res[0] += val
numer = mid / div_term * (right ** 2 - left ** 2)
numer += right / div_term * (left ** 2 - mid ** 2)
numer += left / div_term * (mid ** 2 - right ** 2)
res[0] += numer / 2
res /= denom
return res
def get_frequency_response(
self, filt_idx: int, width: int, half: bool = False
) -> np.ndarray:
left = self._vertices[filt_idx]
mid = self._vertices[filt_idx + 1]
right = self._vertices[filt_idx + 2]
left_idx = int(np.ceil(width * left / self._rate))
right_idx = int(width * right / self._rate)
assert self._rate * (left_idx - 1) / width <= left
assert self._rate * (right_idx + 1) / width >= right, width
dft_size = width
if half:
if width % 2:
dft_size = (width + 1) // 2
else:
dft_size = width // 2 + 1
res = np.zeros(dft_size, dtype=np.float64)
for idx in range(left_idx, min(dft_size, right_idx + 1)):
hz = self._rate * idx / width
if hz <= mid:
val = (hz - left) / (mid - left)
else:
val = (right - hz) / (right - mid)
res[idx] = val
if not half and not self._analytic:
res[-idx] = val
return res
def get_truncated_response(
self, filt_idx: int, width: int
) -> Tuple[int, np.ndarray]:
left = self._vertices[filt_idx]
mid = self._vertices[filt_idx + 1]
right = self._vertices[filt_idx + 2]
left_idx = int(np.ceil(width * left / self._rate))
right_idx = int(width * right / self._rate)
assert self._rate * (left_idx - 1) / width <= left
assert self._rate * (right_idx + 1) / width >= right, width
res = np.zeros(1 + right_idx - left_idx, dtype=np.float64)
for idx in range(left_idx, min(width, right_idx + 1)):
hz = self._rate * idx / width
if hz <= mid:
res[idx - left_idx] = (hz - left) / (mid - left)
else:
res[idx - left_idx] = (right - hz) / (right - mid)
return left_idx, res
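# A minimal usage sketch for the triangular bank above: build a small mel-spaced
# bank and inspect one filter's half-spectrum frequency response. The sizes are
# illustrative; MelScaling is already imported at the top of this module.
def _example_triangular_bank():
    bank = TriangularOverlappingFilterBank(
        MelScaling(), num_filts=10, low_hz=100.0, sampling_rate=8000
    )
    resp = bank.get_frequency_response(0, 512, half=True)  # lowest filter, 512-point DFT
    return bank.centers_hz, resp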
class Fbank(LinearFilterBank):
"""A mel-triangular filter bank that is square-rooted
An ``Fbank`` instance is intended to replicate the filters from Kaldi
[povey2011]_ and HTK [young]_. Its scale is fixed to Mel-scale. Like a
``TriangularOverlappingFilterBank``, ``Fbank`` places the vertices of
triangular filters uniformly along the target scale. However, an ``Fbank``
is triangular in the Mel-scale, whereas the triangular bank is triangular
in frequency.
Parameters
----------
num_filts : int, optional
The number of filters in the bank
high_hz, low_hz : float, optional
The topmost and bottommost edge of the filters, respectively.
The default for high_hz is the Nyquist
sampling_rate : float, optional
The sampling rate (cycles/sec) of the target recordings
analytic : bool, optional
Whether to use an analytic form of the bank. The analytic form
is easily derived from the real form in [povey2011]_ and [young]_. Since
the filter is compactly supported in frequency, the analytic
form is simply the suppression of the ``[-pi, 0)`` frequencies
Attributes
----------
centers_hz : tuple
is_real : bool
is_analytic : bool
num_filts : int
sampling_rate : float
supports_hz : tuple
supports : tuple
supports_ms : tuple
Notes
-----
In a standard mel-filterbank spectrogram, the power spectrum is calculated
before filtering. This module's spectrogram takes the power spectrum after
filtering. To recreate the frequency response of the alternate order, we
can take the pointwise square root of the frequency response.
"""
aliases = {"fbank"}
def __init__(
self,
num_filts: int = 40,
high_hz: Optional[float] = None,
low_hz: float = 20.0,
sampling_rate: float = 16000,
analytic: bool = False,
):
scaling_function = MelScaling()
if low_hz < 0 or (
high_hz and (high_hz <= low_hz or high_hz > sampling_rate // 2)
):
raise ValueError(
"Invalid frequency range: ({:.2f},{:.2f}".format(low_hz, high_hz)
)
self._rate = sampling_rate
if high_hz is None:
high_hz = sampling_rate // 2
# compute vertices
scale_low = scaling_function.hertz_to_scale(low_hz)
scale_high = scaling_function.hertz_to_scale(high_hz)
scale_delta = (scale_high - scale_low) / (num_filts + 1)
self._vertices = tuple(
scaling_function.scale_to_hertz(scale_low + scale_delta * idx)
for idx in range(0, num_filts + 2)
)
self._analytic = analytic
@property
def is_real(self) -> bool:
return not self._analytic
@property
def is_analytic(self) -> bool:
return self._analytic
@property
def is_zero_phase(self) -> bool:
return True
@property
def num_filts(self) -> int:
return len(self._vertices) - 2
@property
def sampling_rate(self) -> float:
return self._rate
@property
def centers_hz(self) -> Tuple[float]:
"""The point of maximum gain in each filter's frequency response, in Hz
This property gives the so-called "center frequencies" - the
point of maximum gain - of each filter.
"""
return self._vertices[1:-1]
@property
def supports_hz(self) -> tuple:
return tuple(
(low, high) for low, high in zip(self._vertices[:-2], self._vertices[2:])
)
@property
def supports(self) -> tuple:
# A given filter is bound above for t > 0 by
# ((w_r - w_c) ** .5 + (w_c - w_l) ** .5) /
# (2 ** 3 * t ** 3 * (w_c - w_l) * (w_r - w_c) * pi) ** .5
supports = []
for idx in range(len(self._vertices) - 2):
left = hertz_to_angular(self._vertices[idx], self._rate)
mid = hertz_to_angular(self._vertices[idx + 1], self._rate)
right = hertz_to_angular(self._vertices[idx + 2], self._rate)
K = right - left + 2 * ((right - mid) * (mid - left)) ** 2
K /= config.EFFECTIVE_SUPPORT_THRESHOLD ** 2 * np.pi
K /= (right - mid) * (mid - left)
K /= np.sqrt(config.EFFECTIVE_SUPPORT_THRESHOLD)
K /= np.sqrt(mid - left) * np.sqrt(right - mid)
K **= 0.3333
K = int(np.ceil(K))
supports.append((-K // 2 - 1, K // 2 + 1))
return tuple(supports)
def get_impulse_response(self, filt_idx: int, width: int) -> np.ndarray:
# For the time being, I'll just invert the frequency response
if self.is_analytic:
freq_response = self.get_frequency_response(filt_idx, width, half=False)
return np.fft.ifft(freq_response)
else:
freq_response = self.get_frequency_response(filt_idx, width, half=True)
return np.fft.irfft(freq_response, n=width)
def get_frequency_response(
self, filt_idx: int, width: int, half: bool = False
) -> np.ndarray:
scaling_function = MelScaling()
left_hz = self._vertices[filt_idx]
mid_hz = self._vertices[filt_idx + 1]
right_hz = self._vertices[filt_idx + 2]
left_mel = scaling_function.hertz_to_scale(left_hz)
mid_mel = scaling_function.hertz_to_scale(mid_hz)
right_mel = scaling_function.hertz_to_scale(right_hz)
left_idx = int(np.ceil(width * left_hz / self._rate))
right_idx = int(width * right_hz / self._rate)
assert self._rate * (left_idx - 1) / width <= left_hz
assert self._rate * (right_idx + 1) / width >= right_hz, width
dft_size = width
if half:
if width % 2:
dft_size = (width + 1) // 2
else:
dft_size = width // 2 + 1
res = np.zeros(dft_size, dtype=np.float64)
for idx in range(left_idx, min(dft_size, right_idx + 1)):
hz = self._rate * idx / width
mel = scaling_function.hertz_to_scale(hz)
if mel <= mid_mel:
val = (mel - left_mel) / (mid_mel - left_mel)
else:
val = (right_mel - mel) / (right_mel - mid_mel)
res[idx] = val ** 0.5
if not half and not self._analytic:
res[-idx] = val ** 0.5
return res
def get_truncated_response(self, filt_idx: int, width: int) -> np.ndarray:
scaling_function = MelScaling()
left_hz = self._vertices[filt_idx]
mid_hz = self._vertices[filt_idx + 1]
right_hz = self._vertices[filt_idx + 2]
left_mel = scaling_function.hertz_to_scale(left_hz)
mid_mel = scaling_function.hertz_to_scale(mid_hz)
right_mel = scaling_function.hertz_to_scale(right_hz)
left_idx = int(np.ceil(width * left_hz / self._rate))
right_idx = int(width * right_hz / self._rate)
assert self._rate * (left_idx - 1) / width <= left_hz
assert self._rate * (right_idx + 1) / width >= right_hz, width
res = np.zeros(min(width, right_idx + 1) - left_idx, dtype=np.float64)
for idx in range(left_idx, min(width, right_idx + 1)):
hz = self._rate * idx / width
mel = scaling_function.hertz_to_scale(hz)
if mel <= mid_mel:
res[idx - left_idx] = (mel - left_mel) / (mid_mel - left_mel)
else:
res[idx - left_idx] = (right_mel - mel) / (right_mel - mid_mel)
return left_idx, res ** 0.5
class GaborFilterBank(LinearFilterBank):
r"""Gabor filters with ERBs between points from a scale
Gabor filters are complex, mostly analytic filters that have a Gaussian envelope in
both the time and frequency domains. They are defined as
.. math::
f(t) = C \sigma^{-1/2} \pi^{-1/4}
e^{\frac{-t^2}{2\sigma^2} + i\xi t}
in the time domain and
.. math::
\widehat{f}(\omega) = C \sqrt{2\sigma} \pi^{1/4}
e^{\frac{-\sigma^2(\xi - \omega)^2}{2}}
in the frequency domain. Though Gaussians never truly reach 0, in either domain,
they are effectively compactly supported. Gabor filters are optimal with respect to
their time-bandwidth product.
`scaling_function` is used to split up the frequencies between `high_hz` and
`low_hz` into a series of filters. Every subsequent filter's width is scaled such
that, if the filters are all of the same height, the intersection with the precedent
filter's response matches the filter's Equivalent Rectangular Bandwidth (``erb ==
True``) or its 3dB bandwidths (``erb == False``). The ERB is the width of a
rectangular filter with the same height as the filter's maximum frequency response
that has the same :math:`L^2` norm.
Parameters
----------
scaling_function : pydrobert.speech.ScalingFunction, str, or dict
Dictates the layout of filters in the Fourier domain. Can be a
:class:`ScalingFunction` or something compatible with
:func:`pydrobert.speech.alias_factory_subclass_from_arg`
num_filts : int
The number of filters in the bank
high_hz, low_hz : float, optional
The topmost and bottommost edge of the filters, respectively. The default for
`high_hz` is the Nyquist
sampling_rate : float, optional
The sampling rate (cycles/sec) of the target recordings
scale_l2_norm : bool
Whether to scale the l2 norm of each filter to 1. Otherwise the frequency
response of each filter will max out at an absolute value of 1.
erb : bool
Attributes
----------
centers_hz : tuple
is_real : bool
is_analytic : bool
num_filts : int
sampling_rate : float
supports_hz : tuple
supports : tuple
supports_ms : tuple
scaled_l2_norm : bool
erb : bool
See Also
--------
pydrobert.speech.config.EFFECTIVE_SUPPORT_THRESHOLD
The absolute value below which counts as zero
"""
aliases = {"gabor"}
def __init__(
self,
scaling_function: Union[ScalingFunction, Mapping, str],
num_filts: int = 40,
high_hz: Optional[float] = None,
low_hz: float = 20.0,
sampling_rate: float = 16000,
scale_l2_norm: bool = False,
erb: bool = False,
):
scaling_function = alias_factory_subclass_from_arg(
ScalingFunction, scaling_function
)
self._scale_l2_norm = scale_l2_norm
self._erb = erb
if low_hz < 0 or (
high_hz and (high_hz <= low_hz or high_hz > sampling_rate // 2)
):
raise ValueError(
"Invalid frequency range: ({:.2f},{:.2f}".format(low_hz, high_hz)
)
self._rate = sampling_rate
if high_hz is None:
high_hz = sampling_rate // 2
scale_low = scaling_function.hertz_to_scale(low_hz)
scale_high = scaling_function.hertz_to_scale(high_hz)
scale_delta = (scale_high - scale_low) / (num_filts + 1)
# edges dictate the points where filters should intersect. We
# make a pretend intersection halfway between low_hz and
# the first filter center in the scaled domain. Likewise with
# high_hz and the last filter center. Intersections are spaced
# uniformly in the scaled domain
edges = tuple(
scaling_function.scale_to_hertz(scale_low + scale_delta * (idx + 0.5))
for idx in range(0, num_filts + 1)
)
centers_hz = []
centers_ang = []
stds = []
supports_ang = []
supports = []
wrap_supports_ang = []
self._wrap_below = False
log_2 = np.log(2)
log_pi = np.log(np.pi)
t_support_const = -2 * np.log(config.EFFECTIVE_SUPPORT_THRESHOLD)
f_support_const = t_support_const
if scale_l2_norm:
f_support_const += log_2 + 0.5 * log_pi
t_support_const -= 0.5 * log_pi
else:
t_support_const -= log_2 + log_pi
if erb:
bandwidth_const = np.sqrt(np.pi) / 2
else:
bandwidth_const = np.sqrt(3 / 10 * np.log(10))
for left_intersect, right_intersect in zip(edges[:-1], edges[1:]):
center_hz = (left_intersect + right_intersect) / 2
center_ang = hertz_to_angular(center_hz, self._rate)
std = bandwidth_const / hertz_to_angular(
center_hz - left_intersect, self._rate
)
log_std = np.log(std)
if scale_l2_norm:
diff_ang = np.sqrt(log_std + f_support_const) / std
wrap_diff_ang = np.sqrt(log_std + f_support_const + log_2) / std
diff_samps = int(np.ceil(std * np.sqrt(t_support_const - log_std)))
else:
diff_ang = np.sqrt(f_support_const) / std
wrap_diff_ang = np.sqrt(f_support_const + log_2) / std
diff_samps = int(np.ceil(std * np.sqrt(t_support_const - 2 * log_std)))
supp_ang_low = center_ang - diff_ang
if supp_ang_low < 0:
self._wrap_below = True
centers_hz.append(center_hz)
centers_ang.append(center_ang)
supports_ang.append((center_ang - diff_ang, center_ang + diff_ang))
wrap_supports_ang.append(2 * wrap_diff_ang)
supports.append((-diff_samps, diff_samps))
stds.append(std)
self._centers_ang = tuple(centers_ang)
self._centers_hz = tuple(centers_hz)
self._stds = tuple(stds)
self._supports_ang = tuple(supports_ang)
self._wrap_supports_ang = tuple(wrap_supports_ang)
self._supports_hz = tuple(
(angular_to_hertz(ang_l, self._rate), angular_to_hertz(ang_h, self._rate),)
for ang_l, ang_h in supports_ang
)
self._supports = tuple(supports)
@property
def is_real(self) -> bool:
return False
@property
def is_analytic(self) -> bool:
return not self._wrap_below
@property
def num_filts(self) -> int:
return len(self._centers_hz)
@property
def is_zero_phase(self) -> bool:
return True
@property
def sampling_rate(self) -> float:
return self._rate
@property
def centers_hz(self) -> Tuple[float]:
"""The point of maximum gain in each filter's frequency response, in Hz
This property gives the so-called "center frequencies" - the
point of maximum gain - of each filter.
"""
return self._centers_hz
@property
def supports_hz(self) -> tuple:
return self._supports_hz
@property
def supports(self) -> tuple:
return self._supports
@property
def scaled_l2_norm(self) -> bool:
return self._scale_l2_norm
@property
def erb(self) -> bool:
return self._erb
def get_impulse_response(self, filt_idx: int, width: int) -> np.ndarray:
center_ang = self._centers_ang[filt_idx]
std = self._stds[filt_idx]
res = np.zeros(width, dtype=np.complex128)
if self._scale_l2_norm:
const_term = -0.5 * np.log(std) - 0.25 * np.log(np.pi)
else:
const_term = -0.5 * np.log(2 * np.pi) - np.log(std)
denom_term = 2 * std ** 2
for t in range(width + 1):
val = -(t ** 2) / denom_term + const_term + 1j * center_ang * t
val = np.exp(val)
if t != width:
res[t] += val
if t:
res[-t] += val.conj()
return res
def get_frequency_response(
self, filt_idx: int, width: int, half: bool = False
) -> np.ndarray:
center_ang = self._centers_ang[filt_idx]
lowest_ang, highest_ang = self._supports_ang[filt_idx]
std = self._stds[filt_idx]
dft_size = width
if half:
if width % 2:
dft_size = (width + 1) // 2
else:
dft_size = width // 2 + 1
res = np.zeros(dft_size, dtype=np.float64)
if self._scale_l2_norm:
const_term = 0.5 * np.log(2 * std) + 0.25 * np.log(np.pi)
else:
const_term = 0
num_term = -(std ** 2) / 2
for idx in range(dft_size):
for period in range(
-1 - int(max(-lowest_ang, 0) / (2 * np.pi)),
2 + int(highest_ang / (2 * np.pi)),
):
omega = (idx / width + period) * 2 * np.pi
val = num_term * (center_ang - omega) ** 2 + const_term
val = np.exp(val)
res[idx] += val
return res
def get_truncated_response(self, filt_idx: int, width: int) -> np.ndarray:
# wrap_supports_ang contains the angular supports of each filter
# if the effective support threshold were halved. If this
# support exceeds the 2pi period, overlap from aliasing in the
# periphery will exceed the effective support, meaning the
# entire period lies in the support
if self._wrap_supports_ang[filt_idx] >= 2 * np.pi:
return 0, self.get_frequency_response(filt_idx, width)
center_ang = self._centers_ang[filt_idx]
std = self._stds[filt_idx]
lowest_ang, highest_ang = self._supports_ang[filt_idx]
left_idx = int(np.ceil(width * lowest_ang / (2 * np.pi)))
right_idx = int(width * highest_ang / (2 * np.pi))
res = np.zeros(1 + right_idx - left_idx, dtype=np.float64)
if self._scale_l2_norm:
const_term = 0.5 * np.log(2 * std) + 0.25 * np.log(np.pi)
else:
const_term = 0
num_term = -(std ** 2) / 2
for idx in range(left_idx, right_idx + 1):
for period in range(
-int(max(-lowest_ang, 0) / (2 * np.pi)),
1 + int(highest_ang / (2 * np.pi)),
):
omega = (idx / width + period) * 2 * np.pi
val = num_term * (center_ang - omega) ** 2 + const_term
val = np.exp(val)
res[idx - left_idx] += val
return left_idx % width, res
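# A similar sketch for the Gabor bank: the impulse responses are complex-valued
# while each frequency response is a real Gaussian bump centred on the filter's
# centre frequency. Parameters are illustrative.
def _example_gabor_bank():
    bank = GaborFilterBank(MelScaling(), num_filts=8, low_hz=50.0, sampling_rate=8000)
    ir = bank.get_impulse_response(0, 256)    # complex128 buffer of length 256
    fr = bank.get_frequency_response(0, 256)  # real-valued Gaussian response
    return ir, fr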
class ComplexGammatoneFilterBank(LinearFilterBank):
r"""Gammatone filters with complex carriers
A complex gammatone filter [flanagan1960]_ [aertsen1981]_ can be defined as
.. math::
h(t) = c t^{n - 1} e^{- \alpha t + i\xi t} u(t)
in the time domain, where :math:`\alpha` is the bandwidth parameter, :math:`\xi` is
the carrier frequency, :math:`n` is the order of the function, :math:`u(t)` is the
step function, and :math:`c` is a normalization constant. In the frequency domain,
the filter is defined as
.. math::
H(\omega) = \frac{c(n - 1)!)}{\left(
\alpha + i(\omega - \xi) \right)^n}
For large :math:`\xi`, the complex gammatone is approximately analytic.
`scaling_function` is used to split up the frequencies between `high_hz` and
`low_hz` into a series of filters. Every subsequent filter's width is scaled such
that, if the filters are all of the same height, the intersection with the precedent
filter's response matches the filter's Equivalent Rectangular Bandwidth (``erb ==
True``) or its 3dB bandwidths (``erb == False``). The ERB is the width of a
rectangular filter with the same height as the filter's maximum frequency response
that has the same :math:`L^2` norm.
Parameters
----------
scaling_function : pydrobert.speech.ScalingFunction, str, or dict
Dictates the layout of filters in the Fourier domain. Can be a
:class:`ScalingFunction` or something compatible with
:func:`pydrobert.speech.alias_factory_subclass_from_arg`
num_filts : int, optional
The number of filters in the bank
high_hz, low_hz : float, optional
The topmost and bottommost edge of the filters, respectively. The default for
high_hz is the Nyquist
sampling_rate : float, optional
The sampling rate (cycles/sec) of the target recordings
order : int, optional
The :math:`n` parameter in the Gammatone. Should be positive. Larger orders
will make the gammatone more symmetrical.
max_centered : bool, optional
While normally causal, setting `max_centered` to true will shift all filters in
the bank such that the maximum absolute value in time is centered at sample 0.
scale_l2_norm : bool
Whether to scale the l2 norm of each filter to 1. Otherwise the frequency
response of each filter will max out at an absolute value of 1.
erb : bool
Attributes
----------
centers_hz : tuple
is_real : bool
is_analytic : bool
num_filts : int
order : int
sampling_rate : float
supports_hz : tuple
supports : tuple
supports_ms : tuple
scaled_l2_norm : bool
erb : bool
See Also
--------
pydrobert.speech.config.EFFECTIVE_SUPPORT_THRESHOLD
The absolute value below which counts as zero
"""
aliases = {"gammatone", "tonebank"}
def __init__(
self,
scaling_function: Union[ScalingFunction, Mapping, str],
num_filts: int = 40,
high_hz: Optional[float] = None,
low_hz: float = 20.0,
sampling_rate: float = 16000,
order: int = 4,
max_centered: bool = False,
scale_l2_norm: bool = False,
erb: bool = False,
):
scaling_function = alias_factory_subclass_from_arg(
ScalingFunction, scaling_function
)
self._scale_l2_norm = scale_l2_norm
self._erb = erb
if low_hz < 0 or (
high_hz and (high_hz <= low_hz or high_hz > sampling_rate // 2)
):
raise ValueError(
"Invalid frequency range: ({:.2f},{:.2f}".format(low_hz, high_hz)
)
if not isinstance(order, int) or order <= 0:
raise ValueError("order must be a positive integer")
self._order = order
self._rate = sampling_rate
if high_hz is None:
high_hz = sampling_rate // 2
scale_low = scaling_function.hertz_to_scale(low_hz)
scale_high = scaling_function.hertz_to_scale(high_hz)
scale_delta = (scale_high - scale_low) / (num_filts + 1)
# see gabor filters for more info
edges = tuple(
scaling_function.scale_to_hertz(scale_low + scale_delta * (idx + 0.5))
for idx in range(0, num_filts + 1)
)
self._centers_hz = []
self._xis = []
self._alphas = []
self._cs = []
self._offsets = []
self._supports = []
self._supports_ang = []
self._wrap_supports_ang = []
self._wrap_below = False
log_eps = np.log(config.EFFECTIVE_SUPPORT_THRESHOLD)
log_double_factorial = np.log(np.math.factorial(2 * order - 2))
log_factorial = np.log(np.math.factorial(order - 1))
log_2 = np.log(2)
if erb:
alpha_const = (order - 1) * log_2
alpha_const += 2 * log_factorial
alpha_const -= log_double_factorial
else:
alpha_const = -0.5 * np.log(4 * (2 ** (1 / order)) - 4)
for left_intersect, right_intersect in zip(edges[:-1], edges[1:]):
center_hz = (left_intersect + right_intersect) / 2
xi = hertz_to_angular(center_hz, self._rate)
log_alpha = alpha_const + np.log(
hertz_to_angular(right_intersect - left_intersect, self._rate)
)
alpha = np.exp(log_alpha)
if scale_l2_norm:
log_c = 0.5 * (log_2 + log_alpha + log_double_factorial)
log_c -= order * (log_alpha + log_2)
else:
log_c = order * log_alpha - log_factorial
c = np.exp(log_c)
if max_centered:
offset = -(order - 1) / alpha
else:
offset = 0
supp_a = (2 / order) * (log_c + log_factorial - log_eps)
wrap_supp_a = supp_a + (2 / order) * log_2
supp_b = np.exp(2 * log_alpha)
diff_ang = (np.exp(supp_a) - supp_b) ** 0.5
wrap_diff_ang = (np.exp(wrap_supp_a) - supp_b) ** 0.5
self._centers_hz.append(center_hz)
self._xis.append(xi)
self._alphas.append(alpha)
self._cs.append(c)
self._offsets.append(offset)
self._supports.append(self._calculate_temp_support(-1))
self._supports_ang.append((xi - diff_ang, xi + diff_ang))
if self._supports_ang[-1][0] < 0:
self._wrap_below = True
self._wrap_supports_ang.append(2 * wrap_diff_ang)
self._xis = tuple(self._xis)
self._cs = tuple(self._cs)
self._alphas = tuple(self._alphas)
self._offsets = tuple(self._offsets)
self._centers_hz = tuple(self._centers_hz)
self._supports_ang = tuple(self._supports_ang)
self._wrap_supports_ang = tuple(self._wrap_supports_ang)
self._supports_hz = tuple(
(angular_to_hertz(ang_l, self._rate), angular_to_hertz(ang_h, self._rate),)
for ang_l, ang_h in self._supports_ang
)
self._supports = tuple(self._supports)
@property
def is_real(self) -> bool:
return False
@property
def is_analytic(self) -> bool:
return not self._wrap_below
@property
def num_filts(self) -> int:
return len(self._centers_hz)
@property
def order(self) -> int:
return self._order
@property
def is_zero_phase(self) -> bool:
return False
@property
def sampling_rate(self) -> float:
return self._rate
@property
def centers_hz(self) -> Tuple[float]:
"""The point of maximum gain in each filter's frequency response, in Hz
This property gives the so-called "center frequencies" - the
point of maximum gain - of each filter.
"""
return self._centers_hz
@property
def supports_hz(self) -> tuple:
return self._supports_hz
@property
def supports(self) -> tuple:
return self._supports
@property
def scaled_l2_norm(self) -> bool:
return self._scale_l2_norm
@property
def erb(self) -> bool:
return self._erb
def get_impulse_response(self, filt_idx: int, width: int) -> np.ndarray:
left_sup, right_sup = self.supports[filt_idx]
left_period = int(np.floor(left_sup / width))
right_period = int(np.ceil(right_sup / width))
res = np.zeros(width, dtype=np.complex128)
for period in range(left_period, right_period + 1):
for idx in range(width):
t = period * width + idx
res[idx] += self._h(t, filt_idx)
return res
def get_frequency_response(
self, filt_idx: int, width: int, half: bool = False
) -> np.ndarray:
left_sup, right_sup = self._supports_ang[filt_idx]
left_period = int(np.floor(left_sup / 2 / np.pi))
right_period = int(np.ceil(right_sup / 2 / np.pi))
if half:
if width % 2:
dft_size = (width + 1) // 2
else:
dft_size = width // 2 + 1
else:
dft_size = width
res = np.zeros(dft_size, dtype=np.complex128)
for period in range(left_period, right_period + 1):
for idx in range(dft_size):
omega = (idx / width + period) * 2 * np.pi
res[idx] += self._H(omega, filt_idx)
return res
def get_truncated_response(self, filt_idx: int, width: int) -> np.ndarray:
left_sup, right_sup = self._supports_ang[filt_idx]
wrap_ang = self._wrap_supports_ang[filt_idx]
# wrap_ang is the additional support needed to hit
# half the effective support threshold. If that support is
# greater than the 2pi periodization, some points could exceed
# the threshold due to wrapping.
if right_sup - left_sup + wrap_ang >= 2 * np.pi:
return 0, self.get_frequency_response(filt_idx, width)
left_idx = int(np.ceil(width * left_sup / 2 / np.pi))
right_idx = int(width * right_sup / 2 / np.pi)
omega = np.arange(left_idx, right_idx + 1, dtype=np.float64)
omega *= 2 * np.pi / width
return left_idx % width, self._H(omega, filt_idx)
def _h(self, t, idx):
# calculate impulse response of filt idx at sample t
offset = self._offsets[idx]
if t <= offset:
return 0j
alpha = self._alphas[idx]
log_c = np.log(self._cs[idx])
xi = self._xis[idx]
n = self._order
r = log_c + (n - 1) * np.log(t - offset)
r += (-alpha + 1j * xi) * (t - offset)
return np.exp(r)
def _H(self, omega, idx):
# calculate frequency response of filt idx at ang freqs omega
alpha = self._alphas[idx]
c = self._cs[idx]
xi = self._xis[idx]
offset = self._offsets[idx]
n = self._order
numer = np.exp(-1j * omega * offset) * c * np.math.factorial(n - 1)
denom = (alpha + 1j * (omega - xi)) ** n
return numer / denom
def _calculate_temp_support(self, idx):
# calculate the nonzero region of the temp support of filt idx
alpha = self._alphas[idx]
c = self._cs[idx]
# xi = self._xis[idx]
offset = self._offsets[idx]
n = self._order
eps = config.EFFECTIVE_SUPPORT_THRESHOLD
if n == 1:
right = int(np.ceil((np.log(c) - np.log(eps)) / alpha))  # solve c * exp(-alpha * t) = eps for t
else:
def _d(t):
# derivative of abs func
v = c * np.exp(-alpha * t) * t ** (n - 2)
v *= (n - 1) - alpha * t
return v
right = (n - 1 + np.sqrt((n - 1) / 2)) / alpha
h_0 = np.abs(self._h(right, idx))
while h_0 > eps:
d_0 = _d(right)
right -= h_0 / d_0
h_0 = np.abs(self._h(right, idx))
return (int(np.floor(offset)), int(np.ceil(right) + offset))
# windows
class WindowFunction(AliasedFactory):
"""A real linear filter, usually lowpass"""
@abc.abstractmethod
def get_impulse_response(self, width: int) -> np.ndarray:
"""Write the filter into a numpy array of fixed width"""
pass
class BartlettWindow(WindowFunction):
"""A unit-normalized triangular window
See Also
--------
numpy.bartlett
"""
aliases = {"bartlett", "triangular", "tri"}
def get_impulse_response(self, width: int) -> np.ndarray:
window = np.bartlett(width)
window /= max(1, width - 1) / 2
return window
class BlackmanWindow(WindowFunction):
"""A unit-normalized Blackman window
See Also
--------
numpy.blackman
"""
aliases = {"blackman", "black"}
def get_impulse_response(self, width: int) -> np.ndarray:
window = np.blackman(width)
window /= 0.42 * max(1, width - 1)
return window
class HammingWindow(WindowFunction):
"""A unit-normalized Hamming window
See Also
--------
numpy.hamming
"""
aliases = {"hamming"}
def get_impulse_response(self, width: int) -> np.ndarray:
window = np.hamming(width)
window /= 0.54 * max(1, width - 1)
return window
class HannWindow(WindowFunction):
"""A unit-normalized Hann window
See Also
--------
numpy.hanning
"""
aliases = {"hanning", "hann"}
def get_impulse_response(self, width: int) -> np.ndarray:
window = np.hanning(width)
window /= 0.5 * max(1, width - 1)
return window
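# A quick check of the "unit-normalized" claim for the fixed-shape windows
# above: after scaling, each window sums to approximately one, so filtering
# with it preserves the average signal level. Width 400 is an arbitrary choice.
def _example_window_normalization(width=400):
    return {
        "bartlett": BartlettWindow().get_impulse_response(width).sum(),
        "blackman": BlackmanWindow().get_impulse_response(width).sum(),
        "hamming": HammingWindow().get_impulse_response(width).sum(),
        "hann": HannWindow().get_impulse_response(width).sum(),
    }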
class GammaWindow(WindowFunction):
r"""A lowpass filter based on the Gamma function
A Gamma function is defined as:
.. math:: p(t; \alpha, n) = t^{n - 1} e^{-\alpha t} u(t)
Where :math:`n` is the order of the function, :math:`\alpha`
controls the bandwidth of the filter, and :math:`u` is the step
function.
This function returns a window based off a reflected Gamma function.
:math:`\alpha` is chosen such that the maximum value of the window
aligns with `peak`. The window is clipped to the width. For
reasonable values of `peak` (i.e. in the last quarter of samples),
the majority of the support should lie in this interval anyways.
Arguments
---------
order : int
peak : float
``peak * width``, where ``width`` is the length of the window
in samples, is where the approximate maximal value of the window
lies
Attributes
----------
order : int
peak : float
"""
aliases = {"gamma"}
def __init__(self, order: int = 4, peak: float = 0.75):
self.order = order
self.peak = peak
def get_impulse_response(self, width: int) -> np.ndarray:
if width <= 0:
return np.array([], dtype=float)
from numpy.random import seed
import scipy.io
from keras.utils import np_utils
import numpy as np
import pickle
import scipy as sc
def createDataset_12(path):
seed(0)
sample = []
labels = []
subject = []
mat = scipy.io.loadmat(path)
for i in range(mat['muestras']['Experiment_ID'].size):
sample.append(mat['muestras'].item(i)[18][:, 1:4])
subject.append(np_utils.to_categorical(int(mat['muestras'].item(i)[2][0][-1]), 30))
label = mat['muestras'].item(i)[7]
filter_label = lambda label: 1 if label == "Fall" else 0
label = filter_label(label)
labels.append(np_utils.to_categorical(label, 2))
sample = np.expand_dims(sample, 1)
return sample, np.array(labels), np.array(subject)
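# A hedged usage sketch for the loader above. The path is a hypothetical
# placeholder for the .mat recording file this loader expects; the shapes follow
# from the code above (samples expanded along axis 1, one-hot fall/ADL labels,
# one-hot subject ids).
def _example_load_dataset_12(path='recordings.mat'):
    sample, labels, subject = createDataset_12(path)
    return sample.shape, labels.shape, subject.shape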
def createDataset_11(path):
seed(0)
sample = []
labels = []
subject = []
ages = []
mat = scipy.io.loadmat(path)
for i in range(mat['muestras']['Experiment_ID'].size):
sample.append(mat['muestras'].item(i)[18][:, 1:4])
subject.append(np_utils.to_categorical(int(mat['muestras'].item(i)[2][0][-1]), 30))
label = mat['muestras'].item(i)[7]
age = mat['muestras'].item(i)[3]
filter_label = lambda label: 1 if label == "Fall" else 0
label = filter_label(label)
labels.append(np_utils.to_categorical(label, 2))
ages.append(age)
sample = np.expand_dims(sample, 1)
return sample, np.array(labels), np.array(subject), np.array(ages)
def createDataset_15(path):
seed(0)
sample = []
labels = []
subject = []
mat = scipy.io.loadmat(path)
for i in range(mat['muestras']['Experiment_ID'].size):
if np.any(mat['muestras'].item(i)[23][:, 1:4]):
sample.append(mat['muestras'].item(i)[23][:, 1:4])
subject.append(np_utils.to_categorical(int(mat['muestras'].item(i)[2][0][-1]), 30))
label = mat['muestras'].item(i)[7]
filter_label = lambda label: 1 if label == "Fall" else 0
label = filter_label(label)
labels.append(np_utils.to_categorical(label, 2))
sample = np.expand_dims(sample, 1)
return sample, np.array(labels), np.array(subject)
def createDataset_07(path):
seed(0)
sample = []
labels = []
subject = []
mat = scipy.io.loadmat(path)
for i in range(mat['muestras']['Experiment_ID'].size):
if np.any(mat['muestras'].item(i)[19][:, 1:4]):
sample.append(mat['muestras'].item(i)[19][:, 1:4])
subject.append(np_utils.to_categorical(int(mat['muestras'].item(i)[2][0][-1]), 30))
label = mat['muestras'].item(i)[7]
filter_label = lambda label: 1 if label == "Fall" else 0
label = filter_label(label)
labels.append(np_utils.to_categorical(label, 2))
sample = np.expand_dims(sample, 1)
return sample, np.array(labels), np.array(subject)
def createDataset_03(path):
seed(0)
sample = []
labels = []
subject = []
mat = scipy.io.loadmat(path)
for i in range(mat['muestras']['Experiment_ID'].size):
if np.any(mat['muestras'].item(i)[18][:, 1:4]):
sample.append(mat['muestras'].item(i)[18][:, 1:4])
subject.append(np_utils.to_categorical(int(mat['muestras'].item(i)[2][0][-1]), 30))
label = mat['muestras'].item(i)[7]
filter_label = lambda label: 1 if label == "Fall" else 0
label = filter_label(label)
labels.append(np_utils.to_categorical(label, 2))
sample = np.expand_dims(sample, 1)
return sample, np.array(labels), np.array(subject)
def createDataset_05(path):
data_adl = getAllDataAsListNew('adl')
data_adl = data_adl[:, :, 125:176]
    data_adl = np.stack(data_adl, 2)
"""
SECS utils
"""
import numpy as np
d2r = np.pi/180
MU0 = 4 * np.pi * 1e-7
RE = 6371.2 * 1e3
def dpclip(x, delta = 1e-7):
"""
dot product clip:
clip x to values between -1 + delta and 1 - delta
"""
return np.clip(x, -1 + delta, 1 - delta)
def get_theta(lat, lon, lat_secs, lon_secs, return_degrees = False):
"""" calculate theta angle - the angle between data point and secs node.
Parameters
----------
lat: array-like
Array of latitudes of evaluation points [deg]
Flattened array must have same size as lon
lon: array-like
Array of longitudes of evaluation points [deg].
Flattened array must have same size as lat
lat_secs: array-like
Array of SECS pole latitudes [deg]
Flattened array must have same size as lon_secs
lon_secs: array-like
Array of SECS pole longitudes [deg]
        Flattened array must have same size as lat_secs
Output will be a 2D array with shape (mlat.size, mlat_secs.size)
return_degrees: bool, optional
Set to True if you want output in degrees. Default is False (radians)
Returns
-------
theta: 2D array (lat.size, lat_secs.size)
Array of polar angles, angular distances between the points
described by (lat, lon) and the points described by
(lat_secs, lon_secs). Unit in radians unless return_degrees is set
to True
"""
# reshape angles and convert to radians:
la = np.array(lat).flatten()[:, np.newaxis] * d2r
lo = np.array(lon).flatten()[:, np.newaxis] * d2r
la_s = np.array(lat_secs).flatten()[np.newaxis, :] * d2r
lo_s = np.array(lon_secs).flatten()[np.newaxis, :] * d2r
# ECEF position vectors of data points - should be N by 3, where N is number of data points
ecef_r_data = np.hstack((np.cos(la ) * np.cos(lo ), np.cos(la ) * np.sin(lo ), np.sin(la )))
# position vectors SECS poles - should be 3 by M, where M is number of SECS - these are the z axes of each SECS
ecef_r_secs = np.vstack((np.cos(la_s) * np.cos(lo_s), np.cos(la_s) * np.sin(lo_s), np.sin(la_s))).T
# the polar angles (N, M):
theta = np.arccos(dpclip(np.einsum('ij, kj -> ik', ecef_r_data, ecef_r_secs)))
if return_degrees:
theta = theta / d2r
return theta
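# Example (illustrative, with made-up coordinates): polar angles between two
# evaluation points and three SECS poles; the result has shape (2, 3).
#
#   lat, lon = np.array([60.0, 65.0]), np.array([10.0, 20.0])
#   lat_secs, lon_secs = np.array([62.0, 64.0, 66.0]), np.array([12.0, 15.0, 18.0])
#   theta = get_theta(lat, lon, lat_secs, lon_secs, return_degrees=True)
#   print(theta.shape)  # (2, 3)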
def get_SECS_J_G_matrices(lat, lon, lat_secs, lon_secs,
current_type = 'divergence_free', constant = 1./(4*np.pi),
RI = RE + 110 * 1e3,
singularity_limit = 0):
""" Calculate matrices Ge and Gn which relate SECS amplitudes to current density
vector components.
Parameters
----------
lat: array-like
Array of latitudes of evaluation points [deg]
Flattened array must have same size as lon
lon: array-like
Array of longitudes of evaluation points [deg].
Flattened array must have same size as lat
lat_secs: array-like
Array of SECS pole latitudes [deg]
Flattened array must have same size as lon_secs
lon_secs: array-like
Array of SECS pole longitudes [deg]
        Flattened array must have same size as lat_secs
current_type: string, optional
The type of SECS function. This must be either
'divergence_free' (default): divergence-free basis functions
'curl_free': curl-free basis functions
'potential': scalar field whose negative gradient is curl-free SECS
            'scalar': scalar field magnitude of the SECS basis functions
constant: float, optional
The SECS functions are scaled by the factor 1/(4pi), which is
the default value of 'constant'. Change if you want something
different.
RI: float (optional)
Radius of SECS poles. Default is Earth radius + 110,000 m
singularity_limit: float (optional)
A modified version of the SECS functions will be used at
points that are closer than singularity_limit. The modification
is given by equations 2.43 (CF) and 2.44 (DF) in Vanhamaki and
Juusola (2020), and singularity_limit / RI is equal to theta0
in these equations. Default is 0, which means that the original
version of the SECS functions are used (with singularities).
singularity_limit is ignored if current_type is 'potential' or 'scalar'
Returns
-------
If current_type is 'divergence_free' or 'curl_free':
Ge: 2D array
2D array with shape (lat.size, lat_secs.size), relating SECS amplitudes
m to the eastward current densities at (lat, lon) via 'je = Ge.dot(m)'
Gn: 2D array
2D array with shape (lat.size, lat_secs.size), relating SECS amplitudes
m to the northward current densities at (lat, lon) via 'jn = Gn.dot(m)'
If current_type is 'potential' or 'scalar':
G: 2D array
2D array with shape (lat.size, lat_secs.size), relating amplitudes m
to scalar field magnitude at (lat, lon) via 'z = G.dot(m)'
"""
# reshape angles and convert to radians:
la = np.array(lat).flatten()[:, np.newaxis] * d2r
lo = np.array(lon).flatten()[:, np.newaxis] * d2r
la_s = np.array(lat_secs).flatten()[np.newaxis, :] * d2r
lo_s = np.array(lon_secs).flatten()[np.newaxis, :] * d2r
# ECEF position vectors of data points - should be N by 3, where N is number of data points
ecef_r_data = np.hstack((np.cos(la ) * np.cos(lo ), np.cos(la ) * np.sin(lo ), np.sin(la )))
# position vectors SECS poles - should be 3 by M, where M is number of SECS - these are the z axes of each SECS
ecef_r_secs = np.vstack((np.cos(la_s) * np.cos(lo_s), np.cos(la_s) * np.sin(lo_s), np.sin(la_s))).T
# unit vector pointing from SECS to data points - (M, N, 3)
ecef_t = ecef_r_secs[np.newaxis, :, :] - ecef_r_data[:, np.newaxis, :] # difference vector - not tangential yet
ecef_t = ecef_t - np.einsum('ijk,ik->ij', ecef_t, ecef_r_data)[:, :, np.newaxis] * ecef_r_data[:, np.newaxis, :] # subtract radial part of the vector to make it tangential
ecef_t = ecef_t/np.linalg.norm(ecef_t, axis = 2)[:, :, np.newaxis] # normalize the result
# make N rotation matrices to rotate ecef_t to enu_t - one rotation matrix per SECS:
R = np.hstack( (np.dstack((-np.sin(lo) , np.cos(lo) , np.zeros_like(la) )),
np.dstack((-np.cos(lo) * np.sin(la), -np.sin(lo) * np.sin(la), np.cos( la) )),
np.dstack(( np.cos(lo) * np.cos(la), np.sin(lo) * np.cos(la), np.sin( la) ))) )
# apply rotation matrices to make enu vectors pointing from data points to SECS
    enu_t = np.einsum('lij, lkj->lki', R, ecef_t)[:, :, :-1] # remove last component (up), which should deviate from zero only by machine precision
if current_type == 'divergence_free':
# rotate these vectors to get vectors pointing eastward with respect to SECS systems at each data point
enu_vec = np.dstack((enu_t[:, :, 1], -enu_t[:, :, 0])) # north -> east and east -> south
elif current_type == 'curl_free':
enu_vec = -enu_t # outward from SECS
elif current_type in ['potential', 'scalar']:
enu_vec = 1
else:
        raise Exception('type must be "divergence_free", "curl_free", "potential", or "scalar"')
# get the scalar part of Amm's divergence-free SECS:
theta = np.arccos(dpclip(np.einsum('ij,kj->ik', ecef_r_secs, ecef_r_data)))
if current_type in ['divergence_free', 'curl_free']:
coeff = constant /np.tan(theta/2)/ RI
# apply modifications to handle singularities:
theta0 = singularity_limit / RI
if theta0 > 0:
alpha = 1 / np.tan(theta0/2)**2
coeff[theta < theta0] = constant * alpha * np.tan(theta[theta < theta0]/2) / RI
# G matrices
Ge = coeff * enu_vec[:, :, 0].T
Gn = coeff * enu_vec[:, :, 1].T
return Ge.T, Gn.T
else: # current_type is 'potential' or 'scalar'
if current_type == 'potential':
return -2*constant*np.log(np.sin(theta/2)).T
elif current_type == 'scalar':
return constant / np.tan(theta/2).T
def get_SECS_B_G_matrices(lat, lon, r, lat_secs, lon_secs,
current_type = 'divergence_free', constant = 1./(4*np.pi),
RI = RE + 110 * 1e3,
singularity_limit = 0,
induction_nullification_radius = None):
""" Calculate matrices Ge, Gn, and Gr which relate SECS amplitudes to magnetic field
Based on equations (9) and (10) of Amm and Viljanen 1999, or (2.13)-(2.14) in Vanhamaki
and Juusola 2020.
If singularity_limit > 0, the magnetic field of curl-free currents is modified, but
not the magnetic field of divergence-free currents (!). See Section 2.10.2 and
equation (2.46) in Vanhamaki and Juusola 2020.
Parameters
----------
lat: array-like
Array of latitudes of evaluation points [deg]
Flattened array must have same size as lon
lon: array-like
Array of longitudes of evaluation points [deg].
Flattened array must have same size as lat
r: array-like
Array of radii of evaluation points. Flattened
array must either have size 1, in which case one
radius is used for all points, or have same size as
lat. Unit should be the same as RI
lat_secs: array-like
Array of SECS pole latitudes [deg]
Flattened array must have same size as lon_secs
lon_secs: array-like
Array of SECS pole longitudes [deg]
        Flattened array must have same size as lat_secs
current_type: string, optional
The type of SECS function. This must be either
'divergence_free' (default): divergence-free basis functions
'curl_free': curl-free basis functions
constant: float, optional
The SECS functions are scaled by the factor 1/(4pi), which is
the default value of 'constant'. Change if you want something
different.
RI: float (optional)
Radius of SECS poles. Default is Earth radius + 110,000 m
singularity_limit: float (optional)
A modified version of the SECS functions will be used at
points that are closer than singularity_limit. The modification
is given by equations 2.43 (CF) and 2.44 (DF) in Vanhamaki and
Juusola (2020), and singularity_limit / RI is equal to theta0
in these equations. Default is 0, which means that the original
version of the SECS functions are used (with singularities).
induction_nullification_radius: float or None, optional
The radius at which ground induced image currents cancel the radial
magnetic field. Default in None, in which case there are no
induced image currents. This part is based on equations of Appendix A
in "<NAME>., <NAME>., <NAME>., <NAME>., and
<NAME>. (2016), Comparison of auroral ionospheric and field‐
aligned currents derived from Swarm and ground magnetic field measurements,
J. Geophys. Res. Space Physics, 121, 9256– 9283, doi:10.1002/2016JA022961."
Returns
-------
Ge: 2D array
2D array with shape (lat.size, lat_secs.size), relating SECS amplitudes
m to the eastward magnetic field at (lat, lon) via 'Be = Ge.dot(m)'
Gn: 2D array
2D array with shape (lat.size, lat_secs.size), relating SECS amplitudes
m to the northward magnetic field at (lat, lon) via 'Bn = Gn.dot(m)'
Gr: 2D array
2D array with shape (lat.size, lat_secs.size), relating SECS amplitudes
m to the radial magnetic field at (lat, lon) via 'Br = Gr.dot(m)'
"""
# reshape angles and convert to radians:
la = np.array(lat).flatten()[:, np.newaxis] * d2r
lo = np.array(lon).flatten()[:, np.newaxis] * d2r
    la_s = np.array(lat_secs).flatten()[np.newaxis, :] * d2r
import numpy as np
import math
import pyvista as pv
import tree as T
import assignment as AS
import time
import pickle
from tqdm import tqdm_notebook
from pathlib import Path
from scipy.optimize import linear_sum_assignment
from pyvista import examples
from operator import itemgetter
dataset_teapot = examples.download_teapot()
dataset_bunny = examples.download_bunny_coarse()
teapot_points, temp = T.generate_points(dataset_teapot)
bunny_points, temp = T.generate_points(dataset_bunny)
source = teapot_points
destination = bunny_points * 10
source_points, destination_points = [], []
src_pts_path = Path("source_pts.pkl")
dst_pts_path = Path("dst_pts.pkl")
if src_pts_path.exists():
with open(src_pts_path, "rb") as fp:
source_points = pickle.load(fp)
with open(dst_pts_path, "rb") as fp:
destination_points = pickle.load(fp)
else :
start = time.time()
Morphing = AS.Assignment(source, destination)
Morphing.calculate()
source_points, destination_points = Morphing.get_result()
print("time : ", time.time() - start)
if not src_pts_path.exists():
with open(src_pts_path, "wb") as fp:
pickle.dump(source_points, fp)
with open("dst_pts.pkl", "wb") as fp:
pickle.dump(destination_points, fp)
# Test
FRAME = 240
filename = "test.mp4"
# Frame Image
start_dataset = pv.PolyData(np.array(source_points))
source_dataset = pv.PolyData(np.array(source_points))
#source_dataset.plot(show_edges = True)
destination_dataset = pv.PolyData(np.array(destination_points))
"""Demo code shows how to estimate human head pose.
Currently, human face is detected by a detector from an OpenCV DNN module.
Then the face box is modified a little to suits the need of landmark
detection. The facial landmark detection is done by a custom Convolutional
Neural Network trained with TensorFlow. After that, head pose is estimated
by solving a PnP problem.
"""
from argparse import ArgumentParser
from multiprocessing import Process, Queue
import cv2
import numpy as np
from mark_detector import MarkDetector
from os_detector import detect_os
from pose_estimator import PoseEstimator
from stabilizer import Stabilizer
import math
print("OpenCV version: {}".format(cv2.__version__))
# multiprocessing may not work on Windows and macOS, check OS for safety.
detect_os()
CNN_INPUT_SIZE = 128
real_width = 114
focal_length = 474
# Take arguments from user input.
parser = ArgumentParser()
parser.add_argument("--video", type=str, default=None,
help="Video file to be processed.")
parser.add_argument("--cam", type=int, default=None,
help="The webcam index.")
args = parser.parse_args()
def rotate_vector(v, x_angle, y_angle):
rotated_v = np.copy(v)
rotated_v[1] = -rotated_v[1]
# rotate around y axis, e.g. in horizontal plane
x_prime = v[0] * math.cos(x_angle) + v[2] * math.sin(x_angle)
y_prime = v[1]
z_prime = -v[0] * math.sin(x_angle) + v[2] * math.cos(x_angle)
rotated_v[0] = x_prime
rotated_v[1] = y_prime
rotated_v[2] = z_prime
# rotate around x axis, e.g. in vertical plane
    x_prime = rotated_v[0]
    y_prime = rotated_v[1] * math.cos(y_angle) - rotated_v[2] * math.sin(y_angle)
    z_prime = rotated_v[1] * math.sin(y_angle) + rotated_v[2] * math.cos(y_angle)
    # store the result of the second rotation before returning
    rotated_v[0] = x_prime
    rotated_v[1] = y_prime
    rotated_v[2] = z_prime
    return rotated_v
def sharpen(gray):
blurred = gray.copy()
cv2.GaussianBlur(gray, (5, 5), 0.8, blurred)
sharpened = gray.copy()
cv2.addWeighted(gray, 1.5, blurred, 0.5, 0, sharpened)
return sharpened
def get_face(detector, img_queue, box_queue):
"""Get face from image queue. This function is used for multiprocessing"""
while True:
image = img_queue.get()
box = detector.extract_cnn_facebox(image)
box_queue.put(box)
def adjust_angle(eye_frame_part, x, y):
adjusted_x = x - eye_frame_part.shape[0] / 2
adjusted_y = y - eye_frame_part.shape[1] / 2
proportion_x = adjusted_x / eye_frame_part.shape[0]
proportion_y = adjusted_y / eye_frame_part.shape[1]
vertical_max_angle = 75
horizontal_max_angle = 75
return proportion_x * horizontal_max_angle, proportion_y * vertical_max_angle
def process_eyes(frame, landmarks):
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
gray = sharpen(gray)
left_eye = gray[int(landmarks[38][1]) - 7: int(landmarks[41][1]) + 7, int(landmarks[36][0]) - 7: int(landmarks[39][0]) + 7]
right_eye = gray[int(landmarks[44][1]) - 7: int(landmarks[46][1]) + 7, int(landmarks[42][0]) - 7: int(landmarks[45][0]) + 7]
eyes = [left_eye, right_eye]
centers = []
origins = [[int(landmarks[36][0]), int(landmarks[38][1])], [int(landmarks[42][0]), int(landmarks[44][1])]]
for eye, origin in zip(eyes, origins):
circles = cv2.HoughCircles(eye, cv2.HOUGH_GRADIENT, 1, eye.shape[0]/64, param1=200, param2=13, minRadius=5, maxRadius=30)
eye_center = (eye.shape[0] / 2, eye.shape[1] / 2)
if circles is None:
# no circles were detected on either or both eyes
# so return prematurely
return {}
circles = np.uint16(np.around(circles))
needed_circles = circles[0, :].tolist()
needed_circles.sort(key=lambda circle: math.sqrt((eye_center[0] - circle[0])**2 + (eye_center[1] - circle[1])**2))
the_circle = needed_circles[0]
# simplePutText(eye, str(len(circles)))
# for i in circles[0, :]:
# cv2.circle(eye, (i[0], i[1]), i[2], (255, 255, 255), 2)
angles = adjust_angle(eye, the_circle[0], the_circle[1])
cv2.circle(frame, (the_circle[0] + origin[0] - 7, the_circle[1] + origin[1] - 7), 2, (255, 255, 255))
centers.append(angles)
centers = {'right': centers[0], 'left': centers[1]}
return centers
def angle_to_radian(angle_degrees):
return math.pi * angle_degrees / 180
def cvt_to_radians(eye_angles):
right = eye_angles['right']
eye_angles['right'] = (angle_to_radian(right[0]), angle_to_radian(right[1]))
left = eye_angles['left']
eye_angles['left'] = (angle_to_radian(left[0]), angle_to_radian(left[1]))
return eye_angles
def main():
"""MAIN"""
# Video source from webcam or video file.
video_src = args.cam if args.cam is not None else args.video
if video_src is None:
print("Warning: video source not assigned, default webcam will be used.")
video_src = 0
cap = cv2.VideoCapture(video_src)
if video_src == 0:
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
_, sample_frame = cap.read()
# Introduce mark_detector to detect landmarks.
mark_detector = MarkDetector()
# Setup process and queues for multiprocessing.
img_queue = Queue()
box_queue = Queue()
img_queue.put(sample_frame)
box_process = Process(target=get_face, args=(
mark_detector, img_queue, box_queue,))
box_process.start()
# Introduce pose estimator to solve pose. Get one frame to setup the
# estimator according to the image size.
height, width = sample_frame.shape[:2]
pose_estimator = PoseEstimator(img_size=(height, width))
# Introduce scalar stabilizers for pose.
pose_stabilizers = [Stabilizer(
state_num=2,
measure_num=1,
cov_process=0.1,
cov_measure=0.1) for _ in range(6)]
tm = cv2.TickMeter()
while True:
# Read frame, crop it, flip it, suits your needs.
frame_got, frame = cap.read()
if frame_got is False:
break
# Crop it if frame is larger than expected.
# frame = frame[0:480, 300:940]
# If frame comes from webcam, flip it so it looks like a mirror.
if video_src == 0:
frame = cv2.flip(frame, 2)
# Pose estimation by 3 steps:
# 1. detect face;
# 2. detect landmarks;
# 3. estimate pose
# Feed frame to image queue.
img_queue.put(frame)
# Get face from box queue.
facebox = box_queue.get()
if facebox is not None:
# Detect landmarks from image of 128x128.
face_img = frame[facebox[1]: facebox[3],
facebox[0]: facebox[2]]
face_img = cv2.resize(face_img, (CNN_INPUT_SIZE, CNN_INPUT_SIZE))
face_img = cv2.cvtColor(face_img, cv2.COLOR_BGR2RGB)
tm.start()
marks = mark_detector.detect_marks([face_img])
tm.stop()
# Convert the marks locations from local CNN to global image.
marks *= (facebox[2] - facebox[0])
marks[:, 0] += facebox[0]
marks[:, 1] += facebox[1]
# Uncomment following line to show raw marks.
mark_detector.draw_marks(
frame, marks, color=(0, 255, 0))
right_corner = tuple([int(i) for i in marks[36]])
left_corner = tuple([int(i) for i in marks[45]])
# print(marks[36], marks[45])
cv2.line(frame, right_corner, left_corner, (255, 0, 0), 2)
pixel_distance = int(math.sqrt((right_corner[0] - left_corner[0]) ** 2 + (right_corner[1] - left_corner[1]) ** 2))
estimated_distance = (real_width * focal_length) / pixel_distance
cv2.putText(frame, str(round(estimated_distance, 2)), (100, 100), cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 0, 0))
# Uncomment following line to show facebox.
# mark_detector.draw_box(frame, [facebox])
# Try pose estimation with 68 points.
pose = pose_estimator.solve_pose_by_68_points(marks)
# Stabilize the pose.
steady_pose = []
            pose_np = np.array(pose)
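            # NOTE: the script is truncated here. A sketch of the usual
            # stabilization step (the Stabilizer calls below are assumptions,
            # not verified against stabilizer.py):
            #
            #   pose_np = pose_np.flatten()
            #   for value, ps in zip(pose_np, pose_stabilizers):
            #       ps.update([value])
            #       steady_pose.append(ps.state[0])
            #   steady_pose = np.reshape(steady_pose, (-1, 3))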
# ■ Chapter 3 contents
#
# 1. Activation functions
# - step function
# - sigmoid function
# - relu function
# 2. Matrix dot product
#
# ■ 3.1 Activation functions
# * Difference between a perceptron and a neural network:
# - Perceptron: a person has to manually set appropriate weight values so that the desired output is produced.
# - Neural network: the machine automatically learns appropriate weight parameter values from the data.
#
# Single-layer perceptron: step function
# Multi-layer perceptron: sigmoid, relu, ... must be used so that the multi-layer outputs of 0 or 1 become meaningful.
print('====================================================================================================')
print('== Problem 35. Implement the step function in Python.')
print('====================================================================================================\n')
def step_function(x):
if x > 0:
return 1
else:
return 0
import numpy as np
def step_function(x):
y = x > 0
    return y.astype(int)  # astype converts True to 1 and False to 0.
x_data = np.array([-1, 0, 1])
print(step_function(x_data))
print('====================================================================================================')
print('== Problem 36. Plot the step function using the step_function above.')
print('====================================================================================================\n')
import numpy as np
import matplotlib.pylab as plt
def step_function(x):
y = x > 0
    return y.astype(int)  # astype converts True to 1 and False to 0.
x_data = np.arange(-5, 5, 0.1)
y = step_function(x_data)
plt.plot(x_data, y)
plt.ylim(-0.1, 1.1)
plt.show()
print(step_function(x_data))
print('====================================================================================================')
print('== Problem 37. Make the graph come out as shown below.')
print('====================================================================================================\n')
import numpy as np
import matplotlib.pylab as plt
def step_function(x):
y = x < 0
    return y.astype(int)  # astype converts True to 1 and False to 0.
x_data = np.arange(-5, 5, 0.1)
y = step_function(x_data)
plt.plot(x_data, y)
plt.ylim(-0.1, 1.1)
plt.show()
print(step_function(x_data))
print('====================================================================================================')
print('== Problem 38. (Lunchtime problem)')
print('====================================================================================================\n')
import numpy as np
def step_function(x):
y = x > 0
    return y.astype(int)  # astype converts True to 1 and False to 0.
x = np.array([-1, 0, 0])
w = np.array([0.3, 0.4, 0.1])
print(step_function(sum(x * w)))
print('====================================================================================================')
print('== Problem 39. Implement the sigmoid function in Python.')
print('====================================================================================================\n')
import numpy as np
def sigmoid(a):
return 1/(1+np.exp(-a) + 0.00001)
print(sigmoid(2.0))
print('====================================================================================================')
print('== Problem 40. Plot the sigmoid function as a graph.')
print('====================================================================================================\n')
import numpy as np
import matplotlib.pyplot as plt
def sigmoid(a):
return 1/(1+np.exp(-a) + 0.00001)
x = np.arange(-5.0, 5.0, 0.1)
y = sigmoid(x)
plt.plot(x, y)
plt.ylim(-0.1, 1.1)
plt.show()
print('====================================================================================================')
print('== Problem 41. Make the graph come out as shown below.')
print('====================================================================================================\n')
def sigmoid(a):
    return 1/(1+np.exp(a))
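# The chapter contents above also list the ReLU function. A minimal sketch
# (added here for illustration; it is not part of the original transcript):
#
#   def relu(x):
#       return np.maximum(0, x)
#
#   x = np.arange(-5.0, 5.0, 0.1)
#   plt.plot(x, relu(x))
#   plt.show()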
import glob
import numpy as np
import os
from tqdm import tqdm
from lib.utils import mkdir_p
from lib.pc_utils import save_point_cloud
import MinkowskiEngine as ME
STANFORD_3D_IN_PATH = '/cvgl/group/Stanford3dDataset_v1.2/'
STANFORD_3D_OUT_PATH = '/home/chrischoy/datasets/Stanford3D'
STANFORD_3D_TO_SEGCLOUD_LABEL = {
4: 0,
8: 1,
12: 2,
1: 3,
6: 4,
13: 5,
7: 6,
5: 7,
11: 8,
3: 9,
9: 10,
2: 11,
0: 12,
}
class Stanford3DDatasetConverter:
CLASSES = [
'clutter', 'beam', 'board', 'bookcase', 'ceiling', 'chair', 'column', 'door', 'floor', 'sofa',
'stairs', 'table', 'wall', 'window'
]
TRAIN_TEXT = 'train'
VAL_TEXT = 'val'
TEST_TEXT = 'test'
@classmethod
def read_txt(cls, txtfile):
# Read txt file and parse its content.
with open(txtfile) as f:
pointcloud = [l.split() for l in f]
# Load point cloud to named numpy array.
pointcloud = np.array(pointcloud).astype(np.float32)
assert pointcloud.shape[1] == 6
xyz = pointcloud[:, :3].astype(np.float32)
rgb = pointcloud[:, 3:].astype(np.uint8)
return xyz, rgb
@classmethod
def convert_to_ply(cls, root_path, out_path):
"""Convert Stanford3DDataset to PLY format that is compatible with
Synthia dataset. Assumes file structure as given by the dataset.
Outputs the processed PLY files to `STANFORD_3D_OUT_PATH`.
"""
txtfiles = glob.glob(os.path.join(root_path, '*/*/*.txt'))
for txtfile in tqdm(txtfiles):
file_sp = os.path.normpath(txtfile).split(os.path.sep)
target_path = os.path.join(out_path, file_sp[-3])
out_file = os.path.join(target_path, file_sp[-2] + '.ply')
if os.path.exists(out_file):
print(out_file, ' exists')
continue
annotation, _ = os.path.split(txtfile)
subclouds = glob.glob(os.path.join(annotation, 'Annotations/*.txt'))
coords, feats, labels = [], [], []
for inst, subcloud in enumerate(subclouds):
# Read ply file and parse its rgb values.
xyz, rgb = cls.read_txt(subcloud)
_, annotation_subfile = os.path.split(subcloud)
clsidx = cls.CLASSES.index(annotation_subfile.split('_')[0])
coords.append(xyz)
feats.append(rgb)
labels.append(np.ones((len(xyz), 1), dtype=np.int32) * clsidx)
if len(coords) == 0:
print(txtfile, ' has 0 files.')
else:
# Concat
coords = np.concatenate(coords, 0)
feats = np.concatenate(feats, 0)
        labels = np.concatenate(labels, 0)
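        # NOTE: the method is truncated here. The remaining steps would
        # typically stack coords/feats/labels and write the PLY file with the
        # imported helpers (signatures assumed, not verified):
        #
        #   pointcloud = np.hstack((coords, feats, labels.astype(np.float32)))
        #   mkdir_p(target_path)
        #   save_point_cloud(pointcloud, out_file, with_label=True, verbose=False)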
"""
Author: <NAME>
Ray Tracing functions for internal waves with satGEM T,S,u,and v fields
CURRENT STATUS:
load in satGEM data and rewrite functions to remove all assumptions and run in a 4d field.
- Figure out how to make k and l and m vary in all 4 dimensions (x,y,z, and t)
- need a solid conversion method for x and y distances to lat and long (long is the tricker one)
"""
import numpy as np
import scipy
import pandas as pd
import gsw
import oceans as oc
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
from matplotlib.colors import ListedColormap, BoundaryNorm
import matplotlib.colors as colors
import matplotlib.dates as mdates
import cmocean
import h5py
from datetime import datetime, timedelta
from netCDF4 import Dataset
def instructions():
"""
Print instructions
"""
text = '''
Ray Tracing Instructions:
------------------------
1. Generate a "wave" object : rt.wave(inputs)
- enter wave frequency, horizontal and vertical wavenumber components, and initial depth
- view properties to check if things were entered correctly
- when loading and calculating N2, take chunks out at a time otherwise it will crash. (too big of a file)
satGEM Details
--------------
\t This ray tracing model utilizes the 4D velocity and density field constructed by Dr. <NAME>. Full details are available in Meijers 2013.
'''
print(text)
# Wave ray tracing equations
def Kvec(k, l, m):
"""
Returns magnitude of wavenumber vector
"""
return k**2 + l**2 + m**2
def dispersion(f, N2, k, l, m):
"""
WKB Disperision Relation as a function of and K(k, l, m)
"""
W = np.sqrt((f**2 * m**2 + N2*(k**2 + l**2))\
/ ((k**2 +l**2 + m**2)))
return W
def CGz(Omega, k, l, m, f, N2, w=0):
"""
Vertical Group Speed (includes vertical flow but is 0 by default)
"""
K2 = k**2 + l**2 + m**2
return (-1*(k**2 + l**2) * m * (N2 - f**2)) / (K2**2 * Omega)
def CGx(N2, Omega, k, l, m, u, f):
"""
Horizontal group speed in x-direction in a flow
"""
# K2 = k**2 + l**2 + m**2
cgx = ((k * m**2 * (N2 - f**2))/((k**2 + l**2 + m**2)**2 * Omega)) + u
return cgx
def CGy(N2, Omega, k, l, m, v, f):
"""
Horizontal group speed in y-direction in a flow
"""
K2 = k**2 + l**2 + m**2
cgy = (l * m**2 * (N2 - f**2))/(K2**2 * Omega) + v
return cgy
def EoZ(N2, w0, f, ):
"""
Wave ray energy when variations can only occur in the vertical (i.e. N2 and
flow only vary with depth not horizontally) - Olbers 1981
"""
Ez = np.squeeze((w0**2 * (N2 - f**2))
/ ((w0**2 - f**2)**(3 / 2) * (N2 - w0**2)**(1 / 2)))
return Ez
def refraction(N, k, l, m, dN, di, Omega):
"""
Refraction index of internal wave
"""
K = k**2 + l**2 + m**2
return ((N*(k**2 + l**2)) / (K * Omega)) * (dN/di)
def dk(dU, dV,dx, k, l , m, dN, N, Omega):
"""
Change of wavenumber k in time
"""
ri = refraction(N, k, l, m, dN, dx, Omega)
dk = -1 * (ri + k * (dU/dx) + l * (dV/dx))
return dk
def dl(dU, dV, dy, k, l, m, dN, N, Omega):
"""
    Change of wavenumber l in time
"""
ri = refraction(N, k, l, m, dN, dy, Omega)
dl = -1 * (ri + k * (dU / dy) + l * (dV / dy))
return dl
def dm(dU, dV, dz, k, l, m, dN, N, Omega):
"""
    Discretized change of wavenumber m in time
"""
ri = refraction(N, k, l, m, dN, dz, Omega)
dm = -1 * (ri + k * (dU / dz) + l * (dV / dz))
return dm
def dOmega(rx, ry, rz, k, l, dU, dV):
"""
Change in intrinsic frequency / dispersion relation
"""
    dW = (rx + ry + rz) + k * dU + l * dV
return dW
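# Example (illustrative): one explicit-Euler step of the ray equations, combining
# the group velocities and wavenumber tendencies defined above. dt and the local
# gradients (dUdx, dNdz, ...) are placeholders for values taken from satGEM.
#
#   Omega = dispersion(f, N2, k, l, m)
#   x += CGx(N2, Omega, k, l, m, u, f) * dt
#   y += CGy(N2, Omega, k, l, m, v, f) * dt
#   z += CGz(Omega, k, l, m, f, N2) * dt
#   k += dk(dUdx, dVdx, dx, k, l, m, dNdx, np.sqrt(N2), Omega) * dt
#   l += dl(dUdy, dVdy, dy, k, l, m, dNdy, np.sqrt(N2), Omega) * dt
#   m += dm(dUdz, dVdz, dz, k, l, m, dNdz, np.sqrt(N2), Omega) * dt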
def make_segments(x, y):
"""
Create list of line segments from x and y coordinates, in the correct format
for LineCollection: an array of the form numlines x (points per line) x 2 (x
and y) array
"""
points = np.array([x, y]).T.reshape(-1, 1, 2)
segments = np.concatenate([points[:-1], points[1:]], axis=1)
return segments
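# Example (illustrative): colour a ray path by time with a LineCollection
# (x, y, t are placeholder arrays for a computed ray).
#
#   segments = make_segments(x, y)
#   lc = LineCollection(segments, cmap=cmocean.cm.thermal)
#   lc.set_array(np.asarray(t[:-1]))
#   plt.gca().add_collection(lc)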
def inverse_hav(x, y, lon1, lat1):
"""
Uses the inverse haversine function to convert x and y distance to a new lat and long coordinate. (see ray tracing docs for full formula)
Parameters
----------
x: x distance traveled (east-west)
y: y distance traveled (north-south)
lon1: starting longitude (Degrees)
lat1: starting latitude (Degrees)
Returns
-------
lon2: final longitude (Degrees)
lat2: final latitude (Degrees)
"""
r = 6371e3 # radius of the earth
d = np.sqrt(x**2 + y**2) # total distance traveled
lat2 = lat1 + (y/111.11e3) # convert y distance to a new latitude point
# Convert to radians for use in trig functions
latrev1 = np.deg2rad(lat1)
latrev2 = np.deg2rad(lat2)
# inverse haversine formula
shift = 0.5 * np.rad2deg(np.arccos(1 - 2 * ((np.sin(d / (2 * r))**2
- np.sin((latrev2 - latrev1)/2)**2) /
(np.cos(latrev1) * np.cos(latrev2)))))
if x < 0:
lon2 = lon1 - shift
else:
lon2 = lon1 + shift
return lon2, lat2 # in degrees
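# Example (illustrative): the position reached after travelling 10 km east and
# 5 km north of (55 S, 55 W).
#
#   lon2, lat2 = inverse_hav(10e3, 5e3, -55.0, -55.0)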
class Wave(object):
"""
Creates a wave which has varying functionality including:
- time forward modelling
- time reverse modelling
- variable velocity and density field inputs
- plotting and saving rays
- HOPEFULLY: built in gui
"""
# Add functionality for a default Buoyancy Frequncy and Velocity Profile
def __init__(self, k=10*1000, l=10*1000, t0=datetime(2012, 11, 2, 3, 0, 0),
m=500, w0=-1.3e-4, z0=500, lat=-55, lon=-55):
# Convert wavelengths into wavenumbers
# Save initial values becuase running the model will change
# the wave features.
self.k = np.array([k], dtype='float')
self.l = np.array([l], dtype='float')
self.m = np.array([m], dtype='float')
self.w0 = np.array([w0], dtype='float')
self.kh = np.array([np.sqrt(self.k**2 + self.l**2)])
self.z0 = np.array([z0], dtype='float')
self.lat0 = np.array([lat], dtype='float')
self.lon0 = np.array([lon], dtype='float')
self.t0 = t0
# These are empty for now- they get filled in during model runs.
self.x_all = []
self.y_all = []
self.z_all = []
self.m_all = []
self.w0_all = []
self.E_all = []
self.Ac_all = []
def help(self):
"""
Print instructions on how to use wave class
"""
text = '''
Instructions for using ray tracing model.
\nGenerate a wave with chosen properties or use the defualt Parameters
'''
print(text)
def model_error_message(self, x, y, z, m, idx, idx2):
error_message = '''
current variables:
-----------------
x = {}
y = {}
z = {}
N2 = {}
U = {}
V = {}
m = {}
'''.format(x, y, z,
self.N2[idx2], self.U[idx], self.V[idx], m)
return error_message
def properties(self):
"""
Print wave properties
"""
txt = '''Wave Properties:
---------------
k: {}
l: {}
m: {}
kh: {}
Frequency: {}
'''.format(np.array2string(self.k), self.l, self.m, self.kh, self.w0)
print(txt)
class satGEM_field(object):
"""
load in the satGEM data as an object (this might be wierd though becuase the h5py module loads in each file as an object so not sure...)
The objects built in functions can then be used to easily access the data set without ever having to load the whole thing in.
Also Contains bathymetry data
"""
def __init__(self):
# Load satGEM data as h5py file objects
gamma_file = h5py.File('DIMES_GAMMA_09_12_upd.mat')
vel_file = h5py.File('DIMES_vel_09_12_upd.mat')
ts_file = h5py.File('DIMES_TS_09_12_upd.mat')
gamma = gamma_file['satGEM_gamma']
self.u = vel_file['satGEM_east']
self.v = vel_file['satGEM_north']
self.temp = ts_file['satGEM_temp']
self.sal = ts_file['satGEM_sal']
# Data grids
time = np.squeeze(np.array(gamma_file['time']))
# convert from matlab to python date time.
self.time = np.array([oc.matlab2datetime(timeIn) for timeIn in time])
self.depth_grid = gamma_file['depthlvl']
self.lon = gamma_file['lons']
self.lat = gamma_file['lats']
# The u and v grids are one point off each so I need
# to figure out how to handle this
self.centerlat = vel_file['centerlat']
self.centerlon = vel_file['centerlon']
###################################
# Bathymetry file
self.bathy = Dataset('bathy.nc')
def locate(self, lon, lat, depth, time):
"""
Locate point/points within the satgem data set
Parameters
----------
lon: longitude of point
lat: latitude of point
depth: depth of point
time: of point
Returns
-------
lon_id: index along longitude axis
lat_id: index along latitude axis
depth_id: index along latitude axis
time_id: index along time axis
These are for the velocity grids
centerlon_id: index along centerlon axis
centerlat_id: index along centerlat axis
"""
# Add warning for out of time and space boundaries.
lon_id = np.argmin(np.abs(self.lon[:] - lon))
lat_id = np.argmin(np.abs(self.lat[:] - lat))
depth_id = np.argmin(np.abs(self.depth_grid[:] - depth))
time_id = np.argmin(np.abs(self.time[:] - time))
        centerlon_id = np.argmin(np.abs(self.centerlon[:] - lon))
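        # NOTE: the method is truncated here; based on the docstring, the
        # remaining lines locate the point on the centerlat grid and return all
        # indices (reconstruction, not the original code).
        centerlat_id = np.argmin(np.abs(self.centerlat[:] - lat))
        return lon_id, lat_id, depth_id, time_id, centerlon_id, centerlat_id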
"""
CellProfiler is distributed under the GNU General Public License.
See the accompanying file LICENSE for details.
Copyright (c) 2003-2009 Massachusetts Institute of Technology
Copyright (c) 2009-2014 Broad Institute
All rights reserved.
Please see the AUTHORS file for credits.
Website: http://www.cellprofiler.org
"""
'''<b>Atrous filter</b> - an image processing module which applies the "A Trous" wavelet transform to an image.
<hr>
This is a module that takes one image as an input and
produces a second image for downstream processing.
'''
#################################
#
# Imports from useful Python libraries
#
#################################
import platform
import numpy as np
import imp
import libatrous
from scipy.ndimage import gaussian_gradient_magnitude, correlate1d
#################################
#
# Imports from CellProfiler
#
# The package aliases are the standard ones we use
# throughout the code.
#
##################################
import cellprofiler.cpimage as cpi
try:
import cellprofiler.module as cpm
except:
import cellprofiler.cpmodule as cpm
import cellprofiler.settings as cps
###################################
#
# Constants
#
# It's good programming practice to replace things like strings with
# constants if they will appear more than once in your program. That way,
# if someone wants to change the text, that text will change everywhere.
# Also, you can't misspell it by accident.
###################################
KERNEL_CHOICES = libatrous.get_names() #["Linear 3x3", "B-Spline 5x5"]
###################################
#
# The module class
#
# Your module should "inherit" from cellprofiler.cpmodule.CPModule.
# This means that your module will use the methods from CPModule unless
# you re-implement them. You can let CPModule do most of the work and
# implement only what you need.
#
###################################
class Atrous(cpm.CPModule):
###############################################
#
# The module starts by declaring the name that's used for display,
# the category under which it is stored and the variable revision
# number which can be used to provide backwards compatibility if
# you add user-interface functionality later.
#
###############################################
module_name = "AtrousFilter"
category = "Image Processing"
variable_revision_number = 1
###############################################
#
# create_settings is where you declare the user interface elements
# (the "settings") which the user will use to customize your module.
#
# You can look at other modules and in cellprofiler.settings for
# settings you can use.
#
################################################
def create_settings(self):
#
# The ImageNameSubscriber "subscribes" to all ImageNameProviders in
# prior modules. Modules before yours will put images into CellProfiler.
# The ImageSubscriber gives your user a list of these images
# which can then be used as inputs in your module.
#
self.input_image_name = cps.ImageNameSubscriber(
# The text to the left of the edit box
"Input image name:",
# HTML help that gets displayed when the user presses the
# help button to the right of the edit box
doc = """This is the image that the module operates on. You can
choose any image that is made available by a prior module.
<br>
<b>AtrousFilter</b> will produce a filtered version of the input image.
""")
#
# The ImageNameProvider makes the image available to subsequent
# modules.
#
self.output_image_name = cps.ImageNameProvider(
"Output image name:",
# The second parameter holds a suggested name for the image.
"OutputImage",
doc = """This is the image resulting from the operation.""")
#
# Here's a choice box - the user gets a drop-down list of what
# can be done.
#
n_kernel = len(KERNEL_CHOICES)
doc = "Choose which kernel to filter with: <ul>"
for i in range(n_kernel):
kernel = libatrous.get_kernel(i)
doc += "<li><i>%s:</i> %s</li>" % (KERNEL_CHOICES[i],str(kernel))
doc += "</ul>"
self.atrous_choice = cps.Choice(
"Kernel type:",
# The choice takes a list of possibilities. The first one
# is the default - the one the user will typically choose.
KERNEL_CHOICES,
#
# Here, in the documentation, we do a little trick so that
# we use the actual text that's displayed in the documentation.
#
# %(KERNEL_CHOICES[0])s will get changed into "Linear 3x3"
# etc. Python will look in globals() for the "ATROUS_" names
# and paste them in where it sees %(ATROUS_...)s
#
# The <ul> and <li> tags make a neat bullet-point list in the docs
#
doc = doc % globals()
)
self.atrous_scalerange = cps.IntegerRange(
"Band-pass filter width (smallest / largest scale):", (1, 8),
minval=1, maxval=10,
doc="""The smallest and largest scale to include in the filter determine the width
of the band-pass filter. Single scale filters are entered by using the same scale
index for the smallest and largest scale.<br><br>
A High-pass filter would start at scale 1 and not include the residual low-pass
filter, whereas a low-pass filter would start at a small scale greater than 1
and would include the residual low-pass filter.<br><br>
Any filter whose smallest scale is 1 and includes the residual low-pass filter
would output the input image.
""")
self.atrous_lowpass = cps.Binary(
"Do you want to include the residual Low Pass image?",
False,
doc = """Add the residual Low Pass image to the filtered image.
""")
self.atrous_threshold = cps.Binary(
"Do you want to threshold the output?",
True,
doc = """The output image will be thresholded
""")
self.atrous_threshrange = cps.FloatRange(
"Threshold range:", (0, 100),
doc="""The lower and upper limits for the threshold range.
""")
self.atrous_normalise = cps.Binary(
"Do you want to normalise the output?",
False,
doc = """The output image will be normalised.
""")
#
# This method ensures that the scales are within range
def validate_scales(self):
low_scale,high_scale = self.atrous_scalerange.value
if low_scale > high_scale:
low_scale = high_scale
if high_scale < low_scale:
high_scale = low_scale
self.atrous_scalerange.value = (low_scale,high_scale)
#
# The "settings" method tells CellProfiler about the settings you
# have in your module. CellProfiler uses the list for saving
# and restoring values for your module when it saves or loads a
# pipeline file.
#
def settings(self):
self.validate_scales()
return [self.input_image_name, self.output_image_name,
self.atrous_choice, self.atrous_scalerange,
self.atrous_lowpass,
self.atrous_threshold,self.atrous_threshrange,
self.atrous_normalise]
#
# visible_settings tells CellProfiler which settings should be
# displayed and in what order.
#
# You don't have to implement "visible_settings" - if you delete
# visible_settings, CellProfiler will use "settings" to pick settings
# for display.
#
def visible_settings(self):
self.validate_scales()
result = [self.input_image_name, self.output_image_name,
self.atrous_choice, self.atrous_scalerange,
self.atrous_lowpass,
self.atrous_threshold]
#only show the min/max threshold values if threshold is ticked
if self.atrous_threshold:
result += [self.atrous_threshrange]
result += [self.atrous_normalise]
return result
#
# CellProfiler calls "run" on each image set in your pipeline.
# This is where you do the real work.
#
def run(self, workspace):
#
# Get the input and output image names. You need to get the .value
# because otherwise you'll get the setting object instead of
# the string name.
#
input_image_name = self.input_image_name.value
output_image_name = self.output_image_name.value
#
# Get the image set. The image set has all of the images in it.
#
image_set = workspace.image_set
#
# Get the input image object. We want a grayscale image here.
# The image set will convert a color image to a grayscale one
# and warn the user.
#
input_image = workspace.image_set.get_image(input_image_name,
must_be_grayscale = True)
#
# Get the pixels - these are a 2-d Numpy array.
#
value_max = input_image.scale
pixels = input_image.pixel_data
#
# Get the wavelet parameters
#
kernel_index = KERNEL_CHOICES.index(self.atrous_choice)
kernel = libatrous.get_kernel(kernel_index)
low_scale,high_scale = self.atrous_scalerange.value
low_thresh,high_thresh = self.atrous_threshrange.value
low_thresh /= value_max
high_thresh /= value_max
#
# build the output_pixels array iteratively
#
lowpass = pixels.astype(np.float32)
output_pixels = np.zeros(pixels.shape,np.float32)
for i in range(high_scale):
bandpass,lowpass = libatrous.iterscale(lowpass,kernel,i)
if i >= (low_scale-1):
output_pixels += bandpass
if self.atrous_lowpass:
output_pixels += lowpass
#
# Do the thresholding (if needed -- always)
#
if self.atrous_threshold:
mi = np.min(output_pixels)
ma = np.max(output_pixels)
output_pixels[output_pixels < low_thresh] = low_thresh
output_pixels[output_pixels > high_thresh] = high_thresh
#
# Do we normalise?
#
if self.atrous_normalise:
mi = np.min(output_pixels)
            ma = np.max(output_pixels)
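            # NOTE: the module is truncated here. A sketch of the typical
            # remaining steps (assumed, not the original code): rescale to
            # [0, 1] inside this branch, then hand the result back to
            # CellProfiler at the end of run():
            #
            #   output_pixels = (output_pixels - mi) / max(ma - mi, 1e-12)
            #   output_image = cpi.Image(output_pixels, parent_image=input_image)
            #   image_set.add(output_image_name, output_image)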
import numpy as np
def _recall_values(labels, x_absolute=False, y_absolute=False):
n_docs = len(labels)
n_pos_docs = sum(labels)
x = np.arange(1, n_docs + 1)
recall = np.cumsum(labels)
if not x_absolute:
x = x / n_docs
if y_absolute:
y = recall
else:
y = recall / n_pos_docs
return x.tolist(), y.tolist()
def _wss_values(labels, x_absolute=False, y_absolute=False):
n_docs = len(labels)
n_pos_docs = sum(labels)
docs_found = np.cumsum(labels)
docs_found_random = np.round(np.linspace(0, n_pos_docs, n_docs))
# Get the first occurrence of 1, 2, 3, ..., n_pos_docs in both arrays.
when_found = np.searchsorted(docs_found, np.arange(1, n_pos_docs + 1))
when_found_random = np.searchsorted(docs_found_random,
np.arange(1, n_pos_docs + 1))
n_found_earlier = when_found_random - when_found
x = np.arange(1, n_pos_docs + 1)
if not x_absolute:
x = x / n_pos_docs
if y_absolute:
y = n_found_earlier
else:
y = n_found_earlier / n_docs
return x.tolist(), y.tolist()
def _erf_values(labels, x_absolute=False, y_absolute=False):
n_docs = len(labels)
n_pos_docs = sum(labels)
docs_found = np.cumsum(labels)
docs_found_random = np.round(np.linspace(0, n_pos_docs, n_docs))
extra_records_found = docs_found - docs_found_random
    x = np.arange(1, n_docs + 1)
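    # NOTE: the function is truncated here. By analogy with _recall_values and
    # _wss_values, the remaining lines would be (reconstruction, not original):
    #
    #   if not x_absolute:
    #       x = x / n_docs
    #   if y_absolute:
    #       y = extra_records_found
    #   else:
    #       y = extra_records_found / n_pos_docs
    #   return x.tolist(), y.tolist()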
print("\n===================================================================================================")
import argparse
import copy
import gc
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import h5py
import os
import random
from tqdm import tqdm
import torch
import torchvision
import torch.nn as nn
import torch.backends.cudnn as cudnn
from torchvision.utils import save_image
import timeit
from PIL import Image
from opts import parse_opts
args = parse_opts()
wd = args.root_path
os.chdir(wd)
from utils import *
from models import *
from Train_cGAN import *
from Train_CcGAN import *
from eval_metrics import cal_FID, cal_labelscore
#######################################################################################
''' Settings '''
#######################################################################################
#-----------------------------
# images
NC = args.num_channels #number of channels
IMG_SIZE = args.img_size
#--------------------------------
# system
NGPU = torch.cuda.device_count()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
#-------------------------------
# seeds
random.seed(args.seed)
torch.manual_seed(args.seed)
torch.backends.cudnn.deterministic = True
cudnn.benchmark = False
np.random.seed(args.seed)
#-------------------------------
# output folders
save_models_folder = wd + '/output/saved_models'
os.makedirs(save_models_folder, exist_ok=True)
save_images_folder = wd + '/output/saved_images'
os.makedirs(save_images_folder, exist_ok=True)
save_traincurves_folder = wd + '/output/training_loss_fig'
os.makedirs(save_traincurves_folder, exist_ok=True)
#######################################################################################
''' Data loader '''
#######################################################################################
# data loader
data_filename = args.data_path + '/Cell200_{}x{}.h5'.format(IMG_SIZE, IMG_SIZE)
hf = h5py.File(data_filename, 'r')
counts = hf['CellCounts'][:]
counts = counts.astype(float)
images = hf['IMGs_grey'][:]
hf.close()
raw_images = copy.deepcopy(images)
raw_counts = copy.deepcopy(counts)
##############
### show some real images
if args.show_real_imgs:
unique_counts_show = sorted(list(set(counts)))
nrow = len(unique_counts_show); ncol = 10
images_show = np.zeros((nrow*ncol, images.shape[1], images.shape[2], images.shape[3]))
for i in range(nrow):
curr_label = unique_counts_show[i]
indx_curr_label = np.where(counts==curr_label)[0][0:ncol]
for j in range(ncol):
images_show[i*ncol+j,:,:,:] = images[indx_curr_label[j]]
print(images_show.shape)
images_show = (images_show/255.0-0.5)/0.5
images_show = torch.from_numpy(images_show)
save_image(images_show.data, save_images_folder +'/real_images_grid_{}x{}.png'.format(nrow, ncol), nrow=ncol, normalize=True)
##############
# images for training GAN
# for each cell count select n_imgs_per_cellcount images
n_imgs_per_cellcount = args.num_imgs_per_count
selected_cellcounts = np.arange(args.start_count, args.end_count+1, args.stepsize_count)
n_unique_cellcount = len(selected_cellcounts)
images_subset = np.zeros((n_imgs_per_cellcount*n_unique_cellcount, NC, IMG_SIZE, IMG_SIZE), dtype=np.uint8)
counts_subset = np.zeros(n_imgs_per_cellcount*n_unique_cellcount)
for i in range(n_unique_cellcount):
curr_cellcount = selected_cellcounts[i]
index_curr_cellcount = np.where(counts==curr_cellcount)[0]
if i == 0:
images_subset = images[index_curr_cellcount[0:n_imgs_per_cellcount]]
counts_subset = counts[index_curr_cellcount[0:n_imgs_per_cellcount]]
else:
images_subset = np.concatenate((images_subset, images[index_curr_cellcount[0:n_imgs_per_cellcount]]), axis=0)
counts_subset = np.concatenate((counts_subset, counts[index_curr_cellcount[0:n_imgs_per_cellcount]]))
# for i
images = images_subset
counts = counts_subset
del images_subset, counts_subset; gc.collect()
print("Number of images: %d" % len(images))
if args.GAN == "cGAN": #treated as classification; convert cell counts to class labels
unique_counts = np.sort(np.array(list(set(raw_counts)))) #not counts because we want the last element is the max_count
num_unique_counts = len(unique_counts)
print("{} unique counts are split into {} classes".format(num_unique_counts, args.cGAN_num_classes))
## convert cell counts to class labels and vice versa
### step 1: prepare two dictionaries
label2class = dict()
class2label = dict()
num_labels_per_class = num_unique_counts//args.cGAN_num_classes
class_cutoff_points = [unique_counts[0]] #the cutoff points on [min_label, max_label] to determine classes; each interval is a class
curr_class = 0
for i in range(num_unique_counts):
label2class[unique_counts[i]]=curr_class
if (i+1)%num_labels_per_class==0 and (curr_class+1)!=args.cGAN_num_classes:
curr_class += 1
class_cutoff_points.append(unique_counts[i+1])
class_cutoff_points.append(unique_counts[-1])
assert len(class_cutoff_points)-1 == args.cGAN_num_classes
### the cell count of each interval equals to the average of the two end points
for i in range(args.cGAN_num_classes):
class2label[i] = (class_cutoff_points[i]+class_cutoff_points[i+1])/2
### step 2: convert cell counts to class labels
counts_new = -1*np.ones(len(counts))
for i in range(len(counts)):
counts_new[i] = label2class[counts[i]]
assert np.sum(counts_new<0)==0
counts = counts_new
del counts_new; gc.collect()
unique_counts = np.sort(np.array(list(set(counts)))).astype(int)
else:
counts /= args.end_count # normalize to [0,1]
if args.kernel_sigma<0:
std_count = np.std(counts)
args.kernel_sigma =1.06*std_count*(len(counts))**(-1/5)
print("\n Use rule-of-thumb formula to compute kernel_sigma >>>")
print("\n The std of {} cell counts is {} so the kernel sigma is {}".format(len(counts), std_count, args.kernel_sigma))
if args.kappa<0:
unique_counts_norm = np.sort(np.array(list(set(counts))))
diff_list = []
for i in range(1,len(unique_counts_norm)):
diff_list.append(unique_counts_norm[i] - unique_counts_norm[i-1])
kappa_base = np.abs(args.kappa)*np.max(np.array(diff_list))
if args.threshold_type=="hard":
args.kappa = kappa_base
else:
args.kappa = 1/kappa_base**2
#end if
#######################################################################################
''' GAN training '''
#######################################################################################
print("{}, Sigma is {}, Kappa is {}".format(args.threshold_type, args.kernel_sigma, args.kappa))
if args.GAN == 'CcGAN':
save_GANimages_InTrain_folder = save_images_folder + '/{}_{}_{}_{}_InTrain'.format(args.GAN, args.threshold_type, args.kernel_sigma, args.kappa)
else:
save_GANimages_InTrain_folder = save_images_folder + '/{}_InTrain'.format(args.GAN)
os.makedirs(save_GANimages_InTrain_folder, exist_ok=True)
start = timeit.default_timer()
print("\n Begin Training %s:" % args.GAN)
#----------------------------------------------
# cGAN: treated as a classification dataset
if args.GAN == "cGAN":
Filename_GAN = save_models_folder + '/ckpt_{}_niters_{}_nclass_{}_seed_{}.pth'.format(args.GAN, args.niters_gan, args.cGAN_num_classes, args.seed)
print(Filename_GAN)
if not os.path.isfile(Filename_GAN):
print("There are {} unique cell counts".format(len(unique_counts)))
netG = cond_cnn_generator(nz=args.dim_gan, num_classes=args.cGAN_num_classes)
netD = cond_cnn_discriminator(num_classes=args.cGAN_num_classes)
netG = nn.DataParallel(netG)
netD = nn.DataParallel(netD)
# Start training
netG, netD = train_cGAN(images, counts, netG, netD, save_images_folder=save_GANimages_InTrain_folder, save_models_folder = save_models_folder)
# store model
torch.save({
'netG_state_dict': netG.state_dict(),
'netD_state_dict': netD.state_dict(),
}, Filename_GAN)
else:
print("Loading pre-trained generator >>>")
checkpoint = torch.load(Filename_GAN)
netG = cond_cnn_generator(args.dim_gan, num_classes=args.cGAN_num_classes).to(device)
netG = nn.DataParallel(netG)
netG.load_state_dict(checkpoint['netG_state_dict'])
# function for sampling from a trained GAN
def fn_sampleGAN_given_label(nfake, count, batch_size):
fake_counts = np.ones(nfake) * count #normalized count
count = int(count*args.end_count) #back to original scale of cell count
fake_images, _ = SampcGAN_given_label(netG, count, class_cutoff_points=class_cutoff_points, NFAKE = nfake, batch_size = batch_size)
return fake_images, fake_counts
#----------------------------------------------
# Concitnuous cGAN
elif args.GAN == "CcGAN":
Filename_GAN = save_models_folder + '/ckpt_{}_niters_{}_seed_{}_{}_{}_{}.pth'.format(args.GAN, args.niters_gan, args.seed, args.threshold_type, args.kernel_sigma, args.kappa)
print(Filename_GAN)
if not os.path.isfile(Filename_GAN):
netG = cont_cond_cnn_generator(nz=args.dim_gan)
netD = cont_cond_cnn_discriminator()
netG = nn.DataParallel(netG)
netD = nn.DataParallel(netD)
# Start training
netG, netD = train_CcGAN(args.kernel_sigma, args.kappa, images, counts, netG, netD, save_images_folder=save_GANimages_InTrain_folder, save_models_folder = save_models_folder)
# store model
torch.save({
'netG_state_dict': netG.state_dict(),
'netD_state_dict': netD.state_dict(),
}, Filename_GAN)
else:
print("Loading pre-trained generator >>>")
checkpoint = torch.load(Filename_GAN)
netG = cont_cond_cnn_generator(args.dim_gan).to(device)
netG = nn.DataParallel(netG)
netG.load_state_dict(checkpoint['netG_state_dict'])
def fn_sampleGAN_given_label(nfake, label, batch_size):
fake_images, fake_counts = SampCcGAN_given_label(netG, label, path=None, NFAKE = nfake, batch_size = batch_size)
return fake_images, fake_counts
stop = timeit.default_timer()
print("GAN training finished; Time elapses: {}s".format(stop - start))
#######################################################################################
''' Evaluation '''
#######################################################################################
if args.comp_FID:
#for FID
PreNetFID = encoder(dim_bottleneck=512).to(device)
PreNetFID = nn.DataParallel(PreNetFID)
Filename_PreCNNForEvalGANs = save_models_folder + '/ckpt_AE_epoch_50_seed_2020_CVMode_False.pth'
checkpoint_PreNet = torch.load(Filename_PreCNNForEvalGANs)
PreNetFID.load_state_dict(checkpoint_PreNet['net_encoder_state_dict'])
# for LS
PreNetLS = ResNet34_regre(ngpu = NGPU).to(device)
Filename_PreCNNForEvalGANs = save_models_folder + '/ckpt_PreCNNForEvalGANs_ResNet34_regre_epoch_200_seed_2020_Transformation_True_Cell_200.pth'
checkpoint_PreNet = torch.load(Filename_PreCNNForEvalGANs)
PreNetLS.load_state_dict(checkpoint_PreNet['net_state_dict'])
#####################
# generate nfake images
print("Start sampling {} fake images per label from GAN >>>".format(args.nfake_per_label))
eval_labels_norm = np.arange(args.start_count, args.end_count + 1) / args.end_count
num_eval_labels = len(eval_labels_norm)
## wo dump
for i in tqdm(range(num_eval_labels)):
curr_label = eval_labels_norm[i]
curr_fake_images, curr_fake_labels = fn_sampleGAN_given_label(args.nfake_per_label, curr_label, args.samp_batch_size)
if i == 0:
fake_images = curr_fake_images
fake_labels_assigned = curr_fake_labels.reshape(-1)
else:
fake_images = np.concatenate((fake_images, curr_fake_images), axis=0)
fake_labels_assigned = np.concatenate((fake_labels_assigned, curr_fake_labels.reshape(-1)))
assert len(fake_images) == args.nfake_per_label*num_eval_labels
assert len(fake_labels_assigned) == args.nfake_per_label*num_eval_labels
print("End sampling!")
print("\n We got {} fake images.".format(len(fake_images)))
## dump fake images for evaluation: NIQE
if args.dump_fake_for_NIQE:
if args.GAN == "cGAN":
dump_fake_images_folder = wd + "/dump_fake_data/fake_images_cGAN_nclass_{}_nsamp_{}".format(args.cGAN_num_classes, len(fake_images))
else:
if args.kernel_sigma>1e-30:
dump_fake_images_folder = wd + "/dump_fake_data/fake_images_CcGAN_{}_nsamp_{}".format(args.threshold_type, len(fake_images))
else:
dump_fake_images_folder = wd + "/dump_fake_data/fake_images_CcGAN_limit_nsamp_{}".format(len(fake_images))
for i in tqdm(range(len(fake_images))):
label_i = round(fake_labels_assigned[i]*args.end_count)
filename_i = dump_fake_images_folder + "/{}_{}.png".format(i, label_i)
os.makedirs(os.path.dirname(filename_i), exist_ok=True)
image_i = fake_images[i]
image_i = ((image_i*0.5+0.5)*255.0).astype(np.uint8)
image_i_pil = Image.fromarray(image_i[0])
image_i_pil.save(filename_i)
#end for i
print("End sampling {} fake images per label from GAN >>>".format(args.nfake_per_label))
#####################
# normalize real images and labels
real_images = (raw_images/255.0-0.5)/0.5
real_labels = raw_counts/args.end_count
nfake_all = len(fake_images)
nreal_all = len(real_images)
#####################
# Evaluate FID within a sliding window with a radius R on the label's range (i.e., [args.start_count,args.end_count]). The center of the sliding window locate on [R+args.start_count,2,3,...,args.end_count-R].
center_start = args.start_count+args.FID_radius
center_stop = args.end_count-args.FID_radius
centers_loc = np.arange(center_start, center_stop+1)
FID_over_centers = np.zeros(len(centers_loc))
labelscores_over_centers = np.zeros(len(centers_loc)) #label score at each center
num_realimgs_over_centers = np.zeros(len(centers_loc))
for i in range(len(centers_loc)):
center = centers_loc[i]
interval_start = (center - args.FID_radius)/args.end_count
interval_stop = (center + args.FID_radius)/args.end_count
indx_real = np.where((real_labels>=interval_start)*(real_labels<=interval_stop)==True)[0]
np.random.shuffle(indx_real)
real_images_curr = real_images[indx_real]
num_realimgs_over_centers[i] = len(real_images_curr)
indx_fake = np.where((fake_labels_assigned>=interval_start)*(fake_labels_assigned<=interval_stop)==True)[0]
np.random.shuffle(indx_fake)
fake_images_curr = fake_images[indx_fake]
fake_labels_assigned_curr = fake_labels_assigned[indx_fake]
# FID
FID_over_centers[i] = cal_FID(PreNetFID, real_images_curr, fake_images_curr, batch_size = 200, resize = None)
# Label score
labelscores_over_centers[i], _ = cal_labelscore(PreNetLS, fake_images_curr, fake_labels_assigned_curr, min_label_before_shift=0, max_label_after_shift=args.end_count, batch_size = 200, resize = None)
print("\r Center:{}; Real:{}; Fake:{}; FID:{}; LS:{}.".format(center, len(real_images_curr), len(fake_images_curr), FID_over_centers[i], labelscores_over_centers[i]))
# average over all centers
print("\n {} SFID: {}({}); min/max: {}/{}.".format(args.GAN, np.mean(FID_over_centers), np.std(FID_over_centers), np.min(FID_over_centers), np.max(FID_over_centers)))
print("\n {} LS over centers: {}({}); min/max: {}/{}.".format(args.GAN, np.mean(labelscores_over_centers), | np.std(labelscores_over_centers) | numpy.std |
import os
import sys
import glob
import gzip
import json
import argparse
import numpy as np
import scipy.sparse as sp
from sklearn.linear_model import LogisticRegression
def load_sparse_embeddings(path, words_to_keep=None, max_words=-1):
"""
Reads in the sparse embedding file.
Parameters
----------
path : str
        Location of the gzipped sparse embedding file.
    words_to_keep : list, optional
        List of words to keep.
        If None, no filtering takes place.
max_words : int, optional
Indicates the number of lines to read in.
If negative, the entire file gets processed.
Returns
-------
tuple:
w2i:
Wordform to identifier dictionary,
i2w:
Identifier to wordform dictionary,
W:
The sparse embedding matrix
"""
i2w = {}
data, indices, indptr = [], [], [0]
with gzip.open(path, 'rt') as f:
for line_number, line in enumerate(f):
if line_number == max_words:
break
parts = line.rstrip().split(' ')
if words_to_keep is not None and parts[0] not in words_to_keep:
continue
i2w[len(i2w)] = parts[0]
for i, value in enumerate(parts[1:]):
value = float(value)
if value != 0:
data.append(float(value))
indices.append(i)
indptr.append(len(indices))
return {w: i for i, w in i2w.items()}, i2w, sp.csr_matrix((data, indices, indptr), shape=(len(indptr) - 1, i + 1))
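# Illustrative usage sketch for load_sparse_embeddings (not called anywhere).
# 'models/sparse_example.gz' is a hypothetical path; only the first 1000 lines
# are read to keep the example cheap.
def _demo_load_sparse_embeddings(path='models/sparse_example.gz'):
    w2i, i2w, W = load_sparse_embeddings(path, max_words=1000)
    print('vocabulary size:', len(w2i), 'embedding matrix shape:', W.shape)
    some_word = i2w[0]
    print(some_word, 'has', W[w2i[some_word]].nnz, 'non-zero coordinates')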
def get_word_index(w2i, token):
if token in w2i:
return w2i[token]
elif token.lower() in w2i:
return w2i[token.lower()]
else:
return -1
def sparse_pmi(indices, vals, row_marginal, col_marginal, total, nonneg_pmi=True):
pmis = np.ma.log((total * vals) / (row_marginal * col_marginal)).filled(0)
pmis /= -np.ma.log(vals/total).filled(1)
indices_to_return, pmis_to_return = [], []
for idx in range(len(indices)):
if not nonneg_pmi or pmis[0,idx] > 0:
indices_to_return.append(indices[idx])
pmis_to_return.append(pmis[0,idx])
return indices_to_return, pmis_to_return
def calc_pmi(M):
total, row_sum, col_sum = M.sum(), M.sum(axis=1), M.sum(axis=0)+1e-11
data, indices, ind_ptr = [], [], [0]
for i, r in enumerate(M):
if np.any(r.data==0):
zero_idx = np.where(r.data==0)[0]
#logging.warning(("contains 0: ",i,self.id_to_label[i], [r.indices[z] for z in zero_idx]))
idxs, pmi_values = sparse_pmi(r.indices, r.data, row_sum[i,0], col_sum[0, r.indices], total)
indices.extend(idxs)
data.extend(pmi_values)
ind_ptr.append(len(data))
return sp.csr_matrix((data, indices, ind_ptr), shape=(M.shape[0], M.shape[1]))
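# Illustrative sketch (not called anywhere): calc_pmi on a tiny, hypothetical
# 2x3 co-occurrence matrix. Entries that co-occur more often than expected
# under independence receive a positive normalized PMI weight; the rest are
# dropped because sparse_pmi defaults to nonneg_pmi=True.
def _demo_calc_pmi():
    cooc = sp.csr_matrix(np.array([[10., 0., 1.],
                                   [1., 5., 0.]]))
    pmi = calc_pmi(cooc)
    print(pmi.toarray())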
def get_rank(gold, list_predictions, max_k=3):
list_predictions = list_predictions[:max_k]
try:
rank = list_predictions.index(gold) + 1
except ValueError:
rank = max_k + 1
return rank
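# Quick sanity check for get_rank (not called anywhere): it returns the gold
# label's 1-based position among the top max_k predictions, or max_k + 1 if absent.
def _demo_get_rank():
    assert get_rank('a', ['a', 'b', 'c']) == 1
    assert get_rank('c', ['a', 'b', 'c']) == 3
    assert get_rank('d', ['a', 'b', 'c']) == 4  # gold label outside the top max_k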
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--input_files', nargs='+', default='models/sparse_DLSC_cbow_1_2000.gz models/sparse_DLSC_cbow_3_1500.gz models/sparse_DLSC_cbow_3_2000.gz models/sparse_DLSC_cbow_4_1000.gz models/sparse_DLSC_cbow_4_1500.gz models/sparse_DLSC_cbow_5_1000.gz models/sparse_DLSC_cbow_5_2000.gz'.split())
parser.add_argument('--out_dir', type=str, default='final_submission')
parser.add_argument('--verbose', dest='verbose', action='store_true')
parser.add_argument('--no-verbose', dest='verbose', action='store_false')
    parser.set_defaults(verbose=False)
parser.add_argument('--normalize', dest='normalize', action='store_true')
parser.add_argument('--no-normalize', dest='normalize', action='store_false')
parser.set_defaults(normalize=False)
args = parser.parse_args()
training_data = json.load(open('data/terms/train.json'))
test_data = json.load(open('data/terms/test.json'))
tags = {i:t['label'] for i, t in enumerate(json.load(open('data/tagset/finsim.json')))}
golds = {}
if os.path.exists('data/terms/gold.json'):
golds = {g['term']:g['label'] for g in json.load(open('data/terms/gold.json'))}
labels_to_ids = {v:k for k,v in tags.items()}
if not os.path.exists(args.out_dir):
os.mkdir(args.out_dir)
aggregated_ranks, aggregated_corrects = [], []
aggregated_train_predictions = [[] for _ in range(len(training_data))]
aggregated_test_predictions = [[] for _ in range(len(test_data))]
if args.input_files:
files_used = sorted(args.input_files)
else:
files_used = sorted(glob.glob('models/*.gz'))
print(len(files_used))
for fn in files_used:
print(fn)
w2i, i2w, S = load_sparse_embeddings(fn)
labels_to_vecs = {}
oovs = {}
for t in training_data:
label = t['label']
label_id = labels_to_ids[label]
term_tokens = t['term'].split()
oovs[t['term']] = []
for ti, tt in enumerate([ttt for T in term_tokens for ttt in T.split('-')]):
ind = get_word_index(w2i, tt)
if ind==-1:
oovs[t['term']].append(tt)
continue
vec = S[ind,:]
if 'vec' in t:
t['vec'] += vec
else:
t['vec'] = vec
if 'vec' in t and args.normalize:
if 'sparse' in fn:
t['vec'].data /= t['vec'].sum()
else:
t['vec'].data /= np.linalg.norm(t['vec'].data)
elif not 'vec' in t:
t['vec'] = sp.csr_matrix((1, S.shape[1]))
if label_id in labels_to_vecs:
labels_to_vecs[label_id] += t['vec']
else:
labels_to_vecs[label_id] = t['vec']
mtx = sp.vstack([labels_to_vecs[row] for row in sorted(labels_to_vecs)])
etalon, predictions = [],[]
ranking_scores = {}
for i,t in enumerate(training_data):
gold_label = labels_to_ids[t['label']]
etalon.append(gold_label)
mtx[gold_label] -= t['vec']
if 'sparse' in fn:
product = (-t['vec'] @ calc_pmi(mtx).T).todense()
else:
row_norms = np.linalg.norm(mtx.todense(), axis=1)
M = mtx / row_norms[:, np.newaxis]
product = np.array(-t['vec'] @ M.T)
ranking_scores[t['term']] = product
aggregated_train_predictions[i].append(product)
ranked_labels = np.argsort(product)
ranked_labels = [ranked_labels[0,r] for r in range(len(tags))][0:5]
mtx[gold_label] += t['vec']
if args.verbose and ranked_labels[0]!=gold_label:
term = t['term']
print('{}\t{}\t{}\t{}\tOOVs: {}'.format(i, term, t['label'], ' '.join([tags[r] for r in ranked_labels]), ' '.join(oovs[term])))
predictions.append(ranked_labels)
del training_data[i]['vec']
corrects = 100*sum([1 if p[0]==g else 0 for g,p in zip(etalon, predictions)]) / len(etalon)
aggregated_corrects.append(corrects)
avg_rank_metric = np.mean([get_rank(g, p) for g,p in zip(etalon, predictions)])
aggregated_ranks.append(avg_rank_metric)
print("Accuracy_loo, rank: ", corrects, avg_rank_metric)
if 'sparse' in fn:
M = calc_pmi(mtx).toarray().T
else:
row_norms = np.linalg.norm(mtx.todense(), axis=1)
M = np.transpose(mtx / row_norms[:, np.newaxis])
gold_etalons, gold_predictions = [], []
for i,t in enumerate(test_data):
t['label'] = None
gold_etalons.append(golds[t['term']])
term_tokens = t['term'].split()
for ti, tt in enumerate([ttt for T in term_tokens for ttt in T.split('-')]):
ind = get_word_index(w2i, tt)
if ind==-1: continue
vec = S[ind,:]
if 'vec' in t:
t['vec'] += vec
else:
t['vec'] = vec
if not 'vec' in t:
t['vec'] = sp.csr_matrix((1, S.shape[1]))
product = (-t['vec'] @ M)
aggregated_test_predictions[i].append(product)
ranked_labels = np.argsort(product)
ranked_labels = [ranked_labels[0,r] for r in range(len(tags))]
t['predicted_labels'] = [tags[r] for r in ranked_labels][0:5]
gold_predictions.append(t['predicted_labels'])
del t['vec']
#print(len(test_data), t)
corrects = 100*sum([1 if p[0]==g else 0 for g,p in zip(gold_etalons, gold_predictions)]) / len(gold_etalons)
avg_rank_metric = np.mean([get_rank(g, p) for g,p in zip(gold_etalons, gold_predictions)])
print("Accuracy_test, rank: ", corrects, avg_rank_metric)
bn = os.path.basename(fn)
with open('{}/{}.json'.format(args.out_dir, bn), 'w') as outfile:
json.dump(test_data, outfile)
correct = 3*[0]
ranks = [[] for _ in range(3)]
for i,(p,c) in enumerate(zip(aggregated_train_predictions, etalon)):
stacked_scores = np.vstack(p)
rankings = np.argsort(stacked_scores, axis=1)
scores1 = np.zeros(rankings.shape[1])
for r in np.array(rankings):
for j,v in enumerate(r):
scores1[v] += j
row_norms = np.linalg.norm(stacked_scores, axis=1)
scores2 = np.array(np.sum(stacked_scores / (row_norms[:, np.newaxis]+1e-9), axis=0)).flatten()
scores3 = np.array(np.sum(stacked_scores, axis=0)).flatten()
for si, scores in enumerate([scores1, scores2, scores3]):
            ranked_labels = np.argsort(scores)
import numpy as np
def p_basis(x, orders=[1]):
y = np.concatenate([np.power(x, o) for o in orders], axis=0)
return y.flatten()
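# Illustrative sketch (not called anywhere): p_basis stacks element-wise powers
# of the input, e.g. orders=[1, 2] maps a state x in R^4 to (x, x**2) in R^8.
# This is the feature map used by the LDS policy below.
def _demo_p_basis():
    x = np.array([1., 2., -1., 0.5])
    print(p_basis(x, orders=[1, 2]))  # [ 1.    2.   -1.    0.5   1.    4.    1.    0.25]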
class LDS(object):
"""
Describes trajectory distribution of agent moving in a plane.
State given by x in R^4, action given by u in R^2.
action = k * x + N(0, std)
state = A * x + B * u + N(0, 1)
"""
def __init__(self,
K,
sigma,
polynomial_degrees=[1],
L=20):
# Environment constants
dimension = 2
A = np.eye(2 * dimension)
B = np.vstack([0.5 * np.eye(dimension), np.eye(dimension)])
def step(x, u):
return A.dot(x) + B.dot(u)
self._step = step
self._horizon = L
# goal is point (5, 5)
goal = np.zeros(2 * dimension)
for i in range(dimension):
goal[2 * i] = 5
def cost(x, u):
return -1 * np.linalg.norm(x - goal)
self._cost = cost
# Agent policy
self._K = np.array(K, copy=True)
self._sigma = sigma * np.ones(dimension, dtype=float)
base_in_dim = 2 * dimension
if polynomial_degrees:
in_dim = len(polynomial_degrees) * 2 * dimension
else:
in_dim = 1
def input_fn(x):
# if polynomial_degree == 1:
# return x
rows = []
for row in x.reshape(-1, base_in_dim):
rows.append(p_basis(row, orders=polynomial_degrees))
y = np.concatenate(rows, axis=0).reshape(-1, in_dim)
return y
# Class private member variables
self._dimension = dimension
self._in_dim = in_dim
self._input_fn = input_fn
def sample(self, n=1, policy=None):
paths = []
gs = np.zeros(n)
for i in range(n):
state = np.zeros(2 * self._dimension)
x = np.zeros((self._horizon, 2 * self._dimension))
u = np.zeros((self._horizon, self._dimension))
r = np.zeros(self._horizon)
pi_noise = np.random.normal(0, self._sigma, (self._horizon, self._dimension))
noise = np.random.normal(0, 0.05, (self._horizon, self._dimension * 2))
for t in range(self._horizon):
control = self.mean(self._input_fn(state)) + pi_noise[t]
x[t] = state
u[t] = control
state = self._step(state, control) + noise[t]
state = np.clip(state, -10, 10)
                r[t] = self._cost(state, control)
g = np.sum(r)
gs[i] = g
paths.append({'x': x, 'u': u, 'r': r})
return paths, gs
def expected_value(self):
_, fs = self.sample(n=100000)
print('True Eval CI %f' % (np.std(fs) * 1.96 * 0.01))
return np.mean(fs)
def mean(self, x):
mean = x.dot(self._K)
return mean.flatten()
def mle_fit(self, paths):
"""
Fit _K with ordinary least squares. Fit _sigma with residuals.
paths: dict, must include keys 'x' and 'u' which are numpy arrays.
"""
xs = np.concatenate([path['x'] for path in paths])
xs = self._input_fn(xs)
        us = np.concatenate([path['u'] for path in paths])
import sys
import numpy as np
from river.drift import D3
from time import process_time as timer
from sklearn.ensemble import RandomForestClassifier
from TrainingFunctions.LoadDataStream import load_data_stream
from TrainingFunctions.PreprocessingWithoutLabels import preprocessing_without_labels
from TrainingFunctions.PreprocessingWithLabels import preprocessing_with_labels
from EvaluationFunctions.GeneratePerformanceMetrics import generate_performance_metrics
from EvaluationFunctions.GenerateResultsTableIterations import generate_results_table_iterations
from Utility_Functions.CreateResultsFileName import create_results_file_name
# Not possible to execute on server!
# Installation using GitHub necessary: https://github.com/ogozuacik/d3-discriminative-drift-detector-concept-drift
# Set parameters
n_iterations = 10
# 1. Load Data
# Differentiate whether data is provided in separate train and test file
separate_train_test_file = False
image_data = False
drift_labels_known = True
proxy_evaluation = False
if not drift_labels_known and not proxy_evaluation:
print("Error: Change detection evaluation and/or proxy evaluation missing!")
sys.exit()
if not proxy_evaluation:
acc_vector = False
if not drift_labels_known:
drift_labels = False
# Set name of data set and path
if separate_train_test_file:
path = "IBDD_Datasets/benchmark_real/"
# dataset = "Yoga"
# dataset = "StarLightCurves"
# dataset = "Heartbeats"
elif image_data:
path = "Generated_Streams/Image_Data_Drift_And_Classifier_Labels/"
# dataset = "RandomMNIST_and_FashionMNIST_SortAllNumbers19DR_2021-08-06_11.07.pickle"
else:
if drift_labels_known:
if proxy_evaluation:
path = "Generated_Streams/Drift_And_Classifier_Labels/"
# dataset = "RandomRandomRBF_50DR_100Dims_50Centroids_1MinDriftCentroids_300MinL_2000MaxL_2021-08-06_10.57.pickle"
else:
path = "Generated_Streams/Drift_Labels/"
# Experiments Evaluation
# dataset = "RandomNumpyRandomNormalUniform_onlyMeanDrift_var0.01_50DR_100Dims_1MinDimBroken_300MinL_2000MaxL_2021-08-10_10.32.pickle"
# dataset = "RandomNumpyRandomNormalUniform_onlyMeanDrift_var0.05_50DR_100Dims_1MinDimBroken_300MinL_2000MaxL_2021-08-06_10.42.pickle"
# dataset = "RandomNumpyRandomNormalUniform_onlyMeanDrift_var0.25_50DR_100Dims_1MinDimBroken_300MinL_2000MaxL_2021-08-06_10.45.pickle"
# dataset = "RandomNumpyRandomNormalUniform_onlyVarianceDrift_50DR_100Dims_1MinDimBroken_300MinL_2000MaxL_2021-08-06_11.15.pickle"
# dataset = "RandomNumpyRandomNormalUniform_50DR_100Dims_100MinDimBroken_300MinL_2000MaxL_2021-08-06_10.54.pickle"
# dataset = "RandomNumpyRandomNormalUniform_50DR_100Dims_1MinDimBroken_300MinL_2000MaxL_2021-08-06_10.53.pickle"
# dataset = "Mixed_300MinDistance_DATASET_A_RandomNumpyRandomNormalUniform_50DR_100Dims_1MinDimBroken_DATASET_B_RandomRandomRBF_50DR_100Dims_50Centroids_1MinDriftCentroids.pickle"
# Experiments Time Complexity
# "Time_RandomNumpyRandomNormalUniform_10DR_10Dims_1MinDimBroken_300MinL_2000MaxL_2021-09-06_22.24.pickle",
# "Time_RandomNumpyRandomNormalUniform_10DR_50Dims_5MinDimBroken_300MinL_2000MaxL_2021-09-06_22.25.pickle",
# "Time_RandomNumpyRandomNormalUniform_10DR_100Dims_10MinDimBroken_300MinL_2000MaxL_2021-09-06_22.26.pickle",
# "Time_RandomNumpyRandomNormalUniform_10DR_500Dims_50MinDimBroken_300MinL_2000MaxL_2021-09-06_22.26.pickle",
# "Time_RandomNumpyRandomNormalUniform_10DR_1000Dims_100MinDimBroken_300MinL_2000MaxL_2021-09-06_22.27.pickle"
else:
path = "Generated_Streams/Classifier_Labels/"
dataset = ""
print("Current dataset:")
print(dataset)
# Load data stream
data_stream = load_data_stream(dataset=dataset, path=path, separate_train_test_file=separate_train_test_file,
image_data=image_data, drift_labels_known=drift_labels_known,
proxy_evaluation=proxy_evaluation)
# Set number of instances
n_instances = data_stream.shape[0]
# Set number of train data, validation data, and test data
if dataset == "Yoga":
n_train_data = 300
elif dataset == "Heartbeats":
n_train_data = 500
else:
n_train_data = 1000
n_val_data = 0
n_test_data = int(n_instances - n_val_data - n_train_data)
# 2. Pre-processing
# Separate data stream and drift labels
if drift_labels_known:
data_stream, drift_labels = data_stream[:, :-1], data_stream[:, -1]
drift_labels = drift_labels[(len(drift_labels) - n_test_data):]
# Preprocess data stream
if proxy_evaluation:
train_X, train_y, val_X, val_y, test_X, test_y = preprocessing_with_labels(
data_stream, n_instances, n_train_data, n_val_data, n_test_data, image_data)
else:
train_X, val_X, test_X = preprocessing_without_labels(
data_stream, n_instances, n_train_data, n_val_data, n_test_data)
# Set number of dimensions
n_dimensions = train_X.shape[1]
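# Illustrative sketch (not called anywhere) of the D3 detection loop used below,
# assuming the forked river D3 API where update(x) returns an
# (in_drift, in_warning) pair. The stream here is purely synthetic, with an
# abrupt mean shift halfway through.
def _demo_d3_on_synthetic_stream(n=2000, n_dims=5, seed=0):
    rng = np.random.RandomState(seed)
    stream = rng.normal(0.0, 1.0, (n, n_dims))
    stream[n // 2:] += 3.0  # abrupt drift in every dimension
    detector = D3()
    for idx, row in enumerate(stream):
        in_drift, in_warning = detector.update(dict(enumerate(row, 1)))
        if in_drift:
            print("drift detected at index", idx)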
# Start global iterations
all_performance_metrics = []
all_accuracies = []
all_times_per_example = []
for iteration in range(n_iterations):
print("Global Iteration:")
print(iteration)
# 3. Train classifier for Evaluation
if proxy_evaluation:
model_classifier = RandomForestClassifier(n_estimators=100, max_depth=5, random_state=0)
model_classifier.fit(np.concatenate((train_X, val_X), axis=0), np.concatenate((train_y, val_y), axis=0))
acc_vector = np.zeros(len(test_y), dtype=int)
# 4. Drift Detection with D3
start = timer()
d3 = D3()
# Transform data to array with dictionaries
data_stream_dict = []
for idx in range(len(train_X)):
new_element = dict(enumerate(train_X[idx], 1))
data_stream_dict.append(new_element)
for idx in range(len(val_X)):
new_element = dict(enumerate(val_X[idx], 1))
data_stream_dict.append(new_element)
for idx in range(len(test_X)):
new_element = dict(enumerate(test_X[idx], 1))
data_stream_dict.append(new_element)
# Start saving drift decisions when test data start
drift_decisions = [False] * len(data_stream_dict)
for idx in range(len(data_stream_dict)):
test_idx = idx - (n_train_data + n_val_data)
# If test data start and proxy evaluation, start prediction
if test_idx >= 0 and proxy_evaluation:
# Test: Make prediction for element with classifier
y_pred = model_classifier.predict(test_X[test_idx].reshape(1, -1))
if y_pred == test_y[test_idx]:
acc_vector[test_idx] = 1
# Detect drift
in_drift, in_warning = d3.update(data_stream_dict[idx])
# If drift is detected
if in_drift:
print(f"Change detected at index {idx}")
drift_decisions[idx] = True
if test_idx >= d3.new_data_window_size and proxy_evaluation:
# Train model again on new window data
window_train_X = test_X[(test_idx - d3.new_data_window_size):test_idx]
window_train_y = test_y[(test_idx - d3.new_data_window_size):test_idx]
model_classifier.fit(window_train_X, window_train_y)
# Save only drift decisions for test data
drift_decisions_in_test_data = drift_decisions[(n_train_data + n_val_data):]
# Measure the elapsed time
end = timer()
execution_time = end - start
print('Time per example: {} sec'.format(np.round(execution_time / len(test_X), 4)))
print('Total time: {} sec'.format(np.round(execution_time, 2)))
all_times_per_example.append(execution_time / len(test_X))
# 5. Evaluation
# 5.1 Proxy Evaluation
if proxy_evaluation:
# Calculate mean accuracy of classifier
mean_acc = np.mean(acc_vector) * 100
        print('Average classification accuracy: {}%'.format(np.round(mean_acc, 2)))
import cv2
import numpy as np
#cwd(),dir()
import os
#ocr
from PIL import Image
import pytesseract
import argparse
#small object removal
from skimage import morphology
#
import matplotlib.pyplot as plt
from matplotlib import interactive
def Show(img,title="image"):
#comment line below in and out if you want to see results at every step
return
cv2.imshow(title,img)
cv2.waitKey()
def main():
img1 = cv2.imread("troll_template.png", cv2.IMREAD_GRAYSCALE)
#Show(img1)
## img2 = cv2.imread("img2.png")
## diff = cv2.absdiff(img1, img2)
# threshold the diff image so that we get the foreground
_,im_bin = cv2.threshold(img1, 25, 255, cv2.THRESH_BINARY)
(trollmask,trollsliced)=segment(im_bin,img1)
trollmoment=cv2.moments(trollmask)
trollmoment = [i for i in trollmoment.values()]
trollmoment=( np.log10(np.abs(trollmoment)), np.sign(trollmoment) )
namelist = os.listdir('data')
N=len(namelist)
errors = np.zeros( N )
errors_moment = np.zeros( N )
nos = np.linspace(1,N,N)
for i,fname in enumerate(namelist):
## if (i<48) | (i>49):
## continue
print('---'*30)
print('image '+str(i+1)+'/'+str(N)+' : '+fname)
img=preprocIm(fname)
img=segment(img)
cv2.imwrite('gener/'+fname,img)
merged = overlap(img, trollsliced)
cv2.imwrite('merged/'+fname,merged)
## Show(img,'Sliced')
## cv2.waitKey()
## cv2.destroyAllWindows()
errors[i] = computeErr(img, trollmask)
errors_moment[i] = computeErrMoment(img, trollmoment)
## fname = namelist[0]
M = np.stack((nos,errors,errors_moment))
#sort by 2nd row, reverse
## a[:,a[1,:].argsort()[::-1] ]
M = M[:,M[1,:].argsort()[::-1] ]
interactive(True)
fig,ax=plt.subplots()
Zoom=20
ax.plot(M[1,:Zoom])
#labels=[str(int(i)) for i in M[0,:Zoom]]
#ax.set_xticklabels(labels)
labels=[int(i)-1 for i in M[0,:Zoom]]
labels=[namelist[i].replace('.jpg','') for i in labels]
ax.set_xticklabels(labels)
ax.set_xticks(nos[:Zoom]-1)
ax.set_title('pixel diff')
plt.xticks(rotation=90)
plt.tight_layout()
plt.show()
M = M[:,M[2,:].argsort()]
fig,ax=plt.subplots()
Zoom=20
ax.plot(M[2,:Zoom])
labels=[int(i)-1 for i in M[0,:Zoom]]
labels=[namelist[i].replace('.jpg','') for i in labels]
ax.set_xticklabels(labels)
ax.set_xticks(nos[:Zoom]-1)
ax.set_title('rms hu moment diff')
plt.xticks(rotation=90)
plt.tight_layout()
plt.show()
def computeErr(img, mask):
if img.ndim<2:
print("failed")
return -1
hi,wi = img.shape[:2]
hm,wm = mask.shape[:2]
#fit mask to img dims
if (hi != hm) & ( wi != wm):
mask = cv2.resize(mask, (wi,hi), interpolation=cv2.INTER_CUBIC)
_,mask = cv2.threshold(mask, 25, 255, cv2.THRESH_BINARY)
#compute same part, subtract different
net = cv2.bitwise_and(img,mask)
sub = cv2.bitwise_and(img,cv2.bitwise_not(mask))
SS = np.sum(net/255) - np.sum(sub/255)
SS/=(hi*wi)
## print(SS)
## Show(mask,"mask")
Show(net,"product")
return SS
def computeErrMoment(img, moment):
m1 = cv2.moments(img)
m1=[i for i in m1.values()]
#clean up zero moments (empty image)
L1 = [np.log10(i) if i>0 else 10 for i in np.abs(m1)]
S1 = [i if i!=0 else 1 for i in np.sign(m1)]
## print(L1,S1)
print('len: '+str(len(L1)))
SS = np.multiply(L1,S1)-np.multiply(moment[0],moment[1])
SS = np.linalg.norm(SS)/len(L1)
## print(SS)
return SS
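# Illustrative sketch (not called anywhere) of the moment-based distance above:
# build the (log10|m|, sign(m)) reference tuple for one synthetic shape -- with
# the same zero-guard that computeErrMoment applies internally, to keep the
# sketch robust -- then compare the shape with itself and with a different one.
def _demo_moment_distance():
    ref = np.zeros((100, 100), dtype='uint8')
    cv2.circle(ref, (50, 50), 30, 255, -1)
    m = [v for v in cv2.moments(ref).values()]
    logs = [np.log10(v) if v > 0 else 10 for v in np.abs(m)]
    signs = [v if v != 0 else 1 for v in np.sign(m)]
    ref_moment = (np.array(logs), np.array(signs))
    other = np.zeros((100, 100), dtype='uint8')
    cv2.rectangle(other, (10, 40), (90, 60), 255, -1)
    print(computeErrMoment(ref, ref_moment))    # 0.0 -- identical shape
    print(computeErrMoment(other, ref_moment))  # > 0 -- different shape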
def overlap(img1, img2):
hi,wi = img1.shape[:2]
hm,wm = img2.shape[:2]
#fit img2 to img dims
if (hi != hm) & ( wi != wm):
img2 = cv2.resize(img2, (wi,hi), interpolation=cv2.INTER_CUBIC)
print('hi:',hi,wi)
#assume img1,img2 are bw
M=cv2.bitwise_and(np.ones((hi,wi),dtype='uint8'), img1)
#background black -> white
Bg=cv2.bitwise_not(M)
## print(type(Bg),type(Bg[0][0]),Bg[0][0])
R=26*M+Bg
G=240*M+Bg
B=40*M+Bg
#build color from mask
img1 = cv2.merge((B,G,R))
#restore 3channel from bw
img2 = cv2.cvtColor(img2,cv2.COLOR_GRAY2RGB)
out = cv2.addWeighted(img1, 0.6, img2, 0.4, gamma=0)
## Show(img1,'colored')
## Show(out,'mixed')
return out
def segment(image, colored=[]):
# Copy the thresholded image.
im_floodfill = cv2.bitwise_not(image.copy())
im_bin_inv = cv2.bitwise_not(image)
# Mask used to flood filling.
# Notice the size needs to be 2 pixels than the image.
h, w = image.shape[:2]
mask = np.zeros((h+2, w+2), np.uint8)
# Floodfill from point (1,1)
cv2.floodFill(im_floodfill, mask, (1,1), 255);
im_floodfill_inv = cv2.bitwise_not(im_floodfill)
# Combine the two images to get the foreground.
## out = cv2.bitwise_not(im_bin_inv | im_floodfill_inv)
out = im_bin_inv | im_floodfill_inv
# Display images.
## cv2.imshow("OG Image", image)
## cv2.imshow("Floodfilled Image", im_floodfill)
## cv2.imshow("Inverted Floodfilled Image", im_floodfill_inv)
## cv2.imshow("Inv thres", im_bin_inv)
## cv2.imshow("Inv thres + inv flood", out)
#remove small islands
## se1 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (8,8) )
## se2 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (2,2) )
## out = cv2.morphologyEx(out, cv2.MORPH_CLOSE, se1)
## out = cv2.morphologyEx(out, cv2.MORPH_OPEN, se2)
#remove with morph
lumpSize=np.round( np.min([w, h])/6 )
processed = morphology.remove_small_objects(out.astype(bool), min_size=lumpSize, connectivity=1).astype(int)
# black out pixels
mask_x, mask_y = np.where(processed == 0)
out[mask_x, mask_y] = 0
Show(out,"Eroded")
# get the contours in the thresholded image
## contours, hierarchy = cv2.findContours(out, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
## x,y,w,h = cv2.boundingRect(contours[0])
## cv2.rectangle(out,(x,y),(x+w,y+h),(0,255,0),2)
## cv2.imwrite("troll_mask.png",out)
# get the outline for proper scaling
pos = np.where(out==255)
py,px = pos
if len(px)==0:
#empty outline
        out = np.zeros((300,300),dtype='uint8')
## Imports
import os
import random
import numpy as np
import cv2
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import keras
from keras import backend as K
## Seeding
seed = 2019
random.seed(seed)
np.random.seed(seed)
tf.random.set_seed(seed)
def dice_coef(y_true, y_pred, smooth=1.):
y_true_f = K.flatten(y_true)
y_pred_f = K.flatten(y_pred)
intersection = K.sum(y_true_f * y_pred_f)
return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)
def dice_coef_loss(y_true, y_pred):
return -dice_coef(y_true, y_pred)
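# Quick sanity check for the Dice coefficient above (not called anywhere):
# identical masks score ~1, disjoint masks score low (the smooth term keeps the
# value above 0 and the loss finite for empty masks).
def _demo_dice_coef():
    a = K.constant(np.array([[0., 1., 1., 0.]]))
    b = K.constant(np.array([[1., 0., 0., 1.]]))
    print(float(dice_coef(a, a)))  # ~1.0
    print(float(dice_coef(a, b)))  # 0.2 with smooth=1 (i.e. 1 / 5)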
#Data Generator
class DataGen(keras.utils.Sequence):
def __init__(self, ids, path, batch_size=8, image_size=128):
self.ids = ids
self.path = path
self.batch_size = batch_size
self.image_size = image_size
self.on_epoch_end()
def __load__(self, id_name):
## Path
image_path = os.path.join(self.path, "images\\", id_name)
mask_path = os.path.join(self.path, "masks\\", id_name)
## Reading Image
image = cv2.imread(image_path, 1)
image = cv2.resize(image, (self.image_size, self.image_size))
## Reading Mask
mask = np.zeros((self.image_size, self.image_size, 1))
_mask = cv2.imread(mask_path, -1)
_mask = cv2.resize(_mask, (self.image_size, self.image_size))
_mask = np.expand_dims(_mask, axis=-1)
mask = np.maximum(mask, _mask)
## Multiple Masks reading
# all_masks = os.listdir(mask_path)
# for name in all_masks:
# _mask_path = mask_path + name
# _mask_image = cv2.imread(_mask_path, -1)
# _mask_image = cv2.resize(_mask_image, (self.image_size, self.image_size)) # 128x128
# _mask_image = np.expand_dims(_mask_image, axis=-1)
# mask = np.maximum(mask, _mask_image)
## Normalizaing
image = image / 255.0
mask = mask / 255.0
return image, mask
def __getitem__(self, index):
if (index + 1) * self.batch_size > len(self.ids):
self.batch_size = len(self.ids) - index * self.batch_size
files_batch = self.ids[index * self.batch_size: (index + 1) * self.batch_size]
image = []
mask = []
for id_name in files_batch:
_img, _mask = self.__load__(id_name)
image.append(_img)
mask.append(_mask)
image = np.array(image)
        mask = np.array(mask)
"""
2D Disc models
==============
Classes: Rosenfeld2d, General2d, Velocity, Intensity, Cube, Tools
"""
#TODO in show(): Perhaps use text labels on line profiles to distinguish profiles for more than 2 cubes.
#TODO in make_model(): Find a smart way to detect and pass only the coords needed by a prop attribute.
#TODO in run_mcmc(): Enable an arg to allow the user see the position of parameter walkers every 'arg' steps.
#TODO in General2d: Implement irregular grids (see e.g. meshio from nschloe on github) for the disc grid.
#TODO in General2d: Compute props in the interpolated grid (not in the original grid) to avoid interpolation of props and save time.
#TODO in General2d: Allow the lower surface to have independent intensity and line width parametrisations.
#TODO in General2d: Implement pressure support term
#TODO in make_model(): Allow for warped emitting surfaces, check notes for ideas as to how to solve for multiple intersections between l.o.s and emission surface.
#TODO in __main__(): show intro message when python -m disc2d
#TODO in run_mcmc(): use get() methods instead of allowing the user to use self obj attributes.
#TODO in make_model(): Allow R_disc to be a free parameter.
#TODO in make_model(): Enable 3D velocities too when subpixel algorithm is used
#TODO in v1.0: migrate to astropy units
from __future__ import print_function
from ..utils import constants as sfc
from ..utils import units as sfu
from astropy.convolution import Gaussian2DKernel, convolve
from scipy.interpolate import griddata, interp1d
from scipy.special import ellipk, ellipe
from scipy.optimize import curve_fit
from scipy.integrate import quad
import matplotlib.patches as patches
import matplotlib.pyplot as plt
from matplotlib import ticker
import numpy as np
import matplotlib
import itertools
import warnings
import numbers
import pprint
import copy
import time
import sys
import os
from multiprocessing import Pool
os.environ["OMP_NUM_THREADS"] = "1"
try:
import termtables
found_termtables = True
except ImportError:
print ("\n*** For nicer outputs we recommend installing 'termtables' by typing in terminal: pip install termtables ***")
found_termtables = False
#warnings.filterwarnings("error")
__all__ = ['Cube', 'Tools', 'Intensity', 'Velocity', 'General2d', 'Rosenfeld2d']
path_file = os.path.dirname(os.path.realpath(__file__))+'/'
"""
matplotlib.rcParams['font.family'] = 'monospace'
matplotlib.rcParams['font.weight'] = 'normal'
matplotlib.rcParams['lines.linewidth'] = 1.5
matplotlib.rcParams['axes.linewidth'] = 3.0
matplotlib.rcParams['xtick.major.width']=1.6
matplotlib.rcParams['ytick.major.width']=1.6
matplotlib.rc('font', size=MEDIUM_SIZE) # controls default text sizes
matplotlib.rc('axes', titlesize=MEDIUM_SIZE) # fontsize of axes title
matplotlib.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of x and y labels
matplotlib.rc('xtick', labelsize=MEDIUM_SIZE-2) # fontsize of y tick labels
matplotlib.rc('ytick', labelsize=MEDIUM_SIZE-2) # fontsize of x tick labels
matplotlib.rc('legend', fontsize=SMALL_SIZE-1) # legend fontsize
matplotlib.rc('figure', titlesize=BIGGER_SIZE) # fontsize of figure title
params = {'xtick.major.size': 6.5,
'ytick.major.size': 6.5
}
matplotlib.rcParams.update(params)
"""
SMALL_SIZE = 10
MEDIUM_SIZE = 15
BIGGER_SIZE = 22
hypot_func = lambda x,y: np.sqrt(x**2 + y**2) #Slightly faster than np.hypot<np.linalg.norm<scipydistance. Checked precision up to au**2 orders and seemed ok.
class InputError(Exception):
"""Exception raised for errors in the input.
Attributes:
expression -- input expression in which the error occurred
message -- explanation of the error
"""
def __init__(self, expression, message):
self.expression = expression
self.message = message
def __str__(self):
return '%s --> %s'%(self.expression, self.message)
class Tools:
@staticmethod
def _rotate_sky_plane(x, y, ang):
xy = np.array([x,y])
cos_ang = np.cos(ang)
sin_ang = np.sin(ang)
rot = np.array([[cos_ang, -sin_ang],
[sin_ang, cos_ang]])
        return np.dot(rot, xy)
import collections
import glob
import cv2
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import numpy as np
from calibration_functions import calibrateCamera_SLOW, undistort
from globals import xm_per_pix, ym_per_pix
from perspective_function import birdview
from threshold_functions import binarize_image
# Define a class to receive the characteristics of each line detection
class Line():
def __init__(self, buffer_length=10):
# was the line detected in the last iteration?
self.detected = False
# x values of the last n fits of the line
self.recent_xfitted = []
# polynomial coefficients for the most recent fit
self.last_fit_pixel = None
self.last_fit_meter = None
# list of polynomial coefficients of the last N iterations
self.recent_fits_pixel = collections.deque(maxlen=2 * buffer_length)
self.recent_fits_meter = collections.deque(maxlen=2 * buffer_length)
# distance in meters of vehicle center from the line
self.line_base_pos = None
# difference in fit coefficients between last and new fits
self.diffs = np.array([0, 0, 0], dtype='float')
# x values for detected line pixels
self.allx = None
# y values for detected line pixels
self.ally = None
def draw(self, mask, color=(0, 255, 0), line_width=50, average=False):
"""
Draw the line on a color mask image.
"""
h, w, c = mask.shape
plot_y = np.linspace(0, h - 1, h)
coeffs = self.average_fit if average else self.last_fit_pixel
line_center = coeffs[0] * plot_y ** 2 + coeffs[1] * plot_y + coeffs[2]
line_left_side = line_center - line_width // 2
line_right_side = line_center + line_width // 2
# recast the x and y points into usable format for cv2.fillPoly()
pts_left = np.array(list(zip(line_left_side, plot_y)))
pts_right = np.array(np.flipud(list(zip(line_right_side, plot_y))))
pts = np.hstack([pts_left, pts_right])
# Draw the lane onto the warped blank image
return cv2.fillPoly(mask, [np.int32(pts)], color)
def update_line(self, new_fit_pixel, new_fit_meter, detected, clear_buffer=False):
"""
Update Line with new fitted coefficients.
:param new_fit_pixel: new polynomial coefficients (pixel)
:param new_fit_meter: new polynomial coefficients (meter)
:param detected: if the Line was detected or inferred
:param clear_buffer: if True, reset state
:return: None
"""
self.detected = detected
if clear_buffer:
self.recent_fits_pixel = []
self.recent_fits_meter = []
self.last_fit_pixel = new_fit_pixel
self.last_fit_meter = new_fit_meter
self.recent_fits_pixel.append(self.last_fit_pixel)
self.recent_fits_meter.append(self.last_fit_meter)
# PROPERTIES
@property
# polynomial coefficients averaged over the last n iterations
def best_fit_pixels(self):
return np.mean(self.recent_fits_pixel, axis=0)
@property
# polynomial coefficients averaged over the last n iterations
def best_fit_meters(self):
return np.mean(self.recent_fits_meter, axis=0)
@property
# radius of curvature of the line in some units
def radius_of_curvature_pixels(self):
y_eval = 0
coeffs = self.best_fit_pixels
return ((1 + (2 * coeffs[0] * y_eval + coeffs[1]) ** 2) ** 1.5) / np.absolute(2 * coeffs[0])
@property
# radius of curvature of the line in some units
def radius_of_curvature_meters(self):
y_eval = 0
coeffs = self.last_fit_meter
return ((1 + (2 * coeffs[0] * y_eval + coeffs[1]) ** 2) ** 1.5) / np.absolute(2 * coeffs[0])
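# Illustrative numeric check (not called anywhere) of the curvature formula used
# in the Line properties: R = (1 + (2*A*y + B)**2)**1.5 / |2*A|, evaluated at
# y = 0. The smaller the quadratic coefficient A, the straighter the lane and
# the larger the radius.
def _demo_radius_of_curvature():
    def radius(coeffs, y_eval=0):
        return ((1 + (2 * coeffs[0] * y_eval + coeffs[1]) ** 2) ** 1.5) / np.absolute(2 * coeffs[0])
    print(radius([1e-4, 0.0, 300.0]))  # 5000.0   (gentle curve)
    print(radius([1e-6, 0.0, 300.0]))  # 500000.0 (almost straight)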
def find_lane_pixels(binary_warped, line_L, line_R, nwindows=9, verbose=False):
# Take a histogram of the bottom half of the image
histogram = np.sum(binary_warped[binary_warped.shape[0]//2:, :], axis=0)
# Create an output image to draw on and visualize the result
out_img = np.dstack((binary_warped, binary_warped, binary_warped))
# Find the peak of the left and right halves of the histogram
# These will be the starting point for the left and right lines
midpoint = np.int(histogram.shape[0]//2)
leftx_base = np.argmax(histogram[:midpoint])
rightx_base = np.argmax(histogram[midpoint:]) + midpoint
# HYPERPARAMETERS
"""
# Choose the number of sliding windows
nwindows = 9 """
# Set the width of the windows +/- margin
margin = 100
# Set minimum number of pixels found to recenter window
minpix = 50
# Set height of windows - based on nwindows above and image shape
window_height = np.int(binary_warped.shape[0]//nwindows)
# Identify the x and y positions of all nonzero pixels in the image
nonzero = binary_warped.nonzero()
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
# Current positions to be updated later for each window in nwindows
leftx_current = leftx_base
rightx_current = rightx_base
# Create empty lists to receive left and right lane pixel indices
left_lane_inds = []
right_lane_inds = []
# Step through the windows one by one
for window in range(nwindows):
# Identify window boundaries in x and y (and right and left)
win_y_low = binary_warped.shape[0] - (window+1)*window_height
win_y_high = binary_warped.shape[0] - window*window_height
### TO-DO: Find the four below boundaries of the window ###
win_xleft_low = leftx_current - margin # Update this
win_xleft_high = leftx_current + margin # Update this
win_xright_low = rightx_current - margin # Update this
win_xright_high = rightx_current + margin # Update this
# Draw the windows on the visualization image
out_img = cv2.rectangle(
out_img, (win_xleft_low, win_y_low), (win_xleft_high, win_y_high), (0, 255, 0), 2)
out_img = cv2.rectangle(
out_img, (win_xright_low, win_y_low), (win_xright_high, win_y_high), (0, 255, 0), 2)
### TO-DO: Identify the nonzero pixels in x and y within the window ###
good_left_inds = ((nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high) &
(nonzeroy >= win_y_low) & (nonzeroy < win_y_high)).nonzero()[0]
good_right_inds = ((nonzerox >= win_xright_low) & (nonzerox < win_xright_high) &
(nonzeroy >= win_y_low) & (nonzeroy < win_y_high)).nonzero()[0]
# Append these indices to the lists
left_lane_inds.append(good_left_inds)
right_lane_inds.append(good_right_inds)
### TO-DO: If you found > minpix pixels, recenter next window ###
### (`right` or `leftx_current`) on their mean position ###
if len(good_left_inds) > minpix:
leftx_current = np.int(np.mean(nonzerox[good_left_inds]))
if len(good_right_inds) > minpix:
rightx_current = np.int(np.mean(nonzerox[good_right_inds]))
# Concatenate the arrays of indices (previously was a list of lists of pixels)
try:
left_lane_inds = np.concatenate(left_lane_inds)
right_lane_inds = np.concatenate(right_lane_inds)
except ValueError:
# Avoids an error if the above is not implemented fully
pass
# Extract left and right line pixel positions
# leftx = nonzerox[left_lane_inds]
# lefty = nonzeroy[left_lane_inds]
# rightx = nonzerox[right_lane_inds]
# righty = nonzeroy[right_lane_inds]
leftx = line_L.allx = nonzerox[left_lane_inds]
lefty = line_L.ally = nonzeroy[left_lane_inds]
rightx = line_R.allx = nonzerox[right_lane_inds]
righty = line_R.ally = nonzeroy[right_lane_inds]
detected = True
if not list(line_L.allx) or not list(line_L.ally):
left_fit_pixel = line_L.last_fit_pixel
left_fit_meter = line_L.last_fit_meter
detected = False
else:
left_fit_pixel = np.polyfit(line_L.ally, line_L.allx, 2)
left_fit_meter = np.polyfit(
line_L.ally * ym_per_pix, line_L.allx * xm_per_pix, 2)
if not list(line_R.allx) or not list(line_R.ally):
right_fit_pixel = line_R.last_fit_pixel
right_fit_meter = line_R.last_fit_meter
detected = False
else:
right_fit_pixel = np.polyfit(line_R.ally, line_R.allx, 2)
right_fit_meter = np.polyfit(
line_R.ally * ym_per_pix, line_R.allx * xm_per_pix, 2)
#Fit a second order polynomial to each using `np.polyfit` ###
left_fit = np.polyfit(lefty, leftx, 2)
right_fit = np.polyfit(righty, rightx, 2)
left_fit_meter = np.polyfit(lefty*ym_per_pix, leftx*xm_per_pix, 2)
right_fit_meter = np.polyfit(righty*ym_per_pix, rightx*xm_per_pix, 2)
line_L.update_line(left_fit_pixel, left_fit_meter, detected=detected)
line_R.update_line(right_fit_pixel, right_fit_meter, detected=detected)
# Generate x and y values for plotting
ploty = np.linspace(0, binary_warped.shape[0]-1, binary_warped.shape[0])
try:
left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]
except TypeError:
# Avoids an error if `left` and `right_fit` are still none or incorrect
print('The function failed to fit a line!')
left_fitx = 1*ploty**2 + 1*ploty
right_fitx = 1*ploty**2 + 1*ploty
## Visualization ##
# Colors in the left and right lane regions
out_img[lefty, leftx] = [255, 0, 0]
out_img[righty, rightx] = [0, 0, 255]
if verbose:
# Plots the left and right polynomials on the lane lines
plt.plot(left_fitx, ploty, color='yellow')
plt.plot(right_fitx, ploty, color='yellow')
plt.imshow(out_img, cmap='gray')
figManager = plt.get_current_fig_manager() # to control the figure to be showen
# maximaize the window of the plot to cover the whole screen
figManager.window.showMaximized()
plt.show()
return line_L, line_R, out_img
def get_fits_by_previous_fits(birdeye_binary, line_L, line_R, verbose=False):
"""
Get polynomial coefficients for lane-lines detected in an binary image.
This function starts from previously detected lane-lines to speed-up the search of lane-lines in the current frame.
:param birdeye_binary: input bird's eye view binary image
:param line_L: left lane-line previously detected
:param line_R: left lane-line previously detected
:param verbose: if True, display intermediate output
:return: updated lane lines and output image
"""
height, width = birdeye_binary.shape
left_fit_pixel = line_L.last_fit_pixel
right_fit_pixel = line_R.last_fit_pixel
# Identify the x and y positions of all nonzero pixels in the image
nonzero = birdeye_binary.nonzero()
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
# Set the width of the windows +/- margin
margin = 100
# Identify the nonzero pixels in x and y within the previous detected line-lane
left_lane_inds = (
(nonzerox > (left_fit_pixel[0] * (nonzeroy ** 2) + left_fit_pixel[1] * nonzeroy + left_fit_pixel[2] - margin)) & (
nonzerox < (left_fit_pixel[0] * (nonzeroy ** 2) + left_fit_pixel[1] * nonzeroy + left_fit_pixel[2] + margin)))
right_lane_inds = (
(nonzerox > (right_fit_pixel[0] * (nonzeroy ** 2) + right_fit_pixel[1] * nonzeroy + right_fit_pixel[2] - margin)) & (
nonzerox < (right_fit_pixel[0] * (nonzeroy ** 2) + right_fit_pixel[1] * nonzeroy + right_fit_pixel[2] + margin)))
# Extract left and right line pixel positions
line_L.allx, line_L.ally = nonzerox[left_lane_inds], nonzeroy[left_lane_inds]
line_R.allx, line_R.ally = nonzerox[right_lane_inds], nonzeroy[right_lane_inds]
# check if lane-line are detected in the prefious frame, if so, then load the fitting coefficents from the last frame. if not, then
detected = True
if not list(line_L.allx) or not list(line_L.ally):
# left_fit_pixel = line_L.best_fit_pixel
# left_fit_meter = line_L.best_fit_meter
left_fit_pixel = line_L.last_fit_pixel
left_fit_meter = line_L.last_fit_meter
detected = False
else:
# left_fit_pixel = line_L.best_fit_pixels
# left_fit_meter = line_L.best_fit_meters
left_fit_pixel = np.polyfit(line_L.ally, line_L.allx, 2)
left_fit_meter = np.polyfit(line_L.ally * ym_per_pix, line_L.allx * xm_per_pix, 2)
if not list(line_R.allx) or not list(line_R.ally):
# right_fit_pixel = line_R.best_fit_pixel
# right_fit_meter = line_R.best_fit_meter
right_fit_pixel = line_R.last_fit_pixel
right_fit_meter = line_R.last_fit_meter
detected = False
else:
# right_fit_pixel = line_R.best_fit_pixels
# right_fit_meter = line_R.best_fit_meters
right_fit_pixel = np.polyfit(line_R.ally, line_R.allx, 2)
right_fit_meter = np.polyfit(line_R.ally * ym_per_pix, line_R.allx * xm_per_pix, 2)
line_L.update_line(left_fit_pixel, left_fit_meter, detected=detected)
line_R.update_line(right_fit_pixel, right_fit_meter, detected=detected)
# AVG the lane-lines data detected over N iterations for both Left and Right lanes.
line_L.last_fit_pixel = left_fit_pixel = line_L.best_fit_pixels
line_L.last_fit_meter = left_fit_meter = line_L.best_fit_meters
line_R.last_fit_pixel = right_fit_pixel = line_R.best_fit_pixels
line_R.last_fit_meter = right_fit_meter = line_R.best_fit_meters
# Generate x and y values for plotting
    ploty = np.linspace(0, height - 1, height)
import numpy as np
from scipy.misc import *
import os, struct
def load_MNIST_test():
path = './MNIST'
fname_img = os.path.join(path, 't10k-images.idx3-ubyte')
fname_lbl = os.path.join(path, 't10k-labels.idx1-ubyte')
n_cluster = 10
with open(fname_lbl, 'rb') as flbl:
magic, num = struct.unpack(">II", flbl.read(8))
        labels = np.fromfile(flbl, dtype=np.int8)
import gym
from gym.spaces import Discrete, MultiDiscrete, Tuple
import numpy as np
from mujoco_worldgen.util.rotation import mat2quat
from mae_envs.wrappers.util import update_obs_space
from mae_envs.util.geometry import dist_pt_to_cuboid
from copy import deepcopy
from itertools import compress
class GrabObjWrapper(gym.Wrapper):
'''
Allows agents to grab an object using a weld constraint.
Args:
body_names (list): list of body names that the agent can grab
radius_multiplier (float): How far away can this be activated (multiplier on box size)
grab_dist (float): If set, the object is held at a specific distance during
grabbing (default: None).
Note: This does not work well with oblong objects
grab_exclusive (bool): If set true, each object can only be grabbed by
a single agent. If several agents attempt to
grab the same object, only the closer agents succeeds.
obj_in_game_metadata_keys (list of string): keys in metadata with boolean array saying
which objects are currently in the game. This is used in the event we are randomizing
number of objects
'''
def __init__(self, env, body_names, radius_multiplier=1.7,
grab_dist=None, grab_exclusive=False,
obj_in_game_metadata_keys=None):
super().__init__(env)
self.n_agents = self.unwrapped.n_agents
self.body_names = body_names
self.n_obj = len(body_names)
self.obj_in_game_metadata_keys = obj_in_game_metadata_keys
self.action_space.spaces['action_pull'] = (
Tuple([MultiDiscrete([2] * self.n_obj) for _ in range(self.n_agents)]))
self.observation_space = update_obs_space(
env, {'obj_pull': (self.n_obj, 1),
'you_pull': (self.n_obj, self.n_agents)})
self.grab_radius = radius_multiplier * self.metadata['box_size']
self.grab_dist = grab_dist
self.grab_exclusive = grab_exclusive
def observation(self, obs):
obs['you_pull'] = self.obj_grabbed.T
obs['obj_pull'] = np.any(obs['you_pull'], axis=-1, keepdims=True)
return obs
def reset(self):
obs = self.env.reset()
sim = self.unwrapped.sim
if self.obj_in_game_metadata_keys is not None:
self.actual_body_slice = np.concatenate([self.metadata[k] for k in self.obj_in_game_metadata_keys])
else:
self.actual_body_slice = np.ones((len(self.body_names))).astype(np.bool)
actual_body_names = list(compress(self.body_names, self.actual_body_slice))
self.n_obj = len(actual_body_names)
# Cache body ids
self.obj_body_idxs = np.array([sim.model.body_name2id(body_name) for body_name in actual_body_names])
self.agent_body_idxs = np.array([sim.model.body_name2id(f"agent{i}:particle") for i in range(self.n_agents)])
# Cache geom ids
self.obj_geom_ids = np.array([sim.model.geom_name2id(body_name) for body_name in actual_body_names])
self.agent_geom_ids = np.array([sim.model.geom_name2id(f'agent{i}:agent') for i in range(self.n_agents)])
# Cache constraint ids
self.agent_eq_ids = np.array(
[i for i, obj1 in enumerate(sim.model.eq_obj1id)
if sim.model.body_names[obj1] == f"agent{i}:particle"])
assert len(self.agent_eq_ids) == self.n_agents
# turn off equality constraints
sim.model.eq_active[self.agent_eq_ids] = 0
self.obj_grabbed = np.zeros((self.n_agents, self.n_obj), dtype=bool)
self.last_obj_grabbed = np.zeros((self.n_agents, self.n_obj), dtype=bool)
return self.observation(obs)
def grab_obj(self, action):
'''
Implements object grabbing for all agents
Args:
action: Action dictionary
'''
action_pull = action['action_pull'][:, self.actual_body_slice]
sim = self.unwrapped.sim
agent_pos = sim.data.body_xpos[self.agent_body_idxs]
obj_pos = sim.data.body_xpos[self.obj_body_idxs]
obj_width = sim.model.geom_size[self.obj_geom_ids]
obj_quat = sim.data.body_xquat[self.obj_body_idxs]
assert len(obj_width) == len(obj_quat), (
"Number of object widths must be equal to number of quaternions for direct distance calculation method. " +
"This might be caused by a body that contains several geoms.")
obj_dist = dist_pt_to_cuboid(agent_pos, obj_pos, obj_width, obj_quat)
allowed_and_desired = np.logical_and(action_pull, obj_dist <= self.grab_radius)
obj_dist_masked = obj_dist.copy() # Mask the obj dists to find a valid argmin
obj_dist_masked[~allowed_and_desired] = np.inf
if self.grab_exclusive:
closest_obj = np.zeros((self.n_agents,), dtype=int)
while np.any(obj_dist_masked < np.inf):
# find agent and object of closest object distance
agent_idx, obj_idx = np.unravel_index(np.argmin(obj_dist_masked), obj_dist_masked.shape)
# set closest object for this agent
closest_obj[agent_idx] = obj_idx
# ensure exclusivity of grabbing
obj_dist_masked[:, obj_idx] = np.inf
obj_dist_masked[agent_idx, :] = np.inf
# mark same object as undesired for all other agents
allowed_and_desired[:agent_idx, obj_idx] = False
allowed_and_desired[(agent_idx + 1):, obj_idx] = False
else:
closest_obj = np.argmin(obj_dist_masked, axis=-1)
valid_grabs = np.any(allowed_and_desired, axis=-1) # (n_agent,) which agents have valid grabs
# Turn on/off agents with valid grabs
sim.model.eq_active[self.agent_eq_ids] = valid_grabs
sim.model.eq_obj2id[self.agent_eq_ids] = self.obj_body_idxs[closest_obj]
# keep track of which object is being grabbed
self.obj_grabbed = np.zeros((self.n_agents, self.n_obj), dtype=bool)
agent_with_valid_grab = np.argwhere(valid_grabs)[:, 0]
self.obj_grabbed[agent_with_valid_grab, closest_obj[agent_with_valid_grab]] = 1
# If there are new grabs, then setup the weld constraint parameters
new_grabs = np.logical_and(
valid_grabs, np.any(self.obj_grabbed != self.last_obj_grabbed, axis=-1))
for agent_idx in np.argwhere(new_grabs)[:, 0]:
agent_rot = sim.data.body_xmat[self.agent_body_idxs[agent_idx]].reshape((3, 3))
obj_rot = sim.data.body_xmat[self.obj_body_idxs[closest_obj[agent_idx]]].reshape((3, 3))
# Need to use the geom xpos rather than the qpos
obj_pos = sim.data.body_xpos[self.obj_body_idxs[closest_obj[agent_idx]]]
agent_pos = sim.data.body_xpos[self.agent_body_idxs[agent_idx]]
grab_vec = agent_pos - obj_pos
if self.grab_dist is not None:
grab_vec = self.grab_dist / (1e-3 + np.linalg.norm(grab_vec)) * grab_vec
# The distance constraint needs to be rotated into the frame of reference of the agent
sim.model.eq_data[self.agent_eq_ids[agent_idx], :3] = np.matmul(agent_rot.T, grab_vec)
# The angle constraint is the difference between the agents frame and the objects frame
sim.model.eq_data[self.agent_eq_ids[agent_idx], 3:] = mat2quat(np.matmul(agent_rot.T, obj_rot))
self.last_obj_grabbed = self.obj_grabbed
def step(self, action):
self.grab_obj(action)
obs, rew, done, info = self.env.step(action)
return self.observation(obs), rew, done, info
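# Standalone numpy sketch (not called anywhere) of the exclusive-grab assignment
# in GrabObjWrapper.grab_obj above: repeatedly pick the globally closest
# (agent, object) pair among allowed grabs, then mask that agent's row and that
# object's column so neither can be matched again.
def _demo_exclusive_grab_assignment():
    obj_dist = np.array([[0.2, 0.9],
                         [0.3, 0.8]])
    allowed_and_desired = np.ones_like(obj_dist, dtype=bool)
    masked = obj_dist.copy()
    masked[~allowed_and_desired] = np.inf
    closest_obj = np.zeros(masked.shape[0], dtype=int)
    while np.any(masked < np.inf):
        agent_idx, obj_idx = np.unravel_index(np.argmin(masked), masked.shape)
        closest_obj[agent_idx] = obj_idx
        masked[:, obj_idx] = np.inf
        masked[agent_idx, :] = np.inf
    print(closest_obj)  # [0 1]: agent 0 takes object 0, so agent 1 falls back to object 1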
class GrabClosestWrapper(gym.ActionWrapper):
'''
Convert the action_pull (either grab or pull) to a binary action rather than having the
dimension of boxes. The grab wrapper will only grab the closest box, so we convert
the new action into an all 1's action.
'''
def __init__(self, env):
super().__init__(env)
self.action_space = deepcopy(self.action_space)
self.n_obj = len(self.action_space.spaces['action_pull'].spaces[0].nvec)
self.action_space.spaces['action_pull'] = (
Tuple([Discrete(2) for _ in range(self.unwrapped.n_agents)]))
def action(self, action):
action = deepcopy(action)
action['action_pull'] = np.repeat(action['action_pull'][:, None], self.n_obj, -1)
return action
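# Illustrative sketch (not called anywhere) of what GrabClosestWrapper.action
# does to the pull action, shown on plain numpy arrays: one binary pull decision
# per agent is broadcast to every object, and GrabObjWrapper above then only
# welds the closest one.
def _demo_grab_closest_action(n_obj=3):
    action_pull = np.array([1, 0])  # agent 0 wants to grab, agent 1 does not
    expanded = np.repeat(action_pull[:, None], n_obj, -1)
    print(expanded)
    # [[1 1 1]
    #  [0 0 0]]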
class LockObjWrapper(gym.Wrapper):
'''
Allows agents to lock objects at their current position.
Args:
body_names (list): list of body names that the agent can lock
radius_multiplier (float): How far away can this be activated (multiplier on box size)
agent_idx_allowed_to_lock (np array of ints): Indicies of agents that are allowed to lock.
Defaults to all
lock_type (string): Options are
any_lock: if any agent wants to lock an object it will get locked
all_lock: all agents that are close enough must want to lock the object
any_lock_specific: if any agent wants to lock an object it will get locked. However,
now the lock is agent specific, and only the agent that locked the object can unlock it.
all_lock_team_specific: like all_lock, but only team members of the agent that
locked the object can unlock it.
ac_obs_prefix (string): prefix for the action and observation keys. This is useful if using
the lock wrapper more than once.
obj_in_game_metadata_keys (list of string): keys in metadata with boolean array saying
which objects are currently in the game. This is used in the event we are randomizing
number of objects
agent_allowed_to_lock_keys (list of string): keys in obs determining whether agent is allowed
to lock a certain object. Each key should be a mask matrix of dim (n_agents, n_obj)
'''
def __init__(self, env, body_names, radius_multiplier=1.5, agent_idx_allowed_to_lock=None,
lock_type="any_lock", ac_obs_prefix='', obj_in_game_metadata_keys=None,
agent_allowed_to_lock_keys=None):
super().__init__(env)
self.n_agents = self.unwrapped.n_agents
self.n_obj = len(body_names)
self.body_names = body_names
self.agent_idx_allowed_to_lock = np.arange(self.n_agents) if agent_idx_allowed_to_lock is None else agent_idx_allowed_to_lock
self.lock_type = lock_type
self.ac_obs_prefix = ac_obs_prefix
self.obj_in_game_metadata_keys = obj_in_game_metadata_keys
self.agent_allowed_to_lock_keys = agent_allowed_to_lock_keys
self.action_space.spaces[f'action_{ac_obs_prefix}glue'] = (
Tuple([MultiDiscrete([2] * self.n_obj) for _ in range(self.n_agents)]))
self.observation_space = update_obs_space(env, {f'{ac_obs_prefix}obj_lock': (self.n_obj, 1),
f'{ac_obs_prefix}you_lock': (self.n_agents, self.n_obj, 1),
f'{ac_obs_prefix}team_lock': (self.n_agents, self.n_obj, 1)})
self.lock_radius = radius_multiplier*self.metadata['box_size']
self.obj_locked = np.zeros((self.n_obj,), dtype=int)
def observation(self, obs):
obs[f'{self.ac_obs_prefix}obj_lock'] = self.obj_locked[:, None]
you_lock = np.arange(self.n_agents)[:, None] == self.which_locked[None, :]
obs[f'{self.ac_obs_prefix}you_lock'] = np.expand_dims(you_lock * obs[f'{self.ac_obs_prefix}obj_lock'].T, axis=-1)
obs[f'{self.ac_obs_prefix}team_lock'] = np.zeros((self.n_agents, self.n_obj, 1))
for team in np.unique(self.metadata['team_index']):
team_mask = self.metadata['team_index'] == team
obs[f'{self.ac_obs_prefix}team_lock'][team_mask] = np.any(obs[f'{self.ac_obs_prefix}you_lock'][team_mask], 0)
return obs
def reset(self):
obs = self.env.reset()
sim = self.unwrapped.sim
if self.obj_in_game_metadata_keys is not None:
self.actual_body_slice = np.concatenate([self.metadata[k] for k in self.obj_in_game_metadata_keys])
else:
self.actual_body_slice = np.ones((len(self.body_names))).astype(np.bool)
actual_body_names = list(compress(self.body_names, self.actual_body_slice))
self.n_obj = len(actual_body_names)
# Cache ids
self.obj_body_idxs = np.array([sim.model.body_name2id(body_name) for body_name in actual_body_names])
self.obj_jnt_idxs = [np.where(sim.model.jnt_bodyid == body_idx)[0] for body_idx in self.obj_body_idxs]
self.obj_geom_ids = [np.where(sim.model.geom_bodyid == body_idx)[0] for body_idx in self.obj_body_idxs]
self.agent_body_idxs = np.array([sim.model.body_name2id(f"agent{i}:particle") for i in range(self.n_agents)])
self.agent_body_idxs = self.agent_body_idxs[self.agent_idx_allowed_to_lock]
self.agent_geom_ids = np.array([sim.model.geom_name2id(f'agent{i}:agent') for i in range(self.n_agents)])
self.agent_geom_ids = self.agent_geom_ids[self.agent_idx_allowed_to_lock]
self.unlock_objs()
self.obj_locked = np.zeros((self.n_obj,), dtype=bool)
self.which_locked = np.zeros((self.n_obj,), dtype=int)
if self.agent_allowed_to_lock_keys is not None:
self.agent_allowed_to_lock_mask = np.concatenate([obs[k] for k in self.agent_allowed_to_lock_keys])
else:
self.agent_allowed_to_lock_mask = np.ones((self.n_agents, self.n_obj))
return self.observation(obs)
def lock_obj(self, action_lock):
'''
Implements object gluing for all agents
Args:
lock: (n_agent, n_obj) boolean matrix
'''
sim = self.unwrapped.sim
action_lock = action_lock[self.agent_idx_allowed_to_lock]
action_lock = action_lock[:, self.actual_body_slice]
agent_pos = sim.data.body_xpos[self.agent_body_idxs]
obj_pos = sim.data.body_xpos[self.obj_body_idxs]
obj_width = sim.model.geom_size[np.concatenate(self.obj_geom_ids)]
obj_quat = sim.data.body_xquat[self.obj_body_idxs]
assert len(obj_width) == len(obj_quat), (
"Number of object widths must be equal to number of quaternions for direct distance calculation method. " +
"This might be caused by a body that contains several geoms.")
obj_dist = dist_pt_to_cuboid(agent_pos, obj_pos, obj_width, obj_quat)
allowed_and_desired = np.logical_and(action_lock, obj_dist <= self.lock_radius)
allowed_and_desired = np.logical_and(allowed_and_desired, self.agent_allowed_to_lock_mask)
allowed_and_not_desired = np.logical_and(1 - action_lock, obj_dist <= self.lock_radius)
allowed_and_not_desired = np.logical_and(allowed_and_not_desired, self.agent_allowed_to_lock_mask)
# objs_to_lock should _all_ be locked this round. new_objs_to_lock are objs that were not locked last round
# objs_to_unlock are objs that no one wants to lock this round
if self.lock_type == "any_lock": # If any agent wants to lock, the obj becomes locked
objs_to_lock = np.any(allowed_and_desired, axis=0)
objs_to_unlock = np.logical_and(np.any(allowed_and_not_desired, axis=0), ~objs_to_lock)
new_objs_to_lock = np.logical_and(objs_to_lock, ~self.obj_locked)
elif self.lock_type == "all_lock": # All agents that are close enough must want to lock the obj
objs_to_unlock = np.any(allowed_and_not_desired, axis=0)
objs_to_lock = np.logical_and(np.any(allowed_and_desired, axis=0), ~objs_to_unlock)
new_objs_to_lock = np.logical_and(objs_to_lock, ~self.obj_locked)
elif self.lock_type == "any_lock_specific": # If any agent wants to lock, the obj becomes locked
allowed_to_unlock = np.arange(self.n_agents)[:, None] == self.which_locked[None, :] # (n_agent, n_obj)
allowed_to_unlock = np.logical_and(allowed_to_unlock, self.obj_locked[None, :]) # Can't unlock an obj that isn't locked
allowed_and_not_desired = np.logical_and(allowed_to_unlock[self.agent_idx_allowed_to_lock],
allowed_and_not_desired)
objs_to_unlock = np.any(allowed_and_not_desired, axis=0)
objs_to_lock = np.any(allowed_and_desired, axis=0)
objs_to_relock = np.logical_and(objs_to_unlock, objs_to_lock)
            # NOTE: reconstructed line -- the first operand comes from the original text,
            # while the `~self.obj_locked` operand is an assumption consistent with the
            # "new objs were not locked last round" comment above.
            new_objs_to_lock = np.logical_and(np.logical_and(objs_to_lock, ~objs_to_relock),
                                              ~self.obj_locked)
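# --- Illustrative sketch (not part of the original wrapper) ---
# The boolean bookkeeping above is easier to follow on a toy example. The
# (n_agent, n_obj) matrices below are made-up values; the sketch only
# demonstrates the "any_lock" vs. "all_lock" rules described in the comments.
def _demo_lock_rules():
    # 2 agents, 3 objects; True = agent is close enough and wants to lock
    allowed_and_desired = np.array([[True, False, False],
                                    [True, True, False]])
    # True = agent is close enough and wants to unlock
    allowed_and_not_desired = np.array([[False, True, False],
                                        [False, False, True]])
    obj_locked = np.array([False, True, False])

    # "any_lock": a single willing agent is enough to lock an object
    objs_to_lock_any = np.any(allowed_and_desired, axis=0)
    objs_to_unlock_any = np.logical_and(np.any(allowed_and_not_desired, axis=0),
                                        ~objs_to_lock_any)

    # "all_lock": a single nearby objector vetoes the lock
    objs_to_unlock_all = np.any(allowed_and_not_desired, axis=0)
    objs_to_lock_all = np.logical_and(np.any(allowed_and_desired, axis=0),
                                      ~objs_to_unlock_all)

    # "new" locks are objects that were not locked on the previous step
    new_objs_to_lock = np.logical_and(objs_to_lock_any, ~obj_locked)
    return (objs_to_lock_any, objs_to_unlock_any,
            objs_to_lock_all, objs_to_unlock_all, new_objs_to_lock)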
import copy
import warnings
from collections.abc import Iterable, Iterator
import numpy as np
import scipy
import scipy.optimize
import scipy.stats
from stingray.exceptions import StingrayError
from stingray.gti import bin_intervals_from_gtis, check_gtis, cross_two_gtis
from stingray.largememory import createChunkedSpectra, saveData
from stingray.utils import genDataPath, rebin_data, rebin_data_log, simon
from .events import EventList
from .lightcurve import Lightcurve
from .utils import show_progress
# location of factorial moved between scipy versions
try:
from scipy.misc import factorial
except ImportError:
from scipy.special import factorial
try:
from pyfftw.interfaces.scipy_fft import fft, fftfreq
except ImportError:
warnings.warn("pyfftw not installed. Using standard scipy fft")
from scipy.fft import fft, fftfreq
__all__ = [
"Crossspectrum", "AveragedCrossspectrum", "coherence", "time_lag",
"cospectra_pvalue", "normalize_crossspectrum"
]
def normalize_crossspectrum(unnorm_power, tseg, nbins, nphots1, nphots2, norm="none", power_type="real"):
"""
Normalize the real part of the cross spectrum to Leahy, absolute rms^2,
fractional rms^2 normalization, or not at all.
Parameters
----------
unnorm_power: numpy.ndarray
The unnormalized cross spectrum.
tseg: int
The length of the Fourier segment, in seconds.
nbins : int
Number of bins in the light curve
nphots1 : int
Number of photons in the light curve no. 1
nphots2 : int
Number of photons in the light curve no. 2
Other parameters
----------------
norm : str
One of `'leahy'` (Leahy+83), `'frac'` (fractional rms), `'abs'`
(absolute rms)
power_type : str
One of `'real'` (real part), `'all'` (all complex powers), `'abs'`
(absolute value)
Returns
-------
    power: numpy.ndarray
The normalized co-spectrum (real part of the cross spectrum). For
'none' normalization, imaginary part is returned as well.
"""
# The "effective" counts/bin is the geometrical mean of the counts/bin
# of the two light curves. Same goes for counts/second in meanrate.
log_nphots1 = np.log(nphots1)
log_nphots2 = np.log(nphots2)
actual_nphots = np.float64(np.sqrt(np.exp(log_nphots1 + log_nphots2)))
if power_type == "all":
c_num = unnorm_power
elif power_type == "real":
c_num = unnorm_power.real
elif power_type == "absolute":
c_num = np.absolute(unnorm_power)
else:
raise ValueError("`power_type` not recognized!")
if norm.lower() == 'leahy':
power = c_num * 2. / actual_nphots
elif norm.lower() == 'frac':
meancounts1 = nphots1 / nbins
meancounts2 = nphots2 / nbins
actual_mean = np.sqrt(meancounts1 * meancounts2)
assert actual_mean > 0.0, \
"Mean count rate is <= 0. Something went wrong."
c = c_num / float(nbins ** 2.)
power = c * 2. * tseg / (actual_mean ** 2.0)
elif norm.lower() == 'abs':
meanrate = np.sqrt(nphots1 * nphots2) / tseg
power = c_num * 2. * meanrate / actual_nphots
elif norm.lower() == 'none':
power = unnorm_power
else:
raise ValueError("Value for `norm` not recognized.")
return power
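# --- Usage sketch (not part of the original module) ---
# A minimal example of how `normalize_crossspectrum` might be applied to a
# white-noise periodogram. The counts, bin size and segment length below are
# arbitrary assumptions chosen for illustration only.
def _example_normalize_crossspectrum():
    counts = np.random.poisson(100, 1024)        # synthetic counts per bin (assumed)
    unnorm = np.abs(np.fft.fft(counts)) ** 2     # unnormalized periodogram
    dt = 0.1                                     # assumed bin size in seconds
    tseg = 1024 * dt                             # segment length in seconds
    leahy = normalize_crossspectrum(unnorm, tseg, 1024,
                                    counts.sum(), counts.sum(), norm="leahy")
    # For pure Poisson noise the Leahy powers average to about 2
    # (excluding the zero-frequency bin).
    return leahy[1:].mean()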
def normalize_crossspectrum_gauss(
unnorm_power, mean_flux, var, dt, N, norm="none", power_type="real"):
"""
Normalize the real part of the cross spectrum to Leahy, absolute rms^2,
fractional rms^2 normalization, or not at all.
Parameters
----------
unnorm_power: numpy.ndarray
The unnormalized cross spectrum.
mean_flux: float
The mean flux of the light curve (if a cross spectrum, the geometrical
mean of the flux in the two channels)
var: float
The variance of the light curve (if a cross spectrum, the geometrical
mean of the variance in the two channels)
dt: float
The sampling time of the light curve
N: int
The number of bins in the light curve
Other parameters
----------------
norm : str
One of `'leahy'` (Leahy+83), `'frac'` (fractional rms), `'abs'`
(absolute rms)
power_type : str
One of `'real'` (real part), `'all'` (all complex powers), `'abs'`
(absolute value)
Returns
-------
    power: numpy.ndarray
The normalized co-spectrum (real part of the cross spectrum). For
'none' normalization, imaginary part is returned as well.
Examples
--------
>>> lc_c = np.random.poisson(10000, 10000)
>>> lc_c_var = 10000
>>> lc = lc_c / 17.3453
>>> lc_var = (100 / 17.3453)**2
>>> pds_c = np.absolute(np.fft.fft(lc_c))**2
>>> pds = np.absolute(np.fft.fft(lc))**2
>>> norm_c = normalize_crossspectrum_gauss(pds_c, np.mean(lc_c), lc_c_var, 0.1, len(lc_c), norm='leahy')
>>> norm = normalize_crossspectrum_gauss(pds, np.mean(lc), lc_var, 0.1, len(lc), norm='leahy')
>>> np.allclose(norm, norm_c)
True
>>> np.isclose(np.mean(norm[1:]), 2, atol=0.1)
True
>>> norm_c = normalize_crossspectrum_gauss(pds_c, np.mean(lc_c), np.mean(lc_c), 0.1, len(lc_c), norm='frac')
>>> norm = normalize_crossspectrum_gauss(pds, np.mean(lc), lc_var, 0.1, len(lc), norm='frac')
>>> np.allclose(norm, norm_c)
True
>>> norm_c = normalize_crossspectrum_gauss(pds_c, np.mean(lc_c), np.mean(lc_c), 0.1, len(lc_c), norm='abs')
>>> norm = normalize_crossspectrum_gauss(pds, np.mean(lc), lc_var, 0.1, len(lc), norm='abs')
>>> np.allclose(norm / np.mean(lc)**2, norm_c / np.mean(lc_c)**2)
True
>>> np.isclose(np.mean(norm_c[2:]), 2 * np.mean(lc_c * 0.1), rtol=0.1)
True
"""
# The "effective" counts/bin is the geometrical mean of the counts/bin
# of the two light curves. Same goes for counts/second in meanrate.
if power_type == "all":
c_num = unnorm_power
elif power_type == "real":
c_num = unnorm_power.real
elif power_type == "absolute":
c_num = np.absolute(unnorm_power)
else:
raise ValueError("`power_type` not recognized!")
common_factor = 2 * dt / N
rate_mean = mean_flux * dt
if norm.lower() == 'leahy':
norm = 2 / var / N
elif norm.lower() == 'frac':
norm = common_factor / rate_mean**2
elif norm.lower() == 'abs':
norm = common_factor
elif norm.lower() == 'none':
norm = 1
else:
raise ValueError("Value for `norm` not recognized.")
return norm * c_num
def _averaged_cospectra_cdf(xcoord, n):
"""
Function calculating the cumulative distribution function for
averaged cospectra, Equation 19 of Huppenkothen & Bachetti (2018).
Parameters
----------
xcoord : float or iterable
The cospectral power for which to calculate the CDF.
n : int
The number of averaged cospectra
Returns
-------
cdf : float
The value of the CDF at `xcoord` for `n` averaged cospectra
"""
if np.size(xcoord) == 1:
xcoord = [xcoord]
cdf = np.zeros_like(xcoord)
for i, x in enumerate(xcoord):
prefac_bottom1 = factorial(n - 1)
for j in range(n):
prefac_top = factorial(n - 1 + j)
prefac_bottom2 = factorial(
n - 1 - j) * factorial(j)
prefac_bottom3 = 2.0 ** (n + j)
prefac = prefac_top / (prefac_bottom1 * prefac_bottom2 *
prefac_bottom3)
gf = -j + n
first_fac = scipy.special.gamma(gf)
if x >= 0:
second_fac = scipy.special.gammaincc(gf, n * x) * first_fac
fac = 2.0 * first_fac - second_fac
else:
fac = scipy.special.gammaincc(gf, -n * x) * first_fac
cdf[i] += (prefac * fac)
if np.size(xcoord) == 1:
return cdf[i]
else:
continue
return cdf
def cospectra_pvalue(power, nspec):
"""
This function computes the single-trial p-value that the power was
observed under the null hypothesis that there is no signal in
the data.
Important: the underlying assumption that make this calculation valid
is that the powers in the power spectrum follow a Laplace distribution,
and this requires that:
1. the co-spectrum is normalized according to [Leahy 1983]_
2. there is only white noise in the light curve. That is, there is no
aperiodic variability that would change the overall shape of the power
spectrum.
Also note that the p-value is for a *single trial*, i.e. the power
currently being tested. If more than one power or more than one power
spectrum are being tested, the resulting p-value must be corrected for the
number of trials (Bonferroni correction).
Mathematical formulation in [Huppenkothen 2017]_.
Parameters
----------
power : float
The squared Fourier amplitude of a spectrum to be evaluated
nspec : int
The number of spectra or frequency bins averaged in ``power``.
This matters because averaging spectra or frequency bins increases
the signal-to-noise ratio, i.e. makes the statistical distributions
of the noise narrower, such that a smaller power might be very
significant in averaged spectra even though it would not be in a single
power spectrum.
Returns
-------
pval : float
The classical p-value of the observed power being consistent with
the null hypothesis of white noise
References
----------
* .. [Leahy 1983] https://ui.adsabs.harvard.edu/#abs/1983ApJ...266..160L/abstract
* .. [Huppenkothen 2017] http://adsabs.harvard.edu/abs/2018ApJS..236...13H
"""
if not np.all(np.isfinite(power)):
raise ValueError("power must be a finite floating point number!")
# if power < 0:
# raise ValueError("power must be a positive real number!")
if not np.isfinite(nspec):
raise ValueError("nspec must be a finite integer number")
if not np.isclose(nspec % 1, 0):
raise ValueError("nspec must be an integer number!")
if nspec < 1:
raise ValueError("nspec must be larger or equal to 1")
elif nspec == 1:
lapl = scipy.stats.laplace(0, 1)
pval = lapl.sf(power)
elif nspec > 50:
exp_sigma = np.sqrt(2) / np.sqrt(nspec)
gauss = scipy.stats.norm(0, exp_sigma)
pval = gauss.sf(power)
else:
pval = 1. - _averaged_cospectra_cdf(power, nspec)
return pval
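# --- Usage sketch (not part of the original module) ---
# Hedged example of `cospectra_pvalue`. The power value and the number of
# averaged cospectra below are arbitrary assumptions; the call is only
# meaningful for Leahy-normalized, white-noise-dominated cospectra, as the
# docstring above explains.
def _example_cospectra_pvalue():
    power = 4.5   # assumed Leahy-normalized cospectral power
    nspec = 10    # assumed number of averaged cospectra
    pval = cospectra_pvalue(power, nspec)
    # A small p-value means the power is unlikely under pure white noise.
    return pval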
def coherence(lc1, lc2):
"""
Estimate coherence function of two light curves.
For details on the definition of the coherence, see Vaughan and Nowak,
1996 [#]_.
Parameters
----------
lc1: :class:`stingray.Lightcurve` object
The first light curve data for the channel of interest.
lc2: :class:`stingray.Lightcurve` object
The light curve data for reference band
Returns
-------
coh : ``np.ndarray``
The array of coherence versus frequency
References
----------
.. [#] http://iopscience.iop.org/article/10.1086/310430/pdf
"""
if not isinstance(lc1, Lightcurve):
raise TypeError("lc1 must be a lightcurve.Lightcurve object")
if not isinstance(lc2, Lightcurve):
raise TypeError("lc2 must be a lightcurve.Lightcurve object")
cs = Crossspectrum(lc1, lc2, norm='none')
return cs.coherence()
def time_lag(lc1, lc2):
"""
Estimate the time lag of two light curves.
Calculate time lag and uncertainty.
Equation from Bendat & Piersol, 2011 [bendat-2011]_.
Returns
-------
lag : np.ndarray
The time lag
lag_err : np.ndarray
The uncertainty in the time lag
References
----------
.. [bendat-2011] https://www.wiley.com/en-us/Random+Data%3A+Analysis+and+Measurement+Procedures%2C+4th+Edition-p-9780470248775
"""
if not isinstance(lc1, Lightcurve):
raise TypeError("lc1 must be a lightcurve.Lightcurve object")
if not isinstance(lc2, Lightcurve):
raise TypeError("lc2 must be a lightcurve.Lightcurve object")
cs = Crossspectrum(lc1, lc2, norm='none')
lag = cs.time_lag()
return lag
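# --- Usage sketch (not part of the original module) ---
# The two helpers above are thin wrappers around `Crossspectrum`. The example
# below builds two synthetic, uncorrelated Poisson light curves; all numbers
# are assumptions for illustration.
def _example_coherence_and_time_lag():
    dt = 0.1
    time = np.arange(0, 100, dt)                 # assumed 100 s of data
    counts1 = np.random.poisson(50, time.size)
    counts2 = np.random.poisson(50, time.size)
    lc1 = Lightcurve(time, counts1, dt=dt, skip_checks=True)
    lc2 = Lightcurve(time, counts2, dt=dt, skip_checks=True)
    coh = coherence(lc1, lc2)    # frequency-dependent coherence
    lag = time_lag(lc1, lc2)     # frequency-dependent time lag in seconds
    return coh, lag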
class Crossspectrum(object):
"""
Make a cross spectrum from a (binned) light curve.
You can also make an empty :class:`Crossspectrum` object to populate with your
own Fourier-transformed data (this can sometimes be useful when making
binned power spectra). Stingray uses the scipy.fft standards for the sign
of the Nyquist frequency.
Parameters
----------
data1: :class:`stingray.Lightcurve` or :class:`stingray.events.EventList`, optional, default ``None``
The first light curve data for the channel/band of interest.
data2: :class:`stingray.Lightcurve` or :class:`stingray.events.EventList`, optional, default ``None``
The light curve data for the reference band.
norm: {``frac``, ``abs``, ``leahy``, ``none``}, default ``none``
The normalization of the (real part of the) cross spectrum.
power_type: string, optional, default ``real``
Parameter to choose among complete, real part and magnitude of the cross spectrum.
fullspec: boolean, optional, default ``False``
        If False, keep only the positive frequencies; if True, keep all of them.
Other Parameters
----------------
gti: 2-d float array
``[[gti0_0, gti0_1], [gti1_0, gti1_1], ...]`` -- Good Time intervals.
This choice overrides the GTIs in the single light curves. Use with
care!
lc1: :class:`stingray.Lightcurve`object OR iterable of :class:`stingray.Lightcurve` objects
For backwards compatibility only. Like ``data1``, but no
:class:`stingray.events.EventList` objects allowed
lc2: :class:`stingray.Lightcurve`object OR iterable of :class:`stingray.Lightcurve` objects
For backwards compatibility only. Like ``data2``, but no
:class:`stingray.events.EventList` objects allowed
dt: float
The time resolution of the light curve. Only needed when constructing
light curves in the case where ``data1``, ``data2`` are
:class:`EventList` objects
Attributes
----------
freq: numpy.ndarray
The array of mid-bin frequencies that the Fourier transform samples
power: numpy.ndarray
The array of cross spectra (complex numbers)
power_err: numpy.ndarray
The uncertainties of ``power``.
An approximation for each bin given by ``power_err= power/sqrt(m)``.
Where ``m`` is the number of power averaged in each bin (by frequency
binning, or averaging more than one spectra). Note that for a single
realization (``m=1``) the error is equal to the power.
df: float
The frequency resolution
m: int
The number of averaged cross-spectra amplitudes in each bin.
n: int
The number of data points/time bins in one segment of the light
curves.
nphots1: float
The total number of photons in light curve 1
nphots2: float
The total number of photons in light curve 2
"""
def __init__(self, data1=None, data2=None, norm='none', gti=None,
lc1=None, lc2=None, power_type="real", dt=None, fullspec=False):
if isinstance(norm, str) is False:
raise TypeError("norm must be a string")
if norm.lower() not in ["frac", "abs", "leahy", "none"]:
raise ValueError("norm must be 'frac', 'abs', 'leahy', or 'none'!")
self.norm = norm.lower()
# check if input data is a Lightcurve object, if not make one or
# make an empty Crossspectrum object if lc1 == ``None`` or lc2 == ``None``
if lc1 is not None or lc2 is not None:
warnings.warn("The lcN keywords are now deprecated. Use dataN "
"instead", DeprecationWarning)
# for backwards compatibility
if data1 is None:
data1 = lc1
if data2 is None:
data2 = lc2
if data1 is None or data2 is None:
if data1 is not None or data2 is not None:
raise TypeError("You can't do a cross spectrum with just one "
"light curve!")
else:
self.freq = None
self.power = None
self.power_err = None
self.df = None
self.nphots1 = None
self.nphots2 = None
self.m = 1
self.n = None
return
if (isinstance(data1, EventList) or isinstance(data2, EventList)) and \
dt is None:
raise ValueError("If using event lists, please specify the bin "
"time to generate lightcurves.")
if not isinstance(data1, EventList):
lc1 = data1
else:
lc1 = data1.to_lc(dt)
if not isinstance(data2, EventList):
lc2 = data2
elif isinstance(data2, EventList) and data2 is not data1:
lc2 = data2.to_lc(dt)
elif data2 is data1:
lc2 = lc1
self.gti = gti
self.lc1 = lc1
self.lc2 = lc2
self.power_type = power_type
self.fullspec = fullspec
self._make_crossspectrum(lc1, lc2, fullspec)
# These are needed to calculate coherence
self._make_auxil_pds(lc1, lc2)
def _make_auxil_pds(self, lc1, lc2):
"""
Helper method to create the power spectrum of both light curves
independently.
Parameters
----------
lc1, lc2 : :class:`stingray.Lightcurve` objects
Two light curves used for computing the cross spectrum.
"""
if lc1 is not lc2 and isinstance(lc1, Lightcurve):
self.pds1 = Crossspectrum(lc1, lc1, norm='none')
self.pds2 = Crossspectrum(lc2, lc2, norm='none')
def _make_crossspectrum(self, lc1, lc2, fullspec=False):
"""
Auxiliary method computing the normalized cross spectrum from two
light curves. This includes checking for the presence of and
applying Good Time Intervals, computing the unnormalized Fourier
cross-amplitude, and then renormalizing using the required
normalization. Also computes an uncertainty estimate on the cross
spectral powers.
Parameters
----------
lc1, lc2 : :class:`stingray.Lightcurve` objects
Two light curves used for computing the cross spectrum.
fullspec: boolean, default ``False``
Return full frequency array (True) or just positive frequencies (False)
"""
# make sure the inputs work!
if not isinstance(lc1, Lightcurve):
raise TypeError("lc1 must be a lightcurve.Lightcurve object")
if not isinstance(lc2, Lightcurve):
raise TypeError("lc2 must be a lightcurve.Lightcurve object")
if self.lc2.mjdref != self.lc1.mjdref:
raise ValueError("MJDref is different in the two light curves")
# Then check that GTIs make sense
if self.gti is None:
self.gti = cross_two_gtis(lc1.gti, lc2.gti)
check_gtis(self.gti)
if self.gti.shape[0] != 1:
raise TypeError("Non-averaged Cross Spectra need "
"a single Good Time Interval")
lc1 = lc1.split_by_gti()[0]
lc2 = lc2.split_by_gti()[0]
# total number of photons is the sum of the
# counts in the light curve
self.meancounts1 = lc1.meancounts
self.meancounts2 = lc2.meancounts
self.nphots1 = np.float64(np.sum(lc1.counts))
self.nphots2 = np.float64(np.sum(lc2.counts))
self.err_dist = 'poisson'
if lc1.err_dist == 'poisson':
self.var1 = lc1.meancounts
else:
self.var1 = np.mean(lc1.counts_err) ** 2
self.err_dist = 'gauss'
if lc2.err_dist == 'poisson':
self.var2 = lc2.meancounts
else:
self.var2 = np.mean(lc2.counts_err) ** 2
self.err_dist = 'gauss'
if lc1.n != lc2.n:
raise StingrayError("Light curves do not have same number "
"of time bins per segment.")
# If dt differs slightly, its propagated error must not be more than
# 1/100th of the bin
if not np.isclose(lc1.dt, lc2.dt, rtol=0.01 * lc1.dt / lc1.tseg):
raise StingrayError("Light curves do not have same time binning "
"dt.")
# In case a small difference exists, ignore it
lc1.dt = lc2.dt
self.dt = lc1.dt
self.n = lc1.n
# the frequency resolution
self.df = 1.0 / lc1.tseg
# the number of averaged periodograms in the final output
# This should *always* be 1 here
self.m = 1
# make the actual Fourier transform and compute cross spectrum
self.freq, self.unnorm_power = self._fourier_cross(lc1, lc2, fullspec)
# If co-spectrum is desired, normalize here. Otherwise, get raw back
# with the imaginary part still intact.
self.power = self._normalize_crossspectrum(self.unnorm_power, lc1.tseg)
if lc1.err_dist.lower() != lc2.err_dist.lower():
            simon("Your lightcurves have different statistics. "
"The errors in the Crossspectrum will be incorrect.")
elif lc1.err_dist.lower() != "poisson":
            simon("Looks like your lightcurve statistic is not poisson. "
"The errors in the Powerspectrum will be incorrect.")
if self.__class__.__name__ in ['Powerspectrum',
'AveragedPowerspectrum']:
self.power_err = self.power / np.sqrt(self.m)
elif self.__class__.__name__ in ['Crossspectrum',
'AveragedCrossspectrum']:
# This is clearly a wild approximation.
simon("Errorbars on cross spectra are not thoroughly tested. "
"Please report any inconsistencies.")
unnorm_power_err = np.sqrt(2) / np.sqrt(self.m) # Leahy-like
unnorm_power_err /= (2 / np.sqrt(self.nphots1 * self.nphots2))
unnorm_power_err += np.zeros_like(self.power)
self.power_err = \
self._normalize_crossspectrum(unnorm_power_err, lc1.tseg)
else:
self.power_err = np.zeros(len(self.power))
def _fourier_cross(self, lc1, lc2, fullspec=False):
"""
Fourier transform the two light curves, then compute the cross spectrum.
Computed as CS = lc1 x lc2* (where lc2 is the one that gets
complex-conjugated). The user has the option to either get just the
positive frequencies or the full spectrum.
Parameters
----------
lc1: :class:`stingray.Lightcurve` object
            One light curve to be Fourier transformed. This is the band of
interest or channel of interest.
lc2: :class:`stingray.Lightcurve` object
Another light curve to be Fourier transformed.
This is the reference band.
fullspec: boolean. Default is False.
If True, return the whole array of frequencies, or only positive frequencies (False).
Returns
-------
        freq: numpy.ndarray
            The frequencies at which the cross spectrum is sampled
        cross: numpy.ndarray
            The unnormalized cross spectrum, ``fft(lc1) * conj(fft(lc2))``
"""
fourier_1 = fft(lc1.counts) # do Fourier transform 1
fourier_2 = fft(lc2.counts) # do Fourier transform 2
freqs = scipy.fft.fftfreq(lc1.n, lc1.dt)
cross = np.multiply(fourier_1, np.conj(fourier_2))
if fullspec is True:
return freqs, cross
else:
return freqs[freqs > 0], cross[freqs > 0]
def rebin(self, df=None, f=None, method="mean"):
"""
Rebin the cross spectrum to a new frequency resolution ``df``.
Parameters
----------
df: float
The new frequency resolution
Other Parameters
----------------
f: float
the rebin factor. If specified, it substitutes df with ``f*self.df``
Returns
-------
bin_cs = :class:`Crossspectrum` (or one of its subclasses) object
The newly binned cross spectrum or power spectrum.
Note: this object will be of the same type as the object
that called this method. For example, if this method is called
from :class:`AveragedPowerspectrum`, it will return an object of class
:class:`AveragedPowerspectrum`, too.
"""
if f is None and df is None:
raise ValueError('You need to specify at least one between f and '
'df')
elif f is not None:
df = f * self.df
# rebin cross spectrum to new resolution
binfreq, bincs, binerr, step_size = \
rebin_data(self.freq, self.power, df, self.power_err,
method=method, dx=self.df)
# make an empty cross spectrum object
# note: syntax deliberate to work with subclass Powerspectrum
bin_cs = copy.copy(self)
# store the binned periodogram in the new object
bin_cs.freq = binfreq
bin_cs.power = bincs
bin_cs.df = df
bin_cs.n = self.n
bin_cs.norm = self.norm
bin_cs.nphots1 = self.nphots1
bin_cs.power_err = binerr
if hasattr(self, 'unnorm_power'):
_, binpower_unnorm, _, _ = \
rebin_data(self.freq, self.unnorm_power, df,
method=method, dx=self.df)
bin_cs.unnorm_power = binpower_unnorm
if hasattr(self, 'cs_all'):
cs_all = []
for c in self.cs_all:
cs_all.append(c.rebin(df=df, f=f, method=method))
bin_cs.cs_all = cs_all
if hasattr(self, 'pds1'):
bin_cs.pds1 = self.pds1.rebin(df=df, f=f, method=method)
if hasattr(self, 'pds2'):
bin_cs.pds2 = self.pds2.rebin(df=df, f=f, method=method)
try:
bin_cs.nphots2 = self.nphots2
except AttributeError:
if self.type == 'powerspectrum':
pass
else:
raise AttributeError(
'Spectrum has no attribute named nphots2.')
bin_cs.m = np.rint(step_size * self.m)
return bin_cs
def _normalize_crossspectrum(self, unnorm_power, tseg):
"""
Normalize the real part of the cross spectrum to Leahy, absolute rms^2,
fractional rms^2 normalization, or not at all.
Parameters
----------
unnorm_power: numpy.ndarray
The unnormalized cross spectrum.
tseg: int
The length of the Fourier segment, in seconds.
Returns
-------
        power: numpy.ndarray
The normalized co-spectrum (real part of the cross spectrum). For
'none' normalization, imaginary part is returned as well.
"""
if self.err_dist == 'poisson':
return normalize_crossspectrum(
unnorm_power, tseg, self.n, self.nphots1, self.nphots2, self.norm,
self.power_type)
return normalize_crossspectrum_gauss(
unnorm_power, np.sqrt(self.meancounts1 * self.meancounts2),
np.sqrt(self.var1 * self.var2),
dt=self.dt,
N=self.n,
norm=self.norm,
power_type=self.power_type)
def rebin_log(self, f=0.01):
"""
Logarithmic rebin of the periodogram.
The new frequency depends on the previous frequency
modified by a factor f:
.. math::
d\\nu_j = d\\nu_{j-1} (1+f)
Parameters
----------
f: float, optional, default ``0.01``
parameter that steers the frequency resolution
Returns
-------
new_spec : :class:`Crossspectrum` (or one of its subclasses) object
The newly binned cross spectrum or power spectrum.
Note: this object will be of the same type as the object
that called this method. For example, if this method is called
        from :class:`AveragedPowerspectrum`, it will return an object of class
        :class:`AveragedPowerspectrum`, too.
        """
binfreq, binpower, binpower_err, nsamples = \
rebin_data_log(self.freq, self.power, f,
y_err=self.power_err, dx=self.df)
# the frequency resolution
df = np.diff(binfreq)
# shift the lower bin edges to the middle of the bin and drop the
# last right bin edge
binfreq = binfreq[:-1] + df / 2
new_spec = copy.copy(self)
new_spec.freq = binfreq
new_spec.power = binpower
new_spec.power_err = binpower_err
new_spec.m = nsamples * self.m
if hasattr(self, 'unnorm_power'):
_, binpower_unnorm, _, _ = \
rebin_data_log(self.freq, self.unnorm_power, f, dx=self.df)
new_spec.unnorm_power = binpower_unnorm
if hasattr(self, 'pds1'):
new_spec.pds1 = self.pds1.rebin_log(f)
if hasattr(self, 'pds2'):
new_spec.pds2 = self.pds2.rebin_log(f)
if hasattr(self, 'cs_all'):
cs_all = []
for c in self.cs_all:
cs_all.append(c.rebin_log(f))
new_spec.cs_all = cs_all
return new_spec
def coherence(self):
""" Compute Coherence function of the cross spectrum.
Coherence is defined in Vaughan and Nowak, 1996 [#]_.
It is a Fourier frequency dependent measure of the linear correlation
between time series measured simultaneously in two energy channels.
Returns
-------
coh : numpy.ndarray
Coherence function
References
----------
.. [#] http://iopscience.iop.org/article/10.1086/310430/pdf
"""
# this computes the averaged power spectrum, but using the
# cross spectrum code to avoid circular imports
return self.unnorm_power.real / (self.pds1.power.real *
self.pds2.power.real)
def _phase_lag(self):
"""Return the fourier phase lag of the cross spectrum."""
return np.angle(self.unnorm_power)
def time_lag(self):
"""
Calculate the fourier time lag of the cross spectrum. The time lag is
        calculated using the centers of the frequency bins.
"""
if self.__class__ in [Crossspectrum, AveragedCrossspectrum]:
ph_lag = self._phase_lag()
return ph_lag / (2 * np.pi * self.freq)
else:
raise AttributeError("Object has no attribute named 'time_lag' !")
def plot(self, labels=None, axis=None, title=None, marker='-', save=False,
filename=None):
"""
Plot the amplitude of the cross spectrum vs. the frequency using ``matplotlib``.
Parameters
----------
labels : iterable, default ``None``
A list of tuple with ``xlabel`` and ``ylabel`` as strings.
axis : list, tuple, string, default ``None``
Parameter to set axis properties of the ``matplotlib`` figure. For example
it can be a list like ``[xmin, xmax, ymin, ymax]`` or any other
            acceptable argument for the ``matplotlib.pyplot.axis()`` method.
title : str, default ``None``
The title of the plot.
marker : str, default '-'
Line style and color of the plot. Line styles and colors are
combined in a single format string, as in ``'bo'`` for blue
circles. See ``matplotlib.pyplot.plot`` for more options.
save : boolean, optional, default ``False``
If ``True``, save the figure with specified filename.
filename : str
File name of the image to save. Depends on the boolean ``save``.
"""
try:
import matplotlib.pyplot as plt
except ImportError:
raise ImportError("Matplotlib required for plot()")
plt.figure('crossspectrum')
plt.plot(self.freq,
np.abs(self.power),
marker,
color='b',
label='Amplitude')
plt.plot(self.freq,
np.abs(self.power.real),
marker,
color='r',
alpha=0.5,
label='Real Part')
plt.plot(self.freq,
np.abs(self.power.imag),
marker,
color='g',
alpha=0.5,
label='Imaginary Part')
if labels is not None:
try:
plt.xlabel(labels[0])
plt.ylabel(labels[1])
except TypeError:
simon("``labels`` must be either a list or tuple with "
"x and y labels.")
raise
except IndexError:
simon("``labels`` must have two labels for x and y "
"axes.")
# Not raising here because in case of len(labels)==1, only
# x-axis will be labelled.
plt.legend(loc='best')
if axis is not None:
plt.axis(axis)
if title is not None:
plt.title(title)
if save:
if filename is None:
plt.savefig('spec.png')
else:
plt.savefig(filename)
else:
plt.show(block=False)
def classical_significances(self, threshold=1, trial_correction=False):
"""
Compute the classical significances for the powers in the power
spectrum, assuming an underlying noise distribution that follows a
chi-square distributions with 2M degrees of freedom, where M is the
number of powers averaged in each bin.
Note that this function will *only* produce correct results when the
following underlying assumptions are fulfilled:
1. The power spectrum is Leahy-normalized
2. There is no source of variability in the data other than the
periodic signal to be determined with this method. This is important!
If there are other sources of (aperiodic) variability in the data, this
method will *not* produce correct results, but instead produce a large
number of spurious false positive detections!
3. There are no significant instrumental effects changing the
statistical distribution of the powers (e.g. pile-up or dead time)
By default, the method produces ``(index,p-values)`` for all powers in
the power spectrum, where index is the numerical index of the power in
question. If a ``threshold`` is set, then only powers with p-values
        *below* that threshold are returned, together with their respective indices. If
``trial_correction`` is set to ``True``, then the threshold will be corrected
for the number of trials (frequencies) in the power spectrum before
being used.
Parameters
----------
threshold : float, optional, default ``1``
The threshold to be used when reporting p-values of potentially
significant powers. Must be between 0 and 1.
Default is ``1`` (all p-values will be reported).
trial_correction : bool, optional, default ``False``
A Boolean flag that sets whether the ``threshold`` will be corrected
by the number of frequencies before being applied. This decreases
the ``threshold`` (p-values need to be lower to count as significant).
Default is ``False`` (report all powers) though for any application
            where ``threshold`` is set to something meaningful, this should also
be applied!
Returns
-------
pvals : iterable
A list of ``(index, p-value)`` tuples for all powers that have p-values
lower than the threshold specified in ``threshold``.
"""
if not self.norm == "leahy":
raise ValueError("This method only works on "
"Leahy-normalized power spectra!")
if np.size(self.m) == 1:
# calculate p-values for all powers
# leave out zeroth power since it just encodes the number of photons!
pv = np.array([cospectra_pvalue(power, self.m)
for power in self.power])
else:
pv = np.array([cospectra_pvalue(power, m)
for power, m in zip(self.power, self.m)])
# if trial correction is used, then correct the threshold for
# the number of powers in the power spectrum
if trial_correction:
threshold /= self.power.shape[0]
# need to add 1 to the indices to make up for the fact that
# we left out the first power above!
indices = np.where(pv < threshold)[0]
pvals = np.vstack([pv[indices], indices])
return pvals
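# --- Usage sketch (not part of the original module) ---
# End-to-end example of the class above on synthetic data. All values are
# assumptions for illustration; `classical_significances` additionally assumes
# Leahy normalization and pure white noise, as documented in the method.
def _example_crossspectrum():
    dt = 0.01
    time = np.arange(0, 50, dt)
    counts1 = np.random.poisson(20, time.size)
    counts2 = np.random.poisson(20, time.size)
    lc1 = Lightcurve(time, counts1, dt=dt, skip_checks=True)
    lc2 = Lightcurve(time, counts2, dt=dt, skip_checks=True)
    cs = Crossspectrum(lc1, lc2, norm='leahy')
    cs_coarse = cs.rebin(df=0.5)                       # coarser frequency grid
    pvals = cs.classical_significances(threshold=0.01,
                                       trial_correction=True)
    return cs_coarse, pvals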
class AveragedCrossspectrum(Crossspectrum):
"""
Make an averaged cross spectrum from a light curve by segmenting two
light curves, Fourier-transforming each segment and then averaging the
resulting cross spectra.
Parameters
----------
data1: :class:`stingray.Lightcurve`object OR iterable of :class:`stingray.Lightcurve` objects OR :class:`stingray.EventList` object
A light curve from which to compute the cross spectrum. In some cases, this would
be the light curve of the wavelength/energy/frequency band of interest.
data2: :class:`stingray.Lightcurve`object OR iterable of :class:`stingray.Lightcurve` objects OR :class:`stingray.EventList` object
A second light curve to use in the cross spectrum. In some cases, this would be
the wavelength/energy/frequency reference band to compare the band of interest with.
segment_size: float
The size of each segment to average. Note that if the total
duration of each :class:`Lightcurve` object in ``lc1`` or ``lc2`` is not an
integer multiple of the ``segment_size``, then any fraction left-over
at the end of the time series will be lost. Otherwise you introduce
artifacts.
norm: {``frac``, ``abs``, ``leahy``, ``none``}, default ``none``
The normalization of the (real part of the) cross spectrum.
Other Parameters
----------------
gti: 2-d float array
``[[gti0_0, gti0_1], [gti1_0, gti1_1], ...]`` -- Good Time intervals.
This choice overrides the GTIs in the single light curves. Use with
care!
dt : float
The time resolution of the light curve. Only needed when constructing
light curves in the case where data1 or data2 are of :class:EventList
power_type: string, optional, default ``real``
Parameter to choose among complete, real part and magnitude of
the cross spectrum.
silent : bool, default False
Do not show a progress bar when generating an averaged cross spectrum.
Useful for the batch execution of many spectra
lc1: :class:`stingray.Lightcurve`object OR iterable of :class:`stingray.Lightcurve` objects
For backwards compatibility only. Like ``data1``, but no
:class:`stingray.events.EventList` objects allowed
lc2: :class:`stingray.Lightcurve`object OR iterable of :class:`stingray.Lightcurve` objects
For backwards compatibility only. Like ``data2``, but no
:class:`stingray.events.EventList` objects allowed
fullspec: boolean, optional, default ``False``
If True, return the full array of frequencies, otherwise return just the
positive frequencies.
large_data : bool, default False
Use only for data larger than 10**7 data points!! Uses zarr and dask for computation.
save_all : bool, default False
Save all intermediate PDSs used for the final average. Use with care.
This is likely to fill up your RAM on medium-sized datasets, and to
slow down the computation when rebinning.
Attributes
----------
freq: numpy.ndarray
The array of mid-bin frequencies that the Fourier transform samples
power: numpy.ndarray
The array of cross spectra
power_err: numpy.ndarray
The uncertainties of ``power``.
An approximation for each bin given by ``power_err= power/sqrt(m)``.
Where ``m`` is the number of power averaged in each bin (by frequency
binning, or averaging powerspectrum). Note that for a single
realization (``m=1``) the error is equal to the power.
df: float
The frequency resolution
m: int
The number of averaged cross spectra
n: int
The number of time bins per segment of light curve
nphots1: float
The total number of photons in the first (interest) light curve
nphots2: float
The total number of photons in the second (reference) light curve
gti: 2-d float array
``[[gti0_0, gti0_1], [gti1_0, gti1_1], ...]`` -- Good Time intervals.
They are calculated by taking the common GTI between the
two light curves
"""
def __init__(self, data1=None, data2=None, segment_size=None, norm='none',
gti=None, power_type="real", silent=False, lc1=None, lc2=None,
dt=None, fullspec=False, large_data=False, save_all=False):
if lc1 is not None or lc2 is not None:
warnings.warn("The lcN keywords are now deprecated. Use dataN "
"instead", DeprecationWarning)
# for backwards compatibility
if data1 is None:
data1 = lc1
if data2 is None:
data2 = lc2
if segment_size is None and data1 is not None:
raise ValueError("segment_size must be specified")
if segment_size is not None and not np.isfinite(segment_size):
raise ValueError("segment_size must be finite!")
if large_data and data1 is not None and data2 is not None:
if isinstance(data1, EventList):
input_data = 'EventList'
elif isinstance(data1, Lightcurve):
input_data = 'Lightcurve'
chunks = int(np.rint(segment_size // data1.dt))
segment_size = chunks * data1.dt
else:
raise ValueError(
f'Invalid input data type: {type(data1).__name__}')
dir_path1 = saveData(data1, persist=False, chunks=chunks)
dir_path2 = saveData(data2, persist=False, chunks=chunks)
data_path1 = genDataPath(dir_path1)
data_path2 = genDataPath(dir_path2)
spec = createChunkedSpectra(input_data,
'AveragedCrossspectrum',
data_path=list(data_path1 +
data_path2),
segment_size=segment_size,
norm=norm,
gti=gti,
power_type=power_type,
silent=silent,
dt=dt)
for key, val in spec.__dict__.items():
setattr(self, key, val)
return
self.type = "crossspectrum"
self.segment_size = segment_size
self.power_type = power_type
self.fullspec = fullspec
self.show_progress = not silent
self.dt = dt
self.save_all = save_all
if isinstance(data1, EventList):
lengths = data1.gti[:, 1] - data1.gti[:, 0]
good = lengths >= segment_size
data1.gti = data1.gti[good]
data1 = list(data1.to_lc_list(dt))
if isinstance(data2, EventList):
lengths = data2.gti[:, 1] - data2.gti[:, 0]
good = lengths >= segment_size
data2.gti = data2.gti[good]
data2 = list(data2.to_lc_list(dt))
Crossspectrum.__init__(self, data1, data2, norm, gti=gti,
power_type=power_type, dt=dt, fullspec=fullspec)
return
def _make_auxil_pds(self, lc1, lc2):
"""
Helper method to create the power spectrum of both light curves
independently.
Parameters
----------
lc1, lc2 : :class:`stingray.Lightcurve` objects
Two light curves used for computing the cross spectrum.
"""
is_event = isinstance(lc1, EventList)
is_lc = isinstance(lc1, Lightcurve)
is_lc_iter = isinstance(lc1, Iterator)
is_lc_list = isinstance(lc1, Iterable) and not is_lc_iter
# A way to say that this is actually not a power spectrum
if self.type != "powerspectrum" and \
(lc1 is not lc2) and (is_event or is_lc or is_lc_list):
self.pds1 = AveragedCrossspectrum(lc1, lc1,
segment_size=self.segment_size,
norm='none', gti=self.gti,
power_type=self.power_type,
dt=self.dt, fullspec=self.fullspec,
save_all=self.save_all)
self.pds2 = AveragedCrossspectrum(lc2, lc2,
segment_size=self.segment_size,
norm='none', gti=self.gti,
power_type=self.power_type,
dt=self.dt, fullspec=self.fullspec,
save_all=self.save_all)
def _make_segment_spectrum(self, lc1, lc2, segment_size, silent=False):
"""
Split the light curves into segments of size ``segment_size``, and calculate a cross spectrum for
each.
Parameters
----------
lc1, lc2 : :class:`stingray.Lightcurve` objects
Two light curves used for computing the cross spectrum.
segment_size : ``numpy.float``
Size of each light curve segment to use for averaging.
Other parameters
----------------
silent : bool, default False
Suppress progress bars
Returns
-------
        cs_all : list of :class:`Crossspectrum` objects
            A list of cross spectra calculated independently from each light curve segment
        nphots1_all, nphots2_all : ``numpy.ndarray`` for each of ``lc1`` and ``lc2``
            Two lists containing the number of photons for all segments calculated from ``lc1`` and ``lc2``.
"""
assert isinstance(lc1, Lightcurve)
assert isinstance(lc2, Lightcurve)
if lc1.tseg != lc2.tseg:
            simon("Lightcurves do not have same tseg. This means that the data "
"from the two channels are not completely in sync. This "
"might or might not be an issue. Keep an eye on it.")
# If dt differs slightly, its propagated error must not be more than
# 1/100th of the bin
if not np.isclose(lc1.dt, lc2.dt, rtol=0.01 * lc1.dt / lc1.tseg):
raise ValueError("Light curves do not have same time binning dt.")
# In case a small difference exists, ignore it
lc1.dt = lc2.dt
current_gtis = cross_two_gtis(lc1.gti, lc2.gti)
lc1.gti = lc2.gti = current_gtis
lc1.apply_gtis()
lc2.apply_gtis()
if self.gti is None:
self.gti = current_gtis
else:
if not np.allclose(self.gti, current_gtis):
self.gti = np.vstack([self.gti, current_gtis])
check_gtis(current_gtis)
cs_all = []
nphots1_all = []
nphots2_all = []
start_inds, end_inds = \
bin_intervals_from_gtis(current_gtis, segment_size, lc1.time,
dt=lc1.dt)
simon("Errorbars on cross spectra are not thoroughly tested. "
"Please report any inconsistencies.")
local_show_progress = show_progress
if not self.show_progress or silent:
local_show_progress = lambda a: a
for start_ind, end_ind in \
local_show_progress(zip(start_inds, end_inds)):
time_1 = copy.deepcopy(lc1.time[start_ind:end_ind])
counts_1 = copy.deepcopy(lc1.counts[start_ind:end_ind])
counts_1_err = copy.deepcopy(lc1.counts_err[start_ind:end_ind])
time_2 = copy.deepcopy(lc2.time[start_ind:end_ind])
counts_2 = copy.deepcopy(lc2.counts[start_ind:end_ind])
counts_2_err = copy.deepcopy(lc2.counts_err[start_ind:end_ind])
if np.sum(counts_1) == 0 or np.sum(counts_2) == 0:
warnings.warn(
"No counts in interval {}--{}s".format(time_1[0],
time_1[-1]))
continue
gti1 = np.array([[time_1[0] - lc1.dt / 2,
time_1[-1] + lc1.dt / 2]])
gti2 = np.array([[time_2[0] - lc2.dt / 2,
time_2[-1] + lc2.dt / 2]])
lc1_seg = Lightcurve(time_1, counts_1, err=counts_1_err,
err_dist=lc1.err_dist,
gti=gti1,
dt=lc1.dt, skip_checks=True)
lc2_seg = Lightcurve(time_2, counts_2, err=counts_2_err,
err_dist=lc2.err_dist,
gti=gti2,
dt=lc2.dt, skip_checks=True)
with warnings.catch_warnings(record=True) as w:
cs_seg = Crossspectrum(lc1_seg, lc2_seg, norm=self.norm,
power_type=self.power_type, fullspec=self.fullspec)
cs_all.append(cs_seg)
nphots1_all.append(np.sum(lc1_seg.counts))
nphots2_all.append(np.sum(lc2_seg.counts))
return cs_all, nphots1_all, nphots2_all
def _make_crossspectrum(self, lc1, lc2, fullspec=False):
"""
Auxiliary method computing the normalized cross spectrum from two light curves.
This includes checking for the presence of and applying Good Time Intervals, computing the
unnormalized Fourier cross-amplitude, and then renormalizing using the required normalization.
Also computes an uncertainty estimate on the cross spectral powers. Stingray uses the
scipy.fft standards for the sign of the Nyquist frequency.
Parameters
----------
lc1, lc2 : :class:`stingray.Lightcurve` objects
Two light curves used for computing the cross spectrum.
fullspec: boolean, default ``False``,
If True, return all frequencies otherwise return only positive frequencies
"""
local_show_progress = show_progress
if not self.show_progress:
local_show_progress = lambda a: a
# chop light curves into segments
if isinstance(lc1, Lightcurve) and \
isinstance(lc2, Lightcurve):
if self.type == "crossspectrum":
cs_all, nphots1_all, nphots2_all = \
self._make_segment_spectrum(lc1, lc2, self.segment_size)
elif self.type == "powerspectrum":
cs_all, nphots1_all = \
self._make_segment_spectrum(lc1, self.segment_size)
else:
raise ValueError("Type of spectrum not recognized!")
else:
cs_all, nphots1_all, nphots2_all = [], [], []
for lc1_seg, lc2_seg in local_show_progress(zip(lc1, lc2)):
if self.type == "crossspectrum":
cs_sep, nphots1_sep, nphots2_sep = \
self._make_segment_spectrum(lc1_seg, lc2_seg,
self.segment_size,
silent=True)
nphots2_all.append(nphots2_sep)
elif self.type == "powerspectrum":
cs_sep, nphots1_sep = \
self._make_segment_spectrum(lc1_seg, self.segment_size,
silent=True)
else:
raise ValueError("Type of spectrum not recognized!")
cs_all.append(cs_sep)
nphots1_all.append(nphots1_sep)
cs_all = np.hstack(cs_all)
nphots1_all = np.hstack(nphots1_all)
if self.type == "crossspectrum":
nphots2_all = np.hstack(nphots2_all)
m = len(cs_all)
nphots1 = np.mean(nphots1_all)
power_avg = np.zeros_like(cs_all[0].power)
power_err_avg = np.zeros_like(cs_all[0].power_err)
unnorm_power_avg = np.zeros_like(cs_all[0].unnorm_power)
for cs in cs_all:
power_avg += cs.power
unnorm_power_avg += cs.unnorm_power
power_err_avg += (cs.power_err) ** 2
power_avg /= float(m)
power_err_avg = np.sqrt(power_err_avg) / m
unnorm_power_avg /= float(m)
self.freq = cs_all[0].freq
self.power = power_avg
self.unnorm_power = unnorm_power_avg
self.m = m
self.power_err = power_err_avg
self.df = cs_all[0].df
self.n = cs_all[0].n
self.nphots1 = nphots1
if self.save_all:
self.cs_all = cs_all
if self.type == "crossspectrum":
self.nphots1 = nphots1
nphots2 = np.mean(nphots2_all)
self.nphots2 = nphots2
def coherence(self):
"""Averaged Coherence function.
Coherence is defined in Vaughan and Nowak, 1996 [#]_.
It is a Fourier frequency dependent measure of the linear correlation
between time series measured simultaneously in two energy channels.
Compute an averaged Coherence function of cross spectrum by computing
coherence function of each segment and averaging them. The return type
is a tuple with first element as the coherence function and the second
element as the corresponding uncertainty associated with it.
Note : The uncertainty in coherence function is strictly valid for Gaussian \
statistics only.
Returns
-------
(coh, uncertainty) : tuple of np.ndarray
Tuple comprising the coherence function and uncertainty.
References
----------
.. [#] http://iopscience.iop.org/article/10.1086/310430/pdf
"""
        if np.any(self.m < 50):
            simon("Number of segments used in averaging is "
                  "significantly low. The result might not follow the "
                  "expected statistical distributions.")
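# --- Usage sketch (not part of the original module; the class body above is
# truncated at this point) ---
# Assumed example of averaging many segments and reading off the coherence and
# time lag; segment size, bin size and count rates are illustrative only.
def _example_averaged_crossspectrum():
    dt = 0.01
    time = np.arange(0, 200, dt)
    counts1 = np.random.poisson(20, time.size)
    counts2 = np.random.poisson(20, time.size)
    lc1 = Lightcurve(time, counts1, dt=dt, skip_checks=True)
    lc2 = Lightcurve(time, counts2, dt=dt, skip_checks=True)
    acs = AveragedCrossspectrum(lc1, lc2, segment_size=10.0, norm='leahy')
    coh, coh_err = acs.coherence()   # per-frequency coherence and its uncertainty
    lag = acs.time_lag()             # per-frequency time lag
    return coh, coh_err, lag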
import numpy as np
from matplotlib import pylab as plt
in_range = lambda x, lim: [i for i, v in enumerate(x) if (v >=
lim[0]) and (v <= lim[1])]
def scale1(arr, mval=None):
if mval is None:
mval = np.max(arr)
    return np.array([i / mval for i in arr])
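# --- Usage sketch (assumed, not in the original file) ---
# `in_range` returns the indices of values inside [lim[0], lim[1]];
# `scale1` rescales an array so that its maximum (or a supplied `mval`) maps to 1.
def _demo_helpers():
    x = np.linspace(0.0, 10.0, 101)
    idx = in_range(x, (2.0, 4.0))   # indices where 2 <= x <= 4
    x_scaled = scale1(x)            # max(x_scaled) == 1.0
    return idx, x_scaled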
import os.path
from data.base_dataset import BaseDataset
from data.image_folder import make_dataset
import numpy as np
import torch
import data.exrlib as exrlib
class ExrHeightDataset(BaseDataset):
def __init__(self, opt):
"""Initialize this dataset class.
Parameters:
opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
"""
BaseDataset.__init__(self, opt)
assert(opt.image_type == 'exr')
#self.A = os.path.join(opt.dataroot, opt.phase + '_input')
self.A1 = os.path.join(opt.dataroot, opt.phase + '_input_terraform')
self.B = os.path.join(opt.dataroot, opt.phase + '_output')
#self.A_paths = sorted(make_dataset(self.A, opt.max_dataset_size))
self.A1_paths = sorted(make_dataset(self.A1, opt.max_dataset_size))
self.B_paths = sorted(make_dataset(self.B, opt.max_dataset_size))
#self.A_size = len(self.A_paths) # get the size of dataset A
self.A1_size = len(self.A1_paths)
self.B_size = len(self.B_paths) # get the size of dataset B
btoA = self.opt.direction == 'BtoA'
input_nc = self.opt.output_nc if btoA else self.opt.input_nc # get the number of channels of input image
output_nc = self.opt.input_nc if btoA else self.opt.output_nc # get the number of channels of output image
self.A1_test_paths = sorted(make_dataset(os.path.join(opt.dataroot, 'test_input_terraform')))
self.B_test_paths = sorted(make_dataset(os.path.join(opt.dataroot, 'test_output')))
self.A1_test_size = len(self.A1_test_paths)
self.B_test_size = len(self.B_test_paths)
self.input_names = np.array(["RockDetailMask.RockDetailMask", "SoftDetailMask.SoftDetailMask", "cliffs.cliffs", "height.height", "mesa.mesa", "slope.slope", "slopex.slopex", "slopez.slopez"])
self.output_names = np.array(["RockDetailMask.RockDetailMask", "SoftDetailMask.SoftDetailMask", "bedrock.bedrock", "cliffs.cliffs", "flow.flow", "flowx.flowx", "flowz.flowz", "height.height", "mesa.mesa", "sediment.sediment", "water.water"])
self.input_channels = np.array([3, 6, 7]) #height, slopex, slopez
self.output_channels = np.array([7]) #height
if not self.opt.compute_bounds:
self.i_channels_min = np.array([[[0, -400, -400]]])
self.i_channels_max = np.array([[[824, 20, 20]]])
self.o_channels_min = np.array([[[-4]]])
self.o_channels_max = np.array([[[819]]])
return
channels_min = np.array([2**16 for _ in self.input_channels])
channels_max = np.array([0 for _ in self.input_channels])
examples = 0
for A1_path in self.A1_paths:
A1_img = exrlib.read_exr_float32(A1_path, list(self.input_names[self.input_channels]), 512, 512).transpose(2, 0, 1).reshape(len(self.input_channels), -1)
            # NOTE: the tail of this statement was reconstructed; the second
            # expand_dims axis and the closing reduction are assumptions.
            channels_min = np.min(np.concatenate(
                (np.expand_dims(channels_min, 1),
                 np.expand_dims(np.min(A1_img, 1), 1)), 1), 1)
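# --- Illustrative sketch (not part of the original dataset class) ---
# The running min/max bookkeeping above can be written more compactly with
# element-wise minimum/maximum. `imgs` is an assumed stand-in for the per-file
# (n_channels, n_pixels) arrays read from disk in the loop above.
def _channel_bounds_sketch(imgs):
    channels_min = np.full(imgs[0].shape[0], np.inf)
    channels_max = np.full(imgs[0].shape[0], -np.inf)
    for img in imgs:
        channels_min = np.minimum(channels_min, img.min(axis=1))
        channels_max = np.maximum(channels_max, img.max(axis=1))
    return channels_min, channels_max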
# Copyright 2021 <NAME>. All Rights Reserved.
#
# Licensed under the MIT License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/MIT
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
""" A collection of functions which are useful for getting the necessary
information from the volume in order to compute nephrometry metrics """
from pathlib import Path
import numpy as np
import pydicom
from scipy.signal import convolve2d
from scipy.ndimage.measurements import label
from scipy.stats import mode
from scipy.spatial.distance import pdist, squareform
from pyfastnns import NNS
import time
import cv2
def get_centroid(volume):
coordinates = np.transpose(np.array(np.nonzero(volume)))
centroid = np.mean(coordinates, axis=0)
return centroid
def _blur_thresh(vol):
kernel = np.ones((3,3))/9.0
ret = np.zeros(np.shape(vol), dtype=np.float32)
for i in range(vol.shape[0]):
ret[i] = convolve2d(
vol[i], kernel, mode="same", boundary="fill", fillvalue=0
)
return ret
def _get_distance(c1, c2, x_width=1, y_width=1, z_width=1):
return np.linalg.norm(
np.multiply(c1 - c2, np.array((x_width, y_width, z_width))), ord=2
)
def distance_between_regions(first_coordinates, second_coordinates):
nns = NNS(first_coordinates)
_, distance = nns.search(second_coordinates)
min_distance = np.min(distance)
return min_distance
def nearest_pair(first_coordinates, second_coordinates):
nns = NNS(first_coordinates)
pts, distances = nns.search(second_coordinates)
min_distance_idx = np.argmin(distances)
sp = second_coordinates[min_distance_idx]
fp = first_coordinates[pts[min_distance_idx]]
return fp, sp
def furthest_pair_distance(coordinates):
coordinates = np.array(coordinates).T
D = pdist(coordinates)
return np.nanmax(D)
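# --- Usage sketch (assumed, not part of the original module) ---
# Minimal demonstration of the geometric helpers above on a toy binary volume;
# the voxel spacing values passed to `_get_distance` are arbitrary assumptions.
def _example_geometry_helpers():
    volume = np.zeros((5, 5, 5), dtype=np.int32)
    volume[1:4, 1:4, 1:4] = 1                          # a small cube of foreground voxels
    centroid = get_centroid(volume)                    # mean coordinate of nonzero voxels
    span = furthest_pair_distance(np.nonzero(volume))  # largest voxel-to-voxel distance
    d = _get_distance(np.array([0, 0, 0]), centroid,
                      x_width=0.8, y_width=0.8, z_width=3.0)  # assumed spacing in mm
    return centroid, span, d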
def get_nearest_rim_point(region_boundaries, pixel_width, slice_thickness):
# Get coordinates of collecting system voxels
rim_bin = np.equal(region_boundaries, 5).astype(np.int32)
rim_coordinates = np.transpose(np.array(np.nonzero(rim_bin)))
if rim_coordinates.shape[0] == 0:
raise ValueError("Renal rim could not be identified")
# Get coordinates of tumor voxels
tumor_bin = np.equal(region_boundaries, 2).astype(np.int32)
tumor_coordinates = np.transpose(np.array(np.nonzero(tumor_bin)))
# Scale coordinates such that they correspond to the real world (mm)
multiplier = np.array(
[[slice_thickness, pixel_width, pixel_width]]
).astype(np.float32)
rim_coordinates = np.multiply(rim_coordinates, multiplier)
tumor_coordinates = np.multiply(tumor_coordinates, multiplier)
nearest_pt, _ = nearest_pair(rim_coordinates, tumor_coordinates)
return np.divide(nearest_pt, multiplier[0])
def get_distance_to_collecting_system(region_boundaries, pixel_width,
slice_thickness):
# Get coordinates of collecting system voxels
ucs_bin = np.equal(region_boundaries, 4).astype(np.int32)
ucs_coordinates = np.transpose(np.array(np.nonzero(ucs_bin)))
if ucs_coordinates.shape[0] == 0:
return get_distance_to_sinus(
region_boundaries, pixel_width, slice_thickness
)
# raise ValueError("Collecting system could not be identified")
# Get coordinates of tumor voxels
    tumor_bin = np.equal(region_boundaries, 2).astype(np.int32)  # .astype mirrors the collecting-system mask above
from __future__ import print_function
from sklearn.metrics.pairwise import cosine_similarity
import nltk
from sklearn.preprocessing import Normalizer
from sklearn.cluster import KMeans, MiniBatchKMeans
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn import metrics
from nltk.tokenize import word_tokenize
from sklearn.feature_extraction.text import CountVectorizer
import logging
from optparse import OptionParser
import sys
from time import time
import numpy as np
import pandas as pd
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
# parse commandline arguments
op = OptionParser()
op.add_option("--lsa",
dest="n_components", type="int",
help="Preprocess documents with latent semantic analysis.")
op.add_option("--no-minibatch",
action="store_false", dest="minibatch", default=True,
help="Use ordinary k-means algorithm (in batch mode).")
op.add_option("--no-idf",
action="store_false", dest="use_idf", default=True,
help="Disable Inverse Document Frequency feature weighting.")
op.add_option("--use-hashing",
action="store_true", default=False,
help="Use a hashing feature vectorizer")
op.add_option("--n-features", type=int, default=10000,
help="Maximum number of features (dimensions)"
" to extract from text.")
op.add_option("--verbose",
action="store_true", dest="verbose", default=False,
help="Print progress reports inside k-means algorithm.")
print(__doc__)
op.print_help()
def is_interactive():
return not hasattr(sys.modules['__main__'], '__file__')
# work-around for Jupyter notebook and IPython console
argv = [] if is_interactive() else sys.argv[1:]
(opts, args) = op.parse_args(argv)
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
def is_interactive():
return not hasattr(sys.modules['__main__'], '__file__')
# Bring in standard stopwords
with np.errstate(divide='ignore'):
np.float64(1.0) / 0.0
from nltk.corpus import stopwords
data = []
# Bring in the default English NLTK stop words
stoplist = stopwords.words('english')
# Define additional stopwords in a string
additional_stopwords = """To [ ] I you am As it can't <<...>> sincerely, . > - < <NAME>/Corp/Enron@Enron Best regards Sincerely From Sent Original Message Q <-> * | /\ 100% 12345678910 () """
# Split the additional stopwords string on each word and then add
# those words to the NLTK stopwords list
stoplist += additional_stopwords.split()
stopWords = stopwords.words('english')
print ("\nCalculating document Dissimilarity and similarity scores...")
# Open and read a bunch of files
f = open('ken-lay_body.txt')
doc1 = str(f.read())
f = open('jeff-skilling_body.txt')
doc2 = str(f.read())
f = open('Richard-shapiro_body.txt')
doc3 = str(f.read())
f = open('kay-mann_body.txt')
doc4 = str(f.read())
f = open('Jeff-dasovich_body.txt',)
doc5 = str(f.read())
f = open('tana jones_body.txt')
doc6 = str(f.read())
f = open('steven kean_body.txt')
doc7 = str(f.read())
f = open('shackleton sara_body.txt')
doc8 = str(f.read())
f = open('<NAME>es_body.txt')
doc9 = str(f.read())
f = open('Mark taylor_body.txt')
doc10 = str(f.read())
f = open('davis pete_body.txt')
doc11 = str(f.read())
f = open('Chris g_body.txt')
doc12 = str(f.read())
f = open('kate symes_body.txt')
doc13 = str(f.read())
f = open('Mcconnell.body.txt')
doc14 = str(f.read())
f = open('kaminski_body.txt')
doc15 = str(f.read())
#train_string = 'By these proceedings for judicial review the Claimant seeks to challenge the decision of the Defendant dated the 23rd of May 2014 refusing the Claimant’s application of the 3rd of January 2012 for naturalisation as a British citizen'
# Construct the training set as a list
document = [ doc1, doc2, doc3, doc4, doc5, doc6,doc7, doc8, doc9, doc10, doc11, doc12, doc13, doc14, doc15]
# Set up the vectoriser, passing in the stop words
tfidf_vectorizer = TfidfVectorizer(stop_words=stopWords)
tfidf_matrix_train = tfidf_vectorizer.fit_transform(document)
# Apply the vectoriser to the training set
Cardinality=0
for files in document:
if files.endswith('.txt'):
Cardinality+=1
counts = CountVectorizer()  # documents are in-memory strings ('content' input, the default)
dtm = counts.fit_transform(document) # a sparse matrix
vocab = counts.get_feature_names() # a list
#type(dtm)
dtm = dtm.toarray() # convert to a regular array
#print (dtm.shape)
N, K = dtm.shape
ind = np.arange(N) # points on the x-axis
width = 0.2
vocab = np.array(vocab)
n, _ = dtm.shape
dist = np.zeros((n, n))
#dissimilarity
Dissimilarity=dist
for i in range(n):
for j in range(n):
x, y = dtm[i, :], dtm[j, :]
dist[i, j] = np.sqrt(np.sum((x - y)**2))
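# --- Optional vectorized alternative (assumed, not in the original script) ---
# The double loop above fills an n x n Euclidean distance matrix between the
# rows of the document-term matrix; scipy computes the same matrix in one call.
from scipy.spatial.distance import pdist as _pdist, squareform as _squareform
dist_vectorized = _squareform(_pdist(dtm, metric='euclidean'))
assert np.allclose(dist, dist_vectorized)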
vectorizer = CountVectorizer(min_df=5, max_df=0.9,
stop_words='english', lowercase=True,
                             token_pattern=r'[a-zA-Z\-][a-zA-Z\-]{2,}')
matrix = tfidf_vectorizer.fit_transform(document)
dtm = vectorizer.fit_transform(document)
t=vectorizer.get_feature_names()
#print (t)
x= ' '.join(t)
stop = set(stopwords.words('english'))
#sentence = "this is a foo bar sentence"
g=[i for i in x.lower().split() if i not in stop]
#print (nltk.pos_tag(g))
dd= ', '.join(str(x) for x in g)
#','.join(map(str,g) )
#print (dd)
stop_words = set(stopwords.words('english'))
word_tokens = word_tokenize(dd)
#print (dd)
terms = vectorizer.get_feature_names()
from nltk.corpus import wordnet as wn
sents = dd
tokens = nltk.word_tokenize(dd)
tags = nltk.pos_tag(tokens)
nouns = [word for word,pos in tags if (pos == 'NN' or pos == 'NNP' or pos == 'NNS' or pos == 'NNPS' or pos == 'VBZ'or pos == 'VB')]
print(nouns)
lsa = TruncatedSVD(2, algorithm = 'arpack')
dtm_lsa = lsa.fit_transform(dtm)
dtm_lsa = Normalizer(copy=False).fit_transform(dtm_lsa)
similarity = np.asarray(np.asmatrix(dtm_lsa) * np.asmatrix(dtm_lsa).T)
print (similarity)
#pd.DataFrame(similarity,index=nouns, columns=nouns).head(10)
true_k = 5
model = KMeans(n_clusters=true_k, init='k-means++', max_iter=1000, n_init=1)
x=model.fit(matrix)
labels = x.labels_
print ("Top terms per labels:", labels)
print("Top terms per cluster:")
n_topics = 10
NUM_TOPICS = 10
# Build a Latent Semantic Indexing Model
lsi_model = TruncatedSVD(n_components=NUM_TOPICS)
lsi_Z = lsi_model.fit_transform(dtm)
print(lsi_Z.shape) # (NO_DOCUMENTS, NO_TOPICS)
# Let's see how the first document in the corpus looks like in different topic spaces
print(lsi_Z[0])
def print_topics(model, vectorizer, top_n=10):
for idx, topic in enumerate(model.components_):
print("Concepts %d:" % (idx))
print([(vectorizer.get_feature_names()[i], topic[i])
for i in topic.argsort()[:-top_n - 1:-1]])
true_k = 10
print("LSI Model:")
print_topics(lsi_model, vectorizer)
print("=" * 20)
# #############################################################################
# Do the actual clustering
if opts.minibatch:
km = MiniBatchKMeans(n_clusters=true_k, init='k-means++', n_init=1,
init_size=10000, batch_size=10000, verbose=opts.verbose)
else:
    km = KMeans(n_clusters=true_k, init='k-means++', max_iter=1000, n_init=1,
                verbose=opts.verbose)  # KMeans has no 'arpack' option; 'arpack' applies to TruncatedSVD
#km.labels_=labels
print("Clustering sparse data with %s" % km)
t0 = time()
km.fit(matrix)
#km.labels_=labels
print ("Top terms per labels:",km.labels_)
print("done in %0.3fs" % (time() - t0))
print()
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels, km.labels_))
print("Completeness: %0.3f" % metrics.completeness_score(labels, km.labels_))
print("V-measure: %0.3f" % metrics.v_measure_score(labels, km.labels_))
print("Adjusted Rand-Index: %.3f"
% metrics.adjusted_rand_score(labels, km.labels_))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(matrix, km.labels_, metric='cosine', sample_size=10000))
nbins=len(set(km.labels_))
vals,bins=np.histogram(km.labels_,bins=nbins)
print (20*' ','hist-min,max',np.min(vals),np.max(vals) )
print()
from sklearn.metrics import r2_score
from sklearn.metrics import v_measure_score
print(metrics.v_measure_score(labels, km.labels_))
print(r2_score(labels, km.labels_))
from sklearn.metrics import accuracy_score
print (accuracy_score(labels, km.labels_))
word_list = nouns  # use the nouns extracted above as the word list
#word_list = ['Jellicle', 'Cats', 'are', 'black', 'and', 'white,', 'Jellicle', 'Cats', 'are', 'rather', 'small;', 'Jellicle', 'Cats', 'are', 'merry', 'and', 'bright,', 'And', 'pleasant', 'to', 'hear', 'when', 'they', 'caterwaul.', 'Jellicle', 'Cats', 'have', 'cheerful', 'faces,', 'Jellicle', 'Cats', 'have', 'bright', 'black', 'eyes;', 'They', 'like', 'to', 'practise', 'their', 'airs', 'and', 'graces', 'And', 'wait', 'for', 'the', 'Jellicle', 'Moon', 'to', 'rise.', '']
word_counter = {}
for word in word_list:
if word in word_counter:
word_counter[word] += 1
else:
word_counter[word] = 1
popular_words = sorted(word_counter, key = word_counter.get, reverse = True)
top_ = popular_words[:100]
tfidf_vectorizer = TfidfVectorizer(stop_words=stopWords)
# Apply the vectoriser to the training set
tfidf_matrix_train = tfidf_vectorizer.fit_transform(word_list)
# as above, `document` holds file contents, so count the documents directly
Cardinality = len(document)
counts = CountVectorizer(input='content')
dtm = counts.fit_transform(word_list) # a sparse matrix
vocab = counts.get_feature_names() # a list
#type(dtm)
dtm = dtm.toarray() # convert to a regular array
#print (dtm.shape)
N, K = dtm.shape
ind = np.arange(N) # points on the x-axis
width = 0.2
vocab = | np.array(vocab) | numpy.array |
import warnings
import copy
import math as m
import numpy as nu
from scipy import integrate, optimize
import scipy
if int(scipy.__version__.split('.')[1]) < 10: #pragma: no cover
from scipy.maxentropy import logsumexp
else:
from scipy.misc import logsumexp
from galpy.potential_src.Potential import evaluateRforces, evaluatezforces,\
evaluatePotentials, evaluatephiforces, evaluateDensities
from galpy.util import galpyWarning
import galpy.util.bovy_plot as plot
import galpy.util.bovy_symplecticode as symplecticode
import galpy.util.bovy_coords as coords
#try:
from galpy.orbit_src.integrateFullOrbit import integrateFullOrbit_c, _ext_loaded
ext_loaded= _ext_loaded
from galpy.util.bovy_conversion import physical_conversion
from galpy.orbit_src.OrbitTop import OrbitTop
_ORBFITNORMRADEC= 360.
_ORBFITNORMDIST= 10.
_ORBFITNORMPMRADEC= 4.
_ORBFITNORMVLOS= 200.
class FullOrbit(OrbitTop):
"""Class that holds and integrates orbits in full 3D potentials"""
def __init__(self,vxvv=[1.,0.,0.9,0.,0.1],vo=220.,ro=8.0,zo=0.025,
solarmotion=nu.array([-10.1,4.0,6.7])):
"""
NAME:
__init__
PURPOSE:
intialize a full orbit
INPUT:
vxvv - initial condition [R,vR,vT,z,vz,phi]
vo - circular velocity at ro (km/s)
ro - distance from vantage point to GC (kpc)
zo - offset toward the NGP of the Sun wrt the plane (kpc)
solarmotion - value in [-U,V,W] (km/s)
OUTPUT:
(none)
HISTORY:
2010-08-01 - Written - Bovy (NYU)
2014-06-11 - Added conversion kwargs to physical coordinates - Bovy (IAS)
"""
OrbitTop.__init__(self,vxvv=vxvv,
ro=ro,zo=zo,vo=vo,solarmotion=solarmotion)
return None
def integrate(self,t,pot,method='symplec4_c',dt=None):
"""
NAME:
integrate
PURPOSE:
integrate the orbit
INPUT:
t - list of times at which to output (0 has to be in this!)
pot - potential instance or list of instances
method= 'odeint' for scipy's odeint
'leapfrog' for a simple leapfrog implementation
'leapfrog_c' for a simple leapfrog implementation in C
'rk4_c' for a 4th-order Runge-Kutta integrator in C
'rk6_c' for a 6-th order Runge-Kutta integrator in C
'dopr54_c' for a Dormand-Prince integrator in C (generally the fastest)
dt= (None) if set, force the integrator to use this basic stepsize; must be an integer divisor of output stepsize
OUTPUT:
(none) (get the actual orbit using getOrbit()
HISTORY:
2010-08-01 - Written - Bovy (NYU)
"""
#Reset things that may have been defined by a previous integration
if hasattr(self,'_orbInterp'): delattr(self,'_orbInterp')
if hasattr(self,'rs'): delattr(self,'rs')
self.t= nu.array(t)
self._pot= pot
self.orbit= _integrateFullOrbit(self.vxvv,pot,t,method,dt)
@physical_conversion('energy')
def Jacobi(self,*args,**kwargs):
"""
NAME:
Jacobi
PURPOSE:
calculate the Jacobi integral of the motion
INPUT:
Omega - pattern speed of rotating frame
t= time
pot= potential instance or list of such instances
OUTPUT:
Jacobi integral
HISTORY:
2011-04-18 - Written - Bovy (NYU)
"""
if not 'OmegaP' in kwargs or kwargs['OmegaP'] is None:
OmegaP= 1.
if not 'pot' in kwargs or kwargs['pot'] is None:
try:
pot= self._pot
except AttributeError:
raise AttributeError("Integrate orbit or specify pot=")
else:
pot= kwargs['pot']
if isinstance(pot,list):
for p in pot:
if hasattr(p,'OmegaP'):
OmegaP= p.OmegaP()
break
else:
if hasattr(pot,'OmegaP'):
OmegaP= pot.OmegaP()
kwargs.pop('OmegaP',None)
else:
OmegaP= kwargs.pop('OmegaP')
#Make sure you are not using physical coordinates
old_physical= kwargs.get('use_physical',None)
kwargs['use_physical']= False
if not isinstance(OmegaP,(int,float)) and len(OmegaP) == 3:
if isinstance(OmegaP,list): thisOmegaP= nu.array(OmegaP)
else: thisOmegaP= OmegaP
out= self.E(*args,**kwargs)-nu.dot(thisOmegaP,
self.L(*args,**kwargs).T).T
else:
out= self.E(*args,**kwargs)-OmegaP*self.L(*args,**kwargs)[:,2]
if not old_physical is None:
kwargs['use_physical']= old_physical
else:
kwargs.pop('use_physical')
return out
@physical_conversion('energy')
def E(self,*args,**kwargs):
"""
NAME:
E
PURPOSE:
calculate the energy
INPUT:
t - (optional) time at which to get the energy
pot= potential instance or list of such instances
OUTPUT:
energy
HISTORY:
2010-09-15 - Written - Bovy (NYU)
"""
if not 'pot' in kwargs or kwargs['pot'] is None:
try:
pot= self._pot
except AttributeError:
raise AttributeError("Integrate orbit or specify pot=")
if 'pot' in kwargs and kwargs['pot'] is None:
kwargs.pop('pot')
else:
pot= kwargs.pop('pot')
if len(args) > 0:
t= args[0]
else:
t= 0.
#Get orbit
thiso= self(*args,**kwargs)
onet= (len(thiso.shape) == 1)
if onet:
return evaluatePotentials(thiso[0],thiso[3],pot,
phi=thiso[5],t=t)\
+thiso[1]**2./2.\
+thiso[2]**2./2.\
+thiso[4]**2./2.
else:
return nu.array([evaluatePotentials(thiso[0,ii],thiso[3,ii],
pot,phi=thiso[5,ii],
t=t[ii])\
+thiso[1,ii]**2./2.\
+thiso[2,ii]**2./2.\
+thiso[4,ii]**2./2. for ii in range(len(t))])
@physical_conversion('energy')
def ER(self,*args,**kwargs):
"""
NAME:
ER
PURPOSE:
calculate the radial energy
INPUT:
t - (optional) time at which to get the energy
pot= potential instance or list of such instances
OUTPUT:
radial energy
HISTORY:
2013-11-30 - Written - Bovy (IAS)
"""
if not 'pot' in kwargs or kwargs['pot'] is None:
try:
pot= self._pot
except AttributeError:
raise AttributeError("Integrate orbit or specify pot=")
if 'pot' in kwargs and kwargs['pot'] is None:
kwargs.pop('pot')
else:
pot= kwargs.pop('pot')
if len(args) > 0:
t= args[0]
else:
t= 0.
#Get orbit
thiso= self(*args,**kwargs)
onet= (len(thiso.shape) == 1)
if onet:
return evaluatePotentials(thiso[0],0.,pot,
phi=thiso[5],t=t)\
+thiso[1]**2./2.\
+thiso[2]**2./2.
else:
return nu.array([evaluatePotentials(thiso[0,ii],0.,
pot,phi=thiso[5,ii],
t=t[ii])\
+thiso[1,ii]**2./2.\
+thiso[2,ii]**2./2. for ii in range(len(t))])
@physical_conversion('energy')
def Ez(self,*args,**kwargs):
"""
NAME:
Ez
PURPOSE:
calculate the vertical energy
INPUT:
t - (optional) time at which to get the energy
pot= potential instance or list of such instances
OUTPUT:
vertical energy
HISTORY:
2013-11-30 - Written - Bovy (IAS)
"""
if not 'pot' in kwargs or kwargs['pot'] is None:
try:
pot= self._pot
except AttributeError:
raise AttributeError("Integrate orbit or specify pot=")
if 'pot' in kwargs and kwargs['pot'] is None:
kwargs.pop('pot')
else:
pot= kwargs.pop('pot')
if len(args) > 0:
t= args[0]
else:
t= 0.
#Get orbit
thiso= self(*args,**kwargs)
onet= (len(thiso.shape) == 1)
if onet:
return evaluatePotentials(thiso[0],thiso[3],pot,
phi=thiso[5],t=t)\
-evaluatePotentials(thiso[0],0.,pot,
phi=thiso[5],t=t)\
+thiso[4]**2./2.
else:
return nu.array([evaluatePotentials(thiso[0,ii],thiso[3,ii],
pot,phi=thiso[5,ii],
t=t[ii])\
-evaluatePotentials(thiso[0,ii],0.,
pot,phi=thiso[5,ii],
t=t[ii])\
+thiso[4,ii]**2./2. for ii in range(len(t))])
def e(self,analytic=False,pot=None):
"""
NAME:
e
PURPOSE:
calculate the eccentricity
INPUT:
analytic - compute this analytically
pot - potential to use for analytical calculation
OUTPUT:
eccentricity
HISTORY:
2010-09-15 - Written - Bovy (NYU)
"""
if analytic:
self._setupaA(pot=pot,type='adiabatic')
(rperi,rap)= self._aA.calcRapRperi(self)
return (rap-rperi)/(rap+rperi)
if not hasattr(self,'orbit'):
raise AttributeError("Integrate the orbit first")
if not hasattr(self,'rs'):
self.rs= nu.sqrt(self.orbit[:,0]**2.+self.orbit[:,3]**2.)
return (nu.amax(self.rs)-nu.amin(self.rs))/(nu.amax(self.rs)+nu.amin(self.rs))
@physical_conversion('position')
def rap(self,analytic=False,pot=None,**kwargs):
"""
NAME:
rap
PURPOSE:
return the apocenter radius
INPUT:
analytic - compute this analytically
pot - potential to use for analytical calculation
OUTPUT:
R_ap
HISTORY:
2010-09-20 - Written - Bovy (NYU)
"""
if analytic:
self._setupaA(pot=pot,type='adiabatic')
(rperi,rap)= self._aA.calcRapRperi(self)
return rap
if not hasattr(self,'orbit'):
raise AttributeError("Integrate the orbit first")
if not hasattr(self,'rs'):
self.rs= nu.sqrt(self.orbit[:,0]**2.+self.orbit[:,3]**2.)
return nu.amax(self.rs)
@physical_conversion('position')
def rperi(self,analytic=False,pot=None,**kwargs):
"""
NAME:
rperi
PURPOSE:
return the pericenter radius
INPUT:
analytic - compute this analytically
pot - potential to use for analytical calculation
OUTPUT:
R_peri
HISTORY:
2010-09-20 - Written - Bovy (NYU)
"""
if analytic:
self._setupaA(pot=pot,type='adiabatic')
(rperi,rap)= self._aA.calcRapRperi(self)
return rperi
if not hasattr(self,'orbit'):
raise AttributeError("Integrate the orbit first")
if not hasattr(self,'rs'):
self.rs= nu.sqrt(self.orbit[:,0]**2.+self.orbit[:,3]**2.)
return nu.amin(self.rs)
@physical_conversion('position')
def zmax(self,analytic=False,pot=None,**kwargs):
"""
NAME:
zmax
PURPOSE:
return the maximum vertical height
INPUT:
analytic - compute this analytically
pot - potential to use for analytical calculation
OUTPUT:
Z_max
HISTORY:
2010-09-20 - Written - Bovy (NYU)
2012-06-01 - Added analytic calculation - Bovy (IAS)
"""
if analytic:
self._setupaA(pot=pot,type='adiabatic')
zmax= self._aA.calczmax(self)
return zmax
if not hasattr(self,'orbit'):
raise AttributeError("Integrate the orbit first")
return nu.amax(nu.fabs(self.orbit[:,3]))
def fit(self,vxvv,vxvv_err=None,pot=None,radec=False,lb=False,
customsky=False,lb_to_customsky=None,pmllpmbb_to_customsky=None,
tintJ=10,ntintJ=1000,integrate_method='dopr54_c',
disp=False,
**kwargs):
"""
NAME:
fit
PURPOSE:
fit an Orbit to data using the current orbit as the initial
condition
INPUT:
vxvv - [:,6] array of positions and velocities along the orbit
vxvv_err= [:,6] array of errors on positions and velocities along the orbit (if None, these are set to 0.01)
pot= Potential to fit the orbit in
Keywords related to the input data:
radec= if True, input vxvv and vxvv_err are [ra,dec,d,mu_ra, mu_dec,vlos] in [deg,deg,kpc,mas/yr,mas/yr,km/s] (all J2000.0; mu_ra = mu_ra * cos dec); the attributes of the current Orbit are used to convert between these coordinates and Galactocentric coordinates
lb= if True, input vxvv and vxvv_err are [long,lat,d,mu_ll, mu_bb,vlos] in [deg,deg,kpc,mas/yr,mas/yr,km/s] (mu_ll = mu_ll * cos lat); the attributes of the current Orbit are used to convert between these coordinates and Galactocentric coordinates
customsky= if True, input vxvv and vxvv_err are [custom long,custom lat,d,mu_customll, mu_custombb,vlos] in [deg,deg,kpc,mas/yr,mas/yr,km/s] (mu_ll = mu_ll * cos lat) where custom longitude and custom latitude are a custom set of sky coordinates (e.g., ecliptic) and the proper motions are also expressed in these coordinats; you need to provide the functions lb_to_customsky and pmllpmbb_to_customsky to convert to the custom sky coordinates (these should have the same inputs and outputs as lb_to_radec and pmllpmbb_to_pmrapmdec); the attributes of the current Orbit are used to convert between these coordinates and Galactocentric coordinates
obs=[X,Y,Z,vx,vy,vz] - (optional) position and velocity of observer
(in kpc and km/s) (default=Object-wide default)
Cannot be an Orbit instance with the orbit of the reference point, as w/ the ra etc. functions
ro= distance in kpc corresponding to R=1. (default: taken from object)
vo= velocity in km/s corresponding to v=1. (default: taken from object)
lb_to_customsky= function that converts l,b,degree=False to the custom sky coordinates (like lb_to_radec); needs to be given when customsky=True
pmllpmbb_to_customsky= function that converts pmll,pmbb,l,b,degree=False to proper motions in the custom sky coordinates (like pmllpmbb_to_pmrapmdec); needs to be given when customsky=True
Keywords related to the orbit integrations:
tintJ= (default: 10) time to integrate orbits for fitting the orbit
ntintJ= (default: 1000) number of time-integration points
integrate_method= (default: 'dopr54_c') integration method to use
disp= (False) display the optimizer's convergence message
OUTPUT:
max of log likelihood
HISTORY:
2014-06-17 - Written - Bovy (IAS)
TEST:
from galpy.potential import LogarithmicHaloPotential; lp= LogarithmicHaloPotential(normalize=1.); from galpy.orbit import Orbit; o= Orbit(vxvv=[1.,0.1,1.1,0.1,0.02,0.]); ts= numpy.linspace(0,10,1000); o.integrate(ts,lp); outts= [0.,0.1,0.2,0.3,0.4]; vxvv= numpy.array([o.R(outts),o.vR(outts),o.vT(outts),o.z(outts),o.vz(outts),o.phi(outts)]).T; of= Orbit(vxvv=[1.02,0.101,1.101,0.101,0.0201,0.001]); of._orb.fit(vxvv,pot=lp,radec=False,tintJ=10,ntintJ=1000)
"""
if pot is None:
try:
pot= self._pot
except AttributeError:
raise AttributeError("Integrate orbit first or specify pot=")
if radec or lb or customsky:
obs, ro, vo= self._parse_radec_kwargs(kwargs,vel=True,dontpop=True)
else:
obs, ro, vo= None, None, None
if customsky \
and (lb_to_customsky is None or pmllpmbb_to_customsky is None):
raise IOError('if customsky=True, the functions lb_to_customsky and pmllpmbb_to_customsky need to be given')
new_vxvv, maxLogL= _fit_orbit(self,vxvv,vxvv_err,pot,radec=radec,lb=lb,
customsky=customsky,
lb_to_customsky=lb_to_customsky,
pmllpmbb_to_customsky=pmllpmbb_to_customsky,
tintJ=tintJ,ntintJ=ntintJ,
integrate_method=integrate_method,
ro=ro,vo=vo,obs=obs,disp=disp)
#Setup with these new initial conditions
self.vxvv= new_vxvv
return maxLogL
def plotEz(self,*args,**kwargs):
"""
NAME:
plotEz
PURPOSE:
plot Ez(.) along the orbit
INPUT:
bovy_plot.bovy_plot inputs
OUTPUT:
figure to output device
HISTORY:
2014-06-16 - Written - Bovy (IAS)
"""
if kwargs.pop('normed',False):
kwargs['d2']= 'Eznorm'
else:
kwargs['d2']= 'Ez'
self.plot(*args,**kwargs)
def plotER(self,*args,**kwargs):
"""
NAME:
plotER
PURPOSE:
plot ER(.) along the orbit
INPUT:
bovy_plot.bovy_plot inputs
OUTPUT:
figure to output device
HISTORY:
2014-06-16 - Written - Bovy (IAS)
"""
if kwargs.pop('normed',False):
kwargs['d2']= 'ERnorm'
else:
kwargs['d2']= 'ER'
self.plot(*args,**kwargs)
def plotEzJz(self,*args,**kwargs):
"""
NAME:
plotEzJz
PURPOSE:
plot E_z(.)/sqrt(dens(R)) along the orbit
INPUT:
pot= Potential instance or list of instances in which the orbit was
integrated
d1= - plot Ez vs d1: e.g., 't', 'z', 'R', 'vR', 'vT', 'vz'
+bovy_plot.bovy_plot inputs
OUTPUT:
figure to output device
HISTORY:
2010-08-08 - Written - Bovy (NYU)
"""
labeldict= {'t':r'$t$','R':r'$R$','vR':r'$v_R$','vT':r'$v_T$',
'z':r'$z$','vz':r'$v_z$','phi':r'$\phi$',
'x':r'$x$','y':r'$y$','vx':r'$v_x$','vy':r'$v_y$'}
if not 'pot' in kwargs:
try:
pot= self._pot
except AttributeError:
raise AttributeError("Integrate orbit first or specify pot=")
else:
pot= kwargs.pop('pot')
d1= kwargs.pop('d1','t')
self.EzJz= [(evaluatePotentials(self.orbit[ii,0],self.orbit[ii,3],
pot,t=self.t[ii])-
evaluatePotentials(self.orbit[ii,0],0.,pot,
phi= self.orbit[ii,5],t=self.t[ii])+
self.orbit[ii,4]**2./2.)/\
nu.sqrt(evaluateDensities(self.orbit[ii,0],0.,pot,phi=self.orbit[ii,5],t=self.t[ii]))\
for ii in range(len(self.t))]
if not 'xlabel' in kwargs:
kwargs['xlabel']= labeldict[d1]
if not 'ylabel' in kwargs:
kwargs['ylabel']= r'$E_z/\sqrt{\rho}$'
if d1 == 't':
plot.bovy_plot(nu.array(self.t),nu.array(self.EzJz)/self.EzJz[0],
*args,**kwargs)
elif d1 == 'z':
plot.bovy_plot(self.orbit[:,3],nu.array(self.EzJz)/self.EzJz[0],
*args,**kwargs)
elif d1 == 'R':
plot.bovy_plot(self.orbit[:,0],nu.array(self.EzJz)/self.EzJz[0],
*args,**kwargs)
elif d1 == 'vR':
plot.bovy_plot(self.orbit[:,1],nu.array(self.EzJz)/self.EzJz[0],
*args,**kwargs)
elif d1 == 'vT':
plot.bovy_plot(self.orbit[:,2], | nu.array(self.EzJz) | numpy.array |
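# Standalone usage sketch of the orbit machinery above, mirroring the TEST line
# in the fit() docstring (requires galpy; FullOrbit is normally reached through
# galpy.orbit.Orbit for 6-element initial conditions):
import numpy
from galpy.potential import LogarithmicHaloPotential
from galpy.orbit import Orbit
lp = LogarithmicHaloPotential(normalize=1.)
o = Orbit(vxvv=[1., 0.1, 1.1, 0.1, 0.02, 0.])  # [R, vR, vT, z, vz, phi]
ts = numpy.linspace(0, 10, 1000)
o.integrate(ts, lp)
print(o.E(), o.e(), o.rap(), o.rperi(), o.zmax())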
"""
test_standard.py - This module provides unit tests on the qoc.standard module.
"""
### qoc.standard.constants ###
def test_constants():
import numpy as np
from qoc.standard.constants import (get_creation_operator,
get_annihilation_operator)
big = 100
# Use the fact that (create)(annihilate) is the number operator
# to test the creation and annihilation operator methods.
for i in range(1, big):
analytic_number_operator = np.diag(np.arange(i))
generated_number_operator = np.matmul(get_creation_operator(i), get_annihilation_operator(i))
assert np.allclose(generated_number_operator, analytic_number_operator)
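# For reference, a standard matrix construction of the ladder operators this
# test relies on (a sketch of the usual truncated forms, not necessarily qoc's
# own implementation):
def _annihilation_sketch(d):
    import numpy as np
    return np.diag(np.sqrt(np.arange(1, d)), k=1)
def _creation_sketch(d):
    return _annihilation_sketch(d).conj().T
# _creation_sketch(d) @ _annihilation_sketch(d) equals np.diag(np.arange(d)),
# the analytic number operator checked above.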
### qoc.standard.costs ###
# TODO: implement me
def test_controlarea():
pass
# TODO: implement me
def test_controlnorm():
pass
# TODO: implement me
def test_controlvariation():
pass
def test_forbiddensities():
import numpy as np
from qoc.standard import conjugate_transpose
from qoc.standard.costs.forbiddensities import ForbidDensities
system_eval_count = 11
state0 = np.array([[1], [0]])
density0 = np.matmul(state0, conjugate_transpose(state0))
forbid0_0 = np.array([[1], [0]])
density0_0 = np.matmul(forbid0_0, conjugate_transpose(forbid0_0))
forbid0_1 = np.divide(np.array([[1], [1]]), np.sqrt(2))
density0_1 = np.matmul(forbid0_1, conjugate_transpose(forbid0_1))
state1 = np.array([[0], [1]])
density1 = np.matmul(state1, conjugate_transpose(state1))
forbid1_0 = np.divide(np.array([[1], [1]]), np.sqrt(2))
density1_0 = np.matmul(forbid1_0, conjugate_transpose(forbid1_0))
forbid1_1 = np.divide(np.array([[1j], [1j]]), np.sqrt(2))
density1_1 = np.matmul(forbid1_1, conjugate_transpose(forbid1_1))
densities = np.stack((density0, density1,))
forbidden_densities0 = np.stack((density0_0, density0_1,))
forbidden_densities1 = np.stack((density1_0, density1_1,))
forbidden_densities = np.stack((forbidden_densities0, forbidden_densities1,))
fd = ForbidDensities(forbidden_densities, system_eval_count)
cost = fd.cost(None, densities, None)
expected_cost = 7 / 640
assert(np.allclose(cost, expected_cost,))
def test_forbidstates():
import numpy as np
from qoc.standard.costs.forbidstates import ForbidStates
system_eval_count = 11
state0 = np.array([[1], [0]])
forbid0_0 = np.array([[1], [0]])
forbid0_1 = np.divide(np.array([[1], [1]]), np.sqrt(2))
state1 = np.array([[0], [1]])
forbid1_0 = np.divide(np.array([[1], [1]]), np.sqrt(2))
forbid1_1 = np.divide(np.array([[1j], [1j]]), np.sqrt(2))
states = np.stack((state0, state1,))
forbidden_states0 = np.stack((forbid0_0, forbid0_1,))
forbidden_states1 = np.stack((forbid1_0, forbid1_1,))
forbidden_states = np.stack((forbidden_states0, forbidden_states1,))
fs = ForbidStates(forbidden_states, system_eval_count)
cost = fs.cost(None, states, None)
expected_cost = np.divide(5, 80)
assert(np.allclose(cost, expected_cost,))
def test_targetdensityinfidelity():
import numpy as np
from qoc.standard import conjugate_transpose
from qoc.standard.costs.targetdensityinfidelity import TargetDensityInfidelity
state0 = np.array([[0], [1]])
density0 = np.matmul(state0, conjugate_transpose(state0))
target_state0 = np.array([[1], [0]])
target_density0 = np.matmul(target_state0, conjugate_transpose(target_state0))
densities = np.stack((density0,), axis=0)
targets = np.stack((target_density0,), axis=0)
ti = TargetDensityInfidelity(targets)
cost = ti.cost(None, densities, None)
assert(np.allclose(cost, 1))
ti = TargetDensityInfidelity(densities)
cost = ti.cost(None, densities, None)
assert(np.allclose(cost, 0.5))
state0 = np.array([[1], [0]])
state1 = (np.array([[1j], [1]]) / np.sqrt(2))
density0 = np.matmul(state0, conjugate_transpose(state0))
density1 = np.matmul(state1, conjugate_transpose(state1))
target_state0 = np.array([[1j], [0]])
target_state1 = np.array([[1], [0]])
target_density0 = np.matmul(target_state0, conjugate_transpose(target_state0))
target_density1 = np.matmul(target_state1, conjugate_transpose(target_state1))
densities = np.stack((density0, density1,), axis=0)
targets = np.stack((target_density0, target_density1,), axis=0)
ti = TargetDensityInfidelity(targets)
cost = ti.cost(None, densities, None)
expected_cost = 0.625
assert(np.allclose(cost, expected_cost))
def test_targetdensityinfidelitytime():
import numpy as np
from qoc.standard import conjugate_transpose
from qoc.standard.costs.targetdensityinfidelitytime import TargetDensityInfidelityTime
system_eval_count = 11
state0 = np.array([[0], [1]])
density0 = np.matmul(state0, conjugate_transpose(state0))
target_state0 = np.array([[1], [0]])
target_density0 = np.matmul(target_state0, conjugate_transpose(target_state0))
densities = | np.stack((density0,), axis=0) | numpy.stack |
from itertools import product
import numpy as np
import pytest
import sympy as sym
import symopt.config as config
from symopt.problem import OptimizationProblem
tol = 1.0e-8
wrap_using_values = ['lambdify', 'autowrap']
def needs_ipopt(test_func):
def new_test_func(solver, wrap_using):
if solver == 'ipopt' and not config.HAS_IPOPT:
pytest.skip(
"Test requires optional dependency ipopt, which is not "
"installed.")
else:
return test_func(solver, wrap_using)
return new_test_func
@pytest.mark.parametrize("solver,wrap_using",
product(["ipopt", "slsqp"], wrap_using_values))
@needs_ipopt
def test_prob18(solver, wrap_using):
""" problem 18 from the Hock-Schittkowski test suite """
if solver == "ipopt" and not config.HAS_IPOPT:
pytest.skip(
"Test requires optional dependency ipopt, which is not installed.")
x = sym.MatrixSymbol('x', 2, 1)
p = sym.Symbol('p')
prob = OptimizationProblem(mode='min', wrap_using=wrap_using)
prob.add_parameter(p)
prob.add_variable(x, lb=[2, 0], ub=[p, p])
prob.add_constraints_from([x[0] * x[1] >= 25,
x[0] ** 2 + x[1] ** 2 >= 25])
prob.obj = x[0] ** 2 / 100 + x[1] ** 2
x0 = [2, 2]
res_50 = prob.solve(x0, 50, solver=solver, tol=tol)
assert res_50['success']
assert np.allclose(res_50['x'], np.array([15.8114, 1.58114]))
res_20 = prob.solve(x0, 20, solver=solver, tol=tol)
assert res_20['success']
assert np.allclose(res_20['x'], | np.array([15.8114, 1.58114]) | numpy.array |
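# Independent cross-check of problem 18 with plain scipy (a sketch using the
# same objective, bounds, and constraints as above, with the parameter p = 50):
import numpy as np
from scipy.optimize import minimize
res = minimize(lambda x: x[0] ** 2 / 100 + x[1] ** 2, x0=[2, 2], method='SLSQP',
               bounds=[(2, 50), (0, 50)],
               constraints=[{'type': 'ineq', 'fun': lambda x: x[0] * x[1] - 25},
                            {'type': 'ineq', 'fun': lambda x: x[0] ** 2 + x[1] ** 2 - 25}])
# res.x should come out close to [15.8114, 1.58114], the reference solution used above.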
from __future__ import division
import numpy as np
from scipy import interpolate
import pandas as pd
def load_run(run_num, df):
"""Load in trial data.
Parameters
----------
run_num : int
Which run to load.
df : DataFrame
The DataFrame loaded from the original excel file.
Returns
-------
pos : array
(x, z) positions
tvec : array
Time vector for the run.
dt : float
Sampling interval between data points.
"""
# sampling rate
# http://rsif.royalsocietypublishing.org/content/10/80/20120794/suppl/DC1
if run_num <= 7:
dt = 1 / 60.
else:
dt = 1 / 125.
xkey = "'Caribou_Trial_{0:02d}_Xvalues'".format(run_num)
zkey = "'Caribou_Trial_{0:02d}_Zvalues'".format(run_num)
d = df[[xkey, zkey]]
d = np.array(d)
# get rid of nans and a bunch of junky zeros starting at row 301
start_bad = np.where(np.isnan(d))[0]
if len(start_bad) > 0:
start_bad = start_bad[0]
d = d[:start_bad]
# get rid of zeros (if we get past rows 301...)
start_bad = np.where(d == 0.)[0]
if len(d) > 300 and len(start_bad) > 0:
start_bad = start_bad[0]
d = d[:start_bad]
tvec = np.arange(0, len(d)) * dt
return d, tvec, dt
def calc_vel(pos_data, dt):
"""Velocity in the x and z directions.
Parameters
----------
pos_data : array
(x, z) position information
dt : float
Sampling rate
Returns
-------
vel : array
(vx, vz)
"""
vx = np.gradient(pos_data[:, 0], dt)
vy = np.gradient(pos_data[:, 1], dt)
return np.c_[vx, vy]
def calc_accel(vel_data, dt):
"""Acceleration in the x and z directions.
Parameters
----------
vel_data : array
(vx, vz) velocity data
dt : float
Sampling rate
Returns
-------
accel : array
(ax, az)
"""
ax = np.gradient(vel_data[:, 0], dt)
ay = np.gradient(vel_data[:, 1], dt)
return np.c_[ax, ay]
def calc_vel_mag(vel_data):
"""Velocity magnitude.
Parameters
----------
vel_data : array
(vx, vz) velocity data
Returns
-------
vel_mag : array
np.sqrt(vx**2 + vz**2)
"""
return np.sqrt(vel_data[:, 0]**2 + vel_data[:, 1]**2)
def calc_gamma(vel_data):
"""Glide angle.
Parameters
----------
vel_data : array
(vx, vz)
Returns
-------
gamma : array
Glide angle in rad
"""
return -np.arctan2(vel_data[:, 1], vel_data[:, 0])
def splfit_all(data, tvec, k=5, s=.5):
"""Fit a spline to the data.
"""
posx = interpolate.UnivariateSpline(tvec, data[:, 0], k=k, s=s)
posz = interpolate.UnivariateSpline(tvec, data[:, 1], k=k, s=s)
velx = posx.derivative(1)
velz = posz.derivative(1)
accx = posx.derivative(2)
accz = posz.derivative(2)
pos = np.c_[posx(tvec), posz(tvec)]
vel = np.c_[velx(tvec), velz(tvec)]
acc = np.c_[accx(tvec), accz(tvec)]
return pos, vel, acc
def polyfit(data, tvec, intfun):
"""Fit a spline to the data.
"""
posx = intfun(tvec, data[:, 0])
posz = intfun(tvec, data[:, 1])
velx = posx.derivative(1)
velz = posz.derivative(1)
accx = posx.derivative(2)
accz = posz.derivative(2)
pos = np.c_[posx(tvec), posz(tvec)]
vel = np.c_[velx(tvec), velz(tvec)]
acc = np.c_[accx(tvec), accz(tvec)]
return pos, vel, acc
def polyfit_all(data, tvec, deg, wn=0):
"""Fit a spline to the data.
TODO: this does not to the mirroring correctly!
"""
start = data[:wn][::-1]
stop = data[-wn:][::-1]
datanew = np.r_[start, data, stop]
tvecnew = np.r_[tvec[:wn][::-1], tvec, tvec[-wn:][::-1]]
posx = np.polyfit(tvecnew, datanew[:, 0], deg)
posz = np.polyfit(tvecnew, datanew[:, 1], deg)
velx = np.polyder(posx, 1)
velz = np.polyder(posz, 1)
accx = np.polyder(posx, 2)
accz = np.polyder(posz, 2)
pos = np.c_[np.polyval(posx, tvec), np.polyval(posz, tvec)]
vel = np.c_[np.polyval(velx, tvec), np.polyval(velz, tvec)]
acc = np.c_[np.polyval(accx, tvec), np.polyval(accz, tvec)]
return pos, vel, acc
def fill_df(pos, vel, acc, gamma, velmag, tvec, i):
"""Put one trial's data into a DataFrame.
Parameters
----------
pos : (n x 2) array
x and z position data
vel : (n x 2) array
x and z velocity data
acc : (n x 2) array
x and z acceleration data
gamma : (n x 1) array
Glide angles in deg
velmag : (n x 1) array
Velocity magnitude
tvec : (n x 1) array
Time points
i : int
Trial number that becomes the column name
Returns
-------
posx, posz, velx, velz, accx, accz, gamm, vmag : DataFrame
Data in a DataFrame
"""
posx = pd.DataFrame(pos[:, 0], index=tvec, columns=[str(i)])
posz = pd.DataFrame(pos[:, 1], index=tvec, columns=[str(i)])
velx = pd.DataFrame(vel[:, 0], index=tvec, columns=[str(i)])
velz = pd.DataFrame(vel[:, 1], index=tvec, columns=[str(i)])
accx = pd.DataFrame(acc[:, 0], index=tvec, columns=[str(i)])
accz = pd.DataFrame(acc[:, 1], index=tvec, columns=[str(i)])
gamm = pd.DataFrame(gamma, index=tvec, columns=[str(i)])
vmag = pd.DataFrame(velmag, index=tvec, columns=[str(i)])
return posx, posz, velx, velz, accx, accz, gamm, vmag
def window_bounds(i, n, wn):
"""Start and stop indices for a moving window.
Parameters
----------
i : int
Current index
n : int
Total number of points
wn : int, odd
Total window size
Returns
-------
start : int
Start index
stop : int
Stop index
at_end : bool
Whether we are truncating the window
"""
at_end = False
hw = wn // 2
start = i - hw
stop = i + hw + 1
if start < 0:
at_end = True
start = 0
elif stop > n:
at_end = True
stop = n
return start, stop, at_end
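# Worked example of window_bounds at the edges (wn=5, so hw=2, with n=10):
#   window_bounds(0, 10, 5) -> (0, 3, True)    truncated at the start
#   window_bounds(5, 10, 5) -> (3, 8, False)   full window
#   window_bounds(9, 10, 5) -> (7, 10, True)   truncated at the end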
def moving_window_pts(data, tvec, wn, deg=2, drop_deg=False):
"""Perform moving window smoothing.
Parameters
----------
data : (n x 2) array
Data to smooth and take derivatives of
tvec : (n x 1) array
Time vector
wn : int, odd
Total window size
deg : int, default=2
Polynomial degree to fit to data
drop_deg : bool, default=False
Whether to drop in interpolating polynomial at the
ends of the time series, since the truncated window can
negatively affect things.
Returns
-------
spos : (n x 2) array
x and z smoothed data
svel : (n x 2) array
First derivatives of smoothed data (velocity)
sacc : (n x 2) array
Second derivatives of smoothed data (acceleration)
"""
deg_orig = deg
posx, posz = data.T
npts = len(posx)
spos = np.zeros((npts, 2))
svel = | np.zeros((npts, 2)) | numpy.zeros |
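# Minimal usage sketch of the helpers above; 'caribou_trials.xls' is only a
# placeholder for the original excel file referenced in load_run's docstring:
# df = pd.read_excel('caribou_trials.xls')
# pos, tvec, dt = load_run(2, df)
# vel = calc_vel(pos, dt)
# acc = calc_accel(vel, dt)
# gamma_deg = np.rad2deg(calc_gamma(vel))
# vmag = calc_vel_mag(vel)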
import numpy as np
# import pandas as pd
import matplotlib.pyplot as plt
import joblib
from sklearn.datasets import load_wine
from sklearn.model_selection import train_test_split, GridSearchCV, learning_curve
from sklearn.preprocessing import StandardScaler
from sklearn.feature_selection import SelectKBest, mutual_info_classif, RFECV
from sklearn.pipeline import make_pipeline
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import RidgeClassifierCV
from sklearn.metrics import matthews_corrcoef, confusion_matrix, classification_report
rng = np.random.RandomState(0) # initialize random number generator
# %%
# Load the wine dataset: n_samples=178, n_features=13.
data, y = load_wine(return_X_y=True, as_frame=True)
print(data.info())
# %%
# Select best features based on mutual information.
select_features = SelectKBest(score_func=mutual_info_classif, k=11).fit(data, y)
# Plot MI scores
fig, ax = plt.subplots(2, 1, figsize=(6, 10), dpi=100)
ax[0].bar(np.arange(data.columns.shape[0]), select_features.scores_)
ax[0].set_xticks(np.arange(data.shape[1]))
ax[0].set(title='Mutual Information scores for features',
xlabel='Feature #', ylabel='MI')
# Arbitrary choice: eliminate 2 features with the lowest MI scores.
print("#: FEATURE NAME")
for i, col in enumerate(data.columns):
print(f'{i}: {col}')
print('\nCan eliminate two features with lowest MI score: ',
data.columns[2], ', ', data.columns[7], '.', sep='')
del i, col
# Get new dataset (convert to dataframe) with reduced number of features
# X = pd.DataFrame(select_features.transform(data), columns=data.columns.delete([2, 7]))
# Try recursive feature elimination (with cross-validation) using SVM with linear kernel.
clf = SVC(kernel='linear')
rfecv = RFECV(clf, step=1, min_features_to_select=1, cv=5, scoring='accuracy')
rfecv.fit(data, y)
print(f"\nOptimal number of features using RFECV: {rfecv.n_features_}")
# Plot number of features vs. cross-validation scores
ax[1].plot(np.arange(1, len(rfecv.grid_scores_) + 1), rfecv.grid_scores_, '.-r')
ax[1].set(title='Recursive feature elimination using SVM with linear kernel',
xlabel="Number of features selected", ylabel="Cross validation score (accuracy)")
ax[1].set_xticks(np.arange(rfecv.grid_scores_.shape[0] + 1))
fig.savefig('featureselection.png')
# RFE result: keep all features.
# Same result when two features with low MI were already eliminated.
print('\nKeeping all 13 features.')
# %%
# Split data infor train and test sets.
X_train, X_test, y_train, y_test = train_test_split(data, y, random_state=rng,
test_size=0.2, stratify=y)
# %%
# Try different estimators
estimators = [GaussianNB(),
RidgeClassifierCV(alphas=np.logspace(-3, 1, num=10)),
SVC(kernel='linear'),
RandomForestClassifier(random_state=rng)]
models = dict()
for estimator in estimators:
estimator_name = str(estimator)[:str(estimator).index('(')]
# Make a pipeline
pipe = make_pipeline(StandardScaler(), estimator)
# print(pipe.get_params())
if 'GaussianNB' in estimator_name:
print("\nEstimator: Gaussian Naive Bayes Classifier.")
model = pipe.fit(X_train, y_train)
elif 'Ridge' in estimator_name:
print("\nEstimator: Ridge Classifier with cross-validation.")
model = pipe.fit(X_train, y_train)
elif 'SVC' in estimator_name:
print("\nEstimator: Support Vector Machine Classifier.")
model = pipe.fit(X_train, y_train)
else:
hyperparams = {"randomforestclassifier__max_features": ["auto", "sqrt"],
"randomforestclassifier__max_leaf_nodes": [None, 2, 3, 5],
"randomforestclassifier__max_depth": [None, 1, 3]}
model = GridSearchCV(pipe, hyperparams, cv=10)
model.fit(X_train, y_train)
print("\nEstimator: Random Forest Classifier. \n"
"Best parameters after grid search with cross-validation (cv=10): \n"
f"{model.best_params_}\nwith score {model.best_score_}")
# If model.refit is true (default), the model automatically refits to all of X_train.
print(f"Automatic refit to full X_train: {model.refit}")
y_pred = model.predict(X_test) # Predict classes in test set
# *** Calculate metrics of prediction quality ***
# print('Matthews correlation coefficient=', matthews_corrcoef(y_test, y_pred))
# print('Confusion matrix:\n', confusion_matrix(y_test, y_pred))
print(classification_report(y_test, y_pred))
# Append model to 'models' dict (requires Python 3.9)
models |= {estimator_name: {'model': model,
'y_pred': y_pred,
'matthews': matthews_corrcoef(y_test, y_pred),
'confusion': confusion_matrix(y_test, y_pred)}
}
# Compute learning curve
lc_sizes, train_scores, cv_scores = learning_curve(pipe, X_train, y_train, cv=5,
train_sizes=np.linspace(0.1, 1.0, 10),
scoring='accuracy')
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
cv_scores_mean = np.mean(cv_scores, axis=1)
cv_scores_std = | np.std(cv_scores, axis=1) | numpy.std |
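    # Sketch of how the learning-curve statistics above are typically plotted
    # (shaded bands show +/- one standard deviation):
    fig_lc, ax_lc = plt.subplots(figsize=(6, 4), dpi=100)
    ax_lc.plot(lc_sizes, train_scores_mean, 'o-', label='training accuracy')
    ax_lc.plot(lc_sizes, cv_scores_mean, 's-', label='cross-validation accuracy')
    ax_lc.fill_between(lc_sizes, train_scores_mean - train_scores_std,
                       train_scores_mean + train_scores_std, alpha=0.2)
    ax_lc.fill_between(lc_sizes, cv_scores_mean - cv_scores_std,
                       cv_scores_mean + cv_scores_std, alpha=0.2)
    ax_lc.set(title=f'Learning curve: {estimator_name}',
              xlabel='Training set size', ylabel='Accuracy')
    ax_lc.legend()
    fig_lc.savefig(f'learningcurve_{estimator_name}.png')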
# From http://www.hs.uni-hamburg.de/DE/Ins/Per/Czesla/PyA/PyA/pyaslDoc/aslDoc/unredDoc.html
import numpy as np
import scipy.interpolate as interpolate
def unred(wave, flux, ebv, R_V=3.1, LMC2=False, AVGLMC=False):
"""
Deredden a flux vector using the Fitzpatrick (1999) parameterization
Parameters
----------
wave : array
Wavelength in Angstrom
flux : array
Calibrated flux vector, same number of elements as wave.
    ebv : float
        Color excess E(B-V). If a negative ebv is supplied,
        then fluxes will be reddened rather than dereddened.
    R_V : float, optional
        Ratio of total to selective extinction, R(V) = A(V)/E(B-V).
        The default is 3.1.
AVGLMC : boolean
If True, then the default fit parameters c1,c2,c3,c4,gamma,x0
are set to the average values determined for reddening in the
general Large Magellanic Cloud (LMC) field by
Misselt et al. (1999, ApJ, 515, 128). The default is
False.
LMC2 : boolean
If True, the fit parameters are set to the values determined
for the LMC2 field (including 30 Dor) by Misselt et al.
Note that neither `AVGLMC` nor `LMC2` will alter the default value
of R_V, which is poorly known for the LMC.
Returns
-------
new_flux : array
Dereddened flux vector, same units and number of elements
as input flux.
Notes
-----
.. note:: This function was ported from the IDL Astronomy User's Library.
:IDL - Documentation:
PURPOSE:
Deredden a flux vector using the Fitzpatrick (1999) parameterization
EXPLANATION:
The R-dependent Galactic extinction curve is that of Fitzpatrick & Massa
(Fitzpatrick, 1999, PASP, 111, 63; astro-ph/9809387 ).
Parameterization is valid from the IR to the far-UV (3.5 microns to 0.1
microns). UV extinction curve is extrapolated down to 912 Angstroms.
CALLING SEQUENCE:
FM_UNRED, wave, flux, ebv, [ funred, R_V = , /LMC2, /AVGLMC, ExtCurve=
gamma =, x0=, c1=, c2=, c3=, c4= ]
INPUT:
WAVE - wavelength vector (Angstroms)
FLUX - calibrated flux vector, same number of elements as WAVE
If only 3 parameters are supplied, then this vector will
updated on output to contain the dereddened flux.
EBV - color excess E(B-V), scalar. If a negative EBV is supplied,
then fluxes will be reddened rather than dereddened.
OUTPUT:
FUNRED - unreddened flux vector, same units and number of elements
as FLUX
OPTIONAL INPUT KEYWORDS
R_V - scalar specifying the ratio of total to selective extinction
R(V) = A(V) / E(B - V). If not specified, then R = 3.1
Extreme values of R(V) range from 2.3 to 5.3
/AVGLMC - if set, then the default fit parameters c1,c2,c3,c4,gamma,x0
are set to the average values determined for reddening in the
general Large Magellanic Cloud (LMC) field by Misselt et al.
(1999, ApJ, 515, 128)
/LMC2 - if set, then the fit parameters are set to the values determined
for the LMC2 field (including 30 Dor) by Misselt et al.
Note that neither /AVGLMC or /LMC2 will alter the default value
of R_V which is poorly known for the LMC.
The following five input keyword parameters allow the user to customize
the adopted extinction curve. For example, see Clayton et al. (2003,
ApJ, 588, 871) for examples of these parameters in different interstellar
environments.
x0 - Centroid of 2200 A bump in microns (default = 4.596)
gamma - Width of 2200 A bump in microns (default =0.99)
c3 - Strength of the 2200 A bump (default = 3.23)
c4 - FUV curvature (default = 0.41)
c2 - Slope of the linear UV extinction component
(default = -0.824 + 4.717/R)
c1 - Intercept of the linear UV extinction component
(default = 2.030 - 3.007*c2
"""
x = 10000./ wave # Convert to inverse microns
curve = x*0.
# Set some standard values:
x0 = 4.596
gamma = 0.99
c3 = 3.23
c4 = 0.41
c2 = -0.824 + 4.717/R_V
c1 = 2.030 - 3.007*c2
if LMC2:
x0 = 4.626
gamma = 1.05
c4 = 0.42
c3 = 1.92
c2 = 1.31
c1 = -2.16
elif AVGLMC:
x0 = 4.596
gamma = 0.91
c4 = 0.64
c3 = 2.73
c2 = 1.11
c1 = -1.28
# Compute UV portion of A(lambda)/E(B-V) curve using FM fitting function and
# R-dependent coefficients
xcutuv = np.array([10000.0/2700.0])
xspluv = 10000.0/np.array([2700.0,2600.0])
iuv = np.where(x >= xcutuv)[0]
N_UV = len(iuv)
iopir = | np.where(x < xcutuv) | numpy.where |
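# Example call (a sketch with made-up numbers, illustrating the documented
# signature; wave in Angstrom, ebv = E(B-V), Milky-Way-like R_V = 3.1 default):
# wave = np.linspace(1200., 30000., 500)
# flux = np.ones_like(wave)
# flux_dered = unred(wave, flux, ebv=0.1)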
"""Test HDF5 I/O tools."""
import os
import h5py
import numpy as np
import pytest
from becquerel.io.h5 import ensure_string, is_h5_filename, open_h5, write_h5, read_h5
TEST_OUTPUTS = os.path.join(os.path.split(__file__)[0], "test_outputs")
if not os.path.exists(TEST_OUTPUTS):
os.mkdir(TEST_OUTPUTS)
DSETS = {
"dset_1d": np.ones(100, dtype=int),
"dset_2d": | np.ones((30, 30), dtype=float) | numpy.ones |
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown copyright. The Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Unit tests for the wind_components.ResolveWindComponents plugin."""
import unittest
import iris
import numpy as np
from iris.coord_systems import OSGB
from iris.coords import DimCoord
from iris.tests import IrisTest
from improver.synthetic_data.set_up_test_cubes import set_up_variable_cube
from improver.wind_calculations.wind_components import ResolveWindComponents
RAD_TO_DEG = 180.0 / np.pi
def set_up_cube(data_2d, name, unit):
"""Set up a 2D test cube of wind direction or speed"""
cube = set_up_variable_cube(
data_2d.astype(np.float32), name=name, units=unit, spatial_grid="equalarea"
)
cube.coord("projection_x_coordinate").points = np.linspace(
150000, 250000, data_2d.shape[1]
)
cube.coord("projection_y_coordinate").points = np.linspace(
0, 600000, data_2d.shape[0]
)
for axis in ["x", "y"]:
cube.coord(axis=axis).units = "metres"
cube.coord(axis=axis).coord_system = OSGB()
cube.coord(axis=axis).bounds = None
return cube
def add_new_dimension(cube, npoints, name, unit):
"""Add a new dimension with npoints by copying cube data"""
cubelist = iris.cube.CubeList([])
for i in range(npoints):
newcube = cube.copy(cube.data)
newcube.add_aux_coord(DimCoord(i, name, unit))
cubelist.append(newcube)
merged_cube = cubelist.merge_cube()
return merged_cube
class Test__repr__(IrisTest):
"""Tests the __repr__ method"""
def test_basic(self):
"""Tests the output string is as expected"""
result = str(ResolveWindComponents())
self.assertEqual(result, "<ResolveWindComponents>")
class Test_calc_true_north_offset(IrisTest):
"""Tests the calc_true_north_offset function"""
def setUp(self):
"""Set up a target cube with OSGB projection"""
wind_angle = np.zeros((3, 5), dtype=np.float32)
self.directions = set_up_cube(wind_angle, "wind_to_direction", "degrees")
self.plugin = ResolveWindComponents()
def test_basic(self):
"""Test function returns correct type"""
result = self.plugin.calc_true_north_offset(self.directions)
self.assertIsInstance(result, np.ndarray)
def test_values(self):
"""Test that for UK National Grid coordinates the angle adjustments
are sensible"""
expected_result = np.array(
[
[2.651483, 2.386892, 2.122119, 1.857182, 1.592121],
[2.921058, 2.629620, 2.337963, 2.046132, 1.754138],
[3.223816, 2.902300, 2.580523, 2.258494, 1.936247],
],
dtype=np.float32,
)
result = self.plugin.calc_true_north_offset(self.directions)
self.assertArrayAlmostEqual(RAD_TO_DEG * result, expected_result)
class Test_resolve_wind_components(IrisTest):
"""Tests the resolve_wind_components method"""
def setUp(self):
"""Set up some arrays to convert"""
self.plugin = ResolveWindComponents()
wind_speed = 10.0 * np.ones((4, 4), dtype=np.float32)
wind_angle = np.array(
[
[0.0, 30.0, 45.0, 60.0],
[90.0, 120.0, 135.0, 150.0],
[180.0, 210.0, 225.0, 240.0],
[270.0, 300.0, 315.0, 330.0],
],
dtype=np.float32,
)
self.wind_cube = set_up_cube(wind_speed, "wind_speed", "knots")
self.directions = set_up_cube(wind_angle, "wind_to_direction", "degrees")
self.adjustments = np.zeros((4, 4), dtype=np.float32)
def test_basic(self):
"""Test function returns correct type"""
uspeed, vspeed = self.plugin.resolve_wind_components(
self.wind_cube, self.directions, self.adjustments
)
self.assertIsInstance(uspeed, iris.cube.Cube)
self.assertIsInstance(vspeed, iris.cube.Cube)
def test_values(self):
"""Test correct values are returned for well-behaved angles"""
expected_uspeed = 5.0 * np.array(
[
[0.0, 1.0, np.sqrt(2.0), np.sqrt(3.0)],
[2.0, np.sqrt(3.0), np.sqrt(2.0), 1.0],
[0.0, -1.0, -np.sqrt(2.0), -np.sqrt(3.0)],
[-2.0, -np.sqrt(3.0), -np.sqrt(2.0), -1.0],
],
dtype=np.float32,
)
expected_vspeed = 5 * np.array(
[
[2.0, np.sqrt(3.0), | np.sqrt(2.0) | numpy.sqrt |
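# The expected arrays above encode u = speed * sin(direction) and
# v = speed * cos(direction) (before any true-north adjustment); e.g. for the
# 30-degree entry: 10 * np.sin(np.deg2rad(30.)) == 5.0 and
# 10 * np.cos(np.deg2rad(30.)) == 5.0 * np.sqrt(3.0).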